LuKks committed Dec 4, 2019
1 parent: a9eaf1a, commit: 3262aae
Showing 10 changed files with 753 additions and 751 deletions.
@@ -1,88 +1,92 @@
 package neural

 import (
-  "math"
+	"math"
 )

 // ForwardFn is used to think
-type ForwardFn func (sum float64) float64
+type ForwardFn func(sum float64) float64

 // BackwardFn is used to learn (derivative of forward)
-type BackwardFn func (activation float64) float64
+type BackwardFn func(activation float64) float64

 // LinearForward is the linear fn
-func LinearForward (sum float64) float64 {
-  return sum
+func LinearForward(sum float64) float64 {
+	return sum
 }

 // LinearBackward is the linear derivative
-func LinearBackward (activation float64) float64 {
-  return 1.0
+func LinearBackward(activation float64) float64 {
+	return 1.0
 }

 // SigmoidForward is the sigmoid fn
-func SigmoidForward (sum float64) float64 {
-  return 1.0 / (1.0 + math.Exp(-sum))
+func SigmoidForward(sum float64) float64 {
+	return 1.0 / (1.0 + math.Exp(-sum))
 }

 // SigmoidBackward is the sigmoid derivative
-func SigmoidBackward (activation float64) float64 {
-  return activation * (1.0 - activation)
+func SigmoidBackward(activation float64) float64 {
+	return activation * (1.0 - activation)
 }

 // TanhForward is the tanh fn
-func TanhForward (sum float64) float64 {
-  return math.Tanh(sum)
+func TanhForward(sum float64) float64 {
+	return math.Tanh(sum)
 }

 // TanhBackward is the tanh derivative
-func TanhBackward (activation float64) float64 {
-  return 1 - activation * activation
+func TanhBackward(activation float64) float64 {
+	return 1 - activation*activation
 }

 // ReluForward is the relu fn
-func ReluForward (sum float64) float64 {
-  if sum < 0.0 {
-    return 0.0
-  }
-  return sum
+func ReluForward(sum float64) float64 {
+	if sum < 0.0 {
+		return 0.0
+	}
+	return sum
 }

 // ReluBackward is the relu derivative
-func ReluBackward (activation float64) float64 {
-  if activation <= 0.0 {
-    return 0.0
-  }
-  return 1.0
+func ReluBackward(activation float64) float64 {
+	if activation <= 0.0 {
+		return 0.0
+	}
+	return 1.0
 }

 // ActivationSet is a forward and backward fn with its range
 type ActivationSet struct {
-  // Forward fn
-  Forward ForwardFn
-  // Backward fn
-  Backward BackwardFn
-  // Range of the activation
-  Ranges []float64
+	// Forward fn
+	Forward ForwardFn
+	// Backward fn
+	Backward BackwardFn
+	// Range of the activation
+	Ranges []float64
 }

-func selectActivation (activation string) ActivationSet {
-  set := ActivationSet{}
+func selectActivation(activation string) ActivationSet {
+	set := ActivationSet{}

-  if activation == "linear" {
-    set.Forward = LinearForward
-    set.Backward = LinearBackward
-  } else if activation == "" || activation == "sigmoid" {
-    set.Forward = SigmoidForward
-    set.Backward = SigmoidBackward
-    set.Ranges = []float64{ 0.0, 1.0 }
-  } else if activation == "tanh" {
-    set.Forward = TanhForward
-    set.Backward = TanhBackward
-    set.Ranges = []float64{ -1.0, 1.0 }
-  } else if activation == "relu" {
-    set.Forward = ReluForward
-    set.Backward = ReluBackward
-    set.Ranges = []float64{ 0.0, 1.0 }
-  } else {
-    panic("need a valid activation name")
-  }
+	if activation == "linear" {
+		set.Forward = LinearForward
+		set.Backward = LinearBackward
+	} else if activation == "" || activation == "sigmoid" {
+		set.Forward = SigmoidForward
+		set.Backward = SigmoidBackward
+		set.Ranges = []float64{0.0, 1.0}
+	} else if activation == "tanh" {
+		set.Forward = TanhForward
+		set.Backward = TanhBackward
+		set.Ranges = []float64{-1.0, 1.0}
+	} else if activation == "relu" {
+		set.Forward = ReluForward
+		set.Backward = ReluBackward
+		set.Ranges = []float64{0.0, 1.0}
+	} else {
+		panic("need a valid activation name")
+	}

-  return set
+	return set
 }
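Worth noting about the API in this file: each BackwardFn takes the activation (the forward output), not the pre-activation sum, so the sigmoid derivative is computed as a * (1 - a) and the tanh derivative as 1 - a*a. A minimal standalone sketch that checks this against a finite difference; it copies only the two sigmoid functions from the diff and assumes nothing else from the package:

package main

import (
	"fmt"
	"math"
)

// Copied verbatim from the diff above.
func SigmoidForward(sum float64) float64 {
	return 1.0 / (1.0 + math.Exp(-sum))
}

// The derivative, expressed in terms of the activation a = sigmoid(x).
func SigmoidBackward(activation float64) float64 {
	return activation * (1.0 - activation)
}

func main() {
	const h = 1e-6
	for _, x := range []float64{-2.0, 0.0, 0.5, 3.0} {
		// Central finite difference of the forward fn at x.
		numeric := (SigmoidForward(x+h) - SigmoidForward(x-h)) / (2 * h)
		// The backward fn receives the forward output, not x itself.
		analytic := SigmoidBackward(SigmoidForward(x))
		fmt.Printf("x=%+.1f numeric=%.8f analytic=%.8f\n", x, numeric, analytic)
	}
}

Expressing the derivative in terms of the activation is what lets the backward pass run from stored activations alone, without re-evaluating the forward function.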
@@ -1,66 +1,66 @@
 package main

 import (
-  "fmt"
-  "time"
-  "github.com/lukks/neural-go"
+	"fmt"
+	"github.com/lukks/neural-go"
+	"time"
 )

 const fmtColor = "\033[0;36m%s\033[0m"

-func main () {
-  xor := neural.NewNeural([]*neural.Layer{
-    { Inputs: 2, Units: 3 },
-    { Units: 3 },
-    { Units: 1, Loss: "mse" },
-  })
+func main() {
+	xor := neural.NewNeural([]*neural.Layer{
+		{Inputs: 2, Units: 3},
+		{Units: 3},
+		{Units: 1, Loss: "mse"},
+	})

-  fmt.Printf(fmtColor, "think some values:\n")
-  fmt.Printf("0, 0 [0] -> %f\n", xor.Think([]float64{0, 0}))
-  fmt.Printf("1, 0 [1] -> %f\n", xor.Think([]float64{1, 0}))
-  fmt.Printf("0, 1 [1] -> %f\n", xor.Think([]float64{0, 1}))
-  fmt.Printf("1, 1 [0] -> %f\n", xor.Think([]float64{1, 1}))
+	fmt.Printf(fmtColor, "think some values:\n")
+	fmt.Printf("0, 0 [0] -> %f\n", xor.Think([]float64{0, 0}))
+	fmt.Printf("1, 0 [1] -> %f\n", xor.Think([]float64{1, 0}))
+	fmt.Printf("0, 1 [1] -> %f\n", xor.Think([]float64{0, 1}))
+	fmt.Printf("1, 1 [0] -> %f\n", xor.Think([]float64{1, 1}))

-  fmt.Printf(fmtColor, "learning:\n")
-  start := millis()
+	fmt.Printf(fmtColor, "learning:\n")
+	start := millis()

-  xor = xor.Evolve(neural.Evolve{
-    Population: 20,
-    Mutate: 0.05,
-    Crossover: 0.5,
-    Elitism: 5,
-    Epochs: 100,
-    Iterations: 50,
-    Threshold: 0.00005,
-    Dataset: [][][]float64{
-      { {0, 0}, {0} },
-      { {1, 0}, {1} },
-      { {0, 1}, {1} },
-      { {1, 1}, {0} },
-    },
-    Callback: func (epoch int, loss float64) bool {
-      if epoch % 10 == 0 || epoch == 99 {
-        fmt.Printf("epoch=%v loss=%f elapsed=%v\n", epoch, loss, millis() - start)
-      }
+	xor = xor.Evolve(neural.Evolve{
+		Population: 20,
+		Mutate: 0.05,
+		Crossover: 0.5,
+		Elitism: 5,
+		Epochs: 100,
+		Iterations: 50,
+		Threshold: 0.00005,
+		Dataset: [][][]float64{
+			{{0, 0}, {0}},
+			{{1, 0}, {1}},
+			{{0, 1}, {1}},
+			{{1, 1}, {0}},
+		},
+		Callback: func(epoch int, loss float64) bool {
+			if epoch%10 == 0 || epoch == 99 {
+				fmt.Printf("epoch=%v loss=%f elapsed=%v\n", epoch, loss, millis()-start)
+			}

-      return true
-    },
-  })
+			return true
+		},
+	})

-  fmt.Printf(fmtColor, "think some values:\n")
-  fmt.Printf("0, 0 [0] -> %f\n", xor.Think([]float64{0, 0}))
-  fmt.Printf("1, 0 [1] -> %f\n", xor.Think([]float64{1, 0}))
-  fmt.Printf("0, 1 [1] -> %f\n", xor.Think([]float64{0, 1}))
-  fmt.Printf("1, 1 [0] -> %f\n", xor.Think([]float64{1, 1}))
+	fmt.Printf(fmtColor, "think some values:\n")
+	fmt.Printf("0, 0 [0] -> %f\n", xor.Think([]float64{0, 0}))
+	fmt.Printf("1, 0 [1] -> %f\n", xor.Think([]float64{1, 0}))
+	fmt.Printf("0, 1 [1] -> %f\n", xor.Think([]float64{0, 1}))
+	fmt.Printf("1, 1 [0] -> %f\n", xor.Think([]float64{1, 1}))

-  fmt.Printf(fmtColor, "export:\n")
-  json, _ := xor.Export()
-  fmt.Printf("%s\n", json)
-  // stream the json over network
+	fmt.Printf(fmtColor, "export:\n")
+	json, _ := xor.Export()
+	fmt.Printf("%s\n", json)
+	// stream the json over network

-  // or just xor.ToFile("./evolve.json")
+	// or just xor.ToFile("./evolve.json")
 }

 func millis() int64 {
-  return time.Now().UnixNano() / 1e6
+	return time.Now().UnixNano() / 1e6
 }
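For reference, the Dataset field in this example is a [][][]float64 in which every sample is an {inputs, targets} pair. A hedged sketch of the same Evolve API trained on AND instead of XOR; the layer sizes and hyperparameters here are illustrative, not taken from the repository:

package main

import (
	"fmt"
	"github.com/lukks/neural-go"
)

func main() {
	// Illustrative topology: one hidden layer, sigmoid by default.
	and := neural.NewNeural([]*neural.Layer{
		{Inputs: 2, Units: 3},
		{Units: 1, Loss: "mse"},
	})

	and = and.Evolve(neural.Evolve{
		Population: 20,
		Mutate: 0.05,
		Crossover: 0.5,
		Elitism: 5,
		Epochs: 100,
		Iterations: 50,
		Threshold: 0.00005,
		// Each sample is {inputs, targets}.
		Dataset: [][][]float64{
			{{0, 0}, {0}},
			{{1, 0}, {0}},
			{{0, 1}, {0}},
			{{1, 1}, {1}},
		},
		Callback: func(epoch int, loss float64) bool {
			return true // keep going until Epochs or Threshold is reached
		},
	})

	fmt.Printf("1, 1 [1] -> %f\n", and.Think([]float64{1, 1}))
}

As in the example above, Evolve returns the evolved network, so its result has to be reassigned rather than discarded.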
@@ -1,48 +1,48 @@
 package main

 import (
-  "fmt"
-  "github.com/lukks/neural-go"
+	"fmt"
+	"github.com/lukks/neural-go"
 )

 const fmtColor = "\033[0;36m%s\033[0m"

-func main () {
-  xor := neural.NewNeural([]*neural.Layer{
-    { Inputs: 2, Units: 16, Activation: "sigmoid", Rate: 0.002, Momentum: 0.999 },
-    { Units: 16, Activation: "tanh", Rate: 0.001 },
-    { Units: 1, Activation: "sigmoid", Loss: "mse", Rate: 0.0005 },
-  })
-  // that is just to show different configurations
-  // normally you want same rate and momentum for all layers
-
-  /*
-  // Change rate or momentum to all layers
-  xor.Rate(0.002)
-  xor.Momentum(0.999)
-  // Change to specific layer
-  xor.Layers[0].Rate = 0.002
-  xor.Layers[0].Momentum = 0.999
-  */
-
-  fmt.Printf(fmtColor, "learning:\n")
-  for i := 0; i <= 5000; i++ {
-    loss := xor.Learns([][][]float64{
-      { {0, 0}, {0} },
-      { {1, 0}, {1} },
-      { {0, 1}, {1} },
-      { {1, 1}, {0} },
-    })
-
-    if i % 1000 == 0 {
-      fmt.Printf("iter %v, loss %f\n", i, loss)
-    }
-  }
-
-  fmt.Printf(fmtColor, "think some values:\n")
-  fmt.Printf("0, 0 [0] -> %f\n", xor.Think([]float64{0, 0}))
-  fmt.Printf("1, 0 [1] -> %f\n", xor.Think([]float64{1, 0}))
-  fmt.Printf("0, 1 [1] -> %f\n", xor.Think([]float64{0, 1}))
-  fmt.Printf("1, 1 [0] -> %f\n", xor.Think([]float64{1, 1}))
+func main() {
+	xor := neural.NewNeural([]*neural.Layer{
+		{Inputs: 2, Units: 16, Activation: "sigmoid", Rate: 0.002, Momentum: 0.999},
+		{Units: 16, Activation: "tanh", Rate: 0.001},
+		{Units: 1, Activation: "sigmoid", Loss: "mse", Rate: 0.0005},
+	})
+	// that is just to show different configurations
+	// normally you want same rate and momentum for all layers
+
+	/*
+		// Change rate or momentum to all layers
+		xor.Rate(0.002)
+		xor.Momentum(0.999)
+		// Change to specific layer
+		xor.Layers[0].Rate = 0.002
+		xor.Layers[0].Momentum = 0.999
+	*/
+
+	fmt.Printf(fmtColor, "learning:\n")
+	for i := 0; i <= 5000; i++ {
+		loss := xor.Learns([][][]float64{
+			{{0, 0}, {0}},
+			{{1, 0}, {1}},
+			{{0, 1}, {1}},
+			{{1, 1}, {0}},
+		})
+
+		if i%1000 == 0 {
+			fmt.Printf("iter %v, loss %f\n", i, loss)
+		}
+	}
+
+	fmt.Printf(fmtColor, "think some values:\n")
+	fmt.Printf("0, 0 [0] -> %f\n", xor.Think([]float64{0, 0}))
+	fmt.Printf("1, 0 [1] -> %f\n", xor.Think([]float64{1, 0}))
+	fmt.Printf("0, 1 [1] -> %f\n", xor.Think([]float64{0, 1}))
+	fmt.Printf("1, 1 [0] -> %f\n", xor.Think([]float64{1, 1}))
 }
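The evolve example above shows the export half of persistence via xor.Export() and xor.ToFile(...); presumably the same calls apply to a backprop-trained network like this one. A short sketch under that assumption; the layer sizes, iteration count, and output path are illustrative:

package main

import (
	"fmt"
	"github.com/lukks/neural-go"
)

func main() {
	// Illustrative topology; Activation defaults to sigmoid when empty.
	xor := neural.NewNeural([]*neural.Layer{
		{Inputs: 2, Units: 4},
		{Units: 1, Loss: "mse"},
	})

	// Train with backprop as in the example above.
	for i := 0; i < 5000; i++ {
		xor.Learns([][][]float64{
			{{0, 0}, {0}},
			{{1, 0}, {1}},
			{{0, 1}, {1}},
			{{1, 1}, {0}},
		})
	}

	// Export to JSON, as in the evolve example.
	json, _ := xor.Export()
	fmt.Printf("%s\n", json)

	// Or write straight to disk (path is illustrative).
	xor.ToFile("./xor.json")
}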