The following is a simple neural network implementation written in Go that does not depend on any third-party libraries. The whole program fits in a single file and only needs the standard library:

package main

import (
	"fmt"
	"math"
	"math/rand"
)

First, we need to define a few basic structures:
type Neuron struct {
	weights []float64 // one incoming weight per neuron in the previous layer
	bias    float64   // bias term
	output  float64   // activation stored during the forward pass
	delta   float64   // error signal stored during backpropagation
}

type Layer struct {
	neurons []Neuron
}

type NeuralNetwork struct {
	layers []Layer
}
Here, Neuron represents a single neuron, Layer represents one layer of the network, and NeuralNetwork represents the whole network. Besides its incoming weights, each neuron also stores a bias, its most recent activation (output), and the error signal (delta) used during training.
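As a concrete illustration (the values below are arbitrary and chosen only for this example), a hidden neuron that receives two inputs, and a layer containing it, would look like this:

// A neuron with two incoming weights and a bias; the values are arbitrary.
n := Neuron{weights: []float64{0.5, -0.3}, bias: 0.1}
// A layer containing that single neuron.
layer := Layer{neurons: []Neuron{n}}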
Next, we need to implement a few basic functions for initializing and training the network:
func NewNeuron(numInputs int) Neuron {
	// Initialize the weights and the bias with small random values.
	// Go 1.20+ seeds the global math/rand generator automatically,
	// so no explicit rand.Seed call is needed.
	weights := make([]float64, numInputs)
	for i := range weights {
		weights[i] = rand.Float64()*2 - 1 // random value in [-1, 1)
	}
	return Neuron{weights: weights, bias: rand.Float64()*2 - 1}
}
func NewLayer(numNeurons, numInputs int) Layer {
	neurons := make([]Neuron, numNeurons)
	for i := range neurons {
		neurons[i] = NewNeuron(numInputs)
	}
	return Layer{neurons: neurons}
}
func NewNeuralNetwork(numInputs, numOutputs, numHiddenLayers, numNeuronsPerLayer int) NeuralNetwork {
	layers := make([]Layer, numHiddenLayers+2)
	// Input layer: no incoming weights, its neurons just hold the input values.
	layers[0] = NewLayer(numInputs, 0)
	// Hidden layers: each neuron has one weight per neuron in the previous layer.
	for i := 1; i <= numHiddenLayers; i++ {
		layers[i] = NewLayer(numNeuronsPerLayer, len(layers[i-1].neurons))
	}
	// Output layer.
	layers[numHiddenLayers+1] = NewLayer(numOutputs, len(layers[numHiddenLayers].neurons))
	return NeuralNetwork{layers: layers}
}
func (n *NeuralNetwork) FeedForward(inputs []float64) []float64 {
	// The input layer simply stores the input values as its outputs.
	for i, val := range inputs {
		n.layers[0].neurons[i].output = val
	}
	// Propagate through the hidden and output layers.
	for i := 1; i < len(n.layers); i++ {
		prevLayer := n.layers[i-1]
		currLayer := n.layers[i]
		for j := range currLayer.neurons {
			sum := currLayer.neurons[j].bias
			for k := range prevLayer.neurons {
				sum += currLayer.neurons[j].weights[k] * prevLayer.neurons[k].output
			}
			currLayer.neurons[j].output = sigmoid(sum)
		}
	}
	// Collect the activations of the output layer.
	outputLayer := n.layers[len(n.layers)-1]
	outputs := make([]float64, len(outputLayer.neurons))
	for i, neuron := range outputLayer.neurons {
		outputs[i] = neuron.output
	}
	return outputs
}
func (n *NeuralNetwork) Train(inputs [][]float64, targets [][]float64, learningRate float64, iterations int) {
	for iter := 0; iter < iterations; iter++ {
		for i, input := range inputs {
			n.FeedForward(input)
			// Calculate the error signal (delta) for each output neuron.
			outputLayer := n.layers[len(n.layers)-1]
			for j := range outputLayer.neurons {
				neuron := &outputLayer.neurons[j]
				errorSignal := targets[i][j] - neuron.output
				neuron.delta = errorSignal * sigmoidDerivative(neuron.output)
			}
			// Propagate the error signal backwards through the hidden layers.
			for j := len(n.layers) - 2; j > 0; j-- {
				currLayer := n.layers[j]
				nextLayer := n.layers[j+1]
				for k := range currLayer.neurons {
					errorSignal := 0.0
					for l := range nextLayer.neurons {
						errorSignal += nextLayer.neurons[l].weights[k] * nextLayer.neurons[l].delta
					}
					currLayer.neurons[k].delta = errorSignal * sigmoidDerivative(currLayer.neurons[k].output)
				}
			}
			// Update all weights and biases using the stored deltas.
			for j := 1; j < len(n.layers); j++ {
				prevLayer := n.layers[j-1]
				for k := range n.layers[j].neurons {
					neuron := &n.layers[j].neurons[k]
					for l := range neuron.weights {
						neuron.weights[l] += learningRate * neuron.delta * prevLayer.neurons[l].output
					}
					neuron.bias += learningRate * neuron.delta
				}
			}
		}
	}
}
These functions work as follows:

- NewNeuron: initializes a neuron whose weights and bias are drawn uniformly at random from [-1, 1).
- NewLayer: initializes one layer of neurons, calling NewNeuron for each of them.
- NewNeuralNetwork: initializes the whole network, including the input layer, the hidden layers, and the output layer. The input layer carries no weights and only passes the inputs through; every other layer is randomly initialized.
- FeedForward: performs a forward pass, feeding the input data through the network and returning the output values.
- Train: trains the network with backpropagation. For each sample it computes the error between the network output and the target value, propagates that error backwards through the layers, and updates each neuron's weights and bias, as shown in the formulas below.
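In formula form, the updates that Train implements are the standard delta rule for a sigmoid network. The symbols here are introduced only for this sketch and do not appear in the code: $\eta$ is learningRate, $t_k$ a target value, $o_k$ a neuron's stored output, and $w_{lk}$ the weight of neuron $l$ in the next layer for the current neuron $k$:

$$
\delta_k^{\text{out}} = (t_k - o_k)\,o_k(1 - o_k), \qquad
\delta_k^{\text{hidden}} = o_k(1 - o_k)\sum_{l} w_{lk}\,\delta_l, \qquad
w_{kl} \leftarrow w_{kl} + \eta\,\delta_k\,o_l, \qquad
b_k \leftarrow b_k + \eta\,\delta_k
$$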
Finally, we need a couple of helper functions used in the network's computations:
func sigmoid(x float64) float64 {
	return 1.0 / (1.0 + math.Exp(-x))
}

func sigmoidDerivative(x float64) float64 {
	// Note: x is expected to be a value that sigmoid has already produced.
	return x * (1.0 - x)
}
These are the sigmoid function and its derivative. Note that sigmoidDerivative takes the sigmoid's output rather than its input, which is why the code above always passes a neuron's stored activation to it.
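The reason this works is a short identity (a standard derivation, included here for completeness):

$$
\sigma(z) = \frac{1}{1 + e^{-z}} \quad\Longrightarrow\quad
\frac{d\sigma}{dz} = \frac{e^{-z}}{(1 + e^{-z})^2} = \sigma(z)\bigl(1 - \sigma(z)\bigr)
$$

So if a neuron has stored its activation $a = \sigma(z)$, the derivative is simply $a(1 - a)$, and the pre-activation $z$ never needs to be kept around.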
With that, the simple neural network is complete. We can test it with the following code:
func main() {
	// Training data for the XOR function.
	inputs := [][]float64{
		{0, 0},
		{0, 1},
		{1, 0},
		{1, 1},
	}
	targets := [][]float64{
		{0},
		{1},
		{1},
		{0},
	}
	// 2 inputs, 1 output, 1 hidden layer with 3 neurons.
	nn := NewNeuralNetwork(2, 1, 1, 3)
	nn.Train(inputs, targets, 0.1, 10000)
	for _, input := range inputs {
		output := nn.FeedForward(input)
		fmt.Println(input, output)
	}
}
This code trains the network on the XOR problem and prints the results. Each printed output should move towards its target (values close to 0 or 1); since the weights are initialized randomly, the exact numbers vary from run to run, and an occasional run may need more iterations or a different learning rate to converge.
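To watch training progress quantitatively, a small helper can be added. meanSquaredError is a name introduced here and is not part of the code above; it is a minimal sketch built on the same types:

// meanSquaredError runs every input through the network and averages
// the squared difference between outputs and targets.
func meanSquaredError(nn *NeuralNetwork, inputs, targets [][]float64) float64 {
	sum, count := 0.0, 0
	for i, input := range inputs {
		output := nn.FeedForward(input)
		for j := range output {
			diff := targets[i][j] - output[j]
			sum += diff * diff
			count++
		}
	}
	return sum / float64(count)
}

Calling fmt.Println(meanSquaredError(&nn, inputs, targets)) before and after nn.Train should show the error dropping if training succeeds.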