|
|
|
|
|
|
|
|
|
|
|
|
|
In this very simple example, we can see that the weight `w` increases a little after every iteration (driving the loss ever closer to 0), which shrinks the gap between the prediction `y_pred` and the true value `y`. In practice we would compute the loss and gradient over all samples, and likely use a more complex network architecture and optimizer, but this example captures the basic principle of how neural network weights are updated (a sketch of the multi-sample version follows the code below).
|
|
|
|
|
|
|
|
|
|
The code is below; feel free to change the values of `lr` and `Epoch` and watch how `loss` and `y_pred` respond.
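For reference, the gradient the code computes follows directly from applying the chain rule to the squared-error loss:

$$
\text{loss} = (y_{\text{pred}} - y)^2,\quad y_{\text{pred}} = w \cdot x
\;\Rightarrow\;
\frac{\partial\,\text{loss}}{\partial w} = 2\,(y_{\text{pred}} - y)\,x,\qquad
w \leftarrow w - lr \cdot \frac{\partial\,\text{loss}}{\partial w}
$$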
|
|
|
|
|
|
|
|
|
|
~~~python
# Initial weight, learning rate, a single training sample (x, y),
# and the number of gradient-descent iterations.
w = 0.5
lr = 0.1
x = 1
y = 2
Epoch = 3

for i in range(Epoch):
    print("=========")
    print(f"Iteration {i+1}:")
    y_pred = w * x                   # forward pass: prediction
    print("y_pred =", y_pred)
    loss = (y_pred - y)**2           # squared-error loss
    print("loss =", loss)
    dloss_dw = 2 * (y_pred - y) * x  # gradient of the loss w.r.t. w
    print("dloss_dw =", dloss_dw)
    w = w - lr * dloss_dw            # gradient-descent update
    print("w =", w)
~~~
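Running this, `w` moves from 0.5 to roughly 0.8, then 1.04, then 1.232, steadily approaching the value 2 that drives the loss to zero.

As noted above, in practice the loss and gradient are computed over all samples rather than a single one. Below is a minimal sketch of that batch version; the dataset `xs`/`ys` and the hyperparameter values are hypothetical, chosen only for illustration:

~~~python
# Batch gradient descent sketch: same one-weight model y_pred = w * x,
# but the loss and gradient are averaged over a (hypothetical) dataset.
xs = [1.0, 2.0, 3.0]
ys = [2.0, 4.0, 6.0]  # generated by y = 2x, so w should approach 2

w = 0.5
lr = 0.05

for epoch in range(20):
    # Gradient of the mean squared error with respect to w
    dloss_dw = sum(2 * (w * x - y) * x for x, y in zip(xs, ys)) / len(xs)
    w = w - lr * dloss_dw

loss = sum((w * x - y) ** 2 for x, y in zip(xs, ys)) / len(xs)
print("w =", w, "loss =", loss)  # w ends close to 2, loss close to 0
~~~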
|
|
|
|
|
|
|
|
|
|
**A simple neural network like this is equivalent to linear regression**; for a deeper look, see [Linear Regression Principles](https://github.com/ben1234560/AiLearning-Theory-Applying/blob/53ad238b5b7dbb5c39520401de2f10208825e4f9/%E6%9C%BA%E5%99%A8%E5%AD%A6%E4%B9%A0%E7%AE%97%E6%B3%95%E5%8E%9F%E7%90%86%E5%8F%8A%E6%8E%A8%E5%AF%BC/%E7%AC%AC%E4%B8%80%E7%AB%A0%E2%80%94%E2%80%94%E7%BA%BF%E6%80%A7%E5%9B%9E%E5%BD%92%E5%8E%9F%E7%90%86.md)
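As an illustration of that equivalence (a sketch under hypothetical data, not part of the original text): for a one-weight model with no bias, gradient descent on the mean squared error converges to the same `w` as the closed-form least-squares solution.

~~~python
# Sketch: gradient descent on a one-weight linear model recovers the
# closed-form least-squares fit. Data here are hypothetical.
xs = [1.0, 2.0, 3.0, 4.0]
ys = [2.1, 3.9, 6.2, 7.8]  # roughly y = 2x, with a little noise

# Closed-form least squares for y = w * x: w* = sum(x*y) / sum(x*x)
w_closed = sum(x * y for x, y in zip(xs, ys)) / sum(x * x for x in xs)

# Gradient descent on the same mean squared error
w = 0.0
lr = 0.01
for _ in range(1000):
    dloss_dw = sum(2 * (w * x - y) * x for x, y in zip(xs, ys)) / len(xs)
    w = w - lr * dloss_dw

print("closed-form w:", w_closed)
print("gradient-descent w:", w)  # the two agree to many decimal places
~~~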
|
|
|
|
|
|
|
|
|
|
### Summary
|
|
|
|
|