PyTorch Tutorial 05 - Gradient Descent

Tutorial Python code: NumPy version
import numpy as np
# f = w * x (no bias term here)
# f = 2 * x
X = np.array([1,2,3,4],dtype=np.float32)
Y = np.array([2,4,6,8],dtype=np.float32)
# initialize the weight
w = 0.0
# model prediction (forward pass)
def forward(x):
    return w * x
# loss = MSE (Mean Squared Error)
def loss(y, y_predicted):
    return ((y_predicted - y)**2).mean()
# gradient of the loss with respect to w, computed by hand
# J = MSE = 1/N * sum((w*x - y)**2)
# dJ/dw = 1/N * sum(2*x * (w*x - y))  <- the analytic derivative (chain rule)
def gradient(x, y, y_predicted):
    return np.mean(2*x * (y_predicted - y))
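# --- Added sanity check (a sketch, not in the original tutorial): compare the
# analytic gradient with a central finite difference,
# dJ/dw ~= (J(w+h) - J(w-h)) / (2h); the helper name numeric_gradient is ours.
def numeric_gradient(x, y, w_value, h=1e-4):
    j_plus = (((w_value + h) * x - y)**2).mean()
    j_minus = (((w_value - h) * x - y)**2).mean()
    return (j_plus - j_minus) / (2 * h)

# both values should be close to -30.0 for the initial w = 0.0
print(f'analytic: {gradient(X, Y, forward(X)):.4f}, numeric: {numeric_gradient(X, Y, w):.4f}')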
print(f'Prediction before training: f(5) = {forward(5):.3f}')
# Training
learning_rate = 0.01  # step size
n_iters = 20  # number of iterations; with the true 1/N gradient, more iterations push w closer to 2.0
for epoch in range(n_iters):
    # prediction = forward pass
    y_pred = forward(X)
    # loss
    l = loss(Y, y_pred)
    # gradients
    dw = gradient(X, Y, y_pred)
    # update weights: w = w - learning_rate * dw
    w -= learning_rate * dw
    # print progress at every epoch (increase the modulus to print less often)
    if epoch % 1 == 0:
        print(f'epoch {epoch+1}: w = {w:.3f}, loss = {l:.8f}')
print(f'Prediction after training: f(5) = {forward(5):.3f}')
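
For comparison, here is a minimal sketch of the same training loop with the hand-written gradient() replaced by PyTorch autograd (an illustration, separate from the NumPy listing above; l.backward(), w.grad, and torch.no_grad() are standard PyTorch API):

import torch

# same data and model, but w is a tensor that tracks gradients
X = torch.tensor([1, 2, 3, 4], dtype=torch.float32)
Y = torch.tensor([2, 4, 6, 8], dtype=torch.float32)
w = torch.tensor(0.0, dtype=torch.float32, requires_grad=True)

def forward(x):
    return w * x

def loss(y, y_predicted):
    return ((y_predicted - y)**2).mean()

learning_rate = 0.01
n_iters = 100  # the backprop gradient is the true mean, so convergence takes more steps

for epoch in range(n_iters):
    y_pred = forward(X)
    l = loss(Y, y_pred)
    l.backward()  # autograd fills w.grad with dl/dw
    with torch.no_grad():  # the weight update itself must not be tracked
        w -= learning_rate * w.grad
    w.grad.zero_()  # PyTorch accumulates gradients, so reset after each step
    if epoch % 10 == 0:
        print(f'epoch {epoch+1}: w = {w.item():.3f}, loss = {l.item():.8f}')

print(f'Prediction after training: f(5) = {forward(5).item():.3f}')

The key difference is that gradient() disappears: l.backward() computes dl/dw by backpropagation, the update runs under torch.no_grad() so it is not recorded in the graph, and w.grad must be zeroed after every update because PyTorch accumulates gradients across backward() calls.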