A few comments on this AI code, to make it easier to understand
import numpy as np
data = np.array([
    [80, 200],
    [90, 230],
    [104, 245],
    [112, 274],
    [125, 259],
    [135, 262]
])  # store the data set as a NumPy array (column 0: x, column 1: y)
# set the initial slope and intercept
m = 1
b = 1
# extract the independent and dependent variables from the data set
xarray = data[:, 0]
yreal = data[:, -1]
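# As a sanity check (an added sketch, not part of the original code),
# NumPy's polyfit gives the closed-form least-squares line, so the
# gradient-descent result below can be compared against it.
m_ref, b_ref = np.polyfit(xarray, yreal, 1)  # degree-1 fit returns [slope, intercept]
print('closed-form reference: m={:.4f}, b={:.4f}'.format(m_ref, b_ref))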
# learning rate set to 0.001; also worth trying 0.0001 and 0.00001
# (a sweep sketch appears after train() below)
learningrate = 0.001
# grandentdecent() computes the gradient of the loss at the current slope
# and intercept, i.e. the partial derivatives of the MSE with respect to
# the intercept b and the slope m:
#   dMSE/db = (2/n) * sum(m*x + b - y)
#   dMSE/dm = (2/n) * sum((m*x + b - y) * x)
def grandentdecent():
    bslop = 0
    for index, x in enumerate(xarray):
        bslop = bslop + (m * x + b - yreal[index])
    bslop = bslop * 2 / len(xarray)
    mslop = 0
    for index, x in enumerate(xarray):
        mslop = mslop + (m * x + b - yreal[index]) * x  # note the extra factor x
    mslop = mslop * 2 / len(xarray)
    return (bslop, mslop)
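# For comparison, a minimal vectorized sketch of the same gradients
# (an addition, not in the original code; it reads the module-level
# xarray, yreal, m and b defined above):
def grandentdecent_vectorized():
    err = m * xarray + b - yreal       # residual at every data point at once
    bslop = 2 * np.mean(err)           # dMSE/db
    mslop = 2 * np.mean(err * xarray)  # dMSE/dm
    return (bslop, mslop)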
# train() runs the gradient-descent loop for up to 10 iterations
def train():
    global m, b
    for i in range(10):
        # average the per-point partial derivatives over the data set,
        # giving the partial derivatives of the mean squared error
        bslop, mslop = grandentdecent()
        m = m - mslop * learningrate
        b = b - bslop * learningrate
        # stop early once both gradients are small
        if abs(mslop) < 0.5 and abs(bslop) < 0.5:
            break
    print('m={},b={}'.format(m, b))
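# A minimal sketch of the learning-rate sweep suggested above (an added
# helper, not in the original code; the function name and the steps
# parameter are hypothetical). It reruns the same update rule from a
# fresh slope and intercept for each rate:
def sweep_learning_rates(rates=(0.001, 0.0001, 0.00001), steps=10):
    for lr in rates:
        mm, bb = 1.0, 1.0  # fresh slope/intercept per run
        for _ in range(steps):
            err = mm * xarray + bb - yreal   # residuals before this update
            bb = bb - lr * 2 * np.mean(err)
            mm = mm - lr * 2 * np.mean(err * xarray)
        print('lr={}: m={}, b={}'.format(lr, mm, bb))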
if __name__ == '__main__':
    train()