Deep Learning: Linear Regression with PyTorch
import torch
# prepare dataset
# x and y are matrices of shape (3, 1): 3 samples in total, each with a single feature
x_data = torch.tensor([[1.0], [2.0], [3.0]])
y_data = torch.tensor([[2.0], [4.0], [6.0]])

# design model using class
"""
Our model class should inherit from nn.Module, which is the base class for all
neural network modules. The member methods __init__() and forward() have to be
implemented. The class nn.Linear contains two learnable member Tensors: weight
and bias. nn.Module implements the magic method __call__(), so an instance of
the class can be called just like a function; that call normally ends up
invoking forward().
"""
class LinearModel(torch.nn.Module):
    def __init__(self):
        super(LinearModel, self).__init__()
        # (1, 1) are the feature dimensions of the input x and the output y;
        # in this dataset both x and y have a single feature.
        # The parameters this linear layer learns are w and b, retrieved via
        # linear.weight and linear.bias respectively.
        self.linear = torch.nn.Linear(1, 1)

    def forward(self, x):
        y_pred = self.linear(x)
        return y_pred

model = LinearModel()
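# Illustrative note (my addition, not in the original post): nn.Module implements __call__,
# so calling model(x) dispatches to model.forward(x) rather than forward() being called
# directly. For example, even before training:
# print(model(torch.tensor([[5.0]])))  # equivalent to model.forward(torch.tensor([[5.0]]))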

# construct loss and optimizer
# criterion = torch.nn.MSELoss(size_average=False)  # size_average is deprecated; use reduction instead
criterion = torch.nn.MSELoss(reduction='sum')
# model.parameters() recursively collects every learnable parameter of the model
# (here the weight and bias of self.linear) so the optimizer knows what to update;
# the parameters themselves are initialized when nn.Linear is constructed
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
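# Illustrative check (my addition, not in the original post) of what the optimizer will update:
# for name, param in model.named_parameters():
#     print(name, param.shape)  # linear.weight torch.Size([1, 1]); linear.bias torch.Size([1])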

# training cycle: forward, backward, update
for epoch in range(100):
    y_pred = model(x_data)            # forward: predict
    loss = criterion(y_pred, y_data)  # forward: compute the loss
    print(epoch, loss.item())

    optimizer.zero_grad()  # gradients from .backward() accumulate, so zero them before each backward pass
    loss.backward()        # backward: autograd computes the gradients automatically
    optimizer.step()       # update the parameters, i.e. the values of w and b

print('w = ', model.linear.weight.item())
print('b = ', model.linear.bias.item())
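# Since the ground truth here is exactly y = 2x, w should converge toward 2.0 and b toward
# 0.0; after only 100 epochs of SGD the printed values will typically still be approximate.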

x_test = torch.tensor([[4.0]])
y_test = model(x_test)
print('y_pred = ', y_test.data)
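
# A small extension (my addition, not part of the original post): for pure inference it is
# common to wrap the forward pass in torch.no_grad(), which skips building the autograd
# graph since no backward pass is needed here.
with torch.no_grad():
    y_test_no_grad = model(torch.tensor([[4.0]]))
    print('y_pred (no_grad) = ', y_test_no_grad.item())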
Copyright notice: this post is an original article by CSDN blogger 錯(cuò)錯(cuò)莫, released under the CC 4.0 BY-SA license. Please include the original source link and this notice when reposting.
Original link: https://blog.csdn.net/bit452/article/details/109677086
