
PyTorch Tutorial 14 - Convolutional N...

2023-02-17 15:43  Author: Mr-南喬

The tutorial's Python code is as follows:


import torch

import torch.nn as nn

import torch.nn.functional as F

import torchvision

import torchvision.transforms as transforms

import matplotlib.pyplot as plt

import numpy as np


# Device configuration: run on the GPU if one is available, otherwise on the CPU

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')


# Hyper-parameters

num_epochs = 20

batch_size = 4

learning_rate = 0.001


"""加載數(shù)據(jù)集,數(shù)據(jù)集已經(jīng)在 Pytorch 中提供,我們直接使用"""

# The dataset contains PILImage images with values in the range [0, 1];

# we transform them to tensors normalized to the range [-1, 1].

transform = transforms.Compose(
    [transforms.ToTensor(),
     transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
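# Quick sanity check (an illustrative aside, not part of the original tutorial):
# Normalize(mean, std) maps each channel as x -> (x - mean) / std, so the [0, 1]
# range produced by ToTensor() becomes [-1, 1] when mean = std = 0.5.
example_pixels = torch.tensor([0.0, 0.5, 1.0])  # hypothetical pixel values
print((example_pixels - 0.5) / 0.5)  # tensor([-1.,  0.,  1.])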


"""定義 Pytorch 數(shù)據(jù)集"""

# CIFAR10: 60000 32x32 color images in 10 classes, with 6000 images per class

train_dataset = torchvision.datasets.CIFAR10(root='./Data', train=True,
                                             download=True, transform=transform)

test_dataset = torchvision.datasets.CIFAR10(root='./Data', train=False,
                                            download=True, transform=transform)


"""定義 Pytorch 數(shù)據(jù)加載器"""

train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size,
                                           shuffle=True)

test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=batch_size,
                                          shuffle=False)


classes = ('plane', 'car', 'bird', 'cat',
           'deer', 'dog', 'frog', 'horse', 'ship', 'truck')



def imshow(img):
    img = img / 2 + 0.5  # unnormalize: undo the Normalize((0.5, ...), (0.5, ...)) transform above
    npimg = img.numpy()
    plt.imshow(np.transpose(npimg, (1, 2, 0)))  # CHW -> HWC, the layout matplotlib expects
    plt.show()



# get some random training images

dataiter = iter(train_loader)

images, labels = next(dataiter)


# show images

imshow(torchvision.utils.make_grid(images))
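# Descriptive note (not in the original post): make_grid tiles the batch of images
# into a single image tensor, so imshow displays all samples of the batch side by side.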


# Implement the convolutional neural network

class ConvNet(nn.Module):
    def __init__(self):
        super(ConvNet, self).__init__()
        self.conv1 = nn.Conv2d(3, 6, 5)   # first conv layer: 3 input channels (RGB), 6 output channels, 5x5 kernel
        self.pool = nn.MaxPool2d(2, 2)    # max pooling with a 2x2 kernel and stride 2
        self.conv2 = nn.Conv2d(6, 16, 5)  # second conv layer: 6 input channels (conv1's output), 16 output channels, 5x5 kernel
        # Why 16 * 5 * 5? After conv1 (32 -> 28), pooling (28 -> 14), conv2 (14 -> 10) and pooling (10 -> 5),
        # the feature map has 16 channels of size 5x5, i.e. 400 features once flattened.
        self.fc1 = nn.Linear(16 * 5 * 5, 120)  # first fully connected layer: 400 input features, 120 output features
        self.fc2 = nn.Linear(120, 84)          # second fully connected layer: 120 input features, 84 output features
        self.fc3 = nn.Linear(84, 10)           # last fully connected layer: 84 input features, 10 outputs (one per class)

    def forward(self, x):
        # -> n, 3, 32, 32
        x = self.pool(F.relu(self.conv1(x)))  # -> n, 6, 14, 14   (ReLU after conv1, then max pooling)
        x = self.pool(F.relu(self.conv2(x)))  # -> n, 16, 5, 5    (ReLU after conv2, then max pooling)
        x = x.view(-1, 16 * 5 * 5)            # -> n, 400         (flatten)
        x = F.relu(self.fc1(x))               # -> n, 120
        x = F.relu(self.fc2(x))               # -> n, 84
        x = self.fc3(x)                       # -> n, 10          (raw logits; no softmax here)
        return x



model = ConvNet().to(device)
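# Shape sanity check (an illustrative aside, not part of the original tutorial):
# a dummy CIFAR-10-sized batch confirms the 16 * 5 * 5 = 400 flattening inside
# forward() and the 10 output logits per image.
_dummy = torch.randn(4, 3, 32, 32).to(device)
print(model(_dummy).shape)  # expected: torch.Size([4, 10])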


# Loss and optimizer

"""這是一個多類分類問題,所以我們用交叉熵損失"""

criterion = nn.CrossEntropyLoss()  # softmax is already included in CrossEntropyLoss, so the model outputs raw logits

optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)
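# Illustrative aside (not part of the original tutorial): CrossEntropyLoss combines
# LogSoftmax and NLLLoss, which is why forward() returns raw logits without a softmax.
# The two formulations below agree for hypothetical logits and targets:
_logits = torch.randn(4, 10)
_targets = torch.tensor([1, 0, 3, 9])
print(torch.allclose(criterion(_logits, _targets),
                     nn.NLLLoss()(F.log_softmax(_logits, dim=1), _targets)))  # True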


n_total_steps = len(train_loader)


"""批量優(yōu)化和批量訓練"""

for epoch in range(num_epochs):
    for i, (images, labels) in enumerate(train_loader):
        # original shape: [4, 3, 32, 32], i.e. 4 images, 3 channels, 32*32 = 1024 pixels
        # input layer: 3 input channels, 6 output channels, 5x5 kernel size
        # Move the images and labels to the device so the GPU is used when available
        images = images.to(device)
        labels = labels.to(device)

        # Forward pass: compute the predictions and the loss
        outputs = model(images)
        loss = criterion(outputs, labels)

        # Backward pass and optimization; don't forget to zero the gradients first
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        if (i + 1) % 2000 == 0:
            print(f'Epoch [{epoch + 1}/{num_epochs}], Step [{i + 1}/{n_total_steps}], Loss: {loss.item():.4f}')


print('Finished Training')

PATH = './cnn.pth'

torch.save(model.state_dict(), PATH)
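# Illustrative aside (not part of the original tutorial): the saved state_dict can be
# restored into a fresh ConvNet instance later for inference, e.g.:
loaded_model = ConvNet().to(device)
loaded_model.load_state_dict(torch.load(PATH))
loaded_model.eval()  # evaluation mode; the evaluation below still uses the in-memory `model`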


"""模型評估,聲明無梯度,因為我們不需要這里的反向傳播和梯度計算"""

with torch.no_grad():
    n_correct = 0
    n_samples = 0
    n_class_correct = [0 for i in range(10)]
    n_class_samples = [0 for i in range(10)]
    for images, labels in test_loader:
        images = images.to(device)
        labels = labels.to(device)
        outputs = model(images)
        # max returns (value, index); we only need the index of the largest logit
        _, predicted = torch.max(outputs, 1)
        n_samples += labels.size(0)
        n_correct += (predicted == labels).sum().item()

        for i in range(batch_size):
            label = labels[i]
            pred = predicted[i]
            if (label == pred):
                n_class_correct[label] += 1
            n_class_samples[label] += 1

    acc = 100.0 * n_correct / n_samples
    print(f'Accuracy of the network: {acc} %')

    """Compute the accuracy of each individual class"""
    for i in range(10):
        acc = 100.0 * n_class_correct[i] / n_class_samples[i]
        print(f'Accuracy of {classes[i]}: {acc} %')
