深度学习02 神经网络实现手写数字案例
目录
下载手写数字图像(图像+标签)
展示手写数字图片
数据打包
判断当前设备是否支持GPU
建立神经网络模型
定义训练与测试函数
创建损失函数、优化器
开始训练
下载手写数字图像(图像+标签)
# Download MNIST (60k train / 10k test images of handwritten digits) into
# ./data, converting each PIL image to a float tensor in [0, 1].
training_data = datasets.MNIST(
    root='data',
    train=True,        # training split
    download=True,     # fetch if not already cached locally
    transform=ToTensor(),
)

test_data = datasets.MNIST(
    root='data',
    train=False,       # held-out evaluation split
    download=True,
    transform=ToTensor(),
)
展示手写数字图片
import torch
from torch import nn
from torch.utils.data import DataLoader
from torchvision import datasets
from torchvision.transforms import ToTensor

from matplotlib import pyplot as plt
# Preview a 4x4 grid of training digits with their labels.
figure = plt.figure()
for i in range(16):
    # Offset 59000 picks samples from near the end of the 60k training set;
    # any in-range offset works equally well for a quick visual check.
    img, label = training_data[i + 59000]
    figure.add_subplot(4, 4, i + 1)
    plt.title(label)
    plt.axis('off')
    # img is (1, 28, 28); squeeze the channel dim so imshow gets a 2-D array.
    plt.imshow(img.squeeze(), cmap='gray')
plt.show()
数据打包
# Batch the datasets, 64 samples per step. Shuffle the training data each
# epoch so gradient updates do not always see the same sample order; the
# test loader needs no shuffling since evaluation is order-independent.
train_dataloader = DataLoader(training_data, batch_size=64, shuffle=True)
test_dataloader = DataLoader(test_data, batch_size=64)
判断当前设备是否支持GPU
# Pick the best available accelerator: CUDA GPU, then Apple-silicon MPS,
# falling back to the CPU.
if torch.cuda.is_available():
    device = 'cuda'
elif torch.backends.mps.is_available():
    device = 'mps'
else:
    device = 'cpu'
print(device)
建立神经网络模型
class Neturalwork(nn.Module):
    """Fully connected classifier for 28x28 MNIST digit images.

    Architecture: flatten -> 784 -> 128 -> 256 -> 10, with sigmoid
    activations on both hidden layers. Returns raw logits (no softmax),
    as expected by ``nn.CrossEntropyLoss``.
    """

    def __init__(self):
        super().__init__()
        # Collapse (N, 1, 28, 28) images into (N, 784) feature vectors.
        self.flatten = nn.Flatten()
        self.hidden1 = nn.Linear(28 * 28, 128)
        self.hidden2 = nn.Linear(128, 256)
        self.out = nn.Linear(256, 10)

    def forward(self, x):
        x = self.flatten(x)
        x = torch.sigmoid(self.hidden1(x))
        x = torch.sigmoid(self.hidden2(x))
        # Final layer produces one logit per digit class.
        return self.out(x)
# Instantiate the network and move its parameters onto the chosen device.
net = Neturalwork()
model = net.to(device)
print(model)
定义训练与测试函数
def train(dataloader, model, loss_fn, optimizer):
    """Run one training epoch over ``dataloader``.

    Args:
        dataloader: yields (inputs, labels) mini-batches.
        model: network to optimize; assumed to already live on its
            target device (it is moved there at construction time).
        loss_fn: criterion comparing predictions against labels.
        optimizer: optimizer bound to ``model.parameters()``.
    """
    model.train()
    # Derive the device from the model itself rather than relying on a
    # module-level global, so the function is self-contained.
    device = next(model.parameters()).device
    for batch_idx, (x, y) in enumerate(dataloader, start=1):
        x, y = x.to(device), y.to(device)
        # Call the model, not .forward(), so registered hooks still run.
        pred = model(x)
        loss = loss_fn(pred, y)
        optimizer.zero_grad()   # clear gradients left from the previous step
        loss.backward()         # backpropagate
        optimizer.step()        # apply the parameter update
        if batch_idx % 100 == 0:
            print(f'loss:{loss.item():>7f} [number:{batch_idx}]')
def test(dataloader,model,loss_fn):
size=len(dataloader.dataset)
num_batches=len(dataloader)
model.eval()
test_loss,correct=0,0
with torch.no_grad():
for x,y in dataloader:
x,y=x.to(device),y.to(device)
pred=model.forward(x)
test_loss+=loss_fn(pred,y).item()
correct+=(pred.argmax(1)==y).type(torch.float).sum().item()
a=(pred.argmax(1)==y)
b=(pred.argmax(1)==y).type(torch.float)
test_loss/=num_batches
correct/=size
print(f'Test result:\n Accuracy:{(100*correct)}%,Avg loss:{test_loss}')
创建损失函数、优化器
# Cross-entropy loss for multi-class classification over the 10 digit classes.
loss_fn = nn.CrossEntropyLoss()
# Adam optimizer; adjust the learning rate as needed.
optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
开始训练
# Train for a fixed number of epochs, then evaluate once on the test set.
epochs = 15
for epoch in range(epochs):
    print(f'EPOCH {epoch+1}\n-----------')
    train(train_dataloader, model, loss_fn, optimizer)
print('结束')
test(test_dataloader, model, loss_fn)