# MNIST classifier training script (PyTorch).
import torch
import torch.nn as nn
import torch.optim as optim
from torchvision import datasets, transforms
# Preprocessing: convert PIL images to tensors, then normalize with the
# canonical MNIST per-channel mean/std.
_MNIST_MEAN = (0.1307,)
_MNIST_STD = (0.3081,)
transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize(_MNIST_MEAN, _MNIST_STD),
])
# Datasets: MNIST train/test splits under ./data (train split downloads if
# absent); both share the same normalization transform.
train_dataset = datasets.MNIST(
    './data', train=True, download=True, transform=transform
)
test_dataset = datasets.MNIST('./data', train=False, transform=transform)

# Loaders: shuffle only the training data; large fixed batches for eval.
train_loader = torch.utils.data.DataLoader(
    train_dataset, batch_size=64, shuffle=True
)
test_loader = torch.utils.data.DataLoader(
    test_dataset, batch_size=1000, shuffle=False
)
# Run on GPU when one is available, otherwise fall back to CPU.
use_cuda = torch.cuda.is_available()
device = torch.device('cuda' if use_cuda else 'cpu')

# Model (Net is defined elsewhere in this file), loss, and optimizer.
model = Net().to(device)
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=0.001)
def train_one_epoch(model, loader, criterion, optimizer, device):
    """Run a single training pass over *loader*.

    Args:
        model: module to train (switched to train mode here).
        loader: iterable yielding (data, target) batches.
        criterion: loss function applied to (output, target).
        optimizer: optimizer stepping *model*'s parameters.
        device: device the batches are moved to.

    Returns:
        (mean_batch_loss, accuracy_percent) over the whole epoch.
    """
    model.train()
    running_loss = 0.0
    correct = 0
    total = 0
    for data, target in loader:
        data, target = data.to(device), target.to(device)
        optimizer.zero_grad()
        output = model(data)
        loss = criterion(output, target)
        loss.backward()
        optimizer.step()
        running_loss += loss.item()
        # Prediction = argmax over class logits.
        _, predicted = output.max(1)
        total += target.size(0)
        correct += predicted.eq(target).sum().item()
    return running_loss / len(loader), 100. * correct / total


def evaluate(model, loader, criterion, device):
    """Evaluate *model* on *loader* without gradient tracking.

    Returns:
        (mean_batch_loss, accuracy_percent) over the evaluation set.
    """
    model.eval()
    running_loss = 0.0
    correct = 0
    total = 0
    with torch.no_grad():
        for data, target in loader:
            data, target = data.to(device), target.to(device)
            output = model(data)
            running_loss += criterion(output, target).item()
            _, predicted = output.max(1)
            total += target.size(0)
            correct += predicted.eq(target).sum().item()
    return running_loss / len(loader), 100. * correct / total


if __name__ == '__main__':
    # Guarded so importing this module does not start training.
    for epoch in range(10):
        train_loss, train_acc = train_one_epoch(
            model, train_loader, criterion, optimizer, device
        )
        test_loss, test_acc = evaluate(model, test_loader, criterion, device)
        print(f'Epoch {epoch+1}: '
              f'Train Loss={train_loss:.4f}, '
              f'Train Acc={train_acc:.2f}%, '
              f'Test Loss={test_loss:.4f}, '
              f'Test Acc={test_acc:.2f}%')