In [1]:
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torchvision
from torchvision import datasets, transforms, models
from torch.utils.data import DataLoader
In [2]:
class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(in_channels=1,
                              out_channels=6,
                              kernel_size=5)
        self.conv2 = nn.Conv2d(in_channels=6,
                              out_channels=16,
                              kernel_size=5)
        # y = Wx + b 
        self.fc1  = nn.Linear(in_features = 16 * 4 * 4, out_features=120) # bias = True 가 기본
        self.fc2  = nn.Linear(in_features=120, out_features=84)
        self.fc3 =  nn.Linear(in_features=84, out_features=10)
        
    def forward(self, x):
        # x.shape : b, 1, 28,28 --> b, 6, 24, 24 
        # H(input 높이) , K(kernel size) : K(conv) --> H - K + 1
        x = self.conv1(x)
        x = F.relu(x)
        # x.shape: b, 6, 24, 24 --> b, 6, 12,12
        x = F.max_pool2d(x, (2,2))
        
        # x.shape: b, 6, 12, 12 --> b, 16, 8, 8
        x = self.conv2(x)
        x = F.relu(x)
        # x.shape: b, 16, 8, 8 --> b, 16, 4, 4
        x = F.max_pool2d(x, (2,2))
                
        # x.shape: b, 16, 4, 4 - > b, 256  (-1 이 쫙피게 연산해줌)
        x = x.view(x.size(0), -1)
        # x.shape: b , 256 --> b, 120
        x = self.fc1(x)
        x = F.relu(x)
        # x.shape: b, 120 --> b, 84
        x = self.fc2(x)
        x = F.relu(x)
        # x.shape: b,84 --> b, 10
        x = self.fc3(x)
        return x
    
net = Net()
device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
#print(device)
net = net.to(device=device)
print(net)
Net(
  (conv1): Conv2d(1, 6, kernel_size=(5, 5), stride=(1, 1))
  (conv2): Conv2d(6, 16, kernel_size=(5, 5), stride=(1, 1))
  (fc1): Linear(in_features=256, out_features=120, bias=True)
  (fc2): Linear(in_features=120, out_features=84, bias=True)
  (fc3): Linear(in_features=84, out_features=10, bias=True)
)
In [3]:
batch_size = 128
# transform(x)
transform = transforms.Compose(
    [transforms.ToTensor(),
    transforms.Normalize((0.5,),(0.5,))] # mean, variance
)

trainset = datasets.MNIST(root='./data', train=True, download=True, 
                         transform=transform)

trainloader = DataLoader(trainset, batch_size=batch_size, 
                         shuffle=True, 
                         num_workers=2)

testset = datasets.MNIST(root='./data', train=False, download=True, 
                         transform=transform)

testloader = DataLoader(testset, batch_size=batch_size, 
                         shuffle=False, 
                         num_workers=2)

optimizer = optim.SGD(net.parameters(), lr=0.1, momentum=0.9)
criterion = nn.CrossEntropyLoss() 
In [4]:
def train(epoch):
    net.train()
    running_loss = 0.0
    for i,(inputs, labels) in enumerate(trainloader):
        inputs, labels = inputs.to(device), labels.to(device) # loss.__class__: Tensor.
        
        #zero the parameter gradients
        optimizer.zero_grad()
        
        #forward + backward + optimize
        #net.foward(inputs)
        outputs = net(inputs)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()
        
        running_loss += loss.item()
        
        if i%50 == 49:
            print('[%d,%d]loss: %.3f'%(epoch+1, i+1, running_loss/50))
            running_loss = 0.0
In [5]:
def test():
    net.eval()
    correct = 0.
    total = 0.
    class_correct = [0.] * 10 #[0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.]
    class_total = [0.] * 10 
    
    for images, labels in testloader:
        images, labels = images.to(device), labels.to(device)
        with torch.no_grad():
            outputs = net(images)
        correct += labels.eq(torch.argmax(outputs, dim=1)).sum().item()  # 0 이 COLUMN 방향 1 이 RAW 방향이라 그방향으로 보는것 
        total += labels.size(0)
        
        for idx in range(10): #mnist 는 class 가 10개니까 0~9 
            label_idx = (labels == idx) # label_idx 는 bool 형태임 
            if label_idx.sum().item() == 0:
                continue
            class_correct[idx] += labels[label_idx].eq(torch.argmax(
            outputs[label_idx],1)).sum().item()
            class_total[idx] += labels[label_idx].size(0)
        
    print('Accuracy: %d %%' %(100. * correct/total))
    for i in range(10):
        print('Accuracy of %5s : %2d %%' %(str(i), 100.*class_correct[i]/
                                           class_total[i]))
In [6]:
for epoch in range(5):
    train(epoch)
test()
[1,50]loss: 2.030
[1,100]loss: 0.614
[1,150]loss: 0.300
[1,200]loss: 0.235
[1,250]loss: 0.200
[1,300]loss: 0.196
[1,350]loss: 0.158
[1,400]loss: 0.147
[1,450]loss: 0.150
[2,50]loss: 0.109
[2,100]loss: 0.113
[2,150]loss: 0.134
[2,200]loss: 0.132
[2,250]loss: 0.116
[2,300]loss: 0.126
[2,350]loss: 0.103
[2,400]loss: 0.106
[2,450]loss: 0.099
[3,50]loss: 0.074
[3,100]loss: 0.075
[3,150]loss: 0.101
[3,200]loss: 0.074
[3,250]loss: 0.099
[3,300]loss: 0.094
[3,350]loss: 0.103
[3,400]loss: 0.094
[3,450]loss: 0.073
[4,50]loss: 0.062
[4,100]loss: 0.068
[4,150]loss: 0.073
[4,200]loss: 0.078
[4,250]loss: 0.067
[4,300]loss: 0.063
[4,350]loss: 0.091
[4,400]loss: 0.075
[4,450]loss: 0.073
[5,50]loss: 0.074
[5,100]loss: 0.060
[5,150]loss: 0.072
[5,200]loss: 0.061
[5,250]loss: 0.076
[5,300]loss: 0.068
[5,350]loss: 0.075
[5,400]loss: 0.055
[5,450]loss: 0.058
Accuracy: 97 %
Accuracy of     0 : 98 %
Accuracy of     1 : 99 %
Accuracy of     2 : 98 %
Accuracy of     3 : 97 %
Accuracy of     4 : 90 %
Accuracy of     5 : 99 %
Accuracy of     6 : 98 %
Accuracy of     7 : 96 %
Accuracy of     8 : 95 %
Accuracy of     9 : 97 %
In [7]:
import matplotlib.pyplot as plt
import numpy as np

batch_size = 128
# transform(x)
transform = transforms.Compose(
    [transforms.ToTensor(),
    transforms.Normalize((0.5, 0.5, 0.5),(0.5, 0.5, 0.5))] # mean, variance
)

trainset = datasets.CIFAR10(root='./data', train=True, download=True, 
                         transform=transform)

trainloader = DataLoader(trainset, batch_size=batch_size, 
                         shuffle=True, 
                         num_workers=2)

testset = datasets.CIFAR10(root='./data', train=False, download=True, 
                         transform=transform)

testloader = DataLoader(testset, batch_size=batch_size, 
                         shuffle=False, 
                         num_workers=2)
Files already downloaded and verified
Files already downloaded and verified
In [8]:
def imshow(img):
    img = img / 2 + 0.5     # unnormalize
    npimg = img.numpy()
    plt.imshow(np.transpose(npimg, (1, 2, 0)))


# get some random training images
dataiter = iter(trainloader)
images, labels = dataiter.next()

# show images
imshow(torchvision.utils.make_grid(images))
# print labels
print(' '.join('%5s' % labels[j].item() for j in range(4)))
    1     1     2     2
In [9]:
class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(in_channels=3,
                              out_channels=6,
                              kernel_size=5)
        self.conv2 = nn.Conv2d(in_channels=6,
                              out_channels=16,
                              kernel_size=5)
        # y = Wx + b 
        self.fc1  = nn.Linear(in_features = 16 * 5 * 5, out_features=120) # bias = True 가 기본
        self.fc2  = nn.Linear(in_features=120, out_features=84)
        self.fc3 =  nn.Linear(in_features=84, out_features=10)
        
    def forward(self, x):
        # x.shape : b, 1, 28,28 --> b, 6, 24, 24 
        # H(input 높이) , K(kernel size) : K(conv) --> H - K + 1
        x = self.conv1(x)
        x = F.relu(x)
        # x.shape: b, 6, 24, 24 --> b, 6, 12,12
        x = F.max_pool2d(x, (2,2))
        
        # x.shape: b, 6, 12, 12 --> b, 16, 8, 8
        x = self.conv2(x)
        x = F.relu(x)
        # x.shape: b, 16, 8, 8 --> b, 16, 4, 4
        x = F.max_pool2d(x, (2,2))
                
        # x.shape: b, 16, 4, 4 - > b, 256  (-1 이 쫙피게 연산해줌)
        x = x.view(x.size(0), -1)
        # x.shape: b , 256 --> b, 120
        x = self.fc1(x)
        x = F.relu(x)
        # x.shape: b, 120 --> b, 84
        x = self.fc2(x)
        x = F.relu(x)
        # x.shape: b,84 --> b, 10
        x = self.fc3(x)
        return x
    
net = Net()
device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
#print(device)
net = net.to(device=device)
print(net)

optimizer = optim.SGD(net.parameters(), lr=0.1, momentum=0.9)
criterion = nn.CrossEntropyLoss() 
Net(
  (conv1): Conv2d(3, 6, kernel_size=(5, 5), stride=(1, 1))
  (conv2): Conv2d(6, 16, kernel_size=(5, 5), stride=(1, 1))
  (fc1): Linear(in_features=400, out_features=120, bias=True)
  (fc2): Linear(in_features=120, out_features=84, bias=True)
  (fc3): Linear(in_features=84, out_features=10, bias=True)
)
In [12]:
def test():
    net.eval()
    correct = 0.
    total = 0.
    class_correct = [0.] * 10 #[0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.]
    class_total = [0.] * 10 
    
    for images, labels in testloader:
        images, labels = images.to(device), labels.to(device)
        with torch.no_grad():
            outputs = net(images)
        correct += labels.eq(torch.argmax(outputs, dim=1)).sum().item()  # 0 이 COLUMN 방향 1 이 RAW 방향이라 그방향으로 보는것 
        total += labels.size(0)
        
        for idx in range(10): #mnist 는 class 가 10개니까 0~9 
            label_idx = (labels == idx) # label_idx 는 bool 형태임 
            if label_idx.sum().item() == 0:
                continue
            class_correct[idx] += labels[label_idx].eq(torch.argmax(
            outputs[label_idx],1)).sum().item()
            class_total[idx] += labels[label_idx].size(0)
        
    print('Accuracy: %d %%' %(100. * correct/total))
    classes = ('plane', 'car', 'bird', 'cat', 'deer','dog', 'frog', 'horse', 'ship', 'truck')
    for i in range(10):
        print('Accuracy of %5s : %2d %%' %(classes[i], 100.*class_correct[i]/
                                           class_total[i]))

for epoch in range(10):
    train(epoch)
test()
[1,50]loss: 1.282
[1,100]loss: 1.343
[1,150]loss: 1.272
[1,200]loss: 1.383
[1,250]loss: 1.328
[1,300]loss: 1.312
[1,350]loss: 1.328
[2,50]loss: 1.295
[2,100]loss: 1.285
[2,150]loss: 1.320
[2,200]loss: 1.329
[2,250]loss: 1.347
[2,300]loss: 1.319
[2,350]loss: 1.354
[3,50]loss: 1.275
[3,100]loss: 1.302
[3,150]loss: 1.341
[3,200]loss: 1.330
[3,250]loss: 1.349
[3,300]loss: 1.412
[3,350]loss: 1.388
[4,50]loss: 1.260
[4,100]loss: 1.295
[4,150]loss: 1.336
[4,200]loss: 1.369
[4,250]loss: 1.348
[4,300]loss: 1.381
[4,350]loss: 1.409
[5,50]loss: 1.252
[5,100]loss: 1.336
[5,150]loss: 1.327
[5,200]loss: 1.411
[5,250]loss: 1.373
[5,300]loss: 1.387
[5,350]loss: 1.392
[6,50]loss: 1.302
[6,100]loss: 1.300
[6,150]loss: 1.296
[6,200]loss: 1.362
[6,250]loss: 1.345
[6,300]loss: 1.344
[6,350]loss: 1.377
[7,50]loss: 1.315
[7,100]loss: 1.314
[7,150]loss: 1.264
[7,200]loss: 1.343
[7,250]loss: 1.384
[7,300]loss: 1.347
[7,350]loss: 1.392
[8,50]loss: 1.312
[8,100]loss: 1.346
[8,150]loss: 1.441
[8,200]loss: 1.317
[8,250]loss: 1.356
[8,300]loss: 1.381
[8,350]loss: 1.383
[9,50]loss: 1.384
[9,100]loss: 1.291
[9,150]loss: 1.389
[9,200]loss: 1.349
[9,250]loss: 1.382
[9,300]loss: 1.351
[9,350]loss: 1.377
[10,50]loss: 1.306
[10,100]loss: 1.360
[10,150]loss: 1.389
[10,200]loss: 1.406
[10,250]loss: 1.430
[10,300]loss: 1.436
[10,350]loss: 1.399
Accuracy: 47 %
Accuracy of plane : 61 %
Accuracy of   car : 60 %
Accuracy of  bird : 13 %
Accuracy of   cat : 33 %
Accuracy of  deer : 41 %
Accuracy of   dog : 38 %
Accuracy of  frog : 40 %
Accuracy of horse : 60 %
Accuracy of  ship : 66 %
Accuracy of truck : 59 %
In [ ]: