From 8142d293525bd0a8aea6a1bcc3310ec5437366cc Mon Sep 17 00:00:00 2001
From: Gyeonghoon Lee
Date: Tue, 3 Jul 2018 17:14:48 +0900
Subject: [PATCH 01/12] Replace "volatile=True" with "with torch.no_grad():"

---
 09_2_softmax_mnist.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/09_2_softmax_mnist.py b/09_2_softmax_mnist.py
index 48d2483..3ef7e26 100644
--- a/09_2_softmax_mnist.py
+++ b/09_2_softmax_mnist.py
@@ -75,7 +75,8 @@ def test():
     test_loss = 0
     correct = 0
     for data, target in test_loader:
-        data, target = Variable(data, volatile=True), Variable(target)
+        with torch.no_grad():
+            data, target = Variable(data, volatile=True), Variable(target)
         output = model(data)
         # sum up batch loss
         test_loss += criterion(output, target).data[0]

From 99719257508f0f2cf615038565ffb04a1056b09a Mon Sep 17 00:00:00 2001
From: Gyeonghoon Lee
Date: Tue, 3 Jul 2018 17:15:27 +0900
Subject: [PATCH 02/12] Replace "volatile=True" with "with torch.no_grad():"

---
 10_1_cnn_mnist.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/10_1_cnn_mnist.py b/10_1_cnn_mnist.py
index 547c477..d854370 100644
--- a/10_1_cnn_mnist.py
+++ b/10_1_cnn_mnist.py
@@ -74,7 +74,8 @@ def test():
     test_loss = 0
     correct = 0
     for data, target in test_loader:
-        data, target = Variable(data, volatile=True), Variable(target)
+        with torch.no_grad():
+            data, target = Variable(data), Variable(target)
         output = model(data)
         # sum up batch loss
         test_loss += F.nll_loss(output, target, size_average=False).data[0]
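A note on these first two patches: PyTorch 0.4 removed the volatile=True flag, and inference is expected to run under the torch.no_grad() context manager instead (Variable itself became a no-op wrapper in the same release). Patch 01 still carries volatile=True inside the new block; patch 09 below removes it. A minimal sketch of the target idiom, assuming model, test_loader, and criterion as defined in the scripts being patched -- an illustration, not the patched file:

    import torch

    def evaluate(model, test_loader, criterion):
        # PyTorch >= 0.4 idiom: no Variable wrappers, no volatile flag.
        model.eval()
        test_loss = 0
        with torch.no_grad():  # disables gradient tracking for the whole loop
            for data, target in test_loader:
                output = model(data)
                test_loss += criterion(output, target).item()
        return test_loss / len(test_loader.dataset)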
From 41ea68b97174fa4b2cb14c8b04f9ca625b67c3e5 Mon Sep 17 00:00:00 2001
From: Gyeonghoon Lee
Date: Tue, 3 Jul 2018 17:16:03 +0900
Subject: [PATCH 03/12] Add label = label.unsqueeze(0)

---
 12_2_hello_rnn.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/12_2_hello_rnn.py b/12_2_hello_rnn.py
index c0364e3..6db3012 100644
--- a/12_2_hello_rnn.py
+++ b/12_2_hello_rnn.py
@@ -72,6 +72,7 @@ def init_hidden(self):
     sys.stdout.write("predicted string: ")
     for input, label in zip(inputs, labels):
         # print(input.size(), label.size())
+        label = label.unsqueeze(0)
         hidden, output = model(hidden, input)
         val, idx = output.max(1)
         sys.stdout.write(idx2char[idx.data[0]])

From e0e6c88b3ef222f9f5dffb5214963f06f99a85e6 Mon Sep 17 00:00:00 2001
From: Gyeonghoon Lee
Date: Tue, 3 Jul 2018 17:22:55 +0900
Subject: [PATCH 04/12] Add model parameters and Replace "loss.data[0]" with "loss.item()"

---
 12_4_hello_rnn_emb.py | 17 ++++++++++++-----
 1 file changed, 12 insertions(+), 5 deletions(-)

diff --git a/12_4_hello_rnn_emb.py b/12_4_hello_rnn_emb.py
index 3ab2098..e42aed2 100644
--- a/12_4_hello_rnn_emb.py
+++ b/12_4_hello_rnn_emb.py
@@ -27,12 +27,19 @@
 
 class Model(nn.Module):
 
-    def __init__(self):
+    def __init__(self, num_classes, input_size, hidden_size, num_layers, embedding_size):
         super(Model, self).__init__()
-        self.embedding = nn.Embedding(input_size, embedding_size)
-        self.rnn = nn.RNN(input_size=embedding_size,
+
+        self.num_classes = num_classes
+        self.num_layers = num_layers
+        self.input_size = input_size
+        self.hidden_size = hidden_size
+        self.embedding_size = embedding_size
+
+        self.embedding = nn.Embedding(self.input_size, self.embedding_size)
+        self.rnn = nn.RNN(input_size=self.embedding_size,
                           hidden_size=5, batch_first=True)
-        self.fc = nn.Linear(hidden_size, num_classes)
+        self.fc = nn.Linear(self.hidden_size, self.num_classes)
 
     def forward(self, x):
         # Initialize hidden and cell states
@@ -51,7 +58,7 @@ def forward(self, x):
 
 
 # Instantiate RNN model
-model = Model()
+model = Model(num_classes, input_size, hidden_size, num_layers, embedding_size)
 print(model)
 
 # Set loss and optimizer function

From d98fde4699a3126712ae364aa3b09cf025935458 Mon Sep 17 00:00:00 2001
From: Gyeonghoon Lee
Date: Tue, 3 Jul 2018 17:25:29 +0900
Subject: [PATCH 05/12] Replace "loss.data[0]" with "loss.item()"

---
 05_linear_regression.py | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/05_linear_regression.py b/05_linear_regression.py
index 0afd430..16cd686 100644
--- a/05_linear_regression.py
+++ b/05_linear_regression.py
@@ -1,4 +1,3 @@
-
 import torch
 from torch.autograd import Variable
 
@@ -41,7 +40,7 @@ def forward(self, x):
 
     # Compute and print loss
     loss = criterion(y_pred, y_data)
-    print(epoch, loss.data[0])
+    print(epoch, loss.item())
 
     # Zero gradients, perform a backward pass, and update the weights.
     optimizer.zero_grad()

From e4d8f9341f871ca3e46c14ae50527fe3d0823307 Mon Sep 17 00:00:00 2001
From: Gyeonghoon Lee
Date: Tue, 3 Jul 2018 17:26:47 +0900
Subject: [PATCH 06/12] Replace "loss.data[0]" with "loss.item()"

---
 06_logistic_regression.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/06_logistic_regression.py b/06_logistic_regression.py
index 4d16c83..2fbb30c 100644
--- a/06_logistic_regression.py
+++ b/06_logistic_regression.py
@@ -41,7 +41,7 @@ def forward(self, x):
 
     # Compute and print loss
     loss = criterion(y_pred, y_data)
-    print(epoch, loss.data[0])
+    print(epoch, loss.item())
 
     # Zero gradients, perform a backward pass, and update the weights.
     optimizer.zero_grad()

From ba0b6d7d120b58982ea89be5398efcfb04a7ee98 Mon Sep 17 00:00:00 2001
From: Gyeonghoon Lee
Date: Tue, 3 Jul 2018 17:28:33 +0900
Subject: [PATCH 07/12] Remove ".gz" and Replace "loss.data[0]" with "loss.item()"

---
 07_diabets_logistic.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/07_diabets_logistic.py b/07_diabets_logistic.py
index 14535cf..ecf3dcd 100644
--- a/07_diabets_logistic.py
+++ b/07_diabets_logistic.py
@@ -3,7 +3,7 @@
 from torch.autograd import Variable
 import numpy as np
 
-xy = np.loadtxt('./data/diabetes.csv.gz', delimiter=',', dtype=np.float32)
+xy = np.loadtxt('./data/diabetes.csv', delimiter=',', dtype=np.float32)
 x_data = Variable(torch.from_numpy(xy[:, 0:-1]))
 y_data = Variable(torch.from_numpy(xy[:, [-1]]))
 
@@ -52,7 +52,7 @@ def forward(self, x):
 
     # Compute and print loss
    loss = criterion(y_pred, y_data)
-    print(epoch, loss.data[0])
+    print(epoch, loss.item())
 
     # Zero gradients, perform a backward pass, and update the weights.
    optimizer.zero_grad()
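Patches 04 through 07 (and several later ones) all make the same API migration: from PyTorch 0.4 on, a reduced loss such as criterion(y_pred, y_data) is a zero-dimensional tensor, so the old loss.data[0] indexing no longer works, and loss.item() is the supported way to extract the Python number. A self-contained sketch of the difference -- the tensors here are stand-ins for illustration, not data from the scripts:

    import torch
    import torch.nn.functional as F

    loss = F.mse_loss(torch.ones(3), torch.zeros(3))  # 0-dim tensor in >= 0.4
    print(loss.item())     # 1.0 -- extracts a Python float
    # print(loss.data[0])  # pre-0.4 idiom; fails on 0-dim tensors in 0.4+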
From be72dde9895ba7708dc1bc236767d7140d4efa4e Mon Sep 17 00:00:00 2001
From: Gyeonghoon Lee
Date: Tue, 3 Jul 2018 17:31:56 +0900
Subject: [PATCH 08/12] Add "#num_workers=0 in cpu version"

---
 08_1_dataset_loader.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/08_1_dataset_loader.py b/08_1_dataset_loader.py
index aba0797..77843e6 100644
--- a/08_1_dataset_loader.py
+++ b/08_1_dataset_loader.py
@@ -29,7 +29,7 @@ def __len__(self):
 train_loader = DataLoader(dataset=dataset,
                           batch_size=32,
                           shuffle=True,
-                          num_workers=2)
+                          num_workers=2) #num_workers=0 in cpu version
 
 for epoch in range(2):
     for i, data in enumerate(train_loader, 0):

From d122ac6eca2acede9e970a1e8d83e75a3ab2b026 Mon Sep 17 00:00:00 2001
From: Gyeonghoon Lee
Date: Tue, 3 Jul 2018 17:38:58 +0900
Subject: [PATCH 09/12] Replace "volatile=True", ".data[0]" with "torch.no_grad():", ".item()"

---
 09_2_softmax_mnist.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/09_2_softmax_mnist.py b/09_2_softmax_mnist.py
index 3ef7e26..b72d5c7 100644
--- a/09_2_softmax_mnist.py
+++ b/09_2_softmax_mnist.py
@@ -67,7 +67,7 @@ def train(epoch):
         if batch_idx % 10 == 0:
             print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                 epoch, batch_idx * len(data), len(train_loader.dataset),
-                100. * batch_idx / len(train_loader), loss.data[0]))
+                100. * batch_idx / len(train_loader), loss.item()))
 
 
 def test():
@@ -76,10 +76,10 @@ def test():
     correct = 0
     for data, target in test_loader:
         with torch.no_grad():
-            data, target = Variable(data, volatile=True), Variable(target)
+            data, target = Variable(data), Variable(target)
         output = model(data)
         # sum up batch loss
-        test_loss += criterion(output, target).data[0]
+        test_loss += criterion(output, target).item()
         # get the index of the max
         pred = output.data.max(1, keepdim=True)[1]
         correct += pred.eq(target.data.view_as(pred)).cpu().sum()

From 8202f4ab1c4ecd21f8f083d645471c55466d52e4 Mon Sep 17 00:00:00 2001
From: Gyeonghoon Lee
Date: Tue, 3 Jul 2018 17:43:38 +0900
Subject: [PATCH 10/12] Remove ".gz" and Add "#num_workers=0 in cpu version"

---
 08_2_dataset_loade_logistic.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/08_2_dataset_loade_logistic.py b/08_2_dataset_loade_logistic.py
index 43ba9f4..27bb196 100644
--- a/08_2_dataset_loade_logistic.py
+++ b/08_2_dataset_loade_logistic.py
@@ -12,7 +12,7 @@ class DiabetesDataset(Dataset):
 
     # Initialize your data, download, etc.
     def __init__(self):
-        xy = np.loadtxt('./data/diabetes.csv.gz',
+        xy = np.loadtxt('./data/diabetes.csv',
                         delimiter=',', dtype=np.float32)
         self.len = xy.shape[0]
         self.x_data = torch.from_numpy(xy[:, 0:-1])
@@ -29,7 +29,7 @@ def __len__(self):
 train_loader = DataLoader(dataset=dataset,
                           batch_size=32,
                           shuffle=True,
-                          num_workers=2)
+                          num_workers=2) #num_workers=0 in cpu version
 
 
 class Model(torch.nn.Module):
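Patches 08 and 10 annotate the DataLoader call rather than change behavior: with num_workers=2 the loader spawns two worker processes, which brings little benefit on CPU-only installs and can hit platform multiprocessing issues (Windows additionally needs the if __name__ == '__main__': guard), hence the suggestion to use num_workers=0 there. A sketch that makes the switch explicit; choosing workers by CUDA availability is an assumption of this example, not something the patches prescribe, and dataset refers to the DiabetesDataset built in those scripts:

    import torch
    from torch.utils.data import DataLoader

    # Assumption: treat "cpu version" as "no CUDA available" and fall
    # back to single-process loading in that case.
    workers = 2 if torch.cuda.is_available() else 0

    train_loader = DataLoader(dataset=dataset,
                              batch_size=32,
                              shuffle=True,
                              num_workers=workers)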
From 14c40bfbbebe556f28a340424beb1d065e9dc98b Mon Sep 17 00:00:00 2001
From: Gyeonghoon Lee
Date: Tue, 3 Jul 2018 17:44:29 +0900
Subject: [PATCH 11/12] Replace "loss.data[0]" with "loss.item()"

---
 08_2_dataset_loade_logistic.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/08_2_dataset_loade_logistic.py b/08_2_dataset_loade_logistic.py
index 27bb196..2c89400 100644
--- a/08_2_dataset_loade_logistic.py
+++ b/08_2_dataset_loade_logistic.py
@@ -80,7 +80,7 @@ def forward(self, x):
 
         # Compute and print loss
         loss = criterion(y_pred, labels)
-        print(epoch, i, loss.data[0])
+        print(epoch, i, loss.item())
 
         # Zero gradients, perform a backward pass, and update the weights.
         optimizer.zero_grad()

From 28ff6a7a622aa6dd82246861a079ec4883ee75f0 Mon Sep 17 00:00:00 2001
From: Gyeonghoon Lee
Date: Tue, 3 Jul 2018 17:51:03 +0900
Subject: [PATCH 12/12] Replace "volatile=True", ".data[0]" with "torch.no_grad():", ".item()"

---
 11_1_toy_inception_mnist.py | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/11_1_toy_inception_mnist.py b/11_1_toy_inception_mnist.py
index fe8209a..3bffa93 100644
--- a/11_1_toy_inception_mnist.py
+++ b/11_1_toy_inception_mnist.py
@@ -104,7 +104,7 @@ def train(epoch):
         if batch_idx % 10 == 0:
             print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                 epoch, batch_idx * len(data), len(train_loader.dataset),
-                100. * batch_idx / len(train_loader), loss.data[0]))
+                100. * batch_idx / len(train_loader), loss.item()))
 
 
 def test():
@@ -112,10 +112,11 @@ def test():
     test_loss = 0
     correct = 0
     for data, target in test_loader:
-        data, target = Variable(data, volatile=True), Variable(target)
+        with torch.no_grad():
+            data, target = Variable(data), Variable(target)
         output = model(data)
         # sum up batch loss
-        test_loss += F.nll_loss(output, target, size_average=False).data[0]
+        test_loss += F.nll_loss(output, target, size_average=False).item()
         # get the index of the max log-probability
         pred = output.data.max(1, keepdim=True)[1]
         correct += pred.eq(target.data.view_as(pred)).cpu().sum()
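After patch 12 the series is internally consistent, but the test loops still construct Variables, which are no-ops from 0.4 onward. For comparison, a fully modernized sketch of the same test() pattern -- not part of the patch series; it also swaps the deprecated size_average=False for reduction='sum', the spelling adopted in later releases, and assumes model and test_loader as in the scripts above:

    import torch
    import torch.nn.functional as F

    def test(model, test_loader):
        model.eval()
        test_loss = 0
        correct = 0
        with torch.no_grad():  # replaces volatile=True for the whole loop
            for data, target in test_loader:
                output = model(data)
                # sum up batch loss (reduction='sum' supersedes size_average=False)
                test_loss += F.nll_loss(output, target, reduction='sum').item()
                # get the index of the max log-probability
                pred = output.max(1, keepdim=True)[1]
                correct += pred.eq(target.view_as(pred)).sum().item()
        test_loss /= len(test_loader.dataset)
        print('Test set: average loss {:.4f}, accuracy {}/{}'.format(
            test_loss, correct, len(test_loader.dataset)))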