From 6f98bb7cc71f29d223e5af38837ab2fb671030ef Mon Sep 17 00:00:00 2001
From: jaewoogwak
Date: Sat, 23 Mar 2024 17:19:45 +0900
Subject: [PATCH] Fix bug: tensor.data[0] -> tensor.item()

---
 11_1_toy_inception_mnist.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/11_1_toy_inception_mnist.py b/11_1_toy_inception_mnist.py
index fe8209a..70f771e 100644
--- a/11_1_toy_inception_mnist.py
+++ b/11_1_toy_inception_mnist.py
@@ -104,7 +104,7 @@ def train(epoch):
         if batch_idx % 10 == 0:
             print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                 epoch, batch_idx * len(data), len(train_loader.dataset),
-                100. * batch_idx / len(train_loader), loss.data[0]))
+                100. * batch_idx / len(train_loader), loss.data.item()))
 
 
 def test():
@@ -115,7 +115,7 @@ def test():
         data, target = Variable(data, volatile=True), Variable(target)
         output = model(data)
         # sum up batch loss
-        test_loss += F.nll_loss(output, target, size_average=False).data[0]
+        test_loss += F.nll_loss(output, target, size_average=False).item()
         # get the index of the max log-probability
         pred = output.data.max(1, keepdim=True)[1]
         correct += pred.eq(target.data.view_as(pred)).cpu().sum()
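
Background for the change above, as a minimal standalone sketch (not part of the patch; the tensors below are made-up stand-ins, not the MNIST model's output): since PyTorch 0.4 a reduced loss is a 0-dimensional tensor, so the old loss.data[0] indexing fails on current versions and the scalar has to be read with .item() instead. loss.data.item() and loss.item() return the same Python float; the latter is the more common spelling.

    import torch
    import torch.nn.functional as F

    # Minimal sketch with fake data: why .item() replaces .data[0].
    output = torch.randn(4, 10)          # stand-in for model log-probabilities
    target = torch.tensor([1, 0, 3, 2])  # stand-in class labels
    loss = F.nll_loss(output, target)

    print(loss.dim())    # 0 -> the loss is a scalar (0-dim) tensor
    # loss.data[0]       # raises IndexError on current PyTorch: 0-dim tensors cannot be indexed
    print(loss.item())   # extracts the Python float for logging or accumulation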