diff --git a/05_linear_regression.py b/05_linear_regression.py
index 0afd430..16cd686 100644
--- a/05_linear_regression.py
+++ b/05_linear_regression.py
@@ -1,4 +1,3 @@
-
 import torch
 from torch.autograd import Variable
 
@@ -41,7 +40,7 @@ def forward(self, x):
 
     # Compute and print loss
     loss = criterion(y_pred, y_data)
-    print(epoch, loss.data[0])
+    print(epoch, loss.item())
 
     # Zero gradients, perform a backward pass, and update the weights.
     optimizer.zero_grad()
diff --git a/06_logistic_regression.py b/06_logistic_regression.py
index 4d16c83..2fbb30c 100644
--- a/06_logistic_regression.py
+++ b/06_logistic_regression.py
@@ -41,7 +41,7 @@ def forward(self, x):
 
     # Compute and print loss
    loss = criterion(y_pred, y_data)
-    print(epoch, loss.data[0])
+    print(epoch, loss.item())
 
     # Zero gradients, perform a backward pass, and update the weights.
     optimizer.zero_grad()
diff --git a/07_diabets_logistic.py b/07_diabets_logistic.py
index 14535cf..ecf3dcd 100644
--- a/07_diabets_logistic.py
+++ b/07_diabets_logistic.py
@@ -3,7 +3,7 @@
 from torch.autograd import Variable
 import numpy as np
 
-xy = np.loadtxt('./data/diabetes.csv.gz', delimiter=',', dtype=np.float32)
+xy = np.loadtxt('./data/diabetes.csv', delimiter=',', dtype=np.float32)
 x_data = Variable(torch.from_numpy(xy[:, 0:-1]))
 y_data = Variable(torch.from_numpy(xy[:, [-1]]))
 
@@ -52,7 +52,7 @@ def forward(self, x):
 
     # Compute and print loss
     loss = criterion(y_pred, y_data)
-    print(epoch, loss.data[0])
+    print(epoch, loss.item())
 
     # Zero gradients, perform a backward pass, and update the weights.
     optimizer.zero_grad()
diff --git a/08_1_dataset_loader.py b/08_1_dataset_loader.py
index aba0797..77843e6 100644
--- a/08_1_dataset_loader.py
+++ b/08_1_dataset_loader.py
@@ -29,7 +29,7 @@ def __len__(self):
 train_loader = DataLoader(dataset=dataset,
                           batch_size=32,
                           shuffle=True,
-                          num_workers=2)
+                          num_workers=2)  # use num_workers=0 in the CPU-only version
 
 for epoch in range(2):
     for i, data in enumerate(train_loader, 0):
diff --git a/08_2_dataset_loade_logistic.py b/08_2_dataset_loade_logistic.py
index 43ba9f4..2c89400 100644
--- a/08_2_dataset_loade_logistic.py
+++ b/08_2_dataset_loade_logistic.py
@@ -12,7 +12,7 @@ class DiabetesDataset(Dataset):
 
     # Initialize your data, download, etc.
     def __init__(self):
-        xy = np.loadtxt('./data/diabetes.csv.gz',
+        xy = np.loadtxt('./data/diabetes.csv',
                         delimiter=',', dtype=np.float32)
         self.len = xy.shape[0]
         self.x_data = torch.from_numpy(xy[:, 0:-1])
@@ -29,7 +29,7 @@ def __len__(self):
 train_loader = DataLoader(dataset=dataset,
                           batch_size=32,
                           shuffle=True,
-                          num_workers=2)
+                          num_workers=2)  # use num_workers=0 in the CPU-only version
 
 
 class Model(torch.nn.Module):
@@ -80,7 +80,7 @@ def forward(self, x):
 
         # Compute and print loss
         loss = criterion(y_pred, labels)
-        print(epoch, i, loss.data[0])
+        print(epoch, i, loss.item())
 
         # Zero gradients, perform a backward pass, and update the weights.
         optimizer.zero_grad()
diff --git a/09_2_softmax_mnist.py b/09_2_softmax_mnist.py
index 48d2483..b72d5c7 100644
--- a/09_2_softmax_mnist.py
+++ b/09_2_softmax_mnist.py
@@ -67,7 +67,7 @@ def train(epoch):
         if batch_idx % 10 == 0:
             print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                 epoch, batch_idx * len(data), len(train_loader.dataset),
-                100. * batch_idx / len(train_loader), loss.data[0]))
+                100. * batch_idx / len(train_loader), loss.item()))
 
 
 def test():
@@ -75,10 +75,11 @@ def test():
     test_loss = 0
     correct = 0
     for data, target in test_loader:
-        data, target = Variable(data, volatile=True), Variable(target)
-        output = model(data)
+        with torch.no_grad():
+            data, target = Variable(data), Variable(target)
+            output = model(data)
         # sum up batch loss
-        test_loss += criterion(output, target).data[0]
+        test_loss += criterion(output, target).item()
         # get the index of the max
         pred = output.data.max(1, keepdim=True)[1]
         correct += pred.eq(target.data.view_as(pred)).cpu().sum()
diff --git a/10_1_cnn_mnist.py b/10_1_cnn_mnist.py
index 547c477..d854370 100644
--- a/10_1_cnn_mnist.py
+++ b/10_1_cnn_mnist.py
@@ -74,7 +74,8 @@ def test():
     test_loss = 0
     correct = 0
     for data, target in test_loader:
-        data, target = Variable(data, volatile=True), Variable(target)
-        output = model(data)
+        with torch.no_grad():
+            data, target = Variable(data), Variable(target)
+            output = model(data)
         # sum up batch loss
-        test_loss += F.nll_loss(output, target, size_average=False).data[0]
+        test_loss += F.nll_loss(output, target, size_average=False).item()
diff --git a/11_1_toy_inception_mnist.py b/11_1_toy_inception_mnist.py
index fe8209a..3bffa93 100644
--- a/11_1_toy_inception_mnist.py
+++ b/11_1_toy_inception_mnist.py
@@ -104,7 +104,7 @@ def train(epoch):
         if batch_idx % 10 == 0:
             print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                 epoch, batch_idx * len(data), len(train_loader.dataset),
-                100. * batch_idx / len(train_loader), loss.data[0]))
+                100. * batch_idx / len(train_loader), loss.item()))
 
 
 def test():
@@ -112,10 +112,11 @@ def test():
     test_loss = 0
     correct = 0
     for data, target in test_loader:
-        data, target = Variable(data, volatile=True), Variable(target)
-        output = model(data)
+        with torch.no_grad():
+            data, target = Variable(data), Variable(target)
+            output = model(data)
         # sum up batch loss
-        test_loss += F.nll_loss(output, target, size_average=False).data[0]
+        test_loss += F.nll_loss(output, target, size_average=False).item()
         # get the index of the max log-probability
         pred = output.data.max(1, keepdim=True)[1]
         correct += pred.eq(target.data.view_as(pred)).cpu().sum()
diff --git a/12_2_hello_rnn.py b/12_2_hello_rnn.py
index c0364e3..6db3012 100644
--- a/12_2_hello_rnn.py
+++ b/12_2_hello_rnn.py
@@ -72,6 +72,7 @@ def init_hidden(self):
     sys.stdout.write("predicted string: ")
     for input, label in zip(inputs, labels):
         # print(input.size(), label.size())
+        label = label.unsqueeze(0)
         hidden, output = model(hidden, input)
         val, idx = output.max(1)
         sys.stdout.write(idx2char[idx.data[0]])
diff --git a/12_4_hello_rnn_emb.py b/12_4_hello_rnn_emb.py
index 3ab2098..e42aed2 100644
--- a/12_4_hello_rnn_emb.py
+++ b/12_4_hello_rnn_emb.py
@@ -27,12 +27,19 @@
 
 class Model(nn.Module):
 
-    def __init__(self):
+    def __init__(self, num_classes, input_size, hidden_size, num_layers, embedding_size):
         super(Model, self).__init__()
-        self.embedding = nn.Embedding(input_size, embedding_size)
-        self.rnn = nn.RNN(input_size=embedding_size,
-                          hidden_size=5, batch_first=True)
-        self.fc = nn.Linear(hidden_size, num_classes)
+
+        self.num_classes = num_classes
+        self.num_layers = num_layers
+        self.input_size = input_size
+        self.hidden_size = hidden_size
+        self.embedding_size = embedding_size
+
+        self.embedding = nn.Embedding(self.input_size, self.embedding_size)
+        self.rnn = nn.RNN(input_size=self.embedding_size,
+                          hidden_size=self.hidden_size, batch_first=True)
+        self.fc = nn.Linear(self.hidden_size, self.num_classes)
 
     def forward(self, x):
         # Initialize hidden and cell states
@@ -51,7 +58,7 @@ def forward(self, x):
 
 
 # Instantiate RNN model
-model = Model()
+model = Model(num_classes, input_size, hidden_size, num_layers, embedding_size)
 print(model)
 
 # Set loss and optimizer function
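
Note on the loss.data[0] -> loss.item() changes: since PyTorch 0.4 a criterion returns a 0-dimensional tensor, and indexing it with [0] is deprecated there and an IndexError in later releases; .item() is the supported way to read out the Python scalar. A minimal, self-contained sketch (not taken from any file in this diff):

    import torch

    criterion = torch.nn.MSELoss()
    loss = criterion(torch.zeros(3), torch.ones(3))  # 0-dim tensor
    print(loss.item())  # 1.0, a plain Python float
    # loss.data[0]      # deprecated in 0.4, IndexError in 0.5+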
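
Note on the test() changes: volatile=True became a no-op in PyTorch 0.4, and the Variable wrapper itself is a no-op there as well, so the memory savings come from running the forward pass inside torch.no_grad(), not from how the inputs are wrapped. A more idiomatic sketch of the same loop, assuming a model, criterion, and test_loader like the ones in these scripts:

    def test():
        model.eval()                 # dropout off, batch norm in eval mode
        test_loss, correct = 0, 0
        with torch.no_grad():        # no autograd graph is recorded here
            for data, target in test_loader:
                output = model(data)
                test_loss += criterion(output, target).item()
                pred = output.max(1, keepdim=True)[1]  # index of the max score
                correct += pred.eq(target.view_as(pred)).sum().item()
        print('Accuracy: {}/{}'.format(correct, len(test_loader.dataset)))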
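
Note on the 12_4_hello_rnn_emb.py refactor: Model now receives its hyperparameters as constructor arguments instead of reading module-level globals, and the RNN's hidden size is tied to the same self.hidden_size the final Linear layer uses. A usage sketch with illustrative values (the script defines its own num_classes, input_size, etc. near the top):

    model = Model(num_classes=5, input_size=5, hidden_size=5,
                  num_layers=1, embedding_size=10)
    print(model)  # Embedding(5, 10), RNN(10, 5, batch_first=True), Linear(5, 5)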