diff --git a/PyTorch-Fundamentals/pytorch1.py b/PyTorch-Fundamentals/pytorch1.py
new file mode 100644
index 00000000..c3f38e64
--- /dev/null
+++ b/PyTorch-Fundamentals/pytorch1.py
@@ -0,0 +1,37 @@
+import numpy as np
+import matplotlib.pyplot as plt
+
+x_data = [1.0, 2.0, 3.0]
+y_data = [2.0, 4.0, 6.0]
+
+
+# Like the line y = a*x + b, but tweaked here to just y = x * w (no bias term)
+def forward(x):
+    return x * w
+
+
+# Squared loss, the same loss used in YOLO
+def loss(x, y):
+    y_pred = forward(x)
+    return (y_pred - y) * (y_pred - y)
+
+
+w_list = []
+mse_list = []
+
+for w in np.arange(0.0, 4.1, 0.1):
+    print("w=", w)
+    l_sum = 0
+    for x_val, y_val in zip(x_data, y_data):
+        y_pred_val = forward(x_val)
+        l = loss(x_val, y_val)
+        l_sum += l
+        print("\t", x_val, y_val, y_pred_val, l)
+    print("MSE=", l_sum / 3)
+    w_list.append(w)
+    mse_list.append(l_sum / 3)
+
+plt.plot(w_list, mse_list)
+plt.ylabel('Loss')
+plt.xlabel('w')
+plt.show()
\ No newline at end of file
diff --git a/PyTorch-Fundamentals/pytorch2.py b/PyTorch-Fundamentals/pytorch2.py
new file mode 100644
index 00000000..df4e53cf
--- /dev/null
+++ b/PyTorch-Fundamentals/pytorch2.py
@@ -0,0 +1,41 @@
+import numpy as np
+import matplotlib.pyplot as plt
+
+x_data = [1.0, 2.0, 3.0]
+y_data = [2.0, 4.0, 6.0]
+
+w = 1.0
+# Like the line y = a*x + b, but tweaked here to just y = x * w (no bias term)
+def forward(x):
+    return x * w
+
+
+# Squared loss, the same loss used in YOLO
+def loss(x, y):
+    y_pred = forward(x)
+    return (y_pred - y) * (y_pred - y)
+
+
+# Gradient of the loss with respect to w: d/dw (x*w - y)^2 = 2*x*(x*w - y)
+def gradient(x, y):
+    return 2 * x * (x * w - y)
+
+
+
+
+
+# Prediction before any weight update
+print("Before update", 4, forward(4))
+
+# Training loop: here we do update the weights
+for epoch in range(10):
+    for x_val, y_val in zip(x_data, y_data):
+        grad = gradient(x_val, y_val)
+        w = w - 0.01 * grad  # 0.01 is the learning rate; a smaller learning rate gives finer, more precise updates,
+        # but it also makes training take longer, so choose it based on the compute power available
+        print("\tgrad: ", x_val, y_val, round(grad, 2))
+        l = loss(x_val, y_val)
+
+    print("progress:", epoch, "w=", round(w, 2), "loss=", round(l, 2))
+
+# Let's see what the prediction looks like after the updates
+print("after update, in other words after training", "4 hours", forward(4))
\ No newline at end of file
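The hand-derived gradient used in pytorch2.py comes from d/dw (x*w - y)^2 = 2*x*(x*w - y). A minimal numerical sanity check of that formula (illustrative only, not part of the files above; the helper names are made up):

# Compare the analytic gradient against a central finite difference
def analytic_grad(x, y, w):
    return 2 * x * (x * w - y)

def numeric_grad(x, y, w, eps=1e-6):
    loss = lambda w_: (x * w_ - y) ** 2
    return (loss(w + eps) - loss(w - eps)) / (2 * eps)

print(analytic_grad(2.0, 4.0, 1.0))  # -8.0
print(numeric_grad(2.0, 4.0, 1.0))   # approximately -8.0, so the formula checks out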
diff --git a/PyTorch-Fundamentals/pytorch3.py b/PyTorch-Fundamentals/pytorch3.py
new file mode 100644
index 00000000..a30c93d2
--- /dev/null
+++ b/PyTorch-Fundamentals/pytorch3.py
@@ -0,0 +1,44 @@
+# Deriving the gradient by hand quickly becomes impractical for larger models,
+# so we use the framework's built-in tools to compute the gradient.
+# In PyTorch, the data has to be wrapped in the appropriate variable type for autograd.
+import torch
+from torch.autograd import Variable
+
+x_data = [1.0, 2.0, 3.0]
+y_data = [2.0, 4.0, 6.0]
+
+
+# Using w in forward() below builds the forward computation graph;
+
+# in short, the gradient of w is computed automatically by tracing that graph
+w = Variable(torch.Tensor([1.0]), requires_grad=True)  # requires_grad=True tells autograd to track gradients for w
+
+
+def forward(x):
+    return x * w
+
+
+
+
+
+def loss(x, y):
+    y_pred = forward(x)
+    return (y_pred - y) * (y_pred - y)
+
+print("Before update", 4, forward(4))
+
+# Training loop
+for epoch in range(10):
+    for x_val, y_val in zip(x_data, y_data):
+        l = loss(x_val, y_val)
+        # loss computed; now back-propagate through the graph
+        l.backward()
+        print("\tgrad: ", x_val, y_val, w.grad.data[0])
+        w.data = w.data - 0.01 * w.grad.data
+
+        # Manually zero the gradients after updating the weights
+        w.grad.data.zero_()
+
+    print("progress:", epoch, l.data[0])
+
+# After training
+print("predict (after training)", 4, forward(4).data[0])
diff --git a/PyTorch-Fundamentals/pytorch4.py b/PyTorch-Fundamentals/pytorch4.py
new file mode 100644
index 00000000..029db141
--- /dev/null
+++ b/PyTorch-Fundamentals/pytorch4.py
@@ -0,0 +1,53 @@
+import torch
+from torch.autograd import Variable
+import torch.nn.functional as F
+
+x_data = Variable(torch.Tensor([[1.0], [2.0], [3.0]]))
+y_data = Variable(torch.Tensor([[2.0], [4.0], [6.0]]))
+
+
+class Model(torch.nn.Module):
+    # __init__: define the model parameters --> one linear layer, one input and one output
+    # forward: predict using that linear layer
+    def __init__(self):
+
+        super(Model, self).__init__()
+        self.linear = torch.nn.Linear(1, 1)  # One in and one out
+
+    def forward(self, x):
+        y_pred = self.linear(x)
+        return y_pred
+
+
+# Initialize the model
+model = Model()
+
+
+# Mean squared error loss
+criterion = torch.nn.MSELoss(size_average=False)
+# SGD stands for stochastic gradient descent: default params apart from the learning rate
+optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
+
+
+# Training loop
+for epoch in range(500):
+    # Forward pass
+    y_pred = model(x_data)
+
+    # Calculate the loss between predicted and actual values
+    loss = criterion(y_pred, y_data)
+    print(epoch, loss.data[0])
+    # We now have the loss from the computation graph; back-propagate and update
+    # Reset the gradients
+    optimizer.zero_grad()
+    # Back-propagate the loss (loss is already a Variable in the graph)
+    loss.backward()
+    # Update the weights
+    optimizer.step()
+
+
+# Convert the data you want to predict on into a Variable as well
+hour_var = Variable(torch.Tensor([[4.0]]))
+# Predict
+y_pred = model(hour_var)
+print("predict (after training)", 4, y_pred.data[0][0])
diff --git a/PyTorch-Fundamentals/pytorch5.py b/PyTorch-Fundamentals/pytorch5.py
new file mode 100644
index 00000000..c9d13b80
--- /dev/null
+++ b/PyTorch-Fundamentals/pytorch5.py
@@ -0,0 +1,52 @@
+import torch
+from torch.autograd import Variable
+import torch.nn.functional as F
+
+x_data = Variable(torch.Tensor([[1.0], [2.0], [3.0]]))
+y_data = Variable(torch.Tensor([[0.0], [0.0], [1.0]]))  # binary fail/pass labels, as required by BCELoss
+
+
+class Model(torch.nn.Module):
+    # __init__: define the model parameters --> one linear layer, one input and one output
+    # forward: predict with the linear layer followed by a sigmoid
+    def __init__(self):
+
+        super(Model, self).__init__()
+        self.linear = torch.nn.Linear(1, 1)  # One in and one out
+
+    def forward(self, x):
+        y_pred = F.sigmoid(self.linear(x))
+        return y_pred
+
+
+# Initialize the model
+model = Model()
+
+
+# Binary cross-entropy loss
+criterion = torch.nn.BCELoss(size_average=False)
+# SGD stands for stochastic gradient descent: default params apart from the learning rate
+optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
+
+
+# Training loop
+for epoch in range(500):
+    # Forward pass
+    y_pred = model(x_data)
+
+    # Calculate the loss between predicted and actual values
+    loss = criterion(y_pred, y_data)
+    print(epoch, loss.data[0])
+    # We now have the loss from the computation graph; back-propagate and update
+    # Reset the gradients
+    optimizer.zero_grad()
+    # Back-propagate the loss (loss is already a Variable in the graph)
+    loss.backward()
+    # Update the weights
+    optimizer.step()
+
+
+hour_var = Variable(torch.Tensor([[1.0]]))
+print("predict 1 hour ", 1.0, model(hour_var).data[0][0] > 0.5)
+hour_var = Variable(torch.Tensor([[7.0]]))
+print("predict 7 hours", 7.0, model(hour_var).data[0][0] > 0.5)
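For the linear model in pytorch4.py the learned parameters should approach w ≈ 2 and b ≈ 0, since the data follows y = 2x; they can be read directly off the nn.Linear layer after training. A minimal sketch, assuming the trained `model` object from that script is in scope (the .data accessors match the Variable-era API used above):

# Inspect the learned weight and bias of the single nn.Linear(1, 1) layer
w = model.linear.weight.data[0][0]  # should be close to 2.0
b = model.linear.bias.data[0]       # should be close to 0.0
print("learned w =", w, "learned b =", b)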
diff --git a/PyTorch-Fundamentals/pytorch6.py b/PyTorch-Fundamentals/pytorch6.py
new file mode 100644
index 00000000..d7ecd765
--- /dev/null
+++ b/PyTorch-Fundamentals/pytorch6.py
@@ -0,0 +1,44 @@
+import torch
+from torch.autograd import Variable
+import numpy as np
+
+xy = np.loadtxt('diabetes.csv', delimiter=',', dtype=np.float32)
+x_data = Variable(torch.from_numpy(xy[:, 0:-1]))  # all columns except the last: 8 input features
+y_data = Variable(torch.from_numpy(xy[:, [-1]]))  # last column: the 0/1 label
+
+print(x_data.data.shape)
+print(y_data.data.shape)
+
+
+class Model(torch.nn.Module):
+
+    def __init__(self):
+
+        super(Model, self).__init__()
+        self.l1 = torch.nn.Linear(8, 6)  # 8 input features -> 6
+        self.l2 = torch.nn.Linear(6, 4)  # 6 -> 4
+        self.l3 = torch.nn.Linear(4, 1)  # 4 -> 1 output
+
+        self.sigmoid = torch.nn.Sigmoid()
+
+    def forward(self, x):
+        out1 = self.sigmoid(self.l1(x))
+        out2 = self.sigmoid(self.l2(out1))
+        y_pred = self.sigmoid(self.l3(out2))
+        return y_pred
+
+model = Model()
+
+
+criterion = torch.nn.BCELoss(size_average=True)
+optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
+
+for epoch in range(100):
+    y_pred = model(x_data)
+
+    loss = criterion(y_pred, y_data)
+    print(epoch, loss.data[0])
+
+    optimizer.zero_grad()
+    loss.backward()
+    optimizer.step()
\ No newline at end of file
diff --git a/PyTorch-Fundamentals/pytorch7.py b/PyTorch-Fundamentals/pytorch7.py
new file mode 100644
index 00000000..db1865ce
--- /dev/null
+++ b/PyTorch-Fundamentals/pytorch7.py
@@ -0,0 +1,43 @@
+# As the dataset grows larger over time,
+# we should split it into batches to keep training efficient:
+# we process one batch at a time, compute the gradient, and update.
+
+# e.g. 1000 training examples
+# with a batch size of 500
+# it takes 2 iterations to complete 1 epoch
+
+# PyTorch's DataLoader handles this batching for us
+import torch
+import numpy as np
+from torch.autograd import Variable
+from torch.utils.data import Dataset, DataLoader
+
+
+class DiabetesDataset(Dataset):
+
+    def __init__(self):
+        xy = np.loadtxt('diabetes.csv',
+                        delimiter=',', dtype=np.float32)
+        self.len = xy.shape[0]
+        self.x_data = torch.from_numpy(xy[:, 0:-1])
+        self.y_data = torch.from_numpy(xy[:, [-1]])
+
+    def __getitem__(self, index):
+        return self.x_data[index], self.y_data[index]
+
+    def __len__(self):
+        return self.len
+
+
+dataset = DiabetesDataset()
+train_loader = DataLoader(dataset=dataset,
+                          batch_size=32,
+                          shuffle=True,
+                          num_workers=2)
+
+for epoch in range(2):
+    for i, data in enumerate(train_loader, 0):
+
+        inputs, labels = data
+        inputs, labels = Variable(inputs), Variable(labels)
+        print(epoch, i, "inputs", inputs.data, "labels", labels.data)
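The batching arithmetic described at the top of pytorch7.py (iterations per epoch = ceil(dataset size / batch size)) can be checked against the DataLoader itself; a minimal sketch, assuming the `dataset` and `train_loader` objects from that script are in scope:

import math

# With batch_size=32, one epoch is ceil(N / 32) mini-batches
iterations_per_epoch = math.ceil(len(dataset) / 32)
print("examples:", len(dataset), "iterations per epoch:", iterations_per_epoch)
print("DataLoader reports:", len(train_loader))  # number of batches; should match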
diff --git a/PyTorch-Fundamentals/pytorch8.py b/PyTorch-Fundamentals/pytorch8.py
new file mode 100644
index 00000000..e1726db2
--- /dev/null
+++ b/PyTorch-Fundamentals/pytorch8.py
@@ -0,0 +1,99 @@
+from __future__ import print_function
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+import torch.optim as optim
+from torchvision import datasets, transforms
+from torch.autograd import Variable
+
+batch_size = 64
+
+train_dataset = datasets.MNIST(root='./mnist_data/',
+                               train=True,
+                               transform=transforms.ToTensor(),
+                               download=True)
+
+
+test_dataset = datasets.MNIST(root='./mnist_data/',
+                              train=False,
+                              transform=transforms.ToTensor())
+
+
+
+
+
+
+
+train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
+                                           batch_size=batch_size,
+                                           shuffle=True)
+
+
+
+test_loader = torch.utils.data.DataLoader(dataset=test_dataset,
+                                          batch_size=batch_size,
+                                          shuffle=False)
+
+
+class Net(nn.Module):
+
+    def __init__(self):
+        super(Net, self).__init__()
+        self.l1 = nn.Linear(784, 520)
+        self.l2 = nn.Linear(520, 320)
+        self.l3 = nn.Linear(320, 240)
+        self.l4 = nn.Linear(240, 120)
+        self.l5 = nn.Linear(120, 10)
+
+    def forward(self, x):
+        x = x.view(-1, 784)  # Flatten the data (n, 1, 28, 28) -> (n, 784)
+        x = F.relu(self.l1(x))
+        x = F.relu(self.l2(x))
+        x = F.relu(self.l3(x))
+        x = F.relu(self.l4(x))
+        return self.l5(x)  # raw scores (logits); CrossEntropyLoss applies log-softmax internally
+
+
+model = Net()
+
+criterion = nn.CrossEntropyLoss()
+optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.5)
+
+
+def train(epoch):
+    model.train()
+    for batch_idx, (data, target) in enumerate(train_loader):
+        data, target = Variable(data), Variable(target)
+        optimizer.zero_grad()
+        output = model(data)
+        loss = criterion(output, target)
+        loss.backward()
+        optimizer.step()
+        if batch_idx % 10 == 0:
+            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
+                epoch, batch_idx * len(data), len(train_loader.dataset),
+                100. * batch_idx / len(train_loader), loss.data[0]))
+
+
+def test():
+    model.eval()
+    test_loss = 0
+    correct = 0
+    for data, target in test_loader:
+        data, target = Variable(data, volatile=True), Variable(target)  # volatile=True: no gradients needed at test time
+        output = model(data)
+        # sum up batch loss
+        test_loss += criterion(output, target).data[0]
+        # the prediction is the index of the max output
+        pred = output.data.max(1, keepdim=True)[1]
+        correct += pred.eq(target.data.view_as(pred)).cpu().sum()
+
+    test_loss /= len(test_loader.dataset)
+    print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
+        test_loss, correct, len(test_loader.dataset),
+        100. * correct / len(test_loader.dataset)))
+
+
+for epoch in range(1, 10):
+    train(epoch)
+    test()
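Net.forward in pytorch8.py returns the raw scores (logits) of the last layer with no softmax, because nn.CrossEntropyLoss combines log-softmax and negative log-likelihood internally. A minimal sketch of that equivalence on made-up tensors (illustrative only, not taken from the MNIST data):

import torch
import torch.nn.functional as F

logits = torch.randn(4, 10)               # fake batch: 4 samples, 10 classes
targets = torch.LongTensor([1, 0, 3, 9])  # fake class labels

loss_a = F.cross_entropy(logits, targets)
loss_b = F.nll_loss(F.log_softmax(logits, dim=1), targets)
print(loss_a, loss_b)  # the two losses are the same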