PyTorch Content Added

avnoor-488 authored Jun 20, 2022
1 parent d9f6ffb commit ecfe9a2

Showing 8 changed files with 413 additions and 0 deletions.
37 changes: 37 additions & 0 deletions PyTorch-Fundamentals/pytorch1.py
@@ -0,0 +1,37 @@
import numpy as np
import matplotlib.pyplot as plt

x_data = [1.0, 2.0, 3.0]
y_data = [2.0, 4.0, 6.0]


# Like y = a*x + b, but simplified here to y = x * w (no bias term).
def forward(x):
    return x * w


# Squared-error loss (the same loss we used in YOLO).
def loss(x, y):
    y_pred = forward(x)
    return (y_pred - y) * (y_pred - y)


w_list = []
mse_list = []

for w in np.arange(0.0, 4.1, 0.1):
    print("w=", w)
    l_sum = 0
    for x_val, y_val in zip(x_data, y_data):
        y_pred_val = forward(x_val)
        l = loss(x_val, y_val)
        l_sum += l
        print("\t", x_val, y_val, y_pred_val, l)
    print("MSE=", l_sum / 3)
    w_list.append(w)
    mse_list.append(l_sum / 3)

plt.plot(w_list, mse_list)
plt.ylabel('Loss')
plt.xlabel('w')
plt.show()
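
Since the data was generated by y = 2x, the MSE curve should bottom out at w = 2.0. A small sanity-check sketch (not part of the commit), reusing the w_list and mse_list built above:

# Read the best weight off the sweep; expect w ~ 2.0 with MSE ~ 0.
best_idx = int(np.argmin(mse_list))
print("best w:", round(w_list[best_idx], 2), "MSE:", mse_list[best_idx])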
41 changes: 41 additions & 0 deletions PyTorch-Fundamentals/pytorch2.py
@@ -0,0 +1,41 @@
x_data = [1.0, 2.0, 3.0]
y_data = [2.0, 4.0, 6.0]

w = 1.0


# Like y = a*x + b, but simplified here to y = x * w (no bias term).
def forward(x):
    return x * w


# Squared-error loss (the same loss we used in YOLO).
def loss(x, y):
    y_pred = forward(x)
    return (y_pred - y) * (y_pred - y)


# d(loss)/dw = d/dw (x*w - y)^2 = 2*x*(x*w - y)
def gradient(x, y):
    return 2 * x * (x * w - y)




# Prediction before any weight updates.
print("Before update", 4, forward(4))

# Training loop: update the weight after every sample.
for epoch in range(10):
    for x_val, y_val in zip(x_data, y_data):
        grad = gradient(x_val, y_val)
        # 0.01 is the learning rate: a smaller rate gives finer updates but
        # takes longer to converge, so pick it based on the compute available.
        w = w - 0.01 * grad
        print("\tgrad: ", x_val, y_val, round(grad, 2))
        l = loss(x_val, y_val)

    print("progress:", epoch, "w=", round(w, 2), "loss=", round(l, 2))

# Let's see what the model predicts after the updates, i.e., after training.
print("after update in other words training", "4 hours", forward(4))
44 changes: 44 additions & 0 deletions PyTorch-Fundamentals/pytorch3.py
@@ -0,0 +1,44 @@
# For bigger models, deriving the gradient by hand becomes impractical,
# so we use the framework's autograd tools to compute it.
# In PyTorch, the data must live in tensors so autograd can track it.
import torch

x_data = [1.0, 2.0, 3.0]
y_data = [2.0, 4.0, 6.0]


# requires_grad=True tells PyTorch to build the computation graph through w,
# so the gradient of the loss with respect to w is computed automatically.
# (Older tutorials wrap this in torch.autograd.Variable, now deprecated.)
w = torch.tensor([1.0], requires_grad=True)


def forward(x):
    return x * w


def loss(x, y):
    y_pred = forward(x)
    return (y_pred - y) * (y_pred - y)


print("Before update", 4, forward(4).item())

# Training loop
for epoch in range(10):
    for x_val, y_val in zip(x_data, y_data):
        l = loss(x_val, y_val)
        # Backprop: computes dl/dw and stores it in w.grad
        l.backward()
        print("\tgrad: ", x_val, y_val, w.grad.item())
        # Update w in place, outside the autograd graph
        w.data = w.data - 0.01 * w.grad.data

        # Manually zero the gradients after updating weights
        w.grad.data.zero_()

    print("progress:", epoch, l.item())

# After training
print("predict (after training)", 4, forward(4).item())
53 changes: 53 additions & 0 deletions PyTorch-Fundamentals/pytorch4.py
@@ -0,0 +1,53 @@
import torch

x_data = torch.tensor([[1.0], [2.0], [3.0]])
y_data = torch.tensor([[2.0], [4.0], [6.0]])


class Model(torch.nn.Module):
    # __init__ sets up the parameters: one linear layer, one input, one output.
    # forward() predicts from that linear layer.
    def __init__(self):
        super(Model, self).__init__()
        self.linear = torch.nn.Linear(1, 1)  # one in and one out

    def forward(self, x):
        y_pred = self.linear(x)
        return y_pred


# Initialize the model
model = Model()


# Mean squared error loss, summed over the batch
criterion = torch.nn.MSELoss(reduction='sum')
# SGD is stochastic gradient descent: default params plus a learning rate
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)


# Training epochs
for epoch in range(500):
    # Forward pass
    y_pred = model(x_data)

    # Compute the loss between predictions and targets
    loss = criterion(y_pred, y_data)
    print(epoch, loss.item())

    # We have the loss from the graph; now backprop and update:
    # zero the accumulated gradients first
    optimizer.zero_grad()
    # backprop the loss (it is already part of the autograd graph)
    loss.backward()
    # update the weights
    optimizer.step()


# Wrap the value we want a prediction for in a tensor as well
hour_var = torch.tensor([[4.0]])
# Predict
y_pred = model(hour_var)
print("predict (after training)", 4, y_pred.item())
52 changes: 52 additions & 0 deletions PyTorch-Fundamentals/pytorch5.py
@@ -0,0 +1,52 @@
import torch

x_data = torch.tensor([[1.0], [2.0], [3.0]])
# Binary labels: BCE targets must lie in [0, 1] (here: fail = 0, pass = 1)
y_data = torch.tensor([[0.0], [0.0], [1.0]])


class Model(torch.nn.Module):
    # __init__ sets up the parameters: one linear layer, one input, one output.
    # forward() squashes the linear output through a sigmoid to get a probability.
    def __init__(self):
        super(Model, self).__init__()
        self.linear = torch.nn.Linear(1, 1)  # one in and one out

    def forward(self, x):
        y_pred = torch.sigmoid(self.linear(x))
        return y_pred


# Initialize the model
model = Model()


# Binary cross-entropy loss, summed over the batch
criterion = torch.nn.BCELoss(reduction='sum')
# SGD is stochastic gradient descent: default params plus a learning rate
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)


# Training epochs
for epoch in range(500):
    # Forward pass
    y_pred = model(x_data)

    # Compute the loss between predicted probabilities and labels
    loss = criterion(y_pred, y_data)
    print(epoch, loss.item())

    # Zero the gradients, backprop the loss, and update the weights
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()


hour_var = torch.tensor([[1.0]])
print("predict 1 hour ", 1.0, model(hour_var).item() > 0.5)
hour_var = torch.tensor([[7.0]])
print("predict 7 hours", 7.0, model(hour_var).item() > 0.5)
44 changes: 44 additions & 0 deletions PyTorch-Fundamentals/pytorch6.py
@@ -0,0 +1,44 @@
import torch
import numpy as np

# Each row of diabetes.csv: 8 input features followed by a binary label
xy = np.loadtxt('diabetes.csv', delimiter=',', dtype=np.float32)
x_data = torch.from_numpy(xy[:, 0:-1])
y_data = torch.from_numpy(xy[:, [-1]])

print(x_data.shape)
print(y_data.shape)


class Model(torch.nn.Module):

    def __init__(self):
        super(Model, self).__init__()
        # Three stacked linear layers: 8 -> 6 -> 4 -> 1
        self.l1 = torch.nn.Linear(8, 6)
        self.l2 = torch.nn.Linear(6, 4)
        self.l3 = torch.nn.Linear(4, 1)

        self.sigmoid = torch.nn.Sigmoid()

    def forward(self, x):
        out1 = self.sigmoid(self.l1(x))
        out2 = self.sigmoid(self.l2(out1))
        y_pred = self.sigmoid(self.l3(out2))
        return y_pred


model = Model()


# Binary cross-entropy loss, averaged over the batch
criterion = torch.nn.BCELoss(reduction='mean')
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)

for epoch in range(100):
    y_pred = model(x_data)

    loss = criterion(y_pred, y_data)
    print(epoch, loss.item())

    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
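
A short follow-up sketch (not part of the commit) that reports training accuracy by thresholding the sigmoid outputs at 0.5:

# Fraction of training rows classified correctly, without tracking gradients.
with torch.no_grad():
    predictions = (model(x_data) > 0.5).float()
    accuracy = (predictions == y_data).float().mean().item()
print("train accuracy:", accuracy)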
43 changes: 43 additions & 0 deletions PyTorch-Fundamentals/pytorch7.py
@@ -0,0 +1,43 @@
# As the dataset grows over time, we should split it into batches
# for performance: process one batch at a time, compute the gradient,
# and update.
#
# Example: 1000 training examples with a batch size of 500
# means 2 iterations per epoch.
#
# PyTorch's DataLoader handles this batching for us.
import torch
import numpy as np
from torch.utils.data import Dataset, DataLoader


class DiabetesDataset(Dataset):

    def __init__(self):
        xy = np.loadtxt('diabetes.csv',
                        delimiter=',', dtype=np.float32)
        self.len = xy.shape[0]
        self.x_data = torch.from_numpy(xy[:, 0:-1])
        self.y_data = torch.from_numpy(xy[:, [-1]])

    def __getitem__(self, index):
        return self.x_data[index], self.y_data[index]

    def __len__(self):
        return self.len


dataset = DiabetesDataset()
train_loader = DataLoader(dataset=dataset,
                          batch_size=32,
                          shuffle=True,
                          num_workers=2)

for epoch in range(2):
    for i, (inputs, labels) in enumerate(train_loader):
        # Each iteration yields one mini-batch of inputs and labels
        print(epoch, i, "inputs", inputs, "labels", labels)
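
Combining this loader with the network from pytorch6.py gives mini-batch training; a minimal sketch (not part of the commit), assuming the Model class defined there:

model = Model()  # the 8 -> 6 -> 4 -> 1 network from pytorch6.py
criterion = torch.nn.BCELoss(reduction='mean')
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)

for epoch in range(2):
    for i, (inputs, labels) in enumerate(train_loader):
        y_pred = model(inputs)            # forward pass on one mini-batch
        loss = criterion(y_pred, labels)  # batch loss
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()                  # update after every batch
        print(epoch, i, loss.item())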