import torch
import numpy as np

class Perceptron(torch.nn.Module):
    """Single neuron: a linear layer (2 inputs -> 1 output) followed by tanh."""

    def __init__(self):
        super().__init__()
        self.linear = torch.nn.Linear(2, 1)

    def forward(self, x):
        z = self.linear(x)   # affine map w^T x + b
        y = torch.tanh(z)    # squash the output to (-1, 1)
        return y

model = Perceptron()

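# Optional sanity check (not in the original script): a freshly initialized
# model maps a (1, 2) input to a (1, 1) output whose value lies in (-1, 1).
with torch.no_grad():
    print("untrained output:", model(torch.zeros(1, 2)))
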
# Stochastic gradient descent with a fixed learning rate of 0.5
optim = torch.optim.SGD(model.parameters(), lr=5e-1)

# Mean squared error between the tanh output and the +/-1 targets
loss_fun = torch.nn.MSELoss()

# Training data: the AND function on {-1, +1}^2, with targets encoded as +/-1
X_train = torch.tensor([[ 1.,  1.],
                        [ 1., -1.],
                        [-1.,  1.],
                        [-1., -1.]])
y_train = torch.tensor([1., -1., -1., -1.]).reshape(-1, 1)

ns = y_train.size(0)  # number of training samples

print("\nTraining data")
print("X_train =")
print(X_train)
print("y_train =")
print(y_train)

nepochs = 5000  # maximum number of epochs
tol = 1e-3      # stopping tolerance on the full-batch MSE

for epoch in range(nepochs):

    # Full-batch forward pass, used to monitor convergence
    y_est = model(X_train)
    loss = loss_fun(y_est, y_train)
    print(f'{epoch}: {loss.item():.4e}')

    # Stop once the full-batch MSE is below the tolerance
    if loss.item() < tol:
        break

    # Stochastic updates: visit the samples in a random order and take one
    # gradient step per sample on its squared error, recomputing the outputs
    # (and the computation graph) after each parameter update
    for s in torch.randperm(ns):
        loss_s = (y_est[s, :] - y_train[s, :])**2
        optim.zero_grad()
        loss_s.backward()
        optim.step()
        y_est = model(X_train)

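# Optional inspection (not in the original script): the learned decision
# boundary is determined by the weights and bias of the single linear layer.
print("learned weights:", model.linear.weight.data)
print("learned bias:", model.linear.bias.data)
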
# Final predictions on the training set
y = model(X_train)
print(f'y_est = {y}')
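
# Optional check (not in the original script): since the targets are +/-1,
# class predictions can be read off with torch.sign and compared to y_train.
with torch.no_grad():
    y_pred = torch.sign(model(X_train))
    print("correct classifications:", int((y_pred == y_train).sum()), "of", ns)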