# pygrad/pygrad/nn.py
# Neural networks from scratch with numpy.
import numpy as np
from pygrad.tensor import tensor, Tensor

def mean_absolute_error(x, y):
    return np.mean(np.abs(x - y))

def mean_squared_error(x: Tensor, y: Tensor):
    # (x - y)^2 / 2, built from Tensor ops so it stays differentiable.
    return x.sub(y).expt(2).div(tensor([[2.0]]))
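
# Worked example (illustrative, not part of the original module): with
# x = tensor([[3.0]]) and y = tensor([[1.0]]), mean_squared_error gives
# (3 - 1)^2 / 2 = 2.0.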

def cross_entropy_loss(x: Tensor, y: Tensor):
    # Computes -log(exp(y) / sum(exp(x))) with Tensor ops.
    return y.exp().div(np.sum(x.exp())).log().neg()
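
# Worked example (illustrative): for x = tensor([[0.0, 0.0]]) and y = tensor([[0.0]]),
# this is -log(exp(0) / (exp(0) + exp(0))) = log(2) ≈ 0.693.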
# prepare inputs and outputs
x = tensor(np.array([[1, 0]]))
y = tensor(np.array([[1]]))
# we're doing xavier initialisation - see <http://proceedings.mlr.press/v9/glorot10a/glorot10a.pdf>
w1 = tensor(np.random.randn(2, 3) / np.sqrt(2))
w2 = tensor(np.random.randn(3, 1) / np.sqrt(3))
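# (Dividing by sqrt(fan_in), i.e. 2 for w1 and 3 for w2, keeps the variance of the
# activations roughly constant from layer to layer.)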

def single_pass():
    global w1, w2
    # forward pass
    h = x.mul(w1)
    h_hat = h.tanh()
    j = h_hat.mul(w2)
    print(f"prediction {j}")
    # loss calculation
    loss = mean_squared_error(j, y)
    print(f"loss {loss}")
    # backward pass, then a plain SGD update with learning rate 0.1
    loss.backward()
    print(w1.grad, w2.grad)
    w1.value -= 0.1 * w1.grad
    w2.value -= 0.1 * w2.grad
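
# Each call to single_pass() takes one gradient-descent step on w1 and w2;
# training the toy network is just calling it repeatedly (see the driver sketch
# at the bottom of the file).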
# initialise layers
# self.lin1 = nn.Linear(2, 3)
# self.lin2 = nn.Linear(3, 1)
# self.loss = nn.MSELoss()
# and then
# x = self.lin1(x)
# x = F.relu(x)
# x = self.lin2(x)
# x = F.softmax(x)
# loss = self.loss(x, y)
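# (single_pass above hard-codes the same 2 -> 3 -> 1 structure, but with tanh as the
# activation and the halved squared error as the loss.)
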
# Small test to see if autograd works.
def test():
    # Input tensors.
    x = Tensor(np.array([[1, 2, 3]]))
    y = Tensor(np.array([[2, 3, 4]]))
    z = Tensor(np.array([[1], [2], [3]]))
    # Forward pass.
    q = x.add(y)
    h = q.expt(2)
    w = h.mul(z)
    print(f"q = {q}, w = {w}")
    # Backward pass.
    w.backward()
    print(f"is: dw = {w.grad}, dz = {z.grad}, dy = {y.grad}, dx = {x.grad}")