uwu it actually works

Aodhnait Étaín 2020-11-14 19:27:42 +00:00
commit a1c91c7cda
2 changed files with 154 additions and 0 deletions

README.md Normal file

@@ -0,0 +1,24 @@
# PyGrad
A neural network library written completely from scratch in Python, using only numpy.
## Usage
```
import nn
a, b = nn.Tensor(1), nn.Tensor(2)
c = a.mul(b)
c.backward()
print(a.grad, b.grad) # should output (2, 1)
```
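The other ops defined in `nn.py` (`add`, `expt`, `reciprocal`) compose in the same way; as a quick sketch:
```
import nn
a, b = nn.Tensor(3), nn.Tensor(2)
d = a.add(b).expt(2) # d = (a + b) ** 2
d.backward()
print(a.grad, b.grad) # should output (10, 10), i.e. 2 * (a + b) for each
```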
## References
- <https://github.com/geohot/tinygrad/>
- <https://github.com/karpathy/micrograd/>
## Copying
This is free and unencumbered software released into the public domain.

nn.py Normal file

@@ -0,0 +1,130 @@
# neural networks from scratch with numpy
import numpy as np
def mean_absolute_error(x, y):
    return np.mean(np.abs(x - y))

def mean_squared_error(x, y):
    return np.mean(np.power(x - y, 2))

def cross_entropy_loss(x, y):
    return -np.log(np.exp(y) / np.sum(np.exp(x)))
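
# For example, with x = np.array([1.0, 2.0]) and y = np.array([2.0, 4.0]),
# mean_absolute_error(x, y) == 1.5 and mean_squared_error(x, y) == 2.5.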
# prepare inputs and outputs
x = np.array([[1, 0]])
y = np.array([[1]])
# we're doing xavier initialisation - see <http://proceedings.mlr.press/v9/glorot10a/glorot10a.pdf>
w1 = np.random.randn(2, 3) / np.sqrt(2)
w2 = np.random.randn(3, 1) / np.sqrt(3)
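# The division by sqrt(fan_in) keeps the variance of each layer's
# pre-activations roughly equal to the variance of its inputs.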
def single_pass():
    # forward pass
    h = np.matmul(x, w1)
    h_hat = np.tanh(h)
    j = np.matmul(h_hat, w2)
    print("prediction {}".format(j))
    # loss calculation
    loss = cross_entropy_loss(j, y)
    print("loss {}".format(loss))
    # TODO Backward pass.
    return
# initialise layers
# self.lin1 = nn.Linear(2, 3)
# self.lin2 = nn.Linear(3, 1)
# self.loss = nn.MSELoss()
# and then
# x = self.lin1(x)
# x = F.relu(x)
# x = self.lin2(x)
# x = F.softmax(x)
# loss = self.loss(x, y)
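# (The commented-out sketch above follows the torch.nn / torch.nn.functional
# style of model definition, presumably the interface this library is working
# towards.)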
# TODO Add support for numpy matrices.
class Tensor:
    def __init__(self, value):
        self.value = value
        self.grad = 0
        # Required for backprop.
        self._parents = None
        self._back = None

    def __repr__(self) -> str:
        return f"Tensor(value={self.value}, grad={self.grad})"

    # Save values for the backward pass.
    def _save(self, *args):
        self._parents = args

    def add(self, other):
        tensor = Tensor(self.value + other.value)
        tensor._save(self, other)
        def back(upstream):
            return upstream * 1, upstream * 1
        tensor._back = back
        return tensor

    def mul(self, other):
        tensor = Tensor(self.value * other.value)
        tensor._save(self, other)
        def back(upstream):
            a, b = tensor._parents
            return upstream * b.value, upstream * a.value
        tensor._back = back
        return tensor

    def expt(self, exponent):
        tensor = Tensor(self.value ** exponent)
        tensor._save(self)
        def back(upstream):
            a, = tensor._parents
            return [ upstream * exponent * (a.value ** (exponent - 1)) ]
        tensor._back = back
        return tensor
    def reciprocal(self):
        tensor = Tensor(1.0 / self.value)
        tensor._save(self)
        def back(upstream):
            a, = tensor._parents
            # d(1/a)/da = -1 / a**2, scaled by the upstream gradient.
            return [ upstream * (-1.0 / (a.value ** 2)) ]
        tensor._back = back
        return tensor
    def _backprop(tensor, upstream):
        # Backprop through the tensor iff it has any parents.
        if tensor._parents is not None:
            for node, grad in zip(tensor._parents, tensor._back(upstream)):
                # Set the node gradient to the computed gradient.
                node.grad = grad
                # Iterate through all (possible) parent nodes of this node.
                node._backprop(grad)
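
    # Note that _backprop overwrites node.grad instead of accumulating into it,
    # so a tensor feeding into more than one downstream op only keeps the
    # gradient from the last branch that reaches it.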
    def backward(self):
        # Partial of self with respect to self is ALWAYS 1.
        self.grad = 1
        Tensor._backprop(self, self.grad)

def test():
    # Forward pass.
    x, y, z = Tensor(-2), Tensor(5), Tensor(-4)
    q = x.add(y)
    h = q.expt(2)
    w = h.mul(z)
    print(f"q = {q}, w = {w}")
    # Backward pass.
    w.backward()
    print(f"is: dw = {w.grad}, dz = {z.grad}, dy = {y.grad}, dx = {x.grad}")