Add more TODOs

Aodhnait Étaín 2020-11-14 22:38:30 +00:00
parent a7004274f5
commit b417bd0572

nn.py

@@ -44,8 +44,9 @@ def single_pass():
     # x = F.softmax(x)
     # loss = self.loss(x, y)
 
-# TODO Add support for numpy matrices.
+# TODO Add support for numpy arrays.
 class Tensor:
+    # TODO Implement 'requires_grad' functionality.
     def __init__(self, value):
         self.value = value
         self.grad = 0
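The 'requires_grad' TODO above is still open in this commit. A minimal sketch of the smallest version, assuming the constructor just grows an extra flag (the name and default are assumptions, not this repo's code):

class Tensor:
    # Hypothetical sketch: the 'requires_grad' flag is an assumption,
    # not part of this commit.
    def __init__(self, value, requires_grad=True):
        self.value = value
        self.grad = 0
        self.requires_grad = requires_grad
        self._parents = None
        self._back = None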
@@ -53,6 +54,7 @@ class Tensor:
         self._parents = None
         self._back = None
 
+    # uwu literally the only place where I have type annotations
     def __repr__(self) -> str:
         return f"Tensor(value={self.value}, grad={self.grad})"
@@ -60,6 +62,7 @@
     def _save(self, *args):
         self._parents = args
 
+    # TODO Maybe refactor the functions system? Maybe something like pytorch/tinygrad?
     def add(self, other):
         tensor = Tensor(self.value + other.value)
         tensor._save(self, other)
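The refactor TODO above points at the Function pattern used by pytorch/tinygrad, where each op is its own class with forward/backward and the Tensor only stores data. A rough sketch under that assumption; Function, Add, and apply are hypothetical names, not this repo's API:

class Function:
    # Hypothetical tinygrad-style op class; assumes the Tensor class above.
    @classmethod
    def apply(cls, *tensors):
        ctx = cls()
        out = Tensor(ctx.forward(*[t.value for t in tensors]))
        out._parents = tensors
        out._back = ctx.backward  # assumes _back receives the upstream gradient
        return out

class Add(Function):
    def forward(self, x, y):
        return x + y

    def backward(self, upstream):
        # d(x + y)/dx = d(x + y)/dy = 1, so both parents see the upstream gradient.
        return upstream, upstream

x.add(y) would then shrink to Add.apply(x, y), and every new op is just another Function subclass.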
@@ -114,6 +117,7 @@ class Tensor:
         tensor._back = back
         return tensor
 
+    # TODO Compute gradients only for tensors that need it.
     def _backprop(tensor, upstream):
         # Backprop through the tensor iff it has any parents.
         if tensor._parents is not None:
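One reading of the 'compute gradients only for tensors that need it' TODO, as a sketch: the real _backprop body is truncated in this hunk, so the loop below assumes _back(upstream) returns one gradient per parent, and reuses the assumed requires_grad flag from the earlier sketch:

    def _backprop(tensor, upstream):
        # Hypothetical sketch: skips subtrees that do not need gradients.
        if tensor._parents is not None:
            for parent, grad in zip(tensor._parents, tensor._back(upstream)):
                if getattr(parent, "requires_grad", True):
                    parent.grad += grad
                    Tensor._backprop(parent, grad)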
@@ -128,6 +132,7 @@ class Tensor:
         self.grad = 1
         Tensor._backprop(self, self.grad)
 
+# Small test to see if autograd works.
 def test():
     # Forward pass.
     x, y, z = Tensor(-2), Tensor(5), Tensor(-4)
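The hunk cuts off mid-test. The values (-2, 5, -4) match the classic f = (x + y) * z example, so a plausible continuation looks like the following; 'mul' and the method name 'backward' are assumptions (only 'add' and the backward body appear above):

    q = x.add(y)   # q = x + y = 3
    f = q.mul(z)   # f = q * z = -12 ('mul' is assumed, not shown in this diff)
    f.backward()
    assert f.value == -12
    assert z.grad == 3    # df/dz = q
    assert x.grad == -4   # df/dx = df/dq * dq/dx = z * 1
    assert y.grad == -4   # df/dy = z * 1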