Add more TODOs
parent a7004274f5
commit b417bd0572

nn.py | 7 ++++++-
1 file changed, 6 insertions(+), 1 deletion(-)
@@ -44,8 +44,9 @@ def single_pass():
     # x = F.softmax(x)
     # loss = self.loss(x, y)

-# TODO Add support for numpy matrices.
+# TODO Add support for numpy arrays.
 class Tensor:
+    # TODO Implement 'requires_grad' functionality.
     def __init__(self, value):
         self.value = value
         self.grad = 0
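The 'requires_grad' TODO above is only a comment in this commit. If it were implemented the PyTorch way, a minimal sketch could look like this (the flag name and default are assumptions, not code from this repo):

    # Hypothetical sketch of the requires_grad TODO; not part of this commit.
    class Tensor:
        def __init__(self, value, requires_grad=True):
            self.value = value
            self.grad = 0
            self.requires_grad = requires_grad  # assumed flag, mirroring PyTorch
            self._parents = None
            self._back = None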
@@ -53,6 +54,7 @@ class Tensor:
         self._parents = None
         self._back = None

+    # uwu literally the only place where I have type annotations
     def __repr__(self) -> str:
         return f"Tensor(value={self.value}, grad={self.grad})"

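For reference, the `__repr__` shown in this hunk prints both fields of a freshly constructed tensor:

    >>> Tensor(3)
    Tensor(value=3, grad=0)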
@@ -60,6 +62,7 @@ class Tensor:
     def _save(self, *args):
         self._parents = args

+    # TODO Maybe refactor the functions system? Maybe something like pytorch/tinygrad?
     def add(self, other):
         tensor = Tensor(self.value + other.value)
         tensor._save(self, other)
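The refactor floated in that TODO would presumably replace hand-written methods like `add` with op classes, roughly the pytorch/tinygrad Function pattern. A hedged sketch under that assumption (`Function`, `Add`, `apply`, and `_ctx` are illustrative names, not code from this repo):

    # Hypothetical pytorch/tinygrad-style op system; illustrative only.
    class Function:
        def __init__(self, *parents):
            self.parents = parents

        @classmethod
        def apply(cls, *tensors):
            ctx = cls(*tensors)
            out = Tensor(cls.forward(*[t.value for t in tensors]))
            out._ctx = ctx  # assumed link from a tensor to the op that made it
            return out

    class Add(Function):
        @staticmethod
        def forward(x, y):
            return x + y

        @staticmethod
        def backward(upstream):
            # d(x + y)/dx = d(x + y)/dy = 1, so both parents get the upstream grad.
            return upstream, upstream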
@@ -114,6 +117,7 @@ class Tensor:
         tensor._back = back
         return tensor

+    # TODO Compute gradients only for tensors that need it.
     def _backprop(tensor, upstream):
         # Backprop through the tensor iff it has any parents.
         if tensor._parents is not None:
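Only the guard at the top of `_backprop` is visible in this hunk. Judging from the fields the diff does show (`_parents` set by `_save`, `_back` set per op), the recursion presumably asks `_back` for local gradients and walks the parents; the new TODO would add a gate so work is skipped for tensors that don't need gradients. Everything past the `if` below is an assumption:

    # Hypothetical continuation of _backprop; the diff only shows the guard.
    def _backprop(tensor, upstream):
        # Backprop through the tensor iff it has any parents.
        if tensor._parents is not None:
            grads = tensor._back(upstream)  # assumed: one local grad per parent
            for parent, grad in zip(tensor._parents, grads):
                if getattr(parent, "requires_grad", True):  # the TODO's gate, assumed
                    parent.grad += grad
                    Tensor._backprop(parent, grad)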
@@ -128,6 +132,7 @@ class Tensor:
         self.grad = 1
         Tensor._backprop(self, self.grad)

+# Small test to see if autograd works.
 def test():
     # Forward pass.
     x, y, z = Tensor(-2), Tensor(5), Tensor(-4)
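The test is cut off after the forward-pass line. For orientation: if it continues with the classic f = (x + y) * z check (assuming a `mul` op analogous to `add` exists elsewhere in nn.py, and that the `self.grad = 1; Tensor._backprop(...)` context above is the body of a `backward` method), the expected gradients are df/dx = df/dy = z = -4 and df/dz = x + y = 3:

    # Hypothetical continuation of test(); only its first line is in the diff.
    def test():
        # Forward pass.
        x, y, z = Tensor(-2), Tensor(5), Tensor(-4)
        q = x.add(y)   # q = 3
        f = q.mul(z)   # f = -12; 'mul' assumed to mirror 'add'
        # Backward pass.
        f.backward()   # assumed method seeding grad = 1 (see hunk above)
        assert (x.grad, y.grad, z.grad) == (-4, -4, 3)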