Mirror of https://github.com/kashifulhaque/smoltorch.git
Initial commit

micrograd/__init__.py (new file, 0 lines)

micrograd/engine.py (new file, 104 lines)
@@ -0,0 +1,104 @@
import math


class Value:
    def __init__(self, data, _parents=(), _op=''):
        self.data = data
        self._parents = _parents
        self._op = _op

        # gradient
        self.grad = 0.0  # at init, the value does not affect the output
        self._backward = lambda: None

    def __repr__(self):
        return f"Value(data={self.data})"

    def __add__(self, other: 'Value') -> 'Value':
        other = other if isinstance(other, Value) else Value(other)
        out = Value(self.data + other.data, (self, other), '+')

        def _backward():
            # addition routes the gradient through unchanged
            self.grad += 1.0 * out.grad
            other.grad += 1.0 * out.grad
        out._backward = _backward

        return out

    def __radd__(self, other: 'Value') -> 'Value':
        return self + other

    def __mul__(self, other: 'Value') -> 'Value':
        other = other if isinstance(other, Value) else Value(other)
        out = Value(self.data * other.data, (self, other), '*')

        def _backward():
            # product rule: each factor's gradient is the other factor
            self.grad += other.data * out.grad
            other.grad += self.data * out.grad
        out._backward = _backward

        return out

    def __neg__(self) -> 'Value':
        return -1 * self

    def __sub__(self, other: 'Value') -> 'Value':
        return self + (-other)

    def __rsub__(self, other: 'Value') -> 'Value':
        return Value(other) - self

    def __rmul__(self, other: 'Value') -> 'Value':
        return self * other

    def __pow__(self, other) -> 'Value':
        assert isinstance(other, (int, float)), "only support int/float powers for now"
        out = Value(self.data**other, (self,), f'**{other}')

        def _backward():
            # power rule: d(x**n)/dx = n * x**(n - 1)
            self.grad += (other * self.data**(other - 1)) * out.grad
        out._backward = _backward

        return out

    def __truediv__(self, other: 'Value') -> 'Value':
        return self * other**-1

    def tanh(self) -> 'Value':
        x = self.data
        # tanh via its exponential identity
        _tanh = (math.exp(2*x) - 1) / (math.exp(2*x) + 1)
        out = Value(_tanh, (self,), 'tanh')

        def _backward():
            # d(tanh(x))/dx = 1 - tanh(x)**2
            self.grad += (1 - _tanh ** 2) * out.grad
        out._backward = _backward

        return out

    def exp(self) -> 'Value':
        x = self.data
        out = Value(math.exp(x), (self,), 'exp')

        def _backward():
            # d(e**x)/dx = e**x, which is out.data itself
            self.grad += out.data * out.grad
        out._backward = _backward

        return out

    def backward(self):
        # topologically sort the graph so each node comes after its parents
        topo = []
        visited = set()

        def build_topo(v: 'Value'):
            if v not in visited:
                visited.add(v)
                for child in v._parents:
                    build_topo(child)
                topo.append(v)

        build_topo(self)

        # seed the output gradient, then propagate backwards through the graph
        self.grad = 1.0
        for node in reversed(topo):
            node._backward()
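Not part of the commit, but a quick way to sanity-check the engine: build a small expression, call backward(), and compare the gradients against the chain rule by hand. The numbers below are arbitrary.

    from micrograd.engine import Value

    # build a tiny expression graph: f = tanh(a*b + c)
    a = Value(2.0)
    b = Value(-3.0)
    c = Value(10.0)
    f = (a * b + c).tanh()

    f.backward()

    # a*b + c = 4, so f.data ~= tanh(4) ~= 0.9993
    # chain rule: df/da = (1 - tanh(4)**2) * b, df/dc = 1 - tanh(4)**2
    print(f.data, a.grad, b.grad, c.grad)

Because tanh saturates near ±1, the gradients here come out tiny, exactly as the derivative 1 - tanh(x)**2 predicts.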
micrograd/nn.py (new file, 39 lines)
@@ -0,0 +1,39 @@
import random

from micrograd.engine import Value


class Neuron:
    def __init__(self, n_inputs: int):
        # random weights and bias in [-1, 1]
        self.w = [Value(random.uniform(-1, 1)) for _ in range(n_inputs)]
        self.b = Value(random.uniform(-1, 1))

    def __call__(self, x: list) -> Value:
        # dot(w, x) + b, seeding the sum with the bias
        activations = sum((w_i * x_i for w_i, x_i in zip(self.w, x)), self.b)
        out = activations.tanh()
        return out

    def parameters(self):
        return self.w + [self.b]


class Layer:
    def __init__(self, n_inputs: int, n_outputs: int):
        self.neurons = [Neuron(n_inputs) for _ in range(n_outputs)]

    def __call__(self, x: list) -> list[Value]:
        outs = [n(x) for n in self.neurons]
        return outs

    def parameters(self):
        return [p for n in self.neurons for p in n.parameters()]


class MLP:
    def __init__(self, n_inputs: int, n_outputs: list[int]):
        # n_outputs lists the size of each layer, e.g. [4, 4, 1]
        sz = [n_inputs] + n_outputs
        self.layers = [Layer(sz[i], sz[i + 1]) for i in range(len(n_outputs))]

    def __call__(self, x):
        for layer in self.layers:
            x = layer(x)
        return x

    def parameters(self):
        return [p for layer in self.layers for p in layer.parameters()]
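Again as an illustration rather than part of the commit, here is a minimal training step using these classes. The network shape [4, 4, 1], the two data points, and the 0.05 step size are arbitrary choices; note that MLP.__call__ returns a list of Values (Layer always returns a list), so a single-output network needs [0].

    import random
    from micrograd.nn import MLP

    random.seed(0)

    # 3 inputs, two hidden layers of 4 neurons, one output
    model = MLP(3, [4, 4, 1])

    xs = [[2.0, 3.0, -1.0], [3.0, -1.0, 0.5]]
    ys = [1.0, -1.0]

    # forward pass and squared-error loss
    preds = [model(x)[0] for x in xs]
    loss = sum((p - y) ** 2 for p, y in zip(preds, ys))

    # zero gradients (they accumulate), backprop, then a gradient-descent step
    for p in model.parameters():
        p.grad = 0.0
    loss.backward()
    for p in model.parameters():
        p.data -= 0.05 * p.grad

Repeating the forward/backward/update loop should drive the loss down, since each parameter moves against its gradient.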