feat: add bias to nn parameters

This commit is contained in:
Lenoctambule
2026-03-27 04:35:22 +01:00
parent 439a11a828
commit 1e8a27ddaa
2 changed files with 11 additions and 7 deletions

View File

@@ -10,6 +10,7 @@ class Encoder:
lr: float,
activation_func: types.FunctionType):
self.W = np.random.uniform(-1, 1, (in_size, out_size))
+ self.B = np.zeros((out_size))
self.lr = lr
self.last_input = None
self.last_output = None
@@ -17,13 +18,14 @@ class Encoder:
def forward(self, V: np.ndarray) -> np.ndarray:
self.last_input = V
- z = V @ self.W
- self.last_output = regularize(self.activation_func(z))
+ res = V @ self.W + self.B
+ self.last_output = regularize(self.activation_func(res))
return self.last_output
def backprop(self, error: np.ndarray):
dW = np.outer(self.last_input, error)
self.W -= self.lr * dW
+ self.B -= self.lr * error
return error @ self.W.T
@@ -34,6 +36,7 @@ class Decoder:
lr: float,
activation_func):
self.W = np.random.uniform(-1, 1, (in_size, out_size))
+ self.B = np.zeros((out_size))
self.lr = lr
self.last_input = None
self.last_output = None
@@ -41,14 +44,15 @@ class Decoder:
def forward(self, V: np.ndarray) -> np.ndarray:
self.last_input = V
- z = V @ self.W
- self.last_output = regularize(self.activation_func(z))
+ res = V @ self.W + self.B
+ self.last_output = regularize(self.activation_func(res))
return self.last_output
def backprop(self, target: np.ndarray):
error = self.last_output - target
dW = np.outer(self.last_input, error)
self.W -= self.lr * dW
+ self.B -= self.lr * error
return error @ self.W.T

View File

@@ -18,7 +18,7 @@ def mnist_embed():
prev_error = float('inf')
losses = []
epoch = 0
- x_train = x_train[:1_000]
+ x_train = x_train[:]
while True:
error = 0
for x in x_train:
@@ -30,10 +30,10 @@ def mnist_embed():
prev_error = error
losses.append(error)
dynamic_loss_plot_update(ax, line, losses)
- if NO_IMPROV > 10:
+ if NO_IMPROV > 5:
print('Done !')
break
- if epoch > 200:
+ if epoch > 500:
break
epoch += 1
dynamic_loss_plot_finish(ax, line)