From 1e8a27ddaa78792c4c678129bc4217a8965058ac Mon Sep 17 00:00:00 2001
From: Lenoctambule <106790775+lenoctambule@users.noreply.github.com>
Date: Fri, 27 Mar 2026 04:35:22 +0100
Subject: [PATCH] feat: add bias to nn parameters

---
 autoencoder.py | 12 ++++++++----
 mnist_test.py  |  6 +++---
 2 files changed, 11 insertions(+), 7 deletions(-)

diff --git a/autoencoder.py b/autoencoder.py
index b64dd35..7de9ca0 100644
--- a/autoencoder.py
+++ b/autoencoder.py
@@ -10,6 +10,7 @@ class Encoder:
                  lr: float,
                  activation_func: types.FunctionType):
         self.W = np.random.uniform(-1, 1, (in_size, out_size))
+        self.B = np.zeros((out_size))
         self.lr = lr
         self.last_input = None
         self.last_output = None
@@ -17,13 +18,14 @@ class Encoder:
 
     def forward(self, V: np.ndarray) -> np.ndarray:
         self.last_input = V
-        z = V @ self.W
-        self.last_output = regularize(self.activation_func(z))
+        res = V @ self.W + self.B
+        self.last_output = regularize(self.activation_func(res))
         return self.last_output
 
     def backprop(self, error: np.ndarray):
         dW = np.outer(self.last_input, error)
         self.W -= self.lr * dW
+        self.B -= self.lr * error
         return error @ self.W.T
 
 
@@ -34,6 +36,7 @@ class Decoder:
                  lr: float,
                  activation_func):
         self.W = np.random.uniform(-1, 1, (in_size, out_size))
+        self.B = np.zeros((out_size))
         self.lr = lr
         self.last_input = None
         self.last_output = None
@@ -41,14 +44,15 @@ class Decoder:
 
     def forward(self, V: np.ndarray) -> np.ndarray:
         self.last_input = V
-        z = V @ self.W
-        self.last_output = regularize(self.activation_func(z))
+        res = V @ self.W + self.B
+        self.last_output = regularize(self.activation_func(res))
         return self.last_output
 
     def backprop(self, target: np.ndarray):
         error = self.last_output - target
         dW = np.outer(self.last_input, error)
         self.W -= self.lr * dW
+        self.B -= self.lr * error
         return error @ self.W.T
 
 
diff --git a/mnist_test.py b/mnist_test.py
index 3a7ed85..e10a4c1 100644
--- a/mnist_test.py
+++ b/mnist_test.py
@@ -18,7 +18,7 @@ def mnist_embed():
     prev_error = float('inf')
     losses = []
     epoch = 0
-    x_train = x_train[:1_000]
+    x_train = x_train[:]
     while True:
         error = 0
         for x in x_train:
@@ -30,10 +30,10 @@ def mnist_embed():
             prev_error = error
         losses.append(error)
         dynamic_loss_plot_update(ax, line, losses)
-        if NO_IMPROV > 10:
+        if NO_IMPROV > 5:
             print('Done !')
             break
-        if epoch > 200:
+        if epoch > 500:
             break
         epoch += 1
     dynamic_loss_plot_finish(ax, line)