From e5520bf0508ec4a57118d4246d5e1c4d367c6cd1 Mon Sep 17 00:00:00 2001
From: Lenoctambule <106790775+lenoctambule@users.noreply.github.com>
Date: Sat, 28 Mar 2026 02:07:23 +0100
Subject: [PATCH] feat: use DeepNNLayer in Autoencoder

---
 autoencoder.py | 33 +++++++++++++++++----------------
 mnist_test.py  | 13 +++++++++++--
 2 files changed, 28 insertions(+), 18 deletions(-)

diff --git a/autoencoder.py b/autoencoder.py
index 0b626bc..b5c5a1c 100644
--- a/autoencoder.py
+++ b/autoencoder.py
@@ -3,27 +3,28 @@
 from utils import (dynamic_loss_plot_init,
                    dynamic_loss_plot_update,
                    dynamic_loss_plot_finish)
 from tqdm import tqdm
-from layers import NNLayer
+from layers import DeepNNLayer
 
 LOADER = ['⡿', '⣟', '⣯', '⣷', '⣾', '⣽', '⣻', '⢿']
 
 class Autoencoder:
     def __init__(self,
-                 in_len: int,
-                 bottleneck: int,
+                 encoder_layers: list[int],
+                 decoder_layers: list[int],
                  lr: float,
                  activation_func):
-        self.encoder = NNLayer(in_len, bottleneck, lr, activation_func)
-        self.decoder = NNLayer(bottleneck, in_len, lr, activation_func)
+        self.encoder = DeepNNLayer(encoder_layers, lr, activation_func)
+        self.decoder = DeepNNLayer(decoder_layers, lr, activation_func)
 
-    def train(self, v: np.ndarray) -> float:
-        encoded = self.encoder.forward(v)
-        reconstructed = self.decoder.forward(encoded)
-        error = self.decoder.backprop(reconstructed - v)
-        self.encoder.backprop(error)
-        error = v - reconstructed
-        return np.sum(np.abs(error))
+    def train(self, v: np.ndarray):
+        out = self.decoder.forward(
+            self.encoder.forward(v)
+        )
+        self.encoder.backprop(
+            self.decoder.backprop(out - v)
+        )
+        return np.sum(np.abs(out - v)) / len(v)
 
     def train_dataset(self,
                       data_set: list[np.ndarray],
@@ -60,10 +61,10 @@ class Autoencoder:
             if epoch > max_epoch:
                 break
             epoch += 1
-        if display_loss is True:
-            dynamic_loss_plot_finish(ax, line)
-        print("#Training complete !")
-        return losses
+        print("Training complete !")
+        if display_loss is True:
+            dynamic_loss_plot_finish(ax, line)
+        return losses
 
     def encode(self, v: np.ndarray) -> np.ndarray:
         return self.encoder.forward(v)
diff --git a/mnist_test.py b/mnist_test.py
index f5b130c..f19af30 100644
--- a/mnist_test.py
+++ b/mnist_test.py
@@ -26,8 +26,17 @@ def mnist_test(
     x_train = np.divide(x_train, 255)
     x_test = np.divide(x_train, 255)
     in_len = x_train[0].shape[0] * x_train[0].shape[0]
-    autoencoder = Autoencoder(in_len, bottleneck, 0.001, relu)
-    autoencoder.train_dataset(x_train, max_epoch, patience, display_loss=True)
+    autoencoder = Autoencoder(
+        [in_len, bottleneck],
+        [bottleneck, in_len],
+        0.1,
+        relu
+    )
+    autoencoder.train_dataset(
+        x_train,
+        max_epoch,
+        patience,
+        display_loss=True)
     example: np.ndarray = x_test[np.random.randint(0, len(x_test))]
     code = autoencoder.encode(example.flatten())
     output = autoencoder.decode(code)