From eee37c8496be63f927817e4b90a530b70d405660 Mon Sep 17 00:00:00 2001
From: Lenoctambule <106790775+lenoctambule@users.noreply.github.com>
Date: Fri, 27 Mar 2026 23:15:13 +0100
Subject: [PATCH] refactor: code de-dup

---
 .gitignore     |  3 ++-
 autoencoder.py | 65 +++++---------------------------------------------
 layers.py      | 29 ++++++++++++++++++++++
 mnist_test.py  |  4 ++--
 4 files changed, 39 insertions(+), 62 deletions(-)
 create mode 100644 layers.py

diff --git a/.gitignore b/.gitignore
index d315b16..8eefd96 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,3 +1,4 @@
 __pycache__
 *.pyc
-.venv
\ No newline at end of file
+.venv
+mnist.npz
\ No newline at end of file
diff --git a/autoencoder.py b/autoencoder.py
index 9f2e7e9..0b626bc 100644
--- a/autoencoder.py
+++ b/autoencoder.py
@@ -1,74 +1,21 @@
 import numpy as np
-from utils import (regularize,
-                   dynamic_loss_plot_init,
+from utils import (dynamic_loss_plot_init,
                    dynamic_loss_plot_update,
                    dynamic_loss_plot_finish)
-import types
 from tqdm import tqdm
+from layers import NNLayer
 
 LOADER = ['⡿', '⣟', '⣯', '⣷', '⣾', '⣽', '⣻', '⢿']
 
 
-class Encoder:
-    def __init__(self,
-                 in_size: int,
-                 out_size: int,
-                 lr: float,
-                 activation_func: types.FunctionType):
-        self.W = np.random.uniform(-1, 1, (in_size, out_size))
-        self.B = np.zeros((out_size))
-        self.lr = lr
-        self.last_input = None
-        self.last_output = None
-        self.activation_func = activation_func
-
-    def forward(self, V: np.ndarray) -> np.ndarray:
-        self.last_input = V
-        res = V @ self.W + self.B
-        self.last_output = regularize(self.activation_func(res))
-        return self.last_output
-
-    def backprop(self, error: np.ndarray):
-        dW = np.outer(self.last_input, error)
-        self.W -= self.lr * dW
-        self.B -= self.lr * error
-        return error @ self.W.T
-
-
-class Decoder:
-    def __init__(self,
-                 in_size: int,
-                 out_size: int,
-                 lr: float,
-                 activation_func):
-        self.W = np.random.uniform(-1, 1, (in_size, out_size))
-        self.B = np.zeros((out_size))
-        self.lr = lr
-        self.last_input = None
-        self.last_output = None
-        self.activation_func = activation_func
-
-    def forward(self, V: np.ndarray) -> np.ndarray:
-        self.last_input = V
-        res = V @ self.W + self.B
-        self.last_output = regularize(self.activation_func(res))
-        return self.last_output
-
-    def backprop(self, error: np.ndarray):
-        dW = np.outer(self.last_input, error)
-        self.W -= self.lr * dW
-        self.B -= self.lr * error
-        return error @ self.W.T
-
-
 class Autoencoder:
     def __init__(self,
                  in_len: int,
                  bottleneck: int,
                  lr: float,
                  activation_func):
-        self.encoder = Encoder(in_len, bottleneck, lr, activation_func)
-        self.decoder = Decoder(bottleneck, in_len, lr, activation_func)
+        self.encoder = NNLayer(in_len, bottleneck, lr, activation_func)
+        self.decoder = NNLayer(bottleneck, in_len, lr, activation_func)
 
     def train(self, v: np.ndarray) -> float:
         encoded = self.encoder.forward(v)
@@ -89,10 +36,10 @@ class Autoencoder:
         epoch = 0
         no_improv = 0
         prev_error = float('inf')
-        with tqdm(bar_format="{desc} {elapsed} {rate_fmt}") as lbar :
+        with tqdm(bar_format="{desc} {elapsed} {rate_fmt}") as lbar:
             while True:
                 lbar.set_description(
-                    f"{LOADER[epoch % len(LOADER)]} Training ({epoch=} error={prev_error:.2f})",
+                    f"{LOADER[epoch % len(LOADER)]} Training ({epoch=} error={prev_error:.2f})",  # noqa
                 )
                 lbar.update()
                 error = 0
diff --git a/layers.py b/layers.py
new file mode 100644
index 0000000..4fc2ed5
--- /dev/null
+++ b/layers.py
@@ -0,0 +1,29 @@
+import numpy as np
+import types
+from utils import regularize
+
+
+class NNLayer:
+    def __init__(self,
+                 in_size: int,
+                 out_size: int,
+                 lr: float,
+                 activation_func: types.FunctionType):
+        self.W = np.random.uniform(-1, 1, (in_size, out_size))
+        self.B = np.zeros((out_size))
+        self.lr = lr
+        self.last_input = None
+        self.last_output = None
+        self.activation_func = activation_func
+
+    def forward(self, V: np.ndarray) -> np.ndarray:
+        self.last_input = V
+        res = V @ self.W + self.B
+        self.last_output = regularize(self.activation_func(res))
+        return self.last_output
+
+    def backprop(self, error: np.ndarray):
+        dW = np.outer(self.last_input, error)
+        self.W -= self.lr * dW
+        self.B -= self.lr * error
+        return error @ self.W.T
diff --git a/mnist_test.py b/mnist_test.py
index c2acf10..f5b130c 100644
--- a/mnist_test.py
+++ b/mnist_test.py
@@ -9,7 +9,7 @@ def load_mnist():
     import requests
 
     mnist_path = "./mnist.npz"
-    mnist_url = "https://storage.googleapis.com/tensorflow/tf-keras-datasets/mnist.npz"
+    mnist_url = "https://storage.googleapis.com/tensorflow/tf-keras-datasets/mnist.npz"  # noqa
     if not os.path.exists(mnist_path):
         with open(mnist_path, "w+b") as f:
             f.write(requests.get(mnist_url, stream=True).content)
@@ -25,7 +25,7 @@ def mnist_test(
     x_train, _, x_test, _ = load_mnist()
     x_train = np.divide(x_train, 255)
     x_test = np.divide(x_train, 255)
-    in_len = x_train[0].flatten().shape[0]
+    in_len = x_train[0].shape[0] * x_train[0].shape[0]
     autoencoder = Autoencoder(in_len, bottleneck, 0.001, relu)
     autoencoder.train_dataset(x_train, max_epoch, patience, display_loss=True)
     example: np.ndarray = x_test[np.random.randint(0, len(x_test))]