refactor: code de-dup

This commit is contained in:
Lenoctambule
2026-03-27 23:15:13 +01:00
parent c37d1c9c26
commit eee37c8496
4 changed files with 39 additions and 62 deletions

1
.gitignore vendored
View File

@@ -1,3 +1,4 @@
__pycache__
*.pyc
.venv
mnist.npz

View File

@@ -1,74 +1,21 @@
import numpy as np
from utils import (regularize,
dynamic_loss_plot_init,
from utils import (dynamic_loss_plot_init,
dynamic_loss_plot_update,
dynamic_loss_plot_finish)
import types
from tqdm import tqdm
from layers import NNLayer
LOADER = ['', '', '', '', '', '', '', '']
class Encoder:
    """A single fully connected encoder layer with a nonlinear activation.

    Holds a weight matrix ``W`` (in_size x out_size), a bias vector ``B``,
    and caches the most recent input/output for use by ``backprop``.
    """

    def __init__(self,
                 in_size: int,
                 out_size: int,
                 lr: float,
                 activation_func: types.FunctionType):
        # Weights drawn uniformly from [-1, 1); biases start at zero.
        self.W = np.random.uniform(-1, 1, (in_size, out_size))
        self.B = np.zeros((out_size))
        self.lr = lr
        self.activation_func = activation_func
        # Forward-pass caches, populated on the first call to forward().
        self.last_input = None
        self.last_output = None

    def forward(self, V: np.ndarray) -> np.ndarray:
        """Affine transform + activation + regularize; caches input/output."""
        self.last_input = V
        pre_activation = V @ self.W + self.B
        activated = self.activation_func(pre_activation)
        self.last_output = regularize(activated)
        return self.last_output

    def backprop(self, error: np.ndarray):
        """Apply one gradient-descent step and return the propagated error."""
        self.W -= self.lr * np.outer(self.last_input, error)
        self.B -= self.lr * error
        # NOTE(review): the error is pushed through the weights *after* this
        # step's update; standard backprop would use the pre-update weights.
        return error @ self.W.T
class Decoder:
    """A single fully connected decoder layer with a nonlinear activation.

    Mirrors the encoder layer: one weight matrix, one bias vector, and
    cached activations for the backward pass.
    """

    def __init__(self,
                 in_size: int,
                 out_size: int,
                 lr: float,
                 activation_func):
        # Uniform [-1, 1) weight init; zero-initialised biases.
        self.W = np.random.uniform(-1, 1, (in_size, out_size))
        self.B = np.zeros((out_size))
        self.lr = lr
        self.activation_func = activation_func
        # Set by forward(); consumed by backprop().
        self.last_input = None
        self.last_output = None

    def forward(self, V: np.ndarray) -> np.ndarray:
        """Compute regularize(activation(V @ W + B)), caching both ends."""
        self.last_input = V
        linear_out = V @ self.W + self.B
        self.last_output = regularize(self.activation_func(linear_out))
        return self.last_output

    def backprop(self, error: np.ndarray):
        """Gradient-descent update on W and B; return the upstream error."""
        weight_grad = np.outer(self.last_input, error)
        self.W -= self.lr * weight_grad
        self.B -= self.lr * error
        # NOTE(review): propagates through the just-updated weights (matches
        # the original implementation; textbook backprop uses pre-update W).
        return error @ self.W.T
class Autoencoder:
def __init__(self,
in_len: int,
bottleneck: int,
lr: float,
activation_func):
self.encoder = Encoder(in_len, bottleneck, lr, activation_func)
self.decoder = Decoder(bottleneck, in_len, lr, activation_func)
self.encoder = NNLayer(in_len, bottleneck, lr, activation_func)
self.decoder = NNLayer(bottleneck, in_len, lr, activation_func)
def train(self, v: np.ndarray) -> float:
encoded = self.encoder.forward(v)
@@ -89,10 +36,10 @@ class Autoencoder:
epoch = 0
no_improv = 0
prev_error = float('inf')
with tqdm(bar_format="{desc} {elapsed} {rate_fmt}") as lbar :
with tqdm(bar_format="{desc} {elapsed} {rate_fmt}") as lbar:
while True:
lbar.set_description(
f"{LOADER[epoch % len(LOADER)]} Training ({epoch=} error={prev_error:.2f})",
f"{LOADER[epoch % len(LOADER)]} Training ({epoch=} error={prev_error:.2f})", # noqa
)
lbar.update()
error = 0

29
layers.py Normal file
View File

@@ -0,0 +1,29 @@
import numpy as np
import types
from utils import regularize
class NNLayer:
    """A single fully connected neural-network layer.

    Unifies the former ``Encoder``/``Decoder`` classes: an affine transform
    (``W``, ``B``) followed by ``activation_func`` and ``regularize``.
    The most recent input/output are cached for the backward pass.
    """

    def __init__(self,
                 in_size: int,
                 out_size: int,
                 lr: float,
                 activation_func: types.FunctionType):
        # Weights drawn uniformly from [-1, 1); biases start at zero.
        self.W = np.random.uniform(-1, 1, (in_size, out_size))
        self.B = np.zeros((out_size))
        self.lr = lr
        # Forward-pass caches, populated by forward().
        self.last_input = None
        self.last_output = None
        self.activation_func = activation_func

    def forward(self, V: np.ndarray) -> np.ndarray:
        """Return regularize(activation(V @ W + B)), caching input and output."""
        self.last_input = V
        res = V @ self.W + self.B
        self.last_output = regularize(self.activation_func(res))
        return self.last_output

    def backprop(self, error: np.ndarray):
        """Apply one gradient-descent step; return the error for the previous layer.

        Bug fix: the upstream error is computed from the *pre-update* weights,
        as in standard backpropagation. Previously the error was propagated
        through weights that had already been modified by this step's update.
        """
        # NOTE(review): no activation-function derivative is applied here —
        # assumed intentional for the activations used; confirm with callers.
        upstream = error @ self.W.T
        self.W -= self.lr * np.outer(self.last_input, error)
        self.B -= self.lr * error
        return upstream

View File

@@ -9,7 +9,7 @@ def load_mnist():
import requests
mnist_path = "./mnist.npz"
mnist_url = "https://storage.googleapis.com/tensorflow/tf-keras-datasets/mnist.npz"
mnist_url = "https://storage.googleapis.com/tensorflow/tf-keras-datasets/mnist.npz" # noqa
if not os.path.exists(mnist_path):
with open(mnist_path, "w+b") as f:
f.write(requests.get(mnist_url, stream=True).content)
@@ -25,7 +25,7 @@ def mnist_test(
x_train, _, x_test, _ = load_mnist()
x_train = np.divide(x_train, 255)
x_test = np.divide(x_train, 255)
in_len = x_train[0].flatten().shape[0]
in_len = x_train[0].shape[0] * x_train[0].shape[0]
autoencoder = Autoencoder(in_len, bottleneck, 0.001, relu)
autoencoder.train_dataset(x_train, max_epoch, patience, display_loss=True)
example: np.ndarray = x_test[np.random.randint(0, len(x_test))]