feat: use DeepNNLayer in Autoencoder
@@ -3,27 +3,28 @@ from utils import (dynamic_loss_plot_init,
                    dynamic_loss_plot_update,
                    dynamic_loss_plot_finish)
 from tqdm import tqdm
-from layers import NNLayer
+from layers import DeepNNLayer
 
 LOADER = ['⡿', '⣟', '⣯', '⣷', '⣾', '⣽', '⣻', '⢿']
 
 
 class Autoencoder:
     def __init__(self,
-                 in_len: int,
-                 bottleneck: int,
+                 encoder_layers: list[int],
+                 decoder_layers: list[int],
                  lr: float,
                  activation_func):
-        self.encoder = NNLayer(in_len, bottleneck, lr, activation_func)
-        self.decoder = NNLayer(bottleneck, in_len, lr, activation_func)
+        self.encoder = DeepNNLayer(encoder_layers, lr, activation_func)
+        self.decoder = DeepNNLayer(decoder_layers, lr, activation_func)
 
-    def train(self, v: np.ndarray) -> float:
-        encoded = self.encoder.forward(v)
-        reconstructed = self.decoder.forward(encoded)
-        error = self.decoder.backprop(reconstructed - v)
-        self.encoder.backprop(error)
-        error = v - reconstructed
-        return np.sum(np.abs(error))
+    def train(self, v: np.ndarray):
+        out = self.decoder.forward(
+            self.encoder.forward(v)
+        )
+        self.encoder.backprop(
+            self.decoder.backprop(out - v)
+        )
+        return np.sum(np.abs(out - v)) / len(v)
 
     def train_dataset(self,
                       data_set: list[np.ndarray],
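The new code assumes a DeepNNLayer exported by layers that takes a list of layer sizes and keeps NNLayer's forward/backprop interface, with backprop returning the error to pass to the block below it (which is why the encoder can consume the decoder's backprop result directly). layers.py is not part of this commit, so the following is only a minimal sketch of that assumed interface, chaining NNLayer instances:

import numpy as np

from layers import NNLayer


class DeepNNLayer:
    """Sketch only: a stack of NNLayers built from a list of layer sizes;
    the real implementation in layers.py may differ."""

    def __init__(self, layer_sizes: list[int], lr: float, activation_func):
        # e.g. [784, 128, 32] builds two blocks: 784 -> 128 and 128 -> 32
        self.layers = [NNLayer(n_in, n_out, lr, activation_func)
                       for n_in, n_out in zip(layer_sizes, layer_sizes[1:])]

    def forward(self, v: np.ndarray) -> np.ndarray:
        # Push the input through every block in order.
        for layer in self.layers:
            v = layer.forward(v)
        return v

    def backprop(self, error: np.ndarray) -> np.ndarray:
        # Walk the blocks in reverse; each NNLayer.backprop is assumed to
        # return the error for the block below, as in the old train().
        for layer in reversed(self.layers):
            error = layer.backprop(error)
        return error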
@@ -60,10 +61,10 @@ class Autoencoder:
             if epoch > max_epoch:
                 break
             epoch += 1
-        if display_loss is True:
-            dynamic_loss_plot_finish(ax, line)
-        print("#Training complete !")
+        print("Training complete !")
+        if display_loss is True:
+            dynamic_loss_plot_finish(ax, line)
         return losses
 
     def encode(self, v: np.ndarray) -> np.ndarray:
         return self.encoder.forward(v)
@@ -26,8 +26,17 @@ def mnist_test(
     x_train = np.divide(x_train, 255)
     x_test = np.divide(x_train, 255)
     in_len = x_train[0].shape[0] * x_train[0].shape[0]
-    autoencoder = Autoencoder(in_len, bottleneck, 0.001, relu)
-    autoencoder.train_dataset(x_train, max_epoch, patience, display_loss=True)
+    autoencoder = Autoencoder(
+        [in_len, bottleneck],
+        [bottleneck, in_len],
+        0.1,
+        relu
+    )
+    autoencoder.train_dataset(
+        x_train,
+        max_epoch,
+        patience,
+        display_loss=True)
     example: np.ndarray = x_test[np.random.randint(0, len(x_test))]
     code = autoencoder.encode(example.flatten())
     output = autoencoder.decode(code)
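Because the constructor now takes explicit layer lists, the test script could also describe deeper encoders and decoders than the single [in_len, bottleneck] / [bottleneck, in_len] pair used in this commit. An illustrative call, where the extra hidden sizes and the bottleneck value are made up, and relu, x_train, max_epoch and patience are the names already used by the surrounding script:

in_len = 28 * 28                     # flattened MNIST image
bottleneck = 32                      # example code size, not taken from the commit

autoencoder = Autoencoder(
    [in_len, 256, 64, bottleneck],   # encoder shrinks the input in several steps
    [bottleneck, 64, 256, in_len],   # decoder mirrors it back to the input size
    0.1,
    relu
)
losses = autoencoder.train_dataset(x_train, max_epoch, patience,
                                   display_loss=True)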