Compare commits
2 Commits
a93bb0a692
...
main
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
e6b508f739 | ||
|
|
cc74b62afd |
@@ -25,3 +25,11 @@ class LeakyReLU(ActivationFunc):
|
||||
|
||||
def derivative(self, x):
    """Gradient of leaky ReLU: 1 where x > 0, slope ``self.k`` where x <= 0."""
    grad_pos = x > 0
    grad_neg = x <= 0
    return grad_pos + self.k * grad_neg
|
||||
|
||||
|
||||
class Identity(ActivationFunc):
    """Identity activation: f(x) = x, f'(x) = 1."""

    def __call__(self, x):
        return x

    def derivative(self, x):
        # BUG FIX: the original signature was `derivative(x)` with no
        # `self`, so inst.derivative(x) raised TypeError (the instance
        # was bound to `x`). Now matches LeakyReLU.derivative(self, x).
        return 1
|
||||
|
||||
114
autoencoder.py
114
autoencoder.py
@@ -3,43 +3,14 @@ from utils import (dynamic_loss_plot_init,
|
||||
dynamic_loss_plot_update,
|
||||
dynamic_loss_plot_finish)
|
||||
from tqdm import tqdm
|
||||
from layers import DeepNNLayer
|
||||
from layers import DeepNNLayer, SamplingLayer
|
||||
from activations import ActivationFunc
|
||||
from abc import ABC, abstractmethod
|
||||
|
||||
LOADER = ['⡿', '⣟', '⣯', '⣷', '⣾', '⣽', '⣻', '⢿']
|
||||
|
||||
|
||||
class Autoencoder:
|
||||
def __init__(self,
             encoder_layers: list[int],
             decoder_layers: list[int],
             lr: float,
             activation_func: ActivationFunc):
    """Build the encoder and decoder stacks.

    Raises:
        Exception: if the encoder's output width does not match the
            decoder's input width.
    """
    if encoder_layers[-1] != decoder_layers[0]:
        raise Exception(
            # BUG FIX: the message previously interpolated
            # encoder_layers[0] instead of decoder_layers[0], so it
            # reported a pair the condition never compared.
            f"Encoder output and decoder input don't match {encoder_layers[-1]} != {decoder_layers[0]}"  # noqa
        )
    self.encoder = DeepNNLayer(encoder_layers, lr, activation_func)
    self.decoder = DeepNNLayer(decoder_layers, lr, activation_func)
|
||||
|
||||
def __str__(self):
    """Human-readable dump of the encoder followed by the decoder."""
    enc = self.encoder
    dec = self.decoder
    return f'Encoder:\n{enc}\n\nDecoder:\n{dec}'
|
||||
|
||||
def loss(self, data_set: list[np.ndarray]) -> float:
    """Mean per-sample L1 reconstruction error over the whole data set."""
    per_sample = [
        np.sum(np.abs(x - self.forward(x)[0])) / len(x)
        for x in data_set
    ]
    return sum(per_sample) / len(data_set)
|
||||
|
||||
def train(self, v: np.ndarray):
    """One gradient step on sample ``v``; returns its mean absolute error."""
    code = self.encoder.forward(v)
    out = self.decoder.forward(code)
    # Backpropagate the reconstruction error decoder-first, then feed
    # the decoder's input-gradient into the encoder.
    grad = self.decoder.backprop(out - v)
    self.encoder.backprop(grad)
    return np.sum(np.abs(out - v)) / len(v)
|
||||
|
||||
class AAutoencoder(ABC):
|
||||
def train_dataset(self,
|
||||
data_set: list[np.ndarray],
|
||||
max_epoch: int,
|
||||
@@ -80,6 +51,65 @@ class Autoencoder:
|
||||
dynamic_loss_plot_finish(ax, line)
|
||||
return losses
|
||||
|
||||
def loss(self, data_set: list[np.ndarray]) -> float:
    """Average per-sample mean-absolute reconstruction error."""
    total = 0
    for sample in data_set:
        reconstruction = self.forward(sample)[0]
        total += np.sum(np.abs(sample - reconstruction)) / len(sample)
    return total / len(data_set)
|
||||
|
||||
def save(self, path: str):
    """Pickle the whole model with np.save (the file gets a .npy suffix)."""
    np.save(path.removesuffix('.npy'), self)
|
||||
|
||||
def load(path: str) -> 'Autoencoder':
    """Inverse of save(): unpickle a model from ``path`` (.npy optional)."""
    normalized = path.removesuffix('.npy') + '.npy'
    return np.load(normalized, allow_pickle=True).item()
|
||||
|
||||
@abstractmethod
def train(self, v: np.ndarray) -> float:
    """Run one training step on sample ``v`` and return its loss.

    Subclasses must implement this.
    """
    pass
|
||||
|
||||
@abstractmethod
def encode(self, v: np.ndarray) -> np.ndarray:
    """Map input ``v`` to its latent code via the encoder network.

    NOTE(review): declared @abstractmethod yet ships a concrete body, so
    subclasses are forced to override a seemingly usable default —
    confirm the decorator is intentional.
    """
    return self.encoder.forward(v)
|
||||
|
||||
@abstractmethod
def decode(self, v: np.ndarray) -> np.ndarray:
    """Map a latent code ``v`` back to input space via the decoder.

    NOTE(review): @abstractmethod with a concrete body — subclasses must
    override even though this default looks usable; confirm intent.
    """
    return self.decoder.forward(v)
|
||||
|
||||
@abstractmethod
def forward(self, v: np.ndarray) -> tuple[np.ndarray, np.ndarray]:
    """Full pass: encode ``v`` then decode; return (reconstruction, code).

    NOTE(review): @abstractmethod with a complete generic implementation —
    confirm subclasses really need to override this.
    """
    code = self.encode(v)
    out = self.decode(code)
    return out, code
|
||||
|
||||
|
||||
class Autoencoder(AAutoencoder):
|
||||
def __init__(self,
             encoder_layers: list[int],
             decoder_layers: list[int],
             lr: float,
             activation_func: ActivationFunc):
    """Build the encoder and decoder stacks.

    Raises:
        Exception: if the encoder's output width does not match the
            decoder's input width.
    """
    if encoder_layers[-1] != decoder_layers[0]:
        raise Exception(
            # BUG FIX: the message previously interpolated
            # encoder_layers[0] instead of decoder_layers[0], reporting
            # a value the condition never compared.
            f"Encoder output and decoder input don't match {encoder_layers[-1]} != {decoder_layers[0]}"  # noqa
        )
    self.encoder = DeepNNLayer(encoder_layers, lr, activation_func)
    self.decoder = DeepNNLayer(decoder_layers, lr, activation_func)
|
||||
|
||||
def train(self, v: np.ndarray) -> float:
    """One SGD step on sample ``v``; returns its mean absolute error."""
    latent = self.encoder.forward(v)
    reconstruction = self.decoder.forward(latent)
    error = reconstruction - v
    # Decoder backprop runs first; its input-gradient drives the encoder.
    self.encoder.backprop(self.decoder.backprop(error))
    return np.sum(np.abs(error)) / len(v)
|
||||
|
||||
def __str__(self):
    """Pretty-print the encoder, a blank line, then the decoder."""
    parts = (f'Encoder:\n{self.encoder}', f'Decoder:\n{self.decoder}')
    return '\n\n'.join(parts)
|
||||
|
||||
def encode(self, v: np.ndarray) -> np.ndarray:
|
||||
return self.encoder.forward(v)
|
||||
|
||||
@@ -91,11 +121,17 @@ class Autoencoder:
|
||||
out = self.decode(code)
|
||||
return out, code
|
||||
|
||||
def save(self, path: str):
    """Persist the model via np.save (writes ``path`` + '.npy')."""
    base = path.removesuffix('.npy')
    np.save(base, self)
|
||||
|
||||
def load(path: str) -> 'Autoencoder':
    """Load a model previously written by save()."""
    file_path = path.removesuffix('.npy') + '.npy'
    payload = np.load(file_path, allow_pickle=True)
    return payload.item()
|
||||
class VariationalAutoencoder(AAutoencoder):
|
||||
def __init__(self,
             encoder_layers: list[int],
             decoder_layers: list[int],
             lr: float,
             activation_func: ActivationFunc):
    """Build encoder, decoder, and a sampling layer on the latent width.

    Raises:
        Exception: if the encoder's output width does not match the
            decoder's input width.
    """
    if encoder_layers[-1] != decoder_layers[0]:
        raise Exception(
            # BUG FIX: the message previously interpolated
            # encoder_layers[0] instead of decoder_layers[0].
            f"Encoder output and decoder input don't match {encoder_layers[-1]} != {decoder_layers[0]}"  # noqa
        )
    self.encoder = DeepNNLayer(encoder_layers, lr, activation_func)
    self.decoder = DeepNNLayer(decoder_layers, lr, activation_func)
    self.sampler = SamplingLayer(decoder_layers[0], lr, activation_func)
|
||||
|
||||
17
layers.py
17
layers.py
@@ -38,6 +38,23 @@ class NNLayer:
|
||||
return ret
|
||||
|
||||
|
||||
class SamplingLayer:
    """Sampling layer for the VAE latent space.

    NOTE(review): work in progress — backprop is a stub, and the lr /
    activation_func constructor arguments are accepted but never used.
    """

    def __init__(self,
                 in_size: int,
                 lr: float,
                 activation_func: ActivationFunc):
        # Square projection matrices mapping the encoder output to the
        # mean and "variance" of the latent distribution; initialized
        # uniformly in [-0.1, 0.1).
        # NOTE(review): lr and activation_func are unused here.
        self.W_mean = np.random.uniform(-0.1, 0.1, (in_size, in_size))
        self.W_variance = np.random.uniform(-0.1, 0.1, (in_size, in_size))

    def forward(self, v) -> np.ndarray:
        # Draw one latent sample per component from a normal distribution.
        mean = self.W_mean @ v
        variance = self.W_variance @ v
        # NOTE(review): np.random.normal's second argument is a standard
        # deviation, not a variance, and W_variance @ v can be negative,
        # which makes np.random.normal raise ValueError. Typical VAEs
        # predict a log-variance and use exp(0.5 * logvar) as the scale
        # here — confirm the intended parameterization before relying on
        # this.
        return np.random.normal(mean, variance)

    def backprop(self, error: np.ndarray) -> np.ndarray:
        # TODO: gradient pass not implemented yet; implicitly returns None.
        pass
|
||||
|
||||
|
||||
class DeepNNLayer:
|
||||
def __init__(self,
|
||||
layers: list[int],
|
||||
|
||||
Reference in New Issue
Block a user