init: first draft of autoencoder
3  .gitignore  vendored  Normal file
@@ -0,0 +1,3 @@
__pycache__
*.pyc
.venv
76  autoencoder.py  Normal file
@@ -0,0 +1,76 @@
import numpy as np
from typing import Callable

from utils import regularize


class Encoder:
    def __init__(self,
                 in_size: int,
                 out_size: int,
                 lr: float,
                 activation_func: Callable[[np.ndarray], np.ndarray]):
        self.W = np.random.uniform(-1, 1, (in_size, out_size))
        self.lr = lr
        self.last_input = None
        self.last_output = None
        self.activation_func = activation_func

    def forward(self, V: np.ndarray) -> np.ndarray:
        self.last_input = V
        z = V @ self.W
        self.last_output = regularize(self.activation_func(z))
        return self.last_output

    def backprop(self, error: np.ndarray) -> np.ndarray:
        # Propagate the error through the pre-update weights, then apply
        # the gradient step. (The activation derivative is ignored in this
        # first draft.)
        grad_input = error @ self.W.T
        dW = np.outer(self.last_input, error)
        self.W -= self.lr * dW
        return grad_input


class Decoder:
    def __init__(self,
                 in_size: int,
                 out_size: int,
                 lr: float,
                 activation_func: Callable[[np.ndarray], np.ndarray]):
        self.W = np.random.uniform(-1, 1, (in_size, out_size))
        self.lr = lr
        self.last_input = None
        self.last_output = None
        self.activation_func = activation_func

    def forward(self, V: np.ndarray) -> np.ndarray:
        self.last_input = V
        z = V @ self.W
        self.last_output = regularize(self.activation_func(z))
        return self.last_output

    def backprop(self, target: np.ndarray) -> np.ndarray:
        # The reconstruction error drives the weight update; return the
        # error mapped back through the pre-update weights so the encoder
        # can continue the backward pass.
        error = self.last_output - target
        grad_input = error @ self.W.T
        dW = np.outer(self.last_input, error)
        self.W -= self.lr * dW
        return grad_input


class Autoencoder:
    def __init__(self,
                 in_len: int,
                 bottleneck: int,
                 lr: float,
                 activation_func: Callable[[np.ndarray], np.ndarray]):
        self.encoder = Encoder(in_len, bottleneck, lr, activation_func)
        self.decoder = Decoder(bottleneck, in_len, lr, activation_func)

    def train(self, v: np.ndarray) -> float:
        # One forward/backward pass on a single sample; returns the L1
        # reconstruction loss.
        encoded = self.encoder.forward(v)
        reconstructed = self.decoder.forward(encoded)
        error = self.decoder.backprop(v)
        self.encoder.backprop(error)
        error = v - reconstructed
        return float(np.sum(np.abs(error)))

    def encode(self, v: np.ndarray) -> np.ndarray:
        return self.encoder.forward(v)

    def decode(self, v: np.ndarray) -> np.ndarray:
        return self.decoder.forward(v)
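For reference, a minimal round-trip smoke test of the classes above. The dimensions, learning rate, and choice of relu from utils are illustrative assumptions, not part of this commit:

    import numpy as np

    from autoencoder import Autoencoder
    from utils import relu

    # Hypothetical example: compress an 8-dimensional vector to 3 dimensions.
    ae = Autoencoder(in_len=8, bottleneck=3, lr=0.01, activation_func=relu)

    v = np.random.uniform(0, 1, 8)
    loss = ae.train(v)                # one forward/backward pass, L1 loss
    code = ae.encode(v)               # shape (3,)
    reconstruction = ae.decode(code)  # shape (8,)
    print(loss, code.shape, reconstruction.shape)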
4  requirements.txt  Normal file
@@ -0,0 +1,4 @@
numpy
matplotlib
keras
tensorflow
50  utils.py  Normal file
@@ -0,0 +1,50 @@
import numpy as np
import matplotlib.pyplot as plt


def softmax(v: np.ndarray) -> np.ndarray:
    # Shift by the max for numerical stability before exponentiating.
    v = v - np.max(v)
    exp_v = np.exp(v)
    return exp_v / np.sum(exp_v)


def relu(x: np.ndarray) -> np.ndarray:
    return x * (x > 0)


def normalize(v: np.ndarray) -> np.ndarray:
    # Scale to unit norm; the epsilon guards against division by zero.
    return v / (np.linalg.norm(v) + 1e-8)


def regularize(v: np.ndarray) -> np.ndarray:
    # Min-max scaling to [0, 1]; constant inputs pass through unchanged.
    v_min = v.min(axis=0)
    v_max = v.max(axis=0)
    if np.all(v_max == v_min):
        return v
    return (v - v_min) / (v_max - v_min)


def dynamic_loss_plot_init():
    # Interactive mode so the figure refreshes while training runs.
    plt.ion()
    fig, ax = plt.subplots()
    line, = ax.plot([], [], label="Loss")
    ax.set_xlabel("Epoch")
    ax.set_ylabel("Loss")
    ax.set_title("Training Loss")
    ax.legend()
    return ax, line


def dynamic_loss_plot_update(ax, line, loss):
    line.set_xdata(range(len(loss)))
    line.set_ydata(loss)
    ax.relim()
    ax.autoscale_view()
    plt.draw()
    plt.pause(0.1)


def dynamic_loss_plot_finish(ax, line):
    # Leave interactive mode and keep the final figure open.
    plt.ioff()
    plt.show()
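A sketch of how these utilities might be wired into a training loop with the autoencoder above. The data source, dimensions, and epoch count are assumptions for illustration only:

    import numpy as np

    from autoencoder import Autoencoder
    from utils import (relu, dynamic_loss_plot_init,
                       dynamic_loss_plot_update, dynamic_loss_plot_finish)

    # Hypothetical training data: 200 random 16-dimensional samples.
    data = np.random.uniform(0, 1, (200, 16))

    ae = Autoencoder(in_len=16, bottleneck=4, lr=0.01, activation_func=relu)
    ax, line = dynamic_loss_plot_init()

    losses = []
    for epoch in range(50):
        # Average per-sample L1 loss over the epoch, plotted live.
        epoch_loss = sum(ae.train(v) for v in data) / len(data)
        losses.append(epoch_loss)
        dynamic_loss_plot_update(ax, line, losses)

    dynamic_loss_plot_finish(ax, line)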