
Commit a016808

💡 Rewriting Network Wrapper from scratch to make it more flexible
1 parent 2e06c07 commit a016808

5 files changed (+183 additions, -11 deletions)

TinyFlow/Activations.py

Lines changed: 24 additions & 2 deletions

@@ -26,8 +26,12 @@ def backward(self, dvalues):
         # Zero gradient where the input values were <= 0
         self.dvalues[self.inputs <= 0] = 0

+    # Calculate prediction for outputs
+    def predictions(self, outputs):
+        return outputs
+
     def __str__(self):
-        return "ReLU"
+        return "ReLU Activation"


 # Softmax activation
@@ -56,8 +60,12 @@ def backward(self, dvalues):

         self.dvalues = dvalues.copy()

+    # return predictions for outputs
+    def predictions(self, outputs):
+        return np.argmax(outputs, axis=1)
+
     def __str__(self):
-        return "Softmax"
+        return "Softmax Activation"


 # Sigmoid activation
@@ -73,6 +81,13 @@ def backward(self, dvalues):
         # d/dx(sigm(x)) = sigm(x) * [1 - sigm(x)]
         self.dvalues = dvalues * (1 - self.output) * self.output

+    # Calculate predictions for outputs
+    def predictions(self, outputs):
+        return (outputs > 0.5) * 1
+
+    def __str__(self):
+        return "Sigmoid Activation"
+

 # Linear activation
 class Activation_Linear:
@@ -90,3 +105,10 @@ def backward(self, dvalues):
         # downstream gradient = (local derivative) * (upstream gradient)
         # self.dvalues = 1 * dvalues
         self.dvalues = dvalues.copy()
+
+    # Calculate predictions for outputs
+    def predictions(self, outputs):
+        return outputs
+
+    def __str__(self):
+        return "Linear Activation"

TinyFlow/Loss.py

Lines changed: 66 additions & 0 deletions

@@ -36,6 +36,72 @@ def regularization_loss(self, layer):

         return regularization_loss

+    # Regularization loss calculation
+
+    def network_regularization_loss(self):
+        '''network_regularization_loss (self)\n
+        Internal method for network wrapper for auto calculation
+        of regularization loss of all the trainable layers
+        '''
+
+        # 0 by default
+        regularization_loss = 0
+
+        # Calculate regularization loss - iterate over all trainable layers
+        for layer in self.trainable_layers:
+            # L1 regularization - weights
+            # Only calculate when factor greater than 0
+            if layer.weight_regularizer_l1 > 0:
+                regularization_loss += layer.weight_regularizer_l1 * \
+                    np.sum(np.abs(layer.weights))
+
+            # L2 regularization - weights
+            # Only calculate when factor greater than 0
+            if layer.weight_regularizer_l2 > 0:
+                regularization_loss += layer.weight_regularizer_l2 * \
+                    np.sum(layer.weights * layer.weights)
+
+            # L1 regularization - biases
+            # Only calculate when factor greater than 0
+            if layer.bias_regularizer_l1 > 0:
+                regularization_loss += layer.bias_regularizer_l1 * \
+                    np.sum(np.abs(layer.biases))
+
+            # L2 regularization - biases
+            # Only calculate when factor greater than 0
+            if layer.bias_regularizer_l2 > 0:
+                regularization_loss += layer.bias_regularizer_l2 * \
+                    np.sum(layer.biases * layer.biases)
+
+        return regularization_loss
+
+    # Set/remember trainable layers
+    def remember_trainable_layers(self, trainable_layers):
+        '''remember_trainable_layers (self, trainable_layers)\n
+        internal method for Network wrapper to keep track of trainable layers
+        '''
+
+        self.trainable_layers = trainable_layers
+
+    # Calculates the data and regularization losses
+    # given model output and ground truth values
+
+    def calculate(self, output, y):
+        '''calculate(self, output, ground_truth)\n
+        internal method for Network wrapper\n
+        Calculates the data and regularization losses
+        given model output and ground truth values
+        '''
+
+        # Calculate sample losses
+        sample_losses = self.forward(output, y)
+
+        # Calculate the mean loss
+        data_loss = np.mean(sample_losses)
+
+        # Return the data and regularization losses
+        return data_loss, self.network_regularization_loss()
+

 # Cross-entropy loss
 class Loss_CategoricalCrossEntropy(Loss):
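With these additions, Loss.calculate() returns the mean per-sample loss and the summed regularization loss as separate values, which is what lets train() log them individually. A rough standalone mimic of that contract (DummyLayer and the sample losses below are made-up stand-ins, not TinyFlow objects):

import numpy as np

class DummyLayer:
    # stand-in trainable layer exposing only what network_regularization_loss() reads
    def __init__(self):
        self.weights = np.array([[0.5, -0.5]])
        self.biases = np.array([[0.1]])
        self.weight_regularizer_l1 = 0
        self.weight_regularizer_l2 = 1e-3
        self.bias_regularizer_l1 = 0
        self.bias_regularizer_l2 = 0

layer = DummyLayer()
reg_loss = layer.weight_regularizer_l2 * np.sum(layer.weights * layer.weights)

sample_losses = np.array([0.25, 0.10, 0.40])   # what self.forward(output, y) would return
data_loss = np.mean(sample_losses)

print(data_loss, reg_loss)   # 0.25 0.0005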

TinyFlow/Metrics.py

Lines changed: 37 additions & 0 deletions

@@ -1,6 +1,43 @@
 import numpy as np


+# Common Accuracy class
+class Accuracy:
+
+    # Calculates accuracy, given predictions and ground truth values
+    def calculate(self, predictions, y):
+
+        # Get comparison results
+        comparisons = self.compare(predictions, y)
+
+        # Calculate an accuracy
+        accuracy = np.mean(comparisons)
+
+        # Return the accuracy
+        return accuracy
+
+
+class Accuracy_Regression(Accuracy):
+
+    def __init__(self):
+        # Create a precision property
+        self.precision = None
+
+    # Calculates precision value based on passed in ground truth values
+    def init(self, y, reinit=False):
+        '''init (self, y, reinit=False)\n
+        reinit - Forces reinitilization of precision\n
+        Calculates precision value based on passed in ground truth values
+        '''
+
+        if self.precision is None or reinit:
+            self.precision = np.std(y) / 500
+
+    # Compare predictions to the ground truth values
+    def compare(self, predictions, y):
+        return np.absolute(predictions - y) < self.precision
+
+
 def model_accuracy_softmax(outputs, labels):
     '''Returns the accuracy of the model on the current batch'''
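Accuracy_Regression counts a regression prediction as correct when it lands within a precision band of std(y) / 500 around the target, the same rule used in init() above. A quick standalone check of that behaviour (synthetic data, not the sine_data() demo):

import numpy as np

y = np.linspace(-1, 1, 1000)            # ground truth values
precision = np.std(y) / 500             # same rule as Accuracy_Regression.init()

# predictions with noise smaller than the band are mostly "correct"
predictions = y + np.random.normal(0, precision / 2, y.shape)
comparisons = np.absolute(predictions - y) < precision
print(f'accuracy: {np.mean(comparisons):.3f}')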

TinyFlow/Network.py

Lines changed: 54 additions & 7 deletions

@@ -13,9 +13,10 @@ def __init__(self):
     def add(self, layer):
         self.layers.append(layer)

-    def set(self, *, loss, optimizer):
+    def set(self, *, loss, optimizer, accuracy):
         self.loss = loss
         self.optimizer = optimizer
+        self.accuracy = accuracy

     def compile_model(self):

@@ -25,6 +26,9 @@ def compile_model(self):
         # Count all the objects
         layer_count = len(self.layers)

+        # Initialize a list containing trainable layers
+        self.trainable_layers = []
+
         # Iterate through all the objects
         for i in range(layer_count):

@@ -40,11 +44,23 @@ def compile_model(self):
                 self.layers[i].next = self.layers[i + 1]

             # The last layer will have the next object as the loss
+            # We also save aside the reference to the last object
+            # whose output is the model's output
             else:
                 self.layers[i].prev = self.layers[i - 1]
                 self.layers[i].next = self.loss
+                self.output_layer_activation = self.layers[i]
+
+            # If the layer contains an attribute called 'weights', it's a
+            # trainable layer, add it to the list of trainable layers
+            # We dont't need to check for biases, checking for weights is enough
+            if hasattr(self.layers[i], 'weights'):
+                self.trainable_layers.append(self.layers[i])
+
+        # Update the loss object with trainable layers
+        self.loss.remember_trainable_layers(self.trainable_layers)

-    def forward(self, X):
+    def forward(self, X, training):

         # Call the forward method on the input layer
         # This will set the output property that the first layer
@@ -59,19 +75,50 @@ def forward(self, X):
         # 'layer' is now the last object from the list, return its output
         return layer.output

-    def train(self, X, y, *, epochs=1, print_every=1):
+    def backward(self, output, y):
+
+        # First call the backward method on loss
+        # This will set the dvalues property that the last
+        # layer will try to access shortly
+        self.loss.backward(output, y)
+
+        # Call backward method going through all the objects in
+        # reversed order, passing down the dvalues as a parameter
+        for layer in reversed(self.layers):
+            layer.backward(layer.next.dvalues)
+
+    def train(self, X, y, *, epochs=1, print_every=1, validation_data=None):
+
+        # Initialize accuracy object
+        self.accuracy.init(y)

         # Main training loop
         for epoch in range(1, epochs+1):

             # Perform the forward pass
-            output = self.forward(X)
+            output = self.forward(X, training=True)
+
+            # Calculate loss
+            data_loss, regularization_loss = self.loss.calculate(output, y)
+
+            loss = data_loss + regularization_loss
+
+            # Get predictions and calculate accuracy
+            predictions = self.output_layer_activation.predictions(output)
+            accuracy = self.accuracy.calculate(predictions, y)

-            # Temporary
-            print(output)
-            exit()
+            # Backward pass
+            self.backward(output, y)

+            # Optimize (update parameters)
+            self.optimizer.pre_update_params()
+            for layer in self.trainable_layers:
+                self.optimizer.update_params(layer)
+            self.optimizer.post_update_params()

+            # Print a summary
+            if not epoch % print_every:
+                print(f'epoch: {epoch}, acc: {accuracy:.3f}, loss: {loss:.3f} (data_loss: {data_loss:.3f}, reg_loss: {regularization_loss:.3f}), lr: {self.optimizer.current_learning_rate}')


 class Layer_Input:
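backward() relies on the prev/next chain that compile_model() wires up: each layer pulls its upstream gradient from layer.next.dvalues, so the loop never passes gradients around explicitly. A toy illustration of that chaining (ToyLayer and ToyLoss are hypothetical stand-ins, not TinyFlow classes):

class ToyLayer:
    def backward(self, dvalues):
        # each toy layer just doubles the upstream gradient
        self.dvalues = dvalues * 2

class ToyLoss:
    def backward(self, output, y):
        # stand-in upstream gradient from the loss
        self.dvalues = output - y

layers = [ToyLayer(), ToyLayer(), ToyLayer()]
loss = ToyLoss()

# wire the next references the way compile_model() does
for i in range(len(layers) - 1):
    layers[i].next = layers[i + 1]
layers[-1].next = loss

loss.backward(output=1.0, y=0.0)
for layer in reversed(layers):
    layer.backward(layer.next.dvalues)

print(layers[0].dvalues)   # 8.0, the gradient doubled once per layer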

demo_Network.py

Lines changed: 2 additions & 2 deletions

@@ -9,7 +9,7 @@
 from TinyFlow.Activations import Activation_Linear, Activation_ReLU
 from TinyFlow.Optimizers import Optimizer_Adam
 from TinyFlow.Loss import Loss_MeanSquaredError
-
+from TinyFlow.Metrics import Accuracy_Regression
 # Create a dataset
 X, y = sine_data()

@@ -25,7 +25,7 @@
 model.add(Activation_Linear())

 # Set loss and optimizer objects
-model.set(loss=Loss_MeanSquaredError(), optimizer=Optimizer_Adam(decay=1e-8))
+model.set(loss=Loss_MeanSquaredError(), optimizer=Optimizer_Adam(decay=1e-8), accuracy=Accuracy_Regression())

 # Compile the model
 model.compile_model()
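After compile_model(), the demo would presumably kick off training through the new train() method; the keyword arguments come from the Network.py diff above, while the concrete values here are only illustrative:

# Illustrative next step for the demo, matching the new train() signature
model.train(X, y, epochs=1000, print_every=100)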
