1 | | -import numpy as np
2 | | -from TinyFlow import Layers
3 | | -from TinyFlow import Activations
4 | | -from TinyFlow import Loss
5 | | -from TinyFlow import Metrics
6 | | -from TinyFlow import Optimizers
7 | | -
| 1 | +###################### |
| 2 | +## UNDER DEVELOPMENT## |
| 3 | +###################### |
8 | 4 |
| 5 | +# Network class |
9 | 6 | class Network: |
10 | | - '''Network(inputFeatures) |
11 | | - Creates a new Neural Network object. |
12 | | - inputFeatures: Dimensions per sample |
13 | | - ''' |
14 | 7 |
15 | | - def __init__(self, inputFeatures): |
| 8 | + def __init__(self): |
| 9 | + |
| 10 | + # Create a list of all Network objects |
16 | 11 | self.layers = [] |
17 | | - self.inputFeatures = inputFeatures |
18 | | - self.prev = -1 |
19 | | - |
20 | | - def train(self, input, labels, epochs, lossFunction, optimizer): |
21 | | - '''Performs a forward pass on the input data through the network for the |
22 | | - specified number of epochs. |
23 | | - parameters: |
24 | | - input: np.array object\n |
25 | | - labels: np.array object\n |
26 | | - epochs: integer\n |
27 | | - lossFunction: instance of some loss function (eg. Loss_CategoricalCrossEntropy)\n |
28 | | - optimizer: instance of an optimizer (eg. Adam, AdaGrad, SGD etc.) |
29 | | - ''' |
30 | | - |
31 | | - assert input.shape[1] == self.inputFeatures |
32 | | - assert len(self.layers) > 0 |
33 | | - |
34 | | - for epoch in range(epochs): |
35 | | - inputLayerOutput = self.layers[0].forward(input) |
36 | | - |
37 | | - # Forward pass |
38 | | - for idx in range(1, len(self.layers)): |
39 | | - self.layers[idx].forward(self.layers[idx - 1].output) |
40 | | - |
41 | | - # Get metrics |
42 | | - loss = lossFunction.forward(self.layers[-1].output, labels) |
43 | | - accuracy = Metrics.model_accuracy_softmax(self.layers[-1].output, labels) |
44 | | - |
45 | | - if not epoch % 100: |
46 | | - print( |
47 | | - f'\nepoch: {epoch}, acc: {accuracy:.3f}, loss: {loss:.3f}, lr: {optimizer.current_learning_rate}') |
48 | | - |
49 | | - # Backward pass |
50 | | - lossFunction.backward(self.layers[-1].output, labels) |
51 | | - self.layers[-1].backward(lossFunction.dvalues) |
52 | | - for idx in range(len(self.layers) - 2, -1, -1): |
53 | | - self.layers[idx].backward(self.layers[idx + 1].dvalues) |
54 | | - |
55 | | - # Update weights |
56 | | - optimizer.pre_update_params() |
57 | | - for idx in range(len(self.layers)): |
58 | | - if isinstance(self.layers[idx], Layers.Layer_Dense): |
59 | | - optimizer.update_params(self.layers[idx]) |
60 | | - optimizer.post_update_params |
61 | | - |
62 | | - def test(self, X_test, y_test, lossFunction): |
63 | | - |
64 | | - inputLayerOutput = self.layers[0].forward(X_test) |
65 | | - |
66 | | - for idx in range(1, len(self.layers)): |
67 | | - self.layers[idx].forward(self.layers[idx - 1].output) |
68 | | - |
69 | | - loss = lossFunction.forward(self.layers[-1].output, y_test) |
70 | | - accuracy = Metrics.model_accuracy_softmax(self.layers[-1].output, y_test) |
71 | | - |
72 | | - print(f'validation, acc: {accuracy:.3f}, loss: {loss:.3f}') |
73 | | - |
74 | | - def addDenseLayer(self, neurons, weight_regularizer_l1=0, weight_regularizer_l2=0, bias_regulariser_l1=0, bias_regulariser_l2=0): |
75 | | - if len(self.layers) == 0: |
76 | | - denseX = Layers.Layer_Dense(self.inputFeatures, neurons, weight_regularizer_l1, |
77 | | - weight_regularizer_l2, bias_regulariser_l1, bias_regulariser_l2) |
78 | | - self.layers.append(denseX) |
79 | | - else: |
80 | | - denseX = Layers.Layer_Dense( |
81 | | - self.layers[self.prev].weights.shape[1], neurons) |
82 | | - self.layers.append(denseX) |
83 | | - self.prev = len(self.layers) - 1 |
84 | | - |
85 | | - def addDropoutLayer(self, rate): |
86 | | - dropoutX = Layers.Layer_Dropout(rate) |
87 | | - self.layers.append(dropoutX) |
88 | | - |
89 | | - def addReLU(self): |
90 | | - reluX = Activations.Activation_ReLU() |
91 | | - self.layers.append(reluX) |
92 | | - |
93 | | - def addSoftmax(self): |
94 | | - softmaxX = Activations.Activation_Softmax() |
95 | | - self.layers.append(softmaxX) |
96 | | - |
97 | | - def getSummary(self): |
98 | | - summary = "" |
99 | | - for i in range(len(self.layers)): |
100 | | - summary += f"Layer {str(i)} <" + self.layers[i].__str__() + ">\n" |
101 | | - summary = summary.strip() |
102 | | - return summary |
| 12 | + |
| 13 | + def add(self, layer): |
| 14 | + self.layers.append(layer) |
| 15 | + |
| 16 | + def set(self, *, loss, optimizer): |
| 17 | + self.loss = loss |
| 18 | + self.optimizer = optimizer |
| 19 | + |
| 20 | + def compile_model(self): |
| 21 | + |
| 22 | + # Create and set the input layer |
| 23 | + self.input_layer = Layer_Input() |
| 24 | + |
| 25 | + # Count all the objects |
| 26 | + layer_count = len(self.layers) |
| 27 | + |
| 28 | + # Iterate through all the objects |
| 29 | + for i in range(layer_count): |
| 30 | + |
| 31 | + # If this is the first layer, the input layer will be considered |
| 32 | + # as the previous object |
| 33 | + if i == 0: |
| 34 | + self.layers[i].prev = self.input_layer |
| 35 | + self.layers[i].next = self.layers[i + 1] |
| 36 | + |
| 37 | + # All layers except first and last |
| 38 | + elif i < layer_count - 1: |
| 39 | + self.layers[i].prev = self.layers[i - 1] |
| 40 | + self.layers[i].next = self.layers[i + 1] |
| 41 | + |
| 42 | + # The last layer will have the next object as the loss |
| 43 | + else: |
| 44 | + self.layers[i].prev = self.layers[i - 1] |
| 45 | + self.layers[i].next = self.loss |
| 46 | + |
| 47 | + def forward(self, X): |
| 48 | + |
| 49 | + # Call the forward method on the input layer
| 50 | + # This sets the output property that the first layer
| 51 | + # expects to find on its 'prev' object
| 52 | + self.input_layer.forward(X) |
| 53 | + |
| 54 | + # Call the forward method of every object in sequence,
| 55 | + # passing the previous object's output as the parameter
| 56 | + for layer in self.layers: |
| 57 | + layer.forward(layer.prev.output) |
| 58 | + |
| 59 | + # 'layer' is now the last object from the list, return its output |
| 60 | + return layer.output |
| 61 | + |
| 62 | + def train(self, X, y, *, epochs=1, print_every=1): |
| 63 | + |
| 64 | + # Main training loop |
| 65 | + for epoch in range(1, epochs+1): |
| 66 | + |
| 67 | + # Perform the forward pass |
| 68 | + output = self.forward(X) |
| 69 | + |
| 70 | + # Temporary |
| 71 | + print(output) |
| 72 | + exit() |
| 73 | + |
| 74 | + |
| 75 | + |
| 76 | + |
| 77 | +class Layer_Input: |
| 78 | + |
| 79 | + # Pass the input |
| 80 | + def forward(self, inputs): |
| 81 | + self.output = inputs |
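
For context, here is a minimal usage sketch of the API this commit is moving toward. It assumes the existing TinyFlow modules (Layers, Activations, Loss, Optimizers) and takes the loss/optimizer class names (Loss_CategoricalCrossEntropy, SGD) from the removed train() docstring, so treat those names as assumptions; Network and Layer_Input are the classes defined in this file, and the current train() stub stops after printing the first forward pass.

import numpy as np
from TinyFlow import Layers, Activations, Loss, Optimizers

# Layer dimensions are now given explicitly, since Network() no longer
# tracks inputFeatures: 2 input features -> 64 hidden units -> 3 classes
model = Network()
model.add(Layers.Layer_Dense(2, 64))
model.add(Activations.Activation_ReLU())
model.add(Layers.Layer_Dense(64, 3))
model.add(Activations.Activation_Softmax())

# Attach loss and optimizer; the class names here are assumed from the
# removed train() docstring, not confirmed by this commit
model.set(loss=Loss.Loss_CategoricalCrossEntropy(), optimizer=Optimizers.SGD())

# compile_model() wires each layer's prev/next references, placing the
# input layer at the front and the loss object at the end of the chain
model.compile_model()

# Dummy data: 5 samples with 2 features each, labels in {0, 1, 2}
X = np.random.randn(5, 2)
y = np.random.randint(0, 3, size=5)

# The current train() stub runs one forward pass, prints the output,
# and exits
model.train(X, y, epochs=10, print_every=100)

Linking the layers into a chain lets forward() walk the list generically via layer.forward(layer.prev.output) instead of indexing into self.layers as the old train() did, and a later backward pass can traverse the same chain in reverse through the next references.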