2 files changed, +29 −6 lines changed

@@ -7,6 +7,17 @@ def linear_activation(z):
 def tanh_activation(z):
     return np.tanh(z)
 
+
+def averageOf3(input1, input2, input3):
+    w1 = 1.0 / 3.0
+    w2 = 1.0 / 3.0
+    w3 = 1.0 / 3.0
+    bias = 0
+    z = input1 * w1 + input2 * w2 + input3 * w3 + bias
+    y = linear_activation(z)
+    return y
+
+
 # 2 layer NN for implementation of OR gate
 def orgate(input1, input2):
     bias = -1
@@ -31,3 +42,5 @@ def boolToBinary(bool1,bool2):
 
 input1, input2 = boolToBinary(True,True)
 print(orgate(input1,input2))
+
+print(averageOf3(1, 0, 3))
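
As a quick sanity check of the new averageOf3 neuron (a sketch, not part of the diff): with equal weights of 1/3 and zero bias, the weighted sum reduces to the arithmetic mean, so the call added above should print roughly 1.333. The snippet assumes linear_activation(z) simply returns z, as its name suggests; its actual body sits above this hunk and is not shown.

# Sketch only: linear_activation is assumed to be the identity (its body is not in the diff).
def linear_activation(z):
    return z

def averageOf3(input1, input2, input3):
    w1 = w2 = w3 = 1.0 / 3.0          # equal weights -> plain average
    bias = 0
    z = input1 * w1 + input2 * w2 + input3 * w3 + bias
    return linear_activation(z)

print(averageOf3(1, 0, 3))  # 1.3333..., the mean of 1, 0 and 3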

@@ -59,24 +59,34 @@ def random_nn(x):
 
     return a_3
 
-print("On 3 layer network, input {} fed forward gives {}".format(x, random_nn(x)))
+# print("On 3 layer network, input {} fed forward gives {}".format(x, random_nn(x)))
 
 # 4 layer NN for computing whether absolute difference is between 1 and 3
 # if between 1 and 3 outputs >0 else output <=0
 def multilayer(x):
+
+    # layer 2
     w1 = np.array([1,-1])
     b1 = 0
     weighted_input1 = np.matmul(w1,x) + b1
-    output1 = parametric_activation(-1,weighted_input1)
+
+    # output of layer 2
+    output2 = parametric_activation(-1, weighted_input1)
+
+    # layer 3
     w2 = np.array([1])
     b2 = -2
-    weighted_input2 = np.matmul(w2,[output1]) + b2
-    output2 = parametric_activation(-1,weighted_input2)
+    weighted_input2 = np.matmul(w2, [output2]) + b2
+
+    # output of layer 3
+    output3 = parametric_activation(-1, weighted_input2)
+
+    # final layer!
     w3 = np.array([-1])
     b3 = 1
-    weighted_input3 = np.matmul(w3,[output2]) + b3
+    weighted_input3 = np.matmul(w3, [output3]) + b3
     y = tanh_activation(weighted_input3)
     return y
 
 x = np.array([4,5.5])
-print(multilayer(x))
+# print(multilayer(x))
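
To see concretely what the renamed layers compute, here is a self-contained trace of multilayer (a sketch, not part of the diff). parametric_activation is defined elsewhere in the file; the version below is an assumption, a PReLU-style unit returning z when z >= 0 and a*z otherwise, so that a = -1 turns it into an absolute-value gate. Under that assumption the network outputs tanh(1 - ||x1 - x2| - 2|), which is positive exactly when |x1 - x2| lies between 1 and 3, matching the comment above the function.

# Sketch only: parametric_activation's real body is not shown in the diff;
# a PReLU-style definition is assumed here (z if z >= 0 else a * z).
import numpy as np

def parametric_activation(a, z):
    return np.where(z >= 0, z, a * z)

def tanh_activation(z):
    return np.tanh(z)

def multilayer(x):
    # layer 2: signed difference, then absolute value -> |x1 - x2|
    output2 = parametric_activation(-1, np.matmul(np.array([1, -1]), x) + 0)
    # layer 3: distance of |x1 - x2| from 2 -> ||x1 - x2| - 2|
    output3 = parametric_activation(-1, np.matmul(np.array([1]), [output2]) - 2)
    # final layer: tanh(1 - ||x1 - x2| - 2|)
    return tanh_activation(np.matmul(np.array([-1]), [output3]) + 1)

print(multilayer(np.array([4, 5.5])))  # |4 - 5.5| = 1.5 is in (1, 3) -> ~tanh(0.5) > 0
print(multilayer(np.array([4, 9])))    # |4 - 9| = 5 is outside      -> ~tanh(-2) < 0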