import time
import numpy as np
import tensorflow as tf
import tensorlayer as tl
from tensorlayer.layers import Dense, Dropout, Input
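# NOTE (added): this example assumes TensorLayer 2.x on TensorFlow 2 in eager mode.
# The lines elided below presumably import the Model base class (e.g.
# `from tensorlayer.models import Model`) and load MNIST, e.g. via
# tl.files.load_mnist_dataset(shape=(-1, 784)) -- an assumption, not shown here.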
@@ -19,7 +17,6 @@ class CustomModel(Model):

    def __init__(self):
        super(CustomModel, self).__init__()
        self.dropout1 = Dropout(keep=0.8)  # (self.innet)
        self.dense1 = Dense(n_units=800, act=tf.nn.relu, in_channels=784)  # (self.dropout1)
        self.dropout2 = Dropout(keep=0.8)  # (self.dense1)
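        # (added note) TensorLayer's Dropout takes `keep` -- the probability of
        # KEEPING a unit (keep=0.8 drops 20%) -- unlike Keras' Dropout(rate), where
        # the argument is the drop probability. The trailing `#(...)` comments show
        # how the layers would be wired in the static-model API; in this dynamic
        # model they are chained inside forward() instead.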
@@ -52,27 +49,20 @@ def forward(self, x, foo=None):
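## (added, hedged) the elided setup presumably looks something like the sketch
## below -- these exact names are referenced by the training loop but their
## definitions are not part of this hunk:
# MLP = CustomModel()
# n_epoch, batch_size, print_freq = 50, 500, 5       # assumed values
# train_weights = MLP.trainable_weights
# optimizer = tf.optimizers.Adam(learning_rate=0.0001)  # assumed optimizer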
for epoch in range(n_epoch):  ## iterate the dataset n_epoch times
    start_time = time.time()
    ## iterate over the entire training set once (shuffle the data while training)
    for X_batch, y_batch in tl.iterate.minibatches(X_train, y_train, batch_size, shuffle=True):
        MLP.train()  # enable dropout
        with tf.GradientTape() as tape:
            ## compute outputs
            _logits = MLP(X_batch, foo=1)
            ## compute loss and update model
            _loss = tl.cost.cross_entropy(_logits, y_batch, name='train_loss')
        grad = tape.gradient(_loss, train_weights)
        optimizer.apply_gradients(zip(grad, train_weights))
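        # (added note) tape.gradient() is called once the `with` block exits: the
        # tape records the forward ops eagerly, and a non-persistent GradientTape
        # can compute gradients only once. zip() pairs each gradient with its
        # corresponding trainable weight for apply_gradients().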

    ## use training and evaluation sets to evaluate the model every print_freq epoch
    if epoch + 1 == 1 or (epoch + 1) % print_freq == 0:
        MLP.eval()  # disable dropout
        print("Epoch {} of {} took {}".format(epoch + 1, n_epoch, time.time() - start_time))
        train_loss, train_acc, n_iter = 0, 0, 0
        for X_batch, y_batch in tl.iterate.minibatches(X_train, y_train, batch_size, shuffle=False):
            _logits = MLP(X_batch, foo=1)
@@ -81,7 +71,6 @@ def forward(self, x, foo=None):
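            # (added, hedged) the lines elided here -- and in the matching val
            # blocks below -- presumably accumulate the batch metrics, e.g.:
            #   _loss = tl.cost.cross_entropy(_logits, y_batch, name='eval_loss')
            #   train_loss += _loss
            #   train_acc += np.mean(np.equal(np.argmax(_logits, 1), y_batch))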
            n_iter += 1
        print("   train foo=1 loss: {}".format(train_loss / n_iter))
        print("   train foo=1 acc:  {}".format(train_acc / n_iter))
        val_loss, val_acc, n_iter = 0, 0, 0
        for X_batch, y_batch in tl.iterate.minibatches(X_val, y_val, batch_size, shuffle=False):
            _logits = MLP(X_batch, foo=1)  # is_train=False, disable dropout
@@ -90,7 +79,6 @@ def forward(self, x, foo=None):
            n_iter += 1
        print("   val foo=1 loss: {}".format(val_loss / n_iter))
        print("   val foo=1 acc:  {}".format(val_acc / n_iter))
        val_loss, val_acc, n_iter = 0, 0, 0
        for X_batch, y_batch in tl.iterate.minibatches(X_val, y_val, batch_size, shuffle=False):
            _logits = MLP(X_batch)  # is_train=False, disable dropout
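            # (added note) this third evaluation pass omits `foo`, exercising the
            # foo=None default branch of forward(self, x, foo=None); its loss/acc
            # prints fall outside this hunk.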