Sunday, June 9, 2019

TensorFlow on the IRIS Dataset: Which Optimizer Works Best? (Low-Level Graph API)


Hello, and welcome to my blog. In this post I want to show you which optimizer works best, and why you should always normalize the dataset. Before getting started with the tutorial, let me go through my results first.

In [2]:
%%html
<img src="https://media.licdn.com/dms/image/C4D12AQE3FmmfM1aALQ/article-inline_image-shrink_1500_2232/0?e=1565827200&v=beta&t=n5UWO_uxYyRowXwZorwx9Ynmx7MJz9JWkAJl6deHAKE" width=600 height=300>

Clearly, from my results, I think that whenever we perform a classification task it is necessary to use softmax in the last layer and sigmoid or ReLU in the first layer.

It is also very important that you do not overfit the network; we should usually use dropout and L2 regularization techniques to avoid overfitting. In most cases the Adam optimizer works best, and the learning rate should not be too high. It is also good practice to implement batch normalization.
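By the way, dropout and batch normalization are each just a line or two in the low-level Graph API. Here is a minimal runnable sketch on a toy hidden layer; the placeholders and the `tf.layers.dense` layer here are for illustration only and are not part of my code below:

In [ ]:
import tensorflow as tf

# Toy hidden layer, just to show where dropout and batch norm slot in.
X = tf.placeholder(tf.float64, shape=(None, 4), name="X")
is_training = tf.placeholder(tf.bool, name="is_training")
keep_prob = tf.placeholder(tf.float64, name="keep_prob")

Z1 = tf.layers.dense(X, 10)                            # toy pre-activation
Z1_bn = tf.layers.batch_normalization(Z1, training=is_training)
A1 = tf.sigmoid(Z1_bn)
A1_dropped = tf.nn.dropout(A1, keep_prob)              # feed 0.5 at train time, 1.0 at test time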

Let me show you my object-oriented code for the IRIS dataset, written with the low-level TensorFlow Graph API.

In this code I have not implemented L2 regularization or batch normalization.
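If you want to try adding the L2 regularization yourself, one way is to add a weight penalty to the loss. A small sketch with stand-in variables (the real `W1`, `W2`, and squared-error loss are defined in trainmodel below; 0.01 is just an illustrative coefficient):

In [ ]:
import tensorflow as tf
import numpy as np

# Stand-ins for the weight matrices defined in trainmodel below.
W1 = tf.Variable(np.random.uniform(-0.1, 0.1, size=(4, 10)))
W2 = tf.Variable(np.random.uniform(-0.1, 0.1, size=(10, 3)))
loss = tf.constant(0.0, dtype=tf.float64)              # stand-in for the squared-error loss

# L2 penalty on both weight matrices, added to the existing loss tensor.
l2_penalty = tf.nn.l2_loss(W1) + tf.nn.l2_loss(W2)
loss = loss + 0.01 * l2_penalty                        # 0.01 = illustrative regularization strength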

Step 1: Process the Data (Clean the Dataset, Normalize the Dataset, Perform a Train/Test Split; see the normalization sketch after this list)

Step 2: Create a Function to Plot Loss

Step 3: Create a Train Function

Step 4: Create a Test Function (Compute Accuracy)

Step 5: Create the Model and Play with Hyperparameters
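Before the full code, here is the min-max normalization from Step 1 in isolation; the toy DataFrame is just for illustration, and the real code applies the same `apply` call to the IRIS feature columns:

In [ ]:
import pandas as pd

# Min-max scaling: rescale each feature column into [0, 1] so that no single
# feature dominates the early gradient updates.
def min_max_normalize(df):
    return df.apply(lambda x: (x - x.min()) / (x.max() - x.min()))

# Toy example: two features on very different scales end up comparable.
toy = pd.DataFrame({"SepalLengthCm": [4.3, 5.8, 7.9], "PetalWidthCm": [0.1, 1.3, 2.5]})
print(min_max_normalize(toy))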

In [ ]:
try:
    import tensorflow as tf

    # For Data Processing
    import numpy as np
    import pandas as pd

    # For Plotting
    import matplotlib.pyplot as plt
    from matplotlib import style
    # For Data Processing
    from sklearn.model_selection import train_test_split
    from sklearn.metrics import confusion_matrix,classification_report
    style.use('ggplot')
    print('Library Loaded .........')
except ImportError:
    print('One or more libraries were not found!')


class NN(object):
    def __init__(self, learning_rate=0.01, numEpoch=200, num_hidden_nodes=(5, 10, 20)):
        self.X_Train, self.X_Test, self.Y_Train, self.Y_Test = self.readTrainTestData()
        # Define the Network parameters
        self.numFeature_map = self.X_Train.shape[1]
        self.output = 3
        self.num_hidden_nodes = num_hidden_nodes

        # One loss curve per candidate hidden-layer size
        self.lossPlotData = {n: [] for n in self.num_hidden_nodes}
        self.learning_rate = learning_rate
        self.numEpoch = numEpoch

        # One parameter set per candidate hidden-layer size; filled in below
        self.W1, self.W2, self.B1, self.B2 = {}, {}, {}, {}

        for hidden_node in self.num_hidden_nodes:
            self.W1[hidden_node] = np.random.uniform(low=-0.1, high=0.1, size=(self.numFeature_map,hidden_node))
            self.W2[hidden_node] = np.random.uniform(low=-0.1, high=0.1, size=(hidden_node, self.output))
            self.B1[hidden_node] = np.zeros(shape=(hidden_node))
            self.B2[hidden_node] = np.zeros(shape=(self.output))

    def readTrainTestData(self):

        names = ['SepalLengthCm', 'SepalWidthCm', 'PetalLengthCm', 'PetalWidthCm',
                 'Species']
        path = "IRIS.csv"
        df = pd.read_csv(path, names=names, header=0)
        columns_norm = ['SepalLengthCm', 'SepalWidthCm', 'PetalLengthCm', 'PetalWidthCm']

        X_Data = df[columns_norm]
        X_Data = X_Data.apply(lambda x:( (x - x.min()) / (x.max()-x.min())))
        Y_Data = df["Species"]
        Y_Data = pd.get_dummies(Y_Data)

        # Y_Data = df["Species"].map({
        #    "Iris-setosa":0,
        #    "Iris-virginica":1,
        #    "Iris-versicolor":2})

        X_Train, X_Test, Y_Train, Y_Test = train_test_split(X_Data,
                                                            Y_Data,
                                                            test_size=0.3,
                                                            random_state=101)
        return X_Train, X_Test, Y_Train, Y_Test

    def plot_loss(self):
        plt.figure(figsize=(8,8))
        for hidden_node in self.num_hidden_nodes:
            plt.plot(range(self.numEpoch), self.lossPlotData[hidden_node],
                     label="NN: 4-{}-3".format(hidden_node))

        plt.xlabel("Iteration", fontsize=12)
        plt.ylabel("Loss", fontsize=12)
        plt.legend(fontsize=12)
        plt.show()

    def trainAndTestArchitecture(self):

        for hidden_node in self.num_hidden_nodes:
            self.trainmodel(hidden_node)

        self.plot_loss()
        self.computeAccuracy()

    def computeAccuracy(self):
        # Evaluate each trained architecture on the held-out test set,
        # rebuilding the forward pass from the saved weights
        tf.reset_default_graph()
        X = tf.placeholder(shape=(self.X_Test.shape[0], self.numFeature_map), dtype=tf.float64, name="X")
        Y = tf.placeholder(shape=(self.Y_Test.shape[0], self.output), dtype=tf.float64, name="Y")

        for hidden_node in self.num_hidden_nodes:
            # Weights and bias
            W1 = tf.Variable(self.W1[hidden_node])
            W2 = tf.Variable(self.W2[hidden_node])
            B1 = tf.Variable(self.B1[hidden_node])
            B2 = tf.Variable(self.B2[hidden_node])

            # Forward pass
            A1 = tf.sigmoid(tf.add(tf.matmul(X, W1), B1))
            A2 = tf.nn.softmax(tf.add(tf.matmul(A1, W2), B2))

            # run inside session
            init = tf.global_variables_initializer()
            with tf.Session() as sess:
                sess.run(init)
                A2p = sess.run(A2, feed_dict={X: self.X_Test,
                                              Y: self.Y_Test})
            correct = [estimate.argmax(axis=0) == target.argmax(axis=0)
                       for estimate, target in zip(A2p,self.Y_Test.values)]
            accuracy = 100 * (sum(correct) / len(correct) )
            print("Network Architecture 4-{}-3 Accuracy : {}".format(hidden_node, accuracy))

    def trainmodel(self,hidden_node):
        tf.reset_default_graph()

        batchsize = 10

        X = tf.placeholder(shape=(batchsize,self.numFeature_map), dtype=tf.float64,name='X')
        Y = tf.placeholder(shape=(batchsize, self.output), dtype=tf.float64, name="Y")

        # Weights and bias
        W1 = tf.Variable(self.W1[hidden_node],dtype=tf.float64)
        W2 = tf.Variable(self.W2[hidden_node], dtype=tf.float64)
        B1 = tf.Variable(self.B1[hidden_node], dtype=tf.float64)
        B2 = tf.Variable(self.B2[hidden_node], dtype=tf.float64)

        # Forward pass
        A1 = tf.sigmoid(tf.add(tf.matmul(X, W1), B1))
        A2 = tf.nn.softmax(tf.add(tf.matmul(A1, W2), B2))

        error = tf.square(A2 - Y)
        loss = tf.reduce_sum(error)

        optimizer = tf.train.AdagradOptimizer(learning_rate=self.learning_rate)
        #optimizer = tf.train.AdamOptimizer(learning_rate=self.learning_rate)
        #optimizer = tf.train.GradientDescentOptimizer(learning_rate=self.learning_rate)
        train = optimizer.minimize(loss)

        init = tf.global_variables_initializer()
        sess = tf.Session()
        sess.run(init)

        for i in range(self.numEpoch):
            # Mini-batch loop; a final partial batch (fewer than batchsize rows) is skipped
            for j in range(0, int(self.X_Train.shape[0] / batchsize)):

                res = sess.run([loss,train],
                               feed_dict={X:self.X_Train[j*batchsize:(j+1)*batchsize],
                                          Y:self.Y_Train[j*batchsize:(j+1)*batchsize]})

                # Record the loss once per epoch, at the last full batch
                if j == int(self.X_Train.shape[0] / batchsize) - 1:
                    self.lossPlotData[hidden_node].append(res[0])

        # Snapshot the learned parameters once after training, so that
        # computeAccuracy can rebuild each network from them
        self.W1[hidden_node] = sess.run(W1)
        self.W2[hidden_node] = sess.run(W2)
        self.B1[hidden_node] = sess.run(B1)
        self.B2[hidden_node] = sess.run(B2)

        writer = tf.summary.FileWriter("myoutput", sess.graph)
        writer.close()
        print("Epoch: {}    losss: ".format(self.numEpoch, self.lossPlotData[hidden_node][-1]))
        sess.close()


if __name__ == "__main__":
    network = NN(numEpoch=500, learning_rate=0.1)
    network.trainAndTestArchitecture()
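One last note: the imports above pull in confusion_matrix and classification_report, but the code never calls them. Here is a sketch of how they could be used on the test predictions; `A2p` stands for the softmax outputs computed inside computeAccuracy, which you would need to return or store first, so treat that part as an assumption:

In [ ]:
# Assumes `A2p` (softmax outputs for the test set) has been returned or stored
# from computeAccuracy -- the code above keeps it local to that method.
y_true = network.Y_Test.values.argmax(axis=1)   # one-hot labels -> class indices
y_pred = A2p.argmax(axis=1)                     # softmax outputs -> class indices
print(confusion_matrix(y_true, y_pred))
print(classification_report(y_true, y_pred))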

