Friday, May 17, 2019

Neural Network in Keras Identifies Cats and Dogs with 71% Accuracy in 3 Epochs

Keras Image Dataset

Import Library

In [15]:
import tensorflow as tf
import numpy as np
import os
import sys
import cv2
import matplotlib.pyplot as plt
import pickle
import random
import pandas as pd

from tensorflow.keras import Sequential
from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten, Conv2D, MaxPooling2D

Declare the path and categories for the training data

In [2]:
DATA_DIR = "/Users/soumilshah/IdeaProjects/mytensorflow/Dataset/training_set"
CATEGORIES = ['cats', 'dogs']
IMAGE_SIZE = 50
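This assumes training_set contains one sub-folder per category, each holding the image files:

training_set/
    cats/
        (cat images)
    dogs/
        (dog images)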

Load dataset

In [3]:
def create_training_data():
    training_data = []
    for category in CATEGORIES:
        path = os.path.join(DATA_DIR, category)
        class_num = CATEGORIES.index(category)
        for img in os.listdir(path):
            try:
                # read as grayscale and resize to a fixed 50x50 input
                img_array = cv2.imread(os.path.join(path, img), cv2.IMREAD_GRAYSCALE)
                new_array = cv2.resize(img_array, (IMAGE_SIZE, IMAGE_SIZE))
                training_data.append([new_array, class_num])
            except Exception:
                # skip unreadable or corrupt image files
                pass
    return training_data

Convert the list to a NumPy ndarray

In [4]:
data = np.asarray(create_training_data())
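The images are read one category at a time, so the rows of data are ordered (all cats first, then dogs). Shuffling the rows before separating features and labels keeps a later split from containing only one class; a minimal sketch (not part of the original run):

np.random.seed(42)       # optional: make the shuffle reproducible
np.random.shuffle(data)  # shuffles the [image, label] rows in place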
In [5]:
x_data = []
y_data = []

for x in data:
    x_data.append(x[0])
    y_data.append(x[1])

Normalize the images

In [7]:
x_data_np = np.asarray(x_data)/255.0
y_data_np = np.asarray(y_data)

Store the arrays in pickle files so you don't have to reload the images every time

In [8]:
pickle_out = open('x_data_np','wb')
pickle.dump(x_data_np,pickle_out)
pickle_out.close()

In [9]:
pickle_out = open('y_data_np','wb')
pickle.dump(y_data_np,pickle_out)
pickle_out.close()

Load the ndarrays back from the pickle files

In [10]:
X_Temp = open('x_data_np','rb')
x_data_np = pickle.load(X_Temp)

Y_Temp = open('y_data_np','rb')
y_data_np = pickle.load(Y_Temp)
In [13]:
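# reshape to (samples, height, width, channels): Conv2D expects a single grayscale channel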
x_data_np = x_data_np.reshape(-1, 50, 50, 1)

Perform train test split

In [14]:
from sklearn.model_selection import train_test_split
X_Train, X_Test, Y_Train, Y_Test = train_test_split(x_data_np, y_data_np, test_size=0.3,random_state=101)

Create the model

In [18]:
model = Sequential()

# first convolution block: 150 filters of size 3x3 over the 50x50x1 grayscale input
model.add(Conv2D(150, (3, 3), input_shape=x_data_np.shape[1:]))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))

# second convolution block: 75 filters of size 3x3
model.add(Conv2D(75, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))

# flatten and classify: a single sigmoid unit for the binary cat/dog label
model.add(Flatten())
model.add(Dense(64))
model.add(Dense(1))
model.add(Activation('sigmoid'))

model.compile(loss='binary_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
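A quick way to sanity-check the layer output shapes and parameter counts before training:

model.summary()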
In [19]:
model.fit(x_data_np, y_data_np, batch_size=32, epochs=3, validation_split=0.3)
Train on 5600 samples, validate on 2400 samples
WARNING:tensorflow:From /anaconda3/lib/python3.7/site-packages/tensorflow/python/ops/math_ops.py:3066: to_int32 (from tensorflow.python.ops.math_ops) is deprecated and will be removed in a future version.
Instructions for updating:
Use tf.cast instead.
Epoch 1/3
5600/5600 [==============================] - 68s 12ms/sample - loss: 0.6125 - acc: 0.7064 - val_loss: 1.1417 - val_acc: 0.0000e+00
Epoch 2/3
5600/5600 [==============================] - 69s 12ms/sample - loss: 0.5900 - acc: 0.7145 - val_loss: 1.0541 - val_acc: 0.0012
Epoch 3/3
5600/5600 [==============================] - 72s 13ms/sample - loss: 0.5637 - acc: 0.7198 - val_loss: 0.9673 - val_acc: 0.2829
Out[19]:
<tensorflow.python.keras.callbacks.History at 0xb4d9c5e10>
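Note that the fit call above trains on the full arrays and carves its validation set out with validation_split, so the X_Train/X_Test split created earlier goes unused. To score the model on the hold-out set instead, one option (a sketch, not the run shown above) is:

model.fit(X_Train, Y_Train, batch_size=32, epochs=3, validation_split=0.1)
test_loss, test_acc = model.evaluate(X_Test, Y_Test, batch_size=32)
print('test accuracy:', test_acc)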
