Step 1:
Import libraries
# Third-party imports, one statement per line (the original had several
# statements fused onto single lines, which is a syntax error).
import tensorflow as tf
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# %matplotlib inline  # IPython magic — uncomment only inside a Jupyter notebook
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix, classification_report
import scikitplot as skplt
from keras.models import Sequential
from keras.layers import Dense
from keras.callbacks import ModelCheckpoint
Step 2:
Process the data
def Data_Process(csv_path="pima-indians-diabetes.csv", test_size=0.3, random_state=101):
    """
    Read the Pima Indians diabetes CSV, min-max normalize the feature
    columns, and split into train/test sets.

    Parameters
    ----------
    csv_path : str
        Path to the dataset CSV (default keeps the original behavior).
        NOTE(review): header=0 assumes the file's first row is a header
        that the custom names replace — confirm against the actual file.
    test_size : float
        Fraction of rows held out for testing.
    random_state : int
        Seed for the reproducible split.

    Returns
    -------
    X_Train, X_Test, Y_Train, Y_Test : pandas objects from train_test_split.
    """
    # Column names assigned to the dataset (last column is the label).
    feature_names = ["Pregnancies", "Glucose", "BloodPressure",
                     "SkinThickness", "Insulin", "BMI",
                     "DiabetesPedigreeFunction", "Age", "Class"]
    df = pd.read_csv(csv_path, header=0, names=feature_names)

    # Every column except the label gets min-max scaled to [0, 1].
    col_norm = feature_names[:-1]
    df1_norm = df[col_norm].apply(lambda x: (x - x.min()) / (x.max() - x.min()))

    X_Data = df1_norm
    Y_Data = df["Class"]
    return train_test_split(X_Data, Y_Data,
                            test_size=test_size,
                            random_state=random_state)
# Load, normalize, and split the dataset; inspect the training-set shape.
X_Train, X_Test, Y_Train, Y_Test = Data_Process()
X_Train.shape
Step 3:
Create the model
# Feed-forward binary classifier: 8 inputs -> 12 -> 10 -> 1 (sigmoid).
# The `init=` keyword was removed in Keras 2; the current name is
# `kernel_initializer=` (the old form raises TypeError under these imports).
model = Sequential()
# 1st hidden layer: 12 nodes, ReLU, one input per feature column.
model.add(Dense(12, input_dim=8, kernel_initializer='uniform', activation='relu'))
# 2nd hidden layer: 10 nodes, ReLU.
model.add(Dense(10, kernel_initializer='uniform', activation='relu'))
# Output layer: single sigmoid unit for binary classification.
model.add(Dense(1, kernel_initializer='uniform', activation='sigmoid'))
# Binary cross-entropy + Adam; track accuracy during training.
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
Train the model
# Train for 200 epochs with mini-batches of 30, validating on the held-out
# split each epoch; the returned History records per-epoch loss/metrics.
history = model.fit(X_Train.to_numpy(),
                    Y_Train.to_numpy(),
                    epochs=200, batch_size=30,
                    validation_data=(X_Test.to_numpy(), Y_Test.to_numpy()),
                    verbose=2)
Plot the accuracy
# Plot train vs. validation accuracy per epoch.
# Keras >= 2.3 / TF2 logs the metric under 'accuracy'; older Keras used
# 'acc' — the hard-coded 'acc' key raises KeyError on current versions.
acc_key = 'accuracy' if 'accuracy' in history.history else 'acc'
plt.plot(history.history[acc_key])
plt.plot(history.history['val_' + acc_key])
plt.title('Model Accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'test'])
plt.show()
Plot the loss
# Plot train vs. validation loss per epoch (same axes, same order).
for series_key in ('loss', 'val_loss'):
    plt.plot(history.history[series_key])
plt.title('Model Loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'])
plt.show()
# Final evaluation on the held-out split. Pass NumPy arrays for
# consistency with how fit() was fed (the DataFrames were converted there).
scores = model.evaluate(X_Test.to_numpy(), Y_Test.to_numpy(), verbose=0)
# scores[1] is the accuracy metric compiled above; report it as a percentage.
print("%s: %.2f%%" % (model.metrics_names[1], scores[1]*100))
No comments:
Post a Comment