import numpy as np
from keras import layers
from keras.layers import Input, Dense, Activation, ZeroPadding2D, BatchNormalization, Flatten, Conv2D
from keras.layers import AveragePooling2D, MaxPooling2D, Dropout, GlobalMaxPooling2D, GlobalAveragePooling2D
from keras.models import Model, load_model
from keras.preprocessing import image
from keras.utils import layer_utils
from keras.utils.data_utils import get_file
from keras.applications.imagenet_utils import preprocess_input
import pydot
from IPython.display import SVG
from keras.utils.vis_utils import model_to_dot
from keras.utils import plot_model
#from kt_utils import *
import keras.backend as K
K.set_image_data_format('channels_last')
import matplotlib.pyplot as plt
from matplotlib.pyplot import imshow
import dlib
from PIL import Image
from skimage import io
# Load the pre-cropped face images and assign each person an integer label
def load_data(name, X, Y, label):
    for i in range(0, 30):
        filename_face = './images/Face_' + name + str(i) + '.jpg'
        img_path = filename_face
        img = image.load_img(img_path, target_size=(64, 64))  # target size must match the model input (height, width)
        x = image.img_to_array(img)
        x = np.expand_dims(x, axis=0)
        x = preprocess_input(x)
        # image = io.imread(img_path)
        X.append(x)
        Y.append(label)
    return X, Y
X=[]
Y=[]
X,Y=load_data('Jim',X,Y,0)
X,Y=load_data('Hua',X,Y,1)
X,Y=load_data('Yugang',X,Y,2)
X,Y=load_data('Alice',X,Y,3)
X = np.array(X).reshape([120, 64, 64, 3])  # 4 persons x 30 images = 120 samples
Y = np.array(Y).reshape([120, 1])
name_dic = {0: 'Jiangming Yao',
            1: 'Hua Mei',
            2: 'Yugang Yao',
            3: 'Alice Yao'}
plt.imshow(X[0])    # first training image (Jim); colours look distorted because preprocess_input has shifted the channels
plt.show()
plt.imshow(X[119])  # last training image (Alice)
plt.show()
In realistic applications, one should in principle divide the whole dataset randomly into training, development (validation), and test sets. Here, for simplicity, we use the entire dataset for training.
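As a minimal sketch of such a split (not used in the rest of this notebook), assuming scikit-learn is available; the 80/10/10 proportions and the variable names are purely illustrative:
# Illustrative stratified split into train / dev / test (hypothetical; the notebook keeps the full set for training)
from sklearn.model_selection import train_test_split
X_tr, X_tmp, Y_tr, Y_tmp = train_test_split(X, Y, test_size=0.2, random_state=0, stratify=Y)                  # 80% train
X_dev, X_test, Y_dev, Y_test = train_test_split(X_tmp, Y_tmp, test_size=0.5, random_state=0, stratify=Y_tmp)  # 10% dev, 10% test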
X_train = X
Y_train = Y
# Face classification model (adapted from the "HappyModel" assignment template)
def Face_Model(nclasses):
    """
    A simple CNN for face classification.

    Arguments:
    nclasses -- number of persons (output classes)

    Returns:
    model -- a Model() instance in Keras
    """
    input_shape = (64, 64, 3)  # must be consistent with the size of the loaded images
    X_input = Input(input_shape)
    X = ZeroPadding2D((3, 3))(X_input)
    X = Conv2D(32, (7, 7), strides=(1, 1), name='conv0')(X)
    X = BatchNormalization(axis=3, name='bn0')(X)
    X = Activation('relu')(X)
    X = MaxPooling2D((2, 2), name='max_pool')(X)
    X = Flatten()(X)
    X = Dense(nclasses, activation='softmax', name='fc')(X)  # softmax for multi-class classification (paired with categorical_crossentropy)
    model = Model(inputs=X_input, outputs=X, name='FaceModel')
    return model
nb_classes = 4
data = Y_train
def indices_to_one_hot(data, nb_classes):
    """Convert an iterable of class indices to one-hot encoded labels."""
    targets = np.array(data).reshape(-1)
    return np.eye(nb_classes)[targets]
Y_train_One = np.array(indices_to_one_hot(data, nb_classes))
Y_train_One[100]  # index 100 lies in the Alice block (label 3), so this is [0., 0., 0., 1.]
FaceModel = Face_Model(nclasses=4)
FaceModel.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
FaceModel.fit(X_train, Y_train_One, epochs=2, batch_size=20)
def check_person(filename):
    img_path = filename
    img = image.load_img(img_path, target_size=(64, 64))  # attention to target size
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    x = preprocess_input(x)
    imshow(img)
    print("This is a photo of: {}".format(name_dic[np.argmax(FaceModel.predict(x))]))
check_person('images/Face_Jim9.jpg')
check_person('images/Face_Alice9.jpg')
check_person('images/Face_Hua9.jpg')
check_person('images/Face_Yugang9.jpg')
check_person('images/Face_Trump01.jpg')
Oops, the model recognized Donald Trump as Yugang Yao!
check_person('images/Face_Xinru01.jpg')
check_person('images/Face_Xinru02.jpg')
Oops, the model recognized Lin Xinru as either Hua Mei or Yugang Yao!
Of course, the images of Trump and Xinru are not in the training dataset. The model can only assign each test photo to one of the four known classes: the softmax simply picks whichever class scores highest, even when none of them is a good match.
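A simple (hypothetical) remedy is to inspect the softmax probabilities and refuse to name a person when the top score is below a threshold; the helper below and its 0.8 threshold are only illustrative and may not be reliable for a model trained on 120 images for two epochs.
# Sketch: reject unknown faces via a confidence threshold (the 0.8 value is illustrative)
def check_person_with_threshold(filename, threshold=0.8):
    img = image.load_img(filename, target_size=(64, 64))
    x = preprocess_input(np.expand_dims(image.img_to_array(img), axis=0))
    probs = FaceModel.predict(x)[0]  # softmax probabilities over the four known persons
    best = np.argmax(probs)
    if probs[best] < threshold:
        print("Unknown person (best match: {}, probability {:.2f})".format(name_dic[best], probs[best]))
    else:
        print("This is a photo of: {} (probability {:.2f})".format(name_dic[best], probs[best]))
check_person_with_threshold('images/Face_Trump01.jpg')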