Face recognition for my family

In [18]:
import numpy as np
from keras import layers
from keras.layers import Input, Dense, Activation, ZeroPadding2D, BatchNormalization, Flatten, Conv2D
from keras.layers import AveragePooling2D, MaxPooling2D, Dropout, GlobalMaxPooling2D, GlobalAveragePooling2D
from keras.models import Model, load_model
from keras.preprocessing import image
from keras.utils import layer_utils
from keras.utils.data_utils import get_file
from keras.applications.imagenet_utils import preprocess_input
import pydot
from IPython.display import SVG
from keras.utils.vis_utils import model_to_dot
from keras.utils import plot_model
#from kt_utils import *

import keras.backend as K
K.set_image_data_format('channels_last')
import matplotlib.pyplot as plt
from matplotlib.pyplot import imshow
 

import dlib
from PIL import Image
from skimage import io 

Load image data

In [94]:
# Detect faces
def load_data(name,X,Y,label): 
    for i in range(0,30):
        filename_face='./images/Face_'+name+str(i)+'.jpg'
        img_path = filename_face
        img = image.load_img(img_path, target_size=(64, 64, 3)) # attention to target size 
        x = image.img_to_array(img)
        x = np.expand_dims(x, axis=0)
        x = preprocess_input(x)
        #image = io.imread(img_path)
        X.append(x)
        Y.append(label)
    return X,Y
In [95]:
X=[]
Y=[]
X,Y=load_data('Jim',X,Y,0)
X,Y=load_data('Hua',X,Y,1)
X,Y=load_data('Yugang',X,Y,2)
X,Y=load_data('Alice',X,Y,3)
In [96]:
X = np.array(X).reshape([120,64,64,3])
Y = np.array(Y).reshape([120,1])
In [143]:
name_dic ={0: 'Jiangming Yao',
          1: 'Hua Mei',
          2: 'Yugang Yao',
          3: 'Alice Yao'}
In [73]:
plt.imshow(X[0])
Clipping input data to the valid range for imshow with RGB data ([0..1] for floats or [0..255] for integers).
Out[73]:
<matplotlib.image.AxesImage at 0x12a21cf60>
In [74]:
plt.imshow(X[119])
Clipping input data to the valid range for imshow with RGB data ([0..1] for floats or [0..255] for integers).
Out[74]:
<matplotlib.image.AxesImage at 0x12a2ac4e0>

Training model with the whole dataset

In realistic applications, one in principle needs to divide the whole dataset randomly into train, demo, and test sets. Here, we just take the entire dataset into training model for the sake of simplicity.

In [157]:
X_train = X
Y_train = Y
In [158]:
# GRADED FUNCTION: FaceModel

def Face_Model(nclasses):
    """
    Implementation of the HappyModel.
    
    Arguments:
    input_shape -- shape of the images of the dataset

    Returns:
    model -- a Model() instance in Keras
    """ 
    input_shape=(64,64,3)  # This should be consistent with the size of images
    
    X_input = Input(input_shape)
    
    X = ZeroPadding2D((3,3))(X_input)
    X = Conv2D(32,(7,7),strides=(1,1),name='conv0')(X)
    X = BatchNormalization(axis=3,name='bn0')(X)
    X = Activation('relu')(X)
    
    X = MaxPooling2D((2,2), name='max_pool')(X)
    
    X = Flatten()(X)
    X = Dense(nclasses, activation='sigmoid', name='fc')(X)
    
    model = Model(inputs = X_input, outputs = X, name='FaceModel')
    
    
    ### END CODE HERE ###
    
    return model
In [159]:
nb_classes = 4
data = Y_train

def indices_to_one_hot(data, nb_classes):
    """Convert an iterable of indices to one-hot encoded labels."""
    targets = np.array(data).reshape(-1)
    return np.eye(nb_classes)[targets]

Y_train_One = np.array(indices_to_one_hot(data, nb_classes))
In [128]:
Y_train_One[100]
Out[128]:
array([0., 0., 0., 1.])
In [160]:
FaceModel = Face_Model(nclasses=4) 
FaceModel.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
FaceModel.fit(X_train, Y_train_One, epochs=2, batch_size=20) 
Epoch 1/2
120/120 [==============================] - 1s 11ms/step - loss: 0.8442 - acc: 0.7750
Epoch 2/2
120/120 [==============================] - 1s 7ms/step - loss: 0.0687 - acc: 0.9833
Out[160]:
<keras.callbacks.History at 0x12bbb9a20>

Prediction

In [155]:
def check_person(filename):
    img_path = filename 
    img = image.load_img(img_path, target_size=(64, 64)) # attention to target size
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    x = preprocess_input(x)
    imshow(img)
    print("This is a photo of: {}".format(name_dic[np.argmax(FaceModel.predict(x))]))
In [151]:
check_person('images/Face_Jim9.jpg')
This is a photo of: Jiangming Yao
In [152]:
check_person('images/Face_Alice9.jpg')
This is a photo of: Alice Yao
In [153]:
check_person('images/Face_Hua9.jpg')
This is a photo of: Hua Mei
In [154]:
check_person('images/Face_Yugang9.jpg')
This is a photo of: Yugang Yao
In [163]:
check_person('images/Face_Trump01.jpg')
This is a photo of: Yugang Yao

Oops, the model recognized Donald Trump as Yugang Yao !

In [164]:
check_person('images/Face_Xinru01.jpg')
This is a photo of: Hua Mei
In [165]:
check_person('images/Face_Xinru02.jpg')
This is a photo of: Yugang Yao

Oops, the model recognized Lin Xinru as either Hua Mei or Yugang Yao!

It is well known that the images of Trump and Xinru are not in the training dataset. The model simply categorized the test photos into the given four classes.