If you need the source code and dataset, please like, follow, and bookmark, then leave a comment or send a private message~~~
Below we use the TensorFlow platform for a hands-on face recognition project based on the Olivetti Faces image dataset. Part of the dataset is shown below.
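If you want to preview a few sample faces yourself, the following minimal sketch (not part of the original program) displays the first image of the first few people with matplotlib; it assumes the same faces_4/ layout as the training code later in this post, i.e. one sub-directory per person containing 57x47 grayscale crops.
import matplotlib.pyplot as plt
from os import listdir
from PIL import Image
imgDataDir = 'faces_4/'                      # assumed layout: one folder per person
persons = sorted(listdir(imgDataDir))[:4]    # preview the first four people
fig, axes = plt.subplots(1, len(persons), figsize=(8, 2))
for ax, person in zip(axes, persons):
    firstFile = sorted(listdir(imgDataDir + person))[0]
    face = Image.open(imgDataDir + person + '/' + firstFile).convert('L')
    ax.imshow(face, cmap='gray')
    ax.set_title(person)
    ax.axis('off')
plt.show()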
The program's training process is shown below.
Next we train the CNN model; the training progress and the change in the loss value can be observed (a sketch for plotting this loss curve is given after the code listing below).
Next, the face recognition results are shown.
Given a single picture, the program automatically searches the image set for similar faces, as shown in the figure above.
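As a rough illustration of this recognition step (the full recognition code is not reproduced in this post), the sketch below loads the saved face_recognition.h5 model, predicts the class of one query image, and lists the images stored in the folder of the predicted person. The query path is a hypothetical example, and the 57x47 grayscale preprocessing, the faces_4/ layout, and the class-index-to-folder mapping are assumed to match the training code below.
from os import listdir
import numpy as np
from PIL import Image
from tensorflow.keras.models import load_model
model = load_model('face_recognition.h5')                        # model saved by the training script
query = np.asarray(Image.open('faces_4/s1/1.pgm').convert('L'))  # hypothetical query image path
x = query.reshape(1, 57, 47, 1).astype('float32') / 255.0        # same preprocessing as training
classIdx = int(np.argmax(model.predict(x), axis=1)[0])           # index of the predicted person
FileDir = listdir('faces_4/')                                    # same ordering as GetDataset below
personDir = FileDir[classIdx]                                    # map the class index back to a folder
print('Predicted person:', personDir)
print('Similar faces:', listdir('faces_4/' + personDir))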
Part of the code is shown below. For the full source code and dataset, please like, follow, and bookmark, then leave a comment or send a private message~~~
from os import listdir
import numpy as np
from PIL import Image
import cv2
from tensorflow.keras.models import Sequential, load_model
from tensorflow.keras.layers import Dense, Activation, Convolution2D, MaxPooling2D, Flatten
from sklearn.model_selection import train_test_split
from tensorflow.keras.utils import to_categorical
# Read one face image into a 57x47 grayscale array
def img2vector(fileNamestr):
    image = Image.open(fileNamestr).convert('L')   # open the image and convert it to grayscale
    img = np.asarray(image).reshape(57, 47)        # enforce the expected 57x47 shape
    return img
# Build the face dataset from the image directory
def GetDataset(imgDataDir):
    print('| Step1 |: Get dataset...')
    FileDir = listdir(imgDataDir)      # one sub-directory per person
    m = len(FileDir)
    hwLabels = []
    hwdata = []
    # Read the image files person by person
    for i in range(m):
        # Each sub-directory corresponds to one class
        className = i
        subdirName = imgDataDir + str(FileDir[i]) + '/'
        fileNames = listdir(subdirName)
        lenFiles = len(fileNames)
        # Read every image file of this person
        for j in range(lenFiles):
            fileNamestr = subdirName + fileNames[j]
            hwLabels.append(className)
            imgarray = img2vector(fileNamestr)
            hwdata.append(imgarray)
    hwdata = np.array(hwdata)
    return hwdata, hwLabels, m         # m = number of classes (sub-directories)
# CNN model class
class MyCNN(object):
    FILE_PATH = "face_recognition.h5"   # path for saving/loading the model
    picHeight = 57                      # face images are 57 pixels high
    picWidth = 47                       # and 47 pixels wide

    def __init__(self):
        self.model = None

    # Attach the training dataset
    def read_trainData(self, dataset):
        self.dataset = dataset

    # Build the Sequential model and set its parameters
    def build_model(self):
        print('| Step2 |: Init CNN model...')
        self.model = Sequential()
        print('self.dataset.X_train.shape[1:]', self.dataset.X_train.shape[1:])
        self.model.add(Convolution2D(filters=32,
                                     kernel_size=(5, 5),
                                     padding='same',
                                     input_shape=self.dataset.X_train.shape[1:]))
        self.model.add(Activation('relu'))
        self.model.add(MaxPooling2D(pool_size=(2, 2),
                                    strides=(2, 2),
                                    padding='same'))
        self.model.add(Convolution2D(filters=64,
                                     kernel_size=(5, 5),
                                     padding='same'))
        self.model.add(Activation('relu'))
        self.model.add(MaxPooling2D(pool_size=(2, 2),
                                    strides=(2, 2),
                                    padding='same'))
        self.model.add(Flatten())
        self.model.add(Dense(512))
        self.model.add(Activation('relu'))
        self.model.add(Dense(self.dataset.num_classes))
        self.model.add(Activation('softmax'))
        self.model.summary()

    # Train the model
    def train_model(self):
        print('| Step3 |: Train CNN model...')
        self.model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
        # epochs: number of training passes; batch_size: samples per gradient update
        self.model.fit(self.dataset.X_train, self.dataset.Y_train, epochs=10, batch_size=20)

    def evaluate_model(self):
        loss, accuracy = self.model.evaluate(self.dataset.X_test, self.dataset.Y_test)
        print('| Step4 |: Evaluate performance...')
        print('===================================')
        print('Loss Value is :', loss)
        print('Accuracy Value is :', accuracy)

    def save(self, file_path=FILE_PATH):
        print('| Step5 |: Save model...')
        self.model.save(file_path)
        print('Model', file_path, 'is successfully saved.')
# A class for storing and formatting the training data
class DataSet(object):
    def __init__(self, path):
        self.num_classes = None
        self.X_train = None
        self.X_test = None
        self.Y_train = None
        self.Y_test = None
        self.picWidth = 47
        self.picHeight = 57
        self.makeDataSet(path)   # read the training data under path during initialization

    def makeDataSet(self, path):
        # Read the images, labels and number of classes from the given path
        imgs, labels, clasNum = GetDataset(path)
        # Shuffle and split the dataset
        X_train, X_test, y_train, y_test = train_test_split(imgs, labels, test_size=0.2, random_state=1)
        # Reshape to channels-last (height, width, 1) and normalize to [0, 1]
        X_train = X_train.reshape(X_train.shape[0], self.picHeight, self.picWidth, 1) / 255.0
        X_test = X_test.reshape(X_test.shape[0], self.picHeight, self.picWidth, 1) / 255.0
        X_train = X_train.astype('float32')
        X_test = X_test.astype('float32')
        # Convert the labels to binary class matrices (one-hot encoding)
        Y_train = to_categorical(y_train, num_classes=clasNum)
        Y_test = to_categorical(y_test, num_classes=clasNum)
        # Store the formatted data on the instance
        self.X_train = X_train
        self.X_test = X_test
        self.Y_train = Y_train
        self.Y_test = Y_test
        self.num_classes = clasNum
# Directory of face images
dataset = DataSet('faces_4/')
model = MyCNN()
model.read_trainData(dataset)
model.build_model()
model.train_model()
model.evaluate_model()
model.save()
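To reproduce the loss/accuracy curve mentioned earlier, one option (not part of the original post) is to keep the History object that Keras model.fit returns. The sketch below continues from the script above and trains for a few more epochs just to collect the curve.
import matplotlib.pyplot as plt
# Continue from the script above: model.model is the trained Keras model
history = model.model.fit(dataset.X_train, dataset.Y_train, epochs=10, batch_size=20)
plt.plot(history.history['loss'], label='loss')
plt.plot(history.history['accuracy'], label='accuracy')
plt.xlabel('epoch')
plt.legend()
plt.show()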
Creating content is not easy; if you found this helpful, please like, follow, and bookmark~~~