深度学习实践-人脸活体检测(NUAA)

1.使用数据集NUAA

数据地址链接:https://pan.baidu.com/s/1XocT...
提取码:oimq

2.读取数据

  1. 训练集
def read_train_file(base_path=r'E:\ml\fas\data\NUAA'):
    """Load the training split into a DataFrame.

    Parses ``<base_path>/train.txt`` where each non-empty line has the
    form ``<image_path>,<label>``.

    Args:
        base_path: Dataset root directory. Defaults to the original
            hard-coded location, so existing callers are unaffected.

    Returns:
        pandas.DataFrame with string columns 'image' and 'label'.
    """
    train_file_path = os.path.join(base_path, "train.txt")
    train_image_path_list = []
    train_labels_list = []
    with open(train_file_path) as f:
        for line in f:
            line = line.strip()
            if not line:
                # Skip blank lines (e.g. a trailing newline at EOF).
                continue
            parts = line.split(',')
            train_image_path_list.append(parts[0])
            # Stripping is essential: the original kept readlines()'s
            # trailing '\n', so '0\n' (mid-file) and '0' (last line)
            # would silently become two different class labels.
            train_labels_list.append(parts[1].strip())
    train_df = pd.DataFrame(list(zip(train_image_path_list, train_labels_list)),
                            columns=['image', 'label'])
    return train_df
  2. 验证集

def read_val_file(base_path=r'E:\ml\fas\data\NUAA'):
    """Load the validation split into a DataFrame.

    Parses ``<base_path>/val.txt`` where each non-empty line has the
    form ``<image_path>,<label>``.

    Args:
        base_path: Dataset root directory. Defaults to the original
            hard-coded location, so existing callers are unaffected.

    Returns:
        pandas.DataFrame with string columns 'image' and 'label'.
    """
    val_file_path = os.path.join(base_path, "val.txt")
    val_image_path_list = []
    val_labels_list = []
    with open(val_file_path) as f:
        for line in f:
            line = line.strip()
            if not line:
                # Skip blank lines (e.g. a trailing newline at EOF).
                continue
            parts = line.split(',')
            val_image_path_list.append(parts[0])
            # strip() removes the newline readlines() would keep, so the
            # last (newline-less) line maps to the same class as the rest.
            val_labels_list.append(parts[1].strip())
    val_df = pd.DataFrame(list(zip(val_image_path_list, val_labels_list)),
                          columns=['image', 'label'])
    return val_df
  3. 测试集

def read_test_file(base_path=r'E:\ml\fas\data\NUAA'):
    """Load the test split into a DataFrame.

    Parses ``<base_path>/test.txt`` where each non-empty line has the
    form ``<image_path>,<label>``.

    Args:
        base_path: Dataset root directory. Defaults to the original
            hard-coded location, so existing callers are unaffected.

    Returns:
        pandas.DataFrame with string columns 'image' and 'label'.
    """
    test_file_path = os.path.join(base_path, "test.txt")
    test_image_path_list = []
    test_labels_list = []
    with open(test_file_path) as f:
        for line in f:
            line = line.strip()
            if not line:
                # Skip blank lines (e.g. a trailing newline at EOF).
                continue
            parts = line.split(',')
            test_image_path_list.append(parts[0])
            # strip() keeps labels newline-free, avoiding phantom classes.
            test_labels_list.append(parts[1].strip())
    test_df = pd.DataFrame(list(zip(test_image_path_list, test_labels_list)),
                           columns=['image', 'label'])
    return test_df

3.数据增强,加载

def data_processing(train_df, val_df, test_def):
    """Build augmented/plain Keras image generators for the three splits.

    Args:
        train_df: DataFrame with 'image' and 'label' columns (training).
        val_df: Same schema, validation split.
        test_def: Same schema, test split. (Name kept as-is — likely a
            typo for ``test_df`` — to stay keyword-compatible with callers.)

    Returns:
        Tuple (x_train, x_val, x_test) of generators yielding 64x64
        RGB batches with labels one-hot encoded (default class_mode).
    """
    # Augmentation only on the training stream; val/test are rescaled only.
    train_datagen = ImageDataGenerator(rescale=1. / 255,
                                       rotation_range=30,
                                       width_shift_range=0.2,
                                       height_shift_range=0.2,
                                       # Must be a bool, not the string
                                       # 'true': any non-empty string is
                                       # truthy, so even 'false' would have
                                       # enabled flipping.
                                       horizontal_flip=True)
    val_datagen = ImageDataGenerator(rescale=1. / 255)
    x_train = train_datagen.flow_from_dataframe(dataframe=train_df, x_col='image', y_col='label',
                                                target_size=(64, 64))
    x_val = val_datagen.flow_from_dataframe(dataframe=val_df, x_col='image', y_col='label', target_size=(64, 64))
    x_test = val_datagen.flow_from_dataframe(dataframe=test_def, x_col='image', y_col='label', target_size=(64, 64))
    return x_train, x_val, x_test

4.搭建模型

def model1():
    """Build and compile a small CNN for 2-class liveness classification.

    Returns:
        A compiled Sequential model expecting 64x64 RGB inputs and
        emitting a 2-way softmax, trained with categorical cross-entropy
        and Adam(1e-3).
    """
    model = Sequential()
    # input_shape must match the generators' target_size=(64, 64); the
    # original snippet said (32, 32, 3), which would fail at fit() time.
    # (The full-code version of this function already uses (64, 64, 3).)
    model.add(Conv2D(32, (3, 3), activation='relu', input_shape=(64, 64, 3)))
    model.add(MaxPooling2D((2, 2)))
    model.add(Conv2D(64, (3, 3), activation='relu'))
    model.add(MaxPooling2D((2, 2)))
    model.add(Conv2D(64, (3, 3), activation='relu'))
    model.add(Flatten())
    model.add(Dense(64, activation='relu'))
    # Two softmax units: live vs. spoof, one-hot labels (class_mode default).
    model.add(Dense(2, activation='softmax'))
    model.compile(optimizer=Adam(1e-3), loss=categorical_crossentropy, metrics=['acc'])
    return model

5.训练

# NOTE: this snippet is the body of the __main__ guard; the original paste
# kept the guard's 4-space indent on every line but the first, which is an
# IndentationError if run as-is. Re-dedented to top level here.
train_df = read_train_file()
val_df = read_val_file()
test_df = read_test_file()
x_train, x_val, x_test = data_processing(train_df, val_df, test_df)
# One TensorBoard run directory per launch, keyed by timestamp.
log_dir = "logs/face_dect/" + datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
tensorboard_callback = callbacks.TensorBoard(log_dir=log_dir, histogram_freq=1)
model = model1()
# Resume from previously saved weights.
model.load_weights('model/live_model.h5')
# fit()/evaluate() accept generators directly in tf.keras; the
# *_generator variants are deprecated.
model.fit(x_train, validation_data=x_val, epochs=20, verbose=2, callbacks=[tensorboard_callback])
# evaluate() returns [loss, accuracy] for a model compiled with one metric;
# the original printed the whole list followed by '%'.
test_loss, test_acc = model.evaluate(x_test)
print('Test accuracy is : ', test_acc * 100, '%')

6.完整代码

import pandas as pd
import os
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Dense, Flatten, GlobalAveragePooling2D, Dropout
from tensorflow.keras.models import Sequential
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.losses import categorical_crossentropy, SparseCategoricalCrossentropy
import datetime
from tensorflow.keras import callbacks
from tensorflow.keras import regularizers
from tensorflow.keras.models import load_model
from tensorflow.keras.models import Model
import cv2
import numpy as np


def read_train_file(base_path=r'E:\ml\fas\data\NUAA'):
    """Load the training split into a DataFrame.

    Parses ``<base_path>/train.txt`` where each non-empty line has the
    form ``<image_path>,<label>``.

    Args:
        base_path: Dataset root directory. Defaults to the original
            hard-coded location, so existing callers are unaffected.

    Returns:
        pandas.DataFrame with string columns 'image' and 'label'.
    """
    train_file_path = os.path.join(base_path, "train.txt")
    train_image_path_list = []
    train_labels_list = []
    with open(train_file_path) as f:
        for line in f:
            line = line.strip()
            if not line:
                # Skip blank lines (e.g. a trailing newline at EOF).
                continue
            parts = line.split(',')
            train_image_path_list.append(parts[0])
            # Stripping is essential: the original kept readlines()'s
            # trailing '\n', so '0\n' (mid-file) and '0' (last line)
            # would silently become two different class labels.
            train_labels_list.append(parts[1].strip())
    train_df = pd.DataFrame(list(zip(train_image_path_list, train_labels_list)),
                            columns=['image', 'label'])
    return train_df


def read_val_file(base_path=r'E:\ml\fas\data\NUAA'):
    """Load the validation split into a DataFrame.

    Parses ``<base_path>/val.txt`` where each non-empty line has the
    form ``<image_path>,<label>``.

    Args:
        base_path: Dataset root directory. Defaults to the original
            hard-coded location, so existing callers are unaffected.

    Returns:
        pandas.DataFrame with string columns 'image' and 'label'.
    """
    val_file_path = os.path.join(base_path, "val.txt")
    val_image_path_list = []
    val_labels_list = []
    with open(val_file_path) as f:
        for line in f:
            line = line.strip()
            if not line:
                # Skip blank lines (e.g. a trailing newline at EOF).
                continue
            parts = line.split(',')
            val_image_path_list.append(parts[0])
            # strip() removes the newline readlines() would keep, so the
            # last (newline-less) line maps to the same class as the rest.
            val_labels_list.append(parts[1].strip())
    val_df = pd.DataFrame(list(zip(val_image_path_list, val_labels_list)),
                          columns=['image', 'label'])
    return val_df


def read_test_file(base_path=r'E:\ml\fas\data\NUAA'):
    """Load the test split into a DataFrame.

    Parses ``<base_path>/test.txt`` where each non-empty line has the
    form ``<image_path>,<label>``.

    Args:
        base_path: Dataset root directory. Defaults to the original
            hard-coded location, so existing callers are unaffected.

    Returns:
        pandas.DataFrame with string columns 'image' and 'label'.
    """
    test_file_path = os.path.join(base_path, "test.txt")
    test_image_path_list = []
    test_labels_list = []
    with open(test_file_path) as f:
        for line in f:
            line = line.strip()
            if not line:
                # Skip blank lines (e.g. a trailing newline at EOF).
                continue
            parts = line.split(',')
            test_image_path_list.append(parts[0])
            # strip() keeps labels newline-free, avoiding phantom classes.
            test_labels_list.append(parts[1].strip())
    test_df = pd.DataFrame(list(zip(test_image_path_list, test_labels_list)),
                           columns=['image', 'label'])
    return test_df


def data_processing(train_df, val_df, test_def):
    """Build augmented/plain Keras image generators for the three splits.

    Args:
        train_df: DataFrame with 'image' and 'label' columns (training).
        val_df: Same schema, validation split.
        test_def: Same schema, test split. (Name kept as-is — likely a
            typo for ``test_df`` — to stay keyword-compatible with callers.)

    Returns:
        Tuple (x_train, x_val, x_test) of generators yielding 64x64
        RGB batches with labels one-hot encoded (default class_mode).
    """
    # Augmentation only on the training stream; val/test are rescaled only.
    train_datagen = ImageDataGenerator(rescale=1. / 255,
                                       rotation_range=30,
                                       width_shift_range=0.2,
                                       height_shift_range=0.2,
                                       # Must be a bool, not the string
                                       # 'true': any non-empty string is
                                       # truthy, so even 'false' would have
                                       # enabled flipping.
                                       horizontal_flip=True)
    val_datagen = ImageDataGenerator(rescale=1. / 255)
    x_train = train_datagen.flow_from_dataframe(dataframe=train_df, x_col='image', y_col='label',
                                                target_size=(64, 64))
    x_val = val_datagen.flow_from_dataframe(dataframe=val_df, x_col='image', y_col='label', target_size=(64, 64))
    x_test = val_datagen.flow_from_dataframe(dataframe=test_def, x_col='image', y_col='label', target_size=(64, 64))
    return x_train, x_val, x_test

def model1():
    """Build and compile the liveness-detection CNN.

    Three Conv/MaxPool stages followed by a small dense head; two
    softmax outputs (live vs. spoof). Compiled with Adam(1e-3),
    categorical cross-entropy, and accuracy as the metric. Expects
    64x64 RGB inputs, matching the generators' target_size.

    Returns:
        The compiled Sequential model.
    """
    stack = [
        Conv2D(32, (3, 3), activation='relu', input_shape=(64, 64, 3)),
        MaxPooling2D((2, 2)),
        Conv2D(64, (3, 3), activation='relu'),
        MaxPooling2D((2, 2)),
        Conv2D(64, (3, 3), activation='relu'),
        Flatten(),
        Dense(64, activation='relu'),
        Dense(2, activation='softmax'),
    ]
    model = Sequential(stack)
    model.compile(optimizer=Adam(1e-3), loss=categorical_crossentropy, metrics=['acc'])
    return model


def get_test_data(base_path=r'E:\ml\fas\data\NUAA'):
    """Load test images and labels into two index-aligned lists.

    Parses ``<base_path>/test.txt`` (``<image_path>,<label>`` per line)
    and reads each image with OpenCV.

    Args:
        base_path: Dataset root directory. Defaults to the original
            hard-coded location, so existing callers are unaffected.

    Returns:
        (img_list, labels_list): BGR ndarrays (or None for an unreadable
        file — cv2.imread does not raise) and stripped label strings.
        The lists stay the same length so indices remain aligned.
    """
    file_path = os.path.join(base_path, "test.txt")
    img_list = []
    labels_list = []
    with open(file_path) as f:
        for line in f:
            line = line.strip()
            if not line:
                # Skip blank lines (e.g. a trailing newline at EOF).
                continue
            parts = line.split(',')
            # cv2.imread returns None on failure; callers must check.
            img_list.append(cv2.imread(parts[0]))
            # strip() avoids '\n'-suffixed labels from mid-file lines.
            labels_list.append(parts[1].strip())
    return img_list, labels_list


if __name__ == '__main__':
    # img_list, labels_list = get_test_data()
    # print(len(img_list), len(labels_list))
    train_df = read_train_file()
    val_df = read_val_file()
    test_df = read_test_file()
    x_train, x_val, x_test = data_processing(train_df, val_df, test_df)
    # One TensorBoard run directory per launch, keyed by timestamp.
    log_dir = "logs/face_dect/" + datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
    tensorboard_callback = callbacks.TensorBoard(log_dir=log_dir, histogram_freq=1)
    model = model1()
    # fit()/evaluate() accept generators directly in tf.keras; the
    # *_generator variants are deprecated.
    model.fit(x_train, validation_data=x_val, epochs=20, verbose=2, callbacks=[tensorboard_callback])
    # evaluate() returns [loss, accuracy] for a model compiled with one
    # metric; the original printed the whole list followed by '%'.
    test_loss, test_acc = model.evaluate(x_test)
    print('Test accuracy is : ', test_acc * 100, '%')

7.训练结果

Epoch 1/10
110/110 [==============================] - 114s 1s/step - loss: 1.1759 - accuracy: 0.7893 - val_loss: 8.1524 - val_accuracy: 0.4881
Epoch 2/10
110/110 [==============================] - 120s 1s/step - loss: 0.3173 - accuracy: 0.8982 - val_loss: 4.1894 - val_accuracy: 0.6540
Epoch 3/10
110/110 [==============================] - 114s 1s/step - loss: 0.1600 - accuracy: 0.9525 - val_loss: 2.8076 - val_accuracy: 0.5163
Epoch 4/10
110/110 [==============================] - 116s 1s/step - loss: 0.0711 - accuracy: 0.9761 - val_loss: 6.8555 - val_accuracy: 0.4534
Epoch 5/10
110/110 [==============================] - 112s 1s/step - loss: 0.7138 - accuracy: 0.8394 - val_loss: 3.6469 - val_accuracy: 0.6328
Epoch 6/10
110/110 [==============================] - 114s 1s/step - loss: 0.9670 - accuracy: 0.5319 - val_loss: 0.6889 - val_accuracy: 0.6284
Epoch 7/10
110/110 [==============================] - 114s 1s/step - loss: 0.6931 - accuracy: 0.5052 - val_loss: 0.6909 - val_accuracy: 0.6284
Epoch 8/10
110/110 [==============================] - 116s 1s/step - loss: 0.6932 - accuracy: 0.5003 - val_loss: 0.6924 - val_accuracy: 0.6284
Epoch 9/10
110/110 [==============================] - 115s 1s/step - loss: 0.6932 - accuracy: 0.4881 - val_loss: 0.6927 - val_accuracy: 0.6284
Epoch 10/10
110/110 [==============================] - 151s 1s/step - loss: 0.6932 - accuracy: 0.4942 - val_loss: 0.6930 - val_accuracy: 0.6284
<tensorflow.python.keras.callbacks.History object at 0x000001BF407A7DA0>
200/200 - 48s - loss: 0.6930 - accuracy: 0.6328
0.6929949522018433 0.6327905058860779
32 声望
13 粉丝
0 条评论
推荐阅读
推荐系统评测指标
一. 评测指标用户满意度、预测准确度、覆盖率、多样性、 新颖性、惊喜度、信任度、实时性、健壮性、商业目标1. 用户满意度满意度是评测推荐系统的最重要指标,只能通过用户调查或者在线实验获得,主要是通过调查...

捕风阅读 591评论 1

视频清晰度优化指南
随着移动互联网的深入发展,视频消费场景逐渐变成主流,早期由于手机硬件的限制问题,导致生产出来的视频画质、清晰度存在较大的问题,用户体验不太好,当时的网络也处于4G的发展阶段,网络的限制也无法持续支持...

得物技术2阅读 849

python里打印list的四种方法
原文链接标题:Print lists in Python (4 Different Ways)用for循环来打印 {代码...} 结果1 2 3 4 5用 * 星号来打印 {代码...} 结果 {代码...} 把list转换为str来打印 {代码...} 结果 {代码...} 用map把数组里非...

chiiinnn阅读 10.2k

封面图
Ubuntu20.04 从源代码编译安装 python3.10
Ubuntu 22.04 Release DateUbuntu 22.04 Jammy Jellyfish is scheduled for release on April 21, 2022If you’re ready to use Ubuntu 22.04 Jammy Jellyfish, you can either upgrade your current Ubuntu syste...

ponponon1阅读 3.9k

“3D 元宇宙技术”在汽车新零售领域的应用与实践
随着不久前汽车之家新零售项目震撼发布,我们直击用户看车选车痛点首次提出 ABC 新体验模式,以元宇宙科技打造沉浸式交互服务,开放元宇宙能源空间站体验店,为用户打造更“有用”的体验。

之家技术阅读 5.1k

封面图
日常Python 代码片段整理
1、简单的 HTTP Web 服务器 {代码...} 2、单行循环List {代码...} 3、更新字典 {代码...} 4、拆分多行字符串 {代码...} 5、跟踪列表中元素的频率 {代码...} 6、不使用 Pandas 读取 CSV 文件 {代码...} 7、将列表...

墨城2阅读 288

Unicode 正则表达式(qbit)
前言本文根据《精通正则表达式》和 Unicode Regular Expressions 整理。本文的示例默认以 Python3 为实现语言,用到 Python3 的 re 模块或 regex 库。基本的 Unicode 属性分类 {代码...} 基本的 Unicode 子属性Le...

qbit阅读 4.3k

32 声望
13 粉丝
宣传栏