深度学习实践-人脸活体检测(NUAA)

捕风

1.使用数据集NUAA

数据地址链接:https://pan.baidu.com/s/1XocT...
提取码:oimq

2.读取数据

  1. 训练集
def read_train_file(base_path=r'E:\ml\fas\data\NUAA'):
    """Read train.txt ("<image_path>,<label>" per line) into a DataFrame.

    Args:
        base_path: directory containing train.txt (default keeps the
            original hard-coded dataset location).

    Returns:
        pd.DataFrame with columns 'image' (path string) and 'label'
        (label string, newline stripped).
    """
    train_file_path = os.path.join(base_path, "train.txt")
    train_image_path_list = []
    train_labels_list = []
    with open(train_file_path) as f:
        for line in f:
            # strip() removes the trailing newline that readlines() keeps;
            # without it the label becomes e.g. '1\n' instead of '1'
            parts = line.strip().split(',')
            if len(parts) < 2:
                continue  # skip blank/malformed lines
            train_image_path_list.append(parts[0])
            train_labels_list.append(parts[1])
    train_df = pd.DataFrame(list(zip(train_image_path_list, train_labels_list)),
                            columns=['image', 'label'])
    return train_df
  2. 验证集

def read_val_file(base_path=r'E:\ml\fas\data\NUAA'):
    """Read val.txt ("<image_path>,<label>" per line) into a DataFrame.

    Args:
        base_path: directory containing val.txt (default keeps the
            original hard-coded dataset location).

    Returns:
        pd.DataFrame with columns 'image' (path string) and 'label'
        (label string, newline stripped).
    """
    val_file_path = os.path.join(base_path, "val.txt")
    val_image_path_list = []
    val_labels_list = []
    with open(val_file_path) as f:
        for line in f:
            # strip() removes the trailing newline that readlines() keeps;
            # without it the label becomes e.g. '1\n' instead of '1'
            parts = line.strip().split(',')
            if len(parts) < 2:
                continue  # skip blank/malformed lines
            val_image_path_list.append(parts[0])
            val_labels_list.append(parts[1])
    val_df = pd.DataFrame(list(zip(val_image_path_list, val_labels_list)),
                          columns=['image', 'label'])
    return val_df
  3. 测试集

def read_test_file(base_path=r'E:\ml\fas\data\NUAA'):
    """Read test.txt ("<image_path>,<label>" per line) into a DataFrame.

    Args:
        base_path: directory containing test.txt (default keeps the
            original hard-coded dataset location).

    Returns:
        pd.DataFrame with columns 'image' (path string) and 'label'
        (label string, newline stripped).
    """
    test_file_path = os.path.join(base_path, "test.txt")
    test_image_path_list = []
    test_labels_list = []
    with open(test_file_path) as f:
        for line in f:
            # strip() removes the trailing newline that readlines() keeps;
            # without it the label becomes e.g. '1\n' instead of '1'
            parts = line.strip().split(',')
            if len(parts) < 2:
                continue  # skip blank/malformed lines
            test_image_path_list.append(parts[0])
            test_labels_list.append(parts[1])
    test_df = pd.DataFrame(list(zip(test_image_path_list, test_labels_list)),
                           columns=['image', 'label'])
    return test_df

3.数据增强,加载

def data_processing(train_df, val_df, test_def):
    """Build normalized (and, for training, augmented) batch generators.

    Args:
        train_df: DataFrame with 'image' and 'label' columns, training split.
        val_df: same structure, validation split.
        test_def: same structure, test split. NOTE(review): the name looks
            like a typo for 'test_df'; kept unchanged so keyword callers
            are not broken.

    Returns:
        (x_train, x_val, x_test) Keras DataFrameIterator generators
        yielding batches of 64x64 RGB images.
    """
    # Augmentation is applied only to the training split.
    train_datagen = ImageDataGenerator(rescale=1. / 255,
                                       rotation_range=30,
                                       width_shift_range=0.2,
                                       height_shift_range=0.2,
                                       # was the string 'true' (only worked
                                       # by truthiness) -- use a real boolean
                                       horizontal_flip=True)
    val_datagen = ImageDataGenerator(rescale=1. / 255)
    x_train = train_datagen.flow_from_dataframe(dataframe=train_df, x_col='image', y_col='label',
                                                target_size=(64, 64))
    x_val = val_datagen.flow_from_dataframe(dataframe=val_df, x_col='image', y_col='label', target_size=(64, 64))
    x_test = val_datagen.flow_from_dataframe(dataframe=test_def, x_col='image', y_col='label', target_size=(64, 64))
    return x_train, x_val, x_test

4.搭建模型

def model1():
    """Build and compile a small CNN for 2-class (live/spoof) classification.

    Returns:
        A compiled Sequential model taking 64x64 RGB images and producing
        a 2-way softmax.
    """
    model = Sequential()
    # input_shape must match the generators' target_size=(64, 64); the
    # original (32, 32, 3) was inconsistent with data_processing() and
    # with the complete-code version of this model.
    model.add(Conv2D(32, (3, 3), activation='relu', input_shape=(64, 64, 3)))
    model.add(MaxPooling2D((2, 2)))
    model.add(Conv2D(64, (3, 3), activation='relu'))
    model.add(MaxPooling2D((2, 2)))
    model.add(Conv2D(64, (3, 3), activation='relu'))
    model.add(Flatten())
    model.add(Dense(64, activation='relu'))
    model.add(Dense(2, activation='softmax'))
    model.compile(optimizer=Adam(1e-3), loss=categorical_crossentropy, metrics=['acc'])
    return model

5.训练

# Training driver: load splits, build generators, fine-tune from a saved
# checkpoint, and evaluate on the test split.
train_df = read_train_file()
val_df = read_val_file()
test_df = read_test_file()
x_train, x_val, x_test = data_processing(train_df, val_df, test_df)
# One timestamped TensorBoard run directory per launch.
log_dir = "logs/face_dect/" + datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
tensorboard_callback = callbacks.TensorBoard(log_dir=log_dir, histogram_freq=1)
model = model1()
# Resume from a previously saved checkpoint before continuing training.
model.load_weights('model/live_model.h5')
# fit_generator/evaluate_generator are deprecated in TF2; fit/evaluate
# accept generators directly.
model.fit(x_train, validation_data=x_val, epochs=20, verbose=2, callbacks=[tensorboard_callback])
# evaluate returns [loss, acc] because compile(..., metrics=['acc']).
test_loss, test_acc = model.evaluate(x_test)
print('Test loss: ', test_loss)
print('Test accuracy is : ', test_acc * 100, '%')

6.完整代码

import pandas as pd
import os
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Dense, Flatten, GlobalAveragePooling2D, Dropout
from tensorflow.keras.models import Sequential
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.losses import categorical_crossentropy, SparseCategoricalCrossentropy
import datetime
from tensorflow.keras import callbacks
from tensorflow.keras import regularizers
from tensorflow.keras.models import load_model
from tensorflow.keras.models import Model
import cv2
import numpy as np


def read_train_file(base_path=r'E:\ml\fas\data\NUAA'):
    """Read train.txt ("<image_path>,<label>" per line) into a DataFrame.

    Args:
        base_path: directory containing train.txt (default keeps the
            original hard-coded dataset location).

    Returns:
        pd.DataFrame with columns 'image' (path string) and 'label'
        (label string, newline stripped).
    """
    train_file_path = os.path.join(base_path, "train.txt")
    train_image_path_list = []
    train_labels_list = []
    with open(train_file_path) as f:
        for line in f:
            # strip() removes the trailing newline that readlines() keeps;
            # without it the label becomes e.g. '1\n' instead of '1'
            parts = line.strip().split(',')
            if len(parts) < 2:
                continue  # skip blank/malformed lines
            train_image_path_list.append(parts[0])
            train_labels_list.append(parts[1])
    train_df = pd.DataFrame(list(zip(train_image_path_list, train_labels_list)),
                            columns=['image', 'label'])
    return train_df


def read_val_file(base_path=r'E:\ml\fas\data\NUAA'):
    """Read val.txt ("<image_path>,<label>" per line) into a DataFrame.

    Args:
        base_path: directory containing val.txt (default keeps the
            original hard-coded dataset location).

    Returns:
        pd.DataFrame with columns 'image' (path string) and 'label'
        (label string, newline stripped).
    """
    val_file_path = os.path.join(base_path, "val.txt")
    val_image_path_list = []
    val_labels_list = []
    with open(val_file_path) as f:
        for line in f:
            # strip() removes the trailing newline that readlines() keeps;
            # without it the label becomes e.g. '1\n' instead of '1'
            parts = line.strip().split(',')
            if len(parts) < 2:
                continue  # skip blank/malformed lines
            val_image_path_list.append(parts[0])
            val_labels_list.append(parts[1])
    val_df = pd.DataFrame(list(zip(val_image_path_list, val_labels_list)),
                          columns=['image', 'label'])
    return val_df


def read_test_file(base_path=r'E:\ml\fas\data\NUAA'):
    """Read test.txt ("<image_path>,<label>" per line) into a DataFrame.

    Args:
        base_path: directory containing test.txt (default keeps the
            original hard-coded dataset location).

    Returns:
        pd.DataFrame with columns 'image' (path string) and 'label'
        (label string, newline stripped).
    """
    test_file_path = os.path.join(base_path, "test.txt")
    test_image_path_list = []
    test_labels_list = []
    with open(test_file_path) as f:
        for line in f:
            # strip() removes the trailing newline that readlines() keeps;
            # without it the label becomes e.g. '1\n' instead of '1'
            parts = line.strip().split(',')
            if len(parts) < 2:
                continue  # skip blank/malformed lines
            test_image_path_list.append(parts[0])
            test_labels_list.append(parts[1])
    test_df = pd.DataFrame(list(zip(test_image_path_list, test_labels_list)),
                           columns=['image', 'label'])
    return test_df


def data_processing(train_df, val_df, test_def):
    """Build normalized (and, for training, augmented) batch generators.

    Args:
        train_df: DataFrame with 'image' and 'label' columns, training split.
        val_df: same structure, validation split.
        test_def: same structure, test split. NOTE(review): the name looks
            like a typo for 'test_df'; kept unchanged so keyword callers
            are not broken.

    Returns:
        (x_train, x_val, x_test) Keras DataFrameIterator generators
        yielding batches of 64x64 RGB images.
    """
    # Augmentation is applied only to the training split.
    train_datagen = ImageDataGenerator(rescale=1. / 255,
                                       rotation_range=30,
                                       width_shift_range=0.2,
                                       height_shift_range=0.2,
                                       # was the string 'true' (only worked
                                       # by truthiness) -- use a real boolean
                                       horizontal_flip=True)
    val_datagen = ImageDataGenerator(rescale=1. / 255)
    x_train = train_datagen.flow_from_dataframe(dataframe=train_df, x_col='image', y_col='label',
                                                target_size=(64, 64))
    x_val = val_datagen.flow_from_dataframe(dataframe=val_df, x_col='image', y_col='label', target_size=(64, 64))
    x_test = val_datagen.flow_from_dataframe(dataframe=test_def, x_col='image', y_col='label', target_size=(64, 64))
    return x_train, x_val, x_test

def model1():
    """Build and compile a small CNN for 2-class (live/spoof) classification.

    Returns:
        A compiled Sequential model taking 64x64 RGB images and producing
        a 2-way softmax, optimized with Adam(1e-3) on categorical
        cross-entropy, tracking 'acc'.
    """
    cnn_layers = [
        Conv2D(32, (3, 3), activation='relu', input_shape=(64, 64, 3)),
        MaxPooling2D((2, 2)),
        Conv2D(64, (3, 3), activation='relu'),
        MaxPooling2D((2, 2)),
        Conv2D(64, (3, 3), activation='relu'),
        Flatten(),
        Dense(64, activation='relu'),
        Dense(2, activation='softmax'),
    ]
    model = Sequential(cnn_layers)
    model.compile(optimizer=Adam(1e-3), loss=categorical_crossentropy, metrics=['acc'])
    return model


def get_test_data():
    """Load the test images and labels listed in test.txt.

    Returns:
        (img_list, labels_list): image arrays as returned by cv2.imread
        (BGR channel order) and their label strings (newline stripped),
        kept aligned index-for-index.
    """
    base_path = r'E:\ml\fas\data\NUAA'
    file_path = os.path.join(base_path, "test.txt")
    img_list = []
    labels_list = []
    with open(file_path) as f:
        for line in f:
            # strip() removes the trailing newline readlines() would keep
            parts = line.strip().split(',')
            if len(parts) < 2:
                continue  # skip blank/malformed lines
            image_path, label = parts[0], parts[1]
            img = cv2.imread(image_path)
            if img is None:
                # cv2.imread returns None (no exception) for missing or
                # unreadable files; skip the pair to keep lists aligned
                # instead of appending a None image.
                continue
            img_list.append(img)
            labels_list.append(label)
    return img_list, labels_list


if __name__ == '__main__':
    # img_list, labels_list = get_test_data()
    # print(len(img_list), len(labels_list))
    train_df = read_train_file()
    val_df = read_val_file()
    test_df = read_test_file()
    x_train, x_val, x_test = data_processing(train_df, val_df, test_df)
    # One timestamped TensorBoard run directory per launch.
    log_dir = "logs/face_dect/" + datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
    # Create the TensorBoard callback.
    tensorboard_callback = callbacks.TensorBoard(log_dir=log_dir, histogram_freq=1)
    model = model1()
    # fit_generator/evaluate_generator are deprecated in TF2; fit/evaluate
    # accept generators directly.
    model.fit(x_train, validation_data=x_val, epochs=20, verbose=2, callbacks=[tensorboard_callback])
    # evaluate returns [loss, acc] because compile(..., metrics=['acc']);
    # the original printed the whole list followed by '%'.
    test_loss, test_acc = model.evaluate(x_test)
    print('Test loss: ', test_loss)
    print('Test accuracy is : ', test_acc * 100, '%')

7.训练结果(注:以下日志来自一次 epochs=10 的运行,与上文代码中的 epochs=20 不一致)

Epoch 1/10
110/110 [==============================] - 114s 1s/step - loss: 1.1759 - accuracy: 0.7893 - val_loss: 8.1524 - val_accuracy: 0.4881
Epoch 2/10
110/110 [==============================] - 120s 1s/step - loss: 0.3173 - accuracy: 0.8982 - val_loss: 4.1894 - val_accuracy: 0.6540
Epoch 3/10
110/110 [==============================] - 114s 1s/step - loss: 0.1600 - accuracy: 0.9525 - val_loss: 2.8076 - val_accuracy: 0.5163
Epoch 4/10
110/110 [==============================] - 116s 1s/step - loss: 0.0711 - accuracy: 0.9761 - val_loss: 6.8555 - val_accuracy: 0.4534
Epoch 5/10
110/110 [==============================] - 112s 1s/step - loss: 0.7138 - accuracy: 0.8394 - val_loss: 3.6469 - val_accuracy: 0.6328
Epoch 6/10
110/110 [==============================] - 114s 1s/step - loss: 0.9670 - accuracy: 0.5319 - val_loss: 0.6889 - val_accuracy: 0.6284
Epoch 7/10
110/110 [==============================] - 114s 1s/step - loss: 0.6931 - accuracy: 0.5052 - val_loss: 0.6909 - val_accuracy: 0.6284
Epoch 8/10
110/110 [==============================] - 116s 1s/step - loss: 0.6932 - accuracy: 0.5003 - val_loss: 0.6924 - val_accuracy: 0.6284
Epoch 9/10
110/110 [==============================] - 115s 1s/step - loss: 0.6932 - accuracy: 0.4881 - val_loss: 0.6927 - val_accuracy: 0.6284
Epoch 10/10
110/110 [==============================] - 151s 1s/step - loss: 0.6932 - accuracy: 0.4942 - val_loss: 0.6930 - val_accuracy: 0.6284
<tensorflow.python.keras.callbacks.History object at 0x000001BF407A7DA0>
200/200 - 48s - loss: 0.6930 - accuracy: 0.6328
0.6929949522018433 0.6327905058860779
阅读 263
26 声望
4 粉丝
0 条评论
你知道吗?

26 声望
4 粉丝
文章目录
宣传栏