
Emotion Recognition with DeepFace in C++

Table of Contents

  • Emotion Recognition with DeepFace in C++
    • Introduction
    • Downloading the model and converting it to ONNX (facial_expression_model_weights.h5)
    • Testing
      • Extracting the face region and converting it to the model input format
      • Running inference to check the result
    • Inference with the ONNX Runtime C++ library

Introduction

DeepFace is an open-source, deep-learning-based framework for face recognition and facial attribute analysis. Its emotion recognition module uses a convolutional neural network (CNN) to classify seven basic emotions: angry, disgust, fear, happy, sad, surprise, and neutral. Combined with OpenCV's image-processing capabilities, it automates the full pipeline from face detection and alignment to emotion prediction; the often-quoted 97.53% accuracy figure refers to DeepFace's face verification performance, which exceeds the average human level. Its core recognition models, such as VGG-Face and ArcFace, are trained on large-scale datasets, while the emotion model is trained on datasets such as FER2013 and AffectNet, enabling it to capture subtle differences in facial micro-expressions. (The overview above was generated by DeepSeek.)
DeepFace emotion recognition
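
Before converting anything, the whole pipeline can be exercised from Python through DeepFace's high-level API. A minimal sketch (the image path is a placeholder; in recent DeepFace versions, analyze returns a list of result dictionaries):

from deepface import DeepFace

# Run face detection, alignment and the 7-class emotion model in one call.
result = DeepFace.analyze(img_path="../img/test2.jpg", actions=["emotion"])
print(result[0]["dominant_emotion"])  # e.g. "happy"
print(result[0]["emotion"])           # per-class scores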

Downloading the model and converting it to ONNX (facial_expression_model_weights.h5)

This is the source code of DeepFace's emotion recognition module:

# stdlib dependencies
from typing import List, Union

# 3rd party dependencies
import numpy as np
import cv2

# project dependencies
from deepface.commons import package_utils, weight_utils
from deepface.models.Demography import Demography
from deepface.commons.logger import Logger

# dependency configuration
tf_version = package_utils.get_tf_major_version()

if tf_version == 1:
    from keras.models import Sequential
    from keras.layers import Conv2D, MaxPooling2D, AveragePooling2D, Flatten, Dense, Dropout
else:
    from tensorflow.keras.models import Sequential
    from tensorflow.keras.layers import (
        Conv2D,
        MaxPooling2D,
        AveragePooling2D,
        Flatten,
        Dense,
        Dropout,
    )

# Labels for the emotions that can be detected by the model.
labels = ["angry", "disgust", "fear", "happy", "sad", "surprise", "neutral"]

logger = Logger()

# pylint: disable=line-too-long, disable=too-few-public-methods

WEIGHTS_URL = "https://github.com/serengil/deepface_models/releases/download/v1.0/facial_expression_model_weights.h5"


class EmotionClient(Demography):
    """
    Emotion model class
    """

    def __init__(self):
        self.model = load_model()
        self.model_name = "Emotion"

    def _preprocess_image(self, img: np.ndarray) -> np.ndarray:
        """
        Preprocess single image for emotion detection
        Args:
            img: Input image (224, 224, 3)
        Returns:
            Preprocessed grayscale image (48, 48)
        """
        img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        img_gray = cv2.resize(img_gray, (48, 48))
        return img_gray

    def predict(self, img: Union[np.ndarray, List[np.ndarray]]) -> np.ndarray:
        """
        Predict emotion probabilities for single or multiple faces
        Args:
            img: Single image as np.ndarray (224, 224, 3) or
                List of images as List[np.ndarray] or
                Batch of images as np.ndarray (n, 224, 224, 3)
        Returns:
            np.ndarray (n, n_emotions)
            where n_emotions is the number of emotion categories
        """
        # Preprocessing input image or image list.
        imgs = self._preprocess_batch_or_single_input(img)

        processed_imgs = np.expand_dims(np.array([self._preprocess_image(img) for img in imgs]), axis=-1)

        # Prediction
        predictions = self._predict_internal(processed_imgs)

        return predictions


def load_model(
    url=WEIGHTS_URL,
) -> Sequential:
    """
    Construct emotion model, download and load weights
    """

    num_classes = 7

    model = Sequential()

    # 1st convolution layer
    model.add(Conv2D(64, (5, 5), activation="relu", input_shape=(48, 48, 1)))
    model.add(MaxPooling2D(pool_size=(5, 5), strides=(2, 2)))

    # 2nd convolution layer
    model.add(Conv2D(64, (3, 3), activation="relu"))
    model.add(Conv2D(64, (3, 3), activation="relu"))
    model.add(AveragePooling2D(pool_size=(3, 3), strides=(2, 2)))

    # 3rd convolution layer
    model.add(Conv2D(128, (3, 3), activation="relu"))
    model.add(Conv2D(128, (3, 3), activation="relu"))
    model.add(AveragePooling2D(pool_size=(3, 3), strides=(2, 2)))

    model.add(Flatten())

    # fully connected neural networks
    model.add(Dense(1024, activation="relu"))
    model.add(Dropout(0.2))
    model.add(Dense(1024, activation="relu"))
    model.add(Dropout(0.2))

    model.add(Dense(num_classes, activation="softmax"))

    # ----------------------------

    weight_file = weight_utils.download_weights_if_necessary(
        file_name="facial_expression_model_weights.h5", source_url=url
    )

    model = weight_utils.load_model_weights(model=model, weight_file=weight_file)

    return model

Downloading the model and converting it to ONNX:

import tensorflow as tf
import tf2onnx
import onnx

# 1. Load the original Keras model (load_model is defined in the DeepFace source above)
model = load_model()

# 2. Define the input signature
input_signature = [tf.TensorSpec(shape=(None, 48, 48, 1), dtype=tf.float32, name='input')]

# 3. Convert to ONNX
onnx_model, _ = tf2onnx.convert.from_keras(
    model,
    input_signature=input_signature,
    opset=13,
    output_path="deepface_emotion.onnx"
)
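
Before moving to C++, it is worth sanity-checking that the exported graph reproduces the Keras outputs. A quick sketch, assuming the model object from the conversion step above is still in scope:

import numpy as np
import onnxruntime as ort

# Compare Keras and ONNX outputs on a random NHWC input.
dummy = np.random.rand(1, 48, 48, 1).astype(np.float32)
sess = ort.InferenceSession("deepface_emotion.onnx", providers=["CPUExecutionProvider"])
onnx_out = sess.run(None, {"input": dummy})[0]
np.testing.assert_allclose(model.predict(dummy), onnx_out, rtol=1e-3, atol=1e-5)
print("ONNX output matches Keras")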

Testing

Extracting the face region and converting it to the model input format

Test image: [image]
Extracted face crop: [image]
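
The cropping step itself is not shown in the post. One possible way to produce such a crop (a sketch only, not necessarily how test2_face.jpg was created) is DeepFace's own extract_faces helper, which returns aligned face crops as RGB float images in [0, 1]:

import cv2
from deepface import DeepFace

# Detect and align the face; extract_faces returns a list of dicts with a "face" key.
faces = DeepFace.extract_faces(img_path="../img/test2.jpg", detector_backend="opencv")
face = faces[0]["face"]

# Convert back to BGR uint8 and save so the preprocessing below can reload it.
cv2.imwrite("../img/test2_face.jpg", cv2.cvtColor((face * 255).astype("uint8"), cv2.COLOR_RGB2BGR))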

import cv2
import numpy as np
import matplotlib.pyplot as plt

# Preprocess into the model input format
img = cv2.imread("../img/test2_face.jpg")
img = img.astype(np.float32)/255.0
img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
img_gray = cv2.resize(img_gray, (48, 48))
gray_img = np.expand_dims(img_gray, axis=-1)   # (48, 48, 1)
test_input = np.expand_dims(gray_img, axis=0)  # (1, 48, 48, 1)

Running inference to check the result

import onnxruntime as ort
import numpy as np

# Load the ONNX model
ort_session = ort.InferenceSession("deepface_emotion.onnx", providers=['CPUExecutionProvider'])

# Run inference
outputs = ort_session.run(None, {'input': test_input})
print("Predicted probabilities:", outputs[0])

# Get the predicted class
predicted_class = np.argmax(outputs[0])
print("Predicted class:", predicted_class)

labels = ["angry", "disgust", "fear", "happy", "sad", "surprise", "neutral"]
print(f"Predicted label: {labels[predicted_class]}, probability: {outputs[0][0][predicted_class]:.2f}")

Result:

Predicted probabilities: [[2.0709881e-10 1.9225350e-18 4.2878087e-06 9.9998260e-01 1.3058154e-05
  1.4756028e-11 1.7358280e-08]]
Predicted class: 3
Predicted label: happy, probability: 1.00

Inference with the ONNX Runtime C++ library

#pragma once
#include <iostream>
#include <numeric> // std::accumulate
#include <tuple>   // C++17 tuple
#include <opencv2/opencv.hpp>
#include <onnxruntime_cxx_api.h>

namespace LIANGBAIKAI_BASE_MODEL_NAME
{

#define ORT_OLD_VISON 12 // ONNX Runtime versions before 1.12.0 use the old API

    class Deepface_Emotion_Onnxruntime
    {

    public:
        enum Severity_log
        {
            E_INTERNAL_ERROR = 0, // internal error
            E_ERROR = 1,          // general error
            E_WARNING = 2,        // warning
            E_INFO = 3,           // informational message
        };

        Deepface_Emotion_Onnxruntime() : _OrtMemoryInfo(Ort::MemoryInfo::CreateCpu(OrtAllocatorType::OrtDeviceAllocator, OrtMemType::OrtMemTypeCPUOutput)) {};
        virtual ~Deepface_Emotion_Onnxruntime() {};

        /**
         * @description: Set the log level
         * @param {Severity_log} severity
         * @return {*}
         */
        virtual void setReportableSeverity(Severity_log severity)
        {
            _reportableSeverity = severity;
            log(E_INFO, "reportableSeverity set to " + std::to_string(severity));
        }

        /**
         * @description: Initialize the model
         * @param {string} &modelPath path to the model file
         * @param {int} netWidth model input width
         * @param {int} netHeight model input height
         * @param {bool} isCuda whether to run on CUDA
         * @param {int} cudaID CUDA device id
         * @param {bool} warmUp warm up the GPU model
         * @return {*} whether initialization succeeded
         */
        bool init(const std::string &modelPath, int netWidth = 48, int netHeight = 48, bool isCuda = true, int cudaID = 0, bool warmUp = true)
        {
            _netWidth = netWidth;
            _netHeight = netHeight;
            bool rec = ReadModel(modelPath, isCuda, cudaID, warmUp);
            return rec;
        }

        /**
         * @description: Run inference
         * @param {cv::Mat} &img : input face image
         * @return {std::tuple<int, float>} (emotion index, confidence)
         */
        std::tuple<int, float> infer(const cv::Mat &img)
        {
            log(E_INFO, "infer");
            cv::Mat img_tmp = img.clone();

            cv::resize(img_tmp, img_tmp, cv::Size(224, 224)); // match the Python pipeline, which feeds 224x224 face crops

            cv::Mat normalizedImage;
            //  cv::normalize(img_tmp, normalizedImage, 0, 1, cv::NORM_MINMAX, CV_32F); // min-max normalization deviates slightly from the Python results, so use the method below instead
            img_tmp.convertTo(normalizedImage, CV_32F, 1.0 / 255.0); // scale to [0, 1], as in the Python test code

            cv::Mat resizedImage;
            cv::resize(normalizedImage, resizedImage, cv::Size(_netWidth, _netHeight));

            cv::Mat grayImage;
            cv::cvtColor(resizedImage, grayImage, cv::COLOR_BGR2GRAY);

            cv::Mat blob;
            cv::dnn::blobFromImage(grayImage, blob, 1.0, cv::Size(0, 0), cv::Scalar(0), false, false); // for a single-channel 48x48 image, the NCHW blob (1,1,48,48) has the same memory layout as the model's NHWC input (1,48,48,1)

            int64_t input_tensor_length = VectorProduct(_inputTensorShape);
            std::vector<Ort::Value> input_tensors;
            std::vector<Ort::Value> output_tensors;
            input_tensors.push_back(Ort::Value::CreateTensor<float>(_OrtMemoryInfo, (float *)blob.data, input_tensor_length, _inputTensorShape.data(), _inputTensorShape.size()));

            log(E_INFO, "infer run");
            output_tensors = _OrtSession->Run(Ort::RunOptions{nullptr},
                                              _inputNodeNames.data(),
                                              input_tensors.data(),
                                              _inputNodeNames.size(),
                                              _outputNodeNames.data(),
                                              _outputNodeNames.size());

            float *all_data = output_tensors[0].GetTensorMutableData<float>();

            int max_index = 0;
            float max_value = 0.0f;
            for (int i = 0; i < _outputTensorShape[1]; i++)
            {
                log(E_INFO, "result" + std::to_string(i) + ": " + std::to_string(all_data[i]));
                if (all_data[i] > max_value)
                {
                    max_value = all_data[i];
                    max_index = i;
                }
            }
            return std::make_tuple(max_index, max_value * 99.99); // scale the softmax probability to a confidence score just under 100
        }

    private:
        /**
         * @description: Load the ONNX model
         * @param {string} &modelPath : path to the ONNX model
         * @param {bool} isCuda: if true, use Ort-GPU, otherwise run on the CPU.
         * @param {int} cudaID: if isCuda==true, run Ort-GPU on device cudaID.
         * @param {bool} warmUp: if isCuda==true, warm up the GPU model.
         * @return {*}
         */
        bool ReadModel(const std::string &modelPath, bool isCuda = true, int cudaID = 0, bool warmUp = true)
        {
            if (_batchSize < 1)
            {
                _batchSize = 1;
            }
            try
            {
                // list the available execution providers: CPU, CUDA, DML, TensorRT, OpenVINO
                std::vector<std::string> available_providers = Ort::GetAvailableProviders();
                auto cuda_available = std::find(available_providers.begin(), available_providers.end(), "CUDAExecutionProvider");

                if (isCuda && (cuda_available == available_providers.end()))
                {
                    log(E_ERROR, "Your ORT build without GPU. Change to CPU.");
                    log(E_INFO, "************* Infer model on CPU! *************");
                }
                else if (isCuda && (cuda_available != available_providers.end()))
                {
                    log(E_INFO, "************* Infer model on GPU! *************");

#if ORT_API_VERSION < ORT_OLD_VISON
                    OrtCUDAProviderOptions cudaOption;
                    cudaOption.device_id = cudaID;
                    _OrtSessionOptions.AppendExecutionProvider_CUDA(cudaOption);
#else
                    // append the CUDA execution provider
                    OrtStatus *status = OrtSessionOptionsAppendExecutionProvider_CUDA(_OrtSessionOptions, cudaID);
                    if (status != NULL)
                    {
                        log(E_ERROR, "OrtSessionOptionsAppendExecutionProvider_CUDA ERROR");
                    }
#endif
                }
                else
                {
                    log(E_INFO, "************* Infer model on CPU! *************");
                }

                // set the graph optimization level
                _OrtSessionOptions.SetGraphOptimizationLevel(GraphOptimizationLevel::ORT_ENABLE_EXTENDED);

#ifdef _WIN32
                // ONNX Runtime expects a wide-character path on Windows
                std::wstring model_path(modelPath.begin(), modelPath.end());
                _OrtSession = std::make_shared<Ort::Session>(_OrtEnv, model_path.c_str(), _OrtSessionOptions);
#else
                // create the session
                _OrtSession = std::make_shared<Ort::Session>(_OrtEnv, modelPath.c_str(), _OrtSessionOptions);
#endif
                // create a default allocator
                Ort::AllocatorWithDefaultOptions allocator;
                // initialize the inputs
                _inputNodesNum = _OrtSession->GetInputCount();
#if ORT_API_VERSION < ORT_OLD_VISON
                _inputName = _OrtSession->GetInputName(0, allocator);
                _inputNodeNames.push_back(_inputName);
#else
                // get the input name
                _inputName = std::move(_OrtSession->GetInputNameAllocated(0, allocator));
                _inputNodeNames.push_back(_inputName.get());
#endif
                // get the input type info
                Ort::TypeInfo inputTypeInfo = _OrtSession->GetInputTypeInfo(0);
                // get the input tensor type and shape info
                auto input_tensor_info = inputTypeInfo.GetTensorTypeAndShapeInfo();
                // get the input element type
                _inputNodeDataType = input_tensor_info.GetElementType();
                // get the input tensor shape
                _inputTensorShape = input_tensor_info.GetShape();

                if (_inputTensorShape[0] == -1)
                {
                    _isDynamicShape = true;
                    _inputTensorShape[0] = _batchSize;
                }
                // note: the exported emotion model is NHWC (N, 48, 48, 1), so only the
                // batch dimension is dynamic here; this branch targets NCHW-style models
                if (_inputTensorShape[2] == -1 || _inputTensorShape[3] == -1)
                {
                    _isDynamicShape = true;
                    _inputTensorShape[2] = _netHeight;
                    _inputTensorShape[3] = _netWidth;
                }
                // get the number of output nodes
                _outputNodesNum = _OrtSession->GetOutputCount();
                // get the output name
#if ORT_API_VERSION < ORT_OLD_VISON
                _output_name0 = _OrtSession->GetOutputName(0, allocator);
                _outputNodeNames.push_back(_output_name0);
#else
                _output_name0 = std::move(_OrtSession->GetOutputNameAllocated(0, allocator));
                _outputNodeNames.push_back(_output_name0.get());
#endif
                Ort::TypeInfo type_info_output0(nullptr);
                type_info_output0 = _OrtSession->GetOutputTypeInfo(0); // output0
                // get the output tensor type and shape info
                auto tensor_info_output0 = type_info_output0.GetTensorTypeAndShapeInfo();
                // get the output element type
                _outputNodeDataType = tensor_info_output0.GetElementType();
                // get the output tensor shape
                _outputTensorShape = tensor_info_output0.GetShape();

                log(E_INFO, "inputNodesNum:" + std::to_string(_inputNodesNum));
                log(E_INFO, "inputNodeNames:" + std::string(_inputNodeNames[0]));

                std::string inputTensorShapeStr = " ";
                for (unsigned int i = 0; i < _inputTensorShape.size(); i++)
                {
                    inputTensorShapeStr += std::to_string(_inputTensorShape[i]) + " ";
                }
                log(E_INFO, "inputTensorShape:" + inputTensorShapeStr);

                log(E_INFO, "outputNodesNum:" + std::to_string(_outputNodesNum));
                log(E_INFO, "outputNodeNames:" + std::string(_outputNodeNames[0]));

                std::string outputTensorShapeStr = " ";
                for (unsigned int i = 0; i < _outputTensorShape.size(); i++)
                {
                    outputTensorShapeStr += std::to_string(_outputTensorShape[i]) + " ";
                }
                log(E_INFO, "outputTensorShape:" + outputTensorShapeStr);
                log(E_INFO, "outputNodeDataType:" + std::to_string(_outputNodeDataType));

                // warm up
                if (isCuda && warmUp)
                {
                    // dry run
                    log(E_INFO, "Start warming up");

                    // compute the input tensor length
                    size_t input_tensor_length = VectorProduct(_inputTensorShape);
                    float *temp = new float[input_tensor_length];
                    // create the input tensor
                    std::vector<Ort::Value> input_tensors;
                    // output tensor container
                    std::vector<Ort::Value> output_tensors;
                    input_tensors.push_back(Ort::Value::CreateTensor<float>(
                        _OrtMemoryInfo, temp, input_tensor_length, _inputTensorShape.data(),
                        _inputTensorShape.size()));
                    for (int i = 0; i < 3; ++i)
                    {
                        output_tensors = _OrtSession->Run(Ort::RunOptions{nullptr},
                                                          _inputNodeNames.data(),
                                                          input_tensors.data(),
                                                          _inputNodeNames.size(),
                                                          _outputNodeNames.data(),
                                                          _outputNodeNames.size());
                    }

                    delete[] temp;
                }
            }
            catch (const std::exception &)
            {
                log(E_ERROR, "read model error !");
                return false;
            }
            log(E_INFO, "read model success !");
            return true;
        }

        void log(Severity_log severity, const std::string msg) noexcept
        {
            // decide whether to print based on the severity level
            if (severity <= _reportableSeverity)
            {
                switch (severity)
                {
                case Severity_log::E_INTERNAL_ERROR:
                    std::cerr << "[INTERNAL ERROR] " << msg << std::endl;
                    break;
                case Severity_log::E_ERROR:
                    std::cerr << "[ERROR] " << msg << std::endl;
                    break;
                case Severity_log::E_WARNING:
                    std::cerr << "[WARNING] " << msg << std::endl;
                    break;
                case Severity_log::E_INFO:
                    std::cout << "[INFO] " << msg << std::endl;
                    break;
                default:
                    break;
                }
            }
        }

        // product of all elements in a vector (used for tensor sizes)
        template <typename T>
        T VectorProduct(const std::vector<T> &v)
        {
            return std::accumulate(v.begin(), v.end(), static_cast<T>(1), std::multiplies<T>());
        };

        int _netWidth = 48;  // ONNX-net-input-width
        int _netHeight = 48; // ONNX-net-input-height

        int _batchSize = 1;           // set this for multi-batch inference
        bool _isDynamicShape = false; // whether the ONNX model has a dynamic input shape

        // ONNXRUNTIME
        Ort::Env _OrtEnv = Ort::Env(OrtLoggingLevel::ORT_LOGGING_LEVEL_ERROR, "Emotion"); // ORT environment / logger id
        Ort::SessionOptions _OrtSessionOptions = Ort::SessionOptions();

        std::shared_ptr<Ort::Session> _OrtSession;
        Ort::MemoryInfo _OrtMemoryInfo;
#if ORT_API_VERSION < ORT_OLD_VISON
        char *_inputName, *_output_name0;
#else
        std::shared_ptr<char> _inputName, _output_name0;
#endif

        std::vector<char *> _inputNodeNames;  // input node names
        std::vector<char *> _outputNodeNames; // output node names

        size_t _inputNodesNum = 0;  // number of input nodes
        size_t _outputNodesNum = 0; // number of output nodes

        ONNXTensorElementDataType _inputNodeDataType; // element data types
        ONNXTensorElementDataType _outputNodeDataType;
        std::vector<int64_t> _inputTensorShape; // input tensor shape

        std::vector<int64_t> _outputTensorShape;

        Severity_log _reportableSeverity = Severity_log::E_ERROR;
    };
}

Complete project code
