// CasicBioRecNew/casic/face/CasicFaceInterface.cpp
#include <QString>
#include <QElapsedTimer>
#include "CasicFaceInterface.h"
#include "utils/UtilInclude.h"

casic::face::CasicFaceInterface::CasicFaceInterface()
{
    // Build OpenCV's built-in face classifier (lazy init now lives in faceDetectByCVCascade)
//    if (this->cascade == nullptr) {
//        this->cascade = new cv::CascadeClassifier();
//        this->cascade->load(cvFaceCascadeName);

//        LOG(DEBUG) << "Built OpenCV's built-in face classifier";
//    }
}


casic::face::CasicFaceInterface::~CasicFaceInterface()
{
    if (this->detector != nullptr) {
        delete this->detector;
        this->detector = nullptr;
    }

    if (this->marker != nullptr) {
        delete this->marker;
        this->marker = nullptr;
    }

    if (this->poseEx != nullptr) {
        delete this->poseEx;
        this->poseEx = nullptr;
    }

    if (this->processor != nullptr) {
        delete this->processor;
        this->processor = nullptr;
    }

    if (this->recognizer != nullptr) {
        delete this->recognizer;
        this->recognizer = nullptr;
    }

    if (this->cascade != nullptr) {
        delete this->cascade;
        this->cascade = nullptr;
    }

    if (this->eyeCascade != nullptr) {
        delete this->eyeCascade;
        this->eyeCascade = nullptr;
    }

    if (this->eyeGlassesCascade != nullptr) {
        delete this->eyeGlassesCascade;
        this->eyeGlassesCascade = nullptr;
    }
}

void casic::face::CasicFaceInterface::setDetectorModelPath(std::string detectorModelPath)
{
    this->detectorModelPath = detectorModelPath;
}

void casic::face::CasicFaceInterface::setMarkPts5ModelPath(std::string markPts5ModelPath)
{
    this->markPts5ModelPath = markPts5ModelPath;
}

void casic::face::CasicFaceInterface::setPoseModelPath(std::string poseModelPath)
{
    this->poseModelPath = poseModelPath;
}

void casic::face::CasicFaceInterface::setFas1stModelPath(std::string fas1stModelPath)
{
    this->fas1stModelPath = fas1stModelPath;
}

void casic::face::CasicFaceInterface::setFas2ndModelPath(std::string fas2ndModelPath)
{
    this->fas2ndModelPath = fas2ndModelPath;
}

void casic::face::CasicFaceInterface::setRecognizerModelPath(std::string recognizerModelPath)
{
    this->recognizerModelPath = recognizerModelPath;
}

void casic::face::CasicFaceInterface::setAntiThreshold(float clarity, float reality)
{
    this->clarity = clarity;
    this->reality = reality;
    if (this->processor != nullptr)
    {
        this->processor->SetThreshold(clarity, reality);
    }
}


CasicFaceInfo casic::face::CasicFaceInterface::faceDetect(cv::Mat frame)
{
    SeetaImageData image;
    image.height = frame.rows;
    image.width = frame.cols;
    image.channels = frame.channels();
    image.data = frame.data;

    // Lazily build the face detection and landmark models on first use
    if (this->detector == nullptr) {
        seeta::ModelSetting msd; // face detector model settings
        msd.set_device(this->device);
        msd.set_id(this->deviceId);
        msd.append(this->detectorModelPath);

        this->detector = new seeta::FaceDetector(msd);

        seeta::ModelSetting msm; // face landmarker model settings
        msm.set_device(this->device);
        msm.set_id(this->deviceId);
        msm.append(this->markPts5ModelPath);

        this->marker = new seeta::FaceLandmarker(msm);
    }

    QElapsedTimer timer;
    timer.start();

    // ★ Run seeta's detect algorithm on the frame
    SeetaFaceInfoArray faces = this->detector->detect(image);

    if (faces.size != 0)
    {
        LOG_DEBUG(QString("face detection [tm: %1 ms][count: %2][rect: (%3,%4), (%5,%6)][size: (%7,%8)]")
                  .arg(timer.elapsed()).arg(faces.size)
                  .arg(faces.data[0].pos.x).arg(faces.data[0].pos.y)
                  .arg(faces.data[0].pos.x + faces.data[0].pos.width).arg(faces.data[0].pos.y + faces.data[0].pos.height)
                  .arg(faces.data[0].pos.width).arg(faces.data[0].pos.height).toStdString());
    }

    CasicFaceInfo faceInfo;
    if (faces.size == 0) // no face found; return immediately
    {
        faceInfo.hasFace = false;
        faceInfo.data = image;
        faceInfo.matData = frame;
        return faceInfo;
    }

    // A face was found
    faceInfo.hasFace = true;
    faceInfo.data = image;
    faceInfo.matData = frame;
    faceInfo.face = faces.data[0]; // use the first face by default; the detector returns faces sorted by confidence
    faceInfo.points = std::vector<SeetaPointF>(this->marker->number());
    faceInfo.faceRecTL = new int[2] {(int) faces.data[0].pos.x, (int) faces.data[0].pos.y};
    faceInfo.faceRecRB = new int[2] {(int) (faces.data[0].pos.x + faces.data[0].pos.width), (int) (faces.data[0].pos.y + faces.data[0].pos.height)};

    // ★ Run seeta's mark algorithm to locate the five facial landmarks
    this->marker->mark(image, faceInfo.face.pos, faceInfo.points.data());

    return faceInfo;
}
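
/* Usage sketch (illustrative only): a minimal detect-and-mark call. The model
   file names below are hypothetical placeholders; substitute whatever paths
   your deployment actually ships.

       casic::face::CasicFaceInterface iface;
       iface.setDetectorModelPath("models/face_detector.csta");        // hypothetical path
       iface.setMarkPts5ModelPath("models/face_landmarker_pts5.csta"); // hypothetical path

       cv::Mat frame = cv::imread("sample.jpg");   // expects an 8-bit BGR image
       CasicFaceInfo info = iface.faceDetect(frame);
       if (info.hasFace) {
           // info.face.pos holds the highest-confidence face rectangle,
           // info.points the five landmarks filled in by the marker.
       }
*/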

CasicFaceInfo casic::face::CasicFaceInterface::faceQuality(CasicFaceInfo faceInfo)
{
    if (faceInfo.hasFace == true)
    {
        SeetaImageData image = faceInfo.data;
        auto &face = faceInfo.face.pos;
        auto points = faceInfo.points.data();

        QElapsedTimer timer;
        timer.start();

        // Brightness assessment
        seeta::QualityOfBrightness qBright;
        seeta::QualityResult brightResult = qBright.check(image, face, points, 5);

        LOG_DEBUG(QString("brightness check [tm: %1 ms][bright: %2][score: %3]").arg(timer.elapsed()).arg(brightResult.level).arg(brightResult.score).toStdString());

        if (brightResult.level != seeta::QualityLevel::HIGH)
        {
            // Brightness check failed; return immediately
            faceInfo.quality = brightResult;
            return faceInfo;
        }

        timer.restart();

        // Clarity assessment
        seeta::QualityOfClarity qClarity;
        seeta::QualityResult clarityResult = qClarity.check(image, face, points, 5);

        LOG_DEBUG(QString("clarity check [tm: %1 ms][clarity: %2]").arg(timer.elapsed()).arg(clarityResult.level).toStdString());

        if (clarityResult.level != seeta::QualityLevel::HIGH)
        {
            // Clarity too low; return immediately
            faceInfo.quality = clarityResult;
            return faceInfo;
        }

/*
        timer.restart();

        // Integrity (completeness) assessment
        seeta::QualityOfIntegrity qIntegrity;
        seeta::QualityResult integrityResult = qIntegrity.check(image, face, points, 5);
        LOG(DEBUG) << "integrity check"
                   << QString("[tm: %1 ms][integrity: %2]").arg(timer.elapsed()).arg(integrityResult.level).toStdString();

        if (integrityResult.level != seeta::QualityLevel::HIGH)
        {
            // Integrity too low; return immediately
            faceInfo.quality = integrityResult;
            return faceInfo;
        }
*/
        timer.restart();

        // Resolution assessment
        seeta::QualityOfResolution qReso;
        seeta::QualityResult resoResult = qReso.check(image, face, points, 5);
        LOG_DEBUG(QString("resolution check [tm: %1 ms][reso: %2]").arg(timer.elapsed()).arg(resoResult.level).toStdString());

        if (resoResult.level != seeta::QualityLevel::HIGH)
        {
            // Resolution too low; return immediately
            faceInfo.quality = resoResult;
            return faceInfo;
        }

        timer.restart();

        // Pose assessment (deep-learning based)
        if (this->poseEx == nullptr) {
            seeta::ModelSetting msp; // face pose model settings
            msp.set_device(this->device);
            msp.set_id(this->deviceId);
            msp.append(this->poseModelPath);

            this->poseEx = new seeta::QualityOfPoseEx(msp);

            // Default thresholds for the three rotation axes
            poseEx->set(seeta::QualityOfPoseEx::YAW_LOW_THRESHOLD, 25);
            poseEx->set(seeta::QualityOfPoseEx::YAW_HIGH_THRESHOLD, 10);

            poseEx->set(seeta::QualityOfPoseEx::PITCH_LOW_THRESHOLD, 20);
            poseEx->set(seeta::QualityOfPoseEx::PITCH_HIGH_THRESHOLD, 10);

            poseEx->set(seeta::QualityOfPoseEx::ROLL_LOW_THRESHOLD, 33.33f);
            poseEx->set(seeta::QualityOfPoseEx::ROLL_HIGH_THRESHOLD, 16.67f);
        }

        seeta::QualityResult poseResult = poseEx->check(image, face, points, 5);

        LOG_DEBUG(QString("pose check [tm: %1 ms][pose: %2][score: %3]").arg(timer.elapsed()).arg(poseResult.level).arg(poseResult.score).toStdString());

        if (poseResult.level != seeta::QualityLevel::HIGH)
        {
            // Pose check failed; return immediately
            faceInfo.quality = poseResult;
            return faceInfo;
        } else
        {
            // Every enabled quality check returned HIGH; mark the overall result as HIGH
            faceInfo.quality.level = seeta::QualityLevel::HIGH;
        }
    }

    return faceInfo;
}
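
/* Usage sketch (illustrative only): faceQuality() returns at the first check
   that is not HIGH, so faceInfo.quality reports the failing dimension's result.
   A caller can gate enrollment on the overall level like this:

       CasicFaceInfo info = iface.faceQuality(iface.faceDetect(frame));
       if (info.hasFace && info.quality.level == seeta::QualityLevel::HIGH) {
           // brightness, clarity, resolution and pose all passed
       }
*/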

CasicFaceInfo casic::face::CasicFaceInterface::faceAntiSpoofing(CasicFaceInfo faceInfo)
{
    if (faceInfo.hasFace == true)
    {
        if (this->processor == nullptr)
        {
            seeta::ModelSetting msa; // anti-spoofing (liveness) model settings
            msa.set_device(this->device);
            msa.set_id(this->deviceId);
            msa.append(this->fas1stModelPath);
//            msa.append(this->fas2ndModelPath); // for speed, use only the local (first-stage) anti-spoofing model

            this->processor = new seeta::FaceAntiSpoofing(msa);
            this->processor->SetThreshold(this->clarity, this->reality);
        }

        SeetaImageData image = faceInfo.data;
        auto &face = faceInfo.face.pos;
        auto points = faceInfo.points.data();

        QElapsedTimer timer;
        timer.start();

        // ★ Run the liveness (anti-spoofing) prediction
        auto status = this->processor->Predict(image, face, points);
        faceInfo.antiStatus = status;

        processor->GetPreFrameScore(&faceInfo.antiClarity, &faceInfo.antiReality);

        LOG_DEBUG(QString("liveness check [tm: %1 ms][anti: %2][clarity: %3, reality: %4]").arg(timer.elapsed()).arg(status).arg(faceInfo.antiClarity).arg(faceInfo.antiReality).toStdString());

    }

    return faceInfo;
}
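
/* Usage sketch (illustrative only), assuming the SeetaFace anti-spoofing
   status enum (REAL / SPOOF / FUZZY / DETECTING). The per-frame scores are
   exposed via antiClarity and antiReality:

       CasicFaceInfo info = iface.faceAntiSpoofing(detected);
       if (info.antiStatus == seeta::FaceAntiSpoofing::REAL) {
           // live face; info.antiClarity / info.antiReality carry the raw scores
       }
*/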

CasicFaceInfo casic::face::CasicFaceInterface::faceFeatureExtract(CasicFaceInfo faceInfo)
{
    if (faceInfo.hasFace == true)
    {
        float * featureTemp;

        if (this->recognizer == nullptr)
        {
            seeta::ModelSetting msr; // face recognizer model settings
            msr.set_device(this->device);
            msr.set_id(this->deviceId);
            msr.append(this->recognizerModelPath);

            this->recognizer = new seeta::FaceRecognizer(msr);
        }

        featureTemp = new float[this->recognizer->GetExtractFeatureSize()];

        SeetaImageData image = faceInfo.data;
        auto points = faceInfo.points.data();

        this->recognizer->Extract(image, points, featureTemp);

        faceInfo.feature = featureTemp; // the caller owns this buffer and must delete[] it
    }

    return faceInfo;
}

float casic::face::CasicFaceInterface::faceSimCalculate(float* feature, float* otherFeature)
{
    if (this->recognizer == nullptr)
    {
        seeta::ModelSetting msr; // face recognizer model settings
        msr.set_device(this->device);
        msr.set_id(this->deviceId);
        msr.append(this->recognizerModelPath);

        this->recognizer = new seeta::FaceRecognizer(msr);
    }

    float sim = this->recognizer->CalculateSimilarity(feature, otherFeature);
    return sim;
}
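
/* Usage sketch (illustrative only): end-to-end feature comparison. Note that
   faceFeatureExtract() hands back a raw float buffer in faceInfo.feature that
   the caller owns:

       CasicFaceInfo a = iface.faceFeatureExtract(iface.faceDetect(frameA));
       CasicFaceInfo b = iface.faceFeatureExtract(iface.faceDetect(frameB));
       if (a.hasFace && b.hasFace) {
           float sim = iface.faceSimCalculate(a.feature, b.feature);
           // higher means more likely the same person; the accept threshold
           // is model- and deployment-specific
           delete[] a.feature;
           delete[] b.feature;
       }
*/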


cv::Rect casic::face::CasicFaceInterface::faceDetectByCVCascade(cv::Mat frame)
{
    // Lazily build OpenCV's built-in face classifier
    if (this->cascade == nullptr) {
        this->cascade = new cv::CascadeClassifier();
        this->cascade->load(cvFaceCascadeName);
    }

    QElapsedTimer timer;
    timer.start();

    std::vector<cv::Rect> rect;
    cv::Size minRectSize(minFaceSize, minFaceSize);
    cv::Size maxRectSize(maxFaceSize, maxFaceSize);

    // ★ Invoke the cascade classifier
    cascade->detectMultiScale(frame, rect, 1.1, 3, cv::CASCADE_FIND_BIGGEST_OBJECT, minRectSize, maxRectSize);

    if (rect.empty())
    {
        LOG_TRACE(QString("cascade face detection [tm: %1 ms][0]").arg(timer.elapsed()).toStdString());

        return cv::Rect(0, 0, 0, 0);
    }

    LOG_TRACE(QString("cascade face detection [tm: %1 ms][%2, %3]").arg(timer.elapsed()).arg(rect.at(0).width).arg(rect.at(0).height).toStdString());

    return rect.at(0);
}

cv::Rect casic::face::CasicFaceInterface::faceDetectByCVCascade(cv::Mat frame, int minFaceSize)
{
    // Lazily build OpenCV's built-in face classifier
    if (this->cascade == nullptr) {
        this->cascade = new cv::CascadeClassifier();
        this->cascade->load(cvFaceCascadeName);
    }

    std::vector<cv::Rect> rect;
    cv::Size minRectSize(minFaceSize, minFaceSize);
    cv::Size maxRectSize(maxFaceSize, maxFaceSize);

    QElapsedTimer timer;
    timer.start();

    // ★ Invoke the cascade classifier
    cascade->detectMultiScale(frame, rect, 1.1, 3, cv::CASCADE_FIND_BIGGEST_OBJECT, minRectSize, maxRectSize);

    if (rect.empty())
    {
        return cv::Rect(0, 0, 0, 0);
    }

    LOG_DEBUG(QString("cascade face detection [tm: %1 ms][%2, %3]").arg(timer.elapsed()).arg(rect.at(0).width).arg(rect.at(0).height).toStdString());
    return rect.at(0);
}

cv::Rect casic::face::CasicFaceInterface::eyeDetectByCVCascade(cv::Mat frame)
{
    // Lazily build OpenCV's built-in eye classifier
    if (this->eyeCascade == nullptr)
    {
        this->eyeCascade = new cv::CascadeClassifier();
        this->eyeCascade->load(cvEyeCascadeName);
    }

    QElapsedTimer timer;
    timer.start();

    std::vector<cv::Rect> rect;
    cv::Size minRectSize(minEyeSize, minEyeSize);
    cv::Size maxRectSize(maxEyeSize, maxEyeSize);

    // ★ Invoke the cascade classifier
    eyeCascade->detectMultiScale(frame, rect, 1.1, 3, cv::CASCADE_FIND_BIGGEST_OBJECT, minRectSize, maxRectSize);

    if (rect.empty())
    {
        LOG_DEBUG(QString("cascade eye detection [tm: %1 ms][0]").arg(timer.elapsed()).toStdString());
        return cv::Rect(0, 0, 0, 0);
    }

    LOG_DEBUG(QString("cascade eye detection [tm: %1 ms][%2, %3]").arg(timer.elapsed()).arg(rect.at(0).width).arg(rect.at(0).height).toStdString());

    return rect.at(0);
}

cv::Rect casic::face::CasicFaceInterface::eyeGlassesDetectByCVCascade(cv::Mat frame)
{
    // Lazily build OpenCV's built-in eye classifier for subjects wearing glasses
    if (this->eyeGlassesCascade == nullptr)
    {
        this->eyeGlassesCascade = new cv::CascadeClassifier();
        this->eyeGlassesCascade->load(cvEyeGlassesCascadeName);
    }

    QElapsedTimer timer;
    timer.start();

    std::vector<cv::Rect> rect;
    cv::Size minRectSize(minEyeSize, minEyeSize);
    cv::Size maxRectSize(maxEyeSize, maxEyeSize);

    // ★ Invoke the cascade classifier
    eyeGlassesCascade->detectMultiScale(frame, rect, 1.1, 3, cv::CASCADE_FIND_BIGGEST_OBJECT, minRectSize, maxRectSize);

    if (rect.empty())
    {
        LOG_DEBUG(QString("cascade eye detection [with glasses][tm: %1 ms][0]").arg(timer.elapsed()).toStdString());
        return cv::Rect(0, 0, 0, 0);
    }

    LOG_DEBUG(QString("cascade eye detection [with glasses][tm: %1 ms][%2, %3]").arg(timer.elapsed()).arg(rect.at(0).width).arg(rect.at(0).height).toStdString());

    return rect.at(0);
}

std::vector<cv::Rect> casic::face::CasicFaceInterface::eyeDetectByCVCascade(cv::Mat frame, int minEyeSize)
{
    // Lazily build OpenCV's built-in eye classifier
    if (this->eyeCascade == nullptr)
    {
        this->eyeCascade = new cv::CascadeClassifier();
        this->eyeCascade->load(cvEyeCascadeName);
    }

    std::vector<cv::Rect> rectVec;
    cv::Size minRectSize(minEyeSize, minEyeSize);
    cv::Size maxRectSize(maxEyeSize, maxEyeSize);

    // ★ Invoke the cascade classifier
    eyeCascade->detectMultiScale(frame, rectVec, 1.1, 3, cv::CASCADE_FIND_BIGGEST_OBJECT, minRectSize, maxRectSize);

    return rectVec;
}
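
/* Usage sketch (illustrative only): the OpenCV cascade helpers return the
   single biggest detection (CASCADE_FIND_BIGGEST_OBJECT), or a zero rect when
   nothing is found. Cascades generally work best on a grayscale, histogram-
   equalized frame:

       cv::Mat gray;
       cv::cvtColor(frame, gray, cv::COLOR_BGR2GRAY);
       cv::equalizeHist(gray, gray);
       cv::Rect face = iface.faceDetectByCVCascade(gray);
       if (face.width > 0) {
           cv::Rect eye = iface.eyeDetectByCVCascade(gray(face)); // search for eyes inside the face
       }
*/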

void casic::face::CasicFaceInterface::setMinFaceSize(int minFaceSize)
{
    this->minFaceSize = minFaceSize;
}
void casic::face::CasicFaceInterface::setMinEyeSize(int minEyeSize)
{
    this->minEyeSize = minEyeSize;
}