

Tracking All Faces in an RTMP Live Stream with ArcSoft Face Recognition (C#)

Preface

You have probably heard of a few well-known examples: at Eason Chan's concert tour, facial recognition at the security and ticket-check gates identified and caught a number of fugitives, earning the system the nickname "fugitive catcher"; pedestrians who jaywalk against red lights are captured by face recognition and displayed on large screens as a warning; events use face-based sign-in to count attendance in real time; and so on. Here I want to build something similar on a live TV stream: track every face in the picture and capture the target person I am looking for.

Approach and Workflow

Using ArcSoft face recognition, every frame of the live stream is run through face detection to obtain the information of all faces in the image. You can then add a photo of the target person and compare the target's face feature against every feature in the frame's face list. Whenever the target person is matched, the live frame is captured. The overall workflow is: detect all faces in each frame, extract each face's feature, compare against the target feature, and capture the frame on a match.
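
Before any of the calls below can work, the engine handle pImageEngine has to be activated and initialized. The following is only a rough sketch based on the ArcSoft V2 C# demo's ASFFunctions wrapper; the activation call, the constants, and the parameter values (orientation priority, scale, max face count) are assumptions and should be checked against your SDK version.

IntPtr pImageEngine = IntPtr.Zero;

// Activate the SDK once per machine; APP_ID / SDK_KEY come from the ArcSoft developer console
// (in some SDK versions the call is named ASFActivation instead – check your wrapper)
int retCode = ASFFunctions.ASFOnlineActivation(APP_ID, SDK_KEY);

// Combine the capabilities used in this post: detection, recognition (feature extraction), age and gender
int combinedMask = FaceEngineMask.ASF_FACE_DETECT | FaceEngineMask.ASF_FACERECOGNITION
                 | FaceEngineMask.ASF_AGE | FaceEngineMask.ASF_GENDER;

// 0x0 = image mode (0xFFFFFFFF would be video mode); 0x5 = detect all orientations;
// scale 16 and up to 25 faces are typical demo values – all of these are assumptions
retCode = ASFFunctions.ASFInitEngine(0x0, 0x5, 16, 25, combinedMask, ref pImageEngine);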

Project Structure

For the playback source, you can search online for a live-TV RTMP address and play it in the program:

private void PlayVideo()
{
    // Open the RTMP stream with OpenCvSharp
    videoCapture = new VideoCapture(rtmp);
    if (videoCapture.IsOpened())
    {
        // Record the basic stream information
        videoInfo.Filename = rtmp;
        videoInfo.Width = (int)videoCapture.FrameWidth;
        videoInfo.Height = (int)videoCapture.FrameHeight;
        videoInfo.Fps = (int)videoCapture.Fps;

        // Pull frames at roughly the stream's frame rate (fall back to 300 ms if the FPS is unknown)
        myTimer.Interval = videoInfo.Fps == 0 ? 300 : 1000 / videoInfo.Fps;
        IsStartPlay = true;
        myTimer.Start();
    }
    else
    {
        MessageBox.Show("Failed to open the video source");
    }
}

private void MyTimer_Elapsed(object sender, System.Timers.ElapsedEventArgs e)
{
    try
    {
        if (IsStartPlay)
        {
            lock (LockHelper)
            {
                // Grab the most recent frame from the stream
                var frame = videoCapture.RetrieveMat();
                if (frame != null && !frame.Empty())
                {
                    if (frame.Width == videoInfo.Width && frame.Height == videoInfo.Height)
                        this.SetVideoCapture(frame);
                    else
                        LogHelper.Log("bad frame");
                }
            }
        }
    }
    catch (Exception ex)
    {
        LogHelper.Log(ex.Message);
    }
}
Bitmap btm = null;
private void SetVideoCapture(Mat frame) // render the captured frame onto the PictureBox
{
    try
    {
        btm = OpenCvSharp.Extensions.BitmapConverter.ToBitmap(frame);
        pic_Video.Image = btm;
    }
    catch (Exception ex)
    {
        LogHelper.Log(ex.Message);
    }
}
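
For reference, the fields and timer wiring used by the methods above might look like the sketch below; the form name, the RTMP address, and the exact shape of VideoInfo are assumptions.

private VideoCapture videoCapture;                                   // OpenCvSharp capture for the RTMP stream
private VideoInfo videoInfo = new VideoInfo();                       // simple holder for Filename/Width/Height/Fps
private readonly System.Timers.Timer myTimer = new System.Timers.Timer();
private readonly object LockHelper = new object();                   // guards frame access between timer ticks
private volatile bool IsStartPlay = false;
private string rtmp = "rtmp://<your-live-stream-address>";           // placeholder, replace with a real RTMP URL

public MainForm()                                                    // form name is an assumption
{
    InitializeComponent();
    myTimer.Elapsed += MyTimer_Elapsed;                              // pull and render one frame per tick
    pic_Video.Paint += Pic_Video_Paint;                              // see the Paint handler sketch below
}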

The code above uses OpenCV's VideoCapture class to read the video stream and then renders each frame onto a PictureBox control. Face detection and comparison are performed in the PictureBox's Paint event.
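
A Paint handler along these lines (the handler name is an assumption) simply hands the latest frame and the Graphics context to the comparison function below:

private void Pic_Video_Paint(object sender, PaintEventArgs e)
{
    // btm holds the most recent frame converted in SetVideoCapture
    if (btm != null)
    {
        CompareImgWithIDImg(btm, e);
    }
}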

/// <summary>
/// Comparison function: compares the faces in each captured frame with the target person's photo
/// </summary>
/// <param name="bitmap"></param>
/// <param name="e"></param>
private void CompareImgWithIDImg(Bitmap bitmap, PaintEventArgs e)
{
    if (bitmap != null)
    {
        //Make sure only one frame is processed at a time, to avoid UI freezes and extra memory pressure
        if (isLock == false)
        {
            isLock = true;
            Graphics g = e.Graphics;
            float offsetX = (pic_Video.Width * 1f / bitmap.Width);
            float offsetY = (pic_Video.Height * 1f / bitmap.Height);
            //Get the face information list from the Bitmap
            List<FaceInfoModel> list = FaceUtil.GetFaceInfos(pImageEngine, bitmap);
            foreach (FaceInfoModel sface in list)
            {
                //Extract features and compare on a background thread, otherwise the UI becomes sluggish
                ThreadPool.QueueUserWorkItem(new WaitCallback(delegate
                {
                    try
                    {
                        //Compare this face's feature with the target person's feature
                        float similarity = CompareTwoFeatures(sface.feature, imageTemp);
                        if (similarity > threshold)
                        {
                            this.pic_cutImg.Image = bitmap;
                            this.Invoke((Action)(() =>
                            {
                                this.lbl_simiValue.Text = similarity.ToString();
                            }));
                        }
                    }
                    catch (Exception ex)
                    {
                        Console.WriteLine(ex.Message);
                    }
                }));

                MRECT rect = sface.faceRect;
                float x = rect.left * offsetX;
                float width = rect.right * offsetX - x;
                float y = rect.top * offsetY;
                float height = rect.bottom * offsetY - y;
                //Draw a rectangle around the face based on its Rect
                g.DrawRectangle(pen, x, y, width, height);
                trackUnit.message = "Age: " + sface.age.ToString() + "\r\n" + "Gender: " + (sface.gender == 0 ? "Male" : "Female");
                g.DrawString(trackUnit.message, font, brush, x, y + 5);
            }
            isLock = false;
        }
    }
}
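
If the matched frame should also be written to disk rather than only shown in pic_cutImg, something like the following can be dropped into the similarity > threshold branch above; the output folder and file naming are just examples, and using System.IO is required.

string captureDir = Path.Combine(AppDomain.CurrentDomain.BaseDirectory, "captures");
Directory.CreateDirectory(captureDir);                               // creates the folder if it does not exist
string fileName = $"match_{DateTime.Now:yyyyMMdd_HHmmss_fff}.jpg";   // timestamped file name
bitmap.Save(Path.Combine(captureDir, fileName), System.Drawing.Imaging.ImageFormat.Jpeg);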

A single image may contain several faces, so we use a FaceInfoModel class to hold the information for each face.

public class FaceInfoModel
{
    /// <summary>
    /// Age
    /// </summary>
    public int age { get; set; }
    /// <summary>
    /// Gender (0 = male, 1 = female in the ArcSoft SDK)
    /// </summary>
    public int gender { get; set; }
    /// <summary>
    /// 3D face angle
    /// </summary>
    public ASF_Face3DAngle face3dAngle { get; set; }
    /// <summary>
    /// Face rectangle
    /// </summary>
    public MRECT faceRect { get; set; }
    /// <summary>
    /// Face orientation
    /// </summary>
    public int faceOrient { get; set; }
    /// <summary>
    /// Single-face feature
    /// </summary>
    public IntPtr feature { get; set; }
}

A multi-face model class stores the list of single-face information:

public class MultiFaceModel : IDisposable
{
    /// <summary>
    /// Multi-face information
    /// </summary>
    public ASF_MultiFaceInfo MultiFaceInfo { get; private set; }

    /// <summary>
    /// List of single-face information
    /// </summary>
    public List<ASF_SingleFaceInfo> FaceInfoList { get; private set; }

    /// <summary>
    /// Builds the single-face list from the multi-face info
    /// </summary>
    /// <param name="multiFaceInfo"></param>
    public MultiFaceModel(ASF_MultiFaceInfo multiFaceInfo) 
    {
        this.MultiFaceInfo = multiFaceInfo;
        this.FaceInfoList = new List<ASF_SingleFaceInfo>();
        FaceInfoList = PtrToMultiFaceArray(multiFaceInfo.faceRects, multiFaceInfo.faceOrients, multiFaceInfo.faceNum);
    }
    /// <summary>
    /// Converts the native pointers into a list of single-face structures
    /// </summary>
    /// <param name="faceRect"></param>
    /// <param name="faceOrient"></param>
    /// <param name="length"></param>
    /// <returns></returns>
    private List<ASF_SingleFaceInfo> PtrToMultiFaceArray(IntPtr faceRect, IntPtr faceOrient, int length)
    {
        List<ASF_SingleFaceInfo> FaceInfoList = new List<ASF_SingleFaceInfo>();
        var size = Marshal.SizeOf(typeof(int));
        var sizer = Marshal.SizeOf(typeof(MRECT));

        for (var i = 0; i < length; i++)
        {
            ASF_SingleFaceInfo faceInfo = new ASF_SingleFaceInfo();

            MRECT rect = new MRECT();
            var iPtr = new IntPtr(faceRect.ToInt64() + i * sizer); // ToInt64 avoids overflow in 64-bit processes
            rect = (MRECT)Marshal.PtrToStructure(iPtr, typeof(MRECT));
            faceInfo.faceRect = rect;

            int orient = 0;
            iPtr = new IntPtr(faceOrient.ToInt64() + i * size);
            orient = (int)Marshal.PtrToStructure(iPtr, typeof(int));
            faceInfo.faceOrient = orient;
            FaceInfoList.Add(faceInfo);
        }
        return FaceInfoList;
    }

    public void Dispose()
    {
        Marshal.FreeCoTaskMem(MultiFaceInfo.faceRects);
        Marshal.FreeCoTaskMem(MultiFaceInfo.faceOrients);
    }
}

Then all the face information is collected and kept in a list for later use:

/// <summary>
/// Gets the list of face information from an image
/// </summary>
/// <param name="pEngine"></param>
/// <param name="bitmap"></param>
/// <returns></returns>
public static List<FaceInfoModel> GetFaceInfos(IntPtr pEngine, Image bitmap)
{
    List<FaceInfoModel> listRet = new List<FaceInfoModel>();
    try
    {
        List<int> AgeList = new List<int>();
        List<int> GenderList = new List<int>();
        //Detect the faces and get their bounding rectangles
        ASF_MultiFaceInfo multiFaceInfo = FaceUtil.DetectFace(pEngine, bitmap);
        MultiFaceModel multiFaceModel = new MultiFaceModel(multiFaceInfo);
        //Run age / gender processing on the detected faces
        ImageInfo imageInfo = ImageUtil.ReadBMP(bitmap);
        int retCode = ASFFunctions.ASFProcess(pEngine, imageInfo.width, imageInfo.height, imageInfo.format, imageInfo.imgData, ref multiFaceInfo, FaceEngineMask.ASF_AGE | FaceEngineMask.ASF_GENDER);
        //Get the age information
        ASF_AgeInfo ageInfo = new ASF_AgeInfo();
        retCode = ASFFunctions.ASFGetAge(pEngine, ref ageInfo);
        AgeList = ageInfo.PtrToAgeArray(ageInfo.ageArray, ageInfo.num);
        //Get the gender information
        ASF_GenderInfo genderInfo = new ASF_GenderInfo();
        retCode = ASFFunctions.ASFGetGender(pEngine, ref genderInfo);
        GenderList = genderInfo.PtrToGenderArray(genderInfo.genderArray, genderInfo.num);

        for (int i = 0; i < multiFaceInfo.faceNum; i++)
        {
            FaceInfoModel faceInfo = new FaceInfoModel();
            faceInfo.age = AgeList[i];
            faceInfo.gender = GenderList[i];
            faceInfo.faceRect = multiFaceModel.FaceInfoList[i].faceRect;
            faceInfo.feature = ExtractFeature(pEngine, bitmap, multiFaceModel.FaceInfoList[i]); //Extract this single face's feature
            faceInfo.faceOrient = multiFaceModel.FaceInfoList[i].faceOrient;
            listRet.Add(faceInfo);
        }
        return listRet; //Return the list of face information
    }
    catch
    {
        return listRet;
    }
}

Each face obtained from the list is marked with a rectangle on the frame, and the extracted information (age, gender) can be displayed alongside it. The next step is to pick a photo of the target person, use the SDK to extract the target's face feature as the comparison reference, and compare it one by one with the face features found in the video. When a face with a matching similarity is found, the corresponding video frame is displayed.
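
The imageTemp used by CompareTwoFeatures below is the target person's feature. One way to obtain it is to reuse the GetFaceInfos helper shown above on the target photo; the file path and the choice of simply taking the first detected face are assumptions.

Bitmap targetBitmap = new Bitmap(@"target.jpg");                      // path to the target person's photo (placeholder)
List<FaceInfoModel> targetFaces = FaceUtil.GetFaceInfos(pImageEngine, targetBitmap);
if (targetFaces.Count > 0)
{
    imageTemp = targetFaces[0].feature;                               // use the first detected face as the comparison reference
}
else
{
    MessageBox.Show("No face was detected in the target photo");
}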

/// <summary>
/// Compares two face features and returns their similarity
/// </summary>
/// <param name="feature1"></param>
/// <param name="feature2"></param>
/// <returns></returns>
private float CompareTwoFeatures(IntPtr feature1, IntPtr feature2)
{
    float similarity = 0.0f;
    //Call the face feature comparison API to compute the similarity
    ASFFunctions.ASFFaceFeatureCompare(pImageEngine, feature1, feature2, ref similarity);
    return similarity;
}
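
The similarity returned here is what CompareImgWithIDImg checks against the threshold field. The cut-off is application specific; ArcSoft's documentation generally suggests values around 0.8 for feature comparison, but it is worth tuning it against your own footage.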

Previously, only the largest of the detected faces was used as the comparison object, so only one face in the video could be framed. Now every detected face is marked, and each face's feature is stored in a list so it can be matched against the target face's feature.

With that, we have a rough implementation of tracking faces in a live stream and capturing the target person.

The source code has been pushed to GitHub.
