// FaceDetectManager.cs — webcam face detection, landmark tracking, and face-attribute analysis (Unity + OpenCV).
using Rect = OpenCVCompact.Rect;
using OpenCVCompact;
using System;
using UnityEngine;
using OpenCVForUnity.UnityUtils.Helper;
using System.Collections.Generic;
|
||
[RequireComponent(typeof(OpenCVCompact.WebCamTextureToMatHelper))]
|
||
public class FaceDetectManager : MonoBehaviour
|
||
{
|
||
//Related To DNN Model
|
||
dnn.DNNUtils dnnUtils;
|
||
|
||
//Image Capture
|
||
OpenCVCompact.WebCamTextureToMatHelper _webCamTextureToMatHelper;
|
||
Texture2D videoTexture;
|
||
private double _lndmrkEstScore;
|
||
private Mat mat4Process;
|
||
private Mat grayMat4Process;
|
||
private Mat mat4Display;
|
||
private Mat mat4DisplayTexture;
|
||
private Mat lndmrk;
|
||
private MatOfRect detectionResult;
|
||
private Rect detectRect;
|
||
int[] faceRect = new int[4];
|
||
bool webCamReady = false;
|
||
|
||
int webCamOrVideoOrImage = 0; //0 : webcam, 1 : video, 2 : image
|
||
int lndmrkMode = 1; //0: 51, 1: 84
|
||
int lndmrkLevel = 3;
|
||
bool kalmanOrNot = true;
|
||
|
||
float lndmrkEstScore;
|
||
|
||
private FaceDetectUI panel;
|
||
|
||
// Start is called before the first frame update
|
||
void Awake()
|
||
{
|
||
UIManager.Instance.LoadReset();
|
||
panel = UIManager.Instance.ShowPanel<FaceDetectUI>();
|
||
|
||
InitModel();
|
||
InitVariable();
|
||
|
||
_webCamTextureToMatHelper = gameObject.GetComponent<OpenCVCompact.WebCamTextureToMatHelper>();
|
||
_webCamTextureToMatHelper.Initialize();
|
||
}
|
||
|
||
private void OnDestroy()
|
||
{
|
||
if (mat4Process != null)
|
||
mat4Process.Dispose();
|
||
|
||
if (grayMat4Process != null)
|
||
grayMat4Process.Dispose();
|
||
|
||
//if (img4Thread != null)
|
||
// img4Thread.Dispose();
|
||
|
||
if (lndmrk != null)
|
||
lndmrk.Dispose();
|
||
|
||
//if (lndmrk4Thread != null)
|
||
// lndmrk4Thread.Dispose();
|
||
|
||
if (mat4Display != null)
|
||
mat4Display.Dispose();
|
||
|
||
if (mat4DisplayTexture != null)
|
||
mat4DisplayTexture.Dispose();
|
||
|
||
//if (probExp != null)
|
||
// probExp.Dispose();
|
||
|
||
_webCamTextureToMatHelper.Dispose();
|
||
}
|
||
|
||
private void InitVariable()
|
||
{
|
||
mat4Process = new Mat();
|
||
grayMat4Process = new Mat();
|
||
detectionResult = new MatOfRect();
|
||
detectRect = new Rect();
|
||
lndmrk = new Mat(2, 84, CvType.CV_32FC1, 0.0f);
|
||
|
||
InvokeRepeating("AnalyzingFace", 0f, 1f);
|
||
}
|
||
|
||
private bool InitModel()
|
||
{
|
||
dnnUtils = new dnn.DNNUtils();
|
||
string dnnLndmrkDetectModelFilePath;
|
||
string dnnFaceAttr7ModelFilePath;
|
||
string dnnFaceExpModelFilePath;
|
||
string multiTinyFaceDetectModelFilePath;
|
||
string YOLOV3DetectModelFilePath;
|
||
string YOLOV3DetectParamFilePath;
|
||
string YOLOV3DetectOpenCVModelFilePath;
|
||
string YOLOV3DetectOpenCVParamFilePath;
|
||
|
||
//Init Model File Path
|
||
#if (UNITY_ANDROID || UNITY_IOS) && !UNITY_EDITOR
|
||
dnnLndmrkDetectModelFilePath = Utils.getFilePath("FaceAnalyzer/face_lndmrk_detect_mobile.bin");
|
||
#else
|
||
dnnLndmrkDetectModelFilePath = Utils.getFilePath("FaceAnalyzer/face_lndmrk_detect.bin");
|
||
#endif
|
||
|
||
#if (UNITY_ANDROID || UNITY_IOS) && !UNITY_EDITOR
|
||
dnnFaceAttr7ModelFilePath = Utils.getFilePath("FaceAnalyzer/att_7_mobile.bin");
|
||
#else
|
||
dnnFaceAttr7ModelFilePath = Utils.getFilePath("FaceAnalyzer/att_7.bin");
|
||
#endif
|
||
|
||
#if (UNITY_ANDROID || UNITY_IOS) && !UNITY_EDITOR
|
||
dnnFaceExpModelFilePath = Utils.getFilePath("FaceAnalyzer/face_exp_mobile.bin");
|
||
#else
|
||
dnnFaceExpModelFilePath = Utils.getFilePath("FaceAnalyzer/face_exp.bin");
|
||
#endif
|
||
multiTinyFaceDetectModelFilePath = Utils.getFilePath("FaceAnalyzer/multi_tiny_face_detect_mobile.bin");
|
||
YOLOV3DetectModelFilePath = Utils.getFilePath("FaceAnalyzer/yolov3.bin");
|
||
YOLOV3DetectParamFilePath = Utils.getFilePath("FaceAnalyzer/yolov3.param");
|
||
|
||
YOLOV3DetectOpenCVModelFilePath = Utils.getFilePath("FaceAnalyzer/yolov3-tiny.weights");
|
||
YOLOV3DetectOpenCVParamFilePath = Utils.getFilePath("FaceAnalyzer/yolov3-tiny.cfg");
|
||
|
||
bool initFaceLndmrkRes;
|
||
|
||
initFaceLndmrkRes = dnnUtils.InitFaceLandmarkDetect(dnnLndmrkDetectModelFilePath);
|
||
if (initFaceLndmrkRes == false)
|
||
{
|
||
Debug.LogError(dnnLndmrkDetectModelFilePath + " file is not loaded.");
|
||
Debug.LogError(dnnLndmrkDetectModelFilePath + " file is not existed on StreamingAssets Folder. Please copy from <20><>Assets/FaceAnalyzer/StreamingAssets/<2F><> to <20><>Assets/StreamingAssets/<2F><> folder.");
|
||
}
|
||
|
||
dnnUtils.InitHeadPoseEstimation();
|
||
|
||
int initFaceAttrRes = dnnUtils.InitFaceAttribNet_7(dnnFaceAttr7ModelFilePath);
|
||
if (initFaceAttrRes <= 0)
|
||
{
|
||
Debug.LogError(dnnFaceAttr7ModelFilePath + " file is not loaded.");
|
||
Debug.LogError(dnnFaceAttr7ModelFilePath + " file is not existed on StreamingAssets Folder. Please copy from <20><>Assets/FaceAnalyzer/StreamingAssets/<2F><> to <20><>Assets/StreamingAssets/<2F><> folder.");
|
||
}
|
||
|
||
int initFaceExpRes = dnnUtils.InitFaceExpressionNet_7(dnnFaceExpModelFilePath);
|
||
if (initFaceExpRes <= 0)
|
||
{
|
||
Debug.LogError(dnnFaceExpModelFilePath + " file is not loaded.");
|
||
Debug.LogError(dnnFaceExpModelFilePath + " file is not existed on StreamingAssets Folder. Please copy from <20><>Assets/FaceAnalyzer/StreamingAssets/<2F><> to <20><>Assets/StreamingAssets/<2F><> folder.");
|
||
}
|
||
|
||
int initMultiTinyFaceDetectRes = dnnUtils.InitMultiTinyFaceDetector(multiTinyFaceDetectModelFilePath);
|
||
if (initMultiTinyFaceDetectRes <= 0)
|
||
{
|
||
Debug.LogError(multiTinyFaceDetectModelFilePath + " file is not loaded.");
|
||
Debug.LogError(multiTinyFaceDetectModelFilePath + " file is not existed on StreamingAssets Folder. Please copy from <20><>Assets/FaceAnalyzer/StreamingAssets/<2F><> to <20><>Assets/StreamingAssets/<2F><> folder.");
|
||
}
|
||
|
||
int initYOLOV3DetectRes = dnnUtils.InitYOLOV3Detector(YOLOV3DetectModelFilePath, YOLOV3DetectParamFilePath);
|
||
if (initYOLOV3DetectRes == -1)
|
||
{
|
||
Debug.LogError(YOLOV3DetectModelFilePath + " file is not loaded.");
|
||
Debug.LogError(YOLOV3DetectModelFilePath + " file is not existed on StreamingAssets Folder. Please copy from <20><>Assets/FaceAnalyzer/StreamingAssets/<2F><> to <20><>Assets/StreamingAssets/<2F><> folder.");
|
||
}
|
||
else if (initYOLOV3DetectRes == -2)
|
||
{
|
||
Debug.LogError(YOLOV3DetectParamFilePath + " file is not loaded.");
|
||
Debug.LogError(YOLOV3DetectParamFilePath + " file is not existed on StreamingAssets Folder. Please copy from <20><>Assets/FaceAnalyzer/StreamingAssets/<2F><> to <20><>Assets/StreamingAssets/<2F><> folder.");
|
||
}
|
||
|
||
dnnUtils.InitLabels_7();
|
||
dnnUtils.InitExpLabels_7();
|
||
dnnUtils.InitObjectLabels_21();
|
||
dnnUtils.InitKalmanFilter(10f);
|
||
|
||
return true;
|
||
}
|
||
|
||
private void AnalyzingFace()
|
||
{
|
||
if (!_webCamTextureToMatHelper.IsPlaying() || !_webCamTextureToMatHelper.DidUpdateThisFrame())
|
||
return;
|
||
|
||
mat4Display = _webCamTextureToMatHelper.GetMat();
|
||
//mat4Display.copyTo(mat4Process);
|
||
mat4Process = mat4Display.clone();
|
||
|
||
//Face Detect
|
||
panel.RefreshData(LandmarkDetect());
|
||
|
||
if (webCamReady == true)
|
||
{
|
||
if (mat4Display.rows() == videoTexture.height)
|
||
{
|
||
mat4Display.copyTo(mat4DisplayTexture);
|
||
Utils.matToTexture2D(mat4DisplayTexture, videoTexture);
|
||
}
|
||
}
|
||
}
|
||
|
||
private Dictionary<string, string> LandmarkDetect()
|
||
{
|
||
if (dnnUtils.GetEstimateLandmarkSuccessOrNot() == false || lndmrkEstScore < 0.25) //If It Failed To Track Facial Landmark in Previous Frame
|
||
{
|
||
//Face Detect!!!
|
||
Imgproc.cvtColor(mat4Process, grayMat4Process, OpenCVCompact.Imgproc.COLOR_RGBA2GRAY); //Face Detection Should Use Gray Image
|
||
|
||
if (dnnUtils.DetectFace(grayMat4Process, detectionResult, 32, 1024, true)) //Detect Face
|
||
{
|
||
detectRect = detectionResult.toArray()[0];
|
||
faceRect[0] = detectRect.x; faceRect[1] = detectRect.y; faceRect[2] = detectRect.width; faceRect[3] = detectRect.height;
|
||
|
||
lndmrkEstScore = dnnUtils.EstimateFacialLandmark(mat4Process.nativeObj, ref faceRect[0], lndmrk.nativeObj, lndmrkMode, kalmanOrNot, lndmrkLevel);
|
||
}
|
||
}
|
||
else
|
||
{
|
||
//Track Facial Landmark If It Succeeded In Previous Frame
|
||
faceRect = dnnUtils.SquareFromInnerLandmark(lndmrk.nativeObj);
|
||
lndmrkEstScore = dnnUtils.EstimateFacialLandmark(mat4Process.nativeObj, ref faceRect[0], lndmrk.nativeObj, lndmrkMode, kalmanOrNot, lndmrkLevel);
|
||
}
|
||
|
||
Mat prob = new Mat();
|
||
Dictionary<string, string> retVal = null;
|
||
|
||
if (dnnUtils.EstFaceAttribNet_7(mat4Process.nativeObj, lndmrk.nativeObj, CvType.COLOR_RGBA, prob.nativeObj) == true)
|
||
{
|
||
dnnUtils.ParseEstFaceAttrib_7(prob);
|
||
|
||
string text = string.Empty;
|
||
retVal = new Dictionary<string, string>(dnnUtils.dnnFaceAttribRes_7);
|
||
int idx = 0;
|
||
foreach (var pair in dnnUtils.dnnFaceAttribRes_7)
|
||
{
|
||
string tmp;
|
||
if (idx % 2 == 0)
|
||
tmp = string.Format("[{0}] : {1}\n", pair.Key, pair.Value);
|
||
else
|
||
tmp = string.Format("[{0}] : {1}\t\t", pair.Key, pair.Value);
|
||
|
||
text = string.Concat(text, tmp);
|
||
|
||
idx = idx + 1;
|
||
}
|
||
Debug.LogWarning(text.Substring(0, text.Length - 1));
|
||
}
|
||
prob.Dispose();
|
||
|
||
return retVal;
|
||
}
|
||
|
||
public void OnWebCamTextureToMatHelperInitialized()
|
||
{
|
||
Mat webCamTextureMat = _webCamTextureToMatHelper.GetMat();
|
||
|
||
videoTexture = new Texture2D(webCamTextureMat.cols(), webCamTextureMat.rows(), TextureFormat.RGBA32, false);
|
||
gameObject.GetComponent<Renderer>().material.mainTexture = videoTexture;
|
||
gameObject.transform.localScale = new Vector3(webCamTextureMat.cols(), webCamTextureMat.rows(), 1);
|
||
|
||
mat4Display = new Mat(webCamTextureMat.rows(), webCamTextureMat.cols(), CvType.CV_8UC4);
|
||
mat4DisplayTexture = new Mat(webCamTextureMat.rows(), webCamTextureMat.cols(), CvType.CV_8UC4);
|
||
|
||
Debug.Log("Screen.width " + Screen.width + " Screen.height " + Screen.height + " Screen.orientation " + Screen.orientation);
|
||
|
||
float width = webCamTextureMat.width();
|
||
float height = webCamTextureMat.height();
|
||
|
||
float widthScale = (float)Screen.width / width;
|
||
float heightScale = (float)Screen.height / height;
|
||
if (widthScale < heightScale)
|
||
{
|
||
Camera.main.orthographicSize = (width * (float)Screen.height / (float)Screen.width) / 2;
|
||
}
|
||
else
|
||
{
|
||
Camera.main.orthographicSize = height / 2;
|
||
}
|
||
|
||
dnnUtils.InitHeadPoseEstimationCameraInfo(webCamTextureMat.cols(), webCamTextureMat.rows());
|
||
|
||
webCamReady = true;
|
||
Debug.Log("OnWebCamTextureToMatHelperInitialized");
|
||
}
|
||
|
||
|
||
public void OnWebCamTextureToMatHelperDisposed()
|
||
{
|
||
Debug.Log("OnWebCamTextureToMatHelperDisposed");
|
||
|
||
}
|
||
|
||
public void OnWebCamTextureToMatHelperErrorOccurred(OpenCVCompact.WebCamTextureToMatHelper.ErrorCode errorCode)
|
||
{
|
||
Debug.Log("OnWebCamTextureToMatHelperErrorOccurred " + errorCode);
|
||
}
|
||
}
|