close
終於完成了影像辨識的第一部分了。
在這邊先放幾個比較重要的 Function 就好。
接下來我會在 GitHub 那邊弄一個帳號,
以後如果有 source,就丟往那裡,省得每次都要找半天。
#region 處理影像的動作
#endregion
/// <summary>
/// Per-frame handler: grabs the current frame from the capture device, runs
/// face/eye detection, draws the detected regions, and shows the result in
/// the PictureBox. Any failure is surfaced via a message box.
/// </summary>
/// <param name="sender">Event source (unused).</param>
/// <param name="e">Event data (unused).</param>
private void ProcessFrameAsync(object sender, EventArgs e)
{
    try
    {
        using (Mat objMat = new Mat())
        {
            // Grab the latest frame and detect faces/eyes on it.
            objVideoCapture.Retrieve(objMat);
            OpenCVResult result = this.CaptureFace(objMat);

            // Draw bounding boxes on the frame: red for faces, yellow for eyes.
            for (int f = 0; f < result.faces.Count; f++)
                CvInvoke.Rectangle(objMat, result.faces[f], new Bgr(Color.Red).MCvScalar, 2);
            for (int y = 0; y < result.eyes.Count; y++)
                CvInvoke.Rectangle(objMat, result.eyes[y], new Bgr(Color.Yellow).MCvScalar, 1);

            // Dispose the previously displayed Image so GDI handles are not
            // leaked on every frame.
            if (picRender.Image != null)
                picRender.Image.Dispose();

            // BUG FIX: objMat.Bitmap creates a new managed Bitmap on each access;
            // the original code never disposed it, leaking one Bitmap per frame.
            // Also release the unmanaged HBITMAP in a finally block so it cannot
            // leak if Image.FromHbitmap throws.
            using (Bitmap frameBitmap = objMat.Bitmap)
            {
                IntPtr hBitmap = frameBitmap.GetHbitmap();
                try
                {
                    picRender.Image = Image.FromHbitmap(hBitmap);
                }
                finally
                {
                    DeleteObject(hBitmap);
                }
            }

            // Brief pause so this handler does not saturate the thread.
            Thread.Sleep(5);
        }
    }
    catch (Exception ex)
    {
        // NOTE(review): this handler runs once per frame, so a persistent
        // failure will pop a message box repeatedly.
        MessageBox.Show(ex.Message);
    }
}
#region 透過OpenCV 進行人臉判斷,是否存在
#endregion
/// <summary>
/// Runs Haar-cascade face and eye detection on the given frame and returns
/// the detected regions in frame coordinates.
/// </summary>
/// <param name="objMat">The captured frame to analyze.</param>
/// <returns>An <c>OpenCVResult</c> holding face and eye rectangles.</returns>
private OpenCVResult CaptureFace(Mat objMat)
{
    long detectionTime;
    // BUG FIX: the published listing lost the <Rectangle> type arguments
    // (stripped as HTML); restored here so the code compiles.
    List<Rectangle> faces = new List<Rectangle>();
    List<Rectangle> eyes = new List<Rectangle>();
    DetectFace.Detect(
        objMat, "haarcascade_frontalface_default.xml", "haarcascade_eye.xml",
        faces, eyes,
        out detectionTime);
    // NOTE(review): the original also computed rectangles rescaled to the
    // PictureBox size (via objMat.Bitmap, which leaked a Bitmap), but that
    // list was never used anywhere — the dead computation has been removed.
    // The caller draws these rectangles directly on the frame, which expects
    // frame coordinates.
    return new OpenCVResult()
    {
        eyes = eyes,
        faces = faces,
    };
}
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
using System.Diagnostics;
using System.Drawing;
using Emgu.CV;
using Emgu.CV.Structure;
#if !(__IOS__ || NETFX_CORE)
using Emgu.CV.Cuda;
#endif
namespace FaceDetect_EmguCV
{
/// <summary>
/// Haar-cascade face and eye detection helper (adapted from the Emgu CV
/// FaceDetection sample). Uses the CUDA classifier when the input is a GPU
/// mat and CUDA is available; otherwise falls back to the CPU path.
/// </summary>
public static class DetectFace
{
    /// <summary>
    /// Detects faces and, within each face, eyes.
    /// </summary>
    /// <param name="image">Input image (BGR).</param>
    /// <param name="faceFileName">Path to the face Haar-cascade XML file.</param>
    /// <param name="eyeFileName">Path to the eye Haar-cascade XML file.</param>
    /// <param name="faces">Receives face rectangles in image coordinates.</param>
    /// <param name="eyes">Receives eye rectangles in image coordinates.</param>
    /// <param name="detectionTime">Elapsed detection time in milliseconds.</param>
    public static void Detect
        (
        IInputArray image,
        String faceFileName,
        String eyeFileName,
        List<Rectangle> faces,   // BUG FIX: generic type args were stripped in the published listing
        List<Rectangle> eyes,
        out long detectionTime
        )
    {
        // timer control
        Stopwatch watch;
        using (InputArray iaImage = image.GetInputArray())
        {
#if !(__IOS__ || NETFX_CORE)
            if (iaImage.Kind == InputArray.Type.CudaGpuMat && CudaInvoke.HasCuda)
            {
                using (CudaCascadeClassifier face = new CudaCascadeClassifier(faceFileName))
                using (CudaCascadeClassifier eye = new CudaCascadeClassifier(eyeFileName))
                {
                    face.ScaleFactor = 1.1;
                    face.MinNeighbors = 10;
                    face.MinObjectSize = Size.Empty;
                    eye.ScaleFactor = 1.1;
                    eye.MinNeighbors = 10;
                    eye.MinObjectSize = Size.Empty;
                    watch = Stopwatch.StartNew();
                    // BUG FIX: CudaImage generic arguments restored
                    // (<Bgr, Byte> input converted to <Gray, Byte>).
                    using (CudaImage<Bgr, Byte> gpuImage = new CudaImage<Bgr, Byte>(image))
                    using (CudaImage<Gray, Byte> gpuGray = gpuImage.Convert<Gray, Byte>())
                    using (GpuMat region = new GpuMat())
                    {
                        face.DetectMultiScale(gpuGray, region);
                        Rectangle[] faceRegion = face.Convert(region);
                        faces.AddRange(faceRegion);
                        foreach (Rectangle f in faceRegion)
                        {
                            using (CudaImage<Gray, Byte> faceImg = gpuGray.GetSubRect(f))
                            {
                                // For some reason a clone is required.
                                // Might be a bug of CudaCascadeClassifier in opencv
                                using (CudaImage<Gray, Byte> clone = faceImg.Clone(null))
                                using (GpuMat eyeRegionMat = new GpuMat())
                                {
                                    eye.DetectMultiScale(clone, eyeRegionMat);
                                    Rectangle[] eyeRegion = eye.Convert(eyeRegionMat);
                                    foreach (Rectangle e in eyeRegion)
                                    {
                                        // Eye rectangles are relative to the face
                                        // ROI; shift them into image coordinates.
                                        Rectangle eyeRect = e;
                                        eyeRect.Offset(f.X, f.Y);
                                        eyes.Add(eyeRect);
                                    }
                                }
                            }
                        }
                    }
                    watch.Stop();
                }
            }
            else
#endif
            {
                //Read the HaarCascade objects
                using (CascadeClassifier face = new CascadeClassifier(faceFileName))
                using (CascadeClassifier eye = new CascadeClassifier(eyeFileName))
                {
                    watch = Stopwatch.StartNew();
                    using (UMat ugray = new UMat())
                    {
                        CvInvoke.CvtColor(image, ugray, Emgu.CV.CvEnum.ColorConversion.Bgr2Gray);
                        //normalizes brightness and increases contrast of the image
                        CvInvoke.EqualizeHist(ugray, ugray);
                        //Detect the faces from the gray scale image and store the locations as rectangle
                        //The first dimensional is the channel
                        //The second dimension is the index of the rectangle in the specific channel
                        Rectangle[] facesDetected = face.DetectMultiScale(
                            ugray,
                            1.1,
                            10,
                            new Size(20, 20));
                        faces.AddRange(facesDetected);
                        foreach (Rectangle f in facesDetected)
                        {
                            //Get the region of interest on the faces
                            using (UMat faceRegion = new UMat(ugray, f))
                            {
                                Rectangle[] eyesDetected = eye.DetectMultiScale(
                                    faceRegion,
                                    1.1,
                                    10,
                                    new Size(20, 20));
                                foreach (Rectangle e in eyesDetected)
                                {
                                    // Shift eye rectangles from face-ROI
                                    // coordinates into image coordinates.
                                    Rectangle eyeRect = e;
                                    eyeRect.Offset(f.X, f.Y);
                                    eyes.Add(eyeRect);
                                }
                            }
                        }
                    }
                    watch.Stop();
                }
            }
            detectionTime = watch.ElapsedMilliseconds;
        }
    }
}
}
全站熱搜