Contents
Introduction
Results
Model Information
Project
Code
Download
Introduction
GitHub repository: https://github.com/derronqi/yolov8-face
YOLOv8 face detection with facial landmarks.
Results
Model Information
Model Properties
-------------------------
description: Ultralytics YOLOv8-lite-t-pose model trained on widerface.yaml
author: Ultralytics
kpt_shape: [5, 3]
task: pose
license: AGPL-3.0 https://ultralytics.com/license
version: 8.0.85
stride: 32
batch: 1
imgsz: [640, 640]
names: {0: 'face'}
---------------------------------------------------------------
Inputs
-------------------------
name: images
tensor: Float[1, 3, 640, 640]
---------------------------------------------------------------
Outputs
-------------------------
name: output0
tensor: Float[1, 80, 80, 80]
name: 884
tensor: Float[1, 80, 40, 40]
name: 892
tensor: Float[1, 80, 20, 20]
---------------------------------------------------------------
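The three output heads correspond to strides 8, 16, and 32 on the 640x640 input (80x80, 40x40, and 20x20 grids). As a quick sanity check before writing any post-processing, the exported ONNX can be loaded with OpenCvSharp and its output names listed. This is only a minimal sketch; the model file name matches the one used in the demo below, so adjust the path as needed.

using System;
using OpenCvSharp.Dnn;

// Minimal sketch: load the exported ONNX and list its output heads.
class ModelInfoCheck
{
    static void Main()
    {
        using (var net = CvDnn.ReadNetFromOnnx("yolov8-lite-t.onnx"))
        {
            foreach (var name in net.GetUnconnectedOutLayersNames())
            {
                Console.WriteLine(name); // expected for this export: output0, 884, 892
            }
        }
    }
}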
Project
Code
GenerateProposal function
This function decodes one output head of the model: it picks the best class score per grid cell, applies a sigmoid to get the confidence, decodes the DFL box distribution into left/top/right/bottom distances, and maps the box and the five facial keypoints back to the original image coordinates.
public static unsafe void GenerateProposal(int inpHeight, int inpWidth, int reg_max, int num_class, float score_threshold, int feat_h, int feat_w, Mat output, List<Rect> position_boxes, List<float> confidences, List<List<OpenCvSharp.Point>> landmarks, int imgh, int imgw, float ratioh, float ratiow, int padh, int padw)
{
    int stride = (int)Math.Ceiling((double)inpHeight / feat_h);
    int area = feat_h * feat_w;
    // Channel layout per head: [4 * reg_max box bins][num_class scores][5 * 3 keypoint values]
    float* ptr = (float*)output.DataStart;
    float* ptr_cls = ptr + area * reg_max * 4;
    float* ptr_kp = ptr + area * (reg_max * 4 + num_class);
    for (int i = 0; i < feat_h; i++)
    {
        for (int j = 0; j < feat_w; j++)
        {
            int cls_id = -1;
            float max_conf = -10000;
            int index = i * feat_w + j;
            // Pick the best class score at this grid cell
            for (int k = 0; k < num_class; k++)
            {
                float conf = ptr_cls[k * area + index];
                if (conf > max_conf)
                {
                    max_conf = conf;
                    cls_id = k;
                }
            }
            float box_prob = Common.sigmoid_x(max_conf);
            if (box_prob > score_threshold)
            {
                // Decode the DFL distribution into left/top/right/bottom distances
                float[] pred_ltrb = new float[4];
                float[] dfl_value = new float[reg_max];
                float[] dfl_softmax = new float[reg_max];
                for (int k = 0; k < 4; k++)
                {
                    for (int n = 0; n < reg_max; n++)
                    {
                        dfl_value[n] = ptr[(k * reg_max + n) * area + index];
                    }
                    Common.softmax_(ref dfl_value, ref dfl_softmax, reg_max);
                    float dis = 0f;
                    for (int n = 0; n < reg_max; n++)
                    {
                        dis += n * dfl_softmax[n];
                    }
                    pred_ltrb[k] = dis * stride;
                }
                float cx = (j + 0.5f) * stride;
                float cy = (i + 0.5f) * stride;
                // Map the box back to the original image (undo padding and letterbox scaling)
                float xmin = Math.Max((cx - pred_ltrb[0] - padw) * ratiow, 0f);
                float ymin = Math.Max((cy - pred_ltrb[1] - padh) * ratioh, 0f);
                float xmax = Math.Min((cx + pred_ltrb[2] - padw) * ratiow, (float)(imgw - 1));
                float ymax = Math.Min((cy + pred_ltrb[3] - padh) * ratioh, (float)(imgh - 1));
                Rect box = new Rect((int)xmin, (int)ymin, (int)(xmax - xmin), (int)(ymax - ymin));
                position_boxes.Add(box);
                confidences.Add(box_prob);
                // Decode the five facial keypoints and map them back to the original image
                List<OpenCvSharp.Point> kpts = new List<OpenCvSharp.Point>();
                for (int k = 0; k < 5; k++)
                {
                    float x = ((ptr_kp[(k * 3) * area + index] * 2 + j) * stride - padw) * ratiow;
                    float y = ((ptr_kp[(k * 3 + 1) * area + index] * 2 + i) * stride - padh) * ratioh;
                    kpts.Add(new OpenCvSharp.Point((int)x, (int)y));
                }
                landmarks.Add(kpts);
            }
        }
    }
}
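GenerateProposal relies on a few helpers in a Common class (sigmoid_x, softmax_, and ResizeImage) that the article does not show. The sketch below is an assumed implementation matching the call sites above: a standard logistic sigmoid, a softmax over the DFL bins, and a letterbox resize that fits the longer side to the 640x640 input and pads the remainder. The exact padding convention in the original project may differ; these methods would live alongside GenerateProposal in the project's Common class.

using System;
using OpenCvSharp;

// Assumed helper sketches for the Common class, matching the call sites above.
public static class Common
{
    // Standard logistic sigmoid, used to turn the raw class score into a probability.
    public static float sigmoid_x(float x)
    {
        return (float)(1.0 / (1.0 + Math.Exp(-x)));
    }

    // Softmax over the reg_max DFL bins of one box side.
    public static void softmax_(ref float[] input, ref float[] output, int length)
    {
        float max = float.MinValue;
        for (int i = 0; i < length; i++) max = Math.Max(max, input[i]);
        float sum = 0f;
        for (int i = 0; i < length; i++)
        {
            output[i] = (float)Math.Exp(input[i] - max);
            sum += output[i];
        }
        for (int i = 0; i < length; i++) output[i] /= sum;
    }

    // Letterbox resize: keep the aspect ratio, fit into inpWidth x inpHeight,
    // and pad the remaining border. padh/padw are the top/left paddings that
    // GenerateProposal subtracts when mapping results back to the original image.
    public static Mat ResizeImage(Mat src, int inpHeight, int inpWidth,
        ref int newh, ref int neww, ref int padh, ref int padw)
    {
        int srch = src.Rows, srcw = src.Cols;
        newh = inpHeight; neww = inpWidth; padh = 0; padw = 0;
        Mat resized = new Mat();
        Mat dst = new Mat();
        if (srch > srcw) // taller than wide: fit height, pad left/right
        {
            neww = (int)Math.Round(inpHeight * (double)srcw / srch);
            Cv2.Resize(src, resized, new Size(neww, newh));
            padw = (inpWidth - neww) / 2;
            Cv2.CopyMakeBorder(resized, dst, 0, 0, padw, inpWidth - neww - padw,
                BorderTypes.Constant, new Scalar(0, 0, 0));
        }
        else // wider than (or as wide as) tall: fit width, pad top/bottom
        {
            newh = (int)Math.Round(inpWidth * (double)srch / srcw);
            Cv2.Resize(src, resized, new Size(neww, newh));
            padh = (inpHeight - newh) / 2;
            Cv2.CopyMakeBorder(resized, dst, padh, inpHeight - newh - padh, 0, 0,
                BorderTypes.Constant, new Scalar(0, 0, 0));
        }
        return dst;
    }
}

The complete WinForms demo that ties everything together follows.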
using OpenCvSharp;
using OpenCvSharp.Dnn;
using System;
using System.Collections.Generic;
using System.Drawing;
using System.Linq;
using System.Text;
using System.Windows.Forms;

namespace OpenCvSharp_Yolov8_Demo
{
    public partial class frmMain : Form
    {
        public frmMain()
        {
            InitializeComponent();
        }

        string fileFilter = "*.*|*.bmp;*.jpg;*.jpeg;*.tif;*.tiff;*.png";
        string image_path = "";
        string startupPath;
        DateTime dt1 = DateTime.Now;
        DateTime dt2 = DateTime.Now;
        string model_path;
        Mat image;
        Mat result_image;
        Net opencv_net;
        Mat BN_image;
        StringBuilder sb = new StringBuilder();
        int reg_max = 16;
        int num_class = 1;
        int inpWidth = 640;
        int inpHeight = 640;
        float score_threshold = 0.25f;
        float nms_threshold = 0.5f;

        private void Form1_Load(object sender, EventArgs e)
        {
            startupPath = System.Windows.Forms.Application.StartupPath;
            model_path = startupPath + "\\yolov8-lite-t.onnx";
            // Initialize the network and load the local ONNX model
            opencv_net = CvDnn.ReadNetFromOnnx(model_path);
        }

        private void button1_Click(object sender, EventArgs e)
        {
            OpenFileDialog ofd = new OpenFileDialog();
            ofd.Filter = fileFilter;
            if (ofd.ShowDialog() != DialogResult.OK) return;
            pictureBox1.Image = null;
            image_path = ofd.FileName;
            pictureBox1.Image = new Bitmap(image_path);
            textBox1.Text = "";
            image = new Mat(image_path);
            pictureBox2.Image = null;
        }

        private void button2_Click(object sender, EventArgs e)
        {
            if (image_path == "")
            {
                return;
            }
            // Letterbox resize to the 640x640 network input
            int newh = 0, neww = 0, padh = 0, padw = 0;
            Mat resize_img = Common.ResizeImage(image, inpHeight, inpWidth, ref newh, ref neww, ref padh, ref padw);
            float ratioh = (float)image.Rows / newh, ratiow = (float)image.Cols / neww;
            // Normalize pixel values to [0, 1] and build the input blob
            BN_image = CvDnn.BlobFromImage(resize_img, 1 / 255.0, new OpenCvSharp.Size(inpWidth, inpHeight), new Scalar(0, 0, 0), true, false);
            // Set the network input
            opencv_net.SetInput(BN_image);
            // Run inference and read the three output heads
            Mat[] outs = new Mat[3] { new Mat(), new Mat(), new Mat() };
            string[] outBlobNames = opencv_net.GetUnconnectedOutLayersNames().ToArray();
            dt1 = DateTime.Now;
            opencv_net.Forward(outs, outBlobNames);
            dt2 = DateTime.Now;
            // Decode boxes, confidences, and landmarks from each output head
            List<Rect> position_boxes = new List<Rect>();
            List<float> confidences = new List<float>();
            List<List<OpenCvSharp.Point>> landmarks = new List<List<OpenCvSharp.Point>>();
            Common.GenerateProposal(inpHeight, inpWidth, reg_max, num_class, score_threshold, 40, 40, outs[0], position_boxes, confidences, landmarks, image.Rows, image.Cols, ratioh, ratiow, padh, padw);
            Common.GenerateProposal(inpHeight, inpWidth, reg_max, num_class, score_threshold, 20, 20, outs[1], position_boxes, confidences, landmarks, image.Rows, image.Cols, ratioh, ratiow, padh, padw);
            Common.GenerateProposal(inpHeight, inpWidth, reg_max, num_class, score_threshold, 80, 80, outs[2], position_boxes, confidences, landmarks, image.Rows, image.Cols, ratioh, ratiow, padh, padw);
            // Non-maximum suppression
            int[] indexes = new int[position_boxes.Count];
            CvDnn.NMSBoxes(position_boxes, confidences, score_threshold, nms_threshold, out indexes);
            List<Rect> re_result = new List<Rect>();
            List<List<OpenCvSharp.Point>> re_landmarks = new List<List<OpenCvSharp.Point>>();
            List<float> re_confidences = new List<float>();
            for (int i = 0; i < indexes.Length; i++)
            {
                int index = indexes[i];
                re_result.Add(position_boxes[index]);
                re_landmarks.Add(landmarks[index]);
                re_confidences.Add(confidences[index]);
            }
            if (re_result.Count > 0)
            {
                sb.Clear();
                sb.AppendLine("Inference time: " + (dt2 - dt1).TotalMilliseconds + "ms");
                sb.AppendLine("--------------------------");
                // Draw the detections on the image
                result_image = image.Clone();
                for (int i = 0; i < re_result.Count; i++)
                {
                    Cv2.Rectangle(result_image, re_result[i], new Scalar(0, 0, 255), 2, LineTypes.Link8);
                    Cv2.PutText(result_image, "face-" + re_confidences[i].ToString("0.00"),
                        new OpenCvSharp.Point(re_result[i].X, re_result[i].Y - 10),
                        HersheyFonts.HersheySimplex, 1, new Scalar(0, 0, 255), 2);
                    foreach (var item in re_landmarks[i])
                    {
                        Cv2.Circle(result_image, item, 4, new Scalar(0, 255, 0), -1);
                    }
                    sb.AppendLine(string.Format("{0}:{1},({2},{3},{4},{5})"
                        , "face"
                        , re_confidences[i].ToString("0.00")
                        , re_result[i].TopLeft.X
                        , re_result[i].TopLeft.Y
                        , re_result[i].BottomRight.X
                        , re_result[i].BottomRight.Y
                        ));
                }
                pictureBox2.Image = new Bitmap(result_image.ToMemoryStream());
                textBox1.Text = sb.ToString();
            }
            else
            {
                textBox1.Text = "No detections";
            }
        }
    }
}
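One robustness note: the three GenerateProposal calls above hard-code the 40x40, 20x20, 80x80 order, which depends on the order in which OpenCV returns the output blobs. A sketch of an alternative (assuming the [1, C, H, W] output layout shown in the model information) reads each head's spatial size from the Mat shape instead:

// Sketch: derive each head's feature-map size from the output shape
// instead of hard-coding 40/20/80 (assumes [1, C, H, W] outputs).
for (int i = 0; i < outs.Length; i++)
{
    int feat_h = outs[i].Size(2);
    int feat_w = outs[i].Size(3);
    Common.GenerateProposal(inpHeight, inpWidth, reg_max, num_class, score_threshold,
        feat_h, feat_w, outs[i], position_boxes, confidences, landmarks,
        image.Rows, image.Cols, ratioh, ratiow, padh, padw);
}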
Download
The executable (exe) package and the project source code are available for download.