Face recognition is one of the more mature areas of machine learning and is already used in many production scenarios, such as biometric authentication, face-based attendance, and crowd monitoring. For many small and mid-sized teams, however, the technical barrier makes it hard to implement face recognition algorithms themselves. The Azure Face API wraps these machine learning algorithms and exposes them through a REST API and SDKs, so you can build your own applications on top of them.
The Azure Face API detects faces in an image and returns quite a lot of interesting information about each one: the coordinates of the face, gender, age, emotion (angry or happy, for example), whether the person is smiling, whether they wear glasses, and so on.
The Azure Face API also has a free tier, with a quota of 30,000 transactions per month.
Create the Face service
Fill in an instance name and choose a region; as before, pick the one closest to you.
Get the key and endpoint
In the side menu, select "Keys and Endpoint" and copy both values; you will need them later when calling the SDK.
Create a WPF application
Create a new WPF application that implements the features described below.
Install the SDK
Install the corresponding SDK package with NuGet:
Install-Package Microsoft.Azure.CognitiveServices.Vision.Face -Version 2.5.0-preview.2
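If you prefer the .NET CLI, the equivalent command (assuming an SDK-style project) would be:
dotnet add package Microsoft.Azure.CognitiveServices.Vision.Face --version 2.5.0-preview.2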
Build the UI
Edit MainWindow.xaml to lay out the image display area, the file picker button, and the description display area.
<Window x:Class="FaceWpf.MainWindow"
xmlns="http://schemas.microsoft.com/winfx/2006/xaml/presentation"
xmlns:x="http://schemas.microsoft.com/winfx/2006/xaml"
xmlns:d="http://schemas.microsoft.com/expression/blend/2008"
xmlns:mc="http://schemas.openxmlformats.org/markup-compatibility/2006"
xmlns:local="clr-namespace:FaceWpf"
mc:Ignorable="d"
Title="MainWindow" Height="600" Width="800">
<Grid x:Name="BackPanel">
<Image x:Name="FacePhoto" Stretch="Uniform" Margin="0,50" MouseMove="FacePhoto_MouseMove" />
<DockPanel DockPanel.Dock="Bottom">
<Button x:Name="BrowseButton" Width="72" Height="80" VerticalAlignment="Bottom" HorizontalAlignment="Left"
Content="Select image..."
Click="BrowseButton_Click" />
<StatusBar VerticalAlignment="Bottom">
<StatusBarItem>
<TextBlock Name="faceDescriptionStatusBar" Height="80" FontSize="20" Text="" Width="500" TextWrapping="Wrap"/>
</StatusBarItem>
</StatusBar>
</DockPanel>
</Grid>
</Window>
Constructor
Edit the MainWindow class constructor to initialize the FaceClient and related fields.
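The code-behind assumes the following using directives at the top of MainWindow.xaml.cs (a minimal set covering the snippets in this post):

using System;
using System.Collections.Generic;
using System.IO;
using System.Text;
using System.Threading.Tasks;
using System.Windows;
using System.Windows.Input;
using System.Windows.Media;
using System.Windows.Media.Imaging;
using Microsoft.Azure.CognitiveServices.Vision.Face;
using Microsoft.Azure.CognitiveServices.Vision.Face.Models;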
private IFaceClient _faceClient;
// Detected faces
private IList<DetectedFace> _faceList;
// Face description strings
private string[] _faceDescriptions;
private double _resizeFactor;
private const string _defaultStatusBarText =
"Move the mouse over a face to show its description.";
public MainWindow()
{
InitializeComponent();
// Face API subscription key
string subscriptionKey = "";
// Face API endpoint
string faceEndpoint = "";
_faceClient = new FaceClient(
new ApiKeyServiceClientCredentials(subscriptionKey), new System.Net.Http.DelegatingHandler[] { });
if (Uri.IsWellFormedUriString(faceEndpoint,UriKind.Absolute))
{
_faceClient.Endpoint = faceEndpoint;
}
else
{
MessageBox.Show(faceEndpoint,"Invalid URI",MessageBoxButton.OK,MessageBoxImage.Error);
Environment.Exit(0);
}
}
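Hard-coding the key and endpoint is fine for a quick demo, but a slightly safer sketch is to read them from environment variables. The variable names AZURE_FACE_KEY and AZURE_FACE_ENDPOINT below are placeholders I made up for illustration:

// Sketch: read the key and endpoint from the environment instead of hard-coding them.
// The variable names are hypothetical; use whatever your environment provides.
string subscriptionKey = Environment.GetEnvironmentVariable("AZURE_FACE_KEY");
string faceEndpoint = Environment.GetEnvironmentVariable("AZURE_FACE_ENDPOINT");
if (string.IsNullOrEmpty(subscriptionKey) || string.IsNullOrEmpty(faceEndpoint))
{
    MessageBox.Show("AZURE_FACE_KEY or AZURE_FACE_ENDPOINT is not set.", "Configuration",
        MessageBoxButton.OK, MessageBoxImage.Error);
    Environment.Exit(0);
}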
Select and display an image
// Select an image and run detection on it
private async void BrowseButton_Click(object sender,RoutedEventArgs e)
{
var openDlg = new Microsoft.Win32.OpenFileDialog();
openDlg.Filter = "JPEG Image(*.jpg)|*.jpg";
bool? result = openDlg.ShowDialog(this);
if (!(bool)result)
{
return;
}
// Display the image file.
string filePath = openDlg.FileName;
Uri fileUri = new Uri(filePath);
BitmapImage bitmapSource = new BitmapImage();
bitmapSource.BeginInit();
bitmapSource.CacheOption = BitmapCacheOption.None;
bitmapSource.UriSource = fileUri;
bitmapSource.EndInit();
FacePhoto.Source = bitmapSource;
// Detect any faces in the image.
Title = "Detecting...";
_faceList = await UploadAndDetectFaces(filePath);
Title = String.Format(
"Detection complete: {0} face(s) found.", _faceList.Count);
if (_faceList.Count > 0)
{
// Prepare to draw rectangles around the faces.
DrawingVisual visual = new DrawingVisual();
DrawingContext drawingContext = visual.RenderOpen();
drawingContext.DrawImage(bitmapSource, new Rect(0, 0, bitmapSource.Width, bitmapSource.Height));
double dpi = bitmapSource.DpiX;
// Some images don't contain dpi info.
_resizeFactor = (dpi == 0) ? 1 : 96 / dpi;
_faceDescriptions = new String[_faceList.Count];
for (int i = 0; i < _faceList.Count; ++i)
{
DetectedFace face = _faceList[i];
// Draw a red rectangle around the face
drawingContext.DrawRectangle(
Brushes.Transparent, new Pen(Brushes.Red, 2), new Rect(
face.FaceRectangle.Left * _resizeFactor,
face.FaceRectangle.Top * _resizeFactor,
face.FaceRectangle.Width * _resizeFactor,
face.FaceRectangle.Height * _resizeFactor
)
);
_faceDescriptions[i] = FaceDescription(face);
}
drawingContext.Close();
RenderTargetBitmap faceWithRectBitmap = new RenderTargetBitmap(
(int)(bitmapSource.PixelWidth * _resizeFactor), (int)(bitmapSource.PixelHeight * _resizeFactor), 96, 96, PixelFormats.Pbgra32);
faceWithRectBitmap.Render(visual);
FacePhoto.Source = faceWithRectBitmap;
faceDescriptionStatusBar.Text = _defaultStatusBarText;
}
}
Call the SDK to detect faces
Specify the face attributes you want returned, then call the SDK to analyze the image.
// Upload the image and run detection with the FaceClient
private async Task<IList<DetectedFace>> UploadAndDetectFaces(string imageFilePath)
{
IList<FaceAttributeType> faceAttributes =
new FaceAttributeType[]
{
FaceAttributeType.Gender, FaceAttributeType.Age, FaceAttributeType.Smile,
FaceAttributeType.Emotion, FaceAttributeType.Glasses, FaceAttributeType.Hair
};
using (Stream imageFileStream = File.OpenRead(imageFilePath))
{
IList<DetectedFace> faceList =
await _faceClient.Face.DetectWithStreamAsync(
imageFileStream, true, false, faceAttributes);
return faceList;
}
}
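The detect call can fail for reasons such as an invalid key, rate limiting, or an unsupported image. A minimal sketch of handling that in BrowseButton_Click, assuming you simply want to show the error and continue:

// Sketch: wrap the detection call so SDK errors don't crash the app.
try
{
    _faceList = await UploadAndDetectFaces(filePath);
}
catch (APIErrorException ex)
{
    // Thrown by the Face SDK when the service returns an error response.
    MessageBox.Show(ex.Message, "Face API error", MessageBoxButton.OK, MessageBoxImage.Error);
    _faceList = new List<DetectedFace>();
}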
Display the face description
Assemble the attributes returned for each face into a description string, and show it when the mouse hovers over that face.
/// <summary>
/// Show the face description when the mouse moves over a face
/// </summary>
/// <param name="sender"></param>
/// <param name="e"></param>
private void FacePhoto_MouseMove(object sender,MouseEventArgs e)
{
if (_faceList == null)
return;
Point mouseXY = e.GetPosition(FacePhoto);
ImageSource imageSource = FacePhoto.Source;
BitmapSource bitmapSource = (BitmapSource)imageSource;
var scale = FacePhoto.ActualWidth / (bitmapSource.PixelWidth / _resizeFactor);
bool mouseOverFace = false;
for (int i = 0; i < _faceList.Count; ++i)
{
FaceRectangle fr = _faceList[i].FaceRectangle;
double left = fr.Left * scale;
double top = fr.Top * scale;
double width = fr.Width * scale;
double height = fr.Height * scale;
if (mouseXY.X >= left && mouseXY.X <= left + width &&
mouseXY.Y >= top && mouseXY.Y <= top + height)
{
faceDescriptionStatusBar.Text = _faceDescriptions[i];
mouseOverFace = true;
break;
}
}
if (!mouseOverFace) faceDescriptionStatusBar.Text = _defaultStatusBarText;
}
/// <summary>
/// Build the description string for a detected face
/// </summary>
/// <param name="face"></param>
/// <returns></returns>
private string FaceDescription(DetectedFace face)
{
StringBuilder sb = new StringBuilder();
sb.Append("人脸: ");
// 性别年龄
sb.Append(face.FaceAttributes.Gender.Value == Gender.Female ? "女性" : "男性");
sb.Append(",");
sb.Append(face.FaceAttributes.Age.ToString() + "岁");
sb.Append(",");
sb.Append(String.Format("微笑 {0:F1}%,",face.FaceAttributes.Smile * 100));
// 显示超过0.1的表情
sb.Append("表情: ");
Emotion emotionscores = face.FaceAttributes.Emotion;
if (emotionscores.Anger >= 0.1f) sb.Append(
String.Format("anger {0:F1}%, ", emotionscores.Anger * 100));
if (emotionscores.Contempt >= 0.1f) sb.Append(
String.Format("contempt {0:F1}%, ", emotionscores.Contempt * 100));
if (emotionscores.Disgust >= 0.1f) sb.Append(
String.Format("disgust {0:F1}%, ", emotionscores.Disgust * 100));
if (emotionscores.Fear >= 0.1f) sb.Append(
String.Format("fear {0:F1}%, ", emotionscores.Fear * 100));
if (emotionscores.Happiness >= 0.1f) sb.Append(
String.Format("happiness {0:F1}%, ", emotionscores.Happiness * 100));
if (emotionscores.Neutral >= 0.1f) sb.Append(
String.Format("neutral {0:F1}%, ", emotionscores.Neutral * 100));
if (emotionscores.Sadness >= 0.1f) sb.Append(
String.Format("sadness {0:F1}%, ", emotionscores.Sadness * 100));
if (emotionscores.Surprise >= 0.1f) sb.Append(
String.Format("surprise {0:F1}%, ", emotionscores.Surprise * 100));
sb.Append(face.FaceAttributes.Glasses);
sb.Append(",");
sb.Append("头发: ");
if (face.FaceAttributes.Hair.Bald >= 0.01f)
sb.Append(String.Format("秃头 {0:F1}% ",face.FaceAttributes.Hair.Bald * 100));
IList<HairColor> hairColors = face.FaceAttributes.Hair.HairColor;
foreach (HairColor hairColor in hairColors)
{
if (hairColor.Confidence >= 0.1f)
{
sb.Append(hairColor.Color.ToString());
sb.Append(String.Format(" {0:F1}% ",hairColor.Confidence * 100));
}
}
return sb.ToString();
}
Run it
That completes our app. First, let's try a photo of Yui:
Look at that: Yui's smile score is 97.9%.
Now let's try a photo of Jay Chou:
Well, Jay just doesn't like to smile; his smile score is 0%...
Summary
With this simple WPF application we have shown how to use the Azure Face API to detect faces in an image. It really is convenient: the actual detection call is a single line of code. If you don't want to use the C# SDK, you can call the more general REST API instead, which works from any programming language. Besides detecting faces in a picture, the Azure Face API can also compare faces to verify whether they belong to the same person, which is what you would use to build features like face-based attendance. That's a topic for another post.
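For a rough idea of what the REST call looks like without the SDK, here is a minimal sketch using HttpClient; the endpoint, key, and image path are placeholders you fill in yourself, and the attribute list mirrors the one used above:

// Minimal sketch of calling the Face detect REST endpoint directly with HttpClient.
using System;
using System.IO;
using System.Net.Http;
using System.Net.Http.Headers;
using System.Threading.Tasks;

class RestDetectSample
{
    static async Task Main()
    {
        string endpoint = "<your-endpoint>";   // the endpoint shown on the "Keys and Endpoint" page
        string subscriptionKey = "<your-key>";
        string imagePath = "face.jpg";         // hypothetical local file

        using var client = new HttpClient();
        client.DefaultRequestHeaders.Add("Ocp-Apim-Subscription-Key", subscriptionKey);

        string url = endpoint +
            "/face/v1.0/detect?returnFaceId=true&returnFaceAttributes=age,gender,smile,emotion,glasses,hair";

        using var content = new ByteArrayContent(File.ReadAllBytes(imagePath));
        content.Headers.ContentType = new MediaTypeHeaderValue("application/octet-stream");

        HttpResponseMessage response = await client.PostAsync(url, content);
        // The response body is a JSON array of detected faces and their attributes.
        Console.WriteLine(await response.Content.ReadAsStringAsync());
    }
}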