简介
相信大部分同学们都已了解或接触过OpenAtom OpenHarmony(以下简称“OpenHarmony”)了,但你可能还没在OpenHarmony上实现过人脸识别功能。本文带你快速在OpenHarmony标准设备上基于SeetaFace2和OpenCV实现人脸识别。
项目作用
本项目完结了导入人脸模型、人脸框选和人脸辨认三大功用,操作流程如下:
-
录入页面点击右下角按钮,跳转拍摄页面进行拍照;
-
挑选一张或多张人脸作为训练模型,并设置对应的姓名;
-
挑选一张未录入的人脸图片,点击框选按钮完结人脸图片框选功用;
-
终究点击辨认,使用会对当时图片进行匹配,终究在界面中显现辨认成果。
快速上手
设备端开发
设备端经过OpenCV对图画进行处理并经过Seetaface2对图形数据进行人脸头像的辨认,终究输出对应的NAPI接口供给给使用端调用。因此设备端开发首要涉及到OpenCV和Seetaface2的移植以及NAPI接口的开发。
OpenCV库移植
OpenCV是一个功能十分强大的开源计算机视觉库。此库已由知识体系工作组移植到了OpenHarmony中,后期还会将此库合入到主仓。在此库合入主仓之前,我们只需要以下几个步骤就可以完成OpenCV的移植使用。
- 经过以下指令下载现已移植好的OpenCV
git clone git@gitee.com:zhong-luping/ohos_opencv.git
- 将OpenCV拷贝到OpenHarmony目录的third_party下
cp -raf opencv ~/openharmony/third_party/
- 适当裁剪编译选项
翻开OpenCV目录下的BUILD.gn,如下:
不需求video以及flann功用,将对应的模块注释即可。
import("//build/ohos.gni")

# Aggregate group target for the ported OpenCV modules.
# NOTE: GN comments start with '#' — the original snippet used C++-style '//'
# inside the list, which is not valid GN syntax. The flann and video modules
# are not needed by this project and are kept disabled.
group("opencv") {
  deps = [
    "//third_party/opencv/modules/core:opencv_core",
    # "//third_party/opencv/modules/flann:opencv_flann",
    "//third_party/opencv/modules/imgproc:opencv_imgproc",
    "//third_party/opencv/modules/ml:opencv_ml",
    "//third_party/opencv/modules/photo:opencv_photo",
    "//third_party/opencv/modules/dnn:opencv_dnn",
    "//third_party/opencv/modules/features2d:opencv_features2d",
    "//third_party/opencv/modules/imgcodecs:opencv_imgcodecs",
    "//third_party/opencv/modules/videoio:opencv_videoio",
    "//third_party/opencv/modules/calib3d:opencv_calib3d",
    "//third_party/opencv/modules/highgui:opencv_highgui",
    "//third_party/opencv/modules/objdetect:opencv_objdetect",
    "//third_party/opencv/modules/stitching:opencv_stitching",
    "//third_party/opencv/modules/ts:opencv_ts",
    # "//third_party/opencv/modules/video:opencv_video",
    "//third_party/opencv/modules/gapi:opencv_gapi",
  ]
}
- 增加依靠子系统的part_name,编译框架子系统会将编译出的库拷贝到系统文件中。
此项目中我们新建了一个SeetaFaceApp的子系统,该子系统中命名part_name为SeetafaceApi,所以我们需要在对应模块的BUILD.gn中加上 part_name = "SeetafaceApi"
以module/core为例:
# Example: extend an existing OpenCV module target (the "..." placeholders
# stand for the module's original sources/configs/deps, kept unchanged).
ohos_shared_library("opencv_core"){
sources = [ ... ]
configs = [ ... ]
deps = [ ... ]
# Binds this library to the SeetafaceApi component so the build framework
# copies the built artifact into the system image.
part_name = "SeetafaceApi"
}
- 编译工程需求增加OpenCV的依靠。
在生成NAPI的BUILD.gn中增加以下依靠:
deps += [ "//third_party/opencv:opencv" ]
至此,人脸辨认中OpenCV的移植使用完结。
SeetaFace2库移植
SeetaFace2是中科视拓开源的第二代人脸辨认库。包括了建立一套全自动人脸辨认系统所需的三个中心模块,即:人脸检测模块FaceDetector、面部要害点定位模块FaceLandmarker以及人脸特征提取与比对模块 FaceRecognizer。
关于SeetaFace2的移植请参照文档:SeetaFace2移植开发文档。
NAPI接口开发
关于OpenHarmony中的NAPI开发,参阅视频:
OpenHarmony中napi的开发视频教程。本文将重点解说NAPI接口怎么完结OpenCV以及SeetaFace的调用。
- 人脸框获取的NAPI接口的完结。
int GetRecognizePoints(const char *image_path);
此接口首要是经过使用层输入一张图片,经过OpenCV的imread接口获取到图片数据,并经过人脸检测模块FaceDetector分析获得图片中一切的人脸矩形框(矩形框是以x,y,w,h的方法)并将人脸框矩形以数组的方法回来到使用层。
人脸框矩形获取的首要代码如下:
/**
 * Detect every face in the image at image_path and copy at most `num`
 * bounding boxes (x, y, w, h) into `rect`.
 *
 * @param image_path path of the image to analyse
 * @param rect       caller-owned output array of at least `num` entries
 * @param num        capacity of `rect`
 * @return total number of faces the detector reported (may exceed `num`,
 *         in which case only the first `num` boxes were copied), or -1 on error
 */
static int RecognizePoint(string image_path, FaceRect *rect, int num)
{
    if (rect == nullptr || num <= 0) {
        cerr << "NULL POINT!" << endl;
        LOGE("NULL POINT! \n");
        return -1;
    }
    seeta::ModelSetting::Device device = seeta::ModelSetting::CPU;
    int id = 0;
    /* Load the face-detection and 81-point landmark models. */
    seeta::ModelSetting FD_model( "/system/usr/model/fd_2_00.dat", device, id );
    seeta::ModelSetting FL_model( "/system/usr/model/pd_2_00_pts81.dat", device, id );
    seeta::FaceDetector FD(FD_model);
    // NOTE(review): FL is constructed but never used by the detection below —
    // loading the 81-point model here looks like pure overhead; confirm before
    // removing it.
    seeta::FaceLandmarker FL(FL_model);
    FD.set(seeta::FaceDetector::PROPERTY_VIDEO_STABLE, 1);
    /* Read the image from disk. */
    auto frame = imread(image_path);
    seeta::cv::ImageData simage = frame;
    if (simage.empty()) {
        cerr << "Can not open image: " << image_path << endl;
        LOGE("Can not open image: %{public}s", image_path.c_str());
        return -1;
    }
    /* Run face detection and collect every face rectangle. */
    auto faces = FD.detect(simage);
    if (faces.size <= 0) {
        cerr << "detect " << image_path << " failed!" << endl;
        // was "%s": use %{public}s like the other logs so hilog does not
        // redact the path as <private>
        LOGE("detect image: %{public}s failed!", image_path.c_str());
        return -1;
    }
    for (int i = 0; i < faces.size && i < num; i++) {
        /* face.pos holds the box as x/y/width/height; FaceRect is assumed to
         * be layout-compatible with it — TODO confirm both struct layouts. */
        auto &face = faces.data[i];
        memcpy(&rect[i], &(face.pos), sizeof(FaceRect));
    }
    return faces.size;
}
其间FD_model是人脸检测模型,而FL_model是面部要害点定位模型(此模型分为5点定位和81点定位,本项目中使用的是81点定位模型),这些模型从开源项目中免费获取。
经过以上方法获取到对应的人脸矩形框后,再将矩形框以数组的方法回来到使用端:
// Detect faces in `path` and return { recognizeFrame: number[] } where the
// array holds 4 ints (x, y, w, h) per face.
string image = path;
p = (FaceRect *)malloc(sizeof(FaceRect) * MAX_FACE_RECT);
if (p == nullptr) {
    LOGE("malloc FaceRect array failed!");
    return result;
}
/* Run detection; retval is the TOTAL number of faces found. */
int retval = RecognizePoint(image, p, MAX_FACE_RECT);
if (retval <= napi_ok) {
    // was "GetNapiValueString failed!" — a copy-paste from another branch
    LOGE("RecognizePoint failed!");
    free(p);
    return result;
}
// RecognizePoint only filled MAX_FACE_RECT entries; clamp to avoid reading
// past the end of p when more faces were detected.
if (retval > MAX_FACE_RECT) {
    retval = MAX_FACE_RECT;
}
/* Flatten every rectangle to four ints and append them to `array`. */
for (int i = 0; i < retval; i++) {
    int arry_int[4] = {p[i].x, p[i].y, p[i].w, p[i].h};
    int arraySize = (sizeof(arry_int) / sizeof(arry_int[0]));
    for (int j = 0; j < arraySize; j++) {
        napi_value num_val;
        if (napi_create_int32(env, arry_int[j], &num_val) != napi_ok) {
            LOGE("napi_create_int32 failed!");
            free(p);   // was leaked on this early-return path
            return result;
        }
        napi_set_element(env, array, i * arraySize + j, num_val);
    }
}
if (napi_create_object(env, &result) != napi_ok) {
    LOGE("napi_create_object failed!");
    free(p);
    return result;
}
if (napi_set_named_property(env, result, "recognizeFrame", array) != napi_ok) {
    LOGE("napi_set_named_property failed!");
    free(p);
    return result;
}
free(p);
return result;
其间array是经过napi_create_array创立的一个NAPI数组目标,经过 napi_set_element将一切的矩形框数据保存到array目标中,终究经过 napi_set_named_property将array转换成使用端可辨认的目标类型result并将其回来。
-
人脸查找辨认初始化与逆初始化。
-
int FaceSearchInit();
-
int FaceSearchDeinit();
这2个接口首要是供给给人脸查找以及辨认调用的,初始化首要包括模型的注册以及辨认模块的初始化:
/**
 * Initialize the face-search engine: load the detection (FD), landmark (PD)
 * and recognition (FR) models and reset the gallery index map.
 *
 * @param info caller-owned, already-constructed FaceSearchInfo to initialize
 * @return 0 on success, -1 on error
 */
static int FaceSearchInit(FaceSearchInfo *info)
{
    if (info == nullptr) {
        // The original malloc'ed a FaceSearchInfo here, but it assigned a
        // LOCAL pointer (the caller never saw it, so the block leaked), and
        // the raw malloc'ed storage contains C++ members (shared_ptr, map)
        // whose constructors never ran — assigning info->engine into it was
        // undefined behavior. Callers must pass a constructed object.
        cerr << "NULL POINT!" << endl;
        return -1;
    }
    seeta::ModelSetting::Device device = seeta::ModelSetting::CPU;
    int id = 0;
    seeta::ModelSetting FD_model( "/system/usr/model/fd_2_00.dat", device, id );
    seeta::ModelSetting PD_model( "/system/usr//model/pd_2_00_pts5.dat", device, id );
    seeta::ModelSetting FR_model( "/system/usr/model/fr_2_10.dat", device, id );
    // 2 and 16 are seeta::FaceEngine tuning parameters — presumably thread
    // counts / capacity; confirm against the FaceEngine constructor docs.
    info->engine = make_shared<seeta::FaceEngine>(FD_model, PD_model, FR_model, 2, 16);
    // Ignore faces smaller than 80 px to reduce false detections.
    info->engine->FD.set( seeta::FaceDetector::PROPERTY_MIN_FACE_SIZE, 80);
    info->GalleryIndexMap.clear();
    return 0;
}
而逆初始化便是做一些内存的开释。
/**
 * Release everything FaceSearchInit() set up.
 *
 * @param info        state to tear down; may be nullptr (no-op)
 * @param need_delete non-zero to also free the FaceSearchInfo object itself
 */
static void FaceSearchDeinit(FaceSearchInfo *info, int need_delete)
{
    if (info == nullptr) {
        return;
    }
    if (info->engine != nullptr) {
        // The original if-body was empty and released nothing; drop our
        // reference so the engine (and its loaded models) can be destroyed.
        info->engine.reset();
    }
    info->GalleryIndexMap.clear();
    if (need_delete) {
        // NOTE(review): free() does not run C++ destructors, so this only
        // pairs with a raw malloc of FaceSearchInfo; if the object came from
        // new, this must be delete instead — confirm the allocation site.
        free(info);
        info = nullptr;  // local only; callers must not reuse their pointer
    }
}
- 人脸查找辨认注册接口的完结。
int FaceSearchRegister(const char *value);
需要注意的是,该接口需要应用端传入一个JSON数据参数,主要包括注册人脸的姓名、图片以及图片个数,如 {"name":"刘德华","sum":2,"image":["11.jpg","12.jpg"]}(其中sum为数字类型,设备端用napi_get_value_uint32解析;image为字符串数组,设备端用napi_is_array校验)。而解析参数的时候需要调用 napi_get_named_property 对JSON数据的各个对象进行解析,具体代码如下:
// Parse the JS argument object { name: string, sum: number, image: string[] }
// and register every listed image under `name` with the recognition engine.
napi_get_cb_info(env, info, &argc, &argv, &thisVar, &data);
napi_value object = argv;
napi_value value = nullptr;
// "name": the person's name shared by all images in this batch.
if (napi_get_named_property(env, object, (const char *)"name", &value) == napi_ok) {
char name[64] = {0};
if (GetNapiValueString(env, value, (char *)name, sizeof(name)) < 0) {
LOGE("GetNapiValueString failed!");
return result;
}
reg_info.name = name;
}
LOGI("name = %{public}s", reg_info.name.c_str());
// "sum": how many entries of the "image" array to read.
if (napi_get_named_property(env, object, (const char *)"sum", &value) == napi_ok) {
if (napi_get_value_uint32(env, value, &sum) != napi_ok) {
LOGE("napi_get_value_uint32 failed!");
return result;
}
}
LOGI("sum = %{public}d", sum);
// "image": array of image paths; each one is registered individually.
if (napi_get_named_property(env, object, (const char *)"image", &value) == napi_ok) {
bool res = false;
if (napi_is_array(env, value, &res) != napi_ok || res == false) {
LOGE("napi_is_array failed!");
return result;
}
// NOTE(review): `sum` comes from JS and is trusted here; if it exceeds the
// actual array length, napi_get_element fails and we bail out below.
for (int i = 0; i < sum; i++) {
char image[256] = {0};
napi_value imgPath = nullptr;
if (napi_get_element(env, value, i, &imgPath) != napi_ok) {
LOGE("napi_get_element failed!");
return result;
}
if (GetNapiValueString(env, imgPath, (char *)image, sizeof(image)) < 0) {
LOGE("GetNapiValueString failed!");
return result;
}
reg_info.path = image;
// Register this (name, image) pair with the global engine; stop on the
// first failure.
if (FaceSearchRegister(g_FaceSearch, reg_info) != napi_ok) {
retval = -1;
break;
}
}
}
经过napi_get_cb_info获取从使用端传来的参数,并经过 napi_get_named_property获取对应的name以及图片个数,终究经过napi_get_element获取图片数组中的各个image,将name和image经过FaceSearchRegister接口将图片和姓名注册到SeetaFace2模块的辨认引擎中。详细完结如下:
static int FaceSearchRegister(FaceSearchInfo &info, RegisterInfo &gegister)
{
if (info.engine == nullptr) {
cerr << "NULL POINT!" << endl;
return -1;
}
seeta::cv::ImageData image = cv::imread(gegister.path);
auto id = info.engine->Register(image);
if (id >= 0) {
info.GalleryIndexMap.insert(make_pair(id, gegister.name));
}
return 0;
}
注册完数据后,后续能够经过该引擎来辨认对应的图片。
- 获取人脸查找辨认成果接口的完结。
char *FaceSearchGetRecognize(const char *image_path);
该接口完结了经过传入一张图片,在辨认引擎中进行查找辨认。如果辨认引擎中有相似的人脸注册,则回来对应人脸注册时的姓名,不然回来不辨认(ignored)字样。该方法是经过异步回调的方法完结的:
// Create the async work; on success the handle comes back through the last
// argument (commandStrData->asyncWork).
napi_value resourceName = nullptr;
napi_create_string_utf8(env, "FaceSearchGetPersonRecognizeMethod", NAPI_AUTO_LENGTH, &resourceName);
napi_create_async_work(env, nullptr, resourceName, FaceSearchRecognizeExecuteCB, FaceSearchRecognizeCompleteCB,
(void *)commandStrData, &commandStrData->asyncWork);
// Queue the freshly created async work; the runtime schedules its execution.
napi_queue_async_work(env, commandStrData->asyncWork);
其间FaceSearchRecognizeExecuteCB完结了人脸辨认
/**
 * Async-work "execute" callback: runs on a worker thread, performs the
 * recognition and stores the resulting name string in the work data.
 *
 * @param env  NAPI environment (unused on the worker thread)
 * @param data CommandStrData created when the work was queued
 */
static void FaceSearchRecognizeExecuteCB(napi_env env, void *data)
{
    // napi hands back the same pointer we queued; the original's
    // dynamic_cast after a C-style cast to the identical type was a no-op.
    CommandStrData *commandStrData = static_cast<CommandStrData *>(data);
    if (commandStrData == nullptr) {
        // original format string had no specifiers for the
        // __FUNCTION__/__LINE__ arguments it was given
        LOGE("%{public}s:%{public}d nullptr point!", __FUNCTION__, __LINE__);
        return;
    }
    // Borrow the shared engine state by reference; the original copied the
    // entire FaceSearchInfo (shared_ptr + gallery map) on every request.
    FaceSearchInfo &faceSearch = *(commandStrData->mFaceSearch);
    commandStrData->result = FaceSearchSearchRecognizer(faceSearch, commandStrData->filename);
    LOGI("Recognize result : %{public}s !", commandStrData->result.c_str());
}
FaceSearchRecognizeCompleteCB函数经过napi_resolve_deferred接口将辨认成果回来到使用端。
/**
 * Async-work "complete" callback: runs on the main thread, converts the
 * recognition result to a JS string and settles the promise.
 *
 * Always deletes the async work and the CommandStrData before returning.
 */
static void FaceSearchRecognizeCompleteCB(napi_env env, napi_status status, void *data)
{
    CommandStrData *commandStrData = static_cast<CommandStrData *>(data);
    napi_value result;
    if (commandStrData == nullptr || commandStrData->deferred == nullptr) {
        // original format string had no specifiers for its extra arguments
        LOGE("%{public}s:%{public}d nullptr", __FUNCTION__, __LINE__);
        if (commandStrData != nullptr) {
            napi_delete_async_work(env, commandStrData->asyncWork);
            delete commandStrData;
        }
        return;
    }
    const char *result_str = commandStrData->result.c_str();
    if (napi_create_string_utf8(env, result_str, NAPI_AUTO_LENGTH, &result) != napi_ok) {
        LOGE("%{public}s:%{public}d napi_create_string_utf8 failed!", __FUNCTION__, __LINE__);
        // Settle the promise even on failure — the original returned without
        // rejecting, leaving the JS promise pending forever.
        napi_value undefined = nullptr;
        napi_get_undefined(env, &undefined);
        napi_reject_deferred(env, commandStrData->deferred, undefined);
        napi_delete_async_work(env, commandStrData->asyncWork);
        delete commandStrData;
        return;
    }
    napi_resolve_deferred(env, commandStrData->deferred, result);
    napi_delete_async_work(env, commandStrData->asyncWork);
    delete commandStrData;
}
经过人脸特征提取与比对模块,对传入的数据与已注册的数据进行比照,并经过回来比照的相似度来进行判别当时人脸是否为可辨认的,终究回来辨认成果。详细完结代码:
/**
 * Detect faces in `filename` and query each one against the registered
 * gallery.
 *
 * @return the matched person's name; "ignored" when the quality assessor
 *         rejected the face; "recognize failed" when nothing matched;
 *         "recognize error 0/1" on engine/image errors.
 */
static string FaceSearchSearchRecognizer(FaceSearchInfo &info, string filename)
{
    if (info.engine == nullptr) {
        cerr << "NULL POINT!" << endl;
        return "recognize error 0";
    }
    string name;
    const float threshold = 0.7f;   // minimum similarity to accept a match
    seeta::QualityAssessor QA;
    auto frame = cv::imread(filename);
    if (frame.empty()) {
        LOGE("read image %{public}s failed!", filename.c_str());
        return "recognize error 1!";
    }
    seeta::cv::ImageData image = frame;
    std::vector<SeetaFaceInfo> faces = info.engine->DetectFaces(image);
    // NOTE: when the image contains several faces, the result of the LAST
    // face processed wins — behavior preserved from the original.
    for (SeetaFaceInfo &face : faces) {
        int64_t index = 0;
        float similarity = 0;
        auto points = info.engine->DetectPoints(image, face);
        auto score = QA.evaluate(image, face.pos, points.data());
        if (score == 0) {
            // quality assessor rejected this face (e.g. blur/pose/size)
            name = "ignored";
        } else {
            auto queried = info.engine->QueryTop(image, points.data(), 1, &index, &similarity);
            // no face queried from database
            if (queried < 1) continue;
            // similarity greater than threshold, means recognized
            if (similarity > threshold) {
                // was GalleryIndexMap[index]: map::operator[] default-inserts
                // an empty name for an unknown index, polluting the gallery;
                // find() leaves the map untouched
                auto it = info.GalleryIndexMap.find(index);
                if (it != info.GalleryIndexMap.end()) {
                    name = it->second;
                }
            }
        }
    }
    LOGI("name : %{public}s \n", name.length() > 0 ? name.c_str() : "null");
    return name.length() > 0 ? name : "recognize failed";
}
至此,一切的NAPI接口现已开发完结。
- NAPI库编译开发完NAPI接口后,咱们需求将咱们编写的库加入到系统中进行编译,咱们需求增加一个自己的子系统。
首要在库目录下新建一个ohos.build
{
"subsystem": "SeetafaceApp",
"parts": {
"SeetafaceApi": {
"module_list": [
"//seetaface:seetafaceapp_napi"
],
"test_list": [ ]
}
}
}
其次同一目录新建一个BUILD.gn,将库源文件以及对应的依靠加上,如下:
import("//build/ohos.gni")

# Compiler flags shared by the NAPI library: OpenCV code needs RTTI and
# exceptions enabled, plus the standard OpenCV build defines.
config("lib_config") {
cflags_cc = [
"-frtti",
"-fexceptions",
"-DCVAPI_EXPORTS",
"-DOPENCV_ALLOCATOR_STATS_COUNTER_TYPE=int",
"-D_USE_MATH_DEFINES",
"-D__OPENCV_BUILD=1",
"-D__STDC_CONSTANT_MACROS",
"-D__STDC_FORMAT_MACROS",
"-D__STDC_LIMIT_MACROS",
"-O2",
"-Wno-error=header-hygiene",
]
}
# NAPI shared library exposing the face-recognition interfaces to the app.
ohos_shared_library("seetafaceapp_napi") {
sources = [
"app.cpp",
]
# Headers for OpenCV, SeetaFace2 and the hilog logging interfaces.
include_dirs = [
"./",
"//third_party/opencv/include",
"//third_party/opencv/common",
"//third_party/opencv/modules/core/include",
"//third_party/opencv/modules/highgui/include",
"//third_party/opencv/modules/imgcodecs/include",
"//third_party/opencv/modules/imgproc/include",
"//third_party/opencv/modules/calib3d/include",
"//third_party/opencv/modules/dnn/include",
"//third_party/opencv/modules/features2d/include",
"//third_party/opencv/modules/flann/include",
"//third_party/opencv/modules/ts/include",
"//third_party/opencv/modules/video/include",
"//third_party/opencv/modules/videoio/include",
"//third_party/opencv/modules/ml/include",
"//third_party/opencv/modules/objdetect/include",
"//third_party/opencv/modules/photo/include",
"//third_party/opencv/modules/stitching/include",
"//third_party/SeetaFace2/FaceDetector/include",
"//third_party/SeetaFace2/FaceLandmarker/include",
"//third_party/SeetaFace2/FaceRecognizer/include",
"//third_party/SeetaFace2/QualityAssessor/include",
"//base/accessibility/common/log/include",
"//base/hiviewdfx/hilog_lite/interfaces/native/innerkits"
]
deps = [ "//foundation/ace/napi:ace_napi" ]
deps += [ "//third_party/opencv:opencv" ]
deps += [ "//third_party/SeetaFace2:SeetaFace2" ]
external_deps = [
"hiviewdfx_hilog_native:libhilog",
]
configs = [
":lib_config"
]
# Install path of the generated library (system module directory).
relative_install_dir = "module"
# Subsystem and component names; referenced later in subsystem_config.json
# and the product configuration.
subsystem_name = "SeetafaceApp"
part_name = "SeetafaceApi"
}
增加完对应的文件后,咱们需求将咱们的子系统增加到系统中进行编译,翻开build/subsystem_config.json并在终究增加以下代码:
"SeetafaceApp": {
"path": "seetaface",
"name": "SeetafaceApp"
}
增加完子系统再修改对应的产品配置。
翻开productdefine/common/products/rk3568.json并在终究增加以下代码:
"SeetafaceApp:SeetafaceApi":{}
做完以上修改后咱们就能够经过以下指令直接编译NAPI的库文件了:
./build.sh --product-name rk3568 --ccache
参阅RK3568快速上手-镜像烧录完结烧录即可。
使用端开发
在完结设备NAPI功用开发后,使用端经过调用NAPI组件中露出给使用的人脸辨认接口,即可完结对应功用。接下来就带着我们使用NAPI完结人脸辨认功用。
开发预备
-
下载DevEco Studio 3.0 Beta4;
-
建立开发环境,参阅开发预备;
-
了解特点eTS开发,参阅eTS语言快速入门;
SeetaFace2初始化
-
首要将SeetaFace2 NAPI接口声明文件放置于SDK目录/api下;
-
然后导入SeetaFace2 NAPI模块;
-
调用初始化接口;
// Called after the home-page instance is created.
async aboutToAppear() {
await StorageUtils.clearModel();
CommonLog.info(TAG,'aboutToAppear')
// Initialize the device-side face-recognition engine via NAPI.
let res = SeetafaceApp.FaceSearchInit()
CommonLog.info(TAG,`FaceSearchInit res=${res}`)
this.requestPermissions()
}
// Request runtime permissions, then load the local face images.
requestPermissions(){
CommonLog.info(TAG,'requestPermissions')
let context = featureAbility.getContext()
// 666 is an arbitrary request code. NOTE(review): the callback fires whether
// or not the permissions were granted — the grant result is not checked
// before reading media; confirm this is acceptable.
context.requestPermissionsFromUser(PERMISSIONS, 666,(res)=>{
this.getMediaImage()
})
}
获取一切人脸图片
经过文件办理模块fileio和媒体库办理mediaLibrary,获取指定使用数据目录下一切的图片信息,并将途径赋值给faceList,faceList数据用于Image组件供给url进行加载图片
// Collect all face images from the app's sandbox "files" directory and feed
// their paths to faceList (used as Image component urls).
async getMediaImage(){
let context = featureAbility.getContext();
// Local application sandbox path.
let localPath = await context.getOrCreateLocalDir()
CommonLog.info(TAG, `localPath:${localPath}`)
let facePath = localPath + "/files"
// All photos under the sandbox files directory.
this.faceList = await FileUtil.getImagePath(facePath)
}
设置人脸模型
获取选中的人脸图片地址和输入的姓名,调用SeetafaceApp.FaceSearchRegister(params)进行设置人脸模型。其间参数params由name姓名、image图片地址集合和sum图片数量组成。
// Register the selected face images as a model under `name`, persist the
// model description to light storage, then navigate back.
async submit(name) {
    if (!name || name.length == 0) {
        CommonLog.info(TAG, 'name is empty')
        return
    }
    let selectArr = this.faceList.filter(item => item.isSelect)
    if (selectArr.length == 0) {
        CommonLog.info(TAG, 'faceList is empty')
        return
    }
    // Close the name-input dialog.
    this.dialogController.close()
    try {
        let urls = []
        let files = []
        selectArr.forEach(item => {
            // Strip the scheme — the native layer expects a plain sandbox path.
            let source = item.url.replace('file://', '')
            CommonLog.info(TAG, `source:${source}`)
            urls.push(item.url)
            files.push(source)
        })
        // Parameters for the native registration call:
        // name + image path list + image count.
        let params = {
            name: name,
            image: files,
            sum: files.length
        }
        CommonLog.info(TAG, 'FaceSearchRegister' + JSON.stringify(params))
        let res = SeetafaceApp.FaceSearchRegister(params)
        CommonLog.info(TAG, 'FaceSearchRegister res ' + res)
        // Persist the registered model (name + image urls) to light storage.
        let data = {
            name: name,
            urls: urls
        }
        let modelStr = await StorageUtils.getModel()
        // Guard against an empty/missing store: the original called
        // JSON.parse(modelStr) unconditionally, which threw (or yielded null)
        // on first use and silently aborted the save via the catch below.
        let modelList = modelStr ? JSON.parse(modelStr) : []
        modelList.push(data)
        StorageUtils.setModel(modelList)
        router.back()
    } catch (err) {
        CommonLog.error(TAG, 'submit fail ' + err)
    }
}
完结框选人脸
调用SeetafaceApp.GetRecognizePoints传入当时图片地址,获取到人脸左上和右下坐标,再经过CanvasRenderingContext2D目标绘画出人脸框。
完结人脸辨认
调用SeetafaceApp.FaceSearchGetRecognize(url),传入图片地址对人脸进行辨认并回来对应辨认出来的姓名。
// Recognize the face in the current image through the native NAPI promise API.
recognize(){
SeetafaceApp.FaceSearchGetRecognize(this.url).then(res=>{
CommonLog.info(TAG,'recognize suceess' + JSON.stringify(res))
// The native side returns either the matched person's name or one of the
// sentinel strings 'ignored' / "recognize failed" / 'recognize error 1!'.
// NOTE(review): these literals must stay in sync with the device-side
// FaceSearchSearchRecognizer implementation.
if(res && res != 'ignored' && res != "recognize failed" && res != 'recognize error 1!'){
// A registered model matched — show its name.
this.name = res
}else{
this.name = '未辨认到该模型'
}
}).catch(err=>{
CommonLog.error(TAG,'recognize' + err)
this.name = '未辨认到该模型'
})
}
参阅文档
SeetaFace2移植开发文档:
gitee.com/openharmony…
OpenHarmony中napi的开发视频教程:
www.bilibili.com/video/BV1L4…
RK3568快速上手:
growing.openharmony.cn/mainPlay/le…
人脸辨认使用:
gitee.com/openharmony…
使用开发预备:
docs.openharmony.cn/pages/v3.2B…
eTS语言快速入门:
docs.openharmony.cn/pages/v3.2B…
常识系统工作组:
gitee.com/openharmony…