[BPI-CanMV-K230D-Zero Board Hands-On] Face Detection, Gesture Recognition, and License Plate Recognition
This article presents projects built on the Banana Pi CanMV K230D Zero development board with a MIPI camera: face detection, facial landmarks, gesture recognition, and license plate recognition.
AI Demo Development Framework

The Camera outputs two image streams by default:
- one in YUV420SP format (Sensor.YUV420SP), which goes straight to the Display;
- one in RGB888P format (Sensor.RGBP888), which feeds the AI stage.
The AI stage handles preprocessing, inference, and postprocessing.
Once processing is done, the results are drawn onto an OSD image instance and sent to the Display, where they are overlaid on the video.
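The skeleton below shows how a demo typically drives this pipeline. It is a minimal sketch distilled from the full listings later in this post; app stands in for any task object built on the AIBase wrapper introduced in the next section.

from libs.PipeLine import PipeLine

# Camera -> AI -> OSD -> Display, in its simplest form
pl = PipeLine(rgb888p_size=[1920, 1080], display_size=[800, 480], display_mode="lcd")
pl.create()                   # start Camera and Display, allocate the OSD layer
while True:
    img = pl.get_frame()      # RGB888P frame for the AI path
    res = app.run(img)        # preprocess + kmodel inference + postprocess
    app.draw_result(pl, res)  # draw results onto pl.osd_img
    pl.show_image()           # overlay the OSD image on the YUV420SP video stream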
API Encapsulation
To make development easier, the common functionality of the AI Demo framework described above — grabbing images from the Camera, AI2D preprocessing, and kmodel inference — is wrapped in a set of reusable interfaces. See the AI Demo API documentation for details.
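As the listings below illustrate, every demo follows the same contract: subclass AIBase, configure an Ai2d preprocessing pipeline (once at startup, or per detection for second-stage models), and implement postprocess. A minimal sketch of that pattern, using the same imports as the demos (the resize-only preprocessing here is just an illustration):

class MyApp(AIBase):
    def __init__(self, kmodel_path, model_input_size, rgb888p_size, debug_mode=0):
        super().__init__(kmodel_path, model_input_size, rgb888p_size, debug_mode)
        self.model_input_size = model_input_size
        self.rgb888p_size = [ALIGN_UP(rgb888p_size[0], 16), rgb888p_size[1]]
        self.ai2d = Ai2d(debug_mode)  # hardware-accelerated preprocessing unit
        self.ai2d.set_ai2d_dtype(nn.ai2d_format.NCHW_FMT, nn.ai2d_format.NCHW_FMT, np.uint8, np.uint8)

    def config_preprocess(self, input_image_size=None):
        size = input_image_size if input_image_size else self.rgb888p_size
        self.ai2d.resize(nn.interp_method.tf_bilinear, nn.interp_mode.half_pixel)
        self.ai2d.build([1, 3, size[1], size[0]],
                        [1, 3, self.model_input_size[1], self.model_input_size[0]])

    def postprocess(self, results):
        # results holds the kmodel's output tensors; decode them into task results here
        return results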
Face Detection
The face detection application detects every face in each video frame and marks it with a bounding box, along with five keypoints: the left eye center, right eye center, nose tip, left mouth corner, and right mouth corner.
Flowchart

Code
Open the file \CanMV\sdcard\examples\05-AI-Demo\face_detection.py in the CanMV IDE:
from libs.PipeLine import PipeLine, ScopedTiming
from libs.AIBase import AIBase
from libs.AI2D import Ai2d
import os
import ujson
from media.media import *
from time import *
import nncase_runtime as nn
import ulab.numpy as np
import time
import utime
import image
import random
import gc
import sys
import aidemo

# Custom face detection task, built on the AIBase wrapper
class FaceDetectionApp(AIBase):
    def __init__(self, kmodel_path, model_input_size, anchors, confidence_threshold=0.5, nms_threshold=0.2, rgb888p_size=[224,224], display_size=[1920,1080], debug_mode=0):
        super().__init__(kmodel_path, model_input_size, rgb888p_size, debug_mode)
        self.kmodel_path = kmodel_path
        self.model_input_size = model_input_size
        self.confidence_threshold = confidence_threshold
        self.nms_threshold = nms_threshold
        self.anchors = anchors
        # widths are aligned up to a multiple of 16
        self.rgb888p_size = [ALIGN_UP(rgb888p_size[0], 16), rgb888p_size[1]]
        self.display_size = [ALIGN_UP(display_size[0], 16), display_size[1]]
        self.debug_mode = debug_mode
        self.ai2d = Ai2d(debug_mode)
        self.ai2d.set_ai2d_dtype(nn.ai2d_format.NCHW_FMT, nn.ai2d_format.NCHW_FMT, np.uint8, np.uint8)

    # configure AI2D preprocessing: letterbox pad with the mean color, then resize
    def config_preprocess(self, input_image_size=None):
        with ScopedTiming("set preprocess config", self.debug_mode > 0):
            ai2d_input_size = input_image_size if input_image_size else self.rgb888p_size
            top, bottom, left, right = self.get_padding_param()
            self.ai2d.pad([0, 0, 0, 0, top, bottom, left, right], 0, [104, 117, 123])
            self.ai2d.resize(nn.interp_method.tf_bilinear, nn.interp_mode.half_pixel)
            self.ai2d.build([1, 3, ai2d_input_size[1], ai2d_input_size[0]], [1, 3, self.model_input_size[1], self.model_input_size[0]])

    # decode raw model outputs into detection boxes
    def postprocess(self, results):
        with ScopedTiming("postprocess", self.debug_mode > 0):
            post_ret = aidemo.face_det_post_process(self.confidence_threshold, self.nms_threshold, self.model_input_size[1], self.anchors, self.rgb888p_size, results)
            if len(post_ret) == 0:
                return post_ret
            else:
                return post_ret[0]

    # scale boxes from sensor resolution to display resolution and draw them on the OSD
    def draw_result(self, pl, dets):
        with ScopedTiming("display_draw", self.debug_mode > 0):
            if dets:
                pl.osd_img.clear()
                for det in dets:
                    x, y, w, h = map(lambda x: int(round(x, 0)), det[:4])
                    x = x * self.display_size[0] // self.rgb888p_size[0]
                    y = y * self.display_size[1] // self.rgb888p_size[1]
                    w = w * self.display_size[0] // self.rgb888p_size[0]
                    h = h * self.display_size[1] // self.rgb888p_size[1]
                    pl.osd_img.draw_rectangle(x, y, w, h, color=(255, 255, 0, 255), thickness=2)
            else:
                pl.osd_img.clear()

    # compute the letterbox padding that preserves the input aspect ratio
    def get_padding_param(self):
        dst_w = self.model_input_size[0]
        dst_h = self.model_input_size[1]
        ratio_w = dst_w / self.rgb888p_size[0]
        ratio_h = dst_h / self.rgb888p_size[1]
        ratio = min(ratio_w, ratio_h)
        new_w = int(ratio * self.rgb888p_size[0])
        new_h = int(ratio * self.rgb888p_size[1])
        dw = (dst_w - new_w) / 2
        dh = (dst_h - new_h) / 2
        top = int(round(0))
        bottom = int(round(dh * 2 + 0.1))
        left = int(round(0))
        right = int(round(dw * 2 - 0.1))
        return top, bottom, left, right

if __name__ == "__main__":
    display_mode = "lcd"
    rgb888p_size = [1920, 1080]
    if display_mode == "hdmi":
        display_size = [1920, 1080]
    else:
        display_size = [800, 480]
    kmodel_path = "/sdcard/examples/kmodel/face_detection_320.kmodel"
    confidence_threshold = 0.5
    nms_threshold = 0.2
    anchor_len = 4200
    det_dim = 4
    anchors_path = "/sdcard/examples/utils/prior_data_320.bin"
    anchors = np.fromfile(anchors_path, dtype=np.float)
    anchors = anchors.reshape((anchor_len, det_dim))
    pl = PipeLine(rgb888p_size=rgb888p_size, display_size=display_size, display_mode=display_mode)
    pl.create()
    face_det = FaceDetectionApp(kmodel_path, model_input_size=[320, 320], anchors=anchors, confidence_threshold=confidence_threshold, nms_threshold=nms_threshold, rgb888p_size=rgb888p_size, display_size=display_size, debug_mode=0)
    face_det.config_preprocess()
    try:
        while True:
            with ScopedTiming("total", 1):
                img = pl.get_frame()
                res = face_det.run(img)
                face_det.draw_result(pl, res)
                pl.show_image()
                gc.collect()
    finally:
        # release the model and the pipeline when the loop is stopped
        face_det.deinit()
        pl.destroy()
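get_padding_param() implements aspect-preserving letterboxing: the frame is scaled by the smaller of the two axis ratios, and the leftover area is padded (all of it on the bottom and right here) with the mean color [104, 117, 123]. A quick sanity check of the numbers for the default 1920x1080 input and the 320x320 model:

# letterbox arithmetic for 1920x1080 -> 320x320
dst_w = dst_h = 320
src_w, src_h = 1920, 1080
ratio = min(dst_w / src_w, dst_h / src_h)              # 1/6: width is the limiting axis
new_w, new_h = int(ratio * src_w), int(ratio * src_h)  # 320 x 180 scaled image
dh = (dst_h - new_h) / 2                               # 70.0
bottom = int(round(dh * 2 + 0.1))                      # 140 rows of padding at the bottom
right = int(round((dst_w - new_w) - 0.1))              # 0: no horizontal padding needed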
Results

Facial Landmarks
The facial landmark application is a dual-model application: it first runs face detection on every video frame, then predicts 106 keypoints for each detected face and uses them to draw the contours of the face outline, mouth, eyes, nose, and eyebrows.
Flowchart

Code
Open the file \CanMV\sdcard\examples\05-AI-Demo\face_landmark.py in the CanMV IDE:
from libs.PipeLine import PipeLine, ScopedTiming
from libs.AIBase import AIBase
from libs.AI2D import Ai2d
import os
import ujson
from media.media import *
from time import *
import nncase_runtime as nn
import ulab.numpy as np
import time
import image
import aidemo
import random
import gc
import sys

# face detection task (first stage)
class FaceDetApp(AIBase):
    def __init__(self, kmodel_path, model_input_size, anchors, confidence_threshold=0.25, nms_threshold=0.3, rgb888p_size=[1280,720], display_size=[1920,1080], debug_mode=0):
        super().__init__(kmodel_path, model_input_size, rgb888p_size, debug_mode)
        self.kmodel_path = kmodel_path
        self.model_input_size = model_input_size
        self.confidence_threshold = confidence_threshold
        self.nms_threshold = nms_threshold
        self.anchors = anchors
        self.rgb888p_size = [ALIGN_UP(rgb888p_size[0], 16), rgb888p_size[1]]
        self.display_size = [ALIGN_UP(display_size[0], 16), display_size[1]]
        self.debug_mode = debug_mode
        self.ai2d = Ai2d(debug_mode)
        self.ai2d.set_ai2d_dtype(nn.ai2d_format.NCHW_FMT, nn.ai2d_format.NCHW_FMT, np.uint8, np.uint8)

    def config_preprocess(self, input_image_size=None):
        with ScopedTiming("set preprocess config", self.debug_mode > 0):
            ai2d_input_size = input_image_size if input_image_size else self.rgb888p_size
            self.ai2d.pad(self.get_pad_param(), 0, [104, 117, 123])
            self.ai2d.resize(nn.interp_method.tf_bilinear, nn.interp_mode.half_pixel)
            self.ai2d.build([1, 3, ai2d_input_size[1], ai2d_input_size[0]], [1, 3, self.model_input_size[1], self.model_input_size[0]])

    def postprocess(self, results):
        with ScopedTiming("postprocess", self.debug_mode > 0):
            res = aidemo.face_det_post_process(self.confidence_threshold, self.nms_threshold, self.model_input_size[0], self.anchors, self.rgb888p_size, results)
            if len(res) == 0:
                return res
            else:
                return res[0]

    def get_pad_param(self):
        dst_w = self.model_input_size[0]
        dst_h = self.model_input_size[1]
        ratio_w = dst_w / self.rgb888p_size[0]
        ratio_h = dst_h / self.rgb888p_size[1]
        if ratio_w < ratio_h:
            ratio = ratio_w
        else:
            ratio = ratio_h
        new_w = int(ratio * self.rgb888p_size[0])
        new_h = int(ratio * self.rgb888p_size[1])
        dw = (dst_w - new_w) / 2
        dh = (dst_h - new_h) / 2
        top = int(round(0))
        bottom = int(round(dh * 2 + 0.1))
        left = int(round(0))
        right = int(round(dw * 2 - 0.1))
        return [0, 0, 0, 0, top, bottom, left, right]

# 106-keypoint landmark task (second stage)
class FaceLandMarkApp(AIBase):
    def __init__(self, kmodel_path, model_input_size, rgb888p_size=[1920,1080], display_size=[1920,1080], debug_mode=0):
        super().__init__(kmodel_path, model_input_size, rgb888p_size, debug_mode)
        self.kmodel_path = kmodel_path
        self.model_input_size = model_input_size
        self.rgb888p_size = [ALIGN_UP(rgb888p_size[0], 16), rgb888p_size[1]]
        self.display_size = [ALIGN_UP(display_size[0], 16), display_size[1]]
        self.debug_mode = debug_mode
        self.matrix_dst = None
        self.ai2d = Ai2d(debug_mode)
        self.ai2d.set_ai2d_dtype(nn.ai2d_format.NCHW_FMT, nn.ai2d_format.NCHW_FMT, np.uint8, np.uint8)

    # build an affine crop of the detected face as the model input
    def config_preprocess(self, det, input_image_size=None):
        with ScopedTiming("set preprocess config", self.debug_mode > 0):
            ai2d_input_size = input_image_size if input_image_size else self.rgb888p_size
            self.matrix_dst = self.get_affine_matrix(det)
            affine_matrix = [self.matrix_dst[0][0], self.matrix_dst[0][1], self.matrix_dst[0][2],
                             self.matrix_dst[1][0], self.matrix_dst[1][1], self.matrix_dst[1][2]]
            self.ai2d.affine(nn.interp_method.cv2_bilinear, 0, 0, 127, 1, affine_matrix)
            self.ai2d.build([1, 3, ai2d_input_size[1], ai2d_input_size[0]], [1, 3, self.model_input_size[1], self.model_input_size[0]])

    # map predicted keypoints from crop coordinates back to the original frame
    def postprocess(self, results):
        with ScopedTiming("postprocess", self.debug_mode > 0):
            pred = results[0]
            half_input_len = self.model_input_size[0] // 2
            pred = pred.flatten()
            for i in range(len(pred)):
                pred[i] += (pred[i] + 1) * half_input_len
            matrix_dst_inv = aidemo.invert_affine_transform(self.matrix_dst)
            matrix_dst_inv = matrix_dst_inv.flatten()
            half_out_len = len(pred) // 2
            for kp_id in range(half_out_len):
                old_x = pred[kp_id * 2]
                old_y = pred[kp_id * 2 + 1]
                new_x = old_x * matrix_dst_inv[0] + old_y * matrix_dst_inv[1] + matrix_dst_inv[2]
                new_y = old_x * matrix_dst_inv[3] + old_y * matrix_dst_inv[4] + matrix_dst_inv[5]
                pred[kp_id * 2] = new_x
                pred[kp_id * 2 + 1] = new_y
            return pred

    # scale-and-center transform that maps the face box into the model input square
    def get_affine_matrix(self, bbox):
        with ScopedTiming("get_affine_matrix", self.debug_mode > 1):
            x1, y1, w, h = map(lambda x: int(round(x, 0)), bbox[:4])
            scale_ratio = (self.model_input_size[0]) / (max(w, h) * 1.5)
            cx = (x1 + w / 2) * scale_ratio
            cy = (y1 + h / 2) * scale_ratio
            half_input_len = self.model_input_size[0] / 2
            matrix_dst = np.zeros((2, 3), dtype=np.float)
            matrix_dst[0, 0] = scale_ratio
            matrix_dst[0, 1] = 0
            matrix_dst[0, 2] = half_input_len - cx
            matrix_dst[1, 0] = 0
            matrix_dst[1, 1] = scale_ratio
            matrix_dst[1, 2] = half_input_len - cy
            return matrix_dst

# combined detection + landmark pipeline
class FaceLandMark:
    def __init__(self, face_det_kmodel, face_landmark_kmodel, det_input_size, landmark_input_size, anchors, confidence_threshold=0.25, nms_threshold=0.3, rgb888p_size=[1920,1080], display_size=[1920,1080], debug_mode=0):
        self.face_det_kmodel = face_det_kmodel
        self.face_landmark_kmodel = face_landmark_kmodel
        self.det_input_size = det_input_size
        self.landmark_input_size = landmark_input_size
        self.anchors = anchors
        self.confidence_threshold = confidence_threshold
        self.nms_threshold = nms_threshold
        self.rgb888p_size = [ALIGN_UP(rgb888p_size[0], 16), rgb888p_size[1]]
        self.display_size = [ALIGN_UP(display_size[0], 16), display_size[1]]
        self.debug_mode = debug_mode
        # keypoint index groups, one list per facial region
        self.dict_kp_seq = [
            [43, 44, 45, 47, 46, 50, 51, 49, 48],               # left eyebrow
            [97, 98, 99, 100, 101, 105, 104, 103, 102],         # right eyebrow
            [35, 36, 33, 37, 39, 42, 40, 41],                   # left eye
            [89, 90, 87, 91, 93, 96, 94, 95],                   # right eye
            [34, 88],                                           # pupils
            [72, 73, 74, 86],                                   # nose bridge
            [77, 78, 79, 80, 85, 84, 83],                       # nose wings
            [52, 55, 56, 53, 59, 58, 61, 68, 67, 71, 63, 64],   # outer lip
            [65, 54, 60, 57, 69, 70, 62, 66],                   # inner lip
            [1, 9, 10, 11, 12, 13, 14, 15, 16, 2, 3, 4, 5, 6, 7, 8, 0, 24, 23, 22, 21, 20, 19, 18, 32, 31, 30, 29, 28, 27, 26, 25, 17]  # face contour
        ]
        # one ARGB color per region
        self.color_list_for_osd_kp = [
            (255, 0, 255, 0),
            (255, 0, 255, 0),
            (255, 255, 0, 255),
            (255, 255, 0, 255),
            (255, 255, 0, 0),
            (255, 255, 170, 0),
            (255, 255, 255, 0),
            (255, 0, 255, 255),
            (255, 255, 220, 50),
            (255, 30, 30, 255)
        ]
        self.face_det = FaceDetApp(self.face_det_kmodel, model_input_size=self.det_input_size, anchors=self.anchors, confidence_threshold=self.confidence_threshold, nms_threshold=self.nms_threshold, rgb888p_size=self.rgb888p_size, display_size=self.display_size, debug_mode=0)
        self.face_landmark = FaceLandMarkApp(self.face_landmark_kmodel, model_input_size=self.landmark_input_size, rgb888p_size=self.rgb888p_size, display_size=self.display_size)
        self.face_det.config_preprocess()

    # detect faces, then run the landmark model once per detected face
    def run(self, input_np):
        det_boxes = self.face_det.run(input_np)
        landmark_res = []
        for det_box in det_boxes:
            self.face_landmark.config_preprocess(det_box)
            res = self.face_landmark.run(input_np)
            landmark_res.append(res)
        return det_boxes, landmark_res

    def draw_result(self, pl, dets, landmark_res):
        pl.osd_img.clear()
        if dets:
            draw_img_np = np.zeros((self.display_size[1], self.display_size[0], 4), dtype=np.uint8)
            draw_img = image.Image(self.display_size[0], self.display_size[1], image.ARGB8888, alloc=image.ALLOC_REF, data=draw_img_np)
            for pred in landmark_res:
                for sub_part_index in range(len(self.dict_kp_seq)):
                    sub_part = self.dict_kp_seq[sub_part_index]
                    face_sub_part_point_set = []
                    for kp_index in range(len(sub_part)):
                        real_kp_index = sub_part[kp_index]
                        x, y = pred[real_kp_index * 2], pred[real_kp_index * 2 + 1]
                        x = int(x * self.display_size[0] // self.rgb888p_size[0])
                        y = int(y * self.display_size[1] // self.rgb888p_size[1])
                        face_sub_part_point_set.append((x, y))
                    if sub_part_index in (9, 6):
                        # open polylines for these regions
                        color = np.array(self.color_list_for_osd_kp[sub_part_index], dtype=np.uint8)
                        face_sub_part_point_set = np.array(face_sub_part_point_set)
                        aidemo.polylines(draw_img_np, face_sub_part_point_set, False, color, 5, 8, 0)
                    elif sub_part_index == 4:
                        # pupils are drawn as small circles
                        color = self.color_list_for_osd_kp[sub_part_index]
                        for kp in face_sub_part_point_set:
                            x, y = kp[0], kp[1]
                            draw_img.draw_circle(x, y, 2, color, 1)
                    else:
                        # the remaining regions are drawn as closed contours
                        color = np.array(self.color_list_for_osd_kp[sub_part_index], dtype=np.uint8)
                        face_sub_part_point_set = np.array(face_sub_part_point_set)
                        aidemo.contours(draw_img_np, face_sub_part_point_set, -1, color, 2, 8)
            pl.osd_img.copy_from(draw_img)

if __name__ == "__main__":
    display_mode = "lcd"
    rgb888p_size = [1920, 1080]
    if display_mode == "hdmi":
        display_size = [1920, 1080]
    else:
        display_size = [800, 480]
    face_det_kmodel_path = "/sdcard/examples/kmodel/face_detection_320.kmodel"
    face_landmark_kmodel_path = "/sdcard/examples/kmodel/face_landmark.kmodel"
    anchors_path = "/sdcard/examples/utils/prior_data_320.bin"
    face_det_input_size = [320, 320]
    face_landmark_input_size = [192, 192]
    confidence_threshold = 0.5
    nms_threshold = 0.2
    anchor_len = 4200
    det_dim = 4
    anchors = np.fromfile(anchors_path, dtype=np.float)
    anchors = anchors.reshape((anchor_len, det_dim))
    pl = PipeLine(rgb888p_size=rgb888p_size, display_size=display_size, display_mode=display_mode)
    pl.create()
    flm = FaceLandMark(face_det_kmodel_path, face_landmark_kmodel_path, det_input_size=face_det_input_size, landmark_input_size=face_landmark_input_size, anchors=anchors, confidence_threshold=confidence_threshold, nms_threshold=nms_threshold, rgb888p_size=rgb888p_size, display_size=display_size)
    try:
        while True:
            os.exitpoint()
            with ScopedTiming("total", 1):
                img = pl.get_frame()
                det_boxes, landmark_res = flm.run(img)
                flm.draw_result(pl, det_boxes, landmark_res)
                pl.show_image()
                gc.collect()
    finally:
        flm.face_det.deinit()
        flm.face_landmark.deinit()
        pl.destroy()
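The landmark model runs on a 192x192 affine crop of the face, so postprocess() has to map the predicted points back to frame coordinates with the inverted transform, which the demo gets from aidemo.invert_affine_transform(). For reference, inverting a 2x3 affine matrix is simple enough to write out; the helper below is a hedged pure-Python sketch of the math, not the aidemo implementation:

def invert_affine_2x3(m):
    # m maps p' = A*p + t, with A = [[a, b], [d, e]] and t = [c, f];
    # the inverse maps p = A_inv*p' - A_inv*t
    a, b, c = m[0]
    d, e, f = m[1]
    det = a * e - b * d
    return [[ e / det, -b / det, (b * f - c * e) / det],
            [-d / det,  a / det, (c * d - a * f) / det]]

For the matrix built in get_affine_matrix(), A is just scale_ratio times the identity, so the inverse simply divides by scale_ratio and undoes the translation.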
Results


See the video at the top of this post.
Gesture Recognition
The gesture recognition application is a classification-based gesture task: it first detects palms in each video frame, then feeds every detected palm into a classification model to determine the gesture.
Flowchart

Code
Open the file \CanMV\sdcard\examples\05-AI-Demo\hand_recognition.py in the CanMV IDE:
from libs.PipeLine import PipeLine, ScopedTiming
from libs.AIBase import AIBase
from libs.AI2D import Ai2d
import os
import ujson
from media.media import *
from time import *
import nncase_runtime as nn
import ulab.numpy as np
import time
import image
import aicube
import random
import gc
import sys

# palm detection task (first stage)
class HandDetApp(AIBase):
    def __init__(self, kmodel_path, model_input_size, anchors, confidence_threshold=0.2, nms_threshold=0.5, nms_option=False, strides=[8,16,32], rgb888p_size=[1920,1080], display_size=[1920,1080], debug_mode=0):
        super().__init__(kmodel_path, model_input_size, rgb888p_size, debug_mode)
        self.kmodel_path = kmodel_path
        self.model_input_size = model_input_size
        self.confidence_threshold = confidence_threshold
        self.nms_threshold = nms_threshold
        self.anchors = anchors
        self.strides = strides
        self.nms_option = nms_option
        self.rgb888p_size = [ALIGN_UP(rgb888p_size[0], 16), rgb888p_size[1]]
        self.display_size = [ALIGN_UP(display_size[0], 16), display_size[1]]
        self.debug_mode = debug_mode
        self.ai2d = Ai2d(debug_mode)
        self.ai2d.set_ai2d_dtype(nn.ai2d_format.NCHW_FMT, nn.ai2d_format.NCHW_FMT, np.uint8, np.uint8)

    # centered letterbox pad, then resize to the detector input
    def config_preprocess(self, input_image_size=None):
        with ScopedTiming("set preprocess config", self.debug_mode > 0):
            ai2d_input_size = input_image_size if input_image_size else self.rgb888p_size
            top, bottom, left, right = self.get_padding_param()
            self.ai2d.pad([0, 0, 0, 0, top, bottom, left, right], 0, [114, 114, 114])
            self.ai2d.resize(nn.interp_method.tf_bilinear, nn.interp_mode.half_pixel)
            self.ai2d.build([1, 3, ai2d_input_size[1], ai2d_input_size[0]], [1, 3, self.model_input_size[1], self.model_input_size[0]])

    def postprocess(self, results):
        with ScopedTiming("postprocess", self.debug_mode > 0):
            dets = aicube.anchorbasedet_post_process(results[0], results[1], results[2], self.model_input_size, self.rgb888p_size, self.strides, 1, self.confidence_threshold, self.nms_threshold, self.anchors, self.nms_option)
            return dets

    def get_padding_param(self):
        dst_w = self.model_input_size[0]
        dst_h = self.model_input_size[1]
        input_width = self.rgb888p_size[0]
        input_high = self.rgb888p_size[1]
        ratio_w = dst_w / input_width
        ratio_h = dst_h / input_high
        if ratio_w < ratio_h:
            ratio = ratio_w
        else:
            ratio = ratio_h
        new_w = int(ratio * input_width)
        new_h = int(ratio * input_high)
        dw = (dst_w - new_w) / 2
        dh = (dst_h - new_h) / 2
        top = int(round(dh - 0.1))
        bottom = int(round(dh + 0.1))
        left = int(round(dw - 0.1))
        right = int(round(dw + 0.1))
        return top, bottom, left, right

# gesture classification task (second stage)
class HandRecognitionApp(AIBase):
    def __init__(self, kmodel_path, model_input_size, labels, rgb888p_size=[1920,1080], display_size=[1920,1080], debug_mode=0):
        super().__init__(kmodel_path, model_input_size, rgb888p_size, debug_mode)
        self.kmodel_path = kmodel_path
        self.model_input_size = model_input_size
        self.labels = labels
        self.rgb888p_size = [ALIGN_UP(rgb888p_size[0], 16), rgb888p_size[1]]
        self.display_size = [ALIGN_UP(display_size[0], 16), display_size[1]]
        self.crop_params = []
        self.debug_mode = debug_mode
        self.ai2d = Ai2d(debug_mode)
        self.ai2d.set_ai2d_dtype(nn.ai2d_format.NCHW_FMT, nn.ai2d_format.NCHW_FMT, np.uint8, np.uint8)

    # crop the expanded hand region, then resize to the classifier input
    def config_preprocess(self, det, input_image_size=None):
        with ScopedTiming("set preprocess config", self.debug_mode > 0):
            ai2d_input_size = input_image_size if input_image_size else self.rgb888p_size
            self.crop_params = self.get_crop_param(det)
            self.ai2d.crop(self.crop_params[0], self.crop_params[1], self.crop_params[2], self.crop_params[3])
            self.ai2d.resize(nn.interp_method.tf_bilinear, nn.interp_mode.half_pixel)
            self.ai2d.build([1, 3, ai2d_input_size[1], ai2d_input_size[0]], [1, 3, self.model_input_size[1], self.model_input_size[0]])

    # softmax over the logits, then report the best class and its score
    def postprocess(self, results):
        with ScopedTiming("postprocess", self.debug_mode > 0):
            result = results[0].reshape(results[0].shape[0] * results[0].shape[1])
            x_softmax = self.softmax(result)
            idx = np.argmax(x_softmax)
            text = " " + self.labels[idx] + ": " + str(round(x_softmax[idx], 2))
            return text

    # expand the detection box to a square 1.26x the longer side, clamped to the frame
    def get_crop_param(self, det_box):
        x1, y1, x2, y2 = det_box[2], det_box[3], det_box[4], det_box[5]
        w, h = int(x2 - x1), int(y2 - y1)
        w_det = int(float(x2 - x1) * self.display_size[0] // self.rgb888p_size[0])
        h_det = int(float(y2 - y1) * self.display_size[1] // self.rgb888p_size[1])
        x_det = int(x1 * self.display_size[0] // self.rgb888p_size[0])
        y_det = int(y1 * self.display_size[1] // self.rgb888p_size[1])
        length = max(w, h) / 2
        cx = (x1 + x2) / 2
        cy = (y1 + y2) / 2
        ratio_num = 1.26 * length
        x1_kp = int(max(0, cx - ratio_num))
        y1_kp = int(max(0, cy - ratio_num))
        x2_kp = int(min(self.rgb888p_size[0] - 1, cx + ratio_num))
        y2_kp = int(min(self.rgb888p_size[1] - 1, cy + ratio_num))
        w_kp = int(x2_kp - x1_kp + 1)
        h_kp = int(y2_kp - y1_kp + 1)
        return [x1_kp, y1_kp, w_kp, h_kp]

    def softmax(self, x):
        x -= np.max(x)
        x = np.exp(x) / np.sum(np.exp(x))
        return x

# combined detection + classification pipeline
class HandRecognition:
    def __init__(self, hand_det_kmodel, hand_kp_kmodel, det_input_size, kp_input_size, labels, anchors, confidence_threshold=0.25, nms_threshold=0.3, nms_option=False, strides=[8,16,32], rgb888p_size=[1280,720], display_size=[1920,1080], debug_mode=0):
        self.hand_det_kmodel = hand_det_kmodel
        self.hand_kp_kmodel = hand_kp_kmodel
        self.det_input_size = det_input_size
        self.kp_input_size = kp_input_size
        self.labels = labels
        self.anchors = anchors
        self.confidence_threshold = confidence_threshold
        self.nms_threshold = nms_threshold
        self.nms_option = nms_option
        self.strides = strides
        self.rgb888p_size = [ALIGN_UP(rgb888p_size[0], 16), rgb888p_size[1]]
        self.display_size = [ALIGN_UP(display_size[0], 16), display_size[1]]
        self.debug_mode = debug_mode
        self.hand_det = HandDetApp(self.hand_det_kmodel, model_input_size=self.det_input_size, anchors=self.anchors, confidence_threshold=self.confidence_threshold, nms_threshold=self.nms_threshold, nms_option=self.nms_option, strides=self.strides, rgb888p_size=self.rgb888p_size, display_size=self.display_size, debug_mode=0)
        self.hand_rec = HandRecognitionApp(self.hand_kp_kmodel, model_input_size=self.kp_input_size, labels=self.labels, rgb888p_size=self.rgb888p_size, display_size=self.display_size)
        self.hand_det.config_preprocess()

    # detect hands, filter out implausible boxes, classify the rest
    def run(self, input_np):
        det_boxes = self.hand_det.run(input_np)
        hand_rec_res = []
        hand_det_res = []
        for det_box in det_boxes:
            x1, y1, x2, y2 = det_box[2], det_box[3], det_box[4], det_box[5]
            w, h = int(x2 - x1), int(y2 - y1)
            # skip boxes that are too small or clipped at the frame edges
            if (h < (0.1 * self.rgb888p_size[1])):
                continue
            if (w < (0.25 * self.rgb888p_size[0]) and ((x1 < (0.03 * self.rgb888p_size[0])) or (x2 > (0.97 * self.rgb888p_size[0])))):
                continue
            if (w < (0.15 * self.rgb888p_size[0]) and ((x1 < (0.01 * self.rgb888p_size[0])) or (x2 > (0.99 * self.rgb888p_size[0])))):
                continue
            self.hand_rec.config_preprocess(det_box)
            text = self.hand_rec.run(input_np)
            hand_det_res.append(det_box)
            hand_rec_res.append(text)
        return hand_det_res, hand_rec_res

    def draw_result(self, pl, hand_det_res, hand_rec_res):
        pl.osd_img.clear()
        if hand_det_res:
            for k in range(len(hand_det_res)):
                det_box = hand_det_res[k]
                x1, y1, x2, y2 = det_box[2], det_box[3], det_box[4], det_box[5]
                w, h = int(x2 - x1), int(y2 - y1)
                w_det = int(float(x2 - x1) * self.display_size[0] // self.rgb888p_size[0])
                h_det = int(float(y2 - y1) * self.display_size[1] // self.rgb888p_size[1])
                x_det = int(x1 * self.display_size[0] // self.rgb888p_size[0])
                y_det = int(y1 * self.display_size[1] // self.rgb888p_size[1])
                pl.osd_img.draw_rectangle(x_det, y_det, w_det, h_det, color=(255, 0, 255, 0), thickness=2)
                pl.osd_img.draw_string_advanced(x_det, y_det - 50, 32, hand_rec_res[k], color=(255, 0, 255, 0))

if __name__ == "__main__":
    display_mode = "lcd"
    rgb888p_size = [1920, 1080]
    if display_mode == "hdmi":
        display_size = [1920, 1080]
    else:
        display_size = [800, 480]
    hand_det_kmodel_path = "/sdcard/examples/kmodel/hand_det.kmodel"
    hand_rec_kmodel_path = "/sdcard/examples/kmodel/hand_reco.kmodel"
    anchors_path = "/sdcard/examples/utils/prior_data_320.bin"
    hand_det_input_size = [512, 512]
    hand_rec_input_size = [224, 224]
    confidence_threshold = 0.2
    nms_threshold = 0.5
    labels = ["gun", "other", "yeah", "five"]
    anchors = [26, 27, 53, 52, 75, 71, 80, 99, 106, 82, 99, 134, 140, 113, 161, 172, 245, 276]
    pl = PipeLine(rgb888p_size=rgb888p_size, display_size=display_size, display_mode=display_mode)
    pl.create()
    hr = HandRecognition(hand_det_kmodel_path, hand_rec_kmodel_path, det_input_size=hand_det_input_size, kp_input_size=hand_rec_input_size, labels=labels, anchors=anchors, confidence_threshold=confidence_threshold, nms_threshold=nms_threshold, nms_option=False, strides=[8,16,32], rgb888p_size=rgb888p_size, display_size=display_size)
    try:
        while True:
            with ScopedTiming("total", 1):
                img = pl.get_frame()
                hand_det_res, hand_rec_res = hr.run(img)
                hr.draw_result(pl, hand_det_res, hand_rec_res)
                pl.show_image()
                gc.collect()
    finally:
        hr.hand_det.deinit()
        hr.hand_rec.deinit()
        pl.destroy()
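get_crop_param() expands each detected hand box into a square region 1.26 times the longer side, centered on the box, so the classifier sees some context around the palm. Worked numbers for a hypothetical box (x1, y1, x2, y2) = (600, 300, 800, 560) in a 1920x1080 frame (edge clamping omitted since the box is well inside the frame):

x1, y1, x2, y2 = 600, 300, 800, 560
length = max(x2 - x1, y2 - y1) / 2         # 130.0: half the longer side
cx, cy = (x1 + x2) / 2, (y1 + y2) / 2      # (700.0, 430.0): box center
r = 1.26 * length                          # 163.8: half-side of the expanded square
crop_x, crop_y = int(cx - r), int(cy - r)  # (536, 266)
crop_w = int(cx + r) - crop_x + 1          # 328
crop_h = int(cy + r) - crop_y + 1          # 328: square crop for the classifier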
Results
Three gestures are recognized:

For other gestures:

License Plate Recognition
The license plate recognition application is a dual-model task: it first detects the license plates appearing in the video, then recognizes each detected plate and draws the recognized text next to the corresponding detection box.
Flowchart

Code
Open the file \CanMV\sdcard\examples\05-AI-Demo\licence_det_rec.py in the CanMV IDE:
from libs.PipeLine import PipeLine, ScopedTiming
from libs.AIBase import AIBase
from libs.AI2D import Ai2d
import os
import ujson
from media.media import *
from time import *
import nncase_runtime as nn
import ulab.numpy as np
import time
import image
import aidemo
import random
import gc
import sys

# license plate detection task (first stage)
class LicenceDetectionApp(AIBase):
    def __init__(self, kmodel_path, model_input_size, confidence_threshold=0.5, nms_threshold=0.2, rgb888p_size=[224,224], display_size=[1920,1080], debug_mode=0):
        super().__init__(kmodel_path, model_input_size, rgb888p_size, debug_mode)
        self.kmodel_path = kmodel_path
        self.model_input_size = model_input_size
        self.confidence_threshold = confidence_threshold
        self.nms_threshold = nms_threshold
        self.rgb888p_size = [ALIGN_UP(rgb888p_size[0], 16), rgb888p_size[1]]
        self.display_size = [ALIGN_UP(display_size[0], 16), display_size[1]]
        self.debug_mode = debug_mode
        self.ai2d = Ai2d(debug_mode)
        self.ai2d.set_ai2d_dtype(nn.ai2d_format.NCHW_FMT, nn.ai2d_format.NCHW_FMT, np.uint8, np.uint8)

    def config_preprocess(self, input_image_size=None):
        with ScopedTiming("set preprocess config", self.debug_mode > 0):
            ai2d_input_size = input_image_size if input_image_size else self.rgb888p_size
            self.ai2d.resize(nn.interp_method.tf_bilinear, nn.interp_mode.half_pixel)
            self.ai2d.build([1, 3, ai2d_input_size[1], ai2d_input_size[0]], [1, 3, self.model_input_size[1], self.model_input_size[0]])

    def postprocess(self, results):
        with ScopedTiming("postprocess", self.debug_mode > 0):
            det_res = aidemo.licence_det_postprocess(results, [self.rgb888p_size[1], self.rgb888p_size[0]], self.model_input_size, self.confidence_threshold, self.nms_threshold)
            return det_res

# license plate recognition task (second stage)
class LicenceRecognitionApp(AIBase):
    def __init__(self, kmodel_path, model_input_size, rgb888p_size=[1920,1080], display_size=[1920,1080], debug_mode=0):
        super().__init__(kmodel_path, model_input_size, rgb888p_size, debug_mode)
        self.kmodel_path = kmodel_path
        self.model_input_size = model_input_size
        self.rgb888p_size = [ALIGN_UP(rgb888p_size[0], 16), rgb888p_size[1]]
        self.display_size = [ALIGN_UP(display_size[0], 16), display_size[1]]
        self.debug_mode = debug_mode
        # recognition dictionary: special plates and province abbreviations, then digits and letters
        self.dict_rec = ["挂", "使", "领", "澳", "港", "皖", "沪", "津", "渝", "冀", "晋", "蒙", "辽", "吉", "黑", "苏", "浙", "京", "闽", "赣", "鲁", "豫", "鄂", "湘", "粤", "桂", "琼", "川", "贵", "云", "藏", "陕", "甘", "青", "宁", "新", "警", "学", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "A", "B", "C", "D", "E", "F", "G", "H", "J", "K", "L", "M", "N", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z", "_", "-"]
        self.dict_size = len(self.dict_rec)
        self.ai2d = Ai2d(debug_mode)
        self.ai2d.set_ai2d_dtype(nn.ai2d_format.NCHW_FMT, nn.ai2d_format.NCHW_FMT, np.uint8, np.uint8)

    def config_preprocess(self, input_image_size=None):
        with ScopedTiming("set preprocess config", self.debug_mode > 0):
            ai2d_input_size = input_image_size if input_image_size else self.rgb888p_size
            self.ai2d.resize(nn.interp_method.tf_bilinear, nn.interp_mode.half_pixel)
            self.ai2d.build([1, 3, ai2d_input_size[1], ai2d_input_size[0]], [1, 3, self.model_input_size[1], self.model_input_size[0]])

    # greedy CTC-style decode: take the per-step argmax, drop blanks (index 0) and repeats
    def postprocess(self, results):
        with ScopedTiming("postprocess", self.debug_mode > 0):
            output_data = results[0].reshape((-1, self.dict_size))
            max_indices = np.argmax(output_data, axis=1)
            result_str = ""
            for i in range(max_indices.shape[0]):
                index = max_indices[i]
                if index > 0 and (i == 0 or index != max_indices[i - 1]):
                    result_str += self.dict_rec[index - 1]
            return result_str

# combined detection + recognition pipeline
class LicenceRec:
    def __init__(self, licence_det_kmodel, licence_rec_kmodel, det_input_size, rec_input_size, confidence_threshold=0.25, nms_threshold=0.3, rgb888p_size=[1920,1080], display_size=[1920,1080], debug_mode=0):
        self.licence_det_kmodel = licence_det_kmodel
        self.licence_rec_kmodel = licence_rec_kmodel
        self.det_input_size = det_input_size
        self.rec_input_size = rec_input_size
        self.confidence_threshold = confidence_threshold
        self.nms_threshold = nms_threshold
        self.rgb888p_size = [ALIGN_UP(rgb888p_size[0], 16), rgb888p_size[1]]
        self.display_size = [ALIGN_UP(display_size[0], 16), display_size[1]]
        self.debug_mode = debug_mode
        self.licence_det = LicenceDetectionApp(self.licence_det_kmodel, model_input_size=self.det_input_size, confidence_threshold=self.confidence_threshold, nms_threshold=self.nms_threshold, rgb888p_size=self.rgb888p_size, display_size=self.display_size, debug_mode=0)
        self.licence_rec = LicenceRecognitionApp(self.licence_rec_kmodel, model_input_size=self.rec_input_size, rgb888p_size=self.rgb888p_size)
        self.licence_det.config_preprocess()

    # detect plates, warp each one into a rectangle, then recognize it
    def run(self, input_np):
        det_boxes = self.licence_det.run(input_np)
        imgs_array_boxes = aidemo.ocr_rec_preprocess(input_np, [self.rgb888p_size[1], self.rgb888p_size[0]], det_boxes)
        imgs_array = imgs_array_boxes[0]
        boxes = imgs_array_boxes[1]
        rec_res = []
        for img_array in imgs_array:
            self.licence_rec.config_preprocess(input_image_size=[img_array.shape[3], img_array.shape[2]])
            licence_str = self.licence_rec.run(img_array)
            rec_res.append(licence_str)
            gc.collect()
        return det_boxes, rec_res

    def draw_result(self, pl, det_res, rec_res):
        pl.osd_img.clear()
        if det_res:
            point_8 = np.zeros((8), dtype=np.int16)
            for det_index in range(len(det_res)):
                # scale the four plate corners to display coordinates
                for i in range(4):
                    x = det_res[det_index][i * 2 + 0] / self.rgb888p_size[0] * self.display_size[0]
                    y = det_res[det_index][i * 2 + 1] / self.rgb888p_size[1] * self.display_size[1]
                    point_8[i * 2 + 0] = int(x)
                    point_8[i * 2 + 1] = int(y)
                for i in range(4):
                    pl.osd_img.draw_line(point_8[i * 2 + 0], point_8[i * 2 + 1], point_8[(i + 1) % 4 * 2 + 0], point_8[(i + 1) % 4 * 2 + 1], color=(255, 0, 255, 0), thickness=4)
                pl.osd_img.draw_string_advanced(point_8[6], point_8[7] + 20, 40, rec_res[det_index], color=(255, 255, 153, 18))

if __name__ == "__main__":
    display_mode = "lcd"
    rgb888p_size = [640, 360]
    if display_mode == "hdmi":
        display_size = [1920, 1080]
    else:
        display_size = [800, 480]
    licence_det_kmodel_path = "/sdcard/examples/kmodel/LPD_640.kmodel"
    licence_rec_kmodel_path = "/sdcard/examples/kmodel/licence_reco.kmodel"
    licence_det_input_size = [640, 640]
    licence_rec_input_size = [220, 32]
    confidence_threshold = 0.2
    nms_threshold = 0.2
    pl = PipeLine(rgb888p_size=rgb888p_size, display_size=display_size, display_mode=display_mode)
    pl.create()
    lr = LicenceRec(licence_det_kmodel_path, licence_rec_kmodel_path, det_input_size=licence_det_input_size, rec_input_size=licence_rec_input_size, confidence_threshold=confidence_threshold, nms_threshold=nms_threshold, rgb888p_size=rgb888p_size, display_size=display_size)
    try:
        while True:
            with ScopedTiming("total", 1):
                img = pl.get_frame()
                det_res, rec_res = lr.run(img)
                lr.draw_result(pl, det_res, rec_res)
                pl.show_image()
                gc.collect()
    finally:
        lr.licence_det.deinit()
        lr.licence_rec.deinit()
        pl.destroy()
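LicenceRecognitionApp.postprocess() is a greedy CTC-style decoder: the output tensor is reshaped to one row per timestep, the argmax is taken per row, and a character is emitted only when the index is non-zero (index 0 acts as the blank, hence dict_rec[index - 1]) and differs from the previous timestep, which collapses repeated predictions. A standalone sketch of the same rule on a toy alphabet:

def greedy_ctc_decode(max_indices, alphabet):
    # index 0 is the blank; consecutive repeats are collapsed
    out = ""
    prev = -1  # sentinel replacing the demo's (i == 0 or ...) test
    for idx in max_indices:
        if idx > 0 and idx != prev:
            out += alphabet[idx - 1]
        prev = idx
    return out

print(greedy_ctc_decode([1, 1, 0, 1, 2, 2], "AB"))  # prints "AAB"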
Results

More test results:

See the video at the bottom of this post.
Summary
This article presented the design of face detection, facial landmark, gesture recognition, and license plate recognition applications on the Banana Pi CanMV K230D Zero development board with a MIPI camera, covering the project descriptions, flowcharts, code, and demo results. It offers a reference for rapid AI development, deployment, and product design with this board.