在瑞萨 RA8D1 开发板上集成 AI 人脸检测功能
1. 项目概述
该项目是根据官方AI人脸检测代码,在官方显示屏显示代码的基础上进行修改而得。由于本人实力有限,最后的结果并不理想,无法实现检测。故该文章仅作为一个参考,若有大佬知晓问题出在哪里,希望能够进行批评指正,感谢!!!
2. 硬件介绍
2.1 CPKCOR-RA8D1B 核心板
CPKCOR-RA8D1B 是基于瑞萨 RA8D1 系列微控制器的高性能核心板,具备以下特性:
核心规格
- 主控芯片: Renesas RA8D1 (R7FA8D1BH)
- CPU 架构: Arm Cortex-M85 @ 480 MHz
- 内存配置:
- 2MB Flash ROM
- 1MB SRAM
- 支持外部 SDRAM 扩展
- 图形加速:
- GLCDC 显示控制器
- Dave2D 2D 图形加速引擎
- MIPI-DSI 接口
- AI 加速: 支持 TensorFlow Lite Micro
硬件特性
2.2 CPKEXP-EKRA8X1 扩展板
CPKEXP-EKRA8X1 是专为 RA8D1 核心板设计的功能扩展板,提供丰富的外设接口。
扩展板特性
- 显示屏:
- 4.5 英寸 MIPI-DSI LCD (480x854)
- 电容式触摸屏 (GT911)
- 背光亮度可调
- 摄像头模组:
- OV7725 CMOS 传感器
- 最大支持 VGA (640x480)
- RGB565 输出格式
3. 开发环境准备
3.1 软件工具
- Renesas e² studio IDE v2024-04
- Flexible Software Package (FSP) v5.3.0
- LLVM Embedded Toolchain for Arm v17.0.1

3.2 环境配置
安装步骤
- 安装 e² studio:下载地址 https://www.renesas.com/e2studio
- 安装 FSP
- 在 e² studio 中:Help → Add Renesas Software
- 选择 FSP 5.3.0
3.3 硬件连接



4. 文件迁移与配置
显示屏代码下载地址
cpk_examples: RA8D1核心板、扩展板详细资料。 CPK板样例代码 Sample Codes China Promotion Kits - Gitee.com
AI人脸检测代码下载地址
RA8D1 - 基于 480MHz Arm Cortex-M85、搭载 Helium 和 TrustZone 的图形微控制器 | Renesas 瑞萨电子
4.1 必须复制的文件清单
4.1.1 AI 核心代码
face_detect/src/ai_apps/ → screen_display/src/ai_apps/
完整目录结构:
src/ai_apps/
├── common/
│ ├── Main.cc
│ ├── tensorflow/
│ │ └── lite/
│ │ ├── micro/
│ │ ├── kernels/
│ │ └── ...
│ └── third_party/
│ ├── flatbuffers/
│ ├── gemmlowp/
│ ├── kissfft/
│ │ ├── _kiss_fft_guts.h
│ │ ├── kiss_fft.h
│ │ ├── kiss_fft.c
│ │ └── tools/
│ │ └── kiss_fftr.c
│ └── ruy/
└── face_detection/
├── MainLoop_obj.cc
├── UseCaseHandler_obj.cc
├── DetectorPreProcessing.cc
├── DetectorPostProcessing.cc
└── YoloFastestModel.hpp
4.1.2 图像处理代码
face_detect/src/camera/ → screen_display/src/camera/
关键文件:
ceu_ctl_and_data_processing.c - 包含 image_rgb565_to_int8() 函数
ceu_ctl_and_data_processing.h
4.1.3 显示相关代码
face_detect/src/graphics/bg_font_18_full.h → screen_display/src/graphics/
face_detect/src/graphics/bg_font_18_full.c → screen_display/src/graphics/
4.2 项目配置修改
4.2.1 Include 路径配置
在 e² studio 中:
- 右键项目 → Properties
- C/C++ Build → Settings
- GNU Arm Cross C Compiler → Includes
- 添加以下路径:
${workspace_loc:/${ProjName}/src}
${workspace_loc:/${ProjName}/src/ai_apps/common}
${workspace_loc:/${ProjName}/src/ai_apps/common/tensorflow}
${workspace_loc:/${ProjName}/src/ai_apps/common/third_party}
${workspace_loc:/${ProjName}/src/ai_apps/common/third_party/kissfft}
${workspace_loc:/${ProjName}/src/ai_apps/common/third_party/gemmlowp}
${workspace_loc:/${ProjName}/src/ai_apps/face_detection}
${workspace_loc:/${ProjName}/src/camera}
${workspace_loc:/${ProjName}/src/graphics}
- 在 GNU Arm Cross C++ Compiler → Includes 中添加相同路径
4.2.2 编译器选项
C++ 编译器设置:
- GNU Arm Cross C++ Compiler → Miscellaneous
- Other flags 添加:
-std=c++11 -fno-rtti -fno-exceptions -fno-threadsafe-statics
链接器设置:
- GNU Arm Cross C++ Linker → General
- 确保选择了正确的链接脚本(包含 SDRAM section)
5. 代码修改
5.1 创建配置头文件
文件:src/app_config.h
#ifndef APP_CONFIG_H_
#define APP_CONFIG_H_
#include "hal_data.h"
/* Face-detection model (YoloFastest) input resolution in pixels. */
#define DET_MODEL_IMG_SIZE_X 192
#define DET_MODEL_IMG_SIZE_Y 192
/* Upper bound on detections reported per inference (size of g_ai_detection). */
#define MAX_DETECTION_NUMS 20
/* Standard frame geometries. */
#define VGA_WIDTH 640
#define VGA_HEIGHT 480
#define QVGA_WIDTH 320
#define QVGA_HEIGHT 240
/* Camera-side aliases for the same geometries. */
#define CAM_VGA_WIDTH 640
#define CAM_VGA_HEIGHT 480
#define CAM_QVGA_WIDTH 320
#define CAM_QVGA_HEIGHT 240
/* Panel geometry of the 4.5" MIPI-DSI LCD (portrait 480x854); only defined
 * here if the FSP display configuration has not already provided it. */
#ifndef DISPLAY_HSIZE_INPUT0
#define DISPLAY_HSIZE_INPUT0 480
#define DISPLAY_VSIZE_INPUT0 854
#endif
#define DISPLAY_WIDTH DISPLAY_HSIZE_INPUT0
#define DISPLAY_HEIGHT DISPLAY_VSIZE_INPUT0
#define LCD_HPIX DISPLAY_WIDTH
#define LCD_VPIX DISPLAY_HEIGHT
/* NOTE(review): IMAGE_INPUT_WIDTH is defined from CAM_QVGA_HEIGHT, so both
 * macros evaluate to 240. If a square (240x240) crop is intended this is
 * fine; if the full QVGA frame was meant, WIDTH should probably be
 * CAM_QVGA_WIDTH (320) — verify against the pre-processing code. */
#define IMAGE_INPUT_WIDTH CAM_QVGA_HEIGHT
#define IMAGE_INPUT_HEIGHT CAM_QVGA_HEIGHT
/* Run AI inference once every N captured frames. */
#define AI_INFERENCE_INTERVAL 3
/* RGB565: two bytes per camera pixel. */
#define CAM_BYTE_PER_PIXEL 2
#endif
5.2 创建通用头文件
文件:src/common_util.h
#ifndef COMMON_UTIL_H_
#define COMMON_UTIL_H_
#include "hal_data.h"
#include "app_config.h"
/* One face detection result in model coordinate space (see
 * DET_MODEL_IMG_SIZE_X/Y); filled by the inference code, consumed by the
 * display code. A slot with m_w == 0 && m_h == 0 is considered empty. */
typedef struct ai_detection_point_t {
signed short m_x;      /* top-left x of the bounding box */
signed short m_y;      /* top-left y of the bounding box */
signed short m_w;      /* box width */
signed short m_h;      /* box height */
float m_score;         /* detection confidence, 0.0 .. 1.0 */
} st_ai_detection_point_t;
/* Application-level error codes; values double as indices into the
 * error-message table in handle_error(). */
typedef enum e_face_det_app_err {
FACE_DET_APP_SUCCESS = 0,
FACE_DET_APP_AI_INIT = 1,
FACE_DET_APP_INFERENCE = 2,
FACE_DET_APP_IMG_PROCESS = 3,
FACE_DET_APP_CAMERA_INIT = 4,
FACE_DET_APP_CEU_INIT = 5,
FACE_DET_APP_GRAPHICS_INIT = 6,
} face_det_err_t;
/* Print a diagnostic for err and halt (never returns on failure codes). */
void handle_error(face_det_err_t err);
#endif
文件:src/common_util.c
#include "common_util.h"
#include "common_utils.h"
#include <stdio.h>
/*
 * Report a fatal application error over the debug console and halt.
 *
 * err: one of the face_det_err_t codes. FACE_DET_APP_SUCCESS is a no-op;
 *      any other value prints a banner with the numeric code plus a
 *      human-readable message, then parks the CPU in a breakpoint loop
 *      (never returns).
 */
void handle_error(face_det_err_t err)
{
    if (FACE_DET_APP_SUCCESS != err)
    {
        /* static const: build the lookup table once at link time instead of
         * re-initializing an array of pointers on every call. Indices match
         * the face_det_err_t enumerator values. */
        static const char *const error_messages[] = {
            "Success",
            "AI Initialization Failed",
            "Inference Failed",
            "Image Processing Failed",
            "Camera Initialization Failed",
            "CEU Initialization Failed",
            "Graphics Initialization Failed"
        };
        APP_PRINT("\r\n");
        APP_PRINT("========================================\r\n");
        APP_PRINT(" ERROR OCCURRED! \r\n");
        APP_PRINT("========================================\r\n");
        APP_PRINT("Error Code: %d\r\n", err);
        /* Explicit size_t cast avoids a signed/unsigned comparison warning
         * (err is an enum compared against a sizeof expression). */
        if ((size_t)err < sizeof(error_messages) / sizeof(error_messages[0]))
        {
            APP_PRINT("Message: %s\r\n", error_messages[err]);
        }
        APP_PRINT("System halted.\r\n");
        APP_PRINT("========================================\r\n");
        /* Halt here so a debugger lands on the fault; loops forever when no
         * debugger is attached. */
        while (1)
        {
            __asm("BKPT #0");
        }
    }
}
5.3 修改主程序文件
文件:src/hal_entry.c
#include "hal_data.h"
#include "common_utils.h"
#include "app_config.h"
#include "common_util.h"
#include "board_sdram.h"
#include "dave_driver.h"
#include "ov7725.h"
#include "graphics.h"
#include "mipi_dsi_ep.h"
#ifdef __cplusplus
extern "C" {
#endif
/* AI pipeline entry points implemented in the C++ ai_apps sources. */
extern face_det_err_t ai_init(void);
extern face_det_err_t face_detection(void);
/* Converts an RGB565 frame to the int8 tensor layout the model expects,
 * scaling from in_width x in_height to out_width x out_height. */
extern face_det_err_t image_rgb565_to_int8(const void *inbuf, void *outbuf,
uint16_t in_width, uint16_t in_height,
uint16_t out_width, uint16_t out_height);
/* CEU (camera capture unit) control, camera driver, and display helpers. */
extern fsp_err_t ceu_init(uint8_t *buffer, uint16_t width, uint16_t height);
extern fsp_err_t ceu_operation(uint8_t *buffer);
extern void graphics_init(void);
extern void mipi_dsi_entry(void);
extern fsp_err_t ov7725_open(void);
extern void OV7725_Window_Set(uint16_t width, uint16_t height, uint8_t mode);
extern void do_face_recognition_screen_single_thread(uint8_t *camera_buffer);
/* Frame buffers owned by the graphics layer. */
extern uint32_t *gp_single_buffer;
extern uint32_t *gp_double_buffer;
extern uint32_t *gp_frame_buffer;
#ifdef __cplusplus
}
#endif
/* Double-buffered RGB565 camera capture buffers, placed in external SDRAM.
 * NOTE(review): the CEU fills these by DMA while the D-cache is enabled;
 * 8-byte alignment is smaller than the Cortex-M85 cache line, so cache
 * maintenance on these buffers can touch neighbouring data — consider
 * aligning to the cache-line size. TODO confirm. */
BSP_PLACE_IN_SECTION(".sdram") BSP_ALIGN_VARIABLE(8)
static uint8_t camera_out_buffer[2][QVGA_WIDTH * QVGA_HEIGHT * CAM_BYTE_PER_PIXEL];
/* Model input tensor buffer.
 * NOTE(review): sized for 192*192*1 bytes. If the YoloFastest model takes
 * RGB input (3 channels, as most YOLO-style detectors do), this is a factor
 * of 3 too small and image_rgb565_to_int8() would overflow it — verify the
 * model's input tensor shape. */
BSP_PLACE_IN_SECTION(".sdram") BSP_ALIGN_VARIABLE(8)
static int8_t model_buffer_int8[DET_MODEL_IMG_SIZE_X * DET_MODEL_IMG_SIZE_Y];
/* Detection results shared with the display code (extern'd there). */
st_ai_detection_point_t g_ai_detection[MAX_DETECTION_NUMS];
/* Set true after a successful inference so the display redraws the boxes. */
bool update_face_count = false;
static uint8_t current_buffer = 0;   /* index of the capture buffer in use */
static uint32_t frame_count = 0;     /* total frames captured */
static void hardware_init(void);
static void ai_inference_process(void);
static void print_system_info(void);
void hal_entry(void)
{
fsp_err_t err = FSP_SUCCESS;
face_det_err_t ai_status = FACE_DET_APP_SUCCESS;
print_system_info();
APP_PRINT("Phase 1: Hardware Initialization...\r\n");
hardware_init();
APP_PRINT("✓ Hardware initialized successfully!\r\n\r\n");
APP_PRINT("Phase 2: AI Model Initialization...\r\n");
APP_PRINT(" - Model: YoloFastest\r\n");
APP_PRINT(" - Input Size: %dx%d\r\n", DET_MODEL_IMG_SIZE_X, DET_MODEL_IMG_SIZE_Y);
APP_PRINT(" - Max Detections: %d\r\n", MAX_DETECTION_NUMS);
APP_PRINT(" - TensorArena: 512KB\r\n");
ai_status = ai_init();
if (FACE_DET_APP_SUCCESS != ai_status)
{
handle_error(FACE_DET_APP_AI_INIT);
}
APP_PRINT("✓ AI model loaded successfully!\r\n\r\n");
APP_PRINT("Phase 3: Starting camera capture...\r\n");
err = R_CEU_CaptureStart(&g_ceu_ctrl, (uint8_t *)camera_out_buffer[0]);
if (FSP_SUCCESS != err)
{
handle_error(FACE_DET_APP_CAMERA_INIT);
}
APP_PRINT("✓ Camera started successfully!\r\n\r\n");
APP_PRINT("========================================\r\n");
APP_PRINT(" System Running - Main Loop Started \r\n");
APP_PRINT("========================================\r\n");
APP_PRINT("Inference Interval: Every %d frames\r\n", AI_INFERENCE_INTERVAL);
APP_PRINT("Camera Resolution: %dx%d (RGB565)\r\n", QVGA_WIDTH, QVGA_HEIGHT);
APP_PRINT("Display Resolution: %dx%d\r\n", DISPLAY_WIDTH, DISPLAY_HEIGHT);
APP_PRINT("\r\nPress any key to see statistics...\r\n\r\n");
while (1)
{
err = ceu_operation(camera_out_buffer[current_buffer]);
if (FSP_SUCCESS != err)
{
continue;
}
frame_count++;
if (frame_count % AI_INFERENCE_INTERVAL == 0)
{
ai_inference_process();
}
do_face_recognition_screen_single_thread(camera_out_buffer[current_buffer]);
current_buffer = (current_buffer == 0) ? 1 : 0;
if (frame_count % 100 == 0)
{
uint8_t detected_faces = 0;
for (int i = 0; i < MAX_DETECTION_NUMS; i++)
{
if (g_ai_detection[i].m_w > 0 && g_ai_detection[i].m_h > 0)
{
detected_faces++;
}
}
APP_PRINT("Frame: %lu | Faces Detected: %d\r\n", frame_count, detected_faces);
}
}
}
/* Bring up all board peripherals in dependency order: SDRAM first (the
 * capture/model buffers live there), then I2C -> camera -> CEU -> graphics
 * -> display, and finally the CPU caches. Halts via handle_error() on any
 * failure. */
static void hardware_init(void)
{
fsp_err_t err = FSP_SUCCESS;
APP_PRINT(" [1/7] Initializing SDRAM...\r\n");
bsp_sdram_init();
APP_PRINT(" [2/7] Initializing I2C master...\r\n");
/* I2C bus is used for OV7725 (SCCB) register configuration. */
err = R_IIC_MASTER_Open(&g_i2c_master1_ctrl, &g_i2c_master1_cfg);
if (FSP_SUCCESS != err)
{
handle_error(FACE_DET_APP_CAMERA_INIT);
}
APP_PRINT(" [3/7] Initializing OV7725 camera...\r\n");
err = ov7725_open();
if (FSP_SUCCESS != err)
{
handle_error(FACE_DET_APP_CAMERA_INIT);
}
/* Configure the sensor output window to QVGA; mode 1 semantics are defined
 * by the OV7725 driver — TODO confirm. */
OV7725_Window_Set(QVGA_WIDTH, QVGA_HEIGHT, 1);
APP_PRINT(" [4/7] Initializing CEU...\r\n");
err = ceu_init(camera_out_buffer[0], QVGA_WIDTH, QVGA_HEIGHT);
if (FSP_SUCCESS != err)
{
handle_error(FACE_DET_APP_CEU_INIT);
}
APP_PRINT(" [5/7] Initializing graphics library...\r\n");
graphics_init();
APP_PRINT(" [6/7] Initializing MIPI DSI display...\r\n");
mipi_dsi_entry();
APP_PRINT(" [7/7] Enabling CPU cache...\r\n");
/* NOTE(review): once the D-cache is on, the CEU's DMA writes to the SDRAM
 * capture buffers are not coherent with the CPU's cached view. The buffers
 * must be invalidated (SCB_InvalidateDCache_by_Addr) before each CPU read,
 * or placed in a non-cacheable MPU region — verify this is handled. */
SCB_EnableDCache();
SCB_EnableICache();
}
static void ai_inference_process(void)
{
face_det_err_t ai_status = FACE_DET_APP_SUCCESS;
ai_status = image_rgb565_to_int8(
(const void *)camera_out_buffer[current_buffer],
(void *)model_buffer_int8,
QVGA_WIDTH,
QVGA_HEIGHT,
DET_MODEL_IMG_SIZE_X,
DET_MODEL_IMG_SIZE_Y
);
if (FACE_DET_APP_SUCCESS != ai_status)
{
return;
}
for (int i = 0; i < MAX_DETECTION_NUMS; i++)
{
g_ai_detection[i].m_x = 0;
g_ai_detection[i].m_y = 0;
g_ai_detection[i].m_w = 0;
g_ai_detection[i].m_h = 0;
g_ai_detection[i].m_score = 0.0f;
}
ai_status = face_detection();
if (FACE_DET_APP_SUCCESS == ai_status)
{
update_face_count = true;
}
}
/* Emit the startup banner: application identity, version, and a summary of
 * the hardware configuration. Output bytes are identical to the original
 * line-by-line version; the text now lives in one static table. */
static void print_system_info(void)
{
    static const char *const banner[] = {
        "\r\n",
        "========================================\r\n",
        " AI Face Detection Application \r\n",
        " Single-Thread Architecture \r\n",
        "========================================\r\n",
        "Author: Arvin041113\r\n",
        "Date: 2025-10-29\r\n",
        "Version: 2.0\r\n",
        "\r\n",
        "Hardware Configuration:\r\n",
        " - MCU: RA8D1 (Cortex-M85 @ 480MHz)\r\n",
        " - Board: CPKCOR-RA8D1B + CPKEXP-EKRA8X1\r\n",
        " - Camera: OV7725 (QVGA)\r\n",
        " - Display: 4.5\" MIPI-DSI LCD\r\n",
        " - SDRAM: 32MB\r\n",
        "========================================\r\n\r\n",
    };
    for (size_t line = 0; line < sizeof(banner) / sizeof(banner[0]); line++)
    {
        APP_PRINT("%s", banner[line]);
    }
}
5.4 修改显示函数
文件:src/graphics/face_detection_screen_mipi.c
#include "hal_data.h"
#include "app_config.h"
#include "common_util.h"
#include "dave_driver.h"
#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>
#include <string.h>
/* NOTE(review): despite the name, 0xFF0000 is a 24-bit (A)RGB red, not the
 * RGB565 encoding of red (0xF800). Dave2D's d2_setcolor takes a 32-bit
 * color regardless of framebuffer mode, so the value is presumably correct
 * and only the macro name is misleading — verify against the d2 docs. */
#define RGB565_RED (0xFF0000)
/* Detection results and redraw flag, owned by hal_entry.c. */
extern st_ai_detection_point_t g_ai_detection[MAX_DETECTION_NUMS];
extern bool update_face_count;
/* Dave2D device handle, owned by the graphics init code. */
extern d2_device *d2_handle;
uint32_t face_detection_inference_time = 0;  /* last inference time (unused here) */
uint8_t face_count = 0;                      /* faces drawn in the last update */
/* Cached bounding-box corners in screen pixels, one slot per detection.
 * Note the naming is inverted: top_right_* actually holds the top-LEFT
 * corner and bottom_left_* the bottom-RIGHT (see how they are computed in
 * calculate_and_draw_bounding_box). A slot with scaled_h[i] == 0 is empty. */
static d2_point top_right_x[MAX_DETECTION_NUMS];
static d2_point top_right_y[MAX_DETECTION_NUMS];
static d2_point bottom_left_x[MAX_DETECTION_NUMS];
static d2_point bottom_left_y[MAX_DETECTION_NUMS];
static d2_point scaled_h[MAX_DETECTION_NUMS];
static void display_camera_image(uint8_t *camera_buffer);
static void draw_bounding_box(uint8_t i);
static void calculate_and_draw_bounding_box(uint8_t i);
static void print_detection_info(void);
/* Blit the QVGA RGB565 camera frame onto the full display surface using the
 * Dave2D blit engine with bilinear filtering (d2_tm_filter), stretching
 * 320x240 to DISPLAY_WIDTH x DISPLAY_HEIGHT. No-op if the camera buffer or
 * the d2 device is not available. */
static void display_camera_image(uint8_t *camera_buffer)
{
if (NULL == camera_buffer || NULL == d2_handle)
{
return;
}
/* Source: pitch, width, height all in pixels (pitch == width, the frame is
 * tightly packed). */
d2_setblitsrc(d2_handle, camera_buffer,
QVGA_WIDTH, QVGA_WIDTH, QVGA_HEIGHT,
d2_mode_rgb565);
/* Destination width/height are in Dave2D 12.4 fixed point, hence the << 4.
 * Source rectangle starts at (0,0) and covers the whole frame. */
d2_blitcopy(d2_handle,
QVGA_WIDTH, QVGA_HEIGHT,
(d2_blitpos)0, 0,
(d2_width)(DISPLAY_WIDTH << 4),
(d2_width)(DISPLAY_HEIGHT << 4),
(d2_width)0,
(d2_width)0,
d2_tm_filter);
}
/* Outline detection slot i as a red rectangle using four Dave2D line
 * segments. Corner coordinates come from the file-scope arrays (screen
 * pixels) and are converted to Dave2D 12.4 fixed point here. No-op without
 * a d2 device. */
static void draw_bounding_box(uint8_t i)
{
    if (NULL == d2_handle)
    {
        return;
    }
    /* Rectangle corners in 12.4 fixed point: (x0,y0) top-left, (x1,y1)
     * bottom-right (the arrays' names are historically inverted). */
    const d2_point x0 = (d2_point)(top_right_x[i] << 4);
    const d2_point y0 = (d2_point)(top_right_y[i] << 4);
    const d2_point x1 = (d2_point)(bottom_left_x[i] << 4);
    const d2_point y1 = (d2_point)(bottom_left_y[i] << 4);
    const d2_width pen = (d2_width)(3 << 4);  /* 3-pixel stroke */
    d2_setcolor(d2_handle, 0, RGB565_RED);
    d2_renderline(d2_handle, x0, y0, x1, y0, pen, 0);  /* top edge */
    d2_renderline(d2_handle, x1, y0, x1, y1, pen, 0);  /* right edge */
    d2_renderline(d2_handle, x1, y1, x0, y1, pen, 0);  /* bottom edge */
    d2_renderline(d2_handle, x0, y1, x0, y0, pen, 0);  /* left edge */
}
/* Scale detection slot i from model coordinates (DET_MODEL_IMG_SIZE_X/Y)
 * to screen pixels, cache the resulting corners in the file-scope arrays,
 * clamp them to the display, bump face_count, and draw the box. */
static void calculate_and_draw_bounding_box(uint8_t i)
{
    const st_ai_detection_point_t *det = &g_ai_detection[i];
    /* Linear rescale: model space -> display space. */
    signed short px = (signed short)((det->m_x * DISPLAY_WIDTH) / DET_MODEL_IMG_SIZE_X);
    signed short py = (signed short)((det->m_y * DISPLAY_HEIGHT) / DET_MODEL_IMG_SIZE_Y);
    signed short pw = (signed short)((det->m_w * DISPLAY_WIDTH) / DET_MODEL_IMG_SIZE_X);
    signed short ph = (signed short)((det->m_h * DISPLAY_HEIGHT) / DET_MODEL_IMG_SIZE_Y);
    face_count++;
    /* Cache corners for redraws on frames without a fresh inference.
     * (Array names are inverted: top_right_* is really the top-left.) */
    top_right_x[i] = px;
    top_right_y[i] = py;
    bottom_left_x[i] = px + pw;
    bottom_left_y[i] = py + ph;
    scaled_h[i] = ph;
    /* Clamp to the visible display area. */
    if (top_right_x[i] < 0) top_right_x[i] = 0;
    if (top_right_y[i] < 0) top_right_y[i] = 0;
    if (bottom_left_x[i] > DISPLAY_WIDTH) bottom_left_x[i] = DISPLAY_WIDTH;
    if (bottom_left_y[i] > DISPLAY_HEIGHT) bottom_left_y[i] = DISPLAY_HEIGHT;
    draw_bounding_box(i);
}
/* Placeholder for on-screen statistics output. Currently disabled; the
 * void casts only silence unused-variable warnings for the bookkeeping
 * globals so they can stay in place for a future implementation. */
static void print_detection_info(void)
{
    (void)face_count;
    (void)face_detection_inference_time;
}
/*
 * Compose one display frame: blit the camera image, then overlay face
 * bounding boxes.
 *
 * When update_face_count is set (a fresh inference finished), the boxes are
 * recomputed from g_ai_detection, keeping only detections with a nonzero
 * height and score > 0.5. On intermediate frames the previously computed
 * boxes (slots with scaled_h[i] != 0) are simply redrawn so they persist
 * between inferences.
 *
 * camera_buffer: RGB565 QVGA frame to display; no-op if NULL or if the
 *                Dave2D device is unavailable.
 */
void do_face_recognition_screen_single_thread(uint8_t *camera_buffer)
{
    if (NULL == d2_handle || NULL == camera_buffer)
    {
        return;
    }
    d2_startframe(d2_handle);
    display_camera_image(camera_buffer);
    if (update_face_count)
    {
        face_count = 0;
        for (uint8_t i = 0; i < MAX_DETECTION_NUMS; i++)
        {
            scaled_h[i] = (d2_point)((g_ai_detection[i].m_h * DISPLAY_HEIGHT) / DET_MODEL_IMG_SIZE_Y);
            if (scaled_h[i] != 0 && g_ai_detection[i].m_score > 0.5f)
            {
                calculate_and_draw_bounding_box(i);
            }
            else
            {
                /* BUG FIX: a slot with nonzero height but score <= 0.5 used
                 * to keep its nonzero scaled_h[i] while the coordinate
                 * arrays were never refreshed, so the redraw branch below
                 * kept drawing a rejected box at stale coordinates. Mark
                 * the slot empty instead. */
                scaled_h[i] = 0;
            }
        }
        update_face_count = false;
    }
    else
    {
        /* No fresh inference this frame: redraw the cached boxes. */
        for (uint8_t i = 0; i < MAX_DETECTION_NUMS; i++)
        {
            if (scaled_h[i] != 0)
            {
                draw_bounding_box(i);
            }
        }
    }
    print_detection_info();
    d2_endframe(d2_handle);
}
由于文章篇幅限制,故只能贴出主要代码!!
6.4 参考资源
7 视频演示
由于使用的是轮询方式,并没有使用 RTOS,肉眼可见的延迟非常高,并且没有识别出人脸。