Hi everyone,
I'm trying to convert a GeneralizedRCNN model trained with the Detectron2 framework into an RKNN model, and I've hit a problem. Could anyone take a look?
The error is shown in the screenshot:

My conversion code:
import torch
import os
from rknn.api import RKNN
from detectron2.checkpoint import DetectionCheckpointer
from detectron2.config import get_cfg
from detectron2.data import detection_utils, build_detection_test_loader
from detectron2.export import TracingAdapter
from detectron2.modeling import build_model
from detectron2.utils.file_io import PathManager
import detectron2.data.transforms as ts

def setup_cfg(cfg_path, m_path):
    """Initialize the config."""
    cfg = get_cfg()
    cfg.DATALOADER.NUM_WORKERS = 0
    cfg.merge_from_file(cfg_path)
    cfg.MODEL.WEIGHTS = os.path.join(m_path, "model_final.pth")
    cfg.MODEL.DEVICE = "cpu"
    cfg.freeze()
    return cfg

def get_sample_inputs(cfg, sample_image_path=None):
    """Build the sample inputs needed to export the model."""
    if sample_image_path:
        original_image = detection_utils.read_image(sample_image_path, format=cfg.INPUT.FORMAT)
        aug = ts.ResizeShortestEdge(
            [cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST],
            cfg.INPUT.MAX_SIZE_TEST
        )
        height, width = original_image.shape[:2]
        image = aug.get_transform(original_image).apply_image(original_image)
        image = torch.as_tensor(image.astype("float32").transpose(2, 0, 1))
        return [{"image": image, "height": height, "width": width}]
    else:
        data_loader = build_detection_test_loader(cfg, cfg.DATASETS.TEST[0])
        return next(iter(data_loader))

def export_generalized_rcnn_to_onnx(cfg, model, sample_inputs, output_path):
    """Export the GeneralizedRCNN model to ONNX."""
    image = sample_inputs[0]["image"]
    inputs = [{"image": image}]

    def rcnn_inference(model, inputs):
        # skip postprocessing so the traced graph only contains the network itself
        instances = model.inference(inputs, do_postprocess=False)[0]
        return [{"instances": instances}]

    traceable_model = TracingAdapter(model, inputs, rcnn_inference)
    with PathManager.open(output_path, "wb") as f:
        torch.onnx.export(
            traceable_model,
            (image,),
            f,
            input_names=["input_image"],
            opset_version=11,
            do_constant_folding=True,
            verbose=False
        )
    print(f"ONNX model saved to: {output_path}")
    print(f"Input schema: {traceable_model.inputs_schema}")
    print(f"Output schema: {traceable_model.outputs_schema}")

def onnx2rknn(onnx_path, rknn_path, target_platform='rk3588'):
    rknn = RKNN(verbose=True)
    # mean/std in BGR order, matching Detectron2's default PIXEL_MEAN / PIXEL_STD (std = 1.0)
    rknn.config(
        mean_values=[[103.53, 116.28, 123.675]],
        std_values=[[1.0, 1.0, 1.0]],
        target_platform=target_platform)
    input_size_list = [[1, 3, 800, 800]]

    print('--> Loading ONNX model...')
    ret = rknn.load_onnx(model=onnx_path, inputs=['input_image'], input_size_list=input_size_list)
    if ret != 0:
        print('Load ONNX model failed!')
        exit(ret)

    print('--> Building model...')
    ret = rknn.build(do_quantization=False)
    if ret != 0:
        print('Build model failed!')
        exit(ret)

    print('--> Exporting RKNN model...')
    ret = rknn.export_rknn(rknn_path)
    if ret != 0:
        print('Export RKNN model failed!')
        exit(ret)

    rknn.release()
    print('ONNX to RKNN conversion completed!')
if __name__ == "__main__":
config_path = './detectron2/models/Config/config.yaml'
model_path = "./detectron2/models"
onnx_path = "mask_rcnn.onnx"
onnx_path_s = "mask_rcnn_simplified.onnx"
onnx2rknn(onnx_path_s, "mask_rcnn.rknn", target_platform='rk3588')
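
For reference, the ONNX file is produced beforehand with the functions above, roughly as sketched below (the sample image path is just a placeholder), and then simplified into mask_rcnn_simplified.onnx (e.g. with onnx-simplifier) before onnx2rknn is called:

# Rough sketch of the earlier ONNX export step (paths are placeholders)
cfg = setup_cfg(config_path, model_path)
model = build_model(cfg)
model.eval()
DetectionCheckpointer(model).load(cfg.MODEL.WEIGHTS)
sample_inputs = get_sample_inputs(cfg, sample_image_path="./sample.jpg")
export_generalized_rcnn_to_onnx(cfg, model, sample_inputs, onnx_path)
# then simplified from the command line, e.g.:
#   python -m onnxsim mask_rcnn.onnx mask_rcnn_simplified.onnx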