Deploying yolov8n-seg on the ONNX, RKNN, and Horizon platforms

2023-12-13 15:29:27
1. Environment requirements
torch: 1.10.1+cu102
torchvision: 0.11.2+cu102
onnx: 1.10.0
onnxruntime: 1.10.0
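
These versions can be installed with pip; for reference, assuming the cu102 wheels are still served from the PyTorch wheel index:

pip3 install torch==1.10.1+cu102 torchvision==0.11.2+cu102 -f https://download.pytorch.org/whl/cu102/torch_stable.html
pip3 install onnx==1.10.0 onnxruntime==1.10.0
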
2. Converting to ONNX format

You can download the model directly from the official yolov8 releases page: https://github.com/ultralytics/assets/releases/tag/v0.0.0

Alternatively, install ultralytics and let it download the weights:

pip3 install ultralytics==8.0.147
pip3 install numpy==1.23.5

Then use pytorch2onnx.py to convert the weights to ONNX format; the script automatically moves the output into the appropriate folder to keep things organized.
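
pytorch2onnx.py itself lives in the repo linked at the end of this post; a minimal sketch of the conversion, assuming the standard ultralytics export API, looks like this:

# Minimal sketch of the ONNX export, assuming the standard ultralytics API;
# the full pytorch2onnx.py is in the repo linked at the end of this post.
from ultralytics import YOLO

model = YOLO("yolov8n-seg.pt")   # downloads the weights if they are missing
model.export(format="onnx",
             imgsz=(480, 640),   # (height, width), matching the scripts below
             opset=12)           # opset 12 for RKNN; Horizon needs opset 11 (section 4)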

3. Converting to RKNN format

Download the version you need from the RKNN site; my target platform is the rk3588s, so I downloaded rknn_toolkit2-1.5.0 for Python 3.8. I converted to RKNN using hybrid quantization, following the official hybrid quantization procedure. It takes two steps, and the two programs are provided below.
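
Installing the toolkit is just a pip install of the downloaded wheel (the filename below is illustrative; use the one that matches your download):

pip3 install rknn_toolkit2-1.5.0-cp38-cp38-linux_x86_64.whl
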

The first step takes the yolov8n-seg ONNX model and produces three files: data, model, and quantization.cfg. The most important is quantization.cfg, which controls which data type each layer uses in the RKNN conversion. RKNN proposes the settings automatically; if you are unhappy with the converted results, you can edit the data types yourself to improve them.

import os, glob, shutil
from rknn.api import RKNN

input_width = 640
input_height = 480
model_path = "./model"
dataset_path = "./dataset"
config_path = "./config"
dataset_file = "./dataset.txt"
model_name = 'yolov8n-seg'
platform = "rk3588"
ONNX_MODEL = f'{model_path}/{model_name}-{input_height}-{input_width}.onnx'
OUT_NODE = ["output0","output1"]

def get_dataset_txt(dataset_path, dataset_savefile):
    # List every jpg in the dataset folder in dataset.txt, one relative
    # path per line, which is the format hybrid_quantization_step1 expects
    file_data = glob.glob(os.path.join(dataset_path, "*.jpg"))
    with open(dataset_savefile, "w") as f:
        for file in file_data:
            f.write(f"{file}\n")

def move_onnx_config():
    # Tidy up: move any onnx files left in the working directory into config/
    file_data = glob.glob("*.onnx")
    for file in file_data:
        shutil.move(file, f"{config_path}/{file}")

if __name__ == '__main__':
    # Make sure the dataset and config folders exist
    # (put your calibration jpg images into ./dataset before running)
    os.makedirs(dataset_path, exist_ok=True)
    os.makedirs(config_path, exist_ok=True)

    # Prepare the dataset text file
    get_dataset_txt(dataset_path, dataset_file)

    # Create RKNN object
    rknn = RKNN(verbose=False)

    # pre-process config
    print('--> Config model')
    rknn.config(mean_values=[[0, 0, 0]], std_values=[[255, 255, 255]], target_platform=platform)
    print('done')

    # Load ONNX model
    print('--> Loading model')
    ret = rknn.load_onnx(model=ONNX_MODEL, outputs=OUT_NODE)
    if ret != 0:
        print('Load model failed!')
        exit(ret)
    print('done')

    # Build model
    print('--> hybrid_quantization_step1')
    ret = rknn.hybrid_quantization_step1(dataset=dataset_file, proposal=False)
    if ret != 0:
        print('hybrid_quantization_step1 failed!')
        exit(ret)
    print('done')

    rknn.release()

    print('--> Move hybrid quantization config into config folder')
    shutil.move(f"{model_name}-{input_height}-{input_width}.data", f"{config_path}/{model_name}-{input_height}-{input_width}.data")
    shutil.move(f"{model_name}-{input_height}-{input_width}.model", f"{config_path}/{model_name}-{input_height}-{input_width}.model")
    shutil.move(f"{model_name}-{input_height}-{input_width}.quantization.cfg", f"{config_path}/{model_name}-{input_height}-{input_width}.quantization.cfg")

    print('--> Move onnx config into config folder')
    move_onnx_config()
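
Step 1 leaves the generated quantization.cfg in the config folder; this is the file to edit before running step 2 if you want to force particular layers to a different data type. As an illustration only (the real layer names come from the file step 1 generates for your model), an override section might look like:

custom_quantize_layers:
    # hypothetical layer name; copy the real name from the generated cfg
    Conv_/model.22/cv4.2/cv4.2.2/Conv_output_0: float16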

RKNN then performs the conversion based on your data, model, and quantization.cfg files. After the RKNN model is generated, the script runs it once on the PC simulator, and the inference results are saved into the result folder.

import os, shutil, numpy as np, cv2
from utils import *   # preprocess, postprocess, gen_color, vis_result from the repo
from rknn.api import RKNN

conf_thres = 0.25
iou_thres = 0.45
input_width = 640
input_height = 480
model_name = 'yolov8n-seg'
model_path = "./model"
config_path = "./config"
result_path = "./result"
image_path = "./dataset/bus.jpg"
video_path = "test.mp4"
video_inference = False
RKNN_MODEL = f'{model_name}-{input_height}-{input_width}.rknn'
CLASSES = ['person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee', 'skis','snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove', 'skateboard', 'surfboard', 'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear', 'hair drier', 'toothbrush']

if __name__ == '__main__':
    # Make sure the result folder exists
    os.makedirs(result_path, exist_ok=True)

    # Create RKNN object
    rknn = RKNN(verbose=False)

    # Build model
    print('--> hybrid_quantization_step2')
    ret = rknn.hybrid_quantization_step2(model_input=f'{config_path}/{model_name}-{input_height}-{input_width}.model',
                                         data_input=f'{config_path}/{model_name}-{input_height}-{input_width}.data',
                                         model_quantization_cfg=f'{config_path}/{model_name}-{input_height}-{input_width}.quantization.cfg')
    
    if ret != 0:
        print('hybrid_quantization_step2 failed!')
        exit(ret)
    print('done')

    # Export rknn model
    print('--> Export rknn model')
    ret = rknn.export_rknn(RKNN_MODEL)
    if ret != 0:
        print('Export rknn model failed!')
        exit(ret)
    print('done')

    print('--> Move RKNN file into model folder')
    shutil.move(RKNN_MODEL, f"{model_path}/{RKNN_MODEL}")

    # Init runtime environment
    print('--> Init runtime environment')
    ret = rknn.init_runtime()
    if ret != 0:
        print('Init runtime environment failed!')
        exit(ret)
    print('done')

    if video_inference:
        cap = cv2.VideoCapture(video_path)
        while(True):
            ret, image_3c = cap.read()
            if not ret:
                break
            image_4c, image_3c = preprocess(image_3c, input_height, input_width)
            print('--> Running model for video inference')
            outputs = rknn.inference(inputs=[image_3c])
            colorlist = gen_color(len(CLASSES))
            results = postprocess(outputs, image_4c, image_3c, conf_thres, iou_thres, classes=len(CLASSES)) ##[box,mask,shape]
            results = results[0]              ## batch=1
            boxes, masks, shape = results
            if isinstance(masks, np.ndarray):
                mask_img, vis_img = vis_result(image_3c, results, colorlist, CLASSES, result_path)
                cv2.imshow("mask_img", mask_img)
                cv2.imshow("vis_img", vis_img)
            else:
                print("No segmentation result")
            cv2.waitKey(10)
    else:
        # Preprocess input image
        image_3c = cv2.imread(image_path)
        image_4c, image_3c = preprocess(image_3c, input_height, input_width)
        print('--> Running model for image inference')
        outputs = rknn.inference(inputs=[image_3c])

        colorlist = gen_color(len(CLASSES))
        results = postprocess(outputs, image_4c, image_3c, conf_thres, iou_thres, classes=len(CLASSES)) ##[box,mask,shape]
        results = results[0]              ## batch=1
        boxes, masks, shape = results
        if isinstance(masks, np.ndarray):
            mask_img, vis_img = vis_result(image_3c, results, colorlist, CLASSES, result_path)
            print('--> Save inference result')
        else:
            print("No segmentation result")
    print("RKNN inference finish")
    rknn.release()
    cv2.destroyAllWindows()
4. Converting to Horizon format

First, download Horizon's ai_toolchain:

wget -c ftp://xj3ftp@vrftp.horizon.ai/ai_toolchain/ai_toolchain.tar.gz --ftp-password=xj3ftp@123$%
tar -xvf ai_toolchain.tar.gz
cd ai_toolchain/
pip3 install h*

As before, we need to export a fresh ONNX model from the PyTorch weights, because RKNN needs ONNX opset 12 while Horizon needs opset 11; this is a one-line change in pytorch2onnx.py. Then run the three shell scripts below, and prepare a YAML file that holds the model input, quantization, and other parameters (a sketch of this file follows the scripts). Replace every parameter with your own settings.

sh 01_check.sh
sh 02_preprocess.sh
sh 03_build.sh
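
For orientation, here is a trimmed sketch of what that YAML might contain. The key names follow Horizon's hb_mapper toolchain conventions, but every value below is an assumption to be replaced with your own settings:

# Illustrative hb_mapper config sketch; key names follow Horizon's toolchain
# docs, and every value is an assumption to replace with your own settings.
model_parameters:
  onnx_model: 'yolov8n-seg-480-640.onnx'
  march: 'bernoulli2'              # XJ3-series BPU architecture
  output_model_file_prefix: 'yolov8n-seg'
  working_dir: 'model_output'
input_parameters:
  input_type_rt: 'nv12'            # input format expected on the board
  input_type_train: 'rgb'
  input_layout_train: 'NCHW'
  norm_type: 'data_scale'
  scale_value: 0.003921568627451   # 1/255
calibration_parameters:
  cal_data_dir: './calibration_data'
  calibration_type: 'max'
compiler_parameters:
  compile_mode: 'latency'
  optimize_level: 'O3'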

This produces the quantized bin and ONNX files: the ONNX file is meant specifically for running on the PC simulator, while the bin file is what runs on the Horizon board.
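
If memory serves from the toolchain samples, the quantized ONNX is run not with plain onnxruntime but through the HB_ONNXRuntime wrapper shipped with the toolchain. A hedged sketch of PC-simulator inference follows; the model filename, input layout, and input_offset value are assumptions taken from Horizon's sample code, and pre/post-processing is omitted:

# Hedged sketch of PC-simulator inference on the quantized onnx, assuming
# the HB_ONNXRuntime wrapper from Horizon's ai_toolchain samples.
import numpy as np
from horizon_tc_ui import HB_ONNXRuntime

sess = HB_ONNXRuntime(model_file="yolov8n-seg_quantized_model.onnx")
input_name = sess.input_names[0]
output_names = sess.output_names

# Dummy NHWC uint8 frame just to show the call; real code feeds a
# preprocessed image matching input_type_rt from the yaml above
dummy = np.zeros((1, 480, 640, 3), dtype=np.uint8)
outputs = sess.run(output_names, {input_name: dummy}, input_offset=128)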

Finally, here is the GitHub repo for anyone who needs it: GitHub - laitathei/YOLOv8-ONNX-RKNN-HORIZON-Segmentation: Inference YOLOv8 segmentation on ONNX, RKNN and Horizon (https://github.com/laitathei/YOLOv8-ONNX-RKNN-HORIZON-Segmentation)

5. References
https://blog.csdn.net/magic_ll/article/details/131944207
https://blog.csdn.net/weixin_45377629/article/details/124582404#t18
https://github.com/ibaiGorordo/ONNX-YOLOv8-Instance-Segmentation
