Define the model and convert it to ONNX:

import os
import cv2
import numpy as np
import requests
import torch
import torch.onnx
from torch import nn
# SRCNN, a classic super-resolution model
class SuperResolutionNet(nn.Module):
    def __init__(self, upscale_factor):
        super().__init__()
        self.upscale_factor = upscale_factor
        self.img_upsample = nn.Upsample(
            scale_factor= self.upscale_factor,
            #mode= 'bicubic',
            mode= 'bilinear',
            align_corners= False
        )
        self.conv1 = nn.Conv2d(3, 64, kernel_size= 9, padding=4)
        self.conv2 = nn.Conv2d(64, 32, kernel_size=1, padding=0)
        self.conv3 = nn.Conv2d(32, 3, kernel_size=5, padding=2)
        self.Relu = nn.ReLU()
    def forward(self, x):
        x = self.img_upsample(x)
        out = self.Relu(self.conv1(x))
        out = self.Relu(self.conv2(out))
        out = self.conv3(out)
        return out
def init_torch_model(model_name):
    torch_model = SuperResolutionNet(upscale_factor=3)
    state_dict = torch.load(model_name)['state_dict']
    # Adapt the checkpoint: strip the leading module prefix from each key,
    # e.g. generator.conv1.weight -> conv1.weight, so the keys match this model definition
    for old_key in list(state_dict.keys()):
        new_key = '.'.join(old_key.split('.')[1:])
        state_dict[new_key] = state_dict.pop(old_key)
    torch_model.load_state_dict(state_dict)
    torch_model.eval()
    return torch_model
def model2onnx(model, outputname):
    input_x = torch.randn(1, 3, 256, 256)
    dynamic = True
    if dynamic:
        dynamic_axes = { # mark dim 0 of both input and output as a dynamic batch dimension
            'input': {
                0: 'batch',
            },
            'output': {
                0: 'batch'
            }
        }
    else:
        dynamic_axes = {}
    with torch.no_grad():
        torch.onnx.export(
            model,
            input_x,
            outputname,
            opset_version=11,
            input_names=['input'],
            output_names=['output'],
            dynamic_axes=dynamic_axes)
    import onnx
    onnx_model = onnx.load(outputname)
    try:
        onnx.checker.check_model(onnx_model)
    except Exception:
        print("Model incorrect")
    else:
        print("Model correct")
def runonnx(onnxname, input_img):
    import onnxruntime
    ort_session = onnxruntime.InferenceSession(onnxname)
    ort_inputs = {'input': input_img}
    ort_output = ort_session.run(['output'], ort_inputs)[0]
    return ort_output

if __name__ == '__main__':
    # Download checkpoint and test image
    urls = ['https://download.openmmlab.com/mmediting/restorers/srcnn/srcnn_x4k915_1x16_1000k_div2k_20200608-4186f232.pth']
    names = ['srcnn.pth', '../imgs/face3.png']
    # zip stops at the shorter list, so only the checkpoint is downloaded here;
    # the test images under ../imgs/ are expected to exist already
    for url, name in zip(urls, names):
        print(url, name)
        open(name, 'wb').write(requests.get(url).content)
    model = init_torch_model(names[0])
    model2onnx(model, 'srcnn.onnx')
    batchsize = 2
    input_img = np.zeros((batchsize, 3, 256, 256), dtype=np.float32)
    for i in range(batchsize):
        imgname = '../imgs/face' + str(i) + '.png'
        img = cv2.imread(imgname).astype(np.float32)
        img = np.transpose(img,(2, 0, 1)) # HWC 2 CHW
        input_img[i] = img
        #input_img = np.expand_dims(input_img, 0) # CHW 2 NCHW
    #run pth
    #output = model(torch.from_numpy(input_img)).detach().numpy()
    # run onnx
    output = runonnx('srcnn.onnx', input_img)
    print(input_img[1, 2, 100, 200])
    print(output.shape)
    print(output[1, 1, 20, 30])
    for i in range(batchsize):
        #out = np.squeeze(output, 0)
        out = output[i]
        out = np.clip(out, 0, 255)
        out = np.transpose(out, [1,2,0]).astype(np.uint8)
        cv2.imwrite("face_torch"+str(i)+".png", out)
        cv2.imshow("img", out)
        cv2.waitKey(0)

The trained model (e.g. a .pt file) is converted to ONNX, a standard format that is independent of environment and platform. An ONNX file stores not only the network's weights but also its structure, including the inputs and outputs of each layer.

ONNX inference can be done with the official ONNX Runtime library; on NVIDIA platforms the model can also be converted to TensorRT and run there. This article focuses on how to deploy and run the model in C++ after converting it to the TRT format.

There are two main ways to generate a model with TensorRT:

1. Build the network layer by layer directly through the TensorRT API (see the sketch below);
2. Convert a model in an intermediate representation into a TensorRT model, for example converting an ONNX model into a TensorRT engine.
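Option 1 is rarely practical for a full model, but it is worth seeing once. Below is a minimal, hedged sketch (not this article's code) of how the first SRCNN convolution plus ReLU could be declared with the TensorRT C++ API; it assumes TensorRT 7/8 headers and a builder created with createInferBuilder(gLogger) as in the code further down, and the Weights pointers are placeholders that would have to point at the real trained parameters:

// Sketch only: declare input -> conv(9x9, pad 4) -> ReLU with the TensorRT API.
// The nullptr weight values must be replaced with real data before building an engine.
INetworkDefinition *net = builder->createNetworkV2(
    1U << static_cast<uint32_t>(NetworkDefinitionCreationFlag::kEXPLICIT_BATCH));
ITensor *input = net->addInput("input", DataType::kFLOAT, Dims4{1, 3, 256, 256});
Weights conv1W{DataType::kFLOAT, /*values=*/nullptr, /*count=*/64 * 3 * 9 * 9};
Weights conv1B{DataType::kFLOAT, /*values=*/nullptr, /*count=*/64};
IConvolutionLayer *conv1 = net->addConvolutionNd(*input, 64, DimsHW{9, 9}, conv1W, conv1B);
conv1->setPaddingNd(DimsHW{4, 4});
IActivationLayer *relu1 = net->addActivation(*conv1->getOutput(0), ActivationType::kRELU);
relu1->getOutput(0)->setName("output");
net->markOutput(*relu1->getOutput(0));

Because every weight tensor must be wired up by hand this way, the ONNX route in option 2 is the one used in the rest of the article.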

1. ONNX to TensorRT

The basic flow is:
1. Create a builder, have the builder create a network, and then parse the ONNX file with the parser.
2. Set the necessary build parameters.
3. Let the builder build the engine from the network and save it as a TRT model.
The logging.h file used here is the one that ships with NVIDIA's TensorRT samples.

#include "NvInfer.h"
#include "NvOnnxParser.h"
#include "logging.h"
#include<fstream>  
#include<iostream>  
#include<string>   
using namespace std;
using namespace nvonnxparser;
using namespace nvinfer1;

#define USE_FP16
static Logger gLogger;

void saveToTrtModel(std::string trt_save_path, IHostMemory *trtModelStream){
    std::ofstream out(trt_save_path, std::ios::binary);
    if (!out.is_open()){
        std::cout << "Failed to open output file!" << std::endl;
        return;
    }
    out.write(reinterpret_cast<const char*>(trtModelStream->data()), trtModelStream->size());
    out.close();
}

int onnx2trt(){
    std::string onnx_path = "../srcnn.onnx";
    std::string trt_save_path = "../srcnn.trt";
    int batch_size = 4;  // must cover the largest batch used by the deployment code below
    IBuilder *builder = createInferBuilder(gLogger);
    // 1U sets NetworkDefinitionCreationFlag::kEXPLICIT_BATCH, i.e. an explicit-batch network
    INetworkDefinition *network = builder->createNetworkV2(1U);
    // parse the ONNX model
    IParser *parser = nvonnxparser::createParser(*network, gLogger);
    if(!parser->parseFromFile(onnx_path.c_str(), (int)nvinfer1::ILogger::Severity::kWARNING)){
        std::cout << "failed to parse the ONNX file ..." << std::endl;
        return -1;
    }
    IBuilderConfig *config = builder->createBuilderConfig();
    builder->setMaxBatchSize(batch_size);
    config->setMaxWorkspaceSize(1<<30);
    // optimization profile for the dynamic batch dimension: give min/opt/max shapes
    auto profile = builder->createOptimizationProfile();
    auto input_tensor = network->getInput(0);
    auto input_dims = input_tensor->getDimensions();
    input_dims.d[0] = 1;
    profile->setDimensions(input_tensor->getName(), nvinfer1::OptProfileSelector::kMIN, input_dims);
    profile->setDimensions(input_tensor->getName(), nvinfer1::OptProfileSelector::kOPT, input_dims);
    input_dims.d[0] = batch_size;
    profile->setDimensions(input_tensor->getName(), nvinfer1::OptProfileSelector::kMAX, input_dims);
    config->addOptimizationProfile(profile);

#ifdef USE_FP16
    config->setFlag(BuilderFlag::kFP16);
#endif
#ifdef USE_INT8
    config->setFlag(BuilderFlag::kINT8);
#endif
    ICudaEngine *engine = builder->buildEngineWithConfig(*network, *config);
    assert(engine);
    IHostMemory *trtModelStream = engine->serialize();  // serialize the engine and save it as a .trt file
    saveToTrtModel(trt_save_path.c_str(), trtModelStream);
    parser->destroy();
    engine->destroy();
    network->destroy();
    config->destroy();
    builder->destroy();
    return 0;
}
int main(){
    onnx2trt();
    return 0;
}

The CMakeLists.txt for the conversion tool (pointing at your local TensorRT and CUDA paths):

cmake_minimum_required(VERSION 3.10)
project(onnx2trt)
include_directories(/home/max/TensorRT-7.2.1.6/include)
link_directories(/home/max/TensorRT-7.2.1.6/lib)
include_directories(/usr/local/cuda/include)
link_directories(/usr/local/cuda/lib64)
add_executable(onnx2trt onnx2trt.cpp)
target_link_libraries(onnx2trt nvinfer)
target_link_libraries(onnx2trt cudart)
target_link_libraries(onnx2trt nvonnxparser)

2. TensorRT model inference

The converted TRT model is then run for inference in the deployment project.
The basic flow:
1. Read the serialized model from file and deserialize it into a CUDA inference engine.
2. Allocate the CPU and GPU memory needed for inference.
3. Run the engine and fetch the results.
Inference can then be done by calling the class interface below.

For dynamic-batch inference, remember to set the maximum dynamic batch size; otherwise the batch dimension stays at the default -1 and multi-batch inference fails with an error:
_context->setBindingDimensions(0, nvinfer1::Dims4(_max_batchsize, 3, _input_h, _input_w));
With a dynamic batch size, the actual number of images in a given inference call just has to be no larger than _max_batchsize.
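If you want the engine to compute only the samples actually present in a call, a variant (a sketch using the same class members and parameter names as the code below) is to reset the input binding to the real batch right before enqueueV2:

// optional variant: bind the input to the actual batch of this call; infer_batch must
// still be <= the kMAX batch registered in the optimization profile at build time
_context->setBindingDimensions(0, nvinfer1::Dims4(infer_batch, 3, _input_h, _input_w));
_context->enqueueV2((void **)_inferbuffers, _stream, nullptr);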

#include "logging.h"
#include "NvInfer.h"
#include "NvOnnxParser.h"
#include <NvInferRuntime.h>
#include <cuda_runtime.h> // cuda include
#include<fstream>  
#include<iostream>  
#include<string> 
#include<opencv2/opencv.hpp>
using namespace nvinfer1; 
static Logger gLogger;

#define CHECK(call)\
do\
{\
    const cudaError_t error_code = call;\
    if (error_code != cudaSuccess)\
    {\
        printf("CUDA Error:\n");\
        printf("    FILE: %s\n", __FILE__);\
        printf("    LINE: %d\n", __LINE__);\
        printf("    Error code: %d\n", error_code);\
        printf("    Error text: %s\n", cudaGetErrorString(error_code));\
        exit(1);\
    }\
} while (0)


#define clip(x) (x < 0 ? 0.0 : ( x > 255.0 ? 255.0 : x))

class TrtDetct
{
private:
    char *_trtModelStream{nullptr};
    IRuntime* _runtime = nullptr;
    ICudaEngine* _engine=nullptr;
    IExecutionContext* _context=nullptr;
    void *_inferbuffers[2];
    int _outputSize = 0;
    int _max_batchsize = 4;
    int _input_h = 256;
    int _input_w = 256;
    int _inputSize = 3 * _input_h * _input_w;
    cudaStream_t _stream;

private:
    // product of the non-batch dims of binding 1 (the output): C*H*W per sample
    int getoutputSize(){
        auto out_dims = _engine->getBindingDimensions(1);
        int outputSize = 1;
        for(int j = 1; j < out_dims.nbDims; j++) {
            std::cout << "j = " << j << " size = " << out_dims.d[j] << std::endl;
            outputSize *= out_dims.d[j];
        }
        return outputSize;
    }
public:
    TrtDetct(/* args */){};
    ~TrtDetct(){
        if (nullptr != _trtModelStream){
            delete [] _trtModelStream;
        }
    };
    int outputsize(){
        return _outputSize;
    }
    // read the serialized engine from file and deserialize it into a CUDA engine
    void load_trtmodel(std::string trt_model_path){
        std::ifstream file(trt_model_path, std::ios::binary);
        size_t size{0};
        if (file.good()) {
                file.seekg(0, file.end);
                size = file.tellg();
                file.seekg(0, file.beg);
                _trtModelStream = new char[size];
                assert(_trtModelStream);
                file.read(_trtModelStream, size);
                file.close();
        }
        _runtime = createInferRuntime(gLogger);
        assert(_runtime != nullptr);
        _engine = _runtime->deserializeCudaEngine(_trtModelStream, size);
        assert(_engine != nullptr);
        _context = _engine->createExecutionContext();
        assert(_context != nullptr);
    }

    // allocate the buffers needed for inference
    void initbuff(){
        _outputSize = getoutputSize();
        // bind the input to the maximum dynamic batch size
        _context->setBindingDimensions(0, nvinfer1::Dims4(_max_batchsize, 3, _input_h, _input_w));

        const int inputIndex = _engine->getBindingIndex("input");
        const int outputIndex = _engine->getBindingIndex("output");
        assert(inputIndex == 0);
        assert(outputIndex == 1);
        CHECK(cudaMalloc((void**)&_inferbuffers[inputIndex],  _max_batchsize * _inputSize * sizeof(float)));   // device memory for the input
        CHECK(cudaMalloc((void**)&_inferbuffers[outputIndex], _max_batchsize * _outputSize * sizeof(float)));  // device memory for the output
        CHECK(cudaStreamCreate(&_stream));
    }
    void releasebuff(){
        CHECK(cudaFree(_inferbuffers[0]));
        CHECK(cudaFree(_inferbuffers[1]));
    }
    // run inference on infer_batch samples (infer_batch <= _max_batchsize)
    void infer_trtmodel(const int infer_batch, const float* input_data, float *outputbuff){
        // copy the preprocessed images into _inferbuffers[0] (this step could also run on the GPU with CUDA)
        CHECK(cudaMemcpyAsync(_inferbuffers[0], input_data, infer_batch * _inputSize * sizeof(float), cudaMemcpyHostToDevice, _stream));

        _context->enqueueV2((void **)_inferbuffers, _stream, nullptr);

        // post-process _inferbuffers[1]; here it is copied back to the CPU, but it could also stay on the GPU
        CHECK(cudaMemcpyAsync(outputbuff, _inferbuffers[1], infer_batch * _outputSize * sizeof(float), cudaMemcpyDeviceToHost, _stream));
        cudaStreamSynchronize(_stream);
    }
};



int main(){
    TrtDetct *srcnn = new TrtDetct();
    srcnn ->load_trtmodel("../srcnn.trt");
    srcnn ->initbuff();
    //cv::waitKey(0);
    //img.convertTo(img, CV_32FC3);

    // convert each image from HWC (OpenCV layout) to CHW (network layout)
    int BatchSize = 4;
    int channel = 3;
    int imgH = 256;
    int imgW = 256; 

    float* input_last = new float[BatchSize * channel * imgH * imgW];
    for(int i =0; i< BatchSize; i++){
        std::string img_path = "../../imgs/face" + std::to_string(i) + ".png";
        cv::Mat img = cv::imread(img_path);
        // cv::imshow("img", img);
        // cv::waitKey(0);
        for (int c = 0; c < channel; ++c){
            for (int h = 0; h < imgH; ++h){
                for (int w = 0; w < imgW; ++w){   
                    input_last[i * channel * imgH * imgW + c * imgH * imgW + h * imgW + w] = 
                    static_cast<float>(img.at<cv::Vec3b>(h, w)[c]);
                }
            }
        }
    }

    int outputSize = srcnn ->outputsize();
    float *outputbuff = new float[BatchSize * outputSize];
    srcnn ->infer_trtmodel(BatchSize, input_last, outputbuff);

    // the output is CHW and needs to be converted back to HWC for OpenCV
    int scale = 3;
    int outputH = imgH * scale;
    int outputW = imgW * scale; 
   
    cv::Mat ouputimg(outputH, outputW, CV_8UC3, cv::Scalar(255));
    for(int i =0; i< BatchSize; i++){
        for (int h = 0; h < outputH; ++h){
            for (int w = 0; w < outputW; ++w){  
                for (int c = 0; c < channel; ++c){
                    ouputimg.at<cv::Vec3b>(h, w)[c] =
                    u_char(clip(outputbuff[i * channel * outputH * outputW + c * outputH * outputW + h * outputW + w]));
                }
            }
        }
        cv::imshow("ouputimg", ouputimg);
        cv::waitKey(0);
    }
    
    delete []input_last;
    input_last = nullptr;
    delete []outputbuff;
    outputbuff = nullptr;
    srcnn ->releasebuff();
    delete srcnn;
    return 0;
}

The CMakeLists.txt file for the deployment project is as follows:

cmake_minimum_required(VERSION 3.10)
project(trt_detect)
add_definitions(-std=c++11)
add_definitions(-w)
find_package(CUDA REQUIRED)
# OpenCV package
FIND_PACKAGE(OpenCV REQUIRED) 
# OpenCV include directories
INCLUDE_DIRECTORIES(${OpenCV_INCLUDE_DIRS})

set(CMAKE_CXX_STANDARD 11)
set(CMAKE_BUILD_TYPE Release)
#cuda 
include_directories(/usr/local/cuda/include)
link_directories(/usr/local/cuda/lib64)
include_directories(/home/max/TensorRT-8.2.5.1/include)
link_directories(/home/max/TensorRT-8.2.5.1/lib)
cuda_add_executable(onnx2trt onnx2trt.cpp)
cuda_add_executable(trt_detect runtrt.cpp)
target_link_libraries(onnx2trt nvinfer)
target_link_libraries(onnx2trt cudart)
target_link_libraries(onnx2trt nvonnxparser)
target_link_libraries(trt_detect nvinfer)
target_link_libraries(trt_detect cudart)
target_link_libraries(trt_detect nvonnxparser)
target_link_libraries(trt_detect ${OpenCV_LIBS})
add_definitions(-O2)