0.安装说明

CUDA_PATH       = /usr/local/cuda
TRT_PATH    = /opt/TensorRT-8.6.1.6

1.CMakeLists.txt

cmake_minimum_required(VERSION 3.14)  # minimum required CMake version

# The compiler must be chosen BEFORE project() for the setting to take effect
# reliably; project() probes and caches the compiler. nvcc is used as the C++
# compiler so the .cpp sources (which contain CUDA code) compile with it.
set(CMAKE_CXX_COMPILER "/usr/local/cuda/bin/nvcc")

project(main LANGUAGES CXX)  # project name

# Header search paths for CUDA, TensorRT and OpenCV
include_directories(/usr/local/cuda/include)
include_directories(/opt/TensorRT-8.6.1.6/include)
include_directories(/usr/include/opencv2)

# Do not embed build-tree RPATHs into the produced binary
# (equivalent to passing -DCMAKE_SKIP_BUILD_RPATH=TRUE on the command line)
set(CMAKE_SKIP_BUILD_RPATH TRUE)

# nvcc options; host-compiler flags are forwarded via -Xcompiler
add_compile_options(-std=c++14 -O3 -UDEBUG -Xcompiler -fPIC -use_fast_math -g)

# Echo full compiler command lines during make (handy for build debugging)
set(CMAKE_VERBOSE_MAKEFILE ON)

# Collect every top-level .cpp file.
# NOTE(review): an explicit source list is the more robust choice; with a glob,
# CONFIGURE_DEPENDS (CMake >= 3.12) at least makes the build re-check the
# pattern so newly added files are picked up without a manual re-configure.
set(PRJ_SRC_LIST)
file(GLOB root_src_files CONFIGURE_DEPENDS "${CMAKE_CURRENT_SOURCE_DIR}/*.cpp")
list(APPEND PRJ_SRC_LIST ${root_src_files})

# Library search paths: CUDA, TensorRT (including driver stubs) and local libs
link_directories(/usr/local/cuda/lib64 /opt/TensorRT-8.6.1.6/lib /opt/TensorRT-8.6.1.6/lib/stubs /usr/local/lib)

# Build the executable from all collected sources
add_executable(main ${PRJ_SRC_LIST})

# Link CUDA runtime, TensorRT, cuDNN/cuBLAS stubs (.a static and .so shared
# libraries by name) plus the OpenCV modules the application uses.
# PRIVATE: these are implementation dependencies of the executable only.
target_link_libraries(main PRIVATE
    cudart_static
    nvrtc
    nvptxcompiler_static
    nvonnxparser
    nvinfer
    cudnn_static_stub_trt
    cublas_static_stub_trt
    cublasLt_static_stub_trt
    opencv_gapi
    opencv_highgui
    opencv_ml
    opencv_objdetect
    opencv_photo
    opencv_stitching
    opencv_video
    opencv_calib3d
    opencv_features2d
    opencv_dnn
    opencv_flann
    opencv_videoio
    opencv_imgcodecs
    opencv_imgproc
    opencv_core
)

# Enable CUDA separable compilation for the executable.
# FIX: the target is named "main" — the original said "Main", and CMake target
# names are case-sensitive, so configuration failed with "Can not find target".
set_target_properties(main PROPERTIES CUDA_SEPARABLE_COMPILATION ON)

2.编译CMD

mkdir build
cd build
cmake .. -DCMAKE_SKIP_BUILD_RPATH=TRUE
make

标签: 部署, infer, tensorrt

添加新评论