0.Installation notes

CUDA and TensorRT are installed under the following paths, which the Makefile.inc below refers to:

CUDA_PATH       = /usr/local/cuda
TRT_PATH    = /opt/TensorRT-8.6.1.6
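
As a quick sanity check of these paths, a minimal program (hypothetical file name check_env.cu, not part of the original project) can be compiled with the same nvcc and headers to print the CUDA runtime and TensorRT header versions:

// check_env.cu -- hypothetical sanity check for the installation paths above
// build sketch: /usr/local/cuda/bin/nvcc -I/opt/TensorRT-8.6.1.6/include check_env.cu -o check_env
#include <cstdio>
#include <cuda_runtime_api.h>   // cudaRuntimeGetVersion
#include <NvInferVersion.h>     // NV_TENSORRT_MAJOR / MINOR / PATCH macros

int main()
{
    int cudaVer = 0;
    cudaRuntimeGetVersion(&cudaVer);      // e.g. 11080 means CUDA 11.8
    printf("CUDA runtime version: %d\n", cudaVer);
    printf("TensorRT headers    : %d.%d.%d\n",
           NV_TENSORRT_MAJOR, NV_TENSORRT_MINOR, NV_TENSORRT_PATCH);
    return 0;
}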

1.Makefile.inc

CUDA_PATH  = /usr/local/cuda
TRT_PATH   = /opt/TensorRT-8.6.1.6

CXX        = $(CUDA_PATH)/bin/nvcc
# Debug build flags (-O0 -g); INCLUDE is expanded recursively, so it can be defined below
CXXFLAGS   = -std=c++14 -O0 -UDEBUG -Xcompiler -fPIC -use_fast_math $(INCLUDE) -g

# Plain assignment first, then append, so none of the -I entries get overwritten
INCLUDE    = -I$(CUDA_PATH)/include
INCLUDE   += -I$(TRT_PATH)/include
INCLUDE   += -I/usr/include/opencv2
INCLUDE   += -I. -I../../include -I../../../include

LDFLAGS    = -L$(CUDA_PATH)/lib64 -lcudart -lnvrtc -lnvptxcompiler_static
LDFLAGS   += -L$(TRT_PATH)/lib -lnvinfer_static -lcudnn -lnvonnxparser
LDFLAGS   += -L$(TRT_PATH)/lib/stubs/ -lcublas_static_stub_trt -lcublasLt_static_stub_trt
LDFLAGS   += -L/usr/local/lib -lopencv_gapi -lopencv_highgui -lopencv_ml -lopencv_objdetect -lopencv_photo -lopencv_stitching -lopencv_video -lopencv_calib3d -lopencv_features2d -lopencv_dnn -lopencv_flann -lopencv_videoio -lopencv_imgcodecs -lopencv_imgproc -lopencv_core
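
For reference, a minimal main.cpp of the kind these flags are meant to compile and link (a sketch only; the Logger class and the file name input.jpg are illustrative, not from the original project): it creates a TensorRT builder through an ILogger and loads an image with OpenCV, exercising the nvinfer and opencv libraries listed above.

// main.cpp -- minimal sketch to exercise the INCLUDE/LDFLAGS above; not a full inference program
#include <cstdio>
#include <NvInfer.h>
#include <opencv2/opencv.hpp>   // assumes the opencv2/ folder is reachable from the include path

// TensorRT requires an ILogger implementation before a builder can be created
class Logger : public nvinfer1::ILogger
{
    void log(Severity severity, const char* msg) noexcept override
    {
        if (severity <= Severity::kWARNING)
            printf("[TRT] %s\n", msg);
    }
};

int main()
{
    Logger logger;
    nvinfer1::IBuilder* builder = nvinfer1::createInferBuilder(logger);   // links against nvinfer
    printf("TensorRT builder created: %s\n", builder ? "ok" : "failed");

    cv::Mat img = cv::imread("input.jpg");   // "input.jpg" is a placeholder file name
    printf("image size: %d x %d\n", img.cols, img.rows);

    delete builder;   // TensorRT 8.x allows plain delete instead of destroy()
    return 0;
}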

2.Makefile

include Makefile.inc

SRC1         = $(wildcard *.cpp)
SRC2         = $(wildcard *.cu)
OBJ1         = $(SRC1:.cpp=.o)
OBJ2         = $(SRC2:.cu=.o)
OBJ          = $(OBJ1) $(OBJ2) 

$(info $(OBJ))

TARGET1 = main
TARGET2 = test

all:$(TARGET1)

$(TARGET1): $(OBJ)
    $(CXX) -g -o $@ $^ $(LDFLAGS)

%.o: %.cpp
    $(CXX) -g $(CXXFLAGS) -c -o $@ $<
%.o: %.cu
    $(CXX) -g $(CXXFLAGS) -c -o $@ $<



clean:
    rm -rf ./*.d ./*.o ./*.so $(TARGET1) ./*.plan ./*.jpg
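
Running make in this directory builds main from every .cpp and .cu file found by the wildcard, and make clean removes the objects, the generated *.plan engines and output images. As an illustration of what the %.o: %.cu rule picks up, a minimal CUDA source might look like this (the file name preprocess.cu and the kernel are hypothetical):

// preprocess.cu -- hypothetical example of a .cu file compiled by the %.o: %.cu rule
#include <cuda_runtime.h>

// Trivial kernel: scale a float buffer in place (e.g. normalize pixel values before inference)
__global__ void scaleKernel(float* data, float factor, int n)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < n)
        data[idx] *= factor;
}

// Host-side wrapper that the .cpp files can call
void scaleOnDevice(float* dData, float factor, int n, cudaStream_t stream)
{
    const int block = 256;
    const int grid  = (n + block - 1) / block;
    scaleKernel<<<grid, block, 0, stream>>>(dData, factor, n);
}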

Tags: deployment, infer, tensorrt
