#
# SPDX-FileCopyrightText: Copyright (c) 1993-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

cmake_minimum_required(VERSION 3.13 FATAL_ERROR)
include(cmake/modules/set_ifndef.cmake)
include(cmake/modules/find_library_create_target.cmake)

# Default the library and output directories to the current build directory, e.g. tensorrt-github-samples/build
set_ifndef(TRT_LIB_DIR ${CMAKE_BINARY_DIR})
set_ifndef(TRT_OUT_DIR ${CMAKE_BINARY_DIR})

if(CMAKE_VERSION VERSION_LESS 3.20)
    file(TO_CMAKE_PATH "${TRT_LIB_DIR}" TRT_LIB_DIR)
    file(TO_CMAKE_PATH "${TRT_OUT_DIR}" TRT_OUT_DIR)
else()
    cmake_path(SET TRT_LIB_DIR ${TRT_LIB_DIR})
    cmake_path(SET TRT_OUT_DIR ${TRT_OUT_DIR})
endif()
message(STATUS "TRT_LIB_DIR: ${TRT_LIB_DIR}")
message(STATUS "TRT_OUT_DIR: ${TRT_OUT_DIR}")
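# Example configuration and build invocation (paths and versions are illustrative, not required by this file):
#   cmake -S . -B build -DTRT_LIB_DIR=/usr/lib/x86_64-linux-gnu -DCUDA_VERSION=12.2.0
#   cmake --build build -j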

# Set compile output paths
set(RUNTIME_OUTPUT_DIRECTORY ${TRT_OUT_DIR} CACHE PATH "Output directory for runtime target files")
set(LIBRARY_OUTPUT_DIRECTORY ${TRT_OUT_DIR} CACHE PATH "Output directory for library target files")
set(ARCHIVE_OUTPUT_DIRECTORY ${TRT_OUT_DIR} CACHE PATH "Output directory for archive target files")

set(STATIC_LIB_EXT "a")

# Parse the TensorRT version (MAJOR/MINOR/PATCH/BUILD) from include/NvInferVersion.h
file(STRINGS "${CMAKE_CURRENT_SOURCE_DIR}/include/NvInferVersion.h" VERSION_STRINGS REGEX "#define NV_TENSORRT_.*")

foreach(TYPE MAJOR MINOR PATCH BUILD)
    string(REGEX MATCH "NV_TENSORRT_${TYPE} [0-9]+" TRT_TYPE_STRING ${VERSION_STRINGS})
    string(REGEX MATCH "[0-9]+" TRT_${TYPE} ${TRT_TYPE_STRING})
endforeach(TYPE)

set(TRT_VERSION "${TRT_MAJOR}.${TRT_MINOR}.${TRT_PATCH}" CACHE STRING "TensorRT project version")
set(ONNX2TRT_VERSION "${TRT_MAJOR}.${TRT_MINOR}.${TRT_PATCH}" CACHE STRING "ONNX2TRT project version")
set(TRT_SOVERSION "${TRT_MAJOR}" CACHE STRING "TensorRT library so version")
message(STATUS "Building for TensorRT version: ${TRT_VERSION}, library version: ${TRT_SOVERSION}")

# Locate the C++ compiler when no toolchain file is provided
if(NOT DEFINED CMAKE_TOOLCHAIN_FILE)
    # find_program is lazy: it only searches and assigns when the variable is empty or undefined
    find_program(CMAKE_CXX_COMPILER NAMES $ENV{CXX} g++)
    message(STATUS "CMAKE_CXX_COMPILER is: ${CMAKE_CXX_COMPILER}")
    if(NOT CMAKE_CXX_COMPILER)
        message(FATAL_ERROR "C++ compiler not found. Please specify one using the CXX environment variable or ensure g++ is installed.")
    endif()
endif()

# Declare the project with C++ and CUDA enabled
project(TensorRT
        LANGUAGES CXX CUDA
        VERSION ${TRT_VERSION}
        DESCRIPTION "TensorRT is a C++ library that facilitates high-performance inference on NVIDIA GPUs and deep learning accelerators."
        HOMEPAGE_URL "https://github.com/NVIDIA/TensorRT")

# Default the install prefix to the parent directory of TRT_LIB_DIR when the user did not set one
if(CMAKE_INSTALL_PREFIX_INITIALIZED_TO_DEFAULT)
    set(CMAKE_INSTALL_PREFIX ${TRT_LIB_DIR}/../ CACHE PATH "TensorRT installation" FORCE)
endif(CMAKE_INSTALL_PREFIX_INITIALIZED_TO_DEFAULT)

option(BUILD_SAMPLES "Build TensorRT samples" ON)
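# Samples can be skipped at configure time, e.g. cmake -DBUILD_SAMPLES=OFF ...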

# Set C++ standard version
set(CMAKE_CXX_STANDARD 17)
set(CMAKE_CXX_STANDARD_REQUIRED ON)
set(CMAKE_CXX_EXTENSIONS OFF)

# Compiler flags: identify the OSS CMake build via -DBUILD_SYSTEM=cmake_oss; on non-MSVC also silence deprecation warnings
if(NOT MSVC)
    set(CMAKE_CXX_FLAGS "-Wno-deprecated-declarations ${CMAKE_CXX_FLAGS} -DBUILD_SYSTEM=cmake_oss")
else()
    set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DBUILD_SYSTEM=cmake_oss")
endif()

# Target platform (override TRT_PLATFORM_ID when cross-compiling)
set_ifndef(TRT_PLATFORM_ID "x86_64")
message(STATUS "Targeting TRT Platform: ${TRT_PLATFORM_ID}")

set(TRT_DEBUG_POSTFIX _debug CACHE STRING "suffix for debug builds")

if (CMAKE_BUILD_TYPE STREQUAL "Debug")
    message(STATUS "Building in debug mode ${TRT_DEBUG_POSTFIX}")
endif()
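# Note: TRT_DEBUG_POSTFIX is a cache hint for debug-suffixed library names
# (e.g. libfoo_debug.so, name illustrative); whether it is applied depends on the subdirectory targets.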

# Dependencies: Threads and the CUDA toolkit (headers via CUDA_INCLUDE_DIRS)
set(DEFAULT_CUDA_VERSION 12.2.0)
set(DEFAULT_CUDNN_VERSION 8.9)

## Dependency Version Resolution
set_ifndef(CUDA_VERSION ${DEFAULT_CUDA_VERSION})
message(STATUS "CUDA version set to ${CUDA_VERSION}")
set_ifndef(CUDNN_VERSION ${DEFAULT_CUDNN_VERSION})
message(STATUS "cuDNN version set to ${CUDNN_VERSION}")
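# Both defaults above can be overridden at configure time, e.g.
#   cmake -DCUDA_VERSION=12.4.0 -DCUDNN_VERSION=8.9 ...   (values illustrative)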

set(THREADS_PREFER_PTHREAD_FLAG ON)
find_package(Threads REQUIRED)

message(STATUS "CUDA_INCLUDE_DIRS before find package: ${CUDA_INCLUDE_DIRS}")
## find_package(CUDA) is broken for cross-compilation. Enable CUDA language instead.
message(STATUS "CMAKE_PREFIX_PATH: ${CMAKE_PREFIX_PATH}")
if(NOT DEFINED CMAKE_TOOLCHAIN_FILE)
    # Even when an exact version is requested, the locally installed CUDA (e.g. 12.4) may still be found
    find_package(CUDA ${CUDA_VERSION} REQUIRED EXACT)
endif()
message(STATUS "CUDA_INCLUDE_DIRS after find package: ${CUDA_INCLUDE_DIRS}")

include_directories(
    ${CUDA_INCLUDE_DIRS}
)

set(nvinfer_lib_name "nvinfer")
set(nvinfer_plugin_lib_name "nvinfer_plugin")
set(nvinfer_vc_plugin_lib_name "nvinfer_vc_plugin")
set(nvonnxparser_lib_name "nvonnxparser")

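# find_library_create_target (see cmake/modules/) is expected to locate a prebuilt library,
# preferring TRT_LIB_DIR, and expose it as an imported target; here it wraps the prebuilt libnvinfer.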
find_library_create_target(nvinfer ${nvinfer_lib_name} SHARED ${TRT_LIB_DIR})
find_library(CUDART_LIB cudart_static HINTS ${CUDA_TOOLKIT_ROOT_DIR} PATH_SUFFIXES lib lib/x64 lib64)

if (NOT MSVC)
    find_library(RT_LIB rt)
endif()

set(CUDA_LIBRARIES ${CUDART_LIB})

set(CMAKE_CUDA_FLAGS "${CMAKE_CUDA_FLAGS} --expt-relaxed-constexpr -Xcompiler -Wno-deprecated-declarations")
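# --expt-relaxed-constexpr lets device code call constexpr host functions (and vice versa);
# -Xcompiler forwards -Wno-deprecated-declarations to the host compiler.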
| 141 | + |
| 142 | +############################################################################################ |
| 143 | +# TensorRT |
| 144 | + |
| 145 | +if(BUILD_SAMPLES) |
| 146 | + add_subdirectory(samples) |
| 147 | +endif() |