
Commit 8305ce9

add saveEngine to save model.engine from the ONNX parser

1 parent 630ac2b commit 8305ce9

10 files changed: +208 -30 lines changed

.gitignore

Lines changed: 1 addition & 0 deletions
@@ -9,3 +9,4 @@ bin/
 *.vcxproj
 externals/
 **/.DS_Store
+save

.vscode/launch.json

Lines changed: 1 addition & 1 deletion
@@ -9,7 +9,7 @@
     "type": "cppdbg",
     "request": "launch",
     "program": "/home/zqi/sbx/tensorrt-github-samples/build/sample_onnx_mnist_debug",
-    "args": ["-d", "/home/zqi/sbx/data/tensorrt-sample-data/mnist/", "--fp16"],
+    "args": ["-d", "/home/zqi/sbx/data/tensorrt-sample-data/mnist/", "--fp16", "--saveEngine", "/home/zqi/sbx/tensorrt-github-samples/save/mnist.engine"],
     "environment": [{ "name": "config", "value": "Debug" }],
     "MIMode": "gdb",
     "miDebuggerPath": "/usr/bin/gdb",

CMakeLists.txt

Lines changed: 19 additions & 18 deletions
@@ -38,20 +38,20 @@ set(RUNTIME_OUTPUT_DIRECTORY ${TRT_OUT_DIR} CACHE PATH "Output directory for run
 set(LIBRARY_OUTPUT_DIRECTORY ${TRT_OUT_DIR} CACHE PATH "Output directory for library target files")
 set(ARCHIVE_OUTPUT_DIRECTORY ${TRT_OUT_DIR} CACHE PATH "Output directory for archive target files")

-set(STATIC_LIB_EXT "a")
+# set(STATIC_LIB_EXT "a")

-# Get tensorrt version info
-file(STRINGS "${CMAKE_CURRENT_SOURCE_DIR}/include/NvInferVersion.h" VERSION_STRINGS REGEX "#define NV_TENSORRT_.*")
+# # Get tensorrt version info
+# file(STRINGS "${CMAKE_CURRENT_SOURCE_DIR}/include/NvInferVersion.h" VERSION_STRINGS REGEX "#define NV_TENSORRT_.*")

-foreach(TYPE MAJOR MINOR PATCH BUILD)
-    string(REGEX MATCH "NV_TENSORRT_${TYPE} [0-9]+" TRT_TYPE_STRING ${VERSION_STRINGS})
-    string(REGEX MATCH "[0-9]+" TRT_${TYPE} ${TRT_TYPE_STRING})
-endforeach(TYPE)
+# foreach(TYPE MAJOR MINOR PATCH BUILD)
+#     string(REGEX MATCH "NV_TENSORRT_${TYPE} [0-9]+" TRT_TYPE_STRING ${VERSION_STRINGS})
+#     string(REGEX MATCH "[0-9]+" TRT_${TYPE} ${TRT_TYPE_STRING})
+# endforeach(TYPE)

-set(TRT_VERSION "${TRT_MAJOR}.${TRT_MINOR}.${TRT_PATCH}" CACHE STRING "TensorRT project version")
-set(ONNX2TRT_VERSION "${TRT_MAJOR}.${TRT_MINOR}.${TRT_PATCH}" CACHE STRING "ONNX2TRT project version")
-set(TRT_SOVERSION "${TRT_MAJOR}" CACHE STRING "TensorRT library so version")
-message("Building for TensorRT version: ${TRT_VERSION}, library version: ${TRT_SOVERSION}")
+# set(TRT_VERSION "${TRT_MAJOR}.${TRT_MINOR}.${TRT_PATCH}" CACHE STRING "TensorRT project version")
+# set(ONNX2TRT_VERSION "${TRT_MAJOR}.${TRT_MINOR}.${TRT_PATCH}" CACHE STRING "ONNX2TRT project version")
+# set(TRT_SOVERSION "${TRT_MAJOR}" CACHE STRING "TensorRT library so version")
+# message(STATUS "Building for TensorRT version: ${TRT_VERSION}, library version: ${TRT_SOVERSION}")

 # Set g++ cmake flag
 if(NOT DEFINED CMAKE_TOOLCHAIN_FILE)
@@ -63,11 +63,12 @@ if(NOT DEFINED CMAKE_TOOLCHAIN_FILE)
 endif()

 # Set project info : languages c++ and cuda
-project(TensorRT
-        LANGUAGES CXX CUDA
-        VERSION ${TRT_VERSION}
-        DESCRIPTION "TensorRT is a C++ library that facilitates high-performance inference on NVIDIA GPUs and deep learning accelerators."
-        HOMEPAGE_URL "https://github.com/NVIDIA/TensorRT")
+project(TensorRT_zqi)
+# project(TensorRT
+#         LANGUAGES CXX CUDA
+#         VERSION ${TRT_VERSION}
+#         DESCRIPTION "TensorRT is a C++ library that facilitates high-performance inference on NVIDIA GPUs and deep learning accelerators."
+#         HOMEPAGE_URL "https://github.com/NVIDIA/TensorRT")

 # Set cmake install path ../bin/
 if(CMAKE_INSTALL_PREFIX_INITIALIZED_TO_DEFAULT)
@@ -124,8 +125,8 @@ include_directories(
 )

 set(nvinfer_lib_name "nvinfer")
-set(nvinfer_plugin_lib_name "nvinfer_plugin")
-set(nvinfer_vc_plugin_lib_name "nvinfer_vc_plugin")
+# set(nvinfer_plugin_lib_name "nvinfer_plugin")
+# set(nvinfer_vc_plugin_lib_name "nvinfer_vc_plugin")
 set(nvonnxparser_lib_name "nvonnxparser")

 find_library_create_target(nvinfer ${nvinfer_lib_name} SHARED ${TRT_LIB_DIR})
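A note on the project() change: with the version-detection block commented out, ${TRT_VERSION} is never defined, so the original project(TensorRT ... VERSION ${TRT_VERSION} ...) call would fail at configure time; the stripped-down project(TensorRT_zqi) declaration avoids referencing the now-undefined variable.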

CMakeLists.txt.bk.20150105

Lines changed: 147 additions & 0 deletions
@@ -0,0 +1,147 @@
+#
+# SPDX-FileCopyrightText: Copyright (c) 1993-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+# SPDX-License-Identifier: Apache-2.0
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+cmake_minimum_required(VERSION 3.13 FATAL_ERROR)
+include(cmake/modules/set_ifndef.cmake)
+include(cmake/modules/find_library_create_target.cmake)
+
+# Set lib and out paths equal to the current build path, e.g.: tensorrt-github-samples/build
+set_ifndef(TRT_LIB_DIR ${CMAKE_BINARY_DIR})
+set_ifndef(TRT_OUT_DIR ${CMAKE_BINARY_DIR})
+
+if(CMAKE_VERSION VERSION_LESS 3.20)
+    file(TO_CMAKE_PATH "${TRT_LIB_DIR}" TRT_LIB_DIR)
+    file(TO_CMAKE_PATH "${TRT_OUT_DIR}" TRT_OUT_DIR)
+else()
+    cmake_path(SET TRT_LIB_DIR ${TRT_LIB_DIR})
+    cmake_path(SET TRT_OUT_DIR ${TRT_OUT_DIR})
+endif()
+message(STATUS "TRT_LIB_DIR: ${TRT_LIB_DIR}")
+message(STATUS "TRT_OUT_DIR: ${TRT_OUT_DIR}")
+
+# Set compile output paths
+set(RUNTIME_OUTPUT_DIRECTORY ${TRT_OUT_DIR} CACHE PATH "Output directory for runtime target files")
+set(LIBRARY_OUTPUT_DIRECTORY ${TRT_OUT_DIR} CACHE PATH "Output directory for library target files")
+set(ARCHIVE_OUTPUT_DIRECTORY ${TRT_OUT_DIR} CACHE PATH "Output directory for archive target files")
+
+set(STATIC_LIB_EXT "a")
+
+# Get tensorrt version info
+file(STRINGS "${CMAKE_CURRENT_SOURCE_DIR}/include/NvInferVersion.h" VERSION_STRINGS REGEX "#define NV_TENSORRT_.*")
+
+foreach(TYPE MAJOR MINOR PATCH BUILD)
+    string(REGEX MATCH "NV_TENSORRT_${TYPE} [0-9]+" TRT_TYPE_STRING ${VERSION_STRINGS})
+    string(REGEX MATCH "[0-9]+" TRT_${TYPE} ${TRT_TYPE_STRING})
+endforeach(TYPE)
+
+set(TRT_VERSION "${TRT_MAJOR}.${TRT_MINOR}.${TRT_PATCH}" CACHE STRING "TensorRT project version")
+set(ONNX2TRT_VERSION "${TRT_MAJOR}.${TRT_MINOR}.${TRT_PATCH}" CACHE STRING "ONNX2TRT project version")
+set(TRT_SOVERSION "${TRT_MAJOR}" CACHE STRING "TensorRT library so version")
+message(STATUS "Building for TensorRT version: ${TRT_VERSION}, library version: ${TRT_SOVERSION}")
+
+# Set g++ cmake flag
+if(NOT DEFINED CMAKE_TOOLCHAIN_FILE)
+    find_program(CMAKE_CXX_COMPILER NAMES $ENV{CXX} g++) # CMake's find_program is "lazy": it only searches and assigns when the variable is empty or undefined
+    message(STATUS "CMAKE_CXX_COMPILER is: ${CMAKE_CXX_COMPILER}")
+    if(NOT CMAKE_CXX_COMPILER)
+        message(FATAL_ERROR "C++ compiler not found. Please specify one using the CXX environment variable or ensure g++ is installed.")
+    endif()
+endif()
+
+# Set project info : languages c++ and cuda
+project(TensorRT
+        LANGUAGES CXX CUDA
+        VERSION ${TRT_VERSION}
+        DESCRIPTION "TensorRT is a C++ library that facilitates high-performance inference on NVIDIA GPUs and deep learning accelerators."
+        HOMEPAGE_URL "https://github.com/NVIDIA/TensorRT")
+
+# Set cmake install path ../bin/
+if(CMAKE_INSTALL_PREFIX_INITIALIZED_TO_DEFAULT)
+    set(CMAKE_INSTALL_PREFIX ${TRT_LIB_DIR}/../ CACHE PATH "TensorRT installation" FORCE)
+endif(CMAKE_INSTALL_PREFIX_INITIALIZED_TO_DEFAULT)
+
+option(BUILD_SAMPLES "Build TensorRT samples" ON)
+
+# Set C++ standard version
+set(CMAKE_CXX_STANDARD 17)
+set(CMAKE_CXX_STANDARD_REQUIRED ON)
+set(CMAKE_CXX_EXTENSIONS OFF)
+
+# Set cmake cxx flags, define: -DBUILD_SYSTEM=cmake_oss
+if(NOT MSVC)
+    set(CMAKE_CXX_FLAGS "-Wno-deprecated-declarations ${CMAKE_CXX_FLAGS} -DBUILD_SYSTEM=cmake_oss")
+else()
+    set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DBUILD_SYSTEM=cmake_oss")
+endif()
+
+# Set platform info, tbd
+set_ifndef(TRT_PLATFORM_ID "x86_64")
+message(STATUS "Targeting TRT Platform: ${TRT_PLATFORM_ID}")
+
+set(TRT_DEBUG_POSTFIX _debug CACHE STRING "suffix for debug builds")
+
+if (CMAKE_BUILD_TYPE STREQUAL "Debug")
+    message(STATUS "Building in debug mode ${DEBUG_POSTFIX}")
+endif()
+
+# Set dependency: threads, find cuda lib, include cuda CUDA_INCLUDE_DIRS
+set(DEFAULT_CUDA_VERSION 12.2.0)
+set(DEFAULT_CUDNN_VERSION 8.9)
+
+## Dependency Version Resolution
+set_ifndef(CUDA_VERSION ${DEFAULT_CUDA_VERSION})
+message(STATUS "CUDA version set to ${CUDA_VERSION}")
+set_ifndef(CUDNN_VERSION ${DEFAULT_CUDNN_VERSION})
+message(STATUS "cuDNN version set to ${CUDNN_VERSION}")
+
+set(THREADS_PREFER_PTHREAD_FLAG ON)
+find_package(Threads REQUIRED)
+
+message(STATUS "CUDA_INCLUDE_DIRS before find package: ${CUDA_INCLUDE_DIRS}")
+## find_package(CUDA) is broken for cross-compilation. Enable CUDA language instead.
+message(STATUS "CMAKE_PREFIX_PATH: ${CMAKE_PREFIX_PATH}")
+if(NOT DEFINED CMAKE_TOOLCHAIN_FILE)
+    find_package(CUDA ${CUDA_VERSION} REQUIRED EXACT) # Even when an exact version is requested, the locally installed CUDA 12.4 may still be found
+endif()
+message(STATUS "CUDA_INCLUDE_DIRS after find package: ${CUDA_INCLUDE_DIRS}")
+
+include_directories(
+    ${CUDA_INCLUDE_DIRS}
+)
+
+set(nvinfer_lib_name "nvinfer")
+set(nvinfer_plugin_lib_name "nvinfer_plugin")
+set(nvinfer_vc_plugin_lib_name "nvinfer_vc_plugin")
+set(nvonnxparser_lib_name "nvonnxparser")
+
+find_library_create_target(nvinfer ${nvinfer_lib_name} SHARED ${TRT_LIB_DIR})
+find_library(CUDART_LIB cudart_static HINTS ${CUDA_TOOLKIT_ROOT_DIR} PATH_SUFFIXES lib lib/x64 lib64)
+
+if (NOT MSVC)
+    find_library(RT_LIB rt)
+endif()
+
+set(CUDA_LIBRARIES ${CUDART_LIB})
+
+set(CMAKE_CUDA_FLAGS "${CMAKE_CUDA_FLAGS} --expt-relaxed-constexpr -Xcompiler -Wno-deprecated-declarations")
+
+############################################################################################
+# TensorRT
+
+if(BUILD_SAMPLES)
+    add_subdirectory(samples)
+endif()
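This dated file appears to be a verbatim backup of CMakeLists.txt taken before the edits above: it still contains the version detection, the full project(TensorRT ...) declaration, and the plugin library names that the active CMakeLists.txt now comments out.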

README.md

Lines changed: 1 addition & 1 deletion
@@ -38,7 +38,7 @@ https://stackoverflow.com/questions/54429210/how-do-i-prevent-conda-from-activat
 ./build/sample_onnx_mnist -d ../data/tensorrt-sample-data/mnist/ --fp16
 # 5. What to do when ssh fails with the following error:
 ssh: connect to host github.com port 22: Connection refused
-Try this:
+Try this: https://stackoverflow.com/questions/7953806/github-ssh-via-public-wifi-port-22-blocked/45473512#45473512
 $ vim ~/.ssh/config and Add
 Host github.com
     Hostname ssh.github.com

cmake/modules/find_library_create_target.cmake

Lines changed: 4 additions & 1 deletion
@@ -18,10 +18,13 @@
 macro(find_library_create_target target_name lib libtype hints)
     message(STATUS "========================= Importing and creating target ${target_name} ==========================")
     message(STATUS "Looking for library ${lib}")
+    message(STATUS "Library that was found ${${lib}_LIB_PATH}")
     if (CMAKE_BUILD_TYPE STREQUAL "Debug")
         find_library(${lib}_LIB_PATH ${lib}${TRT_DEBUG_POSTFIX} HINTS ${hints} NO_DEFAULT_PATH)
-    endif()
+        message(STATUS "Library that was found ${${lib}_LIB_PATH}")
+    endif()
     find_library(${lib}_LIB_PATH ${lib} HINTS ${hints} NO_DEFAULT_PATH)
+    message(STATUS "Library that was found ${${lib}_LIB_PATH}")
     find_library(${lib}_LIB_PATH ${lib})
     message(STATUS "Library that was found ${${lib}_LIB_PATH}")
     add_library(${target_name} ${libtype} IMPORTED)
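Since find_library caches its result in ${lib}_LIB_PATH, a later find_library call for the same variable is a no-op once the library has been found; the message lines added before and after each call therefore trace exactly which of the three lookups (debug-postfix, hinted, default-path) resolved the library.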

sampleMNIST.engine

277 KB
Binary file not shown.
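The committed sampleMNIST.engine binary is presumably a serialized engine produced by an earlier run; the name matches the default "sampleMNIST.engine" left commented out next to the save call in sampleOnnxMNIST.cpp below.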

samples/common/argsParser.h

Lines changed: 5 additions & 2 deletions
@@ -92,7 +92,7 @@ inline bool parseArgs(Args& args, int32_t argc, char* argv[])
         {"useDLACore", required_argument, 0, 'u'}, {"batch", required_argument, 0, 'b'},
         {"timingCacheFile", required_argument, 0, 't'}, {nullptr, 0, nullptr, 0}};
     int32_t option_index = 0;
-    arg = getopt_long(argc, argv, "hd:iu", long_options, &option_index);
+    arg = getopt_long(argc, argv, "hd:iu:o:s:", long_options, &option_index);
     if (arg == -1)
     {
         break;
@@ -103,7 +103,8 @@ inline bool parseArgs(Args& args, int32_t argc, char* argv[])
     case 'h': args.help = true; return true;
     case 'd':
         if (optarg)
-        {
+        {
+            std::cerr << "INFO: --datadir is valid" << std::endl;
             args.dataDirs.push_back(optarg);
         }
         else
@@ -116,12 +117,14 @@ inline bool parseArgs(Args& args, int32_t argc, char* argv[])
         if (optarg)
         {
             args.saveEngine = optarg;
+            std::cout << "parseArgs: save engine: " << args.saveEngine << std::endl;
         }
         break;
    case 'o':
        if (optarg)
        {
            args.loadEngine = optarg;
+            std::cerr << "INFO: --loadEngine is valid: " << optarg << std::endl;
        }
        break;
    case 'i': args.runInInt8 = true; break;
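For context on the optstring change: in getopt_long, a colon after a short-option character means that option requires an argument, so extending "hd:iu" to "hd:iu:o:s:" is what lets -s/--saveEngine and -o/--loadEngine consume their path values (and makes -u take its DLA-core argument). A minimal standalone sketch, using a hypothetical demo table rather than the sample's own:

#include <getopt.h>
#include <iostream>

int main(int argc, char* argv[])
{
    static const struct option long_options[] = {
        {"saveEngine", required_argument, nullptr, 's'},
        {"loadEngine", required_argument, nullptr, 'o'},
        {nullptr, 0, nullptr, 0}};
    int option_index = 0;
    int arg = 0;
    // "hd:iu:o:s:": 'h' and 'i' are flags; 'd', 'u', 'o', and 's' each require a value
    while ((arg = getopt_long(argc, argv, "hd:iu:o:s:", long_options, &option_index)) != -1)
    {
        switch (arg)
        {
        case 's': std::cout << "saveEngine = " << optarg << std::endl; break; // --saveEngine <path>
        case 'o': std::cout << "loadEngine = " << optarg << std::endl; break; // --loadEngine <path>
        default: break;
        }
    }
    return 0;
}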

samples/sampleOnnxMNIST/CMakeLists.txt

Lines changed: 5 additions & 0 deletions
@@ -16,6 +16,11 @@
 #
 SET(SAMPLE_SOURCES
     sampleOnnxMNIST.cpp
+    ../common/sampleEngines.cpp # provides sample::saveEngine
+    ../common/sampleDevice.cpp # this and the files below are dependencies needed for saveEngine
+    ../common/sampleOptions.cpp
+    ../common/sampleUtils.cpp
+    ../common/bfloat16.cpp
 )

 set(SAMPLE_PARSERS "onnx")
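The extra sources are plausibly needed because sample::saveEngine is implemented in sampleEngines.cpp, which in turn references code from sampleOptions.cpp, sampleDevice.cpp, sampleUtils.cpp, and bfloat16.cpp; compiling all five into the sample lets it link successfully.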

samples/sampleOnnxMNIST/sampleOnnxMNIST.cpp

Lines changed: 25 additions & 7 deletions
@@ -33,6 +33,7 @@
 #include "common.h"
 #include "logger.h"
 #include "parserOnnxConfig.h"
+#include "sampleEngines.h"

 #include "NvInfer.h"
 #include <cuda_runtime_api.h>
@@ -46,14 +47,20 @@ using samplesCommon::SampleUniquePtr;

 const std::string gSampleName = "TensorRT.sample_onnx_mnist";

+struct sampleOnnxMNISTParams : samplesCommon::OnnxSampleParams
+{
+    std::string saveEngine;
+    std::string loadEngine;
+};
+
 //! \brief The SampleOnnxMNIST class implements the ONNX MNIST sample
 //!
 //! \details It creates the network using an ONNX model
 //!
 class SampleOnnxMNIST
 {
 public:
-    SampleOnnxMNIST(const samplesCommon::OnnxSampleParams& params)
+    SampleOnnxMNIST(const sampleOnnxMNISTParams& params)
         : mParams(params)
         , mRuntime(nullptr)
         , mEngine(nullptr)
@@ -71,7 +78,7 @@ class SampleOnnxMNIST
     bool infer();

 private:
-    samplesCommon::OnnxSampleParams mParams; //!< The parameters for the sample.
+    sampleOnnxMNISTParams mParams; //!< The parameters for the sample.

     nvinfer1::Dims mInputDims; //!< The dimensions of the input to the network.
     nvinfer1::Dims mOutputDims; //!< The dimensions of the output to the network.
@@ -108,6 +115,7 @@ class SampleOnnxMNIST
 //!
 bool SampleOnnxMNIST::build()
 {
+    // zqi tbd: when is the builder's pointer member mImpl initialized? // GPT: the builder's mImpl member is initialized inside the createInferBuilder_INTERNAL function
     auto builder = SampleUniquePtr<nvinfer1::IBuilder>(nvinfer1::createInferBuilder(sample::gLogger.getTRTLogger()));
     if (!builder)
     {
@@ -125,9 +133,9 @@ bool SampleOnnxMNIST::build()
     {
         return false;
     }
-
+
     auto parser
-        = SampleUniquePtr<nvonnxparser::IParser>(nvonnxparser::createParser(*network, sample::gLogger.getTRTLogger()));
+        = SampleUniquePtr<nvonnxparser::IParser>(nvonnxparser::createParser(*network, sample::gLogger.getTRTLogger())); // zqi: associates the network with the parser: https://github.com/onnx/onnx-tensorrt/blob/9c69a24bc2e20c8a511a4e6b06fd49639ec5300a/NvOnnxParser.cpp
     if (!parser)
     {
         return false;
@@ -149,7 +157,8 @@ bool SampleOnnxMNIST::build()
     }
     config->setProfileStream(*profileStream);

-    SampleUniquePtr<IHostMemory> plan{builder->buildSerializedNetwork(*network, *config)};
+    // Produces serialized engine data, which is typically written to a file or handed to other components for further processing or inference
+    SampleUniquePtr<IHostMemory> plan{builder->buildSerializedNetwork(*network, *config)};
     if (!plan)
     {
         return false;
@@ -174,6 +183,13 @@ bool SampleOnnxMNIST::build()
         return false;
     }

+    // Save the serialized engine via the saveEngine helper
+    std::ofstream errStream; // error output stream (std::cerr or a file stream both work)
+    std::string engineFileName = mParams.saveEngine; //"sampleMNIST.engine"; // the file name to save under
+    if (!sample::saveEngine(*mEngine, engineFileName, std::cerr)) {
+        return false; // saving failed, return false
+    }
+
     ASSERT(network->getNbInputs() == 1);
     mInputDims = network->getInput(0)->getDimensions();
     ASSERT(mInputDims.nbDims == 4);
@@ -353,9 +369,9 @@ bool SampleOnnxMNIST::verifyOutput(const samplesCommon::BufferManager& buffers)
 //!
 //! \brief Initializes members of the params struct using the command line args
 //!
-samplesCommon::OnnxSampleParams initializeSampleParams(const samplesCommon::Args& args)
+sampleOnnxMNISTParams initializeSampleParams(const samplesCommon::Args& args)
 {
-    samplesCommon::OnnxSampleParams params;
+    sampleOnnxMNISTParams params;
     if (args.dataDirs.empty()) // Use default directories if user hasn't provided directory paths
     {
         params.dataDirs.push_back("data/mnist/");
@@ -373,6 +389,8 @@ samplesCommon::OnnxSampleParams initializeSampleParams(const samplesCommon::Args
     params.fp16 = args.runInFp16;
     params.bf16 = args.runInBf16;
     params.timingCacheFile = args.timingCacheFile;
+    params.saveEngine = args.saveEngine;
+    std::cout << "save Engine path: " << params.saveEngine << std::endl;

     return params;
 }
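To make the new save step concrete, here is a minimal sketch of what a saveEngine-style helper typically does in TensorRT code: serialize the built engine into a host-memory blob and write it to disk. This is an illustrative stand-in under that assumption, not the actual sample::saveEngine from samples/common/sampleEngines.cpp:

#include <fstream>
#include <memory>
#include <ostream>
#include <string>

#include "NvInfer.h"

// Hypothetical stand-in for sample::saveEngine: serialize the built engine
// and write the resulting blob to fileName, reporting failures to err.
bool saveEngineSketch(const nvinfer1::ICudaEngine& engine, const std::string& fileName, std::ostream& err)
{
    std::ofstream engineFile(fileName, std::ios::binary);
    if (!engineFile)
    {
        err << "Cannot open engine file: " << fileName << std::endl;
        return false;
    }
    // serialize() returns the engine as an opaque IHostMemory blob
    std::unique_ptr<nvinfer1::IHostMemory> serializedEngine{engine.serialize()};
    if (serializedEngine == nullptr)
    {
        err << "Engine serialization failed" << std::endl;
        return false;
    }
    engineFile.write(static_cast<char const*>(serializedEngine->data()), serializedEngine->size());
    return !engineFile.fail();
}

Note that build() passes std::cerr directly as the error stream, so the errStream it declares is left unused.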
