@@ -57,7 +57,6 @@ void TensorRTEngine::Execute(int batch_size, std::vector<void *> *buffers,
   } else {
 #if IS_TRT_VERSION_GE(6000)
     infer_context->enqueueV2(buffers->data(), stream, nullptr);
-    GetEngineInfo();
 #endif
   }
   SetRuntimeBatch(batch_size);
@@ -244,8 +243,10 @@ void TensorRTEngine::FreezeNetwork() {
 #endif
   }
 #if IS_TRT_VERSION_GE(8200)
-  infer_builder_config_->setProfilingVerbosity(
-      nvinfer1::ProfilingVerbosity::kDETAILED);
+  if (use_inspector_) {
+    infer_builder_config_->setProfilingVerbosity(
+        nvinfer1::ProfilingVerbosity::kDETAILED);
+  }
 #endif
 
 #if IS_TRT_VERSION_LT(8000)
@@ -411,6 +412,21 @@ void TensorRTEngine::freshDeviceId() {
   platform::SetDeviceId(device_id_);
 }
 
+void TensorRTEngine::GetEngineInfo() {
+#if IS_TRT_VERSION_GE(8200)
+  LOG(INFO) << "====== engine info ======";
+  std::unique_ptr<nvinfer1::IEngineInspector> infer_inspector(
+      infer_engine_->createEngineInspector());
+  auto infer_context = context();
+  infer_inspector->setExecutionContext(infer_context);
+  LOG(INFO) << infer_inspector->getEngineInformation(
+      nvinfer1::LayerInformationFormat::kONELINE);
+  LOG(INFO) << "====== engine info end ======";
+#else
+  LOG(INFO) << "Inspector needs TensorRT version 8.2 or later.";
+#endif
+}
+
 }  // namespace tensorrt
 }  // namespace inference
 }  // namespace paddle
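
For reference, below is a minimal standalone sketch of the TensorRT 8.2+ inspector flow that the new GetEngineInfo() wraps. DumpEngineInfo and its parameters are illustrative names, not part of the patch. Note that the inspector only reports full per-layer detail when the engine was built with ProfilingVerbosity::kDETAILED, which is why the patch gates that builder setting on use_inspector_.

#include <NvInfer.h>

#include <iostream>
#include <memory>

// Illustrative helper (hypothetical, not from the patch): prints engine info
// for an already-built engine, mirroring TensorRTEngine::GetEngineInfo().
void DumpEngineInfo(nvinfer1::ICudaEngine* engine,
                    nvinfer1::IExecutionContext* context) {
#if NV_TENSORRT_MAJOR > 8 || (NV_TENSORRT_MAJOR == 8 && NV_TENSORRT_MINOR >= 2)
  std::unique_ptr<nvinfer1::IEngineInspector> inspector(
      engine->createEngineInspector());
  // Binding an execution context lets the inspector resolve runtime shapes.
  inspector->setExecutionContext(context);
  std::cout << inspector->getEngineInformation(
                   nvinfer1::LayerInformationFormat::kONELINE)
            << std::endl;
#else
  std::cout << "IEngineInspector requires TensorRT 8.2 or later." << std::endl;
#endif
}

kONELINE prints one summary line per layer; TensorRT also offers LayerInformationFormat::kJSON when structured output is preferred.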