@@ -71,13 +71,11 @@ def compile(
     engine_capability: EngineCapability = ENGINE_CAPABILITY,
     refit: bool = REFIT,
     debug: bool = DEBUG,
-    capability: EngineCapability = EngineCapability.default,
     num_avg_timing_iters: int = NUM_AVG_TIMING_ITERS,
     workspace_size: int = WORKSPACE_SIZE,
     dla_sram_size: int = DLA_SRAM_SIZE,
     dla_local_dram_size: int = DLA_LOCAL_DRAM_SIZE,
     dla_global_dram_size: int = DLA_GLOBAL_DRAM_SIZE,
-    calibrator: object = None,
     truncate_long_and_double: bool = TRUNCATE_LONG_AND_DOUBLE,
     require_full_compilation: bool = REQUIRE_FULL_COMPILATION,
     min_block_size: int = MIN_BLOCK_SIZE,
@@ -156,6 +154,12 @@ def compile(
     if debug:
         set_log_level(logger.parent, logging.DEBUG)
 
+    if torch_executed_modules is not None and torch_executed_modules:
+        logger.warning(
+            f"Detected torch_executed_modules was non-empty: {torch_executed_modules}"
+            "\nThis feature is unimplemented in Torch-TRT Dynamo currently."
+        )
+
     if not isinstance(inputs, collections.abc.Sequence):
         inputs = [inputs]
 
@@ -214,6 +218,7 @@ def compile(
         "use_python_runtime": use_python_runtime,
         "truncate_long_and_double": truncate_long_and_double,
         "use_fast_partitioner": use_fast_partitioner,
+        "num_avg_timing_iters": num_avg_timing_iters,
         "enable_experimental_decompositions": enable_experimental_decompositions,
         "require_full_compilation": require_full_compilation,
         "disable_tf32": disable_tf32,