@@ -94,7 +94,7 @@ def aten_ops_fmod(
     return impl.elementwise.fmod(network, target, SourceIR.ATEN, name, args[0], args[1])


-@dynamo_tensorrt_converter(torch.ops.aten.relu.default)
+@dynamo_tensorrt_converter(torch.ops.aten.relu.default)  # type: ignore[misc]
 def aten_ops_relu(
     network: TRTNetwork,
     target: Target,
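The recurring change in this diff is mechanical: every `@dynamo_tensorrt_converter(...)` application gains a trailing `# type: ignore[misc]`. mypy emits an `[misc]` error ("Untyped decorator makes function ... untyped") when a decorator whose own type resolves to Any is applied to an annotated function, and the per-line ignore suppresses exactly that diagnostic while leaving the converter's own signature checked. A minimal sketch reproducing the situation, where the registry and decorator internals are simplified stand-ins rather than the library's actual implementation:

    from typing import Any, Callable, Dict

    CONVERTERS: Dict[Any, Callable[..., Any]] = {}

    # Stand-in for dynamo_tensorrt_converter: the outer function carries no
    # annotations, so mypy treats the decorator as untyped and flags every
    # annotated function it wraps with error code [misc].
    def dynamo_tensorrt_converter(key):
        def register(fn):
            CONVERTERS[key] = fn
            return fn
        return register

    @dynamo_tensorrt_converter("aten.relu.default")  # type: ignore[misc]
    def aten_ops_relu(network: object, target: object, name: str) -> object:
        ...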
@@ -111,7 +111,7 @@ def aten_ops_relu(
     )


-@dynamo_tensorrt_converter(torch.ops.aten.sigmoid.default)
+@dynamo_tensorrt_converter(torch.ops.aten.sigmoid.default)  # type: ignore[misc]
 def aten_ops_sigmoid(
     network: TRTNetwork,
     target: Target,
@@ -128,7 +128,7 @@ def aten_ops_sigmoid(
     )


-@dynamo_tensorrt_converter(torch.ops.aten.tanh.default)
+@dynamo_tensorrt_converter(torch.ops.aten.tanh.default)  # type: ignore[misc]
 def aten_ops_tanh(
     network: TRTNetwork,
     target: Target,
@@ -145,7 +145,7 @@ def aten_ops_tanh(
     )


-@dynamo_tensorrt_converter(torch.ops.aten.leaky_relu.default)
+@dynamo_tensorrt_converter(torch.ops.aten.leaky_relu.default)  # type: ignore[misc]
 def aten_ops_leaky_relu(
     network: TRTNetwork,
     target: Target,
@@ -163,7 +163,7 @@ def aten_ops_leaky_relu(
     )


-@dynamo_tensorrt_converter(torch.ops.aten.elu.default)
+@dynamo_tensorrt_converter(torch.ops.aten.elu.default)  # type: ignore[misc]
 def aten_ops_elu(
     network: TRTNetwork,
     target: Target,
@@ -182,7 +182,7 @@ def aten_ops_elu(
     )


-@dynamo_tensorrt_converter(torch.ops.aten.softplus.default)
+@dynamo_tensorrt_converter(torch.ops.aten.softplus.default)  # type: ignore[misc]
 def aten_ops_softplus(
     network: TRTNetwork,
     target: Target,
@@ -200,7 +200,7 @@ def aten_ops_softplus(
     )


-@dynamo_tensorrt_converter(torch.ops.aten.clip.default)
+@dynamo_tensorrt_converter(torch.ops.aten.clip.default)  # type: ignore[misc]
 def aten_ops_clip(
     network: TRTNetwork,
     target: Target,
@@ -219,7 +219,7 @@ def aten_ops_clip(
     )


-@dynamo_tensorrt_converter(torch.ops.aten.hardsigmoid.default)
+@dynamo_tensorrt_converter(torch.ops.aten.hardsigmoid.default)  # type: ignore[misc]
 def aten_ops_hard_sigmoid(
     network: TRTNetwork,
     target: Target,
@@ -296,26 +296,20 @@ def aten_ops_rsqrt(
     )


-@dynamo_tensorrt_converter(torch.ops.aten.neg.default)
+@dynamo_tensorrt_converter(torch.ops.aten.neg.default)  # type: ignore[misc]
 def aten_ops_neg(
     network: TRTNetwork,
     target: Target,
     args: Tuple[Argument, ...],
     kwargs: Dict[str, Argument],
     name: str,
 ) -> Union[TRTTensor, Sequence[TRTTensor]]:
-    input_val = args[0]
-    if (isinstance(input_val, TRTTensor)) and (
-        input_val.dtype == trt.int8 or input_val.dtype == trt.int32
-    ):
-        input_val = cast_trt_tensor(network, input_val, trt.float32, name)
-
     return impl.unary.neg(
         network,
         target,
         SourceIR.ATEN,
         name,
-        input_val,
+        args[0],
     )

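Beyond the decorator annotation, this hunk also deletes the int8/int32-to-float32 pre-cast that `aten_ops_neg` performed before delegating, so the converter now forwards `args[0]` unchanged. The natural reading is that dtype promotion moves inside `impl.unary.neg` itself rather than being repeated at each call site. A hedged sketch of what such centralized handling could look like, reusing `cast_trt_tensor` with the signature visible in the removed lines (the function body and the helper's import path are assumptions, not the repository's actual implementation):

    import tensorrt as trt

    # Assumed import path for the repo's casting helper.
    from torch_tensorrt.dynamo.conversion.converter_utils import cast_trt_tensor

    def neg(network, target, source_ir, name, input_val):
        # TensorRT's unary NEG does not accept int8/int32 inputs, so promote
        # them to float32 first -- the same guard the removed lines applied
        # at the aten_ops_neg call site.
        if isinstance(input_val, trt.ITensor) and input_val.dtype in (trt.int8, trt.int32):
            input_val = cast_trt_tensor(network, input_val, trt.float32, name)
        layer = network.add_unary(input_val, trt.UnaryOperation.NEG)
        layer.name = name
        return layer.get_output(0)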
@@ -503,7 +497,7 @@ def aten_ops_clone(
     )


-@dynamo_tensorrt_converter(torch.ops.aten.expand.default)
+@dynamo_tensorrt_converter(torch.ops.aten.expand.default)  # type: ignore[misc]
 def aten_ops_expand(
     network: TRTNetwork,
     target: Target,
@@ -533,7 +527,7 @@ def amax_param_validator(amax_node: Node) -> bool:

 @dynamo_tensorrt_converter(
     torch.ops.aten.amax.default, capability_validator=amax_param_validator
-)
+)  # type: ignore[misc]
 def aten_ops_amax(
     network: TRTNetwork,
     target: Target,
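Unlike the single-line registrations, `amax` registers with a `capability_validator`: a predicate over the FX `Node` that is consulted per call site, so the converter is used only for argument patterns it actually supports and unsupported calls fall back to PyTorch execution. A plausible shape for such a validator; the criterion below is illustrative, not the file's actual check:

    from torch.fx.node import Node

    def amax_param_validator(amax_node: Node) -> bool:
        # Hypothetical criterion: accept only calls that pass an explicit
        # reduction dimension. Returning False routes this node to the
        # fallback path instead of the TRT converter.
        return len(amax_node.args) >= 2 and amax_node.args[1] is not None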
@@ -552,8 +546,8 @@ def aten_ops_amax(
     )


-@dynamo_tensorrt_converter(torch.ops.aten.sum.default)
-@dynamo_tensorrt_converter(torch.ops.aten.sum.dim_IntList)
+@dynamo_tensorrt_converter(torch.ops.aten.sum.default)  # type: ignore[misc]
+@dynamo_tensorrt_converter(torch.ops.aten.sum.dim_IntList)  # type: ignore[misc]
 def aten_ops_sum(
     network: TRTNetwork,
     target: Target,
@@ -946,8 +940,8 @@ def aten_ops_isinf(
     )


-@dynamo_tensorrt_converter(torch.ops.aten.add.Tensor)
-@dynamo_tensorrt_converter(torch.ops.aten.add.Scalar)
+@dynamo_tensorrt_converter(torch.ops.aten.add.Tensor)  # type: ignore[misc]
+@dynamo_tensorrt_converter(torch.ops.aten.add.Scalar)  # type: ignore[misc]
 def aten_ops_add(
     network: TRTNetwork,
     target: Target,
@@ -978,8 +972,8 @@ def aten_ops_add(
     )


-@dynamo_tensorrt_converter(torch.ops.aten.mul.Tensor)
-@dynamo_tensorrt_converter(torch.ops.aten.mul.Scalar)
+@dynamo_tensorrt_converter(torch.ops.aten.mul.Tensor)  # type: ignore[misc]
+@dynamo_tensorrt_converter(torch.ops.aten.mul.Scalar)  # type: ignore[misc]
 def aten_ops_mul(
     network: TRTNetwork,
     target: Target,
@@ -997,7 +991,7 @@ def aten_ops_mul(
     )


-@dynamo_tensorrt_converter(torch.ops.aten.maximum.default)
+@dynamo_tensorrt_converter(torch.ops.aten.maximum.default)  # type: ignore[misc]
 def aten_ops_max(
     network: TRTNetwork,
     target: Target,
@@ -1015,7 +1009,7 @@ def aten_ops_max(
     )


-@dynamo_tensorrt_converter(torch.ops.aten.minimum.default)
+@dynamo_tensorrt_converter(torch.ops.aten.minimum.default)  # type: ignore[misc]
 def aten_ops_min(
     network: TRTNetwork,
     target: Target,
@@ -1033,8 +1027,8 @@ def aten_ops_min(
     )


-@dynamo_tensorrt_converter(torch.ops.aten.sub.Tensor)
-@dynamo_tensorrt_converter(torch.ops.aten.sub.Scalar)
+@dynamo_tensorrt_converter(torch.ops.aten.sub.Tensor)  # type: ignore[misc]
+@dynamo_tensorrt_converter(torch.ops.aten.sub.Scalar)  # type: ignore[misc]
 def aten_ops_sub(
     network: TRTNetwork,
     target: Target,
@@ -1065,10 +1059,10 @@ def aten_ops_sub(
     )


-@dynamo_tensorrt_converter(torch.ops.aten.div.Tensor)
-@dynamo_tensorrt_converter(torch.ops.aten.div.Tensor_mode)
-@dynamo_tensorrt_converter(torch.ops.aten.div.Scalar)
-@dynamo_tensorrt_converter(torch.ops.aten.div.Scalar_mode)
+@dynamo_tensorrt_converter(torch.ops.aten.div.Tensor)  # type: ignore[misc]
+@dynamo_tensorrt_converter(torch.ops.aten.div.Tensor_mode)  # type: ignore[misc]
+@dynamo_tensorrt_converter(torch.ops.aten.div.Scalar)  # type: ignore[misc]
+@dynamo_tensorrt_converter(torch.ops.aten.div.Scalar_mode)  # type: ignore[misc]
 def aten_ops_div(
     network: TRTNetwork,
     target: Target,
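Four `div` overloads are stacked onto one converter here: each decorator call registers `aten_ops_div` under a different ATen target key. The `*_mode` variants carry an extra `rounding_mode` argument (`None`, `"floor"`, or `"trunc"`), so a single function can branch on it and reuse the true-division and floor-division paths. A hedged sketch of that dispatch; the helper names mirror the `impl.elementwise` style used elsewhere in this file, but the exact branch bodies are assumptions:

    def aten_ops_div(network, target, args, kwargs, name):
        # Overloads without a mode default to true division.
        rounding_mode = kwargs.get("rounding_mode", None)
        if rounding_mode is None:
            return impl.elementwise.div(network, target, SourceIR.ATEN, name, args[0], args[1])
        if rounding_mode == "floor":
            return impl.elementwise.floor_divide(network, target, SourceIR.ATEN, name, args[0], args[1])
        if rounding_mode == "trunc":
            return impl.elementwise.trunc_div(network, target, SourceIR.ATEN, name, args[0], args[1])
        raise RuntimeError(f"Unsupported rounding mode: {rounding_mode}")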
@@ -1111,9 +1105,9 @@ def aten_ops_div(
     )


-@dynamo_tensorrt_converter(torch.ops.aten.pow.Tensor_Tensor)
-@dynamo_tensorrt_converter(torch.ops.aten.pow.Scalar)
-@dynamo_tensorrt_converter(torch.ops.aten.pow.Tensor_Scalar)
+@dynamo_tensorrt_converter(torch.ops.aten.pow.Tensor_Tensor)  # type: ignore[misc]
+@dynamo_tensorrt_converter(torch.ops.aten.pow.Scalar)  # type: ignore[misc]
+@dynamo_tensorrt_converter(torch.ops.aten.pow.Tensor_Scalar)  # type: ignore[misc]
 def aten_ops_pow(
     network: TRTNetwork,
     target: Target,
@@ -1131,8 +1125,8 @@ def aten_ops_pow(
     )


-@dynamo_tensorrt_converter(torch.ops.aten.floor_divide.default)
-@dynamo_tensorrt_converter(torch.ops.aten.floor_divide.Scalar)
+@dynamo_tensorrt_converter(torch.ops.aten.floor_divide.default)  # type: ignore[misc]
+@dynamo_tensorrt_converter(torch.ops.aten.floor_divide.Scalar)  # type: ignore[misc]
 def aten_ops_floor_div(
     network: TRTNetwork,
     target: Target,
@@ -1150,7 +1144,7 @@ def aten_ops_floor_div(
     )


-@dynamo_tensorrt_converter(torch.ops.aten.logical_and.default)
+@dynamo_tensorrt_converter(torch.ops.aten.logical_and.default)  # type: ignore[misc]
 def aten_ops_logical_and(
     network: TRTNetwork,
     target: Target,
@@ -1168,7 +1162,7 @@ def aten_ops_logical_and(
     )


-@dynamo_tensorrt_converter(torch.ops.aten.logical_or.default)
+@dynamo_tensorrt_converter(torch.ops.aten.logical_or.default)  # type: ignore[misc]
 def aten_ops_logical_or(
     network: TRTNetwork,
     target: Target,
@@ -1186,7 +1180,7 @@ def aten_ops_logical_or(
     )


-@dynamo_tensorrt_converter(torch.ops.aten.logical_xor.default)
+@dynamo_tensorrt_converter(torch.ops.aten.logical_xor.default)  # type: ignore[misc]
 def aten_ops_logical_xor(
     network: TRTNetwork,
     target: Target,
@@ -1204,8 +1198,8 @@ def aten_ops_logical_xor(
     )


-@dynamo_tensorrt_converter(torch.ops.aten.eq.Tensor)
-@dynamo_tensorrt_converter(torch.ops.aten.eq.Scalar)
+@dynamo_tensorrt_converter(torch.ops.aten.eq.Tensor)  # type: ignore[misc]
+@dynamo_tensorrt_converter(torch.ops.aten.eq.Scalar)  # type: ignore[misc]
 def aten_ops_equal(
     network: TRTNetwork,
     target: Target,
@@ -1223,8 +1217,8 @@ def aten_ops_equal(
     )


-@dynamo_tensorrt_converter(torch.ops.aten.gt.Tensor)
-@dynamo_tensorrt_converter(torch.ops.aten.gt.Scalar)
+@dynamo_tensorrt_converter(torch.ops.aten.gt.Tensor)  # type: ignore[misc]
+@dynamo_tensorrt_converter(torch.ops.aten.gt.Scalar)  # type: ignore[misc]
 def aten_ops_greater(
     network: TRTNetwork,
     target: Target,
@@ -1242,8 +1236,8 @@ def aten_ops_greater(
     )


-@dynamo_tensorrt_converter(torch.ops.aten.lt.Tensor)
-@dynamo_tensorrt_converter(torch.ops.aten.lt.Scalar)
+@dynamo_tensorrt_converter(torch.ops.aten.lt.Tensor)  # type: ignore[misc]
+@dynamo_tensorrt_converter(torch.ops.aten.lt.Scalar)  # type: ignore[misc]
 def aten_ops_less(
     network: TRTNetwork,
     target: Target,
@@ -1267,7 +1261,7 @@ def conv_param_validator(conv_node: Node) -> bool:

 @dynamo_tensorrt_converter(
     torch.ops.aten.convolution.default, capability_validator=conv_param_validator
-)
+)  # type: ignore[misc]
 def aten_ops_convolution(
     network: TRTNetwork,
     target: Target,
@@ -1291,7 +1285,7 @@ def aten_ops_convolution(
     )


-@dynamo_tensorrt_converter(torch.ops.aten.linear.default)
+@dynamo_tensorrt_converter(torch.ops.aten.linear.default)  # type: ignore[misc]
 def aten_ops_linear(
     network: TRTNetwork,
     target: Target,