Skip to content

Commit 8385253

Browse files
committed
chore: Update generateRandomTensors uses
Signed-off-by: Naren Dasan <[email protected]> Signed-off-by: Naren Dasan <[email protected]>
1 parent b2a5183 commit 8385253

File tree

7 files changed

+30
-21
lines changed

7 files changed

+30
-21
lines changed

core/compiler.cpp

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -322,7 +322,7 @@ void MapInputsAndDetermineDTypes(
322322
est_type_opt = first_use_type_map.find(in)->second;
323323
}
324324
// traverse elements in est_type_out and spec
325-
for (int i = 0; i < est_type_opt.size(); i++) {
325+
for (size_t i = 0; i < est_type_opt.size(); i++) {
326326
if (est_type_opt[i] && !spec[i].dtype_is_user_defined) {
327327
// If we can calculate the type from the graph and the type was not defined by the user then use the calculated
328328
// type

core/conversion/evaluators/aten.cpp

Lines changed: 15 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -264,6 +264,21 @@ auto aten_registrations TORCHTRT_UNUSED =
264264
},
265265
EvalOptions().validSchemas(
266266
{"aten::size(Tensor self) -> (int[])", "aten::size.int(Tensor self, int dim) -> (int)"})})
267+
.evaluator({c10::Symbol::fromQualString("aten::__getitem__"),
268+
[](const torch::jit::Node* n, kwargs& args) -> c10::optional<torch::jit::IValue> {
269+
auto list = args.at(n->input(0)).IValue()->to<c10::List<c10::IValue>>();
270+
auto idx = args.at(n->input(1)).unwrapToInt();
271+
272+
const int64_t list_size = list.size();
273+
const int64_t normalized_idx = normalizeIndex(idx, list_size);
274+
TORCHTRT_CHECK(
275+
normalized_idx >= 0 || normalized_idx < list_size,
276+
"List index out of range (aten::__getitem__)");
277+
return list.get(normalized_idx);
278+
},
279+
EvalOptions().validSchemas({
280+
"aten::__getitem__.t(t[](a) list, int idx) -> (t(*))",
281+
})})
267282
.evaluator({c10::Symbol::fromQualString("aten::append"),
268283
[](const torch::jit::Node* n, kwargs& args) -> c10::optional<torch::jit::IValue> {
269284
auto list = args.at(n->input(0)).IValue()->to<c10::List<c10::IValue>>();

core/ir/GraphInputs.cpp

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -5,7 +5,7 @@ namespace torch_tensorrt {
55
namespace core {
66
namespace ir {
77

8-
void flatten_dfs(std::vector<torch_tensorrt::core::ir::Input>& flattened_inputs, std::vector<std::vector<torch_tensorrt::core::ir::Input>>& collection_inputs,
8+
void flatten_dfs(std::vector<torch_tensorrt::core::ir::Input>& flattened_inputs, std::vector<std::vector<torch_tensorrt::core::ir::Input>>& collection_inputs,
99
torch::jit::IValue input_ivalue, int level, int index) {
1010
if (input_ivalue.isTuple()) {
1111
auto input_tuple = input_ivalue.toTuple();
@@ -53,7 +53,7 @@ GraphInputs::GraphInputs(std::vector<ir::Input> inputs_) {
5353
LOG_DEBUG("Construct GraphInput with ir::Input");
5454
inputs = inputs_;
5555
collection_inputs.resize(inputs_.size());
56-
for (int i = 0; i < inputs_.size(); i++) {
56+
for (size_t i = 0; i < inputs_.size(); i++) {
5757
collection_inputs[i].push_back(inputs_[i]);
5858
}
5959
}

core/partitioning/partitioning.cpp

Lines changed: 0 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -400,15 +400,9 @@ PartitionedGraph segment_graph(
400400
if (n->kind() == torch::jit::prim::Constant) {
401401
continue;
402402
}
403-
<<<<<<< HEAD
404-
405-
if (check_node_fallback(n, global_fallback_nodes)) {
406-
in_prog_trt_blk_nodes.push_back(n);
407-
=======
408403
// the outputs of trt subgraph shouldn't be collections
409404
if (should_run_in_trt(n, forced_fallback_ops) && !(in_prog_trt_blk_nodes.size() == 0 && is_collection(n))) {
410405
in_prog_trt_blk_nodes.insert(in_prog_trt_blk_nodes.begin(), n);
411-
>>>>>>> feat: support for grouped inputs
412406

413407
// If there is an active PyTorch block and we have passed the threshold for a valid TRT
414408
// block then segment and reset the active PyTorch block

core/partitioning/shape_analysis.cpp

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -39,15 +39,15 @@ std::unordered_map<const torch::jit::Value*, torch::jit::IValue> generateRandomI
3939
std::vector<torch::jit::IValue> list;
4040
c10::TypePtr elementType = c10::TensorType::get();
4141
auto generic_list = c10::impl::GenericList(elementType);
42-
for (int i = 0; i < input.second.size(); i++) {
42+
for (size_t i = 0; i < input.second.size(); i++) {
4343
auto in = generateSingleInput(input.second[i], types[input.first][i]);
4444
generic_list.push_back(in.clone());
4545
}
4646
ivalue_map[input.first] = c10::IValue(generic_list);
4747
} else if (input.first->type()->kind() == torch::jit::TypeKind::TupleType) {
4848
// create tuple
4949
std::vector<torch::jit::IValue> list;
50-
for (int i = 0; i < input.second.size(); i++) {
50+
for (size_t i = 0; i < input.second.size(); i++) {
5151
auto in = generateSingleInput(input.second[i], types[input.first][i]);
5252
list.push_back(in.clone());
5353
}
@@ -56,7 +56,7 @@ std::unordered_map<const torch::jit::Value*, torch::jit::IValue> generateRandomI
5656
} else {
5757
auto in = generateSingleInput(input.second[0], types[input.first][0]);
5858
ivalue_map[input.first] = in.clone();
59-
59+
6060
}
6161
}
6262
return ivalue_map;

tests/core/partitioning/test_resolve_nontensor_inputs.cpp

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -116,7 +116,7 @@ TEST(Partitioning, ResolveNonTensorInputsCorrectly) {
116116
inputs.push_back(torch_tensorrt::core::ir::Input({16, 3, 3, 3}));
117117
inputs.push_back(torch_tensorrt::core::ir::Input({16}));
118118

119-
std::unordered_map<const torch::jit::Value*, std::vector<torch_tensorrt::core::ir::Input>> inputs_map;
119+
torch_tensorrt::core::ir::CollectionInputSpecMap inputs_map;
120120
std::unordered_map<const torch::jit::Value*, std::vector<c10::optional<at::ScalarType>>> input_types;
121121
for (size_t i = 0; i < g->inputs().size(); ++i) {
122122
inputs_map.insert({g->inputs()[i], {inputs[i]}});
@@ -367,11 +367,11 @@ TEST(Partitioning, ResolveOnlyNeccessaryNonTensorInputs) {
367367
inputs.push_back(torch_tensorrt::core::ir::Input({4, 4}));
368368
inputs.push_back(torch_tensorrt::core::ir::Input({4, 4}));
369369

370-
std::unordered_map<const torch::jit::Value*, torch_tensorrt::core::ir::Input> inputs_map;
371-
std::unordered_map<const torch::jit::Value*, c10::optional<at::ScalarType>> input_types;
370+
torch_tensorrt::core::ir::CollectionInputSpecMap inputs_map;
371+
std::unordered_map<const torch::jit::Value*, std::vector<c10::optional<at::ScalarType>>> input_types;
372372
for (size_t i = 0; i < g->inputs().size(); ++i) {
373-
inputs_map.insert({g->inputs()[i], inputs[i]});
374-
input_types.insert({g->inputs()[i], {at::kFloat}});
373+
inputs_map.insert({g->inputs()[i], {inputs[i]}});
374+
input_types.insert({g->inputs()[i], {{at::kFloat}}});
375375
}
376376
auto input_ivalues_map = torch_tensorrt::core::partitioning::generateRandomInputs(inputs_map, input_types);
377377
std::unordered_map<torch::jit::Node*, int> fallback_nodes;

tests/modules/hub.py

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -128,10 +128,10 @@
128128
"model": cm.ListInputTupleOutput(),
129129
"path": "script"
130130
},
131-
#"bert_base_uncased": {
132-
# "model": cm.BertModule(),
133-
# "path": "trace"
134-
#}
131+
"bert_base_uncased": {
132+
"model": cm.BertModule(),
133+
"path": "trace"
134+
}
135135
}
136136

137137

0 commit comments

Comments (0)