Commit aef6003

Merge pull request #29 from narendasan/mobilenet
Add operators necessary for mobilenet
2 parents: 79c909c + ca42ef5

33 files changed: +841 −323 lines

Diff for: .gitignore (+1 −1)

@@ -13,4 +13,4 @@ experiments/
 py/build/
 py/tmp/
 py/.eggs
-.vscode/
+.vscode/

Diff for: core/conversion/conversion.cpp (+34 −32)

@@ -10,7 +10,7 @@ namespace trtorch {
 namespace core {
 namespace conversion {
 
-// Defined in core/conversion/conversion_blacklist.cpp
+// Defined in core/conversion/conversion_blacklist.cpp
 bool isNodeConversionBlacklisted(const torch::jit::Node* n);
 
 bool OpSupported(const torch::jit::Node* n) {
@@ -24,8 +24,8 @@ c10::optional<torch::jit::IValue> EvaluateNode(ConversionCtx* ctx, const torch::
     // Also probably a better way to deal with the two error cases;
     TRTORCH_CHECK(level < limit, "Failed to evaluate node: " << *n \
                   << "Reason: Exceeded evaluation stack limit (limit=" \
-                  << limit << ")");
-
+                  << limit << ")");
+
     LOG_DEBUG(ctx->logger, "Evaluating " << util::node_info(n));
     evaluators::kwargs eval_args;
     for (auto eval_in : n->inputs()) {
@@ -55,7 +55,7 @@ c10::optional<torch::jit::IValue> EvaluateNode(ConversionCtx* ctx, const torch::
     return eval;
 }
 
-bool AddLayer(ConversionCtx* ctx, const torch::jit::Node* n) {
+void AddLayer(ConversionCtx* ctx, const torch::jit::Node* n) {
     LOG_INFO(ctx->logger,
              "Adding Layer " << util::node_info(n) << " (ctx.AddLayer)");
     converters::args node_args;
@@ -87,36 +87,34 @@ bool AddLayer(ConversionCtx* ctx, const torch::jit::Node* n) {
             TRTORCH_THROW_ERROR("Unable to retrieve all node inputs for node: " \
                 << util::node_info(n) << " (ctx.AddLayer)\nSpecifically failed to retrieve value for input: " \
                 << *input_node);
-            return false;
         }
-
     }
 
     if (n->inputs().size() != node_args.size()) {
         TRTORCH_THROW_ERROR("Unable to retrieve all node inputs for node: " << *n);
-        return false;
     }
 
-
+
     auto schema = n->maybeSchema();
     TRTORCH_CHECK(schema, "Unable to get schema for Node " << util::node_info(n) \
                   << " (conversion.AddLayer)");
-
+
     auto converter = converters::get_node_converter_for(schema);
     TRTORCH_CHECK(converter, "Unable to convert node: " << util::node_info(n) \
                   << " (conversion.AddLayer)\nSchema: " << *schema
                   << "\nConverter for " << schema->name()
                   << " requested, but no such converter was found.\nIf you need a converter for this operator, you can try implementing one yourself\n"
-                  << "or request a converter: https://www.github.com/NVIDIA/TRTorch/issues");
-    converter(ctx, n, node_args);
+                  << "or request a converter: https://www.github.com/NVIDIA/TRTorch/issues");
 
-    return true;
+    TRTORCH_CHECK(converter(ctx, n, node_args),
+                  "Converter for " << *schema << " failed to convert node: "
+                  << util::node_info(n) << "please report this error to https://www.github.com/NVIDIA/TRTorch/issues");
 }
 
-bool AddInputs(ConversionCtx* ctx,
+void AddInputs(ConversionCtx* ctx,
                at::ArrayRef<const torch::jit::Value*> inputs,
                std::vector<InputRange>& input_dims) {
-
+
     auto type_lut = torch::jit::script::string_to_type_lut();
     std::vector<const torch::jit::Value*> input_tensors;
     for (auto in : inputs) {
@@ -130,15 +128,15 @@ bool AddInputs(ConversionCtx* ctx,
             input_tensors.push_back(in);
         }
     }
-
+
     TRTORCH_CHECK(input_tensors.size() == input_dims.size(),
                   "Expected dimension specifications for all input tensors" \
                   << ", but found " << input_tensors.size() \
                   << " input tensors and " \
                   << input_dims.size() << "dimension specs (conversion.AddInputs)");
 
     auto profile = ctx->builder->createOptimizationProfile();
-
+
     for (size_t i = 0; i < input_tensors.size(); i++) {
         auto in = input_tensors[i];
         auto dims = input_dims[i];
@@ -158,20 +156,23 @@ bool AddInputs(ConversionCtx* ctx,
     }
 
     TRTORCH_CHECK(profile->isValid(), "Optimization profile is invalid, please check the input range provided (conversion.AddInputs)");
-
+
     ctx->cfg->addOptimizationProfile(profile);
-    return true;
 }
 
-bool MarkOutputs(ConversionCtx* ctx, at::ArrayRef<const torch::jit::Value*> outputs) {
+void MarkOutputs(ConversionCtx* ctx, at::ArrayRef<const torch::jit::Value*> outputs) {
     for (auto out : outputs) {
-        ctx->net->markOutput(*(ctx->value_tensor_map[out]));
+        auto it = ctx->value_tensor_map.find(out);
+        // Leaves the potential for unused outputs to be populated with nullptr "safely"
+        TRTORCH_CHECK(it != ctx->value_tensor_map.end() && it->second,
+                      "No corresponding output TRT Tensor found for TorchScript output: " << out->debugName());
+        auto out_tensor = it->second;
+        ctx->net->markOutput(*out_tensor);
         LOG_INFO(ctx->logger,
                  "Marking Output " << out->debugName() << " (ctx.MarkOutput)");
     }
-    return true;
 }
-
+
 void AddParamsToCtxValueMap(ConversionCtx* ctx, GraphParams& params) {
     for (auto p : params) {
         ctx->evaluated_value_map[p.first] = torch::jit::IValue(p.second.clone());
@@ -191,13 +192,8 @@ void ConvertBlockToNetDef(ConversionCtx* ctx, const torch::jit::Block* b, ExtraI
         bool to_eval = evaluators::shouldEvalAtConversionTime(n);
         bool blacklisted = isNodeConversionBlacklisted(n);
         if (!to_eval && !blacklisted) {
-            if (!AddLayer(ctx, n)) {
-                //TODO: Exception things
-                LOG_ERROR(ctx->logger,
-                          "Failed to add layer: " << *n \
-                          << " (ctx.AddLayer)");
-                return;
-            }
+            // Should error out if something fails
+            AddLayer(ctx, n);
         } else {
             std::string reason = "";
             if (to_eval) {
@@ -207,7 +203,13 @@ void ConvertBlockToNetDef(ConversionCtx* ctx, const torch::jit::Block* b, ExtraI
                 reason += " (explicitly blacklisted)";
             }
             LOG_DEBUG(ctx->logger,
-                      "Skipping Node: " << (n->kind().toQualString()) << reason);
+                      "Skipping Node: " << util::node_info(n) << reason);
+        }
+    }
+
+    for (const auto n : nodes) {
+        if (converters::node_is_convertable(n)) {
+            ctx->CheckLayerAddition(n);
         }
     }
 
@@ -218,7 +220,7 @@ void ConvertBlockToNetDef(ConversionCtx* ctx, const torch::jit::Block* b, ExtraI
 // Converts a already lowered block (blocks with no sub blocks) to
 // a serialized TensorRT engine that can be deserialized and run
 
-// Probably should consolidate these two functions
+// Probably should consolidate these two functions
 std::string ConvertBlockToEngine(const torch::jit::Block* b, ExtraInfo build_info, GraphParams& static_params) {
     ConversionCtx ctx(build_info.engine_settings);
     ConvertBlockToNetDef(&ctx, b, build_info, static_params);
@@ -247,7 +249,7 @@ bool VerifyConverterSupportForBlock(const torch::jit::Block* b) {
     for (auto s : unsupported_ops) {
         unsupported_msg << " - " << s << std::endl;
     }
-    unsupported_msg << "You can either implement converters for these ops in your application or file a bug" << std::endl;
+    unsupported_msg << "You can either implement converters for these ops in your application or request implementation" << std::endl;
     unsupported_msg << "https://www.github.com/nvidia/TRTorch/issues" << std::endl;
     LOG_ERROR(unsupported_msg.str());
 }
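
Note on the error-handling change above: AddLayer, AddInputs, and MarkOutputs now return void and report failure through TRTORCH_CHECK / TRTORCH_THROW_ERROR rather than bool return codes, so ConvertBlockToNetDef no longer needs per-call checks. A minimal caller-side sketch of the resulting contract, assuming trtorch::Error is the exception type those macros raise (the Arg.cpp diff below catches exactly that type); the wrapper name and setup are hypothetical:

#include <iostream>
#include <string>

// Hypothetical wrapper, not code from this commit: any failed check inside
// AddLayer/AddInputs/MarkOutputs now propagates out of ConvertBlockToEngine
// as an exception instead of silently truncating the network.
std::string TryConvertBlock(const torch::jit::Block* block,
                            trtorch::core::conversion::ExtraInfo build_info,
                            trtorch::core::conversion::GraphParams& params) {
    try {
        return trtorch::core::conversion::ConvertBlockToEngine(block, build_info, params);
    } catch (const trtorch::Error& e) {
        std::cerr << e.what() << std::endl;  // one handler replaces scattered bool checks
        return "";
    }
}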

Diff for: core/conversion/conversionctx/ConversionCtx.cpp (+20 −3)

@@ -37,11 +37,11 @@ ConversionCtx::ConversionCtx(BuilderSettings build_settings)
     switch(settings.op_precision) {
     case nvinfer1::DataType::kHALF:
         cfg->setFlag(nvinfer1::BuilderFlag::kFP16);
-        input_type = nvinfer1::DataType::kHALF;
+        input_type = nvinfer1::DataType::kHALF;
         break;
     // case nvinfer1::DataType::kINT8:
     //     cfg->setFlag(nvinfer1::BuilderFlag::kINT8);
-    //     input_type = nvinfer1::DataType::kFLOAT;
+    //     input_type = nvinfer1::DataType::kFLOAT;
     //     break;
     case nvinfer1::DataType::kFLOAT:
     default:
@@ -80,13 +80,30 @@ ConversionCtx::~ConversionCtx() {
         free(ptr);
     }
 }
-
+
+nvinfer1::ITensor* ConversionCtx::AssociateValueAndTensor(const torch::jit::Value* value, nvinfer1::ITensor* tensor) {
+    tensor->setName(value->debugName().c_str());
+    this->value_tensor_map[value] = tensor;
+    return tensor;
+}
+
 std::string ConversionCtx::SerializeEngine() {
     auto engine = builder->buildEngineWithConfig(*net, *cfg);
     auto serialized_engine = engine->serialize();
     return std::string((const char*)serialized_engine->data(), serialized_engine->size());
 }
 
+bool ConversionCtx::CheckLayerAddition(const torch::jit::Node* n) {
+    for (auto out : n->outputs()) {
+        auto iter = this->value_tensor_map.find(out);
+        if (iter == this->value_tensor_map.end()) {
+            LOG_WARNING("Node " << util::node_info(n) << " output: " << out->debugName() << " does not have a coresponding output, may potentially indicate a defective converter");
+            return false;
+        }
+    }
+    return true;
+}
+
 } // namespace conversion
 } // namespace core
 } // namespace trtorch
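
The new AssociateValueAndTensor helper gives converters a single call that both names the TRT tensor after its TorchScript value and records the pair in value_tensor_map, the same mapping CheckLayerAddition audits and MarkOutputs reads. A sketch of how a converter body might use it; the ReLU op and the ITensor() accessor on args are illustrative assumptions, not part of this diff:

// Hypothetical converter body, for illustration only.
bool ConvertReluSketch(ConversionCtx* ctx, const torch::jit::Node* n, converters::args& args) {
    auto in = args[0].ITensor();  // assumed accessor yielding the input nvinfer1::ITensor*
    auto layer = ctx->net->addActivation(*in, nvinfer1::ActivationType::kRELU);
    TRTORCH_CHECK(layer, "Unable to create ReLU layer from node: " << *n);
    // One call names the output tensor after the JIT value and registers it,
    // so CheckLayerAddition will find an entry for every node output.
    ctx->AssociateValueAndTensor(n->outputs()[0], layer->getOutput(0));
    return true;
}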

Diff for: core/conversion/conversionctx/ConversionCtx.h (+8 −5)

@@ -30,12 +30,15 @@ struct BuilderSettings {
 
     BuilderSettings() = default;
     BuilderSettings(const BuilderSettings& other) = default;
-    friend std::ostream& operator<<(std::ostream& os, const BuilderSettings& s);
+    friend std::ostream& operator<<(std::ostream& os, const BuilderSettings& s);
 };
-
+
 struct ConversionCtx {
     ConversionCtx(BuilderSettings settings);
     std::string SerializeEngine();
+    nvinfer1::ITensor* AssociateValueAndTensor(const torch::jit::Value* value, nvinfer1::ITensor* tensor);
+    bool CheckLayerAddition(const torch::jit::Node* n);
+
     ~ConversionCtx();
 
     nvinfer1::IBuilder* builder;
@@ -50,12 +53,12 @@
     // is constructed from a PyTorch Tensor it allocates the data here to store a
     // copy of the values
     std::vector<void*> builder_resources;
-
+
     std::unordered_map<const torch::jit::Value*, nvinfer1::ITensor*> value_tensor_map;
     std::unordered_map<const torch::jit::Value*, torch::jit::IValue> evaluated_value_map;
 };
 
-} // namespace conversion
+} // namespace conversion
 } // namespace core
 } // namespace trtorch
-
+

Diff for: core/conversion/converters/Arg.cpp (+32 −17)

@@ -85,9 +85,9 @@ std::string Arg::type_name() const {
     default:
         return "None";
     }
-
+
 }
-
+
 const torch::jit::IValue* Arg::IValue() const {
     if (type_ == Type::kIValue) {
         return ptr_.ivalue;
@@ -150,7 +150,7 @@ double Arg::unwrapToDouble(double default_val) {
 
 double Arg::unwrapToDouble() {
     return this->unwrapTo<double>();
-}
+}
 
 bool Arg::unwrapToBool(bool default_val) {
     return this->unwrapTo<bool>(default_val);
@@ -194,26 +194,41 @@ c10::List<bool> Arg::unwrapToBoolList() {
 
 template<typename T>
 T Arg::unwrapTo(T default_val) {
-    if (isIValue()) {
-        // TODO: implement Tag Checking
-        return ptr_.ivalue->to<T>();
+    try {
+        return this->unwrapTo<T>();
+    } catch(trtorch::Error& e) {
+        LOG_DEBUG("In arg unwrapping, returning default value provided (" << e.what() << ")");
+        return default_val;
     }
-    LOG_DEBUG("In arg unwrapping, returning default value provided");
-    return default_val;
 }
 
-
 template<typename T>
 T Arg::unwrapTo() {
-    if (isIValue()) {
-        //TODO: Implement Tag checking
-        return ptr_.ivalue->to<T>();
-        //TODO: Exception
-        //LOG_INTERNAL_ERROR("Requested unwrapping of arg IValue assuming it was " << typeid(T).name() << " however type is " << ptr_.ivalue->type());
-
+    TRTORCH_CHECK(isIValue(), "Requested unwrapping of arg assuming it was an IValue, however arg type is " << type_name());
+    auto ivalue = ptr_.ivalue;
+    bool correct_type = false;
+    if (typeid(T) == typeid(double)) {
+        correct_type = ivalue->isDouble();
+    } else if (typeid(T) == typeid(bool)) {
+        correct_type = ivalue->isBool();
+    } else if (typeid(T) == typeid(int64_t)) {
+        correct_type = ivalue->isInt();
+    } else if (typeid(T) == typeid(at::Tensor)) {
+        correct_type = ivalue->isTensor();
+    } else if (typeid(T) == typeid(c10::Scalar)) {
+        correct_type = ivalue->isScalar();
+    } else if (typeid(T) == typeid(c10::List<int64_t>)) {
+        correct_type = ivalue->isIntList();
+    } else if (typeid(T) == typeid(c10::List<double>)) {
+        correct_type = ivalue->isDoubleList();
+    } else if (typeid(T) == typeid(c10::List<bool>)) {
+        correct_type = ivalue->isBoolList();
+    } else {
+        TRTORCH_THROW_ERROR("Requested unwrapping of arg to an unsupported type: " << typeid(T).name());
     }
-    TRTORCH_THROW_ERROR("Requested unwrapping of arg assuming it was an IValue, however arg type is " << type_name());
-    return T();
+
+    TRTORCH_CHECK(correct_type, "Requested unwrapping of arg IValue assuming it was " << typeid(T).name() << " however type is " << *(ptr_.ivalue->type()));
+    return ptr_.ivalue->to<T>();
 }
Diff for: core/conversion/converters/BUILD (+1 −0)

@@ -16,6 +16,7 @@ cc_library(
         "impl/element_wise.cpp",
         "impl/linear.cpp",
         "impl/pooling.cpp",
+        "impl/reduce.cpp",
         "impl/softmax.cpp",
         "impl/unary.cpp",
     ],

Diff for: core/conversion/converters/NodeConverterRegistry.cpp (+9 −9)

@@ -41,20 +41,20 @@ std::string canonical_schema_string(const torch::jit::FunctionSchema& schema) {
 }
 
 namespace {
-using ConverterLUT = std::unordered_map<torch::jit::Symbol, OpConverter>;
+using ConverterLUT = std::unordered_map<c10::OperatorName, OpConverter>;
 
 class NodeConverterRegistry {
 public:
     bool RegisterConverter(torch::jit::FunctionSchema* signature, OpConverter& converter) {
         LOG_DEBUG("Registering Converter for " << canonical_schema_string(*signature));
-        auto sym = torch::jit::Symbol::fromQualString(signature->name());
-        converter_lut_[sym] = std::move(converter);
+        auto name = signature->operator_name();
+        converter_lut_[name] = std::move(converter);
         return true;
     }
 
     OpConverter GetConverter(const torch::jit::FunctionSchema* signature) {
-        auto sym = torch::jit::Symbol::fromQualString(signature->name());
-        auto iter = converter_lut_.find(sym);
+        auto name = signature->operator_name();
+        auto iter = converter_lut_.find(name);
         if (iter == converter_lut_.end()) {
             LOG_ERROR("Requested converter for " << signature->name() << ", but no such converter was found");
             // ASK: Is there a better way than returning a nullptr?
@@ -66,8 +66,8 @@ class NodeConverterRegistry {
     bool Convertable(const torch::jit::Node* n) {
         auto schema = n->maybeSchema();
         if (schema) {
-            auto sym = torch::jit::Symbol::fromQualString(schema->name());
-            auto iter = converter_lut_.find(sym);
+            auto name = schema->operator_name();
+            auto iter = converter_lut_.find(name);
             if (iter == converter_lut_.end()) {
                 return false;
             } else {
@@ -79,7 +79,7 @@
             return false;
         }
     }
-
+
 private:
     ConverterLUT converter_lut_;
 };
@@ -111,7 +111,7 @@ OpConverter get_node_converter_for(const torch::jit::FunctionSchema* signature)
 bool node_is_convertable(const torch::jit::Node* n) {
     return get_converter_registry().Convertable(n);
 }
-
+
 RegisterNodeConversionPatterns&& RegisterNodeConversionPatterns::pattern(ConversionPattern p) && {
     register_node_converter(std::move(p));
     return std::move(*this);
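
The motivation for switching the LUT key from torch::jit::Symbol to c10::OperatorName: a Symbol built from schema->name() carries only the qualified name, so all overloads of an operator collapse to one key and later registrations clobber earlier ones. OperatorName also carries the overload name, and PyTorch already supplies the hash and equality the unordered_map needs. A small sketch of the distinction (the aten::add overload names are standard; the snippet itself is illustrative):

#include <ATen/core/operator_name.h>
#include <cassert>

int main() {
    // Both overloads share the qualified name "aten::add", which is all a
    // Symbol key would see; OperatorName keeps them distinct.
    c10::OperatorName add_tensor("aten::add", "Tensor");
    c10::OperatorName add_scalar("aten::add", "Scalar");
    assert(!(add_tensor == add_scalar));  // distinct keys => distinct converters
    return 0;
}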
