
Commit caed671

pytorchbot and dbort authored
Remove torch:: references from arm_executor_runner (#5521)
Remove `torch::` references from arm_executor_runner (#5506) Summary: Example code should use the new `executorch::` namespace wherever possible. Pull Request resolved: #5506 Test Plan: Built using the instructions at https://github.com/pytorch/executorch/blob/main/examples/arm/README.md Reviewed By: Gasoonjia Differential Revision: D63075681 Pulled By: dbort fbshipit-source-id: 62d12ccf6c792056d9a2949d23c64c97c0cf6a51 (cherry picked from commit 01dcebd) Co-authored-by: Dave Bort <[email protected]>
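In short, the change retires the legacy `exec_aten`/`torch::executor` spellings in favor of the new top-level `executorch::` namespaces. A condensed before/after, drawn from the using-declarations in the diff below:

```cpp
// Before (removed in this commit):
using namespace exec_aten;            // exec_aten::Tensor, ScalarType, ...
using torch::executor::Error;
using torch::executor::Result;

// After (new executorch:: namespaces):
using executorch::aten::Tensor;              // ATen-compatible tensor types
using executorch::runtime::Error;            // core runtime types
using executorch::runtime::Result;
using executorch::extension::BufferCleanup;  // extension helpers drop the util:: prefix
```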
1 parent 0866c52 commit caed671

File tree

1 file changed (+35, -28 lines)


examples/arm/executor_runner/arm_executor_runner.cpp

Lines changed: 35 additions & 28 deletions
@@ -42,10 +42,23 @@ char* model_pte = nullptr;
 #include "model_pte.h"
 #endif
 
-using namespace exec_aten;
-using namespace std;
-using torch::executor::Error;
-using torch::executor::Result;
+using executorch::aten::ScalarType;
+using executorch::aten::Tensor;
+using executorch::aten::TensorImpl;
+using executorch::extension::BufferCleanup;
+using executorch::extension::BufferDataLoader;
+using executorch::runtime::Error;
+using executorch::runtime::EValue;
+using executorch::runtime::HierarchicalAllocator;
+using executorch::runtime::MemoryAllocator;
+using executorch::runtime::MemoryManager;
+using executorch::runtime::Method;
+using executorch::runtime::MethodMeta;
+using executorch::runtime::Program;
+using executorch::runtime::Result;
+using executorch::runtime::Span;
+using executorch::runtime::Tag;
+using executorch::runtime::TensorInfo;
 
 #define METHOD_ALLOCATOR_POOL_SIZE (70 * 1024 * 1024)
 unsigned char __attribute__((
@@ -83,11 +96,10 @@ void et_pal_emit_log_message(
 }
 
 namespace {
-using namespace torch::executor;
 
-Result<util::BufferCleanup> prepare_input_tensors(
+Result<BufferCleanup> prepare_input_tensors(
     Method& method,
-    torch::executor::MemoryAllocator& allocator,
+    MemoryAllocator& allocator,
     std::vector<std::pair<char*, size_t>>& input_buffers) {
   MethodMeta method_meta = method.method_meta();
   size_t num_inputs = method_meta.num_inputs();
@@ -170,18 +182,18 @@ Result<util::BufferCleanup> prepare_input_tensors(
       ET_LOG(
           Error, "Failed to prepare input %zu: 0x%" PRIx32, i, (uint32_t)err);
       // The BufferCleanup will free the inputs when it goes out of scope.
-      util::BufferCleanup cleanup({inputs, num_allocated});
+      BufferCleanup cleanup({inputs, num_allocated});
       return err;
     }
   }
-  return util::BufferCleanup({inputs, num_allocated});
+  return BufferCleanup({inputs, num_allocated});
 }
 
 #ifdef SEMIHOSTING
 
 std::pair<char*, size_t> read_binary_file(
     const char* filename,
-    torch::executor::MemoryAllocator& allocator) {
+    MemoryAllocator& allocator) {
   FILE* fp = fopen(filename, "rb");
   if (!fp) {
     ET_LOG(
@@ -233,13 +245,13 @@ int main(int argc, const char* argv[]) {
   (void)argv;
 #endif
 
-  torch::executor::runtime_init();
+  executorch::runtime::runtime_init();
   std::vector<std::pair<char*, size_t>> input_buffers;
   size_t pte_size = sizeof(model_pte);
 
 #ifdef SEMIHOSTING
   const char* output_basename = nullptr;
-  torch::executor::MemoryAllocator input_allocator(
+  MemoryAllocator input_allocator(
       input_allocation_pool_size, input_allocation_pool);
 
   /* parse input parameters */
@@ -272,10 +284,9 @@ int main(int argc, const char* argv[]) {
   }
 #endif
   ET_LOG(Info, "Model in %p %c", model_pte, model_pte[0]);
-  auto loader = torch::executor::util::BufferDataLoader(model_pte, pte_size);
+  auto loader = BufferDataLoader(model_pte, pte_size);
   ET_LOG(Info, "Model PTE file loaded. Size: %lu bytes.", pte_size);
-  Result<torch::executor::Program> program =
-      torch::executor::Program::load(&loader);
+  Result<Program> program = Program::load(&loader);
   if (!program.ok()) {
     ET_LOG(
         Info,
@@ -294,8 +305,7 @@ int main(int argc, const char* argv[]) {
   }
   ET_LOG(Info, "Running method %s", method_name);
 
-  Result<torch::executor::MethodMeta> method_meta =
-      program->method_meta(method_name);
+  Result<MethodMeta> method_meta = program->method_meta(method_name);
   if (!method_meta.ok()) {
     ET_LOG(
         Info,
@@ -304,13 +314,11 @@ int main(int argc, const char* argv[]) {
         (unsigned int)method_meta.error());
   }
 
-  torch::executor::MemoryAllocator method_allocator{
-      torch::executor::MemoryAllocator(
-          METHOD_ALLOCATOR_POOL_SIZE, method_allocation_pool)};
+  MemoryAllocator method_allocator(
+      METHOD_ALLOCATOR_POOL_SIZE, method_allocation_pool);
 
   std::vector<uint8_t*> planned_buffers; // Owns the memory
-  std::vector<torch::executor::Span<uint8_t>>
-      planned_spans; // Passed to the allocator
+  std::vector<Span<uint8_t>> planned_spans; // Passed to the allocator
   size_t num_memory_planned_buffers = method_meta->num_memory_planned_buffers();
 
   for (size_t id = 0; id < num_memory_planned_buffers; ++id) {
@@ -325,17 +333,16 @@ int main(int argc, const char* argv[]) {
     planned_spans.push_back({planned_buffers.back(), buffer_size});
   }
 
-  torch::executor::HierarchicalAllocator planned_memory(
+  HierarchicalAllocator planned_memory(
      {planned_spans.data(), planned_spans.size()});
 
-  torch::executor::MemoryAllocator temp_allocator(
+  MemoryAllocator temp_allocator(
      temp_allocation_pool_size, temp_allocation_pool);
 
-  torch::executor::MemoryManager memory_manager(
+  MemoryManager memory_manager(
      &method_allocator, &planned_memory, &temp_allocator);
 
-  Result<torch::executor::Method> method =
-      program->load_method(method_name, &memory_manager);
+  Result<Method> method = program->load_method(method_name, &memory_manager);
   if (!method.ok()) {
     ET_LOG(
        Info,
@@ -374,7 +381,7 @@ int main(int argc, const char* argv[]) {
     ET_LOG(Info, "Model executed successfully.");
   }
 
-  std::vector<torch::executor::EValue> outputs(method->outputs_size());
+  std::vector<EValue> outputs(method->outputs_size());
   ET_LOG(Info, "%zu outputs: ", outputs.size());
   status = method->get_outputs(outputs.data(), outputs.size());
   ET_CHECK(status == Error::Ok);
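For orientation, here is a minimal sketch of the load-and-execute flow the runner implements, written purely against the new `executorch::` namespaces and condensed from the hunks above. It is not the full runner: error logging, semihosting, input preparation, and the Arm-specific temp pool are omitted; the include paths follow the usual ExecuTorch layout but are assumptions, and `model_data`, `model_size`, `method_pool`, and the `"forward"` method name are hypothetical placeholders.

```cpp
// Minimal sketch only; see the assumptions noted above.
#include <cstddef>
#include <cstdint>
#include <vector>

#include <executorch/extension/data_loader/buffer_data_loader.h>
#include <executorch/runtime/executor/memory_manager.h>
#include <executorch/runtime/executor/program.h>
#include <executorch/runtime/platform/runtime.h>

using executorch::extension::BufferDataLoader;
using executorch::runtime::Error;
using executorch::runtime::EValue;
using executorch::runtime::HierarchicalAllocator;
using executorch::runtime::MemoryAllocator;
using executorch::runtime::MemoryManager;
using executorch::runtime::Method;
using executorch::runtime::MethodMeta;
using executorch::runtime::Program;
using executorch::runtime::Result;
using executorch::runtime::Span;

// Hypothetical placeholders for the embedded .pte image and the method pool.
extern const uint8_t model_data[];
extern const size_t model_size;
static uint8_t method_pool[70 * 1024 * 1024];  // cf. METHOD_ALLOCATOR_POOL_SIZE

int run_model() {
  executorch::runtime::runtime_init();

  // Wrap the in-memory program and parse its header.
  BufferDataLoader loader(model_data, model_size);
  Result<Program> program = Program::load(&loader);
  if (!program.ok()) return -1;

  const char* method_name = "forward";  // assumption: typical entry point
  Result<MethodMeta> method_meta = program->method_meta(method_name);
  if (!method_meta.ok()) return -1;

  // Allocator backing the Method's runtime structures.
  MemoryAllocator method_allocator(sizeof(method_pool), method_pool);

  // Size and hand over the memory-planned buffers described by the metadata.
  std::vector<std::vector<uint8_t>> planned_buffers;  // owns the memory
  std::vector<Span<uint8_t>> planned_spans;           // passed to the allocator
  for (size_t id = 0; id < method_meta->num_memory_planned_buffers(); ++id) {
    const size_t size = static_cast<size_t>(
        method_meta->memory_planned_buffer_size(id).get());
    planned_buffers.emplace_back(size);
    planned_spans.push_back({planned_buffers.back().data(), size});
  }
  HierarchicalAllocator planned_memory(
      {planned_spans.data(), planned_spans.size()});
  MemoryManager memory_manager(&method_allocator, &planned_memory);

  // Load, execute, and read back the outputs.
  Result<Method> method = program->load_method(method_name, &memory_manager);
  if (!method.ok()) return -1;
  if (method->execute() != Error::Ok) return -1;

  std::vector<EValue> outputs(method->outputs_size());
  return method->get_outputs(outputs.data(), outputs.size()) == Error::Ok ? 0 : -1;
}
```

The ordering mirrors the runner itself: initialize the runtime, load the `Program` through a `BufferDataLoader`, size the memory-planned buffers from `MethodMeta`, assemble the `MemoryManager`, then load and execute the `Method` and collect its `EValue` outputs.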
