From 2b57efd5ca22b850325d7c39dd1f6c906d7f042a Mon Sep 17 00:00:00 2001
From: Dheeraj Peri
Date: Thu, 22 Sep 2022 12:47:24 -0700
Subject: [PATCH 1/4] chore: Fix centralized partitioning

Signed-off-by: Dheeraj Peri
---
 core/partitioning/CMakeLists.txt     | 3 ++-
 core/partitioning/shape_analysis.cpp | 2 --
 2 files changed, 2 insertions(+), 3 deletions(-)

diff --git a/core/partitioning/CMakeLists.txt b/core/partitioning/CMakeLists.txt
index 7f83b3d891..14d4cc373e 100644
--- a/core/partitioning/CMakeLists.txt
+++ b/core/partitioning/CMakeLists.txt
@@ -4,6 +4,7 @@ add_library(${lib_name} OBJECT)
 set(CXX_SRCS
     "${CMAKE_CURRENT_SOURCE_DIR}/partitioning.cpp"
     "${CMAKE_CURRENT_SOURCE_DIR}/shape_analysis.cpp"
+    "${CMAKE_CURRENT_SOURCE_DIR}/stitching.cpp"
 )
 
 set(HEADER_FILES
@@ -36,4 +37,4 @@ add_subdirectory(partitioningctx)
 add_subdirectory(partitioninginfo)
 add_subdirectory(segmentedblock)
 
-install(FILES ${HEADER_FILES} DESTINATION "${CMAKE_INSTALL_INCLUDEDIR}/torch_tensorrt/core/partitioning")
\ No newline at end of file
+install(FILES ${HEADER_FILES} DESTINATION "${CMAKE_INSTALL_INCLUDEDIR}/torch_tensorrt/core/partitioning")
diff --git a/core/partitioning/shape_analysis.cpp b/core/partitioning/shape_analysis.cpp
index 514681a088..21696e8204 100644
--- a/core/partitioning/shape_analysis.cpp
+++ b/core/partitioning/shape_analysis.cpp
@@ -33,8 +33,6 @@ std::unordered_map generateRandomI
 
   for (auto& input : inputs) {
     if (input.first->type()->kind() == torch::jit::TypeKind::ListType) {
-      // create list
-      std::vector list;
       c10::TypePtr elementType = c10::TensorType::get();
       auto generic_list = c10::impl::GenericList(elementType);
       for (size_t i = 0; i < input.second.size(); i++) {

From 93ec6b0115298f22063191cb9602e2b41d84f0bb Mon Sep 17 00:00:00 2001
From: Dheeraj Peri
Date: Fri, 23 Sep 2022 02:15:36 -0700
Subject: [PATCH 2/4] chore: Add new partitioning headers to py packaging

Signed-off-by: Dheeraj Peri
---
 py/setup.py | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/py/setup.py b/py/setup.py
index 46a89ba78b..30aa49599e 100644
--- a/py/setup.py
+++ b/py/setup.py
@@ -422,6 +422,9 @@ def run(self):
                 "include/torch_tensorrt/core/lowering/*.h",
                 "include/torch_tensorrt/core/lowering/passes/*.h",
                 "include/torch_tensorrt/core/partitioning/*.h",
+                "include/torch_tensorrt/core/partitioning/segmentedblock/*.h",
+                "include/torch_tensorrt/core/partitioning/partitioninginfo/*.h",
+                "include/torch_tensorrt/core/partitioning/partitioningctx/*.h",
                 "include/torch_tensorrt/core/plugins/*.h",
                 "include/torch_tensorrt/core/plugins/impl/*.h",
                 "include/torch_tensorrt/core/runtime/*.h",

From 789be6172b109e01b9047365d324b61e6f26f004 Mon Sep 17 00:00:00 2001
From: Dheeraj Peri
Date: Fri, 23 Sep 2022 13:17:19 -0700
Subject: [PATCH 3/4] chore: Add partitioning headers to C++ tar package

Signed-off-by: Dheeraj Peri
---
 BUILD | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/BUILD b/BUILD
index 238d93ed05..98792b2dc1 100644
--- a/BUILD
+++ b/BUILD
@@ -22,6 +22,9 @@ pkg_tar(
         "//core/lowering:include",
         "//core/lowering/passes:include",
         "//core/partitioning:include",
+        "//core/partitioning/segmentedblock:include",
+        "//core/partitioning/partitioninginfo:include",
+        "//core/partitioning/partitioningctx:include",
         "//core/plugins:impl_include",
         "//core/plugins:include",
         "//core/runtime:include",

From 3017a39aa9e9f7edcc665cb6f7d677a0639f534d Mon Sep 17 00:00:00 2001
From: Dheeraj Peri
Date: Thu, 29 Sep 2022 13:03:42 -0700
Subject: [PATCH 4/4] chore: Increase threshold for topk

Signed-off-by: Dheeraj Peri
---
 tests/core/conversion/converters/test_topk.cpp |  4 ++--
 tests/util/util.cpp                            | 12 ++++++------
 2 files changed, 8 insertions(+), 8 deletions(-)

diff --git a/tests/core/conversion/converters/test_topk.cpp b/tests/core/conversion/converters/test_topk.cpp
index c53d209c1f..2e7e490ab7 100644
--- a/tests/core/conversion/converters/test_topk.cpp
+++ b/tests/core/conversion/converters/test_topk.cpp
@@ -26,7 +26,7 @@ TEST(Converters, ATenTopKConvertsCorrectly) {
   auto trt_results = torch_tensorrt::tests::util::RunGraphEngine(g, params, {in});
 
   ASSERT_TRUE(
-      torch_tensorrt::tests::util::almostEqual(jit_results[0], trt_results[0].reshape_as(jit_results[0]), 2e-6));
+      torch_tensorrt::tests::util::almostEqual(jit_results[0], trt_results[0].reshape_as(jit_results[0]), 8e-5));
   ASSERT_TRUE(
-      torch_tensorrt::tests::util::almostEqual(jit_results[1], trt_results[1].reshape_as(jit_results[1]), 2e-6));
+      torch_tensorrt::tests::util::almostEqual(jit_results[1], trt_results[1].reshape_as(jit_results[1]), 8e-5));
 }
diff --git a/tests/util/util.cpp b/tests/util/util.cpp
index 8359d31576..3bfec24c5a 100644
--- a/tests/util/util.cpp
+++ b/tests/util/util.cpp
@@ -11,9 +11,9 @@ bool cosineSimEqual(const at::Tensor& computed_tensor, const at::Tensor& gt_tens
       computed_tensor.flatten(), gt_tensor.flatten(), torch::nn::functional::CosineSimilarityFuncOptions().dim(0));
   std::ostringstream ss;
   ss << computed_tensor << std::endl << gt_tensor << std::endl;
-  LOG_GRAPH(ss.str());
-  LOG_GRAPH(std::string("Cosine Similarity score: ") + std::to_string(cosine_sim.item()));
-  LOG_GRAPH(std::string("Acceptable Threshold: ") + std::to_string(threshold));
+  LOG_DEBUG(ss.str());
+  LOG_DEBUG(std::string("Cosine Similarity score: ") + std::to_string(cosine_sim.item()));
+  LOG_DEBUG(std::string("Acceptable Threshold: ") + std::to_string(threshold));
 
   return cosine_sim.item() >= threshold;
 }
@@ -31,14 +31,14 @@ bool almostEqual(const at::Tensor& computed_tensor, const at::Tensor& gt_tensor,
   auto result = diff.abs().max().item();
   auto threshold = atol + (rtol * gt_tensor.abs().max().item());
 
-  LOG_GRAPH(std::string("Max Difference: ") + std::to_string(result));
-  LOG_GRAPH(std::string("Acceptable Threshold: ") + std::to_string(threshold));
+  LOG_DEBUG(std::string("Max Difference: ") + std::to_string(result));
+  LOG_DEBUG(std::string("Acceptable Threshold: ") + std::to_string(threshold));
 
   return result <= threshold;
 }
 
 bool exactlyEqual(const at::Tensor& computed_tensor, const at::Tensor& gt_tensor) {
-  LOG_GRAPH(computed_tensor << std::endl << gt_tensor << std::endl);
+  LOG_DEBUG(computed_tensor << std::endl << gt_tensor << std::endl);
   std::cout << "Max Difference: " << (computed_tensor - gt_tensor).abs().max().item() << std::endl;
 
   return (computed_tensor - gt_tensor).abs().max().item() == 0.f;
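
For context on the relaxed tolerance in the top-k test above: as the util.cpp hunk shows, almostEqual accepts two tensors when the largest element-wise difference satisfies max|computed - gt| <= atol + rtol * max|gt|. The standalone sketch below is illustrative only and not part of the patch series; it assumes LibTorch is available, assumes conventional defaults of atol = 1e-8 and rtol = 1e-5 for the unspecified tolerances, and assumes the third argument passed in the test maps to atol. The helper name almostEqualSketch is hypothetical.

// Minimal sketch (assumptions noted above) of the tolerance check that
// tests/util/util.cpp's almostEqual performs on a pair of tensors.
#include <torch/torch.h>
#include <iostream>

bool almostEqualSketch(const at::Tensor& computed, const at::Tensor& gt,
                       float atol = 1e-8f, float rtol = 1e-5f) {
  // Largest element-wise absolute difference between the two tensors.
  float max_diff = (computed - gt).abs().max().item<float>();
  // The acceptable threshold scales with the magnitude of the ground truth.
  float threshold = atol + (rtol * gt.abs().max().item<float>());
  return max_diff <= threshold;
}

int main() {
  torch::manual_seed(0);
  auto gt = torch::rand({4, 4});
  auto computed = gt + 5e-5;  // perturbation between the old and new tolerances

  // Passes under the relaxed 8e-5 value used by the updated top-k test...
  std::cout << std::boolalpha
            << almostEqualSketch(computed, gt, /*atol=*/8e-5f) << std::endl;
  // ...but fails under the previous, tighter 2e-6 value.
  std::cout << almostEqualSketch(computed, gt, /*atol=*/2e-6f) << std::endl;
  return 0;
}

Under those assumptions, a 5e-5 perturbation falls inside the new 8e-5 bound but outside the old 2e-6 one, which matches the intent of the threshold bump in PATCH 4/4.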