Commit 1e198a0

Wei authored and committed
Changes done internally at Facebook (#1299)
bd46e8f292bf68fe6b87d2d5d206c89fda79a746 Shirong Wu <[email protected]> Disable group ln fuse pass
6ce1d3bc19d75b266e99355c96daeff7054dcbf8 Wei Wei <[email protected]> [fx2trt] set logging level to INFO at fx root
9d552dc3f69db9e4a249f80ef00803a9413e5d38 Wei Wei <[email protected]> [fx2trt] change OSS method lower_to_trt() to compile()
6c4bdb8ac5823d161e4afc7c9d295f961aeeb0bf Mor Tzur <[email protected]> fix engine holder test binary to fix contbuild_pytorch_fx2trt_build
636d0ab2a3d0f09267e25b8b8e7eedd4d91d791d Yinghai Lu <[email protected]> [easy] remove random prints
5a97668307c26e69a89a4e02a535e319eaf3ce3d Wei Wei <[email protected]> [ads] sequential linear fuse
508338ab343e407ee49605919508210b62ad9a52 Wei Wei <[email protected]> [fx2trt] minor literal fix
1 parent 19ae4cb commit 1e198a0

File tree

3 files changed: +51 -2 lines changed


py/torch_tensorrt/fx/lower.py

+1 -1

@@ -53,7 +53,7 @@ def compile(
         timing_cache_prefix: Timing cache file name for timing cache used by fx2trt.
         save_timing_cache: Update timing cache with current timing cache data if set to True.
         cuda_graph_batch_size: Cuda graph batch size, default to be -1.
-
+        dynamic_batch: batch dimension (dim=0) is dynamic.
     Returns:
         A torch.nn.Module lowered by TensorRT.
     """

py/torch_tensorrt/fx/passes/pass_utils.py

+47
@@ -102,6 +102,53 @@ def bounded_method(*args, **kwargs):
     return dec_for_method
 
 
+def log_perf_before_after(pass_: PassFunc) -> PassFunc:
+    """
+    Wraps a pass function to log perf of the module before and after the pass
+    """
+
+    @wraps(pass_)
+    def check_perf_with_before_after_log(
+        module: fx.GraphModule, input: Input
+    ) -> fx.GraphModule:
+        def benchmark_torch_function(iters: int, f, *args) -> float:
+            """Estimates the average time duration for a single inference call in second
+
+            If the input is batched, then the estimation is for the batches inference call.
+
+            Args:
+                iters: number of inference iterations to run
+                f: a function to perform a single inference call
+
+            Returns:
+                estimated average time duration in second for a single inference call
+            """
+            with torch.inference_mode():
+                f(*args)
+            torch.cuda.synchronize()
+            start_event = torch.cuda.Event(enable_timing=True)
+            end_event = torch.cuda.Event(enable_timing=True)
+            # print("== Start benchmark iterations")
+            with torch.inference_mode():
+                start_event.record()
+                for _ in range(iters):
+                    f(*args)
+                end_event.record()
+            torch.cuda.synchronize()
+            # print("== End benchmark iterations")
+            return (start_event.elapsed_time(end_event) * 1.0e-3) / iters
+
+        time_before = benchmark_torch_function(100, lambda: module(*input))
+        _LOGGER.info(f"[{pass_}] Perf Before(eager mode): {time_before}")
+
+        module = pass_(module, input)
+        time_after = benchmark_torch_function(100, lambda: module(*input))
+        _LOGGER.info(f"[{pass_}] Perf After(eager mode): {time_after}")
+        return module
+
+    return check_perf_with_before_after_log
+
+
 def log_before_after(pass_: PassFunc) -> PassFunc:
     """
     Wraps a pass function to log the module graph before and after the pass
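
For reference, the new decorator wraps a lowering pass the same way as the existing log_before_after wrapper below it. A minimal usage sketch; the pass name and body here are hypothetical, not part of this commit:

from torch import fx
from torch_tensorrt.fx.passes.pass_utils import log_perf_before_after

@log_perf_before_after
def my_fusion_pass(module: fx.GraphModule, input) -> fx.GraphModule:
    # ... rewrite module.graph here (e.g. fuse adjacent linear ops) ...
    module.recompile()
    return module

# Calling my_fusion_pass(mod, sample_inputs) now logs the eager-mode
# latency, averaged over 100 iterations, before and after the pass runs.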

py/torch_tensorrt/fx/tracer/acc_tracer/acc_ops.py

+3 -1

@@ -778,7 +778,9 @@ def dropout_mapper(node: torch.fx.Node, mod: nn.Module):
 
     assert callable(stochastic_depth)
 except Exception as e:
-    warnings.warn(f"Unable to import torchvision related libraries.: {e}")
+    warnings.warn(
+        f"Unable to import torchvision related libraries.: {e}. Please install torchvision lib in order to lower stochastic_depth"
+    )
 else:
 
 @register_custom_acc_mapper_fn(
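
The widened warning sits inside the usual optional-dependency guard: try the torchvision import, warn and skip on failure, and register the stochastic_depth mapper only in the else branch. A minimal self-contained sketch of that pattern, assuming the import at the top of the try block; only the warning text is taken verbatim from this commit:

import warnings

try:
    from torchvision.ops import stochastic_depth

    assert callable(stochastic_depth)
except Exception as e:
    warnings.warn(
        f"Unable to import torchvision related libraries.: {e}. "
        "Please install torchvision lib in order to lower stochastic_depth"
    )
else:
    # Reached only if the import succeeded, so code here can safely
    # reference stochastic_depth (e.g. register an acc_ops mapper for it).
    pass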
