Commit 2cd9670

fix: Refactor benchmarks to use CSV

- Each benchmark is its own Python process
- All runners default to FP16

Parent: 5760de4

File tree: 3 files changed (+108, -64 lines)

tools/perf/accumulate_results.py (new file, +22 lines)
@@ -0,0 +1,22 @@
+import glob
+
+import pandas as pd
+
+
+def concat_all_results(outputs_filename="all_outputs.csv", files_regex="*_bs*.csv"):
+    """Concatenates all report files matching files_regex (a glob pattern) into an output file"""
+    candidate_files = glob.glob(files_regex)
+
+    if candidate_files:
+        df = pd.concat(
+            [pd.read_csv(file_name, index_col=0) for file_name in candidate_files]
+        ).reset_index(drop=True)
+        df.to_csv(outputs_filename)
+        print(f"Saved concatenated outputs to {outputs_filename}")
+
+    else:
+        print(f"No outputs to generate {outputs_filename}")
+
+
+if __name__ == "__main__":
+    concat_all_results()
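
For context, this is how the new aggregation step is expected to be driven once per-run reports exist in the working directory. The sketch below is illustrative only: the column names are placeholders, not necessarily the exact fields that recordStats() in perf_run.py emits.

# Illustrative only: "Backend" and "Median(FPS)" are placeholder columns.
import pandas as pd

from accumulate_results import concat_all_results

# Simulate two per-run reports. perf_run.py writes them with the pandas default
# index column, which is why concat_all_results() reads them with index_col=0.
pd.DataFrame({"Backend": ["dynamo"], "Median(FPS)": [812.3]}).to_csv(
    "vgg16_perf_bs1_backend_dynamo.csv"
)
pd.DataFrame({"Backend": ["inductor"], "Median(FPS)": [655.0]}).to_csv(
    "vgg16_perf_bs1_backend_inductor.csv"
)

# Merge everything matching the default "*_bs*.csv" pattern into all_outputs.csv
concat_all_results()

Because every report filename matches the default "*_bs*.csv" pattern, a single call to concat_all_results() folds all of them into all_outputs.csv.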

tools/perf/benchmark.sh (+82, -60 lines)
@@ -7,115 +7,137 @@ python hub.py

 batch_sizes=(1 2 4 8 16 32 64 128 256)
 large_model_batch_sizes=(1 2 4 8 16 32 64)
+backends=("torch" "ts_trt" "dynamo" "torch_compile" "inductor")
+backends_no_torchscript=("torch" "dynamo" "torch_compile" "inductor")

 # Benchmark VGG16 model
 echo "Benchmarking VGG16 model"
 for bs in ${batch_sizes[@]}
 do
-  python perf_run.py --model ${MODELS_DIR}/vgg16_scripted.jit.pt \
-                     --model_torch vgg16 \
-                     --precision fp32,fp16 --inputs="(${bs}, 3, 224, 224)" \
-                     --batch_size ${bs} \
-                     --truncate \
-                     --backends torch,ts_trt,dynamo,torch_compile,inductor \
-                     --report "vgg16_perf_bs${bs}.txt"
+  for backend in ${backends[@]}
+  do
+    python perf_run.py --model ${MODELS_DIR}/vgg16_scripted.jit.pt \
+                       --model_torch vgg16 \
+                       --precision fp16 --inputs="(${bs}, 3, 224, 224)" \
+                       --batch_size ${bs} \
+                       --truncate \
+                       --backends ${backend} \
+                       --report "vgg16_perf_bs${bs}_backend_${backend}.csv"
+  done
 done

 # Benchmark AlexNet model
 echo "Benchmarking AlexNet model"
 for bs in ${batch_sizes[@]}
 do
-  python perf_run.py --model ${MODELS_DIR}/alexnet_scripted.jit.pt \
-                     --model_torch alexnet \
-                     --precision fp32,fp16 --inputs="(${bs}, 3, 227, 227)" \
-                     --batch_size ${bs} \
-                     --truncate \
-                     --backends torch,ts_trt,dynamo,torch_compile,inductor \
-                     --report "alexnet_perf_bs${bs}.txt"
+  for backend in ${backends[@]}
+  do
+    python perf_run.py --model ${MODELS_DIR}/alexnet_scripted.jit.pt \
+                       --model_torch alexnet \
+                       --precision fp16 --inputs="(${bs}, 3, 227, 227)" \
+                       --batch_size ${bs} \
+                       --truncate \
+                       --backends ${backend} \
+                       --report "alexnet_perf_bs${bs}_backend_${backend}.csv"
+  done
 done

 # Benchmark Resnet50 model
 echo "Benchmarking Resnet50 model"
 for bs in ${batch_sizes[@]}
 do
-  python perf_run.py --model ${MODELS_DIR}/resnet50_scripted.jit.pt \
-                     --model_torch resnet50 \
-                     --precision fp32,fp16 --inputs="(${bs}, 3, 224, 224)" \
-                     --batch_size ${bs} \
-                     --truncate \
-                     --backends torch,ts_trt,dynamo,torch_compile,inductor \
-                     --report "resnet50_perf_bs${bs}.txt"
+  for backend in ${backends[@]}
+  do
+    python perf_run.py --model ${MODELS_DIR}/resnet50_scripted.jit.pt \
+                       --model_torch resnet50 \
+                       --precision fp16 --inputs="(${bs}, 3, 224, 224)" \
+                       --batch_size ${bs} \
+                       --truncate \
+                       --backends ${backend} \
+                       --report "resnet50_perf_bs${bs}_backend_${backend}.csv"
+  done
 done

 # Benchmark VIT model
 echo "Benchmarking VIT model"
 for bs in ${batch_sizes[@]}
 do
-  python perf_run.py --model ${MODELS_DIR}/vit_scripted.jit.pt \
-                     --model_torch vit \
-                     --precision fp32,fp16 --inputs="(${bs}, 3, 224, 224)" \
-                     --batch_size ${bs} \
-                     --truncate \
-                     --backends torch,ts_trt,dynamo,torch_compile,inductor \
-                     --report "vit_perf_bs${bs}.txt"
+  for backend in ${backends[@]}
+  do
+    python perf_run.py --model ${MODELS_DIR}/vit_scripted.jit.pt \
+                       --model_torch vit \
+                       --precision fp16 --inputs="(${bs}, 3, 224, 224)" \
+                       --batch_size ${bs} \
+                       --truncate \
+                       --backends ${backend} \
+                       --report "vit_perf_bs${bs}_backend_${backend}.csv"
+  done
 done

 # Benchmark VIT Large model
 echo "Benchmarking VIT Large model"
 for bs in ${large_model_batch_sizes[@]}
 do
-  python perf_run.py --model ${MODELS_DIR}/vit_large_scripted.jit.pt \
-                     --model_torch vit_large \
-                     --precision fp32,fp16 --inputs="(${bs}, 3, 224, 224)" \
-                     --truncate \
-                     --batch_size ${bs} \
-                     --backends torch,ts_trt,dynamo,torch_compile,inductor \
-                     --report "vit_large_perf_bs${bs}.txt"
+  for backend in ${backends[@]}
+  do
+    python perf_run.py --model ${MODELS_DIR}/vit_large_scripted.jit.pt \
+                       --model_torch vit_large \
+                       --precision fp16 --inputs="(${bs}, 3, 224, 224)" \
+                       --batch_size ${bs} \
+                       --truncate \
+                       --backends ${backend} \
+                       --report "vit_large_perf_bs${bs}_backend_${backend}.csv"
+  done
 done

 # Benchmark EfficientNet-B0 model
 echo "Benchmarking EfficientNet-B0 model"
 for bs in ${batch_sizes[@]}
 do
-  python perf_run.py --model ${MODELS_DIR}/efficientnet_b0_scripted.jit.pt \
-                     --model_torch efficientnet_b0 \
-                     --precision fp32,fp16 --inputs="(${bs}, 3, 224, 224)" \
-                     --batch_size ${bs} \
-                     --truncate \
-                     --backends torch,ts_trt,dynamo,torch_compile,inductor \
-                     --report "efficientnet_b0_perf_bs${bs}.txt"
+  for backend in ${backends[@]}
+  do
+    python perf_run.py --model ${MODELS_DIR}/efficientnet_b0_scripted.jit.pt \
+                       --model_torch efficientnet_b0 \
+                       --precision fp16 --inputs="(${bs}, 3, 224, 224)" \
+                       --batch_size ${bs} \
+                       --truncate \
+                       --backends ${backend} \
+                       --report "efficientnet_b0_perf_bs${bs}_backend_${backend}.csv"
+  done
 done

 # Benchmark Stable Diffusion UNet model
 echo "Benchmarking SD UNet model"
 for bs in ${large_model_batch_sizes[@]}
 do
-  python perf_run.py --model_torch sd_unet \
-                     --precision fp32,fp16 --inputs="(${bs}, 4, 64, 64)@fp16;(${bs})@fp16;(${bs}, 1, 768)@fp16" \
-                     --batch_size ${bs} \
-                     --backends torch,dynamo,torch_compile,inductor \
-                     --truncate \
-                     --report "sd_unet_perf_bs${bs}.txt"
+  for backend in ${backends_no_torchscript[@]}
+  do
+    python perf_run.py --model_torch sd_unet \
+                       --precision fp16 --inputs="(${bs}, 4, 64, 64);(${bs});(${bs}, 1, 768)" \
+                       --batch_size ${bs} \
+                       --truncate \
+                       --backends ${backend} \
+                       --report "sd_unet_perf_bs${bs}_backend_${backend}.csv"
+  done
 done

 # Benchmark BERT model
 echo "Benchmarking Huggingface BERT base model"
 for bs in ${batch_sizes[@]}
 do
-  python perf_run.py --model ${MODELS_DIR}/bert_base_uncased_traced.jit.pt \
-                     --model_torch "bert_base_uncased" \
-                     --precision fp32 --inputs="(${bs}, 128)@int32;(${bs}, 128)@int32" \
-                     --batch_size ${bs} \
-                     --backends torch,ts_trt,dynamo,torch_compile,inductor \
-                     --truncate \
-                     --report "bert_base_perf_bs${bs}.txt"
+  for backend in ${backends[@]}
+  do
+    python perf_run.py --model ${MODELS_DIR}/bert_base_uncased_traced.jit.pt \
+                       --model_torch "bert_base_uncased" \
+                       --precision fp16 --inputs="(${bs}, 128)@int32;(${bs}, 128)@int32" \
+                       --batch_size ${bs} \
+                       --truncate \
+                       --backends ${backend} \
+                       --report "bert_base_perf_bs${bs}_backend_${backend}.csv"
+  done
 done

 # Collect and concatenate all results
 echo "Concatenating all results"
-(echo "Output of All Model Runs"; echo) >> all_outputs.txt;
-
-for i in $(ls *_bs*.txt);
-do (echo $i; cat $i; echo; echo) >> all_outputs.txt;
-done
+python accumulate_results.py
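
The commit message's note that each benchmark is its own Python process follows directly from this structure: every (model, batch size, backend) combination is a separate perf_run.py invocation, so a crash or GPU out-of-memory in one backend cannot contaminate the others. A rough Python equivalent of a single inner-loop iteration, given only as a sketch (it assumes MODELS_DIR is exported, as benchmark.sh does):

# Sketch only: a Python rendering of one benchmark.sh inner-loop iteration.
# Each run is an isolated perf_run.py subprocess.
import os
import subprocess

models_dir = os.environ["MODELS_DIR"]  # assumed to be exported, as in benchmark.sh
bs, backend = 1, "dynamo"

subprocess.run(
    [
        "python", "perf_run.py",
        "--model", f"{models_dir}/vgg16_scripted.jit.pt",
        "--model_torch", "vgg16",
        "--precision", "fp16",
        f"--inputs=({bs}, 3, 224, 224)",
        "--batch_size", str(bs),
        "--truncate",
        "--backends", backend,
        "--report", f"vgg16_perf_bs{bs}_backend_{backend}.csv",
    ],
    check=True,
)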

tools/perf/perf_run.py (+4, -4 lines)
@@ -593,9 +593,9 @@ def recordStats(backend, timings, precision, batch_size=1, compile_time_s=None):
     # Generate report
     print("Model Summary: ", model_name)
     summary = pd.DataFrame(results)
+    summary["model_name"] = (
+        model_name_torch if model_name_torch is not None else model_name
+    )
     print(summary)
     if args.report:
-        with open(args.report, "w") as file:
-            file.write("Model Summary: " + model_name + "\n")
-            file.write(summary.to_string())
-            file.close()
+        summary.to_csv(args.report)
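
One detail ties the two Python changes together: DataFrame.to_csv() writes the row index as an unnamed first column by default, and the index_col=0 argument in accumulate_results.py strips it back out on read. A small illustrative round trip (the columns are placeholders, not the real report schema):

# Illustrative round trip between perf_run.py's summary.to_csv(...) and the
# pd.read_csv(..., index_col=0) call in accumulate_results.py.
import pandas as pd

summary = pd.DataFrame({"Backend": ["dynamo"], "Batch size": [8]})  # placeholder columns
summary.to_csv("example_report.csv")  # index written as an unnamed first column

round_tripped = pd.read_csv("example_report.csv", index_col=0)
assert list(round_tripped.columns) == ["Backend", "Batch size"]  # no spurious "Unnamed: 0"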
