Skip to content

Commit cb52d6e

Browse files
rahul-tuli and bfineran
authored
Add analyze callable (#1574)
Co-authored-by: Benjamin Fineran <[email protected]>
1 parent 03c4078 commit cb52d6e

File tree

2 files changed

+38
-19
lines changed

2 files changed

+38
-19
lines changed

src/deepsparse/__init__.py

+1
Original file line numberDiff line numberDiff line change
@@ -38,6 +38,7 @@
3838
from .version import __version__, is_release
3939
from .analytics import deepsparse_analytics as _analytics
4040
from .subgraph_execute import *
41+
from .analyze import analyze
4142
from .evaluation.evaluator import evaluate
4243

4344
_analytics.send_event("python__init")

src/deepsparse/analyze.py

+37-19
Original file line numberDiff line numberDiff line change
@@ -31,7 +31,11 @@
3131
ModelAnalysis,
3232
NodeInferenceResult,
3333
)
34-
from sparsezoo.analyze.cli import analyze_options, analyze_performance_options
34+
from sparsezoo.analyze.cli import (
35+
DEEPSPARSE_ENGINE,
36+
analyze_options,
37+
analyze_performance_options,
38+
)
3539

3640

3741
_LOGGER = logging.getLogger(__name__)
@@ -74,21 +78,11 @@ def main(
7478
)
7579

7680
_LOGGER.info("Starting Analysis ...")
77-
analysis = ModelAnalysis.create(model_path)
78-
_LOGGER.info("Analysis complete, collating results...")
79-
scenario = BenchmarkScenario(
80-
batch_size=batch_size_throughput,
81-
num_cores=None,
82-
engine=benchmark_engine,
83-
)
84-
performance_summary = run_benchmark_and_analysis(
85-
onnx_model=model_to_path(model_path),
86-
scenario=scenario,
87-
)
81+
analysis = analyze(model_path, batch_size_throughput, benchmark_engine)
82+
8883
by_types: bool = convert_to_bool(by_types)
8984
by_layers: bool = convert_to_bool(by_layers)
9085

91-
analysis.benchmark_results = [performance_summary]
9286
summary = analysis.summary(
9387
by_types=by_types,
9488
by_layers=by_layers,
@@ -103,13 +97,9 @@ def main(
10397

10498
print("Comparison Analysis:")
10599
for model_to_compare in compare:
106-
compare_model_analysis = ModelAnalysis.create(model_to_compare)
107-
_LOGGER.info(f"Running Performance Analysis on {model_to_compare}")
108-
performance_summary = run_benchmark_and_analysis(
109-
onnx_model=model_to_path(model_to_compare),
110-
scenario=scenario,
100+
compare_model_analysis = analyze(
101+
model_to_compare, batch_size_throughput, benchmark_engine
111102
)
112-
compare_model_analysis.benchmark_results = [performance_summary]
113103
summary_comparison_model = compare_model_analysis.summary(
114104
by_types=by_types,
115105
by_layers=by_layers,
@@ -124,6 +114,34 @@ def main(
124114
analysis.yaml(file_path=save)
125115

126116

def analyze(
    model_path,
    batch_size_throughput: int = 1,
    benchmark_engine: str = DEEPSPARSE_ENGINE,
) -> ModelAnalysis:
    """
    Run a structural analysis of an ONNX model and attach throughput
    benchmark results to it.

    :param model_path: Local filepath to an ONNX model, or a SparseZoo stub
    :param batch_size_throughput: Batch size for throughput benchmark
    :param benchmark_engine: Benchmark engine to use, can be 'deepsparse' or
        'onnxruntime', defaults to 'deepsparse'
    :return: A `ModelAnalysis` object encapsulating the results of the analysis
    """
    model_analysis = ModelAnalysis.create(model_path)
    _LOGGER.info("Analysis complete, collating results...")

    # Benchmark with the requested batch size; num_cores=None leaves the
    # core-count choice to the benchmarking layer.
    benchmark_scenario = BenchmarkScenario(
        batch_size=batch_size_throughput,
        num_cores=None,
        engine=benchmark_engine,
    )
    model_analysis.benchmark_results = [
        run_benchmark_and_analysis(
            onnx_model=model_to_path(model_path),
            scenario=benchmark_scenario,
        )
    ]
    return model_analysis
127145
def run_benchmark_and_analysis(
128146
onnx_model: str,
129147
scenario: BenchmarkScenario,

0 commit comments

Comments (0)