From dc6ab549cc2b40eec52a5d2932c070693de5732d Mon Sep 17 00:00:00 2001 From: horheynm Date: Wed, 20 Sep 2023 18:01:43 +0000 Subject: [PATCH 1/9] benchmarker --- src/deepsparse/benchmark/api/__init__.py | 13 +++++ src/deepsparse/benchmark/api/benchmarker.py | 57 +++++++++++++++++++++ 2 files changed, 70 insertions(+) create mode 100644 src/deepsparse/benchmark/api/__init__.py create mode 100644 src/deepsparse/benchmark/api/benchmarker.py diff --git a/src/deepsparse/benchmark/api/__init__.py b/src/deepsparse/benchmark/api/__init__.py new file mode 100644 index 0000000000..0c44f887a4 --- /dev/null +++ b/src/deepsparse/benchmark/api/__init__.py @@ -0,0 +1,13 @@ +# Copyright (c) 2021 - present / Neuralmagic, Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/src/deepsparse/benchmark/api/benchmarker.py b/src/deepsparse/benchmark/api/benchmarker.py new file mode 100644 index 0000000000..97617994d0 --- /dev/null +++ b/src/deepsparse/benchmark/api/benchmarker.py @@ -0,0 +1,57 @@ +# Copyright (c) 2021 - present / Neuralmagic, Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + +from typing import Optional + +from deepsparse.benchmark.benchmark_model import benchmark_model +from deepsparse.benchmark.benchmark_pipeline import benchmark_pipeline + + +class Benchmarker: + def __init__(self, model, pipeline, url): + self.model = model + self.pipeline = pipeline + self.url = url + + def __call__(self, **kwargs): + + if self.model: + benchmark_model(model_path=self.model, **kwargs) + + if self.pipeline: + benchmark_pipeline(model_path=self.pipeline, **kwargs) + + if self.url: + # benchmark with url here + pass + + @staticmethod + def benchmark( + model: Optional[str] = None, + pipeline: Optional[str] = None, + url: Optional[str] = None, + **kwargs, + ): + if len((model, pipeline, url)) != 1: + return ValueError("[api.benchmark] Only one input arg required") + + if model: + benchmarker = Benchmarker(model=model) + elif pipeline: + benchmarker = Benchmarker(pipeline=pipeline) + elif url: + benchmarker = Benchmarker(url=url) + + return benchmarker(**kwargs) From 2c0661e5ee75518b318574355570b41591a37288 Mon Sep 17 00:00:00 2001 From: horheynm Date: Fri, 22 Sep 2023 01:54:06 +0000 Subject: [PATCH 2/9] add tests, blocked on adding dummy config data for benchmark pipeline --- src/deepsparse/benchmark/api/benchmarker.py | 51 ++++++--- src/deepsparse/benchmark/api/errors.py | 17 +++ src/deepsparse/benchmark/benchmark_model.py | 2 - tests/deepsparse/benchmark/__init__.py | 13 +++ tests/deepsparse/benchmark/api/__init__.py | 13 +++ .../benchmark/api/test_benchmarker.py | 101 ++++++++++++++++++ 6 files changed, 180 insertions(+), 17 deletions(-) create mode 100644 src/deepsparse/benchmark/api/errors.py create mode 100644 tests/deepsparse/benchmark/__init__.py create mode 100644 tests/deepsparse/benchmark/api/__init__.py create mode 100644 tests/deepsparse/benchmark/api/test_benchmarker.py diff --git a/src/deepsparse/benchmark/api/benchmarker.py b/src/deepsparse/benchmark/api/benchmarker.py index 97617994d0..fc479bb01e 100644 --- a/src/deepsparse/benchmark/api/benchmarker.py +++ b/src/deepsparse/benchmark/api/benchmarker.py @@ -15,43 +15,64 @@ from typing import Optional +from deepsparse.benchmark.api.errors import UnclearBenchmarkerModeException from deepsparse.benchmark.benchmark_model import benchmark_model from deepsparse.benchmark.benchmark_pipeline import benchmark_pipeline class Benchmarker: - def __init__(self, model, pipeline, url): + """ + Benchmark API + + Input arg to `model`, `pipeline` should be one of: + - SparseZoo stub + - path to a model.onnx + - path to a local folder containing a model.onnx + - path to onnx.ModelProto + + Provide the stub/path to one of + - model to run deesparse.benchmark + - pipeline to run deepsparse.benchmark_pipeline + """ + + def __init__( + self, + model: Optional[str] = None, + pipeline: Optional[str] = None, + ): + self._validate_exactly_one_mode_selected(model, pipeline) self.model = model self.pipeline = pipeline - self.url = url def __call__(self, **kwargs): - if self.model: - benchmark_model(model_path=self.model, **kwargs) + return benchmark_model(model_path=self.model, **kwargs) if self.pipeline: - benchmark_pipeline(model_path=self.pipeline, **kwargs) - - if self.url: - # benchmark with url here - pass + return benchmark_pipeline(model_path=self.pipeline, **kwargs) @staticmethod def benchmark( model: Optional[str] = None, pipeline: Optional[str] = None, - url: Optional[str] = None, **kwargs, ): - if len((model, pipeline, url)) != 1: - return ValueError("[api.benchmark] Only one input arg required") - if model: benchmarker = 
Benchmarker(model=model) elif pipeline: benchmarker = Benchmarker(pipeline=pipeline) - elif url: - benchmarker = Benchmarker(url=url) return benchmarker(**kwargs) + + def _validate_exactly_one_mode_selected( + self, + *args, + ): + selections = sum(1 for mode in args if mode is not None) + if selections != 1: + raise UnclearBenchmarkerModeException( + "Benchmarker only accepts" + "one input arg for " + "'model' to run deepsparse.benchmark" + "'pipeline' to run deepsparse.benchmark_pipeline" + ) diff --git a/src/deepsparse/benchmark/api/errors.py b/src/deepsparse/benchmark/api/errors.py new file mode 100644 index 0000000000..1053468d8f --- /dev/null +++ b/src/deepsparse/benchmark/api/errors.py @@ -0,0 +1,17 @@ +# Copyright (c) 2021 - present / Neuralmagic, Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +class UnclearBenchmarkerModeException(Exception): + pass diff --git a/src/deepsparse/benchmark/benchmark_model.py b/src/deepsparse/benchmark/benchmark_model.py index 10969a1571..980a90618d 100644 --- a/src/deepsparse/benchmark/benchmark_model.py +++ b/src/deepsparse/benchmark/benchmark_model.py @@ -410,7 +410,6 @@ def benchmark_model( seconds_to_warmup=warmup_time, num_streams=num_streams, ) - export_dict = { "engine": str(model), "version": __version__, @@ -435,7 +434,6 @@ def benchmark_model( _LOGGER.info("Saving benchmark results to JSON file at {}".format(export_path)) with open(export_path, "w") as out: json.dump(export_dict, out, indent=2) - return export_dict diff --git a/tests/deepsparse/benchmark/__init__.py b/tests/deepsparse/benchmark/__init__.py new file mode 100644 index 0000000000..0c44f887a4 --- /dev/null +++ b/tests/deepsparse/benchmark/__init__.py @@ -0,0 +1,13 @@ +# Copyright (c) 2021 - present / Neuralmagic, Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/tests/deepsparse/benchmark/api/__init__.py b/tests/deepsparse/benchmark/api/__init__.py new file mode 100644 index 0000000000..0c44f887a4 --- /dev/null +++ b/tests/deepsparse/benchmark/api/__init__.py @@ -0,0 +1,13 @@ +# Copyright (c) 2021 - present / Neuralmagic, Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/tests/deepsparse/benchmark/api/test_benchmarker.py b/tests/deepsparse/benchmark/api/test_benchmarker.py new file mode 100644 index 0000000000..1d4a538556 --- /dev/null +++ b/tests/deepsparse/benchmark/api/test_benchmarker.py @@ -0,0 +1,101 @@ +# Copyright (c) 2021 - present / Neuralmagic, Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import shutil +from typing import Any, Dict, Optional + +import pytest +from deepsparse.benchmark.api.benchmarker import Benchmarker +from deepsparse.benchmark.api.errors import UnclearBenchmarkerModeException +from sparsezoo import Model + + +@pytest.fixture(scope="function") +def get_model_path(): + """download model, return its path and delete at the end""" + + text_gen_stub = "zoo:opt-1.3b-opt_pretrain-quantW8A8" + default_download_path = os.path.expanduser( + os.path.join("~/.cache/nm_tests", "deepsparse") + ) + + def download_model_and_return_path( + stub: Optional[str] = None, download_path: Optional[str] = None + ): + model = Model(stub or text_gen_stub, download_path or default_download_path) + yield model.path + + # yield model.path() + # shutil.rmtree(path) + # assert os.path.exists(path) is False + + return download_model_and_return_path + + +@pytest.fixture +def benchmarker_fixture(get_model_path): + def get( + source: Optional[str] = None, + path: Optional[str] = None, + model_args: Optional[Dict[str, Any]] = None, + pipeline_args: Dict[str, Any] = None, + ): + model_path = path or next(get_model_path(stub=source)) + + required_benchmark_model_args = model_args or {} + + required_benchmark_pipeline_args = pipeline_args or { + "task": "text_generation", + "config": "", + } + + return ( + model_path, + required_benchmark_model_args, + required_benchmark_pipeline_args, + ) + + return get + + # required_benchmark_pipeline_args = { + # "task": "text_generation", + # } + + # return model_path, required_benchmark_pipeline_args + + +# def test_validate_exactly_one_arg_provided(): +# args = { +# "model": "foo", +# "pipeline": "bar", +# } +# with pytest.raises(UnclearBenchmarkerModeException): +# Benchmarker(**args) + + +# def test_benchmark_model_from_benchmarker(benchmarker_fixture): +# path, model_args, _ = benchmarker_fixture() +# benchmarker = Benchmarker(model=path) +# export_dict = benchmarker(**model_args) +# assert export_dict is not None + + +def test_benchmark_pipeline_from_benchmarker(benchmarker_fixture): + path, _, pipeline_args = benchmarker_fixture() + benchmarker = Benchmarker(pipeline=path) + batch_times, total_run_time, num_streams = benchmarker(**pipeline_args) + assert 
batch_times is not None + assert total_run_time is not None + assert num_streams is not None From 0c204ba0fd67b7239d2841f7f2c34b0b3a86f099 Mon Sep 17 00:00:00 2001 From: horheynm Date: Mon, 25 Sep 2023 19:38:27 +0000 Subject: [PATCH 3/9] tests --- .../benchmark/api/test_benchmarker.py | 121 ++++++++++++------ 1 file changed, 79 insertions(+), 42 deletions(-) diff --git a/tests/deepsparse/benchmark/api/test_benchmarker.py b/tests/deepsparse/benchmark/api/test_benchmarker.py index 1d4a538556..0652746f80 100644 --- a/tests/deepsparse/benchmark/api/test_benchmarker.py +++ b/tests/deepsparse/benchmark/api/test_benchmarker.py @@ -19,6 +19,7 @@ import pytest from deepsparse.benchmark.api.benchmarker import Benchmarker from deepsparse.benchmark.api.errors import UnclearBenchmarkerModeException +from deepsparse.benchmark.config import PipelineBenchmarkConfig from sparsezoo import Model @@ -26,20 +27,13 @@ def get_model_path(): """download model, return its path and delete at the end""" - text_gen_stub = "zoo:opt-1.3b-opt_pretrain-quantW8A8" - default_download_path = os.path.expanduser( - os.path.join("~/.cache/nm_tests", "deepsparse") - ) - - def download_model_and_return_path( - stub: Optional[str] = None, download_path: Optional[str] = None - ): - model = Model(stub or text_gen_stub, download_path or default_download_path) - yield model.path + def download_model_and_return_path(stub: str, download_path: Optional[str] = None): + model = Model(stub, download_path) + path = model.path + yield path - # yield model.path() - # shutil.rmtree(path) - # assert os.path.exists(path) is False + shutil.rmtree(path) + assert os.path.exists(path) is False return download_model_and_return_path @@ -47,18 +41,20 @@ def download_model_and_return_path( @pytest.fixture def benchmarker_fixture(get_model_path): def get( - source: Optional[str] = None, - path: Optional[str] = None, + stub: str, + task: Optional[str] = None, + config_dict: Optional[str] = None, + model_path: Optional[str] = None, model_args: Optional[Dict[str, Any]] = None, pipeline_args: Dict[str, Any] = None, ): - model_path = path or next(get_model_path(stub=source)) + model_path = model_path or next(get_model_path(stub=stub)) required_benchmark_model_args = model_args or {} required_benchmark_pipeline_args = pipeline_args or { - "task": "text_generation", - "config": "", + "task": task, + "config": PipelineBenchmarkConfig(**config_dict) if config_dict else None, } return ( @@ -69,32 +65,73 @@ def get( return get - # required_benchmark_pipeline_args = { - # "task": "text_generation", - # } - - # return model_path, required_benchmark_pipeline_args - - -# def test_validate_exactly_one_arg_provided(): -# args = { -# "model": "foo", -# "pipeline": "bar", -# } -# with pytest.raises(UnclearBenchmarkerModeException): -# Benchmarker(**args) - - -# def test_benchmark_model_from_benchmarker(benchmarker_fixture): -# path, model_args, _ = benchmarker_fixture() -# benchmarker = Benchmarker(model=path) -# export_dict = benchmarker(**model_args) -# assert export_dict is not None +def test_validate_exactly_one_mode_selected(): + args = { + "model": "foo", + "pipeline": "bar", + } + with pytest.raises(UnclearBenchmarkerModeException): + Benchmarker(**args) + + +@pytest.mark.parametrize( + "stub", + [ + "zoo:cv/classification/resnet_v1-50_2x/pytorch/sparseml/imagenet/base-none", + ( + "zoo:nlg/text_generation/codegen_mono-350m/pytorch/huggingface/" + "bigpython_bigquery_thepile/base_quant-none" + ), + ], +) +def 
test_benchmark_model_from_benchmarker(benchmarker_fixture, stub): + path, model_args, _ = benchmarker_fixture(stub=stub) + benchmarker = Benchmarker(model=path) + export_dict = benchmarker(**model_args) + assert export_dict is not None + + +@pytest.mark.parametrize( + "stub,task,config_dict", + [ + ( + "zoo:cv/classification/resnet_v1-50_2x/pytorch/sparseml/imagenet/base-none", + "image_classification", + { + "data_type": "dummy", + "gen_sequence_length": 100, + "input_image_shape": [500, 500, 3], + "pipeline_kwargs": {}, + "input_schema_kwargs": {}, + }, + ), + ( + ( + "zoo:nlg/text_generation/codegen_mono-350m/pytorch/huggingface/" + "bigpython_bigquery_thepile/base_quant-none" + ), + "text_generation", + { + "data_type": "dummy", + "gen_sequence_length": 100, + "pipeline_kwargs": {}, + "input_schema_kwargs": {}, + }, + ), + ], +) +def test_benchmark_pipeline_from_benchmarker( + benchmarker_fixture, stub, task, config_dict +): + + path, _, pipeline_args = benchmarker_fixture( + stub=stub, task=task, config_dict=config_dict + ) + # [TODO]: downstream benchmark_pipeline to accept path for text_gen. + # Passes for ic + benchmarker = Benchmarker(pipeline=stub) # TODO: -def test_benchmark_pipeline_from_benchmarker(benchmarker_fixture): - path, _, pipeline_args = benchmarker_fixture() - benchmarker = Benchmarker(pipeline=path) batch_times, total_run_time, num_streams = benchmarker(**pipeline_args) assert batch_times is not None assert total_run_time is not None From 8431980e2abfd51ac17c3212f325f4e8b38cf056 Mon Sep 17 00:00:00 2001 From: George Date: Mon, 25 Sep 2023 15:43:42 -0400 Subject: [PATCH 4/9] remove comment --- tests/deepsparse/benchmark/api/test_benchmarker.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/deepsparse/benchmark/api/test_benchmarker.py b/tests/deepsparse/benchmark/api/test_benchmarker.py index 0652746f80..2239bc8cac 100644 --- a/tests/deepsparse/benchmark/api/test_benchmarker.py +++ b/tests/deepsparse/benchmark/api/test_benchmarker.py @@ -130,7 +130,7 @@ def test_benchmark_pipeline_from_benchmarker( ) # [TODO]: downstream benchmark_pipeline to accept path for text_gen. 
# Passes for ic - benchmarker = Benchmarker(pipeline=stub) # TODO: + benchmarker = Benchmarker(pipeline=stub) batch_times, total_run_time, num_streams = benchmarker(**pipeline_args) assert batch_times is not None From 04cd7ef5d4d3e0eb6635a307f163d37cfea3c440 Mon Sep 17 00:00:00 2001 From: George Date: Mon, 25 Sep 2023 16:00:58 -0400 Subject: [PATCH 5/9] Update src/deepsparse/benchmark/api/benchmarker.py Co-authored-by: Benjamin Fineran --- src/deepsparse/benchmark/api/benchmarker.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/deepsparse/benchmark/api/benchmarker.py b/src/deepsparse/benchmark/api/benchmarker.py index fc479bb01e..11a853b64c 100644 --- a/src/deepsparse/benchmark/api/benchmarker.py +++ b/src/deepsparse/benchmark/api/benchmarker.py @@ -31,8 +31,8 @@ class Benchmarker: - path to onnx.ModelProto Provide the stub/path to one of - - model to run deesparse.benchmark - - pipeline to run deepsparse.benchmark_pipeline + - onnx model to run deesparse.benchmark + - deployment directory to run deepsparse deepsparse.benchmark_pipeline """ def __init__( From 7bf81a9770c7c20fbe636fdaeb756536d88958a6 Mon Sep 17 00:00:00 2001 From: horheynm Date: Mon, 25 Sep 2023 21:55:37 +0000 Subject: [PATCH 6/9] static func to generic func --- src/deepsparse/benchmark/api/benchmarker.py | 26 ++--- .../benchmark/api/test_benchmarker.py | 101 +++++++++++++++--- 2 files changed, 98 insertions(+), 29 deletions(-) diff --git a/src/deepsparse/benchmark/api/benchmarker.py b/src/deepsparse/benchmark/api/benchmarker.py index fc479bb01e..25dfde892e 100644 --- a/src/deepsparse/benchmark/api/benchmarker.py +++ b/src/deepsparse/benchmark/api/benchmarker.py @@ -20,6 +20,19 @@ from deepsparse.benchmark.benchmark_pipeline import benchmark_pipeline +def run_benchmarker( + model: Optional[str] = None, + pipeline: Optional[str] = None, + **kwargs, +): + if model: + benchmarker = Benchmarker(model=model) + elif pipeline: + benchmarker = Benchmarker(pipeline=pipeline) + + return benchmarker(**kwargs) + + class Benchmarker: """ Benchmark API @@ -51,19 +64,6 @@ def __call__(self, **kwargs): if self.pipeline: return benchmark_pipeline(model_path=self.pipeline, **kwargs) - @staticmethod - def benchmark( - model: Optional[str] = None, - pipeline: Optional[str] = None, - **kwargs, - ): - if model: - benchmarker = Benchmarker(model=model) - elif pipeline: - benchmarker = Benchmarker(pipeline=pipeline) - - return benchmarker(**kwargs) - def _validate_exactly_one_mode_selected( self, *args, diff --git a/tests/deepsparse/benchmark/api/test_benchmarker.py b/tests/deepsparse/benchmark/api/test_benchmarker.py index 0652746f80..ab9b403dcf 100644 --- a/tests/deepsparse/benchmark/api/test_benchmarker.py +++ b/tests/deepsparse/benchmark/api/test_benchmarker.py @@ -15,14 +15,37 @@ import os import shutil from typing import Any, Dict, Optional +from unittest import mock import pytest -from deepsparse.benchmark.api.benchmarker import Benchmarker +from deepsparse.benchmark.api.benchmarker import Benchmarker, run_benchmarker from deepsparse.benchmark.api.errors import UnclearBenchmarkerModeException from deepsparse.benchmark.config import PipelineBenchmarkConfig from sparsezoo import Model +# from types import MethodType + + +IC = "image_classification" +TEXT_GEN = "text_generation" + +BENCHMARK_PIPELINE_IC_CONFIG = { + "data_type": "dummy", + "gen_sequence_length": 100, + "input_image_shape": [500, 500, 3], + "pipeline_kwargs": {}, + "input_schema_kwargs": {}, +} + +BENCHMARK_PIPELINE_TEXT_GEN_CONFIG = { + "data_type": 
"dummy", + "gen_sequence_length": 100, + "pipeline_kwargs": {}, + "input_schema_kwargs": {}, +} + + @pytest.fixture(scope="function") def get_model_path(): """download model, return its path and delete at the end""" @@ -66,6 +89,25 @@ def get( return get +class MockBenchmarker(Benchmarker): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + + def __call__(self, **kwargs): + if self.model: + return "foo" + + if self.pipeline: + pipeline_kwargs = kwargs["config"].__dict__ + if kwargs["task"] == IC: + assert set(BENCHMARK_PIPELINE_IC_CONFIG).issubset(set(pipeline_kwargs)) + else: + assert set(BENCHMARK_PIPELINE_TEXT_GEN_CONFIG).issubset( + set(pipeline_kwargs) + ) + return "bar" + + def test_validate_exactly_one_mode_selected(): args = { "model": "foo", @@ -97,27 +139,16 @@ def test_benchmark_model_from_benchmarker(benchmarker_fixture, stub): [ ( "zoo:cv/classification/resnet_v1-50_2x/pytorch/sparseml/imagenet/base-none", - "image_classification", - { - "data_type": "dummy", - "gen_sequence_length": 100, - "input_image_shape": [500, 500, 3], - "pipeline_kwargs": {}, - "input_schema_kwargs": {}, - }, + IC, + BENCHMARK_PIPELINE_IC_CONFIG, ), ( ( "zoo:nlg/text_generation/codegen_mono-350m/pytorch/huggingface/" "bigpython_bigquery_thepile/base_quant-none" ), - "text_generation", - { - "data_type": "dummy", - "gen_sequence_length": 100, - "pipeline_kwargs": {}, - "input_schema_kwargs": {}, - }, + TEXT_GEN, + BENCHMARK_PIPELINE_TEXT_GEN_CONFIG, ), ], ) @@ -136,3 +167,41 @@ def test_benchmark_pipeline_from_benchmarker( assert batch_times is not None assert total_run_time is not None assert num_streams is not None + + +@pytest.mark.parametrize( + "stub,task,config_dict", + [ + ( + "zoo:cv/classification/resnet_v1-50_2x/pytorch/sparseml/imagenet/base-none", + IC, + BENCHMARK_PIPELINE_IC_CONFIG, + ), + ( + "zoo:nlg/text_generation/codegen_mono-350m/pytorch/huggingface/" + "bigpython_bigquery_thepile/base_quant-none", + TEXT_GEN, + BENCHMARK_PIPELINE_TEXT_GEN_CONFIG, + ), + ], +) +def test_run_benchmarker( + benchmarker_fixture, + stub, + task, + config_dict, +): + + path, model_args, pipeline_args = benchmarker_fixture( + stub=stub, task=task, config_dict=config_dict + ) + + with mock.patch( + "deepsparse.benchmark.api.benchmarker.Benchmarker", + side_effect=MockBenchmarker, + ): + response_model = run_benchmarker(model=path, **model_args) + assert response_model == "foo" + + response_pipeline = run_benchmarker(pipeline=stub, **pipeline_args) + assert response_pipeline == "bar" From b1309c6e4ed3b2b9a169e1ac041c595f3d1b0d49 Mon Sep 17 00:00:00 2001 From: horheynm Date: Mon, 25 Sep 2023 22:13:38 +0000 Subject: [PATCH 7/9] tests for def run_benchmarker --- .../benchmark/api/test_benchmarker.py | 270 +++++++++--------- 1 file changed, 137 insertions(+), 133 deletions(-) diff --git a/tests/deepsparse/benchmark/api/test_benchmarker.py b/tests/deepsparse/benchmark/api/test_benchmarker.py index 72b8eb7d5c..0b501b15d6 100644 --- a/tests/deepsparse/benchmark/api/test_benchmarker.py +++ b/tests/deepsparse/benchmark/api/test_benchmarker.py @@ -24,9 +24,6 @@ from sparsezoo import Model -# from types import MethodType - - IC = "image_classification" TEXT_GEN = "text_generation" @@ -46,49 +43,6 @@ } -@pytest.fixture(scope="function") -def get_model_path(): - """download model, return its path and delete at the end""" - - def download_model_and_return_path(stub: str, download_path: Optional[str] = None): - model = Model(stub, download_path) - path = model.path - yield path - - 
shutil.rmtree(path) - assert os.path.exists(path) is False - - return download_model_and_return_path - - -@pytest.fixture -def benchmarker_fixture(get_model_path): - def get( - stub: str, - task: Optional[str] = None, - config_dict: Optional[str] = None, - model_path: Optional[str] = None, - model_args: Optional[Dict[str, Any]] = None, - pipeline_args: Dict[str, Any] = None, - ): - model_path = model_path or next(get_model_path(stub=stub)) - - required_benchmark_model_args = model_args or {} - - required_benchmark_pipeline_args = pipeline_args or { - "task": task, - "config": PipelineBenchmarkConfig(**config_dict) if config_dict else None, - } - - return ( - model_path, - required_benchmark_model_args, - required_benchmark_pipeline_args, - ) - - return get - - class MockBenchmarker(Benchmarker): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) @@ -108,100 +62,150 @@ def __call__(self, **kwargs): return "bar" -def test_validate_exactly_one_mode_selected(): - args = { - "model": "foo", - "pipeline": "bar", - } - with pytest.raises(UnclearBenchmarkerModeException): - Benchmarker(**args) - - -@pytest.mark.parametrize( - "stub", - [ - "zoo:cv/classification/resnet_v1-50_2x/pytorch/sparseml/imagenet/base-none", - ( - "zoo:nlg/text_generation/codegen_mono-350m/pytorch/huggingface/" - "bigpython_bigquery_thepile/base_quant-none" - ), - ], -) -def test_benchmark_model_from_benchmarker(benchmarker_fixture, stub): - path, model_args, _ = benchmarker_fixture(stub=stub) - benchmarker = Benchmarker(model=path) - export_dict = benchmarker(**model_args) - assert export_dict is not None - - -@pytest.mark.parametrize( - "stub,task,config_dict", - [ - ( +@pytest.mark.skip(reason="Heavy load -- download text-gen models -- for GHA machine") +class TestBenchmarker: + @pytest.fixture + def get_model_path(self): + """download model, return its path and delete at the end""" + + def download_model_and_return_path( + stub: str, download_path: Optional[str] = None + ): + model = Model(stub, download_path) + path = model.path + yield path + + shutil.rmtree(path) + assert os.path.exists(path) is False + + return download_model_and_return_path + + @pytest.fixture + def benchmarker_fixture(self, get_model_path): + def get( + stub: str, + task: Optional[str] = None, + config_dict: Optional[str] = None, + model_path: Optional[str] = None, + model_args: Optional[Dict[str, Any]] = None, + pipeline_args: Dict[str, Any] = None, + ): + model_path = model_path or next(get_model_path(stub=stub)) + + required_benchmark_model_args = model_args or {} + + required_benchmark_pipeline_args = pipeline_args or { + "task": task, + "config": PipelineBenchmarkConfig(**config_dict) + if config_dict + else None, + } + + return ( + model_path, + required_benchmark_model_args, + required_benchmark_pipeline_args, + ) + + return get + + def test_validate_exactly_one_mode_selected(self): + args = { + "model": "foo", + "pipeline": "bar", + } + with pytest.raises(UnclearBenchmarkerModeException): + Benchmarker(**args) + + @pytest.mark.parametrize( + "stub", + [ "zoo:cv/classification/resnet_v1-50_2x/pytorch/sparseml/imagenet/base-none", - IC, - BENCHMARK_PIPELINE_IC_CONFIG, - ), - ( ( "zoo:nlg/text_generation/codegen_mono-350m/pytorch/huggingface/" "bigpython_bigquery_thepile/base_quant-none" ), - TEXT_GEN, - BENCHMARK_PIPELINE_TEXT_GEN_CONFIG, - ), - ], -) -def test_benchmark_pipeline_from_benchmarker( - benchmarker_fixture, stub, task, config_dict -): - - path, _, pipeline_args = benchmarker_fixture( - stub=stub, task=task, 
config_dict=config_dict + ], ) - # [TODO]: downstream benchmark_pipeline to accept path for text_gen. - # Passes for ic - benchmarker = Benchmarker(pipeline=stub) - - batch_times, total_run_time, num_streams = benchmarker(**pipeline_args) - assert batch_times is not None - assert total_run_time is not None - assert num_streams is not None - - -@pytest.mark.parametrize( - "stub,task,config_dict", - [ - ( - "zoo:cv/classification/resnet_v1-50_2x/pytorch/sparseml/imagenet/base-none", - IC, - BENCHMARK_PIPELINE_IC_CONFIG, - ), - ( - "zoo:nlg/text_generation/codegen_mono-350m/pytorch/huggingface/" - "bigpython_bigquery_thepile/base_quant-none", - TEXT_GEN, - BENCHMARK_PIPELINE_TEXT_GEN_CONFIG, - ), - ], -) -def test_run_benchmarker( - benchmarker_fixture, - stub, - task, - config_dict, -): - - path, model_args, pipeline_args = benchmarker_fixture( - stub=stub, task=task, config_dict=config_dict + def test_benchmark_model_from_benchmarker(self, benchmarker_fixture, stub): + path, model_args, _ = benchmarker_fixture(stub=stub) + benchmarker = Benchmarker(model=path) + export_dict = benchmarker(**model_args) + assert export_dict is not None + + @pytest.mark.parametrize( + "stub,task,config_dict", + [ + ( + ( + "zoo:cv/classification/resnet_v1-50_2x/pytorch/sparseml/" + "imagenet/base-none" + ), + IC, + BENCHMARK_PIPELINE_IC_CONFIG, + ), + ( + ( + "zoo:nlg/text_generation/codegen_mono-350m/pytorch/huggingface/" + "bigpython_bigquery_thepile/base_quant-none" + ), + TEXT_GEN, + BENCHMARK_PIPELINE_TEXT_GEN_CONFIG, + ), + ], ) + def test_benchmark_pipeline_from_benchmarker( + self, benchmarker_fixture, stub, task, config_dict + ): - with mock.patch( - "deepsparse.benchmark.api.benchmarker.Benchmarker", - side_effect=MockBenchmarker, + path, _, pipeline_args = benchmarker_fixture( + stub=stub, task=task, config_dict=config_dict + ) + # [TODO]: accept path for text_gen downstream benchmark_pipeline + # Passes for ic + benchmarker = Benchmarker(pipeline=stub) + + batch_times, total_run_time, num_streams = benchmarker(**pipeline_args) + assert batch_times is not None + assert total_run_time is not None + assert num_streams is not None + + @pytest.mark.parametrize( + "stub,task,config_dict", + [ + ( + ( + "zoo:cv/classification/resnet_v1-50_2x/pytorch/sparseml/" + "imagenet/base-none" + ), + IC, + BENCHMARK_PIPELINE_IC_CONFIG, + ), + ( + "zoo:nlg/text_generation/codegen_mono-350m/pytorch/huggingface/" + "bigpython_bigquery_thepile/base_quant-none", + TEXT_GEN, + BENCHMARK_PIPELINE_TEXT_GEN_CONFIG, + ), + ], + ) + def test_run_benchmarker( + self, + benchmarker_fixture, + stub, + task, + config_dict, ): - response_model = run_benchmarker(model=path, **model_args) - assert response_model == "foo" + path, model_args, pipeline_args = benchmarker_fixture( + stub=stub, task=task, config_dict=config_dict + ) + + with mock.patch( + "deepsparse.benchmark.api.benchmarker.Benchmarker", + side_effect=MockBenchmarker, + ): + response_model = run_benchmarker(model=path, **model_args) + assert response_model == "foo" - response_pipeline = run_benchmarker(pipeline=stub, **pipeline_args) - assert response_pipeline == "bar" + response_pipeline = run_benchmarker(pipeline=stub, **pipeline_args) + assert response_pipeline == "bar" From 7251d2854cfb1803229b6f8125539878874c58e6 Mon Sep 17 00:00:00 2001 From: horheynm Date: Tue, 26 Sep 2023 03:16:07 +0000 Subject: [PATCH 8/9] add test_run_benchmarker__failure --- src/deepsparse/benchmark/api/benchmarker.py | 45 +++++++++------- .../benchmark/api/test_benchmarker.py | 54 
+++++++++++-------- 2 files changed, 59 insertions(+), 40 deletions(-) diff --git a/src/deepsparse/benchmark/api/benchmarker.py b/src/deepsparse/benchmark/api/benchmarker.py index 7614a80efc..989a242240 100644 --- a/src/deepsparse/benchmark/api/benchmarker.py +++ b/src/deepsparse/benchmark/api/benchmarker.py @@ -25,12 +25,32 @@ def run_benchmarker( pipeline: Optional[str] = None, **kwargs, ): - if model: - benchmarker = Benchmarker(model=model) - elif pipeline: - benchmarker = Benchmarker(pipeline=pipeline) + if bool(model) ^ bool(pipeline): + if model: + benchmarker = Benchmarker(model=model) + elif pipeline: + benchmarker = Benchmarker(pipeline=pipeline) - return benchmarker(**kwargs) + return benchmarker(**kwargs) + raise UnclearBenchmarkerModeException( + "Benchmarker only accepts" + "one input arg for " + "'model' to run deepsparse.benchmark" + "'pipeline' to run deepsparse.benchmark_pipeline" + ) + + +def _validate_exactly_one_mode_selected( + *args, +): + selections = sum(1 for mode in args if mode is not None) + if selections != 1: + raise UnclearBenchmarkerModeException( + "Benchmarker only accepts" + "one input arg for " + "'model' to run deepsparse.benchmark" + "'pipeline' to run deepsparse.benchmark_pipeline" + ) class Benchmarker: @@ -53,7 +73,7 @@ def __init__( model: Optional[str] = None, pipeline: Optional[str] = None, ): - self._validate_exactly_one_mode_selected(model, pipeline) + _validate_exactly_one_mode_selected(model, pipeline) self.model = model self.pipeline = pipeline @@ -63,16 +83,3 @@ def __call__(self, **kwargs): if self.pipeline: return benchmark_pipeline(model_path=self.pipeline, **kwargs) - - def _validate_exactly_one_mode_selected( - self, - *args, - ): - selections = sum(1 for mode in args if mode is not None) - if selections != 1: - raise UnclearBenchmarkerModeException( - "Benchmarker only accepts" - "one input arg for " - "'model' to run deepsparse.benchmark" - "'pipeline' to run deepsparse.benchmark_pipeline" - ) diff --git a/tests/deepsparse/benchmark/api/test_benchmarker.py b/tests/deepsparse/benchmark/api/test_benchmarker.py index 0b501b15d6..85c745f59c 100644 --- a/tests/deepsparse/benchmark/api/test_benchmarker.py +++ b/tests/deepsparse/benchmark/api/test_benchmarker.py @@ -24,10 +24,10 @@ from sparsezoo import Model -IC = "image_classification" -TEXT_GEN = "text_generation" +IC_STRING = "image_classification" +TEXT_GEN_STRING = "text_generation" -BENCHMARK_PIPELINE_IC_CONFIG = { +BENCHMARK_PIPELINE_IC_CONFIG_DICT = { "data_type": "dummy", "gen_sequence_length": 100, "input_image_shape": [500, 500, 3], @@ -35,7 +35,7 @@ "input_schema_kwargs": {}, } -BENCHMARK_PIPELINE_TEXT_GEN_CONFIG = { +BENCHMARK_PIPELINE_TEXT_GEN_CONFIG_DICT = { "data_type": "dummy", "gen_sequence_length": 100, "pipeline_kwargs": {}, @@ -53,16 +53,18 @@ def __call__(self, **kwargs): if self.pipeline: pipeline_kwargs = kwargs["config"].__dict__ - if kwargs["task"] == IC: - assert set(BENCHMARK_PIPELINE_IC_CONFIG).issubset(set(pipeline_kwargs)) - else: - assert set(BENCHMARK_PIPELINE_TEXT_GEN_CONFIG).issubset( + if kwargs["task"] == IC_STRING: + assert set(BENCHMARK_PIPELINE_IC_CONFIG_DICT).issubset( + set(pipeline_kwargs) + ) + elif kwargs["task"] == TEXT_GEN_STRING: + assert set(BENCHMARK_PIPELINE_TEXT_GEN_CONFIG_DICT).issubset( set(pipeline_kwargs) ) return "bar" -@pytest.mark.skip(reason="Heavy load -- download text-gen models -- for GHA machine") +# @pytest.mark.skip(reason="Heavy load -- download text-gen models -- for GHA machine") class TestBenchmarker: @pytest.fixture 
def get_model_path(self): @@ -88,7 +90,7 @@ def get( config_dict: Optional[str] = None, model_path: Optional[str] = None, model_args: Optional[Dict[str, Any]] = None, - pipeline_args: Dict[str, Any] = None, + pipeline_args: Optional[Dict[str, Any]] = None, ): model_path = model_path or next(get_model_path(stub=stub)) @@ -110,12 +112,12 @@ def get( return get def test_validate_exactly_one_mode_selected(self): - args = { + kwargs = { "model": "foo", "pipeline": "bar", } with pytest.raises(UnclearBenchmarkerModeException): - Benchmarker(**args) + Benchmarker(**kwargs) @pytest.mark.parametrize( "stub", @@ -141,16 +143,16 @@ def test_benchmark_model_from_benchmarker(self, benchmarker_fixture, stub): "zoo:cv/classification/resnet_v1-50_2x/pytorch/sparseml/" "imagenet/base-none" ), - IC, - BENCHMARK_PIPELINE_IC_CONFIG, + IC_STRING, + BENCHMARK_PIPELINE_IC_CONFIG_DICT, ), ( ( "zoo:nlg/text_generation/codegen_mono-350m/pytorch/huggingface/" "bigpython_bigquery_thepile/base_quant-none" ), - TEXT_GEN, - BENCHMARK_PIPELINE_TEXT_GEN_CONFIG, + TEXT_GEN_STRING, + BENCHMARK_PIPELINE_TEXT_GEN_CONFIG_DICT, ), ], ) @@ -178,18 +180,18 @@ def test_benchmark_pipeline_from_benchmarker( "zoo:cv/classification/resnet_v1-50_2x/pytorch/sparseml/" "imagenet/base-none" ), - IC, - BENCHMARK_PIPELINE_IC_CONFIG, + IC_STRING, + BENCHMARK_PIPELINE_IC_CONFIG_DICT, ), ( "zoo:nlg/text_generation/codegen_mono-350m/pytorch/huggingface/" "bigpython_bigquery_thepile/base_quant-none", - TEXT_GEN, - BENCHMARK_PIPELINE_TEXT_GEN_CONFIG, + TEXT_GEN_STRING, + BENCHMARK_PIPELINE_TEXT_GEN_CONFIG_DICT, ), ], ) - def test_run_benchmarker( + def test_run_benchmarker__success( self, benchmarker_fixture, stub, @@ -209,3 +211,13 @@ def test_run_benchmarker( response_pipeline = run_benchmarker(pipeline=stub, **pipeline_args) assert response_pipeline == "bar" + + def test_run_benchmarker__failure( + self, + ): + kwargs = { + "model": "foo", + "pipeline": "bar", + } + with pytest.raises(UnclearBenchmarkerModeException): + run_benchmarker(**kwargs) From b10ec5b93a304eea48c4a739d1c94af2fc7fcba6 Mon Sep 17 00:00:00 2001 From: horheynm Date: Tue, 26 Sep 2023 03:16:45 +0000 Subject: [PATCH 9/9] skip test --- tests/deepsparse/benchmark/api/test_benchmarker.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/deepsparse/benchmark/api/test_benchmarker.py b/tests/deepsparse/benchmark/api/test_benchmarker.py index 85c745f59c..eec8066348 100644 --- a/tests/deepsparse/benchmark/api/test_benchmarker.py +++ b/tests/deepsparse/benchmark/api/test_benchmarker.py @@ -64,7 +64,7 @@ def __call__(self, **kwargs): return "bar" -# @pytest.mark.skip(reason="Heavy load -- download text-gen models -- for GHA machine") +@pytest.mark.skip(reason="Heavy load -- download text-gen models -- for GHA machine") class TestBenchmarker: @pytest.fixture def get_model_path(self):
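
For reference, a minimal usage sketch of the Benchmarker API introduced by this patch series, reflecting its final state after patch 9/9. The SparseZoo stub and the PipelineBenchmarkConfig values below are illustrative only (borrowed from the test parametrization in the patches), not required values.

    from deepsparse.benchmark.api.benchmarker import run_benchmarker
    from deepsparse.benchmark.config import PipelineBenchmarkConfig

    stub = "zoo:cv/classification/resnet_v1-50_2x/pytorch/sparseml/imagenet/base-none"

    # Engine-level benchmark: wraps deepsparse.benchmark and returns its export dict.
    model_results = run_benchmarker(model=stub)

    # Pipeline-level benchmark: wraps deepsparse.benchmark_pipeline and returns
    # (batch_times, total_run_time, num_streams); extra kwargs are forwarded to it.
    config = PipelineBenchmarkConfig(
        data_type="dummy",
        gen_sequence_length=100,
        input_image_shape=[500, 500, 3],
        pipeline_kwargs={},
        input_schema_kwargs={},
    )
    batch_times, total_run_time, num_streams = run_benchmarker(
        pipeline=stub,
        task="image_classification",
        config=config,
    )

    # Passing both `model` and `pipeline`, or neither, raises
    # UnclearBenchmarkerModeException.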