Skip to content

Commit 31c68d1

Browse files
authored
Remove AcceleratorConnector.num_gpus and deprecate Trainer.num_gpus (#12384)
1 parent caed77f commit 31c68d1

File tree

6 files changed

+53
-36
lines changed

6 files changed

+53
-36
lines changed

CHANGELOG.md

Lines changed: 7 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -527,7 +527,10 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/).
527527
- Deprecated `Trainer.devices` in favor of `Trainer.num_devices` and `Trainer.device_ids` ([#12151](https://github.com/PyTorchLightning/pytorch-lightning/pull/12151))
528528

529529

530-
- Deprecated `Trainer.root_gpu` in favor of `Trainer.strategy.root_device.index` when GPU is used. ([#12262](https://github.com/PyTorchLightning/pytorch-lightning/pull/12262))
530+
- Deprecated `Trainer.root_gpu` in favor of `Trainer.strategy.root_device.index` when GPU is used ([#12262](https://github.com/PyTorchLightning/pytorch-lightning/pull/12262))
531+
532+
533+
- Deprecated `Trainer.num_gpus` in favor of `Trainer.num_devices` when GPU is used ([#12384](https://github.com/PyTorchLightning/pytorch-lightning/pull/12384))
531534

532535

533536
### Removed
@@ -720,6 +723,9 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/).
720723
- Removed `AcceleratorConnector.root_gpu` property ([#12262](https://github.com/PyTorchLightning/pytorch-lightning/pull/12262))
721724

722725

726+
- Removed `AcceleratorConnector.num_gpus` property ([#12384](https://github.com/PyTorchLightning/pytorch-lightning/pull/12384))
727+
728+
723729
### Fixed
724730

725731
- Fixed an issue where `ModelCheckpoint` could delete older checkpoints when `dirpath` has changed during resumed training ([#12045](https://github.com/PyTorchLightning/pytorch-lightning/pull/12045))

pytorch_lightning/trainer/connectors/accelerator_connector.py

Lines changed: 0 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -815,12 +815,6 @@ def num_ipus(self) -> int:
815815
return self.devices
816816
return 0
817817

818-
@property
819-
def num_gpus(self) -> int:
820-
if isinstance(self.accelerator, GPUAccelerator):
821-
return self.devices
822-
return 0
823-
824818
@property
825819
def gpus(self) -> Optional[Union[List[int], str, int]]:
826820
return self._gpus

pytorch_lightning/trainer/trainer.py

Lines changed: 5 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -2071,7 +2071,11 @@ def ipus(self) -> int:
20712071

20722072
@property
20732073
def num_gpus(self) -> int:
2074-
return self._accelerator_connector.num_gpus
2074+
rank_zero_deprecation(
2075+
"`Trainer.num_gpus` was deprecated in v1.6 and will be removed in v1.8."
2076+
" Please use `Trainer.num_devices` instead."
2077+
)
2078+
return self.num_devices if isinstance(self.accelerator, GPUAccelerator) else 0
20752079

20762080
@property
20772081
def devices(self) -> int:

tests/deprecated_api/test_remove_1-8.py

Lines changed: 37 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -925,3 +925,40 @@ def test_root_gpu_property_0_passing(monkeypatch, gpus, expected_root_gpu, strat
925925
"Please use `Trainer.strategy.root_device.index` instead."
926926
):
927927
assert Trainer(gpus=gpus, strategy=strategy).root_gpu == expected_root_gpu
928+
929+
930+
@pytest.mark.parametrize(
931+
["gpus", "expected_num_gpus", "strategy"],
932+
[
933+
pytest.param(None, 0, None, id="None - expect 0 gpu to use."),
934+
pytest.param(0, 0, None, id="0th gpu, expect 0 gpus to use."),
935+
pytest.param(1, 1, None, id="1st gpu, expect 1 gpu to use."),
936+
pytest.param(-1, 16, "ddp", id="-1 - use all gpus"),
937+
pytest.param("-1", 16, "ddp", id="'-1' - use all gpus"),
938+
pytest.param(3, 3, "ddp", id="3rd gpu - 1 gpu to use (backend:ddp)"),
939+
],
940+
)
941+
def test_trainer_gpu_parse(monkeypatch, gpus, expected_num_gpus, strategy):
942+
monkeypatch.setattr(torch.cuda, "is_available", lambda: True)
943+
monkeypatch.setattr(torch.cuda, "device_count", lambda: 16)
944+
with pytest.deprecated_call(
945+
match="`Trainer.num_gpus` was deprecated in v1.6 and will be removed in v1.8."
946+
" Please use `Trainer.num_devices` instead."
947+
):
948+
assert Trainer(gpus=gpus, strategy=strategy).num_gpus == expected_num_gpus
949+
950+
951+
@pytest.mark.parametrize(
952+
["gpus", "expected_num_gpus", "strategy"],
953+
[
954+
pytest.param(None, 0, None, id="None - expect 0 gpu to use."),
955+
pytest.param(None, 0, "ddp", id="None - expect 0 gpu to use."),
956+
],
957+
)
958+
def test_trainer_num_gpu_0(monkeypatch, gpus, expected_num_gpus, strategy):
959+
monkeypatch.setattr(torch.cuda, "device_count", lambda: 0)
960+
with pytest.deprecated_call(
961+
match="`Trainer.num_gpus` was deprecated in v1.6 and will be removed in v1.8."
962+
" Please use `Trainer.num_devices` instead."
963+
):
964+
assert Trainer(gpus=gpus, strategy=strategy).num_gpus == expected_num_gpus

tests/models/test_gpu.py

Lines changed: 0 additions & 26 deletions
Original file line numberDiff line numberDiff line change
@@ -92,32 +92,6 @@ def device_count():
9292
monkeypatch.setattr(torch.cuda, "device_count", device_count)
9393

9494

95-
@pytest.mark.parametrize(
96-
["gpus", "expected_num_gpus", "strategy"],
97-
[
98-
pytest.param(None, 0, None, id="None - expect 0 gpu to use."),
99-
pytest.param(0, 0, None, id="0th gpu, expect 0 gpus to use."),
100-
pytest.param(1, 1, None, id="1st gpu, expect 1 gpu to use."),
101-
pytest.param(-1, PRETEND_N_OF_GPUS, "ddp", id="-1 - use all gpus"),
102-
pytest.param("-1", PRETEND_N_OF_GPUS, "ddp", id="'-1' - use all gpus"),
103-
pytest.param(3, 3, "ddp", id="3rd gpu - 1 gpu to use (backend:ddp)"),
104-
],
105-
)
106-
def test_trainer_gpu_parse(mocked_device_count, gpus, expected_num_gpus, strategy):
107-
assert Trainer(gpus=gpus, strategy=strategy).num_gpus == expected_num_gpus
108-
109-
110-
@pytest.mark.parametrize(
111-
["gpus", "expected_num_gpus", "strategy"],
112-
[
113-
pytest.param(None, 0, None, id="None - expect 0 gpu to use."),
114-
pytest.param(None, 0, "ddp", id="None - expect 0 gpu to use."),
115-
],
116-
)
117-
def test_trainer_num_gpu_0(mocked_device_count_0, gpus, expected_num_gpus, strategy):
118-
assert Trainer(gpus=gpus, strategy=strategy).num_gpus == expected_num_gpus
119-
120-
12195
# Asking for a gpu when non are available will result in a MisconfigurationException
12296
@pytest.mark.parametrize(
12397
["gpus", "expected_root_gpu", "strategy"],

tests/trainer/test_trainer.py

Lines changed: 4 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1222,7 +1222,8 @@ def test_trainer_config_accelerator(
12221222
assert isinstance(trainer.strategy, strategy_cls)
12231223
assert strategy_cls.strategy_name == strategy_name
12241224
assert isinstance(trainer.accelerator, accelerator_cls)
1225-
assert trainer.num_gpus == num_gpus
1225+
trainer_num_gpus = trainer.num_devices if isinstance(trainer.accelerator, GPUAccelerator) else 0
1226+
assert trainer_num_gpus == num_gpus
12261227

12271228

12281229
def test_trainer_subclassing():
@@ -2097,7 +2098,8 @@ def test_trainer_config_strategy(monkeypatch, trainer_kwargs, strategy_cls, stra
20972098
assert isinstance(trainer.strategy, strategy_cls)
20982099
assert strategy_cls.strategy_name == strategy_name
20992100
assert isinstance(trainer.accelerator, accelerator_cls)
2100-
assert trainer.num_gpus == num_gpus
2101+
trainer_num_gpus = trainer.num_devices if isinstance(trainer.accelerator, GPUAccelerator) else 0
2102+
assert trainer_num_gpus == num_gpus
21012103
assert trainer.num_nodes == trainer_kwargs.get("num_nodes", 1)
21022104

21032105

0 commit comments

Comments
 (0)