
Commit 97e327e

deprecate Trainer.root_gpu and remove AcceleratorConnector.root_gpu
1 parent aea96e4 commit 97e327e

File tree: 6 files changed, +56 -37 lines

CHANGELOG.md

Lines changed: 6 additions & 0 deletions
@@ -482,6 +482,9 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/).
 - Deprecated `PrecisionPlugin.on_{save,load}_checkpoint` in favor of `PrecisionPlugin.{state_dict,load_state_dict}` ([#11978](https://github.com/PyTorchLightning/pytorch-lightning/pull/11978))
 
 
+- Deprecated `Trainer.root_gpu` in favor of `Trainer.strategy.root_device.index` when GPU is used ([#12262](https://github.com/PyTorchLightning/pytorch-lightning/pull/12262))
+
+
 ### Removed
 
 - Removed deprecated parameter `method` in `pytorch_lightning.utilities.model_helpers.is_overridden` ([#10507](https://github.com/PyTorchLightning/pytorch-lightning/pull/10507))
@@ -669,6 +672,9 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/).
 - Removed public attribute `sync_batchnorm` from strategies ([#11754](https://github.com/PyTorchLightning/pytorch-lightning/pull/11754))
 
 
+- Removed `AcceleratorConnector.root_gpu` property ([#12262](https://github.com/PyTorchLightning/pytorch-lightning/pull/12262))
+
+
 ### Fixed
 
 - Fixed an issue where `ModelCheckpoint` could delete older checkpoints when `dirpath` has changed during resumed training ([#12045](https://github.com/PyTorchLightning/pytorch-lightning/pull/12045))

pytorch_lightning/trainer/connectors/accelerator_connector.py

Lines changed: 0 additions & 8 deletions
@@ -775,14 +775,6 @@ def parallel_devices(self) -> List[Union[torch.device, int]]:
     def num_processes(self) -> int:
         return self.devices if self.devices is not None else 1
 
-    @property
-    def root_gpu(self) -> Optional[int]:
-        return (
-            self.strategy.root_device.index
-            if not isinstance(self.accelerator, (IPUAccelerator, TPUAccelerator))
-            else None
-        )
-
     @property
     def devices(self) -> int:
         if isinstance(self.strategy, SingleDeviceStrategy):
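A subtlety in the removed logic: the connector property returned `self.strategy.root_device.index` for anything that was not an IPU/TPU accelerator, which still comes out as `None` on CPU because CPU devices carry no ordinal, whereas the deprecated `Trainer.root_gpu` (next file) checks for `GPUAccelerator` explicitly. A tiny illustration of the device-index behavior involved (an aside, not code from this commit):

import torch

# CPU devices have no ordinal, so `.index` is None
assert torch.device("cpu").index is None
# A CUDA device object carries the ordinal of its GPU (here, root GPU 0)
assert torch.device("cuda", 0).index == 0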

pytorch_lightning/trainer/trainer.py

Lines changed: 5 additions & 1 deletion
@@ -2016,7 +2016,11 @@ def num_processes(self) -> int:
 
     @property
     def root_gpu(self) -> Optional[int]:
-        return self._accelerator_connector.root_gpu
+        rank_zero_deprecation(
+            "`Trainer.root_gpu` is deprecated in v1.6 and will be removed in v1.8. Please use "
+            "`Trainer.strategy.root_device.index if isinstance(Trainer.accelerator, GPUAccelerator) else None` instead."
+        )
+        return self.strategy.root_device.index if isinstance(self.accelerator, GPUAccelerator) else None
 
     @property
     def tpu_cores(self) -> int:
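For downstream code, the warning message doubles as the migration recipe. A minimal sketch of the replacement (illustrative only; it assumes a machine with at least one GPU so the `Trainer` can be built with `gpus=1`):

from pytorch_lightning import Trainer
from pytorch_lightning.accelerators import GPUAccelerator

trainer = Trainer(gpus=1, strategy="ddp")

# Before: emits the deprecation warning in v1.6, removed in v1.8
# root_gpu = trainer.root_gpu

# After: the expression suggested by the warning message
root_gpu = trainer.strategy.root_device.index if isinstance(trainer.accelerator, GPUAccelerator) else None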

tests/deprecated_api/test_remove_1-8.py

Lines changed: 38 additions & 0 deletions
@@ -740,3 +740,41 @@ def on_load_checkpoint(self, checkpoint):
 
 def test_v1_8_0_abstract_profiler():
     assert "`AbstractProfiler` was deprecated in v1.6" in AbstractProfiler.__doc__
+
+
+@pytest.mark.parametrize(
+    ["gpus", "expected_root_gpu", "strategy"],
+    [
+        pytest.param(None, None, "ddp", id="None is None"),
+        pytest.param(0, None, "ddp", id="0 gpus, expect gpu root device to be None."),
+        pytest.param(1, 0, "ddp", id="1 gpu, expect gpu root device to be 0."),
+        pytest.param(-1, 0, "ddp", id="-1 - use all gpus, expect gpu root device to be 0."),
+        pytest.param("-1", 0, "ddp", id="'-1' - use all gpus, expect gpu root device to be 0."),
+        pytest.param(3, 0, "ddp", id="3 gpus, expect gpu root device to be 0. (backend: ddp)"),
+    ],
+)
+def test_root_gpu_property(monkeypatch, gpus, expected_root_gpu, strategy):
+    monkeypatch.setattr(torch.cuda, "is_available", lambda: True)
+    monkeypatch.setattr(torch.cuda, "device_count", lambda: 16)
+    with pytest.deprecated_call(
+        match="`Trainer.root_gpu` is deprecated in v1.6 and will be removed in v1.8. Please use "
+        r"`Trainer.strategy.root_device.index if isinstance\(Trainer.accelerator, GPUAccelerator\) else None` instead."
+    ):
+        assert Trainer(gpus=gpus, strategy=strategy).root_gpu == expected_root_gpu
+
+
+@pytest.mark.parametrize(
+    ["gpus", "expected_root_gpu", "strategy"],
+    [
+        pytest.param(None, None, None, id="None is None"),
+        pytest.param(None, None, "ddp", id="None is None"),
+        pytest.param(0, None, "ddp", id="None is None"),
+    ],
+)
+def test_root_gpu_property_0_passing(monkeypatch, gpus, expected_root_gpu, strategy):
+    monkeypatch.setattr(torch.cuda, "device_count", lambda: 0)
+    with pytest.deprecated_call(
+        match="`Trainer.root_gpu` is deprecated in v1.6 and will be removed in v1.8. Please use "
+        r"`Trainer.strategy.root_device.index if isinstance\(Trainer.accelerator, GPUAccelerator\) else None` instead."
+    ):
+        assert Trainer(gpus=gpus, strategy=strategy).root_gpu == expected_root_gpu
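These tests lean on `pytest.deprecated_call`, a context manager that fails the test unless the block raises a `DeprecationWarning` matching the `match` regex. A self-contained sketch of that pattern (hypothetical names, not from this commit):

import warnings

import pytest


def emit_warning():
    warnings.warn("`root_gpu` is deprecated in v1.6", DeprecationWarning)


def test_emits_deprecation():
    # Fails unless a matching DeprecationWarning is raised inside the block
    with pytest.deprecated_call(match=r"deprecated in v1\.6"):
        emit_warning()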

tests/models/data/horovod/train_default_model.py

Lines changed: 7 additions & 1 deletion
@@ -20,6 +20,7 @@
 import os
 import sys
 
+import pytest
 import torch
 
 # this is needed because Conda does not use `PYTHONPATH` env var while pip and virtualenv do
@@ -102,7 +103,12 @@ def training_epoch_end(self, outputs) -> None:
     if on_gpu:
         trainer = Trainer(gpus=1, strategy="horovod", max_epochs=1)
         # Test the root_gpu property
-        assert trainer.root_gpu == hvd.local_rank()
+        with pytest.deprecated_call(
+            match="`Trainer.root_gpu` is deprecated in v1.6 and will be removed in v1.8. Please use "
+            r"`Trainer.strategy.root_device.index if isinstance\(Trainer.accelerator, GPUAccelerator\) else None`"
+            " instead."
+        ):
+            assert trainer.root_gpu == hvd.local_rank()
 
 
 if __name__ == "__main__":

tests/models/test_gpu.py

Lines changed: 0 additions & 27 deletions
@@ -118,33 +118,6 @@ def test_trainer_num_gpu_0(mocked_device_count_0, gpus, expected_num_gpus, strategy):
     assert Trainer(gpus=gpus, strategy=strategy).num_gpus == expected_num_gpus
 
 
-@pytest.mark.parametrize(
-    ["gpus", "expected_root_gpu", "strategy"],
-    [
-        pytest.param(None, None, "ddp", id="None is None"),
-        pytest.param(0, None, "ddp", id="O gpus, expect gpu root device to be None."),
-        pytest.param(1, 0, "ddp", id="1 gpu, expect gpu root device to be 0."),
-        pytest.param(-1, 0, "ddp", id="-1 - use all gpus, expect gpu root device to be 0."),
-        pytest.param("-1", 0, "ddp", id="'-1' - use all gpus, expect gpu root device to be 0."),
-        pytest.param(3, 0, "ddp", id="3 gpus, expect gpu root device to be 0.(backend:ddp)"),
-    ],
-)
-def test_root_gpu_property(mocked_device_count, gpus, expected_root_gpu, strategy):
-    assert Trainer(gpus=gpus, strategy=strategy).root_gpu == expected_root_gpu
-
-
-@pytest.mark.parametrize(
-    ["gpus", "expected_root_gpu", "strategy"],
-    [
-        pytest.param(None, None, None, id="None is None"),
-        pytest.param(None, None, "ddp", id="None is None"),
-        pytest.param(0, None, "ddp", id="None is None"),
-    ],
-)
-def test_root_gpu_property_0_passing(mocked_device_count_0, gpus, expected_root_gpu, strategy):
-    assert Trainer(gpus=gpus, strategy=strategy).root_gpu == expected_root_gpu
-
-
 # Asking for a gpu when non are available will result in a MisconfigurationException
 @pytest.mark.parametrize(
     ["gpus", "expected_root_gpu", "strategy"],
