
Commit 1750e93

awaelchli, carmocca, and Borda authored and committed
Remove deprecated device attributes from Trainer (#14829)
* Remove deprecated device attributes from Trainer

* changelog

Co-authored-by: Carlos Mocholí <[email protected]>
Co-authored-by: Jirka Borovec <[email protected]>
1 parent bf2d87f commit 1750e93

File tree

3 files changed: +5 -185 lines changed

src/pytorch_lightning/CHANGELOG.md
src/pytorch_lightning/trainer/trainer.py
tests/tests_pytorch/deprecated_api/test_remove_1-8.py


src/pytorch_lightning/CHANGELOG.md

Lines changed: 5 additions & 1 deletion
@@ -199,9 +199,13 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/).
 
 - Removed the deprecated way to set the distributed backend via the environment variable `PL_TORCH_DISTRIBUTED_BACKEND`, in favor of setting the `process_group_backend` in the strategy constructor ([#14693](https://github.com/Lightning-AI/lightning/pull/14693))
 
+- Removed the deprecated device attributes `Trainer.{devices,gpus,num_gpus,ipus,tpu_cores}` in favor of the accelerator-agnostic `Trainer.num_devices` ([#14829](https://github.com/Lightning-AI/lightning/pull/14829))
 
-- Removed the deprecated `Trainer.use_amp` and `LightningModule.use_amp` attributes ([#14832](https://github.com/Lightning-AI/lightning/pull/14832))
 
+- Removed the deprecated `Trainer.root_gpu` attribute in favor of `Trainer.strategy.root_device` ([#14829](https://github.com/Lightning-AI/lightning/pull/14829))
+
+
+- Removed the deprecated `Trainer.use_amp` and `LightningModule.use_amp` attributes ([#14832](https://github.com/Lightning-AI/lightning/pull/14832))
 
 
 ### Fixed
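
For code that still reads the removed attributes, a minimal migration sketch in Python follows. The `Trainer(accelerator="gpu", devices=2)` configuration is an assumed example and not part of this commit; the replacement attributes are the ones named in the changelog entries above and in the deprecation messages removed below.

    from pytorch_lightning import Trainer

    # Assumed example configuration; any accelerator/devices combination behaves the same way.
    trainer = Trainer(accelerator="gpu", devices=2)

    # Removed in this commit: trainer.devices, trainer.gpus, trainer.num_gpus,
    # trainer.ipus, trainer.tpu_cores, trainer.root_gpu.
    # Accelerator-agnostic replacements that remain available:
    num_devices = trainer.num_devices                # devices used per node
    device_ids = trainer.device_ids                  # indices of those devices
    root_device = trainer.strategy.root_device       # torch.device of the root process
    root_index = trainer.strategy.root_device.index  # replaces Trainer.root_gpu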

src/pytorch_lightning/trainer/trainer.py

Lines changed: 0 additions & 48 deletions
@@ -2052,46 +2052,6 @@ def num_devices(self) -> int:
         """Number of devices the trainer uses per node."""
         return len(self.device_ids)
 
-    @property
-    def root_gpu(self) -> Optional[int]:
-        rank_zero_deprecation(
-            "`Trainer.root_gpu` is deprecated in v1.6 and will be removed in v1.8. "
-            "Please use `Trainer.strategy.root_device.index` instead."
-        )
-        return self.strategy.root_device.index if isinstance(self.accelerator, CUDAAccelerator) else None
-
-    @property
-    def tpu_cores(self) -> int:
-        rank_zero_deprecation(
-            "`Trainer.tpu_cores` is deprecated in v1.6 and will be removed in v1.8. "
-            "Please use `Trainer.num_devices` instead."
-        )
-        return self.num_devices if isinstance(self.accelerator, TPUAccelerator) else 0
-
-    @property
-    def ipus(self) -> int:
-        rank_zero_deprecation(
-            "`Trainer.ipus` was deprecated in v1.6 and will be removed in v1.8."
-            " Please use `Trainer.num_devices` instead."
-        )
-        return self.num_devices if isinstance(self.accelerator, IPUAccelerator) else 0
-
-    @property
-    def num_gpus(self) -> int:
-        rank_zero_deprecation(
-            "`Trainer.num_gpus` was deprecated in v1.6 and will be removed in v1.8."
-            " Please use `Trainer.num_devices` instead."
-        )
-        return self.num_devices if isinstance(self.accelerator, CUDAAccelerator) else 0
-
-    @property
-    def devices(self) -> int:
-        rank_zero_deprecation(
-            "`Trainer.devices` was deprecated in v1.6 and will be removed in v1.8."
-            " Please use `Trainer.num_devices` or `Trainer.device_ids` to get device information instead."
-        )
-        return self.num_devices
-
     @property
     def lightning_module(self) -> "pl.LightningModule":
         # TODO: this is actually an optional return
 
@@ -2140,14 +2100,6 @@ def precision(self) -> Union[str, int]:
     def scaler(self) -> Optional[Any]:
         return getattr(self.precision_plugin, "scaler", None)
 
-    @property
-    def gpus(self) -> Optional[Union[List[int], str, int]]:
-        rank_zero_deprecation(
-            "`Trainer.gpus` was deprecated in v1.6 and will be removed in v1.8."
-            " Please use `Trainer.num_devices` or `Trainer.device_ids` to get device information instead."
-        )
-        return self._accelerator_connector._gpus
-
     @property
     def model(self) -> torch.nn.Module:
         """The LightningModule, but possibly wrapped into DataParallel or DistributedDataParallel.

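The deleted properties above were thin deprecation shims over state that the Trainer still exposes. Rewriting their return statements against the remaining public attributes gives the following rough equivalences; this is a sketch that assumes an already constructed `trainer` and that the accelerator classes are importable from `pytorch_lightning.accelerators`, as they are in this module.

    from pytorch_lightning.accelerators import CUDAAccelerator, IPUAccelerator, TPUAccelerator

    # `trainer` is assumed to be an already constructed pytorch_lightning.Trainer.
    # Trainer.devices   -> trainer.num_devices (or trainer.device_ids for the indices)
    devices = trainer.num_devices
    # Trainer.num_gpus  -> the device count when running on CUDA, else 0
    num_gpus = trainer.num_devices if isinstance(trainer.accelerator, CUDAAccelerator) else 0
    # Trainer.tpu_cores -> the device count when running on TPU, else 0
    tpu_cores = trainer.num_devices if isinstance(trainer.accelerator, TPUAccelerator) else 0
    # Trainer.ipus      -> the device count when running on IPU, else 0
    ipus = trainer.num_devices if isinstance(trainer.accelerator, IPUAccelerator) else 0
    # Trainer.root_gpu  -> the root device index when running on CUDA, else None
    root_gpu = trainer.strategy.root_device.index if isinstance(trainer.accelerator, CUDAAccelerator) else None
    # Trainer.gpus returned the raw `gpus` flag; there is no direct public replacement,
    # and its deprecation message pointed to trainer.num_devices / trainer.device_ids.
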
tests/tests_pytorch/deprecated_api/test_remove_1-8.py

Lines changed: 0 additions & 136 deletions
@@ -19,7 +19,6 @@
 import numpy as np
 import pytest
 
-import pytorch_lightning
 from pytorch_lightning import Callback, Trainer
 from pytorch_lightning.callbacks import ModelCheckpoint
 from pytorch_lightning.demos.boring_classes import BoringDataModule, BoringModel
 
@@ -30,7 +29,6 @@
 from pytorch_lightning.trainer.configuration_validator import _check_datamodule_checkpoint_hooks
 from pytorch_lightning.trainer.states import RunningStage
 from pytorch_lightning.utilities.rank_zero import rank_zero_only
-from tests_pytorch.helpers.runif import RunIf
 
 
 def test_v1_8_0_on_init_start_end(tmpdir):
@@ -490,104 +488,6 @@ def on_load_checkpoint(self, checkpoint):
     _check_datamodule_checkpoint_hooks(trainer)
 
 
-def test_trainer_config_device_ids():
-    trainer = Trainer(devices=2)
-    with pytest.deprecated_call(
-        match="`Trainer.devices` was deprecated in v1.6 and will be removed in v1.8."
-        " Please use `Trainer.num_devices` or `Trainer.device_ids` to get device information instead."
-    ):
-        trainer.devices == 2
-
-
-@pytest.mark.parametrize(
-    ["gpus", "expected_root_gpu", "strategy"],
-    [
-        pytest.param(None, None, "ddp", id="None is None"),
-        pytest.param(0, None, "ddp", id="O gpus, expect gpu root device to be None."),
-        pytest.param(1, 0, "ddp", id="1 gpu, expect gpu root device to be 0."),
-        pytest.param(-1, 0, "ddp", id="-1 - use all gpus, expect gpu root device to be 0."),
-        pytest.param("-1", 0, "ddp", id="'-1' - use all gpus, expect gpu root device to be 0."),
-        pytest.param(3, 0, "ddp", id="3 gpus, expect gpu root device to be 0.(backend:ddp)"),
-    ],
-)
-def test_root_gpu_property(cuda_count_4, gpus, expected_root_gpu, strategy):
-    with pytest.deprecated_call(
-        match="`Trainer.root_gpu` is deprecated in v1.6 and will be removed in v1.8. "
-        "Please use `Trainer.strategy.root_device.index` instead."
-    ):
-        assert Trainer(gpus=gpus, strategy=strategy).root_gpu == expected_root_gpu
-
-
-@pytest.mark.parametrize(
-    ["gpus", "expected_root_gpu", "strategy"],
-    [
-        pytest.param(None, None, None, id="None is None"),
-        pytest.param(None, None, "ddp", id="None is None"),
-        pytest.param(0, None, "ddp", id="None is None"),
-    ],
-)
-def test_root_gpu_property_0_passing(cuda_count_0, gpus, expected_root_gpu, strategy):
-    with pytest.deprecated_call(
-        match="`Trainer.root_gpu` is deprecated in v1.6 and will be removed in v1.8. "
-        "Please use `Trainer.strategy.root_device.index` instead."
-    ):
-        assert Trainer(gpus=gpus, strategy=strategy).root_gpu == expected_root_gpu
-
-
-@pytest.mark.parametrize(
-    ["gpus", "expected_num_gpus", "strategy"],
-    [
-        pytest.param(None, 0, None, id="None - expect 0 gpu to use."),
-        pytest.param(0, 0, None, id="Oth gpu, expect 1 gpu to use."),
-        pytest.param(1, 1, None, id="1st gpu, expect 1 gpu to use."),
-        pytest.param(-1, 4, "ddp", id="-1 - use all gpus"),
-        pytest.param("-1", 4, "ddp", id="'-1' - use all gpus"),
-        pytest.param(3, 3, "ddp", id="3rd gpu - 1 gpu to use (backend:ddp)"),
-    ],
-)
-def test_trainer_gpu_parse(cuda_count_4, gpus, expected_num_gpus, strategy):
-    with pytest.deprecated_call(
-        match="`Trainer.num_gpus` was deprecated in v1.6 and will be removed in v1.8."
-        " Please use `Trainer.num_devices` instead."
-    ):
-        assert Trainer(gpus=gpus, strategy=strategy).num_gpus == expected_num_gpus
-
-
-@pytest.mark.parametrize(
-    ["gpus", "expected_num_gpus", "strategy"],
-    [
-        pytest.param(None, 0, None, id="None - expect 0 gpu to use."),
-        pytest.param(None, 0, "ddp", id="None - expect 0 gpu to use."),
-    ],
-)
-def test_trainer_num_gpu_0(cuda_count_0, gpus, expected_num_gpus, strategy):
-    with pytest.deprecated_call(
-        match="`Trainer.num_gpus` was deprecated in v1.6 and will be removed in v1.8."
-        " Please use `Trainer.num_devices` instead."
-    ):
-        assert Trainer(gpus=gpus, strategy=strategy).num_gpus == expected_num_gpus
-
-
-@pytest.mark.parametrize(
-    ["trainer_kwargs", "expected_ipus"],
-    [
-        ({}, 0),
-        ({"devices": 1}, 0),
-        ({"accelerator": "ipu", "devices": 1}, 1),
-        ({"accelerator": "ipu", "devices": 8}, 8),
-    ],
-)
-def test_trainer_config_ipus(monkeypatch, trainer_kwargs, expected_ipus):
-    monkeypatch.setattr(pytorch_lightning.accelerators.ipu.IPUAccelerator, "is_available", lambda _: True)
-    monkeypatch.setattr(pytorch_lightning.strategies.ipu, "_IPU_AVAILABLE", lambda: True)
-    trainer = Trainer(**trainer_kwargs)
-    with pytest.deprecated_call(
-        match="`Trainer.ipus` was deprecated in v1.6 and will be removed in v1.8."
-        " Please use `Trainer.num_devices` instead."
-    ):
-        trainer.ipus == expected_ipus
-
-
 def test_v1_8_0_deprecated_lightning_ipu_module():
     with pytest.deprecated_call(match=r"has been deprecated in v1.7.0 and will be removed in v1.8."):
         _ = LightningIPUModule(BoringModel(), 32)
@@ -653,39 +553,3 @@ def on_save_checkpoint(self, trainer, pl_module, checkpoint):
 
     trainer.callbacks = [TestCallbackSaveHookOverride()]
     trainer.save_checkpoint(tmpdir + "/pathok.ckpt")
-
-
-@pytest.mark.parametrize(
-    "trainer_kwargs",
-    [
-        pytest.param({"accelerator": "gpu", "devices": 2}, marks=RunIf(mps=False)),
-        pytest.param({"accelerator": "gpu", "devices": [0, 2]}, marks=RunIf(mps=False)),
-        pytest.param({"accelerator": "gpu", "devices": "2"}, marks=RunIf(mps=False)),
-        pytest.param({"accelerator": "gpu", "devices": "0,"}, marks=RunIf(mps=False)),
-        pytest.param({"accelerator": "gpu", "devices": 1}, marks=RunIf(mps=True)),
-        pytest.param({"accelerator": "gpu", "devices": [0]}, marks=RunIf(mps=True)),
-        pytest.param({"accelerator": "gpu", "devices": "0,"}, marks=RunIf(mps=True)),
-    ],
-)
-def test_trainer_gpus(cuda_count_4, trainer_kwargs):
-    trainer = Trainer(**trainer_kwargs)
-    with pytest.deprecated_call(
-        match=(
-            "`Trainer.gpus` was deprecated in v1.6 and will be removed in v1.8."
-            " Please use `Trainer.num_devices` or `Trainer.device_ids` to get device information instead."
-        )
-    ):
-        assert trainer.gpus == trainer_kwargs["devices"]
-
-
-@RunIf(skip_windows=True)
-def test_trainer_tpu_cores(monkeypatch):
-    monkeypatch.setattr(pytorch_lightning.accelerators.tpu.TPUAccelerator, "is_available", lambda _: True)
-    trainer = Trainer(accelerator="tpu", devices=8)
-    with pytest.deprecated_call(
-        match=(
-            "`Trainer.tpu_cores` is deprecated in v1.6 and will be removed in v1.8. "
-            "Please use `Trainer.num_devices` instead."
-        )
-    ):
-        assert trainer.tpu_cores == 8
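
The deleted tests asserted the deprecation warnings with pytest.deprecated_call. With both the properties and the warnings gone, device configuration can only be checked through the remaining attributes; a minimal sketch, assuming a two-process CPU run that is not taken from this repository:

    from pytorch_lightning import Trainer


    def test_num_devices_reflects_configuration():
        # Assumed configuration: two CPU processes on a single node.
        trainer = Trainer(accelerator="cpu", devices=2)
        # Accelerator-agnostic attributes that replace the removed ones.
        assert trainer.num_devices == 2
        assert len(trainer.device_ids) == 2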
