
Commit c9af112

Remove AcceleratorConnector.num_nodes (#12107)

Authored by DuYicong515, akihironitta, and pre-commit-ci[bot]
Co-authored-by: Aki Nitta <[email protected]>
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>

1 parent 8fd17f2

4 files changed (+22, −6 lines)

CHANGELOG.md
Lines changed: 3 additions & 0 deletions

@@ -601,6 +601,9 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/).
 - Removed the `AcceleratorConnector.device_type` property ([#12081](https://github.com/PyTorchLightning/pytorch-lightning/pull/12081))


+- Removed `AcceleratorConnector.num_nodes` ([#12107](https://github.com/PyTorchLightning/pytorch-lightning/pull/12107))
+
+
 - Removed `AcceleratorConnector.has_ipu` property ([#12111](https://github.com/PyTorchLightning/pytorch-lightning/pull/12111))


pytorch_lightning/trainer/connectors/accelerator_connector.py
Lines changed: 0 additions & 4 deletions

@@ -768,10 +768,6 @@ def _lazy_init_strategy(self) -> None:
     def parallel_devices(self) -> List[Union[torch.device, int]]:
         return self._parallel_devices

-    @property
-    def num_nodes(self) -> int:
-        return self._num_nodes_flag
-
     @property
     def num_processes(self) -> int:
         return self.devices if self.devices is not None else 1
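
The removed property was a thin wrapper around the connector's `_num_nodes_flag`. Downstream code that read it off the connector can use the public `Trainer.num_nodes` property instead, which the next file in this commit redefines. A hypothetical migration sketch, assuming `trainer` is an already-constructed `pytorch_lightning.Trainer`:

# Hypothetical migration sketch; `trainer` is assumed to be an
# already-constructed pytorch_lightning.Trainer instance.

# Before this commit (private connector property, removed above):
#   n = trainer._accelerator_connector.num_nodes

# After this commit (public Trainer property, see trainer.py below):
n = trainer.num_nodes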

pytorch_lightning/trainer/trainer.py
Lines changed: 1 addition & 1 deletion

@@ -2018,7 +2018,7 @@ def should_rank_save_checkpoint(self) -> bool:

     @property
     def num_nodes(self) -> int:
-        return self._accelerator_connector.num_nodes
+        return getattr(self.strategy, "num_nodes", 1)

     @property
     def num_processes(self) -> int:
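
The new implementation asks the strategy for its node count and falls back to 1 when the strategy defines none (e.g. single-device strategies). Below is a minimal, self-contained sketch of that `getattr` fallback pattern; both strategy classes are hypothetical stand-ins for illustration, not Lightning's real ones:

# Minimal sketch of the fallback pattern used by the new Trainer.num_nodes.
# Both strategy classes are hypothetical stand-ins, not Lightning's own.


class SingleDeviceStub:
    # Single-device training has no notion of nodes, so this class
    # deliberately defines no num_nodes attribute.
    pass


class DDPStub:
    def __init__(self, num_nodes: int = 1) -> None:
        self.num_nodes = num_nodes


class TrainerStub:
    def __init__(self, strategy) -> None:
        self.strategy = strategy

    @property
    def num_nodes(self) -> int:
        # Same shape as the change above: ask the strategy, and fall back
        # to 1 when the strategy exposes no num_nodes attribute.
        return getattr(self.strategy, "num_nodes", 1)


print(TrainerStub(DDPStub(num_nodes=2)).num_nodes)  # -> 2
print(TrainerStub(SingleDeviceStub()).num_nodes)    # -> 1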

tests/trainer/test_trainer.py
Lines changed: 18 additions & 1 deletion

@@ -2059,7 +2059,6 @@ def training_step(self, batch, batch_idx):
         ({"strategy": None}, SingleDeviceStrategy, "single_device", CPUAccelerator, 0),
         ({"strategy": "dp"}, DDPStrategy, "ddp", CPUAccelerator, 0),
         ({"strategy": "ddp"}, DDPStrategy, "ddp", CPUAccelerator, 0),
-        ({"strategy": "ddp", "num_processes": 2}, DDPStrategy, "ddp", CPUAccelerator, 0),
         ({"strategy": "ddp", "num_nodes": 2}, DDPStrategy, "ddp", CPUAccelerator, 0),
         ({"strategy": "ddp2"}, DDPStrategy, "ddp", CPUAccelerator, 0),
         ({"strategy": None, "gpus": 1}, SingleDeviceStrategy, "single_device", GPUAccelerator, 1),
@@ -2097,6 +2096,23 @@ def training_step(self, batch, batch_idx):
             2,
         ),
         ({"strategy": DDPShardedStrategy(), "gpus": 2}, DDPShardedStrategy, "ddp_sharded", GPUAccelerator, 2),
+        ({"strategy": "ddp2", "gpus": 2, "num_nodes": 2}, DDP2Strategy, "ddp2", GPUAccelerator, 2),
+        ({"strategy": "ddp_spawn", "gpus": 2, "num_nodes": 2}, DDPSpawnStrategy, "ddp_spawn", GPUAccelerator, 2),
+        (
+            {"strategy": "ddp_fully_sharded", "gpus": 1, "num_nodes": 2},
+            DDPFullyShardedStrategy,
+            "ddp_fully_sharded",
+            GPUAccelerator,
+            1,
+        ),
+        ({"strategy": "ddp_sharded", "gpus": 2, "num_nodes": 2}, DDPShardedStrategy, "ddp_sharded", GPUAccelerator, 2),
+        (
+            {"strategy": "ddp_sharded_spawn", "gpus": 2, "num_nodes": 2},
+            DDPSpawnShardedStrategy,
+            "ddp_sharded_spawn",
+            GPUAccelerator,
+            2,
+        ),
     ],
 )
 def test_trainer_config_strategy(monkeypatch, trainer_kwargs, strategy_cls, strategy_name, accelerator_cls, num_gpus):
@@ -2110,6 +2126,7 @@ def test_trainer_config_strategy(monkeypatch, trainer_kwargs, strategy_cls, strategy_name, accelerator_cls, num_gpus):
     assert strategy_cls.strategy_name == strategy_name
     assert isinstance(trainer.accelerator, accelerator_cls)
     assert trainer.num_gpus == num_gpus
+    assert trainer.num_nodes == trainer_kwargs.get("num_nodes", 1)


 @pytest.mark.parametrize(
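
The added assertion checks that `Trainer.num_nodes` round-trips whatever `num_nodes` was passed to the constructor, defaulting to 1 when it was omitted. A hedged usage sketch of the same behavior outside the parametrized test, assuming the 1.6-era Trainer API shown in this diff:

# Hedged usage sketch mirroring the parametrized cases above; assumes the
# 1.6-era Trainer API shown in this diff (strategy/num_nodes keyword args).
from pytorch_lightning import Trainer

trainer = Trainer(strategy="ddp", num_nodes=2)
assert trainer.num_nodes == 2  # forwarded from the DDP strategy

default_trainer = Trainer()
assert default_trainer.num_nodes == 1  # no num_nodes on the strategy -> fallback to 1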
