Commit 8edb3b3

Fix message
1 parent aa2a08a commit 8edb3b3

2 files changed: +2 −3 lines changed


pytorch_lightning/trainer/connectors/accelerator_connector.py

Lines changed: 2 additions & 2 deletions
@@ -817,7 +817,7 @@ def set_distributed_mode(self, distributed_backend: Optional[str] = None):
         if self.distributed_backend == DistributedType.DDP_CPU:
             if _TPU_AVAILABLE:
                 raise MisconfigurationException(
-                    "`strategy='ddp_cpu'` is not supported on TPU machines. "
+                    "`accelerator='ddp_cpu'` is not supported on TPU machines. "
                     "Learn more: https://github.com/PyTorchLightning/pytorch-lightning/issues/7810"
                 )
             if self.num_processes == 1 and self.num_nodes > 1:
@@ -826,7 +826,7 @@ def set_distributed_mode(self, distributed_backend: Optional[str] = None):
                 self._distrib_type = DistributedType.DDP_SPAWN
             if self.num_gpus > 0:
                 rank_zero_warn(
-                    "You requested one or more GPUs, but set `strategy='ddp_cpu'`. Training will not use GPUs."
+                    "You requested one or more GPUs, but set `accelerator='ddp_cpu'`. Training will not use GPUs."
                 )
                 self.parallel_device_ids = None
             if self.num_processes is None:
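
For context, a minimal sketch (not part of the commit) of how the corrected warning surfaces to a user. It assumes a machine with at least one CUDA GPU and no TPU, and a PyTorch Lightning release from the same era as this diff, where `Trainer` accepts `accelerator="ddp_cpu"`, `num_processes`, and `gpus`:

# Minimal sketch (assumptions: CUDA GPU available, no TPU, a 1.x-era
# PyTorch Lightning where these Trainer arguments exist).
# Requesting GPUs together with `accelerator="ddp_cpu"` takes the
# rank_zero_warn branch above: the corrected warning is emitted and the
# GPU ids are dropped, so training runs on CPU processes only.
from pytorch_lightning import Trainer

trainer = Trainer(accelerator="ddp_cpu", num_processes=2, gpus=1)
# UserWarning: You requested one or more GPUs, but set `accelerator='ddp_cpu'`.
# Training will not use GPUs.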

tests/accelerators/test_tpu.py

Lines changed: 0 additions & 1 deletion
@@ -222,7 +222,6 @@ def on_train_end(self, trainer, pl_module):


 @RunIf(tpu=True)
 def test_ddp_cpu_not_supported_on_tpus():
-
     with pytest.raises(MisconfigurationException, match="`accelerator='ddp_cpu'` is not supported on TPU machines"):
         Trainer(accelerator="ddp_cpu")
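
Note that the test itself only loses a stray blank line; its `match` pattern already expected the `accelerator='ddp_cpu'` wording that the source change above now produces. To run just this test on a TPU host, an invocation along these lines should work (assuming the repository's standard pytest setup):

pytest tests/accelerators/test_tpu.py -k test_ddp_cpu_not_supported_on_tpus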
