Skip to content

Commit 1f7bd66

Browse files
Mark accelerator connector as protected (#10032)
1 parent: 6d79184 · commit: 1f7bd66

File tree

13 files changed

+43
-43
lines changed

13 files changed

+43
-43
lines changed

pytorch_lightning/plugins/training_type/deepspeed.py

Lines changed: 3 additions & 3 deletions
Original file line number · Diff line number · Diff line change
@@ -627,9 +627,9 @@ def _auto_select_batch_size(self):
627627
return batch_size
628628

629629
def _format_precision_config(self):
630-
amp_type = self.lightning_module.trainer.accelerator_connector.amp_type
631-
amp_level = self.lightning_module.trainer.accelerator_connector.amp_level
632-
precision = self.lightning_module.trainer.accelerator_connector.precision
630+
amp_type = self.lightning_module.trainer._accelerator_connector.amp_type
631+
amp_level = self.lightning_module.trainer._accelerator_connector.amp_level
632+
precision = self.lightning_module.trainer._accelerator_connector.precision
633633
if precision in (16, "mixed"):
634634
if "fp16" not in self.config and amp_type == AMPType.NATIVE:
635635
# FP16 is a DeepSpeed standalone AMP implementation

pytorch_lightning/trainer/configuration_validator.py

Lines changed: 1 addition & 1 deletion
Original file line number · Diff line number · Diff line change
@@ -196,7 +196,7 @@ def __verify_dp_batch_transfer_support(trainer: "pl.Trainer", model: "pl.Lightni
196196
# TODO: Remove this blocker once batch transfer to device is integrated in Lightning for DP mode.
197197
batch_transfer_hooks = ("on_before_batch_transfer", "transfer_batch_to_device", "on_after_batch_transfer")
198198
for hook in batch_transfer_hooks:
199-
if trainer.accelerator_connector.use_dp and is_overridden(hook, model):
199+
if trainer._accelerator_connector.use_dp and is_overridden(hook, model):
200200
raise MisconfigurationException(f"Overriding `{hook}` is not supported in DP mode.")
201201

202202

pytorch_lightning/trainer/connectors/logger_connector/logger_connector.py

Lines changed: 1 addition & 1 deletion
Original file line number · Diff line number · Diff line change
@@ -244,7 +244,7 @@ def _log_gpus_metrics(self) -> None:
244244
self.trainer.lightning_module.log(key, mem, prog_bar=False, logger=True)
245245
else:
246246
gpu_id = int(key.split("/")[0].split(":")[1])
247-
if gpu_id in self.trainer.accelerator_connector.parallel_device_ids:
247+
if gpu_id in self.trainer._accelerator_connector.parallel_device_ids:
248248
self.trainer.lightning_module.log(
249249
key, mem, prog_bar=False, logger=True, on_step=True, on_epoch=False
250250
)

pytorch_lightning/trainer/data_loading.py

Lines changed: 4 additions & 4 deletions
Original file line number · Diff line number · Diff line change
@@ -70,7 +70,7 @@ def _worker_check(self, dataloader: DataLoader, name: str) -> None:
7070
if not isinstance(dataloader, DataLoader):
7171
return
7272

73-
using_spawn = self.accelerator_connector._distrib_type == DistributedType.DDP_SPAWN
73+
using_spawn = self._accelerator_connector._distrib_type == DistributedType.DDP_SPAWN
7474
num_cpus = multiprocessing.cpu_count()
7575

7676
# ddp_spawn + num_workers > 0 don't mix! tell the user
@@ -120,8 +120,8 @@ def auto_add_worker_init_fn(self, dataloader: DataLoader) -> None:
120120

121121
def _requires_distributed_sampler(self, dataloader) -> bool:
122122
return (
123-
self.accelerator_connector.replace_sampler_ddp
124-
and self.accelerator_connector.is_distributed
123+
self._accelerator_connector.replace_sampler_ddp
124+
and self._accelerator_connector.is_distributed
125125
and not isinstance(dataloader.sampler, DistributedSampler)
126126
and not has_iterable_dataset(dataloader)
127127
)
@@ -147,7 +147,7 @@ def prepare_dataloader(self, dataloader: Any, shuffle: bool, mode: Optional[Runn
147147
_fault_tolerant_training() # injects components to track the state
148148
or self._requires_distributed_sampler(dataloader) # sets the distributed sampler
149149
or mode == RunningStage.PREDICTING # to track indices for the predictions
150-
or self.accelerator_connector.use_ipu # IPUs use a custom `DataLoader`
150+
or self._accelerator_connector.use_ipu # IPUs use a custom `DataLoader`
151151
):
152152
sampler = self._resolve_sampler(dataloader, shuffle=shuffle, mode=mode)
153153
dataloader = self._update_dataloader(dataloader, sampler, mode=mode)

pytorch_lightning/trainer/trainer.py

Lines changed: 13 additions & 13 deletions
Original file line number · Diff line number · Diff line change
@@ -432,7 +432,7 @@ def __init__(
432432
self._data_connector = DataConnector(self, multiple_trainloader_mode)
433433
self.optimizer_connector = OptimizerConnector(self)
434434

435-
self.accelerator_connector = AcceleratorConnector(
435+
self._accelerator_connector = AcceleratorConnector(
436436
num_processes,
437437
devices,
438438
tpu_cores,
@@ -1507,7 +1507,7 @@ def _on_exception(self):
15071507

15081508
@property
15091509
def accelerator(self) -> Accelerator:
1510-
return self.accelerator_connector.accelerator
1510+
return self._accelerator_connector.accelerator
15111511

15121512
@property
15131513
def training_type_plugin(self) -> TrainingTypePlugin:
@@ -1542,43 +1542,43 @@ def should_rank_save_checkpoint(self) -> bool:
15421542

15431543
@property
15441544
def _distrib_type(self) -> DistributedType:
1545-
return self.accelerator_connector._distrib_type
1545+
return self._accelerator_connector._distrib_type
15461546

15471547
@property
15481548
def _device_type(self) -> DeviceType:
1549-
return self.accelerator_connector._device_type
1549+
return self._accelerator_connector._device_type
15501550

15511551
@property
15521552
def num_nodes(self) -> int:
1553-
return self.accelerator_connector.num_nodes
1553+
return self._accelerator_connector.num_nodes
15541554

15551555
@property
15561556
def num_processes(self) -> int:
1557-
return self.accelerator_connector.num_processes
1557+
return self._accelerator_connector.num_processes
15581558

15591559
@property
15601560
def root_gpu(self) -> Optional[int]:
1561-
return self.accelerator_connector.root_gpu
1561+
return self._accelerator_connector.root_gpu
15621562

15631563
@property
15641564
def tpu_cores(self) -> int:
1565-
return self.accelerator_connector.tpu_cores
1565+
return self._accelerator_connector.tpu_cores
15661566

15671567
@property
15681568
def ipus(self) -> int:
1569-
return self.accelerator_connector.num_ipus
1569+
return self._accelerator_connector.num_ipus
15701570

15711571
@property
15721572
def num_gpus(self) -> int:
1573-
return self.accelerator_connector.num_gpus
1573+
return self._accelerator_connector.num_gpus
15741574

15751575
@property
15761576
def devices(self) -> Optional[Union[List[int], str, int]]:
1577-
return self.accelerator_connector.devices
1577+
return self._accelerator_connector.devices
15781578

15791579
@property
15801580
def data_parallel_device_ids(self) -> Optional[List[int]]:
1581-
return self.accelerator_connector.parallel_device_ids
1581+
return self._accelerator_connector.parallel_device_ids
15821582

15831583
@property
15841584
def lightning_module(self) -> "pl.LightningModule":
@@ -1627,7 +1627,7 @@ def scaler(self):
16271627

16281628
@property
16291629
def gpus(self) -> Optional[Union[List[int], str, int]]:
1630-
return self.accelerator_connector.gpus
1630+
return self._accelerator_connector.gpus
16311631

16321632
@property
16331633
def model(self) -> torch.nn.Module:

tests/accelerators/test_accelerator_connector.py

Lines changed: 8 additions & 8 deletions
Original file line number · Diff line number · Diff line change
@@ -100,7 +100,7 @@ def test_accelerator_choice_ddp_spawn(cuda_available_mock, device_count_mock):
100100
def test_accelerator_choice_ddp_slurm(setup_distributed_mock):
101101
class CB(Callback):
102102
def on_fit_start(self, trainer, pl_module):
103-
assert trainer.accelerator_connector._is_slurm_managing_tasks
103+
assert trainer._accelerator_connector._is_slurm_managing_tasks
104104
assert isinstance(trainer.accelerator, GPUAccelerator)
105105
assert isinstance(trainer.training_type_plugin, DDPPlugin)
106106
assert isinstance(trainer.training_type_plugin.cluster_environment, SLURMEnvironment)
@@ -132,7 +132,7 @@ def on_fit_start(self, trainer, pl_module):
132132
def test_accelerator_choice_ddp2_slurm(device_count_mock, setup_distributed_mock):
133133
class CB(Callback):
134134
def on_fit_start(self, trainer, pl_module):
135-
assert trainer.accelerator_connector._is_slurm_managing_tasks
135+
assert trainer._accelerator_connector._is_slurm_managing_tasks
136136
assert isinstance(trainer.accelerator, GPUAccelerator)
137137
assert isinstance(trainer.training_type_plugin, DDP2Plugin)
138138
assert isinstance(trainer.training_type_plugin.cluster_environment, SLURMEnvironment)
@@ -307,7 +307,7 @@ def on_fit_start(self, trainer, pl_module):
307307
def test_accelerator_choice_ddp_cpu_slurm(device_count_mock, setup_distributed_mock):
308308
class CB(Callback):
309309
def on_fit_start(self, trainer, pl_module):
310-
assert trainer.accelerator_connector._is_slurm_managing_tasks
310+
assert trainer._accelerator_connector._is_slurm_managing_tasks
311311
assert isinstance(trainer.accelerator, CPUAccelerator)
312312
assert isinstance(trainer.training_type_plugin, DDPPlugin)
313313
assert isinstance(trainer.training_type_plugin.cluster_environment, SLURMEnvironment)
@@ -402,7 +402,7 @@ class TrainTypePlugin(SingleDevicePlugin):
402402
assert isinstance(trainer.accelerator, Accel)
403403
assert isinstance(trainer.training_type_plugin, TrainTypePlugin)
404404
assert isinstance(trainer.precision_plugin, Prec)
405-
assert trainer.accelerator_connector.training_type_plugin is ttp
405+
assert trainer._accelerator_connector.training_type_plugin is ttp
406406

407407
class DistributedPlugin(DDPPlugin):
408408
pass
@@ -413,7 +413,7 @@ class DistributedPlugin(DDPPlugin):
413413
assert isinstance(trainer.accelerator, Accel)
414414
assert isinstance(trainer.training_type_plugin, DistributedPlugin)
415415
assert isinstance(trainer.precision_plugin, Prec)
416-
assert trainer.accelerator_connector.training_type_plugin is ttp
416+
assert trainer._accelerator_connector.training_type_plugin is ttp
417417

418418

419419
@mock.patch.dict(
@@ -756,7 +756,7 @@ def test_strategy_choice_ddp_spawn(cuda_available_mock, device_count_mock):
756756
def test_strategy_choice_ddp_slurm(setup_distributed_mock):
757757
class CB(Callback):
758758
def on_fit_start(self, trainer, pl_module):
759-
assert trainer.accelerator_connector._is_slurm_managing_tasks
759+
assert trainer._accelerator_connector._is_slurm_managing_tasks
760760
assert isinstance(trainer.accelerator, GPUAccelerator)
761761
assert isinstance(trainer.training_type_plugin, DDPPlugin)
762762
assert isinstance(trainer.training_type_plugin.cluster_environment, SLURMEnvironment)
@@ -788,7 +788,7 @@ def on_fit_start(self, trainer, pl_module):
788788
def test_strategy_choice_ddp2_slurm(device_count_mock, setup_distributed_mock):
789789
class CB(Callback):
790790
def on_fit_start(self, trainer, pl_module):
791-
assert trainer.accelerator_connector._is_slurm_managing_tasks
791+
assert trainer._accelerator_connector._is_slurm_managing_tasks
792792
assert isinstance(trainer.accelerator, GPUAccelerator)
793793
assert isinstance(trainer.training_type_plugin, DDP2Plugin)
794794
assert isinstance(trainer.training_type_plugin.cluster_environment, SLURMEnvironment)
@@ -963,7 +963,7 @@ def on_fit_start(self, trainer, pl_module):
963963
def test_strategy_choice_ddp_cpu_slurm(device_count_mock, setup_distributed_mock):
964964
class CB(Callback):
965965
def on_fit_start(self, trainer, pl_module):
966-
assert trainer.accelerator_connector._is_slurm_managing_tasks
966+
assert trainer._accelerator_connector._is_slurm_managing_tasks
967967
assert isinstance(trainer.accelerator, CPUAccelerator)
968968
assert isinstance(trainer.training_type_plugin, DDPPlugin)
969969
assert isinstance(trainer.training_type_plugin.cluster_environment, SLURMEnvironment)

tests/deprecated_api/test_remove_1-6.py

Lines changed: 3 additions & 3 deletions
Original file line number · Diff line number · Diff line change
@@ -411,13 +411,13 @@ def test_v1_6_0_deprecated_accelerator_pass_through_functions():
411411
def test_v1_6_0_configure_slurm_ddp():
412412
trainer = Trainer()
413413
with pytest.deprecated_call(match=r"`AcceleratorConnector.configure_slurm_ddp\(\)` was deprecated in v1.5"):
414-
trainer.accelerator_connector.configure_slurm_ddp()
414+
trainer._accelerator_connector.configure_slurm_ddp()
415415

416416

417417
def test_v1_6_0_is_slurm_managing_tasks():
418418
trainer = Trainer()
419419
with pytest.deprecated_call(match=r"`AcceleratorConnector.is_slurm_managing_tasks` was deprecated in v1.5"):
420-
_ = trainer.accelerator_connector.is_slurm_managing_tasks
420+
_ = trainer._accelerator_connector.is_slurm_managing_tasks
421421

422422
with pytest.deprecated_call(match=r"`AcceleratorConnector.is_slurm_managing_tasks` was deprecated in v1.5"):
423-
trainer.accelerator_connector.is_slurm_managing_tasks = False
423+
trainer._accelerator_connector.is_slurm_managing_tasks = False

tests/models/test_gpu.py

Lines changed: 2 additions & 2 deletions
Original file line number · Diff line number · Diff line change
@@ -239,8 +239,8 @@ def test_torchelastic_gpu_parsing(mocked_device_count, gpus):
239239
"""Ensure when using torchelastic and nproc_per_node is set to the default of 1 per GPU device That we omit
240240
sanitizing the gpus as only one of the GPUs is visible."""
241241
trainer = Trainer(gpus=gpus)
242-
assert isinstance(trainer.accelerator_connector.cluster_environment, TorchElasticEnvironment)
243-
assert trainer.accelerator_connector.parallel_device_ids == device_parser.parse_gpu_ids(gpus)
242+
assert isinstance(trainer._accelerator_connector.cluster_environment, TorchElasticEnvironment)
243+
assert trainer._accelerator_connector.parallel_device_ids == device_parser.parse_gpu_ids(gpus)
244244
assert trainer.gpus == gpus
245245

246246

tests/models/test_tpu.py

Lines changed: 2 additions & 2 deletions
Original file line number · Diff line number · Diff line change
@@ -251,7 +251,7 @@ def test_dataloaders_passed_to_fit(tmpdir):
251251
@RunIf(tpu=True)
252252
def test_tpu_id_to_be_as_expected(tpu_cores, expected_tpu_id):
253253
"""Test if trainer.tpu_id is set as expected."""
254-
assert Trainer(tpu_cores=tpu_cores).accelerator_connector.tpu_id == expected_tpu_id
254+
assert Trainer(tpu_cores=tpu_cores)._accelerator_connector.tpu_id == expected_tpu_id
255255

256256

257257
def test_tpu_misconfiguration():
@@ -315,7 +315,7 @@ def test_tpu_choice(tmpdir, tpu_cores, expected_tpu_id, error_expected):
315315
Trainer(default_root_dir=tmpdir, tpu_cores=tpu_cores)
316316
else:
317317
trainer = Trainer(default_root_dir=tmpdir, tpu_cores=tpu_cores)
318-
assert trainer.accelerator_connector.tpu_id == expected_tpu_id
318+
assert trainer._accelerator_connector.tpu_id == expected_tpu_id
319319

320320

321321
@pytest.mark.parametrize(

tests/overrides/test_data_parallel.py

Lines changed: 2 additions & 2 deletions
Original file line number · Diff line number · Diff line change
@@ -87,7 +87,7 @@ def training_step(self, batch, batch_idx):
8787
model = TestModel()
8888
trainer = MagicMock()
8989
trainer.state.stage = RunningStage.TRAINING
90-
trainer.accelerator_connector._init_deterministic(False)
90+
trainer._accelerator_connector._init_deterministic(False)
9191

9292
model.trainer = trainer
9393
batch = torch.rand(2, 32).cuda()
@@ -128,7 +128,7 @@ def training_step(self, batch, batch_idx):
128128
model = TestModel().to(device)
129129
trainer = MagicMock()
130130
trainer.state.stage = RunningStage.TRAINING
131-
trainer.accelerator_connector._init_deterministic(False)
131+
trainer._accelerator_connector._init_deterministic(False)
132132
model.trainer = trainer
133133
batch = torch.rand(2, 32).to(device)
134134
batch_idx = 0

tests/trainer/test_data_loading.py

Lines changed: 1 addition & 1 deletion
Original file line number · Diff line number · Diff line change
@@ -137,7 +137,7 @@ def _get_warning_msg():
137137
@pytest.mark.parametrize("num_workers", [0, 1])
138138
def test_dataloader_warnings(tmpdir, num_workers):
139139
trainer = Trainer(default_root_dir=tmpdir, strategy="ddp_spawn", num_processes=2, fast_dev_run=4)
140-
assert trainer.accelerator_connector._distrib_type == DistributedType.DDP_SPAWN
140+
assert trainer._accelerator_connector._distrib_type == DistributedType.DDP_SPAWN
141141
trainer.fit(TestSpawnBoringModel(num_workers))
142142

143143

tests/trainer/test_trainer.py

Lines changed: 2 additions & 2 deletions
Original file line number · Diff line number · Diff line change
@@ -1352,15 +1352,15 @@ def write_on_batch_end(self, trainer, pl_module, prediction, batch_indices, *arg
13521352
self.write_on_batch_end_called = True
13531353

13541354
def write_on_epoch_end(self, trainer, pl_module, predictions, batch_indices):
1355-
expected = 1 if trainer.accelerator_connector.is_distributed else 2
1355+
expected = 1 if trainer._accelerator_connector.is_distributed else 2
13561356
assert len(predictions) == 2
13571357
assert len(predictions[0]) == expected
13581358
assert len(batch_indices) == 2
13591359
assert len(batch_indices[0]) == expected
13601360
self.write_on_epoch_end_called = True
13611361

13621362
def on_predict_epoch_end(self, trainer, pl_module, outputs):
1363-
if trainer.accelerator_connector.is_distributed:
1363+
if trainer._accelerator_connector.is_distributed:
13641364
for idx in range(2):
13651365
assert isinstance(trainer.predict_dataloaders[idx].batch_sampler.sampler, UnrepeatedDistributedSampler)
13661366
assert isinstance(trainer.predict_dataloaders[idx].batch_sampler, IndexBatchSamplerWrapper)

tests/utilities/test_cli.py

Lines changed: 1 addition & 1 deletion
Original file line number · Diff line number · Diff line change
@@ -320,7 +320,7 @@ def test_lightning_cli_args_cluster_environments(tmpdir):
320320
class TestModel(BoringModel):
321321
def on_fit_start(self):
322322
# Ensure SLURMEnvironment is set, instead of default LightningEnvironment
323-
assert isinstance(self.trainer.accelerator_connector._cluster_environment, SLURMEnvironment)
323+
assert isinstance(self.trainer._accelerator_connector._cluster_environment, SLURMEnvironment)
324324
self.trainer.ran_asserts = True
325325

326326
with mock.patch("sys.argv", ["any.py", "fit", f"--trainer.plugins={json.dumps(plugins)}"]):

0 commit comments

Comments (0)