
Rename `_device_type` to `_accelerator_type` #11326


Closed · wants to merge 4 commits
3 changes: 3 additions & 0 deletions CHANGELOG.md
@@ -191,6 +191,9 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/).
- `Trainer.logged_metrics` now always contains scalar tensors, even when a Python scalar was logged ([#11270](https://github.com/PyTorchLightning/pytorch-lightning/pull/11270))


- Renamed `_device_type` to `_accelerator_type` ([#11326](https://github.com/PyTorchLightning/pytorch-lightning/pull/11326))


### Deprecated

- Deprecated `ClusterEnvironment.master_{address,port}` in favor of `ClusterEnvironment.main_{address,port}` ([#10103](https://github.com/PyTorchLightning/pytorch-lightning/issues/10103))
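For downstream code that read the old attribute, a minimal before/after sketch (the callback class and the `_AcceleratorType` import path below are illustrative assumptions, not part of this PR):

```python
from pytorch_lightning import Callback, Trainer
from pytorch_lightning.utilities.enums import _AcceleratorType  # import path assumed


class GPUOnlyCallback(Callback):
    """Hypothetical callback that refuses to run off-GPU."""

    def setup(self, trainer: Trainer, pl_module, stage=None) -> None:
        # Before this PR: trainer._device_type
        # After this PR:  trainer._accelerator_type
        if trainer._accelerator_type != _AcceleratorType.GPU:
            raise RuntimeError("GPUOnlyCallback assumes a GPU run.")
```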
2 changes: 1 addition & 1 deletion pytorch_lightning/callbacks/gpu_stats_monitor.py
@@ -126,7 +126,7 @@ def setup(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule", stage: O
if not trainer.logger:
raise MisconfigurationException("Cannot use GPUStatsMonitor callback with Trainer that has no logger.")

- if trainer._device_type != _AcceleratorType.GPU:
+ if trainer._accelerator_type != _AcceleratorType.GPU:
[Review comment — Contributor] (for a separate issue/PR) I know this is a rename, but why do we need this check at all? Couldn't we use the strategy's `root_device` instead?

[Reply — Contributor, Author] Or the strategy's `on_gpu` property. Could be a follow-up. (A sketch of this alternative follows this file's diff.)

raise MisconfigurationException(
"You are using GPUStatsMonitor but are not running on GPU"
f" since gpus attribute in Trainer is set to {trainer.gpus}."
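A minimal sketch of the alternative raised in the review thread above — keying off the strategy's resolved device (or its `on_gpu` flag) rather than `trainer._accelerator_type`. Treat this as a sketch, not the PR's implementation:

```python
from pytorch_lightning.utilities.exceptions import MisconfigurationException


# Hypothetical replacement body for GPUStatsMonitor.setup()
def setup(self, trainer, pl_module, stage=None):
    # `Strategy.root_device` is the device the strategy resolved for this run,
    # so the monitor would not need to compare `_accelerator_type` enums at all.
    if trainer.strategy.root_device.type != "cuda":
        raise MisconfigurationException(
            "You are using GPUStatsMonitor but are not running on GPU."
        )
```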
2 changes: 1 addition & 1 deletion pytorch_lightning/callbacks/xla_stats_monitor.py
@@ -70,7 +70,7 @@ def on_train_start(self, trainer, pl_module) -> None:
if not trainer.logger:
raise MisconfigurationException("Cannot use XLAStatsMonitor callback with Trainer that has no logger.")

- if trainer._device_type != _AcceleratorType.TPU:
+ if trainer._accelerator_type != _AcceleratorType.TPU:
raise MisconfigurationException(
"You are using XLAStatsMonitor but are not running on TPU"
f" since `tpu_cores` attribute in Trainer is set to {trainer.tpu_cores}."
2 changes: 1 addition & 1 deletion pytorch_lightning/core/lightning.py
@@ -99,7 +99,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None:
self.trainer = None

self._distrib_type = None
- self._device_type = None
+ self._accelerator_type = None

# true if using amp
self.use_amp: bool = False
4 changes: 2 additions & 2 deletions pytorch_lightning/lite/lite.py
@@ -444,7 +444,7 @@ def _get_distributed_sampler(dataloader: DataLoader, **kwargs: Any) -> Distribut
return DistributedSampler(dataloader.dataset, **kwargs)

def _check_accelerator_support(self, accelerator: Optional[Union[str, Accelerator]]) -> None:
- supported = [t.value.lower() for t in self._supported_device_types()] + ["auto"]
+ supported = [t.value.lower() for t in self._supported_accelerator_types()] + ["auto"]
valid = accelerator is None or isinstance(accelerator, Accelerator) or accelerator in supported
if not valid:
raise MisconfigurationException(
@@ -462,7 +462,7 @@ def _check_strategy_support(self, strategy: Optional[Union[str, Strategy]]) -> N
)

@staticmethod
- def _supported_device_types() -> Sequence[_AcceleratorType]:
+ def _supported_accelerator_types() -> Sequence[_AcceleratorType]:
return (
_AcceleratorType.CPU,
_AcceleratorType.GPU,
2 changes: 1 addition & 1 deletion pytorch_lightning/loops/optimization/optimizer_loop.py
@@ -364,7 +364,7 @@ def _optimizer_step(
optimizer,
opt_idx,
train_step_and_backward_closure,
- on_tpu=(self.trainer._device_type == _AcceleratorType.TPU and _TPU_AVAILABLE),
+ on_tpu=(self.trainer._accelerator_type == _AcceleratorType.TPU and _TPU_AVAILABLE),
using_native_amp=(self.trainer.amp_backend is not None and self.trainer.amp_backend == AMPType.NATIVE),
using_lbfgs=is_lbfgs,
)
42 changes: 21 additions & 21 deletions pytorch_lightning/trainer/connectors/accelerator_connector.py
@@ -108,7 +108,7 @@ def __init__(
plugins,
):
# initialization
- self._device_type = _AcceleratorType.CPU
+ self._accelerator_type = _AcceleratorType.CPU
self._distrib_type = None
self._accelerator_type = None

@@ -170,8 +170,8 @@ def __init__(

self._cluster_environment = self.select_cluster_environment()

- self.update_device_type_if_ipu_plugin()
- self.update_device_type_if_strategy_passed()
+ self.update_accelerator_type_if_ipu_plugin()
+ self.update_accelerator_type_if_strategy_passed()

self._validate_accelerator_type()
self._set_devices_if_none()
@@ -241,13 +241,13 @@ def _validate_accelerator_and_devices(self) -> None:
)

def _validate_accelerator_type(self) -> None:
- if self._accelerator_type and self._accelerator_type != self._device_type:
+ if self._accelerator_type and self._accelerator_type != self._accelerator_type:
# internal error: should not happen.
raise ValueError(
f"Mismatch between the requested accelerator type ({self._accelerator_type})"
f" and assigned device type ({self._device_type})."
f" and assigned device type ({self._accelerator_type})."
)
- self._accelerator_type = self._device_type
+ self._accelerator_type = self._accelerator_type

def _warn_if_devices_flag_ignored(self) -> None:
if self.devices is None:
@@ -864,16 +864,16 @@ def set_distributed_mode(self, strategy: Optional[str] = None):
self.num_processes = os.cpu_count()
# special case with TPUs
elif self.has_tpu and not _use_cpu:
- self._device_type = _AcceleratorType.TPU
+ self._accelerator_type = _AcceleratorType.TPU
if isinstance(self.tpu_cores, int):
self._distrib_type = _StrategyType.TPU_SPAWN
elif self.has_ipu and not _use_cpu:
- self._device_type = _AcceleratorType.IPU
+ self._accelerator_type = _AcceleratorType.IPU
elif self.distributed_backend and self._distrib_type is None:
self._distrib_type = _StrategyType(self.distributed_backend)

if self.num_gpus > 0 and not _use_cpu:
- self._device_type = _AcceleratorType.GPU
+ self._accelerator_type = _AcceleratorType.GPU

_gpu_distrib_types = (_StrategyType.DP, _StrategyType.DDP, _StrategyType.DDP_SPAWN, _StrategyType.DDP2)
# DP and DDP2 cannot run without GPU
@@ -893,13 +893,13 @@ def set_distributed_mode(self, strategy: Optional[str] = None):
self.check_interactive_compatibility()

# for DDP overwrite nb processes by requested GPUs
- if self._device_type == _AcceleratorType.GPU and self._distrib_type in (
+ if self._accelerator_type == _AcceleratorType.GPU and self._distrib_type in (
_StrategyType.DDP,
_StrategyType.DDP_SPAWN,
):
self.num_processes = self.num_gpus

- if self._device_type == _AcceleratorType.GPU and self._distrib_type == _StrategyType.DDP2:
+ if self._accelerator_type == _AcceleratorType.GPU and self._distrib_type == _StrategyType.DDP2:
self.num_processes = self.num_nodes

# Horovod is an extra case...
@@ -959,28 +959,28 @@ def has_horovodrun() -> bool:
"""Returns True if running with `horovodrun` using Gloo or OpenMPI."""
return _HOROVOD_AVAILABLE and ("OMPI_COMM_WORLD_RANK" in os.environ or "HOROVOD_RANK" in os.environ)

- def update_device_type_if_ipu_plugin(self) -> None:
+ def update_accelerator_type_if_ipu_plugin(self) -> None:
# This allows the poptorch.Options that are passed into the IPUStrategy to be the source of truth,
# which gives users the flexibility to not have to pass `ipus` flag directly to Trainer
- if isinstance(self._strategy, IPUStrategy) and self._device_type != _AcceleratorType.IPU:
- self._device_type = _AcceleratorType.IPU
+ if isinstance(self._strategy, IPUStrategy) and self._accelerator_type != _AcceleratorType.IPU:
+ self._accelerator_type = _AcceleratorType.IPU

- def update_device_type_if_strategy_passed(self) -> None:
+ def update_accelerator_type_if_strategy_passed(self) -> None:
if isinstance(self._strategy_flag, Strategy) or any(isinstance(plug, Strategy) for plug in self.plugins):
if self._accelerator_type is not None:
if self.use_ipu:
- self._device_type = _AcceleratorType.IPU
+ self._accelerator_type = _AcceleratorType.IPU
elif self.use_tpu:
- self._device_type = _AcceleratorType.TPU
+ self._accelerator_type = _AcceleratorType.TPU
elif self.use_gpu:
- self._device_type = _AcceleratorType.GPU
+ self._accelerator_type = _AcceleratorType.GPU
else:
if self.has_ipu:
- self._device_type = _AcceleratorType.IPU
+ self._accelerator_type = _AcceleratorType.IPU
elif self.has_tpu:
- self._device_type = _AcceleratorType.TPU
+ self._accelerator_type = _AcceleratorType.TPU
elif self.has_gpu:
- self._device_type = _AcceleratorType.GPU
+ self._accelerator_type = _AcceleratorType.GPU

def _set_distrib_type_if_strategy_passed(self):
# This is required as when `Strategy` instance is passed to either `strategy`
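A usage sketch of what the IPU branch above enables — the `IPUStrategy` (and the poptorch options it wraps) acts as the source of truth, so no `ipus` flag is needed on the Trainer. The `training_opts` argument name is an assumption carried over from the earlier IPU plugin API:

```python
import poptorch
from pytorch_lightning import Trainer
from pytorch_lightning.strategies import IPUStrategy

opts = poptorch.Options()
opts.deviceIterations(4)  # poptorch, not the Trainer, configures device usage

# No `ipus=...` argument: update_accelerator_type_if_ipu_plugin() sees the
# IPUStrategy instance and records `_accelerator_type = _AcceleratorType.IPU`.
trainer = Trainer(strategy=IPUStrategy(training_opts=opts))
```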
@@ -293,7 +293,7 @@ def gpus_metrics(self) -> Dict[str, float]:
.. deprecated:: v1.5
Will be removed in v1.7.
"""
- if self.trainer._device_type == _AcceleratorType.GPU and self.log_gpu_memory:
+ if self.trainer._accelerator_type == _AcceleratorType.GPU and self.log_gpu_memory:
mem_map = memory.get_memory_profile(self.log_gpu_memory)
self._gpus_metrics.update(mem_map)
return self._gpus_metrics
16 changes: 9 additions & 7 deletions pytorch_lightning/trainer/trainer.py
@@ -1704,31 +1704,33 @@ def __setup_profiler(self) -> None:
self.profiler.setup(stage=self.state.fn._setup_fn, local_rank=local_rank, log_dir=self.log_dir)

def _log_device_info(self) -> None:
rank_zero_info(f"GPU available: {torch.cuda.is_available()}, used: {self._device_type == _AcceleratorType.GPU}")
rank_zero_info(
f"GPU available: {torch.cuda.is_available()}, used: {self._accelerator_type == _AcceleratorType.GPU}"
)

num_tpu_cores = (
- self.tpu_cores if self.tpu_cores is not None and self._device_type == _AcceleratorType.TPU else 0
+ self.tpu_cores if self.tpu_cores is not None and self._accelerator_type == _AcceleratorType.TPU else 0
)
rank_zero_info(f"TPU available: {_TPU_AVAILABLE}, using: {num_tpu_cores} TPU cores")

num_ipus = self.ipus if self.ipus is not None else 0
rank_zero_info(f"IPU available: {_IPU_AVAILABLE}, using: {num_ipus} IPUs")

- if torch.cuda.is_available() and self._device_type != _AcceleratorType.GPU:
+ if torch.cuda.is_available() and self._accelerator_type != _AcceleratorType.GPU:
rank_zero_warn(
"GPU available but not used. Set the gpus flag in your trainer `Trainer(gpus=1)` or script `--gpus=1`.",
category=PossibleUserWarning,
)

- if _TPU_AVAILABLE and self._device_type != _AcceleratorType.TPU:
+ if _TPU_AVAILABLE and self._accelerator_type != _AcceleratorType.TPU:
rank_zero_warn(
"TPU available but not used. Set the `tpu_cores` flag in your trainer"
" `Trainer(tpu_cores=8)` or script `--tpu_cores=8`."
)

if (
_IPU_AVAILABLE
- and self._device_type != _AcceleratorType.IPU
+ and self._accelerator_type != _AcceleratorType.IPU
and not isinstance(self.accelerator, IPUAccelerator)
):
rank_zero_warn(
@@ -1801,8 +1803,8 @@ def _distrib_type(self) -> _StrategyType:
return self._accelerator_connector._distrib_type

@property
- def _device_type(self) -> _AcceleratorType:
- return self._accelerator_connector._device_type
+ def _accelerator_type(self) -> _AcceleratorType:
+ return self._accelerator_connector._accelerator_type

@property
def num_nodes(self) -> int:
22 changes: 11 additions & 11 deletions tests/accelerators/test_accelerator_connector.py
@@ -439,7 +439,7 @@ def test_accelerator_cpu():

trainer = Trainer(accelerator="cpu")

assert trainer._device_type == "cpu"
assert trainer._accelerator_type == "cpu"
assert isinstance(trainer.accelerator, CPUAccelerator)

with pytest.raises(MisconfigurationException, match="You passed `accelerator='gpu'`, but GPUs are not available"):
@@ -454,7 +454,7 @@ def test_accelerator_gpu():

trainer = Trainer(accelerator="gpu", gpus=1)

assert trainer._device_type == "gpu"
assert trainer._accelerator_type == "gpu"
assert isinstance(trainer.accelerator, GPUAccelerator)

with pytest.raises(
@@ -464,7 +464,7 @@ def test_accelerator_gpu():

trainer = Trainer(accelerator="auto", gpus=1)

assert trainer._device_type == "gpu"
assert trainer._accelerator_type == "gpu"
assert isinstance(trainer.accelerator, GPUAccelerator)


@@ -473,7 +473,7 @@ def test_accelerator_cpu_with_gpus_flag():

trainer = Trainer(accelerator="cpu", gpus=1)

assert trainer._device_type == "cpu"
assert trainer._accelerator_type == "cpu"
assert isinstance(trainer.accelerator, CPUAccelerator)


@@ -482,7 +482,7 @@ def test_accelerator_cpu_with_multiple_gpus():

trainer = Trainer(accelerator="cpu", gpus=2)

assert trainer._device_type == "cpu"
assert trainer._accelerator_type == "cpu"
assert isinstance(trainer.accelerator, CPUAccelerator)


@@ -524,7 +524,7 @@ def test_accelerator_auto_with_devices_gpu():

trainer = Trainer(accelerator="auto", devices=1)

assert trainer._device_type == "gpu"
assert trainer._accelerator_type == "gpu"
assert trainer.gpus == 1


@@ -647,12 +647,12 @@ def test_strategy_choice_gpu_plugin(tmpdir, plugin):


@RunIf(min_gpus=2)
@pytest.mark.parametrize("plugin", [DDPSpawnStrategy, DDPStrategy])
def test_device_type_when_training_plugin_gpu_passed(tmpdir, plugin):
@pytest.mark.parametrize("strategy", [DDPSpawnStrategy, DDPStrategy])
def test_accelerator_type_when_training_strategy_for_gpu_passed(tmpdir, strategy):

- trainer = Trainer(strategy=plugin(), gpus=2)
- assert isinstance(trainer.strategy, plugin)
- assert trainer._device_type == _AcceleratorType.GPU
+ trainer = Trainer(strategy=strategy(), gpus=2)
+ assert isinstance(trainer.strategy, strategy)
+ assert trainer._accelerator_type == _AcceleratorType.GPU
assert isinstance(trainer.accelerator, GPUAccelerator)


12 changes: 6 additions & 6 deletions tests/accelerators/test_ipu.py
@@ -501,7 +501,7 @@ def test_accelerator_ipu():

trainer = Trainer(accelerator="ipu", ipus=1)

assert trainer._device_type == "ipu"
assert trainer._accelerator_type == "ipu"
assert isinstance(trainer.accelerator, IPUAccelerator)

with pytest.raises(
@@ -511,7 +511,7 @@ def test_accelerator_ipu():

trainer = Trainer(accelerator="auto", ipus=8)

assert trainer._device_type == "ipu"
assert trainer._accelerator_type == "ipu"
assert isinstance(trainer.accelerator, IPUAccelerator)


@@ -520,7 +520,7 @@ def test_accelerator_cpu_with_ipus_flag():

trainer = Trainer(accelerator="cpu", ipus=1)

assert trainer._device_type == "cpu"
assert trainer._accelerator_type == "cpu"
assert isinstance(trainer.accelerator, CPUAccelerator)


@@ -539,7 +539,7 @@ def test_accelerator_auto_with_devices_ipu():

trainer = Trainer(accelerator="auto", devices=8)

assert trainer._device_type == "ipu"
assert trainer._accelerator_type == "ipu"
assert trainer.ipus == 8


@@ -568,11 +568,11 @@ def test_strategy_choice_ipu_plugin(tmpdir):


@RunIf(ipu=True)
- def test_device_type_when_training_plugin_ipu_passed(tmpdir):
+ def test_accelerator_type_when_training_plugin_ipu_passed(tmpdir):

trainer = Trainer(strategy=IPUStrategy(), ipus=8)
assert isinstance(trainer.strategy, IPUStrategy)
- assert trainer._device_type == _AcceleratorType.IPU
+ assert trainer._accelerator_type == _AcceleratorType.IPU
assert isinstance(trainer.accelerator, IPUAccelerator)


8 changes: 4 additions & 4 deletions tests/accelerators/test_tpu.py
@@ -86,7 +86,7 @@ def test_accelerator_tpu():

trainer = Trainer(accelerator="tpu", tpu_cores=8)

assert trainer._device_type == "tpu"
assert trainer._accelerator_type == "tpu"
assert isinstance(trainer.accelerator, TPUAccelerator)

with pytest.raises(
@@ -100,7 +100,7 @@ def test_accelerator_cpu_with_tpu_cores_flag():

trainer = Trainer(accelerator="cpu", tpu_cores=8)

assert trainer._device_type == "cpu"
assert trainer._accelerator_type == "cpu"
assert isinstance(trainer.accelerator, CPUAccelerator)


@@ -109,7 +109,7 @@ def test_accelerator_tpu_with_auto():

trainer = Trainer(accelerator="auto", tpu_cores=8)

assert trainer._device_type == "tpu"
assert trainer._accelerator_type == "tpu"
assert isinstance(trainer.accelerator, TPUAccelerator)


@@ -128,7 +128,7 @@ def test_accelerator_auto_with_devices_tpu():

trainer = Trainer(accelerator="auto", devices=8)

assert trainer._device_type == "tpu"
assert trainer._accelerator_type == "tpu"
assert trainer.tpu_cores == 8

