Skip to content

Commit 3183079

Browse files
awaelchli, carmocca, otaj, and Borda
authored
Remove deprecated callback hooks (#14834)
Co-authored-by: Carlos Mocholí <[email protected]> Co-authored-by: otaj <[email protected]> Co-authored-by: Jirka Borovec <[email protected]>
1 parent d15bd15 commit 3183079

21 files changed: +222 −546 lines changed

docs/source-pytorch/extensions/callbacks.rst

Lines changed: 0 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -154,12 +154,6 @@ state_key
154154
Hooks
155155
=====
156156

157-
on_configure_sharded_model
158-
^^^^^^^^^^^^^^^^^^^^^^^^^^
159-
160-
.. automethod:: pytorch_lightning.callbacks.Callback.on_configure_sharded_model
161-
:noindex:
162-
163157
setup
164158
^^^^^
165159

@@ -256,9 +250,6 @@ on_predict_epoch_end
256250
.. automethod:: pytorch_lightning.callbacks.Callback.on_predict_epoch_end
257251
:noindex:
258252

259-
.. automethod:: pytorch_lightning.callbacks.Callback.on_epoch_end
260-
:noindex:
261-
262253
on_validation_batch_start
263254
^^^^^^^^^^^^^^^^^^^^^^^^^
264255

src/pytorch_lightning/CHANGELOG.md

Lines changed: 10 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -255,6 +255,16 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/).
255255
- Removed the deprecated way to set the distributed backend via the environment variable `PL_TORCH_DISTRIBUTED_BACKEND`, in favor of setting the `process_group_backend` in the strategy constructor ([#14693](https://github.com/Lightning-AI/lightning/pull/14693))
256256

257257

258+
- Removed deprecated callback hooks ([#14834](https://github.com/Lightning-AI/lightning/pull/14834))
259+
* `Callback.on_configure_sharded_model` in favor of `Callback.setup`
260+
* `Callback.on_before_accelerator_backend_setup` in favor of `Callback.setup`
261+
* `Callback.on_batch_start` in favor of `Callback.on_train_batch_start`
262+
* `Callback.on_batch_end` in favor of `Callback.on_train_batch_end`
263+
* `Callback.on_epoch_start` in favor of `Callback.on_{train,validation,test}_epoch_start`
264+
* `Callback.on_epoch_end` in favor of `Callback.on_{train,validation,test}_epoch_end`
265+
* `Callback.on_pretrain_routine_{start,end}` in favor of `Callback.on_fit_start`
266+
267+
258268
- Removed the deprecated device attributes `Trainer.{devices,gpus,num_gpus,ipus,tpu_cores}` in favor of the accelerator-agnostic `Trainer.num_devices` ([#14829](https://github.com/Lightning-AI/lightning/pull/14829))
259269

260270

src/pytorch_lightning/callbacks/callback.py

Lines changed: 0 additions & 70 deletions
Original file line numberDiff line numberDiff line change
@@ -56,22 +56,6 @@ def _generate_state_key(self, **kwargs: Any) -> str:
5656
"""
5757
return f"{self.__class__.__qualname__}{repr(kwargs)}"
5858

59-
def on_configure_sharded_model(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") -> None:
60-
r"""
61-
.. deprecated:: v1.6
62-
This callback hook was deprecated in v1.6 and will be removed in v1.8. Use `setup()` instead.
63-
64-
Called before configure sharded model.
65-
"""
66-
67-
def on_before_accelerator_backend_setup(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") -> None:
68-
r"""
69-
.. deprecated:: v1.6
70-
This callback hook was deprecated in v1.6 and will be removed in v1.8. Use ``setup()`` instead.
71-
72-
Called before accelerator is being setup.
73-
"""
74-
7559
def setup(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule", stage: str) -> None:
7660
"""Called when fit, validate, test, predict, or tune begins."""
7761

@@ -130,42 +114,6 @@ def on_predict_epoch_start(self, trainer: "pl.Trainer", pl_module: "pl.Lightning
130114
def on_predict_epoch_end(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule", outputs: List[Any]) -> None:
131115
"""Called when the predict epoch ends."""
132116

133-
def on_epoch_start(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") -> None:
134-
r"""
135-
.. deprecated:: v1.6
136-
This callback hook was deprecated in v1.6 and will be removed in v1.8. Use
137-
``on_<train/validation/test>_epoch_start`` instead.
138-
139-
Called when either of train/val/test epoch begins.
140-
"""
141-
142-
def on_epoch_end(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") -> None:
143-
r"""
144-
.. deprecated:: v1.6
145-
This callback hook was deprecated in v1.6 and will be removed in v1.8. Use
146-
``on_<train/validation/test>_epoch_end`` instead.
147-
148-
Called when either of train/val/test epoch ends.
149-
"""
150-
151-
def on_batch_start(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") -> None:
152-
r"""
153-
.. deprecated:: v1.6
154-
This callback hook was deprecated in v1.6 and will be removed in v1.8. Use
155-
``on_train_batch_start`` instead.
156-
157-
Called when the training batch begins.
158-
"""
159-
160-
def on_batch_end(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") -> None:
161-
r"""
162-
.. deprecated:: v1.6
163-
This callback hook was deprecated in v1.6 and will be removed in v1.8. Use
164-
``on_train_batch_end`` instead.
165-
166-
Called when the training batch ends.
167-
"""
168-
169117
def on_validation_batch_start(
170118
self, trainer: "pl.Trainer", pl_module: "pl.LightningModule", batch: Any, batch_idx: int, dataloader_idx: int
171119
) -> None:
@@ -220,24 +168,6 @@ def on_train_start(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule")
220168
def on_train_end(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") -> None:
221169
"""Called when the train ends."""
222170

223-
def on_pretrain_routine_start(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") -> None:
224-
r"""
225-
.. deprecated:: v1.6
226-
227-
This callback hook was deprecated in v1.6 and will be removed in v1.8. Use ``on_fit_start`` instead.
228-
229-
Called when the pretrain routine begins.
230-
"""
231-
232-
def on_pretrain_routine_end(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") -> None:
233-
r"""
234-
.. deprecated:: v1.6
235-
236-
This callback hook was deprecated in v1.6 and will be removed in v1.8. Use ``on_fit_start`` instead.
237-
238-
Called when the pretrain routine ends.
239-
"""
240-
241171
def on_validation_start(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") -> None:
242172
"""Called when the validation loop begins."""
243173

src/pytorch_lightning/callbacks/lambda_function.py

Lines changed: 0 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -40,9 +40,7 @@ class LambdaCallback(Callback):
4040

4141
def __init__(
4242
self,
43-
on_before_accelerator_backend_setup: Optional[Callable] = None,
4443
setup: Optional[Callable] = None,
45-
on_configure_sharded_model: Optional[Callable] = None,
4644
teardown: Optional[Callable] = None,
4745
on_fit_start: Optional[Callable] = None,
4846
on_fit_end: Optional[Callable] = None,
@@ -56,18 +54,12 @@ def __init__(
5654
on_validation_epoch_end: Optional[Callable] = None,
5755
on_test_epoch_start: Optional[Callable] = None,
5856
on_test_epoch_end: Optional[Callable] = None,
59-
on_epoch_start: Optional[Callable] = None,
60-
on_epoch_end: Optional[Callable] = None,
61-
on_batch_start: Optional[Callable] = None,
6257
on_validation_batch_start: Optional[Callable] = None,
6358
on_validation_batch_end: Optional[Callable] = None,
6459
on_test_batch_start: Optional[Callable] = None,
6560
on_test_batch_end: Optional[Callable] = None,
66-
on_batch_end: Optional[Callable] = None,
6761
on_train_start: Optional[Callable] = None,
6862
on_train_end: Optional[Callable] = None,
69-
on_pretrain_routine_start: Optional[Callable] = None,
70-
on_pretrain_routine_end: Optional[Callable] = None,
7163
on_validation_start: Optional[Callable] = None,
7264
on_validation_end: Optional[Callable] = None,
7365
on_test_start: Optional[Callable] = None,

src/pytorch_lightning/callbacks/model_checkpoint.py

Lines changed: 6 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -39,7 +39,7 @@
3939
from lightning_lite.utilities.types import _PATH
4040
from pytorch_lightning.callbacks import Checkpoint
4141
from pytorch_lightning.utilities.exceptions import MisconfigurationException
42-
from pytorch_lightning.utilities.rank_zero import rank_zero_deprecation, rank_zero_info, rank_zero_warn
42+
from pytorch_lightning.utilities.rank_zero import rank_zero_info, rank_zero_warn
4343
from pytorch_lightning.utilities.types import STEP_OUTPUT
4444

4545
log = logging.getLogger(__name__)
@@ -351,19 +351,12 @@ def load_state_dict(self, state_dict: Dict[str, Any]) -> None:
351351

352352
self.best_model_path = state_dict["best_model_path"]
353353

354-
def save_checkpoint(self, trainer: "pl.Trainer") -> None: # pragma: no-cover
355-
"""Performs the main logic around saving a checkpoint.
356-
357-
This method runs on all ranks. It is the responsibility of `trainer.save_checkpoint` to correctly handle the
358-
behaviour in distributed training, i.e., saving only on rank 0 for data parallel use cases.
359-
"""
360-
rank_zero_deprecation(
361-
f"`{self.__class__.__name__}.save_checkpoint()` was deprecated in v1.6 and will be removed in v1.8."
362-
" Instead, you can use `trainer.save_checkpoint()` to manually save a checkpoint."
354+
def save_checkpoint(self, trainer: "pl.Trainer") -> None:
355+
raise NotImplementedError(
356+
f"`{self.__class__.__name__}.save_checkpoint()` was deprecated in v1.6 and is no longer supported"
357+
f" as of 1.8. Please use `trainer.save_checkpoint()` to manually save a checkpoint. This method will be"
358+
f" removed completely in v2.0."
363359
)
364-
monitor_candidates = self._monitor_candidates(trainer)
365-
self._save_topk_checkpoint(trainer, monitor_candidates)
366-
self._save_last_checkpoint(trainer, monitor_candidates)
367360

368361
def _save_topk_checkpoint(self, trainer: "pl.Trainer", monitor_candidates: Dict[str, Tensor]) -> None:
369362
if self.save_top_k == 0:

src/pytorch_lightning/core/hooks.py

Lines changed: 0 additions & 42 deletions
Original file line numberDiff line numberDiff line change
@@ -63,32 +63,6 @@ def on_predict_start(self) -> None:
6363
def on_predict_end(self) -> None:
6464
"""Called at the end of predicting."""
6565

66-
def on_pretrain_routine_start(self) -> None:
67-
"""Called at the beginning of the pretrain routine (between fit and train start).
68-
69-
- fit
70-
- pretrain_routine start
71-
- pretrain_routine end
72-
- training_start
73-
74-
.. deprecated:: v1.6
75-
:meth:`on_pretrain_routine_start` has been deprecated in v1.6 and will be removed in v1.8.
76-
Use ``on_fit_start`` instead.
77-
"""
78-
79-
def on_pretrain_routine_end(self) -> None:
80-
"""Called at the end of the pretrain routine (between fit and train start).
81-
82-
- fit
83-
- pretrain_routine start
84-
- pretrain_routine end
85-
- training_start
86-
87-
.. deprecated:: v1.6
88-
:meth:`on_pretrain_routine_end` has been deprecated in v1.6 and will be removed in v1.8.
89-
Use ``on_fit_start`` instead.
90-
"""
91-
9266
def on_train_batch_start(self, batch: Any, batch_idx: int) -> Optional[int]:
9367
"""Called in the training loop before anything happens for that batch.
9468
@@ -189,22 +163,6 @@ def on_predict_model_eval(self) -> None:
189163
"""Sets the model to eval during the predict loop."""
190164
self.trainer.model.eval()
191165

192-
def on_epoch_start(self) -> None:
193-
"""Called when either of train/val/test epoch begins.
194-
195-
.. deprecated:: v1.6
196-
:meth:`on_epoch_start` has been deprecated in v1.6 and will be removed in v1.8.
197-
Use ``on_<train/validation/test>_epoch_start`` instead.
198-
"""
199-
200-
def on_epoch_end(self) -> None:
201-
"""Called when either of train/val/test epoch ends.
202-
203-
.. deprecated:: v1.6
204-
:meth:`on_epoch_end` has been deprecated in v1.6 and will be removed in v1.8.
205-
Use ``on_<train/validation/test>_epoch_end`` instead.
206-
"""
207-
208166
def on_train_epoch_start(self) -> None:
209167
"""Called in the training loop at the very beginning of the epoch."""
210168

src/pytorch_lightning/loops/dataloader/evaluation_loop.py

Lines changed: 1 addition & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -267,10 +267,8 @@ def _on_evaluation_end(self, *args: Any, **kwargs: Any) -> None:
267267
self.trainer._logger_connector.reset_results()
268268

269269
def _on_evaluation_epoch_start(self, *args: Any, **kwargs: Any) -> None:
270-
"""Runs ``on_epoch_start`` and ``on_{validation/test}_epoch_start`` hooks."""
270+
"""Runs the ``on_{validation/test}_epoch_start`` hooks."""
271271
self.trainer._logger_connector.on_epoch_start()
272-
self.trainer._call_callback_hooks("on_epoch_start", *args, **kwargs)
273-
self.trainer._call_lightning_module_hook("on_epoch_start", *args, **kwargs)
274272

275273
hook_name = "on_test_epoch_start" if self.trainer.testing else "on_validation_epoch_start"
276274
self.trainer._call_callback_hooks(hook_name, *args, **kwargs)
@@ -295,8 +293,6 @@ def _on_evaluation_epoch_end(self) -> None:
295293
self.trainer._call_callback_hooks(hook_name)
296294
self.trainer._call_lightning_module_hook(hook_name)
297295

298-
self.trainer._call_callback_hooks("on_epoch_end")
299-
self.trainer._call_lightning_module_hook("on_epoch_end")
300296
self.trainer._logger_connector.on_epoch_end()
301297

302298
@staticmethod

src/pytorch_lightning/loops/epoch/training_epoch_loop.py

Lines changed: 0 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -200,9 +200,6 @@ def advance(self, data_fetcher: AbstractDataFetcher) -> None: # type: ignore[ov
200200
self._warning_cache.warn("train_dataloader yielded None. If this was on purpose, ignore this warning...")
201201
batch_output = []
202202
else:
203-
# hook
204-
self.trainer._call_callback_hooks("on_batch_start")
205-
206203
# hook
207204
self.trainer._call_callback_hooks("on_train_batch_start", batch, batch_idx)
208205
response = self.trainer._call_lightning_module_hook("on_train_batch_start", batch, batch_idx)
@@ -232,7 +229,6 @@ def advance(self, data_fetcher: AbstractDataFetcher) -> None: # type: ignore[ov
232229

233230
self.trainer._call_callback_hooks("on_train_batch_end", batch_end_outputs, batch, batch_idx)
234231
self.trainer._call_lightning_module_hook("on_train_batch_end", batch_end_outputs, batch, batch_idx)
235-
self.trainer._call_callback_hooks("on_batch_end")
236232
self.trainer._logger_connector.on_batch_end()
237233

238234
self.batch_progress.increment_completed()

src/pytorch_lightning/loops/fit_loop.py

Lines changed: 1 addition & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -219,8 +219,7 @@ def on_run_start(self) -> None: # type: ignore[override]
219219
self.trainer._call_strategy_hook("on_train_start")
220220

221221
def on_advance_start(self) -> None: # type: ignore[override]
222-
"""Prepares the dataloader for training and calls the hooks ``on_epoch_start`` and
223-
``on_train_epoch_start``"""
222+
"""Prepares the dataloader for training and calls the hook ``on_train_epoch_start``"""
224223
model = self.trainer.lightning_module
225224

226225
# reset train dataloader
@@ -246,9 +245,6 @@ def on_advance_start(self) -> None: # type: ignore[override]
246245

247246
self.trainer._logger_connector.on_epoch_start()
248247

249-
self.trainer._call_callback_hooks("on_epoch_start")
250-
self.trainer._call_lightning_module_hook("on_epoch_start")
251-
252248
self.trainer._call_callback_hooks("on_train_epoch_start")
253249
self.trainer._call_lightning_module_hook("on_train_epoch_start")
254250

@@ -299,9 +295,6 @@ def on_advance_end(self) -> None:
299295
self.trainer._call_callback_hooks("on_train_epoch_end")
300296
self.trainer._call_lightning_module_hook("on_train_epoch_end")
301297

302-
self.trainer._call_callback_hooks("on_epoch_end")
303-
self.trainer._call_lightning_module_hook("on_epoch_end")
304-
305298
self.trainer._logger_connector.on_epoch_end()
306299

307300
if self.epoch_loop._num_ready_batches_reached():

0 commit comments

Comments (0)