diff --git a/pytorch_lightning/trainer/connectors/checkpoint_connector.py b/pytorch_lightning/trainer/connectors/checkpoint_connector.py
index da6a81e8add44..2bde85de052ca 100644
--- a/pytorch_lightning/trainer/connectors/checkpoint_connector.py
+++ b/pytorch_lightning/trainer/connectors/checkpoint_connector.py
@@ -59,9 +59,6 @@ def resume_start(self) -> None:
         1. from HPC weights if found
         2. from `resume_from_checkpoint` file if provided
         3. don't restore
-
-        Raises:
-            FileNotFoundError: If the path to the checkpoint file is provided but the file does not exist.
         """
         self.resume_checkpoint_path = self.hpc_resume_path or self.resume_checkpoint_path
         checkpoint_path = self.resume_checkpoint_path
diff --git a/pytorch_lightning/trainer/trainer.py b/pytorch_lightning/trainer/trainer.py
index be0a7728edddc..a9e5af106f47c 100644
--- a/pytorch_lightning/trainer/trainer.py
+++ b/pytorch_lightning/trainer/trainer.py
@@ -357,7 +357,7 @@ def __init__(
                 you can set ``replace_sampler_ddp=False`` and add your own distributed sampler.
 
             resume_from_checkpoint: Path/URL of the checkpoint from which training is resumed. If there is
-                no checkpoint file at the path, start from scratch. If resuming from mid-epoch checkpoint,
+                no checkpoint file at the path, an exception is raised. If resuming from mid-epoch checkpoint,
                 training will start from the beginning of the next epoch.
 
             strategy: Supports different training strategies with aliases
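
For context, a minimal sketch of the behavior this diff documents (not part of the patch): `TinyModel`, its dataloader, and the checkpoint path below are hypothetical stand-ins, and the exception type is inferred from the `Raises:` section removed from `resume_start` above.

```python
import torch
from torch.utils.data import DataLoader, TensorDataset
import pytorch_lightning as pl


class TinyModel(pl.LightningModule):
    """Minimal stand-in LightningModule, for illustration only."""

    def __init__(self):
        super().__init__()
        self.layer = torch.nn.Linear(4, 1)

    def training_step(self, batch, batch_idx):
        x, y = batch
        return torch.nn.functional.mse_loss(self.layer(x), y)

    def configure_optimizers(self):
        return torch.optim.SGD(self.parameters(), lr=0.01)


train_loader = DataLoader(
    TensorDataset(torch.randn(8, 4), torch.randn(8, 1)), batch_size=4
)

# Per the updated trainer docstring: pointing `resume_from_checkpoint` at a
# non-existent file raises an exception (a FileNotFoundError, according to the
# docstring removed above) instead of silently training from scratch.
trainer = pl.Trainer(
    resume_from_checkpoint="checkpoints/does_not_exist.ckpt", max_epochs=1
)
try:
    trainer.fit(TinyModel(), train_loader)
except FileNotFoundError as err:
    print(f"refusing to train from scratch: {err}")
```

Failing loudly here is arguably the safer default: a typo in the checkpoint path no longer discards prior training progress without warning.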