| 1 | +# ---
| 2 | +# jupyter:
| 3 | +#   jupytext:
| 4 | +#     cell_metadata_filter: -all
| 5 | +#     formats: ipynb,py:percent
| 6 | +#     text_representation:
| 7 | +#       extension: .py
| 8 | +#       format_name: percent
| 9 | +#       format_version: '1.3'
| 10 | +#       jupytext_version: 1.13.2
| 11 | +# ---
| 12 | +
1 | 13 | # %% [markdown]
2 | 14 | # ## Scheduled Finetuning
3 | 15 | #
@@ -396,6 +408,7 @@ def validation_step(self, batch, batch_idx, dataloader_idx=0):
396 | 408 |
397 | 409 |         labels = batch["labels"]
398 | 410 |         self.log("val_loss", val_loss, prog_bar=True)
    | 411 | +
399 | 412 |         return {"loss": val_loss, "preds": preds, "labels": labels}
400 | 413 |
401 | 414 |     def validation_epoch_end(self, outputs):
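For context on the hunk above: in the Lightning 1.x API this tutorial targets, the dict returned by `validation_step` is collected into a list (one entry per validation batch) and handed to `validation_epoch_end`. A minimal sketch of the usual aggregation pattern, with the metric name `avg_val_loss` chosen for illustration rather than taken from this diff:

```python
import torch

# Sketch only: this would live on the tutorial's LightningModule; shown
# standalone here for brevity.
def validation_epoch_end(self, outputs):
    # `outputs` is the list of dicts returned by validation_step, one per batch.
    preds = torch.cat([out["preds"] for out in outputs])
    labels = torch.cat([out["labels"] for out in outputs])
    avg_loss = torch.stack([out["loss"] for out in outputs]).mean()
    self.log("avg_val_loss", avg_loss, prog_bar=True)
```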
@@ -518,7 +531,8 @@ def configure_callbacks(self):
518 | 531 | # %%
519 | 532 | lr_scheduler_init = {
520 | 533 |     "class_path": "torch.optim.lr_scheduler.CosineAnnealingWarmRestarts",
521 |     | -     "init_args": {"T_0": 1, "T_mult": 2, "eta_min": 1e-07},
    | 534 | +     # "init_args": {"T_0": 1, "T_mult": 2, "eta_min": 1e-07},
    | 535 | +     "init_args": {"T_0": 1, "T_mult": 2, "eta_min": 0},
522 | 536 | }
523 | 537 | pl_lrs_cfg = {"interval": "epoch", "frequency": 1, "name": "CosineAnnealingWarmRestarts"}
524 | 538 |
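The hunk above changes `eta_min` from `1e-07` to `0`, which matches the PyTorch default for `CosineAnnealingWarmRestarts`. As a rough sketch of how a `class_path`/`init_args` mapping such as `lr_scheduler_init` can be resolved into a scheduler instance (the `instantiate_scheduler` helper and the `AdamW` optimizer below are assumptions for the example, not part of the tutorial):

```python
from importlib import import_module

import torch


def instantiate_scheduler(optimizer, config):
    # Resolve "package.module.ClassName" into the class, then build it
    # with the keyword arguments from "init_args".
    module_name, _, class_name = config["class_path"].rpartition(".")
    scheduler_cls = getattr(import_module(module_name), class_name)
    return scheduler_cls(optimizer, **config["init_args"])


# Usage with the values from this diff.
optimizer = torch.optim.AdamW([torch.nn.Parameter(torch.zeros(1))], lr=1e-3)
scheduler = instantiate_scheduler(
    optimizer,
    {
        "class_path": "torch.optim.lr_scheduler.CosineAnnealingWarmRestarts",
        "init_args": {"T_0": 1, "T_mult": 2, "eta_min": 0},
    },
)
```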
@@ -551,10 +565,10 @@ def configure_callbacks(self):
551 | 565 | def train() -> None:
552 | 566 |     trainer = pl.Trainer(
553 | 567 |         enable_progress_bar=enable_progress_bar,
    | 568 | +         # max_epochs=1,
554 | 569 |         precision=16,
555 |     | -         gpus=1,
556 |     | -         # accelerator="auto",
557 |     | -         # devices="auto",
    | 570 | +         accelerator="gpu",
    | 571 | +         devices=1,
558 | 572 |         callbacks=callbacks,
559 | 573 |         logger=logger,
560 | 574 |     )
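The final hunk swaps the deprecated `gpus=1` Trainer argument for `accelerator="gpu"` plus `devices=1`, the spelling Lightning settled on before `gpus` was removed in 2.x. A minimal sketch of the two spellings, assuming a single-GPU machine (not the tutorial's full `train()` function):

```python
import pytorch_lightning as pl

# Old spelling removed in this diff (deprecated, later dropped in Lightning 2.x):
# trainer = pl.Trainer(precision=16, gpus=1)

# New spelling added in this diff:
trainer = pl.Trainer(precision=16, accelerator="gpu", devices=1)
```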