Commit 184c8ef

Enable logging hparams only if there are any (#11105)

1 parent 4414e0c
File tree

8 files changed: +13 −9 lines

CHANGELOG.md

Lines changed: 1 addition & 0 deletions

@@ -8,6 +8,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/).
 
 ### Fixed
 
+- Fixed a bug to disable logging hyperparameters in logger if there are no hparams ([#11105](https://github.com/PyTorchLightning/pytorch-lightning/issues/11105))
 - Fixed an issue when torch-scripting a `LightningModule` after training with `Trainer(sync_batchnorm=True)` ([#11078](https://github.com/PyTorchLightning/pytorch-lightning/pull/11078))
 - Fixed an `AttributeError` occuring when using a `CombinedLoader` (multiple dataloaders) for prediction ([#11111](https://github.com/PyTorchLightning/pytorch-lightning/pull/11111))

pytorch_lightning/core/mixins/hparams_mixin.py

Lines changed: 1 addition & 1 deletion

@@ -28,7 +28,7 @@ class HyperparametersMixin:
 
     def __init__(self) -> None:
         super().__init__()
-        self._log_hyperparams = True
+        self._log_hyperparams = False
 
     def save_hyperparameters(
         self,
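
For context, here is a minimal sketch of the opt-in pattern this default flip creates. Only the changed default is part of this commit; the idea that `save_hyperparameters()` re-enables the flag and that the trainer guards on it before touching the logger is an assumption inferred from the tests below, not shown in this diff:

# Hedged sketch, not the library source.

class HyperparametersMixin:
    def __init__(self) -> None:
        super().__init__()
        self._log_hyperparams = False  # the commit's new default: opt-in

    def save_hyperparameters(self, **hparams) -> None:
        # Assumption: explicitly saving hparams is what re-enables logging.
        self.hparams = dict(hparams)
        self._log_hyperparams = True

def maybe_log_hparams(logger, module) -> None:
    # Assumed trainer-side guard: a module that never called
    # save_hyperparameters() no longer triggers logger.log_hyperparams().
    if module._log_hyperparams:
        logger.log_hyperparams(module.hparams)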

tests/callbacks/test_gpu_stats_monitor.py

Lines changed: 1 addition & 1 deletion

@@ -83,7 +83,7 @@ def test_gpu_stats_monitor_no_queries(tmpdir):
     with mock.patch("pytorch_lightning.loggers.tensorboard.TensorBoardLogger.log_metrics") as log_metrics_mock:
         trainer.fit(model)
 
-    assert log_metrics_mock.mock_calls[2:] == [
+    assert log_metrics_mock.mock_calls[1:] == [
         mock.call({"batch_time/intra_step (ms)": mock.ANY}, step=0),
         mock.call({"batch_time/inter_step (ms)": mock.ANY}, step=1),
         mock.call({"batch_time/intra_step (ms)": mock.ANY}, step=1),

tests/loggers/test_all.py

Lines changed: 0 additions & 2 deletions

@@ -144,10 +144,8 @@ def log_metrics(self, metrics, step):
     log_metric_names = [(s, sorted(m.keys())) for s, m in logger.history]
     if logger_class == TensorBoardLogger:
         expected = [
-            (0, ["hp_metric"]),
             (0, ["epoch", "train_some_val"]),
             (0, ["early_stop_on", "epoch", "val_loss"]),
-            (0, ["hp_metric"]),
             (1, ["epoch", "test_loss"]),
         ]
         assert log_metric_names == expected

tests/loggers/test_base.py

Lines changed: 2 additions & 3 deletions

@@ -111,7 +111,6 @@ def training_step(self, batch, batch_idx):
     trainer = Trainer(max_steps=2, log_every_n_steps=1, logger=logger, default_root_dir=tmpdir)
     trainer.fit(model)
     assert trainer.state.finished, f"Training failed with {trainer.state}"
-    assert logger.hparams_logged == model.hparams
     assert logger.metrics_logged != {}
     assert logger.after_save_checkpoint_called
     assert logger.finalized_status == "success"
@@ -133,11 +132,11 @@ def training_step(self, batch, batch_idx):
     trainer.fit(model)
     assert trainer.state.finished, f"Training failed with {trainer.state}"
 
-    assert logger1.hparams_logged == model.hparams
+    assert logger1.hparams_logged is None
     assert logger1.metrics_logged != {}
     assert logger1.finalized_status == "success"
 
-    assert logger2.hparams_logged == model.hparams
+    assert logger2.hparams_logged is None
     assert logger2.metrics_logged != {}
     assert logger2.finalized_status == "success"
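
The custom logger in this test records whatever it is asked to log, so with the new default a model that never calls save_hyperparameters() leaves the hparams recorder untouched. A minimal stand-in for that recording pattern (hypothetical class, not the test's actual logger):

class RecordingLogger:
    def __init__(self):
        self.hparams_logged = None  # stays None if log_hyperparams is never called
        self.metrics_logged = {}

    def log_hyperparams(self, params):
        self.hparams_logged = params

    def log_metrics(self, metrics, step=None):
        self.metrics_logged = metrics

logger = RecordingLogger()
# Nothing saved hparams, so nothing calls log_hyperparams:
assert logger.hparams_logged is None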

tests/models/test_hparams.py

Lines changed: 4 additions & 1 deletion

@@ -776,7 +776,10 @@ def test_adding_datamodule_hparams(tmpdir, model, data):
     # Merged hparams were logged
     merged_hparams = copy.deepcopy(org_model_hparams)
     merged_hparams.update(org_data_hparams)
-    mock_logger.log_hyperparams.assert_called_with(merged_hparams)
+    if merged_hparams:
+        mock_logger.log_hyperparams.assert_called_with(merged_hparams)
+    else:
+        mock_logger.log_hyperparams.assert_not_called()
 
 
 def test_no_datamodule_for_hparams(tmpdir):
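
Standalone, the branch this test gains looks like the following (names are illustrative, not from the repository): with an empty merged dict the logger mock must never be invoked at all.

from unittest import mock

mock_logger = mock.Mock()
merged_hparams = {}  # e.g. neither the model nor the datamodule has hparams

if merged_hparams:
    mock_logger.log_hyperparams(merged_hparams)

mock_logger.log_hyperparams.assert_not_called()  # passes: the dict was empty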

tests/trainer/logging_/test_distributed_logging.py

Lines changed: 0 additions & 1 deletion

@@ -112,7 +112,6 @@ def on_fit_start(self, trainer, pl_module):
 
         def on_train_start(self, trainer, pl_module):
             assert trainer.logger.method_call
-            trainer.logger.log_hyperparams.assert_called_once()
             trainer.logger.log_graph.assert_called_once()
 
     logger = Mock()

tests/trainer/logging_/test_eval_loop_logging.py

Lines changed: 4 additions & 0 deletions

@@ -510,6 +510,10 @@ class ExtendedModel(BoringModel):
 
     val_losses = []
 
+    def __init__(self, some_val=7):
+        super().__init__()
+        self.save_hyperparameters()
+
     def training_step(self, batch, batch_idx):
         output = self.layer(batch)
         loss = self.loss(batch, output)
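
The added __init__ matters because save_hyperparameters() records the caller's init arguments, so this model once again has non-empty hparams for the logger path to pick up. A minimal sketch of that capture (a bare LightningModule stands in for the test's BoringModel):

from pytorch_lightning import LightningModule

class WithHparams(LightningModule):
    def __init__(self, some_val=7):
        super().__init__()
        self.save_hyperparameters()  # captures {"some_val": 7} into self.hparams

model = WithHparams()
assert dict(model.hparams) == {"some_val": 7}  # non-empty, so hparams get logged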
