Skip to content

Commit

Permalink
Avoid using lr from checkpoint. (#1781)
Browse files Browse the repository at this point in the history
  • Loading branch information
csukuangfj authored Oct 27, 2024
1 parent 37a1420 commit 05f7563
Showing 1 changed file with 8 additions and 1 deletion.
9 changes: 8 additions & 1 deletion egs/librispeech/ASR/zipformer/optim.py
Original file line number Diff line number Diff line change
Expand Up @@ -787,7 +787,9 @@ def state_dict(self):
is not the optimizer.
"""
return {
"base_lrs": self.base_lrs,
            # The user might want to override base_lr after restoring from a
            # checkpoint, so don't include it in the state dict.
            # Previously it was included:
            # "base_lrs": self.base_lrs,
"epoch": self.epoch,
"batch": self.batch,
}
Expand All @@ -799,7 +801,12 @@ def load_state_dict(self, state_dict):
state_dict (dict): scheduler state. Should be an object returned
from a call to :meth:`state_dict`.
"""
        # Saving and restoring base_lrs around the update is a work-around for
        # a previous problem where base_lrs were written into the state dict:
        # loading such a checkpoint would otherwise clobber the user's base_lr.
base_lrs = self.base_lrs
self.__dict__.update(state_dict)
self.base_lrs = base_lrs


def get_last_lr(self) -> List[float]:
"""Return last computed learning rate by current scheduler. Will be a list of float."""
Expand Down

0 comments on commit 05f7563

Please sign in to comment.