fixed warning about changed torch behaviour

Switched the order in which optimizer.step() and lr_scheduler.step() are called, as now required by PyTorch.
see: https://pytorch.org/docs/stable/optim.html#how-to-adjust-learning-rate
Jan Kowalczyk
2024-06-28 12:00:37 +02:00
parent f51752c6f2
commit 42fb437fe1
2 changed files with 14 additions and 14 deletions


@@ -83,13 +83,6 @@ class DeepSADTrainer(BaseTrainer):
         net.train()
         for epoch in range(self.n_epochs):
-            scheduler.step()
-            if epoch in self.lr_milestones:
-                logger.info(
-                    " LR scheduler: new learning rate is %g"
-                    % float(scheduler.get_lr()[0])
-                )
             epoch_loss = 0.0
             n_batches = 0
             epoch_start_time = time.time()
@@ -117,6 +110,13 @@ class DeepSADTrainer(BaseTrainer):
                 epoch_loss += loss.item()
                 n_batches += 1
+            scheduler.step()
+            if epoch in self.lr_milestones:
+                logger.info(
+                    " LR scheduler: new learning rate is %g"
+                    % float(scheduler.get_lr()[0])
+                )
             # log epoch statistics
             epoch_train_time = time.time() - epoch_start_time
             logger.info(
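
For reference, a minimal, self-contained sketch of the call order the linked PyTorch docs require and that this diff moves to: optimizer.step() first, then lr_scheduler.step() once per epoch. The model, data, and milestones below are hypothetical placeholders, not the repository's code.

import torch
import torch.nn as nn

net = nn.Linear(10, 1)  # placeholder model
optimizer = torch.optim.Adam(net.parameters(), lr=1e-3)
scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=[50, 100], gamma=0.1)

for epoch in range(150):
    for x, y in [(torch.randn(4, 10), torch.randn(4, 1))]:  # placeholder batch
        optimizer.zero_grad()
        loss = nn.functional.mse_loss(net(x), y)
        loss.backward()
        optimizer.step()      # 1) update parameters first
    scheduler.step()          # 2) then advance the LR schedule (once per epoch)
    if epoch in {50, 100}:
        print("new lr:", scheduler.get_last_lr()[0])

Calling scheduler.step() before optimizer.step(), as the old code did, makes recent PyTorch emit a UserWarning and skips the first value of the schedule, which is what this commit fixes.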