ae elbow work
@@ -72,34 +72,45 @@ class AETrainer(BaseTrainer):
            optimizer, milestones=self.lr_milestones, gamma=0.1
        )

        # Prepare containers for results (similar to test)
        all_indices = []
        all_labels_exp_based = []
        all_labels_manual_based = []
        all_semi_targets = []
        all_file_ids = []
        all_frame_ids = []
        all_scores = []

        # Training
        logger.info("Starting pretraining...")
        start_time = time.time()
        ae_net.train()

        all_training_data = []
        for epoch in range(self.n_epochs):
            epoch_loss = 0.0
            n_batches = 0
            epoch_start_time = time.time()
            for data in train_loader:
                (
                    inputs,
                    labels_exp_based,
                    labels_manual_based,
                    semi_targets,
                    idx,
                    (file_id, frame_id),
                ) = data
                inputs, idx = (
                    inputs.to(self.device),
                    idx.to(self.device),
                )

                # Keep the (file_id, frame_id) pairs of every training batch
                all_training_data.append(
                    np.dstack(
                        (
                            file_id.detach().cpu().numpy(),
                            frame_id.detach().cpu().numpy(),
                        )
                    )
                )

                # Zero the network parameter gradients
                optimizer.zero_grad()

                # Update network parameters via backpropagation: forward + backward + optimize
                rec = ae_net(inputs)
                rec_loss = criterion(rec, inputs)
                scores = torch.mean(rec_loss, dim=tuple(range(1, rec.dim())))
                loss = torch.mean(rec_loss)
                loss.backward()
                optimizer.step()
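How the per-sample scores fall out of the elementwise criterion is easier to see in isolation. A minimal sketch, assuming `criterion` is something like `nn.MSELoss(reduction="none")` (the trainer's actual criterion is configured elsewhere, so this is an illustrative stand-in):

import torch
import torch.nn as nn

# Hypothetical shapes: a batch of 4 inputs, each a 1x8x8 "frame".
inputs = torch.randn(4, 1, 8, 8)
rec = inputs + 0.1 * torch.randn_like(inputs)  # stand-in for ae_net(inputs)

criterion = nn.MSELoss(reduction="none")  # elementwise loss, no reduction
rec_loss = criterion(rec, inputs)         # same shape as inputs: (4, 1, 8, 8)

# Average over every non-batch dimension -> one anomaly score per sample.
scores = torch.mean(rec_loss, dim=tuple(range(1, rec.dim())))
loss = torch.mean(rec_loss)               # scalar batch loss for backprop

print(scores.shape)  # torch.Size([4])
print(loss.shape)    # torch.Size([])

The same reduction is reused unchanged in the test loop below, so train and test scores are directly comparable.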
@@ -107,6 +118,17 @@ class AETrainer(BaseTrainer):
                epoch_loss += loss.item()
                n_batches += 1

                # Save all relevant information from dataloader
                all_indices.extend(idx.detach().cpu().numpy())
                all_labels_exp_based.extend(labels_exp_based.detach().cpu().numpy())
                all_labels_manual_based.extend(
                    labels_manual_based.detach().cpu().numpy()
                )
                all_semi_targets.extend(semi_targets.detach().cpu().numpy())
                all_file_ids.extend(file_id.detach().cpu().numpy())
                all_frame_ids.extend(frame_id.detach().cpu().numpy())
                all_scores.extend(scores.detach().cpu().numpy())

            scheduler.step()
            if epoch in self.lr_milestones:
                logger.info(
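The container bookkeeping follows one pattern throughout: detach the tensor from the autograd graph, move it to CPU, convert to NumPy, then extend a flat Python list so each batch contributes one entry per sample. A small sketch of that pattern with made-up batches:

import numpy as np
import torch

all_scores = []
for _ in range(3):                    # stand-in for the batch loop
    scores = torch.rand(4)            # per-sample scores for a batch of 4
    all_scores.extend(scores.detach().cpu().numpy())

train_scores = np.array(all_scores)   # shape (12,): 3 batches x 4 samples
print(train_scores.shape)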
@@ -122,14 +144,18 @@ class AETrainer(BaseTrainer):
                )

        self.train_time = time.time() - start_time

        # Save all results as member variables (like in test)
        self.train_indices = np.array(all_indices)
        self.train_labels_exp_based = np.array(all_labels_exp_based)
        self.train_labels_manual_based = np.array(all_labels_manual_based)
        self.train_semi_targets = np.array(all_semi_targets)
        self.train_file_ids = np.array(all_file_ids)
        self.train_frame_ids = np.array(all_frame_ids)
        self.train_scores = np.array(all_scores)
        self.train_loss = epoch_loss / n_batches if n_batches > 0 else float("nan")

        logger.info("Pretraining Time: {:.3f}s".format(self.train_time))

        all_training_data = np.concatenate([x.squeeze() for x in all_training_data])

        sorted_training_data = all_training_data[
            np.lexsort((all_training_data[:, 1], all_training_data[:, 0]))
        ]

        logger.info("Finished pretraining.")

        return ae_net
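`np.lexsort` takes its keys in reverse priority order (the last key is the primary key), so passing `(frame_ids, file_ids)` here orders rows by file_id first and frame_id second. A small check with made-up IDs:

import numpy as np

# Rows of (file_id, frame_id), deliberately out of order.
data = np.array([[2, 0], [1, 5], [1, 2], [2, 1]])

order = np.lexsort((data[:, 1], data[:, 0]))  # primary: col 0, secondary: col 1
print(data[order])
# [[1 2]
#  [1 5]
#  [2 0]
#  [2 1]]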
@@ -156,65 +182,70 @@ class AETrainer(BaseTrainer):
        ae_net = ae_net.to(self.device)
        criterion = criterion.to(self.device)

        # Prepare containers for results
        all_indices = []
        all_labels_exp_based = []
        all_labels_manual_based = []
        all_semi_targets = []
        all_file_ids = []
        all_frame_ids = []
        all_scores = []

        # Testing
        logger.info("Testing autoencoder...")
        epoch_loss = 0.0
        n_batches = 0
        start_time = time.time()
        idx_label_score = []
        ae_net.eval()
        all_training_data = []
        with torch.no_grad():
            for data in test_loader:
                (
                    inputs,
                    labels_exp_based,
                    labels_manual_based,
                    semi_targets,
                    idx,
                    (file_id, frame_id),
                ) = data
                inputs, idx = (
                    inputs.to(self.device),
                    idx.to(self.device),
                )

                # Keep the (file_id, frame_id) pairs of every test batch
                all_training_data.append(
                    np.dstack(
                        (
                            file_id.detach().cpu().numpy(),
                            frame_id.detach().cpu().numpy(),
                        )
                    )
                )

                rec = ae_net(inputs)
                rec_loss = criterion(rec, inputs)
                scores = torch.mean(rec_loss, dim=tuple(range(1, rec.dim())))

                # Save triple of (idx, label, score) in a list
                idx_label_score += list(
                    zip(
                        idx.cpu().data.numpy().tolist(),
                        labels_exp_based.cpu().data.numpy().tolist(),
                        scores.cpu().data.numpy().tolist(),
                    )
                )

                loss = torch.mean(rec_loss)
                epoch_loss += loss.item()
                n_batches += 1

                # Save all relevant information from dataloader
                all_indices.extend(idx.detach().cpu().numpy())
                all_labels_exp_based.extend(labels_exp_based.detach().cpu().numpy())
                all_labels_manual_based.extend(
                    labels_manual_based.detach().cpu().numpy()
                )
                all_semi_targets.extend(semi_targets.detach().cpu().numpy())
                all_file_ids.extend(file_id.detach().cpu().numpy())
                all_frame_ids.extend(frame_id.detach().cpu().numpy())
                all_scores.extend(scores.detach().cpu().numpy())

        self.test_time = time.time() - start_time

        all_training_data = np.concatenate([x.squeeze() for x in all_training_data])

        # Save all results as member variables
        self.test_indices = np.array(all_indices)
        self.test_labels_exp_based = np.array(all_labels_exp_based)
        self.test_labels_manual_based = np.array(all_labels_manual_based)
        self.test_semi_targets = np.array(all_semi_targets)
        self.test_file_ids = np.array(all_file_ids)
        self.test_frame_ids = np.array(all_frame_ids)
        self.test_scores = np.array(all_scores)

        sorted_training_data = all_training_data[
            np.lexsort((all_training_data[:, 1], all_training_data[:, 0]))
        ]

        # No performance metric is calculated using labels, as this is pre-training
        self.test_loss = epoch_loss / n_batches if n_batches > 0 else float("nan")

        # Log results
        logger.info("Test Loss: {:.6f}".format(self.test_loss))
        logger.info("Test Time: {:.3f}s".format(self.test_time))
        logger.info("Finished testing autoencoder.")
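Why the per-batch `np.dstack(...)` followed by `x.squeeze()` and `np.concatenate` yields one (N, 2) table of (file_id, frame_id) pairs: `np.dstack` of two length-B vectors has shape (1, B, 2), `squeeze` drops the leading axis, and concatenation stacks the batches along axis 0. A sketch with two hypothetical batches:

import numpy as np

all_training_data = []
for file_ids, frame_ids in [
    (np.array([0, 0, 1]), np.array([0, 1, 0])),  # batch of 3
    (np.array([1, 2]), np.array([1, 0])),        # batch of 2
]:
    stacked = np.dstack((file_ids, frame_ids))   # shape (1, B, 2)
    all_training_data.append(stacked)

pairs = np.concatenate([x.squeeze() for x in all_training_data])
print(pairs.shape)  # (5, 2)

One caveat of this pattern: a batch of size 1 would squeeze from (1, 1, 2) down to (2,) and break the concatenation, so it implicitly assumes every batch holds more than one sample (e.g. via drop_last or a suitable batch size).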