black formatted files before changes
@@ -13,11 +13,29 @@ import numpy as np

 class DeepSADTrainer(BaseTrainer):

-    def __init__(self, c, eta: float, optimizer_name: str = 'adam', lr: float = 0.001, n_epochs: int = 150,
-                 lr_milestones: tuple = (), batch_size: int = 128, weight_decay: float = 1e-6, device: str = 'cuda',
-                 n_jobs_dataloader: int = 0):
-        super().__init__(optimizer_name, lr, n_epochs, lr_milestones, batch_size, weight_decay, device,
-                         n_jobs_dataloader)
+    def __init__(
+        self,
+        c,
+        eta: float,
+        optimizer_name: str = "adam",
+        lr: float = 0.001,
+        n_epochs: int = 150,
+        lr_milestones: tuple = (),
+        batch_size: int = 128,
+        weight_decay: float = 1e-6,
+        device: str = "cuda",
+        n_jobs_dataloader: int = 0,
+    ):
+        super().__init__(
+            optimizer_name,
+            lr,
+            n_epochs,
+            lr_milestones,
+            batch_size,
+            weight_decay,
+            device,
+            n_jobs_dataloader,
+        )

         # Deep SAD parameters
         self.c = torch.tensor(c, device=self.device) if c is not None else None
@@ -36,39 +54,50 @@ class DeepSADTrainer(BaseTrainer):
         logger = logging.getLogger()

         # Get train data loader
-        train_loader, _ = dataset.loaders(batch_size=self.batch_size, num_workers=self.n_jobs_dataloader)
+        train_loader, _ = dataset.loaders(
+            batch_size=self.batch_size, num_workers=self.n_jobs_dataloader
+        )

         # Set device for network
         net = net.to(self.device)

         # Set optimizer (Adam optimizer for now)
-        optimizer = optim.Adam(net.parameters(), lr=self.lr, weight_decay=self.weight_decay)
+        optimizer = optim.Adam(
+            net.parameters(), lr=self.lr, weight_decay=self.weight_decay
+        )

         # Set learning rate scheduler
-        scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=self.lr_milestones, gamma=0.1)
+        scheduler = optim.lr_scheduler.MultiStepLR(
+            optimizer, milestones=self.lr_milestones, gamma=0.1
+        )

         # Initialize hypersphere center c (if c not loaded)
         if self.c is None:
-            logger.info('Initializing center c...')
+            logger.info("Initializing center c...")
             self.c = self.init_center_c(train_loader, net)
-            logger.info('Center c initialized.')
+            logger.info("Center c initialized.")

         # Training
-        logger.info('Starting training...')
+        logger.info("Starting training...")
         start_time = time.time()
         net.train()
         for epoch in range(self.n_epochs):

             scheduler.step()
             if epoch in self.lr_milestones:
-                logger.info(' LR scheduler: new learning rate is %g' % float(scheduler.get_lr()[0]))
+                logger.info(
+                    " LR scheduler: new learning rate is %g"
+                    % float(scheduler.get_lr()[0])
+                )

             epoch_loss = 0.0
             n_batches = 0
             epoch_start_time = time.time()
             for data in train_loader:
                 inputs, _, semi_targets, _ = data
-                inputs, semi_targets = inputs.to(self.device), semi_targets.to(self.device)
+                inputs, semi_targets = inputs.to(self.device), semi_targets.to(
+                    self.device
+                )

                 # Zero the network parameter gradients
                 optimizer.zero_grad()
@@ -76,7 +105,11 @@ class DeepSADTrainer(BaseTrainer):
                 # Update network parameters via backpropagation: forward + backward + optimize
                 outputs = net(inputs)
                 dist = torch.sum((outputs - self.c) ** 2, dim=1)
-                losses = torch.where(semi_targets == 0, dist, self.eta * ((dist + self.eps) ** semi_targets.float()))
+                losses = torch.where(
+                    semi_targets == 0,
+                    dist,
+                    self.eta * ((dist + self.eps) ** semi_targets.float()),
+                )
                 loss = torch.mean(losses)
                 loss.backward()
                 optimizer.step()
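For context on the expression being wrapped here: Deep SAD scores each sample by its squared distance to the hypersphere center c. Unlabeled samples (semi_targets == 0) contribute that distance directly, while labeled samples contribute eta * (dist + eps) ** y with y in {-1, +1}, so labeled normals are pulled toward c and labeled anomalies are pushed away through the inverted distance. A minimal standalone sketch of that loss (illustrative helper, not part of this commit):

    import torch

    def deep_sad_loss(outputs, c, semi_targets, eta=1.0, eps=1e-6):
        # Squared Euclidean distance of each embedding to the center c.
        dist = torch.sum((outputs - c) ** 2, dim=1)
        # Unlabeled points: plain distance; labeled points: eta * (dist + eps) ** y.
        losses = torch.where(
            semi_targets == 0,
            dist,
            eta * ((dist + eps) ** semi_targets.float()),
        )
        return torch.mean(losses)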
@@ -86,12 +119,14 @@ class DeepSADTrainer(BaseTrainer):

             # log epoch statistics
             epoch_train_time = time.time() - epoch_start_time
-            logger.info(f'| Epoch: {epoch + 1:03}/{self.n_epochs:03} | Train Time: {epoch_train_time:.3f}s '
-                        f'| Train Loss: {epoch_loss / n_batches:.6f} |')
+            logger.info(
+                f"| Epoch: {epoch + 1:03}/{self.n_epochs:03} | Train Time: {epoch_train_time:.3f}s "
+                f"| Train Loss: {epoch_loss / n_batches:.6f} |"
+            )

         self.train_time = time.time() - start_time
-        logger.info('Training Time: {:.3f}s'.format(self.train_time))
-        logger.info('Finished training.')
+        logger.info("Training Time: {:.3f}s".format(self.train_time))
+        logger.info("Finished training.")

         return net
@@ -99,13 +134,15 @@ class DeepSADTrainer(BaseTrainer):
         logger = logging.getLogger()

         # Get test data loader
-        _, test_loader = dataset.loaders(batch_size=self.batch_size, num_workers=self.n_jobs_dataloader)
+        _, test_loader = dataset.loaders(
+            batch_size=self.batch_size, num_workers=self.n_jobs_dataloader
+        )

         # Set device for network
         net = net.to(self.device)

         # Testing
-        logger.info('Starting testing...')
+        logger.info("Starting testing...")
         epoch_loss = 0.0
         n_batches = 0
         start_time = time.time()
@@ -122,14 +159,22 @@ class DeepSADTrainer(BaseTrainer):

                 outputs = net(inputs)
                 dist = torch.sum((outputs - self.c) ** 2, dim=1)
-                losses = torch.where(semi_targets == 0, dist, self.eta * ((dist + self.eps) ** semi_targets.float()))
+                losses = torch.where(
+                    semi_targets == 0,
+                    dist,
+                    self.eta * ((dist + self.eps) ** semi_targets.float()),
+                )
                 loss = torch.mean(losses)
                 scores = dist

                 # Save triples of (idx, label, score) in a list
-                idx_label_score += list(zip(idx.cpu().data.numpy().tolist(),
-                                            labels.cpu().data.numpy().tolist(),
-                                            scores.cpu().data.numpy().tolist()))
+                idx_label_score += list(
+                    zip(
+                        idx.cpu().data.numpy().tolist(),
+                        labels.cpu().data.numpy().tolist(),
+                        scores.cpu().data.numpy().tolist(),
+                    )
+                )

                 epoch_loss += loss.item()
                 n_batches += 1
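After this loop the collected (idx, label, score) triples are unpacked and scored with roc_auc_score, as the next hunk shows. A self-contained illustration of that evaluation step (dummy values, for reference only):

    import numpy as np
    from sklearn.metrics import roc_auc_score

    # Dummy triples in the same (index, label, score) format as idx_label_score above.
    idx_label_score = [(0, 0, 0.1), (1, 1, 0.9), (2, 0, 0.2), (3, 1, 0.7)]
    _, labels, scores = zip(*idx_label_score)
    test_auc = roc_auc_score(np.array(labels), np.array(scores))
    print("Test AUC: {:.2f}%".format(100.0 * test_auc))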
@@ -144,10 +189,10 @@ class DeepSADTrainer(BaseTrainer):
         self.test_auc = roc_auc_score(labels, scores)

         # Log results
-        logger.info('Test Loss: {:.6f}'.format(epoch_loss / n_batches))
-        logger.info('Test AUC: {:.2f}%'.format(100. * self.test_auc))
-        logger.info('Test Time: {:.3f}s'.format(self.test_time))
-        logger.info('Finished testing.')
+        logger.info("Test Loss: {:.6f}".format(epoch_loss / n_batches))
+        logger.info("Test AUC: {:.2f}%".format(100.0 * self.test_auc))
+        logger.info("Test Time: {:.3f}s".format(self.test_time))
+        logger.info("Finished testing.")

     def init_center_c(self, train_loader: DataLoader, net: BaseNet, eps=0.1):
         """Initialize hypersphere center c as the mean from an initial forward pass on the data."""
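The init_center_c signature and docstring appear above only as context; its body is untouched by this commit. Such an initialization usually averages the network outputs over one forward pass and pushes near-zero components out to +/- eps so the distance-to-center scores cannot collapse. A hedged sketch under that assumption (not the exact file contents):

    import torch

    def init_center_c(train_loader, net, device="cuda", eps=0.1):
        # Mean embedding from one forward pass over the training data.
        n_samples, c = 0, None
        net.eval()
        with torch.no_grad():
            for inputs, *_ in train_loader:
                outputs = net(inputs.to(device))
                if c is None:
                    c = torch.zeros(outputs.shape[1], device=device)
                n_samples += outputs.shape[0]
                c += torch.sum(outputs, dim=0)
        c /= n_samples
        # Keep every coordinate of c away from zero.
        c[(abs(c) < eps) & (c < 0)] = -eps
        c[(abs(c) < eps) & (c > 0)] = eps
        return c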
@@ -14,11 +14,28 @@ import numpy as np

 class SemiDeepGenerativeTrainer(BaseTrainer):

-    def __init__(self, alpha: float = 0.1, optimizer_name: str = 'adam', lr: float = 0.001, n_epochs: int = 150,
-                 lr_milestones: tuple = (), batch_size: int = 128, weight_decay: float = 1e-6, device: str = 'cuda',
-                 n_jobs_dataloader: int = 0):
-        super().__init__(optimizer_name, lr, n_epochs, lr_milestones, batch_size, weight_decay, device,
-                         n_jobs_dataloader)
+    def __init__(
+        self,
+        alpha: float = 0.1,
+        optimizer_name: str = "adam",
+        lr: float = 0.001,
+        n_epochs: int = 150,
+        lr_milestones: tuple = (),
+        batch_size: int = 128,
+        weight_decay: float = 1e-6,
+        device: str = "cuda",
+        n_jobs_dataloader: int = 0,
+    ):
+        super().__init__(
+            optimizer_name,
+            lr,
+            n_epochs,
+            lr_milestones,
+            batch_size,
+            weight_decay,
+            device,
+            n_jobs_dataloader,
+        )

         self.alpha = alpha

@@ -32,7 +49,9 @@ class SemiDeepGenerativeTrainer(BaseTrainer):
         logger = logging.getLogger()

         # Get train data loader
-        train_loader, _ = dataset.loaders(batch_size=self.batch_size, num_workers=self.n_jobs_dataloader)
+        train_loader, _ = dataset.loaders(
+            batch_size=self.batch_size, num_workers=self.n_jobs_dataloader
+        )

         # Set device
         net = net.to(self.device)
@@ -42,20 +61,27 @@ class SemiDeepGenerativeTrainer(BaseTrainer):
         elbo = SVI(net, likelihood=binary_cross_entropy, sampler=sampler)

         # Set optimizer (Adam optimizer for now)
-        optimizer = optim.Adam(net.parameters(), lr=self.lr, weight_decay=self.weight_decay)
+        optimizer = optim.Adam(
+            net.parameters(), lr=self.lr, weight_decay=self.weight_decay
+        )

         # Set learning rate scheduler
-        scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=self.lr_milestones, gamma=0.1)
+        scheduler = optim.lr_scheduler.MultiStepLR(
+            optimizer, milestones=self.lr_milestones, gamma=0.1
+        )

         # Training
-        logger.info('Starting training...')
+        logger.info("Starting training...")
         start_time = time.time()
         net.train()
         for epoch in range(self.n_epochs):

             scheduler.step()
             if epoch in self.lr_milestones:
-                logger.info(' LR scheduler: new learning rate is %g' % float(scheduler.get_lr()[0]))
+                logger.info(
+                    " LR scheduler: new learning rate is %g"
+                    % float(scheduler.get_lr()[0])
+                )

             epoch_loss = 0.0
             n_batches = 0
@@ -73,7 +99,9 @@ class SemiDeepGenerativeTrainer(BaseTrainer):
                 u = inputs[semi_targets == 0]
                 y = labels[semi_targets != 0]
                 if y.nelement() > 1:
-                    y_onehot = torch.Tensor(y.size(0), 2).to(self.device)  # two labels: 0: normal, 1: outlier
+                    y_onehot = torch.Tensor(y.size(0), 2).to(
+                        self.device
+                    )  # two labels: 0: normal, 1: outlier
                     y_onehot.zero_()
                     y_onehot.scatter_(1, y.view(-1, 1), 1)
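The zero_()/scatter_() pair above is the classic idiom for one-hot encoding integer labels; on more recent PyTorch versions the same tensor can be produced with F.one_hot (equivalent shown for reference, not part of this diff):

    import torch
    import torch.nn.functional as F

    y = torch.tensor([0, 1, 1, 0])                  # 0: normal, 1: outlier
    y_onehot = F.one_hot(y, num_classes=2).float()  # shape (4, 2)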
@@ -94,7 +122,9 @@ class SemiDeepGenerativeTrainer(BaseTrainer):
                 # Add auxiliary classification loss q(y|x)
                 logits = net.classify(x)
                 eps = 1e-8
-                classication_loss = torch.sum(y_onehot * torch.log(logits + eps), dim=1).mean()
+                classication_loss = torch.sum(
+                    y_onehot * torch.log(logits + eps), dim=1
+                ).mean()

                 # Overall loss
                 loss = L - self.alpha * classication_loss + U  # J_alpha
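The line above assembles the J_alpha objective of the semi-supervised deep generative (M2) model: labeled term L, unlabeled term U, plus alpha times the classifier's cross-entropy on the labeled batch. Since classication_loss here is the mean log-probability rather than its negative, it enters with a minus sign; the test-time hunk further down negates it first and then adds it, which is the same quantity. A small sketch with the signs made explicit (argument names assumed from the surrounding loop):

    import torch

    def j_alpha(L, U, logits, y_onehot, alpha=0.1, eps=1e-8):
        # Cross-entropy of the classifier q(y|x) against the one-hot labels.
        cross_entropy = -torch.sum(y_onehot * torch.log(logits + eps), dim=1).mean()
        # L and U are the labeled and unlabeled loss terms from the SVI objective.
        return L + U + alpha * cross_entropy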
@@ -107,12 +137,14 @@ class SemiDeepGenerativeTrainer(BaseTrainer):

             # log epoch statistics
             epoch_train_time = time.time() - epoch_start_time
-            logger.info(f'| Epoch: {epoch + 1:03}/{self.n_epochs:03} | Train Time: {epoch_train_time:.3f}s '
-                        f'| Train Loss: {epoch_loss / n_batches:.6f} |')
+            logger.info(
+                f"| Epoch: {epoch + 1:03}/{self.n_epochs:03} | Train Time: {epoch_train_time:.3f}s "
+                f"| Train Loss: {epoch_loss / n_batches:.6f} |"
+            )

         self.train_time = time.time() - start_time
-        logger.info('Training Time: {:.3f}s'.format(self.train_time))
-        logger.info('Finished training.')
+        logger.info("Training Time: {:.3f}s".format(self.train_time))
+        logger.info("Finished training.")

         return net
@@ -120,7 +152,9 @@ class SemiDeepGenerativeTrainer(BaseTrainer):
         logger = logging.getLogger()

         # Get test data loader
-        _, test_loader = dataset.loaders(batch_size=self.batch_size, num_workers=self.n_jobs_dataloader)
+        _, test_loader = dataset.loaders(
+            batch_size=self.batch_size, num_workers=self.n_jobs_dataloader
+        )

         # Set device
         net = net.to(self.device)
@@ -130,7 +164,7 @@ class SemiDeepGenerativeTrainer(BaseTrainer):
         elbo = SVI(net, likelihood=binary_cross_entropy, sampler=sampler)

         # Testing
-        logger.info('Starting testing...')
+        logger.info("Starting testing...")
         epoch_loss = 0.0
         n_batches = 0
         start_time = time.time()
@@ -147,7 +181,9 @@ class SemiDeepGenerativeTrainer(BaseTrainer):
                 inputs = inputs.view(inputs.size(0), -1)
                 u = inputs
                 y = labels
-                y_onehot = torch.Tensor(y.size(0), 2).to(self.device)  # two labels: 0: normal, 1: outlier
+                y_onehot = torch.Tensor(y.size(0), 2).to(
+                    self.device
+                )  # two labels: 0: normal, 1: outlier
                 y_onehot.zero_()
                 y_onehot.scatter_(1, y.view(-1, 1), 1)
@@ -157,17 +193,25 @@ class SemiDeepGenerativeTrainer(BaseTrainer):

                 logits = net.classify(u)
                 eps = 1e-8
-                classication_loss = -torch.sum(y_onehot * torch.log(logits + eps), dim=1).mean()
+                classication_loss = -torch.sum(
+                    y_onehot * torch.log(logits + eps), dim=1
+                ).mean()

                 loss = L + self.alpha * classication_loss + U  # J_alpha

                 # Compute scores
-                scores = logits[:, 1]  # likelihood/confidence for anomalous class as anomaly score
+                scores = logits[
+                    :, 1
+                ]  # likelihood/confidence for anomalous class as anomaly score

                 # Save triple of (idx, label, score) in a list
-                idx_label_score += list(zip(idx.cpu().data.numpy().tolist(),
-                                            labels.cpu().data.numpy().tolist(),
-                                            scores.cpu().data.numpy().tolist()))
+                idx_label_score += list(
+                    zip(
+                        idx.cpu().data.numpy().tolist(),
+                        labels.cpu().data.numpy().tolist(),
+                        scores.cpu().data.numpy().tolist(),
+                    )
+                )

                 epoch_loss += loss.item()
                 n_batches += 1
@@ -182,7 +226,7 @@ class SemiDeepGenerativeTrainer(BaseTrainer):
         self.test_auc = roc_auc_score(labels, scores)

         # Log results
-        logger.info('Test Loss: {:.6f}'.format(epoch_loss / n_batches))
-        logger.info('Test AUC: {:.2f}%'.format(100. * self.test_auc))
-        logger.info('Test Time: {:.3f}s'.format(self.test_time))
-        logger.info('Finished testing.')
+        logger.info("Test Loss: {:.6f}".format(epoch_loss / n_batches))
+        logger.info("Test AUC: {:.2f}%".format(100.0 * self.test_auc))
+        logger.info("Test Time: {:.3f}s".format(self.test_time))
+        logger.info("Finished testing.")
@@ -13,10 +13,27 @@ import numpy as np

 class AETrainer(BaseTrainer):

-    def __init__(self, optimizer_name: str = 'adam', lr: float = 0.001, n_epochs: int = 150, lr_milestones: tuple = (),
-                 batch_size: int = 128, weight_decay: float = 1e-6, device: str = 'cuda', n_jobs_dataloader: int = 0):
-        super().__init__(optimizer_name, lr, n_epochs, lr_milestones, batch_size, weight_decay, device,
-                         n_jobs_dataloader)
+    def __init__(
+        self,
+        optimizer_name: str = "adam",
+        lr: float = 0.001,
+        n_epochs: int = 150,
+        lr_milestones: tuple = (),
+        batch_size: int = 128,
+        weight_decay: float = 1e-6,
+        device: str = "cuda",
+        n_jobs_dataloader: int = 0,
+    ):
+        super().__init__(
+            optimizer_name,
+            lr,
+            n_epochs,
+            lr_milestones,
+            batch_size,
+            weight_decay,
+            device,
+            n_jobs_dataloader,
+        )

         # Results
         self.train_time = None
@@ -27,30 +44,39 @@ class AETrainer(BaseTrainer):
         logger = logging.getLogger()

         # Get train data loader
-        train_loader, _ = dataset.loaders(batch_size=self.batch_size, num_workers=self.n_jobs_dataloader)
+        train_loader, _ = dataset.loaders(
+            batch_size=self.batch_size, num_workers=self.n_jobs_dataloader
+        )

         # Set loss
-        criterion = nn.MSELoss(reduction='none')
+        criterion = nn.MSELoss(reduction="none")

         # Set device
         ae_net = ae_net.to(self.device)
         criterion = criterion.to(self.device)

         # Set optimizer (Adam optimizer for now)
-        optimizer = optim.Adam(ae_net.parameters(), lr=self.lr, weight_decay=self.weight_decay)
+        optimizer = optim.Adam(
+            ae_net.parameters(), lr=self.lr, weight_decay=self.weight_decay
+        )

         # Set learning rate scheduler
-        scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=self.lr_milestones, gamma=0.1)
+        scheduler = optim.lr_scheduler.MultiStepLR(
+            optimizer, milestones=self.lr_milestones, gamma=0.1
+        )

         # Training
-        logger.info('Starting pretraining...')
+        logger.info("Starting pretraining...")
         start_time = time.time()
         ae_net.train()
         for epoch in range(self.n_epochs):

             scheduler.step()
             if epoch in self.lr_milestones:
-                logger.info(' LR scheduler: new learning rate is %g' % float(scheduler.get_lr()[0]))
+                logger.info(
+                    " LR scheduler: new learning rate is %g"
+                    % float(scheduler.get_lr()[0])
+                )

             epoch_loss = 0.0
             n_batches = 0
@@ -74,12 +100,14 @@ class AETrainer(BaseTrainer):

             # log epoch statistics
             epoch_train_time = time.time() - epoch_start_time
-            logger.info(f'| Epoch: {epoch + 1:03}/{self.n_epochs:03} | Train Time: {epoch_train_time:.3f}s '
-                        f'| Train Loss: {epoch_loss / n_batches:.6f} |')
+            logger.info(
+                f"| Epoch: {epoch + 1:03}/{self.n_epochs:03} | Train Time: {epoch_train_time:.3f}s "
+                f"| Train Loss: {epoch_loss / n_batches:.6f} |"
+            )

         self.train_time = time.time() - start_time
-        logger.info('Pretraining Time: {:.3f}s'.format(self.train_time))
-        logger.info('Finished pretraining.')
+        logger.info("Pretraining Time: {:.3f}s".format(self.train_time))
+        logger.info("Finished pretraining.")

         return ae_net
@@ -87,17 +115,19 @@ class AETrainer(BaseTrainer):
         logger = logging.getLogger()

         # Get test data loader
-        _, test_loader = dataset.loaders(batch_size=self.batch_size, num_workers=self.n_jobs_dataloader)
+        _, test_loader = dataset.loaders(
+            batch_size=self.batch_size, num_workers=self.n_jobs_dataloader
+        )

         # Set loss
-        criterion = nn.MSELoss(reduction='none')
+        criterion = nn.MSELoss(reduction="none")

         # Set device for network
         ae_net = ae_net.to(self.device)
         criterion = criterion.to(self.device)

         # Testing
-        logger.info('Testing autoencoder...')
+        logger.info("Testing autoencoder...")
         epoch_loss = 0.0
         n_batches = 0
         start_time = time.time()
@@ -106,16 +136,24 @@ class AETrainer(BaseTrainer):
         with torch.no_grad():
             for data in test_loader:
                 inputs, labels, _, idx = data
-                inputs, labels, idx = inputs.to(self.device), labels.to(self.device), idx.to(self.device)
+                inputs, labels, idx = (
+                    inputs.to(self.device),
+                    labels.to(self.device),
+                    idx.to(self.device),
+                )

                 rec = ae_net(inputs)
                 rec_loss = criterion(rec, inputs)
                 scores = torch.mean(rec_loss, dim=tuple(range(1, rec.dim())))

                 # Save triple of (idx, label, score) in a list
-                idx_label_score += list(zip(idx.cpu().data.numpy().tolist(),
-                                            labels.cpu().data.numpy().tolist(),
-                                            scores.cpu().data.numpy().tolist()))
+                idx_label_score += list(
+                    zip(
+                        idx.cpu().data.numpy().tolist(),
+                        labels.cpu().data.numpy().tolist(),
+                        scores.cpu().data.numpy().tolist(),
+                    )
+                )

                 loss = torch.mean(rec_loss)
                 epoch_loss += loss.item()
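Because the MSE criterion is built with reduction="none", rec_loss keeps the full input shape, and the mean over all non-batch dimensions yields one reconstruction-error score per sample. A short self-contained illustration of that reduction (shapes are arbitrary examples):

    import torch
    import torch.nn as nn

    inputs = torch.randn(4, 3, 32, 32)                     # batch of 4 images
    rec = torch.randn(4, 3, 32, 32)                        # their reconstructions
    rec_loss = nn.MSELoss(reduction="none")(rec, inputs)   # shape (4, 3, 32, 32)
    scores = torch.mean(rec_loss, dim=tuple(range(1, rec.dim())))
    print(scores.shape)                                    # torch.Size([4]): one score per sample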
@@ -130,7 +168,7 @@ class AETrainer(BaseTrainer):
         self.test_auc = roc_auc_score(labels, scores)

         # Log results
-        logger.info('Test Loss: {:.6f}'.format(epoch_loss / n_batches))
-        logger.info('Test AUC: {:.2f}%'.format(100. * self.test_auc))
-        logger.info('Test Time: {:.3f}s'.format(self.test_time))
-        logger.info('Finished testing autoencoder.')
+        logger.info("Test Loss: {:.6f}".format(epoch_loss / n_batches))
+        logger.info("Test AUC: {:.2f}%".format(100.0 * self.test_auc))
+        logger.info("Test Time: {:.3f}s".format(self.test_time))
+        logger.info("Finished testing autoencoder.")
@@ -13,10 +13,27 @@ import numpy as np

 class VAETrainer(BaseTrainer):

-    def __init__(self, optimizer_name: str = 'adam', lr: float = 0.001, n_epochs: int = 150, lr_milestones: tuple = (),
-                 batch_size: int = 128, weight_decay: float = 1e-6, device: str = 'cuda', n_jobs_dataloader: int = 0):
-        super().__init__(optimizer_name, lr, n_epochs, lr_milestones, batch_size, weight_decay, device,
-                         n_jobs_dataloader)
+    def __init__(
+        self,
+        optimizer_name: str = "adam",
+        lr: float = 0.001,
+        n_epochs: int = 150,
+        lr_milestones: tuple = (),
+        batch_size: int = 128,
+        weight_decay: float = 1e-6,
+        device: str = "cuda",
+        n_jobs_dataloader: int = 0,
+    ):
+        super().__init__(
+            optimizer_name,
+            lr,
+            n_epochs,
+            lr_milestones,
+            batch_size,
+            weight_decay,
+            device,
+            n_jobs_dataloader,
+        )

         # Results
         self.train_time = None
@@ -27,26 +44,35 @@ class VAETrainer(BaseTrainer):
         logger = logging.getLogger()

         # Get train data loader
-        train_loader, _ = dataset.loaders(batch_size=self.batch_size, num_workers=self.n_jobs_dataloader)
+        train_loader, _ = dataset.loaders(
+            batch_size=self.batch_size, num_workers=self.n_jobs_dataloader
+        )

         # Set device
         vae = vae.to(self.device)

         # Set optimizer (Adam optimizer for now)
-        optimizer = optim.Adam(vae.parameters(), lr=self.lr, weight_decay=self.weight_decay)
+        optimizer = optim.Adam(
+            vae.parameters(), lr=self.lr, weight_decay=self.weight_decay
+        )

         # Set learning rate scheduler
-        scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=self.lr_milestones, gamma=0.1)
+        scheduler = optim.lr_scheduler.MultiStepLR(
+            optimizer, milestones=self.lr_milestones, gamma=0.1
+        )

         # Training
-        logger.info('Starting pretraining...')
+        logger.info("Starting pretraining...")
         start_time = time.time()
         vae.train()
         for epoch in range(self.n_epochs):

             scheduler.step()
             if epoch in self.lr_milestones:
-                logger.info(' LR scheduler: new learning rate is %g' % float(scheduler.get_lr()[0]))
+                logger.info(
+                    " LR scheduler: new learning rate is %g"
+                    % float(scheduler.get_lr()[0])
+                )

             epoch_loss = 0.0
             n_batches = 0
@@ -76,12 +102,14 @@ class VAETrainer(BaseTrainer):

             # log epoch statistics
             epoch_train_time = time.time() - epoch_start_time
-            logger.info(f'| Epoch: {epoch + 1:03}/{self.n_epochs:03} | Train Time: {epoch_train_time:.3f}s '
-                        f'| Train Loss: {epoch_loss / n_batches:.6f} |')
+            logger.info(
+                f"| Epoch: {epoch + 1:03}/{self.n_epochs:03} | Train Time: {epoch_train_time:.3f}s "
+                f"| Train Loss: {epoch_loss / n_batches:.6f} |"
+            )

         self.train_time = time.time() - start_time
-        logger.info('Pretraining Time: {:.3f}s'.format(self.train_time))
-        logger.info('Finished pretraining.')
+        logger.info("Pretraining Time: {:.3f}s".format(self.train_time))
+        logger.info("Finished pretraining.")

         return vae
@@ -89,13 +117,15 @@ class VAETrainer(BaseTrainer):
         logger = logging.getLogger()

         # Get test data loader
-        _, test_loader = dataset.loaders(batch_size=self.batch_size, num_workers=self.n_jobs_dataloader)
+        _, test_loader = dataset.loaders(
+            batch_size=self.batch_size, num_workers=self.n_jobs_dataloader
+        )

         # Set device
         vae = vae.to(self.device)

         # Testing
-        logger.info('Starting testing...')
+        logger.info("Starting testing...")
         epoch_loss = 0.0
         n_batches = 0
         start_time = time.time()
@@ -104,7 +134,11 @@ class VAETrainer(BaseTrainer):
         with torch.no_grad():
             for data in test_loader:
                 inputs, labels, _, idx = data
-                inputs, labels, idx = inputs.to(self.device), labels.to(self.device), idx.to(self.device)
+                inputs, labels, idx = (
+                    inputs.to(self.device),
+                    labels.to(self.device),
+                    idx.to(self.device),
+                )

                 inputs = inputs.view(inputs.size(0), -1)
@@ -113,9 +147,13 @@ class VAETrainer(BaseTrainer):
                 scores = -likelihood  # negative likelihood as anomaly score

                 # Save triple of (idx, label, score) in a list
-                idx_label_score += list(zip(idx.cpu().data.numpy().tolist(),
-                                            labels.cpu().data.numpy().tolist(),
-                                            scores.cpu().data.numpy().tolist()))
+                idx_label_score += list(
+                    zip(
+                        idx.cpu().data.numpy().tolist(),
+                        labels.cpu().data.numpy().tolist(),
+                        scores.cpu().data.numpy().tolist(),
+                    )
+                )

                 # Overall loss
                 elbo = likelihood - vae.kl_divergence
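Here the anomaly score is the negative expected reconstruction log-likelihood, and the per-sample ELBO is that likelihood minus the KL divergence accumulated by the model during its forward pass; the batch loss is then typically the negative mean ELBO. A compact restatement as a helper (hedged; the per-sample tensors are assumed to come from the loop above):

    import torch

    def vae_scores_and_loss(likelihood: torch.Tensor, kl_divergence: torch.Tensor):
        scores = -likelihood                # low likelihood means high anomaly score
        elbo = likelihood - kl_divergence   # evidence lower bound per sample
        loss = -torch.mean(elbo)            # negative mean ELBO
        return scores, loss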
@@ -133,7 +171,7 @@ class VAETrainer(BaseTrainer):
         self.test_auc = roc_auc_score(labels, scores)

         # Log results
-        logger.info('Test Loss: {:.6f}'.format(epoch_loss / n_batches))
-        logger.info('Test AUC: {:.2f}%'.format(100. * self.test_auc))
-        logger.info('Test Time: {:.3f}s'.format(self.test_time))
-        logger.info('Finished testing variational autoencoder.')
+        logger.info("Test Loss: {:.6f}".format(epoch_loss / n_batches))
+        logger.info("Test AUC: {:.2f}%".format(100.0 * self.test_auc))
+        logger.info("Test Time: {:.3f}s".format(self.test_time))
+        logger.info("Finished testing variational autoencoder.")
@@ -41,7 +41,13 @@ class SVI(nn.Module):

     base_sampler = ImportanceWeightedSampler(mc=1, iw=1)

-    def __init__(self, model, likelihood=F.binary_cross_entropy, beta=repeat(1), sampler=base_sampler):
+    def __init__(
+        self,
+        model,
+        likelihood=F.binary_cross_entropy,
+        beta=repeat(1),
+        sampler=base_sampler,
+    ):
         super(SVI, self).__init__()
         self.model = model
         self.likelihood = likelihood