added deepsad base code

Jan Kowalczyk
2024-06-28 07:42:12 +02:00
parent 2eb1bf2e05
commit 914bb020d0
57 changed files with 4974 additions and 0 deletions

optim/DeepSAD_trainer.py View File

@@ -0,0 +1,173 @@
from base.base_trainer import BaseTrainer
from base.base_dataset import BaseADDataset
from base.base_net import BaseNet
from torch.utils.data.dataloader import DataLoader
from sklearn.metrics import roc_auc_score
import logging
import time
import torch
import torch.optim as optim
import numpy as np
class DeepSADTrainer(BaseTrainer):
def __init__(self, c, eta: float, optimizer_name: str = 'adam', lr: float = 0.001, n_epochs: int = 150,
lr_milestones: tuple = (), batch_size: int = 128, weight_decay: float = 1e-6, device: str = 'cuda',
n_jobs_dataloader: int = 0):
super().__init__(optimizer_name, lr, n_epochs, lr_milestones, batch_size, weight_decay, device,
n_jobs_dataloader)
# Deep SAD parameters
self.c = torch.tensor(c, device=self.device) if c is not None else None
self.eta = eta
# Optimization parameters
self.eps = 1e-6
# Results
self.train_time = None
self.test_auc = None
self.test_time = None
self.test_scores = None
def train(self, dataset: BaseADDataset, net: BaseNet):
logger = logging.getLogger()
# Get train data loader
train_loader, _ = dataset.loaders(batch_size=self.batch_size, num_workers=self.n_jobs_dataloader)
# Set device for network
net = net.to(self.device)
# Set optimizer (Adam optimizer for now)
optimizer = optim.Adam(net.parameters(), lr=self.lr, weight_decay=self.weight_decay)
# Set learning rate scheduler
scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=self.lr_milestones, gamma=0.1)
# Initialize hypersphere center c (if c not loaded)
if self.c is None:
logger.info('Initializing center c...')
self.c = self.init_center_c(train_loader, net)
logger.info('Center c initialized.')
# Training
logger.info('Starting training...')
start_time = time.time()
net.train()
for epoch in range(self.n_epochs):
scheduler.step()
if epoch in self.lr_milestones:
logger.info(' LR scheduler: new learning rate is %g' % float(scheduler.get_lr()[0]))
epoch_loss = 0.0
n_batches = 0
epoch_start_time = time.time()
for data in train_loader:
inputs, _, semi_targets, _ = data
inputs, semi_targets = inputs.to(self.device), semi_targets.to(self.device)
# Zero the network parameter gradients
optimizer.zero_grad()
# Update network parameters via backpropagation: forward + backward + optimize
outputs = net(inputs)
dist = torch.sum((outputs - self.c) ** 2, dim=1)
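# Deep SAD loss: unlabeled samples (semi_target == 0) contribute their squared distance to c;
# labeled samples contribute eta * (dist + eps)^semi_target, so labeled normals (+1) are pulled
# towards c while labeled anomalies (-1) are pushed away via the inverse distance.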
losses = torch.where(semi_targets == 0, dist, self.eta * ((dist + self.eps) ** semi_targets.float()))
loss = torch.mean(losses)
loss.backward()
optimizer.step()
epoch_loss += loss.item()
n_batches += 1
# log epoch statistics
epoch_train_time = time.time() - epoch_start_time
logger.info(f'| Epoch: {epoch + 1:03}/{self.n_epochs:03} | Train Time: {epoch_train_time:.3f}s '
f'| Train Loss: {epoch_loss / n_batches:.6f} |')
self.train_time = time.time() - start_time
logger.info('Training Time: {:.3f}s'.format(self.train_time))
logger.info('Finished training.')
return net
def test(self, dataset: BaseADDataset, net: BaseNet):
logger = logging.getLogger()
# Get test data loader
_, test_loader = dataset.loaders(batch_size=self.batch_size, num_workers=self.n_jobs_dataloader)
# Set device for network
net = net.to(self.device)
# Testing
logger.info('Starting testing...')
epoch_loss = 0.0
n_batches = 0
start_time = time.time()
idx_label_score = []
net.eval()
with torch.no_grad():
for data in test_loader:
inputs, labels, semi_targets, idx = data
inputs = inputs.to(self.device)
labels = labels.to(self.device)
semi_targets = semi_targets.to(self.device)
idx = idx.to(self.device)
outputs = net(inputs)
dist = torch.sum((outputs - self.c) ** 2, dim=1)
losses = torch.where(semi_targets == 0, dist, self.eta * ((dist + self.eps) ** semi_targets.float()))
loss = torch.mean(losses)
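# Anomaly score: squared distance of the embedding to the hypersphere center c.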
scores = dist
# Save triples of (idx, label, score) in a list
idx_label_score += list(zip(idx.cpu().data.numpy().tolist(),
labels.cpu().data.numpy().tolist(),
scores.cpu().data.numpy().tolist()))
epoch_loss += loss.item()
n_batches += 1
self.test_time = time.time() - start_time
self.test_scores = idx_label_score
# Compute AUC
_, labels, scores = zip(*idx_label_score)
labels = np.array(labels)
scores = np.array(scores)
self.test_auc = roc_auc_score(labels, scores)
# Log results
logger.info('Test Loss: {:.6f}'.format(epoch_loss / n_batches))
logger.info('Test AUC: {:.2f}%'.format(100. * self.test_auc))
logger.info('Test Time: {:.3f}s'.format(self.test_time))
logger.info('Finished testing.')
def init_center_c(self, train_loader: DataLoader, net: BaseNet, eps=0.1):
"""Initialize hypersphere center c as the mean from an initial forward pass on the data."""
n_samples = 0
c = torch.zeros(net.rep_dim, device=self.device)
net.eval()
with torch.no_grad():
for data in train_loader:
# get the inputs of the batch
inputs, _, _, _ = data
inputs = inputs.to(self.device)
outputs = net(inputs)
n_samples += outputs.shape[0]
c += torch.sum(outputs, dim=0)
c /= n_samples
# If c_i is too close to 0, set to +-eps. Reason: a zero unit can be trivially matched with zero weights.
c[(abs(c) < eps) & (c < 0)] = -eps
c[(abs(c) < eps) & (c > 0)] = eps
return c

optim/SemiDGM_trainer.py View File

@@ -0,0 +1,188 @@
from base.base_trainer import BaseTrainer
from base.base_dataset import BaseADDataset
from base.base_net import BaseNet
from optim.variational import SVI, ImportanceWeightedSampler
from utils.misc import binary_cross_entropy
from sklearn.metrics import roc_auc_score
import logging
import time
import torch
import torch.optim as optim
import numpy as np
class SemiDeepGenerativeTrainer(BaseTrainer):
def __init__(self, alpha: float = 0.1, optimizer_name: str = 'adam', lr: float = 0.001, n_epochs: int = 150,
lr_milestones: tuple = (), batch_size: int = 128, weight_decay: float = 1e-6, device: str = 'cuda',
n_jobs_dataloader: int = 0):
super().__init__(optimizer_name, lr, n_epochs, lr_milestones, batch_size, weight_decay, device,
n_jobs_dataloader)
self.alpha = alpha
# Results
self.train_time = None
self.test_auc = None
self.test_time = None
self.test_scores = None
def train(self, dataset: BaseADDataset, net: BaseNet):
logger = logging.getLogger()
# Get train data loader
train_loader, _ = dataset.loaders(batch_size=self.batch_size, num_workers=self.n_jobs_dataloader)
# Set device
net = net.to(self.device)
# Use importance weighted sampler (Burda et al., 2015) to get a better estimate on the log-likelihood.
sampler = ImportanceWeightedSampler(mc=1, iw=1)
elbo = SVI(net, likelihood=binary_cross_entropy, sampler=sampler)
# Set optimizer (Adam optimizer for now)
optimizer = optim.Adam(net.parameters(), lr=self.lr, weight_decay=self.weight_decay)
# Set learning rate scheduler
scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=self.lr_milestones, gamma=0.1)
# Training
logger.info('Starting training...')
start_time = time.time()
net.train()
for epoch in range(self.n_epochs):
scheduler.step()
if epoch in self.lr_milestones:
logger.info(' LR scheduler: new learning rate is %g' % float(scheduler.get_lr()[0]))
epoch_loss = 0.0
n_batches = 0
epoch_start_time = time.time()
for data in train_loader:
inputs, labels, semi_targets, _ = data
inputs = inputs.to(self.device)
labels = labels.to(self.device)
semi_targets = semi_targets.to(self.device)
# Get labeled and unlabeled data and make labels one-hot
inputs = inputs.view(inputs.size(0), -1)
x = inputs[semi_targets != 0]
u = inputs[semi_targets == 0]
y = labels[semi_targets != 0]
if y.nelement() > 1:
y_onehot = torch.Tensor(y.size(0), 2).to(self.device) # two labels: 0: normal, 1: outlier
y_onehot.zero_()
y_onehot.scatter_(1, y.view(-1, 1), 1)
# Zero the network parameter gradients
optimizer.zero_grad()
# Update network parameters via backpropagation: forward + backward + optimize
if y.nelement() < 2:
L = torch.tensor(0.0).to(self.device)
else:
L = -elbo(x, y_onehot)
U = -elbo(u)
# Regular cross entropy
if y.nelement() < 2:
classification_loss = torch.tensor(0.0).to(self.device)
else:
# Add auxiliary classification loss q(y|x)
logits = net.classify(x)
eps = 1e-8
classification_loss = torch.sum(y_onehot * torch.log(logits + eps), dim=1).mean()
# Overall loss
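# J_alpha objective of the M2 semi-supervised VAE (Kingma et al., 2014): labeled ELBO term L,
# unlabeled ELBO term U, and a scaled classification term. Note that classification_loss equals
# sum(y * log q(y|x)), i.e. the negative cross entropy, hence the minus sign below.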
loss = L - self.alpha * classification_loss + U # J_alpha
loss.backward()
optimizer.step()
epoch_loss += loss.item()
n_batches += 1
# log epoch statistics
epoch_train_time = time.time() - epoch_start_time
logger.info(f'| Epoch: {epoch + 1:03}/{self.n_epochs:03} | Train Time: {epoch_train_time:.3f}s '
f'| Train Loss: {epoch_loss / n_batches:.6f} |')
self.train_time = time.time() - start_time
logger.info('Training Time: {:.3f}s'.format(self.train_time))
logger.info('Finished training.')
return net
def test(self, dataset: BaseADDataset, net: BaseNet):
logger = logging.getLogger()
# Get test data loader
_, test_loader = dataset.loaders(batch_size=self.batch_size, num_workers=self.n_jobs_dataloader)
# Set device
net = net.to(self.device)
# Use importance weighted sampler (Burda et al., 2015) to get a better estimate on the log-likelihood.
sampler = ImportanceWeightedSampler(mc=1, iw=1)
elbo = SVI(net, likelihood=binary_cross_entropy, sampler=sampler)
# Testing
logger.info('Starting testing...')
epoch_loss = 0.0
n_batches = 0
start_time = time.time()
idx_label_score = []
net.eval()
with torch.no_grad():
for data in test_loader:
inputs, labels, _, idx = data
inputs = inputs.to(self.device)
labels = labels.to(self.device)
idx = idx.to(self.device)
# All test data is considered unlabeled
inputs = inputs.view(inputs.size(0), -1)
u = inputs
y = labels
y_onehot = torch.Tensor(y.size(0), 2).to(self.device) # two labels: 0: normal, 1: outlier
y_onehot.zero_()
y_onehot.scatter_(1, y.view(-1, 1), 1)
# Compute loss
L = -elbo(u, y_onehot)
U = -elbo(u)
logits = net.classify(u)
eps = 1e-8
classification_loss = -torch.sum(y_onehot * torch.log(logits + eps), dim=1).mean()
loss = L + self.alpha * classification_loss + U # J_alpha
# Compute scores
scores = logits[:, 1] # likelihood/confidence for anomalous class as anomaly score
# Save triple of (idx, label, score) in a list
idx_label_score += list(zip(idx.cpu().data.numpy().tolist(),
labels.cpu().data.numpy().tolist(),
scores.cpu().data.numpy().tolist()))
epoch_loss += loss.item()
n_batches += 1
self.test_time = time.time() - start_time
self.test_scores = idx_label_score
# Compute AUC
_, labels, scores = zip(*idx_label_score)
labels = np.array(labels)
scores = np.array(scores)
self.test_auc = roc_auc_score(labels, scores)
# Log results
logger.info('Test Loss: {:.6f}'.format(epoch_loss / n_batches))
logger.info('Test AUC: {:.2f}%'.format(100. * self.test_auc))
logger.info('Test Time: {:.3f}s'.format(self.test_time))
logger.info('Finished testing.')

optim/__init__.py View File

@@ -0,0 +1,5 @@
from .DeepSAD_trainer import DeepSADTrainer
from .ae_trainer import AETrainer
from .SemiDGM_trainer import SemiDeepGenerativeTrainer
from .vae_trainer import VAETrainer
from .variational import SVI, ImportanceWeightedSampler

optim/ae_trainer.py View File

@@ -0,0 +1,136 @@
from base.base_trainer import BaseTrainer
from base.base_dataset import BaseADDataset
from base.base_net import BaseNet
from sklearn.metrics import roc_auc_score
import logging
import time
import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np
class AETrainer(BaseTrainer):
def __init__(self, optimizer_name: str = 'adam', lr: float = 0.001, n_epochs: int = 150, lr_milestones: tuple = (),
batch_size: int = 128, weight_decay: float = 1e-6, device: str = 'cuda', n_jobs_dataloader: int = 0):
super().__init__(optimizer_name, lr, n_epochs, lr_milestones, batch_size, weight_decay, device,
n_jobs_dataloader)
# Results
self.train_time = None
self.test_auc = None
self.test_time = None
def train(self, dataset: BaseADDataset, ae_net: BaseNet):
logger = logging.getLogger()
# Get train data loader
train_loader, _ = dataset.loaders(batch_size=self.batch_size, num_workers=self.n_jobs_dataloader)
# Set loss
criterion = nn.MSELoss(reduction='none')
# Set device
ae_net = ae_net.to(self.device)
criterion = criterion.to(self.device)
# Set optimizer (Adam optimizer for now)
optimizer = optim.Adam(ae_net.parameters(), lr=self.lr, weight_decay=self.weight_decay)
# Set learning rate scheduler
scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=self.lr_milestones, gamma=0.1)
# Training
logger.info('Starting pretraining...')
start_time = time.time()
ae_net.train()
for epoch in range(self.n_epochs):
scheduler.step()
if epoch in self.lr_milestones:
logger.info(' LR scheduler: new learning rate is %g' % float(scheduler.get_lr()[0]))
epoch_loss = 0.0
n_batches = 0
epoch_start_time = time.time()
for data in train_loader:
inputs, _, _, _ = data
inputs = inputs.to(self.device)
# Zero the network parameter gradients
optimizer.zero_grad()
# Update network parameters via backpropagation: forward + backward + optimize
rec = ae_net(inputs)
rec_loss = criterion(rec, inputs)
loss = torch.mean(rec_loss)
loss.backward()
optimizer.step()
epoch_loss += loss.item()
n_batches += 1
# log epoch statistics
epoch_train_time = time.time() - epoch_start_time
logger.info(f'| Epoch: {epoch + 1:03}/{self.n_epochs:03} | Train Time: {epoch_train_time:.3f}s '
f'| Train Loss: {epoch_loss / n_batches:.6f} |')
self.train_time = time.time() - start_time
logger.info('Pretraining Time: {:.3f}s'.format(self.train_time))
logger.info('Finished pretraining.')
return ae_net
def test(self, dataset: BaseADDataset, ae_net: BaseNet):
logger = logging.getLogger()
# Get test data loader
_, test_loader = dataset.loaders(batch_size=self.batch_size, num_workers=self.n_jobs_dataloader)
# Set loss
criterion = nn.MSELoss(reduction='none')
# Set device for network
ae_net = ae_net.to(self.device)
criterion = criterion.to(self.device)
# Testing
logger.info('Testing autoencoder...')
epoch_loss = 0.0
n_batches = 0
start_time = time.time()
idx_label_score = []
ae_net.eval()
with torch.no_grad():
for data in test_loader:
inputs, labels, _, idx = data
inputs, labels, idx = inputs.to(self.device), labels.to(self.device), idx.to(self.device)
rec = ae_net(inputs)
rec_loss = criterion(rec, inputs)
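# Anomaly score: per-sample mean reconstruction error (averaged over all non-batch dimensions).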
scores = torch.mean(rec_loss, dim=tuple(range(1, rec.dim())))
# Save triple of (idx, label, score) in a list
idx_label_score += list(zip(idx.cpu().data.numpy().tolist(),
labels.cpu().data.numpy().tolist(),
scores.cpu().data.numpy().tolist()))
loss = torch.mean(rec_loss)
epoch_loss += loss.item()
n_batches += 1
self.test_time = time.time() - start_time
# Compute AUC
_, labels, scores = zip(*idx_label_score)
labels = np.array(labels)
scores = np.array(scores)
self.test_auc = roc_auc_score(labels, scores)
# Log results
logger.info('Test Loss: {:.6f}'.format(epoch_loss / n_batches))
logger.info('Test AUC: {:.2f}%'.format(100. * self.test_auc))
logger.info('Test Time: {:.3f}s'.format(self.test_time))
logger.info('Finished testing autoencoder.')

optim/vae_trainer.py View File

@@ -0,0 +1,139 @@
from base.base_trainer import BaseTrainer
from base.base_dataset import BaseADDataset
from base.base_net import BaseNet
from utils.misc import binary_cross_entropy
from sklearn.metrics import roc_auc_score
import logging
import time
import torch
import torch.optim as optim
import numpy as np
class VAETrainer(BaseTrainer):
def __init__(self, optimizer_name: str = 'adam', lr: float = 0.001, n_epochs: int = 150, lr_milestones: tuple = (),
batch_size: int = 128, weight_decay: float = 1e-6, device: str = 'cuda', n_jobs_dataloader: int = 0):
super().__init__(optimizer_name, lr, n_epochs, lr_milestones, batch_size, weight_decay, device,
n_jobs_dataloader)
# Results
self.train_time = None
self.test_auc = None
self.test_time = None
def train(self, dataset: BaseADDataset, vae: BaseNet):
logger = logging.getLogger()
# Get train data loader
train_loader, _ = dataset.loaders(batch_size=self.batch_size, num_workers=self.n_jobs_dataloader)
# Set device
vae = vae.to(self.device)
# Set optimizer (Adam optimizer for now)
optimizer = optim.Adam(vae.parameters(), lr=self.lr, weight_decay=self.weight_decay)
# Set learning rate scheduler
scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=self.lr_milestones, gamma=0.1)
# Training
logger.info('Starting pretraining...')
start_time = time.time()
vae.train()
for epoch in range(self.n_epochs):
scheduler.step()
if epoch in self.lr_milestones:
logger.info(' LR scheduler: new learning rate is %g' % float(scheduler.get_lr()[0]))
epoch_loss = 0.0
n_batches = 0
epoch_start_time = time.time()
for data in train_loader:
inputs, _, _, _ = data
inputs = inputs.to(self.device)
inputs = inputs.view(inputs.size(0), -1)
# Zero the network parameter gradients
optimizer.zero_grad()
# Update network parameters via backpropagation: forward + backward + optimize
rec = vae(inputs)
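# ELBO = E_q[log p(x|z)] - KL(q(z|x) || p(z)): binary cross entropy gives the negative
# reconstruction log-likelihood, and vae.kl_divergence holds the KL term.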
likelihood = -binary_cross_entropy(rec, inputs)
elbo = likelihood - vae.kl_divergence
# Overall loss
loss = -torch.mean(elbo)
loss.backward()
optimizer.step()
epoch_loss += loss.item()
n_batches += 1
# log epoch statistics
epoch_train_time = time.time() - epoch_start_time
logger.info(f'| Epoch: {epoch + 1:03}/{self.n_epochs:03} | Train Time: {epoch_train_time:.3f}s '
f'| Train Loss: {epoch_loss / n_batches:.6f} |')
self.train_time = time.time() - start_time
logger.info('Pretraining Time: {:.3f}s'.format(self.train_time))
logger.info('Finished pretraining.')
return vae
def test(self, dataset: BaseADDataset, vae: BaseNet):
logger = logging.getLogger()
# Get test data loader
_, test_loader = dataset.loaders(batch_size=self.batch_size, num_workers=self.n_jobs_dataloader)
# Set device
vae = vae.to(self.device)
# Testing
logger.info('Starting testing...')
epoch_loss = 0.0
n_batches = 0
start_time = time.time()
idx_label_score = []
vae.eval()
with torch.no_grad():
for data in test_loader:
inputs, labels, _, idx = data
inputs, labels, idx = inputs.to(self.device), labels.to(self.device), idx.to(self.device)
inputs = inputs.view(inputs.size(0), -1)
rec = vae(inputs)
likelihood = -binary_cross_entropy(rec, inputs)
scores = -likelihood # negative likelihood as anomaly score
# Save triple of (idx, label, score) in a list
idx_label_score += list(zip(idx.cpu().data.numpy().tolist(),
labels.cpu().data.numpy().tolist(),
scores.cpu().data.numpy().tolist()))
# Overall loss
elbo = likelihood - vae.kl_divergence
loss = -torch.mean(elbo)
epoch_loss += loss.item()
n_batches += 1
self.test_time = time.time() - start_time
# Compute AUC
_, labels, scores = zip(*idx_label_score)
labels = np.array(labels)
scores = np.array(scores)
self.test_auc = roc_auc_score(labels, scores)
# Log results
logger.info('Test Loss: {:.6f}'.format(epoch_loss / n_batches))
logger.info('Test AUC: {:.2f}%'.format(100. * self.test_auc))
logger.info('Test Time: {:.3f}s'.format(self.test_time))
logger.info('Finished testing variational autoencoder.')

optim/variational.py View File

@@ -0,0 +1,93 @@
import torch
import torch.nn.functional as F
from torch import nn
from itertools import repeat
from utils import enumerate_discrete, log_sum_exp
from networks import log_standard_categorical
# Acknowledgements: https://github.com/wohlert/semi-supervised-pytorch
class ImportanceWeightedSampler(object):
"""
Importance weighted sampler (Burda et al., 2015) to be used together with SVI.
:param mc: number of Monte Carlo samples
:param iw: number of Importance Weighted samples
"""
def __init__(self, mc=1, iw=1):
self.mc = mc
self.iw = iw
def resample(self, x):
return x.repeat(self.mc * self.iw, 1)
def __call__(self, elbo):
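# Importance-weighted bound: log-mean-exp over the iw importance samples,
# then average over the mc Monte Carlo samples (Burda et al., 2015).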
elbo = elbo.view(self.mc, self.iw, -1)
elbo = torch.mean(log_sum_exp(elbo, dim=1, sum_op=torch.mean), dim=0)
return elbo.view(-1)
class SVI(nn.Module):
"""
Stochastic variational inference (SVI) optimizer for semi-supervised learning.
:param model: semi-supervised model to evaluate
:param likelihood: p(x|y,z) for example BCE or MSE
:param beta: warm-up/scaling of KL-term
:param sampler: sampler for x and y, e.g. for Monte Carlo
"""
base_sampler = ImportanceWeightedSampler(mc=1, iw=1)
def __init__(self, model, likelihood=F.binary_cross_entropy, beta=repeat(1), sampler=base_sampler):
super(SVI, self).__init__()
self.model = model
self.likelihood = likelihood
self.sampler = sampler
self.beta = beta
def forward(self, x, y=None):
is_labeled = y is not None
# Prepare for sampling
xs, ys = (x, y)
# Enumerate choices of label
if not is_labeled:
ys = enumerate_discrete(xs, self.model.y_dim)
xs = xs.repeat(self.model.y_dim, 1)
# Increase sampling dimension
xs = self.sampler.resample(xs)
ys = self.sampler.resample(ys)
reconstruction = self.model(xs, ys)
# p(x|y,z)
likelihood = -self.likelihood(reconstruction, xs)
# p(y)
prior = -log_standard_categorical(ys)
# Equivalent to -L(x, y)
elbo = likelihood + prior - next(self.beta) * self.model.kl_divergence
L = self.sampler(elbo)
if is_labeled:
return torch.mean(L)
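# Unlabeled case: weight the per-label ELBOs by q(y|x) and add the entropy H(q(y|x)),
# which yields -U(x) as in Kingma et al. (2014).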
logits = self.model.classify(x)
L = L.view_as(logits.t()).t()
# Calculate entropy H(q(y|x)) and sum over all labels
eps = 1e-8
H = -torch.sum(torch.mul(logits, torch.log(logits + eps)), dim=-1)
L = torch.sum(torch.mul(logits, L), dim=-1)
# Equivalent to -U(x)
U = L + H
return torch.mean(U)
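
For orientation, a minimal usage sketch of how the trainers above fit together. `MyADDataset` and `MyNet` are hypothetical placeholders for concrete `BaseADDataset`/`BaseNet` implementations and are not part of the files in this commit.

from optim import DeepSADTrainer

# Hypothetical dataset and network implementing BaseADDataset / BaseNet (not in this commit).
dataset = MyADDataset(root='./data')
net = MyNet()

# c=None lets the trainer initialize the hypersphere center from an initial forward pass;
# eta weights the labeled term of the Deep SAD loss.
trainer = DeepSADTrainer(c=None, eta=1.0, lr=0.001, n_epochs=50, batch_size=128, device='cuda')
net = trainer.train(dataset, net)
trainer.test(dataset, net)
print('Test AUC: {:.2f}%'.format(100. * trainer.test_auc))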