added deepsad base code
Deep-SAD-PyTorch/src/networks/layers/standard.py (new file, 52 lines)
@@ -0,0 +1,52 @@
import torch

from torch.nn import Module
from torch.nn import init
from torch.nn.parameter import Parameter


# Acknowledgements: https://github.com/wohlert/semi-supervised-pytorch
class Standardize(Module):
    """
    Applies (element-wise) standardization with trainable translation parameter μ and scale parameter σ, i.e. computes
    (x - μ) / σ where '/' is applied element-wise.

    Args:
        in_features: size of each input sample (the output has the same size)
        bias: If set to ``False``, the layer will not learn a translation parameter μ.
            Default: ``True``
        eps: small constant added to σ for numerical stability. Default: ``1e-6``

    Attributes:
        mu: the learnable translation parameter μ.
        std: the learnable scale parameter σ.
    """
    __constants__ = ['mu']

    def __init__(self, in_features, bias=True, eps=1e-6):
        super(Standardize, self).__init__()
        self.in_features = in_features
        self.out_features = in_features
        self.eps = eps
        self.std = Parameter(torch.Tensor(in_features))
        if bias:
            self.mu = Parameter(torch.Tensor(in_features))
        else:
            self.register_parameter('mu', None)
        self.reset_parameters()

    def reset_parameters(self):
        # Initialize to the identity transform: σ = 1 and (if present) μ = 0.
        init.constant_(self.std, 1)
        if self.mu is not None:
            init.constant_(self.mu, 0)

    def forward(self, x):
        if self.mu is not None:
            x = x - self.mu  # out-of-place, so the caller's tensor is not modified
        x = torch.div(x, self.std + self.eps)
        return x

    def extra_repr(self):
        return 'in_features={}, out_features={}, bias={}'.format(
            self.in_features, self.out_features, self.mu is not None
        )
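For context, a minimal usage sketch of the layer above (hypothetical, not part of the commit; the import path assumes the src/ layout shown in the file header):

    import torch
    from networks.layers.standard import Standardize

    layer = Standardize(in_features=4)
    x = torch.randn(8, 4)   # batch of 8 samples, 4 features each
    y = layer(x)            # computes (x - mu) / (std + eps), element-wise per feature
    print(y.shape)          # torch.Size([8, 4]): output size equals input size

With the default initialization (μ = 0, σ = 1) the layer starts as the identity map, and eps keeps the division stable if std is driven toward zero during training.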
Deep-SAD-PyTorch/src/networks/layers/stochastic.py (new file, 53 lines)
@@ -0,0 +1,53 @@
import torch
import torch.nn as nn
import torch.nn.functional as F


# Acknowledgements: https://github.com/wohlert/semi-supervised-pytorch
class Stochastic(nn.Module):
    """
    Base stochastic layer that uses the reparametrization trick (Kingma and Welling, 2013) to draw a sample from a
    distribution parametrized by mu and log_var.
    """

    def __init__(self):
        super(Stochastic, self).__init__()

    def reparametrize(self, mu, log_var):
        # Draw epsilon ~ N(0, I) with the same shape, dtype, and device as mu.
        epsilon = torch.randn_like(mu)

        # log_std = 0.5 * log_var
        # std = exp(log_std)
        std = log_var.mul(0.5).exp_()

        # z = std * epsilon + mu
        z = mu.addcmul(std, epsilon)

        return z

    def forward(self, x):
        raise NotImplementedError


class GaussianSample(Stochastic):
    """
    Layer that represents a sample from a Gaussian distribution.
    """

    def __init__(self, in_features, out_features):
        super(GaussianSample, self).__init__()
        self.in_features = in_features
        self.out_features = out_features

        self.mu = nn.Linear(in_features, out_features)
        self.log_var = nn.Linear(in_features, out_features)

    def forward(self, x):
        mu = self.mu(x)
        log_var = F.softplus(self.log_var(x))
        return self.reparametrize(mu, log_var), mu, log_var
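For context, a minimal usage sketch of GaussianSample (hypothetical, not part of the commit): the layer maps x to μ and log σ² with two linear heads and returns z = μ + σ ⊙ ε with ε ~ N(0, I), so gradients flow through μ and log σ² rather than through the sampling step.

    import torch
    from networks.layers.stochastic import GaussianSample

    layer = GaussianSample(in_features=16, out_features=4)
    x = torch.randn(8, 16)
    z, mu, log_var = layer(x)
    print(z.shape, mu.shape, log_var.shape)  # all torch.Size([8, 4])

Note that log_var passes through softplus in forward, so it is non-negative and the resulting σ = exp(½ log_var) is at least 1 by construction.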