Initial work for the ELPV and SubTer datasets
ELPV serves as the example dataset/implementation; SubTer is the final dataset.
Deep-SAD-PyTorch/src/networks/elpv_LeNet.py (new file, 74 lines)
@@ -0,0 +1,74 @@
import torch
import torch.nn as nn
import torch.nn.functional as F

from base.base_net import BaseNet


class ELPV_LeNet(BaseNet):
    """Encoder for 300x300 single-channel ELPV images."""

    def __init__(self, rep_dim=256):
        super().__init__()

        self.rep_dim = rep_dim
        self.pool = nn.MaxPool2d(2, 2)

        # Two conv + pool stages: 300 -> 150 -> 75 per spatial dimension
        self.conv1 = nn.Conv2d(1, 8, 5, bias=False, padding=2)
        self.bn1 = nn.BatchNorm2d(8, eps=1e-04, affine=False)
        self.conv2 = nn.Conv2d(8, 4, 5, bias=False, padding=2)
        self.bn2 = nn.BatchNorm2d(4, eps=1e-04, affine=False)
        self.fc1 = nn.Linear(4 * 75 * 75, self.rep_dim, bias=False)

    def forward(self, x):
        x = x.view(-1, 1, 300, 300)
        x = self.conv1(x)
        x = self.pool(F.leaky_relu(self.bn1(x)))
        x = self.conv2(x)
        x = self.pool(F.leaky_relu(self.bn2(x)))
        x = x.view(int(x.size(0)), -1)
        x = self.fc1(x)
        return x
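The fc1 input size follows from the two pooling stages (300 -> 150 -> 75, with 4 channels, so 4 * 75 * 75 = 22500 features). A minimal shape sanity check, not part of the commit and assuming it is run from Deep-SAD-PyTorch/src so that the base.base_net import resolves:

# Hypothetical shape check (not part of the commit); run from Deep-SAD-PyTorch/src.
import torch
from networks.elpv_LeNet import ELPV_LeNet

net = ELPV_LeNet(rep_dim=256)
x = torch.randn(2, 1, 300, 300)   # batch of two 300x300 single-channel images
z = net(x)
assert z.shape == (2, 256)        # 4 * 75 * 75 = 22500 features feed fc1 -> rep_dim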
class ELPV_LeNet_Decoder(BaseNet):
    """Decoder mapping a rep_dim code back to a 300x300 single-channel image."""

    def __init__(self, rep_dim=256):
        super().__init__()

        self.rep_dim = rep_dim

        # Decoder network
        self.fc3 = nn.Linear(self.rep_dim, 2888, bias=False)   # 2888 = 2 * 38 * 38
        self.bn1d2 = nn.BatchNorm1d(2888, eps=1e-04, affine=False)
        self.deconv1 = nn.ConvTranspose2d(2, 4, 5, bias=False, padding=2)
        self.bn3 = nn.BatchNorm2d(4, eps=1e-04, affine=False)
        self.deconv2 = nn.ConvTranspose2d(4, 8, 5, bias=False, padding=3)
        self.bn4 = nn.BatchNorm2d(8, eps=1e-04, affine=False)
        self.deconv3 = nn.ConvTranspose2d(8, 1, 5, bias=False, padding=2)

    def forward(self, x):
        x = self.bn1d2(self.fc3(x))
        x = x.view(int(x.size(0)), 2, 38, 38)
        x = F.interpolate(F.leaky_relu(x), scale_factor=2)             # 38 -> 76
        x = self.deconv1(x)                                            # 76 (padding=2 preserves size)
        x = F.interpolate(F.leaky_relu(self.bn3(x)), scale_factor=2)   # 76 -> 152
        x = self.deconv2(x)                                            # 152 -> 150 (padding=3 shrinks by 2)
        x = F.interpolate(F.leaky_relu(self.bn4(x)), scale_factor=2)   # 150 -> 300
        x = self.deconv3(x)                                            # 300 (padding=2 preserves size)
        x = torch.sigmoid(x)
        return x


class ELPV_LeNet_Autoencoder(BaseNet):

    def __init__(self, rep_dim=256):
        super().__init__()

        self.rep_dim = rep_dim
        self.encoder = ELPV_LeNet(rep_dim=rep_dim)
        self.decoder = ELPV_LeNet_Decoder(rep_dim=rep_dim)

    def forward(self, x):
        x = self.encoder(x)
        x = self.decoder(x)
        return x
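The three x2 upsamplings only land back on 300x300 because deconv2 (padding=3) trims 152 to 150. A hedged round-trip sketch for the autoencoder, again assuming it runs from Deep-SAD-PyTorch/src:

# Hypothetical round-trip check (not part of the commit); run from Deep-SAD-PyTorch/src.
import torch
from networks.elpv_LeNet import ELPV_LeNet_Autoencoder

ae = ELPV_LeNet_Autoencoder(rep_dim=256)
x = torch.randn(2, 1, 300, 300)
x_hat = ae(x)
assert x_hat.shape == (2, 1, 300, 300)   # 38 -> 76 -> 76 -> 152 -> 150 -> 300 -> 300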
@@ -1,4 +1,6 @@
from .mnist_LeNet import MNIST_LeNet, MNIST_LeNet_Autoencoder
from .elpv_LeNet import ELPV_LeNet, ELPV_LeNet_Autoencoder
from .subter_LeNet import SubTer_LeNet, SubTer_LeNet_Autoencoder
from .fmnist_LeNet import FashionMNIST_LeNet, FashionMNIST_LeNet_Autoencoder
from .cifar10_LeNet import CIFAR10_LeNet, CIFAR10_LeNet_Autoencoder
from .mlp import MLP, MLP_Autoencoder
@@ -11,6 +13,8 @@ def build_network(net_name, ae_net=None):

    implemented_networks = (
        "mnist_LeNet",
        "elpv_LeNet",
        "subter_LeNet",
        "mnist_DGM_M2",
        "mnist_DGM_M1M2",
        "fmnist_LeNet",
@@ -39,6 +43,12 @@ def build_network(net_name, ae_net=None):
    if net_name == "mnist_LeNet":
        net = MNIST_LeNet()

    if net_name == "subter_LeNet":
        net = SubTer_LeNet()

    if net_name == "elpv_LeNet":
        net = ELPV_LeNet()

    if net_name == "mnist_DGM_M2":
        net = DeepGenerativeModel(
            [1 * 28 * 28, 2, 32, [128, 64]], classifier_net=MNIST_LeNet
@@ -118,6 +128,8 @@ def build_autoencoder(net_name):
    """Builds the corresponding autoencoder network."""

    implemented_networks = (
        "elpv_LeNet",
        "subter_LeNet",
        "mnist_LeNet",
        "mnist_DGM_M1M2",
        "fmnist_LeNet",
@@ -139,6 +151,12 @@ def build_autoencoder(net_name):
    if net_name == "mnist_LeNet":
        ae_net = MNIST_LeNet_Autoencoder()

    if net_name == "subter_LeNet":
        ae_net = SubTer_LeNet_Autoencoder()

    if net_name == "elpv_LeNet":
        ae_net = ELPV_LeNet_Autoencoder()

    if net_name == "mnist_DGM_M1M2":
        ae_net = VariationalAutoencoder([1 * 28 * 28, 32, [128, 64]])
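With these registrations in place, the new networks can be built by name like the existing ones. A hedged usage sketch, assuming build_network and build_autoencoder live in networks.main as in the upstream Deep-SAD-PyTorch layout:

# Hypothetical usage of the updated builders (not part of the commit);
# assumes Deep-SAD-PyTorch/src is on the import path and the builders live in networks.main.
from networks.main import build_network, build_autoencoder

net = build_network("elpv_LeNet")            # ELPV_LeNet encoder, rep_dim=256
ae_net = build_autoencoder("subter_LeNet")   # SubTer_LeNet_Autoencoder, rep_dim=1024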
Deep-SAD-PyTorch/src/networks/subter_LeNet.py (new file, 70 lines)
@@ -0,0 +1,70 @@
import torch
import torch.nn as nn
import torch.nn.functional as F

from base.base_net import BaseNet


class SubTer_LeNet(BaseNet):
    """Encoder for 32x2048 single-channel SubTer inputs."""

    def __init__(self, rep_dim=1024):
        super().__init__()

        self.rep_dim = rep_dim
        self.pool = nn.MaxPool2d(2, 2)

        # Two conv + pool stages: (32, 2048) -> (16, 1024) -> (8, 512)
        self.conv1 = nn.Conv2d(1, 8, 5, bias=False, padding=2)
        self.bn1 = nn.BatchNorm2d(8, eps=1e-04, affine=False)
        self.conv2 = nn.Conv2d(8, 4, 5, bias=False, padding=2)
        self.bn2 = nn.BatchNorm2d(4, eps=1e-04, affine=False)
        self.fc1 = nn.Linear(4 * 512 * 8, self.rep_dim, bias=False)

    def forward(self, x):
        x = x.view(-1, 1, 32, 2048)
        x = self.conv1(x)
        x = self.pool(F.leaky_relu(self.bn1(x)))
        x = self.conv2(x)
        x = self.pool(F.leaky_relu(self.bn2(x)))
        x = x.view(int(x.size(0)), -1)
        x = self.fc1(x)
        return x
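A corresponding shape check for the SubTer encoder (a sketch, not part of the commit, assuming it runs from Deep-SAD-PyTorch/src):

# Hypothetical shape check (not part of the commit); run from Deep-SAD-PyTorch/src.
import torch
from networks.subter_LeNet import SubTer_LeNet

net = SubTer_LeNet(rep_dim=1024)
x = torch.randn(2, 1, 32, 2048)
z = net(x)
assert z.shape == (2, 1024)   # 4 channels * 8 * 512 = 16384 features feed fc1 -> rep_dim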
class SubTer_LeNet_Decoder(BaseNet):
    """Decoder mapping a rep_dim code back to a 32x2048 single-channel output."""

    def __init__(self, rep_dim=1024):
        super().__init__()

        self.rep_dim = rep_dim

        # Decoder network
        self.fc3 = nn.Linear(self.rep_dim, 4 * 512 * 8, bias=False)
        self.bn3 = nn.BatchNorm2d(4, eps=1e-04, affine=False)
        self.deconv1 = nn.ConvTranspose2d(4, 8, 5, bias=False, padding=2)
        self.bn4 = nn.BatchNorm2d(8, eps=1e-04, affine=False)
        self.deconv2 = nn.ConvTranspose2d(8, 1, 5, bias=False, padding=2)

    def forward(self, x):
        x = self.fc3(x)
        x = x.view(int(x.size(0)), 4, 8, 512)
        x = F.interpolate(F.leaky_relu(self.bn3(x)), scale_factor=2)   # (8, 512) -> (16, 1024)
        x = self.deconv1(x)                                            # padding=2 preserves size
        x = F.interpolate(F.leaky_relu(self.bn4(x)), scale_factor=2)   # (16, 1024) -> (32, 2048)
        x = self.deconv2(x)                                            # padding=2 preserves size
        x = torch.sigmoid(x)
        return x


class SubTer_LeNet_Autoencoder(BaseNet):

    def __init__(self, rep_dim=1024):
        super().__init__()

        self.rep_dim = rep_dim
        self.encoder = SubTer_LeNet(rep_dim=rep_dim)
        self.decoder = SubTer_LeNet_Decoder(rep_dim=rep_dim)

    def forward(self, x):
        x = self.encoder(x)
        x = self.decoder(x)
        return x
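Unlike the ELPV decoder, the SubTer decoder needs only two x2 upsamplings, since both transposed convolutions preserve the spatial size. A hedged round-trip sketch, again assuming it runs from Deep-SAD-PyTorch/src:

# Hypothetical round-trip check (not part of the commit); run from Deep-SAD-PyTorch/src.
import torch
from networks.subter_LeNet import SubTer_LeNet_Autoencoder

ae = SubTer_LeNet_Autoencoder(rep_dim=1024)
x = torch.randn(2, 1, 32, 2048)
x_hat = ae(x)
assert x_hat.shape == (2, 1, 32, 2048)   # (8, 512) -> (16, 1024) -> (32, 2048)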