"""DANN model."""
|
|
|
|
|
|
|
|
import torch.nn as nn
|
|
|
|
from .functions import ReverseLayerF
|
|
|
|
from torchvision import models
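
# Note: `ReverseLayerF` is DANN's gradient-reversal op, defined in
# `.functions`: the identity on the forward pass, and it multiplies the
# incoming gradient by -alpha on the backward pass, so minimising the domain
# loss pushes the shared features towards domain invariance
# (Ganin & Lempitsky, 2015).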


class Classifier(nn.Module):
    """SVHN architecture without a domain discriminator."""

    def __init__(self):
        super(Classifier, self).__init__()
        self.restored = False

        self.feature = nn.Sequential()
        self.feature.add_module('f_conv1', nn.Conv2d(1, 64, kernel_size=5))   # 1x28x28 -> 64x24x24
        self.feature.add_module('f_bn1', nn.BatchNorm2d(64))
        self.feature.add_module('f_pool1', nn.MaxPool2d(2))                   # -> 64x12x12
        self.feature.add_module('f_relu1', nn.ReLU(True))
        self.feature.add_module('f_conv2', nn.Conv2d(64, 50, kernel_size=5))  # -> 50x8x8
        self.feature.add_module('f_bn2', nn.BatchNorm2d(50))
        self.feature.add_module('f_drop1', nn.Dropout2d())
        self.feature.add_module('f_pool2', nn.MaxPool2d(2))                   # -> 50x4x4
        self.feature.add_module('f_relu2', nn.ReLU(True))

        # The head sees flattened (batch, 800) activations, so the 1-d
        # BatchNorm/Dropout variants are required here; the 2-d ones expect
        # 4-d input and fail on modern PyTorch.
        self.class_classifier = nn.Sequential()
        self.class_classifier.add_module('c_fc1', nn.Linear(50 * 4 * 4, 100))
        self.class_classifier.add_module('c_bn1', nn.BatchNorm1d(100))
        self.class_classifier.add_module('c_relu1', nn.ReLU(True))
        self.class_classifier.add_module('c_drop1', nn.Dropout())
        self.class_classifier.add_module('c_fc2', nn.Linear(100, 100))
        self.class_classifier.add_module('c_bn2', nn.BatchNorm1d(100))
        self.class_classifier.add_module('c_relu2', nn.ReLU(True))
        self.class_classifier.add_module('c_fc3', nn.Linear(100, 10))
        self.class_classifier.add_module('c_softmax', nn.LogSoftmax(dim=1))

    def forward(self, input_data):
        input_data = input_data.expand(input_data.shape[0], 1, 28, 28)
        feature = self.feature(input_data)
        feature = feature.view(-1, 50 * 4 * 4)
        class_output = self.class_classifier(feature)

        return class_output
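
# With no discriminator or gradient reversal, `Classifier` is suited to
# source-only training, the usual lower-bound baseline that DANN results
# are compared against.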


class MNISTmodel(nn.Module):
    """MNIST architecture."""

    def __init__(self):
        super(MNISTmodel, self).__init__()
        self.restored = False

        self.feature = nn.Sequential(
            nn.Conv2d(in_channels=1, out_channels=32, kernel_size=(5, 5)),   # 1x28x28 -> 32x24x24
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=(2, 2)),                                # -> 32x12x12
            nn.Conv2d(in_channels=32, out_channels=48, kernel_size=(5, 5)),  # -> 48x8x8
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=(2, 2)),                                # -> 48x4x4
        )

        self.classifier = nn.Sequential(
            nn.Linear(48 * 4 * 4, 100),
            nn.ReLU(inplace=True),
            nn.Linear(100, 100),
            nn.ReLU(inplace=True),
            nn.Linear(100, 10),
        )

        self.discriminator = nn.Sequential(
            nn.Linear(48 * 4 * 4, 100),
            nn.ReLU(inplace=True),
            nn.Linear(100, 2),
        )

    def forward(self, input_data, alpha):
        input_data = input_data.expand(input_data.shape[0], 1, 28, 28)
        feature = self.feature(input_data)
        feature = feature.view(-1, 48 * 4 * 4)
        # The discriminator sees the features through the gradient-reversal
        # layer, so its gradient is flipped before reaching `feature`.
        reverse_feature = ReverseLayerF.apply(feature, alpha)
        class_output = self.classifier(feature)
        domain_output = self.discriminator(reverse_feature)

        return class_output, domain_output
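
# In DANN training, `alpha` is typically annealed over the run, e.g.
# alpha = 2 / (1 + exp(-10 * p)) - 1 with p in [0, 1] the training progress
# (Ganin & Lempitsky, 2015); the schedule lives in the training loop, not in
# these modules.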


class SVHNmodel(nn.Module):
    """SVHN architecture."""

    def __init__(self):
        super(SVHNmodel, self).__init__()
        self.restored = False

        self.feature = nn.Sequential()
        self.feature.add_module('f_conv1', nn.Conv2d(1, 64, kernel_size=5))   # 1x28x28 -> 64x24x24
        self.feature.add_module('f_bn1', nn.BatchNorm2d(64))
        self.feature.add_module('f_pool1', nn.MaxPool2d(2))                   # -> 64x12x12
        self.feature.add_module('f_relu1', nn.ReLU(True))
        self.feature.add_module('f_conv2', nn.Conv2d(64, 50, kernel_size=5))  # -> 50x8x8
        self.feature.add_module('f_bn2', nn.BatchNorm2d(50))
        self.feature.add_module('f_drop1', nn.Dropout2d())
        self.feature.add_module('f_pool2', nn.MaxPool2d(2))                   # -> 50x4x4
        self.feature.add_module('f_relu2', nn.ReLU(True))

        # As in `Classifier`, both heads operate on flattened (batch, 800)
        # tensors, hence BatchNorm1d/Dropout rather than the 2-d variants.
        self.class_classifier = nn.Sequential()
        self.class_classifier.add_module('c_fc1', nn.Linear(50 * 4 * 4, 100))
        self.class_classifier.add_module('c_bn1', nn.BatchNorm1d(100))
        self.class_classifier.add_module('c_relu1', nn.ReLU(True))
        self.class_classifier.add_module('c_drop1', nn.Dropout())
        self.class_classifier.add_module('c_fc2', nn.Linear(100, 100))
        self.class_classifier.add_module('c_bn2', nn.BatchNorm1d(100))
        self.class_classifier.add_module('c_relu2', nn.ReLU(True))
        self.class_classifier.add_module('c_fc3', nn.Linear(100, 10))
        self.class_classifier.add_module('c_softmax', nn.LogSoftmax(dim=1))

        self.domain_classifier = nn.Sequential()
        self.domain_classifier.add_module('d_fc1', nn.Linear(50 * 4 * 4, 100))
        self.domain_classifier.add_module('d_bn1', nn.BatchNorm1d(100))
        self.domain_classifier.add_module('d_relu1', nn.ReLU(True))
        self.domain_classifier.add_module('d_fc2', nn.Linear(100, 2))
        self.domain_classifier.add_module('d_softmax', nn.LogSoftmax(dim=1))

    def forward(self, input_data, alpha):
        input_data = input_data.expand(input_data.shape[0], 1, 28, 28)
        feature = self.feature(input_data)
        feature = feature.view(-1, 50 * 4 * 4)
        reverse_feature = ReverseLayerF.apply(feature, alpha)
        class_output = self.class_classifier(feature)
        domain_output = self.domain_classifier(reverse_feature)

        return class_output, domain_output
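
# `Classifier` and `SVHNmodel` end in LogSoftmax and should be trained with
# nn.NLLLoss; `MNISTmodel` and `AlexModel` return raw logits and pair with
# nn.CrossEntropyLoss instead.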


class AlexModel(nn.Module):
    """AlexNet pretrained on ImageNet, for the Office dataset."""

    def __init__(self):
        super(AlexModel, self).__init__()
        self.restored = False

        # On torchvision >= 0.13, `pretrained=True` is deprecated in favour of
        # `models.alexnet(weights=models.AlexNet_Weights.IMAGENET1K_V1)`.
        model_alexnet = models.alexnet(pretrained=True)
        self.features = model_alexnet.features

        self.classifier = nn.Sequential(
            nn.Dropout(0.5),
            nn.Linear(256 * 6 * 6, 4096),
            nn.ReLU(inplace=True),
            nn.Dropout(0.5),
            nn.Linear(4096, 256),
            nn.ReLU(inplace=True),
            nn.Linear(256, 31),  # 31 categories in the Office dataset
        )

        self.discriminator = nn.Sequential(
            nn.Linear(256 * 6 * 6, 1024),
            nn.ReLU(),
            nn.Dropout(0.5),
            nn.Linear(1024, 1024),
            nn.ReLU(),
            nn.Dropout(0.5),
            nn.Linear(1024, 2),
        )

    def forward(self, input_data, alpha):
        input_data = input_data.expand(input_data.shape[0], 3, 227, 227)
        feature = self.features(input_data)
        feature = feature.view(-1, 256 * 6 * 6)
        reverse_feature = ReverseLayerF.apply(feature, alpha)
        class_output = self.classifier(feature)
        domain_output = self.discriminator(reverse_feature)

        return class_output, domain_output
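

if __name__ == "__main__":
    # Minimal smoke test -- an illustrative sketch, not part of the original
    # training code. Because of the relative import above, run this file as a
    # module (python -m ...); note that AlexModel downloads ImageNet weights
    # on first use.
    import torch

    x_digit = torch.randn(4, 1, 28, 28)
    x_img = torch.randn(4, 3, 227, 227)
    alpha = 0.5  # gradient-reversal strength; annealed during real training

    print(Classifier()(x_digit).shape)           # torch.Size([4, 10])
    for net in (MNISTmodel(), SVHNmodel()):
        cls_out, dom_out = net(x_digit, alpha)
        print(cls_out.shape, dom_out.shape)      # (4, 10) and (4, 2)
    cls_out, dom_out = AlexModel()(x_img, alpha)
    print(cls_out.shape, dom_out.shape)          # (4, 31) and (4, 2)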