|
@@ -43,37 +43,46 @@ class Classifier(nn.Module):
         return class_output
 
 
 class MNISTmodel(nn.Module):
-    """ MNIST architecture"""
+    """ MNIST architecture
+    +Dropout2d, 84% ~ 73%
+    -Dropout2d, 50% ~ 73%
+    """
 
     def __init__(self):
         super(MNISTmodel, self).__init__()
         self.restored = False
 
         self.feature = nn.Sequential(
-            nn.Conv2d(in_channels=1, out_channels=32, kernel_size=(5, 5)),  # 1 28 28, 32 24 24
+            nn.Conv2d(in_channels=3, out_channels=32, kernel_size=(5, 5)),  # 3 28 28, 32 24 24
+            nn.BatchNorm2d(32),
             nn.ReLU(inplace=True),
             nn.MaxPool2d(kernel_size=(2, 2)),  # 32 12 12
             nn.Conv2d(in_channels=32, out_channels=48, kernel_size=(5, 5)),  # 48 8 8
+            nn.BatchNorm2d(48),
+            nn.Dropout2d(),
             nn.ReLU(inplace=True),
             nn.MaxPool2d(kernel_size=(2, 2)),  # 48 4 4
         )
 
         self.classifier = nn.Sequential(
             nn.Linear(48*4*4, 100),
+            nn.BatchNorm1d(100),
             nn.ReLU(inplace=True),
             nn.Linear(100, 100),
+            nn.BatchNorm1d(100),
             nn.ReLU(inplace=True),
             nn.Linear(100, 10),
         )
 
         self.discriminator = nn.Sequential(
             nn.Linear(48*4*4, 100),
+            nn.BatchNorm1d(100),
             nn.ReLU(inplace=True),
             nn.Linear(100, 2),
         )
 
     def forward(self, input_data, alpha):
-        input_data = input_data.expand(input_data.data.shape[0], 1, 28, 28)
+        input_data = input_data.expand(input_data.data.shape[0], 3, 28, 28)
         feature = self.feature(input_data)
         feature = feature.view(-1, 48 * 4 * 4)
         reverse_feature = ReverseLayerF.apply(feature, alpha)
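Note: both forward passes call ReverseLayerF.apply(feature, alpha), whose definition lies outside these hunks. For reference, below is a minimal sketch of the usual DANN gradient-reversal layer; it assumes the standard torch.autograd.Function pattern and may differ in detail from this repo's actual implementation:

```python
from torch.autograd import Function

class ReverseLayerF(Function):
    """Gradient reversal: identity in the forward pass; scales the
    gradient by -alpha in the backward pass."""

    @staticmethod
    def forward(ctx, x, alpha):
        ctx.alpha = alpha
        return x.view_as(x)

    @staticmethod
    def backward(ctx, grad_output):
        # Flip and scale the incoming gradient; alpha itself is a
        # constant scalar, so it receives no gradient (None).
        return grad_output.neg() * ctx.alpha, None
```

This layer is what makes self.discriminator adversarial: the domain loss backpropagates through it with flipped sign, pushing the shared feature extractor toward domain-invariant features.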
|
@@ -83,48 +92,55 @@ class MNISTmodel(nn.Module):
         return class_output, domain_output
 
 
 class SVHNmodel(nn.Module):
-    """ SVHN architecture"""
+    """ SVHN architecture
+
+    I don't know how to implement the paper's structure
+    """
 
     def __init__(self):
         super(SVHNmodel, self).__init__()
         self.restored = False
 
-        self.feature = nn.Sequential()
-        self.feature.add_module('f_conv1', nn.Conv2d(1, 64, kernel_size=5))
-        self.feature.add_module('f_bn1', nn.BatchNorm2d(64))
-        self.feature.add_module('f_pool1', nn.MaxPool2d(2))
-        self.feature.add_module('f_relu1', nn.ReLU(True))
-        self.feature.add_module('f_conv2', nn.Conv2d(64, 50, kernel_size=5))
-        self.feature.add_module('f_bn2', nn.BatchNorm2d(50))
-        self.feature.add_module('f_drop1', nn.Dropout2d())
-        self.feature.add_module('f_pool2', nn.MaxPool2d(2))
-        self.feature.add_module('f_relu2', nn.ReLU(True))
+        self.feature = nn.Sequential(
+            nn.Conv2d(in_channels=3, out_channels=64, kernel_size=(5, 5), stride=(1, 1)),  # 3 28 28, 64 24 24
+            nn.BatchNorm2d(64),
+            nn.ReLU(inplace=True),
+            nn.MaxPool2d(kernel_size=(2, 2)),  # 64 12 12
+            nn.Conv2d(in_channels=64, out_channels=64, kernel_size=(5, 5)),  # 64 8 8
+            nn.BatchNorm2d(64),
+            nn.Dropout2d(),
+            nn.ReLU(inplace=True),
+            nn.MaxPool2d(kernel_size=(2, 2), stride=(2, 2)),  # 64 4 4
+            nn.ReLU(inplace=True),
+        )
 
-        self.class_classifier = nn.Sequential()
-        self.class_classifier.add_module('c_fc1', nn.Linear(50 * 4 * 4, 100))
-        self.class_classifier.add_module('c_bn1', nn.BatchNorm2d(100))
-        self.class_classifier.add_module('c_relu1', nn.ReLU(True))
-        self.class_classifier.add_module('c_drop1', nn.Dropout2d())
-        self.class_classifier.add_module('c_fc2', nn.Linear(100, 100))
-        self.class_classifier.add_module('c_bn2', nn.BatchNorm2d(100))
-        self.class_classifier.add_module('c_relu2', nn.ReLU(True))
-        self.class_classifier.add_module('c_fc3', nn.Linear(100, 10))
-        self.class_classifier.add_module('c_softmax', nn.LogSoftmax(dim=1))
+        self.classifier = nn.Sequential(
+            nn.Linear(64*4*4, 1024),
+            nn.BatchNorm1d(1024),
+            nn.ReLU(inplace=True),
+            nn.Linear(1024, 256),
+            nn.BatchNorm1d(256),
+            nn.ReLU(inplace=True),
+            nn.Linear(256, 10),
+        )
 
-        self.domain_classifier = nn.Sequential()
-        self.domain_classifier.add_module('d_fc1', nn.Linear(50 * 4 * 4, 100))
-        self.domain_classifier.add_module('d_bn1', nn.BatchNorm2d(100))
-        self.domain_classifier.add_module('d_relu1', nn.ReLU(True))
-        self.domain_classifier.add_module('d_fc2', nn.Linear(100, 2))
-        self.domain_classifier.add_module('d_softmax', nn.LogSoftmax(dim=1))
+        self.discriminator = nn.Sequential(
+            nn.Linear(64*4*4, 1024),
+            nn.BatchNorm1d(1024),
+            nn.ReLU(inplace=True),
+            nn.Linear(1024, 256),
+            nn.BatchNorm1d(256),
+            nn.ReLU(inplace=True),
+            nn.Linear(256, 2),
+        )
 
     def forward(self, input_data, alpha):
-        input_data = input_data.expand(input_data.data.shape[0], 1, 28, 28)
+        input_data = input_data.expand(input_data.data.shape[0], 3, 28, 28)
         feature = self.feature(input_data)
-        feature = feature.view(-1, 50 * 4 * 4)
+        feature = feature.view(-1, 64 * 4 * 4)
         reverse_feature = ReverseLayerF.apply(feature, alpha)
-        class_output = self.class_classifier(feature)
-        domain_output = self.domain_classifier(reverse_feature)
+        class_output = self.classifier(feature)
+        domain_output = self.discriminator(reverse_feature)
 
         return class_output, domain_output
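Two review notes on the SVHN rewrite: the trailing second nn.ReLU in the new feature stack is redundant (applying ReLU again after max pooling changes nothing) but harmless, and the flattened feature size changes from 50 * 4 * 4 to 64 * 4 * 4, so the view call and the first Linear layers must agree, as they now do. A quick shape check catches that class of mistake; the snippet below is a hypothetical smoke test, assuming both classes live in a models module alongside ReverseLayerF:

```python
import torch
from models import MNISTmodel, SVHNmodel  # hypothetical module path

for Model in (MNISTmodel, SVHNmodel):
    model = Model()
    model.eval()  # fix BatchNorm/Dropout so the check is deterministic
    x = torch.randn(8, 3, 28, 28)  # batch of 8 three-channel 28x28 images
    class_output, domain_output = model(x, alpha=0.1)
    assert class_output.shape == (8, 10)   # 10 digit classes
    assert domain_output.shape == (8, 2)   # source vs. target domain
```

With the old in_channels=1 layout, this test would fail on three-channel input at the expand call; after the change both models accept RGB 28x28 batches.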