
0.6214 update.

Commit 1d89bd25e5 on branch master, by wogong (6 years ago).
Changed files:

1. README.md (11 lines changed)
2. core/dann.py (30 lines changed)
3. models/alexnet.py (88 lines changed)
4. models/model.py (9 lines changed)
5. office.py (8 lines changed)

README.md (11 lines changed)

@@ -22,14 +22,15 @@ A pytorch implementation for paper *[Unsupervised Domain Adaptation by Backpropa
 ## Result

-| | MNIST-MNISTM | SVHN-MNIST |
-| :-------------: | :------------: | :--------: |
-| Source Only | 0.5225 | 0.5490 |
-| DANN | 0.7666 | 0.7385 |
-| This Repo | 0.8400 | 0.7339 |
+| | MNIST-MNISTM | SVHN-MNIST | Amazon-Webcam |
+| :-------------: | :------------: | :--------: | :--------: |
+| Source Only | 0.5225 | 0.5490 | 0.6420 |
+| DANN | 0.7666 | 0.7385 | 0.7300 |
+| This Repo | 0.8400 | 0.7339 | 0.6214 |

 - MNIST-MNISTM: `python mnist_mnistm.py`
 - SVHN-MNIST: `python svhn_mnist.py`
+- Amazon-Webcam: not successfully reproduced

 ## Other implementations

core/dann.py (30 lines changed)

@@ -25,13 +25,13 @@ def train_dann(model, params, src_data_loader, tgt_data_loader, tgt_data_loader_
     else:
         print("training office task")
         parameter_list = [
-            {"params": model.features.parameters(), "lr": 1e-5},
-            {"params": model.fc.parameters(), "lr": 1e-5},
-            {"params": model.bottleneck.parameters(), "lr": 1e-4},
-            {"params": model.classifier.parameters(), "lr": 1e-4},
-            {"params": model.discriminator.parameters(), "lr": 1e-4}
+            {"params": model.features.parameters(), "lr": 0.001},
+            {"params": model.fc.parameters(), "lr": 0.001},
+            {"params": model.bottleneck.parameters()},
+            {"params": model.classifier.parameters()},
+            {"params": model.discriminator.parameters()}
         ]
-        optimizer = optim.SGD(parameter_list)
+        optimizer = optim.SGD(parameter_list, lr=0.01, momentum=0.9)

     criterion = nn.CrossEntropyLoss()
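This hunk swaps the uniformly tiny per-layer rates (1e-5/1e-4) for PyTorch's parameter-group defaulting: a group that names an "lr" keeps it, while groups that omit it inherit the optimizer-wide default (here 0.01, with momentum now enabled). A minimal sketch of that mechanism, using a hypothetical two-layer toy model rather than the repo's:

```python
import torch.nn as nn
import torch.optim as optim

# Toy stand-in for a pretrained backbone plus a freshly initialized head.
net = nn.Sequential(nn.Linear(10, 10), nn.Linear(10, 2))

optimizer = optim.SGD(
    [
        {"params": net[0].parameters(), "lr": 0.001},  # explicit lr is kept
        {"params": net[1].parameters()},               # inherits the default below
    ],
    lr=0.01,
    momentum=0.9,
)
print([group["lr"] for group in optimizer.param_groups])  # [0.001, 0.01]
```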
@@ -52,9 +52,12 @@ def train_dann(model, params, src_data_loader, tgt_data_loader, tgt_data_loader_
             p = float(step + epoch * len_dataloader) / params.num_epochs / len_dataloader
             alpha = 2. / (1. + np.exp(-10 * p)) - 1
+            alpha = 2*alpha

             if params.src_dataset == 'mnist' or params.tgt_dataset == 'mnist':
+                print("training mnist task")
                 adjust_learning_rate(optimizer, p)
+            else:
+                adjust_learning_rate_office(optimizer, p)

             # prepare domain label
             size_src = len(images_src)
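Here `p` is training progress in [0, 1], so `alpha = 2/(1 + exp(-10p)) - 1` ramps smoothly from 0 toward 1, and the newly added doubling pushes it toward 2. `alpha` weights the gradient-reversal layer between the shared features and the domain discriminator; the repo's version is `ReverseLayerF` in `models/functions.py`, but the usual implementation of the idea looks like this (a sketch, not a copy of the repo's code):

```python
import torch

class GradReverse(torch.autograd.Function):
    """Identity on the forward pass; negates and scales gradients going back."""

    @staticmethod
    def forward(ctx, x, alpha):
        ctx.alpha = alpha
        return x.view_as(x)

    @staticmethod
    def backward(ctx, grad_output):
        # The reversed, alpha-scaled gradient trains the feature extractor
        # to fool the domain discriminator, which is the core DANN trick.
        return grad_output.neg() * ctx.alpha, None
```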
@@ -114,10 +117,19 @@ def train_dann(model, params, src_data_loader, tgt_data_loader, tgt_data_loader_
     return model

 def adjust_learning_rate(optimizer, p):
-    """Sets the learning rate to the initial LR decayed by 10 every 30 epochs"""
     lr_0 = 0.01
     alpha = 10
     beta = 0.75
     lr = lr_0 / (1 + alpha*p) ** beta
     for param_group in optimizer.param_groups:
         param_group['lr'] = lr
+
+def adjust_learning_rate_office(optimizer, p):
+    lr_0 = 0.001
+    alpha = 10
+    beta = 0.75
+    lr = lr_0 / (1 + alpha*p) ** beta
+    for param_group in optimizer.param_groups[:2]:
+        param_group['lr'] = lr
+    for param_group in optimizer.param_groups[2:]:
+        param_group['lr'] = 10*lr
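Both functions implement the annealed schedule from the DANN paper, lr = lr_0 / (1 + 10p)^0.75. The new office variant starts from a 10x smaller base rate and gives the three freshly initialized groups (bottleneck, classifier, discriminator) ten times the rate of the two pretrained ones (features, fc). A quick check of the values it produces:

```python
# Learning rates assigned by adjust_learning_rate_office as progress p goes 0 -> 1.
lr_0 = 0.001
for p in (0.0, 0.25, 0.5, 1.0):
    lr = lr_0 / (1 + 10 * p) ** 0.75
    print(f"p={p:.2f}  pretrained groups: {lr:.6f}  new groups: {10 * lr:.6f}")
```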

models/alexnet.py (new file, 88 lines)

@@ -0,0 +1,88 @@ (new file)

import torch
import os
import torch.nn as nn
import torch.utils.model_zoo as model_zoo

__all__ = ['AlexNet', 'alexnet']


class LRN(nn.Module):
    """Caffe-style local response normalization."""

    def __init__(self, local_size=1, alpha=1.0, beta=0.75, ACROSS_CHANNELS=True):
        super(LRN, self).__init__()
        self.ACROSS_CHANNELS = ACROSS_CHANNELS
        if ACROSS_CHANNELS:
            # Average x^2 over a window of neighboring channels.
            self.average = nn.AvgPool3d(kernel_size=(local_size, 1, 1),
                                        stride=1,
                                        padding=(int((local_size-1.0)/2), 0, 0))
        else:
            # Average x^2 over a spatial window within each channel.
            self.average = nn.AvgPool2d(kernel_size=local_size,
                                        stride=1,
                                        padding=int((local_size-1.0)/2))
        self.alpha = alpha
        self.beta = beta

    def forward(self, x):
        if self.ACROSS_CHANNELS:
            div = x.pow(2).unsqueeze(1)
            div = self.average(div).squeeze(1)
            div = div.mul(self.alpha).add(1.0).pow(self.beta)
        else:
            div = x.pow(2)
            div = self.average(div)
            div = div.mul(self.alpha).add(1.0).pow(self.beta)
        x = x.div(div)
        return x
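This hand-rolled LRN divides each activation by (1 + alpha * mean(x^2))^beta over a window of `local_size` neighboring channels. Recent PyTorch versions ship an equivalent built-in, so the class above could likely be replaced with the following (assuming the default bias term k plays the role of the `.add(1.0)` above):

```python
import torch.nn as nn

# Built-in local response normalization with the same hyperparameters as above.
lrn = nn.LocalResponseNorm(size=5, alpha=0.0001, beta=0.75, k=1.0)
```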
class AlexNet(nn.Module):
    def __init__(self, num_classes=1000):
        super(AlexNet, self).__init__()
        self.features = nn.Sequential(
            nn.Conv2d(3, 96, kernel_size=11, stride=4, padding=0),
            nn.ReLU(inplace=True),
            LRN(local_size=5, alpha=0.0001, beta=0.75),
            nn.MaxPool2d(kernel_size=3, stride=2),
            nn.Conv2d(96, 256, kernel_size=5, padding=2, groups=2),
            nn.ReLU(inplace=True),
            LRN(local_size=5, alpha=0.0001, beta=0.75),
            nn.MaxPool2d(kernel_size=3, stride=2),
            nn.Conv2d(256, 384, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(384, 384, kernel_size=3, padding=1, groups=2),
            nn.ReLU(inplace=True),
            nn.Conv2d(384, 256, kernel_size=3, padding=1, groups=2),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2),
        )
        self.classifier = nn.Sequential(
            nn.Linear(256 * 6 * 6, 4096),
            nn.ReLU(inplace=True),
            nn.Dropout(),
            nn.Linear(4096, 4096),
            nn.ReLU(inplace=True),
            nn.Dropout(),
            nn.Linear(4096, num_classes),
        )

    def forward(self, x):
        x = self.features(x)
        x = x.view(x.size(0), 256 * 6 * 6)
        x = self.classifier(x)
        return x


def alexnet(pretrained=False, **kwargs):
    r"""AlexNet model architecture from the
    `"One weird trick..." <https://arxiv.org/abs/1404.5997>`_ paper.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
    """
    model = AlexNet(**kwargs)
    if pretrained:
        # Weights come from a local checkpoint rather than model_zoo.
        model_path = '/home/wogong/Models/alexnet.pth.tar'
        pretrained_model = torch.load(model_path)
        model.load_state_dict(pretrained_model['state_dict'])
    return model
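Note that the first conv uses stride 4 with no padding, so this is the 227x227 CaffeNet layout rather than torchvision's 224x224 variant; the spatial size shrinks 227 -> 55 -> 27 -> 13 -> 6, which matches the 256 * 6 * 6 flatten. A quick shape sanity check, assuming the module path shown in this commit:

```python
import torch
from models.alexnet import alexnet

model = alexnet(pretrained=False)
x = torch.randn(1, 3, 227, 227)  # CaffeNet-style input size
print(model(x).shape)            # expected: torch.Size([1, 1000])
```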

models/model.py (9 lines changed)

@@ -3,7 +3,7 @@
 import torch.nn as nn
 from .functions import ReverseLayerF
 from torchvision import models
+from .alexnet import alexnet


 class Classifier(nn.Module):
     """ SVHN architecture without discriminator"""
@@ -160,17 +160,16 @@ class AlexModel(nn.Module):
         self.__in_features = model_alexnet.classifier[6].in_features  # 4096

         self.bottleneck = nn.Sequential(
-            nn.Linear(4096, 256),
+            nn.Linear(4096, 1024),
             nn.ReLU(inplace=True),
-            nn.Dropout(),
         )
         self.classifier = nn.Sequential(
-            nn.Linear(256, 31)
+            nn.Linear(1024, 31),
         )
         self.discriminator = nn.Sequential(
-            nn.Linear(256, 1024),
+            nn.Linear(1024, 1024),
             nn.ReLU(),
             nn.Dropout(),
             nn.Linear(1024, 1024),
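With the bottleneck widened from 256 to 1024 features, the classifier and discriminator inputs change to match. The forward flow is presumably the standard DANN split, with shared features feeding both heads and the gradient reversal sitting in front of the discriminator; a hypothetical sketch reconstructed from the layer shapes above, not copied from the repo:

```python
# Assumed AlexModel.forward wiring (module names taken from the
# parameter_list in core/dann.py).
def forward(self, x, alpha):
    feat = self.features(x)                      # AlexNet conv features
    feat = self.fc(feat.view(feat.size(0), -1))  # 4096-d embedding
    feat = self.bottleneck(feat)                 # 4096 -> 1024
    class_out = self.classifier(feat)            # 1024 -> 31 Office categories
    domain_out = self.discriminator(ReverseLayerF.apply(feat, alpha))
    return class_out, domain_out
```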

office.py (8 lines changed)

@@ -26,17 +26,17 @@ class Config(object):
     # params for pretrain
     num_epochs_src = 100
-    log_step_src = 10
+    log_step_src = 5
     save_step_src = 50
     eval_step_src = 20

     # params for training dann
     ## for office
-    num_epochs = 4000
-    log_step = 25  # iters
+    num_epochs = 2000
+    log_step = 10  # iters
     save_step = 500
-    eval_step = 50  # epochs
+    eval_step = 5  # epochs
     manual_seed = 8888
     alpha = 0
