Browse Source

Add BN layers. Source-only gets 60%, while DA gets unstable results (maximum 61%) — very similar to source-only.

master
wogong 5 years ago
parent
commit
5e65946d13
  1. 2
      datasets/gtsrb.py
  2. 2
      datasets/synsigns.py
  3. 4
      experiments/synsigns_gtsrb.py
  4. 3
      experiments/synsigns_gtsrb_src_only.py
  5. 14
      models/model.py

2
datasets/gtsrb.py

@@ -15,7 +15,7 @@ def get_gtsrb(dataset_root, batch_size, train):
# image pre-processing # image pre-processing
pre_process = transforms.Compose([ pre_process = transforms.Compose([
transforms.Resize((48, 48)),
transforms.Resize((40, 40)),
transforms.ToTensor(), transforms.ToTensor(),
transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)) transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))
]) ])

2
datasets/synsigns.py

@@ -43,7 +43,7 @@ def get_synsigns(dataset_root, batch_size, train):
"""Get Synthetic Signs datasets loader.""" """Get Synthetic Signs datasets loader."""
# image pre-processing # image pre-processing
pre_process = transforms.Compose([ pre_process = transforms.Compose([
transforms.Resize((48, 48)),
transforms.Resize((40, 40)),
transforms.ToTensor(), transforms.ToTensor(),
transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)) transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))
]) ])

4
experiments/synsigns_gtsrb.py

@@ -13,7 +13,7 @@ class Config(object):
# params for path # params for path
model_name = "synsigns-gtsrb" model_name = "synsigns-gtsrb"
model_base = '/home/wogong/models/pytorch-dann' model_base = '/home/wogong/models/pytorch-dann'
note = '48-fixrelu-l2-dropout'
note = '40-bn'
model_root = os.path.join(model_base, model_name, note + '_' + datetime.datetime.now().strftime('%m%d_%H%M%S')) model_root = os.path.join(model_base, model_name, note + '_' + datetime.datetime.now().strftime('%m%d_%H%M%S'))
os.makedirs(model_root) os.makedirs(model_root)
config = os.path.join(model_root, 'config.txt') config = os.path.join(model_root, 'config.txt')
@@ -45,7 +45,7 @@ class Config(object):
save_step = 100 save_step = 100
eval_step = 1 eval_step = 1
manual_seed = None
manual_seed = 42
alpha = 0 alpha = 0
# params for SGD optimizer # params for SGD optimizer

3
experiments/synsigns_gtsrb_src_only.py

@@ -13,7 +13,7 @@ class Config(object):
# params for path # params for path
model_name = "synsigns-gtsrb" model_name = "synsigns-gtsrb"
model_base = '/home/wogong/models/pytorch-dann' model_base = '/home/wogong/models/pytorch-dann'
note = 'src-only-48-fixrelu-l2-dropout'
note = 'src-only-40-bn'
model_root = os.path.join(model_base, model_name, note + '_' + datetime.datetime.now().strftime('%m%d_%H%M%S')) model_root = os.path.join(model_base, model_name, note + '_' + datetime.datetime.now().strftime('%m%d_%H%M%S'))
os.makedirs(model_root) os.makedirs(model_root)
config = os.path.join(model_root, 'config.txt') config = os.path.join(model_root, 'config.txt')
@@ -23,7 +23,6 @@ class Config(object):
# params for datasets and data loader # params for datasets and data loader
batch_size = 128 batch_size = 128
img_size = 40
# params for source dataset # params for source dataset
src_dataset = "synsigns" src_dataset = "synsigns"

14
models/model.py

@@ -214,35 +214,41 @@ class GTSRBmodel(nn.Module):
self.feature = nn.Sequential( self.feature = nn.Sequential(
nn.Conv2d(in_channels=3, out_channels=96, kernel_size=(5, 5)), # 36 ; 44 nn.Conv2d(in_channels=3, out_channels=96, kernel_size=(5, 5)), # 36 ; 44
nn.BatchNorm2d(96),
nn.ReLU(inplace=True), nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=(2, 2), stride=(2, 2)), # 18 ; 22 nn.MaxPool2d(kernel_size=(2, 2), stride=(2, 2)), # 18 ; 22
nn.Conv2d(in_channels=96, out_channels=144, kernel_size=(3, 3)), # 16 ; 20 nn.Conv2d(in_channels=96, out_channels=144, kernel_size=(3, 3)), # 16 ; 20
nn.BatchNorm2d(144),
nn.ReLU(inplace=True), nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=(2, 2), stride=(2, 2)), # 8 ; 10 nn.MaxPool2d(kernel_size=(2, 2), stride=(2, 2)), # 8 ; 10
nn.Conv2d(in_channels=144, out_channels=256, kernel_size=(5, 5)), # 4 ; 6 nn.Conv2d(in_channels=144, out_channels=256, kernel_size=(5, 5)), # 4 ; 6
nn.BatchNorm2d(256),
nn.Dropout2d(), nn.Dropout2d(),
nn.ReLU(inplace=True), nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=(2, 2), stride=(2, 2)), # 2 ; 3 nn.MaxPool2d(kernel_size=(2, 2), stride=(2, 2)), # 2 ; 3
) )
self.classifier = nn.Sequential( self.classifier = nn.Sequential(
nn.Linear(256 * 3 * 3, 512),
nn.Linear(256 * 2 * 2, 512),
nn.BatchNorm1d(512),
nn.ReLU(inplace=True), nn.ReLU(inplace=True),
nn.Linear(512, 43), nn.Linear(512, 43),
) )
self.discriminator = nn.Sequential( self.discriminator = nn.Sequential(
nn.Linear(256 * 3 * 3, 1024),
nn.Linear(256 * 2 * 2, 1024),
nn.BatchNorm1d(1024),
nn.ReLU(inplace=True), nn.ReLU(inplace=True),
nn.Linear(1024, 1024), nn.Linear(1024, 1024),
nn.BatchNorm1d(1024),
nn.ReLU(inplace=True), nn.ReLU(inplace=True),
nn.Linear(1024, 2), nn.Linear(1024, 2),
) )
def forward(self, input_data, alpha = 1.0): def forward(self, input_data, alpha = 1.0):
input_data = input_data.expand(input_data.data.shape[0], 3, 48, 48)
input_data = input_data.expand(input_data.data.shape[0], 3, 40, 40)
feature = self.feature(input_data) feature = self.feature(input_data)
feature = feature.view(-1, 256 * 3 * 3)
feature = feature.view(-1, 256 * 2 * 2)
reverse_feature = ReverseLayerF.apply(feature, alpha) reverse_feature = ReverseLayerF.apply(feature, alpha)
class_output = self.classifier(feature) class_output = self.classifier(feature)
domain_output = self.discriminator(reverse_feature) domain_output = self.discriminator(reverse_feature)

Loading…
Cancel
Save