
minor update.

Branch: master
wogong, 7 years ago
Commit: 4e8352de30
Changed files:
  core/test.py         | 59
  datasets/mnistm.py   |  4
  datasets/svhn.py     |  2
  main.py              |  7
  models/classifier.py | 39
  utils.py             | 14

core/test.py: 59 changed lines

@@ -38,7 +38,7 @@ def test(dataloader, epoch):
         batch_size = len(t_label)
-        input_img = torch.FloatTensor(batch_size, 3, params.image_size, params.image_size)
+        input_img = torch.FloatTensor(batch_size, 3, params.digit_image_size, params.digit_image_size)
         class_label = torch.LongTensor(batch_size)
         if cuda:
@@ -61,7 +61,7 @@ def test(dataloader, epoch):
     accu = n_correct * 1.0 / n_total
-    print 'epoch: %d, accuracy: %f' % (epoch, accu)
+    print('epoch: %d, accuracy: %f' % (epoch, accu))

 def test_from_save(model, saved_model, data_loader):
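
This hunk is a Python 2 to 3 fix: print became a function in Python 3, so the old statement form no longer parses. A minimal check (values illustrative):

    # Python 3: print is a function; the Python 2 statement form is a SyntaxError.
    epoch, accu = 3, 0.9712
    print('epoch: %d, accuracy: %f' % (epoch, accu))  # -> epoch: 3, accuracy: 0.971200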
@@ -93,7 +93,7 @@ def test_from_save(model, saved_model, data_loader):
     loss /= len(data_loader)
     acc /= len(data_loader.dataset)
-    print("Avg Loss = {}, Avg Accuracy = {:2%}".format(loss, acc))
+    print("Avg Loss = {}, Avg Accuracy = {:.2%}".format(loss, acc))

 def eval(model, data_loader):
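
The {:2%} to {:.2%} change is more than cosmetic: in a format spec a bare 2 is a minimum field width, while .2 is a precision, so only the latter rounds to two decimal places. (Note that the Avg Domain Accuracy field added further down still uses {:2%}.) For example:

    acc = 0.9812
    print("{:2%}".format(acc))   # '98.120000%'  (width 2, default precision 6)
    print("{:.2%}".format(acc))  # '98.12%'      (precision 2)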
@@ -104,30 +104,71 @@ def eval(model, data_loader):
     # init loss and accuracy
     loss = 0.0
     acc = 0.0
+    acc_domain = 0.0

     # set loss function
-    criterion = nn.NLLLoss()
+    criterion = nn.CrossEntropyLoss()

     # evaluate network
     for (images, labels) in data_loader:
         images = make_variable(images, volatile=True)
         labels = make_variable(labels) #labels = labels.squeeze(1)
+        size_tgt = len(labels)
+        labels_domain = make_variable(torch.ones(size_tgt).long())

-        preds, _ = model(images, alpha=0)
-        criterion(preds, labels)
+        preds, domain = model(images, alpha=0)
+        loss += criterion(preds, labels).data[0]

         pred_cls = preds.data.max(1)[1]
+        pred_domain = domain.data.max(1)[1]
         acc += pred_cls.eq(labels.data).cpu().sum()
+        acc_domain += pred_domain.eq(labels_domain.data).cpu().sum()

     loss /= len(data_loader)
     acc /= len(data_loader.dataset)
+    acc_domain /= len(data_loader.dataset)

-    print("Avg Loss = {}, Avg Accuracy = {:2%}".format(loss, acc))
+    print("Avg Loss = {:.6f}, Avg Accuracy = {:.2%}, Avg Domain Accuracy = {:2%}".format(loss, acc, acc_domain))
+
+
+def eval_src(model, data_loader):
+    """Evaluate model for dataset."""
+    # set eval state for Dropout and BN layers
+    model.eval()
+
+    # init loss and accuracy
+    loss = 0.0
+    acc = 0.0
+    acc_domain = 0.0
+
+    # set loss function
+    criterion = nn.CrossEntropyLoss()
+
+    # evaluate network
+    for (images, labels) in data_loader:
+        images = make_variable(images, volatile=True)
+        labels = make_variable(labels) #labels = labels.squeeze(1)
+        size_tgt = len(labels)
+        labels_domain = make_variable(torch.zeros(size_tgt).long())
+        preds, domain = model(images, alpha=0)
+        loss += criterion(preds, labels).data[0]
+        pred_cls = preds.data.max(1)[1]
+        pred_domain = domain.data.max(1)[1]
+        acc += pred_cls.eq(labels.data).cpu().sum()
+        acc_domain += pred_domain.eq(labels_domain.data).cpu().sum()
+
+    loss /= len(data_loader)
+    acc /= len(data_loader.dataset)
+    acc_domain /= len(data_loader.dataset)
+
+    print("Avg Loss = {:.6f}, Avg Accuracy = {:.2%}, Avg Domain Accuracy = {:2%}".format(loss, acc, acc_domain))


-def eval_src(model, data_loader):
+def eval_src_(model, data_loader):
     """Evaluate classifier for source domain."""
     # set eval state for Dropout and BN layers
     model.eval()
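
Swapping nn.NLLLoss() for nn.CrossEntropyLoss() changes what the model head is expected to emit: CrossEntropyLoss applies LogSoftmax internally and wants raw logits, while NLLLoss wants log-probabilities, so the swap is only consistent if the model no longer ends in a LogSoftmax. A minimal sketch of the equivalence (shapes illustrative):

    import torch
    import torch.nn as nn

    logits = torch.randn(8, 10)           # raw scores from a classifier head
    targets = torch.randint(0, 10, (8,))  # class indices

    ce = nn.CrossEntropyLoss()(logits, targets)
    nll = nn.NLLLoss()(nn.LogSoftmax(dim=1)(logits), targets)
    assert torch.allclose(ce, nll)        # CrossEntropyLoss == LogSoftmax + NLLLoss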
@@ -156,4 +197,4 @@ def eval_src(model, data_loader):
     loss /= len(data_loader)
     acc /= len(data_loader.dataset)
-    print("Avg Loss = {}, Avg Accuracy = {:2%}".format(loss, acc))
+    print("Avg Loss = {:.6f}, Avg Accuracy = {:.2%}".format(loss, acc))

datasets/mnistm.py: 4 changed lines

@@ -1,7 +1,7 @@
 """Dataset setting and data loader for MNIST_M."""

 import torch
-from torchvision import datasets, transforms
+from torchvision import transforms
 import torch.utils.data as data
 from PIL import Image
 import os
@@ -41,7 +41,7 @@ class GetLoader(data.Dataset):
 def get_mnistm(train):
     """Get MNISTM datasets loader."""
     # image pre-processing
-    pre_process = transforms.Compose([transforms.Resize(params.image_size),
+    pre_process = transforms.Compose([transforms.Resize(params.digit_image_size),
                                       transforms.ToTensor(),
                                       transforms.Normalize(
                                           mean=params.dataset_mean,

datasets/svhn.py: 2 changed lines

@@ -12,7 +12,7 @@ def get_svhn(train):
     """Get SVHN datasets loader."""
     # image pre-processing
     pre_process = transforms.Compose([transforms.Grayscale(),
-                                      transforms.Resize(params.image_size),
+                                      transforms.Resize(params.digit_image_size),
                                       transforms.ToTensor(),
                                       transforms.Normalize(
                                           mean=params.dataset_mean,
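
Both dataset hunks repoint Resize at a dedicated params.digit_image_size instead of the shared params.image_size, presumably so the digit pipelines can use a different resolution than the Office ones. A minimal sketch of the resulting SVHN pipeline with assumed values (the real size and normalization stats live in the repo's params module):

    from torchvision import transforms

    digit_image_size = 28  # assumed; the repo reads params.digit_image_size
    pre_process = transforms.Compose([
        transforms.Grayscale(),               # SVHN is RGB; collapse to one channel
        transforms.Resize(digit_image_size),
        transforms.ToTensor(),
        transforms.Normalize(mean=(0.5,),     # assumed normalization stats,
                             std=(0.5,)),     # not the repo's params values
    ])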

main.py: 7 changed lines

@@ -1,5 +1,4 @@
-from models.model import CNNModel
-from models.classifier import Classifier
+from models.model import SVHNmodel, Classifier
 from core.dann import train_dann
 from core.test import eval, eval_src
@@ -33,14 +32,14 @@ print("=== Evaluating source classifier for target domain ===")
 eval_src(src_classifier, tgt_data_loader_eval)

 # load dann model
-dann = init_model(net=CNNModel(), restore=params.dann_restore)
+dann = init_model(net=SVHNmodel(), restore=params.dann_restore)

 # train dann model
 print("=== Training dann model ===")
 if not (dann.restored and params.dann_restore):
     dann = train_dann(dann, src_data_loader, tgt_data_loader, tgt_data_loader_eval)

 # eval dann model
 print("=== Evaluating dann for source domain ===")
 eval(dann, src_data_loader_eval)

models/classifier.py: 39 changed lines (file deleted)

@@ -1,39 +0,0 @@
-"""Classifier for source domain"""
-
-import torch.nn as nn
-
-
-class Classifier(nn.Module):
-    def __init__(self):
-        super(Classifier, self).__init__()
-        self.restored = False
-
-        self.feature = nn.Sequential()
-        self.feature.add_module('f_conv1', nn.Conv2d(1, 64, kernel_size=5))
-        self.feature.add_module('f_bn1', nn.BatchNorm2d(64))
-        self.feature.add_module('f_pool1', nn.MaxPool2d(2))
-        self.feature.add_module('f_relu1', nn.ReLU(True))
-        self.feature.add_module('f_conv2', nn.Conv2d(64, 50, kernel_size=5))
-        self.feature.add_module('f_bn2', nn.BatchNorm2d(50))
-        self.feature.add_module('f_drop1', nn.Dropout2d())
-        self.feature.add_module('f_pool2', nn.MaxPool2d(2))
-        self.feature.add_module('f_relu2', nn.ReLU(True))
-
-        self.class_classifier = nn.Sequential()
-        self.class_classifier.add_module('c_fc1', nn.Linear(50 * 4 * 4, 100))
-        self.class_classifier.add_module('c_bn1', nn.BatchNorm2d(100))
-        self.class_classifier.add_module('c_relu1', nn.ReLU(True))
-        self.class_classifier.add_module('c_drop1', nn.Dropout2d())
-        self.class_classifier.add_module('c_fc2', nn.Linear(100, 100))
-        self.class_classifier.add_module('c_bn2', nn.BatchNorm2d(100))
-        self.class_classifier.add_module('c_relu2', nn.ReLU(True))
-        self.class_classifier.add_module('c_fc3', nn.Linear(100, 10))
-        self.class_classifier.add_module('c_softmax', nn.LogSoftmax(dim=1))
-
-    def forward(self, input_data):
-        input_data = input_data.expand(input_data.data.shape[0], 1, 28, 28)
-        feature = self.feature(input_data)
-        feature = feature.view(-1, 50 * 4 * 4)
-        class_output = self.class_classifier(feature)
-
-        return class_output
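
One detail of the deleted file worth flagging: it applied nn.BatchNorm2d to the 2-D (N, 100) output of a Linear layer, which pre-0.4 PyTorch tolerated but current versions reject with a dimension check; the matching module for (N, C) activations is nn.BatchNorm1d. A corrected sketch of that classifier head:

    import torch
    import torch.nn as nn

    head = nn.Sequential(
        nn.Linear(50 * 4 * 4, 100),
        nn.BatchNorm1d(100),   # BatchNorm2d here only worked on old PyTorch
        nn.ReLU(True),
        nn.Linear(100, 10),
        nn.LogSoftmax(dim=1),
    )
    out = head(torch.randn(8, 50 * 4 * 4))  # -> (8, 10) log-probabilities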

utils.py: 14 changed lines

@@ -9,6 +9,8 @@ from torch.autograd import Variable

 import params
 from datasets import get_mnist, get_mnistm, get_svhn
+from datasets.office import get_office
+from datasets.officecaltech import get_officecaltech


 def make_variable(tensor, volatile=False):
@@ -63,12 +65,17 @@ def get_data_loader(name, train=True):
         return get_mnistm(train)
     elif name == "SVHN":
         return get_svhn(train)
+    elif name == "amazon31":
+        return get_office(train, 'amazon')
+    elif name == "webcam31":
+        return get_office(train, 'webcam')
+    elif name == "webcam10":
+        return get_officecaltech(train, 'webcam')


 def init_model(net, restore):
     """Init models with cuda and weights."""
     # init weights of model
-    net.apply(init_weights)
+    # net.apply(init_weights)

     # restore model weights
     if restore is not None and os.path.exists(restore):
@@ -92,5 +99,4 @@ def save_model(net, filename):
         os.makedirs(params.model_root)
     torch.save(net.state_dict(),
                os.path.join(params.model_root, filename))
-    print("save pretrained model to: {}".format(os.path.join(params.model_root,
-                                                             filename)))
+    print("save pretrained model to: {}".format(os.path.join(params.model_root, filename)))