diff --git a/datasets/mnist.py b/datasets/mnist.py
index 1bbc192..b872e2c 100644
--- a/datasets/mnist.py
+++ b/datasets/mnist.py
@@ -8,7 +8,7 @@ import os
 def get_mnist(dataset_root, batch_size, train):
     """Get MNIST datasets loader."""
     # image pre-processing
-    pre_process = transforms.Compose([transforms.Resize(28), # different img size settings for mnist(28) and svhn(32).
+    pre_process = transforms.Compose([transforms.Resize(32), # resize mnist (28) up to the svhn input size (32).
                                       transforms.ToTensor(),
                                       transforms.Normalize(
                                           mean=(0.5),
diff --git a/experiments/svhn_mnist.py b/experiments/svhn_mnist.py
index c053ad9..7d02fd2 100644
--- a/experiments/svhn_mnist.py
+++ b/experiments/svhn_mnist.py
@@ -1,23 +1,20 @@
 import os
 import sys
 import datetime
-from tensorboardX import SummaryWriter
 import torch
 
-sys.path.append('../')
+sys.path.append(os.path.abspath('.'))
 from models.model import SVHNmodel
 from core.train import train_dann
 from utils.utils import get_data_loader, init_model, init_random_seed
+from utils.altutils import setLogger
 
 
 class Config(object):
     # params for path
-    model_name = "svhn-mnist"
-    model_base = '/home/wogong/models/pytorch-dann'
-    model_root = os.path.expanduser(os.path.join('~', 'Models', 'pytorch-DANN', model_name))
-    note = 'paper-structure'
-    model_root = os.path.join(model_base, model_name, note + '_' + datetime.datetime.now().strftime('%m%d_%H%M%S'))
-    os.makedirs(model_root)
+    currentDir = os.path.dirname(os.path.realpath(__file__))
+    dataset_root = os.environ["DATASETDIR"]
+    model_root = os.path.join(currentDir, 'checkpoints')
     config = os.path.join(model_root, 'config.txt')
     finetune_flag = False
     lr_adjust_flag = 'simple'
@@ -28,13 +25,11 @@ class Config(object):
 
     # params for source dataset
     src_dataset = "svhn"
-    src_image_root = os.path.join('/home/wogong/datasets', 'svhn')
     src_model_trained = True
     src_classifier_restore = os.path.join(model_root, src_dataset + '-source-classifier-final.pt')
 
     # params for target dataset
     tgt_dataset = "mnist"
-    tgt_image_root = os.path.join('/home/wogong/datasets', 'mnist')
     tgt_model_trained = True
     dann_restore = os.path.join(model_root, src_dataset + '-' + tgt_dataset + '-dann-final.pt')
 
@@ -61,24 +56,22 @@ class Config(object):
     momentum = 0.9
     weight_decay = 1e-6
 
-    def __init__(self):
-        public_props = (name for name in dir(self) if not name.startswith('_'))
-        with open(self.config, 'w') as f:
-            for name in public_props:
-                f.write(name + ': ' + str(getattr(self, name)) + '\n')
-
 params = Config()
-logger = SummaryWriter(params.model_root)
+
+currentDir = os.path.dirname(os.path.realpath(__file__))
+logFile = os.path.join(currentDir, '..', 'dann-{}-{}.log'.format(params.src_dataset, params.tgt_dataset))
+loggi = setLogger(logFile)
+
 device = torch.device("cuda:" + params.gpu_id if torch.cuda.is_available() else "cpu")
 
 # init random seed
 init_random_seed(params.manual_seed)
 
 # load dataset
-src_data_loader = get_data_loader(params.src_dataset, params.src_image_root, params.batch_size, train=True)
-src_data_loader_eval = get_data_loader(params.src_dataset, params.src_image_root, params.batch_size, train=False)
-tgt_data_loader = get_data_loader(params.tgt_dataset, params.tgt_image_root, params.batch_size, train=True)
-tgt_data_loader_eval = get_data_loader(params.tgt_dataset, params.tgt_image_root, params.batch_size, train=False)
+src_data_loader = get_data_loader(params.src_dataset, params.dataset_root, params.batch_size, train=True)
+src_data_loader_eval = get_data_loader(params.src_dataset, params.dataset_root, params.batch_size, train=False)
+tgt_data_loader = get_data_loader(params.tgt_dataset, params.dataset_root, params.batch_size, train=True)
+tgt_data_loader_eval = get_data_loader(params.tgt_dataset, params.dataset_root, params.batch_size, train=False)
 
 # load dann model
 dann = init_model(net=SVHNmodel(), restore=None)
@@ -86,4 +79,4 @@ dann = init_model(net=SVHNmodel(), restore=None)
 # train dann model
 print("Training dann model")
 if not (dann.restored and params.dann_restore):
-    dann = train_dann(dann, params, src_data_loader, tgt_data_loader, tgt_data_loader_eval, device, logger)
+    dann = train_dann(dann, params, src_data_loader, tgt_data_loader, tgt_data_loader_eval, device, loggi)
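
Note: the patch imports setLogger from utils/altutils.py, which is not part of this diff. A minimal sketch of what that helper might look like, assuming it simply configures and returns a standard logging.Logger that writes to the given file and to the console (the actual implementation in utils/altutils.py may differ):

import logging

def setLogger(log_file):
    """Return a logger writing to log_file and to the console (assumed behavior)."""
    logger = logging.getLogger(log_file)
    logger.setLevel(logging.INFO)
    if not logger.handlers:  # avoid stacking duplicate handlers on repeated calls
        formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
        for handler in (logging.FileHandler(log_file), logging.StreamHandler()):
            handler.setFormatter(formatter)
            logger.addHandler(handler)
    return logger

With a helper along these lines, train_dann would presumably report progress via logger.info(...) calls, letting training run without the tensorboardX dependency this patch removes.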