# [tail of a truncated XYZ -> LAB conversion function; `lab` and `mask` are
#  defined in the lines missing above this fragment]
    lab[~mask] = 7.787 * lab[~mask] + 16. / 116.  # linear branch of the CIE f() function
    x, y, z = lab[..., 0:1], lab[..., 1:2], lab[..., 2:3]

    # Vector scaling: combine the channels into L*, a*, b*
    L = (116. * y) - 16.
    a = 500.0 * (x - y)
    b = 200.0 * (y - z)
    return torch.cat([L, a, b], dim=-1).permute(0, 3, 1, 2)  # NHWC -> NCHW
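
# For context, a minimal self-contained sketch of the conversion the tail above
# finishes, assuming skimage-style CIE constants and a D65 white point. The
# function name `xyz2lab_sketch` and the `white` tensor are illustrative
# assumptions, not code recovered from the original file.
import torch

def xyz2lab_sketch(xyz):
    """xyz: (N, H, W, 3) tensor of CIE XYZ values scaled to [0, 1]."""
    white = torch.tensor([0.95047, 1.0, 1.08883], device=xyz.device)  # assumed D65 white
    lab = xyz / white
    mask = lab > 0.008856                             # CIE threshold, (6/29) ** 3
    lab[mask] = lab[mask] ** (1.0 / 3.0)              # cube-root branch of f()
    lab[~mask] = 7.787 * lab[~mask] + 16. / 116.      # linear branch, as in the tail above
    x, y, z = lab[..., 0:1], lab[..., 1:2], lab[..., 2:3]
    L = (116. * y) - 16.
    a = 500.0 * (x - y)
    b = 200.0 * (y - z)
    return torch.cat([L, a, b], dim=-1).permute(0, 3, 1, 2)  # NHWC -> NCHW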

def label2one_hot_torch(labels, C=14):
    # w.r.t http://jacobkimmel.github.io/pytorch_onehot/
    '''
    Converts an integer label torch.autograd.Variable to a one-hot Variable.

    Parameters
    ----------
    labels : torch.autograd.Variable of torch.cuda.LongTensor
        N x 1 x H x W, where N is the batch size.
        Each value is an integer representing the correct class.
    C : integer
        Number of classes in labels.

    Returns
    -------
    target : torch.cuda.FloatTensor
        N x C x H x W, where C is the number of classes. One-hot encoded.
    '''
    b, _, h, w = labels.shape
    one_hot = torch.zeros(b, C, h, w, dtype=torch.long).cuda()
    # scatter_ requires a long index tensor: write 1 at the channel given by each label
    target = one_hot.scatter_(1, labels.type(torch.long).data, 1)
    return target.type(torch.float32)
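
# Illustrative usage (not part of the original file; the shapes and class count
# are made-up). Guarded because the helper hard-codes .cuda(); a device-agnostic
# alternative would be torch.nn.functional.one_hot on labels.squeeze(1),
# followed by a permute to N x C x H x W.
if torch.cuda.is_available():
    example_labels = torch.randint(0, 14, (2, 1, 64, 64)).cuda()   # N x 1 x H x W
    example_onehot = label2one_hot_torch(example_labels, C=14)     # -> 2 x 14 x 64 x 64
    assert example_onehot.shape == (2, 14, 64, 64)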

# <FILESEP>
import torch
import data as Data
import model as Model
import argparse
import logging
import core.logger as Logger
from tensorboardX import SummaryWriter
import os
import numpy as np
from math import *
import time
from util.visualizer import Visualizer
from PIL import Image

def save_image(image_numpy, image_path):
    # convert a uint8-compatible numpy array (H x W or H x W x 3) to a PIL image and write it to disk
    image_pil = Image.fromarray(image_numpy.astype('uint8'))
    image_pil.save(image_path)
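
# Illustrative usage (not part of the original file; the array and the output
# path are made-up examples):
#   save_image(np.random.randint(0, 256, (64, 64, 3), dtype=np.uint8), 'example.png')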

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('-c', '--config', type=str, default='config/test.json',
                        help='JSON file for configuration')
    parser.add_argument('-p', '--phase', type=str, choices=['train', 'test'],
                        help='Run either train (training) or test (inference)', default='train')
    parser.add_argument('-gpu', '--gpu_ids', type=str, default=None)
    parser.add_argument('-debug', '-d', action='store_true')
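
    # Example invocations (illustrative; the script and config file names are
    # assumptions, not taken from the original repository):
    #   python train.py -c config/train.json -p train -gpu 0
    #   python train.py -c config/test.json -p test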
    # parse configs
    args = parser.parse_args()
    opt = Logger.parse(args)
    # convert to NoneDict, which returns None for missing keys
    opt = Logger.dict_to_nonedict(opt)
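
    # For intuition, a minimal sketch of what dict_to_nonedict is assumed to
    # produce (an assumption, not the actual code from core.logger): a dict
    # subclass whose missing keys yield None instead of raising KeyError, so
    # optional config entries can be probed with plain indexing.
    #
    #   class NoneDict(dict):
    #       def __missing__(self, key):
    #           return None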
    visualizer = Visualizer(opt)

    # logging
    torch.backends.cudnn.enabled = True
    torch.backends.cudnn.benchmark = True  # auto-tune conv algorithms for fixed input sizes
    Logger.setup_logger(None, opt['path']['log'], 'train', level=logging.INFO, screen=True)
    logger = logging.getLogger('base')
    logger.info(Logger.dict2str(opt))
    tb_logger = SummaryWriter(log_dir=opt['path']['tb_logger'])
    batchSize = opt['datasets']['train']['batch_size']
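
    # The option tree is expected to expose at least the keys accessed in this
    # script; the values below are made-up examples, not the shipped config:
    #   {
    #     "path":     {"log": "experiments/logs", "tb_logger": "experiments/tb"},
    #     "datasets": {"train": {"batch_size": 4, ...}, "test": {...}}
    #   }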

    # dataset
    for phase, dataset_opt in opt['datasets'].items():
        if phase == 'train':
            train_set = Data.create_dataset_xcad(dataset_opt, phase)
            train_loader = Data.create_dataloader(train_set, dataset_opt, phase)
            training_iters = int(ceil(train_set.data_len / float(batchSize)))
            val_set = Data.create_dataset_xcad(dataset_opt, 'val')
            val_loader = Data.create_dataloader(val_set, dataset_opt, 'val')
            valid_iters = int(ceil(val_set.data_len / float(batchSize)))
        elif phase == 'test':
            val_set = Data.create_dataset_xcad(dataset_opt, 'test')
            val_loader = Data.create_dataloader(val_set, dataset_opt, phase)
            valid_iters = int(ceil(val_set.data_len / float(batchSize)))
    logger.info('Initial Dataset Finished')
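
    # Worked example (made-up numbers): with train_set.data_len = 1000 and
    # batch_size = 8, training_iters = int(ceil(1000 / 8.0)) = 125, i.e. the
    # number of mini-batches in one pass over the training set.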

    # model
    diffusion = Model.create_model(opt)
    logger.info('Initial Model Finished')

    # Train