parser.add_argument('--testlist', help='test list')
parser.add_argument('--epochs', type=int, default=16, help='number of epochs to train')
parser.add_argument('--lr', type=float, default=0.001, help='learning rate')
parser.add_argument('--lrepochs', type=str, default="4,6,8:2", help='epoch ids to downscale lr and the downscale rate')
parser.add_argument('--wd', type=float, default=0.0001, help='weight decay')
parser.add_argument('--nviews', type=int, default=5, help='total number of views')
parser.add_argument('--batch_size', type=int, default=1, help='train batch size')
parser.add_argument('--numdepth', type=int, default=192, help='the number of depth values')
parser.add_argument('--interval_scale', type=float, default=1.06, help='the depth interval scale')
parser.add_argument('--loadckpt', default=None, help='load a specific checkpoint')
parser.add_argument('--logdir', default='./checkpoints', help='the directory to save checkpoints/logs')
parser.add_argument('--resume', action='store_true', help='continue to train the model')
parser.add_argument('--summary_freq', type=int, default=10, help='print and summary frequency')
parser.add_argument('--save_freq', type=int, default=1, help='save checkpoint frequency')
parser.add_argument('--eval_freq', type=int, default=1, help='evaluation frequency')
parser.add_argument('--seed', type=int, default=1, metavar='S', help='random seed')
parser.add_argument('--pin_m', action='store_true', help='data loader pin memory')
parser.add_argument("--local_rank", type=int, default=0)
parser.add_argument('--share_cr', action='store_true', help='whether to share the cost volume regularization')
parser.add_argument('--ndepths', type=str, default="48,32,8", help='number of depth hypotheses per stage')
parser.add_argument('--depth_inter_r', type=str, default="4,1,0.5", help='depth interval ratio per stage')
parser.add_argument('--dlossw', type=str, default="0.5,1.0,2.0", help='depth loss weight for each stage')
parser.add_argument('--cr_base_chs', type=str, default="8,8,8", help='cost regularization base channels')
parser.add_argument('--grad_method', type=str, default="detach", choices=["detach", "undetach"], help='grad method')
parser.add_argument('--using_apex', action='store_true', help='use apex (requires apex to be installed)')
parser.add_argument('--sync_bn', action='store_true', help='enable apex sync BN')
parser.add_argument('--opt-level', type=str, default="O0")
parser.add_argument('--keep-batchnorm-fp32', type=str, default=None)
parser.add_argument('--loss-scale', type=str, default=None)
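# Illustrative note (not part of the original script): the stage-wise arguments above are
# comma-separated strings; a cascade-style setup typically splits them into per-stage
# lists before building the model, e.g. with the defaults:
#   [int(d) for d in "48,32,8".split(",")]        # ndepths        -> [48, 32, 8]
#   [float(r) for r in "4,1,0.5".split(",")]      # depth_inter_r  -> [4.0, 1.0, 0.5]
#   [float(w) for w in "0.5,1.0,2.0".split(",")]  # dlossw         -> [0.5, 1.0, 2.0]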
num_gpus = int(os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1
is_distributed = num_gpus > 1
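# Assumed usage (the launch command is not shown in this excerpt): WORLD_SIZE and
# --local_rank are set by the PyTorch distributed launcher, e.g.
#   python -m torch.distributed.launch --nproc_per_node=4 train.py <args...>
# A plain `python train.py <args...>` run leaves WORLD_SIZE unset, so num_gpus = 1 and
# the distributed branches below are skipped.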
# main function
def train(model, model_loss, optimizer, TrainImgLoader, TestImgLoader, start_epoch, args):
    milestones = [len(TrainImgLoader) * int(epoch_idx) for epoch_idx in args.lrepochs.split(':')[0].split(',')]
    lr_gamma = 1 / float(args.lrepochs.split(':')[1])
    lr_scheduler = WarmupMultiStepLR(optimizer, milestones, gamma=lr_gamma, warmup_factor=1.0/3, warmup_iters=500,
                                     last_epoch=len(TrainImgLoader) * start_epoch - 1)
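    # Worked example (illustrative, iteration count assumed): with --lrepochs "4,6,8:2"
    # and 1000 iterations per epoch, milestones = [4000, 6000, 8000] and lr_gamma = 0.5,
    # so the learning rate is halved at those iterations, after a 500-iteration warmup
    # that starts from 1/3 of the base learning rate.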
    for epoch_idx in range(start_epoch, args.epochs):
        global_step = len(TrainImgLoader) * epoch_idx

        # training
        if is_distributed:
            TrainImgLoader.sampler.set_epoch(epoch_idx)
        for batch_idx, sample in enumerate(TrainImgLoader):
            start_time = time.time()
            global_step = len(TrainImgLoader) * epoch_idx + batch_idx
            do_summary = global_step % args.summary_freq == 0
            loss, scalar_outputs, image_outputs = train_sample(model, model_loss, optimizer, sample, args)
            lr_scheduler.step()
            if (not is_distributed) or (dist.get_rank() == 0):
                if do_summary:
                    save_scalars(logger, 'train', scalar_outputs, global_step)
                    # save_images(logger, 'train', image_outputs, global_step)
                    print(
                        "Epoch {}/{}, Iter {}/{}, lr {:.6f}, train loss = {:.3f}, depth loss = {:.3f}, kl loss = {:.3f}, approx_kl = {:.3f}, time = {:.3f}".format(
                            epoch_idx, args.epochs, batch_idx, len(TrainImgLoader),
                            optimizer.param_groups[0]["lr"],
                            loss,
                            scalar_outputs['depth_loss'],
                            scalar_outputs['kl_loss'],
                            scalar_outputs['approx_kl'],
                            time.time() - start_time))
                del scalar_outputs, image_outputs

        # checkpoint
        if (not is_distributed) or (dist.get_rank() == 0):
            if (epoch_idx + 1) % args.save_freq == 0:
                # model is assumed to be wrapped in (Distributed)DataParallel, hence .module
                torch.save({
                    'epoch': epoch_idx,
                    'model': model.module.state_dict(),
                    'optimizer': optimizer.state_dict()},
                    "{}/model_{:0>6}.ckpt".format(args.logdir, epoch_idx))
        gc.collect()
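# Note (assumption; the resume/restore logic is not shown in this excerpt): each saved
# model_XXXXXX.ckpt in --logdir holds 'epoch', 'model' and 'optimizer' entries, so a
# --resume or --loadckpt run would restore it along the lines of:
#   state = torch.load(ckpt_path)
#   model.module.load_state_dict(state['model'])
#   optimizer.load_state_dict(state['optimizer'])
#   start_epoch = state['epoch'] + 1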
def test(model, model_loss, TestImgLoader, args):
    avg_test_scalars = DictAverageMeter()
    for batch_idx, sample in enumerate(TestImgLoader):
        start_time = time.time()
        loss, scalar_outputs, image_outputs = test_sample_depth(model, model_loss, sample, args)
        avg_test_scalars.update(scalar_outputs)
        del scalar_outputs, image_outputs
        if (not is_distributed) or (dist.get_rank() == 0):
            print('Iter {}/{}, test loss = {:.3f}, time = {:.3f}'.format(batch_idx, len(TestImgLoader), loss,
                                                                         time.time() - start_time))
            if batch_idx % 100 == 0:
                print("Iter {}/{}, test results = {}".format(batch_idx, len(TestImgLoader), avg_test_scalars.mean()))
    if (not is_distributed) or (dist.get_rank() == 0):
        print("final", avg_test_scalars.mean())
def train_sample(model, model_loss, optimizer, sample, args):
    model.train()
    optimizer.zero_grad()

    sample_cuda = tocuda(sample)