"mask": sample["mask"]["stage1"],
|
"errormap": (depth_est - depth_gt).abs() * mask}
|
if is_distributed:
|
scalar_outputs = reduce_scalar_outputs(scalar_outputs)
|
return tensor2float(scalar_outputs["loss"]), tensor2float(scalar_outputs), tensor2numpy(image_outputs)
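
# reduce_scalar_outputs comes from this project's utils module. Below is a rough,
# hypothetical sketch of the idea only (not this repository's implementation):
# average every scalar across ranks, assuming each value is a 0-dim CUDA tensor.
def reduce_scalar_outputs_sketch(scalar_outputs):
    world_size = torch.distributed.get_world_size()
    if world_size < 2:
        return scalar_outputs
    with torch.no_grad():
        keys = sorted(scalar_outputs.keys())
        # stack into one tensor so a single all_reduce covers every scalar
        values = torch.stack([scalar_outputs[k] for k in keys], dim=0)
        torch.distributed.all_reduce(values)  # defaults to SUM
        values /= world_size
        return {k: v for k, v in zip(keys, values)}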


def profile():
    warmup_iter = 5
    iter_dataloader = iter(TestImgLoader)

    @make_nograd_func
    def do_iteration():
        # synchronize so pending GPU work does not leak into the measured interval
        torch.cuda.synchronize()
        start_time = time.perf_counter()
        test_sample_depth(next(iter_dataloader), detailed_summary=True)
        torch.cuda.synchronize()
        end_time = time.perf_counter()
        return end_time - start_time

    for i in range(warmup_iter):
        t = do_iteration()
        print('WarmUp Iter {}, time = {:.4f}'.format(i, t))

    with torch.autograd.profiler.profile(enabled=True, use_cuda=True) as prof:
        for i in range(5):
            t = do_iteration()
            print('Profile Iter {}, time = {:.4f}'.format(i, t))
            time.sleep(0.02)

    if prof is not None:
        # print(prof)
        trace_fn = 'chrome-trace.bin'
        prof.export_chrome_trace(trace_fn)
        print("chrome trace file is written to: ", trace_fn)


if __name__ == '__main__':
    # parse arguments and check
    args = parser.parse_args()

    # synchronized BN is implemented via nvidia-apex, so apex must be installed
    if args.sync_bn:
        assert args.using_apex, "must set using apex and install nvidia-apex"
    if args.using_apex:
        try:
            from apex.parallel import DistributedDataParallel as DDP
            from apex.fp16_utils import *
            from apex import amp, optimizers
            from apex.multi_tensor_apply import multi_tensor_applier
        except ImportError:
            raise ImportError("Please install apex from https://www.github.com/nvidia/apex to run this example.")

    if args.resume:
        assert args.mode == "train"
        assert args.loadckpt is None
    if args.testpath is None:
        args.testpath = args.trainpath

    if is_distributed:
        torch.cuda.set_device(args.local_rank)
        torch.distributed.init_process_group(
            backend="nccl", init_method="env://"
        )
        synchronize()
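
    # For reference: with init_method="env://", torch.distributed reads MASTER_ADDR,
    # MASTER_PORT, RANK and WORLD_SIZE from the environment. A typical single-node
    # launch (assuming 2 GPUs; the exact entry point and flags are this repo's choice)
    # looks like:
    #   python -m torch.distributed.launch --nproc_per_node=2 train.py <args>
    # which also passes --local_rank to the script, matching args.local_rank above.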

    set_random_seed(args.seed)
    # device = torch.device(args.device)
    device = torch.device(args.local_rank)
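    # Passing an integer ordinal to torch.device selects the corresponding CUDA device,
    # i.e. torch.device(0) is equivalent to torch.device("cuda:0").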

    if (not is_distributed) or (dist.get_rank() == 0):
        # create logger for mode "train" and "testall"
        if args.mode == "train":
            if not os.path.isdir(args.logdir):
                os.makedirs(args.logdir)
            current_time_str = str(datetime.datetime.now().strftime('%Y%m%d_%H%M%S'))
            print("current time", current_time_str)
            print("creating new summary file")
            logger = SummaryWriter(args.logdir)
        print("argv:", sys.argv[1:])
        print_args(args)

    # model, optimizer
    model = CascadeMVSNet(refine=False, ndepths=[int(nd) for nd in args.ndepths.split(",") if nd],
                          depth_interals_ratio=[float(d_i) for d_i in args.depth_inter_r.split(",") if d_i],
                          share_cr=args.share_cr,
                          cr_base_chs=[int(ch) for ch in args.cr_base_chs.split(",") if ch],
                          grad_method=args.grad_method)
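    # For example, a string such as "48,32,8" parses to [48, 32, 8] and "4,2,1" to
    # [4.0, 2.0, 1.0]; the trailing `if nd` / `if d_i` guards simply drop empty
    # fragments left by stray commas.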
    model.to(device)

    # using kl loss
    model_loss = cas_mvsnet_loss_kl

    if args.sync_bn:
        import apex
        print("using apex synced BN")
        model = apex.parallel.convert_syncbn_model(model)
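    # convert_syncbn_model replaces the model's BatchNorm layers with apex synced
    # BatchNorm, so batch statistics are reduced across all ranks during training.
    # (torch.nn.SyncBatchNorm.convert_sync_batchnorm is the native PyTorch equivalent.)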