optimizer = optim.Adam(filter(lambda p: p.requires_grad, model.parameters()), lr=args.lr, betas=(0.9, 0.999), weight_decay=args.wd)
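# NB: the filter(...) above hands Adam only parameters with requires_grad=True,
# so any frozen layers are excluded from weight updates and weight decay.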
# load parameters
start_epoch = 0
if args.resume:
    # checkpoints are expected to be named "<prefix>_<epoch>.ckpt";
    # sorting by the numeric suffix puts the newest checkpoint last
    saved_models = [fn for fn in os.listdir(args.logdir) if fn.endswith(".ckpt")]
    saved_models = sorted(saved_models, key=lambda x: int(x.split('_')[-1].split('.')[0]))
    # use the latest checkpoint file
    loadckpt = os.path.join(args.logdir, saved_models[-1])
    print("resuming", loadckpt)
    state_dict = torch.load(loadckpt, map_location=torch.device("cpu"))
    model.load_state_dict(state_dict['model'])
    optimizer.load_state_dict(state_dict['optimizer'])
    start_epoch = state_dict['epoch'] + 1
elif args.loadckpt:
    # load checkpoint file specified by args.loadckpt (weights only, no optimizer state)
    print("loading model {}".format(args.loadckpt))
    state_dict = torch.load(args.loadckpt, map_location=torch.device("cpu"))
    model.load_state_dict(state_dict['model'])
# print only on rank 0 to avoid duplicated logs in distributed runs
if (not is_distributed) or (dist.get_rank() == 0):
    print("start at epoch {}".format(start_epoch))
    print('Number of model parameters: {}'.format(sum(p.data.nelement() for p in model.parameters())))
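# NOTE: NVIDIA apex.amp is deprecated upstream; torch.cuda.amp provides
# equivalent mixed-precision training in core PyTorch if this path is reworked.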
if args.using_apex:
    # Initialize Amp
    model, optimizer = amp.initialize(model, optimizer,
                                      opt_level=args.opt_level,
                                      keep_batchnorm_fp32=args.keep_batchnorm_fp32,
                                      loss_scale=args.loss_scale)
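# Multi-GPU wrapping: DistributedDataParallel (one process per GPU, launched via
# torch.distributed) when is_distributed is set, otherwise plain DataParallel.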
if is_distributed:
    print("Let's use", torch.cuda.device_count(), "GPUs!")
    # convert BatchNorm layers to SyncBatchNorm so running stats are shared across ranks
    model = nn.SyncBatchNorm.convert_sync_batchnorm(model)
    model = torch.nn.parallel.DistributedDataParallel(
        model, device_ids=[args.local_rank], output_device=args.local_rank,
        # find_unused_parameters=True,
        # this should be removed if we update BatchNorm stats
        # broadcast_buffers=False,
    )
else:
    if torch.cuda.is_available():
        print("Let's use", torch.cuda.device_count(), "GPUs!")
        model = nn.DataParallel(model)
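# NOTE: nn.DataParallel is single-process multi-threaded and generally slower
# than DDP; the PyTorch docs recommend DistributedDataParallel even on one node.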
# dataset, dataloader
MVSDataset = find_dataset_def(args.dataset)
# the train split additionally reads pseudo ground-truth depth from args.pseudopath
train_dataset = MVSDataset(args.trainpath, args.pseudopath, args.trainlist, "train", args.nviews, args.numdepth, args.interval_scale)
# evaluation always uses the standard 'dtu_yao' dataset definition
TESTDataset = find_dataset_def('dtu_yao')
test_dataset = TESTDataset(args.testpath, args.testlist, "test", args.nviews, args.numdepth, args.interval_scale)
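# In distributed mode each rank gets a disjoint shard via DistributedSampler;
# train_sampler.set_epoch(epoch) should be called once per epoch (presumably
# inside train()) so the shuffling order changes between epochs.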
if is_distributed:
    train_sampler = torch.utils.data.DistributedSampler(train_dataset, num_replicas=dist.get_world_size(),
                                                        rank=dist.get_rank())
    test_sampler = torch.utils.data.DistributedSampler(test_dataset, num_replicas=dist.get_world_size(),
                                                       rank=dist.get_rank())
    TrainImgLoader = DataLoader(train_dataset, args.batch_size, sampler=train_sampler, num_workers=2,
                                drop_last=True, pin_memory=args.pin_m)
    TestImgLoader = DataLoader(test_dataset, args.batch_size, sampler=test_sampler, num_workers=2,
                               drop_last=False, pin_memory=args.pin_m)
else:
    TrainImgLoader = DataLoader(train_dataset, args.batch_size, shuffle=True, num_workers=0, drop_last=True,
                                pin_memory=args.pin_m)
    TestImgLoader = DataLoader(test_dataset, args.batch_size, shuffle=False, num_workers=0, drop_last=False,
                               pin_memory=args.pin_m)
if args.mode == "train":
    train(model, model_loss, optimizer, TrainImgLoader, TestImgLoader, start_epoch, args)
elif args.mode == "test":
    test(model, model_loss, TestImgLoader, args)
elif args.mode == "profile":
    profile()
else:
    raise NotImplementedError
# <FILESEP>
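# --- second file: import section of a Meshtastic MQTT-to-Prometheus exporter ---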
import logging
import os
from datetime import datetime

import paho.mqtt.client as mqtt
from dotenv import load_dotenv

from constants import callback_api_version_map, protocol_map
from exporter.metric.node_configuration_metrics import NodeConfigurationMetrics
from exporter.metric_cleanup_job import MetricTrackingRegistry
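# The generated protobuf modules moved under meshtastic.protobuf in newer
# releases of the meshtastic package; try the old flat layout first and fall
# back to the namespaced one.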
try:
    from meshtastic.mesh_pb2 import MeshPacket
    from meshtastic.mqtt_pb2 import ServiceEnvelope
except ImportError:
    from meshtastic.protobuf.mesh_pb2 import MeshPacket
    from meshtastic.protobuf.mqtt_pb2 import ServiceEnvelope
from prometheus_client import start_http_server
from psycopg_pool import ConnectionPool