# rename loss entries with a 'val_' prefix so they are logged as validation metrics
all_loss['val_cls'] = all_loss.pop('cls')
all_loss['val_patch'] = all_loss.pop('patch')
all_loss['val_recon'] = all_loss.pop('recon')
all_loss['val_loss'] = all_loss.pop('loss')

# logging (the loop covers every renamed key, including 'val_loss', so a
# separate metric_logger.update(val_loss=...) call would be redundant)
torch.cuda.synchronize()
for key, value in all_loss.items():
    metric_logger.update(**{key: value.item()})

metric_logger.synchronize_between_processes()
print("Averaged validation stats:", metric_logger)

return_dict = {k: meter.global_avg for k, meter in metric_logger.meters.items()}
return return_dict
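# Usage sketch (illustrative addition, not in the original file): the dict
# returned above maps each 'val_*' key to its epoch average, which a training
# loop can use for best-checkpoint selection. Here `evaluate` stands for the
# function this fragment ends; `model`, `val_loader`, and `num_epochs` are
# hypothetical stand-ins.
#
#   best_val_loss = float('inf')
#   for epoch in range(num_epochs):
#       stats = evaluate(model, val_loader)   # {'val_loss': ..., 'val_cls': ...}
#       if stats['val_loss'] < best_val_loss:
#           best_val_loss = stats['val_loss']
#           torch.save(model.state_dict(), f'best_epoch{epoch}.pth')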
# <FILESEP>
import os
import logging as log

from keras.callbacks import EarlyStopping, ModelCheckpoint

# define the training strategy
def train_model(model, dataset):
    log.info("training model (train on %d samples, validate on %d) ..." % (
        len(dataset.Y_train),
        len(dataset.Y_val)))

    loss = 'binary_crossentropy'
    optimizer = 'adam'
    metrics = ['accuracy']

    model.compile(loss=loss, optimizer=optimizer, metrics=metrics)

    # checkpoints are written next to this script, one file per improving epoch
    checkpath = os.path.join(os.path.dirname(os.path.abspath(__file__)), "checkpoints")
    os.makedirs(checkpath, exist_ok=True)
    # note: 'val_acc' is the metric name in Keras < 2.3; newer releases log 'val_accuracy'
    checkpath = os.path.join(checkpath, 'model-epoch{epoch:03d}-acc{val_acc:.3f}.h5')

    # stop training once validation accuracy stops improving
    stopper = EarlyStopping(monitor='val_acc', min_delta=0.0001, patience=5, mode='auto')

    # take a snapshot of the best-performing epoch (lowest validation loss)
    saver = ModelCheckpoint(checkpath, save_best_only=True, verbose=1, monitor='val_loss', mode='min')

    # start training
    return model.fit(dataset.X_train, dataset.Y_train,
                     batch_size=64,
                     epochs=50,
                     verbose=2,
                     validation_data=(dataset.X_val, dataset.Y_val),
                     callbacks=[saver, stopper])
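# Usage sketch (illustrative addition, not in the original file): train_model
# only needs an object exposing X_train/Y_train/X_val/Y_val, so a namedtuple
# with synthetic data is enough to smoke-test the wiring. The tiny Sequential
# model is a hypothetical stand-in for the real architecture, and the run
# assumes a Keras version that logs accuracy as 'val_acc' (pre-2.3).
if __name__ == '__main__':
    from collections import namedtuple
    import numpy as np
    from keras.models import Sequential
    from keras.layers import Dense

    Dataset = namedtuple('Dataset', 'X_train Y_train X_val Y_val')
    rng = np.random.default_rng(0)
    ds = Dataset(rng.random((128, 10)), rng.integers(0, 2, size=128),
                 rng.random((32, 10)), rng.integers(0, 2, size=32))
    demo = Sequential([Dense(16, activation='relu', input_shape=(10,)),
                       Dense(1, activation='sigmoid')])
    train_model(demo, ds)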
# <FILESEP>
from argparse import ArgumentParser

import torch

from experiments.whole_song_gen import WholeSongGeneration

DEFAULT_FRM_MODEL_FOLDER = 'results_default/frm---/v-default'
DEFAULT_CTP_MODEL_FOLDER = 'results_default/ctp-a-b-/v-default'
DEFAULT_LSH_MODEL_FOLDER = 'results_default/lsh-a-b-/v-default'
DEFAULT_ACC_MODEL_FOLDER = 'results_default/acc-a-b-/v-default'

DEFAULT_DEMO_DIR = 'demo'


def init_parser():
    parser = ArgumentParser(description='run inference for a whole-song generation experiment')
    parser.add_argument(
        "--demo_dir",
        default=DEFAULT_DEMO_DIR,
        help='directory in which to store generated samples'
    )
    parser.add_argument("--mpath0", default=DEFAULT_FRM_MODEL_FOLDER, help="Form generation model path")
    parser.add_argument("--mid0", default='default', help="Form generation model id")
    parser.add_argument("--mpath1", default=DEFAULT_CTP_MODEL_FOLDER, help="Counterpoint generation model path")
    parser.add_argument("--mid1", default='default', help="Counterpoint generation model id")
    parser.add_argument("--mpath2", default=DEFAULT_LSH_MODEL_FOLDER, help="Lead Sheet generation model path")
    parser.add_argument("--mid2", default='default', help="Lead Sheet generation model id")
    parser.add_argument("--mpath3", default=DEFAULT_ACC_MODEL_FOLDER, help="Accompaniment generation model path")
    parser.add_argument("--mid3", default='default', help="Accompaniment generation model id")
    parser.add_argument("--nsample", default=1, type=int, help="Number of samples to generate")
    parser.add_argument("--pstring", help="Phrase structure. If specified, --key must also be specified.")
    parser.add_argument("--nbpm", default=4, type=int, help="Number of beats per measure")
    parser.add_argument("--key", default=0, type=int, help="Tonic of the key (0 - 11)")
    parser.add_argument('--minor', action='store_true', help="Whether to generate in a minor key")
    parser.add_argument('--debug', action='store_true', help="Whether to use a toy dataset")
    return parser


if __name__ == '__main__':
    parser = init_parser()
    args = parser.parse_args()
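    # Illustrative addition (not in the original file): the parser can also be
    # exercised with an explicit argv, handy for quick checks without touching
    # the command line. The downstream WholeSongGeneration call is omitted
    # because its constructor signature is not shown in this fragment.
    demo_args = parser.parse_args(['--nsample', '2', '--key', '9', '--minor'])
    assert demo_args.nsample == 2 and demo_args.key == 9 and demo_args.minor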