
# This settings file can be loaded back into Latent Majesty Diffusion. If you like your settings, consider sharing them with the settings library at https://github.com/multimodalart/majesty-diffusion

[model]

latent_diffusion_model = finetuned

# NOTE: these settings can run on a T4 GPU

[clip_list]

perceptors = ['[clip - mlfoundations - ViT-B-16--openai]', '[clip - mlfoundations - ViT-L-14--openai]', '[clip - mlfoundations - RN50x16--openai]', '[clip - mlfoundations - ViT-B-32--laion2b_e16]']



[basic_settings]

# Perceptor settings

# Everything here is deliberately disabled (commented out); the values in [advanced_settings] are used instead.

#width = 256

#height = 256

#latent_diffusion_guidance_scale = 10

#clip_guidance_scale = 16000

#aesthetic_loss_scale = 200

#augment_cuts=True



# Init image settings

starting_timestep = 0.9

init_scale = 1000

init_brightness = 0.0



[advanced_settings]

# Add CLIP guidance and all its flavors, or just run plain Latent Diffusion

use_cond_fn = True



# Custom schedules for cuts; see the schedules documentation in the Majesty Diffusion repository

custom_schedule_setting = [[30, 1000, 8], 'gfpgan:1.5', [20, 200, 8], 'gfpgan:1.0', [50, 220, 4]]



# Cut settings

clamp_index = [2.4, 2.1]

cut_overview = [8]*500 + [4]*500

cut_innercut = [0]*500 + [4]*500

cut_blur_n = [0]*1300

cut_blur_kernel = 3

cut_ic_pow = 0.6

cut_icgray_p = [0.1]*300 + [0]*1000

cutn_batches = 1

range_index = [0]*200 + [50000.0]*400 + [0]*1000

active_function = "softsign"

ths_method = "clamp"

tv_scales = [150]*1 + [0]*3



# Schedules the CLIP guidance across the steps; if this line is commented out, clip_guidance_scale is used instead

clip_guidance_schedule = [16000]*1000



# Apply symmetric loss (forces symmetry in your results)

symmetric_loss_scale = 0



# Latent Diffusion advanced settings

# Use when upscaling latents to correct the saturation problem

scale_div = 1

# Magnify grad before clamping by this many times

opt_mag_mul = 20

opt_ddim_eta = 1.3

opt_eta_end = 1.1

opt_temperature = 0.98



# Grad advanced settings

grad_center = False

# Lower values give more coherent and detailed results; higher values make it focus on the most dominant concept

grad_scale = 0.25

score_modifier = True

threshold_percentile = 0.85

threshold = 1

var_index = [2]*300 + [0]*700

var_range = 0.5

mean_index = [0]*1000

mean_range = 0.75



# Init image advanced settings

init_rotate = False

mask_rotate = False

init_magnitude = 0.18215



# More settings

RGB_min = -0.95

RGB_max = 0.95

# How to pad the image with cut_overview

padargs = {'mode': 'constant', 'value': -1}

flip_aug = False



# Experimental aesthetic embeddings; works only with OpenAI ViT-B/32 and ViT-L/14

experimental_aesthetic_embeddings = True

# How much you want this to influence your result

experimental_aesthetic_embeddings_weight = 0.3

# Aesthetic score target: 9 means good aesthetic embeddings, 0 means bad ones

experimental_aesthetic_embeddings_score = 8



# For fun — don't change these unless you really know what you are doing

grad_blur = False

compress_steps = 200

compress_factor = 0.1

punish_steps = 200

punish_factor = 0.5
