from diffsynth import ModelManager, SDXLImagePipeline
from diffsynth.trainers.text_to_image import LightningModelForT2ILoRA, add_general_parsers, launch_training_task
import torch, os, argparse
os.environ["TOKENIZERS_PARALLELISM"] = "True"
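
# Example invocation (a sketch; the script filename is illustrative, and the
# model path is the one suggested in the --pretrained_path help text below).
# The flags read in `__main__` but not defined here (--learning_rate,
# --precision, --use_gradient_checkpointing, --lora_rank, --lora_alpha,
# --init_lora_weights, --pretrained_lora_path) are supplied by
# `add_general_parsers`; dataset-related options are presumably registered
# there as well and are omitted from this example.
#
#   python train_sdxl_lora.py \
#       --pretrained_path models/stable_diffusion_xl/sd_xl_base_1.0.safetensors \
#       --lora_rank 4 \
#       --lora_alpha 4 \
#       --learning_rate 1e-4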

class LightningModel(LightningModelForT2ILoRA):
    def __init__(
        self,
        torch_dtype=torch.float16, pretrained_weights=[],
        learning_rate=1e-4, use_gradient_checkpointing=True,
        lora_rank=4, lora_alpha=4, lora_target_modules="to_q,to_k,to_v,to_out",
        init_lora_weights="gaussian", pretrained_lora_path=None,
    ):
        super().__init__(learning_rate=learning_rate, use_gradient_checkpointing=use_gradient_checkpointing)
        # Load models
        model_manager = ModelManager(torch_dtype=torch_dtype, device=self.device)
        model_manager.load_models(pretrained_weights)
        self.pipe = SDXLImagePipeline.from_model_manager(model_manager)
        self.pipe.scheduler.set_timesteps(1000)

        # Freeze the base model, then inject trainable LoRA layers into the denoising model (UNet)
        self.freeze_parameters()
        self.add_lora_to_model(
            self.pipe.denoising_model(),
            lora_rank=lora_rank,
            lora_alpha=lora_alpha,
            lora_target_modules=lora_target_modules,
            init_lora_weights=init_lora_weights,
            pretrained_lora_path=pretrained_lora_path,
        )

def parse_args():
    parser = argparse.ArgumentParser(description="Simple example of a training script.")
    parser.add_argument(
        "--pretrained_path",
        type=str,
        default=None,
        required=True,
        help="Path to pretrained model. For example, `models/stable_diffusion_xl/sd_xl_base_1.0.safetensors`.",
    )
    parser.add_argument(
        "--lora_target_modules",
        type=str,
        default="to_q,to_k,to_v,to_out",
        help="Comma-separated list of layers to equip with LoRA modules.",
    )
    # Shared training options (learning rate, precision, LoRA rank, etc.)
    parser = add_general_parsers(parser)
    args = parser.parse_args()
    return args

if __name__ == '__main__':
    args = parse_args()
    model = LightningModel(
        # Training dtype follows the --precision flag added by add_general_parsers
        torch_dtype=torch.float32 if args.precision == "32" else torch.float16,
        pretrained_weights=[args.pretrained_path],
        learning_rate=args.learning_rate,
        use_gradient_checkpointing=args.use_gradient_checkpointing,
        lora_rank=args.lora_rank,
        lora_alpha=args.lora_alpha,
        init_lora_weights=args.init_lora_weights,
        pretrained_lora_path=args.pretrained_lora_path,
        lora_target_modules=args.lora_target_modules,
    )
    launch_training_task(model, args)