#!/usr/bin/env python3
"""
Main entry point for Multi-Model Orchestrator
"""

import sys
import argparse

from multi_model_orchestrator import DemoMultiModelOrchestrator, SimpleMultiModelOrchestrator


def main():
    parser = argparse.ArgumentParser(description="Multi-Model Orchestrator")
    parser.add_argument("--demo", action="store_true", help="Run demo mode")
    parser.add_argument("--real", action="store_true", help="Run with real models")
    parser.add_argument("--caption", type=str, help="Generate caption for image")
    parser.add_argument("--generate-image", type=str, help="Generate image from text")
    parser.add_argument("--multimodal", nargs=2, metavar=("IMAGE", "TEXT"),
                        help="Process multimodal task (image_path text_prompt)")

    args = parser.parse_args()

    if args.demo:
        # Run demo
        from multi_model_orchestrator.demo_orchestrator import main as demo_main
        demo_main()
    elif args.real:
        # Run with real models
        print("Real model mode - requires model downloads")
        print("Use: python -m multi_model_orchestrator.multi_model_example")
    elif args.caption:
        # Generate caption
        orchestrator = DemoMultiModelOrchestrator()
        orchestrator.initialize_models()
        caption = orchestrator.generate_caption(args.caption)
        print(f"Caption: {caption}")
    elif args.generate_image:
        # Generate image
        orchestrator = DemoMultiModelOrchestrator()
        orchestrator.initialize_models()
        image_path = orchestrator.generate_image(args.generate_image)
        print(f"Generated image: {image_path}")
    elif args.multimodal:
        # Multimodal processing
        image_path, text_prompt = args.multimodal
        orchestrator = DemoMultiModelOrchestrator()
        orchestrator.initialize_models()
        results = orchestrator.process_multimodal_task(image_path, text_prompt)
        print("Results:")
        for key, value in results.items():
            print(f"  {key}: {value}")
    else:
        # Default: run demo
        from multi_model_orchestrator.demo_orchestrator import main as demo_main
        demo_main()


if __name__ == "__main__":
    main()
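
# Example invocations (a sketch, assuming this script is saved as main.py and the
# multi_model_orchestrator package is importable; the prompts and paths below are
# placeholders for illustration only):
#   python main.py --demo
#   python main.py --caption path/to/image.jpg
#   python main.py --generate-image "a sunset over the ocean"
#   python main.py --multimodal path/to/image.jpg "Describe this scene"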