kunaliitkgp09 committed
Commit b4740c6 · verified
1 Parent(s): b87b09b

Upload folder using huggingface_hub

This view is limited to 50 files because it contains too many changes. See raw diff.
Files changed (50)
  1. .gitattributes +52 -0
  2. LICENSE +21 -0
  3. MANIFEST.in +9 -0
  4. MULTI_MODEL_README.md +339 -0
  5. README.md +139 -0
  6. build/lib/multi_model_orchestrator/__init__.py +27 -0
  7. build/lib/multi_model_orchestrator/demo_orchestrator.py +352 -0
  8. build/lib/multi_model_orchestrator/multi_model_orchestrator.py +497 -0
  9. build/lib/multi_model_orchestrator/simple_orchestrator.py +323 -0
  10. dist/multi_model_orchestrator-1.0.0-py3-none-any.whl +0 -0
  11. dist/multi_model_orchestrator-1.0.0.tar.gz +3 -0
  12. main.py +56 -0
  13. multi_model_env/.gitignore +2 -0
  14. multi_model_env/bin/Activate.ps1 +248 -0
  15. multi_model_env/bin/activate +76 -0
  16. multi_model_env/bin/activate.csh +27 -0
  17. multi_model_env/bin/activate.fish +69 -0
  18. multi_model_env/bin/diffusers-cli +8 -0
  19. multi_model_env/bin/f2py +8 -0
  20. multi_model_env/bin/hf +8 -0
  21. multi_model_env/bin/huggingface-cli +8 -0
  22. multi_model_env/bin/isympy +8 -0
  23. multi_model_env/bin/normalizer +8 -0
  24. multi_model_env/bin/numpy-config +8 -0
  25. multi_model_env/bin/pip +8 -0
  26. multi_model_env/bin/pip3 +8 -0
  27. multi_model_env/bin/pip3.13 +8 -0
  28. multi_model_env/bin/python +0 -0
  29. multi_model_env/bin/python3 +0 -0
  30. multi_model_env/bin/python3.13 +0 -0
  31. multi_model_env/bin/tiny-agents +8 -0
  32. multi_model_env/bin/torchfrtrace +8 -0
  33. multi_model_env/bin/torchrun +8 -0
  34. multi_model_env/bin/tqdm +8 -0
  35. multi_model_env/bin/transformers +8 -0
  36. multi_model_env/bin/transformers-cli +8 -0
  37. multi_model_env/lib/python3.13/site-packages/MarkupSafe-3.0.2.dist-info/INSTALLER +1 -0
  38. multi_model_env/lib/python3.13/site-packages/MarkupSafe-3.0.2.dist-info/LICENSE.txt +28 -0
  39. multi_model_env/lib/python3.13/site-packages/MarkupSafe-3.0.2.dist-info/METADATA +92 -0
  40. multi_model_env/lib/python3.13/site-packages/MarkupSafe-3.0.2.dist-info/RECORD +14 -0
  41. multi_model_env/lib/python3.13/site-packages/MarkupSafe-3.0.2.dist-info/WHEEL +5 -0
  42. multi_model_env/lib/python3.13/site-packages/MarkupSafe-3.0.2.dist-info/top_level.txt +1 -0
  43. multi_model_env/lib/python3.13/site-packages/PIL/.dylibs/libXau.6.dylib +0 -0
  44. multi_model_env/lib/python3.13/site-packages/PIL/.dylibs/libavif.16.3.0.dylib +3 -0
  45. multi_model_env/lib/python3.13/site-packages/PIL/.dylibs/libbrotlicommon.1.1.0.dylib +3 -0
  46. multi_model_env/lib/python3.13/site-packages/PIL/.dylibs/libbrotlidec.1.1.0.dylib +3 -0
  47. multi_model_env/lib/python3.13/site-packages/PIL/.dylibs/libfreetype.6.dylib +3 -0
  48. multi_model_env/lib/python3.13/site-packages/PIL/.dylibs/libharfbuzz.0.dylib +3 -0
  49. multi_model_env/lib/python3.13/site-packages/PIL/.dylibs/libjpeg.62.4.0.dylib +3 -0
  50. multi_model_env/lib/python3.13/site-packages/PIL/.dylibs/liblcms2.2.dylib +3 -0
.gitattributes CHANGED
@@ -33,3 +33,55 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ multi_model_env/lib/python3.13/site-packages/PIL/.dylibs/libavif.16.3.0.dylib filter=lfs diff=lfs merge=lfs -text
+ multi_model_env/lib/python3.13/site-packages/PIL/.dylibs/libbrotlicommon.1.1.0.dylib filter=lfs diff=lfs merge=lfs -text
+ multi_model_env/lib/python3.13/site-packages/PIL/.dylibs/libbrotlidec.1.1.0.dylib filter=lfs diff=lfs merge=lfs -text
+ multi_model_env/lib/python3.13/site-packages/PIL/.dylibs/libfreetype.6.dylib filter=lfs diff=lfs merge=lfs -text
+ multi_model_env/lib/python3.13/site-packages/PIL/.dylibs/libharfbuzz.0.dylib filter=lfs diff=lfs merge=lfs -text
+ multi_model_env/lib/python3.13/site-packages/PIL/.dylibs/libjpeg.62.4.0.dylib filter=lfs diff=lfs merge=lfs -text
+ multi_model_env/lib/python3.13/site-packages/PIL/.dylibs/liblcms2.2.dylib filter=lfs diff=lfs merge=lfs -text
+ multi_model_env/lib/python3.13/site-packages/PIL/.dylibs/liblzma.5.dylib filter=lfs diff=lfs merge=lfs -text
+ multi_model_env/lib/python3.13/site-packages/PIL/.dylibs/libopenjp2.2.5.3.dylib filter=lfs diff=lfs merge=lfs -text
+ multi_model_env/lib/python3.13/site-packages/PIL/.dylibs/libpng16.16.dylib filter=lfs diff=lfs merge=lfs -text
+ multi_model_env/lib/python3.13/site-packages/PIL/.dylibs/libtiff.6.dylib filter=lfs diff=lfs merge=lfs -text
+ multi_model_env/lib/python3.13/site-packages/PIL/.dylibs/libwebp.7.dylib filter=lfs diff=lfs merge=lfs -text
+ multi_model_env/lib/python3.13/site-packages/PIL/.dylibs/libwebpmux.3.dylib filter=lfs diff=lfs merge=lfs -text
+ multi_model_env/lib/python3.13/site-packages/PIL/.dylibs/libxcb.1.1.0.dylib filter=lfs diff=lfs merge=lfs -text
+ multi_model_env/lib/python3.13/site-packages/PIL/.dylibs/libz.1.3.1.zlib-ng.dylib filter=lfs diff=lfs merge=lfs -text
+ multi_model_env/lib/python3.13/site-packages/PIL/_imaging.cpython-313-darwin.so filter=lfs diff=lfs merge=lfs -text
+ multi_model_env/lib/python3.13/site-packages/PIL/_imagingft.cpython-313-darwin.so filter=lfs diff=lfs merge=lfs -text
+ multi_model_env/lib/python3.13/site-packages/charset_normalizer/md.cpython-313-darwin.so filter=lfs diff=lfs merge=lfs -text
+ multi_model_env/lib/python3.13/site-packages/charset_normalizer/md__mypyc.cpython-313-darwin.so filter=lfs diff=lfs merge=lfs -text
+ multi_model_env/lib/python3.13/site-packages/functorch/_C.cpython-313-darwin.so filter=lfs diff=lfs merge=lfs -text
+ multi_model_env/lib/python3.13/site-packages/hf_xet/hf_xet.abi3.so filter=lfs diff=lfs merge=lfs -text
+ multi_model_env/lib/python3.13/site-packages/numpy/.dylibs/libgcc_s.1.1.dylib filter=lfs diff=lfs merge=lfs -text
+ multi_model_env/lib/python3.13/site-packages/numpy/.dylibs/libgfortran.5.dylib filter=lfs diff=lfs merge=lfs -text
+ multi_model_env/lib/python3.13/site-packages/numpy/.dylibs/libquadmath.0.dylib filter=lfs diff=lfs merge=lfs -text
+ multi_model_env/lib/python3.13/site-packages/numpy/.dylibs/libscipy_openblas64_.dylib filter=lfs diff=lfs merge=lfs -text
+ multi_model_env/lib/python3.13/site-packages/numpy/_core/_multiarray_tests.cpython-313-darwin.so filter=lfs diff=lfs merge=lfs -text
+ multi_model_env/lib/python3.13/site-packages/numpy/_core/_multiarray_umath.cpython-313-darwin.so filter=lfs diff=lfs merge=lfs -text
+ multi_model_env/lib/python3.13/site-packages/numpy/_core/_simd.cpython-313-darwin.so filter=lfs diff=lfs merge=lfs -text
+ multi_model_env/lib/python3.13/site-packages/numpy/fft/_pocketfft_umath.cpython-313-darwin.so filter=lfs diff=lfs merge=lfs -text
+ multi_model_env/lib/python3.13/site-packages/numpy/linalg/_umath_linalg.cpython-313-darwin.so filter=lfs diff=lfs merge=lfs -text
+ multi_model_env/lib/python3.13/site-packages/numpy/random/_bounded_integers.cpython-313-darwin.so filter=lfs diff=lfs merge=lfs -text
+ multi_model_env/lib/python3.13/site-packages/numpy/random/_common.cpython-313-darwin.so filter=lfs diff=lfs merge=lfs -text
+ multi_model_env/lib/python3.13/site-packages/numpy/random/_generator.cpython-313-darwin.so filter=lfs diff=lfs merge=lfs -text
+ multi_model_env/lib/python3.13/site-packages/numpy/random/_mt19937.cpython-313-darwin.so filter=lfs diff=lfs merge=lfs -text
+ multi_model_env/lib/python3.13/site-packages/numpy/random/_pcg64.cpython-313-darwin.so filter=lfs diff=lfs merge=lfs -text
+ multi_model_env/lib/python3.13/site-packages/numpy/random/_philox.cpython-313-darwin.so filter=lfs diff=lfs merge=lfs -text
+ multi_model_env/lib/python3.13/site-packages/numpy/random/bit_generator.cpython-313-darwin.so filter=lfs diff=lfs merge=lfs -text
+ multi_model_env/lib/python3.13/site-packages/numpy/random/mtrand.cpython-313-darwin.so filter=lfs diff=lfs merge=lfs -text
+ multi_model_env/lib/python3.13/site-packages/pip/_vendor/distlib/t64-arm.exe filter=lfs diff=lfs merge=lfs -text
+ multi_model_env/lib/python3.13/site-packages/pip/_vendor/distlib/t64.exe filter=lfs diff=lfs merge=lfs -text
+ multi_model_env/lib/python3.13/site-packages/pip/_vendor/distlib/w64-arm.exe filter=lfs diff=lfs merge=lfs -text
+ multi_model_env/lib/python3.13/site-packages/pip/_vendor/distlib/w64.exe filter=lfs diff=lfs merge=lfs -text
+ multi_model_env/lib/python3.13/site-packages/regex/_regex.cpython-313-darwin.so filter=lfs diff=lfs merge=lfs -text
+ multi_model_env/lib/python3.13/site-packages/safetensors/_safetensors_rust.abi3.so filter=lfs diff=lfs merge=lfs -text
+ multi_model_env/lib/python3.13/site-packages/tokenizers/tokenizers.abi3.so filter=lfs diff=lfs merge=lfs -text
+ multi_model_env/lib/python3.13/site-packages/torch/bin/protoc filter=lfs diff=lfs merge=lfs -text
+ multi_model_env/lib/python3.13/site-packages/torch/bin/protoc-3.13.0.0 filter=lfs diff=lfs merge=lfs -text
+ multi_model_env/lib/python3.13/site-packages/torch/lib/libc10.dylib filter=lfs diff=lfs merge=lfs -text
+ multi_model_env/lib/python3.13/site-packages/torch/lib/libomp.dylib filter=lfs diff=lfs merge=lfs -text
+ multi_model_env/lib/python3.13/site-packages/torch/lib/libtorch_cpu.dylib filter=lfs diff=lfs merge=lfs -text
+ multi_model_env/lib/python3.13/site-packages/torch/lib/libtorch_python.dylib filter=lfs diff=lfs merge=lfs -text
+ multi_model_env/lib/python3.13/site-packages/yaml/_yaml.cpython-313-darwin.so filter=lfs diff=lfs merge=lfs -text
LICENSE ADDED
@@ -0,0 +1,21 @@
+ MIT License
+
+ Copyright (c) 2024 Kunal Dhanda
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all
+ copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.
MANIFEST.in ADDED
@@ -0,0 +1,9 @@
+ include README.md
+ include MULTI_MODEL_README.md
+ include requirements.txt
+ include multi_model_requirements.txt
+ include LICENSE
+ include *.json
+ include *.py
+ recursive-exclude * __pycache__
+ recursive-exclude * *.py[co]
MULTI_MODEL_README.md ADDED
@@ -0,0 +1,339 @@
+ # Multi-Model Orchestrator: Parent-Child LLM System
+
+ A sophisticated multi-model orchestration system that manages parent-child LLM relationships, specifically integrating the [CLIP-GPT2 Image Captioner](https://huggingface.co/kunaliitkgp09/clip-gpt2-image-captioner) and [Flickr30k Text-to-Image](https://huggingface.co/kunaliitkgp09/flickr30k-text-to-image) models.
+
+ ## 🚀 Features
+
+ ### **Parent Orchestrator**
+ - **Intelligent Task Routing**: Automatically routes tasks to appropriate child models
+ - **Model Management**: Handles loading, caching, and lifecycle of child models
+ - **Error Handling**: Robust error handling and recovery mechanisms
+ - **Task History**: Comprehensive logging and monitoring of all operations
+ - **Async Support**: Both synchronous and asynchronous processing modes
+
+ ### **Child Models**
+ - **CLIP-GPT2 Image Captioner**: Converts images to descriptive text captions
+ - **Flickr30k Text-to-Image**: Generates images from text descriptions
+ - **Extensible Architecture**: Easy to add new child models
+
+ ### **Advanced Capabilities**
+ - **Multimodal Processing**: Combines multiple child models for complex tasks
+ - **Batch Processing**: Handle multiple tasks efficiently
+ - **Performance Monitoring**: Track processing times and success rates
+ - **Memory Management**: Efficient GPU/CPU memory usage
+
+ ## 📁 Project Structure
+
+ ```
+ ├── multi_model_orchestrator.py      # Advanced orchestrator with full features
+ ├── simple_orchestrator.py           # Simplified interface matching original code
+ ├── multi_model_example.py           # Comprehensive examples and demonstrations
+ ├── multi_model_requirements.txt     # Dependencies for multi-model system
+ └── MULTI_MODEL_README.md            # This file
+ ```
+
+ ## 🛠️ Installation
+
+ 1. **Install dependencies:**
+ ```bash
+ pip install -r multi_model_requirements.txt
+ ```
+
+ 2. **Verify installation:**
+ ```python
+ import torch
+ from transformers import CLIPProcessor
+ from diffusers import StableDiffusionPipeline
+ print("All dependencies installed successfully!")
+ ```
+
+ ## 🎯 Quick Start
+
+ ### **Basic Usage (Matching Original Code)**
+
+ ```python
+ from simple_orchestrator import SimpleMultiModelOrchestrator
+
+ # Initialize orchestrator
+ orchestrator = SimpleMultiModelOrchestrator()
+ orchestrator.initialize_models()
+
+ # Generate caption from image
+ caption = orchestrator.generate_caption("sample_image.jpg")
+ print(f"Caption: {caption}")
+
+ # Generate image from text
+ image_path = orchestrator.generate_image("A beautiful sunset over mountains")
+ print(f"Generated image: {image_path}")
+
+ # Route tasks
+ caption = orchestrator.route_task("caption", "sample_image.jpg")
+ image_path = orchestrator.route_task("generate_image", "A cat on a windowsill")
+ ```
+
+ ### **Advanced Usage**
+
+ ```python
+ from multi_model_orchestrator import MultiModelOrchestrator
+ import asyncio
+
+ async def main():
+     # Initialize advanced orchestrator
+     orchestrator = MultiModelOrchestrator()
+     await orchestrator.initialize()
+
+     # Multimodal processing
+     results = await orchestrator.process_multimodal(
+         image_path="sample_image.jpg",
+         text_prompt="A serene landscape with mountains"
+     )
+
+     print("Results:", results)
+
+ asyncio.run(main())
+ ```
+
+ ## 🔧 Model Integration
+
+ ### **Child Model 1: CLIP-GPT2 Image Captioner**
+ - **Model**: `kunaliitkgp09/clip-gpt2-image-captioner`
+ - **Task**: Image-to-text captioning
+ - **Input**: Image file path
+ - **Output**: Descriptive text caption
+ - **Performance**: ~40% accuracy on test samples
+
+ ### **Child Model 2: Flickr30k Text-to-Image**
+ - **Model**: `kunaliitkgp09/flickr30k-text-to-image`
+ - **Task**: Text-to-image generation
+ - **Input**: Text prompt
+ - **Output**: Generated image file
+ - **Performance**: Fine-tuned on Flickr30k dataset
+
+ ## 📊 Usage Examples
+
+ ### **1. Image Captioning**
+ ```python
+ # Generate caption from image
+ caption = orchestrator.generate_caption("path/to/image.jpg")
+ print(f"Generated Caption: {caption}")
+ ```
+
+ ### **2. Text-to-Image Generation**
+ ```python
+ # Generate image from text
+ image_path = orchestrator.generate_image("A majestic eagle soaring over mountains")
+ print(f"Generated Image: {image_path}")
+ ```
+
+ ### **3. Multimodal Processing**
+ ```python
+ # Process both image and text together
+ results = orchestrator.process_multimodal_task(
+     image_path="sample_image.jpg",
+     text_prompt="A serene landscape with mountains"
+ )
+
+ print("Caption:", results["caption"])
+ print("Generated Image:", results["generated_image"])
+ print("Analysis:", results["analysis_prompt"])
+ ```
+
+ ### **4. Async Processing**
+ ```python
+ # Async version for better performance
+ async def async_example():
+     results = await orchestrator.process_multimodal_async(
+         image_path="sample_image.jpg",
+         text_prompt="A futuristic cityscape"
+     )
+     return results
+ ```
+
+ ### **5. Batch Processing**
+ ```python
+ # Process multiple tasks
+ image_tasks = [
+     "A beautiful sunset",
+     "A cozy coffee shop",
+     "A vibrant garden"
+ ]
+
+ for prompt in image_tasks:
+     image_path = orchestrator.generate_image(prompt)
+     print(f"Generated: {image_path}")
+ ```
+
+ ## 🔍 Task History and Monitoring
+
+ ```python
+ # Get orchestrator status
+ status = orchestrator.get_status()
+ print(f"Status: {status}")
+
+ # Get task history
+ history = orchestrator.get_task_history()
+ for task in history:
+     print(f"Task: {task['task_type']}, Time: {task['processing_time']:.2f}s")
+
+ # Save task history
+ orchestrator.save_task_history("my_tasks.json")
+ ```
+
+ ## ⚙️ Configuration Options
+
+ ### **Model Configuration**
+ ```python
+ # Custom model parameters
+ orchestrator = SimpleMultiModelOrchestrator(device="cuda")  # or "cpu"
+
+ # Custom generation parameters
+ image_path = orchestrator.generate_image(
+     "A beautiful landscape",
+     output_path="custom_output.png"
+ )
+ ```
+
+ ### **Async Configuration**
+ ```python
+ # Async orchestrator with concurrent processing
+ async_orchestrator = AsyncMultiModelOrchestrator()
+
+ # Process tasks concurrently
+ results = await async_orchestrator.process_multimodal_async(
+     image_path="image.jpg",
+     text_prompt="prompt"
+ )
+ ```
+
+ ## 🎯 Use Cases
+
+ ### **1. Content Creation**
+ - Generate captions for social media images
+ - Create images from text descriptions
+ - Multimodal content analysis
+
+ ### **2. Research and Development**
+ - Model performance comparison
+ - Multimodal AI research
+ - Prototype development
+
+ ### **3. Production Systems**
+ - Automated content generation
+ - Image analysis pipelines
+ - Text-to-image applications
+
+ ### **4. Educational Applications**
+ - AI model demonstration
+ - Multimodal learning systems
+ - Research toolkits
+
+ ## 🔧 Advanced Features
+
+ ### **Error Handling**
+ ```python
+ try:
+     caption = orchestrator.generate_caption("image.jpg")
+ except Exception as e:
+     print(f"Error: {e}")
+     # Handle error gracefully
+ ```
+
+ ### **Performance Optimization**
+ ```python
+ # Use async for better performance
+ async def optimized_processing():
+     tasks = [
+         orchestrator.generate_caption_async("image1.jpg"),
+         orchestrator.generate_caption_async("image2.jpg"),
+         orchestrator.generate_image_async("prompt1"),
+         orchestrator.generate_image_async("prompt2")
+     ]
+
+     results = await asyncio.gather(*tasks)
+     return results
+ ```
+
+ ### **Custom Model Integration**
+ ```python
+ # Add new child models
+ class CustomChildModel:
+     def __init__(self, model_name):
+         self.model = load_model(model_name)
+
+     def process(self, input_data):
+         # Custom processing logic
+         return result
+
+ # Integrate with orchestrator
+ orchestrator.add_child_model("custom_model", CustomChildModel("model_name"))
+ ```
+
+ ## 📈 Performance Metrics
+
+ The orchestrator tracks various performance metrics:
+
+ - **Processing Time**: Time taken for each task
+ - **Success Rate**: Percentage of successful operations
+ - **Memory Usage**: GPU/CPU memory consumption
+ - **Model Load Times**: Time to initialize each child model
+ - **Task Throughput**: Number of tasks processed per second
+
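A minimal sketch of how these figures could be derived from the task history the orchestrator logs, assuming entries shaped like the records written by `_log_task` (`task_type`, `processing_time`, `error`):

```python
# Sketch: summarizing the orchestrator's logged task history.
from statistics import mean

def summarize_history(history: list) -> dict:
    """Compute simple success-rate and latency figures from task records."""
    if not history:
        return {"total_tasks": 0, "success_rate": 0.0, "avg_processing_time": 0.0}
    successes = [t for t in history if t.get("error") is None]
    return {
        "total_tasks": len(history),
        "success_rate": len(successes) / len(history),
        "avg_processing_time": mean(t["processing_time"] for t in history),
    }

# Usage: summarize_history(orchestrator.get_task_history())
```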
+ ## 🚨 Important Notes
+
+ ### **System Requirements**
+ - **GPU**: Recommended for optimal performance (CUDA compatible)
+ - **RAM**: 8GB+ for smooth operation
+ - **Storage**: 5GB+ for model downloads and generated content
+ - **Python**: 3.8+ required
+
+ ### **Model Downloads**
+ - Models are downloaded automatically on first use
+ - CLIP-GPT2: ~500MB
+ - Stable Diffusion: ~4GB
+ - Total initial download: ~5GB
+
+ ### **Memory Management**
+ - Models are loaded into GPU memory when available
+ - CPU fallback available for systems without GPU
+ - Memory usage scales with batch size and model complexity
+
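A sketch of the GPU-with-CPU-fallback pattern described above, along the lines of what the package's `ModelManager` does (device names and the dtype choice here are illustrative):

```python
# Sketch: pick a device and matching dtype, falling back to CPU when CUDA is absent.
import torch

def pick_device(preferred: str = "cuda") -> str:
    """Return the preferred device if usable, otherwise fall back to CPU."""
    return preferred if preferred == "cuda" and torch.cuda.is_available() else "cpu"

device = pick_device()
dtype = torch.float16 if device == "cuda" else torch.float32  # fp16 only on GPU
```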
+ ## 🤝 Contributing
+
+ Contributions are welcome! Please feel free to submit pull requests or open issues for:
+
+ - New child model integrations
+ - Performance improvements
+ - Bug fixes
+ - Documentation enhancements
+ - Feature requests
+
+ ## 📄 License
+
+ This project is licensed under the MIT License - see the LICENSE file for details.
+
+ ## 🙏 Acknowledgments
+
+ - **CLIP-GPT2 Model**: [kunaliitkgp09/clip-gpt2-image-captioner](https://huggingface.co/kunaliitkgp09/clip-gpt2-image-captioner)
+ - **Stable Diffusion Model**: [kunaliitkgp09/flickr30k-text-to-image](https://huggingface.co/kunaliitkgp09/flickr30k-text-to-image)
+ - **Hugging Face**: For providing the model hosting platform
+ - **PyTorch**: For the deep learning framework
+ - **Transformers**: For the model loading and processing utilities
+
+ ## 📚 References
+
+ 1. **CLIP**: "Learning Transferable Visual Models From Natural Language Supervision" (Radford et al., 2021)
+ 2. **GPT-2**: "Language Models are Unsupervised Multitask Learners" (Radford et al., 2019)
+ 3. **Stable Diffusion**: "High-Resolution Image Synthesis with Latent Diffusion Models" (Rombach et al., 2022)
+ 4. **Flickr30k**: "From Image Descriptions to Visual Denotations" (Young et al., 2014)
+
+ ## 🔗 Links
+
+ - **CLIP-GPT2 Model**: https://huggingface.co/kunaliitkgp09/clip-gpt2-image-captioner
+ - **Flickr30k Text-to-Image**: https://huggingface.co/kunaliitkgp09/flickr30k-text-to-image
+ - **Hugging Face Hub**: https://huggingface.co/
+ - **PyTorch**: https://pytorch.org/
+ - **Transformers**: https://huggingface.co/docs/transformers/
+
+ ---
+
+ **Happy Orchestrating! 🚀**
README.md ADDED
@@ -0,0 +1,139 @@
+ ---
+ language:
+ - en
+ license: mit
+ library_name: multi-model-orchestrator
+ tags:
+ - ai
+ - machine-learning
+ - multimodal
+ - image-captioning
+ - text-to-image
+ - orchestration
+ - transformers
+ - pytorch
+ ---
+
+ # Multi-Model Orchestrator
+
+ A sophisticated multi-model orchestration system that manages parent-child LLM relationships, specifically integrating CLIP-GPT2 image captioner and Flickr30k text-to-image models.
+
+ ## 🚀 Features
+
+ ### **Parent Orchestrator**
+ - **Intelligent Task Routing**: Automatically routes tasks to appropriate child models
+ - **Model Management**: Handles loading, caching, and lifecycle of child models
+ - **Error Handling**: Robust error handling and recovery mechanisms
+ - **Task History**: Comprehensive logging and monitoring of all operations
+ - **Async Support**: Both synchronous and asynchronous processing modes
+
+ ### **Child Models**
+ - **CLIP-GPT2 Image Captioner**: Converts images to descriptive text captions
+ - **Flickr30k Text-to-Image**: Generates images from text descriptions
+ - **Extensible Architecture**: Easy to add new child models
+
+ ## 📦 Installation
+
+ ```bash
+ pip install git+https://huggingface.co/kunaliitkgp09/multi-model-orchestrator
+ ```
+
+ ## 🎯 Quick Start
+
+ ```python
+ from multi_model_orchestrator import SimpleMultiModelOrchestrator
+
+ # Initialize orchestrator
+ orchestrator = SimpleMultiModelOrchestrator()
+ orchestrator.initialize_models()
+
+ # Generate caption from image
+ caption = orchestrator.generate_caption("sample_image.jpg")
+ print(f"Caption: {caption}")
+
+ # Generate image from text
+ image_path = orchestrator.generate_image("A beautiful sunset over mountains")
+ print(f"Generated image: {image_path}")
+ ```
+
+ ## 🔗 Model Integration
+
+ ### **Child Model 1: CLIP-GPT2 Image Captioner**
+ - **Model**: `kunaliitkgp09/clip-gpt2-image-captioner`
+ - **Task**: Image-to-text captioning
+ - **Performance**: ~40% accuracy on test samples
+
+ ### **Child Model 2: Flickr30k Text-to-Image**
+ - **Model**: `kunaliitkgp09/flickr30k-text-to-image`
+ - **Task**: Text-to-image generation
+ - **Performance**: Fine-tuned on Flickr30k dataset
+
+ ## 📊 Usage Examples
+
+ ### **Multimodal Processing**
+ ```python
+ # Process both image and text together
+ results = orchestrator.process_multimodal_task(
+     image_path="sample_image.jpg",
+     text_prompt="A serene landscape with mountains"
+ )
+
+ print("Caption:", results["caption"])
+ print("Generated Image:", results["generated_image"])
+ ```
+
+ ### **Async Processing**
+ ```python
+ from multi_model_orchestrator import AsyncMultiModelOrchestrator
+ import asyncio
+
+ async def async_example():
+     orchestrator = AsyncMultiModelOrchestrator()
+     orchestrator.initialize_models()
+
+     results = await orchestrator.process_multimodal_async(
+         image_path="sample_image.jpg",
+         text_prompt="A futuristic cityscape"
+     )
+     return results
+
+ asyncio.run(async_example())
+ ```
+
+ ## 🎯 Use Cases
+
+ - **Content Creation**: Generate captions and images for social media
+ - **Research and Development**: Model performance comparison and prototyping
+ - **Production Systems**: Automated content generation pipelines
+ - **Educational Applications**: AI model demonstration and learning
+
+ ## 📈 Performance Metrics
+
+ - **Processing Time**: Optimized for real-time applications
+ - **Memory Usage**: Efficient GPU/CPU memory management
+ - **Success Rate**: Robust error handling and recovery
+ - **Extensibility**: Easy integration of new child models
+
+ ## 🤝 Contributing
+
+ Contributions are welcome! Please feel free to submit pull requests or open issues for:
+ - New child model integrations
+ - Performance improvements
+ - Bug fixes
+ - Documentation enhancements
+
+ ## 📄 License
+
+ This project is licensed under the MIT License.
+
+ ## 🙏 Acknowledgments
+
+ - **CLIP-GPT2 Model**: [kunaliitkgp09/clip-gpt2-image-captioner](https://huggingface.co/kunaliitkgp09/clip-gpt2-image-captioner)
+ - **Stable Diffusion Model**: [kunaliitkgp09/flickr30k-text-to-image](https://huggingface.co/kunaliitkgp09/flickr30k-text-to-image)
+ - **Hugging Face**: For providing the model hosting platform
+ - **PyTorch**: For the deep learning framework
+ - **Transformers**: For the model loading and processing utilities
+
+ ---
+
+ **Happy Orchestrating! 🚀**
build/lib/multi_model_orchestrator/__init__.py ADDED
@@ -0,0 +1,27 @@
+ """
+ Multi-Model Orchestrator: Parent-Child LLM System
+
+ A sophisticated multi-model orchestration system that manages parent-child LLM relationships,
+ specifically integrating CLIP-GPT2 image captioner and Flickr30k text-to-image models.
+
+ Author: Kunal Dhanda
+ Version: 1.0.0
+ """
+
+ from .simple_orchestrator import SimpleMultiModelOrchestrator, AsyncMultiModelOrchestrator
+ from .multi_model_orchestrator import MultiModelOrchestrator, ParentOrchestrator, ChildModel, ModelManager
+ from .demo_orchestrator import DemoMultiModelOrchestrator
+
+ __version__ = "1.0.0"
+ __author__ = "Kunal Dhanda"
+ __email__ = "[email protected]"
+
+ __all__ = [
+     "SimpleMultiModelOrchestrator",
+     "AsyncMultiModelOrchestrator",
+     "MultiModelOrchestrator",
+     "ParentOrchestrator",
+     "ChildModel",
+     "ModelManager",
+     "DemoMultiModelOrchestrator"
+ ]
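Assuming the package is installed, the names exported in `__all__` above can be exercised with the simulated demo orchestrator, which needs no model downloads (`sample_image.jpg` is a placeholder path):

```python
# Sketch: using the package's public API via the demo (simulated) orchestrator.
from multi_model_orchestrator import DemoMultiModelOrchestrator

orchestrator = DemoMultiModelOrchestrator()
orchestrator.initialize_models()
caption = orchestrator.route_task("caption", "sample_image.jpg")              # placeholder path
image_path = orchestrator.route_task("generate_image", "A quiet harbor at dawn")
print(caption)
print(image_path)
print(orchestrator.get_status())
```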
build/lib/multi_model_orchestrator/demo_orchestrator.py ADDED
@@ -0,0 +1,352 @@
+ #!/usr/bin/env python3
+ """
+ Demo Multi-Model Orchestrator
+ Shows the structure and functionality without downloading large models
+ """
+
+ import time
+ import json
+ from typing import Dict, Any, Optional
+
+ class DemoMultiModelOrchestrator:
+     """Demo orchestrator that simulates the multi-model system"""
+
+     def __init__(self, device: str = "cpu"):
+         self.device = device
+         self.caption_model_name = "kunaliitkgp09/clip-gpt2-image-captioner"
+         self.text2img_model_name = "kunaliitkgp09/flickr30k-text-to-image"
+
+         # Simulated model instances
+         self.caption_processor = None
+         self.caption_model = None
+         self.text2img_pipeline = None
+         self.tokenizer = None
+
+         # Task history
+         self.task_history = []
+
+     def initialize_models(self):
+         """Simulate model initialization"""
+         print("🔧 Initializing child models...")
+
+         try:
+             # Simulate loading captioning model
+             print("📥 Loading CLIP-GPT2 captioning model...")
+             time.sleep(1)  # Simulate loading time
+             self.caption_processor = "CLIPProcessor"
+             self.caption_model = "CLIP-GPT2-Model"
+             self.tokenizer = "GPT2Tokenizer"
+             print("✅ Captioning model loaded")
+
+             # Simulate loading text-to-image model
+             print("📥 Loading Stable Diffusion text-to-image model...")
+             time.sleep(1)  # Simulate loading time
+             self.text2img_pipeline = "StableDiffusionPipeline"
+             print("✅ Text-to-image model loaded")
+
+             print("🎉 All models initialized successfully!")
+             return True
+
+         except Exception as e:
+             print(f"❌ Error initializing models: {e}")
+             return False
+
+     def generate_caption(self, image_path: str) -> str:
+         """Simulate image captioning"""
+         start_time = time.time()
+
+         try:
+             print(f"🖼️ Processing image: {image_path}")
+             time.sleep(0.5)  # Simulate processing time
+
+             # Simulate caption generation
+             sample_captions = [
+                 "A beautiful landscape with mountains and trees",
+                 "A person standing in front of a scenic view",
+                 "A colorful sunset over the ocean",
+                 "A cozy room with warm lighting",
+                 "A busy street in a modern city"
+             ]
+
+             import random
+             caption = random.choice(sample_captions)
+
+             # Log task
+             self._log_task("caption", image_path, caption, time.time() - start_time)
+
+             return caption
+
+         except Exception as e:
+             print(f"❌ Error generating caption: {e}")
+             self._log_task("caption", image_path, None, time.time() - start_time, str(e))
+             raise
+
+     def generate_image(self, text_prompt: str, output_path: Optional[str] = None) -> str:
+         """Simulate text-to-image generation"""
+         start_time = time.time()
+
+         try:
+             print(f"🎨 Generating image from prompt: '{text_prompt}'")
+             time.sleep(1)  # Simulate generation time
+
+             # Generate unique output path if not provided
+             if output_path is None:
+                 timestamp = int(time.time())
+                 output_path = f"generated_image_{timestamp}.png"
+
+             # Simulate image generation
+             print(f"💾 Saving generated image to: {output_path}")
+
+             # Log task
+             self._log_task("generate_image", text_prompt, output_path, time.time() - start_time)
+
+             return output_path
+
+         except Exception as e:
+             print(f"❌ Error generating image: {e}")
+             self._log_task("generate_image", text_prompt, None, time.time() - start_time, str(e))
+             raise
+
+     def route_task(self, task_type: str, input_data: str) -> str:
+         """Parent model decides which child to call"""
+         print(f"🎯 Parent orchestrator routing task: {task_type}")
+
+         if task_type == "caption":
+             return self.generate_caption(input_data)
+         elif task_type == "generate_image":
+             return self.generate_image(input_data)
+         else:
+             raise ValueError("Invalid task type: choose 'caption' or 'generate_image'")
+
+     def process_multimodal_task(self, image_path: str, text_prompt: str) -> Dict[str, str]:
+         """Process a multimodal task using both child models"""
+         print("🔄 Processing multimodal task...")
+         results = {}
+
+         # Step 1: Generate caption from image
+         try:
+             print("📝 Step 1: Generating caption from image...")
+             caption = self.generate_caption(image_path)
+             results["caption"] = caption
+         except Exception as e:
+             results["caption"] = f"Error: {str(e)}"
+
+         # Step 2: Generate image from text prompt
+         try:
+             print("🎨 Step 2: Generating image from text...")
+             generated_image_path = self.generate_image(text_prompt)
+             results["generated_image"] = generated_image_path
+         except Exception as e:
+             results["generated_image"] = f"Error: {str(e)}"
+
+         # Step 3: Create analysis prompt
+         if results["caption"] and not results["caption"].startswith("Error"):
+             analysis_prompt = f"Analyze this image caption: {results['caption']}"
+             results["analysis_prompt"] = analysis_prompt
+
+         return results
+
+     def _log_task(self, task_type: str, input_data: str, output: Any,
+                   processing_time: float, error: Optional[str] = None):
+         """Log task execution"""
+         self.task_history.append({
+             "task_type": task_type,
+             "input_data": str(input_data)[:100],  # Truncate for logging
+             "output": str(output)[:200] if output else None,
+             "processing_time": processing_time,
+             "timestamp": time.time(),
+             "error": error
+         })
+
+     def get_task_history(self) -> list:
+         """Get task execution history"""
+         return self.task_history
+
+     def save_task_history(self, filepath: str = "demo_task_history.json"):
+         """Save task history to file"""
+         with open(filepath, 'w') as f:
+             json.dump(self.task_history, f, indent=2)
+         print(f"📄 Task history saved to {filepath}")
+
+     def get_status(self) -> Dict[str, Any]:
+         """Get orchestrator status"""
+         return {
+             "models_loaded": {
+                 "caption_model": self.caption_model is not None,
+                 "text2img_pipeline": self.text2img_pipeline is not None
+             },
+             "total_tasks": len(self.task_history),
+             "device": self.device,
+             "child_models": {
+                 "clip_gpt2_captioner": "kunaliitkgp09/clip-gpt2-image-captioner",
+                 "flickr30k_text2img": "kunaliitkgp09/flickr30k-text-to-image"
+             }
+         }
+
+ def demo_basic_usage():
+     """Demo 1: Basic usage of the orchestrator"""
+     print("="*60)
+     print("DEMO 1: BASIC USAGE")
+     print("="*60)
+
+     # Initialize orchestrator
+     orchestrator = DemoMultiModelOrchestrator()
+
+     # Initialize models
+     print("🚀 Initializing orchestrator...")
+     if not orchestrator.initialize_models():
+         print("❌ Failed to initialize models. Exiting.")
+         return
+
+     # Get status
+     status = orchestrator.get_status()
+     print(f"\n📊 Orchestrator Status:")
+     print(json.dumps(status, indent=2))
+
+     # Example 1: Image captioning
+     print("\n" + "-"*40)
+     print("🖼️ IMAGE CAPTIONING")
+     print("-"*40)
+     try:
+         caption = orchestrator.generate_caption("sample_image.jpg")
+         print(f"📝 Generated Caption: {caption}")
+     except Exception as e:
+         print(f"❌ Caption generation failed: {e}")
+
+     # Example 2: Text-to-image generation
+     print("\n" + "-"*40)
+     print("🎨 TEXT-TO-IMAGE GENERATION")
+     print("-"*40)
+     try:
+         image_path = orchestrator.generate_image("A beautiful sunset over mountains with a lake")
+         print(f"🖼️ Generated Image: {image_path}")
+     except Exception as e:
+         print(f"❌ Image generation failed: {e}")
+
+     # Example 3: Task routing
+     print("\n" + "-"*40)
+     print("🎯 TASK ROUTING")
+     print("-"*40)
+     try:
+         # Route caption task
+         caption = orchestrator.route_task("caption", "sample_image.jpg")
+         print(f"📝 Routed Caption: {caption}")
+
+         # Route image generation task
+         image_path = orchestrator.route_task("generate_image", "A cat sitting on a windowsill")
+         print(f"🖼️ Routed Image: {image_path}")
+     except Exception as e:
+         print(f"❌ Task routing failed: {e}")
+
+ def demo_multimodal_processing():
+     """Demo 2: Multimodal processing"""
+     print("\n" + "="*60)
+     print("DEMO 2: MULTIMODAL PROCESSING")
+     print("="*60)
+
+     orchestrator = DemoMultiModelOrchestrator()
+
+     if not orchestrator.initialize_models():
+         print("❌ Failed to initialize models. Exiting.")
+         return
+
+     # Process multimodal task
+     print("🔄 Processing multimodal task...")
+     try:
+         results = orchestrator.process_multimodal_task(
+             image_path="sample_image.jpg",
+             text_prompt="A serene landscape with mountains and a flowing river"
+         )
+
+         print("\n📊 Multimodal Results:")
+         for key, value in results.items():
+             print(f"  {key}: {value}")
+
+     except Exception as e:
+         print(f"❌ Multimodal processing failed: {e}")
+
+ def demo_task_history():
+     """Demo 3: Task history and monitoring"""
+     print("\n" + "="*60)
+     print("DEMO 3: TASK HISTORY")
+     print("="*60)
+
+     orchestrator = DemoMultiModelOrchestrator()
+
+     if not orchestrator.initialize_models():
+         print("❌ Failed to initialize models. Exiting.")
+         return
+
+     # Perform some tasks
+     print("🔄 Performing tasks to build history...")
+
+     try:
+         # Task 1
+         caption1 = orchestrator.generate_caption("image1.jpg")
+         print(f"📝 Task 1 - Caption: {caption1}")
+
+         # Task 2
+         image1 = orchestrator.generate_image("A peaceful forest scene")
+         print(f"🖼️ Task 2 - Image: {image1}")
+
+         # Task 3
+         caption2 = orchestrator.generate_caption("image2.jpg")
+         print(f"📝 Task 3 - Caption: {caption2}")
+
+     except Exception as e:
+         print(f"❌ Task execution failed: {e}")
+
+     # Display task history
+     print("\n" + "-"*40)
+     print("📋 TASK HISTORY")
+     print("-"*40)
+     history = orchestrator.get_task_history()
+
+     for i, task in enumerate(history):
+         print(f"\n📊 Task {i+1}:")
+         print(f"  Type: {task['task_type']}")
+         print(f"  Input: {task['input_data']}")
+         print(f"  Output: {task['output']}")
+         print(f"  Processing Time: {task['processing_time']:.2f}s")
+         print(f"  Success: {task.get('error') is None}")
+         if task.get('error'):
+             print(f"  Error: {task['error']}")
+
+     # Save task history
+     orchestrator.save_task_history("demo_task_history.json")
+     print(f"\n💾 Task history saved to demo_task_history.json")
+
+ def main():
+     """Run all demos"""
+     print("🎭 Multi-Model Orchestrator Demo")
+     print("This demo shows the structure and functionality of the orchestrator.")
+     print("Note: This is a simulation - actual models would be downloaded and used.")
+
+     try:
+         # Run demos
+         demo_basic_usage()
+         demo_multimodal_processing()
+         demo_task_history()
+
+         print("\n" + "="*60)
+         print("🎉 ALL DEMOS COMPLETED SUCCESSFULLY!")
+         print("="*60)
+
+         print("\n📁 Generated files:")
+         print("  - demo_task_history.json (task history)")
+
+         print("\n🔗 Real Model Links:")
+         print("  - CLIP-GPT2 Captioner: https://huggingface.co/kunaliitkgp09/clip-gpt2-image-captioner")
+         print("  - Flickr30k Text-to-Image: https://huggingface.co/kunaliitkgp09/flickr30k-text-to-image")
+
+         print("\n🚀 Next steps:")
+         print("1. Install dependencies: pip install -r multi_model_requirements.txt")
+         print("2. Run with real models: python multi_model_example.py")
+         print("3. Integrate into your applications")
+         print("4. Add more child models to the system")
+
+     except Exception as e:
+         print(f"\n❌ Error during execution: {e}")
+
+ if __name__ == "__main__":
+     main()
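One way the extensibility described in the READMEs could look in practice — a hedged sketch that adds a third simulated task type by subclassing the demo orchestrator above (the `summarize` route and `ExtendedDemoOrchestrator` are hypothetical, not part of the committed code):

```python
# Sketch: extending the demo orchestrator with an extra simulated child task.
import time
from multi_model_orchestrator import DemoMultiModelOrchestrator

class ExtendedDemoOrchestrator(DemoMultiModelOrchestrator):
    def summarize_text(self, text: str) -> str:
        """Simulated text-summarization child model."""
        start = time.time()
        summary = text[:60] + ("..." if len(text) > 60 else "")
        self._log_task("summarize", text, summary, time.time() - start)
        return summary

    def route_task(self, task_type: str, input_data: str) -> str:
        # Handle the new task type, defer everything else to the parent class.
        if task_type == "summarize":
            return self.summarize_text(input_data)
        return super().route_task(task_type, input_data)
```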
build/lib/multi_model_orchestrator/multi_model_orchestrator.py ADDED
@@ -0,0 +1,497 @@
1
+ import torch
2
+ import asyncio
3
+ import logging
4
+ import json
5
+ import time
6
+ from typing import Dict, List, Optional, Union, Any
7
+ from dataclasses import dataclass
8
+ from enum import Enum
9
+ from pathlib import Path
10
+ import traceback
11
+
12
+ from transformers import AutoProcessor, AutoModelForCausalLM, CLIPProcessor, GPT2Tokenizer
13
+ from diffusers import StableDiffusionPipeline
14
+ from PIL import Image
15
+ import numpy as np
16
+
17
+ # Configure logging
18
+ logging.basicConfig(level=logging.INFO)
19
+ logger = logging.getLogger(__name__)
20
+
21
+ class TaskType(Enum):
22
+ """Enumeration of supported task types"""
23
+ IMAGE_TO_TEXT = "image_to_text"
24
+ TEXT_TO_IMAGE = "text_to_image"
25
+ IMAGE_ANALYSIS = "image_analysis"
26
+ TEXT_GENERATION = "text_generation"
27
+ MULTIMODAL_GENERATION = "multimodal_generation"
28
+
29
+ @dataclass
30
+ class ModelConfig:
31
+ """Configuration for child models"""
32
+ name: str
33
+ model_type: str
34
+ device: str = "cuda"
35
+ max_length: int = 50
36
+ temperature: float = 0.7
37
+ top_p: float = 0.9
38
+ batch_size: int = 1
39
+
40
+ @dataclass
41
+ class TaskResult:
42
+ """Result from a child model task"""
43
+ success: bool
44
+ output: Any
45
+ model_used: str
46
+ processing_time: float
47
+ error_message: Optional[str] = None
48
+ metadata: Optional[Dict] = None
49
+
50
+ class ModelManager:
51
+ """Manages loading and caching of child models"""
52
+
53
+ def __init__(self, device: str = "cuda"):
54
+ self.device = device if torch.cuda.is_available() else "cpu"
55
+ self.models = {}
56
+ self.processors = {}
57
+ self.pipelines = {}
58
+ self.model_configs = {}
59
+
60
+ def register_model(self, name: str, config: ModelConfig):
61
+ """Register a model configuration"""
62
+ self.model_configs[name] = config
63
+
64
+ def load_model(self, name: str) -> bool:
65
+ """Load a specific model"""
66
+ if name not in self.model_configs:
67
+ logger.error(f"Model {name} not registered")
68
+ return False
69
+
70
+ config = self.model_configs[name]
71
+
72
+ try:
73
+ logger.info(f"Loading model: {name}")
74
+ start_time = time.time()
75
+
76
+ if config.model_type == "caption":
77
+ # Load CLIP-GPT2 captioning model
78
+ self.processors[name] = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch16")
79
+ self.models[name] = AutoModelForCausalLM.from_pretrained(config.name).to(self.device)
80
+
81
+ elif config.model_type == "text2img":
82
+ # Load Stable Diffusion text-to-image model
83
+ self.pipelines[name] = StableDiffusionPipeline.from_pretrained(
84
+ config.name,
85
+ torch_dtype=torch.float16 if self.device == "cuda" else torch.float32
86
+ ).to(self.device)
87
+
88
+ elif config.model_type == "llm":
89
+ # Load general language model
90
+ self.processors[name] = AutoProcessor.from_pretrained(config.name)
91
+ self.models[name] = AutoModelForCausalLM.from_pretrained(config.name).to(self.device)
92
+
93
+ load_time = time.time() - start_time
94
+ logger.info(f"Model {name} loaded successfully in {load_time:.2f}s")
95
+ return True
96
+
97
+ except Exception as e:
98
+ logger.error(f"Failed to load model {name}: {str(e)}")
99
+ return False
100
+
101
+ def get_model(self, name: str):
102
+ """Get a loaded model"""
103
+ if name in self.models:
104
+ return self.models[name]
105
+ elif name in self.pipelines:
106
+ return self.pipelines[name]
107
+ else:
108
+ raise ValueError(f"Model {name} not loaded")
109
+
110
+ def get_processor(self, name: str):
111
+ """Get a model processor"""
112
+ if name in self.processors:
113
+ return self.processors[name]
114
+ else:
115
+ raise ValueError(f"Processor for {name} not loaded")
116
+
117
+ class ChildModel:
118
+ """Base class for child models"""
119
+
120
+ def __init__(self, name: str, model_manager: ModelManager):
121
+ self.name = name
122
+ self.model_manager = model_manager
123
+ self.config = model_manager.model_configs[name]
124
+
125
+ async def process(self, input_data: Any) -> TaskResult:
126
+ """Process input data and return result"""
127
+ start_time = time.time()
128
+
129
+ try:
130
+ if self.config.model_type == "caption":
131
+ result = await self._process_caption(input_data)
132
+ elif self.config.model_type == "text2img":
133
+ result = await self._process_text2img(input_data)
134
+ elif self.config.model_type == "llm":
135
+ result = await self._process_llm(input_data)
136
+ else:
137
+ raise ValueError(f"Unknown model type: {self.config.model_type}")
138
+
139
+ processing_time = time.time() - start_time
140
+
141
+ return TaskResult(
142
+ success=True,
143
+ output=result,
144
+ model_used=self.name,
145
+ processing_time=processing_time,
146
+ metadata={"model_type": self.config.model_type}
147
+ )
148
+
149
+ except Exception as e:
150
+ processing_time = time.time() - start_time
151
+ logger.error(f"Error in model {self.name}: {str(e)}")
152
+
153
+ return TaskResult(
154
+ success=False,
155
+ output=None,
156
+ model_used=self.name,
157
+ processing_time=processing_time,
158
+ error_message=str(e)
159
+ )
160
+
161
+ async def _process_caption(self, image_path: str) -> str:
162
+ """Process image captioning task"""
163
+ # Run in thread pool to avoid blocking
164
+ loop = asyncio.get_event_loop()
165
+ return await loop.run_in_executor(None, self._caption_sync, image_path)
166
+
167
+ def _caption_sync(self, image_path: str) -> str:
168
+ """Synchronous image captioning"""
169
+ image = Image.open(image_path).convert("RGB")
170
+ processor = self.model_manager.get_processor(self.name)
171
+ model = self.model_manager.get_model(self.name)
172
+
173
+ inputs = processor(images=image, return_tensors="pt").to(self.device)
174
+
175
+ with torch.no_grad():
176
+ output = model.generate(
177
+ **inputs,
178
+ max_length=self.config.max_length,
179
+ temperature=self.config.temperature,
180
+ top_p=self.config.top_p,
181
+ do_sample=True,
182
+ pad_token_id=processor.tokenizer.eos_token_id
183
+ )
184
+
185
+ caption = processor.tokenizer.batch_decode(output, skip_special_tokens=True)[0]
186
+ return caption
187
+
188
+ async def _process_text2img(self, prompt: str) -> str:
189
+ """Process text-to-image generation task"""
190
+ loop = asyncio.get_event_loop()
191
+ return await loop.run_in_executor(None, self._text2img_sync, prompt)
192
+
193
+ def _text2img_sync(self, prompt: str) -> str:
194
+ """Synchronous text-to-image generation"""
195
+ pipeline = self.model_manager.get_model(self.name)
196
+
197
+ # Generate unique output path
198
+ timestamp = int(time.time())
199
+ output_path = f"generated_image_{timestamp}.png"
200
+
201
+ with torch.no_grad():
202
+ image = pipeline(
203
+ prompt,
204
+ num_inference_steps=20,
205
+ guidance_scale=7.5,
206
+ width=512,
207
+ height=512
208
+ ).images[0]
209
+
210
+ image.save(output_path)
211
+ return output_path
212
+
213
+ async def _process_llm(self, text: str) -> str:
214
+ """Process general language model task"""
215
+ loop = asyncio.get_event_loop()
216
+ return await loop.run_in_executor(None, self._llm_sync, text)
217
+
218
+ def _llm_sync(self, text: str) -> str:
219
+ """Synchronous language model processing"""
220
+ processor = self.model_manager.get_processor(self.name)
221
+ model = self.model_manager.get_model(self.name)
222
+
223
+ inputs = processor(text, return_tensors="pt").to(self.device)
224
+
225
+ with torch.no_grad():
226
+ output = model.generate(
227
+ **inputs,
228
+ max_length=self.config.max_length,
229
+ temperature=self.config.temperature,
230
+ top_p=self.config.top_p,
231
+ do_sample=True,
232
+ pad_token_id=processor.tokenizer.eos_token_id
233
+ )
234
+
235
+ result = processor.tokenizer.batch_decode(output, skip_special_tokens=True)[0]
236
+ return result
237
+
238
+ class ParentOrchestrator:
239
+ """Parent model that orchestrates child models"""
240
+
241
+ def __init__(self, device: str = "cuda"):
242
+ self.device = device if torch.cuda.is_available() else "cpu"
243
+ self.model_manager = ModelManager(device)
244
+ self.child_models = {}
245
+ self.task_history = []
246
+
247
+ # Register child models
248
+ self._register_child_models()
249
+
250
+ def _register_child_models(self):
251
+ """Register all child models"""
252
+ # CLIP-GPT2 Image Captioner
253
+ self.model_manager.register_model(
254
+ "clip_gpt2_captioner",
255
+ ModelConfig(
256
+ name="kunaliitkgp09/clip-gpt2-image-captioner",
257
+ model_type="caption",
258
+ device=self.device,
259
+ max_length=50,
260
+ temperature=0.7
261
+ )
262
+ )
263
+
264
+ # Flickr30k Text-to-Image
265
+ self.model_manager.register_model(
266
+ "flickr30k_text2img",
267
+ ModelConfig(
268
+ name="kunaliitkgp09/flickr30k-text-to-image",
269
+ model_type="text2img",
270
+ device=self.device,
271
+ temperature=0.8
272
+ )
273
+ )
274
+
275
+ # Optional: Add a general LLM for text processing
276
+ self.model_manager.register_model(
277
+ "gpt2_text",
278
+ ModelConfig(
279
+ name="gpt2",
280
+ model_type="llm",
281
+ device=self.device,
282
+ max_length=100,
283
+ temperature=0.8
284
+ )
285
+ )
286
+
287
+ async def initialize_models(self):
288
+ """Initialize all child models"""
289
+ logger.info("Initializing child models...")
290
+
291
+ for model_name in self.model_manager.model_configs:
292
+ success = self.model_manager.load_model(model_name)
293
+ if success:
294
+ self.child_models[model_name] = ChildModel(model_name, self.model_manager)
295
+ logger.info(f"Child model {model_name} initialized successfully")
296
+ else:
297
+ logger.error(f"Failed to initialize child model {model_name}")
298
+
299
+ async def route_task(self, task_type: TaskType, input_data: Any,
300
+ model_name: Optional[str] = None) -> TaskResult:
301
+ """Route task to appropriate child model"""
302
+
303
+ # Determine which model to use
304
+ if model_name is None:
305
+ model_name = self._select_model_for_task(task_type)
306
+
307
+ if model_name not in self.child_models:
308
+ return TaskResult(
309
+ success=False,
310
+ output=None,
311
+ model_used=model_name,
312
+ processing_time=0.0,
313
+ error_message=f"Model {model_name} not available"
314
+ )
315
+
316
+ # Process task
317
+ child_model = self.child_models[model_name]
318
+ result = await child_model.process(input_data)
319
+
320
+ # Log task
321
+ self.task_history.append({
322
+ "task_type": task_type.value,
323
+ "model_used": model_name,
324
+ "input_data": str(input_data)[:100], # Truncate for logging
325
+ "success": result.success,
326
+ "processing_time": result.processing_time,
327
+ "timestamp": time.time()
328
+ })
329
+
330
+ return result
331
+
332
+ def _select_model_for_task(self, task_type: TaskType) -> str:
333
+ """Select appropriate model for task type"""
334
+ if task_type == TaskType.IMAGE_TO_TEXT:
335
+ return "clip_gpt2_captioner"
336
+ elif task_type == TaskType.TEXT_TO_IMAGE:
337
+ return "flickr30k_text2img"
338
+ elif task_type == TaskType.TEXT_GENERATION:
339
+ return "gpt2_text"
340
+ else:
341
+ raise ValueError(f"No model available for task type: {task_type}")
342
+
343
+ async def process_multimodal_task(self, image_path: str, text_prompt: str) -> Dict[str, TaskResult]:
344
+ """Process a multimodal task using multiple child models"""
345
+ results = {}
346
+
347
+ # Step 1: Generate caption from image
348
+ caption_result = await self.route_task(
349
+ TaskType.IMAGE_TO_TEXT,
350
+ image_path,
351
+ "clip_gpt2_captioner"
352
+ )
353
+ results["caption"] = caption_result
354
+
355
+ # Step 2: Generate image from text prompt
356
+ image_result = await self.route_task(
357
+ TaskType.TEXT_TO_IMAGE,
358
+ text_prompt,
359
+ "flickr30k_text2img"
360
+ )
361
+ results["generated_image"] = image_result
362
+
363
+ # Step 3: Generate text analysis using LLM
364
+ if caption_result.success:
365
+ analysis_prompt = f"Analyze this image caption: {caption_result.output}"
366
+ analysis_result = await self.route_task(
367
+ TaskType.TEXT_GENERATION,
368
+ analysis_prompt,
369
+ "gpt2_text"
370
+ )
371
+ results["analysis"] = analysis_result
372
+
373
+ return results
374
+
375
+ def get_task_history(self) -> List[Dict]:
376
+ """Get task execution history"""
377
+ return self.task_history
378
+
379
+ def get_model_status(self) -> Dict[str, bool]:
380
+ """Get status of all child models"""
381
+ return {
382
+ name: name in self.child_models
383
+ for name in self.model_manager.model_configs
384
+ }
385
+
386
+ def save_task_history(self, filepath: str):
387
+ """Save task history to file"""
388
+ with open(filepath, 'w') as f:
389
+ json.dump(self.task_history, f, indent=2)
390
+ logger.info(f"Task history saved to {filepath}")
391
+
392
+ class MultiModelOrchestrator:
393
+ """Main orchestrator class with simplified interface"""
394
+
395
+ def __init__(self, device: str = "cuda"):
396
+ self.parent = ParentOrchestrator(device)
397
+ self.initialized = False
398
+
399
+ async def initialize(self):
400
+ """Initialize the orchestrator"""
401
+ await self.parent.initialize_models()
402
+ self.initialized = True
403
+ logger.info("Multi-model orchestrator initialized successfully")
404
+
405
+ async def generate_caption(self, image_path: str) -> str:
406
+ """Generate caption from image"""
407
+ if not self.initialized:
408
+ await self.initialize()
409
+
410
+ result = await self.parent.route_task(TaskType.IMAGE_TO_TEXT, image_path)
411
+
412
+ if result.success:
413
+ return result.output
414
+ else:
415
+ raise RuntimeError(f"Caption generation failed: {result.error_message}")
416
+
417
+ async def generate_image(self, text_prompt: str) -> str:
418
+ """Generate image from text"""
419
+ if not self.initialized:
420
+ await self.initialize()
421
+
422
+ result = await self.parent.route_task(TaskType.TEXT_TO_IMAGE, text_prompt)
423
+
424
+ if result.success:
425
+ return result.output
426
+ else:
427
+ raise RuntimeError(f"Image generation failed: {result.error_message}")
428
+
429
+ async def process_multimodal(self, image_path: str, text_prompt: str) -> Dict[str, Any]:
430
+ """Process multimodal task"""
431
+ if not self.initialized:
432
+ await self.initialize()
433
+
434
+ results = await self.parent.process_multimodal_task(image_path, text_prompt)
435
+
436
+ # Extract outputs
437
+ output = {}
438
+ for key, result in results.items():
439
+ if result.success:
440
+ output[key] = result.output
441
+ else:
442
+ output[key] = f"Error: {result.error_message}"
443
+
444
+ return output
445
+
446
+ def get_status(self) -> Dict[str, Any]:
447
+ """Get orchestrator status"""
448
+ return {
449
+ "initialized": self.initialized,
450
+ "model_status": self.parent.get_model_status(),
451
+ "total_tasks": len(self.parent.get_task_history())
452
+ }
453
+
454
+ # Usage examples
455
+ async def main():
456
+ """Example usage of the multi-model orchestrator"""
457
+
458
+ # Initialize orchestrator
459
+ orchestrator = MultiModelOrchestrator()
460
+ await orchestrator.initialize()
461
+
462
+ print("Multi-Model Orchestrator Status:")
463
+ print(json.dumps(orchestrator.get_status(), indent=2))
464
+
465
+ # Example 1: Image captioning
466
+ try:
467
+ # Note: Replace with actual image path
468
+ caption = await orchestrator.generate_caption("sample_image.jpg")
469
+ print(f"\nGenerated Caption: {caption}")
470
+ except Exception as e:
471
+ print(f"Caption generation error: {e}")
472
+
473
+ # Example 2: Text-to-image generation
474
+ try:
475
+ image_path = await orchestrator.generate_image("A beautiful sunset over mountains")
476
+ print(f"\nGenerated Image saved at: {image_path}")
477
+ except Exception as e:
478
+ print(f"Image generation error: {e}")
479
+
480
+ # Example 3: Multimodal processing
481
+ try:
482
+ multimodal_result = await orchestrator.process_multimodal(
483
+ "sample_image.jpg",
484
+ "A serene landscape with mountains"
485
+ )
486
+ print(f"\nMultimodal Results:")
487
+ for key, value in multimodal_result.items():
488
+ print(f" {key}: {value}")
489
+ except Exception as e:
490
+ print(f"Multimodal processing error: {e}")
491
+
492
+ # Save task history
493
+ orchestrator.parent.save_task_history("task_history.json")
494
+
495
+ if __name__ == "__main__":
496
+ # Run the example
497
+ asyncio.run(main())
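A minimal usage sketch for the orchestrator file above, driving `route_task` directly with an explicit child-model override instead of the built-in `main()` example. The import path, the `sample_image.jpg` input, and running on CUDA are illustrative assumptions, not something this commit guarantees.

```python
# Hedged sketch: assumes the package installs as `multi_model_orchestrator`
# and that a local sample_image.jpg exists.
import asyncio

from multi_model_orchestrator.multi_model_orchestrator import (  # assumed import path
    MultiModelOrchestrator,
    TaskType,
)

async def run_once() -> None:
    orchestrator = MultiModelOrchestrator(device="cuda")
    await orchestrator.initialize()

    # Bypass _select_model_for_task by naming the child model explicitly.
    result = await orchestrator.parent.route_task(
        TaskType.IMAGE_TO_TEXT,
        "sample_image.jpg",        # placeholder input path
        "clip_gpt2_captioner",     # explicit child-model choice
    )
    print(result.model_used, result.success, result.output)

if __name__ == "__main__":
    asyncio.run(run_once())
```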
build/lib/multi_model_orchestrator/simple_orchestrator.py ADDED
@@ -0,0 +1,323 @@
1
+ #!/usr/bin/env python3
2
+ """
3
+ Simplified Multi-Model Orchestrator
4
+ Based on the original code structure with enhanced functionality
5
+ """
6
+
7
+ import asyncio
8
+ import torch
9
+ from transformers import CLIPProcessor, GPT2Tokenizer
10
+ from diffusers import StableDiffusionPipeline
11
+ from PIL import Image
12
+ import time
13
+ import json
14
+ from typing import Dict, Any, Optional
15
+
16
+ class SimpleMultiModelOrchestrator:
17
+ """Simplified orchestrator matching the original code structure"""
18
+
19
+ def __init__(self, device: str = "cuda"):
20
+ self.device = device if torch.cuda.is_available() else "cpu"
21
+ self.caption_model_name = "kunaliitkgp09/clip-gpt2-image-captioner"
22
+ self.text2img_model_name = "kunaliitkgp09/flickr30k-text-to-image"
23
+
24
+ # Model instances
25
+ self.caption_processor = None
26
+ self.caption_model = None
27
+ self.text2img_pipeline = None
28
+ self.tokenizer = None
29
+
30
+ # Task history
31
+ self.task_history = []
32
+
33
+ def initialize_models(self):
34
+ """Initialize all child models"""
35
+ print("Initializing child models...")
36
+
37
+ try:
38
+ # Load captioning model
39
+ print("Loading CLIP-GPT2 captioning model...")
40
+ self.caption_processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch16")
41
+ self.caption_model = torch.hub.load('huggingface/pytorch-transformers', 'model',
42
+ self.caption_model_name).to(self.device)
43
+ self.tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
44
+ self.tokenizer.pad_token = self.tokenizer.eos_token
45
+ print("✓ Captioning model loaded")
46
+
47
+ # Load text-to-image model
48
+ print("Loading Stable Diffusion text-to-image model...")
49
+ self.text2img_pipeline = StableDiffusionPipeline.from_pretrained(
50
+ self.text2img_model_name,
51
+ torch_dtype=torch.float16 if self.device == "cuda" else torch.float32
52
+ ).to(self.device)
53
+ print("✓ Text-to-image model loaded")
54
+
55
+ print("All models initialized successfully!")
56
+ return True
57
+
58
+ except Exception as e:
59
+ print(f"Error initializing models: {e}")
60
+ return False
61
+
62
+ def generate_caption(self, image_path: str) -> str:
63
+ """Child Model 1 - Image Captioning"""
64
+ start_time = time.time()
65
+
66
+ try:
67
+ image = Image.open(image_path).convert("RGB")
68
+
69
+ # Process image with CLIP
70
+ inputs = self.caption_processor(images=image, return_tensors="pt").to(self.device)
71
+
72
+ # Generate caption
73
+ with torch.no_grad():
74
+ output = self.caption_model.generate(
75
+ **inputs,
76
+ max_length=50,
77
+ temperature=0.7,
78
+ do_sample=True,
79
+ pad_token_id=self.tokenizer.eos_token_id
80
+ )
81
+
82
+ caption = self.tokenizer.batch_decode(output, skip_special_tokens=True)[0]
83
+
84
+ # Log task
85
+ self._log_task("caption", image_path, caption, time.time() - start_time)
86
+
87
+ return caption
88
+
89
+ except Exception as e:
90
+ print(f"Error generating caption: {e}")
91
+ self._log_task("caption", image_path, None, time.time() - start_time, str(e))
92
+ raise
93
+
94
+ def generate_image(self, text_prompt: str, output_path: Optional[str] = None) -> str:
95
+ """Child Model 2 - Text-to-Image"""
96
+ start_time = time.time()
97
+
98
+ try:
99
+ # Generate unique output path if not provided
100
+ if output_path is None:
101
+ timestamp = int(time.time())
102
+ output_path = f"generated_image_{timestamp}.png"
103
+
104
+ # Generate image
105
+ with torch.no_grad():
106
+ image = self.text2img_pipeline(
107
+ text_prompt,
108
+ num_inference_steps=20,
109
+ guidance_scale=7.5,
110
+ width=512,
111
+ height=512
112
+ ).images[0]
113
+
114
+ # Save image
115
+ image.save(output_path)
116
+
117
+ # Log task
118
+ self._log_task("generate_image", text_prompt, output_path, time.time() - start_time)
119
+
120
+ return output_path
121
+
122
+ except Exception as e:
123
+ print(f"Error generating image: {e}")
124
+ self._log_task("generate_image", text_prompt, None, time.time() - start_time, str(e))
125
+ raise
126
+
127
+ def route_task(self, task_type: str, input_data: str) -> str:
128
+ """Parent model decides which child to call"""
129
+ if task_type == "caption":
130
+ return self.generate_caption(input_data)
131
+ elif task_type == "generate_image":
132
+ return self.generate_image(input_data)
133
+ else:
134
+ raise ValueError("Invalid task type: choose 'caption' or 'generate_image'")
135
+
136
+ def process_multimodal_task(self, image_path: str, text_prompt: str) -> Dict[str, str]:
137
+ """Process a multimodal task using both child models"""
138
+ results = {}
139
+
140
+ # Step 1: Generate caption from image
141
+ try:
142
+ caption = self.generate_caption(image_path)
143
+ results["caption"] = caption
144
+ except Exception as e:
145
+ results["caption"] = f"Error: {str(e)}"
146
+
147
+ # Step 2: Generate image from text prompt
148
+ try:
149
+ generated_image_path = self.generate_image(text_prompt)
150
+ results["generated_image"] = generated_image_path
151
+ except Exception as e:
152
+ results["generated_image"] = f"Error: {str(e)}"
153
+
154
+ # Step 3: Create analysis prompt
155
+ if results["caption"] and not results["caption"].startswith("Error"):
156
+ analysis_prompt = f"Analyze this image caption: {results['caption']}"
157
+ results["analysis_prompt"] = analysis_prompt
158
+
159
+ return results
160
+
161
+ def _log_task(self, task_type: str, input_data: str, output: Any,
162
+ processing_time: float, error: Optional[str] = None):
163
+ """Log task execution"""
164
+ self.task_history.append({
165
+ "task_type": task_type,
166
+ "input_data": str(input_data)[:100], # Truncate for logging
167
+ "output": str(output)[:200] if output else None,
168
+ "processing_time": processing_time,
169
+ "timestamp": time.time(),
170
+ "error": error
171
+ })
172
+
173
+ def get_task_history(self) -> list:
174
+ """Get task execution history"""
175
+ return self.task_history
176
+
177
+ def save_task_history(self, filepath: str = "task_history.json"):
178
+ """Save task history to file"""
179
+ with open(filepath, 'w') as f:
180
+ json.dump(self.task_history, f, indent=2)
181
+ print(f"Task history saved to {filepath}")
182
+
183
+ def get_status(self) -> Dict[str, Any]:
184
+ """Get orchestrator status"""
185
+ return {
186
+ "models_loaded": {
187
+ "caption_model": self.caption_model is not None,
188
+ "text2img_pipeline": self.text2img_pipeline is not None
189
+ },
190
+ "total_tasks": len(self.task_history),
191
+ "device": self.device
192
+ }
193
+
194
+ # Async version for better performance
195
+ class AsyncMultiModelOrchestrator(SimpleMultiModelOrchestrator):
196
+ """Async version of the orchestrator"""
197
+
198
+ async def generate_caption_async(self, image_path: str) -> str:
199
+ """Async image captioning"""
200
+ loop = asyncio.get_running_loop()
201
+ return await loop.run_in_executor(None, self.generate_caption, image_path)
202
+
203
+ async def generate_image_async(self, text_prompt: str, output_path: Optional[str] = None) -> str:
204
+ """Async text-to-image generation"""
205
+ loop = asyncio.get_running_loop()
206
+ return await loop.run_in_executor(None, self.generate_image, text_prompt, output_path)
207
+
208
+ async def process_multimodal_async(self, image_path: str, text_prompt: str) -> Dict[str, str]:
209
+ """Async multimodal processing"""
210
+ # Run both tasks concurrently
211
+ caption_task = self.generate_caption_async(image_path)
212
+ image_task = self.generate_image_async(text_prompt)
213
+
214
+ results = {}
215
+
216
+ try:
217
+ # Wait for both tasks to complete
218
+ caption, generated_image_path = await asyncio.gather(
219
+ caption_task, image_task, return_exceptions=True
220
+ )
221
+
222
+ # Handle results
223
+ if isinstance(caption, Exception):
224
+ results["caption"] = f"Error: {str(caption)}"
225
+ else:
226
+ results["caption"] = caption
227
+
228
+ if isinstance(generated_image_path, Exception):
229
+ results["generated_image"] = f"Error: {str(generated_image_path)}"
230
+ else:
231
+ results["generated_image"] = generated_image_path
232
+
233
+ # Create analysis prompt
234
+ if results["caption"] and not results["caption"].startswith("Error"):
235
+ analysis_prompt = f"Analyze this image caption: {results['caption']}"
236
+ results["analysis_prompt"] = analysis_prompt
237
+
238
+ except Exception as e:
239
+ results["error"] = f"Multimodal processing failed: {str(e)}"
240
+
241
+ return results
242
+
243
+ # Usage examples
244
+ def main():
245
+ """Example usage of the multi-model orchestrator"""
246
+
247
+ # Initialize orchestrator
248
+ orchestrator = SimpleMultiModelOrchestrator()
249
+
250
+ # Initialize models
251
+ if not orchestrator.initialize_models():
252
+ print("Failed to initialize models. Exiting.")
253
+ return
254
+
255
+ print("\nMulti-Model Orchestrator Status:")
256
+ print(json.dumps(orchestrator.get_status(), indent=2))
257
+
258
+ # Example 1: Image captioning
259
+ try:
260
+ # Note: Replace with actual image path
261
+ caption = orchestrator.route_task("caption", "sample_image.jpg")
262
+ print(f"\nGenerated Caption: {caption}")
263
+ except Exception as e:
264
+ print(f"Caption generation error: {e}")
265
+
266
+ # Example 2: Text-to-image generation
267
+ try:
268
+ image_path = orchestrator.route_task("generate_image", "A beautiful sunset over mountains")
269
+ print(f"\nGenerated Image saved at: {image_path}")
270
+ except Exception as e:
271
+ print(f"Image generation error: {e}")
272
+
273
+ # Example 3: Multimodal processing
274
+ try:
275
+ multimodal_result = orchestrator.process_multimodal_task(
276
+ "sample_image.jpg",
277
+ "A serene landscape with mountains"
278
+ )
279
+ print(f"\nMultimodal Results:")
280
+ for key, value in multimodal_result.items():
281
+ print(f" {key}: {value}")
282
+ except Exception as e:
283
+ print(f"Multimodal processing error: {e}")
284
+
285
+ # Save task history
286
+ orchestrator.save_task_history()
287
+
288
+ async def async_main():
289
+ """Async example usage"""
290
+
291
+ # Initialize async orchestrator
292
+ orchestrator = AsyncMultiModelOrchestrator()
293
+
294
+ # Initialize models
295
+ if not orchestrator.initialize_models():
296
+ print("Failed to initialize models. Exiting.")
297
+ return
298
+
299
+ print("\nAsync Multi-Model Orchestrator Status:")
300
+ print(json.dumps(orchestrator.get_status(), indent=2))
301
+
302
+ # Example: Concurrent multimodal processing
303
+ try:
304
+ multimodal_result = await orchestrator.process_multimodal_async(
305
+ "sample_image.jpg",
306
+ "A beautiful sunset over mountains"
307
+ )
308
+ print(f"\nAsync Multimodal Results:")
309
+ for key, value in multimodal_result.items():
310
+ print(f" {key}: {value}")
311
+ except Exception as e:
312
+ print(f"Async multimodal processing error: {e}")
313
+
314
+ # Save task history
315
+ orchestrator.save_task_history("async_task_history.json")
316
+
317
+ if __name__ == "__main__":
318
+ print("Running synchronous example...")
319
+ main()
320
+
321
+ print("\n" + "="*50)
322
+ print("Running async example...")
323
+ asyncio.run(async_main())
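As a follow-on, a hedged sketch of fanning several prompts through the `AsyncMultiModelOrchestrator` defined above with `asyncio.gather`. The import path and prompts are placeholders, and whether the pipelines tolerate concurrent GPU use depends on available memory, so treat this as an illustration rather than a benchmarked pattern.

```python
# Illustrative only: batch text-to-image through the async orchestrator above.
import asyncio

from multi_model_orchestrator.simple_orchestrator import AsyncMultiModelOrchestrator  # assumed path

async def batch_generate(prompts: list[str]) -> list[str]:
    orchestrator = AsyncMultiModelOrchestrator()
    if not orchestrator.initialize_models():
        raise RuntimeError("Model initialization failed")
    # Each call is offloaded to a worker thread via run_in_executor inside the class.
    return await asyncio.gather(*(orchestrator.generate_image_async(p) for p in prompts))

if __name__ == "__main__":
    paths = asyncio.run(batch_generate(["a foggy harbor at dawn", "a red bicycle on a bridge"]))
    print(paths)
```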
dist/multi_model_orchestrator-1.0.0-py3-none-any.whl ADDED
Binary file (16.1 kB). View file
 
dist/multi_model_orchestrator-1.0.0.tar.gz ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:49e6ac00733a0b80bddbc4b49c8785b73b7d9741094e5ec11e56bb8ffd014015
3
+ size 20248
main.py ADDED
@@ -0,0 +1,56 @@
1
+ #!/usr/bin/env python3
2
+ """
3
+ Main entry point for Multi-Model Orchestrator
4
+ """
5
+
6
+ import sys
7
+ import argparse
8
+ from multi_model_orchestrator import DemoMultiModelOrchestrator, SimpleMultiModelOrchestrator
9
+
10
+ def main():
11
+ parser = argparse.ArgumentParser(description="Multi-Model Orchestrator")
12
+ parser.add_argument("--demo", action="store_true", help="Run demo mode")
13
+ parser.add_argument("--real", action="store_true", help="Run with real models")
14
+ parser.add_argument("--caption", type=str, help="Generate caption for image")
15
+ parser.add_argument("--generate-image", type=str, help="Generate image from text")
16
+ parser.add_argument("--multimodal", nargs=2, metavar=("IMAGE", "TEXT"),
17
+ help="Process multimodal task (image_path text_prompt)")
18
+
19
+ args = parser.parse_args()
20
+
21
+ if args.demo:
22
+ # Run demo
23
+ from multi_model_orchestrator.demo_orchestrator import main as demo_main
24
+ demo_main()
25
+ elif args.real:
26
+ # Run with real models
27
+ print("Real model mode - requires model downloads")
28
+ print("Use: python -m multi_model_orchestrator.multi_model_example")
29
+ elif args.caption:
30
+ # Generate caption
31
+ orchestrator = DemoMultiModelOrchestrator()
32
+ orchestrator.initialize_models()
33
+ caption = orchestrator.generate_caption(args.caption)
34
+ print(f"Caption: {caption}")
35
+ elif args.generate_image:
36
+ # Generate image
37
+ orchestrator = DemoMultiModelOrchestrator()
38
+ orchestrator.initialize_models()
39
+ image_path = orchestrator.generate_image(args.generate_image)
40
+ print(f"Generated image: {image_path}")
41
+ elif args.multimodal:
42
+ # Multimodal processing
43
+ image_path, text_prompt = args.multimodal
44
+ orchestrator = DemoMultiModelOrchestrator()
45
+ orchestrator.initialize_models()
46
+ results = orchestrator.process_multimodal_task(image_path, text_prompt)
47
+ print("Results:")
48
+ for key, value in results.items():
49
+ print(f" {key}: {value}")
50
+ else:
51
+ # Default: run demo
52
+ from multi_model_orchestrator.demo_orchestrator import main as demo_main
53
+ demo_main()
54
+
55
+ if __name__ == "__main__":
56
+ main()
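The CLI branches in `main.py` can also be reached programmatically; the sketch below mirrors the `--multimodal` branch and calls `DemoMultiModelOrchestrator` the same way `main.py` does. The image path and prompt are placeholders.

```python
# Programmatic equivalent of: python main.py --multimodal IMAGE TEXT
from multi_model_orchestrator import DemoMultiModelOrchestrator  # as imported in main.py

def run_multimodal(image_path: str, text_prompt: str) -> dict:
    orchestrator = DemoMultiModelOrchestrator()
    orchestrator.initialize_models()
    return orchestrator.process_multimodal_task(image_path, text_prompt)

if __name__ == "__main__":
    for key, value in run_multimodal("sample_image.jpg", "A serene landscape").items():
        print(f"{key}: {value}")
```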
multi_model_env/.gitignore ADDED
@@ -0,0 +1,2 @@
1
+ # Created by venv; see https://docs.python.org/3/library/venv.html
2
+ *
multi_model_env/bin/Activate.ps1 ADDED
@@ -0,0 +1,248 @@
1
+ <#
2
+ .Synopsis
3
+ Activate a Python virtual environment for the current PowerShell session.
4
+
5
+ .Description
6
+ Pushes the python executable for a virtual environment to the front of the
7
+ $Env:PATH environment variable and sets the prompt to signify that you are
8
+ in a Python virtual environment. Makes use of the command line switches as
9
+ well as the `pyvenv.cfg` file values present in the virtual environment.
10
+
11
+ .Parameter VenvDir
12
+ Path to the directory that contains the virtual environment to activate. The
13
+ default value for this is the parent of the directory that the Activate.ps1
14
+ script is located within.
15
+
16
+ .Parameter Prompt
17
+ The prompt prefix to display when this virtual environment is activated. By
18
+ default, this prompt is the name of the virtual environment folder (VenvDir)
19
+ surrounded by parentheses and followed by a single space (ie. '(.venv) ').
20
+
21
+ .Example
22
+ Activate.ps1
23
+ Activates the Python virtual environment that contains the Activate.ps1 script.
24
+
25
+ .Example
26
+ Activate.ps1 -Verbose
27
+ Activates the Python virtual environment that contains the Activate.ps1 script,
28
+ and shows extra information about the activation as it executes.
29
+
30
+ .Example
31
+ Activate.ps1 -VenvDir C:\Users\MyUser\Common\.venv
32
+ Activates the Python virtual environment located in the specified location.
33
+
34
+ .Example
35
+ Activate.ps1 -Prompt "MyPython"
36
+ Activates the Python virtual environment that contains the Activate.ps1 script,
37
+ and prefixes the current prompt with the specified string (surrounded in
38
+ parentheses) while the virtual environment is active.
39
+
40
+ .Notes
41
+ On Windows, it may be required to enable this Activate.ps1 script by setting the
42
+ execution policy for the user. You can do this by issuing the following PowerShell
43
+ command:
44
+
45
+ PS C:\> Set-ExecutionPolicy -ExecutionPolicy RemoteSigned -Scope CurrentUser
46
+
47
+ For more information on Execution Policies:
48
+ https://go.microsoft.com/fwlink/?LinkID=135170
49
+
50
+ #>
51
+ Param(
52
+ [Parameter(Mandatory = $false)]
53
+ [String]
54
+ $VenvDir,
55
+ [Parameter(Mandatory = $false)]
56
+ [String]
57
+ $Prompt
58
+ )
59
+
60
+ <# Function declarations --------------------------------------------------- #>
61
+
62
+ <#
63
+ .Synopsis
64
+ Remove all shell session elements added by the Activate script, including the
65
+ addition of the virtual environment's Python executable from the beginning of
66
+ the PATH variable.
67
+
68
+ .Parameter NonDestructive
69
+ If present, do not remove this function from the global namespace for the
70
+ session.
71
+
72
+ #>
73
+ function global:deactivate ([switch]$NonDestructive) {
74
+ # Revert to original values
75
+
76
+ # The prior prompt:
77
+ if (Test-Path -Path Function:_OLD_VIRTUAL_PROMPT) {
78
+ Copy-Item -Path Function:_OLD_VIRTUAL_PROMPT -Destination Function:prompt
79
+ Remove-Item -Path Function:_OLD_VIRTUAL_PROMPT
80
+ }
81
+
82
+ # The prior PYTHONHOME:
83
+ if (Test-Path -Path Env:_OLD_VIRTUAL_PYTHONHOME) {
84
+ Copy-Item -Path Env:_OLD_VIRTUAL_PYTHONHOME -Destination Env:PYTHONHOME
85
+ Remove-Item -Path Env:_OLD_VIRTUAL_PYTHONHOME
86
+ }
87
+
88
+ # The prior PATH:
89
+ if (Test-Path -Path Env:_OLD_VIRTUAL_PATH) {
90
+ Copy-Item -Path Env:_OLD_VIRTUAL_PATH -Destination Env:PATH
91
+ Remove-Item -Path Env:_OLD_VIRTUAL_PATH
92
+ }
93
+
94
+ # Just remove the VIRTUAL_ENV altogether:
95
+ if (Test-Path -Path Env:VIRTUAL_ENV) {
96
+ Remove-Item -Path env:VIRTUAL_ENV
97
+ }
98
+
99
+ # Just remove VIRTUAL_ENV_PROMPT altogether.
100
+ if (Test-Path -Path Env:VIRTUAL_ENV_PROMPT) {
101
+ Remove-Item -Path env:VIRTUAL_ENV_PROMPT
102
+ }
103
+
104
+ # Just remove the _PYTHON_VENV_PROMPT_PREFIX altogether:
105
+ if (Get-Variable -Name "_PYTHON_VENV_PROMPT_PREFIX" -ErrorAction SilentlyContinue) {
106
+ Remove-Variable -Name _PYTHON_VENV_PROMPT_PREFIX -Scope Global -Force
107
+ }
108
+
109
+ # Leave deactivate function in the global namespace if requested:
110
+ if (-not $NonDestructive) {
111
+ Remove-Item -Path function:deactivate
112
+ }
113
+ }
114
+
115
+ <#
116
+ .Description
117
+ Get-PyVenvConfig parses the values from the pyvenv.cfg file located in the
118
+ given folder, and returns them in a map.
119
+
120
+ For each line in the pyvenv.cfg file, if that line can be parsed into exactly
121
+ two strings separated by `=` (with any amount of whitespace surrounding the =)
122
+ then it is considered a `key = value` line. The left hand string is the key,
123
+ the right hand is the value.
124
+
125
+ If the value starts with a `'` or a `"` then the first and last character is
126
+ stripped from the value before being captured.
127
+
128
+ .Parameter ConfigDir
129
+ Path to the directory that contains the `pyvenv.cfg` file.
130
+ #>
131
+ function Get-PyVenvConfig(
132
+ [String]
133
+ $ConfigDir
134
+ ) {
135
+ Write-Verbose "Given ConfigDir=$ConfigDir, obtain values in pyvenv.cfg"
136
+
137
+ # Ensure the file exists, and issue a warning if it doesn't (but still allow the function to continue).
138
+ $pyvenvConfigPath = Join-Path -Resolve -Path $ConfigDir -ChildPath 'pyvenv.cfg' -ErrorAction Continue
139
+
140
+ # An empty map will be returned if no config file is found.
141
+ $pyvenvConfig = @{ }
142
+
143
+ if ($pyvenvConfigPath) {
144
+
145
+ Write-Verbose "File exists, parse `key = value` lines"
146
+ $pyvenvConfigContent = Get-Content -Path $pyvenvConfigPath
147
+
148
+ $pyvenvConfigContent | ForEach-Object {
149
+ $keyval = $PSItem -split "\s*=\s*", 2
150
+ if ($keyval[0] -and $keyval[1]) {
151
+ $val = $keyval[1]
152
+
153
+ # Remove extraneous quotations around a string value.
154
+ if ("'""".Contains($val.Substring(0, 1))) {
155
+ $val = $val.Substring(1, $val.Length - 2)
156
+ }
157
+
158
+ $pyvenvConfig[$keyval[0]] = $val
159
+ Write-Verbose "Adding Key: '$($keyval[0])'='$val'"
160
+ }
161
+ }
162
+ }
163
+ return $pyvenvConfig
164
+ }
165
+
166
+
167
+ <# Begin Activate script --------------------------------------------------- #>
168
+
169
+ # Determine the containing directory of this script
170
+ $VenvExecPath = Split-Path -Parent $MyInvocation.MyCommand.Definition
171
+ $VenvExecDir = Get-Item -Path $VenvExecPath
172
+
173
+ Write-Verbose "Activation script is located in path: '$VenvExecPath'"
174
+ Write-Verbose "VenvExecDir Fullname: '$($VenvExecDir.FullName)"
175
+ Write-Verbose "VenvExecDir Name: '$($VenvExecDir.Name)"
176
+
177
+ # Set values required in priority: CmdLine, ConfigFile, Default
178
+ # First, get the location of the virtual environment, it might not be
179
+ # VenvExecDir if specified on the command line.
180
+ if ($VenvDir) {
181
+ Write-Verbose "VenvDir given as parameter, using '$VenvDir' to determine values"
182
+ }
183
+ else {
184
+ Write-Verbose "VenvDir not given as a parameter, using parent directory name as VenvDir."
185
+ $VenvDir = $VenvExecDir.Parent.FullName.TrimEnd("\\/")
186
+ Write-Verbose "VenvDir=$VenvDir"
187
+ }
188
+
189
+ # Next, read the `pyvenv.cfg` file to determine any required value such
190
+ # as `prompt`.
191
+ $pyvenvCfg = Get-PyVenvConfig -ConfigDir $VenvDir
192
+
193
+ # Next, set the prompt from the command line, or the config file, or
194
+ # just use the name of the virtual environment folder.
195
+ if ($Prompt) {
196
+ Write-Verbose "Prompt specified as argument, using '$Prompt'"
197
+ }
198
+ else {
199
+ Write-Verbose "Prompt not specified as argument to script, checking pyvenv.cfg value"
200
+ if ($pyvenvCfg -and $pyvenvCfg['prompt']) {
201
+ Write-Verbose " Setting based on value in pyvenv.cfg='$($pyvenvCfg['prompt'])'"
202
+ $Prompt = $pyvenvCfg['prompt'];
203
+ }
204
+ else {
205
+ Write-Verbose " Setting prompt based on parent's directory's name. (Is the directory name passed to venv module when creating the virtual environment)"
206
+ Write-Verbose " Got leaf-name of $VenvDir='$(Split-Path -Path $venvDir -Leaf)'"
207
+ $Prompt = Split-Path -Path $venvDir -Leaf
208
+ }
209
+ }
210
+
211
+ Write-Verbose "Prompt = '$Prompt'"
212
+ Write-Verbose "VenvDir='$VenvDir'"
213
+
214
+ # Deactivate any currently active virtual environment, but leave the
215
+ # deactivate function in place.
216
+ deactivate -nondestructive
217
+
218
+ # Now set the environment variable VIRTUAL_ENV, used by many tools to determine
219
+ # that there is an activated venv.
220
+ $env:VIRTUAL_ENV = $VenvDir
221
+
222
+ $env:VIRTUAL_ENV_PROMPT = $Prompt
223
+
224
+ if (-not $Env:VIRTUAL_ENV_DISABLE_PROMPT) {
225
+
226
+ Write-Verbose "Setting prompt to '$Prompt'"
227
+
228
+ # Set the prompt to include the env name
229
+ # Make sure _OLD_VIRTUAL_PROMPT is global
230
+ function global:_OLD_VIRTUAL_PROMPT { "" }
231
+ Copy-Item -Path function:prompt -Destination function:_OLD_VIRTUAL_PROMPT
232
+ New-Variable -Name _PYTHON_VENV_PROMPT_PREFIX -Description "Python virtual environment prompt prefix" -Scope Global -Option ReadOnly -Visibility Public -Value $Prompt
233
+
234
+ function global:prompt {
235
+ Write-Host -NoNewline -ForegroundColor Green "($_PYTHON_VENV_PROMPT_PREFIX) "
236
+ _OLD_VIRTUAL_PROMPT
237
+ }
238
+ }
239
+
240
+ # Clear PYTHONHOME
241
+ if (Test-Path -Path Env:PYTHONHOME) {
242
+ Copy-Item -Path Env:PYTHONHOME -Destination Env:_OLD_VIRTUAL_PYTHONHOME
243
+ Remove-Item -Path Env:PYTHONHOME
244
+ }
245
+
246
+ # Add the venv to the PATH
247
+ Copy-Item -Path Env:PATH -Destination Env:_OLD_VIRTUAL_PATH
248
+ $Env:PATH = "$VenvExecDir$([System.IO.Path]::PathSeparator)$Env:PATH"
multi_model_env/bin/activate ADDED
@@ -0,0 +1,76 @@
1
+ # This file must be used with "source bin/activate" *from bash*
2
+ # You cannot run it directly
3
+
4
+ deactivate () {
5
+ # reset old environment variables
6
+ if [ -n "${_OLD_VIRTUAL_PATH:-}" ] ; then
7
+ PATH="${_OLD_VIRTUAL_PATH:-}"
8
+ export PATH
9
+ unset _OLD_VIRTUAL_PATH
10
+ fi
11
+ if [ -n "${_OLD_VIRTUAL_PYTHONHOME:-}" ] ; then
12
+ PYTHONHOME="${_OLD_VIRTUAL_PYTHONHOME:-}"
13
+ export PYTHONHOME
14
+ unset _OLD_VIRTUAL_PYTHONHOME
15
+ fi
16
+
17
+ # Call hash to forget past locations. Without forgetting
18
+ # past locations the $PATH changes we made may not be respected.
19
+ # See "man bash" for more details. hash is usually a builtin of your shell
20
+ hash -r 2> /dev/null
21
+
22
+ if [ -n "${_OLD_VIRTUAL_PS1:-}" ] ; then
23
+ PS1="${_OLD_VIRTUAL_PS1:-}"
24
+ export PS1
25
+ unset _OLD_VIRTUAL_PS1
26
+ fi
27
+
28
+ unset VIRTUAL_ENV
29
+ unset VIRTUAL_ENV_PROMPT
30
+ if [ ! "${1:-}" = "nondestructive" ] ; then
31
+ # Self destruct!
32
+ unset -f deactivate
33
+ fi
34
+ }
35
+
36
+ # unset irrelevant variables
37
+ deactivate nondestructive
38
+
39
+ # on Windows, a path can contain colons and backslashes and has to be converted:
40
+ case "$(uname)" in
41
+ CYGWIN*|MSYS*|MINGW*)
42
+ # transform D:\path\to\venv to /d/path/to/venv on MSYS and MINGW
43
+ # and to /cygdrive/d/path/to/venv on Cygwin
44
+ VIRTUAL_ENV=$(cygpath /Users/kunaldhanda/DeepSeek/multi_model_env)
45
+ export VIRTUAL_ENV
46
+ ;;
47
+ *)
48
+ # use the path as-is
49
+ export VIRTUAL_ENV=/Users/kunaldhanda/DeepSeek/multi_model_env
50
+ ;;
51
+ esac
52
+
53
+ _OLD_VIRTUAL_PATH="$PATH"
54
+ PATH="$VIRTUAL_ENV/"bin":$PATH"
55
+ export PATH
56
+
57
+ VIRTUAL_ENV_PROMPT=multi_model_env
58
+ export VIRTUAL_ENV_PROMPT
59
+
60
+ # unset PYTHONHOME if set
61
+ # this will fail if PYTHONHOME is set to the empty string (which is bad anyway)
62
+ # could use `if (set -u; : $PYTHONHOME) ;` in bash
63
+ if [ -n "${PYTHONHOME:-}" ] ; then
64
+ _OLD_VIRTUAL_PYTHONHOME="${PYTHONHOME:-}"
65
+ unset PYTHONHOME
66
+ fi
67
+
68
+ if [ -z "${VIRTUAL_ENV_DISABLE_PROMPT:-}" ] ; then
69
+ _OLD_VIRTUAL_PS1="${PS1:-}"
70
+ PS1="("multi_model_env") ${PS1:-}"
71
+ export PS1
72
+ fi
73
+
74
+ # Call hash to forget past commands. Without forgetting
75
+ # past commands the $PATH changes we made may not be respected
76
+ hash -r 2> /dev/null
multi_model_env/bin/activate.csh ADDED
@@ -0,0 +1,27 @@
1
+ # This file must be used with "source bin/activate.csh" *from csh*.
2
+ # You cannot run it directly.
3
+
4
+ # Created by Davide Di Blasi <[email protected]>.
5
+ # Ported to Python 3.3 venv by Andrew Svetlov <[email protected]>
6
+
7
+ alias deactivate 'test $?_OLD_VIRTUAL_PATH != 0 && setenv PATH "$_OLD_VIRTUAL_PATH" && unset _OLD_VIRTUAL_PATH; rehash; test $?_OLD_VIRTUAL_PROMPT != 0 && set prompt="$_OLD_VIRTUAL_PROMPT" && unset _OLD_VIRTUAL_PROMPT; unsetenv VIRTUAL_ENV; unsetenv VIRTUAL_ENV_PROMPT; test "\!:*" != "nondestructive" && unalias deactivate'
8
+
9
+ # Unset irrelevant variables.
10
+ deactivate nondestructive
11
+
12
+ setenv VIRTUAL_ENV /Users/kunaldhanda/DeepSeek/multi_model_env
13
+
14
+ set _OLD_VIRTUAL_PATH="$PATH"
15
+ setenv PATH "$VIRTUAL_ENV/"bin":$PATH"
16
+ setenv VIRTUAL_ENV_PROMPT multi_model_env
17
+
18
+
19
+ set _OLD_VIRTUAL_PROMPT="$prompt"
20
+
21
+ if (! "$?VIRTUAL_ENV_DISABLE_PROMPT") then
22
+ set prompt = "("multi_model_env") $prompt:q"
23
+ endif
24
+
25
+ alias pydoc python -m pydoc
26
+
27
+ rehash
multi_model_env/bin/activate.fish ADDED
@@ -0,0 +1,69 @@
1
+ # This file must be used with "source <venv>/bin/activate.fish" *from fish*
2
+ # (https://fishshell.com/). You cannot run it directly.
3
+
4
+ function deactivate -d "Exit virtual environment and return to normal shell environment"
5
+ # reset old environment variables
6
+ if test -n "$_OLD_VIRTUAL_PATH"
7
+ set -gx PATH $_OLD_VIRTUAL_PATH
8
+ set -e _OLD_VIRTUAL_PATH
9
+ end
10
+ if test -n "$_OLD_VIRTUAL_PYTHONHOME"
11
+ set -gx PYTHONHOME $_OLD_VIRTUAL_PYTHONHOME
12
+ set -e _OLD_VIRTUAL_PYTHONHOME
13
+ end
14
+
15
+ if test -n "$_OLD_FISH_PROMPT_OVERRIDE"
16
+ set -e _OLD_FISH_PROMPT_OVERRIDE
17
+ # prevents error when using nested fish instances (Issue #93858)
18
+ if functions -q _old_fish_prompt
19
+ functions -e fish_prompt
20
+ functions -c _old_fish_prompt fish_prompt
21
+ functions -e _old_fish_prompt
22
+ end
23
+ end
24
+
25
+ set -e VIRTUAL_ENV
26
+ set -e VIRTUAL_ENV_PROMPT
27
+ if test "$argv[1]" != "nondestructive"
28
+ # Self-destruct!
29
+ functions -e deactivate
30
+ end
31
+ end
32
+
33
+ # Unset irrelevant variables.
34
+ deactivate nondestructive
35
+
36
+ set -gx VIRTUAL_ENV /Users/kunaldhanda/DeepSeek/multi_model_env
37
+
38
+ set -gx _OLD_VIRTUAL_PATH $PATH
39
+ set -gx PATH "$VIRTUAL_ENV/"bin $PATH
40
+ set -gx VIRTUAL_ENV_PROMPT multi_model_env
41
+
42
+ # Unset PYTHONHOME if set.
43
+ if set -q PYTHONHOME
44
+ set -gx _OLD_VIRTUAL_PYTHONHOME $PYTHONHOME
45
+ set -e PYTHONHOME
46
+ end
47
+
48
+ if test -z "$VIRTUAL_ENV_DISABLE_PROMPT"
49
+ # fish uses a function instead of an env var to generate the prompt.
50
+
51
+ # Save the current fish_prompt function as the function _old_fish_prompt.
52
+ functions -c fish_prompt _old_fish_prompt
53
+
54
+ # With the original prompt function renamed, we can override with our own.
55
+ function fish_prompt
56
+ # Save the return status of the last command.
57
+ set -l old_status $status
58
+
59
+ # Output the venv prompt; color taken from the blue of the Python logo.
60
+ printf "%s(%s)%s " (set_color 4B8BBE) multi_model_env (set_color normal)
61
+
62
+ # Restore the return status of the previous command.
63
+ echo "exit $old_status" | .
64
+ # Output the original/"old" prompt.
65
+ _old_fish_prompt
66
+ end
67
+
68
+ set -gx _OLD_FISH_PROMPT_OVERRIDE "$VIRTUAL_ENV"
69
+ end
multi_model_env/bin/diffusers-cli ADDED
@@ -0,0 +1,8 @@
1
+ #!/Users/kunaldhanda/DeepSeek/multi_model_env/bin/python3.13
2
+ # -*- coding: utf-8 -*-
3
+ import re
4
+ import sys
5
+ from diffusers.commands.diffusers_cli import main
6
+ if __name__ == '__main__':
7
+ sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
8
+ sys.exit(main())
multi_model_env/bin/f2py ADDED
@@ -0,0 +1,8 @@
1
+ #!/Users/kunaldhanda/DeepSeek/multi_model_env/bin/python3.13
2
+ # -*- coding: utf-8 -*-
3
+ import re
4
+ import sys
5
+ from numpy.f2py.f2py2e import main
6
+ if __name__ == '__main__':
7
+ sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
8
+ sys.exit(main())
multi_model_env/bin/hf ADDED
@@ -0,0 +1,8 @@
1
+ #!/Users/kunaldhanda/DeepSeek/multi_model_env/bin/python3.13
2
+ # -*- coding: utf-8 -*-
3
+ import re
4
+ import sys
5
+ from huggingface_hub.cli.hf import main
6
+ if __name__ == '__main__':
7
+ sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
8
+ sys.exit(main())
multi_model_env/bin/huggingface-cli ADDED
@@ -0,0 +1,8 @@
1
+ #!/Users/kunaldhanda/DeepSeek/multi_model_env/bin/python3.13
2
+ # -*- coding: utf-8 -*-
3
+ import re
4
+ import sys
5
+ from huggingface_hub.commands.huggingface_cli import main
6
+ if __name__ == '__main__':
7
+ sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
8
+ sys.exit(main())
multi_model_env/bin/isympy ADDED
@@ -0,0 +1,8 @@
1
+ #!/Users/kunaldhanda/DeepSeek/multi_model_env/bin/python3.13
2
+ # -*- coding: utf-8 -*-
3
+ import re
4
+ import sys
5
+ from isympy import main
6
+ if __name__ == '__main__':
7
+ sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
8
+ sys.exit(main())
multi_model_env/bin/normalizer ADDED
@@ -0,0 +1,8 @@
1
+ #!/Users/kunaldhanda/DeepSeek/multi_model_env/bin/python3.13
2
+ # -*- coding: utf-8 -*-
3
+ import re
4
+ import sys
5
+ from charset_normalizer import cli
6
+ if __name__ == '__main__':
7
+ sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
8
+ sys.exit(cli.cli_detect())
multi_model_env/bin/numpy-config ADDED
@@ -0,0 +1,8 @@
1
+ #!/Users/kunaldhanda/DeepSeek/multi_model_env/bin/python3.13
2
+ # -*- coding: utf-8 -*-
3
+ import re
4
+ import sys
5
+ from numpy._configtool import main
6
+ if __name__ == '__main__':
7
+ sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
8
+ sys.exit(main())
multi_model_env/bin/pip ADDED
@@ -0,0 +1,8 @@
1
+ #!/Users/kunaldhanda/DeepSeek/multi_model_env/bin/python3.13
2
+ # -*- coding: utf-8 -*-
3
+ import re
4
+ import sys
5
+ from pip._internal.cli.main import main
6
+ if __name__ == '__main__':
7
+ sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
8
+ sys.exit(main())
multi_model_env/bin/pip3 ADDED
@@ -0,0 +1,8 @@
1
+ #!/Users/kunaldhanda/DeepSeek/multi_model_env/bin/python3.13
2
+ # -*- coding: utf-8 -*-
3
+ import re
4
+ import sys
5
+ from pip._internal.cli.main import main
6
+ if __name__ == '__main__':
7
+ sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
8
+ sys.exit(main())
multi_model_env/bin/pip3.13 ADDED
@@ -0,0 +1,8 @@
1
+ #!/Users/kunaldhanda/DeepSeek/multi_model_env/bin/python3.13
2
+ # -*- coding: utf-8 -*-
3
+ import re
4
+ import sys
5
+ from pip._internal.cli.main import main
6
+ if __name__ == '__main__':
7
+ sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
8
+ sys.exit(main())
multi_model_env/bin/python ADDED
Binary file (52.6 kB). View file
 
multi_model_env/bin/python3 ADDED
Binary file (52.6 kB). View file
 
multi_model_env/bin/python3.13 ADDED
Binary file (52.6 kB). View file
 
multi_model_env/bin/tiny-agents ADDED
@@ -0,0 +1,8 @@
1
+ #!/Users/kunaldhanda/DeepSeek/multi_model_env/bin/python3.13
2
+ # -*- coding: utf-8 -*-
3
+ import re
4
+ import sys
5
+ from huggingface_hub.inference._mcp.cli import app
6
+ if __name__ == '__main__':
7
+ sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
8
+ sys.exit(app())
multi_model_env/bin/torchfrtrace ADDED
@@ -0,0 +1,8 @@
1
+ #!/Users/kunaldhanda/DeepSeek/multi_model_env/bin/python3.13
2
+ # -*- coding: utf-8 -*-
3
+ import re
4
+ import sys
5
+ from tools.flight_recorder.fr_trace import main
6
+ if __name__ == '__main__':
7
+ sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
8
+ sys.exit(main())
multi_model_env/bin/torchrun ADDED
@@ -0,0 +1,8 @@
1
+ #!/Users/kunaldhanda/DeepSeek/multi_model_env/bin/python3.13
2
+ # -*- coding: utf-8 -*-
3
+ import re
4
+ import sys
5
+ from torch.distributed.run import main
6
+ if __name__ == '__main__':
7
+ sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
8
+ sys.exit(main())
multi_model_env/bin/tqdm ADDED
@@ -0,0 +1,8 @@
1
+ #!/Users/kunaldhanda/DeepSeek/multi_model_env/bin/python3.13
2
+ # -*- coding: utf-8 -*-
3
+ import re
4
+ import sys
5
+ from tqdm.cli import main
6
+ if __name__ == '__main__':
7
+ sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
8
+ sys.exit(main())
multi_model_env/bin/transformers ADDED
@@ -0,0 +1,8 @@
1
+ #!/Users/kunaldhanda/DeepSeek/multi_model_env/bin/python3.13
2
+ # -*- coding: utf-8 -*-
3
+ import re
4
+ import sys
5
+ from transformers.commands.transformers_cli import main
6
+ if __name__ == '__main__':
7
+ sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
8
+ sys.exit(main())
multi_model_env/bin/transformers-cli ADDED
@@ -0,0 +1,8 @@
1
+ #!/Users/kunaldhanda/DeepSeek/multi_model_env/bin/python3.13
2
+ # -*- coding: utf-8 -*-
3
+ import re
4
+ import sys
5
+ from transformers.commands.transformers_cli import main_cli
6
+ if __name__ == '__main__':
7
+ sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
8
+ sys.exit(main_cli())
multi_model_env/lib/python3.13/site-packages/MarkupSafe-3.0.2.dist-info/INSTALLER ADDED
@@ -0,0 +1 @@
1
+ pip
multi_model_env/lib/python3.13/site-packages/MarkupSafe-3.0.2.dist-info/LICENSE.txt ADDED
@@ -0,0 +1,28 @@
1
+ Copyright 2010 Pallets
2
+
3
+ Redistribution and use in source and binary forms, with or without
4
+ modification, are permitted provided that the following conditions are
5
+ met:
6
+
7
+ 1. Redistributions of source code must retain the above copyright
8
+ notice, this list of conditions and the following disclaimer.
9
+
10
+ 2. Redistributions in binary form must reproduce the above copyright
11
+ notice, this list of conditions and the following disclaimer in the
12
+ documentation and/or other materials provided with the distribution.
13
+
14
+ 3. Neither the name of the copyright holder nor the names of its
15
+ contributors may be used to endorse or promote products derived from
16
+ this software without specific prior written permission.
17
+
18
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
21
+ PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
22
+ HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
23
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
24
+ TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
25
+ PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
26
+ LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
27
+ NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
28
+ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
multi_model_env/lib/python3.13/site-packages/MarkupSafe-3.0.2.dist-info/METADATA ADDED
@@ -0,0 +1,92 @@
1
+ Metadata-Version: 2.1
2
+ Name: MarkupSafe
3
+ Version: 3.0.2
4
+ Summary: Safely add untrusted strings to HTML/XML markup.
5
+ Maintainer-email: Pallets <[email protected]>
6
+ License: Copyright 2010 Pallets
7
+
8
+ Redistribution and use in source and binary forms, with or without
9
+ modification, are permitted provided that the following conditions are
10
+ met:
11
+
12
+ 1. Redistributions of source code must retain the above copyright
13
+ notice, this list of conditions and the following disclaimer.
14
+
15
+ 2. Redistributions in binary form must reproduce the above copyright
16
+ notice, this list of conditions and the following disclaimer in the
17
+ documentation and/or other materials provided with the distribution.
18
+
19
+ 3. Neither the name of the copyright holder nor the names of its
20
+ contributors may be used to endorse or promote products derived from
21
+ this software without specific prior written permission.
22
+
23
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
24
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
25
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
26
+ PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
27
+ HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
28
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
29
+ TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
30
+ PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
31
+ LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
32
+ NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
33
+ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
34
+
35
+ Project-URL: Donate, https://palletsprojects.com/donate
36
+ Project-URL: Documentation, https://markupsafe.palletsprojects.com/
37
+ Project-URL: Changes, https://markupsafe.palletsprojects.com/changes/
38
+ Project-URL: Source, https://github.com/pallets/markupsafe/
39
+ Project-URL: Chat, https://discord.gg/pallets
40
+ Classifier: Development Status :: 5 - Production/Stable
41
+ Classifier: Environment :: Web Environment
42
+ Classifier: Intended Audience :: Developers
43
+ Classifier: License :: OSI Approved :: BSD License
44
+ Classifier: Operating System :: OS Independent
45
+ Classifier: Programming Language :: Python
46
+ Classifier: Topic :: Internet :: WWW/HTTP :: Dynamic Content
47
+ Classifier: Topic :: Text Processing :: Markup :: HTML
48
+ Classifier: Typing :: Typed
49
+ Requires-Python: >=3.9
50
+ Description-Content-Type: text/markdown
51
+ License-File: LICENSE.txt
52
+
53
+ # MarkupSafe
54
+
55
+ MarkupSafe implements a text object that escapes characters so it is
56
+ safe to use in HTML and XML. Characters that have special meanings are
57
+ replaced so that they display as the actual characters. This mitigates
58
+ injection attacks, meaning untrusted user input can safely be displayed
59
+ on a page.
60
+
61
+
62
+ ## Examples
63
+
64
+ ```pycon
65
+ >>> from markupsafe import Markup, escape
66
+
67
+ >>> # escape replaces special characters and wraps in Markup
68
+ >>> escape("<script>alert(document.cookie);</script>")
69
+ Markup('&lt;script&gt;alert(document.cookie);&lt;/script&gt;')
70
+
71
+ >>> # wrap in Markup to mark text "safe" and prevent escaping
72
+ >>> Markup("<strong>Hello</strong>")
73
+ Markup('<strong>hello</strong>')
74
+
75
+ >>> escape(Markup("<strong>Hello</strong>"))
76
+ Markup('<strong>hello</strong>')
77
+
78
+ >>> # Markup is a str subclass
79
+ >>> # methods and operators escape their arguments
80
+ >>> template = Markup("Hello <em>{name}</em>")
81
+ >>> template.format(name='"World"')
82
+ Markup('Hello <em>&#34;World&#34;</em>')
83
+ ```
84
+
85
+ ## Donate
86
+
87
+ The Pallets organization develops and supports MarkupSafe and other
88
+ popular packages. In order to grow the community of contributors and
89
+ users, and allow the maintainers to devote more time to the projects,
90
+ [please donate today][].
91
+
92
+ [please donate today]: https://palletsprojects.com/donate
multi_model_env/lib/python3.13/site-packages/MarkupSafe-3.0.2.dist-info/RECORD ADDED
@@ -0,0 +1,14 @@
1
+ MarkupSafe-3.0.2.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
2
+ MarkupSafe-3.0.2.dist-info/LICENSE.txt,sha256=SJqOEQhQntmKN7uYPhHg9-HTHwvY-Zp5yESOf_N9B-o,1475
3
+ MarkupSafe-3.0.2.dist-info/METADATA,sha256=aAwbZhSmXdfFuMM-rEHpeiHRkBOGESyVLJIuwzHP-nw,3975
4
+ MarkupSafe-3.0.2.dist-info/RECORD,,
5
+ MarkupSafe-3.0.2.dist-info/WHEEL,sha256=EhaGmhgTZV8uqhZxBmQmxqlBexDOCFpUXsFLjK8lF9g,109
6
+ MarkupSafe-3.0.2.dist-info/top_level.txt,sha256=qy0Plje5IJuvsCBjejJyhDCjEAdcDLK_2agVcex8Z6U,11
7
+ markupsafe/__init__.py,sha256=sr-U6_27DfaSrj5jnHYxWN-pvhM27sjlDplMDPZKm7k,13214
8
+ markupsafe/__pycache__/__init__.cpython-313.pyc,,
9
+ markupsafe/__pycache__/_native.cpython-313.pyc,,
10
+ markupsafe/_native.py,sha256=hSLs8Jmz5aqayuengJJ3kdT5PwNpBWpKrmQSdipndC8,210
11
+ markupsafe/_speedups.c,sha256=O7XulmTo-epI6n2FtMVOrJXl8EAaIwD2iNYmBI5SEoQ,4149
12
+ markupsafe/_speedups.cpython-313-darwin.so,sha256=zqa2NWhnDkGCuJVPBpLRKADVKQXphH3cRvm6rY8Hvds,50624
13
+ markupsafe/_speedups.pyi,sha256=ENd1bYe7gbBUf2ywyYWOGUpnXOHNJ-cgTNqetlW8h5k,41
14
+ markupsafe/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
multi_model_env/lib/python3.13/site-packages/MarkupSafe-3.0.2.dist-info/WHEEL ADDED
@@ -0,0 +1,5 @@
1
+ Wheel-Version: 1.0
2
+ Generator: setuptools (75.2.0)
3
+ Root-Is-Purelib: false
4
+ Tag: cp313-cp313-macosx_11_0_arm64
5
+
multi_model_env/lib/python3.13/site-packages/MarkupSafe-3.0.2.dist-info/top_level.txt ADDED
@@ -0,0 +1 @@
1
+ markupsafe
multi_model_env/lib/python3.13/site-packages/PIL/.dylibs/libXau.6.dylib ADDED
Binary file (70 kB). View file
 
multi_model_env/lib/python3.13/site-packages/PIL/.dylibs/libavif.16.3.0.dylib ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:173bc240d1d7ba7cc6fbd4769a942e5743dfe3d5c60f5d1cdf6bfac1c23e3b5b
3
+ size 2999728
multi_model_env/lib/python3.13/site-packages/PIL/.dylibs/libbrotlicommon.1.1.0.dylib ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9de349e2275ea9702e3f69ee5bae2645e91f69c0474d6674e0b126ebe3a6d305
3
+ size 201200
multi_model_env/lib/python3.13/site-packages/PIL/.dylibs/libbrotlidec.1.1.0.dylib ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:296c21f63059fbe2c426b4abfe7ed9274797a5bff875bd58dd75e362c1121c56
3
+ size 104576
multi_model_env/lib/python3.13/site-packages/PIL/.dylibs/libfreetype.6.dylib ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:25662cfcc8b01db700126023a5e4e032a06d3f08c863899a53eda73d0e37dd65
3
+ size 1208336
multi_model_env/lib/python3.13/site-packages/PIL/.dylibs/libharfbuzz.0.dylib ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:74be7efca5f46967668a99acc7eb2b52f4d7d769b5e96d3a70242ae90bddb50e
3
+ size 1772960
multi_model_env/lib/python3.13/site-packages/PIL/.dylibs/libjpeg.62.4.0.dylib ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3738d4c83621823e877e389de5c311e3923cef2ed6faf8af613b9c1ab1908ac9
3
+ size 637088
multi_model_env/lib/python3.13/site-packages/PIL/.dylibs/liblcms2.2.dylib ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b42c5cb99e069f860382cebe75034b23d5e8dfe979c1b3715790872d0395b2f8
3
+ size 557776