In this chapter, we’ll explore advanced techniques and best practices for optimizing LLM deployments in production environments. We’ll cover strategies for improving performance, reducing costs, and ensuring reliability at scale.
Quantization shrinks the model's memory footprint by storing weights in lower precision. Here we load SmolLM2 in 4-bit NF4 with bitsandbytes:

```python
import torch
from transformers import AutoModelForCausalLM, BitsAndBytesConfig

# Configure 4-bit NF4 quantization; computation still runs in float16
quantization_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_compute_dtype=torch.float16,
    bnb_4bit_quant_type="nf4",
)

# Load the quantized model and spread it across available devices
model = AutoModelForCausalLM.from_pretrained(
    "HuggingFaceTB/SmolLM2-1.7B-Instruct",
    quantization_config=quantization_config,
    device_map="auto",
)
```
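As a quick smoke test of the quantized model (the prompt and generation settings below are arbitrary), load the matching tokenizer and generate a short completion:

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("HuggingFaceTB/SmolLM2-1.7B-Instruct")

inputs = tokenizer("Explain quantization in one sentence.", return_tensors="pt").to(model.device)
outputs = model.generate(**inputs, max_new_tokens=64)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```

4-bit NF4 typically cuts weight memory to roughly a quarter of the float16 footprint at a small quality cost, which is why it is a common first step when serving on limited GPUs.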
Keep an eye on GPU memory while serving; freeing the allocator cache and printing a summary helps spot fragmentation and leaks:

```python
import torch

torch.cuda.empty_cache()            # Release cached, unused GPU memory back to the driver
print(torch.cuda.memory_summary())  # Human-readable breakdown of current GPU memory usage
```
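For a single number rather than the full summary, the peak-allocation counters are handy. This sketch reuses the `model` and `inputs` from the snippets above:

```python
torch.cuda.reset_peak_memory_stats()

_ = model.generate(**inputs, max_new_tokens=64)

print(f"Currently allocated:  {torch.cuda.memory_allocated() / 1e9:.2f} GB")
print(f"Peak during generate: {torch.cuda.max_memory_allocated() / 1e9:.2f} GB")
```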
Dynamic batching groups incoming requests so the GPU runs one batched forward pass instead of many single-prompt passes, trading a little latency for much higher throughput:

```python
class DynamicBatcher:
    def __init__(self, max_batch_size=32, max_wait_time=0.1):
        self.queue = []
        self.max_batch_size = max_batch_size
        self.max_wait_time = max_wait_time

    async def add_request(self, request):
        self.queue.append(request)
        if len(self.queue) >= self.max_batch_size:
            return await self.process_batch()   # batch is full: flush immediately
        return await self.wait_for_batch()      # otherwise wait up to max_wait_time
```
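`process_batch` and `wait_for_batch` are left out above. A self-contained sketch of the same idea, built on `asyncio` (the `batch_fn` callable that performs the actual batched model call is assumed to be supplied by you), could look like this:

```python
import asyncio
import time

class AsyncDynamicBatcher:
    """Collect requests into batches of up to max_batch_size, flushing a partial
    batch once max_wait_time seconds have passed since the first queued request."""

    def __init__(self, batch_fn, max_batch_size=32, max_wait_time=0.1):
        self.batch_fn = batch_fn            # list[prompt] -> list[output]
        self.max_batch_size = max_batch_size
        self.max_wait_time = max_wait_time
        self.queue: asyncio.Queue = asyncio.Queue()

    async def submit(self, prompt):
        # Each caller gets a future that resolves with its own result.
        fut = asyncio.get_running_loop().create_future()
        await self.queue.put((prompt, fut))
        return await fut

    async def run(self):
        while True:
            batch = [await self.queue.get()]            # block until work arrives
            deadline = time.monotonic() + self.max_wait_time
            while len(batch) < self.max_batch_size:
                timeout = deadline - time.monotonic()
                if timeout <= 0:
                    break
                try:
                    batch.append(await asyncio.wait_for(self.queue.get(), timeout))
                except asyncio.TimeoutError:
                    break
            prompts = [p for p, _ in batch]
            results = self.batch_fn(prompts)            # one batched model call
            for (_, fut), result in zip(batch, results):
                fut.set_result(result)
```

The `run` loop is started once at application startup (for example with `asyncio.create_task(batcher.run())`), and request handlers simply `await batcher.submit(prompt)`. In a real server the synchronous `batch_fn` call would be offloaded to a worker thread so it does not block the event loop.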
When a single GPU is not enough, spread traffic across several model servers behind a thin routing layer:

```python
from fastapi import FastAPI, Request

from load_balancer import LoadBalancer  # project-local module, sketched below

app = FastAPI()
lb = LoadBalancer(["server1", "server2", "server3"])

@app.post("/generate")
async def generate(request: Request):
    server = lb.get_next_server()         # pick a backend (e.g. round-robin)
    return await server.process(request)  # forward the request to it
```
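The `load_balancer` module is not a published package; one hypothetical round-robin implementation, using `httpx` to forward requests and assuming the server list holds full base URLs such as `http://server1:8000`, might be:

```python
import itertools
import httpx

class Backend:
    """One model server; forwards the incoming request body over HTTP."""

    def __init__(self, base_url: str):
        self.base_url = base_url

    async def process(self, request):
        payload = await request.json()
        async with httpx.AsyncClient(timeout=60.0) as client:
            resp = await client.post(f"{self.base_url}/generate", json=payload)
            resp.raise_for_status()
            return resp.json()

class LoadBalancer:
    """Minimal round-robin balancer over a list of backend base URLs."""

    def __init__(self, servers):
        self._backends = itertools.cycle(Backend(url) for url in servers)

    def get_next_server(self) -> Backend:
        return next(self._backends)
```

Round-robin is the simplest policy; least-connections or queue-depth-aware routing does better when requests have very uneven generation lengths.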
Identical prompts do not need to hit the model twice. The two-tier cache below keeps hot entries in process memory with `lru_cache` and shares entries across workers through Redis:

```python
from functools import lru_cache

import redis

redis_client = redis.Redis(host="localhost", port=6379, decode_responses=True)

@lru_cache(maxsize=1000)  # in-process cache for the hottest prompts
def get_cached_response(prompt: str) -> str:
    # Check the shared Redis cache first
    if cached := redis_client.get(prompt):
        return cached
    # Generate and cache the response (generate_response is your model call)
    response = generate_response(prompt)
    redis_client.setex(prompt, 3600, response)  # keep it for 1 hour
    return response
```
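Raw prompts make awkward cache keys: they can be arbitrarily long and they ignore sampling parameters that change the output. A small hypothetical helper that hashes the prompt together with the generation settings avoids both problems:

```python
import hashlib
import json

def cache_key(prompt: str, params: dict) -> str:
    # Different temperature/max_tokens settings must not collide on the same key.
    payload = json.dumps({"prompt": prompt, **params}, sort_keys=True)
    return "llm:" + hashlib.sha256(payload.encode()).hexdigest()

# e.g. redis_client.get(cache_key(prompt, {"temperature": 0.7, "max_new_tokens": 128}))
```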
Expose request counts and latency as Prometheus metrics so throughput and tail latency can be monitored and alerted on:

```python
from prometheus_client import Counter, Histogram

requests_total = Counter("requests_total", "Total requests processed")
latency_seconds = Histogram("latency_seconds", "Request latency")

@app.post("/generate")
async def generate(request: Request):
    requests_total.inc()
    with latency_seconds.time():               # records the elapsed time on exit
        return await process_request(request)  # process_request: your generation logic
```
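Prometheus still needs an HTTP endpoint to scrape. `prometheus_client` ships a small ASGI app that can be mounted on the existing FastAPI instance:

```python
from prometheus_client import make_asgi_app

# Serve the collected metrics at /metrics for the Prometheus scraper
app.mount("/metrics", make_asgi_app())
```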
Autoscaling adds or removes serving instances as load changes, keeping latency stable without paying for idle capacity:

```python
class AutoScaler:
    def __init__(self, min_instances=1, max_instances=10):
        self.instances = min_instances
        self.min_instances = min_instances
        self.max_instances = max_instances

    def scale_based_on_load(self, current_load):
        # Assumes roughly 100 units of load (e.g. queued requests) per instance
        target_instances = max(
            self.min_instances,
            min(self.max_instances, current_load // 100)
        )
        self.adjust_instances(target_instances)  # hook into your orchestrator (e.g. Kubernetes API)
```
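The class needs something to drive it. A simple assumed polling loop samples a load metric, such as queue depth, and rescales every interval; `get_load` and `adjust_instances` are hooks into your own metrics and orchestrator:

```python
import asyncio

async def autoscale_loop(scaler: AutoScaler, get_load, interval: float = 30.0):
    # Periodically sample the load metric and let the scaler converge on a target
    while True:
        scaler.scale_based_on_load(get_load())
        await asyncio.sleep(interval)

# e.g. asyncio.create_task(autoscale_loop(AutoScaler(), lambda: queue_depth()))
# where queue_depth() is whatever load metric you track
```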
Token usage is the main cost driver, so cap it explicitly. The budget below tracks tokens against a daily limit and resets every 24 hours:

```python
from datetime import datetime, timedelta

class TokenBudget:
    def __init__(self, max_daily_tokens=1_000_000):
        self.max_daily_tokens = max_daily_tokens
        self.used_tokens = 0
        self.reset_time = datetime.now() + timedelta(days=1)

    def can_process_request(self, token_count):
        # Roll the budget over once the reset time has passed
        if datetime.now() >= self.reset_time:
            self.used_tokens = 0
            self.reset_time = datetime.now() + timedelta(days=1)
        if self.used_tokens + token_count <= self.max_daily_tokens:
            self.used_tokens += token_count
            return True
        return False
```
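Wired into the FastAPI endpoint, a request that would blow the budget is rejected with HTTP 429 before any GPU time is spent. `count_tokens` is an assumed helper, e.g. the tokenized prompt length plus the requested output length; the endpoint is shown as a standalone variant for clarity:

```python
from fastapi import HTTPException

budget = TokenBudget(max_daily_tokens=1_000_000)

@app.post("/generate")
async def generate(request: Request):
    payload = await request.json()
    token_count = count_tokens(payload["prompt"])  # assumed helper
    if not budget.can_process_request(token_count):
        raise HTTPException(status_code=429, detail="Daily token budget exhausted")
    return await process_request(request)
```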
Finally, a few deployment best practices worth baking in from the start:

- **Gradual rollout** – ship new model versions to a small slice of traffic first (canary or blue-green deployments) and widen the rollout only once quality and latency metrics hold up.
- **Error handling** – expect timeouts, out-of-memory errors, and flaky upstream services; retry transient failures with backoff (see the sketch below) and return clear error responses instead of letting requests hang.
- **Security** – authenticate and rate-limit API access, validate and size-limit incoming prompts, and avoid logging sensitive user inputs or generated outputs.
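As one concrete piece of the error-handling item, here is a sketch of retrying transient failures with jittered exponential backoff; which exception types count as transient is an assumption you should adapt to your stack:

```python
import asyncio
import random

async def generate_with_retry(call, max_retries=3, base_delay=0.5):
    """Retry an async callable on transient errors, backing off exponentially."""
    for attempt in range(max_retries + 1):
        try:
            return await call()
        except (TimeoutError, ConnectionError):
            if attempt == max_retries:
                raise                                        # out of retries: surface the error
            delay = base_delay * (2 ** attempt) + random.uniform(0, 0.1)
            await asyncio.sleep(delay)                       # 0.5s, 1s, 2s, ... plus jitter
```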