In this chapter, we’ll explore various techniques and strategies for optimizing local inference with Large Language Models. We’ll cover topics like quantization, caching, attention optimizations, and hardware utilization.
Quantization reduces model size and memory usage by converting weights to lower-precision formats such as 8-bit or 4-bit integers.
from transformers import AutoModelForCausalLM, BitsAndBytesConfig

# Configure 4-bit quantization (NF4 weights with float16 compute)
quantization_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_compute_dtype="float16",
    bnb_4bit_quant_type="nf4"
)

# Load the quantized model
model = AutoModelForCausalLM.from_pretrained(
    "HuggingFaceTB/SmolLM2-1.7B-Instruct",
    quantization_config=quantization_config,
    device_map="auto"
)
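To sanity-check the savings and confirm the quantized model still generates, here is a minimal sketch. It loads the matching tokenizer, which the later snippets also assume is available.

from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("HuggingFaceTB/SmolLM2-1.7B-Instruct")

# Rough memory footprint of the quantized weights, in MB
print(f"Model footprint: {model.get_memory_footprint() / 1024**2:.0f}MB")

# Quick generation to confirm the quantized model works
inputs = tokenizer("What is quantization?", return_tensors="pt").to(model.device)
outputs = model.generate(**inputs, max_new_tokens=50)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))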
from transformers import AutoModelForCausalLM, BitsAndBytesConfig

# Load the model in 8-bit precision
model = AutoModelForCausalLM.from_pretrained(
    "HuggingFaceTB/SmolLM2-1.7B-Instruct",
    quantization_config=BitsAndBytesConfig(load_in_8bit=True),
    device_map="auto"
)
Flash Attention reduces memory usage and increases speed by computing attention in small tiles that stay in fast on-chip memory instead of materializing the full attention matrix.
import torch
from transformers import AutoModelForCausalLM

# Request the Flash Attention 2 kernel (requires the flash-attn package and a half-precision dtype)
model = AutoModelForCausalLM.from_pretrained(
    "HuggingFaceTB/SmolLM2-1.7B-Instruct",
    attn_implementation="flash_attention_2",
    torch_dtype=torch.bfloat16,
    device_map="auto"
)
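Flash Attention 2 depends on the separate flash-attn package and a recent GPU. A hedged sketch that falls back to PyTorch's built-in SDPA implementation when it isn't available:

import importlib.util
import torch
from transformers import AutoModelForCausalLM

# Use Flash Attention 2 if the flash-attn package is installed, otherwise the SDPA kernel
attn = "flash_attention_2" if importlib.util.find_spec("flash_attn") else "sdpa"

model = AutoModelForCausalLM.from_pretrained(
    "HuggingFaceTB/SmolLM2-1.7B-Instruct",
    attn_implementation=attn,
    torch_dtype=torch.bfloat16,
    device_map="auto"
)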
Sliding-window attention reduces memory usage for long sequences by letting each token attend only within a local window.
from transformers import AutoConfig, AutoModelForCausalLM

config = AutoConfig.from_pretrained("HuggingFaceTB/SmolLM2-1.7B-Instruct")
config.sliding_window = 4096  # Set window size (in tokens)

# Note: the window is only honored by architectures that implement sliding-window
# attention (e.g. Mistral-style models); other models ignore this setting.
model = AutoModelForCausalLM.from_pretrained(
    "HuggingFaceTB/SmolLM2-1.7B-Instruct",
    config=config
)
Keeping an eye on GPU memory helps you catch fragmentation and out-of-memory errors early.

import torch

def print_gpu_memory():
    print(f"Allocated: {torch.cuda.memory_allocated() / 1024**2:.2f}MB")
    print(f"Reserved: {torch.cuda.memory_reserved() / 1024**2:.2f}MB")

# Monitor memory usage before and after clearing the cache
print_gpu_memory()
torch.cuda.empty_cache()  # Release unused cached blocks back to the driver
print_gpu_memory()

# Trade compute for memory by recomputing activations (mainly relevant when fine-tuning)
model.gradient_checkpointing_enable()
CPU offloading keeps parts of the model in system RAM when GPU memory is limited, moving layers onto the GPU only when they are needed.
from transformers import AutoModelForCausalLM

# Let accelerate place layers on GPU, CPU, and disk as needed
model = AutoModelForCausalLM.from_pretrained(
    "HuggingFaceTB/SmolLM2-1.7B-Instruct",
    device_map="auto",
    offload_folder="offload"  # Directory for weights that spill to disk
)
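You can also cap how much GPU memory is used so the remainder stays in system RAM. A minimal sketch, where the max_memory limits are placeholder values to tune for your hardware:

from transformers import AutoModelForCausalLM

# Placeholder limits: keep at most 2 GiB of weights on GPU 0, the rest in system RAM
model = AutoModelForCausalLM.from_pretrained(
    "HuggingFaceTB/SmolLM2-1.7B-Instruct",
    device_map="auto",
    max_memory={0: "2GiB", "cpu": "16GiB"},
    offload_folder="offload"
)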
Batching several prompts into a single forward pass improves GPU utilization compared to generating one prompt at a time.

def batch_inference(prompts, batch_size=4):
    # Causal LMs often lack a pad token; reuse EOS and pad on the left for generation
    if tokenizer.pad_token is None:
        tokenizer.pad_token = tokenizer.eos_token
    tokenizer.padding_side = "left"

    results = []
    for i in range(0, len(prompts), batch_size):
        batch = prompts[i:i + batch_size]
        inputs = tokenizer(batch, padding=True, return_tensors="pt").to(model.device)
        outputs = model.generate(**inputs)
        decoded = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        results.extend(decoded)
    return results
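Calling it with a list of prompts (the model and tokenizer loaded earlier are assumed):

prompts = [
    "What is quantization?",
    "Explain KV caching in one sentence.",
    "Why batch requests?",
]
for answer in batch_inference(prompts, batch_size=2):
    print(answer)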
For server-style workloads, a dynamic batcher queues incoming requests and runs them together once enough have accumulated; process_batch below is a minimal sketch of that step.

class DynamicBatcher:
    def __init__(self, model, tokenizer, max_batch_size=8):
        self.model = model
        self.tokenizer = tokenizer
        self.max_batch_size = max_batch_size
        self.queue = []

    async def add_request(self, prompt):
        self.queue.append(prompt)
        if len(self.queue) >= self.max_batch_size:
            return await self.process_batch()
        return None

    async def process_batch(self):
        # Minimal processing step: drain the queue and generate for the whole batch
        batch, self.queue = self.queue, []
        inputs = self.tokenizer(batch, padding=True, return_tensors="pt").to(self.model.device)
        outputs = self.model.generate(**inputs)
        return self.tokenizer.batch_decode(outputs, skip_special_tokens=True)
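A quick way to exercise it, with a deliberately small batch size so the second request triggers a batch:

import asyncio

async def main():
    batcher = DynamicBatcher(model, tokenizer, max_batch_size=2)
    await batcher.add_request("What is quantization?")               # queued, returns None
    results = await batcher.add_request("What is Flash Attention?")  # batch is full, runs
    print(results)

asyncio.run(main())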
Caching generated results keyed on the prompt's token IDs avoids recomputing answers for repeated requests.

class KVCache:
    """Simple response cache keyed on prompt token IDs, with FIFO eviction."""

    def __init__(self, max_size=1000):
        self.cache = {}
        self.max_size = max_size

    def get(self, key_tokens):
        key = tuple(key_tokens.tolist())
        return self.cache.get(key)

    def put(self, key_tokens, value):
        if len(self.cache) >= self.max_size:
            self.cache.pop(next(iter(self.cache)))  # Evict the oldest entry
        key = tuple(key_tokens.tolist())
        self.cache[key] = value
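A usage sketch with the model and tokenizer loaded earlier; the cache key is a 1-D tensor of prompt token IDs:

cache = KVCache(max_size=500)

prompt_ids = tokenizer("What is quantization?", return_tensors="pt").input_ids[0]
response = cache.get(prompt_ids)
if response is None:
    output = model.generate(prompt_ids.unsqueeze(0).to(model.device), max_new_tokens=50)
    response = tokenizer.decode(output[0], skip_special_tokens=True)
    cache.put(prompt_ids, response)
print(response)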
With more than one GPU, the model's layers can be split across devices.

from accelerate import dispatch_model, infer_auto_device_map

# Distribute the model's layers across the available GPUs
# (or pass device_map="balanced" to from_pretrained when loading)
device_map = infer_auto_device_map(model)
model = dispatch_model(model, device_map=device_map)
Data parallelism instead replicates the whole model on each GPU, which helps with large batched workloads.

import torch
import torch.nn as nn

# Replicate the model on each GPU for large batched workloads
# (call generation via model.module.generate when wrapped)
if torch.cuda.device_count() > 1:
    model = nn.DataParallel(model)
For CPU inference, match the thread count to your hardware.

import torch

# Set the number of threads for CPU operations (typically the number of physical cores)
torch.set_num_threads(8)
To measure latency, a small context manager can time each request.

import time

class LatencyTracker:
    def __init__(self):
        self.times = []

    def __enter__(self):
        self.start = time.perf_counter()
        return self

    def __exit__(self, *args):
        self.times.append(time.perf_counter() - self.start)

    @property
    def average(self):
        return sum(self.times) / len(self.times) if self.times else 0.0
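Timing a few generations with the tracker (model and tokenizer as loaded earlier):

tracker = LatencyTracker()

for prompt in ["Hello!", "What is quantization?"]:
    with tracker:
        inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
        model.generate(**inputs, max_new_tokens=32)

print(f"Average latency: {tracker.average:.3f}s")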
Throughput can be tracked over a sliding window of recent requests.

class ThroughputMonitor:
    def __init__(self, window_size=100):
        self.window_size = window_size
        self.timestamps = []

    def add_request(self):
        self.timestamps.append(time.time())
        if len(self.timestamps) > self.window_size:
            self.timestamps.pop(0)

    @property
    def requests_per_second(self):
        if len(self.timestamps) < 2:
            return 0.0
        time_diff = self.timestamps[-1] - self.timestamps[0]
        if time_diff == 0:
            return 0.0
        # Count the intervals between the first and last timestamp in the window
        return (len(self.timestamps) - 1) / time_diff
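And a sketch of feeding the monitor from a generation loop:

monitor = ThroughputMonitor(window_size=50)

for prompt in ["Hello!", "Tell me a joke.", "What is quantization?"]:
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
    model.generate(**inputs, max_new_tokens=16)
    monitor.add_request()

print(f"Throughput: {monitor.requests_per_second:.2f} requests/second")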
When putting this into practice, focus on three areas:
- Model selection: pick a model size and quantization level that fit your hardware and latency targets.
- Memory management: monitor GPU memory, clear unused caches, and offload to CPU or disk when needed.
- Performance optimization: batch requests, cache repeated results, and measure latency and throughput continuously.