import os import logging import pickle import time from typing import List, Tuple, Dict import numpy as np import torch from torch import nn, Tensor as T from transformers import DPRQuestionEncoder, DPRQuestionEncoderTokenizer from dpr.indexer.faiss_indexers import DenseFlatIndexer from tqdm import tqdm # 导入 tqdm 用于显示进度条 logger = logging.getLogger() logging.basicConfig(level=logging.INFO) def load_context_embeddings(embedding_file: str) -> Dict[str, np.ndarray]: """ 从给定文件中加载上下文嵌入 :param embedding_file: 上下文嵌入文件路径 (.pkl 格式) :return: 上下文ID到嵌入向量的字典 """ with open(embedding_file, "rb") as f: embeddings = pickle.load(f) return embeddings def generate_question_vectors(questions: List[str], model_name: str, batch_size: int, device: str) -> T: """ 使用 Hugging Face 模型生成问题嵌入向量 :param questions: 要生成嵌入的问题列表 :param model_name: Hugging Face 模型名称 :param batch_size: 批量大小 :param device: 设备('cpu' 或 'cuda') :return: 问题嵌入向量张量 """ tokenizer = DPRQuestionEncoderTokenizer.from_pretrained(model_name) model = DPRQuestionEncoder.from_pretrained(model_name).to(device) n = len(questions) query_vectors = [] with torch.no_grad(): for batch_start in tqdm(range(0, n, batch_size), desc="Generating question embeddings", unit="batch"): batch_questions = questions[batch_start:batch_start + batch_size] inputs = tokenizer(batch_questions, return_tensors="pt", padding=True, truncation=True).to(device) outputs = model(**inputs) query_vectors.extend(outputs.pooler_output.cpu().split(1, dim=0)) if len(query_vectors) % 100 == 0: logger.info("Encoded queries %d", len(query_vectors)) query_tensor = torch.cat(query_vectors, dim=0) logger.info("Total encoded queries tensor size: %s", query_tensor.size()) assert query_tensor.size(0) == len(questions) return query_tensor class LocalFaissRetriever: def __init__(self, question_encoder_name: str, batch_size: int, device: str, index: DenseFlatIndexer): self.question_encoder_name = question_encoder_name self.batch_size = batch_size self.device = device self.index = index def generate_question_vectors(self, questions: List[str]) -> T: return generate_question_vectors(questions, self.question_encoder_name, self.batch_size, self.device) def index_encoded_data(self, embeddings: Dict[str, np.ndarray], buffer_size: int): """ 从给定的上下文嵌入字典中索引已编码的上下文嵌入 :param embeddings: 上下文ID到嵌入向量的字典 :param buffer_size: 每次索引的缓冲区大小 """ buffer = [] for doc_id, vector in embeddings.items(): print(f"doc_id:{doc_id}") buffer.append((doc_id, vector)) if len(buffer) >= buffer_size: self.index.index_data(buffer) buffer = [] if buffer: self.index.index_data(buffer) logger.info("Data indexing completed.") def get_top_docs(self, query_vectors: np.array, top_docs: int = 100) -> List[Tuple[List[object], List[float]]]: """ 使用 Faiss 索引检索最佳匹配的上下文 :param query_vectors: 查询嵌入向量 :param top_docs: 要返回的文档数量 :return: 检索到的上下文 ID 和得分的列表 """ start_time = time.time() results = [] for start_idx in tqdm(range(0, query_vectors.shape[0], 512), desc="Retrieving top docs", unit="batch"): batch_query_vectors = query_vectors[start_idx: start_idx + 512] batch_results = self.index.search_knn(batch_query_vectors, top_docs) print(f"batch_results:{batch_results[0]}") results.extend(batch_results) logger.info("Index search time: %f sec.", time.time() - start_time) return results def main(): import argparse parser = argparse.ArgumentParser(description="Dense Retriever for Question Answering") parser.add_argument("--questions_file", type=str, required=True, help="Path to the input questions file (one question per line)") parser.add_argument("--context_embeddings_file", type=str, required=True, help="Path to the context embeddings file (.pkl)") parser.add_argument("--batch_size", type=int, default=8, help="Batch size for question encoding") parser.add_argument("--model_name", type=str, default="facebook/dpr-question_encoder-multiset-base", help="Name of the question encoder model") parser.add_argument("--index_path", type=str, required=True, help="Path to store or load the Faiss index") parser.add_argument("--top_docs", type=int, default=10, help="Number of top documents to retrieve") parser.add_argument("--device", type=str, default="cuda", help="Device to run the model on (e.g., 'cuda' or 'cpu')") parser.add_argument("--output_file", type=str, required=True, help="Path to save the retrieval results (.pkl)") args = parser.parse_args() with open(args.questions_file, "r") as f: questions = [line.strip() for line in f.readlines()] context_embeddings = load_context_embeddings(args.context_embeddings_file) vector_size = next(iter(context_embeddings.values())).shape[0] index = DenseFlatIndexer(vector_size) if os.path.exists(args.index_path): logger.info(f"Loading existing index from {args.index_path}") index.deserialize(args.index_path) retriever = LocalFaissRetriever(args.model_name, args.batch_size, args.device, index) else: os.makedirs(args.index_path, exist_ok=True) logger.info(f"Creating new index and saving to {args.index_path}") index.init_index(vector_size) retriever = LocalFaissRetriever(args.model_name, args.batch_size, args.device, index) retriever.index_encoded_data(context_embeddings, buffer_size=1000) index.serialize(args.index_path) question_vectors = retriever.generate_question_vectors(questions) top_results_and_scores = retriever.get_top_docs(question_vectors.numpy(), args.top_docs) queries_results = [] for i, _ in tqdm(enumerate(questions), desc="Processing queries", total=len(questions), unit="query"): docs_id = [str(item) for item in top_results_and_scores[i][0]] docs_score = [score for score in top_results_and_scores[i][1]] queries_results.append((docs_id, docs_score)) with open(args.output_file, "wb") as f: pickle.dump(queries_results, f) print(f"Results saved to {args.output_file}") if __name__ == "__main__": main()