feat: optimize

This commit is contained in:
李如威 2025-08-06 00:04:21 +08:00
parent af9f064eaf
commit 1d7dd1f03b
4 changed files with 228 additions and 482 deletions

View File

@@ -1,123 +0,0 @@
"""
BaseRAG local API endpoint usage example

This example shows how to configure BaseRAG to use a locally deployed
embedding API endpoint, and how it automatically falls back to a local
model when the API is unavailable.
"""
import sys
import os

sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..", "src"))

from base_rag import BaseRAG


class LocalAPIRAG(BaseRAG):
    def ingest(self, documents):
        """Add documents in batch"""
        if documents:
            self.vector_store.add_texts(documents)
            print(f"Added {len(documents)} documents to the vector store")

    def query(self, question, k=3):
        """Query documents"""
        return self.similarity_search_with_rerank(question, k=k)


def demo_local_api():
    """Demonstrate the local API configuration"""
    print("=== Local API endpoint configuration example ===\n")

    # Local API configuration, assuming a local embedding service
    api_embedding_config = {
        "type": "api",
        "api_url": "http://localhost:8080",  # hypothetical local API address
        "model": "text-embedding-model",
        "api_key": "optional-key"  # optional
    }

    print("Trying to connect to the local API...")
    try:
        rag_api = LocalAPIRAG(
            vector_store_name="api_test",
            embedding_config=api_embedding_config,
            rerank_config={"enabled": True, "method": "similarity", "top_k": 3}
        )
        print("Local API connected successfully!")
    except Exception as e:
        print(f"Local API connection failed: {e}")
        print("The system will automatically fall back to a local model")


def demo_local_model():
    """Demonstrate the local model configuration"""
    print("\n=== Local model configuration example ===\n")

    # Local model configuration
    local_embedding_config = {
        "type": "local",
        "model_name": "sentence-transformers/all-MiniLM-L6-v2"
    }

    rag_local = LocalAPIRAG(
        vector_store_name="local_test",
        embedding_config=local_embedding_config,
        rerank_config={"enabled": True, "method": "similarity", "top_k": 3}
    )

    # Test documents
    test_documents = [
        "Python is a high-level programming language with concise, clear syntax.",
        "Machine learning is an important branch of artificial intelligence.",
        "Deep learning uses neural networks to mimic how the human brain learns.",
        "Natural language processing helps computers understand and generate human language."
    ]

    print("Adding test documents...")
    rag_local.ingest(test_documents)

    # Test query
    query = "What is machine learning?"
    print(f"\nQuery: {query}")
    results = rag_local.query(query, k=2)
    print("Query results:")
    for i, doc in enumerate(results, 1):
        print(f"  {i}. {doc.page_content}")


def demo_local_path():
    """Demonstrate configuration with a local model path"""
    print("\n=== Local model path configuration example ===\n")

    # Assumes you have a locally downloaded model
    local_path_config = {
        "type": "local",
        "model_path": "/path/to/your/local/model",  # replace with the actual path
        "model_kwargs": {"device": "cpu"}
    }

    print("Local model path configuration:")
    print(f"  Path: {local_path_config['model_path']}")
    print("  Note: make sure the path exists and contains a valid sentence-transformers model")


def main():
    """Main entry point"""
    print("BaseRAG local API and model configuration examples\n")

    # Demonstrate the different configuration styles
    demo_local_api()
    demo_local_model()
    demo_local_path()

    print("\n=== Configuration recommendations ===")
    print("1. Development/testing: use a local model for a quick start")
    print("2. Production: use a local API endpoint for easier scaling and management")
    print("3. Offline deployment: use a local model path, no network connection required")
    print("4. Hybrid deployment: API as primary, local model as fallback (see the sketch below)")


if __name__ == "__main__":
    main()
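
Recommendation 4 (hybrid deployment) is described but not implemented by this example. A minimal sketch of the pattern, assuming, as demo_local_api does, that constructing LocalAPIRAG raises when the API endpoint is unusable; build_rag_with_fallback is a hypothetical helper, not part of BaseRAG:

def build_rag_with_fallback(api_config, local_config):
    # Hypothetical helper: try the API-backed config first,
    # fall back to the local-model config on failure.
    try:
        return LocalAPIRAG(vector_store_name="hybrid", embedding_config=api_config)
    except Exception as e:
        print(f"API unavailable ({e}); falling back to the local model")
        return LocalAPIRAG(vector_store_name="hybrid", embedding_config=local_config)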

View File

@@ -23,22 +23,15 @@ class SimpleRAG(BaseRAG):
 def main():
-    # Embedding model configuration
-    embedding_config = {
-        "type": "local",
-        "model_name": "sentence-transformers/all-MiniLM-L6-v2",
-    }
-
     # Rerank configuration - cosine-similarity based rerank
     rerank_config = {
         "enabled": True,
-        "method": "similarity",  # similarity rerank (no extra dependencies)
-        "top_k": 3
+        "type": "local",
+        "model": "BAAI/bge-reranker-base",
+        "top_k": 3,
     }
 
-    rag = SimpleRAG(
-        rerank_config=rerank_config
-    )
+    rag = SimpleRAG(rerank_config=rerank_config)
 
     print("RAG system (with rerank) initialized!")
 
     # Add more test documents
@@ -50,7 +43,7 @@ def main():
         "Apple pie is a traditional American dessert, made of apple filling in a crisp pastry crust.",
         "Apple cider vinegar is made from fermented apples; it has some health benefits and can aid digestion.",
         "The iPhone is a smartphone made by Apple Inc., with advanced technology and an excellent user experience.",
-        "Machine learning is a branch of artificial intelligence; Python is one of the most popular programming languages in the field."
+        "Machine learning is a branch of artificial intelligence; Python is one of the most popular programming languages in the field.",
     ]
     print("Adding documents...")
     rag.ingest(documents)
@@ -59,26 +52,26 @@ def main():
     # Test queries and compare the effect of rerank
     test_query = "Nutritional value and health benefits of apples"
     print(f"\nTest query: '{test_query}'")
 
     print("\n=== Results without rerank ===")
     result_no_rerank = rag.query_without_rerank(test_query, k=3)
     for i, doc in enumerate(result_no_rerank, 1):
         print(f"{i}. {doc.page_content}")
 
     print("\n=== Results with rerank ===")
     result_with_rerank = rag.query(test_query, k=3)
     for i, doc in enumerate(result_with_rerank, 1):
         print(f"{i}. {doc.page_content}")
 
     # Another test query
     test_query2 = "Apple Inc.'s main products"
     print(f"\n\nTest query 2: '{test_query2}'")
 
     print("\n=== Results without rerank ===")
     result_no_rerank2 = rag.query_without_rerank(test_query2, k=3)
     for i, doc in enumerate(result_no_rerank2, 1):
         print(f"{i}. {doc.page_content}")
 
     print("\n=== Results with rerank ===")
     result_with_rerank2 = rag.query(test_query2, k=3)
     for i, doc in enumerate(result_with_rerank2, 1):
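
The new rerank_config can also point at a remote rerank service instead of a local CrossEncoder. A minimal sketch using the "api" type documented in base_rag; the URL, model name, and key are placeholders:

api_rerank_config = {
    "enabled": True,
    "type": "api",
    "api_url": "http://localhost:8000/rerank",  # placeholder endpoint
    "model": "reranker-model",                  # placeholder model name
    "api_key": "your-key",
    "top_k": 3,
}
rag = SimpleRAG(rerank_config=api_rerank_config)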

View File

@@ -1,135 +0,0 @@
import sys
import os

sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..", "src"))

from base_rag import BaseRAG


class RerankRAG(BaseRAG):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.document_count = 0

    def ingest(self, documents):
        """Add documents in batch, avoiding duplicates"""
        if documents:
            # Drop the existing collection and re-add all documents
            self.vector_store.delete_collection()
            # Re-initialize the vector store
            from langchain_chroma import Chroma
            self.vector_store = Chroma(
                collection_name=self.vector_store_name,
                embedding_function=self.embedding_model,
                persist_directory=self.persist_directory,
            )
            # Add all documents
            self.vector_store.add_texts(documents)
            self.document_count = len(documents)
            print(f"Added {self.document_count} documents to the vector store")

    def query_with_scores(self, question, k=5):
        """Query with and without rerank, for comparing rerank results"""
        # Results without rerank
        docs_no_rerank = self.similarity_search(question, k=k)
        # Results with rerank
        docs_with_rerank = self.similarity_search_with_rerank(question, k=k)
        return docs_no_rerank, docs_with_rerank

    def query(self, question, k=3):
        return self.similarity_search_with_rerank(question, k=k)


def main():
    # Embedding model configuration
    embedding_config = {
        "type": "local",
        "model_name": "sentence-transformers/all-MiniLM-L6-v2",
    }

    # Rerank configuration
    rerank_config = {"enabled": True, "method": "cross_encoder", "top_k": 3}

    # Initialize the RAG system
    rag = RerankRAG(
        vector_store_name="rerank_test",
        embedding_config=embedding_config,
        rerank_config=rerank_config,
        retriever_top_k=5  # fetch more candidate documents
    )

    print("RAG system (with rerank) initialized!\n")

    # Test documents covering several topics
    documents = [
        # Fruit
        "Apples are a very nutritious fruit, rich in vitamin C, fiber, and antioxidants, and good for heart health.",
        "Oranges are rich in vitamin C; as the classic citrus fruit, they help strengthen the immune system.",
        "Bananas are rich in potassium, which helps keep blood pressure stable, making them an ideal energy boost for athletes.",
        # Tech companies
        "Apple Inc. is a world-famous technology company whose main products include the iPhone, iPad, and Mac computers.",
        "Google focuses on search and cloud computing services; the Android operating system is one of its key products.",
        "Microsoft develops the Windows operating system and Office software, and holds a major position in enterprise software.",
        # Programming languages
        "Python is a high-level programming language with concise syntax, widely used in data science, machine learning, and web development.",
        "Java is an object-oriented programming language whose cross-platform nature makes it widely used in enterprise development.",
        "JavaScript is the core language of web development, powering interactive and dynamic behavior in web pages.",
        # Health
        "Regular exercise helps maintain good health; at least 150 minutes of moderate-intensity aerobic exercise per week is recommended.",
        "A balanced diet is the foundation of health: eat more vegetables and fruit, and cut down on processed food.",
        "Adequate sleep is essential for physical and mental health; adults should get 7-9 hours of sleep per day."
    ]

    print("Adding test documents...")
    rag.ingest(documents)
    print("Documents added!\n")

    # Test queries
    test_queries = [
        {
            "query": "What are the health benefits of apples?",
            "expected_topic": "Should favor the fruit/nutrition documents"
        },
        {
            "query": "What is Apple Inc.'s main business?",
            "expected_topic": "Should favor the tech-company documents"
        },
        {
            "query": "How do I stay healthy?",
            "expected_topic": "Should favor the health-advice documents"
        }
    ]

    for i, test_case in enumerate(test_queries, 1):
        query = test_case["query"]
        expected = test_case["expected_topic"]

        print(f"=== Test query {i}: {query} ===")
        print(f"Expected: {expected}\n")

        # Get both kinds of search results
        docs_no_rerank, docs_with_rerank = rag.query_with_scores(query, k=5)

        print("📍 Results without rerank:")
        for j, doc in enumerate(docs_no_rerank[:3], 1):
            print(f"  {j}. {doc.page_content}")

        print("\n🎯 Results with rerank:")
        for j, doc in enumerate(docs_with_rerank[:3], 1):
            print(f"  {j}. {doc.page_content}")

        print("\n" + "=" * 80 + "\n")

    print("Rerank test complete!")
    print("\nNotes:")
    print("- Rerank re-orders the retrieved results by computing cosine similarity between the query and each document (sketched below)")
    print("- In theory, the reranked results should better match the query intent")
    print("- If the results are identical, the initial retrieval may already be good, or the test data may need more variety")


if __name__ == "__main__":
    main()
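
For reference, the cosine-similarity rerank mentioned in the closing notes reduces to a few lines of numpy. A standalone sketch for illustration, not the BaseRAG implementation; cosine_rerank and its arguments are hypothetical:

import numpy as np

def cosine_rerank(query_vec, doc_vecs, docs, top_k=3):
    # Score each document by cosine similarity to the query, highest first.
    sims = [
        float(np.dot(query_vec, d) / (np.linalg.norm(query_vec) * np.linalg.norm(d)))
        for d in doc_vecs
    ]
    ranked = sorted(zip(docs, sims), key=lambda pair: pair[1], reverse=True)
    return [doc for doc, _ in ranked[:top_k]]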

View File

@@ -1,5 +1,5 @@
 from abc import ABC, abstractmethod
-from typing import List, Optional, Dict, ClassVar, Union, Tuple
+from typing import List, Optional, Dict, ClassVar, Union, Tuple, Any
 import threading
 import numpy as np
@@ -13,12 +13,166 @@ from langchain.llms.base import BaseLLM
 from langchain.schema import Document
 
 
-class BaseRAG(ABC):
+class ModelManager:
+    """Unified model manager for creating and caching embedding and rerank models"""
+
     # Class-level model cache
-    _embedding_models: ClassVar[Dict[str, Embeddings]] = {}
+    _models: ClassVar[Dict[str, Any]] = {}
     # Thread lock protecting concurrent access to the model cache
     _lock: ClassVar[threading.Lock] = threading.Lock()
 
+    @classmethod
+    def get_config_key(cls, config: Dict, model_type: str = "embedding") -> str:
+        """Build a unique cache key from a config"""
+        config_type = config.get("type", "local")
+        prefix = f"{model_type}_{config_type}"
+        if config_type == "local":
+            # Support both a local path and a model name
+            if "model_path" in config:
+                path_key = config["model_path"].replace("/", "_").replace("\\", "_")
+                return f"{prefix}_path_{path_key}"
+            else:
+                model_key = config.get("model_name", config.get("model", "default"))
+                return f"{prefix}_name_{model_key}"
+        elif config_type == "api":
+            api_key = (
+                config.get("api_url", "default").replace("/", "_").replace(":", "_")
+            )
+            return f"{prefix}_api_{api_key}"
+        else:
+            model_key = config.get("model", "default")
+            return f"{prefix}_{model_key}"
+
+    @classmethod
+    def get_or_create_model(cls, config: Dict, model_type: str, creator_func) -> Any:
+        """Get or create a model (cached, thread-safe)"""
+        config_key = cls.get_config_key(config, model_type)
+        # Double-checked locking
+        if config_key in cls._models:
+            print(f"Using cached {model_type} model: {config_key}")
+            return cls._models[config_key]
+        with cls._lock:
+            # Check again to guard against concurrent creation
+            if config_key not in cls._models:
+                print(f"Creating {model_type} model: {config_key}")
+                cls._models[config_key] = creator_func(config)
+            else:
+                print(f"Using cached {model_type} model: {config_key}")
+            return cls._models[config_key]
+
+    @staticmethod
+    def create_embedding_model(config: Dict) -> Embeddings:
+        """Create an embedding model"""
+        config_type = config.get("type", "local")
+        if config_type == "local":
+            # Support both a local path and a model name
+            if "model_path" in config:
+                model_path = config["model_path"]
+                print(f"Loading embedding model from local path: {model_path}")
+                model_name = model_path
+            else:
+                model_name = config.get(
+                    "model_name",
+                    config.get("model", "sentence-transformers/all-MiniLM-L6-v2"),
+                )
+                print(f"Loading embedding model from the HuggingFace Hub: {model_name}")
+            return HuggingFaceEmbeddings(
+                model_name=model_name,
+                model_kwargs=config.get("model_kwargs", {"device": "cpu"}),
+                encode_kwargs=config.get(
+                    "encode_kwargs", {"normalize_embeddings": True}
+                ),
+            )
+        elif config_type == "api":
+            try:
+                from langchain_openai import OpenAIEmbeddings
+
+                api_url = config.get("api_url")
+                if not api_url:
+                    raise ValueError("api_url is required when type is 'api'")
+                print(f"Connecting to embedding API: {api_url}")
+                return OpenAIEmbeddings(
+                    model=config.get("model", "text-embedding"),
+                    base_url=api_url,
+                    api_key=config.get("api_key", "dummy"),
+                    max_retries=config.get("max_retries", 3),
+                )
+            except ImportError:
+                print("Warning: langchain_openai is not installed; falling back to a local model")
+                model_name = config.get(
+                    "model", "sentence-transformers/all-MiniLM-L6-v2"
+                )
+                print(f"Falling back to local model: {model_name}")
+                return HuggingFaceEmbeddings(
+                    model_name=model_name,
+                    model_kwargs=config.get("model_kwargs", {"device": "cpu"}),
+                    encode_kwargs=config.get(
+                        "encode_kwargs", {"normalize_embeddings": True}
+                    ),
+                )
+        else:
+            raise ValueError(
+                f"Unsupported embedding model type: {config_type}; supported types: 'local', 'api'"
+            )
+
+    @staticmethod
+    def create_rerank_model(config: Dict) -> Any:
+        """Create a rerank model"""
+        config_type = config.get("type", "local")
+        if config_type == "local":
+            try:
+                from sentence_transformers import CrossEncoder
+
+                # Support both a local path and a model name
+                if "model_path" in config:
+                    model_path = config["model_path"]
+                    print(f"Loading rerank model from local path: {model_path}")
+                    return CrossEncoder(model_path)
+                else:
+                    model_name = config.get("model", "BAAI/bge-reranker-base")
+                    print(f"Loading BGE rerank model from the HuggingFace Hub: {model_name}")
+                    return CrossEncoder(model_name)
+            except ImportError:
+                print("Warning: sentence-transformers is not installed; local rerank models are unavailable")
+                return None
+            except Exception as e:
+                print(f"Failed to load the local rerank model: {e}")
+                return None
+        elif config_type == "api":
+            try:
+                api_url = config.get("api_url")
+                if not api_url:
+                    raise ValueError("api_url is required when type is 'api'")
+                print(f"Connecting to rerank API: {api_url}")
+                return {
+                    "type": "api",
+                    "api_url": api_url,
+                    "model": config.get("model", "reranker"),
+                    "api_key": config.get("api_key", "dummy"),
+                    "max_retries": config.get("max_retries", 3),
+                }
+            except Exception as e:
+                print(f"Failed to initialize the API rerank model: {e}")
+                return None
+        else:
+            raise ValueError(f"Unsupported rerank model type: {config_type}; supported types: 'local', 'api'")
+
+
+class BaseRAG(ABC):
     def __init__(
         self,
         vector_store_name: str = "default",
@@ -43,9 +197,9 @@ class BaseRAG(ABC):
         Locally deployed endpoint: {"type": "api", "api_url": "http://localhost:8000/embeddings", "model": "your-model"}
 
         rerank_config examples:
-        {"enabled": True, "method": "cross_encoder", "model": "cross-encoder/ms-marco-MiniLM-L-6-v2", "top_k": 3}
-        {"enabled": True, "method": "bge", "model": "BAAI/bge-reranker-base", "top_k": 3}
-        {"enabled": True, "method": "similarity", "top_k": 3}
+        {"enabled": True, "type": "local", "model": "BAAI/bge-reranker-base", "top_k": 3}
+        {"enabled": True, "type": "local", "model_path": "/path/to/your/rerank/model", "top_k": 3}
+        {"enabled": True, "type": "api", "api_url": "http://localhost:8000/rerank", "model": "reranker-model", "api_key": "your-key", "top_k": 3}
         """
         self.vector_store_name = vector_store_name
         self.embedding_config = embedding_config or {
@@ -57,16 +211,17 @@ class BaseRAG(ABC):
         self.persist_directory = persist_directory
         self.rerank_config = rerank_config or {"enabled": False}
 
-        # Use a cached embedding model
-        config_key = self._get_config_key(self.embedding_config)
-        self.embedding_model = self._get_or_create_embedding_model(
-            config_key, self.embedding_config
+        # Create the embedding model through the unified model manager
+        self.embedding_model = ModelManager.get_or_create_model(
+            self.embedding_config, "embedding", ModelManager.create_embedding_model
         )
 
         # Initialize the rerank model
         self.reranker = None
         if self.rerank_config.get("enabled", False):
-            self.reranker = self._init_reranker()
+            self.reranker = ModelManager.get_or_create_model(
+                self.rerank_config, "rerank", ModelManager.create_rerank_model
+            )
 
         # Initialize the Chroma vector store
         self.vector_store = Chroma(
@@ -75,151 +230,6 @@ class BaseRAG(ABC):
             persist_directory=persist_directory,
         )
 
-    @staticmethod
-    def _get_config_key(config: Dict) -> str:
-        """
-        Build a unique cache key from the config
-        """
-        config_type = config.get("type", "local")
-        if config_type == "local":
-            # Support both a local path and a model name
-            if "model_path" in config:
-                return f"local_path_{config['model_path'].replace('/', '_').replace('\\', '_')}"
-            else:
-                return f"local_name_{config.get('model_name', 'default')}"
-        elif config_type == "api":
-            return f"api_{config.get('api_url', 'default').replace('/', '_').replace(':', '_')}"
-        else:
-            return f"{config_type}_{config.get('model', 'default')}"
-
-    @classmethod
-    def _get_or_create_embedding_model(
-        cls, config_key: str, config: Dict
-    ) -> Embeddings:
-        """
-        Get or create an embedding model (cached, thread-safe)
-        """
-        # Double-checked locking: look first to avoid unnecessary lock overhead
-        if config_key in cls._embedding_models:
-            print(f"Using cached embedding model: {config_key}")
-            return cls._embedding_models[config_key]
-
-        # Take the lock for safe creation
-        with cls._lock:
-            # Check again in case another thread created the model while we waited
-            if config_key not in cls._embedding_models:
-                print(f"Creating embedding model: {config_key}")
-                cls._embedding_models[config_key] = cls._create_embedding_model(config)
-            else:
-                print(f"Using cached embedding model: {config_key}")
-            return cls._embedding_models[config_key]
-
-    @staticmethod
-    def _create_embedding_model(config: Dict) -> Embeddings:
-        """
-        Create an embedding model from the config
-        """
-        config_type = config.get("type", "local")
-        if config_type == "local":
-            # Support both a local path and a model name
-            if "model_path" in config:
-                model_path = config["model_path"]
-                print(f"Loading model from local path: {model_path}")
-                return HuggingFaceEmbeddings(
-                    model_name=model_path,
-                    model_kwargs=config.get("model_kwargs", {"device": "cpu"}),
-                    encode_kwargs=config.get(
-                        "encode_kwargs", {"normalize_embeddings": True}
-                    ),
-                )
-            else:
-                model_name = config.get(
-                    "model_name", "sentence-transformers/all-MiniLM-L6-v2"
-                )
-                print(f"Loading model from the HuggingFace Hub: {model_name}")
-                return HuggingFaceEmbeddings(
-                    model_name=model_name,
-                    model_kwargs=config.get("model_kwargs", {"device": "cpu"}),
-                    encode_kwargs=config.get(
-                        "encode_kwargs", {"normalize_embeddings": True}
-                    ),
-                )
-        elif config_type == "api":
-            # Locally deployed embedding API endpoint
-            try:
-                from langchain_openai import OpenAIEmbeddings
-
-                api_url = config.get("api_url")
-                if not api_url:
-                    raise ValueError("api_url is required when type is 'api'")
-
-                print(f"Connecting to local embedding API: {api_url}")
-                return OpenAIEmbeddings(
-                    model=config.get("model", "text-embedding"),
-                    base_url=api_url,
-                    api_key=config.get("api_key", "dummy"),  # a local API may not need a key
-                    max_retries=config.get("max_retries", 3),
-                )
-            except ImportError:
-                print("Warning: langchain_openai is not installed; the API endpoint is unavailable")
-                # Fall back to a local model
-                model_name = config.get(
-                    "model", "sentence-transformers/all-MiniLM-L6-v2"
-                )
-                print(f"Falling back to local model: {model_name}")
-                return HuggingFaceEmbeddings(
-                    model_name=model_name,
-                    model_kwargs=config.get("model_kwargs", {"device": "cpu"}),
-                    encode_kwargs=config.get(
-                        "encode_kwargs", {"normalize_embeddings": True}
-                    ),
-                )
-        else:
-            raise ValueError(
-                f"Unsupported embedding model type: {config_type}; supported types: 'local', 'api'"
-            )
-
-    def _init_reranker(self):
-        """Initialize the rerank model"""
-        method = self.rerank_config.get("method", "cross_encoder")
-
-        # Similarity rerank needs no extra model
-        if method == "similarity":
-            print("Using cosine-similarity based rerank")
-            return "similarity"  # sentinel value
-
-        if method == "cross_encoder":
-            try:
-                from sentence_transformers import CrossEncoder
-
-                model_name = self.rerank_config.get(
-                    "model", "cross-encoder/ms-marco-MiniLM-L-6-v2"
-                )
-                print(f"Loading CrossEncoder rerank model: {model_name}")
-                return CrossEncoder(model_name)
-            except ImportError:
-                print("Warning: sentence-transformers is not installed; CrossEncoder rerank is unavailable")
-                return None
-        elif method == "bge":
-            try:
-                from FlagEmbedding import FlagReranker
-
-                model_name = self.rerank_config.get("model", "BAAI/bge-reranker-base")
-                print(f"Loading BGE rerank model: {model_name}")
-                return FlagReranker(model_name, use_fp16=True)
-            except ImportError:
-                print("Warning: FlagEmbedding is not installed; BGE rerank is unavailable")
-                return None
-        else:
-            print(f"Warning: unsupported rerank method: {method}; using similarity rerank instead")
-            return "similarity"
-
     def _rerank_documents(
         self, query: str, documents: List[Document]
     ) -> List[Document]:
@@ -227,36 +237,22 @@ class BaseRAG(ABC):
         if not documents:
             return documents
 
-        method = self.rerank_config.get("method", "cross_encoder")
         top_k = self.rerank_config.get("top_k", len(documents))
 
-        # For similarity rerank, call the similarity rerank method directly
-        if method == "similarity":
-            return self._similarity_rerank(query, documents)
-
-        # Other methods need a reranker model
-        if not self.reranker or self.reranker == "similarity":
-            print("Rerank model is not initialized; using the default similarity rerank")
-            return self._similarity_rerank(query, documents)
+        # Check whether the reranker model is available
+        if not self.reranker:
+            print("Rerank model is not initialized; skipping rerank")
+            return documents[:top_k]
 
         try:
-            if method == "cross_encoder":
-                # Prepare the input pairs
+            # Check whether this is API mode
+            if isinstance(self.reranker, dict) and self.reranker.get("type") == "api":
+                return self._api_rerank(query, documents, top_k)
+            else:
+                # Local model mode (CrossEncoder)
                 query_doc_pairs = [(query, doc.page_content) for doc in documents]
                 scores = self.reranker.predict(query_doc_pairs)
 
-                # Sort by score
-                doc_scores = list(zip(documents, scores))
-                doc_scores.sort(key=lambda x: x[1], reverse=True)
-
-                # Return the top_k documents
-                return [doc for doc, score in doc_scores[:top_k]]
-            elif method == "bge":
-                # Rerank with BGE
-                query_doc_pairs = [[query, doc.page_content] for doc in documents]
-                scores = self.reranker.compute_score(query_doc_pairs)
-
                 # Handle the single-document case
                 if not isinstance(scores, list):
                     scores = [scores]
@@ -269,44 +265,59 @@ class BaseRAG(ABC):
                 return [doc for doc, score in doc_scores[:top_k]]
 
         except Exception as e:
-            print(f"Rerank failed: {e}; falling back to similarity rerank")
-            return self._similarity_rerank(query, documents)
+            print(f"Rerank failed: {e}; skipping rerank")
+            return documents[:top_k]
 
-        return self._similarity_rerank(query, documents)
-
-    def _similarity_rerank(
-        self, query: str, documents: List[Document]
+    def _api_rerank(
+        self, query: str, documents: List[Document], top_k: int
     ) -> List[Document]:
-        """Simple cosine-similarity rerank (fallback)"""
-        if not documents:
-            return documents
+        """Rerank through an API"""
+        import requests
 
         try:
-            # Get the query vector
-            query_embedding = self.embedding_model.embed_query(query)
-
-            # Get the document vectors
-            doc_texts = [doc.page_content for doc in documents]
-            doc_embeddings = self.embedding_model.embed_documents(doc_texts)
-
-            # Compute the cosine similarities
-            similarities = []
-            for doc_emb in doc_embeddings:
-                similarity = np.dot(query_embedding, doc_emb) / (
-                    np.linalg.norm(query_embedding) * np.linalg.norm(doc_emb)
-                )
-                similarities.append(similarity)
-
-            # Sort by similarity
-            doc_similarities = list(zip(documents, similarities))
-            doc_similarities.sort(key=lambda x: x[1], reverse=True)
-
-            top_k = self.rerank_config.get("top_k", len(documents))
-            return [doc for doc, sim in doc_similarities[:top_k]]
+            api_config = self.reranker
+            api_url = api_config["api_url"]
+
+            # Build the API request payload
+            payload = {
+                "model": api_config["model"],
+                "query": query,
+                "documents": [doc.page_content for doc in documents],
+                "top_k": top_k,
+            }
+            headers = {
+                "Content-Type": "application/json",
+                "Authorization": f"Bearer {api_config['api_key']}",
+            }
+
+            # Send the API request
+            response = requests.post(api_url, json=payload, headers=headers, timeout=30)
+
+            if response.status_code == 200:
+                result = response.json()
+                # Assumed response format: {"scores": [0.9, 0.8, ...]} or {"results": [{"index": 0, "score": 0.9}, ...]}
+                if "scores" in result:
+                    scores = result["scores"]
+                elif "results" in result:
+                    scores = [item["score"] for item in result["results"]]
+                else:
+                    raise ValueError("Unsupported API response format")
+
+                # Sort by score
+                doc_scores = list(zip(documents, scores))
+                doc_scores.sort(key=lambda x: x[1], reverse=True)
+                return [doc for doc, score in doc_scores[:top_k]]
+            else:
+                print(f"API rerank request failed: {response.status_code}, {response.text}")
+                return documents[:top_k]
         except Exception as e:
-            print(f"Similarity rerank failed: {e}")
-            return documents
+            print(f"API rerank failed: {e}; skipping rerank")
+            return documents[:top_k]
 
     def load_and_split_documents(self, file_path: str) -> List[Document]:
         """