feat: handle issues

parent 5f3fc6ff07
commit af9f064eaf
@@ -37,7 +37,6 @@ def main():
    }

    rag = SimpleRAG(
        embedding_config=embedding_config,
        rerank_config=rerank_config
    )
    print("RAG system (with reranking) initialized!")
@@ -53,7 +52,6 @@ def main():
        "iPhone是苹果公司生产的智能手机,具有先进的技术和优秀的用户体验。",
        "机器学习是人工智能的一个分支,Python是机器学习领域最流行的编程语言之一。"
    ]

    print("Adding documents...")
    rag.ingest(documents)
    print(f"Finished adding documents! Added {len(documents)} documents in total.")
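The embedding_config and rerank_config dicts passed to SimpleRAG above are defined outside these hunks. For reference only, a minimal sketch of the shape they take, modeled on the defaults documented in BaseRAG later in this diff (the actual values in the example script may differ):

# Hypothetical configs, following the docstring examples further down in this diff.
embedding_config = {
    "type": "local",
    "model_name": "BAAI/bge-small-zh-v1.5",
}
rerank_config = {
    "enabled": True,
    "method": "bge",
    "model": "BAAI/bge-reranker-base",
    "top_k": 3,
}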
@@ -38,10 +38,10 @@ class BaseRAG(ABC):
        :param rerank_config: reranking configuration

        embedding_config examples:
-            Local model name: {"type": "local", "model_name": "sentence-transformers/all-MiniLM-L6-v2"}
+            Local model name: {"type": "local", "model_name": "BAAI/bge-small-zh-v1.5"}
            Local model path: {"type": "local", "model_path": "/path/to/your/model"}
            Locally deployed API: {"type": "api", "api_url": "http://localhost:8000/embeddings", "model": "your-model"}

        rerank_config examples:
            {"enabled": True, "method": "cross_encoder", "model": "cross-encoder/ms-marco-MiniLM-L-6-v2", "top_k": 3}
            {"enabled": True, "method": "bge", "model": "BAAI/bge-reranker-base", "top_k": 3}
@@ -50,7 +50,7 @@ class BaseRAG(ABC):
        self.vector_store_name = vector_store_name
        self.embedding_config = embedding_config or {
            "type": "local",
-            "model_name": "sentence-transformers/all-MiniLM-L6-v2",
+            "model_name": "BAAI/bge-small-zh-v1.5",
        }
        self.retriever_top_k = retriever_top_k
        self.llm = llm
@@ -151,11 +151,11 @@ class BaseRAG(ABC):
            # Locally deployed embedding API endpoint
            try:
                from langchain_openai import OpenAIEmbeddings

                api_url = config.get("api_url")
                if not api_url:
                    raise ValueError("api_url is required when using the API type")

                print(f"Connecting to local embedding API: {api_url}")
                return OpenAIEmbeddings(
                    model=config.get("model", "text-embedding"),
@@ -166,12 +166,16 @@ class BaseRAG(ABC):
            except ImportError:
                print("Warning: langchain_openai is not installed; the API endpoint cannot be used")
                # Fall back to a local model
-                model_name = config.get("model", "sentence-transformers/all-MiniLM-L6-v2")
+                model_name = config.get(
+                    "model", "sentence-transformers/all-MiniLM-L6-v2"
+                )
                print(f"Falling back to local model: {model_name}")
                return HuggingFaceEmbeddings(
                    model_name=model_name,
                    model_kwargs=config.get("model_kwargs", {"device": "cpu"}),
-                    encode_kwargs=config.get("encode_kwargs", {"normalize_embeddings": True}),
+                    encode_kwargs=config.get(
+                        "encode_kwargs", {"normalize_embeddings": True}
+                    ),
                )

        else:
@@ -182,101 +186,109 @@ class BaseRAG(ABC):
    def _init_reranker(self):
        """Initialize the reranking model."""
        method = self.rerank_config.get("method", "cross_encoder")

        # Similarity reranking needs no extra model
        if method == "similarity":
            print("Using cosine-similarity-based reranking")
            return "similarity"  # return an identifier string

        if method == "cross_encoder":
            try:
                from sentence_transformers import CrossEncoder

-                model_name = self.rerank_config.get("model", "cross-encoder/ms-marco-MiniLM-L-6-v2")
+                model_name = self.rerank_config.get(
+                    "model", "cross-encoder/ms-marco-MiniLM-L-6-v2"
+                )
                print(f"Loading CrossEncoder reranking model: {model_name}")
                return CrossEncoder(model_name)
            except ImportError:
                print("Warning: sentence-transformers is not installed; CrossEncoder reranking is unavailable")
                return None

        elif method == "bge":
            try:
                from FlagEmbedding import FlagReranker

                model_name = self.rerank_config.get("model", "BAAI/bge-reranker-base")
                print(f"Loading BGE reranking model: {model_name}")
                return FlagReranker(model_name, use_fp16=True)
            except ImportError:
                print("Warning: FlagEmbedding is not installed; BGE reranking is unavailable")
                return None

        else:
            print(f"Warning: unsupported reranking method: {method}; falling back to similarity reranking")
            return "similarity"

-    def _rerank_documents(self, query: str, documents: List[Document]) -> List[Document]:
+    def _rerank_documents(
+        self, query: str, documents: List[Document]
+    ) -> List[Document]:
        """Rerank the retrieved documents."""
        if not documents:
            return documents

        method = self.rerank_config.get("method", "cross_encoder")
        top_k = self.rerank_config.get("top_k", len(documents))

        # For similarity reranking, call the similarity-based method directly
        if method == "similarity":
            return self._similarity_rerank(query, documents)

        # Other methods need a reranker model
        if not self.reranker or self.reranker == "similarity":
            print("Reranker not initialized; using default similarity reranking")
            return self._similarity_rerank(query, documents)

        try:
            if method == "cross_encoder":
                # Build (query, document) input pairs
                query_doc_pairs = [(query, doc.page_content) for doc in documents]
                scores = self.reranker.predict(query_doc_pairs)

                # Sort by score
                doc_scores = list(zip(documents, scores))
                doc_scores.sort(key=lambda x: x[1], reverse=True)

                # Return the top_k documents
                return [doc for doc, score in doc_scores[:top_k]]

            elif method == "bge":
                # Rerank with BGE
                query_doc_pairs = [[query, doc.page_content] for doc in documents]
                scores = self.reranker.compute_score(query_doc_pairs)

                # Handle the single-document case
                if not isinstance(scores, list):
                    scores = [scores]

                # Sort by score
                doc_scores = list(zip(documents, scores))
                doc_scores.sort(key=lambda x: x[1], reverse=True)

                # Return the top_k documents
                return [doc for doc, score in doc_scores[:top_k]]

        except Exception as e:
            print(f"Reranking failed: {e}; falling back to similarity reranking")
            return self._similarity_rerank(query, documents)

        return self._similarity_rerank(query, documents)

-    def _similarity_rerank(self, query: str, documents: List[Document]) -> List[Document]:
+    def _similarity_rerank(
+        self, query: str, documents: List[Document]
+    ) -> List[Document]:
        """Simple cosine-similarity reranking (fallback)."""
        if not documents:
            return documents

        try:
            # Embed the query
            query_embedding = self.embedding_model.embed_query(query)

            # Embed the documents
            doc_texts = [doc.page_content for doc in documents]
            doc_embeddings = self.embedding_model.embed_documents(doc_texts)

            # Compute cosine similarities
            similarities = []
            for doc_emb in doc_embeddings:
@@ -284,14 +296,14 @@ class BaseRAG(ABC):
                    np.linalg.norm(query_embedding) * np.linalg.norm(doc_emb)
                )
                similarities.append(similarity)

            # Sort by similarity
            doc_similarities = list(zip(documents, similarities))
            doc_similarities.sort(key=lambda x: x[1], reverse=True)

            top_k = self.rerank_config.get("top_k", len(documents))
            return [doc for doc, sim in doc_similarities[:top_k]]

        except Exception as e:
            print(f"Similarity reranking failed: {e}")
            return documents
@@ -303,7 +315,7 @@ class BaseRAG(ABC):
        loader = TextLoader(file_path, encoding="utf-8")
        documents = loader.load()

-        splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200)
+        splitter = RecursiveCharacterTextSplitter(chunk_size=200, chunk_overlap=20)
        return splitter.split_documents(documents)

    def add_documents_to_vector_store(self, documents: List[Document]):
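The hunk above shrinks chunks from 1000 characters with 200 overlap to 200 characters with 20 overlap, which suits the short demo documents better. A standalone sketch of the new splitter settings, assuming the langchain_text_splitters package (the import path used in the actual module may differ):

# Sketch only: demonstrates how the new chunk settings split a long document.
from langchain_core.documents import Document
from langchain_text_splitters import RecursiveCharacterTextSplitter

splitter = RecursiveCharacterTextSplitter(chunk_size=200, chunk_overlap=20)
docs = [Document(page_content="机器学习是人工智能的一个分支。" * 30)]  # ~450 characters
chunks = splitter.split_documents(docs)
print(len(chunks), max(len(c.page_content) for c in chunks))  # several chunks, each <= 200 chars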
@@ -338,7 +350,9 @@ class BaseRAG(ABC):
        k = k or self.retriever_top_k
        return self.vector_store.similarity_search(query, k=k)

-    def similarity_search_with_rerank(self, query: str, k: int = None) -> List[Document]:
+    def similarity_search_with_rerank(
+        self, query: str, k: int = None
+    ) -> List[Document]:
        """
        Similarity search with reranking.
        """
@@ -347,13 +361,13 @@ class BaseRAG(ABC):
        if self.rerank_config.get("enabled", False):
            # Retrieve more candidate documents for reranking
            initial_k = max(initial_k * 2, 10)

        documents = self.vector_store.similarity_search(query, k=initial_k)

        # Rerank if reranking is enabled
        if self.rerank_config.get("enabled", False) and documents:
            documents = self._rerank_documents(query, documents)

        # Return the final top_k results
        final_k = k or self.retriever_top_k
        return documents[:final_k]
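Putting the pieces together, a minimal end-to-end sketch of the reranking path this commit wires up; SimpleRAG's full constructor is not shown in this diff, so the keyword arguments below are assumptions based on the main() hunk at the top:

# Hypothetical usage; mirrors the example script and the BaseRAG API shown in this diff.
rag = SimpleRAG(
    embedding_config={"type": "local", "model_name": "BAAI/bge-small-zh-v1.5"},
    rerank_config={"enabled": True, "method": "similarity", "top_k": 3},  # no extra model needed
)
rag.ingest([
    "机器学习是人工智能的一个分支。",
    "iPhone是苹果公司生产的智能手机。",
])

# Retrieves extra candidates (at least 10), reranks them, then returns the final top_k.
results = rag.similarity_search_with_rerank("什么是机器学习?", k=2)
for doc in results:
    print(doc.page_content)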