easy-rag/services/rag_service.py

import asyncio
import os
import time
from typing import Any, Dict, List

import torch
from langchain.callbacks import AsyncIteratorCallbackHandler
from langchain.prompts import PromptTemplate
from langchain_openai import ChatOpenAI
from transformers import AutoModelForSequenceClassification, AutoTokenizer

from services.vector_store import AsyncVectorStore
from utils.logger import get_logger

class AsyncRAGService:
    """Main async RAG service class."""

    def __init__(self):
        self.logger = get_logger(__name__)
        self.vector_store = AsyncVectorStore()
        self.openai_api_base = os.getenv("OPENAI_BASE_URL")
        self.openai_api_key = os.getenv("OPENAI_API_KEY")
        self.llm = ChatOpenAI(
            model="deepseek-r1:8b",
            temperature=0.7,
            openai_api_key=self.openai_api_key,
            openai_api_base=self.openai_api_base,
        )
        self.prompt_template = PromptTemplate(
            input_variables=["context", "question"],
            template=(
                "Answer the question based on the following context. If the context "
                "contains no relevant information, state that the answer cannot be "
                "found in the provided documents.\n\n"
                "Context:\n"
                "{context}\n\n"
                "Question: {question}\n\n"
                "Answer:"
            ),
        )
        # Note: this local path points at a Qwen3-Embedding checkpoint, yet it is
        # loaded as a sequence-classification reranker below. Loading an embedding
        # model this way attaches a freshly initialized classification head; a
        # dedicated reranker checkpoint (e.g. Qwen3-Reranker) is normally expected here.
        self.tokenizer = AutoTokenizer.from_pretrained(
            "/Volumes/LRW/Model/Qwen3-Embedding-0.6B", trust_remote_code=True
        )
        self.rerank_model = AutoModelForSequenceClassification.from_pretrained(
            "/Volumes/LRW/Model/Qwen3-Embedding-0.6B",
            trust_remote_code=True,
            device_map="auto",  # or "cuda"
        )
        self.logger.info("RAG service initialized")

    async def add_document_async(self, content: str, filename: str) -> str:
        """Add a document asynchronously."""
        start_time = time.time()
        try:
            self.logger.info(f"Adding document: {filename}")
            result = await self.vector_store.add_document_async(content, filename)
            duration = time.time() - start_time
            self.logger.info(f"Document added: {filename}, took {duration:.2f}s")
            return result
        except Exception as e:
            duration = time.time() - start_time
            self.logger.error(
                f"Failed to add document: {filename}, error: {str(e)}, took {duration:.2f}s"
            )
            raise

    async def chat_async(
        self, question: str, top_k: int = 3, temperature: float = 0.7
    ) -> Dict[str, Any]:
        """Answer a question asynchronously (non-streaming)."""
        start_time = time.time()
        try:
            self.logger.info(f"Processing question: {question[:50]}...")

            # Retrieve relevant documents asynchronously
            search_results = await self.vector_store.search_async(question, top_k)
            self.logger.debug(f"Retrieved {len(search_results)} relevant documents")

            if not search_results:
                self.logger.warning("No relevant documents found")
                return {
                    "answer": "Sorry, I could not find relevant information in the existing documents to answer your question.",
                    "sources": [],
                    "processing_time": time.time() - start_time,
                }

            # Rerank the retrieved documents
            reranked_results = await self._rerank_results(question, search_results)

            # Build the context and format the sources in parallel
            context_task = asyncio.create_task(
                self._build_context_async(reranked_results)
            )
            sources_task = asyncio.create_task(
                self._format_sources_async(reranked_results)
            )

            # Wait for the context to be ready
            context = await context_task

            # Generate the answer; run the blocking LLM call in a worker thread
            self.llm.temperature = temperature
            prompt = self.prompt_template.format(context=context, question=question)
            response = await asyncio.to_thread(self.llm.invoke, prompt)

            # Wait for the formatted source info
            sources = await sources_task

            duration = time.time() - start_time
            self.logger.info(f"Question answered, took {duration:.2f}s")

            return {
                "answer": response.content,
                "sources": sources,
                "processing_time": duration,
            }
        except Exception as e:
            duration = time.time() - start_time
            self.logger.error(f"Question answering failed: {str(e)}, took {duration:.2f}s")
            raise

    async def chat_stream_async(
        self, question: str, top_k: int = 3, temperature: float = 0.7
    ):
        """Answer a question asynchronously with streaming output."""
        start_time = time.time()

        # Retrieve relevant documents asynchronously
        search_results = await self.vector_store.search_async(question, top_k)

        if not search_results:
            yield {
                "content": "Sorry, I could not find relevant information in the existing documents to answer your question.",
                "is_final": True,
                "sources": [],
                "processing_time": time.time() - start_time,
            }
            return

        # Rerank the retrieved documents
        reranked_results = await self._rerank_results(question, search_results)

        # Build the context and format the sources
        context_task = self._build_context_async(reranked_results)
        sources_task = self._format_sources_async(reranked_results)
        context = await context_task

        # Set up a streaming LLM; pass the requested temperature through
        # (previously the temperature argument was silently ignored here)
        prompt = self.prompt_template.format(context=context, question=question)
        callback_handler = AsyncIteratorCallbackHandler()
        stream_llm = ChatOpenAI(
            model="deepseek-r1:8b",
            temperature=temperature,
            streaming=True,
            callbacks=[callback_handler],
            openai_api_key=self.openai_api_key,
            openai_api_base=self.openai_api_base,
        )

        self.logger.info("Starting LLM streaming task...")
        task = asyncio.create_task(stream_llm.ainvoke(prompt))
        self.logger.info("LLM streaming task started")

        async for token in callback_handler.aiter():
            yield {
                "content": token,
                "is_final": False,
                "sources": None,
                "processing_time": None,
            }
        await task

        # The final chunk carries the complete metadata
        sources = await sources_task
        yield {
            "content": "",
            "is_final": True,
            "sources": sources,
            "processing_time": time.time() - start_time,
        }

    async def get_documents_async(self) -> List[Dict[str, Any]]:
        """Get the document list asynchronously."""
        return await self.vector_store.get_documents_async()

    async def delete_document_async(self, doc_id: str) -> bool:
        """Delete a document asynchronously."""
        return await self.vector_store.delete_document_async(doc_id)

    async def _format_sources_async(
        self, search_results: List[Dict[str, Any]]
    ) -> List[Dict[str, Any]]:
        """Format source information asynchronously."""

        def _format_sources():
            return [
                {
                    "filename": r["metadata"]["filename"],
                    "content": (
                        (r["content"][:200] + "...")
                        if len(r["content"]) > 200
                        else r["content"]
                    ),
                    "similarity": 1 - r["distance"],
                    "rerank_score": r.get("rerank_score"),
                }
                for r in search_results
            ]

        return await asyncio.to_thread(_format_sources)

    async def _rerank_results(
        self, question: str, search_results: List[Dict[str, Any]]
    ) -> List[Dict[str, Any]]:
        """Rerank search results in a single batch with the Qwen3 reranker."""
        # Build the batch input; the expected format is "Query: xxx\nDocument: yyy"
        batch_texts = [
            f"Query: {question}\nDocument: {r['content'][:1000]}"  # adjust truncation to fit GPU memory
            for r in search_results
        ]

        def _score_batch() -> List[float]:
            # Tokenize the whole batch at once
            inputs = self.tokenizer(
                batch_texts,
                return_tensors="pt",
                padding=True,
                truncation=True,
                max_length=1024,  # cap input length well below the model's context window
            ).to(self.rerank_model.device)

            # Score with gradient tracking disabled
            with torch.no_grad():
                outputs = self.rerank_model(**inputs)
                logits = outputs.logits.squeeze(-1)
                # For a binary-classification head, apply sigmoid to get scores
                return torch.sigmoid(logits).tolist()

        # Run the blocking tokenization/inference off the event loop
        scores = await asyncio.to_thread(_score_batch)

        # Attach each score, clamped to the [0, 1] range
        for r, score in zip(search_results, scores):
            r["rerank_score"] = max(0.0, min(score, 1.0))

        # Actually reorder the results: highest rerank score first
        search_results.sort(key=lambda r: r["rerank_score"], reverse=True)
        return search_results

    async def _build_context_async(self, search_results: List[Dict[str, Any]]) -> str:
        """Build the prompt context asynchronously."""

        def _build_context():
            return "\n\n".join(
                f"Document fragment {i + 1} (source: {result['metadata']['filename']}):\n{result['content']}"
                for i, result in enumerate(search_results)
            )

        return await asyncio.to_thread(_build_context)
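

# Example usage: a minimal sketch, not part of the service itself. It assumes
# OPENAI_BASE_URL / OPENAI_API_KEY are set, the local model paths above exist,
# and AsyncVectorStore is configured; the sample document and questions are invented.
if __name__ == "__main__":

    async def _demo() -> None:
        service = AsyncRAGService()
        await service.add_document_async(
            "RAG combines retrieval with generation.", "notes.txt"
        )

        # Non-streaming: a single dict with answer, sources, and timing
        result = await service.chat_async("What is RAG?", top_k=3)
        print(result["answer"])
        print(result["sources"])

        # Streaming: token chunks until a final chunk with is_final=True
        async for chunk in service.chat_stream_async("What is RAG?"):
            print(chunk["content"], end="", flush=True)

    asyncio.run(_demo())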