import asyncio
import os
import time
from typing import Any, AsyncGenerator, Dict, List

from langchain.prompts import PromptTemplate
from langchain_openai import ChatOpenAI

from services.vector_store import AsyncVectorStore
from utils.logger import get_logger

class AsyncRAGService:
    """Main class of the asynchronous RAG service."""

    def __init__(self):
        self.logger = get_logger(__name__)
        self.vector_store = AsyncVectorStore()
        self.llm = ChatOpenAI(
            model="deepseek-r1:8b",
            temperature=0.7,
            openai_api_key=os.getenv("OPENAI_API_KEY"),
            openai_api_base=os.getenv("OPENAI_BASE_URL"),
        )

        self.prompt_template = PromptTemplate(
            input_variables=["context", "question"],
            template="""
Answer the question based on the following context. If the context contains no
relevant information, state that the answer cannot be found in the provided
documents.

Context:
{context}

Question: {question}

Answer:""",
        )

        self.logger.info("RAG service initialized")

    async def add_document_async(self, content: str, filename: str) -> str:
        """Add a document asynchronously."""
        start_time = time.time()
        try:
            self.logger.info(f"Adding document: {filename}")
            result = await self.vector_store.add_document_async(content, filename)
            duration = time.time() - start_time
            self.logger.info(f"Document added: {filename}, took {duration:.2f}s")
            return result
        except Exception as e:
            duration = time.time() - start_time
            self.logger.error(
                f"Failed to add document: {filename}, error: {e}, took {duration:.2f}s"
            )
            raise

    async def chat_async(
        self, question: str, top_k: int = 3, temperature: float = 0.7
    ) -> Dict[str, Any]:
        """Answer a question asynchronously (non-streaming)."""
        start_time = time.time()

        try:
            self.logger.info(f"Processing question: {question[:50]}...")

            # Retrieve relevant documents asynchronously
            search_results = await self.vector_store.search_async(question, top_k)
            self.logger.debug(f"Retrieved {len(search_results)} relevant documents")

            if not search_results:
                self.logger.warning("No relevant documents found")
                return {
                    "answer": "Sorry, I could not find relevant information in the existing documents to answer your question.",
                    "sources": [],
                    "processing_time": time.time() - start_time,
                }

            # Build the context and format the sources concurrently
            context_task = asyncio.create_task(self._build_context_async(search_results))
            sources_task = asyncio.create_task(self._format_sources_async(search_results))

            # Wait for the context first; it is needed for the prompt
            context = await context_task

            # Generate the answer. Note: mutating the shared self.llm is not
            # safe if multiple requests run concurrently (see sketch below).
            self.llm.temperature = temperature
            prompt = self.prompt_template.format(context=context, question=question)

            response = await asyncio.to_thread(self.llm.invoke, prompt)

            # Collect the formatted sources
            sources = await sources_task

            duration = time.time() - start_time
            self.logger.info(f"Chat completed, took {duration:.2f}s")

            return {
                "answer": response.content,
                "sources": sources,
                "processing_time": duration,
            }

        except Exception as e:
            duration = time.time() - start_time
            self.logger.error(f"Chat failed: {e}, took {duration:.2f}s")
            raise

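    # A minimal alternative sketch, not wired into chat_async above: LangChain
    # runnables support bind(), which attaches call-time kwargs (such as
    # temperature) to a new runnable and leaves the shared self.llm untouched,
    # avoiding the concurrency race noted above. The helper name is
    # hypothetical, and the temperature pass-through is an assumption about
    # the installed langchain_openai version.
    async def _invoke_with_temperature(self, prompt: str, temperature: float):
        llm = self.llm.bind(temperature=temperature)  # new runnable; self.llm unchanged
        return await asyncio.to_thread(llm.invoke, prompt)
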
    async def chat_stream_async(
        self, question: str, top_k: int = 3, temperature: float = 0.7
    ) -> AsyncGenerator[Dict[str, Any], None]:
        """Answer a question asynchronously with streaming output."""
        start_time = time.time()

        # Retrieve relevant documents asynchronously
        search_results = await self.vector_store.search_async(question, top_k)

        if not search_results:
            yield {
                "content": "Sorry, I could not find relevant information in the existing documents to answer your question.",
                "is_final": True,
                "sources": [],
                "processing_time": time.time() - start_time,
            }
            return

        # Build the context and the source list (plain coroutines here; they
        # run when awaited, unlike the create_task variant in chat_async)
        context_task = self._build_context_async(search_results)
        sources_task = self._format_sources_async(search_results)

        context = await context_task

        # Set LLM parameters (same concurrency caveat as in chat_async)
        self.llm.temperature = temperature
        prompt = self.prompt_template.format(context=context, question=question)

        # Stream the answer chunk by chunk
        accumulated_content = ""
        async for chunk in self._stream_llm_response(prompt):
            accumulated_content += chunk
            yield {
                "content": chunk,
                "is_final": False,
                "sources": None,
                "processing_time": None,
            }

        # The final chunk carries the sources and the total processing time
        sources = await sources_task
        yield {
            "content": "",
            "is_final": True,
            "sources": sources,
            "processing_time": time.time() - start_time,
        }

    async def _stream_llm_response(self, prompt: str) -> AsyncGenerator[str, None]:
        """Stream the LLM response using LangChain's synchronous stream API."""
        try:
            # llm.stream() only creates a lazy generator; the blocking network
            # I/O happens during iteration, which is bridged to async below
            stream = await asyncio.to_thread(self.llm.stream, prompt)
            async for chunk in self._async_stream_wrapper(stream):
                if hasattr(chunk, "content") and chunk.content:
                    yield chunk.content
        except Exception as e:
            yield f"Error while generating the answer: {e}"

    async def _async_stream_wrapper(self, stream):
        """Convert a synchronous stream into an async generator."""

        def get_next_chunk(stream_iter):
            # next() blocks on network I/O, so it runs in a worker thread
            try:
                return next(stream_iter)
            except StopIteration:
                return None

        stream_iter = iter(stream)
        while True:
            chunk = await asyncio.to_thread(get_next_chunk, stream_iter)
            if chunk is None:
                break
            yield chunk

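    # A minimal alternative sketch: recent LangChain chat models also expose a
    # native async streaming method, astream(), which would make the thread
    # bridge above unnecessary. Whether the installed langchain_openai version
    # supports it is an assumption; the method name below is hypothetical.
    async def _stream_llm_response_native(self, prompt: str) -> AsyncGenerator[str, None]:
        async for chunk in self.llm.astream(prompt):
            if chunk.content:
                yield chunk.content
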
    async def get_documents_async(self) -> List[Dict[str, Any]]:
        """Fetch the document list asynchronously."""
        return await self.vector_store.get_documents_async()

    async def delete_document_async(self, doc_id: str) -> bool:
        """Delete a document asynchronously."""
        return await self.vector_store.delete_document_async(doc_id)

    async def _build_context_async(self, search_results: List[Dict[str, Any]]) -> str:
        """Build the prompt context asynchronously."""

        def _build_context():
            return "\n\n".join(
                [
                    f"Document snippet {i + 1} (source: {result['metadata']['filename']}):\n{result['content']}"
                    for i, result in enumerate(search_results)
                ]
            )

        return await asyncio.to_thread(_build_context)

    async def _format_sources_async(
        self, search_results: List[Dict[str, Any]]
    ) -> List[Dict[str, Any]]:
        """Format the source references asynchronously."""

        def _format_sources():
            return [
                {
                    "filename": result["metadata"]["filename"],
                    "content": (
                        result["content"][:200] + "..."
                        if len(result["content"]) > 200
                        else result["content"]
                    ),
                    # Assumes the vector store returns a cosine distance, so
                    # 1 - distance serves as a similarity score
                    "similarity": 1 - result["distance"],
                }
                for result in search_results
            ]

        return await asyncio.to_thread(_format_sources)
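
# A minimal usage sketch, not part of the service itself. It assumes
# OPENAI_API_KEY / OPENAI_BASE_URL point at a reachable OpenAI-compatible
# endpoint and that AsyncVectorStore is configured; the sample text and file
# name are made up for illustration.
async def _demo() -> None:
    service = AsyncRAGService()
    await service.add_document_async(
        "RAG combines document retrieval with LLM generation.", "notes.txt"
    )

    # Non-streaming question answering
    result = await service.chat_async("What is RAG?", top_k=3)
    print(result["answer"])

    # Streaming: print chunks as they arrive, then the final metadata
    async for chunk in service.chat_stream_async("What is RAG?"):
        if chunk["is_final"]:
            print(f"\n({len(chunk['sources'])} sources, {chunk['processing_time']:.2f}s)")
        else:
            print(chunk["content"], end="", flush=True)


if __name__ == "__main__":
    asyncio.run(_demo())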