feat: optimize streaming mode
commit d7a4d671f0
parent 03ee2525ab

main.py | 3
main.py

@@ -47,6 +47,7 @@ app = FastAPI(
 # Create the API router
 from fastapi import APIRouter

 api_router = APIRouter(prefix="/api")

 # Add CORS middleware
@@ -252,7 +253,7 @@ async def chat_stream(
     return StreamingResponse(
         generate_stream(),
-        media_type="text/plain",
+        media_type="text/event-stream",
         headers={
             "Cache-Control": "no-cache",
             "Connection": "keep-alive",
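With media_type switched from text/plain to text/event-stream, the endpoint now advertises Server-Sent Events, so EventSource clients and proxies treat the response as an incremental stream. For that to work, the generator should emit SSE-framed messages. A self-contained sketch assuming JSON chunks (the route path and payload shape are illustrative; generate_stream's real body is not part of this hunk):

import asyncio
import json

from fastapi import FastAPI
from fastapi.responses import StreamingResponse

app = FastAPI()

@app.get("/api/chat/stream")  # hypothetical path, not shown in the diff
async def chat_stream():
    async def generate_stream():
        # Placeholder tokens standing in for the RAG service's stream.
        for token in ["Hello", ",", " world"]:
            # SSE framing: a "data:" line terminated by a blank line.
            yield f"data: {json.dumps({'content': token}, ensure_ascii=False)}\n\n"
            await asyncio.sleep(0)

    return StreamingResponse(
        generate_stream(),
        media_type="text/event-stream",
        headers={"Cache-Control": "no-cache", "Connection": "keep-alive"},
    )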
@@ -2,6 +2,7 @@ from typing import List, Dict, Any
 import asyncio
 from langchain_openai import ChatOpenAI
 from langchain.prompts import PromptTemplate
+from langchain.callbacks import AsyncIteratorCallbackHandler
 from services.vector_store import AsyncVectorStore
 from utils.logger import get_logger
 import os
@@ -12,15 +13,17 @@ class AsyncRAGService:
     """Main async RAG service class"""

     def __init__(self):
         self.logger = get_logger(__name__)
         self.vector_store = AsyncVectorStore()
+        self.openai_api_base = os.getenv("OPENAI_BASE_URL")
+        self.openai_api_key = os.getenv("OPENAI_API_KEY")
         self.llm = ChatOpenAI(
             model="deepseek-r1:8b",
             temperature=0.7,
             openai_api_key=os.getenv("OPENAI_API_KEY"),
             openai_api_base=os.getenv("OPENAI_BASE_URL"),
         )

         self.prompt_template = PromptTemplate(
             input_variables=["context", "question"],
             template="""
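Caching OPENAI_BASE_URL and OPENAI_API_KEY on self lets the new streaming path build its per-request ChatOpenAI with the same endpoint settings. The model tag deepseek-r1:8b together with a configurable base URL suggests an OpenAI-compatible local server; a sketch of such a setup (the URL and key values are illustrative assumptions, not part of the commit):

import os

from langchain_openai import ChatOpenAI

# Illustrative values for a local OpenAI-compatible endpoint serving
# deepseek-r1:8b; the real deployment config is not in this diff.
os.environ.setdefault("OPENAI_BASE_URL", "http://localhost:11434/v1")
os.environ.setdefault("OPENAI_API_KEY", "dummy-key")

llm = ChatOpenAI(
    model="deepseek-r1:8b",
    temperature=0.7,
    openai_api_key=os.getenv("OPENAI_API_KEY"),
    openai_api_base=os.getenv("OPENAI_BASE_URL"),
)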
@@ -47,7 +50,9 @@ class AsyncRAGService:
             return result
         except Exception as e:
             duration = time.time() - start_time
-            self.logger.error(f"Failed to add document: {filename}, error: {str(e)}, took: {duration:.2f}s")
+            self.logger.error(
+                f"Failed to add document: {filename}, error: {str(e)}, took: {duration:.2f}s"
+            )
             raise

     async def chat_async(
@@ -72,8 +77,12 @@ class AsyncRAGService:
         }

         # Build the context and format the sources in parallel
-        context_task = asyncio.create_task(self._build_context_async(search_results))
-        sources_task = asyncio.create_task(self._format_sources_async(search_results))
+        context_task = asyncio.create_task(
+            self._build_context_async(search_results)
+        )
+        sources_task = asyncio.create_task(
+            self._format_sources_async(search_results)
+        )

         # Wait for context building to complete
         context = await context_task
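The two asyncio.create_task calls start context building and source formatting concurrently; only context is awaited right away, leaving sources_task pending so answer generation can begin earlier. When both results are needed at the same point, the same concurrency can be written more compactly with asyncio.gather. A self-contained sketch of that pattern (the stub coroutines are placeholders, not the repo's real methods):

import asyncio

async def _build_context_async(results):
    # Placeholder standing in for the real context builder.
    return "context from " + ", ".join(results)

async def _format_sources_async(results):
    # Placeholder standing in for the real source formatter.
    return [{"source": r} for r in results]

async def main():
    results = ["doc1", "doc2"]
    # gather runs both coroutines concurrently and preserves result order.
    context, sources = await asyncio.gather(
        _build_context_async(results),
        _format_sources_async(results),
    )
    print(context, sources)

asyncio.run(main())

The commit's create_task variant remains the better fit for the streaming path, since awaiting sources can be deferred until the final chunk.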
@@ -126,20 +135,31 @@ class AsyncRAGService:
         context = await context_task

         # Set the LLM parameters
         self.llm.temperature = temperature
         prompt = self.prompt_template.format(context=context, question=question)

         # Stream the answer
-        accumulated_content = ""
-        async for chunk in self._stream_llm_response(prompt):
-            accumulated_content += chunk
+        callback_handler = AsyncIteratorCallbackHandler()
+        stream_llm = ChatOpenAI(
+            model="deepseek-r1:8b",
+            streaming=True,
+            callbacks=[callback_handler],
+            openai_api_key=self.openai_api_key,
+            openai_api_base=self.openai_api_base,
+        )
+
+        self.logger.info("Starting the LLM streaming task...")
+        task = asyncio.create_task(stream_llm.ainvoke(prompt))
+        self.logger.info("LLM streaming task started")
+
+        async for token in callback_handler.aiter():
             yield {
-                "content": chunk,
+                "content": token,
                 "is_final": False,
                 "sources": None,
                 "processing_time": None,
             }

+        await task
+
         # The final chunk carries the complete metadata
         sources = await sources_task
         yield {
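The new streaming path wires a dedicated, streaming-enabled ChatOpenAI instance to an AsyncIteratorCallbackHandler: ainvoke runs as a background task while aiter() yields tokens as the model produces them, and the task is awaited after the loop so generation errors still surface. One known pitfall with this handler is that aiter() can wait forever if the call fails before the handler is marked done; a minimal self-contained sketch of the pattern with that safeguard added (model name carried over from the diff, endpoint configuration omitted):

import asyncio

from langchain.callbacks import AsyncIteratorCallbackHandler
from langchain_openai import ChatOpenAI

async def stream_tokens(prompt: str):
    handler = AsyncIteratorCallbackHandler()
    llm = ChatOpenAI(
        model="deepseek-r1:8b",  # assumes an OpenAI-compatible endpoint serving this model
        streaming=True,
        callbacks=[handler],
    )

    async def run() -> None:
        try:
            await llm.ainvoke(prompt)
        finally:
            # Ensure the iterator terminates even if ainvoke raises;
            # otherwise handler.aiter() would block indefinitely.
            handler.done.set()

    task = asyncio.create_task(run())
    async for token in handler.aiter():
        yield token
    await task  # re-raise any exception from the background call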
@@ -149,34 +169,6 @@ class AsyncRAGService:
             "processing_time": time.time() - start_time,
         }

-    async def _stream_llm_response(self, prompt: str):
-        """Call the LLM in streaming mode"""
-        # Use LangChain's streaming interface
-        try:
-            # Get the streaming response
-            stream = await asyncio.to_thread(self.llm.stream, prompt)
-            async for chunk in self._async_stream_wrapper(stream):
-                if hasattr(chunk, "content") and chunk.content:
-                    yield chunk.content
-        except Exception as e:
-            yield f"Error while generating the answer: {str(e)}"
-
-    async def _async_stream_wrapper(self, stream):
-        """Convert a synchronous stream into an async stream"""
-
-        def get_next_chunk(stream_iter):
-            try:
-                return next(stream_iter)
-            except StopIteration:
-                return None
-
-        stream_iter = iter(stream)
-        while True:
-            chunk = await asyncio.to_thread(get_next_chunk, stream_iter)
-            if chunk is None:
-                break
-            yield chunk
-
     async def get_documents_async(self) -> List[Dict[str, Any]]:
         """Fetch the document list asynchronously"""
         return await self.vector_store.get_documents_async()
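The deleted helpers existed only to pump the synchronous llm.stream() iterator through asyncio.to_thread. That shim is unnecessary with langchain_openai, whose ChatOpenAI already exposes a native async streaming interface; a sketch of the callback-free alternative (same model assumption as above, not what this commit adopted):

from langchain_openai import ChatOpenAI

async def stream_content(prompt: str):
    llm = ChatOpenAI(model="deepseek-r1:8b", streaming=True)
    # astream() yields AIMessageChunk objects; .content carries the token text.
    async for chunk in llm.astream(prompt):
        if chunk.content:
            yield chunk.content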
services/vector_store.py

@@ -13,11 +13,13 @@ from sentence_transformers import SentenceTransformer
 from langchain.text_splitter import RecursiveCharacterTextSplitter
 import uuid
 from datetime import datetime
+from utils.logger import get_logger

 class AsyncVectorStore:
     """Async vector store service"""

     def __init__(self, persist_directory: str = "./chroma_db"):
+        self.logger = get_logger(__name__)
         self.persist_directory = persist_directory
         self.client = chromadb.PersistentClient(
             path=persist_directory, settings=Settings(anonymized_telemetry=False)
@@ -28,12 +30,12 @@ class AsyncVectorStore:

         # Try to initialize the embedding model; fall back to a local scheme if the network fails
         try:
-            print("Loading the embedding model...")
+            self.logger.info("Loading the embedding model...")
             self.encoder = SentenceTransformer("all-MiniLM-L6-v2")
-            print("✓ Embedding model loaded")
+            self.logger.info("✓ Embedding model loaded")
         except Exception as e:
-            print(f"⚠️ Failed to load the embedding model: {e}")
-            print("Falling back to a simple text vectorization scheme (demo only)")
+            self.logger.error(f"⚠️ Failed to load the embedding model: {e}")
+            self.logger.error("Falling back to a simple text vectorization scheme (demo only)")
             self.encoder = None

         self.text_splitter = RecursiveCharacterTextSplitter(
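Replacing print with the shared logger sends model-loading status through the application's logging pipeline instead of bare stdout. utils/logger.py itself is not part of this diff; a minimal get_logger it might resemble (an assumption for illustration, not the repo's actual implementation):

import logging

def get_logger(name: str) -> logging.Logger:
    """Return a named logger, attaching a stream handler at most once."""
    logger = logging.getLogger(name)
    if not logger.handlers:
        handler = logging.StreamHandler()
        handler.setFormatter(
            logging.Formatter("%(asctime)s %(name)s %(levelname)s %(message)s")
        )
        logger.addHandler(handler)
        logger.setLevel(logging.INFO)
    return logger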