feat: optimize streaming mode

李如威 2025-07-09 23:37:40 +08:00
parent 03ee2525ab
commit d7a4d671f0
4 changed files with 38 additions and 43 deletions


@@ -47,6 +47,7 @@ app = FastAPI(
 # Create the API router
 from fastapi import APIRouter
 api_router = APIRouter(prefix="/api")
 # Add CORS middleware
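
The hunk above touches the shared /api router. As a minimal stand-alone sketch of how such a prefixed router is typically mounted (the /health route and the include_router call are illustrative assumptions, not code from this commit):

    from fastapi import FastAPI, APIRouter

    app = FastAPI()
    api_router = APIRouter(prefix="/api")

    @api_router.get("/health")          # served at /api/health
    async def health():
        return {"status": "ok"}

    app.include_router(api_router)      # mount every /api route on the app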
@@ -252,7 +253,7 @@ async def chat_stream(
     return StreamingResponse(
         generate_stream(),
-        media_type="text/plain",
+        media_type="text/event-stream",
         headers={
             "Cache-Control": "no-cache",
             "Connection": "keep-alive",


@@ -2,6 +2,7 @@ from typing import List, Dict, Any
 import asyncio
 from langchain_openai import ChatOpenAI
 from langchain.prompts import PromptTemplate
+from langchain.callbacks import AsyncIteratorCallbackHandler
 from services.vector_store import AsyncVectorStore
 from utils.logger import get_logger
 import os
@@ -12,15 +13,17 @@ class AsyncRAGService:
     """Async RAG service main class"""

     def __init__(self):
         self.logger = get_logger(__name__)
         self.vector_store = AsyncVectorStore()
+        self.openai_api_base = os.getenv("OPENAI_BASE_URL")
+        self.openai_api_key = os.getenv("OPENAI_API_KEY")
         self.llm = ChatOpenAI(
             model="deepseek-r1:8b",
             temperature=0.7,
             openai_api_key=os.getenv("OPENAI_API_KEY"),
             openai_api_base=os.getenv("OPENAI_BASE_URL"),
         )
         self.prompt_template = PromptTemplate(
             input_variables=["context", "question"],
             template="""
@@ -47,7 +50,9 @@ class AsyncRAGService:
             return result
         except Exception as e:
             duration = time.time() - start_time
-            self.logger.error(f"Failed to add document: {filename}, error: {str(e)}, took {duration:.2f}s")
+            self.logger.error(
+                f"Failed to add document: {filename}, error: {str(e)}, took {duration:.2f}s"
+            )
             raise

     async def chat_async(
@@ -72,8 +77,12 @@ class AsyncRAGService:
             }

         # Run context building and source formatting in parallel
-        context_task = asyncio.create_task(self._build_context_async(search_results))
-        sources_task = asyncio.create_task(self._format_sources_async(search_results))
+        context_task = asyncio.create_task(
+            self._build_context_async(search_results)
+        )
+        sources_task = asyncio.create_task(
+            self._format_sources_async(search_results)
+        )

         # Wait for the context build to complete
         context = await context_task
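
Wrapping both coroutines in asyncio.create_task starts them immediately and lets them overlap; the later awaits only collect results. A self-contained sketch of the same pattern, with stand-in coroutines in place of the service's private helpers:

    import asyncio

    async def build_context() -> str:       # stand-in for _build_context_async
        await asyncio.sleep(0.2)
        return "context"

    async def format_sources() -> list:     # stand-in for _format_sources_async
        await asyncio.sleep(0.2)
        return ["doc1", "doc2"]

    async def main():
        context_task = asyncio.create_task(build_context())
        sources_task = asyncio.create_task(format_sources())
        context = await context_task   # total ~0.2s, not 0.4s: the tasks overlap
        sources = await sources_task
        print(context, sources)

    asyncio.run(main())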
@@ -126,20 +135,31 @@ class AsyncRAGService:
         context = await context_task

         # Configure LLM parameters
-        self.llm.temperature = temperature
         prompt = self.prompt_template.format(context=context, question=question)

-        # Stream the answer
-        accumulated_content = ""
-        async for chunk in self._stream_llm_response(prompt):
-            accumulated_content += chunk
+        callback_handler = AsyncIteratorCallbackHandler()
+        stream_llm = ChatOpenAI(
+            model="deepseek-r1:8b",
+            streaming=True,
+            callbacks=[callback_handler],
+            openai_api_key=self.openai_api_key,
+            openai_api_base=self.openai_api_base,
+        )
+        self.logger.info("Starting the LLM streaming task...")
+        task = asyncio.create_task(stream_llm.ainvoke(prompt))
+        self.logger.info("LLM streaming task started")
+        async for token in callback_handler.aiter():
             yield {
-                "content": chunk,
+                "content": token,
                 "is_final": False,
                 "sources": None,
                 "processing_time": None,
             }
+        await task

         # The final chunk carries the full metadata
         sources = await sources_task
         yield {
@@ -149,34 +169,6 @@ class AsyncRAGService:
             "processing_time": time.time() - start_time,
         }

-    async def _stream_llm_response(self, prompt: str):
-        """Stream the LLM call"""
-        # Use LangChain's streaming interface
-        try:
-            # Get the streaming response
-            stream = await asyncio.to_thread(self.llm.stream, prompt)
-            async for chunk in self._async_stream_wrapper(stream):
-                if hasattr(chunk, "content") and chunk.content:
-                    yield chunk.content
-        except Exception as e:
-            yield f"Error while generating the answer: {str(e)}"
-
-    async def _async_stream_wrapper(self, stream):
-        """Wrap a synchronous stream as an async stream"""
-
-        def get_next_chunk(stream_iter):
-            try:
-                return next(stream_iter)
-            except StopIteration:
-                return None
-
-        stream_iter = iter(stream)
-        while True:
-            chunk = await asyncio.to_thread(get_next_chunk, stream_iter)
-            if chunk is None:
-                break
-            yield chunk
-
     async def get_documents_async(self) -> List[Dict[str, Any]]:
         """Fetch the document list asynchronously"""
         return await self.vector_store.get_documents_async()
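
The deleted _stream_llm_response/_async_stream_wrapper pair bridged LangChain's synchronous .stream() iterator into async code via asyncio.to_thread, one thread hop per chunk. The replacement relies on AsyncIteratorCallbackHandler: ainvoke runs as a background task, tokens arrive through the handler's callbacks, and aiter() exposes them as a native async iterator. A condensed sketch of that pattern, using the same imports and model name as the diff (error handling omitted):

    import asyncio, os
    from langchain.callbacks import AsyncIteratorCallbackHandler
    from langchain_openai import ChatOpenAI

    async def stream_answer(prompt: str):
        handler = AsyncIteratorCallbackHandler()
        llm = ChatOpenAI(
            model="deepseek-r1:8b",
            streaming=True,                 # required so tokens reach the callback
            callbacks=[handler],
            openai_api_key=os.getenv("OPENAI_API_KEY"),
            openai_api_base=os.getenv("OPENAI_BASE_URL"),
        )
        task = asyncio.create_task(llm.ainvoke(prompt))  # generate in the background
        async for token in handler.aiter():              # tokens as they arrive
            yield token
        await task  # re-raise any exception from the background call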


@@ -13,11 +13,13 @@ from sentence_transformers import SentenceTransformer
 from langchain.text_splitter import RecursiveCharacterTextSplitter
 import uuid
 from datetime import datetime
+from utils.logger import get_logger


 class AsyncVectorStore:
     """Async vector store service"""

     def __init__(self, persist_directory: str = "./chroma_db"):
+        self.logger = get_logger(__name__)
         self.persist_directory = persist_directory
         self.client = chromadb.PersistentClient(
             path=persist_directory, settings=Settings(anonymized_telemetry=False)
@@ -28,12 +30,12 @@ class AsyncVectorStore:
         # Try to initialize the embedding encoder; fall back to a local scheme if the network fails
         try:
-            print("Loading the embedding model...")
+            self.logger.info("Loading the embedding model...")
             self.encoder = SentenceTransformer("all-MiniLM-L6-v2")
-            print("✓ Embedding model loaded successfully")
+            self.logger.info("✓ Embedding model loaded successfully")
         except Exception as e:
-            print(f"⚠️ Failed to load the embedding model: {e}")
-            print("Falling back to a simple text-vectorization scheme (demo only)")
+            self.logger.error(f"⚠️ Failed to load the embedding model: {e}")
+            self.logger.error("Falling back to a simple text-vectorization scheme (demo only)")
             self.encoder = None

         self.text_splitter = RecursiveCharacterTextSplitter(
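
Both services now log through utils.logger.get_logger, a project-internal helper that is not part of this commit. A plausible minimal implementation of such a helper, offered purely as a guess at its shape:

    import logging

    def get_logger(name: str) -> logging.Logger:
        logger = logging.getLogger(name)
        if not logger.handlers:  # avoid adding duplicate handlers on repeat calls
            handler = logging.StreamHandler()
            handler.setFormatter(
                logging.Formatter("%(asctime)s %(name)s %(levelname)s: %(message)s")
            )
            logger.addHandler(handler)
            logger.setLevel(logging.INFO)
        return logger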

test_auth.py (new file)