feat: add streaming chat

李如威 2025-07-07 23:49:17 +08:00
parent 99ca254f78
commit 11a74ea763
5 changed files with 265 additions and 10 deletions

README.md

@@ -7,6 +7,7 @@
- 🚀 **High-performance API service** - built on FastAPI
- 📄 **Multi-format document support** - PDF and TXT processing and vectorization
- 🔍 **Intelligent retrieval QA** - document retrieval based on vector similarity
- 🌊 **Streaming responses** - real-time streamed chat experience
- 💾 **Vector database** - ChromaDB persistent storage
- 🤖 **Multi-model support** - integrates a range of LLM models
- 📊 **RESTful API** - standardized REST endpoints
@@ -125,6 +126,24 @@ Content-Type: application/json
}
```
### Streaming Chat 🆕
```
POST /chat/stream
Content-Type: application/json

{
    "question": "your question",
    "top_k": 3,        # optional, number of documents to retrieve, default 3
    "temperature": 0.7 # optional, LLM temperature, default 0.7
}

Response: a stream of Server-Sent Events; each chunk carries:
- content: a fragment of the answer text
- is_final: whether this is the last chunk
- sources: cited sources (only in the last chunk)
- processing_time: processing time (only in the last chunk)
```
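For illustration, the raw stream is a sequence of `data:` frames, one per chunk. The values below are made up, and the exact shape of each `sources` entry depends on how the service formats its sources:

```
data: {"content": "Artificial intelligence is ", "is_final": false, "sources": null, "processing_time": null}

data: {"content": "", "is_final": true, "sources": [...], "processing_time": 1.23}
```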
### List Documents
```
GET /documents
@@ -200,12 +219,22 @@ curl -X POST "http://localhost:8000/chat" \
    "question": "What is the main content of the document?",
    "top_k": 3
  }'

# 4. Streaming chat
curl -X POST "http://localhost:8000/chat/stream" \
  -H "accept: text/plain" \
  -H "Content-Type: application/json" \
  -d '{
    "question": "Explain the core ideas of the document in detail",
    "top_k": 3
  }'
```
### 2. Python Client Example
```python
import requests
import json
# Upload a document
with open('document.pdf', 'rb') as f:
@@ -223,6 +252,87 @@ response = requests.post(
    }
)
print(response.json())
# Streaming chat
def stream_chat(question):
    response = requests.post(
        'http://localhost:8000/chat/stream',
        json={'question': question, 'top_k': 3},
        stream=True
    )

    for line in response.iter_lines():
        if line:
            # Parse the Server-Sent Events framing
            if line.startswith(b'data: '):
                data = json.loads(line[6:])

                # Print the text content
                if data.get('content'):
                    print(data['content'], end='', flush=True)

                # Handle the final chunk
                if data.get('is_final'):
                    print(f"\n\nProcessing time: {data.get('processing_time', 0):.2f}s")
                    print(f"Sources: {len(data.get('sources', []))} documents")
                    break

# Use the streaming chat
stream_chat("Explain the document's main points in detail")
```
### 3. JavaScript / Frontend Example
```javascript
// Streaming chat - frontend implementation
async function streamChat(question) {
    const response = await fetch('/chat/stream', {
        method: 'POST',
        headers: {
            'Content-Type': 'application/json',
        },
        body: JSON.stringify({
            question: question,
            top_k: 3
        })
    });

    const reader = response.body.getReader();
    const decoder = new TextDecoder();

    while (true) {
        const { done, value } = await reader.read();
        if (done) break;

        const chunk = decoder.decode(value);
        const lines = chunk.split('\n');

        for (const line of lines) {
            if (line.startsWith('data: ')) {
                try {
                    const data = JSON.parse(line.slice(6));

                    // Display the text content
                    if (data.content) {
                        document.getElementById('chat-output').innerHTML += data.content;
                    }

                    // Handle the final chunk
                    if (data.is_final) {
                        console.log(`Processing time: ${data.processing_time}s`);
                        console.log(`Sources: ${data.sources.length} documents`);
                    }
                } catch (e) {
                    console.error('Failed to parse data:', e);
                }
            }
        }
    }
}

// Usage example
streamChat('Please explain the main content of the document');
```
## Development Guide

main.py

@@ -1,16 +1,19 @@
from fastapi import FastAPI, File, UploadFile, HTTPException, Depends
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import JSONResponse
from fastapi.responses import JSONResponse, StreamingResponse
import uvicorn
import os
from typing import List
import shutil
from io import BytesIO
import json
from config import config
from models import (
    ChatRequest,
    ChatResponse,
    StreamChatRequest,
    StreamChatChunk,
    DocumentInfo,
    ErrorResponse,
    SuccessResponse,
@@ -122,7 +125,9 @@ async def upload_document(
@app.post("/chat", response_model=ChatResponse)
async def chat(request: ChatRequest, service: AsyncRAGService = Depends(get_rag_service)):
async def chat(
    request: ChatRequest, service: AsyncRAGService = Depends(get_rag_service)
):
    """Chat endpoint"""
    try:
        result = await service.chat_async(
@@ -141,6 +146,45 @@ async def chat(request: ChatRequest, service: AsyncRAGService = Depends(get_rag_
        raise HTTPException(status_code=500, detail=f"Chat processing failed: {str(e)}")
@app.post("/chat/stream")
async def chat_stream(
request: StreamChatRequest, service: AsyncRAGService = Depends(get_rag_service)
):
"""流式聊天问答接口"""
async def generate_stream():
try:
async for chunk_data in service.chat_stream_async(
question=request.question,
top_k=request.top_k,
temperature=request.temperature,
):
# 将数据转换为 JSON 格式并添加换行符
chunk = StreamChatChunk(**chunk_data)
yield f"data: {chunk.model_dump_json()}\n\n"
except Exception as e:
# 发生错误时发送错误信息
error_chunk = StreamChatChunk(
content=f"生成回答时发生错误: {str(e)}",
is_final=True,
sources=[],
processing_time=0.0,
)
yield f"data: {error_chunk.model_dump_json()}\n\n"
return StreamingResponse(
generate_stream(),
media_type="text/plain",
headers={
"Cache-Control": "no-cache",
"Connection": "keep-alive",
"Access-Control-Allow-Origin": "*",
"Access-Control-Allow-Headers": "*",
},
)
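A note on the response above: each chunk uses SSE `data: ...\n\n` framing, but the declared media type is `text/plain`, so clients must read the body manually (as the README examples do). Declaring `text/event-stream` instead would signal the SSE framing to clients and intermediaries (some proxies also disable buffering for it); a minimal sketch of that variant, which is an assumption and not part of this commit:

```python
return StreamingResponse(
    generate_stream(),
    media_type="text/event-stream",  # advertises the SSE framing to clients
    headers={"Cache-Control": "no-cache"},
)
```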
@app.get("/documents", response_model=List[DocumentInfo])
async def get_documents(service: AsyncRAGService = Depends(get_rag_service)):
"""获取文档列表接口"""
@@ -161,7 +205,9 @@ async def get_documents(service: AsyncRAGService = Depends(get_rag_service)):
@app.delete("/documents/{doc_id}", response_model=SuccessResponse)
async def delete_document(doc_id: str, service: AsyncRAGService = Depends(get_rag_service)):
async def delete_document(
    doc_id: str, service: AsyncRAGService = Depends(get_rag_service)
):
    """Delete document endpoint"""
    try:
        success = await service.delete_document_async(doc_id)

models.py

@@ -42,3 +42,18 @@ class SuccessResponse(BaseModel):
    """Success response model"""
    message: str
    data: Optional[dict] = None


class StreamChatRequest(BaseModel):
    """Streaming chat request model"""
    question: str
    top_k: Optional[int] = 3
    temperature: Optional[float] = 0.7


class StreamChatChunk(BaseModel):
    """Streaming chat chunk model"""
    content: str
    is_final: bool = False
    sources: Optional[List[dict]] = None
    processing_time: Optional[float] = None
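These are pydantic models (v2, given the `model_dump_json` calls in main.py), so a chunk serializes straight into the payload the endpoint wraps in an SSE frame. A quick illustration with made-up values:

```python
from models import StreamChatChunk

chunk = StreamChatChunk(content="Hello", is_final=False)
print(f"data: {chunk.model_dump_json()}")
# data: {"content":"Hello","is_final":false,"sources":null,"processing_time":null}
```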


@@ -74,6 +74,81 @@ class AsyncRAGService:
"processing_time": time.time() - start_time,
}
    async def chat_stream_async(
        self, question: str, top_k: int = 3, temperature: float = 0.7
    ):
        """Async streaming chat"""
        start_time = time.time()

        # Retrieve relevant documents asynchronously
        search_results = await self.vector_store.search_async(question, top_k)

        if not search_results:
            yield {
                "content": "Sorry, I could not find relevant information in the existing documents to answer your question.",
                "is_final": True,
                "sources": [],
                "processing_time": time.time() - start_time,
            }
            return

        # Build the context and the source info (coroutines; each runs when awaited)
        context_task = self._build_context_async(search_results)
        sources_task = self._format_sources_async(search_results)

        context = await context_task

        # Set the LLM parameters
        self.llm.temperature = temperature
        prompt = self.prompt_template.format(context=context, question=question)

        # Stream the generated answer
        accumulated_content = ""
        async for chunk in self._stream_llm_response(prompt):
            accumulated_content += chunk
            yield {
                "content": chunk,
                "is_final": False,
                "sources": None,
                "processing_time": None,
            }

        # The final chunk carries the complete metadata
        sources = await sources_task
        yield {
            "content": "",
            "is_final": True,
            "sources": sources,
            "processing_time": time.time() - start_time,
        }
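One subtlety in the method above: `context_task` and `sources_task` are bare coroutines, not scheduled tasks, so each one only starts running at its `await` and the two lookups do not actually overlap. If concurrency is the intent, they would need `asyncio.create_task`; a minimal sketch, which is an assumption and not part of this commit:

```python
# Schedule both coroutines immediately so they run concurrently
context_task = asyncio.create_task(self._build_context_async(search_results))
sources_task = asyncio.create_task(self._format_sources_async(search_results))

context = await context_task  # already in flight by this point
# ... stream the answer ...
sources = await sources_task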
    async def _stream_llm_response(self, prompt: str):
        """Stream the LLM response"""
        # Use LangChain's streaming interface
        try:
            # Obtain the (synchronous) streaming iterator without blocking the event loop
            stream = await asyncio.to_thread(self.llm.stream, prompt)

            async for chunk in self._async_stream_wrapper(stream):
                if hasattr(chunk, "content") and chunk.content:
                    yield chunk.content
        except Exception as e:
            yield f"Error while generating the answer: {str(e)}"

    async def _async_stream_wrapper(self, stream):
        """Adapt a synchronous stream into an async generator"""

        def get_next_chunk(stream_iter):
            try:
                return next(stream_iter)
            except StopIteration:
                return None

        stream_iter = iter(stream)
        while True:
            chunk = await asyncio.to_thread(get_next_chunk, stream_iter)
            if chunk is None:
                break
            yield chunk
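The wrapper above hops to a worker thread for every `next()` call on the synchronous LangChain stream. Recent LangChain model classes also expose a native async iterator; a minimal alternative sketch, assuming `self.llm` implements `astream`:

```python
async def _stream_llm_response(self, prompt: str):
    """Sketch: stream via LangChain's native async API (assumes astream exists)."""
    try:
        async for chunk in self.llm.astream(prompt):
            # Chat models yield message chunks with .content; plain LLMs yield strings
            content = getattr(chunk, "content", chunk)
            if content:
                yield content
    except Exception as e:
        yield f"Error while generating the answer: {str(e)}"
```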
    async def get_documents_async(self) -> List[Dict[str, Any]]:
        """Async document listing"""
        return await self.vector_store.get_documents_async()


@@ -1,6 +1,6 @@
import requests
import json
from datetime import datetime
def test_upload_and_chat():
    """Test document upload and chat"""
@@ -45,17 +45,26 @@ def test_upload_and_chat():
    # Test chat
    print("4. Testing chat...")
    chat_data = {"question": "What is artificial intelligence?", "top_k": 3, "temperature": 0.7}
    start_time = datetime.now()
    response = requests.post(
        f"{base_url}/chat", json=chat_data, headers={"Content-Type": "application/json"}
        f"{base_url}/chat/stream",
        json=chat_data,
        headers={"Content-Type": "application/json"},
        stream=True,
    )
    print(f"Status code: {response.status_code}")
    if response.status_code == 200:
        chat_result = response.json()
        print(f"Answer: {chat_result['answer']}")
        print(f"Processing time: {chat_result['processing_time']:.2f}")
        print(f"Number of sources: {len(chat_result['sources'])}")
        # Iterate over the response body line by line to handle the streamed
        # data (works for text/event-stream or chunked JSON)
        last_line = None
        for line in response.iter_lines(decode_unicode=True):
            if line:
                last_line = line
                print(f"Answer: {line}")
        end_time = datetime.now()
        processing_time = (end_time - start_time).total_seconds()
        print(f"Processing time: {processing_time:.2f}s")
        print(f"Number of sources: {len(json.loads(last_line.removeprefix('data: '))['sources'])}")
    else:
        print(f"Chat failed: {response.text}")
    print()