easy-rag/services/rag_service.py


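"""Asynchronous RAG service.

Combines vector retrieval (AsyncVectorStore), optional Qwen3-Reranker
reranking (enabled via RERANK_MODEL_PATH), and an OpenAI-compatible chat
endpoint (OPENAI_BASE_URL / OPENAI_API_KEY) for blocking and streaming
question answering.
"""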
import asyncio
import json
import os
import time
from typing import Any, Dict, List

import torch
from langchain.callbacks import AsyncIteratorCallbackHandler
from langchain.prompts import PromptTemplate
from langchain_openai import ChatOpenAI
from transformers import AutoModelForCausalLM, AutoTokenizer

from services.vector_store import AsyncVectorStore
from utils.logger import get_logger


class AsyncRAGService:
    """Main class for the asynchronous RAG service."""

    def __init__(self):
        self.logger = get_logger(__name__)
        self.vector_store = AsyncVectorStore()
        self.openai_api_base = os.getenv("OPENAI_BASE_URL")
        self.openai_api_key = os.getenv("OPENAI_API_KEY")
        self.rerank_model_path = os.getenv("RERANK_MODEL_PATH", "")
        self.open_rerank = bool(self.rerank_model_path)
        self.llm = ChatOpenAI(
            model="deepseek-r1:8b",
            temperature=0.7,
            openai_api_key=self.openai_api_key,
            openai_api_base=self.openai_api_base,
        )
        self.prompt_template = PromptTemplate(
            input_variables=["context", "question"],
            template=(
                "Answer the question based on the following context. If the context "
                "contains no relevant information, state that the answer cannot be "
                "found in the provided documents.\n\n"
                "Context:\n"
                "{context}\n\n"
                "Question: {question}\n\n"
                "Answer:"
            ),
        )
        # Load the reranker only if a model path is configured
        if self.open_rerank:
            self.logger.info("Initializing reranker model...")
            self.tokenizer = AutoTokenizer.from_pretrained(
                self.rerank_model_path, padding_side="left"
            )
            # Force a padding token if the tokenizer does not define one
            if self.tokenizer.pad_token is None:
                self.tokenizer.pad_token = self.tokenizer.eos_token
                self.tokenizer.pad_token_id = self.tokenizer.eos_token_id
            self.rerank_model = AutoModelForCausalLM.from_pretrained(
                self.rerank_model_path
            ).eval()
            # Keep the model config consistent with the tokenizer
            if hasattr(self.rerank_model.config, "pad_token_id"):
                self.rerank_model.config.pad_token_id = self.tokenizer.pad_token_id
            self.logger.info("✓ Reranker model initialized")
        self.logger.info("RAG service initialized")

    async def add_document_async(self, content: str, filename: str) -> str:
        """Add a document asynchronously."""
        start_time = time.time()
        try:
            self.logger.info(f"Adding document: {filename}")
            result = await self.vector_store.add_document_async(content, filename)
            duration = time.time() - start_time
            self.logger.info(f"Document added: {filename}, took {duration:.2f}s")
            return result
        except Exception as e:
            duration = time.time() - start_time
            self.logger.error(
                f"Failed to add document: {filename}, error: {str(e)}, took {duration:.2f}s"
            )
            raise

    async def chat_async(
        self, question: str, top_k: int = 3, temperature: float = 0.7
    ) -> Dict[str, Any]:
        """Answer a question asynchronously."""
        start_time = time.time()
        try:
            self.logger.info(f"Processing question: {question[:50]}...")
            # Retrieve relevant documents asynchronously
            search_results = await self.vector_store.search_async(question, top_k)
            self.logger.debug(f"Retrieved {len(search_results)} relevant documents")
            if not search_results:
                self.logger.warning("No relevant documents found")
                return {
                    "answer": "Sorry, I could not find relevant information in the existing documents to answer your question.",
                    "sources": [],
                    "processing_time": time.time() - start_time,
                }
            # Rerank (no-op when no reranker model is configured)
            reranked_results = await self._rerank_results(
                question, search_results, skip_rerank=not self.open_rerank
            )
            # Build the context and format the sources in parallel
            context_task = asyncio.create_task(
                self._build_context_async(reranked_results)
            )
            sources_task = asyncio.create_task(
                self._format_sources_async(reranked_results)
            )
            # Wait for the context first; the prompt needs it
            context = await context_task
            # Generate the answer in a worker thread so the event loop stays free
            self.llm.temperature = temperature
            prompt = self.prompt_template.format(context=context, question=question)
            response = await asyncio.to_thread(self.llm.invoke, prompt)
            # Wait for the formatted source info
            sources = await sources_task
            duration = time.time() - start_time
            self.logger.info(f"Question answered, took {duration:.2f}s")
            return {
                "answer": response.content,
                "sources": sources,
                "processing_time": duration,
            }
        except Exception as e:
            duration = time.time() - start_time
            self.logger.error(f"Question answering failed: {str(e)}, took {duration:.2f}s")
            raise

    async def chat_stream_async(
        self, question: str, top_k: int = 3, temperature: float = 0.7
    ):
        """Answer a question asynchronously, streaming tokens as they arrive."""
        start_time = time.time()
        # Retrieve relevant documents asynchronously
        search_results = await self.vector_store.search_async(question, top_k)
        if not search_results:
            yield {
                "content": "Sorry, I could not find relevant information in the existing documents to answer your question.",
                "is_final": True,
                "sources": [],
                "processing_time": time.time() - start_time,
            }
            return
        # Rerank (no-op when no reranker model is configured)
        reranked_results = await self._rerank_results(
            question, search_results, skip_rerank=not self.open_rerank
        )
        # Build the context and the source info
        context_task = self._build_context_async(reranked_results)
        sources_task = self._format_sources_async(reranked_results)
        context = await context_task
        # Set up a dedicated streaming LLM (the shared self.llm is non-streaming)
        prompt = self.prompt_template.format(context=context, question=question)
        callback_handler = AsyncIteratorCallbackHandler()
        stream_llm = ChatOpenAI(
            model="deepseek-r1:8b",
            temperature=temperature,
            streaming=True,
            callbacks=[callback_handler],
            openai_api_key=self.openai_api_key,
            openai_api_base=self.openai_api_base,
        )
        self.logger.info("Starting LLM streaming task...")
        task = asyncio.create_task(stream_llm.ainvoke(prompt))
        self.logger.info("LLM streaming task started")
        async for token in callback_handler.aiter():
            yield {
                "content": token,
                "is_final": False,
                "sources": None,
                "processing_time": None,
            }
        await task
        # The final chunk carries the complete metadata
        sources = await sources_task
        yield {
            "content": "",
            "is_final": True,
            "sources": sources,
            "processing_time": time.time() - start_time,
        }
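        # Each yielded dict maps onto one streaming frame: token chunks carry only
        # "content"; the final chunk (is_final=True) adds the sources and total time.
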
    async def get_documents_async(self) -> List[Dict[str, Any]]:
        """List documents asynchronously."""
        return await self.vector_store.get_documents_async()

    async def delete_document_async(self, doc_id: str) -> bool:
        """Delete a document asynchronously."""
        return await self.vector_store.delete_document_async(doc_id)

    async def _format_sources_async(
        self, search_results: List[Dict[str, Any]]
    ) -> List[Dict[str, Any]]:
        """Format source info asynchronously."""

        def _format_sources():
            return [
                {
                    "filename": r["metadata"]["filename"],
                    "content": (
                        (r["content"][:200] + "...")
                        if len(r["content"]) > 200
                        else r["content"]
                    ),
                    "similarity": 1 - r["distance"],
                    "rerank_score": r.get("rerank_score", None),
                }
                for r in search_results
            ]

        return await asyncio.to_thread(_format_sources)

    async def _rerank_results(
        self,
        question: str,
        search_results: List[Dict[str, Any]],
        skip_rerank: bool = False,
    ) -> List[Dict[str, Any]]:
        """Batch-rerank search results with Qwen3-Reranker."""
        if skip_rerank:
            self.logger.info("Skipping rerank")
            return search_results
        if not search_results:
            return []
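        # Scoring scheme: each query/document pair is wrapped in a chat prompt
        # that asks the model to reply "yes" (relevant) or "no" (irrelevant).
        # The relevance score is the logit margin between "yes" and "no" at the
        # final position, which is monotonic in the yes-probability that a
        # softmax over the two tokens would give.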
        # ==== Prompt setup ====
        prefix = (
            "<|im_start|>system\n"
            "You are a helpful assistant that determines whether a document answers a given query. "
            'Respond only with "yes" if the document is helpful, otherwise "no".\n'
            "<|im_end|>\n"
            "<|im_start|>user\n"
        )
        suffix = "<|im_end|>\n<|im_start|>assistant\n<think>\n\n</think>\n\n"
        prefix_tokens = self.tokenizer.encode(prefix, add_special_tokens=False)
        suffix_tokens = self.tokenizer.encode(suffix, add_special_tokens=False)

        # Build inputs in the expected query/document format
        def format_pair(query: str, doc: str) -> str:
            return f"<Query>: {query}\n<Document>: {doc}"

        pairs = [
            format_pair(question, r["content"][:1000])  # truncate overly long docs
            for r in search_results
        ]
        # Tokenize, then manually wrap each sequence with the prefix/suffix
        inputs = self.tokenizer(
            pairs,
            padding=True,
            truncation="longest_first",
            return_attention_mask=True,
            max_length=8192 - len(prefix_tokens) - len(suffix_tokens),
            return_tensors="pt",
        )
        batch_size = inputs["input_ids"].shape[0]
        max_len = 8192
        # Fresh, left-padded tensors so the last position is always a real token
        new_input_ids = torch.full(
            (batch_size, max_len), self.tokenizer.pad_token_id, dtype=torch.long
        )
        new_attention_mask = torch.zeros((batch_size, max_len), dtype=torch.long)
        for i in range(batch_size):
            # Select the real tokens via the attention mask; a plain
            # [:actual_length] slice would grab pad tokens under left padding
            original_ids = inputs["input_ids"][i]
            original_mask = inputs["attention_mask"][i].bool()
            # Build the new sequence: prefix + original + suffix
            new_sequence = (
                prefix_tokens + original_ids[original_mask].tolist() + suffix_tokens
            )
            new_length = len(new_sequence)
            if new_length <= max_len:
                # Right-align so that logits[:, -1, :] reads the final real token
                new_input_ids[i, -new_length:] = torch.tensor(new_sequence)
                new_attention_mask[i, -new_length:] = 1
        inputs = {
            "input_ids": new_input_ids.to(self.rerank_model.device),
            "attention_mask": new_attention_mask.to(self.rerank_model.device),
        }
        # Token ids for "yes" / "no" (these could also be cached at init time)
        token_true_id = self.tokenizer.convert_tokens_to_ids("yes")
        token_false_id = self.tokenizer.convert_tokens_to_ids("no")
        # Run inference and score
        self.logger.info("Reranker inputs prepared, running inference...")
        with torch.no_grad():
            outputs = self.rerank_model(**inputs)
            logits = outputs.logits
            # Causal LMs return (batch, seq_len, vocab); keep the last position
            if logits.dim() == 3:
                logits = logits[:, -1, :]
            elif logits.dim() != 2:
                raise ValueError(f"Unexpected logits shape: {logits.shape}")
            # Extract the "yes" / "no" logits and use their margin as the score
            true_logits = logits[:, token_true_id]
            false_logits = logits[:, token_false_id]
            scores = (true_logits - false_logits).tolist()
        self.logger.info("Reranker inference finished")
        # Attach a score to each result, then order by score, best first
        for r, score in zip(search_results, scores):
            r["rerank_score"] = round(float(score), 4)
        search_results.sort(key=lambda r: r["rerank_score"], reverse=True)
        self.logger.info(f"Rerank finished, score range: {min(scores)} - {max(scores)}")
        self.logger.debug(json.dumps(search_results, ensure_ascii=False, indent=4))
        return search_results

    async def _build_context_async(self, search_results: List[Dict[str, Any]]) -> str:
        """Build the prompt context asynchronously."""

        def _build_context():
            return "\n\n".join(
                [
                    f"Document fragment {i + 1} (source: {result['metadata']['filename']}):\n{result['content']}"
                    for i, result in enumerate(search_results)
                ]
            )

        return await asyncio.to_thread(_build_context)
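

# A minimal usage sketch (assumes OPENAI_BASE_URL / OPENAI_API_KEY point at a
# live OpenAI-compatible endpoint and AsyncVectorStore is configured; the file
# name and question below are illustrative only):
#
#     async def _demo():
#         service = AsyncRAGService()
#         await service.add_document_async("some document text", "notes.txt")
#         result = await service.chat_async("your question here", top_k=3)
#         print(result["answer"], result["sources"])
#
#     asyncio.run(_demo())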