feat: introduce local model

李如威 2025-07-11 00:08:52 +08:00
parent 7807866a37
commit e98c2feb36
1 changed file with 37 additions and 36 deletions


@@ -5,8 +5,10 @@ from langchain.prompts import PromptTemplate
from langchain.callbacks import AsyncIteratorCallbackHandler
from services.vector_store import AsyncVectorStore
from utils.logger import get_logger
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import os
import time
import torch
class AsyncRAGService:
@@ -25,25 +27,6 @@ class AsyncRAGService:
            openai_api_base=self.openai_api_base,
        )
        self.rerank_llm = ChatOpenAI(
            model="dengcao/Qwen3-Reranker-8B:Q3_K_M",
            temperature=0.7,
            openai_api_key=self.openai_api_key,
            openai_api_base=self.openai_api_base,
        )
        self.rerank_prompt_template = PromptTemplate(
            input_variables=["question", "content"],
            template=(
                "You are a scoring assistant. Judge how relevant the following document snippet is to the user question.\n"
                "Output only a single score between 0 and 1; a higher value means stronger relevance.\n\n"
                "User question:\n"
                "{question}\n\n"
                "Document snippet:\n"
                "{content}"
            ),
        )
        self.prompt_template = PromptTemplate(
            input_variables=["context", "question"],
            template=(
@@ -55,6 +38,15 @@ class AsyncRAGService:
            ),
        )
        self.tokenizer = AutoTokenizer.from_pretrained(
            "/Volumes/LRW/Model/Qwen3-Embedding-0.6B", trust_remote_code=True
        )
        self.rerank_model = AutoModelForSequenceClassification.from_pretrained(
            "/Volumes/LRW/Model/Qwen3-Embedding-0.6B",
            trust_remote_code=True,
            device_map="auto",  # or "cuda"
        )
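        # NOTE (assumption): the path above points at Qwen3-Embedding-0.6B, an
        # embedding checkpoint; AutoModelForSequenceClassification will attach a
        # freshly initialized classification head to it, so a dedicated reranker
        # checkpoint such as Qwen3-Reranker-0.6B may be what is intended here.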
        self.logger.info("RAG service initialized")
    async def add_document_async(self, content: str, filename: str) -> str:
@@ -224,27 +216,36 @@ class AsyncRAGService:
    async def _rerank_results(
        self, question: str, search_results: List[Dict[str, Any]]
    ) -> List[Dict[str, Any]]:
"""使用 rerank LLM 对搜索结果重新排序"""
"""使用 Qwen3-Reranker 对搜索结果批量重排序"""
        async def score_result(result: Dict[str, Any]) -> float:
            prompt = self.rerank_prompt_template.format(
                content=result["content"][:1000],
                question=question
            )
            try:
                response = await asyncio.to_thread(self.rerank_llm.invoke, prompt)
                self.logger.info(f"rerank score: {response.content.strip()} for {result['metadata']['filename']}")
                score = float(response.content.strip())
                return max(0.0, min(score, 1.0))
            except Exception as e:
                self.logger.warning(f"rerank scoring failed; falling back to vector similarity: {e}")
                return 1 - result["distance"]
        # Prepare batch inputs; the format must be "Query: xxx\nDocument: yyy"
        batch_texts = [
            f"Query: {question}\nDocument: {r['content'][:1000]}"  # truncation length can be tuned to available GPU memory
            for r in search_results
        ]
        scores = await asyncio.gather(*[score_result(r) for r in search_results])
        # Build the batch inputs with the tokenizer
        inputs = self.tokenizer(
            batch_texts,
            return_tensors="pt",
            padding=True,
            truncation=True,
            max_length=1024,  # Qwen3's maximum context length; capping is recommended
        ).to(self.rerank_model.device)
        # Run inference to score (gradients disabled)
        with torch.no_grad():
            outputs = self.rerank_model(**inputs)
            logits = outputs.logits.squeeze(-1)
        # For a binary classification model, a sigmoid activation is usually needed
        scores = torch.sigmoid(logits).tolist()
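        # NOTE (assumption): squeeze(-1) presumes a single-logit head
        # (num_labels=1); a two-label head would call for a softmax over the
        # last dimension instead of a sigmoid.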
        # Write the score back into each search_result
        for r, score in zip(search_results, scores):
            r["rerank_score"] = score
            r["rerank_score"] = max(0.0, min(score, 1.0))  # clamp the score to the 0-1 range
        return sorted(search_results, key=lambda r: r["rerank_score"], reverse=True)
        return search_results
    async def _build_context_async(self, search_results: List[Dict[str, Any]]) -> str:
        """Asynchronously build the context"""