feat: rerank
parent 8cb491613f
commit 45a4836776
@@ -9,6 +9,7 @@ from transformers import AutoTokenizer, AutoModelForSequenceClassification
 import os
 import time
 import torch
+import json


 class AsyncRAGService:
@@ -38,14 +39,17 @@ class AsyncRAGService:
             ),
         )

-        self.tokenizer = AutoTokenizer.from_pretrained(
-            "/Volumes/LRW/Model/Qwen3-Embedding-0.6B", trust_remote_code=True
-        )
-        self.rerank_model = AutoModelForSequenceClassification.from_pretrained(
-            "/Volumes/LRW/Model/Qwen3-Embedding-0.6B",
-            trust_remote_code=True,
-            device_map="auto",  # or "cuda"
-        )
+        self.tokenizer = AutoTokenizer.from_pretrained("/Volumes/LRW/Model/Qwen3-Embedding-0.6B", padding_side="left")
+        # Force-set the padding token
+        if self.tokenizer.pad_token is None:
+            self.tokenizer.pad_token = self.tokenizer.eos_token
+            self.tokenizer.pad_token_id = self.tokenizer.eos_token_id
+
+        self.rerank_model = AutoModelForSequenceClassification.from_pretrained("/Volumes/LRW/Model/Qwen3-Embedding-0.6B").eval()
+
+        # Ensure the model config agrees with the tokenizer
+        if hasattr(self.rerank_model.config, "pad_token_id"):
+            self.rerank_model.config.pad_token_id = self.tokenizer.pad_token_id

         self.logger.info("RAG service initialized")
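Note on the left padding set up above: the reranker reads its score off the final token position of each row, so that slot must hold a real token rather than padding. A minimal standalone sanity check of this setup (a sketch, assuming the author's local model path is available; the EOS fallback mirrors the diff):

    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained(
        "/Volumes/LRW/Model/Qwen3-Embedding-0.6B", padding_side="left"
    )
    # Fall back to EOS when the checkpoint ships without a pad token
    if tokenizer.pad_token is None:
        tokenizer.pad_token = tokenizer.eos_token

    # With left padding, pad ids fill the start of the shorter rows,
    # so the last column holds real tokens for every sequence.
    batch = tokenizer(["short", "a much longer input text"], padding=True, return_tensors="pt")
    print(batch["input_ids"][:, -1])  # real token ids, not the pad id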
@@ -214,37 +218,106 @@ class AsyncRAGService:
         return await asyncio.to_thread(_format_sources)

     async def _rerank_results(
-        self, question: str, search_results: List[Dict[str, Any]]
+        self, question: str, search_results: List[Dict[str, Any]], skip_rerank: bool = True
     ) -> List[Dict[str, Any]]:
         """Batch-rerank search results with Qwen3-Reranker."""

-        # Prepare the batch input; the format must be "Query: xxx\nDocument: yyy"
-        batch_texts = [
-            f"Query: {question}\nDocument: {r['content'][:1000]}"  # truncation length can be tuned to the available VRAM
-            for r in search_results
-        ]
-
-        # Build the batch inputs with the tokenizer
-        inputs = self.tokenizer(
-            batch_texts,
-            return_tensors="pt",
-            padding=True,
-            truncation=True,
-            max_length=1024,  # cap to a safe fraction of Qwen3's context length
-        ).to(self.rerank_model.device)
-
-        # Score with gradient computation disabled
-        with torch.no_grad():
-            outputs = self.rerank_model(**inputs)
-            logits = outputs.logits.squeeze(-1)
-
-            # For a binary classification head, apply a sigmoid
-            scores = torch.sigmoid(logits).tolist()
-
-        # Write the score into each search_result
-        for r, score in zip(search_results, scores):
-            r["rerank_score"] = max(0.0, min(score, 1.0))  # clamp scores to 0-1
+        if skip_rerank:
+            self.logger.info("Skipping rerank")
+            return search_results
+
+        if not search_results:
+            return []
+
+        # Model-related constants (could be cached at init time)
+        instruction = (
+            "Given a web search query, retrieve relevant passages that answer the query"
+        )
+        prefix = '<|im_start|>system\nJudge whether the Document meets the requirements based on the Query and the Instruct provided. Note that the answer can only be "yes" or "no".<|im_end|>\n<|im_start|>user\n'
+        suffix = "<|im_end|>\n<|im_start|>assistant\n<think>\n\n</think>\n\n"
+
+        prefix_tokens = self.tokenizer.encode(prefix, add_special_tokens=False)
+        suffix_tokens = self.tokenizer.encode(suffix, add_special_tokens=False)
+
+        # Build inputs in the format the reranker expects
+        def format_pair(query, doc):
+            return f"<Instruct>: {instruction}\n<Query>: {query}\n<Document>: {doc}"
+
+        pairs = [
+            format_pair(question, r["content"][:1000])  # truncate to avoid overlong documents
+            for r in search_results
+        ]
+
+        # Tokenize + splice in prefix/suffix + pad
+        inputs = self.tokenizer(
+            pairs,
+            padding="max_length",
+            truncation="longest_first",
+            return_attention_mask=True,  # make sure the attention_mask is returned
+            max_length=8192 - len(prefix_tokens) - len(suffix_tokens),
+            return_tensors="pt",
+        )
+
+        # Manually add the prefix and suffix
+        batch_size = inputs["input_ids"].shape[0]
+        max_len = 8192
+
+        # Allocate fresh input tensors
+        new_input_ids = torch.full((batch_size, max_len), self.tokenizer.pad_token_id, dtype=torch.long)
+        new_attention_mask = torch.zeros((batch_size, max_len), dtype=torch.long)
+
+        for i in range(batch_size):
+            # Recover the original sequence; the tokenizer left-pads, so the real tokens sit at the end
+            original_ids = inputs["input_ids"][i]
+            original_mask = inputs["attention_mask"][i]
+            actual_length = original_mask.sum().item()
+
+            # Build the new sequence: prefix + original + suffix
+            new_sequence = (
+                prefix_tokens + original_ids[-actual_length:].tolist() + suffix_tokens
+            )
+            new_length = len(new_sequence)
+
+            if new_length <= max_len:
+                new_input_ids[i, :new_length] = torch.tensor(new_sequence)
+                new_attention_mask[i, :new_length] = 1
+
+        inputs = {
+            "input_ids": new_input_ids.to(self.rerank_model.device),
+            "attention_mask": new_attention_mask.to(self.rerank_model.device),
+        }
+
+        # Look up the yes / no token ids (could also be cached at init time)
+        token_true_id = self.tokenizer.convert_tokens_to_ids("yes")
+        token_false_id = self.tokenizer.convert_tokens_to_ids("no")
+
+        # Run inference and score
+        with torch.no_grad():
+            outputs = self.rerank_model(**inputs)
+            logits = outputs.logits
+
+            # Check the shape of the logits
+            if logits.dim() == 3:
+                # 3-D: take the logits of the last token
+                logits = logits[:, -1, :]
+            elif logits.dim() == 2:
+                # 2-D: use as-is
+                pass
+            else:
+                raise ValueError(f"Unexpected logits dimension: {logits.dim()}")
+
+            # Extract the logits of the yes/no tokens
+            true_logits = logits[:, token_true_id]
+            false_logits = logits[:, token_false_id]
+            stacked = torch.stack([false_logits, true_logits], dim=1)
+            probs = torch.nn.functional.softmax(stacked, dim=1)
+            scores = probs[:, 1].tolist()  # take the probability of "yes"
+
+        # Write the score into each result
+        for r, score in zip(search_results, scores):
+            r["rerank_score"] = round(float(score), 4)
+        self.logger.info(f"Rerank finished, score range: {min(scores)} - {max(scores)}\n\n{json.dumps(search_results, indent=4)}")

         return search_results

     async def _build_context_async(self, search_results: List[Dict[str, Any]]) -> str:
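For reference, the scoring block in this hunk reduces each document to the probability of the "yes" token under the model's final-position distribution: a softmax over the [no, yes] logit pair, which is equivalent to sigmoid(yes_logit - no_logit). A self-contained sketch of just that arithmetic, with made-up logits standing in for a real forward pass:

    import torch

    # Toy final-position logits for 3 documents; columns 0/1 play the
    # roles of the "no"/"yes" token ids looked up in the diff above.
    logits = torch.tensor([[2.0, 0.5], [0.1, 1.9], [1.0, 1.0]])
    token_false_id, token_true_id = 0, 1

    stacked = torch.stack([logits[:, token_false_id], logits[:, token_true_id]], dim=1)
    probs = torch.nn.functional.softmax(stacked, dim=1)
    scores = probs[:, 1].tolist()  # P("yes") per document
    print([round(s, 4) for s in scores])  # [0.1824, 0.8582, 0.5] — the second document ranks first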