Compare commits

7fb2b9b876e06d521457fb1942c89448b1c1ab31..45a4836776d365c30b5bde3b856b398e562b04ca

No commits in common. "7fb2b9b876e06d521457fb1942c89448b1c1ab31" and "45a4836776d365c30b5bde3b856b398e562b04ca" have entirely different histories.

3 changed files with 30 additions and 58 deletions

View File

@@ -5,16 +5,6 @@ OPENAI_BASE_URL=https://api.openai.com/v1
 # Vector store configuration
 CHROMA_PERSIST_DIRECTORY=./chroma_db
 
-# Model configuration
-# RERANK_MODEL_PATH=/Volumes/LRW/Model/Qwen3-Reranker-0.6B
-# RERANK_MODEL_TYPE=Qwen3-Reranker-0.6B
-# RERANK_MODEL_DEVICE=cpu
-# EMBEDDING_MODEL_PATH=/Volumes/LRW/Model/Qwen3-Embedding-0.6B
-# EMBEDDING_MODEL_TYPE=/Volumes/LRW/Model/Qwen3-Embedding-0.6B
-# EMBEDDING_MODEL_DEVICE=cpu
 
 # Application configuration
 APP_NAME=Easy RAG Service
 APP_VERSION=1.0.0
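
On the old side of this change, RERANK_MODEL_PATH doubles as a feature toggle: an unset or empty value disables reranking entirely. A minimal sketch of that pattern, assuming python-dotenv is what loads this .env file (the loader itself is not part of this diff):

    import os

    from dotenv import load_dotenv  # assumption: python-dotenv loads the .env file

    load_dotenv()

    # An empty default means reranking stays off unless a model path is configured.
    rerank_model_path = os.getenv("RERANK_MODEL_PATH", "")
    open_rerank = bool(rerank_model_path)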

View File

@@ -21,14 +21,6 @@ class Config:
     OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
     OPENAI_BASE_URL = os.getenv("OPENAI_BASE_URL", "https://api.openai.com/v1")
 
-    # Model configuration
-    EMBEDDING_MODEL_PATH = os.getenv("EMBEDDING_MODEL_PATH", "")
-    EMBEDDING_MODEL_TYPE = os.getenv("EMBEDDING_MODEL_TYPE", "")
-    EMBEDDING_MODEL_DEVICE = os.getenv("EMBEDDING_MODEL_DEVICE", "")
-    RERANK_MODEL_PATH = os.getenv("RERANK_MODEL_PATH", "")
-    RERANK_MODEL_TYPE = os.getenv("RERANK_MODEL_TYPE", "")
-    RERANK_MODEL_DEVICE = os.getenv("RERANK_MODEL_DEVICE", "")
 
     # Vector store configuration
     CHROMA_PERSIST_DIRECTORY = os.getenv("CHROMA_PERSIST_DIRECTORY", "./chroma_db")

View File

@@ -21,9 +21,6 @@ class AsyncRAGService:
         self.vector_store = AsyncVectorStore()
         self.openai_api_base = os.getenv("OPENAI_BASE_URL")
         self.openai_api_key = os.getenv("OPENAI_API_KEY")
-        self.rerank_model_path = os.getenv("RERANK_MODEL_PATH", "")
-        self.open_rerank = bool(self.rerank_model_path)
         self.llm = ChatOpenAI(
             model="deepseek-r1:8b",
             temperature=0.7,
@@ -42,23 +39,17 @@ class AsyncRAGService:
             ),
         )
 
-        # If reranking is enabled
-        if self.open_rerank:
-            self.logger.info("Initializing reranker model...")
-            self.tokenizer = AutoTokenizer.from_pretrained(
-                self.rerank_model_path, padding_side="left"
-            )
-            # Force-set the padding token
-            if self.tokenizer.pad_token is None:
-                self.tokenizer.pad_token = self.tokenizer.eos_token
-                self.tokenizer.pad_token_id = self.tokenizer.eos_token_id
-            self.rerank_model = AutoModelForSequenceClassification.from_pretrained(
-                self.rerank_model_path
-            ).eval()
-            # Keep the model config consistent with the tokenizer
-            if hasattr(self.rerank_model.config, "pad_token_id"):
-                self.rerank_model.config.pad_token_id = self.tokenizer.pad_token_id
-            self.logger.info("✓ Reranker model initialized")
+        self.tokenizer = AutoTokenizer.from_pretrained("/Volumes/LRW/Model/Qwen3-Embedding-0.6B", padding_side="left")
+        # Force-set the padding token
+        if self.tokenizer.pad_token is None:
+            self.tokenizer.pad_token = self.tokenizer.eos_token
+            self.tokenizer.pad_token_id = self.tokenizer.eos_token_id
+
+        self.rerank_model = AutoModelForSequenceClassification.from_pretrained("/Volumes/LRW/Model/Qwen3-Embedding-0.6B").eval()
+
+        # Keep the model config consistent with the tokenizer
+        if hasattr(self.rerank_model.config, "pad_token_id"):
+            self.rerank_model.config.pad_token_id = self.tokenizer.pad_token_id
 
         self.logger.info("RAG service initialization complete")
@@ -100,7 +91,7 @@ class AsyncRAGService:
         }
 
         # rerank
-        reranked_results = await self._rerank_results(question, search_results, skip_rerank=not self.open_rerank)
+        reranked_results = await self._rerank_results(question, search_results)
 
         # Run context building and LLM call preparation in parallel
         context_task = asyncio.create_task(
@@ -155,7 +146,7 @@ class AsyncRAGService:
             return
 
         # rerank
-        reranked_results = await self._rerank_results(question, search_results, skip_rerank=not self.open_rerank)
+        reranked_results = await self._rerank_results(question, search_results)
 
         # Build the context and source info
         context_task = self._build_context_async(reranked_results)
@@ -227,7 +218,7 @@ class AsyncRAGService:
         return await asyncio.to_thread(_format_sources)
 
     async def _rerank_results(
-        self, question: str, search_results: List[Dict[str, Any]], skip_rerank: bool = False
+        self, question: str, search_results: List[Dict[str, Any]], skip_rerank: bool = True
     ) -> List[Dict[str, Any]]:
         """Batch-rerank search results with Qwen3-Reranker"""
@@ -238,22 +229,19 @@ class AsyncRAGService:
         if not search_results:
             return []
 
-        # ==== Prompt setup ====
-        prefix = (
-            "<|im_start|>system\n"
-            "You are a helpful assistant that determines whether a document answers a given query. "
-            'Respond only with "yes" if the document is helpful, otherwise "no".\n'
-            "<|im_end|>\n"
-            "<|im_start|>user\n"
-        )
+        # Model-related constants (could be precomputed at init time)
+        instruction = (
+            "Given a web search query, retrieve relevant passages that answer the query"
+        )
+        prefix = '<|im_start|>system\nJudge whether the Document meets the requirements based on the Query and the Instruct provided. Note that the answer can only be "yes" or "no".<|im_end|>\n<|im_start|>user\n'
         suffix = "<|im_end|>\n<|im_start|>assistant\n<think>\n\n</think>\n\n"
 
         prefix_tokens = self.tokenizer.encode(prefix, add_special_tokens=False)
         suffix_tokens = self.tokenizer.encode(suffix, add_special_tokens=False)
 
         # Build inputs in the expected format
-        def format_pair(query: str, doc: str) -> str:
-            return f"<Query>: {query}\n<Document>: {doc}"
+        def format_pair(query, doc):
+            return f"<Instruct>: {instruction}\n<Query>: {query}\n<Document>: {doc}"
 
         pairs = [
             format_pair(question, r["content"][:1000])  # truncate text to avoid overlong input
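
The encoded prefix_tokens and suffix_tokens are reused for every pair; the code that assembles them into padded batch tensors sits between these hunks and is not shown. A plausible sketch of that step, following the published Qwen3-Reranker usage pattern (the max_length value is illustrative, not the project's):

    max_length = 8192  # illustrative context limit

    # Tokenize the bare pairs, leaving room for the chat-template wrapper.
    inputs = self.tokenizer(
        pairs,
        padding=False,
        truncation="longest_first",
        return_attention_mask=False,
        max_length=max_length - len(prefix_tokens) - len(suffix_tokens),
    )
    # Wrap each pair with the prefix/suffix tokens, then pad into one batch.
    for i, ids in enumerate(inputs["input_ids"]):
        inputs["input_ids"][i] = prefix_tokens + ids + suffix_tokens
    inputs = self.tokenizer.pad(inputs, padding=True, return_tensors="pt", max_length=max_length)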
@@ -304,29 +292,31 @@ class AsyncRAGService:
         token_false_id = self.tokenizer.convert_tokens_to_ids("no")
 
         # Inference and scoring
-        self.logger.info("Model inputs ready, starting inference...")
         with torch.no_grad():
             outputs = self.rerank_model(**inputs)
             logits = outputs.logits
 
             # Check the dimensionality of the logits
-            if logits.dim() == 3:  # if 3-D, take the last token's logits
+            if logits.dim() == 3:
+                # if 3-D, take the last token's logits
                 logits = logits[:, -1, :]
-            elif logits.dim() != 2:  # if 2-D, use as-is
-                raise ValueError(f"Unexpected logits shape: {logits.shape}")
+            elif logits.dim() == 2:
+                # if 2-D, use as-is
+                pass
+            else:
+                raise ValueError(f"Unexpected logits dimension: {logits.dim()}")
 
             # Extract the logits of the yes/no tokens
             true_logits = logits[:, token_true_id]
             false_logits = logits[:, token_false_id]
 
-            # Recommended: use the logit difference as the score
-            scores = (true_logits - false_logits).tolist()
-        self.logger.info("Model inference complete")
+            stacked = torch.stack([false_logits, true_logits], dim=1)
+            probs = torch.nn.functional.softmax(stacked, dim=1)
+            scores = probs[:, 1].tolist()  # take the probability of "yes"
 
         # Write the score into each result
         for r, score in zip(search_results, scores):
             r["rerank_score"] = round(float(score), 4)
 
         self.logger.info(f"Reranking complete, score range: {min(scores)} - {max(scores)} \n\n {json.dumps(search_results, indent=4)}")
         return search_results
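
The scoring change swaps the old logit-difference score for the probability of the "yes" token, but the two are a monotone transform of each other: softmax over the (no, yes) pair equals the sigmoid of the logit difference, so the ranking is identical and only the scale changes (now bounded in [0, 1]). A quick check:

    import torch

    true_logits = torch.tensor([2.0, -1.0, 0.5])
    false_logits = torch.tensor([0.0, 1.0, 0.5])

    # New scheme: P("yes") via softmax over the (no, yes) logit pair.
    probs = torch.softmax(torch.stack([false_logits, true_logits], dim=1), dim=1)[:, 1]

    # Same ordering as the old scheme: sigmoid of the logit difference.
    assert torch.allclose(probs, torch.sigmoid(true_logits - false_logits))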