feat: model configuration

李如威 2025-07-11 23:57:08 +08:00
parent 45a4836776
commit a25d94b8ae
3 changed files with 58 additions and 30 deletions

View File

@@ -5,6 +5,16 @@ OPENAI_BASE_URL=https://api.openai.com/v1
# Vector database configuration
CHROMA_PERSIST_DIRECTORY=./chroma_db
# Model configuration
# RERANK_MODEL_PATH=/Volumes/LRW/Model/Qwen3-Reranker-0.6B
# RERANK_MODEL_TYPE=Qwen3-Reranker-0.6B
# RERANK_MODEL_DEVICE=cpu
# EMBEDDING_MODEL_PATH=your_embedding_model_path_here
# EMBEDDING_MODEL_TYPE=your_embedding_model_type_here
# EMBEDDING_MODEL_DEVICE=cpu
# Application configuration
APP_NAME=Easy RAG Service
APP_VERSION=1.0.0
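Reranking is opt-in: it stays disabled while RERANK_MODEL_PATH is commented out. A minimal sketch of an enabled configuration, assuming a local Qwen3-Reranker checkout (the paths are illustrative, not shipped defaults):

# Example: enable reranking against a local checkpoint
RERANK_MODEL_PATH=/models/Qwen3-Reranker-0.6B
RERANK_MODEL_TYPE=Qwen3-Reranker-0.6B
RERANK_MODEL_DEVICE=cpu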

View File

@@ -21,6 +21,14 @@ class Config:
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
OPENAI_BASE_URL = os.getenv("OPENAI_BASE_URL", "https://api.openai.com/v1")
# Model configuration
EMBEDDING_MODEL_PATH = os.getenv("EMBEDDING_MODEL_PATH", "")
EMBEDDING_MODEL_TYPE = os.getenv("EMBEDDING_MODEL_TYPE", "")
EMBEDDING_MODEL_DEVICE = os.getenv("EMBEDDING_MODEL_DEVICE", "")
RERANK_MODEL_PATH = os.getenv("RERANK_MODEL_PATH", "")
RERANK_MODEL_TYPE = os.getenv("RERANK_MODEL_TYPE", "")
RERANK_MODEL_DEVICE = os.getenv("RERANK_MODEL_DEVICE", "")
# Vector database configuration
CHROMA_PERSIST_DIRECTORY = os.getenv("CHROMA_PERSIST_DIRECTORY", "./chroma_db")
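All six model settings default to empty strings, so downstream code can treat a non-empty path as the on switch. A minimal usage sketch (the config module path is an assumption):

from config import Config  # assumed module path

# Empty string -> falsy -> reranking disabled
open_rerank = bool(Config.RERANK_MODEL_PATH)
if open_rerank:
    print(f"Reranking with {Config.RERANK_MODEL_TYPE} on {Config.RERANK_MODEL_DEVICE or 'cpu'}")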

View File

@@ -21,6 +21,9 @@ class AsyncRAGService:
self.vector_store = AsyncVectorStore()
self.openai_api_base = os.getenv("OPENAI_BASE_URL")
self.openai_api_key = os.getenv("OPENAI_API_KEY")
self.rerank_model_path = os.getenv("RERANK_MODEL_PATH", "")
self.open_rerank = bool(self.rerank_model_path)
self.llm = ChatOpenAI(
model="deepseek-r1:8b",
temperature=0.7,
@@ -39,17 +42,23 @@
),
)
self.tokenizer = AutoTokenizer.from_pretrained("/Volumes/LRW/Model/Qwen3-Embedding-0.6B", padding_side="left")
# Force-set the padding token
if self.tokenizer.pad_token is None:
self.tokenizer.pad_token = self.tokenizer.eos_token
self.tokenizer.pad_token_id = self.tokenizer.eos_token_id
self.rerank_model = AutoModelForSequenceClassification.from_pretrained("/Volumes/LRW/Model/Qwen3-Embedding-0.6B").eval()
# Ensure the model config matches the tokenizer
if hasattr(self.rerank_model.config, "pad_token_id"):
self.rerank_model.config.pad_token_id = self.tokenizer.pad_token_id
# Only if reranking is needed
if self.open_rerank:
self.logger.info("初始化 Ranker 模型...")
self.tokenizer = AutoTokenizer.from_pretrained(
self.rerank_model_path, padding_side="left"
)
# Force-set the padding token
if self.tokenizer.pad_token is None:
self.tokenizer.pad_token = self.tokenizer.eos_token
self.tokenizer.pad_token_id = self.tokenizer.eos_token_id
self.rerank_model = AutoModelForSequenceClassification.from_pretrained(
self.rerank_model_path
).eval()
# Ensure the model config matches the tokenizer
if hasattr(self.rerank_model.config, "pad_token_id"):
self.rerank_model.config.pad_token_id = self.tokenizer.pad_token_id
self.logger.info("✓ 初始化 Ranker 模型成功")
self.logger.info("RAG服务初始化完成")
@@ -91,7 +100,7 @@
}
# rerank
reranked_results = await self._rerank_results(question, search_results)
reranked_results = await self._rerank_results(question, search_results, skip_rerank=not self.open_rerank)
# Run context building and LLM call preparation in parallel
context_task = asyncio.create_task(
@@ -146,7 +155,7 @@
return
# rerank
reranked_results = await self._rerank_results(question, search_results)
reranked_results = await self._rerank_results(question, search_results, skip_rerank=not self.open_rerank)
# Build context and source information
context_task = self._build_context_async(reranked_results)
@@ -218,7 +227,7 @@
return await asyncio.to_thread(_format_sources)
async def _rerank_results(
self, question: str, search_results: List[Dict[str, Any]], skip_rerank: bool = True
self, question: str, search_results: List[Dict[str, Any]], skip_rerank: bool = False
) -> List[Dict[str, Any]]:
"""使用 Qwen3-Reranker 对搜索结果批量重排序"""
@@ -229,19 +238,22 @@
if not search_results:
return []
# Model-related constants (could be cached at init time)
instruction = (
"Given a web search query, retrieve relevant passages that answer the query"
# ==== Prompt setup ====
prefix = (
"<|im_start|>system\n"
"You are a helpful assistant that determines whether a document answers a given query. "
'Respond only with "yes" if the document is helpful, otherwise "no".\n'
"<|im_end|>\n"
"<|im_start|>user\n"
)
prefix = '<|im_start|>system\nJudge whether the Document meets the requirements based on the Query and the Instruct provided. Note that the answer can only be "yes" or "no".<|im_end|>\n<|im_start|>user\n'
suffix = "<|im_end|>\n<|im_start|>assistant\n<think>\n\n</think>\n\n"
prefix_tokens = self.tokenizer.encode(prefix, add_special_tokens=False)
suffix_tokens = self.tokenizer.encode(suffix, add_special_tokens=False)
# Build inputs in the expected format
def format_pair(query, doc):
return f"<Instruct>: {instruction}\n<Query>: {query}\n<Document>: {doc}"
def format_pair(query: str, doc: str) -> str:
return f"<Query>: {query}\n<Document>: {doc}"
pairs = [
format_pair(question, r["content"][:1000])  # truncate text to avoid overlong input
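The task instruction moves out of the per-pair template and into the system prompt, so each pair now carries only the query and document. A sketch of how one scoring input is assembled from the pieces above (prefix, suffix, tokenizer, and format_pair as defined in this file; the query and document strings are illustrative):

text = prefix + format_pair("what is RAG?", "RAG augments generation with retrieved passages.") + suffix
inputs = tokenizer([text], return_tensors="pt", padding=True)
# The model scores this sequence by comparing the logits of the "yes" and "no" tokens.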
@@ -292,31 +304,29 @@
token_false_id = self.tokenizer.convert_tokens_to_ids("no")
# Inference scoring
self.logger.info("Inputs ready; starting model inference...")
with torch.no_grad():
outputs = self.rerank_model(**inputs)
logits = outputs.logits
# Check the dimensionality of the logits
if logits.dim() == 3:
# If 3-D, take the last token's logits
if logits.dim() == 3:  # if 3-D, take the last token's logits
logits = logits[:, -1, :]
elif logits.dim() == 2:
# If 2-D, use as-is
pass
else:
raise ValueError(f"Unexpected logits dimension: {logits.dim()}")
elif logits.dim() != 2:  # 2-D logits are used as-is
raise ValueError(f"Unexpected logits shape: {logits.shape}")
# Extract the logits for the yes/no tokens
true_logits = logits[:, token_true_id]
false_logits = logits[:, token_false_id]
stacked = torch.stack([false_logits, true_logits], dim=1)
probs = torch.nn.functional.softmax(stacked, dim=1)
scores = probs[:, 1].tolist()  # take the probability of "yes"
# Prefer the logit difference as the score
scores = (true_logits - false_logits).tolist()
self.logger.info("模型推理完成")
# Write the score into each result
for r, score in zip(search_results, scores):
r["rerank_score"] = round(float(score), 4)
self.logger.info(f"重排序完成,得分范围: {min(scores)} - {max(scores)} \n\n {json.dumps(search_results, indent=4)}")
return search_results
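Dropping the softmax is safe for ranking: with two logits, P("yes") = sigmoid(true_logit - false_logit), a monotonic function of the difference, so ordering by the raw difference matches ordering by probability while skipping a normalization step. A small self-contained check:

import torch

true_logits = torch.tensor([2.0, 0.5, -1.0])
false_logits = torch.tensor([1.0, 1.5, -3.0])

diff = true_logits - false_logits  # new scores: [1.0, -1.0, 2.0]
probs = torch.softmax(torch.stack([false_logits, true_logits], dim=1), dim=1)[:, 1]

# Same ranking either way, since sigmoid is monotonic.
assert torch.equal(torch.argsort(diff), torch.argsort(probs))

The raw differences are unbounded rather than confined to [0, 1], which is worth keeping in mind if rerank_score is ever thresholded downstream.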