diff --git a/scripts/run_test.sh b/scripts/run_test.sh
index 1ecc9b0..eecc2e2 100644
--- a/scripts/run_test.sh
+++ b/scripts/run_test.sh
@@ -1,4 +1,4 @@
 #!/usr/bin/env bash
 export $(cat .env | xargs)
-pytest -s -W ignore::DeprecationWarning -k test_search src/tests/test_nodes.py
+pytest -s -W ignore::DeprecationWarning src/tests/test_nodes.py::test_rerank
diff --git a/src/pipeline/config.py b/src/pipeline/config.py
index e322641..97cf82c 100644
--- a/src/pipeline/config.py
+++ b/src/pipeline/config.py
@@ -14,6 +14,9 @@ class Config(TypedDict):
     llm_api_key: str
     llm_api_host: str
     llm_model: str
+    rerank_api_key: str
+    rerank_api_host: str
+    rerank_model: str
     embedding_api_key: str
     embedding_api_host: str
     embedding_model: str
@@ -33,6 +36,9 @@ def _read_config() -> Config:
         "llm_api_host": os.getenv("LLM_API_HOST"),
         "llm_api_key": os.getenv("LLM_API_KEY"),
         "llm_model": os.getenv("LLM_MODEL"),
+        "rerank_api_host": os.getenv("RERANK_API_HOST"),
+        "rerank_api_key": os.getenv("RERANK_API_KEY"),
+        "rerank_model": os.getenv("RERANK_MODEL"),
         "embedding_api_host": os.getenv("EMBEDDING_API_HOST"),
         "embedding_api_key": os.getenv("EMBEDDING_API_KEY"),
         "embedding_model": os.getenv("EMBEDDING_MODEL"),
diff --git a/src/pipeline/core/llm.py b/src/pipeline/core/llm.py
index b53a6d0..1757952 100644
--- a/src/pipeline/core/llm.py
+++ b/src/pipeline/core/llm.py
@@ -1,4 +1,3 @@
-from ast import List
 import httpx
 import json
 from src.pipeline.config import config
@@ -46,6 +45,23 @@ class AsyncLLm:
                 "Authorization": f"Bearer {self.embedding_api_key}",
             },
         )
+        # rerank client (mirrors the embedding client; kept separate so its
+        # timeout and connection limits can be tuned independently)
+        self.rerank_api = config["rerank_api_host"].rstrip("/") + "/score"
+        self.rerank_model = config["rerank_model"]
+        self.rerank_api_key = config["rerank_api_key"]
+        self.rerank_client = httpx.AsyncClient(
+            http2=False,
+            trust_env=False,
+            timeout=httpx.Timeout(timeout),
+            limits=httpx.Limits(
+                max_connections=max_connections,
+                max_keepalive_connections=max_keepalive,
+            ),
+            headers={
+                "Content-Type": "application/json",
+                "Authorization": f"Bearer {self.rerank_api_key}",
+            },
+        )
 
     async def embedding(self, text: str) -> list[float]:
         try:
@@ -66,7 +82,48 @@ class AsyncLLm:
 
         return []
 
-    async def rerank(self, query: str, documents: List[dict]) -> List[dict]:
+    async def rerank(self, query: str, documents: list[dict]) -> list[dict]:
+        if not documents:
+            return []
+
+        logger.debug(
+            "before rerank:\n"
+            + "\n".join(
+                str({"content": x["content"][:10], "score": x["score"]}) for x in documents
+            )
+        )
+
+        # Build parallel lists: the query is repeated once per candidate document
+        text_1 = []
+        text_2 = []
+        for doc in documents:
+            text_1.append(query)
+            text_2.append(doc.get("content", "")[:1024])
+
+        # Qwen3-specific payload
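+        # Assumption: the model is served behind a vLLM-style /score endpoint
+        # that scores each (text_1[i], text_2[i]) pair and returns the scores
+        # in request order as {"data": [{"score": float}, ...]}.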
+        payload = {
+            "model": self.rerank_model,
+            "text_1": text_1,
+            "text_2": text_2,
+        }
+
+        try:
+            resp = await self.rerank_client.post(self.rerank_api, json=payload)
+            resp.raise_for_status()
+            result = resp.json()
+            scores = [item["score"] for item in result["data"]]
+
+            # Copy each doc so the caller's dicts stay unmutated, then sort best-first
+            reranked = []
+            for doc, score in zip(documents, scores):
+                d = dict(doc)
+                d["rerank_score"] = float(score)
+                reranked.append(d)
+            reranked.sort(key=lambda x: x["rerank_score"], reverse=True)
+
+            logger.debug(
+                "after rerank:\n"
+                + "\n".join(
+                    str(
+                        {
+                            "content": x["content"][:10],
+                            "score": x["score"],
+                            "rerank_score": x["rerank_score"],
+                        }
+                    )
+                    for x in reranked
+                )
+            )
+
+            return reranked
+        except httpx.HTTPStatusError as e:
+            logger.error(f"Rerank HTTP error {e.response.status_code}: {e.response.text}")
+        except Exception:
+            logger.exception("Rerank request failed")
         return []
 
     async def chat(
diff --git a/src/pipeline/core/nodes.py b/src/pipeline/core/nodes.py
index e26a0c4..700a806 100644
--- a/src/pipeline/core/nodes.py
+++ b/src/pipeline/core/nodes.py
@@ -143,6 +143,7 @@ class SearchFromESNode(AsyncNode):
         return await es.client.hybrid_search_es(**prep_res)
 
     async def post_async(self, shared, prep_res, exec_res):
+        logger.debug(f"SearchFromESNode: {len(exec_res)} results")
         shared["results"] = exec_res
         return "default"
 
@@ -154,12 +155,16 @@ class RerankNode(AsyncNode):
 
     async def prep_async(self, shared):
         # Prepare the data to rerank, capped at the top rerank_top_k candidates
-        return {"query": shared["text"], "results": shared.get("results", [])}
+        return {
+            "query": shared["text"],
+            "results": shared.get("results", [])[: shared["rerank_top_k"]],
+        }
 
     async def exec_async(self, prep_res):
         query = prep_res["query"]
         results = prep_res["results"]
-
+        logger.debug(f"query: {query}")
+        logger.debug(f"results: {results}")
         if not results:
             return []
diff --git a/src/tests/test_nodes.py b/src/tests/test_nodes.py
index 86186fc..de08d11 100644
--- a/src/tests/test_nodes.py
+++ b/src/tests/test_nodes.py
@@ -1,3 +1,4 @@
+import asyncio
 import pytest
 import json
 from src.pipeline.core.pocket_flow import AsyncFlow
@@ -78,13 +79,17 @@ async def test_rerank():
     shared = {
         "text": "哪里盛产矿石",  # "Which regions are rich in ore?"
         "index": "test_kb",
-        "top_k": 10,
+        "top_k": 50,
+        "rerank_top_k": 5,
+        "top_n": 3,
         "results": [],  # [{es_id, doc_id, title, type, created_at, score, content}]
     }
     embeddingNode = nodes.EmbeddingNode()
     searchNode = nodes.SearchFromESNode()
-    embeddingNode >> searchNode
+    rerankNode = nodes.RerankNode()
+    embeddingNode >> searchNode >> rerankNode
 
     flow = AsyncFlow(embeddingNode)
     await flow.run_async(shared)
@@ -94,7 +99,7 @@ async def test_rerank():
     res = await llm.client.chat(
         messages=[
             {"role": "system", "content": utils.rag_system_prompt()},
-            {"role": "user", "content": utils.rag_user_prompt(shared["text"], shared["results"])},
+            {"role": "user", "content": utils.rag_user_prompt(shared["text"], shared["results"][: shared["top_n"]])},
         ]
     )
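
For reference, a sketch of the .env entries the new config keys read (the values
shown are illustrative assumptions, not part of this change; run_test.sh exports
them via `export $(cat .env | xargs)` before invoking pytest):

    RERANK_API_HOST=http://localhost:8001
    RERANK_API_KEY=dummy-key
    RERANK_MODEL=Qwen/Qwen3-Reranker-0.6B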