diff --git a/scripts/run_test.sh b/scripts/run_test.sh
index f3acf62..1ecc9b0 100644
--- a/scripts/run_test.sh
+++ b/scripts/run_test.sh
@@ -1,4 +1,4 @@
 #!/usr/bin/env bash
 export $(cat .env | xargs)
-pytest -s -W ignore::DeprecationWarning src/tests/test_nodes.py
+pytest -s -W ignore::DeprecationWarning -k test_search src/tests/test_nodes.py
 
diff --git a/src/pipeline/core/llm.py b/src/pipeline/core/llm.py
index b97396d..b53a6d0 100644
--- a/src/pipeline/core/llm.py
+++ b/src/pipeline/core/llm.py
@@ -1,3 +1,4 @@
+from typing import List
 import httpx
 import json
 from src.pipeline.config import config
@@ -65,6 +66,9 @@ class AsyncLLm:
 
         return []
 
+    async def rerank(self, query: str, documents: List[dict]) -> List[dict]:
+        return []
+
     async def chat(
         self,
         messages: list[dict],
diff --git a/src/pipeline/core/nodes.py b/src/pipeline/core/nodes.py
index 3cf7d6b..e26a0c4 100644
--- a/src/pipeline/core/nodes.py
+++ b/src/pipeline/core/nodes.py
@@ -145,3 +145,30 @@ class SearchFromESNode(AsyncNode):
     async def post_async(self, shared, prep_res, exec_res):
         shared["results"] = exec_res
         return "default"
+
+
+class RerankNode(AsyncNode):
+    """
+    Rerank search results with the LLM.
+    """
+
+    async def prep_async(self, shared):
+        # Gather the data to rerank
+        return {"query": shared["text"], "results": shared.get("results", [])}
+
+    async def exec_async(self, prep_res):
+        query = prep_res["query"]
+        results = prep_res["results"]
+
+        if not results:
+            return []
+
+        # Call the LLM to rerank; this assumes the llm.client.rerank interface exists.
+        # Expected return format: [{"es_id":..., "score":..., "rank_score":...}]
+        reranked = await llm.client.rerank(query=query, documents=results)
+        return reranked
+
+    async def post_async(self, shared, prep_res, exec_res):
+        # Replace the results in shared with the reranked results
+        shared["results"] = exec_res
+        return "default"
diff --git a/src/tests/test_nodes.py b/src/tests/test_nodes.py
index c758a9c..86186fc 100644
--- a/src/tests/test_nodes.py
+++ b/src/tests/test_nodes.py
@@ -7,7 +7,6 @@ from src.pipeline.core import llm, es, nodes, utils
 
 @pytest.mark.asyncio
 async def test_embedding():
-    return
     await llm.init_client()
     await es.init_client()
 
@@ -43,7 +42,43 @@ async def test_search():
     shared = {
         "text": "哪里盛产矿石",
         "index": "test_kb",
-        "top_k": 5,
+        "top_k": 10,
+        "results": [],  # [{es_id, doc_id, title, type, created_at, score, content}]
+    }
+
+    embeddingNode = nodes.EmbeddingNode()
+    searchNode = nodes.SearchFromESNode()
+    embeddingNode >> searchNode
+    flow = AsyncFlow(embeddingNode)
+
+    await flow.run_async(shared)
+
+    logger.debug(json.dumps({**shared, "embedding": shared["embedding"][:4]}, indent=4, ensure_ascii=False))
+
+    request = llm.client.stream_chat(
+        messages=[
+            {"role": "system", "content": utils.rag_system_prompt()},
+            {"role": "user", "content": utils.rag_user_prompt(shared["text"], shared["results"])},
+        ]
+    )
+
+    async for chunk in request:
+        logger.debug(chunk)
+
+    await llm.close_client()
+    await es.close_client()
+
+
+@pytest.mark.asyncio
+async def test_rerank():
+    await llm.init_client()
+    await es.init_client()
+    logger.debug("search from es")
+
+    shared = {
+        "text": "哪里盛产矿石",
+        "index": "test_kb",
+        "top_k": 10,
         "results": [],  # [{es_id, doc_id, title, type, created_at, score, content}]
     }
 
@@ -59,8 +94,7 @@
     res = await llm.client.chat(
         messages=[
            {"role": "system", "content": utils.rag_system_prompt()},
-           {"role": "system", "content": utils.rag_user_prompt(shared["text"], shared["results"])},
-           # {"role": "system", "content": "你好"},
+           {"role": "user", "content": utils.rag_user_prompt(shared["text"], shared["results"])},
        ]
    )
 
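
The new AsyncLLm.rerank method lands here as a stub returning [], and RerankNode.exec_async notes that it merely assumes the llm.client.rerank interface exists. Below is a minimal sketch of how that method could be filled in, assuming a Cohere/Jina-style HTTP rerank endpoint; the config keys RERANK_URL, RERANK_MODEL and RERANK_API_KEY are placeholders that do not exist in this diff.

    # Sketch for src/pipeline/core/llm.py; httpx, config and List are already imported there.
    async def rerank(self, query: str, documents: List[dict]) -> List[dict]:
        """Score each document against the query via an external rerank endpoint (sketch)."""
        if not documents:
            return []

        payload = {
            "model": config.RERANK_MODEL,  # hypothetical config key
            "query": query,
            # Send only the text; keep the full dicts locally so metadata survives.
            "documents": [doc.get("content", "") for doc in documents],
        }
        headers = {"Authorization": f"Bearer {config.RERANK_API_KEY}"}  # hypothetical config key

        async with httpx.AsyncClient(timeout=30) as client:
            resp = await client.post(config.RERANK_URL, json=payload, headers=headers)  # hypothetical URL
            resp.raise_for_status()
            items = resp.json().get("results", [])

        # Attach the rerank score to the original documents and sort by it, so RerankNode
        # can drop the list straight back into shared["results"].
        reranked = []
        for item in items:
            doc = dict(documents[item["index"]])
            doc["rank_score"] = item["relevance_score"]
            reranked.append(doc)
        reranked.sort(key=lambda d: d["rank_score"], reverse=True)
        return reranked

Note also that test_rerank as added here runs the same embedding-then-search flow as test_search and never instantiates RerankNode; if the intent is to exercise the new node, it could be wired in with the same ">>" style the tests already use, e.g.:

    embeddingNode = nodes.EmbeddingNode()
    searchNode = nodes.SearchFromESNode()
    rerankNode = nodes.RerankNode()
    embeddingNode >> searchNode
    searchNode >> rerankNode
    flow = AsyncFlow(embeddingNode)
    await flow.run_async(shared)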