From 5ae6ed73a58ec334472bb9bd1664a94b84b4ac4a Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E6=9D=8E=E5=A6=82=E5=A8=81?=
Date: Thu, 25 Dec 2025 16:48:26 +0800
Subject: [PATCH] feat: web search

---
 requirements.txt           |  3 +-
 src/pipeline/core/nodes.py | 78 ++++++++++++++++++++++++++++----------
 src/pipeline/core/utils.py | 31 ++++++++++++++-
 src/tests/test.py          |  5 ---
 src/tests/test_nodes.py    |  9 +++--
 src/tests/test_utils.py    | 10 +++++
 6 files changed, 105 insertions(+), 31 deletions(-)
 delete mode 100644 src/tests/test.py
 create mode 100644 src/tests/test_utils.py

diff --git a/requirements.txt b/requirements.txt
index a6f17ac..9bd0c95 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -8,4 +8,5 @@ scikit-learn
 aiofiles
 pillow
 loguru
-httpx
\ No newline at end of file
+httpx
+baidusearch
\ No newline at end of file

diff --git a/src/pipeline/core/nodes.py b/src/pipeline/core/nodes.py
index 700a806..230a062 100644
--- a/src/pipeline/core/nodes.py
+++ b/src/pipeline/core/nodes.py
@@ -1,9 +1,11 @@
+import json
 import uuid
+import re
 from src.pipeline.core.pocket_flow import AsyncBatchNode, AsyncNode
-from src.pipeline.core.utils import fixed_size_chunk, load_document, logger
+from src.pipeline.core.utils import fixed_size_chunk, load_document, logger, baidu_search_async
 from src.pipeline.core import llm
 from src.pipeline.core import es
-import re
+from itertools import chain
 
 
 class ChunkDocumentsNode(AsyncBatchNode):
@@ -129,36 +131,70 @@ class EmbeddingNode(AsyncNode):
         return "default"
 
 
-class SearchFromESNode(AsyncNode):
+class SearchNode(AsyncBatchNode):
     async def prep_async(self, shared):
-        return {
-            "index": shared["index"],
-            "query_text": shared["text"],
-            "query_vector": shared["embedding"],
-            "top_k": shared.get("top_k", 5),
-        }
+        tasks = [
+            {
+                "from": "es",
+                "data": {
+                    "index": shared["index"],
+                    "query_text": shared["text"],
+                    "query_vector": shared["embedding"],
+                    "top_k": shared.get("top_k", 5),
+                },
+            }
+        ]
+        if shared.get("search_web", False):
+            tasks.append(
+                {
+                    "from": "web",
+                    "data": {
+                        "query": shared["text"],
+                        "max_results": shared.get("top_k", 5),
+                    },
+                }
+            )
+        return tasks
 
     async def exec_async(self, prep_res):
-        return await es.client.hybrid_search_es(**prep_res)
+        _data = prep_res["data"]
+        _from = prep_res["from"]
+        if _from == "es":
+            return ("es", await es.client.hybrid_search_es(**_data))
+        if _from == "web":
+            return ("web", await baidu_search_async(**_data))
 
     async def post_async(self, shared, prep_res, exec_res):
-        logger.debug(f"SearchFromESNode: length:{len(exec_res)}")
-        shared["results"] = exec_res
+        for _from, _data in exec_res:
+            if _from == "es":
+                shared["results"] = _data
+            if _from == "web":
+                shared["web_results"] = _data
         return "default"
 
 
-class RerankNode(AsyncNode):
+class RerankNode(AsyncBatchNode):
     """
     Rerank search results with an LLM.
     """
 
     async def prep_async(self, shared):
         # Prepare the data to be reranked
-        return {
-            "query": shared["text"],
-            "results": shared.get("results", [])[: shared["rerank_top_k"]],
-        }
+        tasks = [
+            {
+                "query": shared["text"],
+                "results": shared.get("results", [])[: shared["rerank_top_k"]],
+            }
+        ]
+        if shared.get("search_web", False):
+            tasks.append(
+                {
+                    "query": shared["text"],
+                    "results": shared.get("web_results", [])[: shared["rerank_top_k"]],
+                }
+            )
+        return tasks
 
     async def exec_async(self, prep_res):
         query = prep_res["query"]
@@ -167,13 +203,15 @@ class RerankNode(AsyncNode):
         logger.debug(f"results: {results}")
         if not results:
             return []
-        # Call the LLM to rerank; this assumes the llm.client.rerank interface exists
-        # Return format: [{"es_id":..., "score":..., "rerank_score":...}]
         reranked = await llm.client.rerank(query=query, documents=results)
         return reranked
 
     async def post_async(self, shared, prep_res, exec_res):
         # Update the results in shared with the reranked results
-        shared["results"] = exec_res
+        results = list(chain.from_iterable(exec_res))
+        logger.debug(results)
+        results.sort(key=lambda x: x["rerank_score"], reverse=True)
+        shared["results"] = results
         return "default"

diff --git a/src/pipeline/core/utils.py b/src/pipeline/core/utils.py
index 888be1f..0319ad7 100644
--- a/src/pipeline/core/utils.py
+++ b/src/pipeline/core/utils.py
@@ -3,12 +3,13 @@ import docx
 import fitz  # PyMuPDF
 import aiofiles
 import io
-import os
+import re
 import sys
 from pathlib import Path
 from PIL import Image
 from loguru import logger
 from src.pipeline.config import config
+from baidusearch.baidusearch import search
 
 # -----------------------------
 # Logging
 # -----------------------------
@@ -187,3 +188,31 @@ def rag_user_prompt(query: str, documents: list[dict]) -> str:
     """
     logger.debug(prompt)
     return prompt
+
+# -----------------------------
+# Misc utilities
+# -----------------------------
+
+
+async def baidu_search_async(query: str, max_results: int = 5):
+    """
+    Call baidusearch asynchronously (the sync function is wrapped with asyncio.to_thread).
+    Returns a list of structured search results.
+    """
+
+    def sync_search():
+        return list(search(query, num_results=max_results))
+
+    results = await asyncio.to_thread(sync_search)
+    docs = []
+    for r in results:
+        docs.append(
+            {
+                "title": r.get("title"),
+                "content": re.sub(r"\s+", " ", r.get("abstract", "")),
+                "url": r.get("url"),
+                "type": "web",
+                "score": 0.5,
+            }
+        )
+    return docs

diff --git a/src/tests/test.py b/src/tests/test.py
deleted file mode 100644
index b8b7c46..0000000
--- a/src/tests/test.py
+++ /dev/null
@@ -1,5 +0,0 @@
-import pytest
-
-@pytest.mark.asyncio
-async def test_embedding():
-    print("\n1")
\ No newline at end of file

diff --git a/src/tests/test_nodes.py b/src/tests/test_nodes.py
index de08d11..f7fa0f1 100644
--- a/src/tests/test_nodes.py
+++ b/src/tests/test_nodes.py
@@ -48,7 +48,7 @@ async def test_search():
     }
 
     embeddingNode = nodes.EmbeddingNode()
-    searchNode = nodes.SearchFromESNode()
+    searchNode = nodes.SearchNode()
     embeddingNode >> searchNode
 
     flow = AsyncFlow(embeddingNode)
@@ -77,16 +77,17 @@ async def test_rerank():
     logger.debug("search from es")
 
     shared = {
-        "text": "哪里盛产矿石",
+        "text": "山海经中描述了哪里盛产矿石",
         "index": "test_kb",
-        "top_k": 50,
+        "top_k": 10,
+        "search_web": False,
         "rerank_top_k": 5,
         "top_n": 3,
         "results": [],  # [{es_id, doc_id, title, type, created_at, score, content}]
     }
 
     embeddingNode = nodes.EmbeddingNode()
-    searchNode = nodes.SearchFromESNode()
+    searchNode = nodes.SearchNode()
     rerankNode = nodes.RerankNode()
     embeddingNode >> searchNode >> rerankNode
     # embeddingNode >> searchNode

diff --git a/src/tests/test_utils.py b/src/tests/test_utils.py
new file mode 100644
index 0000000..3d5c104
--- /dev/null
+++ b/src/tests/test_utils.py
@@ -0,0 +1,10 @@
+import pytest
+import json
+from src.pipeline.core.utils import logger, baidu_search_async
+
+@pytest.mark.asyncio
+async def test_search_web():
+    query = "孙悟空哪里出生"
+    logger.debug(f"query: {query} ...")
+    results = await baidu_search_async(query, max_results=5)
+    logger.debug(json.dumps(results, indent=4, ensure_ascii=False))
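
-- 
Usage note (below the signature marker, so git am ignores it): the web path is
opt-in via the "search_web" flag in the shared store. A minimal end-to-end sketch,
mirroring test_rerank() in src/tests/test_nodes.py; it assumes AsyncFlow is
importable from src.pipeline.core.pocket_flow and exposes run_async(), as the
existing tests suggest. Adjust the import and entry point if they differ:

    import asyncio
    from src.pipeline.core import nodes
    from src.pipeline.core.pocket_flow import AsyncFlow  # assumed location, as used in the tests

    async def main():
        shared = {
            "text": "山海经中描述了哪里盛产矿石",  # query text
            "index": "test_kb",   # ES index to search
            "top_k": 10,          # hits requested per source
            "search_web": True,   # True -> SearchNode adds a baidu_search_async task
            "rerank_top_k": 5,    # per-source cap before reranking
            "top_n": 3,
            "results": [],
        }
        embeddingNode = nodes.EmbeddingNode()
        searchNode = nodes.SearchNode()
        rerankNode = nodes.RerankNode()
        embeddingNode >> searchNode >> rerankNode
        flow = AsyncFlow(embeddingNode)
        await flow.run_async(shared)  # assumed AsyncFlow entry point
        # shared["results"] now holds ES and web hits, flattened and
        # sorted by "rerank_score"

    asyncio.run(main())

Design-wise, RerankNode reranks the ES list and the web list as two separate batch
items, then flattens the union and sorts it by "rerank_score", so scores from the
two rerank calls are compared directly against each other.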