From 8b1db6dd331f296242eb64978f18fbb650d4aee2 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E6=9D=8E=E5=A6=82=E5=A8=81?=
Date: Wed, 10 Dec 2025 17:43:22 +0800
Subject: [PATCH] feat: add document chunking node

---
 src/pipeline/core/utils.py                   | 26 +++++++++++-------
 src/pipeline/nodes/__init__.py               |  4 +++
 src/pipeline/nodes/chunk_document_node.py    | 33 ++++++++++++++++++++
 .../nodes.py => nodes/read_document_node.py} |  8 +++--
 src/tests/test_nodes.py                      |  6 ++--
 5 files changed, 65 insertions(+), 12 deletions(-)
 create mode 100644 src/pipeline/nodes/__init__.py
 create mode 100644 src/pipeline/nodes/chunk_document_node.py
 rename src/pipeline/{core/nodes.py => nodes/read_document_node.py} (83%)

diff --git a/src/pipeline/core/utils.py b/src/pipeline/core/utils.py
index 2318f10..f54e79a 100644
--- a/src/pipeline/core/utils.py
+++ b/src/pipeline/core/utils.py
@@ -11,9 +11,7 @@ from PIL import Image
 
 # -----------------------------
 # Initialize OCR once at import time; note this is the CPU build, GPU support needs extra configuration
-print("loading ocr...")
 ocr = {}
-print("ocr loaded")
 
 async def _ocr_image_bytes(img_bytes: bytes) -> str:
     """Run OCR on image bytes (in a thread pool so asyncio is not blocked)"""
@@ -66,11 +64,12 @@ async def _load_pdf(path: str) -> str:
 
         # Collect the page's embedded images
         image_bytes_list = []
-        for img in page.get_images(full=True):
-            xref = img[0]
-            pix = fitz.Pixmap(doc, xref)
-            img_bytes = pix.tobytes("png")
-            image_bytes_list.append(img_bytes)
+        # TODO: OCR the embedded images
+        # for img in page.get_images(full=True):
+        #     xref = img[0]
+        #     pix = fitz.Pixmap(doc, xref)
+        #     img_bytes = pix.tobytes("png")
+        #     image_bytes_list.append(img_bytes)
 
         pages.append((page_text, image_bytes_list))
     return pages
@@ -109,3 +108,16 @@ async def load_document(path: str) -> str:
         return await _load_pdf(path)
 
     raise ValueError(f"Unsupported file type: {suffix}")
+
+# -----------------------------
+# Text chunking; for now only simple fixed-length chunks
+# -----------------------------
+
+def fixed_size_chunk(text, chunk_size=500, overlap=50):
+    chunks = []
+    start = 0
+    while start < len(text):
+        end = start + chunk_size
+        chunks.append(text[max(0, start - overlap) : min(len(text), end + overlap)])
+        start += chunk_size
+    return chunks
diff --git a/src/pipeline/nodes/__init__.py b/src/pipeline/nodes/__init__.py
new file mode 100644
index 0000000..d1921c8
--- /dev/null
+++ b/src/pipeline/nodes/__init__.py
@@ -0,0 +1,4 @@
+from src.pipeline.nodes.chunk_document_node import ChunkDocumentsNode
+from src.pipeline.nodes.read_document_node import ReadDocumentNode
+
+__all__ = ["ChunkDocumentsNode", "ReadDocumentNode"]
diff --git a/src/pipeline/nodes/chunk_document_node.py b/src/pipeline/nodes/chunk_document_node.py
new file mode 100644
index 0000000..5b6c528
--- /dev/null
+++ b/src/pipeline/nodes/chunk_document_node.py
@@ -0,0 +1,33 @@
+from src.pipeline.core.pocket_flow import AsyncBatchNode
+from src.pipeline.core.utils import fixed_size_chunk
+import json
+import re
+
+
+class ChunkDocumentsNode(AsyncBatchNode):
+    async def prep_async(self, shared):
+        return shared["documents"]
+
+    async def exec_async(self, document):
+        """Simple chunking.
+        :param document: {text, file_name}
+        """
+        text = document["text"]
+        # First replace runs of tabs with a single space
+        text = re.sub(r"\t+", " ", text)
+        # Then collapse runs of spaces into one
+        text = re.sub(r" +", " ", text)
+        # Strip leading/trailing whitespace
+        text = text.strip()
+        return [{"text": x, "file_name": document["file_name"]} for x in fixed_size_chunk(text)]
+
+    async def post_async(self, shared, prep_res, exec_res_list):
+        all_chunks = []
+        for chunks in exec_res_list:
+            all_chunks.extend(chunks)
+
+        print(f"all_chunks: {json.dumps(all_chunks, indent=2, ensure_ascii=False)}")
+
+        shared["texts"] = all_chunks
+
+        return "default"
diff --git a/src/pipeline/core/nodes.py b/src/pipeline/nodes/read_document_node.py
similarity index 83%
rename from src/pipeline/core/nodes.py
rename to src/pipeline/nodes/read_document_node.py
index bd32c06..50df9a5 100644
--- a/src/pipeline/core/nodes.py
+++ b/src/pipeline/nodes/read_document_node.py
@@ -1,6 +1,7 @@
 from src.pipeline.core.pocket_flow import AsyncBatchNode
 from src.pipeline.core.utils import load_document
 
+
 class ReadDocumentNode(AsyncBatchNode):
     async def prep_async(self, shared):
         return shared["files"]
@@ -10,6 +11,7 @@ class ReadDocumentNode(AsyncBatchNode):
             document_text = await load_document(file_path)
             return {
                 "file_path": file_path,
+                "file_name": file_path.split("/")[-1],
                 "text": document_text,
                 "text_length": len(document_text),
                 "status": "done",
@@ -18,6 +20,7 @@ class ReadDocumentNode(AsyncBatchNode):
         except Exception as e:
             return {
                 "file_path": file_path,
+                "file_name": file_path.split("/")[-1],
                 "text": "",
                 "text_length": 0,
                 "status": "error",
@@ -25,6 +28,5 @@ class ReadDocumentNode(AsyncBatchNode):
             }
 
     async def post_async(self, shared, prep_res, exec_res):
-
-        print([{**x, "text": x["text"][:5] + "..."} for x in exec_res])
-        return {}
+        shared["documents"] = exec_res
+        return "default"
diff --git a/src/tests/test_nodes.py b/src/tests/test_nodes.py
index 91e0a5a..dc5ebd4 100644
--- a/src/tests/test_nodes.py
+++ b/src/tests/test_nodes.py
@@ -1,8 +1,8 @@
 import pytest
-from src.pipeline.core.nodes import ReadDocumentNode
+from src.pipeline.nodes.read_document_node import ReadDocumentNode
+from src.pipeline.nodes.chunk_document_node import ChunkDocumentsNode
 from src.pipeline.core.pocket_flow import AsyncFlow
-
 
 
 @pytest.mark.asyncio
 async def test_embedding():
@@ -18,6 +18,8 @@ async def test_embedding():
     }
 
     readNode = ReadDocumentNode()
+    chunkNode = ChunkDocumentsNode()
+    readNode >> chunkNode
     flow = AsyncFlow(readNode)
 
     await flow.run_async(shared)
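
Note on fixed_size_chunk: the loop advances start by chunk_size but widens each
slice by overlap characters on both sides, so adjacent chunks share
2 * overlap characters (100 with the defaults), not overlap. A minimal sketch
of the resulting windows under the default parameters (illustration only, not
part of the patch):

    # Expected fixed_size_chunk windows; assumes the implementation above.
    from src.pipeline.core.utils import fixed_size_chunk

    text = "a" * 1200
    chunks = fixed_size_chunk(text, chunk_size=500, overlap=50)

    # start takes the values 0, 500, 1000; each slice is widened by 50 on
    # both sides and clamped to the text bounds:
    #   chunks[0] == text[0:550]     (550 chars)
    #   chunks[1] == text[450:1050]  (600 chars, shares 100 with chunks[0])
    #   chunks[2] == text[950:1200]  (250 chars, tail)
    assert [len(c) for c in chunks] == [550, 600, 250]

If single-sided overlap is the intent, the more common variant advances start
by chunk_size - overlap and slices text[start:end] directly.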
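
Note on wiring: ReadDocumentNode.post_async stores its results in
shared["documents"], which ChunkDocumentsNode.prep_async reads, so running the
two-node flow leaves both shared["documents"] and shared["texts"] populated.
A sketch of the intended end-to-end usage, mirroring test_nodes.py (the input
path is a placeholder):

    import asyncio

    from src.pipeline.core.pocket_flow import AsyncFlow
    from src.pipeline.nodes.chunk_document_node import ChunkDocumentsNode
    from src.pipeline.nodes.read_document_node import ReadDocumentNode

    async def main():
        shared = {"files": ["docs/example.pdf"]}  # placeholder path

        read_node = ReadDocumentNode()
        chunk_node = ChunkDocumentsNode()
        read_node >> chunk_node  # reading feeds chunking

        await AsyncFlow(read_node).run_async(shared)
        # shared["documents"]: per-file dicts (file_path, file_name, text, ...)
        # shared["texts"]: chunk records ({"text", "file_name"})

    asyncio.run(main())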