feat: chunk

李如威 2025-12-10 17:43:22 +08:00
parent 5ac5b5c7b1
commit 8b1db6dd33
5 changed files with 69 additions and 12 deletions

View File: src/pipeline/core/utils.py

@@ -11,9 +11,7 @@ from PIL import Image
# -----------------------------
# Initialize OCR (only once). Note: this is the CPU version; GPU needs extra configuration
print("loading OCR...")
ocr = {}
print("OCR loaded")
async def _ocr_image_bytes(img_bytes: bytes) -> str:
    """Run OCR on an image byte stream (in a thread pool, to avoid blocking asyncio)"""
@@ -66,11 +64,12 @@ async def _load_pdf(path: str) -> str:
        # Collect the images on the page
        image_bytes_list = []
        for img in page.get_images(full=True):
            xref = img[0]
            pix = fitz.Pixmap(doc, xref)
            img_bytes = pix.tobytes("png")
            image_bytes_list.append(img_bytes)
        # TODO: run OCR on the extracted images
        # for img in page.get_images(full=True):
        #     xref = img[0]
        #     pix = fitz.Pixmap(doc, xref)
        #     img_bytes = pix.tobytes("png")
        #     image_bytes_list.append(img_bytes)
        pages.append((page_text, image_bytes_list))
    return pages
@@ -109,3 +108,16 @@ async def load_document(path: str) -> str:
        return await _load_pdf(path)
    raise ValueError(f"Unsupported file type: {suffix}")
# -----------------------------
# Text chunking; for now only simple fixed-length splitting
# -----------------------------
def fixed_size_chunk(text, chunk_size=500, overlap=50):
    """Split text into chunk_size pieces, extending each piece by overlap characters on both sides."""
    chunks = []
    start = 0
    while start < len(text):
        end = start + chunk_size
        # Widen the window by overlap on each side so adjacent chunks share context
        chunks.append(text[max(0, start - overlap) : min(len(text), end + overlap)])
        start += chunk_size
    return chunks
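
A quick sanity check of the overlap behavior (values illustrative): each interior chunk is widened by overlap characters on both sides, so neighboring chunks share context.

print(fixed_size_chunk("abcdefghij", chunk_size=4, overlap=2))
# -> ['abcdef', 'cdefghij', 'ghij']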

View File: src/pipeline/nodes/__init__.py

@@ -0,0 +1,4 @@
from src.pipeline.nodes.chunk_document_node import ChunkDocumentsNode
from src.pipeline.nodes.read_document_node import ReadDocumentNode
__all__ = ["ChunkDocumentsNode", "ReadDocumentNode"]

View File: src/pipeline/nodes/chunk_document_node.py

@@ -0,0 +1,36 @@
from src.pipeline.core.pocket_flow import AsyncBatchNode
from src.pipeline.core.utils import fixed_size_chunk
import json
import re
class ChunkDocumentsNode(AsyncBatchNode):
    async def prep_async(self, shared):
        return shared["documents"]
    async def exec_async(self, document):
        """Simple chunking.
        :param document: {text, file_name}
        """
        # print(f"document: {document}")
        text = document["text"]
        # Collapse runs of spaces and tabs into a single space
        text = re.sub(r"[ \t]+", " ", text)
        # Strip leading/trailing whitespace
        text = text.strip()
        return [{"text": x, "file_name": document["file_name"]} for x in fixed_size_chunk(text)]
    async def post_async(self, shared, prep_res, exec_res_list):
        all_chunks = []
        for chunks in exec_res_list:
            all_chunks.extend(chunks)
        print(f"all_chunks: {json.dumps(all_chunks, indent=2, ensure_ascii=False)}")
        shared["texts"] = all_chunks
        return "default"

View File: src/pipeline/nodes/read_document_node.py

@@ -1,6 +1,7 @@
from src.pipeline.core.pocket_flow import AsyncBatchNode
from src.pipeline.core.utils import load_document
class ReadDocumentNode(AsyncBatchNode):
    async def prep_async(self, shared):
        return shared["files"]
@@ -10,6 +11,7 @@ class ReadDocumentNode(AsyncBatchNode):
            document_text = await load_document(file_path)
            return {
                "file_path": file_path,
                "file_name": file_path.split("/")[-1],
                "text": document_text,
                "text_length": len(document_text),
                "status": "done",
@@ -18,6 +20,7 @@ class ReadDocumentNode(AsyncBatchNode):
        except Exception as e:
            return {
                "file_path": file_path,
                "file_name": file_path.split("/")[-1],
                "text": "",
                "text_length": 0,
                "status": "error",
@@ -25,6 +28,5 @@
            }
    async def post_async(self, shared, prep_res, exec_res):
        print([{**x, "text": x["text"][:5] + "..."} for x in exec_res])
        return {}
        shared["documents"] = exec_res
        return "default"

View File

@@ -1,8 +1,9 @@
import pytest
from src.pipeline.core.nodes import ReadDocumentNode
from src.pipeline.nodes.read_document_node import ReadDocumentNode
from src.pipeline.nodes.chunk_document_node import ChunkDocumentsNode
# from src.pipeline.nodes import ReadDocumentNode, ChunkDocumentsNode
from src.pipeline.core.pocket_flow import AsyncFlow
@pytest.mark.asyncio
async def test_embedding():
@@ -18,6 +19,8 @@ async def test_embedding():
    }
    readNode = ReadDocumentNode()
    chunkNode = ChunkDocumentsNode()
    readNode >> chunkNode
    flow = AsyncFlow(readNode)
    await flow.run_async(shared)
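
The @pytest.mark.asyncio marker implies the pytest-asyncio plugin; with it installed, the flow can be exercised with e.g. pytest -k test_embedding -s (the -s flag keeps the nodes' debug prints visible).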