feat: chunking detection

This commit is contained in:
李如威 2025-08-09 10:46:55 +08:00
parent 51c83ff4cc
commit f6a4e43220
7 changed files with 208 additions and 92 deletions

View File

@ -84,11 +84,12 @@ async def test_advanced_functionality():
print("🚀 高级多格式文档和图片内容测试")
print("=" * 60)
# Clean up the vector database
db_path = Path("/Users/liruwei/Documents/code/project/demo/base_rag/storage/chroma_db/ad_test")
if db_path.exists():
shutil.rmtree(db_path)
print("🧹 Vector database cleaned")
# Clean up data
for p in ["/Users/liruwei/Documents/code/project/demo/base_rag/storage/chroma_db/ad_test", "/Users/liruwei/Documents/code/project/demo/base_rag/storage/status_db"]:
p_obj = Path(p)
if p_obj.exists():
shutil.rmtree(p_obj)
print("🧹 Data cleaned")
# Create the RAG instance - enable image processing
rag = AdvancedTestRAG(

View File

@ -35,20 +35,25 @@ class FileStatus(Enum):
class FileManager:
"""异步文件管理器,负责文件存储、状态记录等"""
def __init__(self, storage_dir: str = "./documents", db_path: str = "./file_status.db"):
self.storage_dir = Path(storage_dir)
self.db_path = db_path
self.storage_dir.mkdir(exist_ok=True)
# Ensure the storage directory exists
Path(storage_dir).mkdir(parents=True, exist_ok=True)
# Ensure the database directory exists
Path(db_path).parent.mkdir(parents=True, exist_ok=True)
self._init_lock = asyncio.Lock()
self._db_initialized = False
async def _init_database(self):
"""异步初始化状态记录数据库"""
async with self._init_lock:
if self._db_initialized:
return
async with aiosqlite.connect(self.db_path) as conn:
await conn.execute("""
CREATE TABLE IF NOT EXISTS file_status (
@ -64,7 +69,7 @@ class FileManager:
""")
await conn.commit()
self._db_initialized = True
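This is the lock-plus-flag pattern for one-time async setup: concurrent first callers serialize on the lock and only one of them runs the DDL (BaseRAG._ensure_initialized further down adds a fast-path check before the lock as well). A minimal standalone sketch of the same idea — the class and names here are illustrative, not from this diff:

import asyncio

class LazyResource:
    def __init__(self):
        self._init_lock = asyncio.Lock()
        self._initialized = False

    async def _ensure_init(self):
        if self._initialized:        # fast path: no lock once initialized
            return
        async with self._init_lock:  # serialize first-time setup
            if self._initialized:    # re-check: another task may have won
                return
            await asyncio.sleep(0)   # stand-in for the real setup work
            self._initialized = True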
async def _calculate_file_hash(self, file_path: str) -> str:
"""异步计算文件哈希值"""
hash_md5 = hashlib.md5()
@ -72,7 +77,7 @@ class FileManager:
while chunk := await f.read(4096):
hash_md5.update(chunk)
return hash_md5.hexdigest()
async def save_file(self, source_path: str) -> Tuple[str, str]:
"""
Asynchronously save a file to the storage directory
@ -81,38 +86,38 @@ class FileManager:
source_path = Path(source_path)
if not source_path.exists():
raise FileNotFoundError(f"源文件不存在: {source_path}")
# 计算文件哈希
file_hash = await self._calculate_file_hash(str(source_path))
# Build the stored filename; use the first 8 hash characters to avoid collisions
file_extension = source_path.suffix
stored_filename = f"{source_path.stem}_{file_hash[:8]}{file_extension}"
stored_path = self.storage_dir / stored_filename
# If the file already exists with an identical hash, return it directly
if stored_path.exists():
existing_hash = await self._calculate_file_hash(str(stored_path))
if existing_hash == file_hash:
print(f"文件已存在,跳过复制: {stored_filename}")
return str(stored_path), file_hash
# Copy the file asynchronously
async with aiofiles.open(source_path, 'rb') as src:
async with aiofiles.open(stored_path, 'wb') as dst:
while chunk := await src.read(8192):
await dst.write(chunk)
print(f"文件已保存到: {stored_path}")
return str(stored_path), file_hash
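Because the stored name embeds the content hash, saving the same bytes twice is idempotent: the second call detects the existing copy and returns early. A hypothetical usage sketch (the file path is illustrative):

import asyncio

async def demo():
    fm = FileManager(storage_dir="./documents", db_path="./file_status.db")
    path1, hash1 = await fm.save_file("report.pdf")
    path2, hash2 = await fm.save_file("report.pdf")  # identical content
    assert (path1, hash1) == (path2, hash2)          # second call skipped the copy

asyncio.run(demo())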
async def update_file_status(self, file_hash: str, filename: str, file_type: str,
status: FileStatus, error_message: str = None):
"""异步更新文件处理状态"""
await self._init_database()
now = datetime.now().isoformat()
async with aiosqlite.connect(self.db_path) as conn:
# Try to update an existing record
cursor = await conn.execute("""
@ -120,24 +125,24 @@ class FileManager:
SET status = ?, updated_at = ?, error_message = ?
WHERE file_hash = ?
""", (status.value, now, error_message, file_hash))
# If no record was updated, insert a new one
if cursor.rowcount == 0:
await conn.execute("""
INSERT INTO file_status (filename, file_type, file_hash, status, created_at, updated_at, error_message)
VALUES (?, ?, ?, ?, ?, ?, ?)
""", (filename, file_type, file_hash, status.value, now, now, error_message))
await conn.commit()
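The UPDATE-then-INSERT above is a portable upsert keyed on cursor.rowcount. If file_hash carried a UNIQUE constraint, SQLite (3.24+) could express the same thing in one statement; a sketch, assuming such a constraint (it is not visible in this diff):

await conn.execute("""
    INSERT INTO file_status
        (filename, file_type, file_hash, status, created_at, updated_at, error_message)
    VALUES (?, ?, ?, ?, ?, ?, ?)
    ON CONFLICT(file_hash) DO UPDATE SET
        status = excluded.status,
        updated_at = excluded.updated_at,
        error_message = excluded.error_message
""", (filename, file_type, file_hash, status.value, now, now, error_message))
await conn.commit()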
async def get_file_status(self, file_hash: str) -> Optional[Dict]:
"""异步获取文件状态"""
await self._init_database()
async with aiosqlite.connect(self.db_path) as conn:
cursor = await conn.execute("SELECT * FROM file_status WHERE file_hash = ?", (file_hash,))
row = await cursor.fetchone()
if row:
return {
'id': row[0],
@ -150,19 +155,19 @@ class FileManager:
'error_message': row[7]
}
return None
async def list_files_by_status(self, status: FileStatus = None) -> List[Dict]:
"""异步列出指定状态的文件"""
await self._init_database()
async with aiosqlite.connect(self.db_path) as conn:
if status:
cursor = await conn.execute("SELECT * FROM file_status WHERE status = ? ORDER BY created_at DESC", (status.value,))
else:
cursor = await conn.execute("SELECT * FROM file_status ORDER BY created_at DESC")
rows = await cursor.fetchall()
return [{
'id': row[0],
'filename': row[1],
@ -422,11 +427,11 @@ class BaseRAG(ABC):
"""确保模型已初始化"""
if self._initialized:
return
async with self._init_lock:
if self._initialized:
return
# Create the embedding model via the unified model manager
self.embedding_model = await ModelManager.get_or_create_model(
self.embedding_config, "embedding", ModelManager.create_embedding_model
@ -452,7 +457,7 @@ class BaseRAG(ABC):
embedding_function=self.embedding_model,
persist_directory=self.persist_directory,
)
self._initialized = True
async def _rerank_documents(
@ -480,11 +485,11 @@ class BaseRAG(ABC):
else:
# Local-model mode (CrossEncoder) - run in a thread pool
loop = asyncio.get_event_loop()
def _local_rerank():
query_doc_pairs = [(query, doc.page_content) for doc in documents]
scores = self.reranker.predict(query_doc_pairs)
# Normalize the score data; make sure scores is a 1-D list
if isinstance(scores, np.ndarray):
scores = scores.flatten().tolist()
@ -492,9 +497,9 @@ class BaseRAG(ABC):
scores = [scores]
elif len(scores) == 1 and isinstance(scores[0], np.ndarray):
scores = scores[0].flatten().tolist()
return scores
scores = await loop.run_in_executor(None, _local_rerank)
print(f"重排得分: {scores}")
@ -566,13 +571,13 @@ class BaseRAG(ABC):
"""
# Run document loading and splitting in a thread pool
loop = asyncio.get_event_loop()
def _load_and_split():
loader = TextLoader(file_path, encoding="utf-8")
documents = loader.load()
splitter = RecursiveCharacterTextSplitter(chunk_size=200, chunk_overlap=20)
return splitter.split_documents(documents)
return await loop.run_in_executor(None, _load_and_split)
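Note the chunk_size=200 / chunk_overlap=20 here versus the 500/50 defaults in process_file_to_vector_store further down; the overlap repeats the tail of each chunk at the head of the next so sentences cut at a boundary survive intact in at least one chunk. A small sketch (the import path assumes the langchain_text_splitters package; older releases expose the class from langchain.text_splitter):

from langchain_text_splitters import RecursiveCharacterTextSplitter

splitter = RecursiveCharacterTextSplitter(chunk_size=200, chunk_overlap=20)
chunks = splitter.split_text("A long document body. " * 50)
# Consecutive chunks share up to 20 characters, so boundary sentences repeat.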
async def _load_document_by_type(self, file_path: str) -> List[Document]:
@ -580,36 +585,36 @@ class BaseRAG(ABC):
Asynchronously load a document according to its file type
"""
await self._ensure_initialized()  # make sure the models are initialized
file_path = Path(file_path)
file_extension = file_path.suffix.lower()
# Run document loading in a thread pool
loop = asyncio.get_event_loop()
def _load_doc():
try:
if file_extension in ['.txt', '.md']:
# Plain-text and Markdown files
loader = TextLoader(str(file_path), encoding="utf-8")
return loader.load()
elif file_extension in ['.doc', '.docx']:
# Word documents - enhanced image handling
try:
from langchain_community.document_loaders import UnstructuredWordDocumentLoader
from langchain_core.documents import Document
# Load the basic document content
loader = UnstructuredWordDocumentLoader(str(file_path))
documents = loader.load()
# If image processing is enabled, try to extract images
if self.image_processor:
try:
from .image_processor import extract_images_from_docx
images_info = extract_images_from_docx(str(file_path), self.image_processor)
if images_info:
print(f"📸 从DOCX中提取到 {len(images_info)} 张图片")
# 为每张图片创建单独的文档
@ -625,82 +630,82 @@ class BaseRAG(ABC):
documents.append(image_doc)
except Exception as e:
print(f"图片提取失败,继续处理文本内容: {e}")
return documents
except ImportError:
print("警告: 需要安装 unstructured 和 python-docx 来处理Word文档")
print("请运行: pip install unstructured python-docx")
raise
elif file_extension == '.csv':
# CSV files
try:
import pandas as pd
from langchain_core.documents import Document
# Read the CSV file
df = pd.read_csv(str(file_path))
# Convert the DataFrame to text
csv_content = f"CSV file: {file_path.name}\n\n"
csv_content += f"Overview:\nRows: {len(df)}, Columns: {len(df.columns)}\n\n"
csv_content += f"Column names: {', '.join(df.columns.tolist())}\n\n"
csv_content += "Data:\n"
csv_content += df.to_string(index=False)
return [Document(page_content=csv_content, metadata={"source": str(file_path)})]
except ImportError:
print("警告: 需要安装 pandas 来处理CSV文件")
print("请运行: pip install pandas")
raise
elif file_extension in ['.xls', '.xlsx']:
# Excel files
try:
import pandas as pd
from langchain_core.documents import Document
# Read every worksheet in the Excel file
excel_file = pd.ExcelFile(str(file_path))
documents = []
for sheet_name in excel_file.sheet_names:
df = pd.read_excel(str(file_path), sheet_name=sheet_name)
sheet_content = f"Excel文件: {file_path.name}\n工作表: {sheet_name}\n\n"
sheet_content += f"数据概览:\n行数: {len(df)}, 列数: {len(df.columns)}\n\n"
sheet_content += f"列名: {', '.join(df.columns.tolist())}\n\n"
sheet_content += "数据内容:\n"
sheet_content += df.to_string(index=False)
documents.append(Document(
page_content=sheet_content,
metadata={"source": str(file_path), "sheet": sheet_name}
))
return documents
except ImportError:
print("警告: 需要安装 pandas 和 openpyxl 来处理Excel文件")
print("请运行: pip install pandas openpyxl")
raise
elif file_extension == '.pdf':
# PDF files - enhanced image handling
try:
from langchain_community.document_loaders import PyPDFLoader
from langchain_core.documents import Document
# Load the basic PDF content
loader = PyPDFLoader(str(file_path))
documents = loader.load()
# If image processing is enabled, try to extract images
if self.image_processor:
try:
from .image_processor import extract_images_from_pdf
images_info = extract_images_from_pdf(str(file_path), self.image_processor)
if images_info:
print(f"📸 从PDF中提取到 {len(images_info)} 张图片")
# 为每张图片创建单独的文档
@ -716,15 +721,15 @@ class BaseRAG(ABC):
documents.append(image_doc)
except Exception as e:
print(f"PDF图片提取失败继续处理文本内容: {e}")
return documents
except ImportError:
try:
# Fallback: use pdfplumber
import pdfplumber
from langchain_core.documents import Document
documents = []
with pdfplumber.open(str(file_path)) as pdf:
for i, page in enumerate(pdf.pages):
@ -734,13 +739,13 @@ class BaseRAG(ABC):
page_content=text,
metadata={"source": str(file_path), "page": i + 1}
))
# If image processing is enabled, try to extract images
if self.image_processor:
try:
from .image_processor import extract_images_from_pdf
images_info = extract_images_from_pdf(str(file_path), self.image_processor)
if images_info:
print(f"📸 从PDF中提取到 {len(images_info)} 张图片")
for image_path, description in images_info:
@ -755,20 +760,20 @@ class BaseRAG(ABC):
documents.append(image_doc)
except Exception as e:
print(f"PDF图片提取失败: {e}")
return documents
except ImportError:
print("警告: 需要安装 PyPDF2 或 pdfplumber 来处理PDF文件")
print("请运行: pip install PyPDF2 pdfplumber")
raise
else:
raise ValueError(f"不支持的文件类型: {file_extension}")
except Exception as e:
print(f"加载文件失败 {file_path}: {e}")
raise
return await loop.run_in_executor(None, _load_doc)
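The loader above dispatches on the file suffix with one long if/elif chain. An alternative shape is an extension registry that keeps each format self-contained; a hypothetical sketch (not how this repo does it):

from pathlib import Path
from typing import Callable, Dict, List

LOADERS: Dict[str, Callable[[str], List]] = {}

def register(*suffixes: str):
    def deco(fn: Callable[[str], List]) -> Callable[[str], List]:
        for s in suffixes:
            LOADERS[s] = fn
        return fn
    return deco

@register(".txt", ".md")
def load_text(path: str) -> List:
    from langchain_community.document_loaders import TextLoader
    return TextLoader(path, encoding="utf-8").load()

def load_any(path: str) -> List:
    loader = LOADERS.get(Path(path).suffix.lower())
    if loader is None:
        raise ValueError(f"Unsupported file type: {path}")
    return loader(path)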
async def process_file_to_vector_store(self, file_path: str, chunk_size: int = 500, chunk_overlap: int = 50) -> Dict:
@ -781,18 +786,18 @@ class BaseRAG(ABC):
:return: processing result dictionary
"""
await self._ensure_initialized()
file_path = Path(file_path)
if not file_path.exists():
raise FileNotFoundError(f"文件不存在: {file_path}")
filename = file_path.name
file_type = file_path.suffix.lower()
try:
# 1. Save the file and get its hash
stored_path, file_hash = await self.file_manager.save_file(str(file_path))
# 2. Check whether the file has already been processed
existing_status = await self.file_manager.get_file_status(file_hash)
if existing_status and existing_status['status'] == FileStatus.COMPLETED.value:
@ -804,24 +809,24 @@ class BaseRAG(ABC):
'filename': filename,
'status': FileStatus.COMPLETED.value
}
# 3. Set status to WAITING
await self.file_manager.update_file_status(
file_hash, filename, file_type, FileStatus.WAITING
)
# 4. Set status to PROCESSING
await self.file_manager.update_file_status(
file_hash, filename, file_type, FileStatus.PROCESSING
)
# 5. Load the document
print(f"Started processing file: {filename}")
documents = await self._load_document_by_type(stored_path)
if not documents:
raise ValueError("未能从文件中提取到任何内容")
# 6. Split the document
loop = asyncio.get_event_loop()
def _split_docs():
@ -830,9 +835,9 @@ class BaseRAG(ABC):
chunk_overlap=chunk_overlap
)
return splitter.split_documents(documents)
split_docs = await loop.run_in_executor(None, _split_docs)
# print(split_docs)
# 7. Attach metadata to each split chunk
for doc in split_docs:
doc.metadata.update({
@ -841,18 +846,18 @@ class BaseRAG(ABC):
'file_type': file_type,
'processed_at': datetime.now().isoformat()
})
# 8. Add to the vector store
print(f"Adding {len(split_docs)} document chunks to the vector store...")
await self.add_documents_to_vector_store(split_docs)
# 9. Set status to COMPLETED
await self.file_manager.update_file_status(
file_hash, filename, file_type, FileStatus.COMPLETED
)
print(f"文件处理完成: {filename} ({len(split_docs)} 个片段)")
return {
'success': True,
'message': 'File processed successfully',
@ -861,17 +866,17 @@ class BaseRAG(ABC):
'chunks_count': len(split_docs),
'status': FileStatus.COMPLETED.value
}
except Exception as e:
error_message = str(e)
print(f"文件处理失败 {filename}: {error_message}")
# 更新状态为错误
if 'file_hash' in locals():
await self.file_manager.update_file_status(
file_hash, filename, file_type, FileStatus.ERROR, error_message
)
return {
'success': False,
'message': f'File processing failed: {error_message}',
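End to end, process_file_to_vector_store moves each file through WAITING → PROCESSING → COMPLETED (or ERROR on failure), keyed by its content hash so re-submitting an already-completed file is a no-op. A hypothetical driver (the subclass name and path are illustrative):

import asyncio

async def main():
    rag = MyRAG()  # hypothetical concrete BaseRAG subclass
    result = await rag.process_file_to_vector_store(
        "./data/sales.csv", chunk_size=500, chunk_overlap=50
    )
    print(result["status"], result.get("chunks_count"))

asyncio.run(main())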
@ -906,7 +911,7 @@ class BaseRAG(ABC):
Asynchronously add documents to the Chroma vector store
"""
await self._ensure_initialized()
if documents:
# Run embedding and storage in a thread pool
loop = asyncio.get_event_loop()
@ -924,11 +929,11 @@ class BaseRAG(ABC):
Asynchronously build the QA chain
"""
await self._ensure_initialized()
if not self.llm:
raise ValueError("LLM模型未设置")
retriever = await self.build_retriever()
# Build the QA chain in a thread pool
loop = asyncio.get_event_loop()
return await loop.run_in_executor(
@ -943,7 +948,7 @@ class BaseRAG(ABC):
Asynchronous similarity search
"""
await self._ensure_initialized()
k = k or self.retriever_top_k
# Run the search in a thread pool
loop = asyncio.get_event_loop()
@ -956,7 +961,7 @@ class BaseRAG(ABC):
Asynchronous similarity search with reranking
"""
await self._ensure_initialized()
# First fetch extra candidate documents for reranking
initial_k = k or self.retriever_top_k
if self.rerank_config.get("enabled", False):
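The hunk is cut off here, but the pattern it opens is the usual one: when reranking is enabled, over-fetch candidates, rerank them, and keep only the best k. A sketch of that shape (the over-fetch factor and method wiring are assumptions, not taken from this diff):

# Hypothetical continuation; the 3x factor is illustrative.
if self.rerank_config.get("enabled", False):
    candidates = await self.similarity_search(query, k=initial_k * 3)
    reranked = await self._rerank_documents(query, candidates)
    return reranked[:initial_k]
return await self.similarity_search(query, k=initial_k)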

File diff suppressed because one or more lines are too long

Binary file not shown.

Binary file not shown.

View File

@ -0,0 +1,6 @@
Date,Product,Sales,Quantity,CustomerType,Salesperson
2024-01-01,Laptop,8500,5,Enterprise,Zhang San
2024-01-02,Desktop,6200,4,Individual,Li Si
2024-01-03,Tablet,3200,8,Student,Wang Wu
2024-01-04,Smartphone,4500,9,Individual,Zhang San
2024-01-05,Headphones,280,12,Student,Li Si

Binary file not shown.