feat: chunk splitting detection
parent 51c83ff4cc
commit f6a4e43220
@@ -84,11 +84,12 @@ async def test_advanced_functionality():
     print("🚀 高级多格式文档和图片内容测试")
     print("=" * 60)
 
-    # Clean up the vector database
-    db_path = Path("/Users/liruwei/Documents/code/project/demo/base_rag/storage/chroma_db/ad_test")
-    if db_path.exists():
-        shutil.rmtree(db_path)
-        print("🧹 已清理向量数据库")
+    # Clean up data
+    for p in ["/Users/liruwei/Documents/code/project/demo/base_rag/storage/chroma_db/ad_test", "/Users/liruwei/Documents/code/project/demo/base_rag/storage/status_db"]:
+        p_obj = Path(p)
+        if p_obj.exists():
+            shutil.rmtree(p_obj)
+    print("🧹 已清理数据")
 
     # Create the RAG instance - enable image processing
     rag = AdvancedTestRAG(
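Note: clearing only the Chroma directory is no longer enough, because FileManager also records COMPLETED hashes in the status database, and a stale record would make process_file_to_vector_store skip re-ingestion (see step 2 further down). A minimal sketch of the same cleanup as a reusable helper — paths shortened, names illustrative:

    import shutil
    from pathlib import Path

    def reset_test_storage(*dirs: str) -> None:
        """Remove each storage directory if it exists."""
        for d in dirs:
            p = Path(d)
            if p.exists():
                shutil.rmtree(p)  # wipe the whole tree

    reset_test_storage(
        "storage/chroma_db/ad_test",  # Chroma vector store used by the test
        "storage/status_db",          # SQLite file-status records
    )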
@@ -35,20 +35,25 @@ class FileStatus(Enum):
 class FileManager:
     """Async file manager responsible for file storage, status tracking, etc."""
 
     def __init__(self, storage_dir: str = "./documents", db_path: str = "./file_status.db"):
         self.storage_dir = Path(storage_dir)
         self.db_path = db_path
-        self.storage_dir.mkdir(exist_ok=True)
+
+        # Make sure the storage directory exists
+        Path(storage_dir).mkdir(parents=True, exist_ok=True)
+
+        # Make sure the database directory exists
+        Path(db_path).parent.mkdir(parents=True, exist_ok=True)
 
         self._init_lock = asyncio.Lock()
         self._db_initialized = False
 
     async def _init_database(self):
         """Asynchronously initialize the status-tracking database"""
         async with self._init_lock:
             if self._db_initialized:
                 return
 
             async with aiosqlite.connect(self.db_path) as conn:
                 await conn.execute("""
                     CREATE TABLE IF NOT EXISTS file_status (
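Note: the switch to Path(...).mkdir(parents=True, exist_ok=True) matters when the configured paths are nested. Without parents=True, mkdir raises FileNotFoundError if any intermediate directory is missing, and the database file's parent directory was never created at all before this change. A quick illustration:

    from pathlib import Path

    # Without parents=True this fails when "storage" does not exist yet:
    #     Path("storage/documents").mkdir(exist_ok=True)   -> FileNotFoundError
    # With parents=True the whole chain is created on demand:
    Path("storage/documents").mkdir(parents=True, exist_ok=True)

    # Same idea for the SQLite file: create its parent directory, not the file.
    Path("storage/status_db/file_status.db").parent.mkdir(parents=True, exist_ok=True)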
@@ -64,7 +69,7 @@ class FileManager:
                 """)
                 await conn.commit()
                 self._db_initialized = True
 
     async def _calculate_file_hash(self, file_path: str) -> str:
         """Asynchronously compute a file's hash"""
         hash_md5 = hashlib.md5()
@@ -72,7 +77,7 @@ class FileManager:
             while chunk := await f.read(4096):
                 hash_md5.update(chunk)
         return hash_md5.hexdigest()
 
     async def save_file(self, source_path: str) -> Tuple[str, str]:
         """
         Asynchronously save a file into the storage directory
@@ -81,38 +86,38 @@ class FileManager:
         source_path = Path(source_path)
         if not source_path.exists():
             raise FileNotFoundError(f"源文件不存在: {source_path}")
 
         # Compute the file hash
         file_hash = await self._calculate_file_hash(str(source_path))
 
         # Build the stored filename (first 8 hash chars avoid collisions)
         file_extension = source_path.suffix
         stored_filename = f"{source_path.stem}_{file_hash[:8]}{file_extension}"
         stored_path = self.storage_dir / stored_filename
 
         # If the file already exists with the same hash, return right away
         if stored_path.exists():
             existing_hash = await self._calculate_file_hash(str(stored_path))
             if existing_hash == file_hash:
                 print(f"文件已存在,跳过复制: {stored_filename}")
                 return str(stored_path), file_hash
 
         # Copy the file asynchronously
         async with aiofiles.open(source_path, 'rb') as src:
             async with aiofiles.open(stored_path, 'wb') as dst:
                 while chunk := await src.read(8192):
                     await dst.write(chunk)
 
         print(f"文件已保存到: {stored_path}")
         return str(stored_path), file_hash
 
     async def update_file_status(self, file_hash: str, filename: str, file_type: str,
                                  status: FileStatus, error_message: str = None):
         """Asynchronously update a file's processing status"""
         await self._init_database()
 
         now = datetime.now().isoformat()
 
         async with aiosqlite.connect(self.db_path) as conn:
             # Try to update an existing record
             cursor = await conn.execute("""
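Note: save_file deduplicates by content, not by name — the MD5 digest is streamed in chunks and the first eight hex characters are baked into the stored filename. A synchronous sketch of the scheme (illustrative only; the real method is async and streams through aiofiles):

    import hashlib
    from pathlib import Path

    def md5_of(path: str) -> str:
        h = hashlib.md5()
        with open(path, "rb") as f:
            while chunk := f.read(4096):  # stream, so large files stay cheap
                h.update(chunk)
        return h.hexdigest()

    src = Path("report.pdf")  # hypothetical input file
    digest = md5_of(str(src))
    stored = f"{src.stem}_{digest[:8]}{src.suffix}"
    # e.g. "report_3f2a9c1b.pdf" - identical content always maps to the same name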
@@ -120,24 +125,24 @@ class FileManager:
                 SET status = ?, updated_at = ?, error_message = ?
                 WHERE file_hash = ?
             """, (status.value, now, error_message, file_hash))
 
             # If no record was updated, insert a new one
             if cursor.rowcount == 0:
                 await conn.execute("""
                     INSERT INTO file_status (filename, file_type, file_hash, status, created_at, updated_at, error_message)
                     VALUES (?, ?, ?, ?, ?, ?, ?)
                 """, (filename, file_type, file_hash, status.value, now, now, error_message))
 
             await conn.commit()
 
     async def get_file_status(self, file_hash: str) -> Optional[Dict]:
         """Asynchronously fetch a file's status"""
         await self._init_database()
 
         async with aiosqlite.connect(self.db_path) as conn:
             cursor = await conn.execute("SELECT * FROM file_status WHERE file_hash = ?", (file_hash,))
             row = await cursor.fetchone()
 
             if row:
                 return {
                     'id': row[0],
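Note: the UPDATE-then-INSERT pair above is a portable upsert — cursor.rowcount tells whether the UPDATE matched an existing row. On SQLite 3.24+ the same effect fits in one statement, assuming file_status carries a UNIQUE constraint on file_hash (the CREATE TABLE is truncated here, so that is an assumption). A sketch of the alternative, to drop in inside the same async connection context:

    # Sketch of a native upsert alternative, NOT what the code above does.
    # Requires UNIQUE(file_hash) on the table.
    await conn.execute("""
        INSERT INTO file_status (filename, file_type, file_hash, status,
                                 created_at, updated_at, error_message)
        VALUES (?, ?, ?, ?, ?, ?, ?)
        ON CONFLICT(file_hash) DO UPDATE SET
            status = excluded.status,
            updated_at = excluded.updated_at,
            error_message = excluded.error_message
    """, (filename, file_type, file_hash, status.value, now, now, error_message))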
@@ -150,19 +155,19 @@ class FileManager:
                     'error_message': row[7]
                 }
             return None
 
     async def list_files_by_status(self, status: FileStatus = None) -> List[Dict]:
         """Asynchronously list files with the given status"""
         await self._init_database()
 
         async with aiosqlite.connect(self.db_path) as conn:
             if status:
                 cursor = await conn.execute("SELECT * FROM file_status WHERE status = ? ORDER BY created_at DESC", (status.value,))
             else:
                 cursor = await conn.execute("SELECT * FROM file_status ORDER BY created_at DESC")
 
             rows = await cursor.fetchall()
 
             return [{
                 'id': row[0],
                 'filename': row[1],
@@ -422,11 +427,11 @@ class BaseRAG(ABC):
         """Make sure the models are initialized"""
         if self._initialized:
             return
 
         async with self._init_lock:
             if self._initialized:
                 return
 
             # Create the embedding model via the unified model manager
             self.embedding_model = await ModelManager.get_or_create_model(
                 self.embedding_config, "embedding", ModelManager.create_embedding_model
@@ -452,7 +457,7 @@ class BaseRAG(ABC):
                 embedding_function=self.embedding_model,
                 persist_directory=self.persist_directory,
             )
 
             self._initialized = True
 
     async def _rerank_documents(
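Note: _ensure_initialized uses the async double-checked pattern — a lock-free fast path, then a re-check under the lock so concurrent callers cannot both load the models. A minimal standalone sketch of the pattern:

    import asyncio

    class LazyInit:
        def __init__(self):
            self._initialized = False
            self._init_lock = asyncio.Lock()

        async def ensure(self):
            if self._initialized:           # fast path, no lock taken
                return
            async with self._init_lock:
                if self._initialized:       # lost the race: another task did it
                    return
                await asyncio.sleep(0)      # stand-in for the real model loading
                self._initialized = True

    asyncio.run(LazyInit().ensure())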
@@ -480,11 +485,11 @@ class BaseRAG(ABC):
         else:
             # Local-model mode (CrossEncoder) - run in a thread pool
             loop = asyncio.get_event_loop()
 
             def _local_rerank():
                 query_doc_pairs = [(query, doc.page_content) for doc in documents]
                 scores = self.reranker.predict(query_doc_pairs)
 
                 # Normalize the score data: make sure scores is a flat list
                 if isinstance(scores, np.ndarray):
                     scores = scores.flatten().tolist()
@@ -492,9 +497,9 @@ class BaseRAG(ABC):
                     scores = [scores]
                 elif len(scores) == 1 and isinstance(scores[0], np.ndarray):
                     scores = scores[0].flatten().tolist()
 
                 return scores
 
             scores = await loop.run_in_executor(None, _local_rerank)
             print(f"重排得分: {scores}")
 
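Note: CrossEncoder.predict can hand back an ndarray, a bare scalar, or a one-element list of arrays depending on input shape, which is why _local_rerank normalizes before returning. The same logic as a standalone helper — a sketch:

    import numpy as np

    def to_score_list(scores) -> list:
        """Normalize reranker output to a flat list of floats."""
        if isinstance(scores, np.ndarray):
            return scores.flatten().tolist()
        if np.isscalar(scores):
            return [float(scores)]
        if len(scores) == 1 and isinstance(scores[0], np.ndarray):
            return scores[0].flatten().tolist()
        return list(scores)

    print(to_score_list(np.array([[0.91], [0.23]])))  # [0.91, 0.23]
    print(to_score_list(0.7))                         # [0.7]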
@@ -566,13 +571,13 @@ class BaseRAG(ABC):
         """
         # Run document loading and splitting in a thread pool
         loop = asyncio.get_event_loop()
 
         def _load_and_split():
             loader = TextLoader(file_path, encoding="utf-8")
             documents = loader.load()
             splitter = RecursiveCharacterTextSplitter(chunk_size=200, chunk_overlap=20)
             return splitter.split_documents(documents)
 
         return await loop.run_in_executor(None, _load_and_split)
 
     async def _load_document_by_type(self, file_path: str) -> List[Document]:
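Note: chunk_size and chunk_overlap are measured in characters; the overlap repeats the tail of each chunk at the head of the next so sentences cut at a boundary stay retrievable. A quick demo, assuming the langchain_text_splitters import path:

    from langchain_text_splitters import RecursiveCharacterTextSplitter

    splitter = RecursiveCharacterTextSplitter(chunk_size=200, chunk_overlap=20)
    text = "RAG systems split long documents into chunks before embedding. " * 12
    for i, chunk in enumerate(splitter.split_text(text)):
        # adjacent chunks share roughly 20 trailing/leading characters
        print(i, len(chunk), repr(chunk[:40]))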
@@ -580,36 +585,36 @@ class BaseRAG(ABC):
         Asynchronously load a document according to its file type
         """
         await self._ensure_initialized()  # make sure the models are initialized
 
         file_path = Path(file_path)
         file_extension = file_path.suffix.lower()
 
         # Run document loading in a thread pool
         loop = asyncio.get_event_loop()
 
         def _load_doc():
             try:
                 if file_extension in ['.txt', '.md']:
                     # Plain-text and Markdown files
                     loader = TextLoader(str(file_path), encoding="utf-8")
                     return loader.load()
 
                 elif file_extension in ['.doc', '.docx']:
                     # Word documents - enhanced image handling
                     try:
                         from langchain_community.document_loaders import UnstructuredWordDocumentLoader
                         from langchain_core.documents import Document
 
                         # Load the basic document content
                         loader = UnstructuredWordDocumentLoader(str(file_path))
                         documents = loader.load()
 
                         # If image processing is enabled, try to extract images
                         if self.image_processor:
                             try:
                                 from .image_processor import extract_images_from_docx
                                 images_info = extract_images_from_docx(str(file_path), self.image_processor)
 
                                 if images_info:
                                     print(f"📸 从DOCX中提取到 {len(images_info)} 张图片")
                                     # Create a separate document for each image
@@ -625,82 +630,82 @@ class BaseRAG(ABC):
                                         documents.append(image_doc)
                             except Exception as e:
                                 print(f"图片提取失败,继续处理文本内容: {e}")
 
                         return documents
 
                     except ImportError:
                         print("警告: 需要安装 unstructured 和 python-docx 来处理Word文档")
                         print("请运行: pip install unstructured python-docx")
                         raise
 
                 elif file_extension == '.csv':
                     # CSV files
                     try:
                         import pandas as pd
                         from langchain_core.documents import Document
 
                         # Read the CSV file
                         df = pd.read_csv(str(file_path))
 
                         # Convert the DataFrame to text
                         csv_content = f"CSV文件: {file_path.name}\n\n"
                         csv_content += f"数据概览:\n行数: {len(df)}, 列数: {len(df.columns)}\n\n"
                         csv_content += f"列名: {', '.join(df.columns.tolist())}\n\n"
                         csv_content += "数据内容:\n"
                         csv_content += df.to_string(index=False)
 
                         return [Document(page_content=csv_content, metadata={"source": str(file_path)})]
                     except ImportError:
                         print("警告: 需要安装 pandas 来处理CSV文件")
                         print("请运行: pip install pandas")
                         raise
 
                 elif file_extension in ['.xls', '.xlsx']:
                     # Excel files
                     try:
                         import pandas as pd
                         from langchain_core.documents import Document
 
                         # Read every worksheet in the Excel file
                         excel_file = pd.ExcelFile(str(file_path))
                         documents = []
 
                         for sheet_name in excel_file.sheet_names:
                             df = pd.read_excel(str(file_path), sheet_name=sheet_name)
 
                             sheet_content = f"Excel文件: {file_path.name}\n工作表: {sheet_name}\n\n"
                             sheet_content += f"数据概览:\n行数: {len(df)}, 列数: {len(df.columns)}\n\n"
                             sheet_content += f"列名: {', '.join(df.columns.tolist())}\n\n"
                             sheet_content += "数据内容:\n"
                             sheet_content += df.to_string(index=False)
 
                             documents.append(Document(
                                 page_content=sheet_content,
                                 metadata={"source": str(file_path), "sheet": sheet_name}
                             ))
 
                         return documents
                     except ImportError:
                         print("警告: 需要安装 pandas 和 openpyxl 来处理Excel文件")
                         print("请运行: pip install pandas openpyxl")
                         raise
 
                 elif file_extension == '.pdf':
                     # PDF files - enhanced image handling
                     try:
                         from langchain_community.document_loaders import PyPDFLoader
                         from langchain_core.documents import Document
 
                         # Load the basic PDF content
                         loader = PyPDFLoader(str(file_path))
                         documents = loader.load()
 
                         # If image processing is enabled, try to extract images
                         if self.image_processor:
                             try:
                                 from .image_processor import extract_images_from_pdf
                                 images_info = extract_images_from_pdf(str(file_path), self.image_processor)
 
                                 if images_info:
                                     print(f"📸 从PDF中提取到 {len(images_info)} 张图片")
                                     # Create a separate document for each image
@@ -716,15 +721,15 @@ class BaseRAG(ABC):
                                         documents.append(image_doc)
                             except Exception as e:
                                 print(f"PDF图片提取失败,继续处理文本内容: {e}")
 
                         return documents
 
                     except ImportError:
                         try:
                             # Fallback: use pdfplumber
                             import pdfplumber
                             from langchain_core.documents import Document
 
                             documents = []
                             with pdfplumber.open(str(file_path)) as pdf:
                                 for i, page in enumerate(pdf.pages):
@@ -734,13 +739,13 @@ class BaseRAG(ABC):
                                             page_content=text,
                                             metadata={"source": str(file_path), "page": i + 1}
                                         ))
 
                             # If image processing is enabled, try to extract images
                             if self.image_processor:
                                 try:
                                     from .image_processor import extract_images_from_pdf
                                     images_info = extract_images_from_pdf(str(file_path), self.image_processor)
 
                                     if images_info:
                                         print(f"📸 从PDF中提取到 {len(images_info)} 张图片")
                                         for image_path, description in images_info:
@@ -755,20 +760,20 @@ class BaseRAG(ABC):
                                             documents.append(image_doc)
                                 except Exception as e:
                                     print(f"PDF图片提取失败: {e}")
 
                             return documents
                         except ImportError:
                             print("警告: 需要安装 PyPDF2 或 pdfplumber 来处理PDF文件")
                             print("请运行: pip install PyPDF2 pdfplumber")
                             raise
 
                 else:
                     raise ValueError(f"不支持的文件类型: {file_extension}")
 
             except Exception as e:
                 print(f"加载文件失败 {file_path}: {e}")
                 raise
 
         return await loop.run_in_executor(None, _load_doc)
 
     async def process_file_to_vector_store(self, file_path: str, chunk_size: int = 500, chunk_overlap: int = 50) -> Dict:
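Note: _load_doc is effectively an extension-to-loader dispatch with per-branch fallbacks. Summarized as data — a sketch mirroring the branches above:

    LOADERS = {
        ".txt":  "TextLoader",
        ".md":   "TextLoader",
        ".doc":  "UnstructuredWordDocumentLoader + optional image extraction",
        ".docx": "UnstructuredWordDocumentLoader + optional image extraction",
        ".csv":  "pandas.read_csv -> one Document",
        ".xls":  "pandas.ExcelFile -> one Document per sheet",
        ".xlsx": "pandas.ExcelFile -> one Document per sheet",
        ".pdf":  "PyPDFLoader, falling back to pdfplumber; optional image extraction",
    }
    # Anything else raises ValueError, and every branch re-raises on missing deps.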
@@ -781,18 +786,18 @@ class BaseRAG(ABC):
         :return: dict describing the processing result
         """
         await self._ensure_initialized()
 
         file_path = Path(file_path)
         if not file_path.exists():
             raise FileNotFoundError(f"文件不存在: {file_path}")
 
         filename = file_path.name
         file_type = file_path.suffix.lower()
 
         try:
             # 1. Save the file and get its hash
             stored_path, file_hash = await self.file_manager.save_file(str(file_path))
 
             # 2. Check whether the file was already processed
             existing_status = await self.file_manager.get_file_status(file_hash)
             if existing_status and existing_status['status'] == FileStatus.COMPLETED.value:
@@ -804,24 +809,24 @@ class BaseRAG(ABC):
                     'filename': filename,
                     'status': FileStatus.COMPLETED.value
                 }
 
             # 3. Set status to waiting
             await self.file_manager.update_file_status(
                 file_hash, filename, file_type, FileStatus.WAITING
             )
 
             # 4. Set status to processing
             await self.file_manager.update_file_status(
                 file_hash, filename, file_type, FileStatus.PROCESSING
             )
 
             # 5. Load the document
             print(f"开始处理文件: {filename}")
             documents = await self._load_document_by_type(stored_path)
 
             if not documents:
                 raise ValueError("未能从文件中提取到任何内容")
 
             # 6. Split the document
             loop = asyncio.get_event_loop()
             def _split_docs():
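Note: steps 3-4 drive the FileStatus state machine, and a failure anywhere in the try block lands in ERROR with the message recorded. The full lifecycle, sketched with hypothetical arguments (fm is a FileManager, h a file hash):

    # WAITING -> PROCESSING -> COMPLETED, or -> ERROR on any exception
    await fm.update_file_status(h, "report.pdf", ".pdf", FileStatus.WAITING)
    await fm.update_file_status(h, "report.pdf", ".pdf", FileStatus.PROCESSING)
    # ... load, split, embed ...
    await fm.update_file_status(h, "report.pdf", ".pdf", FileStatus.COMPLETED)
    # on failure instead:
    # await fm.update_file_status(h, "report.pdf", ".pdf", FileStatus.ERROR, str(e))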
@@ -830,9 +835,9 @@ class BaseRAG(ABC):
                     chunk_overlap=chunk_overlap
                 )
                 return splitter.split_documents(documents)
 
             split_docs = await loop.run_in_executor(None, _split_docs)
-
+            # print(split_docs)
             # 7. Attach metadata to every split chunk
             for doc in split_docs:
                 doc.metadata.update({
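Note: the commented-out print(split_docs) is this commit's actual change — a hook for inspecting how documents were chunked. Dumping whole Document objects is noisy; a lighter inspection one could drop in instead (a sketch, not part of the commit):

    def describe_chunks(docs) -> None:
        """Print chunk count and size distribution instead of raw objects."""
        sizes = [len(d.page_content) for d in docs]
        print(f"chunks={len(docs)}  min={min(sizes)}  max={max(sizes)}  "
              f"avg={sum(sizes) / len(sizes):.0f} chars")

    # describe_chunks(split_docs)  # documents was already checked non-empty above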
@@ -841,18 +846,18 @@ class BaseRAG(ABC):
                     'file_type': file_type,
                     'processed_at': datetime.now().isoformat()
                 })
 
             # 8. Add to the vector store
             print(f"将 {len(split_docs)} 个文档片段添加到向量库...")
             await self.add_documents_to_vector_store(split_docs)
 
             # 9. Set status to completed
             await self.file_manager.update_file_status(
                 file_hash, filename, file_type, FileStatus.COMPLETED
             )
 
             print(f"文件处理完成: {filename} ({len(split_docs)} 个片段)")
 
             return {
                 'success': True,
                 'message': '文件处理完成',
@@ -861,17 +866,17 @@ class BaseRAG(ABC):
                 'chunks_count': len(split_docs),
                 'status': FileStatus.COMPLETED.value
             }
 
         except Exception as e:
             error_message = str(e)
             print(f"文件处理失败 {filename}: {error_message}")
 
             # Set status to error
             if 'file_hash' in locals():
                 await self.file_manager.update_file_status(
                     file_hash, filename, file_type, FileStatus.ERROR, error_message
                 )
 
             return {
                 'success': False,
                 'message': f'文件处理失败: {error_message}',
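Note: the 'file_hash' in locals() check guards against exceptions thrown before save_file returns (step 1), when there is no hash to record against. An equivalent without introspection is to pre-initialize the name — a sketch of the restructured excerpt:

    file_hash = None
    try:
        stored_path, file_hash = await self.file_manager.save_file(str(file_path))
        # ... steps 2-9 ...
    except Exception as e:
        if file_hash is not None:  # only record if the file made it to storage
            await self.file_manager.update_file_status(
                file_hash, filename, file_type, FileStatus.ERROR, str(e)
            )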
@@ -906,7 +911,7 @@ class BaseRAG(ABC):
         Asynchronously add documents to the Chroma vector store.
         """
         await self._ensure_initialized()
 
         if documents:
             # Run embedding and storage in a thread pool
             loop = asyncio.get_event_loop()
@@ -924,11 +929,11 @@ class BaseRAG(ABC):
         Asynchronously build the QA chain.
         """
         await self._ensure_initialized()
 
         if not self.llm:
             raise ValueError("LLM模型未设置")
         retriever = await self.build_retriever()
 
         # Build the QA chain in a thread pool
         loop = asyncio.get_event_loop()
         return await loop.run_in_executor(
@@ -943,7 +948,7 @@ class BaseRAG(ABC):
         Asynchronous similarity search.
         """
         await self._ensure_initialized()
 
         k = k or self.retriever_top_k
         # Run the search in a thread pool
         loop = asyncio.get_event_loop()
@@ -956,7 +961,7 @@ class BaseRAG(ABC):
         Asynchronous similarity search with reranking.
         """
         await self._ensure_initialized()
 
         # First fetch extra candidate documents for reranking
         initial_k = k or self.retriever_top_k
         if self.rerank_config.get("enabled", False):
File diff suppressed because one or more lines are too long
Binary file not shown.
Binary file not shown.
|
@ -0,0 +1,6 @@
|
||||||
|
日期,产品,销售额,数量,客户类型,销售员
|
||||||
|
2024-01-01,笔记本电脑,8500,5,企业,张三
|
||||||
|
2024-01-02,台式机,6200,4,个人,李四
|
||||||
|
2024-01-03,平板电脑,3200,8,学生,王五
|
||||||
|
2024-01-04,智能手机,4500,9,个人,张三
|
||||||
|
2024-01-05,耳机,280,12,学生,李四
|
|
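Note: this six-line fixture (a header plus five rows) exercises the CSV branch of _load_doc above. What that branch produces for it, sketched with an assumed filename:

    import pandas as pd

    df = pd.read_csv("sales_test.csv")  # hypothetical name for the fixture
    print(len(df), list(df.columns))    # 5 rows, 6 columns
    print(df.to_string(index=False))    # the flat text embedded in the Document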
Binary file not shown.