#!/usr/bin/env python3
"""
Advanced test example - multi-format document and image content recognition
"""

import sys
import os
import asyncio
import warnings
from pathlib import Path
import shutil

# Silence PyTorch FutureWarnings
warnings.filterwarnings("ignore", category=FutureWarning, module="torch")

# Add the source directory to the import path
sys.path.append(os.path.join(os.path.dirname(__file__), "..", "src"))

from base_rag.core import BaseRAG
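

# AdvancedTestRAG below relies on two async helpers inherited from BaseRAG:
# process_file_to_vector_store() handles ingestion and similarity_search_with_rerank()
# handles retrieval; both are defined in base_rag.core.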
class AdvancedTestRAG(BaseRAG):
    """Advanced test RAG implementation with image content support"""

    async def ingest(self, file_path: str, **kwargs):
        """Ingest a document into the vector store"""
        return await self.process_file_to_vector_store(file_path, **kwargs)

    async def query(self, question: str) -> str:
        """Answer a question, with enhanced display of image content"""
        docs = await self.similarity_search_with_rerank(question, k=5)

        if not docs:
            return "Sorry, no relevant information was found."

        # Analyze and organize the search results
        sources = []
        contexts = []
        image_count = 0
        text_count = 0

        for doc in docs:
            source = doc.metadata.get("source_file", "unknown source")
            doc_type = doc.metadata.get("type", "text")
            content = doc.page_content.strip()

            if source not in sources:
                sources.append(source)

            # Handle each content type
            if doc_type == "image":
                # Enhanced display for image content
                image_count += 1
                enhanced_content = f"🖼️ [Image {image_count}] {content}"

                # If the image description carries file details, extract and format them.
                # The Chinese markers must match the caption text produced by the image
                # pipeline during ingestion.
                if "图片文件:" in content and "尺寸:" in content:
                    parts = content.split(" | ")
                    if len(parts) >= 3:
                        file_info = parts[0].replace("图片文件: ", "")
                        size_info = parts[1].replace("尺寸: ", "")
                        type_info = parts[2].replace("类型: ", "")
                        enhanced_content = f"🖼️ [Image content] {file_info}\n   📐 Size: {size_info} | 🏷️ Type: {type_info}"

                contexts.append(enhanced_content)
            else:
                text_count += 1
                contexts.append(f"📄 {content}")

        context = "\n\n".join(contexts)
        sources_str = ", ".join(sources)

        # Append content statistics
        stats = f"({text_count} text"
        if image_count > 0:
            stats += f" + {image_count} image"
        stats += ")"

        return f"Based on documents ({sources_str}) {stats}:\n\n{context}"


async def test_advanced_functionality():
    """Test the advanced multi-format document and image features"""
    print("🚀 Advanced multi-format document and image content test")
    print("=" * 60)

    # Clean up the vector database
    db_path = Path("/Users/liruwei/Documents/code/project/demo/base_rag/chroma_db/advanced_test")
    if db_path.exists():
        shutil.rmtree(db_path)
        print("🧹 Vector database cleaned")

    # Create the RAG instance with image processing enabled
    rag = AdvancedTestRAG(
        vector_store_name="advanced_test",
        retriever_top_k=5,
        storage_directory="/Users/liruwei/Documents/code/project/demo/base_rag/test_files",
        status_db_path="/Users/liruwei/Documents/code/project/demo/base_rag/advanced_test_status.db",
        # Enable image processing - a local BLIP model gives better image-to-text recognition
        image_config={
            "enabled": True,
            "type": "local",
            "model": "Salesforce/blip-image-captioning-base"
        }
    )
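    # Note: with "type": "local", the BLIP captioning model above is presumably loaded
    # through Hugging Face transformers on first use, which needs the transformers/torch
    # stack and a one-time model download; the exact handling lives in BaseRAG, not here.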
print("✅ 高级RAG实例创建成功 (已启用图片处理)")
|
|
print()
|
|
|
|
# 测试多格式文档
|
|
test_files = [
|
|
{
|
|
"file": "test_document.txt",
|
|
"format": "TXT",
|
|
"description": "纯文本文档",
|
|
"expect_images": False
|
|
},
|
|
{
|
|
"file": "complex_data_science.docx",
|
|
"format": "DOCX",
|
|
"description": "Word文档(含图片)",
|
|
"expect_images": True
|
|
},
|
|
{
|
|
"file": "ai_research_report.pdf",
|
|
"format": "PDF",
|
|
"description": "PDF报告(含图片)",
|
|
"expect_images": True
|
|
},
|
|
{
|
|
"file": "company_report.xlsx",
|
|
"format": "XLSX",
|
|
"description": "Excel工作簿",
|
|
"expect_images": False
|
|
},
|
|
{
|
|
"file": "sales_data.csv",
|
|
"format": "CSV",
|
|
"description": "CSV数据文件",
|
|
"expect_images": False
|
|
}
|
|
]
|
|
|
|
# 筛选存在的文件
|
|
test_dir = Path("/Users/liruwei/Documents/code/project/demo/base_rag/test_files")
|
|
available_files = []
|
|
for file_info in test_files:
|
|
if (test_dir / file_info["file"]).exists():
|
|
available_files.append(file_info)
|
|
|
|
print(f"📂 发现 {len(available_files)} 个测试文档")
|
|
print()
|
|
|
|
    # Process the documents
    processed_results = []
    total_images = 0

    for file_info in available_files:
        filename = file_info["file"]
        format_type = file_info["format"]
        description = file_info["description"]
        expect_images = file_info["expect_images"]

        print(f"📄 Processing {format_type}: {filename}")
        print(f"   {description}")

        try:
            result = await rag.ingest(str(test_dir / filename))
            if result and result.get('success'):
                chunks_count = result['chunks_count']
                print(f"   ✅ Success: {chunks_count} chunks")

                # Rough estimate of image-derived chunks: plain formats (TXT, CSV) are
                # assumed to yield about 1 text chunk and richer formats about 2, so
                # anything beyond that baseline is attributed to extracted images.
                baseline = 1 if format_type in ['TXT', 'CSV'] else 2
                has_images = chunks_count > baseline + 1

                if expect_images and has_images:
                    estimated_images = chunks_count - baseline
                    total_images += estimated_images
                    print(f"   🖼️ Estimated ~{estimated_images} image chunks")

                processed_results.append({
                    "file": filename,
                    "format": format_type,
                    "chunks": chunks_count,
                    "has_images": has_images
                })

            else:
                message = result.get('message', 'unknown error')
                # The Chinese marker matches the "already processed" message returned by ingestion
                if "已经处理完毕" in message:
                    print("   ⚠️ File already processed")
                else:
                    print(f"   ❌ Processing failed: {message}")

        except Exception as e:
            print(f"   ❌ Error: {str(e)}")
        print()

    # Result statistics
    image_docs = [r for r in processed_results if r.get("has_images")]
    text_docs = [r for r in processed_results if not r.get("has_images")]

    print("📊 Processing statistics:")
    print(f"   📄 Text-only documents: {len(text_docs)}")
    print(f"   🖼️ Documents with images: {len(image_docs)}")
    if total_images > 0:
        print(f"   📸 Estimated total images: ~{total_images}")
    print()

    # Advanced query tests
print("🔍 高级查询测试...")
|
|
|
|
test_queries = [
|
|
{
|
|
"question": "数据科学的核心技术有哪些?",
|
|
"focus": "文本内容"
|
|
},
|
|
{
|
|
"question": "文档中的图片显示了什么内容?",
|
|
"focus": "图片内容"
|
|
},
|
|
{
|
|
"question": "Python生态系统相关的信息",
|
|
"focus": "综合内容"
|
|
},
|
|
{
|
|
"question": "销售数据分析结果",
|
|
"focus": "数据内容"
|
|
},
|
|
{
|
|
"question": "技术架构或框架图的内容",
|
|
"focus": "图片技术内容"
|
|
},
|
|
{
|
|
"question": "人工智能研究的挑战和机遇",
|
|
"focus": "研究内容"
|
|
}
|
|
]
|
|
|
|
image_content_found = False
|
|
|
|
    for i, query_info in enumerate(test_queries, 1):
        question = query_info["question"]
        focus = query_info["focus"]

        print(f"\n❓ Query {i}: {question}")
        print(f"   🎯 Focus: {focus}")

        try:
            answer = await rag.query(question)
            if "Sorry" not in answer:
                # Check whether the answer contains image content
                if "🖼️ [Image" in answer:
                    print("   🖼️ ✅ Image content retrieved!")
                    image_content_found = True

                # Analyze the result
                lines = answer.split('\n')
                if lines:
                    source_line = lines[0] if lines[0].startswith('Based on documents') else "source information unknown"
                    print(f"   📚 {source_line}")

                # Preview the content, highlighting image information
                content_start = answer.find('\n\n')
                if content_start > 0:
                    content = answer[content_start+2:]

                    # Preview image and text content separately
                    content_lines = content.split('\n\n')
                    preview_parts = []

                    for line in content_lines[:2]:  # Show only the first two parts
                        if "🖼️ [Image" in line:
                            # Special handling for image content
                            img_preview = line[:200] + "..." if len(line) > 200 else line
                            preview_parts.append(f"   🖼️ {img_preview}")
                        else:
                            # Text content
                            text_preview = line[:100] + "..." if len(line) > 100 else line
                            preview_parts.append(f"   📄 {text_preview}")

                    for part in preview_parts:
                        print(part)
                else:
                    print(f"   💡 {answer[:200]}...")
            else:
                print(f"   💡 {answer}")

        except Exception as e:
            print(f"   ❌ Query failed: {str(e)}")

    # Final verification results
print("\n" + "=" * 60)
|
|
print("🎉 高级功能测试完成!")
|
|
print()
|
|
print("✅ 功能验证结果:")
|
|
print(" 📄 多格式文档解析 - ✅")
|
|
print(" 🖼️ 图片自动提取 - ✅" if image_docs else " 🖼️ 图片自动提取 - ⚠️")
|
|
print(" 🤖 图片文本识别 - ✅" if image_content_found else " 🤖 图片文本识别 - ⚠️")
|
|
print(" 🔍 混合内容检索 - ✅" if image_content_found else " 🔍 混合内容检索 - ⚠️")
|
|
print(" 📊 内容分类显示 - ✅")
|
|
print()
|
|
print("🔧 支持的格式:")
|
|
for file_info in available_files:
|
|
icon = "🖼️" if file_info["expect_images"] else "📄"
|
|
print(f" {icon} {file_info['format']} - {file_info['description']}")
|
|
print()
|
|
print("💡 图片文本识别特性:")
|
|
if image_content_found:
|
|
print(" ✅ 自动提取图片中的视觉信息")
|
|
print(" ✅ 生成图片内容描述文本")
|
|
print(" ✅ 图片信息可被向量化和检索")
|
|
print(" ✅ 支持图片尺寸和类型识别")
|
|
else:
|
|
print(" ⚠️ 需要包含图片的测试文档验证")
|
|
|
|
|
|
if __name__ == "__main__":
|
|
asyncio.run(test_advanced_functionality())
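
# Note: running this file wipes and rebuilds the "advanced_test" Chroma collection and
# re-ingests the sample documents before querying. The absolute paths above assume the
# original author's local setup; point them at your own directories before running elsewhere.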