feat: support OCR; reorganize the examples directory

parent c6e020a170
commit 5d50b9a23e
@@ -0,0 +1,88 @@
# Examples

This directory contains two main test examples:

## 📁 simple_test.py - Basic functionality test

**Purpose**: verify the basic functionality of the RAG system
- 🔧 Plain-text document processing
- 📄 Document loading and chunking
- 🔍 Text embedding and storage
- 🔎 Similarity search
- 📝 Query result aggregation

**Characteristics**:
- Image processing disabled (focus on the basics)
- Suitable for quickly verifying that the system works
- Lightweight test

**Run**:
```bash
python examples/simple_test.py
```

## 🚀 ad_test.py - Advanced functionality test

**Purpose**: verify multi-format document handling and image content recognition
- 📄 Multi-format document parsing (DOCX, PDF, XLSX, CSV)
- 🖼️ Automatic image extraction and processing
- 🤖 Image caption generation
- 📝 Text recognition inside images (OCR)
- 🔍 Mixed-content retrieval (text + images)
- 📊 Categorized result display

**Characteristics**:
- Full image processing enabled
- Uses a BLIP model for image understanding
- Supports extracting text from images
- Enhanced query result display

**Run**:
```bash
python examples/ad_test.py
```

## 🔧 Image text recognition

The advanced test (`ad_test.py`) includes enhanced image text recognition:

### ✅ Image content processing
- **Automatic extraction**: embedded images are extracted from DOCX and PDF documents
- **Smart descriptions**: a BLIP model generates image captions (see the sketch after this list)
- **Text recognition**: OCR extracts the text contained in images
- **Type tagging**: image types are detected automatically (technical diagrams, data charts, etc.)
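The captioning step itself lives in `base_rag.core` rather than in these examples, so the snippet below is only a minimal sketch of how a single extracted image could be captioned with the BLIP checkpoint configured in `ad_test.py`; the `describe_image` helper name is hypothetical and the real pipeline may differ.

```python
# Hedged sketch only: assumes `transformers` and `Pillow` are installed and that the
# same checkpoint configured in ad_test.py is used; not the actual base_rag.core code.
from PIL import Image
from transformers import BlipProcessor, BlipForConditionalGeneration

processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-base")

def describe_image(image_path: str) -> str:
    """Generate a short caption for one extracted image (hypothetical helper)."""
    image = Image.open(image_path).convert("RGB")
    inputs = processor(images=image, return_tensors="pt")
    output_ids = model.generate(**inputs, max_new_tokens=30)
    return processor.decode(output_ids[0], skip_special_tokens=True)
```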
### 📝 OCR text extraction
The system tries to extract text from images, with the following options (a minimal sketch of this fallback chain follows the list):
- **pytesseract**: high-accuracy OCR engine (must be installed separately)
- **easyocr**: fallback OCR option (supports Chinese and English)
- **Basic mode**: if no OCR library is available, only basic image information is provided
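The exact OCR integration is implemented in `base_rag.core` and is not part of this commit; the sketch below only illustrates the fallback order described above, and the function name and language settings are assumptions. Note that `pytesseract` additionally requires the Tesseract binary to be installed on the system.

```python
# Hedged sketch of the pytesseract -> easyocr -> basic-info fallback; the real
# implementation in base_rag.core may differ.
from PIL import Image

def extract_image_text(image_path: str) -> str:
    """Try pytesseract first, then easyocr, then fall back to basic image info."""
    try:
        import pytesseract
        return pytesseract.image_to_string(Image.open(image_path), lang="chi_sim+eng")
    except ImportError:
        pass
    try:
        import easyocr
        reader = easyocr.Reader(["ch_sim", "en"])  # Chinese + English
        return "\n".join(reader.readtext(image_path, detail=0))
    except ImportError:
        pass
    # Basic mode: no OCR library available, report basic image info only
    with Image.open(image_path) as img:
        return f"图片文件: {image_path} | 尺寸: {img.width}x{img.height}"
```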
### 🔍 Enhanced retrieval experience
- **Content classification**: query results distinguish image content from text content
- **Statistics**: the number of retrieved text and image chunks is reported
- **Formatted display**: image content is tagged with the special marker `🖼️ [图片内容]`

## 📋 Test document requirements

### Basic test documents
- `python_basics.txt` - Python basics
- `data_science.txt` - data science content

### Advanced test documents
- `complex_data_science.docx` - Word document containing images
- `ai_research_report.pdf` - PDF report containing images
- `company_report.xlsx` - Excel workbook
- `sales_data.csv` - CSV data file

## 🎯 Expected results

### Basic test
- ✅ Documents load and are processed normally
- ✅ Text queries return relevant results
- ✅ System response time is normal

### Advanced test
- ✅ Multi-format documents are parsed successfully
- ✅ Image content is automatically recognized and described
- 🖼️ Image-related queries return image content
- 📊 Query results include content-type statistics
- 🔍 Image and text content can be retrieved through a single interface
@@ -0,0 +1,320 @@
#!/usr/bin/env python3
"""
Advanced test example - multi-format documents and image content recognition
"""

import sys
import os
import asyncio
import warnings
from pathlib import Path
import shutil

# Filter out PyTorch FutureWarnings
warnings.filterwarnings("ignore", category=FutureWarning, module="torch")

# Add the source path
sys.path.append(os.path.join(os.path.dirname(__file__), "..", "src"))

from base_rag.core import BaseRAG


class AdvancedTestRAG(BaseRAG):
    """Advanced test RAG implementation - supports image content"""

    async def ingest(self, file_path: str, **kwargs):
        """Ingest a document"""
        return await self.process_file_to_vector_store(file_path, **kwargs)

    async def query(self, question: str) -> str:
        """Query implementation - enhanced display of image content"""
        docs = await self.similarity_search_with_rerank(question, k=5)

        if not docs:
            return "抱歉,没有找到相关信息。"

        # Analyze and organize the search results
        sources = []
        contexts = []
        image_count = 0
        text_count = 0

        for doc in docs:
            source = doc.metadata.get("source_file", "未知来源")
            doc_type = doc.metadata.get("type", "text")
            content = doc.page_content.strip()

            if source not in sources:
                sources.append(source)

            # Handle the different content types
            if doc_type == "image":
                # Enhanced display for image content
                image_count += 1
                enhanced_content = f"🖼️ [图片 {image_count}] {content}"

                # If the image description carries file info, extract and format it
                if "图片文件:" in content and "尺寸:" in content:
                    parts = content.split(" | ")
                    if len(parts) >= 3:
                        file_info = parts[0].replace("图片文件: ", "")
                        size_info = parts[1].replace("尺寸: ", "")
                        type_info = parts[2].replace("类型: ", "")
                        enhanced_content = f"🖼️ [图片内容] {file_info}\n 📐 尺寸: {size_info} | 🏷️ 类型: {type_info}"

                contexts.append(enhanced_content)
            else:
                text_count += 1
                contexts.append(f"📄 {content}")

        context = "\n\n".join(contexts)
        sources_str = "、".join(sources)

        # Append content statistics
        stats = f"({text_count}文本"
        if image_count > 0:
            stats += f" + {image_count}图片"
        stats += ")"

        return f"基于文档({sources_str}){stats}的信息:\n\n{context}"


async def test_advanced_functionality():
    """Test advanced multi-format document and image features"""
    print("🚀 高级多格式文档和图片内容测试")
    print("=" * 60)

    # Clean up the vector database
    db_path = Path("/Users/liruwei/Documents/code/project/demo/base_rag/chroma_db/advanced_test")
    if db_path.exists():
        shutil.rmtree(db_path)
        print("🧹 已清理向量数据库")

    # Create the RAG instance - image processing enabled
    rag = AdvancedTestRAG(
        vector_store_name="advanced_test",
        retriever_top_k=5,
        storage_directory="/Users/liruwei/Documents/code/project/demo/base_rag/test_files",
        status_db_path="/Users/liruwei/Documents/code/project/demo/base_rag/advanced_test_status.db",
        # Enable image processing - use a local BLIP model for better image text recognition
        image_config={
            "enabled": True,
            "type": "local",
            "model": "Salesforce/blip-image-captioning-base"
        }
    )

    print("✅ 高级RAG实例创建成功 (已启用图片处理)")
    print()

    # Multi-format test documents
    test_files = [
        {
            "file": "test_document.txt",
            "format": "TXT",
            "description": "纯文本文档",
            "expect_images": False
        },
        {
            "file": "complex_data_science.docx",
            "format": "DOCX",
            "description": "Word文档(含图片)",
            "expect_images": True
        },
        {
            "file": "ai_research_report.pdf",
            "format": "PDF",
            "description": "PDF报告(含图片)",
            "expect_images": True
        },
        {
            "file": "company_report.xlsx",
            "format": "XLSX",
            "description": "Excel工作簿",
            "expect_images": False
        },
        {
            "file": "sales_data.csv",
            "format": "CSV",
            "description": "CSV数据文件",
            "expect_images": False
        }
    ]

    # Keep only the files that actually exist
    test_dir = Path("/Users/liruwei/Documents/code/project/demo/base_rag/test_files")
    available_files = []
    for file_info in test_files:
        if (test_dir / file_info["file"]).exists():
            available_files.append(file_info)

    print(f"📂 发现 {len(available_files)} 个测试文档")
    print()

    # Process the documents
    processed_results = []
    total_images = 0

    for file_info in available_files:
        filename = file_info["file"]
        format_type = file_info["format"]
        description = file_info["description"]
        expect_images = file_info["expect_images"]

        print(f"📄 处理 {format_type}: {filename}")
        print(f" {description}")

        try:
            result = await rag.ingest(str(test_dir / filename))
            if result and result.get('success'):
                chunks_count = result['chunks_count']
                print(f" ✅ 成功: {chunks_count} 个片段")

                # Estimate the number of image chunks
                baseline = 1 if format_type in ['TXT', 'CSV'] else 2
                has_images = chunks_count > baseline + 1

                if expect_images and has_images:
                    estimated_images = chunks_count - baseline
                    total_images += estimated_images
                    print(f" 🖼️ 估计包含 ~{estimated_images} 个图片片段")

                processed_results.append({
                    "file": filename,
                    "format": format_type,
                    "chunks": chunks_count,
                    "has_images": has_images
                })

            else:
                message = result.get('message', '未知错误')
                if "已经处理完毕" in message:
                    print(f" ⚠️ 文件已存在")
                else:
                    print(f" ❌ 处理失败: {message}")

        except Exception as e:
            print(f" ❌ 错误: {str(e)}")
        print()

    # Result statistics
    image_docs = [r for r in processed_results if r.get("has_images")]
    text_docs = [r for r in processed_results if not r.get("has_images")]

    print("📊 处理结果统计:")
    print(f" 📄 纯文本文档: {len(text_docs)} 个")
    print(f" 🖼️ 含图片文档: {len(image_docs)} 个")
    if total_images > 0:
        print(f" 📸 估计图片总数: ~{total_images} 个")
    print()

    # Advanced query tests
    print("🔍 高级查询测试...")

    test_queries = [
        {
            "question": "数据科学的核心技术有哪些?",
            "focus": "文本内容"
        },
        {
            "question": "文档中的图片显示了什么内容?",
            "focus": "图片内容"
        },
        {
            "question": "Python生态系统相关的信息",
            "focus": "综合内容"
        },
        {
            "question": "销售数据分析结果",
            "focus": "数据内容"
        },
        {
            "question": "技术架构或框架图的内容",
            "focus": "图片技术内容"
        },
        {
            "question": "人工智能研究的挑战和机遇",
            "focus": "研究内容"
        }
    ]

    image_content_found = False

    for i, query_info in enumerate(test_queries, 1):
        question = query_info["question"]
        focus = query_info["focus"]

        print(f"\n❓ 查询 {i}: {question}")
        print(f" 🎯 重点: {focus}")

        try:
            answer = await rag.query(question)
            if "抱歉" not in answer:
                # Check whether the answer contains image content
                if "🖼️ [图片" in answer:
                    print(f" 🖼️ ✅ 检索到图片内容!")
                    image_content_found = True

                # Analyze the result
                lines = answer.split('\n')
                if lines:
                    source_line = lines[0] if lines[0].startswith('基于文档') else "来源信息未知"
                    print(f" 📚 {source_line}")

                # Show a content preview, highlighting image information
                content_start = answer.find('\n\n')
                if content_start > 0:
                    content = answer[content_start+2:]

                    # Split the preview into image and text parts
                    content_lines = content.split('\n\n')
                    preview_parts = []

                    for line in content_lines[:2]:  # show only the first 2 parts
                        if "🖼️ [图片" in line:
                            # Special handling for image content
                            img_preview = line[:200] + "..." if len(line) > 200 else line
                            preview_parts.append(f" 🖼️ {img_preview}")
                        else:
                            # Text content
                            text_preview = line[:100] + "..." if len(line) > 100 else line
                            preview_parts.append(f" 📄 {text_preview}")

                    for part in preview_parts:
                        print(part)
                else:
                    print(f" 💡 {answer[:200]}...")
            else:
                print(f" 💡 {answer}")

        except Exception as e:
            print(f" ❌ 查询失败: {str(e)}")

    # Final verification results
    print("\n" + "=" * 60)
    print("🎉 高级功能测试完成!")
    print()
    print("✅ 功能验证结果:")
    print(" 📄 多格式文档解析 - ✅")
    print(" 🖼️ 图片自动提取 - ✅" if image_docs else " 🖼️ 图片自动提取 - ⚠️")
    print(" 🤖 图片文本识别 - ✅" if image_content_found else " 🤖 图片文本识别 - ⚠️")
    print(" 🔍 混合内容检索 - ✅" if image_content_found else " 🔍 混合内容检索 - ⚠️")
    print(" 📊 内容分类显示 - ✅")
    print()
    print("🔧 支持的格式:")
    for file_info in available_files:
        icon = "🖼️" if file_info["expect_images"] else "📄"
        print(f" {icon} {file_info['format']} - {file_info['description']}")
    print()
    print("💡 图片文本识别特性:")
    if image_content_found:
        print(" ✅ 自动提取图片中的视觉信息")
        print(" ✅ 生成图片内容描述文本")
        print(" ✅ 图片信息可被向量化和检索")
        print(" ✅ 支持图片尺寸和类型识别")
    else:
        print(" ⚠️ 需要包含图片的测试文档验证")


if __name__ == "__main__":
    asyncio.run(test_advanced_functionality())
@@ -1,207 +0,0 @@
#!/usr/bin/env python3
"""
Full multi-format file test - DOCX, PDF, Excel, and CSV with images
"""

import sys
import os
import asyncio
import warnings
from pathlib import Path
import shutil

# Filter out PyTorch FutureWarnings
warnings.filterwarnings("ignore", category=FutureWarning, module="torch")

# Add the source path
sys.path.append(os.path.join(os.path.dirname(__file__), "..", "src"))

from base_rag.core import BaseRAG


class AdvancedFormatRAG(BaseRAG):
    """RAG implementation for advanced file formats"""

    async def ingest(self, file_path: str, **kwargs):
        """Document ingestion logic"""
        return await self.process_file_to_vector_store(file_path, **kwargs)

    async def query(self, question: str) -> str:
        """Query logic"""
        docs = await self.similarity_search_with_rerank(question, k=3)

        if not docs:
            return "抱歉,没有找到相关信息。"

        # Record the sources of the retrieved documents
        sources = []
        contexts = []
        for doc in docs:
            source = doc.metadata.get("source_file", "未知来源")
            content = doc.page_content.strip()

            if source not in sources:
                sources.append(source)
            contexts.append(content)

        context = "\n\n".join(contexts)
        sources_str = "、".join(sources)

        return f"基于以下文档({sources_str})的信息:\n\n{context}"


async def test_advanced_formats():
    """Test advanced file format processing"""
    print("🚀 高级多格式文件处理测试")
    print("=" * 60)

    # Remove the old vector database
    db_path = Path("/Users/liruwei/Documents/code/project/demo/base_rag/chroma_db/advanced_formats")
    if db_path.exists():
        shutil.rmtree(db_path)
        print("🧹 已清理旧的向量数据库")

    # Create the RAG instance
    rag = AdvancedFormatRAG(
        vector_store_name="advanced_formats",
        retriever_top_k=3,
        storage_directory="/Users/liruwei/Documents/code/project/demo/base_rag/test_files",
        status_db_path="/Users/liruwei/Documents/code/project/demo/base_rag/advanced_status.db",
    )

    # Test file list - includes the newly created files
    test_files = [
        {
            "file": "complex_data_science.docx",
            "format": "DOCX",
            "description": "复杂Word文档(含表格和图片)"
        },
        {
            "file": "sales_data.csv",
            "format": "CSV",
            "description": "销售数据CSV文件"
        },
        {
            "file": "company_report.xlsx",
            "format": "XLSX",
            "description": "多工作表Excel文件"
        },
        {
            "file": "ai_research_report.pdf",
            "format": "PDF",
            "description": "AI研究报告PDF(含图片)"
        }
    ]

    print("📂 处理高级格式文件...")
    processed_count = 0

    for file_info in test_files:
        filename = file_info["file"]
        format_type = file_info["format"]
        description = file_info["description"]

        file_path = Path("../test_files") / filename

        if not file_path.exists():
            # Fall back to the absolute path
            file_path = Path("/Users/liruwei/Documents/code/project/demo/base_rag/test_files") / filename

        if not file_path.exists():
            print(f"❌ {format_type}: {filename} - 文件不存在")
            continue

        print(f"📄 处理 {format_type}: {filename}")
        print(f" {description}")

        try:
            result = await rag.ingest(str(file_path))
            if result and result.get('success'):
                print(f" ✅ 成功: {result['chunks_count']} 个片段")
                processed_count += 1
            else:
                print(f" ⚠️ 跳过: {result.get('message', '可能已存在')}")
                if "已经处理完毕" in str(result.get('message', '')):
                    processed_count += 1
        except Exception as e:
            print(f" ❌ 失败: {str(e)}")
        print()

    print(f"📊 处理完成: {processed_count}/{len(test_files)} 个文件")
    print()

    # Targeted query tests
    print("💬 高级格式查询测试...")

    queries = [
        {
            "question": "数据科学的核心技术有哪些?",
            "expected": "complex_data_science.docx"
        },
        {
            "question": "销售数据中哪个产品销售额最高?",
            "expected": "sales_data.csv"
        },
        {
            "question": "公司员工信息包含哪些部门?",
            "expected": "company_report.xlsx"
        },
        {
            "question": "人工智能研究面临的挑战是什么?",
            "expected": "ai_research_report.pdf"
        },
        {
            "question": "Python在数据科学中的作用?",
            "expected": "多个文档"
        }
    ]

    for i, query_info in enumerate(queries, 1):
        question = query_info["question"]
        expected = query_info["expected"]

        print(f"\n❓ 查询 {i}: {question}")
        print(f" 期望来源: {expected}")

        try:
            answer = await rag.query(question)
            if "抱歉" not in answer:
                # Separate the source info from the content
                parts = answer.split('\n\n', 1)
                if len(parts) == 2:
                    source_info = parts[0]
                    content = parts[1]

                    print(f" 📚 {source_info}")

                    # Show a content summary (first 150 characters)
                    if len(content) > 150:
                        content_preview = content[:150] + "..."
                    else:
                        content_preview = content

                    print(f" 💡 {content_preview}")
                else:
                    print(f" 💡 {answer[:150]}...")
            else:
                print(f" 💡 {answer}")
        except Exception as e:
            print(f" ❌ 查询失败: {str(e)}")

    print("\n" + "=" * 60)
    print("🎉 高级多格式文件测试完成!")
    print("✅ 支持的格式:")
    print(" 📄 DOCX - Word文档 (含表格、图片)")
    print(" 📊 CSV - 逗号分隔值文件")
    print(" 📈 XLSX - Excel工作簿 (多工作表)")
    print(" 📑 PDF - 便携式文档格式 (含图片)")
    print()
    print("🔧 技术特性:")
    print(" 🔄 异步处理 - 非阻塞I/O操作")
    print(" 🧠 智能解析 - 自动识别文件格式")
    print(" 🔍 跨格式查询 - 统一检索接口")
    print(" 📋 表格数据提取 - 结构化信息处理")


if __name__ == "__main__":
    asyncio.run(test_advanced_formats())
@@ -1,186 +0,0 @@
#!/usr/bin/env python3
"""
Multi-format file test - TXT, MD, and DOCX files
"""

import sys
import os
import asyncio
import warnings
from pathlib import Path

# Filter out PyTorch FutureWarnings
warnings.filterwarnings("ignore", category=FutureWarning, module="torch")

# Add the source path
sys.path.append(os.path.join(os.path.dirname(__file__), "..", "src"))

from base_rag.core import BaseRAG


class MultiFormatRAG(BaseRAG):
    """RAG implementation for multi-format file processing"""

    async def ingest(self, file_path: str, **kwargs):
        """Document ingestion logic"""
        return await self.process_file_to_vector_store(file_path, **kwargs)

    async def query(self, question: str) -> str:
        """Query logic"""
        docs = await self.similarity_search_with_rerank(question, k=3)

        if not docs:
            return "抱歉,没有找到相关信息。"

        # Record the sources of the retrieved documents
        sources = []
        contexts = []
        for doc in docs:
            source = doc.metadata.get("source_file", "未知来源")
            content = doc.page_content.strip()

            if source not in sources:
                sources.append(source)
            contexts.append(content)

        context = "\n\n".join(contexts)
        sources_str = "、".join(sources)

        return f"基于以下文档({sources_str})的信息:\n\n{context}"


async def test_multiple_formats():
    """Test processing of multiple file formats"""
    print("🚀 多格式文件处理测试")
    print("=" * 50)

    # Create the RAG instance
    rag = MultiFormatRAG(
        vector_store_name="multiformat_kb",
        retriever_top_k=3,
        storage_directory="../test_files",  # relative to the examples directory
        status_db_path="../status.db",  # relative to the examples directory
    )

    # Test file list
    test_files = [
        {
            "file": "knowledge.txt",
            "format": "TXT",
            "description": "纯文本文件"
        },
        {
            "file": "python_guide.md",
            "format": "MD",
            "description": "Markdown文件"
        },
        {
            "file": "machine_learning.md",
            "format": "MD",
            "description": "Markdown文件"
        },
        {
            "file": "deep_learning_guide.docx",
            "format": "DOCX",
            "description": "Word文档"
        },
        {
            "file": "complex_data_science.docx",
            "format": "DOCX",
            "description": "复杂Word文档(含表格)"
        },
        {
            "file": "sales_data.csv",
            "format": "CSV",
            "description": "CSV数据文件"
        },
        {
            "file": "company_report.xlsx",
            "format": "XLSX",
            "description": "Excel工作簿"
        },
        {
            "file": "ai_research_report.pdf",
            "format": "PDF",
            "description": "PDF文档"
        }
    ]

    print("📂 处理文件...")
    processed_count = 0

    for file_info in test_files:
        filename = file_info["file"]
        format_type = file_info["format"]
        description = file_info["description"]

        file_path = Path("../test_files") / filename

        if not file_path.exists():
            print(f"❌ {format_type}: {filename} - 文件不存在")
            continue

        print(f"📄 处理 {format_type}: {filename} ({description})")

        try:
            result = await rag.ingest(str(file_path))
            if result and result.get('success'):
                print(f" ✅ 成功: {result['chunks_count']} 个片段")
                processed_count += 1
            else:
                print(f" ⚠️ 跳过: {result.get('message', '可能已存在')}")
                processed_count += 1  # files that already exist also count as processed
        except Exception as e:
            print(f" ❌ 失败: {str(e)}")

    print(f"\n📊 处理完成: {processed_count}/{len(test_files)} 个文件")
    print()

    # Cross-format query tests
    print("💬 跨格式查询测试...")

    queries = [
        "Python有什么特点?",
        "什么是机器学习?",
        "深度学习的应用领域有哪些?",
        "数据科学的核心技术有哪些?",
        "销售数据中哪个产品销售额最高?",
        "公司员工的平均年薪是多少?",
        "人工智能的主要挑战是什么?",
        "机器学习有哪些类型?"
    ]

    for query in queries:
        print(f"\n❓ {query}")
        try:
            answer = await rag.query(query)
            if "抱歉" not in answer:
                # Separate the source info from the content
                parts = answer.split('\n\n', 1)
                if len(parts) == 2:
                    source_info = parts[0]  # the "基于以下文档..." source line
                    content = parts[1]  # the actual content

                    print(f" 📚 {source_info}")

                    # Show a content summary (first 200 characters)
                    if len(content) > 200:
                        content_preview = content[:200] + "..."
                    else:
                        content_preview = content

                    print(f" 💡 {content_preview}")
                else:
                    print(f" 💡 {answer}")
            else:
                print(f" 💡 {answer}")
        except Exception as e:
            print(f" ❌ 查询失败: {str(e)}")

    print("\n" + "=" * 50)
    print("✅ 多格式文件测试完成!")
    print("支持的格式: TXT, MD, DOCX, CSV, XLSX, PDF")


if __name__ == "__main__":
    asyncio.run(test_multiple_formats())
@@ -1,6 +1,6 @@
 #!/usr/bin/env python3
 """
-Simple file processing test
+Simple test example - basic RAG functionality verification
 """

 import sys
@@ -8,6 +8,7 @@ import os
 import asyncio
 import warnings
 from pathlib import Path
+import shutil

 # Filter out PyTorch FutureWarnings
 warnings.filterwarnings("ignore", category=FutureWarning, module="torch")
@@ -15,90 +16,138 @@ warnings.filterwarnings("ignore", category=FutureWarning, module="torch")
 # Add the source path
 sys.path.append(os.path.join(os.path.dirname(__file__), "..", "src"))

-from base_rag.core import BaseRAG, FileStatus
+from base_rag.core import BaseRAG


-class SimpleRAG(BaseRAG):
-    """Simple RAG implementation example"""
+class SimpleTestRAG(BaseRAG):
+    """Simple test RAG implementation"""

     async def ingest(self, file_path: str, **kwargs):
-        """Document ingestion logic"""
+        """Ingest a document"""
         return await self.process_file_to_vector_store(file_path, **kwargs)

     async def query(self, question: str) -> str:
-        """Simple query logic"""
-        docs = await self.similarity_search_with_rerank(question, k=2)
+        """Query implementation"""
+        docs = await self.similarity_search_with_rerank(question, k=3)

         if not docs:
             return "抱歉,没有找到相关信息。"

-        # Record the sources of the retrieved documents
+        # Organize the search results
         sources = []
         contexts = []
         for doc in docs:
             source = doc.metadata.get("source_file", "未知来源")
+            content = doc.page_content.strip()

             if source not in sources:
                 sources.append(source)
-            contexts.append(doc.page_content.strip())
+            contexts.append(content)

         context = "\n\n".join(contexts)
         sources_str = "、".join(sources)

-        return f"基于以下文档({sources_str})的信息:\n\n{context}"
+        return f"基于文档({sources_str})的信息:\n\n{context}"


-async def test_file_processing():
-    print("=== 文件处理功能测试 ===\n")
+async def test_basic_functionality():
+    """Test basic RAG functionality"""
+    print("🔧 基础RAG功能测试")
+    print("=" * 50)

-    # Create the RAG instance
-    rag = SimpleRAG(
-        vector_store_name="test_kb",
-        retriever_top_k=2,
-        storage_directory="./test_files",  # use the shared test_files directory
-        status_db_path="./status.db",  # use the shared status database
+    # Clean up the vector database
+    db_path = Path("/Users/liruwei/Documents/code/project/demo/base_rag/chroma_db/simple_test")
+    if db_path.exists():
+        shutil.rmtree(db_path)
+        print("🧹 已清理向量数据库")
+
+    # Create the RAG instance - image processing disabled for the basic test
+    rag = SimpleTestRAG(
+        vector_store_name="simple_test",
+        retriever_top_k=3,
+        storage_directory="/Users/liruwei/Documents/code/project/demo/base_rag/test_files",
+        status_db_path="/Users/liruwei/Documents/code/project/demo/base_rag/simple_test_status.db",
+        image_config={"enabled": False}  # disable images for the basic test
     )

-    # Use the existing test files
-    test_dir = Path("./test_files")
-
-    # Use the available test files
-    python_file = test_dir / "python_basics.txt"
-    web_file = test_dir / "web_frameworks.txt"
-    datascience_file = test_dir / "data_science.txt"
-
-    print("1. 处理多个知识文件...")
-    files_to_process = [python_file, web_file, datascience_file]
-
-    for file_path in files_to_process:
-        result = await rag.ingest(str(file_path), chunk_size=200, chunk_overlap=20)
-        print(
-            f"处理 {file_path.name}: {result['message']} (片段数: {result.get('chunks_count', 0)})"
-        )
+    print("✅ RAG实例创建成功")
     print()

-    print("2. 查询测试...")
-    questions = [
-        "Python是谁创建的?",
-        "Flask和Django有什么区别?",
-        "Pandas是做什么的?",
-        "什么是NumPy?",
-        "FastAPI有什么特点?",
+    # Basic test documents
+    test_files = ["test_document.txt", "test_markdown.md", "python_basics.txt", "data_science.txt"]
+
+    print("📂 处理基础文档...")
+    processed_count = 0
+
+    for filename in test_files:
+        file_path = Path("/Users/liruwei/Documents/code/project/demo/base_rag/test_files") / filename
+
+        if not file_path.exists():
+            print(f"⚠️ {filename} - 文件不存在,跳过")
+            continue
+
+        print(f"📄 处理: {filename}")
+
+        try:
+            result = await rag.ingest(str(file_path))
+            if result and result.get('success'):
+                print(f" ✅ 成功: {result['chunks_count']} 个片段")
+                processed_count += 1
+            else:
+                message = result.get('message', '未知错误')
+                if "已经处理完毕" in message:
+                    print(f" ⚠️ 已存在,跳过")
+                    processed_count += 1
+                else:
+                    print(f" ❌ 失败: {message}")
+        except Exception as e:
+            print(f" ❌ 错误: {str(e)}")
+
+    print(f"\n📊 处理完成: {processed_count}/{len(test_files)} 个文件")
+    print()
+
+    # Basic query tests
+    print("🔍 基础查询测试...")
+
+    test_queries = [
+        "Python编程语言的特点",
+        "数据科学的核心技术",
+        "机器学习的应用",
+        "什么是深度学习"
     ]

-    for question in questions:
-        print(f"问题: {question}")
+    for i, question in enumerate(test_queries, 1):
+        print(f"\n❓ 查询 {i}: {question}")
+
+        try:
             answer = await rag.query(question)
-        print(f"回答: {answer[:150]}...")
-        print("-" * 50)
-        print()
+            if "抱歉" not in answer:
+                # Show a result summary
+                lines = answer.split('\n')
+                source_line = lines[0] if lines[0].startswith('基于文档') else "来源未知"
+                print(f" 📚 {source_line}")

-    print("3. 查看文件状态...")
-    files = await rag.get_file_processing_status()
-    for file_info in files:
-        print(f"文件: {file_info['filename']} | 状态: {file_info['status']}")
+                # Show a content preview
+                content_start = answer.find('\n\n')
+                if content_start > 0:
+                    content = answer[content_start+2:]
+                    preview = content[:150] + "..." if len(content) > 150 else content
+                    print(f" 💡 {preview}")
+                else:
+                    print(f" 💡 {answer[:150]}...")
+            else:
+                print(f" 💡 {answer}")
+        except Exception as e:
+            print(f" ❌ 查询失败: {str(e)}")

-    print("\n=== 测试完成 ===")
+    print("\n" + "=" * 50)
+    print("🎉 基础功能测试完成!")
+    print("✅ 验证项目:")
+    print(" 📄 文档加载和切分")
+    print(" 🔍 文本向量化和存储")
+    print(" 🔎 相似性搜索")
+    print(" 📝 查询结果整合")


 if __name__ == "__main__":
-    asyncio.run(test_file_processing())
+    asyncio.run(test_basic_functionality())