feat: adjust code structure

This commit is contained in:
李如威 2025-08-09 13:43:12 +08:00
parent 021406a297
commit 83b7f45174
4 changed files with 61 additions and 399 deletions

View File

@@ -0,0 +1,5 @@
NumPy is the foundational library for scientific computing in Python, providing the multi-dimensional array object.
Pandas is a powerful data analysis and manipulation library, providing the DataFrame data structure.
Matplotlib is Python's plotting library, used to create static, animated, and interactive charts.
Scikit-learn is a machine learning library offering a wide range of algorithms and tools.
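The four libraries this sample document describes can be exercised together in a few lines; a minimal sketch (the data and output file name are arbitrary):

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression

# NumPy: multi-dimensional arrays
x = np.arange(10, dtype=float).reshape(-1, 1)
y = 2.0 * x.ravel() + 1.0

# Pandas: tabular data as a DataFrame
df = pd.DataFrame({"x": x.ravel(), "y": y})

# Scikit-learn: fit a simple linear model
model = LinearRegression().fit(x, y)
print("slope:", model.coef_[0], "intercept:", model.intercept_)

# Matplotlib: render a static chart to disk
plt.scatter(df["x"], df["y"])
plt.plot(df["x"], model.predict(x), color="red")
plt.savefig("fit.png")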

View File

@@ -1,426 +1,83 @@
#!/usr/bin/env python3
"""
Minimal RAG test: document processing, streaming Q&A, and plain Q&A
"""
import asyncio
import sys
import os
import warnings

# Suppress noisy framework warnings
warnings.filterwarnings("ignore", category=FutureWarning, module="torch")
warnings.filterwarnings("ignore", category=UserWarning)

# Make the project sources importable
sys.path.append(os.path.join(os.path.dirname(__file__), '..', 'src'))

from base_rag.core import BaseRAG


class SimpleRAG(BaseRAG):
    async def ingest(self, file_path):
        # Parsing, chunking, and embedding are handled by the base class
        return await self.process_file_to_vector_store(file_path)
    async def query(self, question, stream=False):
        # Retrieve the top 3 chunks, reranked for relevance
        docs = await self.similarity_search_with_rerank(question, k=3)
        if not docs or not self.llm:
            return "No relevant information found, or no LLM connected"
context = "\n".join([doc.page_content for doc in docs])
prompt = f"基于以下信息回答:\n{context}\n\n问题:{question}\n回答:"
try:
result = await rag.process_file_to_vector_store(file_path)
if result.get('success'):
chunks = result.get('chunks_count', 0)
total_chunks += chunks
processed_files.append(filename)
status = "✅ 新处理" if "处理完成" in result['message'] else "♻️ 已存在"
print(f" {status}: {chunks} 个文档片段")
if stream and hasattr(self.llm, 'stream'):
print("🌊 ", end='', flush=True)
full_response = ""
for chunk in self.llm.stream(prompt):
content = getattr(chunk, 'content', str(chunk))
if content:
print(content, end='', flush=True)
full_response += content
print()
return full_response
else:
error_msg = result.get('message', '未知错误')
print(f" ❌ 失败: {error_msg}")
return self.llm.invoke(prompt) if hasattr(self.llm, 'invoke') else self.llm(prompt)
except Exception as e:
print(f" ❌ 异常: {str(e)}")
print(f"\n📊 处理结果: {len(processed_files)} 个文件, 共 {total_chunks} 个文档片段")
return rag, processed_files
async def test_retrieval_and_rerank():
"""测试检索和重排功能"""
print("\n🔍 检索和重排测试")
print("-" * 40)
# 复用文档处理的RAG实例
rag = ComprehensiveRAG(
vector_store_name="comprehensive_test",
retriever_top_k=5,
persist_directory="./storage/chroma_db/comprehensive_test",
rerank_config={
"enabled": True,
"type": "local",
"model": "BAAI/bge-reranker-base",
"top_k": 3
}
)
test_query = "Python编程语言的特点和优势"
print(f"🔍 测试查询: {test_query}")
# 1. 普通检索
print("\n📋 普通检索结果:")
try:
normal_docs = await rag.similarity_search(test_query, k=5)
for i, doc in enumerate(normal_docs[:3], 1):
source = doc.metadata.get('source_file', f'文档{i}')
content = doc.page_content[:80] + "..." if len(doc.page_content) > 80 else doc.page_content
print(f" {i}. [{source}] {content}")
except Exception as e:
print(f" ❌ 普通检索失败: {e}")
# 2. 重排检索
print("\n🎯 重排后检索结果:")
try:
rerank_docs = await rag.similarity_search_with_rerank(test_query, k=3)
for i, doc in enumerate(rerank_docs, 1):
source = doc.metadata.get('source_file', f'文档{i}')
content = doc.page_content[:80] + "..." if len(doc.page_content) > 80 else doc.page_content
print(f" {i}. [{source}] {content}")
except Exception as e:
print(f" ❌ 重排检索失败: {e}")
return rag
async def test_intelligent_qa(rag):
"""测试智能问答功能"""
print("\n💭 智能问答测试")
print("-" * 40)
# 尝试设置LLM
try:
from langchain_community.llms import Ollama
rag.llm = Ollama(model="qwen3:4b", base_url="http://localhost:11434")
print("🤖 已连接本地LLM (Ollama)")
has_llm = True
except Exception as e:
print(f"⚠️ 未连接LLM将使用检索模式: {e}")
has_llm = False
# 测试问题集
test_questions = [
"Python编程语言有什么特点",
# "数据科学的主要应用领域有哪些?",
# "机器学习和深度学习的区别是什么?",
# "文档中有哪些关于人工智能的内容?",
# "图片中显示了什么信息?" # 测试图片内容
]
print(f"🔥 开始问答测试 ({'LLM模式' if has_llm else '检索模式'})")
for i, question in enumerate(test_questions, 1):
print(f"\n❓ 问题 {i}: {question}")
print(" " + "-" * 35)
try:
answer = await rag.query(question)
if has_llm and "📚 信息来源:" in answer:
# LLM模式分离答案和来源
parts = answer.split("\n\n📚 信息来源:")
main_answer = parts[0]
source_info = "📚 信息来源:" + parts[1] if len(parts) > 1 else ""
print(f" 💡 {main_answer[:150]}...")
if source_info:
print(f" {source_info}")
else:
# 检索模式或简单回答
if len(answer) > 200:
print(f" 💡 {answer[:200]}...")
if "基于文档(" in answer:
source_line = answer.split('\n')[0]
print(f" 📚 {source_line}")
else:
print(f" 💡 {answer}")
except Exception as e:
print(f" ❌ 查询失败: {str(e)}")
async def show_system_status(rag):
"""显示系统状态"""
print("\n📊 系统状态总览")
print("-" * 40)
try:
# 文件处理状态
file_statuses = await rag.get_file_processing_status()
if file_statuses:
print("📁 文档处理状态:")
completed = sum(1 for s in file_statuses if s['status'] == FileStatus.COMPLETED.value)
error = sum(1 for s in file_statuses if s['status'] == FileStatus.ERROR.value)
print(f" ✅ 成功: {completed} 个文件")
if error > 0:
print(f" ❌ 失败: {error} 个文件")
# 配置信息
print("\n⚙️ 配置信息:")
print(f" 🎯 重排功能: {'✅ 启用' if rag.rerank_config.get('enabled') else '❌ 禁用'}")
print(f" 🖼️ 图片处理: {'✅ 启用' if rag.image_config.get('enabled') else '❌ 禁用'}")
print(f" 🤖 LLM模型: {'✅ 已连接' if rag.llm else '❌ 未连接'}")
print(f" 📊 检索数量: Top {rag.retriever_top_k}")
except Exception as e:
print(f"❌ 状态获取失败: {e}")
return f"回答生成失败: {e}"
async def main():
    print("🚀 Minimal RAG test\n")
    # 1. Create the RAG instance
    rag = SimpleRAG(
        vector_store_name="simple_test",
        rerank_config={"enabled": True, "type": "local", "model": "BAAI/bge-reranker-base"}
    )

    # 2. Ingest a test document
    print("📂 Processing file...")
    test_file = "./test_files/data_science.txt"
    if os.path.exists(test_file):
        result = await rag.ingest(test_file)
        print(result.get('message', 'Processing complete'))
    else:
        print("⚠️ Test file not found")
    # 3. Connect an LLM
    try:
        from langchain_community.llms import Ollama
        rag.llm = Ollama(model="qwen3:4b", base_url="http://localhost:11434")
        print("🤖 LLM connected\n")
    except Exception as e:
        print(f"⚠️ LLM connection failed: {e}\n")
print("\n" + "=" * 50)
question = "Python有什么特点"
# 4. 流式提问
print(f"❓ 问题: {question}")
print("🌊 流式回答:")
await rag.query(question, stream=True)
# 5. 普通提问
# print(f"\n📝 普通回答:")
# answer = await rag.query(question, stream=False)
# print(f"💡 {answer}")
if __name__ == "__main__":
    asyncio.run(main())
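The dict returned by process_file_to_vector_store (and thus by ingest) is only partially visible here; the keys below are the ones the earlier comprehensive test read, so the exact schema is an assumption. A minimal sketch, reusing the SimpleRAG class defined above:

import asyncio

async def show_ingest_result():
    rag = SimpleRAG(vector_store_name="simple_test")
    result = await rag.ingest("./test_files/data_science.txt")
    # 'success', 'chunks_count', and 'message' are the keys the earlier
    # comprehensive test consumed; the schema is assumed, not documented.
    if result.get('success'):
        print(f"chunks: {result.get('chunks_count', 0)}")
    print(result.get('message', ''))

asyncio.run(show_ingest_result())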

Binary file not shown.

Binary file not shown.