"""
|
||
QA Chain 使用示例
|
||
演示如何使用 build_qa_chain 方法构建问答系统
|
||
"""
|
||
|
||
import asyncio
|
||
import sys
|
||
import os
|
||
|
||
# 添加项目路径到 Python 路径
|
||
sys.path.append(os.path.join(os.path.dirname(__file__), '..', 'src'))
|
||
|
||
from base_rag.core import BaseRAG, FileStatus
|
||
|
||
|
||
class SimpleRAG(BaseRAG):
    """Minimal example implementation of a RAG pipeline."""

    async def ingest(self, file_paths):
        """Import a batch of documents into the vector store."""
        results = []
        for file_path in file_paths:
            result = await self.process_file_to_vector_store(file_path)
            results.append(result)
        return results
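
    # ingest() is the batch counterpart of the per-file loop in main() below;
    # a usage sketch (the path is taken from the demo in main()):
    #
    #     results = await rag.ingest(["./test_files/data_science.txt"])
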
    async def query(self, question: str) -> str:
        """Simple question-answering implementation."""
        if not self.llm:
            # Without an LLM, just return the relevant documents.
            docs = await self.similarity_search_with_rerank(question)
            return f"Found {len(docs)} relevant documents:\n" + "\n---\n".join(
                doc.page_content[:200] + "..." for doc in docs
            )

        # Answer through the QA chain. Judging from the usage below,
        # build_qa_chain() returns a RetrievalQA-style callable that takes the
        # question string and returns a dict with "result" and
        # "source_documents" keys.
        qa_chain = await self.build_qa_chain()
        result = qa_chain(question)
        return result["result"]


async def main():
    print("🚀 QA Chain usage example")

    # 1. Create a RAG instance
    rag = SimpleRAG(
        vector_store_name="qa_chain_demo",
        retriever_top_k=3,  # retrieve the top 3 chunks per query
        persist_directory="./storage/chroma_db/qa_chain_demo",
        storage_directory="./storage/files",
        status_db_path="./storage/status_db/qa_chain_demo.db",
    )

    # 2. Check whether there are documents to process
    test_files_dir = "./test_files"
    test_files = [
        f"{test_files_dir}/data_science.txt",
        f"{test_files_dir}/python_guide.md",
    ]

print("\n📁 检查并处理文档...")
|
||
for file_path in test_files:
|
||
if os.path.exists(file_path):
|
||
print(f"处理文件: {file_path}")
|
||
result = await rag.process_file_to_vector_store(file_path)
|
||
print(f"处理结果: {result['message']}")
|
||
else:
|
||
print(f"文件不存在: {file_path}")
|
||
|
||
    # 3. Inspect file processing status
    print("\n📊 File processing status:")
    file_statuses = await rag.get_file_processing_status()
    for status in file_statuses:
        print(f"  {status['filename']}: {status['status']}")
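
    # The status values presumably correspond to the FileStatus values
    # imported above; this demo only prints the raw strings.
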
    # 4. Set up an LLM (if one is available)
    try:
        # Try Ollama (requires a local installation)
        from langchain_community.llms import Ollama

        rag.llm = Ollama(model="qwen3:4b", base_url="http://localhost:11434")
        print("\n🤖 Using the Ollama LLM")
        use_llm = True
    except Exception as e:
        print(f"\n⚠️ Could not set up the Ollama LLM: {e}")
        print("Falling back to document-retrieval mode")
        use_llm = False
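
    # Note: newer LangChain releases ship the Ollama integration in the
    # separate langchain-ollama package (from langchain_ollama import
    # OllamaLLM); the langchain_community import used above still works
    # there but is deprecated.
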
    # 5. Example questions
    questions = [
        "Give a brief introduction to Python.",
    ]

print(f"\n{'='*50}")
|
||
print("🔍 开始问答测试")
|
||
print(f"{'='*50}")
|
||
|
||
    for i, question in enumerate(questions, 1):
        print(f"\n❓ Question {i}: {question}")
        print("-" * 40)

        try:
            if use_llm:
                # Answer the question through the QA chain
                print("🔄 Building the QA chain...")
                qa_chain = await rag.build_qa_chain()

                print("🤔 Generating an answer...")
                result = qa_chain(question)

                print("💡 Answer:")
                print(result["result"])

                print("\n📚 Source documents:")
                for j, doc in enumerate(result["source_documents"], 1):
                    print(f"  {j}. {doc.metadata.get('source_file', 'unknown')}")
                    print(f"     {doc.page_content[:100]}...")
            else:
                # Document retrieval only, without answer generation
                print("🔍 Retrieving relevant documents...")
                answer = await rag.query(question)
                print("📖 Retrieval results:")
                print(answer)

        except Exception as e:
            print(f"❌ Error: {e}")

print(f"\n{'='*50}")
|
||
print("✅ 测试完成")
|
||
print(f"{'='*50}")
|
||
|
||
|
||
if __name__ == "__main__":
    asyncio.run(main())
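
# How to run (a sketch; the filename is illustrative, and the layout assumes
# this file sits in an examples/ directory next to src/, as implied by the
# sys.path line above):
#
#     python examples/qa_chain_example.py
#
# If the Ollama import fails, the script still runs and falls back to
# retrieval-only mode.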