#!/usr/bin/env python3
"""Minimal RAG test: document ingestion, streaming Q&A, and regular Q&A."""
import asyncio
import os
import sys

# Make the local src/ package importable when running this script directly.
sys.path.append(os.path.join(os.path.dirname(__file__), '..', 'src'))

from base_rag.core import BaseRAG


class SimpleRAG(BaseRAG):
    async def ingest(self, file_path):
        """Chunk a file and load it into the vector store."""
        return await self.process_file_to_vector_store(file_path)

    async def query(self, question, stream=False):
        """Retrieve the top-k reranked chunks and answer with the attached LLM."""
        docs = await self.similarity_search_with_rerank(question, k=3)
        # `llm` is only attached in main() after a successful import, so guard
        # with getattr instead of assuming the attribute exists.
        if not docs or not getattr(self, 'llm', None):
            return "No relevant information found, or no LLM is connected"

        context = "\n".join(doc.page_content for doc in docs)
        prompt = f"Answer based on the following information:\n{context}\n\nQuestion: {question}\nAnswer:"

        try:
            if stream and hasattr(self.llm, 'stream'):
                # Stream tokens to stdout as they arrive, accumulating the full reply.
                print("🌊 ", end='', flush=True)
                full_response = ""
                for chunk in self.llm.stream(prompt):
                    content = getattr(chunk, 'content', str(chunk))
                    if content:
                        print(content, end='', flush=True)
                        full_response += content
                print()
                return full_response
            # Fall back to a single blocking call for non-streaming LLMs.
            return self.llm.invoke(prompt) if hasattr(self.llm, 'invoke') else self.llm(prompt)
        except Exception as e:
            return f"Answer generation failed: {e}"


async def main():
    print("🚀 Minimal RAG test\n")

    # 1. Create the RAG instance with a local reranker.
    rag = SimpleRAG(
        vector_store_name="simple_test",
        rerank_config={"enabled": True, "type": "local", "model": "BAAI/bge-reranker-base"}
    )

    # 2. Ingest the test document.
    print("📂 Processing file...")
    test_file = "./test_files/data_science.txt"
    if os.path.exists(test_file):
        result = await rag.ingest(test_file)
        print(f"✅ {result.get('message', 'Processing complete')}")
    else:
        print("⚠️ Test file not found")

    # 3. Attach an LLM (a local Ollama model here).
    try:
        from langchain_community.llms import Ollama
        rag.llm = Ollama(model="qwen3:4b", base_url="http://localhost:11434")
        print("🤖 LLM connected\n")
    except Exception:
        print("⚠️ LLM connection failed\n")

    question = "What are the main features of Python?"

    # 4. Streaming question
    print(f"❓ Question: {question}")
    print("🌊 Streaming answer:")
    await rag.query(question, stream=True)

    # 5. Regular (non-streaming) question
    # print(f"\n📝 Regular answer:")
    # answer = await rag.query(question, stream=False)
    # print(f"💡 {answer}")


if __name__ == "__main__":
    asyncio.run(main())