feat: convert to async

This commit is contained in:
李如威 2025-08-08 14:48:41 +08:00
parent af8b7f65fb
commit 12531356dc
12 changed files with 757 additions and 464 deletions

CLEANUP_SUMMARY.md Normal file

@@ -0,0 +1,81 @@
# Project Cleanup Summary
## 🧹 Cleanup Completed
Redundant files have been removed, the essential examples kept, and the documentation updated.
## 📁 Current Project Structure
```
base_rag/
├── src/
│   └── base_rag/
│       ├── __init__.py          # package entry point
│       └── core.py              # core BaseRAG class (async version)
├── examples/
│   └── simple_test.py           # basic usage example
├── FILE_PROCESSING_GUIDE.md     # file-processing feature guide
├── RERANK_GUIDE.md              # detailed rerank feature guide
├── README.md                    # project overview (updated)
├── requirements.txt             # dependency list
├── pyproject.toml               # package configuration
└── LICENSE                      # license
```
## ❌ Deleted Files
### Documentation
- `ASYNC_GUIDE.md` - async usage guide
- `MIGRATION_GUIDE.md` - migration guide
### Examples
- `examples/async_example.py` - async examples and FastAPI integration
- `examples/performance_test.py` - performance test script
## ✅ Retained Files
### Core Files
- `src/base_rag/core.py` - the async core implementation
- `examples/simple_test.py` - basic example, updated to the async version
### Documentation
- `README.md` - async-specific sections removed, core feature introduction kept
- `FILE_PROCESSING_GUIDE.md` - updated to async usage instructions
- `RERANK_GUIDE.md` - rerank feature guide (unchanged)
## 📝 Main Updates
### README.md
- ❌ Removed the detailed descriptions of async features
- ❌ Removed the FastAPI integration example
- ❌ Removed the performance test results
- ❌ Removed the migration guide link
- ✅ Kept the core feature introduction
- ✅ Kept the basic configuration and usage examples
- ✅ Simplified the project structure description
### FILE_PROCESSING_GUIDE.md
- ✅ Updated method signatures to async/await form
- ✅ Updated example code to use asyncio
- ✅ Corrected example file references
### examples/simple_test.py
- ✅ Updated to the async/await version
- ✅ Kept the feature demo concise
- ✅ Tested and verified to work
## 🎯 Current State
The project keeps the core async implementation but drops:
- the elaborate async usage guide
- migration-related documentation
- performance tests and comparisons
- the FastAPI integration example
It retains:
- the core async RAG functionality
- a short, clear usage example
- basic documentation
- the rerank feature guide
The project is now leaner and focused on core functionality, suitable for direct use and study.

FILE_PROCESSING_GUIDE.md

@@ -28,19 +28,19 @@ class FileStatus(Enum):
### New BaseRAG Methods
#### `process_file_to_vector_store(file_path, chunk_size=500, chunk_overlap=50)`
#### `await process_file_to_vector_store(file_path, chunk_size=500, chunk_overlap=50)`
The main file-processing method:
- detects the file type automatically
- saves the file to the storage directory
- splits the document and adds it to the vector store
- records the processing status
#### `get_file_processing_status(file_hash=None)`
#### `await get_file_processing_status(file_hash=None)`
Gets file processing status:
- pass a file_hash to get the status of a specific file
- pass no argument to get the status of all files
#### `list_files_by_status(status=None)`
#### `await list_files_by_status(status=None)`
Filters files by status (see the enum sketch below):
- pass a FileStatus member to get files in that status
- pass no argument to get all files
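Both status methods work with the `FileStatus` enum. For reference, a minimal sketch of the enum as it is used in this guide; the member names match `base_rag/core.py`, while the string values shown here are illustrative assumptions:

```python
from enum import Enum

class FileStatus(Enum):
    WAITING = "waiting"        # queued, not yet processed
    PROCESSING = "processing"  # currently being processed
    COMPLETED = "completed"    # split and added to the vector store
    ERROR = "error"            # failed; details in error_message
```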
@@ -51,21 +51,25 @@ class FileStatus(Enum):
```python
from base_rag.core import BaseRAG, FileStatus
import asyncio
# Create the RAG instance
rag = SimpleRAG(
    vector_store_name="my_knowledge_base",
    storage_directory="./documents",  # file storage directory
    status_db_path="./file_status.db"  # status database path
)
async def main():
    # Create the RAG instance
    rag = SimpleRAG(
        vector_store_name="my_knowledge_base",
        storage_directory="./documents",  # file storage directory
        status_db_path="./file_status.db"  # status database path
    )
# Process a file
result = rag.process_file_to_vector_store("path/to/your/document.txt")
print(result)
    # Process a file
    result = await rag.process_file_to_vector_store("path/to/your/document.txt")
    print(result)
# Check processing status
status = rag.get_file_processing_status()
print(status)
    # Check processing status
    status = await rag.get_file_processing_status()
    print(status)
asyncio.run(main())
```
### Batch Processing Files
@@ -73,21 +77,27 @@ print(status)
```python
import os
from pathlib import Path
import asyncio
# Process every file in the directory
docs_dir = Path("./my_documents")
for file_path in docs_dir.glob("*"):
    if file_path.suffix.lower() in ['.txt', '.md', '.doc', '.docx']:
        print(f"Processing file: {file_path.name}")
        result = rag.process_file_to_vector_store(str(file_path))
        print(f"Result: {result['message']}")
async def batch_process():
    rag = SimpleRAG()
    # Process every file in the directory
    docs_dir = Path("./my_documents")
    for file_path in docs_dir.glob("*"):
        if file_path.suffix.lower() in ['.txt', '.md', '.doc', '.docx']:
            print(f"Processing file: {file_path.name}")
            result = await rag.process_file_to_vector_store(str(file_path))
            print(f"Result: {result['message']}")
# Summarize the results
completed = rag.list_files_by_status(FileStatus.COMPLETED)
failed = rag.list_files_by_status(FileStatus.ERROR)
    # Summarize the results
    completed = await rag.list_files_by_status(FileStatus.COMPLETED)
    failed = await rag.list_files_by_status(FileStatus.ERROR)
print(f"Successfully processed: {len(completed)} files")
print(f"Failed: {len(failed)} files")
    print(f"Successfully processed: {len(completed)} files")
    print(f"Failed: {len(failed)} files")
asyncio.run(batch_process())
```
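Note that the loop above processes files one at a time, which keeps resource usage predictable; if throughput matters, the same calls can be issued concurrently with `asyncio.gather` (see the semaphore-based sketch in the README).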
## File Processing Flow
@@ -134,7 +144,7 @@ BaseRAG(
### Document Splitting Parameters
```python
rag.process_file_to_vector_store(
await rag.process_file_to_vector_store(
    file_path="document.txt",
    chunk_size=500,    # chunk size
    chunk_overlap=50   # chunk overlap
@@ -157,4 +167,4 @@ pip install unstructured python-docx
## Complete Example
See `examples/file_processing_example.py` for a complete usage example.
See `examples/simple_test.py` for a complete usage example.


@@ -1,148 +0,0 @@
# Quick Start Guide
## Install Dependencies
1. Activate the virtual environment:
```bash
source venv/bin/activate
```
2. Install the dependencies:
```bash
pip install -r requirements.txt
```
## Basic Usage
### 1. Create a RAG Class Instance
```python
from base_rag.core import BaseRAG, FileStatus
class MyRAG(BaseRAG):
    def ingest(self, file_path: str, **kwargs):
        return self.process_file_to_vector_store(file_path, **kwargs)
    def query(self, question: str) -> str:
        docs = self.similarity_search_with_rerank(question)
        if not docs:
            return "No relevant information found"
        return "\n".join([doc.page_content for doc in docs])
# Create an instance
rag = MyRAG(
    vector_store_name="my_kb",           # knowledge base name
    storage_directory="./documents",     # file storage directory
    status_db_path="./file_status.db"    # status database
)
```
### 2. Process Files
```python
# Process a single file
result = rag.ingest("path/to/your/document.txt")
print(f"Result: {result['message']}")
# Process files in batch
import os
for filename in os.listdir("./documents"):
    if filename.endswith(('.txt', '.md', '.doc', '.docx')):
        result = rag.ingest(f"./documents/{filename}")
        print(f"{filename}: {result['message']}")
```
### 3. Query the Knowledge Base
```python
# Search for relevant documents
answer = rag.query("your question")
print(answer)
```
### 4. Check File Status
```python
# View the status of all files
all_files = rag.get_file_processing_status()
for file_info in all_files:
    print(f"{file_info['filename']}: {file_info['status']}")
# View completed files
completed = rag.list_files_by_status(FileStatus.COMPLETED)
print(f"Completed: {len(completed)} files")
# View failed files
failed = rag.list_files_by_status(FileStatus.ERROR)
for file_info in failed:
    print(f"Failed file: {file_info['filename']}")
    print(f"Error message: {file_info['error_message']}")
```
## Supported File Formats
- **.txt** - plain text files
- **.md** - Markdown files
- **.doc/.docx** - Word documents (require `unstructured` and `python-docx`)
## Key Features
1. **Automatic deduplication**: files with identical content are not processed twice
2. **Status tracking**: processing status is tracked in real time
3. **Error handling**: failed files have their error messages recorded
4. **Simple API**: easy to use and extend
5. **Persistent storage**: status is recorded in a SQLite database
## Running the Examples
```bash
# Activate the environment
source venv/bin/activate
# Run the complete example
python examples/file_processing_example.py
# Run the simple test
python examples/simple_test.py
```
## Configuration Options
### Document Splitting Parameters
```python
result = rag.ingest(
    "document.txt",
    chunk_size=500,    # chunk size
    chunk_overlap=50   # overlap size
)
```
### Embedding Model Configuration
```python
rag = MyRAG(
    embedding_config={
        "type": "local",
        "model_name": "BAAI/bge-small-zh-v1.5"
    }
)
```
### Rerank Model Configuration
```python
rag = MyRAG(
    rerank_config={
        "enabled": True,
        "type": "local",
        "model": "BAAI/bge-reranker-base",
        "top_k": 3
    }
)
```
## Data Storage
- **File storage**: the `./documents/` directory (configurable)
- **Vector database**: the `./chroma_db/` directory
- **Status database**: the `./file_status.db` file
Stored filename format: `<original name>_<first 8 hash chars>.<extension>` (sketched below)
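For illustration, a minimal sketch of how such a stored name can be derived, assuming the MD5 hashing the file manager uses internally; `stored_filename` is a hypothetical helper, not part of the package API:

```python
import hashlib
from pathlib import Path

def stored_filename(path: str) -> str:
    # MD5 over the file bytes; the first 8 hex chars go into the name
    digest = hashlib.md5(Path(path).read_bytes()).hexdigest()
    p = Path(path)
    return f"{p.stem}_{digest[:8]}{p.suffix}"

print(stored_filename("./documents/report.txt"))  # e.g. report_3f2a9c1d.txt
```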

README.md

@@ -1,4 +1,4 @@
# BaseRAG Feature Summary
# BaseRAG Retrieval-Augmented Generation System
## Overview
BaseRAG is a flexible retrieval-augmented generation (RAG) framework that supports multiple embedding models and rerank strategies, with a focus on local deployment and the HuggingFace ecosystem.
@@ -9,64 +9,198 @@ BaseRAG is a flexible retrieval-augmented generation (RAG) framework
- **Local HuggingFace models**: supports both model names and local paths
- **Local API endpoints**: local embedding services compatible with the OpenAI API format
- **Automatic fallback**: switches to the local model when the API is unavailable
- **Model caching**: smart caching lets multiple instances share models
### 2. Document Reranking
- **Similarity rerank**: based on cosine similarity, no extra dependencies
- **CrossEncoder rerank**: dedicated rerank models with excellent quality
- **BGE rerank**: rerank models with strong Chinese support
### 3. Vector Storage
### 3. Vector Storage and File Management
- **Chroma database**: automatic persistence, multi-collection management
- **Thread safety**: model caching and concurrent-access protection
- **File processing**: supports txt, md, doc/docx, and more
- **Status tracking**: processing status managed in a SQLite database
- **Smart deduplication**: duplicate files are detected and skipped automatically
## 🔧 Configuration Examples
### 4. Clean, Easy-to-Use API
- **Abstract base class design**: easy to extend and customize
- **Configuration driven**: models and parameters adjusted flexibly through configuration
- **Error handling**: thorough error handling and status reporting
### Basic Configuration
```python
from base_rag import BaseRAG
## 🔧 Quick Start
class MyRAG(BaseRAG):
    def ingest(self, documents):
        self.vector_store.add_texts(documents)
    def query(self, question, k=3):
        return self.similarity_search_with_rerank(question, k=k)
# Configuration
embedding_config = {
    "type": "local",
    "model_name": "sentence-transformers/all-MiniLM-L6-v2"
}
rerank_config = {
    "enabled": True,
    "method": "similarity",
    "top_k": 3
}
rag = MyRAG(
    embedding_config=embedding_config,
    rerank_config=rerank_config
)
### Install Dependencies
```bash
pip install -r requirements.txt
```
### Local API Configuration
### Basic Usage Example
```python
import asyncio
from base_rag.core import BaseRAG, FileStatus
class MyRAG(BaseRAG):
    async def ingest(self, file_paths):
        """Batch-import documents"""
        results = []
        for file_path in file_paths:
            result = await self.process_file_to_vector_store(file_path)
            results.append(result)
        return results
    async def query(self, question):
        """Question-answering query"""
        docs = await self.similarity_search_with_rerank(question, k=3)
        # Assemble the retrieved documents into an answer
        context = "\n".join([doc.page_content for doc in docs])
        return f"Based on the retrieved results: {context[:200]}..."
async def main():
    # Initialize the RAG system
    rag = MyRAG(
        vector_store_name="my_knowledge",
        embedding_config={
            "type": "local",
            "model_name": "BAAI/bge-small-zh-v1.5"
        },
        rerank_config={
            "enabled": True,
            "type": "local",
            "model": "BAAI/bge-reranker-base"
        }
    )
    # Process documents
    await rag.ingest(["document1.txt", "document2.txt"])
    # Query
    answer = await rag.query("What is Python?")
    print(answer)
## 📋 Configuration Options
### Embedding Model Configuration
#### Local HuggingFace Models
```python
embedding_config = {
    "type": "local",
    "model_name": "BAAI/bge-small-zh-v1.5"
}
# Or use a local path
embedding_config = {
    "type": "local",
    "model_path": "/path/to/your/model"
}
```
#### Local API Endpoint
```python
embedding_config = {
    "type": "api",
    "api_url": "http://localhost:8080",
    "model": "text-embedding-model"
    "api_url": "http://localhost:8000/embeddings",
    "model": "your-model",
    "api_key": "your-api-key"
}
```
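For a sense of what such a service is expected to accept, here is a hedged sketch of an OpenAI-style embedding request; the endpoint, payload fields, and response shape are assumptions about your local service, not part of BaseRAG's API:

```python
import asyncio
import aiohttp

async def embed(texts):
    # Assumed OpenAI-compatible /embeddings endpoint and response layout
    async with aiohttp.ClientSession() as session:
        async with session.post(
            "http://localhost:8000/embeddings",
            headers={"Authorization": "Bearer your-api-key"},
            json={"model": "your-model", "input": texts},
        ) as resp:
            data = await resp.json()
            return [item["embedding"] for item in data["data"]]

print(asyncio.run(embed(["hello world"])))
```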
### Local Model Path Configuration
### Rerank Configuration
#### CrossEncoder Rerank
```python
embedding_config = {
rerank_config = {
    "enabled": True,
    "type": "local",
    "model_path": "/path/to/your/model"
    "model": "BAAI/bge-reranker-base",
    "top_k": 3
}
```
#### Similarity Rerank
```python
rerank_config = {
    "enabled": True,
    "method": "similarity",
    "top_k": 3
}
```
### Complete Configuration Example
```python
rag = MyRAG(
    vector_store_name="knowledge_base",
    retriever_top_k=5,
    persist_directory="./chroma_db",
    storage_directory="./documents",
    status_db_path="./file_status.db",
    embedding_config={
        "type": "local",
        "model_name": "BAAI/bge-small-zh-v1.5"
    },
    rerank_config={
        "enabled": True,
        "type": "local",
        "model": "BAAI/bge-reranker-base",
        "top_k": 3
    }
)
```
from fastapi import FastAPI
from pydantic import BaseModel
app = FastAPI()
rag_instance = MyAsyncRAG()
class QueryRequest(BaseModel):
    question: str
@app.post("/query")
async def query_endpoint(request: QueryRequest):
    answer = await rag_instance.query(request.question)
    return {"answer": answer}
## 🚀 Usage Examples
### 1. File Processing
```python
# Process a single file
result = await rag.process_file_to_vector_store("document.txt")
print(result)
# Process files in batch
file_paths = ["doc1.txt", "doc2.md", "doc3.docx"]
results = await rag.ingest(file_paths)
# Check processing status
status = await rag.get_file_processing_status()
completed_files = await rag.list_files_by_status(FileStatus.COMPLETED)
```
### 2. Document Retrieval
```python
# Basic similarity search
docs = await rag.similarity_search("Python programming", k=5)
# Search with rerank
docs = await rag.similarity_search_with_rerank("Python programming", k=3)
# Question-answering query
answer = await rag.query("What is Python?")
```
### 3. Status Management
```python
from base_rag.core import FileStatus
# View the status of all files
all_files = await rag.get_file_processing_status()
# View files in a specific status
completed = await rag.list_files_by_status(FileStatus.COMPLETED)
failed = await rag.list_files_by_status(FileStatus.ERROR)
print(f"Completed: {len(completed)} files")
print(f"Failed: {len(failed)} files")
```
## 📁 Project Structure
```
base_rag/
@@ -75,14 +209,14 @@ base_rag/
│   ├── __init__.py              # package entry point
│   └── core.py                  # core BaseRAG class
├── examples/
│   ├── quick_start.py           # quick start example
│   ├── rerank_demo.py           # rerank feature demo
│   └── local_api_demo.py        # local API configuration example
│   └── simple_test.py           # basic usage example
├── requirements.txt             # dependency list
├── pyproject.toml               # package configuration
├── FILE_PROCESSING_GUIDE.md     # file-processing feature guide
├── RERANK_GUIDE.md              # detailed rerank feature guide
└── README.md                    # project overview
```
```
## 🚀 Quick Start
@@ -93,6 +227,12 @@ pip install -r requirements.txt
2. **Run the examples**
```bash
# Async feature demo
python examples/async_example.py
# Performance test
python examples/performance_test.py
# Basic feature demo
python examples/quick_start.py
@@ -101,15 +241,100 @@ python examples/rerank_demo.py
# Local API configuration demo
python examples/local_api_demo.py
# FastAPI service example
pip install fastapi uvicorn
uvicorn examples.async_example:app --reload
```
## 📦 Optional Dependencies
## 🚀 Async Features in Detail
### Main Async Methods
All of BaseRAG's core methods are now async:
```python
# File processing
await rag.process_file_to_vector_store("document.txt")
# Similarity search
docs = await rag.similarity_search("query", k=5)
# Search with rerank
docs = await rag.similarity_search_with_rerank("query", k=3)
# File status management
status = await rag.get_file_processing_status()
files = await rag.list_files_by_status(FileStatus.COMPLETED)
# Vector store operations
await rag.add_documents_to_vector_store(documents)
retriever = await rag.build_retriever()
qa_chain = await rag.build_qa_chain()
```
### Concurrent Processing Example
```python
async def concurrent_file_processing(rag, file_paths, max_concurrent=3):
    """Process multiple files concurrently"""
    semaphore = asyncio.Semaphore(max_concurrent)
    async def process_single_file(file_path):
        async with semaphore:
            return await rag.process_file_to_vector_store(file_path)
    tasks = [process_single_file(fp) for fp in file_paths]
    results = await asyncio.gather(*tasks, return_exceptions=True)
    return results
async def concurrent_queries(rag, queries):
    """Run multiple queries concurrently"""
    tasks = [rag.similarity_search_with_rerank(q, k=3) for q in queries]
    results = await asyncio.gather(*tasks)
    return results
```
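A hypothetical driver for the helpers above; `MyRAG` is the subclass from the basic usage example, the file names are placeholders, and default configuration is assumed:

```python
async def demo():
    rag = MyRAG(vector_store_name="demo_kb")
    results = await concurrent_file_processing(
        rag, ["doc1.txt", "doc2.txt", "doc3.txt"], max_concurrent=2
    )
    for r in results:
        # gather(return_exceptions=True) yields result dicts or exceptions
        print(r)

asyncio.run(demo())
```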
### Performance Benefits
**Concurrent query benchmark results:**
## 🔍 Running the Examples
```bash
# 1. Install dependencies
pip install -r requirements.txt
# 2. Run the basic example
python examples/simple_test.py
```
## 📦 Dependency Requirements
### Core Dependencies
```txt
langchain>=0.3.0
langchain-community>=0.3.0
langchain-chroma>=0.1.0
langchain-huggingface>=0.1.0
chromadb>=0.4.0
sentence-transformers>=2.2.0
numpy>=1.21.0
aiofiles>=23.0.0
aiosqlite>=0.19.0
aiohttp>=3.8.0
```
### Document Processing Dependencies
```txt
unstructured>=0.10.0
python-docx>=0.8.11
```
### Optional Dependencies
```bash
# Local API endpoint support
pip install langchain-openai
# BGE rerank support
pip install FlagEmbedding
```
@@ -118,8 +343,9 @@ pip install FlagEmbedding
### Core Methods
- `similarity_search(query, k)`: basic similarity search
- `similarity_search_with_rerank(query, k)`: search with rerank
- `load_and_split_documents(file_path)`: document loading and splitting
- `add_documents_to_vector_store(documents)`: add documents to the vector store
- `process_file_to_vector_store(file_path)`: process a file into the vector store
- `get_file_processing_status()`: get file processing status
- `list_files_by_status(status)`: list files by status
### Abstract Methods (must be implemented)
- `ingest(*args, **kwargs)`: document ingestion logic (a minimal subclass sketch follows)
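A minimal subclass sketch, mirroring the usage examples above (both abstract methods must be overridden before the class can be instantiated):

```python
class MinimalRAG(BaseRAG):
    async def ingest(self, file_path: str, **kwargs):
        # Delegate to the built-in file pipeline
        return await self.process_file_to_vector_store(file_path, **kwargs)

    async def query(self, question: str) -> str:
        docs = await self.similarity_search_with_rerank(question, k=3)
        return "\n".join(doc.page_content for doc in docs)
```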
@@ -136,13 +362,36 @@ pip install FlagEmbedding
## 🛠️ Technical Highlights
- **Thread-safe**: supports concurrent access and model caching
- **Concurrency-safe**: supports concurrent access and model caching
- **Error handling**: thorough exception handling and fallback mechanisms
- **Flexible configuration**: multiple configuration styles and custom parameters
- **Easy to extend**: abstract design makes it simple for subclasses to implement business-specific logic
## 📋 Notes
1. **Model downloads**: models are downloaded on first run, which requires a network connection
2. **Memory management**: models are cached, so watch memory usage
3. **File formats**: make sure the document format is supported (txt, md, doc, docx)
4. **Error handling**: handle exceptions from file loading and model inference
## 🔄 Version Info
- **Current version**: 1.0.0
- **Python requirement**: >= 3.8
- **Key features**: multi-model support, smart reranking, file management
## 📚 Documentation Guide
For more details, see:
- **[File Processing Guide](FILE_PROCESSING_GUIDE.md)** - detailed introduction to file processing
- **[Rerank Guide](RERANK_GUIDE.md)** - rerank configuration and usage
- **[Example code](examples/)** - usage examples
- **[Configuration file](pyproject.toml)** - project configuration
---
🎯 **BaseRAG** - a flexible and powerful RAG framework
1. Models are downloaded on first run, which requires a network connection
2. Reranking adds query latency but improves result quality
3. Hardware requirements vary between models; choose according to your setup

@@ -0,0 +1,5 @@
NumPy is the fundamental library for scientific computing in Python, providing multi-dimensional array objects.
Pandas is a powerful data analysis and processing library that provides the DataFrame data structure.
Matplotlib is Python's plotting library for creating static, animated, and interactive charts.
Scikit-learn is a machine learning library offering a variety of algorithms and tools.

@@ -0,0 +1,6 @@
Python is a high-level programming language.
It has concise syntax and powerful capabilities.
Python is widely used in web development, data science, artificial intelligence, and other fields.
Machine learning libraries such as scikit-learn, TensorFlow, and PyTorch all support Python.
Flask and Django are popular Python web frameworks.

@@ -0,0 +1,5 @@
Python is a high-level programming language created by Guido van Rossum in 1991.
Python has concise, readable syntax that suits beginners learning to program.
Python is an interpreted language supporting multiple paradigms, including object-oriented and functional programming.
Python's design philosophy emphasizes code readability and simplicity.

@@ -0,0 +1,5 @@
Flask is a lightweight Python web framework that is easy to learn and use.
Django is a feature-rich Python web framework suited to large projects.
FastAPI is a modern Python web framework designed for building APIs.
Tornado is a scalable, non-blocking web server and web application framework.

examples/simple_test.py

@@ -5,48 +5,49 @@
import sys
import os
import asyncio
import warnings
from pathlib import Path
# Filter out PyTorch FutureWarnings
warnings.filterwarnings('ignore', category=FutureWarning, module='torch')
warnings.filterwarnings("ignore", category=FutureWarning, module="torch")
# Add the source path
sys.path.append(os.path.join(os.path.dirname(__file__), '..', 'src'))
sys.path.append(os.path.join(os.path.dirname(__file__), "..", "src"))
from base_rag.core import BaseRAG, FileStatus
class SimpleRAG(BaseRAG):
    """A simple example RAG implementation"""
    def ingest(self, file_path: str, **kwargs):
    async def ingest(self, file_path: str, **kwargs):
        """Implements the document ingestion logic"""
        return self.process_file_to_vector_store(file_path, **kwargs)
    def query(self, question: str) -> str:
        return await self.process_file_to_vector_store(file_path, **kwargs)
    async def query(self, question: str) -> str:
        """Implements a simple query flow"""
        docs = self.similarity_search_with_rerank(question, k=2)
        docs = await self.similarity_search_with_rerank(question, k=2)
        if not docs:
            return "Sorry, no relevant information was found."
        # Show the sources of the retrieved documents
        sources = []
        contexts = []
        for doc in docs:
            source = doc.metadata.get('source_file', 'unknown source')
            source = doc.metadata.get("source_file", "unknown source")
            if source not in sources:
                sources.append(source)
            contexts.append(doc.page_content.strip())
        context = "\n\n".join(contexts)
        sources_str = ", ".join(sources)
        return f"Based on the following documents ({sources_str}):\n\n{context}"
def test_file_processing():
async def test_file_processing():
    print("=== File Processing Test ===\n")
    # Create the RAG instance
@@ -54,7 +55,7 @@ def test_file_processing():
        vector_store_name="test_kb",
        retriever_top_k=2,
        storage_directory="./test_docs",
        status_db_path="./test_status.db"
        status_db_path="./test_status.db",
    )
    # Create the test files
@@ -62,60 +63,71 @@ def test_file_processing():
    test_dir.mkdir(exist_ok=True)
    # Create several knowledge files on different topics
    # Python basics
    python_file = test_dir / "python_basics.txt"
    python_file.write_text("""
    python_file.write_text(
        """
Python is a high-level programming language created by Guido van Rossum in 1991.
Python has concise, readable syntax that suits beginners learning to program.
Python is an interpreted language supporting multiple paradigms, including object-oriented and functional programming.
Python's design philosophy emphasizes code readability and simplicity.
""", encoding="utf-8")
""",
        encoding="utf-8",
    )
    # Web framework knowledge
    web_file = test_dir / "web_frameworks.txt"
    web_file.write_text("""
    web_file.write_text(
        """
Flask is a lightweight Python web framework that is easy to learn and use.
Django is a feature-rich Python web framework suited to large projects.
FastAPI is a modern Python web framework designed for building APIs.
Tornado is a scalable, non-blocking web server and web application framework.
""", encoding="utf-8")
""",
        encoding="utf-8",
    )
    # Data science knowledge
    datascience_file = test_dir / "data_science.txt"
    datascience_file.write_text("""
    datascience_file.write_text(
        """
NumPy is the fundamental library for scientific computing in Python, providing multi-dimensional array objects.
Pandas is a powerful data analysis and processing library that provides the DataFrame data structure.
Matplotlib is Python's plotting library for creating static, animated, and interactive charts.
Scikit-learn is a machine learning library offering a variety of algorithms and tools.
""", encoding="utf-8")
""",
        encoding="utf-8",
    )
print("1. 处理多个知识文件...")
files_to_process = [python_file, web_file, datascience_file]
for file_path in files_to_process:
result = rag.ingest(str(file_path), chunk_size=200, chunk_overlap=20)
print(f"处理 {file_path.name}: {result['message']} (片段数: {result.get('chunks_count', 0)})")
result = await rag.ingest(str(file_path), chunk_size=200, chunk_overlap=20)
print(
f"处理 {file_path.name}: {result['message']} (片段数: {result.get('chunks_count', 0)})"
)
print()
print("2. 查询测试...")
questions = [
"Python是谁创建的",
"Flask和Django有什么区别",
"Flask和Django有什么区别",
"Pandas是做什么的",
"什么是NumPy",
"FastAPI有什么特点"
"FastAPI有什么特点",
]
for question in questions:
print(f"问题: {question}")
answer = rag.query(question)
answer = await rag.query(question)
print(f"回答: {answer[:150]}...")
print("-" * 50)
print()
print("3. 查看文件状态...")
files = rag.get_file_processing_status()
files = await rag.get_file_processing_status()
for file_info in files:
print(f"文件: {file_info['filename']} | 状态: {file_info['status']}")
@ -123,4 +135,4 @@ Scikit-learn是机器学习库提供各种算法和工具。
if __name__ == "__main__":
test_file_processing()
asyncio.run(test_file_processing())

file_status.db Normal file (binary file not shown)

requirements.txt

@ -6,6 +6,11 @@ chromadb>=0.4.0
sentence-transformers>=2.2.0
numpy>=1.21.0
# Async dependencies
aiofiles>=23.0.0
aiosqlite>=0.19.0
aiohttp>=3.8.0
# Document processing dependencies
unstructured>=0.10.0
python-docx>=0.8.11

src/base_rag/core.py

@@ -1,15 +1,16 @@
from abc import ABC, abstractmethod
from typing import List, Optional, Dict, ClassVar, Union, Tuple, Any
import threading
import asyncio
import numpy as np
import os
import shutil
import sqlite3
import hashlib
import warnings
from datetime import datetime
from pathlib import Path
from enum import Enum
import aiofiles
import aiosqlite
import aiohttp
# Filter out PyTorch FutureWarnings so they don't clutter the user experience
warnings.filterwarnings('ignore', category=FutureWarning, module='torch')
@@ -33,44 +34,48 @@ class FileStatus(Enum):
class FileManager:
    """File manager: handles file storage and status bookkeeping"""
    """Async file manager: handles file storage and status bookkeeping"""
def __init__(self, storage_dir: str = "./documents", db_path: str = "./file_status.db"):
self.storage_dir = Path(storage_dir)
self.db_path = db_path
self.storage_dir.mkdir(exist_ok=True)
self._init_database()
self._init_lock = asyncio.Lock()
self._db_initialized = False
def _init_database(self):
"""初始化状态记录数据库"""
conn = sqlite3.connect(self.db_path)
cursor = conn.cursor()
cursor.execute("""
CREATE TABLE IF NOT EXISTS file_status (
id INTEGER PRIMARY KEY AUTOINCREMENT,
filename TEXT NOT NULL,
file_type TEXT NOT NULL,
file_hash TEXT UNIQUE NOT NULL,
status TEXT NOT NULL,
created_at TEXT NOT NULL,
updated_at TEXT NOT NULL,
error_message TEXT
)
""")
conn.commit()
conn.close()
async def _init_database(self):
"""异步初始化状态记录数据库"""
async with self._init_lock:
if self._db_initialized:
return
async with aiosqlite.connect(self.db_path) as conn:
await conn.execute("""
CREATE TABLE IF NOT EXISTS file_status (
id INTEGER PRIMARY KEY AUTOINCREMENT,
filename TEXT NOT NULL,
file_type TEXT NOT NULL,
file_hash TEXT UNIQUE NOT NULL,
status TEXT NOT NULL,
created_at TEXT NOT NULL,
updated_at TEXT NOT NULL,
error_message TEXT
)
""")
await conn.commit()
self._db_initialized = True
def _calculate_file_hash(self, file_path: str) -> str:
"""计算文件哈希值"""
async def _calculate_file_hash(self, file_path: str) -> str:
"""异步计算文件哈希值"""
hash_md5 = hashlib.md5()
with open(file_path, "rb") as f:
for chunk in iter(lambda: f.read(4096), b""):
async with aiofiles.open(file_path, "rb") as f:
while chunk := await f.read(4096):
hash_md5.update(chunk)
return hash_md5.hexdigest()
def save_file(self, source_path: str) -> Tuple[str, str]:
async def save_file(self, source_path: str) -> Tuple[str, str]:
"""
保存文件到存储目录
异步保存文件到存储目录
返回: (存储路径, 文件哈希)
"""
source_path = Path(source_path)
@ -78,7 +83,7 @@ class FileManager:
raise FileNotFoundError(f"源文件不存在: {source_path}")
# 计算文件哈希
file_hash = self._calculate_file_hash(str(source_path))
file_hash = await self._calculate_file_hash(str(source_path))
# 生成存储文件名使用哈希前8位避免冲突
file_extension = source_path.suffix
@@ -87,49 +92,51 @@ class FileManager:
        # If the file already exists with the same hash, return immediately
        if stored_path.exists():
            existing_hash = self._calculate_file_hash(str(stored_path))
            existing_hash = await self._calculate_file_hash(str(stored_path))
            if existing_hash == file_hash:
                print(f"File already exists, skipping copy: {stored_filename}")
                return str(stored_path), file_hash
        # Copy the file
        shutil.copy2(source_path, stored_path)
        print(f"File saved to: {stored_path}")
        # Copy the file asynchronously
        async with aiofiles.open(source_path, 'rb') as src:
            async with aiofiles.open(stored_path, 'wb') as dst:
                while chunk := await src.read(8192):
                    await dst.write(chunk)
        print(f"File saved to: {stored_path}")
        return str(stored_path), file_hash
    def update_file_status(self, file_hash: str, filename: str, file_type: str,
    async def update_file_status(self, file_hash: str, filename: str, file_type: str,
                                 status: FileStatus, error_message: str = None):
        """Update a file's processing status"""
        conn = sqlite3.connect(self.db_path)
        cursor = conn.cursor()
        """Asynchronously update a file's processing status"""
        await self._init_database()
        now = datetime.now().isoformat()
        # Try updating an existing record
        cursor.execute("""
            UPDATE file_status
            SET status = ?, updated_at = ?, error_message = ?
            WHERE file_hash = ?
        """, (status.value, now, error_message, file_hash))
        # If no record was updated, insert a new one
        if cursor.rowcount == 0:
            cursor.execute("""
                INSERT INTO file_status (filename, file_type, file_hash, status, created_at, updated_at, error_message)
                VALUES (?, ?, ?, ?, ?, ?, ?)
            """, (filename, file_type, file_hash, status.value, now, now, error_message))
        conn.commit()
        conn.close()
        async with aiosqlite.connect(self.db_path) as conn:
            # Try updating an existing record
            cursor = await conn.execute("""
                UPDATE file_status
                SET status = ?, updated_at = ?, error_message = ?
                WHERE file_hash = ?
            """, (status.value, now, error_message, file_hash))
            # If no record was updated, insert a new one
            if cursor.rowcount == 0:
                await conn.execute("""
                    INSERT INTO file_status (filename, file_type, file_hash, status, created_at, updated_at, error_message)
                    VALUES (?, ?, ?, ?, ?, ?, ?)
                """, (filename, file_type, file_hash, status.value, now, now, error_message))
            await conn.commit()
def get_file_status(self, file_hash: str) -> Optional[Dict]:
"""获取文件状态"""
conn = sqlite3.connect(self.db_path)
cursor = conn.cursor()
cursor.execute("SELECT * FROM file_status WHERE file_hash = ?", (file_hash,))
row = cursor.fetchone()
conn.close()
async def get_file_status(self, file_hash: str) -> Optional[Dict]:
"""异步获取文件状态"""
await self._init_database()
async with aiosqlite.connect(self.db_path) as conn:
cursor = await conn.execute("SELECT * FROM file_status WHERE file_hash = ?", (file_hash,))
row = await cursor.fetchone()
if row:
return {
@ -144,18 +151,17 @@ class FileManager:
}
return None
def list_files_by_status(self, status: FileStatus = None) -> List[Dict]:
"""列出指定状态的文件"""
conn = sqlite3.connect(self.db_path)
cursor = conn.cursor()
async def list_files_by_status(self, status: FileStatus = None) -> List[Dict]:
"""异步列出指定状态的文件"""
await self._init_database()
if status:
cursor.execute("SELECT * FROM file_status WHERE status = ? ORDER BY created_at DESC", (status.value,))
else:
cursor.execute("SELECT * FROM file_status ORDER BY created_at DESC")
rows = cursor.fetchall()
conn.close()
async with aiosqlite.connect(self.db_path) as conn:
if status:
cursor = await conn.execute("SELECT * FROM file_status WHERE status = ? ORDER BY created_at DESC", (status.value,))
else:
cursor = await conn.execute("SELECT * FROM file_status ORDER BY created_at DESC")
rows = await cursor.fetchall()
return [{
'id': row[0],
@@ -170,12 +176,12 @@ class FileManager:
class ModelManager:
    """Unified model manager that creates and caches embedding and rerank models"""
    """Async unified model manager that creates and caches embedding and rerank models"""
    # Class-level model cache
    _models: ClassVar[Dict[str, Any]] = {}
    # Thread lock protecting concurrent access to the model cache
    _lock: ClassVar[threading.Lock] = threading.Lock()
    # Async lock protecting concurrent access to the model cache
    _lock: ClassVar[asyncio.Lock] = asyncio.Lock()
    @classmethod
    def get_config_key(cls, config: Dict, model_type: str = "embedding") -> str:
@@ -201,20 +207,24 @@ class ModelManager:
        return f"{prefix}_{model_key}"
    @classmethod
    def get_or_create_model(cls, config: Dict, model_type: str, creator_func) -> Any:
        """Get or create a model (cached, thread-safe)"""
    async def get_or_create_model(cls, config: Dict, model_type: str, creator_func) -> Any:
        """Asynchronously get or create a model (cached, concurrency-safe)"""
        config_key = cls.get_config_key(config, model_type)
        # Double-checked locking pattern
        # Check the cache
        if config_key in cls._models:
            print(f"Using cached {model_type} model: {config_key}")
            return cls._models[config_key]
        with cls._lock:
        async with cls._lock:
            # Check again to avoid concurrent creation
            if config_key not in cls._models:
                print(f"Creating {model_type} model: {config_key}")
                cls._models[config_key] = creator_func(config)
                # Run the blocking model creation in a thread pool
                loop = asyncio.get_event_loop()
                cls._models[config_key] = await loop.run_in_executor(
                    None, creator_func, config
                )
            else:
                print(f"Using cached {model_type} model: {config_key}")
@@ -222,7 +232,7 @@ class ModelManager:
    @staticmethod
    def create_embedding_model(config: Dict) -> Embeddings:
        """Create an embedding model"""
        """Create an embedding model (runs in a thread pool)"""
        config_type = config.get("type", "local")
        if config_type == "local":
@@ -282,7 +292,7 @@ class ModelManager:
    @staticmethod
    def create_rerank_model(config: Dict) -> Any:
        """Create a rerank model"""
        """Create a rerank model (runs in a thread pool)"""
        config_type = config.get("type", "local")
        if config_type == "local":
@@ -375,29 +385,44 @@ class BaseRAG(ABC):
        # Initialize the file manager
        self.file_manager = FileManager(storage_directory, status_db_path)
        # Create the embedding model via the unified model manager
        self.embedding_model = ModelManager.get_or_create_model(
            self.embedding_config, "embedding", ModelManager.create_embedding_model
        )
        # Lazy-initialization markers
        self._initialized = False
        self._init_lock = asyncio.Lock()
        # Initialize the rerank model
        self.reranker = None
        if self.rerank_config.get("enabled", False):
            self.reranker = ModelManager.get_or_create_model(
                self.rerank_config, "rerank", ModelManager.create_rerank_model
    async def _ensure_initialized(self):
        """Make sure the models have been initialized"""
        if self._initialized:
            return
        async with self._init_lock:
            if self._initialized:
                return
            # Create the embedding model via the unified model manager
            self.embedding_model = await ModelManager.get_or_create_model(
                self.embedding_config, "embedding", ModelManager.create_embedding_model
            )
        # Initialize the Chroma vector store
        self.vector_store = Chroma(
            collection_name=vector_store_name,
            embedding_function=self.embedding_model,
            persist_directory=persist_directory,
        )
            # Initialize the rerank model
            self.reranker = None
            if self.rerank_config.get("enabled", False):
                self.reranker = await ModelManager.get_or_create_model(
                    self.rerank_config, "rerank", ModelManager.create_rerank_model
                )
    def _rerank_documents(
            # Initialize the Chroma vector store
            self.vector_store = Chroma(
                collection_name=self.vector_store_name,
                embedding_function=self.embedding_model,
                persist_directory=self.persist_directory,
            )
            self._initialized = True
    async def _rerank_documents(
        self, query: str, documents: List[Document], top_k: int = None
    ) -> List[Document]:
        """Rerank retrieved documents"""
        """Asynchronously rerank retrieved documents"""
        if not documents:
            return documents
@@ -415,21 +440,26 @@ class BaseRAG(ABC):
        try:
            # Check whether this is API mode
            if isinstance(self.reranker, dict) and self.reranker.get("type") == "api":
                return self._api_rerank(query, documents, top_k)
                return await self._api_rerank(query, documents, top_k)
            else:
                # Local model mode (CrossEncoder)
                query_doc_pairs = [(query, doc.page_content) for doc in documents]
                scores = self.reranker.predict(query_doc_pairs)
                # Normalize the scores into a flat list
                if isinstance(scores, np.ndarray):
                    scores = scores.flatten().tolist()
                elif not isinstance(scores, list):
                    scores = [scores]
                elif len(scores) == 1 and isinstance(scores[0], np.ndarray):
                    # A list holding a single array: unwrap it
                    scores = scores[0].flatten().tolist()
                # Local model mode (CrossEncoder) - runs in a thread pool
                loop = asyncio.get_event_loop()
                def _local_rerank():
                    query_doc_pairs = [(query, doc.page_content) for doc in documents]
                    scores = self.reranker.predict(query_doc_pairs)
                    # Normalize the scores into a flat list
                    if isinstance(scores, np.ndarray):
                        scores = scores.flatten().tolist()
                    elif not isinstance(scores, list):
                        scores = [scores]
                    elif len(scores) == 1 and isinstance(scores[0], np.ndarray):
                        scores = scores[0].flatten().tolist()
                    return scores
                scores = await loop.run_in_executor(None, _local_rerank)
                print(f"Rerank scores: {scores}")
                # Sort by score
@@ -445,13 +475,10 @@ class BaseRAG(ABC):
            print(f"Rerank failed: {e}; skipping rerank")
            return documents[:top_k]
    def _api_rerank(
    async def _api_rerank(
        self, query: str, documents: List[Document], top_k: int
    ) -> List[Document]:
        """Rerank via an API"""
        import requests
        import json
        """Rerank asynchronously via an API"""
        try:
            api_config = self.reranker
            api_url = api_config["api_url"]
@@ -469,83 +496,97 @@ class BaseRAG(ABC):
                    "Authorization": f"Bearer {api_config['api_key']}",
                }
            # Send the API request
            response = requests.post(api_url, json=payload, headers=headers, timeout=30)
            # Send the request asynchronously with aiohttp
            async with aiohttp.ClientSession() as session:
                async with session.post(api_url, json=payload, headers=headers, timeout=30) as response:
                    if response.status == 200:
                        result = await response.json()
            if response.status_code == 200:
                result = response.json()
                        # Assumed response format: {"scores": [0.9, 0.8, ...]} or {"results": [{"index": 0, "score": 0.9}, ...]}
                        if "scores" in result:
                            scores = result["scores"]
                        elif "results" in result:
                            scores = [item["score"] for item in result["results"]]
                        else:
                            raise ValueError("Unsupported API response format")
                # Assumed response format: {"scores": [0.9, 0.8, ...]} or {"results": [{"index": 0, "score": 0.9}, ...]}
                if "scores" in result:
                    scores = result["scores"]
                elif "results" in result:
                    scores = [item["score"] for item in result["results"]]
                else:
                    raise ValueError("Unsupported API response format")
                        # Sort by score
                        doc_scores = list(zip(documents, scores))
                        doc_scores.sort(key=lambda x: x[1], reverse=True)
                # Sort by score
                doc_scores = list(zip(documents, scores))
                doc_scores.sort(key=lambda x: x[1], reverse=True)
                return [doc for doc, score in doc_scores[:top_k]]
            else:
                print(f"API rerank request failed: {response.status_code}, {response.text}")
                return documents[:top_k]
                        return [doc for doc, score in doc_scores[:top_k]]
                    else:
                        error_text = await response.text()
                        print(f"API rerank request failed: {response.status}, {error_text}")
                        return documents[:top_k]
        except Exception as e:
            print(f"API rerank failed: {e}; skipping rerank")
            return documents[:top_k]
def load_and_split_documents(self, file_path: str) -> List[Document]:
async def load_and_split_documents(self, file_path: str) -> List[Document]:
"""
加载并切分文档可被子类重写实现不同的切分方式
异步加载并切分文档可被子类重写实现不同的切分方式
"""
loader = TextLoader(file_path, encoding="utf-8")
documents = loader.load()
# 在线程池中运行文档加载和切分
loop = asyncio.get_event_loop()
def _load_and_split():
loader = TextLoader(file_path, encoding="utf-8")
documents = loader.load()
splitter = RecursiveCharacterTextSplitter(chunk_size=200, chunk_overlap=20)
return splitter.split_documents(documents)
return await loop.run_in_executor(None, _load_and_split)
splitter = RecursiveCharacterTextSplitter(chunk_size=200, chunk_overlap=20)
return splitter.split_documents(documents)
def _load_document_by_type(self, file_path: str) -> List[Document]:
async def _load_document_by_type(self, file_path: str) -> List[Document]:
"""
根据文件类型加载文档
根据文件类型异步加载文档
"""
file_path = Path(file_path)
file_extension = file_path.suffix.lower()
try:
if file_extension in ['.txt', '.md']:
# 文本和Markdown文件
loader = TextLoader(str(file_path), encoding="utf-8")
return loader.load()
elif file_extension in ['.doc', '.docx']:
# Word文档
try:
from langchain_community.document_loaders import UnstructuredWordDocumentLoader
loader = UnstructuredWordDocumentLoader(str(file_path))
# 在线程池中运行文档加载
loop = asyncio.get_event_loop()
def _load_doc():
try:
if file_extension in ['.txt', '.md']:
# 文本和Markdown文件
loader = TextLoader(str(file_path), encoding="utf-8")
return loader.load()
except ImportError:
print("警告: 需要安装 unstructured 和 python-docx 来处理Word文档")
print("请运行: pip install unstructured python-docx")
raise
else:
raise ValueError(f"不支持的文件类型: {file_extension}")
except Exception as e:
print(f"加载文件失败 {file_path}: {e}")
raise
elif file_extension in ['.doc', '.docx']:
# Word文档
try:
from langchain_community.document_loaders import UnstructuredWordDocumentLoader
loader = UnstructuredWordDocumentLoader(str(file_path))
return loader.load()
except ImportError:
print("警告: 需要安装 unstructured 和 python-docx 来处理Word文档")
print("请运行: pip install unstructured python-docx")
raise
else:
raise ValueError(f"不支持的文件类型: {file_extension}")
except Exception as e:
print(f"加载文件失败 {file_path}: {e}")
raise
return await loop.run_in_executor(None, _load_doc)
def process_file_to_vector_store(self, file_path: str, chunk_size: int = 500, chunk_overlap: int = 50) -> Dict:
async def process_file_to_vector_store(self, file_path: str, chunk_size: int = 500, chunk_overlap: int = 50) -> Dict:
"""
处理文件并添加到向量库
异步处理文件并添加到向量库
:param file_path: 文件路径
:param chunk_size: 文档切分大小
:param chunk_overlap: 文档切分重叠
:return: 处理结果字典
"""
await self._ensure_initialized()
file_path = Path(file_path)
if not file_path.exists():
raise FileNotFoundError(f"文件不存在: {file_path}")
@@ -555,10 +596,10 @@ class BaseRAG(ABC):
        try:
            # 1. Save the file and get its hash
            stored_path, file_hash = self.file_manager.save_file(str(file_path))
            stored_path, file_hash = await self.file_manager.save_file(str(file_path))
            # 2. Check whether the file has already been processed
            existing_status = self.file_manager.get_file_status(file_hash)
            existing_status = await self.file_manager.get_file_status(file_hash)
            if existing_status and existing_status['status'] == FileStatus.COMPLETED.value:
                print(f"File {filename} has already been processed, skipping")
                return {
@@ -570,28 +611,32 @@ class BaseRAG(ABC):
            }
            # 3. Mark the status as waiting
            self.file_manager.update_file_status(
            await self.file_manager.update_file_status(
                file_hash, filename, file_type, FileStatus.WAITING
            )
            # 4. Mark the status as processing
            self.file_manager.update_file_status(
            await self.file_manager.update_file_status(
                file_hash, filename, file_type, FileStatus.PROCESSING
            )
            # 5. Load the document
            print(f"Processing file: {filename}")
            documents = self._load_document_by_type(stored_path)
            documents = await self._load_document_by_type(stored_path)
            if not documents:
                raise ValueError("No content could be extracted from the file")
            # 6. Split the document
            splitter = RecursiveCharacterTextSplitter(
                chunk_size=chunk_size,
                chunk_overlap=chunk_overlap
            )
            split_docs = splitter.split_documents(documents)
            loop = asyncio.get_event_loop()
            def _split_docs():
                splitter = RecursiveCharacterTextSplitter(
                    chunk_size=chunk_size,
                    chunk_overlap=chunk_overlap
                )
                return splitter.split_documents(documents)
            split_docs = await loop.run_in_executor(None, _split_docs)
            # 7. Add metadata to each chunk
            for doc in split_docs:
@@ -604,10 +649,10 @@ class BaseRAG(ABC):
            # 8. Add to the vector store
            print(f"Adding {len(split_docs)} document chunks to the vector store...")
            self.add_documents_to_vector_store(split_docs)
            await self.add_documents_to_vector_store(split_docs)
            # 9. Mark the status as completed
            self.file_manager.update_file_status(
            await self.file_manager.update_file_status(
                file_hash, filename, file_type, FileStatus.COMPLETED
            )
@@ -628,7 +673,7 @@ class BaseRAG(ABC):
            # Mark the status as error
            if 'file_hash' in locals():
                self.file_manager.update_file_status(
                await self.file_manager.update_file_status(
                    file_hash, filename, file_type, FileStatus.ERROR, error_message
                )
@@ -640,77 +685,95 @@ class BaseRAG(ABC):
                'error': error_message
            }
def get_file_processing_status(self, file_hash: str = None) -> Union[Dict, List[Dict]]:
async def get_file_processing_status(self, file_hash: str = None) -> Union[Dict, List[Dict]]:
"""
获取文件处理状态
异步获取文件处理状态
:param file_hash: 文件哈希如果为None则返回所有文件状态
:return: 文件状态信息
"""
if file_hash:
return self.file_manager.get_file_status(file_hash)
return await self.file_manager.get_file_status(file_hash)
else:
return self.file_manager.list_files_by_status()
return await self.file_manager.list_files_by_status()
def list_files_by_status(self, status: FileStatus = None) -> List[Dict]:
async def list_files_by_status(self, status: FileStatus = None) -> List[Dict]:
"""
按状态列出文件
异步按状态列出文件
:param status: 文件状态如果为None则返回所有状态的文件
:return: 文件列表
"""
return self.file_manager.list_files_by_status(status)
return await self.file_manager.list_files_by_status(status)
    def add_documents_to_vector_store(self, documents: List[Document]):
    async def add_documents_to_vector_store(self, documents: List[Document]):
        """
        Add documents to the Chroma vector store
        Asynchronously add documents to the Chroma vector store
        """
        await self._ensure_initialized()
        if documents:
            self.vector_store.add_documents(documents)
            # Newer versions of Chroma persist data automatically
            # Run embedding and storage in a thread pool
            loop = asyncio.get_event_loop()
            await loop.run_in_executor(None, self.vector_store.add_documents, documents)
    def build_retriever(self):
    async def build_retriever(self):
        """
        Build the retriever (may be replaced by subclasses or externally)
        Asynchronously build the retriever (may be replaced by subclasses or externally)
        """
        await self._ensure_initialized()
        return self.vector_store.as_retriever(search_kwargs={"k": self.retriever_top_k})
    def build_qa_chain(self):
    async def build_qa_chain(self):
        """
        Build the QA chain
        Asynchronously build the QA chain
        """
        await self._ensure_initialized()
        if not self.llm:
            raise ValueError("LLM model is not set")
        retriever = self.build_retriever()
        return RetrievalQA.from_chain_type(
            llm=self.llm, retriever=retriever, return_source_documents=True
        retriever = await self.build_retriever()
        # Build the QA chain in a thread pool
        loop = asyncio.get_event_loop()
        return await loop.run_in_executor(
            None,
            lambda: RetrievalQA.from_chain_type(
                llm=self.llm, retriever=retriever, return_source_documents=True
            )
        )
    def similarity_search(self, query: str, k: int = None) -> List[Document]:
    async def similarity_search(self, query: str, k: int = None) -> List[Document]:
        """
        Similarity search
        Asynchronous similarity search
        """
        await self._ensure_initialized()
        k = k or self.retriever_top_k
        return self.vector_store.similarity_search(query, k=k)
        # Run the search in a thread pool
        loop = asyncio.get_event_loop()
        return await loop.run_in_executor(None, self.vector_store.similarity_search, query, k)
    def similarity_search_with_rerank(
    async def similarity_search_with_rerank(
        self, query: str, k: int = None
    ) -> List[Document]:
        """
        Similarity search with reranking
        Asynchronous similarity search with reranking
        """
        await self._ensure_initialized()
        # First fetch extra candidate documents for reranking
        initial_k = k or self.retriever_top_k
        if self.rerank_config.get("enabled", False):
            # Fetch more candidates to rerank
            initial_k = max(initial_k * 2, 10)
        documents = self.vector_store.similarity_search(query, k=initial_k)
        documents = await self.similarity_search(query, k=initial_k)
        # If reranking is enabled, rerank now
        if self.rerank_config.get("enabled", False) and documents:
            final_k = k or self.retriever_top_k
            documents = self._rerank_documents(query, documents, top_k=final_k)
            documents = await self._rerank_documents(query, documents, top_k=final_k)
            return documents
        else:
            # Return the final top_k results
@@ -718,15 +781,15 @@ class BaseRAG(ABC):
            return documents[:final_k]
    @abstractmethod
    def ingest(self, *args, **kwargs):
    async def ingest(self, *args, **kwargs):
        """
        Document ingestion logic, to be implemented by subclasses
        Async document ingestion logic, to be implemented by subclasses
        """
        pass
    @abstractmethod
    def query(self, question: str) -> str:
    async def query(self, question: str) -> str:
        """
        Question-answering logic, to be implemented by subclasses
        Async question-answering logic, to be implemented by subclasses
        """
        pass