feat: init project

李如威 2025-07-28 10:44:56 +08:00
commit c10119b2dc
9 changed files with 367 additions and 0 deletions

17
.gitignore vendored Normal file

@@ -0,0 +1,17 @@
# Python
__pycache__/
*.py[cod]
*.so

# Distribution
build/
dist/
*.egg-info/

# Environments
.venv
venv/

# Project specific
chroma_db/
.DS_Store

21
LICENSE Normal file

@@ -0,0 +1,21 @@
MIT License

Copyright (c) 2025 Ruwei Li

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

43
README.md Normal file

@@ -0,0 +1,43 @@
# Base RAG
A minimal RAG base library that supports multiple embedding models and the Chroma vector database.

## Installation
```bash
pip install base-rag
```
## Usage
```python
from base_rag import BaseRAG

class MyRAG(BaseRAG):
    def ingest(self, file_path: str):
        documents = self.load_and_split_documents(file_path)
        self.add_documents_to_vector_store(documents)

    def query(self, question: str) -> str:
        docs = self.similarity_search(question)
        return f"Found {len(docs)} relevant documents"

# OpenAI API
config = {
    "type": "openai",
    "model": "text-embedding-3-small",
    "api_key": "your-api-key"
}

# Local model
config = {
    "type": "local",
    "model_name": "sentence-transformers/all-MiniLM-L6-v2"
}

rag = MyRAG(embedding_config=config)
rag.ingest("document.txt")
result = rag.query("your question")
```
Subclass this base class and implement the two methods `ingest()` and `query()` to customize your own RAG pipeline.
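
If an LLM is supplied, the base class can also assemble a `RetrievalQA` chain via `build_qa_chain()`. Here is a minimal sketch of an answer-generating subclass; the `QARAG` name and the API key are placeholders, and any LangChain-compatible LLM should work:

```python
from base_rag import BaseRAG
from langchain_openai import OpenAI  # placeholder LLM; any LangChain LLM works

class QARAG(BaseRAG):
    def ingest(self, file_path: str):
        documents = self.load_and_split_documents(file_path)
        self.add_documents_to_vector_store(documents)

    def query(self, question: str) -> str:
        # build_qa_chain() raises ValueError when no llm was configured
        chain = self.build_qa_chain()
        return chain.invoke({"query": question})["result"]

rag = QARAG(
    embedding_config={"type": "local",
                      "model_name": "sentence-transformers/all-MiniLM-L6-v2"},
    llm=OpenAI(api_key="your-api-key"),
)
```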

27
examples/quick_start.py Normal file

@@ -0,0 +1,27 @@
"""Base RAG 使用示例"""
from base_rag import BaseRAG
class SimpleRAG(BaseRAG):
def ingest(self, file_path: str):
documents = self.load_and_split_documents(file_path)
self.add_documents_to_vector_store(documents)
print(f"导入 {len(documents)} 个文档")
def query(self, question: str) -> str:
docs = self.similarity_search(question)
return f"找到 {len(docs)} 个相关文档"
if __name__ == "__main__":
# 本地模型配置
config = {
"type": "local",
"model_name": "sentence-transformers/all-MiniLM-L6-v2"
}
rag = SimpleRAG(embedding_config=config)
print("RAG初始化完成!")
# rag.ingest("your_document.txt")
# result = rag.query("你的问题")
# print(result)

27
pyproject.toml Normal file

@@ -0,0 +1,27 @@
[build-system]
requires = ["setuptools>=61.0", "wheel"]
build-backend = "setuptools.build_meta"

[project]
name = "base-rag"
version = "0.1.0"
description = "A minimal RAG base library"
readme = "README.md"
license = {text = "MIT"}
requires-python = ">=3.9"
dependencies = [
    "langchain>=0.3.0",
    "langchain-community>=0.3.0",
    "langchain-openai>=0.2.0",
    "langchain-chroma>=0.1.0",
    "chromadb>=0.4.0",
    "openai>=1.0.0",
    "tiktoken>=0.5.0",
    "sentence-transformers>=2.2.0",
]

[tool.setuptools.packages.find]
where = ["src"]

[tool.setuptools.package-dir]
"" = "src"

8
requirements.txt Normal file

@@ -0,0 +1,8 @@
langchain>=0.3.0
langchain-community>=0.3.0
langchain-openai>=0.2.0
langchain-chroma>=0.1.0
chromadb>=0.4.0
openai>=1.0.0
tiktoken>=0.5.0
sentence-transformers>=2.2.0

12
scripts/build.sh Executable file

@@ -0,0 +1,12 @@
#!/bin/bash
# Build script
set -e

echo "Cleaning..."
rm -rf build/ dist/ *.egg-info/

echo "Building..."
python -m build

echo "Done! Output is in the dist/ directory"

6
src/base_rag/__init__.py Normal file

@@ -0,0 +1,6 @@
"""简洁的RAG基础库"""
from .core import BaseRAG
__version__ = "0.1.0"
__all__ = ["BaseRAG"]

206
src/base_rag/core.py Normal file

@@ -0,0 +1,206 @@
from abc import ABC, abstractmethod
from typing import List, Optional, Dict, ClassVar
import threading
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_openai import OpenAIEmbeddings
from langchain.embeddings.base import Embeddings
from langchain_chroma import Chroma
from langchain_community.document_loaders import TextLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.chains import RetrievalQA
from langchain.llms.base import BaseLLM
from langchain.schema import Document


class BaseRAG(ABC):
    # Class-level cache of embedding models
    _embedding_models: ClassVar[Dict[str, Embeddings]] = {}
    # Lock guarding concurrent access to the model cache
    _lock: ClassVar[threading.Lock] = threading.Lock()

    def __init__(
        self,
        vector_store_name: str = "default",
        embedding_config: Optional[Dict] = None,
        retriever_top_k: int = 3,
        llm: Optional[BaseLLM] = None,
        persist_directory: str = "./chroma_db",
    ):
        """
        Initialize the base RAG class.

        :param vector_store_name: Vector store name, used to separate knowledge bases
        :param embedding_config: Embedding model configuration; supports local and API modes
        :param retriever_top_k: Number of documents returned by the retriever
        :param llm: Optional LLM used for the QA chain
        :param persist_directory: Chroma persistence directory

        embedding_config examples:
        Local model name: {"type": "local", "model_name": "sentence-transformers/all-MiniLM-L6-v2"}
        Local model path: {"type": "local", "model_path": "/path/to/your/model"}
        OpenAI API: {"type": "openai", "model": "text-embedding-ada-002", "api_key": "sk-..."}
        """
        self.vector_store_name = vector_store_name
        self.embedding_config = embedding_config or {
            "type": "local",
            "model_name": "sentence-transformers/all-MiniLM-L6-v2",
        }
        self.retriever_top_k = retriever_top_k
        self.llm = llm
        self.persist_directory = persist_directory
        # Reuse a cached embedding model when one exists for this config
        config_key = self._get_config_key(self.embedding_config)
        self.embedding_model = self._get_or_create_embedding_model(
            config_key, self.embedding_config
        )
        # Initialize the Chroma vector store
        self.vector_store = Chroma(
            collection_name=vector_store_name,
            embedding_function=self.embedding_model,
            persist_directory=persist_directory,
        )

    @staticmethod
    def _get_config_key(config: Dict) -> str:
        """
        Build a unique cache key from the configuration.
        """
        config_type = config.get("type", "local")
        if config_type == "local":
            # Support both a local path and a model name
            if "model_path" in config:
                # Backslashes are not allowed inside f-string expressions
                # before Python 3.12, so sanitize the path first
                sanitized = config["model_path"].replace("/", "_").replace("\\", "_")
                return f"local_path_{sanitized}"
            else:
                return f"local_name_{config.get('model_name', 'default')}"
        elif config_type == "openai":
            return f"openai_{config.get('model', 'text-embedding-ada-002')}"
        else:
            return f"{config_type}_{config.get('model', 'default')}"

    @classmethod
    def _get_or_create_embedding_model(
        cls, config_key: str, config: Dict
    ) -> Embeddings:
        """
        Get or create an embedding model (cached, thread-safe).
        """
        # Double-checked locking: test for an existing model first to
        # avoid unnecessary lock contention
        if config_key in cls._embedding_models:
            print(f"Using cached embedding model: {config_key}")
            return cls._embedding_models[config_key]
        # Acquire the lock for safe creation
        with cls._lock:
            # Check again in case another thread created the model
            # while we were waiting for the lock
            if config_key not in cls._embedding_models:
                print(f"Creating embedding model: {config_key}")
                cls._embedding_models[config_key] = cls._create_embedding_model(config)
            else:
                print(f"Using cached embedding model: {config_key}")
            return cls._embedding_models[config_key]

    @staticmethod
    def _create_embedding_model(config: Dict) -> Embeddings:
        """
        Create an embedding model from the configuration.
        """
        config_type = config.get("type", "local")
        if config_type == "local":
            # Support both a local path and a model name
            if "model_path" in config:
                model_path = config["model_path"]
                print(f"Loading model from local path: {model_path}")
                return HuggingFaceEmbeddings(
                    model_name=model_path,
                    model_kwargs=config.get("model_kwargs", {"device": "cpu"}),
                    encode_kwargs=config.get(
                        "encode_kwargs", {"normalize_embeddings": True}
                    ),
                )
            else:
                model_name = config.get(
                    "model_name", "sentence-transformers/all-MiniLM-L6-v2"
                )
                print(f"Loading model from the HuggingFace Hub: {model_name}")
                return HuggingFaceEmbeddings(
                    model_name=model_name,
                    model_kwargs=config.get("model_kwargs", {"device": "cpu"}),
                    encode_kwargs=config.get(
                        "encode_kwargs", {"normalize_embeddings": True}
                    ),
                )
        elif config_type == "openai":
            return OpenAIEmbeddings(
                model=config.get("model", "text-embedding-3-small"),
                api_key=config.get("api_key"),
                base_url=config.get("api_base"),
                max_retries=config.get("max_retries", 3),
            )
        else:
            raise ValueError(
                f"Unsupported embedding model type: {config_type}; "
                "supported types: 'local', 'openai'"
            )

    def load_and_split_documents(self, file_path: str) -> List[Document]:
        """
        Load and split a document (override in subclasses for other splitting strategies).
        """
        loader = TextLoader(file_path, encoding="utf-8")
        documents = loader.load()
        splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200)
        return splitter.split_documents(documents)

    def add_documents_to_vector_store(self, documents: List[Document]):
        """
        Add documents to the Chroma vector store.
        """
        if documents:
            self.vector_store.add_documents(documents)
            # Chroma 0.4+ persists automatically when persist_directory is
            # set; langchain_chroma's Chroma exposes no persist() method

    def build_retriever(self):
        """
        Build a retriever (can be replaced by subclasses or callers).
        """
        return self.vector_store.as_retriever(search_kwargs={"k": self.retriever_top_k})

    def build_qa_chain(self):
        """
        Build a RetrievalQA chain.
        """
        if not self.llm:
            raise ValueError("No LLM has been configured")
        retriever = self.build_retriever()
        return RetrievalQA.from_chain_type(
            llm=self.llm, retriever=retriever, return_source_documents=True
        )

    def similarity_search(self, query: str, k: Optional[int] = None) -> List[Document]:
        """
        Similarity search.
        """
        k = k or self.retriever_top_k
        return self.vector_store.similarity_search(query, k=k)

    @abstractmethod
    def ingest(self, *args, **kwargs):
        """
        Document-ingestion logic to be implemented by subclasses.
        """
        pass

    @abstractmethod
    def query(self, question: str) -> str:
        """
        Question-answering logic to be implemented by subclasses.
        """
        pass