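"""Asynchronous vector store service built on ChromaDB and sentence-transformers.

Documents are split into overlapping chunks, embedded, and stored in a
persistent Chroma collection; search, listing, and deletion run their
blocking work in threads via asyncio.to_thread.
"""
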
import asyncio
import hashlib  # for the hashed bag-of-words fallback in _encode
import math  # for normalizing fallback vectors in _encode
import uuid
from datetime import datetime
from typing import Any, Dict, List

import chromadb
from chromadb.config import Settings
from langchain.text_splitter import RecursiveCharacterTextSplitter
from sentence_transformers import SentenceTransformer


class AsyncVectorStore:
    """Asynchronous vector store service backed by a persistent ChromaDB collection."""

    def __init__(self, persist_directory: str = "./chroma_db"):
        self.persist_directory = persist_directory
        self.client = chromadb.PersistentClient(
            path=persist_directory, settings=Settings(anonymized_telemetry=False)
        )
        # Cosine distance suits normalized sentence embeddings
        self.collection = self.client.get_or_create_collection(
            name="documents", metadata={"hnsw:space": "cosine"}
        )

        # Try to initialize the sentence encoder; if the model download fails
        # (e.g. no network), fall back to the local scheme implemented in _encode
        try:
            print("Loading embedding model...")
            self.encoder = SentenceTransformer("all-MiniLM-L6-v2")
            print("✓ Embedding model loaded")
        except Exception as e:
            print(f"⚠️ Failed to load embedding model: {e}")
            print("Falling back to a simple text vectorization scheme (demo only)")
            self.encoder = None

        self.text_splitter = RecursiveCharacterTextSplitter(
            chunk_size=500, chunk_overlap=50, length_function=len
        )
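
    def _encode(self, texts: List[str]) -> List[List[float]]:
        """Embed texts, with a hashed bag-of-words fallback when no model is available.

        The fallback is a minimal sketch of the "simple text vectorization
        scheme (demo only)" that the constructor promises but the original
        module never implemented. It is NOT semantically meaningful, and its
        vectors are not comparable to real model embeddings; it only keeps
        the demo runnable offline.
        """
        if self.encoder is not None:
            return self.encoder.encode(texts).tolist()

        dim = 384  # matches all-MiniLM-L6-v2's dimensionality for drop-in shape
        vectors = []
        for text in texts:
            vec = [0.0] * dim
            for token in text.lower().split():
                # Stable hash so a token always maps to the same bucket
                bucket = int(hashlib.md5(token.encode("utf-8")).hexdigest(), 16) % dim
                vec[bucket] += 1.0
            norm = math.sqrt(sum(v * v for v in vec)) or 1.0  # avoid division by zero
            vectors.append([v / norm for v in vec])
        return vectors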

    async def add_document_async(self, content: str, filename: str) -> str:
        """Add a document to the vector store asynchronously."""
        doc_id = str(uuid.uuid4())

        # Split the text in a worker thread so the event loop stays responsive
        chunks = await asyncio.to_thread(self.text_splitter.split_text, content)

        # Embed the chunks in a worker thread (handles the no-model fallback)
        embeddings = await asyncio.to_thread(self._encode, chunks)

        # One ID per chunk, derived from the document ID
        chunk_ids = [f"{doc_id}_{i}" for i in range(len(chunks))]

        # Per-chunk metadata so chunks can be grouped and deleted by document
        metadatas = [
            {
                "doc_id": doc_id,
                "filename": filename,
                "chunk_index": i,
                "upload_time": datetime.now().isoformat(),
            }
            for i in range(len(chunks))
        ]

        # Write to the collection in a worker thread
        await asyncio.to_thread(
            self.collection.add,
            ids=chunk_ids,
            embeddings=embeddings,
            documents=chunks,
            metadatas=metadatas,
        )

        return doc_id

    async def search_async(self, query: str, top_k: int = 3) -> List[Dict[str, Any]]:
        """Search for relevant document chunks asynchronously."""
        # Embed the query in a worker thread
        query_embedding = (await asyncio.to_thread(self._encode, [query]))[0]

        # Query the collection in a worker thread
        results = await asyncio.to_thread(
            self.collection.query,
            query_embeddings=[query_embedding],
            n_results=top_k,
            include=["documents", "metadatas", "distances"],
        )

        # Chroma returns one result list per query; flatten the single query's hits.
        # With "hnsw:space": "cosine", distance = 1 - cosine similarity (lower is closer).
        formatted_results = []
        if results["documents"] and results["documents"][0]:
            for i, doc in enumerate(results["documents"][0]):
                formatted_results.append(
                    {
                        "content": doc,
                        "metadata": results["metadatas"][0][i],
                        "distance": results["distances"][0][i],
                    }
                )

        return formatted_results

    async def get_documents_async(self) -> List[Dict[str, Any]]:
        """List every stored document with its chunk count, asynchronously."""
        results = await asyncio.to_thread(self.collection.get, include=["metadatas"])

        # Group chunk metadata by document ID
        doc_info = {}
        for metadata in results["metadatas"]:
            doc_id = metadata["doc_id"]
            if doc_id not in doc_info:
                doc_info[doc_id] = {
                    "id": doc_id,
                    "filename": metadata["filename"],
                    "upload_time": metadata["upload_time"],
                    "chunks_count": 0,
                }
            doc_info[doc_id]["chunks_count"] += 1

        return list(doc_info.values())

    async def delete_document_async(self, doc_id: str) -> bool:
        """Delete a document and all of its chunks asynchronously."""
        # Fetch the IDs of every chunk belonging to this document
        results = await asyncio.to_thread(
            self.collection.get, where={"doc_id": doc_id}, include=["metadatas"]
        )

        if not results["ids"]:
            return False

        # Delete all related chunks in a worker thread
        await asyncio.to_thread(self.collection.delete, ids=results["ids"])
        return True
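

# Minimal usage sketch (not part of the original module): exercises the store
# end to end with asyncio.run. The sample text and filename are illustrative.
if __name__ == "__main__":

    async def _demo() -> None:
        store = AsyncVectorStore()
        doc_id = await store.add_document_async(
            "ChromaDB is a vector database. Sentence embeddings enable semantic search.",
            filename="example.txt",
        )
        print("added:", doc_id)
        print("documents:", await store.get_documents_async())
        for hit in await store.search_async("what enables semantic search?", top_k=2):
            print(f"distance={hit['distance']:.4f} content={hit['content']!r}")
        print("deleted:", await store.delete_document_async(doc_id))

    asyncio.run(_demo())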