feat: run basic services
This commit is contained in:
parent 1e18cd2e76
commit dface2ba34
@@ -184,7 +184,7 @@ Temporary Items
local_config.py

# data directories (if needed)
# datas/
datas/
# docker_image_build_tmp/
# logs/
# results/
@ -0,0 +1,338 @@
|
|||
太好了 — 我把 **两套完整示例代码** 给你:
|
||||
|
||||
1. **Elasticsearch + Qdrant 双库 RAG pipeline(Python 模块)**
|
||||
|
||||
* 作用:把文档同时写入 ES(做 BM25 倒排检索)和 Qdrant(做 embedding/语义检索),检索时合并两个检索结果并调用 vLLM reranker 返回最相关结果。
|
||||
|
||||
2. **FastAPI 混合检索服务(直接可运行 API)**
|
||||
|
||||
* 作用:把上面的 pipeline 封装成 HTTP 接口(`/ingest_text`, `/search` 等),方便接入前端或其它服务。
|
||||
|
||||
> 两套示例以工程可用、易读为主。生产化时建议:把阻塞客户端改为 async(或在线程池里并发)、增加重试/限流、加入监控与认证。
|
||||
|
||||
下面先给出**依赖 & 配置(最小)**,然后是两套代码。
|
||||
|
||||
---
## Dependencies (requirements)

```text
# requirements.txt
fastapi
uvicorn[standard]
httpx
qdrant-client
elasticsearch>=8.6.0
python-dotenv
numpy
python-multipart
```
Put a `.env` at the project root (or use environment variables):

```
# .env
VLLM_API_KEY=
VLLM_EMBED_URL=http://localhost:8000/v1/embeddings
VLLM_RERANK_URL=http://localhost:8000/v1/rerank
VLLM_CHAT_URL=http://localhost:8000/v1/chat/completions

QDRANT_HOST=localhost
QDRANT_PORT=6333

ES_HOST=http://localhost:9200
ES_INDEX=my_kb_index

EMBED_MODEL=text-embedding-3-large
RERANK_MODEL=bge-reranker-v2-m3
EMBED_DIM=1536

TOP_K=32
TOP_N=5
```
---

# 1) Elasticsearch + Qdrant dual-store RAG pipeline (module)

Save this file as `pipeline_es_qdrant.py`.
```python
# pipeline_es_qdrant.py
import os
import uuid
import httpx
from typing import List, Dict
from dotenv import load_dotenv

from qdrant_client import QdrantClient
from qdrant_client.models import VectorParams, Distance, PointStruct

from elasticsearch import Elasticsearch

load_dotenv()

# config from env
VLLM_EMBED_URL = os.getenv("VLLM_EMBED_URL")
VLLM_RERANK_URL = os.getenv("VLLM_RERANK_URL")
EMBED_MODEL = os.getenv("EMBED_MODEL", "text-embedding-3-large")
EMBED_DIM = int(os.getenv("EMBED_DIM", "1536"))

QDRANT_HOST = os.getenv("QDRANT_HOST", "localhost")
QDRANT_PORT = int(os.getenv("QDRANT_PORT", "6333"))

ES_HOST = os.getenv("ES_HOST", "http://localhost:9200")
ES_INDEX = os.getenv("ES_INDEX", "my_kb_index")

TOP_K = int(os.getenv("TOP_K", "32"))

# clients (synchronous)
qdrant = QdrantClient(host=QDRANT_HOST, port=QDRANT_PORT)
es = Elasticsearch(ES_HOST)

# ensure qdrant collection & es index
def ensure_qdrant_collection(name: str, dim: int = EMBED_DIM):
    try:
        qdrant.get_collection(collection_name=name)
    except Exception:
        qdrant.recreate_collection(
            collection_name=name,
            vectors_config=VectorParams(size=dim, distance=Distance.COSINE),
        )

def ensure_es_index(index_name: str):
    if not es.indices.exists(index=index_name):
        body = {
            "mappings": {
                "properties": {
                    "text": {"type": "text"},
                    "source": {"type": "keyword"},
                    "meta": {"type": "object"},
                }
            }
        }
        es.indices.create(index=index_name, body=body)

# embeddings via vLLM OpenAI-style endpoint
async def embed_texts(texts: List[str]) -> List[List[float]]:
    async with httpx.AsyncClient(timeout=60) as client:
        r = await client.post(VLLM_EMBED_URL, json={"model": EMBED_MODEL, "input": texts})
        r.raise_for_status()
        data = r.json()
        # support OpenAI-style response
        return [item["embedding"] for item in data["data"]]

# Ingest: add chunks to ES (for BM25) and Qdrant (for embeddings)
async def ingest_chunks(kb: str, chunks: List[Dict]):
    """
    chunks: list of {"id": optional, "text": str, "meta": dict}
    Writes to ES (document) and Qdrant (vector)
    """
    ensure_es_index(ES_INDEX)
    ensure_qdrant_collection(kb, dim=EMBED_DIM)

    texts = [c["text"] for c in chunks]
    embeddings = await embed_texts(texts)

    # assign each chunk a stable id once, so the ES document and the Qdrant point share it
    ids = [c.get("id") or str(uuid.uuid4()) for c in chunks]

    # index into ES one document at a time (use the bulk helper for large batches)
    for doc_id, c in zip(ids, chunks):
        es.index(index=ES_INDEX, id=doc_id, document={"text": c["text"], "source": c.get("meta", {}).get("source"), "meta": c.get("meta", {})})

    # upsert to qdrant
    points = []
    for doc_id, c, emb in zip(ids, chunks, embeddings):
        points.append(PointStruct(id=doc_id, vector=emb, payload={"text": c["text"], **(c.get("meta") or {})}))
    qdrant.upsert(collection_name=kb, points=points)
    return {"ok": True, "ingested": len(points)}

# Search: BM25 via ES
def es_search(query: str, top_k: int = 10) -> List[Dict]:
    resp = es.search(index=ES_INDEX, body={"query": {"match": {"text": {"query": query}}}, "size": top_k})
    hits = []
    for h in resp["hits"]["hits"]:
        hits.append({"id": h["_id"], "score": h["_score"], "text": h["_source"]["text"], "meta": h["_source"].get("meta")})
    return hits

# Qdrant search
def qdrant_search(kb: str, q_emb: List[float], top_k: int = TOP_K) -> List[Dict]:
    hits = qdrant.search(collection_name=kb, query_vector=q_emb, limit=top_k)
    out = []
    for h in hits:
        payload = h.payload or {}
        out.append({"id": h.id, "score": getattr(h, "score", None), "text": payload.get("text"), "meta": payload})
    return out

# Merge results strategy:
# - gather ES top_k and Qdrant top_k
# - deduplicate by id and produce candidate list
def merge_candidates(es_hits: List[Dict], q_hits: List[Dict], weight_es: float = 1.0, weight_q: float = 1.0) -> List[Dict]:
    # map by id with combined score (normalized)
    candidates = {}
    # normalize ES scores to 0..1 by dividing by max (if present)
    max_es = max((h["score"] for h in es_hits), default=1.0)
    max_q = max((h["score"] or 1.0 for h in q_hits), default=1.0)
    for h in es_hits:
        sid = h["id"]
        s = (h["score"] or 0.0) / max_es
        candidates.setdefault(sid, {"id": sid, "text": h["text"], "meta": h.get("meta", {}), "es_score": s, "q_score": 0.0})
        candidates[sid]["es_score"] = s
    for h in q_hits:
        sid = h["id"]
        s = (h["score"] or 0.0) / max_q
        candidates.setdefault(sid, {"id": sid, "text": h["text"], "meta": h.get("meta", {}), "es_score": 0.0, "q_score": s})
        candidates[sid]["q_score"] = s
    # compute hybrid score
    for sid, v in candidates.items():
        v["hybrid_score"] = weight_es * v["es_score"] + weight_q * v["q_score"]
    # sort by hybrid_score desc
    return sorted(candidates.values(), key=lambda x: x["hybrid_score"], reverse=True)

# Rerank via vLLM reranker endpoint (OpenAI-style)
async def rerank_with_vllm(query: str, docs: List[str], model: str = None) -> List[int]:
    model = model or os.getenv("RERANK_MODEL")
    async with httpx.AsyncClient(timeout=60) as client:
        r = await client.post(VLLM_RERANK_URL, json={"model": model, "query": query, "documents": docs})
        r.raise_for_status()
        data = r.json()
        # expect data["results"] = [{"index": i, "score": ...}, ...]
        order = [item["index"] for item in sorted(data["results"], key=lambda x: -x["score"])]
        return order

# Full pipeline: query -> es + qdrant -> merge -> rerank -> return top_n
async def hybrid_search(kb: str, query: str, top_k_es: int = 8, top_k_q: int = 24, top_n: int = 5) -> Dict:
    # 1. get ES hits
    es_hits = es_search(query, top_k_es)
    # 2. embed query and search Qdrant
    q_emb = (await embed_texts([query]))[0]
    q_hits = qdrant_search(kb, q_emb, top_k_q)
    # 3. merge candidates
    candidates = merge_candidates(es_hits, q_hits, weight_es=1.0, weight_q=1.0)
    # 4. rerank the top M candidates
    M = min(len(candidates), 50)
    top_candidates = candidates[:M]
    docs = [c["text"] for c in top_candidates]
    if docs:
        order = await rerank_with_vllm(query, docs)
        ordered = [top_candidates[i] for i in order][:top_n]
    else:
        ordered = candidates[:top_n]
    return {"query": query, "results": ordered}
```
**Notes and key points:**

* `ES` handles BM25 (the `match` query) and `Qdrant` handles vector recall; when merging, both sides' scores are normalized and combined with weights into a `hybrid_score`, and the candidates are then handed to the large reranker (vLLM) for fine-grained ranking.
* `embed_texts` uses async httpx calls. The Qdrant/ES operations are synchronous (for higher concurrency, swap in async clients or push the calls into a thread pool).
* `merge_candidates` is a basic merge strategy; it can be replaced with something more sophisticated (score products, IDF rules, source bias, etc.).
* The reranker endpoint must be implemented on the vLLM side: it receives `query` + `documents` and returns a score and index for each document; a sketch of that contract follows below.
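
For local testing, here is a minimal sketch of the request/response shape that `rerank_with_vllm` assumes. The endpoint path, field names, and the naive overlap scoring are illustrative assumptions for a stand-in service, not vLLM's built-in API:

```python
# mock_rerank.py - illustrative stand-in for the reranker endpoint rerank_with_vllm expects.
# The payload fields and the token-overlap scoring are assumptions for local testing only.
from typing import List, Optional
from fastapi import FastAPI
from pydantic import BaseModel

app = FastAPI()

class RerankReq(BaseModel):
    model: Optional[str] = None
    query: str
    documents: List[str]

@app.post("/v1/rerank")
async def rerank(req: RerankReq):
    q_tokens = set(req.query.lower().split())
    results = []
    for i, doc in enumerate(req.documents):
        # "score": higher means more relevant; replace with a real cross-encoder in practice
        overlap = len(q_tokens & set(doc.lower().split()))
        results.append({"index": i, "score": float(overlap)})
    return {"results": results}
```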

---

# 2) FastAPI hybrid retrieval API (wrapping the pipeline above)

The example below is a small service exposing two endpoints, `/ingest_text` and `/search`. Save it as `app_fastapi.py` (or merge it into your existing FastAPI project).
```python
# app_fastapi.py
import os
from fastapi import FastAPI, HTTPException
from pydantic import BaseModel
from typing import List, Optional
from dotenv import load_dotenv

load_dotenv()

from pipeline_es_qdrant import ingest_chunks, hybrid_search  # import the module above

app = FastAPI(title="Hybrid RAG API")

class IngestReq(BaseModel):
    kb: str
    chunks: List[dict]  # each { "text": "...", "meta": {...}, "id": optional }

class SearchReq(BaseModel):
    kb: str
    query: str
    top_n: Optional[int] = 5

@app.post("/ingest_text")
async def ingest_text(req: IngestReq):
    try:
        res = await ingest_chunks(req.kb, req.chunks)
        return res
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))

@app.post("/search")
async def search(req: SearchReq):
    try:
        res = await hybrid_search(req.kb, req.query, top_n=req.top_n)
        return res
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))

@app.get("/healthz")
async def healthz():
    return {"status": "ok"}
```
**How to run:**

```bash
uvicorn app_fastapi:app --reload --port 8000
```

(If vLLM is already serving on port 8000, as in the `.env` above, pick a different port for this API.)
**Example workflow:**

1. Ingest documents (simplest approach: split your articles into chunks, then upload them via `/ingest_text`):

```bash
curl -X POST "http://localhost:8000/ingest_text" -H "Content-Type: application/json" -d '{
  "kb":"kb_demo",
  "chunks":[{"text":"Document 1 content ...","meta":{"source":"doc1"}},{"text":"Document 2 ..."}]
}'
```
2. Search:

```bash
curl -X POST "http://localhost:8000/search" -H "Content-Type: application/json" -d '{
  "kb":"kb_demo",
  "query":"How do I use Product A?",
  "top_n":5
}'
```
---

## Production deployment & performance tips (practical notes)
1. **Concurrency and blocking**: the ES/Qdrant clients in the examples above are synchronous (blocking). Under high concurrency in FastAPI, prefer one of the following (see the sketch after this list):

   * use `AsyncQdrantClient` (from `qdrant-client`) and `AsyncElasticsearch` (the elasticsearch 8+ async client), or
   * push the blocking calls into a thread pool: `await asyncio.get_event_loop().run_in_executor(None, sync_call, args)`.
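
A minimal sketch of the thread-pool route, assuming Python 3.9+ (`asyncio.to_thread`) and the synchronous helpers from `pipeline_es_qdrant.py`; the `hybrid_recall` name is illustrative:

```python
# Sketch: offload the blocking ES/Qdrant calls so they don't stall the event loop.
# Assumes Python 3.9+ and the sync helpers from pipeline_es_qdrant.py.
import asyncio
from pipeline_es_qdrant import es_search, qdrant_search, embed_texts

async def hybrid_recall(kb: str, query: str, top_k_es: int = 8, top_k_q: int = 24):
    q_emb = (await embed_texts([query]))[0]
    # run both blocking searches concurrently in worker threads
    es_hits, q_hits = await asyncio.gather(
        asyncio.to_thread(es_search, query, top_k_es),
        asyncio.to_thread(qdrant_search, kb, q_emb, top_k_q),
    )
    return es_hits, q_hits
```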

2. **Batching and sharding**: for large-scale writes, use bulk operations (the ES bulk API, Qdrant batch upserts) and control the batch size (e.g. 64/128 per batch), as sketched below.
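
A minimal sketch of that batching, reusing the `es`, `qdrant`, and `embed_texts` objects from `pipeline_es_qdrant.py`; the helper name and batch size are illustrative:

```python
# Sketch: batched ingestion via the ES bulk helper and batched Qdrant upserts.
# Assumes the clients and embed_texts from pipeline_es_qdrant.py; batch size is illustrative.
import uuid
from elasticsearch.helpers import bulk
from qdrant_client.models import PointStruct
from pipeline_es_qdrant import es, qdrant, embed_texts, ES_INDEX

async def ingest_chunks_bulk(kb: str, chunks, batch_size: int = 128):
    for start in range(0, len(chunks), batch_size):
        batch = chunks[start:start + batch_size]
        ids = [c.get("id") or str(uuid.uuid4()) for c in batch]
        embeddings = await embed_texts([c["text"] for c in batch])

        # one bulk request to ES per batch
        actions = [
            {"_index": ES_INDEX, "_id": doc_id, "_source": {"text": c["text"], "meta": c.get("meta", {})}}
            for doc_id, c in zip(ids, batch)
        ]
        bulk(es, actions)

        # one upsert call to Qdrant per batch
        points = [
            PointStruct(id=doc_id, vector=emb, payload={"text": c["text"], **(c.get("meta") or {})})
            for doc_id, c, emb in zip(ids, batch, embeddings)
        ]
        qdrant.upsert(collection_name=kb, points=points)
```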

3. **Tuning the hybrid step**:

   * The merge weights (`weight_es`, `weight_q`) can be tuned against offline validation metrics (MRR, P@k); a small helper for that is sketched below.
   * Run the reranker only over the top-M candidates (e.g. M=50) to keep compute costs bounded.
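
One way to run that offline check, as a minimal sketch: assume a small labeled set mapping each query to the id of the chunk that should rank first; the function names and weight grid are illustrative:

```python
# Sketch: grid-search the merge weights against MRR on a small labeled set.
# `labeled` maps query -> id of the chunk that should rank first (illustrative format).
from pipeline_es_qdrant import es_search, qdrant_search, embed_texts, merge_candidates

def mrr(ranked_ids, relevant_id):
    for rank, rid in enumerate(ranked_ids, start=1):
        if rid == relevant_id:
            return 1.0 / rank
    return 0.0

async def evaluate_weights(kb: str, labeled: dict, weights=((1.0, 1.0), (0.5, 1.0), (1.0, 0.5))):
    scores = {}
    for w_es, w_q in weights:
        total = 0.0
        for query, relevant_id in labeled.items():
            es_hits = es_search(query, 8)
            q_emb = (await embed_texts([query]))[0]
            q_hits = qdrant_search(kb, q_emb, 24)
            merged = merge_candidates(es_hits, q_hits, weight_es=w_es, weight_q=w_q)
            total += mrr([c["id"] for c in merged], relevant_id)
        scores[(w_es, w_q)] = total / max(len(labeled), 1)
    return scores  # pick the weight pair with the highest mean reciprocal rank
```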

4. **Security and rate limiting**: on a public API, add authentication (an API key), request rate limits, and input length limits (to avoid oversized context requests); an API-key check is sketched below.
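
A minimal sketch of an API-key check as a FastAPI dependency; the `SERVICE_API_KEY` variable and the `X-API-Key` header are assumptions, not part of the original service:

```python
# Sketch: simple API-key gate for the public endpoints.
# SERVICE_API_KEY and the X-API-Key header name are illustrative choices.
import os
from fastapi import Depends, Header, HTTPException

SERVICE_API_KEY = os.getenv("SERVICE_API_KEY", "")

async def require_api_key(x_api_key: str = Header(default="")):
    if not SERVICE_API_KEY or x_api_key != SERVICE_API_KEY:
        raise HTTPException(status_code=401, detail="invalid or missing API key")

# attach to a route in app_fastapi.py:
# @app.post("/search", dependencies=[Depends(require_api_key)])
```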

5. **Monitoring**: record the time spent in each stage (embed, es_search, qdrant_search, rerank, llm) for optimization and SLOs; a timing sketch is below.
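
One simple way to capture those per-stage timings around `hybrid_search`'s steps, as a minimal sketch (the logger name and stage labels are illustrative):

```python
# Sketch: per-stage timing for the hybrid search path, using perf_counter.
# Logger name and stage labels are illustrative.
import time
import logging
from contextlib import contextmanager

logger = logging.getLogger("hybrid_rag")

@contextmanager
def timed(timings: dict, stage: str):
    t0 = time.perf_counter()
    try:
        yield
    finally:
        timings[stage] = time.perf_counter() - t0

# usage inside hybrid_search:
# timings = {}
# with timed(timings, "es_search"):
#     es_hits = es_search(query, top_k_es)
# with timed(timings, "rerank"):
#     order = await rerank_with_vllm(query, docs)
# logger.info("stage timings: %s", timings)
```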

---

## Possible extensions (I can help with these next)

* Switch the ES/Qdrant clients to **fully async implementations** and swap them into the code.
* Add a **document loader (docx/pdf) + automatic chunker** and accept file uploads directly via an `/ingest_file` endpoint.
* Provide an **example docker-compose** that orchestrates Qdrant, Elasticsearch, vLLM (or a mock service), and this API together.
* Implement more sophisticated **hybrid fusion strategies** (e.g. union first, then metadata filtering, then rerank).

Which extension would you like first (e.g. the **async version**, **file upload + auto-chunking**, or **docker-compose**)? I can add the corresponding code right away.
@@ -0,0 +1,28 @@
version: "3.8"

services:
  elasticsearch:
    image: elasticsearch:9.2.2
    container_name: elasticsearch
    environment:
      - discovery.type=single-node      # single-node mode
      - xpack.security.enabled=true     # enable security (required to set a password)
      - ELASTIC_PASSWORD=12345          # set the password (important)
      - ES_JAVA_OPTS=-Xms1g -Xmx1g
    ports:
      - "9210:9200"
    volumes:
      - ./datas/es-data:/usr/share/elasticsearch/data
    restart: unless-stopped

  qdrant:
    image: qdrant/qdrant:latest
    container_name: qdrant
    environment:
      QDRANT__SERVICE__API_KEY: 12345
    ports:
      - "6333:6333"   # HTTP
      - "6334:6334"   # gRPC
    volumes:
      - ./datas/qdrant-storage:/qdrant/storage
    restart: unless-stopped
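
Note that this compose file enables ES security and a Qdrant API key, and maps ES to host port 9210, while the `.env` above points at `localhost:9200` with no credentials. A minimal sketch of client construction that would match this compose file (the values below are simply the ones from the compose file, adjust to your deployment):

```python
# Sketch: clients configured to match the compose file above (ports/credentials are the compose values).
from elasticsearch import Elasticsearch
from qdrant_client import QdrantClient

es = Elasticsearch("http://localhost:9210", basic_auth=("elastic", "12345"))
qdrant = QdrantClient(host="localhost", port=6333, api_key="12345")
```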