feat: add embedding and rerank models
This commit is contained in:
parent 799fe71666
commit 99be9a5571
README.md (10 lines changed)
@@ -30,6 +30,14 @@ sh start.sh
 sh stop.sh
 
 # Check that the server started successfully
-curl http://localhost:8022/v1/models
+curl -H "Authorization: Bearer sk-local-827ccb0eea8a706c4c34a16891f84e7b" http://localhost:8022/v1/models
+
+
+# embedding
+curl http://localhost:8023/embed \
+    -X POST \
+    -H "Authorization: Bearer sk-local-827ccb0eea8a706c4c34a16891f84e7b" \
+    -H "Content-Type: application/json" \
+    -d '{"inputs": ["Instruct: Given a web search query, retrieve relevant passages that answer the query\nQuery: What is the capital of China?", "Instruct: Given a web search query, retrieve relevant passages that answer the query\nQuery: Explain gravity"]}'
 ```
 
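Worth double-checking: start.sh launches the embedding model through vLLM's OpenAI-compatible api_server, which typically serves embeddings at /v1/embeddings with an OpenAI-style body, whereas /embed with an "inputs" array is the Hugging Face text-embeddings-inference convention and may not exist on this server. A sketch of the OpenAI-style call, in case /embed is not available:

```
curl http://localhost:8023/v1/embeddings \
    -X POST \
    -H "Authorization: Bearer sk-local-827ccb0eea8a706c4c34a16891f84e7b" \
    -H "Content-Type: application/json" \
    -d '{"model": "Qwen3-Embedding", "input": ["What is the capital of China?"]}'
```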
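The README gains an embedding example but none for the new rerank service on port 8024. A sketch of a rerank call, assuming the vLLM version in use exposes its Jina-style /rerank route for the reranker (the query/documents payload and route name should be verified against the running server):

```
curl http://localhost:8024/rerank \
    -X POST \
    -H "Authorization: Bearer sk-local-827ccb0eea8a706c4c34a16891f84e7b" \
    -H "Content-Type: application/json" \
    -d '{"model": "Qwen3-Reranker", "query": "What is the capital of China?", "documents": ["Beijing is the capital of China.", "Gravity is a universal force."]}'
```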
start.sh (53 lines changed)
@@ -3,10 +3,25 @@
 
 # ========= Configuration =========
 MODEL_PATH="./models/Qwen2.5-0.5B-Instruct"
-MODEL_NAME="Qwen2.5-0.5B-Instruct"
+MODEL_NAME="Qwen2.5"
 PORT=8022
 LOG_FILE="vllm.log"
 PID_FILE="vllm.pid"
+
+EMBEDDING_MODEL_PATH="./models/Qwen3-Embedding-0.6B"
+EMBEDDING_MODEL_NAME="Qwen3-Embedding"
+EMBEDDING_PORT=8023
+EMBEDDING_LOG_FILE="vllm-embedding.log"
+EMBEDDING_PID_FILE="vllm-embedding.pid"
+
+
+RERANK_MODEL_PATH="./models/Qwen3-Reranker-0.6B"
+RERANK_MODEL_NAME="Qwen3-Reranker"
+RERANK_PORT=8024
+RERANK_LOG_FILE="vllm-rerank.log"
+RERANK_PID_FILE="vllm-rerank.pid"
+
+
 VENV_DIR="venv-vllm"
 API_KEY="sk-local-827ccb0eea8a706c4c34a16891f84e7b"
 # ===========================
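With three servers now sharing the box, a quick pre-flight check that the configured ports are free can save a confusing startup failure. A minimal sketch, assuming lsof is installed (this helper is not part of the committed script):

```
# Abort early if any of the three ports is already bound
for p in $PORT $EMBEDDING_PORT $RERANK_PORT; do
    if lsof -i ":$p" > /dev/null 2>&1; then
        echo "⚠️ Port $p is already in use" >&2
        exit 1
    fi
done
```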
@@ -27,8 +42,8 @@ if ! python -c "import vllm" &>/dev/null; then
     pip install vllm==0.11.0
 fi
 
-# Start vLLM
-echo "🚀 Starting vLLM API Server..."
+# Start the main model
+echo "🚀 Starting main model API Server..."
 nohup python -m vllm.entrypoints.openai.api_server \
     --model "$MODEL_PATH" \
     --served-model-name "$MODEL_NAME" \
@@ -36,8 +51,32 @@ nohup python -m vllm.entrypoints.openai.api_server \
     --host 0.0.0.0 \
     --port $PORT \
     --api-key "$API_KEY" > "$LOG_FILE" 2>&1 &
-
 echo $! > "$PID_FILE"
-echo "✅ vLLM started (PID=$(cat $PID_FILE))"
-echo "📡 Endpoint: http://localhost:$PORT/v1"
-echo "📜 Log file: $LOG_FILE"
+
+# Start the embedding model
+echo "🚀 Starting embedding model API Server..."
+nohup python -m vllm.entrypoints.openai.api_server \
+    --model "$EMBEDDING_MODEL_PATH" \
+    --served-model-name "$EMBEDDING_MODEL_NAME" \
+    --max-model-len 2048 \
+    --host 0.0.0.0 \
+    --port $EMBEDDING_PORT \
+    --api-key "$API_KEY" > "$EMBEDDING_LOG_FILE" 2>&1 &
+echo $! > "$EMBEDDING_PID_FILE"
+
+# Start the rerank model
+echo "🚀 Starting rerank model API Server..."
+nohup python -m vllm.entrypoints.openai.api_server \
+    --model "$RERANK_MODEL_PATH" \
+    --served-model-name "$RERANK_MODEL_NAME" \
+    --max-model-len 2048 \
+    --host 0.0.0.0 \
+    --port $RERANK_PORT \
+    --api-key "$API_KEY" > "$RERANK_LOG_FILE" 2>&1 &
+echo $! > "$RERANK_PID_FILE"
+
+
+echo "✅ All models started"
+echo "Main model: http://localhost:$PORT/v1 (PID=$(cat $PID_FILE)) log: $LOG_FILE"
+echo "Embedding: http://localhost:$EMBEDDING_PORT/v1 (PID=$(cat $EMBEDDING_PID_FILE)) log: $EMBEDDING_LOG_FILE"
+echo "Rerank: http://localhost:$RERANK_PORT/v1 (PID=$(cat $RERANK_PID_FILE)) log: $RERANK_LOG_FILE"
stop.sh (42 lines changed)
@@ -1,20 +1,30 @@
 #!/bin/bash
-# Stop the local vLLM service
+# Stop the local vLLM services (multi-model)
 
-PID_FILE="vllm.pid"
+MAIN_PID_FILE="vllm.pid"
+EMBEDDING_PID_FILE="vllm-embedding.pid"
+RERANK_PID_FILE="vllm-rerank.pid"
 
-if [ ! -f "$PID_FILE" ]; then
-    echo "⚠️ PID file not found; vLLM may not be running."
-    exit 1
-fi
+stop_service() {
+    local PID_FILE=$1
+    local NAME=$2
+    if [ ! -f "$PID_FILE" ]; then
+        echo "⚠️ $NAME PID file not found; $NAME may not be running."
+        return
+    fi
 
-PID=$(cat "$PID_FILE")
-if ps -p "$PID" > /dev/null 2>&1; then
-    echo "🛑 Stopping vLLM (PID=$PID)..."
-    kill "$PID"
-    rm -f "$PID_FILE"
-    echo "✅ vLLM service stopped."
-else
-    echo "⚠️ No running vLLM process found."
-    rm -f "$PID_FILE"
-fi
+    PID=$(cat "$PID_FILE")
+    if ps -p "$PID" > /dev/null 2>&1; then
+        echo "🛑 Stopping $NAME (PID=$PID)..."
+        kill "$PID"
+        rm -f "$PID_FILE"
+        echo "✅ $NAME service stopped."
+    else
+        echo "⚠️ No running $NAME process found."
+        rm -f "$PID_FILE"
+    fi
+}
+
+stop_service "$MAIN_PID_FILE" "main model"
+stop_service "$EMBEDDING_PID_FILE" "embedding model"
+stop_service "$RERANK_PID_FILE" "rerank model"
vllm-embedding.pid (new file, 1 line)

@@ -0,0 +1 @@
+21758
vllm-rerank.pid (new file, 1 line)

@@ -0,0 +1 @@
+21759
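The two single-line files above appear to be the runtime PID files written by start.sh (the filenames are inferred from the script; the diff viewer dropped them). If tracking them was unintentional, ignoring runtime artifacts would keep them out of future commits, e.g. in .gitignore:

```
*.pid
*.log
```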