#!/bin/bash
# Start local vLLM OpenAI API services (MPS mode)

# ========= Configuration =========
MODEL_PATH="./models/Qwen2.5-0.5B-Instruct"
MODEL_NAME="Qwen2.5"
PORT=8022
LOG_FILE="vllm.log"
PID_FILE="vllm.pid"

EMBEDDING_MODEL_PATH="./models/Qwen3-Embedding-0.6B"
EMBEDDING_MODEL_NAME="Qwen3-Embedding"
EMBEDDING_PORT=8023
EMBEDDING_LOG_FILE="vllm-embedding.log"
EMBEDDING_PID_FILE="vllm-embedding.pid"

RERANK_MODEL_PATH="./models/Qwen3-Reranker-0.6B"
RERANK_MODEL_NAME="Qwen3-Reranker"
RERANK_PORT=8024
RERANK_LOG_FILE="vllm-rerank.log"
RERANK_PID_FILE="vllm-rerank.pid"

VENV_DIR="venv-vllm"
API_KEY="sk-local-827ccb0eea8a706c4c34a16891f84e7b"
# ===========================

echo "🔍 检查 Python 虚拟环境..."
|
|
if [ ! -d "$VENV_DIR" ]; then
|
|
echo "⚙️ 创建虚拟环境..."
|
|
python3 -m venv "$VENV_DIR"
|
|
fi
|
|
|
|
source "$VENV_DIR/bin/activate"
|
|
|
|
# 检查 vllm 是否安装
|
|
if ! python -c "import vllm" &>/dev/null; then
|
|
echo "📦 安装 vLLM 和 PyTorch..."
|
|
pip install --upgrade pip
|
|
pip install torch torchvision torchaudio
|
|
pip install vllm==0.11.0
|
|
fi
|
|
|
|
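# Optional sanity check (a suggested addition, not part of the original script):
# confirm which interpreter and vLLM build the venv actually provides before
# starting three servers against it.
python -c "import sys, vllm; print(f'Python {sys.version.split()[0]}, vLLM {vllm.__version__}')"
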
# Start the main model
echo "🚀 Starting main model API server..."
nohup python -m vllm.entrypoints.openai.api_server \
    --model "$MODEL_PATH" \
    --served-model-name "$MODEL_NAME" \
    --max-model-len 2048 \
    --host 0.0.0.0 \
    --port "$PORT" \
    --api-key "$API_KEY" > "$LOG_FILE" 2>&1 &
echo $! > "$PID_FILE"

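# Example request against the main model (a usage sketch, not part of the original
# script). The server needs time to load the model; run this from another shell
# once the log shows startup has finished.
# curl -s http://localhost:$PORT/v1/chat/completions \
#   -H "Authorization: Bearer $API_KEY" \
#   -H "Content-Type: application/json" \
#   -d '{"model": "Qwen2.5", "messages": [{"role": "user", "content": "Hello"}]}'
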
# Start the embedding model
echo "🚀 Starting embedding model API server..."
nohup python -m vllm.entrypoints.openai.api_server \
    --model "$EMBEDDING_MODEL_PATH" \
    --served-model-name "$EMBEDDING_MODEL_NAME" \
    --max-model-len 2048 \
    --host 0.0.0.0 \
    --port "$EMBEDDING_PORT" \
    --api-key "$API_KEY" > "$EMBEDDING_LOG_FILE" 2>&1 &
echo $! > "$EMBEDDING_PID_FILE"

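# Example embedding request (a usage sketch, not part of the original script),
# using the OpenAI-compatible /v1/embeddings route. Depending on the vLLM version,
# the embedding model may need to be launched explicitly in embedding/pooling mode;
# check the vLLM docs if this returns an error.
# curl -s http://localhost:$EMBEDDING_PORT/v1/embeddings \
#   -H "Authorization: Bearer $API_KEY" \
#   -H "Content-Type: application/json" \
#   -d '{"model": "Qwen3-Embedding", "input": "What is vLLM?"}'
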
# Start the rerank model
echo "🚀 Starting rerank model API server..."
nohup python -m vllm.entrypoints.openai.api_server \
    --model "$RERANK_MODEL_PATH" \
    --served-model-name "$RERANK_MODEL_NAME" \
    --max-model-len 2048 \
    --host 0.0.0.0 \
    --port "$RERANK_PORT" \
    --api-key "$API_KEY" > "$RERANK_LOG_FILE" 2>&1 &
echo $! > "$RERANK_PID_FILE"

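# Example rerank request (an assumption: recent vLLM builds expose a /v1/rerank
# route for scoring models; Qwen3-Reranker may also need to be launched as a
# scoring/pooling model rather than with the plain command above -- verify
# against the vLLM 0.11.0 docs before relying on this).
# curl -s http://localhost:$RERANK_PORT/v1/rerank \
#   -H "Authorization: Bearer $API_KEY" \
#   -H "Content-Type: application/json" \
#   -d '{"model": "Qwen3-Reranker", "query": "What is vLLM?", "documents": ["vLLM is an inference engine.", "Bananas are yellow."]}'
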
echo "✅ 所有模型已启动"
|
|
echo "主模型: http://localhost:$PORT/v1 (PID=$(cat $PID_FILE)) 日志: $LOG_FILE"
|
|
echo "Embedding: http://localhost:$EMBEDDING_PORT/v1 (PID=$(cat $EMBEDDING_PID_FILE)) 日志: $EMBEDDING_LOG_FILE"
|
|
echo "Rerank: http://localhost:$RERANK_PORT/v1 (PID=$(cat $RERANK_PID_FILE)) 日志: $RERANK_LOG_FILE"
|
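
# Follow-up commands (suggestions, not part of the original script):
# wait for a server to come up -- the vLLM OpenAI server exposes GET /health:
#   until curl -sf http://localhost:$PORT/health > /dev/null; do sleep 2; done
# and stop all three servers later via the recorded PIDs:
#   kill "$(cat "$PID_FILE")" "$(cat "$EMBEDDING_PID_FILE")" "$(cat "$RERANK_PID_FILE")"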