#!/bin/bash
# Start a local vLLM OpenAI API server (MPS mode)

# ========= Configuration =========
MODEL_PATH="./models/Qwen2.5-0.5B-Instruct"
MODEL_NAME="Qwen2.5-0.5B-Instruct"
PORT=8022
LOG_FILE="vllm.log"
PID_FILE="vllm.pid"
VENV_DIR="venv-vllm"
API_KEY="sk-local-827ccb0eea8a706c4c34a16891f84e7b"
# =================================

echo "🔍 Checking Python virtual environment..."
if [ ! -d "$VENV_DIR" ]; then
    echo "⚙️ Creating virtual environment..."
    python3 -m venv "$VENV_DIR"
fi
source "$VENV_DIR/bin/activate"

# Install vLLM and PyTorch if vLLM is not already importable
if ! python -c "import vllm" &>/dev/null; then
    echo "📦 Installing vLLM and PyTorch..."
    pip install --upgrade pip
    pip install torch torchvision torchaudio
    pip install vllm==0.11.0
fi

# Launch the vLLM OpenAI-compatible API server in the background
echo "🚀 Starting vLLM API Server..."
nohup python -m vllm.entrypoints.openai.api_server \
    --model "$MODEL_PATH" \
    --served-model-name "$MODEL_NAME" \
    --max-model-len 2048 \
    --host 0.0.0.0 \
    --port "$PORT" \
    --api-key "$API_KEY" > "$LOG_FILE" 2>&1 &

echo $! > "$PID_FILE"
echo "✅ vLLM started (PID=$(cat "$PID_FILE"))"
echo "📡 API endpoint: http://localhost:$PORT/v1"
echo "📜 Log file: $LOG_FILE"
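
# =================================
# A minimal sketch for verifying the server once the model has loaded,
# assuming vLLM's standard OpenAI-compatible /v1/models and
# /v1/chat/completions endpoints (values below mirror the config above;
# adjust the prompt as needed):
#
#   curl -s http://localhost:8022/v1/models \
#     -H "Authorization: Bearer sk-local-827ccb0eea8a706c4c34a16891f84e7b"
#
#   curl -s http://localhost:8022/v1/chat/completions \
#     -H "Authorization: Bearer sk-local-827ccb0eea8a706c4c34a16891f84e7b" \
#     -H "Content-Type: application/json" \
#     -d '{"model": "Qwen2.5-0.5B-Instruct",
#          "messages": [{"role": "user", "content": "Hello"}]}'
# =================================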