feat: call method
parent 4372d7c52d
commit 370301cd15

@@ -0,0 +1 @@
httpx==0.28.1

@@ -0,0 +1,135 @@
import httpx
import asyncio
import os
import json

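# Connection settings for an OpenAI-compatible vLLM server, read from the environment at import time.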
VLLM_HTTP_KEY = os.environ.get("VLLM_HTTP_KEY")
VLLM_HTTP_URL = os.environ.get("VLLM_HTTP_URL")
VLLM_CHAT_MODEL_NAME = os.environ.get("VLLM_CHAT_MODEL_NAME")


def _headers():
    h = {"Content-Type": "application/json"}
    if VLLM_HTTP_KEY:
        h["Authorization"] = f"Bearer {VLLM_HTTP_KEY}"
    return h


def _merge_messages_and_prompt(messages: list[dict] | None = None, prompt: str = ""):
    """
    Merge messages (conversation history) and a single prompt into a new OpenAI-style messages list.
    - If messages is None or empty, build a list that only contains the prompt as a user message (when prompt is non-empty).
    - If prompt is non-empty, it is always appended to the end of messages as {"role": "user", "content": prompt}.
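
    Example (illustrative values only):
        _merge_messages_and_prompt([{"role": "assistant", "content": "Hi!"}], "Who are you?")
        -> [{"role": "assistant", "content": "Hi!"}, {"role": "user", "content": "Who are you?"}]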
"""
|
||||
merged = []
|
||||
if messages:
|
||||
merged = [dict(x) for x in messages]
|
||||
if prompt:
|
||||
merged.append({"role": "user", "content": prompt})
|
||||
return merged
|
||||
|
||||
|
||||
def _get_content(resp: dict) -> str:
    choices = resp.get("choices")
    if not isinstance(choices, list) or not choices:
        return ""

    first = choices[0]
    if not isinstance(first, dict):
        return ""

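    # Non-streaming responses put the text under "message"; streaming chunks use "delta".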
    content_obj = first.get("message") or first.get("delta")
    if isinstance(content_obj, dict):
        # A delta chunk may carry only a role with no content, so fall back to "".
        return content_obj.get("content") or ""

    return ""


async def call_llm(prompt: str, messages: list[dict] | None = None, max_tokens: int = 512, temperature: float = 0.0, timeout: int = 30):
    """Simple (non-streaming) chat completion call."""
    try:
        async with httpx.AsyncClient(timeout=timeout) as client:
            url = (VLLM_HTTP_URL or "").rstrip("/") + "/v1/chat/completions"
            body = {
                "model": VLLM_CHAT_MODEL_NAME,
                "messages": _merge_messages_and_prompt(messages, prompt),
                "max_tokens": max_tokens,
                "temperature": temperature,
            }
            res = await client.post(url, headers=_headers(), json=body)
            res.raise_for_status()
            data = res.json()
            return _get_content(data)
    except Exception as e:
        print(f"call_llm[ERROR]: {e}")
        return ""


async def call_llm_stream(prompt: str, messages: list[dict] | None = None, max_tokens: int = 512, temperature: float = 0.0, timeout: int = 60):
    """
    Streaming chat completion call.
    - Usage: async for chunk in call_llm_stream("prompt"):
    """
    try:
        async with httpx.AsyncClient(timeout=timeout) as client:
            url = (VLLM_HTTP_URL or "").rstrip("/") + "/v1/chat/completions"
            body = {
                "model": VLLM_CHAT_MODEL_NAME,
                "messages": _merge_messages_and_prompt(messages or [], prompt),
                "max_tokens": max_tokens,
                "temperature": temperature,
                "stream": True,
            }
            async with client.stream("POST", url, headers=_headers(), json=body) as resp:
                resp.raise_for_status()
                async for raw_line in resp.aiter_lines():
                    if not raw_line:
                        continue
                    line = raw_line.strip()
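                    # Each streamed chunk arrives as a Server-Sent Events line: a JSON object
                    # prefixed with "data:", with "data: [DONE]" marking the end of the stream.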
                    if line.startswith("data:"):
                        payload = line[len("data:"):].strip()
                        data = {}
                        if payload in ("[DONE]", ""):
                            break
                        try:
                            data = json.loads(payload)
                        except Exception:
                            yield payload
                            continue
                        # print(data)
                        yield _get_content(data)
                        continue
                    yield line
    except Exception as e:
        print(f"call_llm_stream[ERROR]: {e}")
        return


if __name__ == "__main__":
    # Set defaults for a local test run
    os.environ.setdefault("VLLM_HTTP_URL", "http://localhost:8022")
    os.environ.setdefault("VLLM_HTTP_KEY", "sk-local-827ccb0eea8a706c4c34a16891f84e7b")
    os.environ.setdefault("VLLM_CHAT_MODEL_NAME", "Qwen2.5-0.5B-Instruct")
    # Re-read so the module-level settings pick up the defaults above
    VLLM_HTTP_KEY = os.environ.get("VLLM_HTTP_KEY")
    VLLM_HTTP_URL = os.environ.get("VLLM_HTTP_URL")
    VLLM_CHAT_MODEL_NAME = os.environ.get("VLLM_CHAT_MODEL_NAME")

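    # The test prompt asks, in Chinese: "Hello, who are you, what can you do? Reply in Chinese."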
    test_prompt = "你好,你是谁,有什么功能,中文回复"
    print("--- Plain chat ---")

    text = asyncio.run(call_llm(test_prompt))
    print(text)

    print("--- Chat finished ---\n")

    print("--- Streaming chat ---")

    async def _test_stream():
        try:
            async for chunk in call_llm_stream(test_prompt):
                print(chunk, end="", flush=True)
            print("\n--- Streaming finished ---")
        except Exception as e:
            print(f"_test_stream[ERROR]: {e}")

    asyncio.run(_test_stream())