# sample-ai/src/sample_ai/utils.py

import asyncio
import json
import os

import httpx
from dotenv import load_dotenv

load_dotenv()
MAIN_HTTP_URL = os.environ.get("MAIN_HTTP_URL")
MAIN_HTTP_KEY = os.environ.get("MAIN_HTTP_KEY")
MAIN_MODEL_NAME = os.environ.get("MAIN_MODEL_NAME")
EMBEDDING_HTTP_URL = os.environ.get("EMBEDDING_HTTP_URL")
EMBEDDING_HTTP_KEY = os.environ.get("EMBEDDING_HTTP_KEY")
EMBEDDING_MODEL_NAME = os.environ.get("EMBEDDING_MODEL_NAME")
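
# Illustrative .env for local development (values mirror the defaults used in
# the __main__ block below; adjust to your own endpoints and keys):
#
#   MAIN_HTTP_URL=http://localhost:8022/v1/chat/completions
#   MAIN_HTTP_KEY=sk-local-...
#   MAIN_MODEL_NAME=Qwen2.5
#   EMBEDDING_HTTP_URL=http://localhost:8023/v1/embeddings
#   EMBEDDING_HTTP_KEY=sk-local-...
#   EMBEDDING_MODEL_NAME=Qwen3-Embedding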

def _merge_messages_and_prompt(messages: list[dict] | None = None, prompt: str = ""):
    """
    Merge a `messages` conversation history and a single `prompt` into a new
    OpenAI-style messages list.
    - If `messages` is None or empty, the result contains only the prompt as a
      user message (when the prompt is non-empty).
    - If `prompt` is non-empty, it is always appended to the end of `messages`
      as {"role": "user", "content": prompt}.
    """
    merged = []
    if messages:
        # Shallow-copy each message so the caller's dicts are not mutated.
        merged = [dict(x) for x in messages]
    if prompt:
        merged.append({"role": "user", "content": prompt})
    return merged
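
# Example of the merge semantics (values are illustrative):
#
#   _merge_messages_and_prompt(
#       [{"role": "system", "content": "You are helpful."}],
#       "Hi",
#   )
#   # -> [{"role": "system", "content": "You are helpful."},
#   #     {"role": "user", "content": "Hi"}]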

def _get_content(resp: dict) -> str:
    """Extract the text content from an OpenAI-style chat completion response."""
    choices = resp.get("choices")
    if not isinstance(choices, list) or not choices:
        return ""
    first = choices[0]
    if not isinstance(first, dict):
        return ""
    # Non-streaming responses carry "message"; streaming chunks carry "delta".
    content_obj = first.get("message") or first.get("delta")
    if isinstance(content_obj, dict):
        # Guard with `or ""` in case the server sends "content": null
        # (e.g. in role-only streaming chunks).
        return content_obj.get("content", "") or ""
    return ""

def fixed_size_chunk(text: str, chunk_size: int = 2000, overlap: int = 50):
    """Split `text` into fixed-size chunks, padding each side with `overlap` characters."""
    chunks = []
    start = 0
    while start < len(text):
        end = start + chunk_size
        # Each chunk extends `overlap` characters beyond both boundaries,
        # so adjacent chunks share context across the cut points.
        chunks.append(text[max(0, start - overlap) : min(len(text), end + overlap)])
        start += chunk_size
    return chunks
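
# Illustrative behavior with small numbers:
#
#   fixed_size_chunk("abcdefghij", chunk_size=4, overlap=1)
#   # -> ["abcde", "defghi", "hij"]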

async def call_llm(prompt: str, messages: list[dict] | None = None, max_tokens: int = 512, temperature: float = 0.0, timeout: int = 30):
    """Simple (non-streaming) chat completion; returns the reply text, or "" on error."""
    try:
        async with httpx.AsyncClient(timeout=timeout) as client:
            url = MAIN_HTTP_URL
            body = {
                "model": MAIN_MODEL_NAME,
                "messages": _merge_messages_and_prompt(messages, prompt),
                "max_tokens": max_tokens,
                "temperature": temperature,
            }
            headers = {
                "Content-Type": "application/json",
                "Authorization": f"Bearer {MAIN_HTTP_KEY}",
            }
            res = await client.post(url, headers=headers, json=body)
            res.raise_for_status()
            data = res.json()
            return _get_content(data)
    except Exception as e:
        print(f"call_llm[ERROR]: {e}")
        return ""

async def call_llm_stream(prompt: str, messages: list[dict] | None = None, max_tokens: int = 512, temperature: float = 0.0, timeout: int = 60):
    """
    Streaming chat completion.
    - Usage: async for chunk in call_llm_stream("prompt"):
    """
    try:
        async with httpx.AsyncClient(timeout=timeout) as client:
            url = MAIN_HTTP_URL
            body = {
                "model": MAIN_MODEL_NAME,
                "messages": _merge_messages_and_prompt(messages, prompt),
                "max_tokens": max_tokens,
                "temperature": temperature,
                "stream": True,
            }
            headers = {
                "Content-Type": "application/json",
                "Authorization": f"Bearer {MAIN_HTTP_KEY}",
            }
            async with client.stream("POST", url, headers=headers, json=body) as resp:
                resp.raise_for_status()
                # The server replies with Server-Sent Events: each payload line is
                # "data: {...json...}", and the stream ends with "data: [DONE]".
                async for raw_line in resp.aiter_lines():
                    if not raw_line:
                        continue
                    line = raw_line.strip()
                    if line.startswith("data:"):
                        payload = line[len("data:"):].strip()
                        if payload in ("[DONE]", ""):
                            break
                        try:
                            data = json.loads(payload)
                        except Exception:
                            # Not JSON; pass the raw payload through.
                            yield payload
                            continue
                        yield _get_content(data)
                        continue
                    yield line
    except Exception as e:
        print(f"call_llm_stream[ERROR]: {e}")
        return
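
# A minimal streaming sketch (same server assumptions as call_llm); chunks are
# printed as they arrive and also collected into the full reply:
#
#   async def demo():
#       parts = []
#       async for chunk in call_llm_stream("Tell me a short joke."):
#           print(chunk, end="", flush=True)
#           parts.append(chunk)
#       return "".join(parts)
#
#   full_reply = asyncio.run(demo())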

async def get_embedding(text, timeout: int = 30):
    """Fetch the embedding vector for `text`; returns [] on error."""
    try:
        async with httpx.AsyncClient(timeout=timeout) as client:
            url = EMBEDDING_HTTP_URL
            body = {
                "model": EMBEDDING_MODEL_NAME,
                "input": [text],
            }
            headers = {
                "Content-Type": "application/json",
                "Authorization": f"Bearer {EMBEDDING_HTTP_KEY}",
            }
            res = await client.post(url, headers=headers, json=body)
            res.raise_for_status()
            data = res.json()
            # OpenAI-style embeddings response: {"data": [{"embedding": [...]}, ...]}
            return data["data"][0]["embedding"]
    except Exception as e:
        print(f"get_embedding[ERROR]: {e}")
        return []
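
# A minimal sketch of the typical downstream use: comparing two embeddings by
# cosine similarity. `cosine_similarity` is an illustrative helper, not part of
# this module's API.
#
#   import math
#
#   def cosine_similarity(a: list[float], b: list[float]) -> float:
#       dot = sum(x * y for x, y in zip(a, b))
#       norm = math.sqrt(sum(x * x for x in a)) * math.sqrt(sum(y * y for y in b))
#       return dot / norm if norm else 0.0
#
#   async def similarity(text_a: str, text_b: str) -> float:
#       va, vb = await get_embedding(text_a), await get_embedding(text_b)
#       return cosine_similarity(va, vb)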

if __name__ == "__main__":
    # Defaults for local testing (only applied when the variables are not already set).
    os.environ.setdefault("MAIN_HTTP_URL", "http://localhost:8022/v1/chat/completions")
    os.environ.setdefault("MAIN_HTTP_KEY", "sk-local-827ccb0eea8a706c4c34a16891f84e7b")
    os.environ.setdefault("MAIN_MODEL_NAME", "Qwen2.5")
    os.environ.setdefault("EMBEDDING_HTTP_URL", "http://localhost:8023/v1/embeddings")
    os.environ.setdefault("EMBEDDING_HTTP_KEY", "sk-local-827ccb0eea8a706c4c34a16891f84e7b")
    os.environ.setdefault("EMBEDDING_MODEL_NAME", "Qwen3-Embedding")
    # Re-read so the module-level globals pick up any defaults applied above.
    MAIN_HTTP_URL = os.environ.get("MAIN_HTTP_URL")
    MAIN_HTTP_KEY = os.environ.get("MAIN_HTTP_KEY")
    MAIN_MODEL_NAME = os.environ.get("MAIN_MODEL_NAME")
    EMBEDDING_HTTP_URL = os.environ.get("EMBEDDING_HTTP_URL")
    EMBEDDING_HTTP_KEY = os.environ.get("EMBEDDING_HTTP_KEY")
    EMBEDDING_MODEL_NAME = os.environ.get("EMBEDDING_MODEL_NAME")
    test_prompt = "你好,你是谁,有什么功能,中文回复"  # "Hello, who are you, what can you do? Reply in Chinese."
    # print("--- plain chat ---")
    # text = asyncio.run(call_llm(test_prompt))
    # print(text)
    # print("--- streaming chat ---")
    # async def _test_stream():
    #     try:
    #         async for chunk in call_llm_stream(test_prompt):
    #             print(chunk, end="", flush=True)
    #     except Exception as e:
    #         print(f"_test_stream[ERROR]: {e}")
    # asyncio.run(_test_stream())
    print("--- embedding ---")
    async def _test_embedding():
        res = await get_embedding(test_prompt)
        print(res)
    asyncio.run(_test_embedding())