feat: add pytest

李如威 2025-11-24 14:31:50 +08:00
parent 2c63a030e5
commit 00038b679a
8 changed files with 431 additions and 37 deletions

0
src/sample_ai/db/db.py Normal file

7
(new file; path not shown in this view)
@@ -0,0 +1,7 @@
from sample_ai.pocketflow import AsyncFlow
from sample_ai.nodes import ChunkDocumentsNode

def create_online_flow():
    chunk_node = ChunkDocumentsNode()
    flow = AsyncFlow(chunk_node)
    return flow
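
A minimal way to exercise this flow (a sketch, assuming create_online_flow is importable and that the shared dict carries a "texts" list, which is the contract ChunkDocumentsNode reads and writes):

import asyncio

shared = {"texts": ["a long document that should be split into 100-character chunks ..."]}
flow = create_online_flow()
asyncio.run(flow.run_async(shared))
print(shared["texts"])  # now a flat list of chunk strings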

97
src/sample_ai/nodes.py Normal file

@@ -0,0 +1,97 @@
from sample_ai.pocketflow import AsyncBatchNode, AsyncFlow, Node
from sample_ai.utils import fixed_size_chunk
import asyncio
import json
import re
import chromadb  # imported but not used yet in this file

# Nodes for the offline flow
class ChunkDocumentsNode(AsyncBatchNode):
    async def prep_async(self, shared):
        """Read texts from shared store"""
        return shared["texts"]

    async def exec_async(self, text):
        """Chunk a single text into smaller pieces"""
        # Normalize all whitespace (tabs, newlines, repeated spaces) to single spaces
        text = re.sub(r"\s+", " ", text)
        # Trim leading/trailing whitespace
        text = text.strip()
        return fixed_size_chunk(text, chunk_size=100)

    async def post_async(self, shared, prep_res, exec_res_list):
        """Store chunked texts in the shared store"""
        # Flatten the list of lists into a single list of chunks
        all_chunks = []
        for chunks in exec_res_list:
            all_chunks.extend(chunks)
        # Replace the original texts with the flat list of chunks
        print(f"all_chunks: {json.dumps(all_chunks, indent=2)}")
        shared["texts"] = all_chunks
        print(f"✅ Created {len(all_chunks)} chunks from {len(prep_res)} documents")
        return "default"
class CreateIndexNode(Node):
    def prep(self, shared):
        """Get embeddings from shared store"""
        return shared["embeddings"]

    def exec(self, embeddings):
        """Create the index (placeholder: returns an empty list instead of a real FAISS index)"""
        return []

    def post(self, shared, prep_res, exec_res):
        """Store the index in shared store"""
        shared["index"] = exec_res
        print(f"✅ Index created with {len(exec_res)} vectors")  # len(), since exec returns a plain list
        return "default"
if __name__ == "__main__":
    async def _do():
        shared = {
            "texts": [
                # PocketFlow framework
                """Pocket Flow is a 100-line minimalist LLM framework
                Lightweight: Just 100 lines. Zero bloat, zero dependencies, zero vendor lock-in.
                Expressive: Everything you love - (Multi-)Agents, Workflow, RAG, and more.
                Agentic Coding: Let AI Agents (e.g., Cursor AI) build Agents - 10x productivity boost!
                To install, pip install pocketflow or just copy the source code (only 100 lines).""",
                # Fictional medical device
                """NeurAlign M7 is a revolutionary non-invasive neural alignment device.
                Targeted magnetic resonance technology increases neuroplasticity in specific brain regions.
                Clinical trials showed 72% improvement in PTSD treatment outcomes.
                Developed by Cortex Medical in 2024 as an adjunct to standard cognitive therapy.
                Portable design allows for in-home use with remote practitioner monitoring.""",
                # Made-up historical event
                """The Velvet Revolution of Caldonia (1967-1968) ended Generalissimo Verak's 40-year rule.
                Led by poet Eliza Markovian through underground literary societies.
                Culminated in the Great Silence Protest with 300,000 silent protesters.
                First democratic elections held in March 1968 with 94% voter turnout.
                Became a model for non-violent political transitions in neighboring regions.""",
                # Fictional technology
                """Q-Mesh is QuantumLeap Technologies' instantaneous data synchronization protocol.
                Utilizes directed acyclic graph consensus for 500,000 transactions per second.
                Consumes 95% less energy than traditional blockchain systems.
                Adopted by three central banks for secure financial data transfer.
                Released in February 2024 after five years of development in stealth mode.""",
                # Made-up scientific research
                """Harlow Institute's Mycelium Strain HI-271 removes 99.7% of PFAS from contaminated soil.
                Engineered fungi create symbiotic relationships with native soil bacteria.
                Breaks down "forever chemicals" into non-toxic compounds within 60 days.
                Field tests successfully remediated previously permanently contaminated industrial sites.
                Deployment costs 80% less than traditional chemical extraction methods.""",
            ]
        }
        chunk_node = ChunkDocumentsNode()
        flow = AsyncFlow(chunk_node)
        await flow.run_async(shared)
    asyncio.run(_do())
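
CreateIndexNode reads shared["embeddings"], which nothing in this file sets yet. A sketch of how the offline pipeline could be wired with the >> operator, with a hypothetical EmbedNode in between (illustrative only, not part of the commit):

from sample_ai.nodes import ChunkDocumentsNode, CreateIndexNode
from sample_ai.pocketflow import AsyncFlow, AsyncParallelBatchNode
from sample_ai.utils import get_embedding

class EmbedNode(AsyncParallelBatchNode):  # hypothetical stage
    async def prep_async(self, shared):
        return shared["texts"]

    async def exec_async(self, chunk):
        return await get_embedding(chunk)  # one vector per chunk, fetched concurrently

    async def post_async(self, shared, prep_res, exec_res_list):
        shared["embeddings"] = list(exec_res_list)
        return "default"

chunk_node, embed_node, index_node = ChunkDocumentsNode(), EmbedNode(), CreateIndexNode()
chunk_node >> embed_node >> index_node  # default-action transitions between stages
offline_flow = AsyncFlow(chunk_node)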

197
src/sample_ai/pocketflow.py Normal file

@@ -0,0 +1,197 @@
import asyncio, warnings, copy, time

class BaseNode:
    def __init__(self):
        self.params, self.successors = {}, {}

    def set_params(self, params):
        self.params = params

    def next(self, node, action="default"):
        if action in self.successors:
            warnings.warn(f"Overwriting successor for action '{action}'")
        self.successors[action] = node
        return node

    def prep(self, shared):
        pass

    def exec(self, prep_res):
        pass

    def post(self, shared, prep_res, exec_res):
        pass

    def _exec(self, prep_res):
        return self.exec(prep_res)

    def _run(self, shared):
        p = self.prep(shared)
        e = self._exec(p)
        return self.post(shared, p, e)

    def run(self, shared):
        if self.successors:
            warnings.warn("Node won't run successors. Use Flow.")
        return self._run(shared)

    def __rshift__(self, other):
        # `a >> b` wires b as a's default successor
        return self.next(other)

    def __sub__(self, action):
        # `a - "action" >> b` wires b as the successor for that action string
        if isinstance(action, str):
            return _ConditionalTransition(self, action)
        raise TypeError("Action must be a string")

class _ConditionalTransition:
    def __init__(self, src, action):
        self.src, self.action = src, action

    def __rshift__(self, tgt):
        return self.src.next(tgt, self.action)

class Node(BaseNode):
    def __init__(self, max_retries=1, wait=0):
        super().__init__()
        self.max_retries, self.wait = max_retries, wait

    def exec_fallback(self, prep_res, exc):
        raise exc

    def _exec(self, prep_res):
        # Retry exec up to max_retries times, falling back on the last failure
        for self.cur_retry in range(self.max_retries):
            try:
                return self.exec(prep_res)
            except Exception as e:
                if self.cur_retry == self.max_retries - 1:
                    return self.exec_fallback(prep_res, e)
                if self.wait > 0:
                    time.sleep(self.wait)

class BatchNode(Node):
    def _exec(self, items):
        return [super(BatchNode, self)._exec(i) for i in (items or [])]

class Flow(BaseNode):
    def __init__(self, start=None):
        super().__init__()
        self.start_node = start

    def start(self, start):
        self.start_node = start
        return start

    def get_next_node(self, curr, action):
        nxt = curr.successors.get(action or "default")
        if not nxt and curr.successors:
            warnings.warn(f"Flow ends: '{action}' not found in {list(curr.successors)}")
        return nxt

    def _orch(self, shared, params=None):
        # Walk the successor graph from start_node, feeding each node's action into the next lookup
        curr, p, last_action = copy.copy(self.start_node), (params or {**self.params}), None
        while curr:
            curr.set_params(p)
            last_action = curr._run(shared)
            curr = copy.copy(self.get_next_node(curr, last_action))
        return last_action

    def _run(self, shared):
        p = self.prep(shared)
        o = self._orch(shared)
        return self.post(shared, p, o)

    def post(self, shared, prep_res, exec_res):
        return exec_res

class BatchFlow(Flow):
    def _run(self, shared):
        pr = self.prep(shared) or []
        for bp in pr:
            self._orch(shared, {**self.params, **bp})
        return self.post(shared, pr, None)

class AsyncNode(Node):
    async def prep_async(self, shared):
        pass

    async def exec_async(self, prep_res):
        pass

    async def exec_fallback_async(self, prep_res, exc):
        raise exc

    async def post_async(self, shared, prep_res, exec_res):
        pass

    async def _exec(self, prep_res):
        for self.cur_retry in range(self.max_retries):
            try:
                return await self.exec_async(prep_res)
            except Exception as e:
                if self.cur_retry == self.max_retries - 1:
                    return await self.exec_fallback_async(prep_res, e)
                if self.wait > 0:
                    await asyncio.sleep(self.wait)

    async def run_async(self, shared):
        if self.successors:
            warnings.warn("Node won't run successors. Use AsyncFlow.")
        return await self._run_async(shared)

    async def _run_async(self, shared):
        p = await self.prep_async(shared)
        e = await self._exec(p)
        return await self.post_async(shared, p, e)

    def _run(self, shared):
        raise RuntimeError("Use run_async.")

class AsyncBatchNode(AsyncNode, BatchNode):
    async def _exec(self, items):
        return [await super(AsyncBatchNode, self)._exec(i) for i in items]

class AsyncParallelBatchNode(AsyncNode, BatchNode):
    async def _exec(self, items):
        return await asyncio.gather(*(super(AsyncParallelBatchNode, self)._exec(i) for i in items))

class AsyncFlow(Flow, AsyncNode):
    async def _orch_async(self, shared, params=None):
        # Same orchestration as Flow, but awaits async nodes and falls back to _run for sync ones
        curr, p, last_action = copy.copy(self.start_node), (params or {**self.params}), None
        while curr:
            curr.set_params(p)
            last_action = await curr._run_async(shared) if isinstance(curr, AsyncNode) else curr._run(shared)
            curr = copy.copy(self.get_next_node(curr, last_action))
        return last_action

    async def _run_async(self, shared):
        p = await self.prep_async(shared)
        o = await self._orch_async(shared)
        return await self.post_async(shared, p, o)

    async def post_async(self, shared, prep_res, exec_res):
        return exec_res

class AsyncBatchFlow(AsyncFlow, BatchFlow):
    async def _run_async(self, shared):
        pr = await self.prep_async(shared) or []
        for bp in pr:
            await self._orch_async(shared, {**self.params, **bp})
        return await self.post_async(shared, pr, None)

class AsyncParallelBatchFlow(AsyncFlow, BatchFlow):
    async def _run_async(self, shared):
        pr = await self.prep_async(shared) or []
        await asyncio.gather(*(self._orch_async(shared, {**self.params, **bp}) for bp in pr))
        return await self.post_async(shared, pr, None)
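
For orientation, a small sketch of how these primitives compose (the node names are made up for illustration):

class LoadNode(Node):
    def exec(self, prep_res):
        return "payload"

    def post(self, shared, prep_res, exec_res):
        shared["data"] = exec_res
        return "loaded"  # the returned action string picks the successor

class ProcessNode(Node):
    def exec(self, prep_res):
        print("processing")

load, process = LoadNode(), ProcessNode()
load - "loaded" >> process  # conditional transition; plain `load >> process` wires "default"
flow = Flow(start=load)
flow.run({})  # the flow walks the successor graph until no action matches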

src/sample_ai/utils.py

@@ -1,18 +1,16 @@
import logging

import httpx
import asyncio
import os
import json

logger = logging.getLogger(__name__)

VLLM_HTTP_KEY = os.environ.get("VLLM_HTTP_KEY")
VLLM_HTTP_URL = os.environ.get("VLLM_HTTP_URL")
VLLM_CHAT_MODEL_NAME = os.environ.get("VLLM_CHAT_MODEL_NAME")
MAIN_HTTP_URL = os.environ.get("MAIN_HTTP_URL")
MAIN_HTTP_KEY = os.environ.get("MAIN_HTTP_KEY")
MAIN_MODEL_NAME = os.environ.get("MAIN_MODEL_NAME")

def _headers():
    h = {"Content-Type": "application/json"}
    if VLLM_HTTP_KEY:
        h["Authorization"] = f"Bearer {VLLM_HTTP_KEY}"
    return h

EMBEDDING_HTTP_URL = os.environ.get("EMBEDDING_HTTP_URL")
EMBEDDING_HTTP_KEY = os.environ.get("EMBEDDING_HTTP_KEY")
EMBEDDING_MODEL_NAME = os.environ.get("EMBEDDING_MODEL_NAME")

def _merge_messages_and_prompt(messages: list[dict] = [], prompt: str = ""):
@@ -45,18 +43,29 @@ def _get_content(resp: dict) -> str:
    return ""

async def call_llm(prompt: str, messages = [], max_tokens: int = 512, temperature: float = 0.0, timeout: int = 30):

def fixed_size_chunk(text, chunk_size=2000):
    # Split text into consecutive chunks of at most chunk_size characters
    chunks = []
    for i in range(0, len(text), chunk_size):
        chunks.append(text[i : i + chunk_size])
    return chunks
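# e.g. fixed_size_chunk("abcdefghij", chunk_size=4) -> ["abcd", "efgh", "ij"]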

async def call_llm(prompt: str, messages=[], max_tokens: int = 512, temperature: float = 0.0, timeout: int = 30):
    """Simple chat completion"""
    try:
        async with httpx.AsyncClient(timeout=timeout) as client:
            url = VLLM_HTTP_URL.rstrip("/") + "/v1/chat/completions"
            url = MAIN_HTTP_URL
            body = {
                "model": VLLM_CHAT_MODEL_NAME,
                "model": MAIN_MODEL_NAME,
                "messages": _merge_messages_and_prompt(messages, prompt),
                "max_tokens": max_tokens,
                "temperature": temperature,
            }
            res = await client.post(url, headers=_headers(), json=body)
            headers = {
                "Content-Type": "application/json",
                "Authorization": f"Bearer {MAIN_HTTP_KEY}",
            }
            res = await client.post(url, headers=headers, json=body)
            res.raise_for_status()
            data = res.json()
            return _get_content(data)
@@ -65,29 +74,33 @@ async def call_llm(prompt: str, messages = [], max_tokens: int = 512, temperatur
        return ""

async def call_llm_stream(prompt: str, messages = [], max_tokens: int = 512, temperature: float = 0.0, timeout: int = 60):

async def call_llm_stream(prompt: str, messages=[], max_tokens: int = 512, temperature: float = 0.0, timeout: int = 60):
    """
    Streaming chat
    - usage: async for chunk in call_llm_stream("prompt"):
    """
    try:
        async with httpx.AsyncClient(timeout=timeout) as client:
            url = (VLLM_HTTP_URL or "").rstrip("/") + "/v1/chat/completions"
            url = MAIN_HTTP_URL
            body = {
                "model": VLLM_CHAT_MODEL_NAME,
                "model": MAIN_MODEL_NAME,
                "messages": _merge_messages_and_prompt(messages or [], prompt),
                "max_tokens": max_tokens,
                "temperature": temperature,
                "stream": True,
            }
            async with client.stream("POST", url, headers=_headers(), json=body) as resp:
            headers = {
                "Content-Type": "application/json",
                "Authorization": f"Bearer {MAIN_HTTP_KEY}",
            }
            async with client.stream("POST", url, headers=headers, json=body) as resp:
                resp.raise_for_status()
                async for raw_line in resp.aiter_lines():
                    if not raw_line:
                        continue
                    line = raw_line.strip()
                    if line.startswith("data:"):
                        payload = line[len("data:"):].strip()
                        payload = line[len("data:") :].strip()
                        data = {}
                        if payload in ("[DONE]", ""):
                            break
@@ -105,31 +118,62 @@ async def call_llm_stream(prompt: str, messages = [], max_tokens: int = 512, tem
        return

async def get_embedding(text, timeout: int = 30):
    try:
        async with httpx.AsyncClient(timeout=timeout) as client:
            url = EMBEDDING_HTTP_URL
            body = {
                "model": EMBEDDING_MODEL_NAME,
                "input": [text],
            }
            headers = {
                "Content-Type": "application/json",
                "Authorization": f"Bearer {EMBEDDING_HTTP_KEY}",
            }
            res = await client.post(url, headers=headers, json=body)
            res.raise_for_status()
            data = res.json()
            return data["data"][0]["embedding"]
    except Exception as e:
        print(f"get_embedding[ERROR]: {e}")
        return []
if __name__ == "__main__":
    # Setup: default endpoints for a quick local test
    os.environ.setdefault("VLLM_HTTP_URL", "http://localhost:8022")
    os.environ.setdefault("VLLM_HTTP_KEY", "sk-local-827ccb0eea8a706c4c34a16891f84e7b")
    os.environ.setdefault("VLLM_CHAT_MODEL_NAME", "Qwen2.5-0.5B-Instruct")
    os.environ.setdefault("MAIN_HTTP_URL", "http://localhost:8022/v1/chat/completions")
    os.environ.setdefault("MAIN_HTTP_KEY", "sk-local-827ccb0eea8a706c4c34a16891f84e7b")
    os.environ.setdefault("MAIN_MODEL_NAME", "Qwen2.5")
    os.environ.setdefault("EMBEDDING_HTTP_URL", "http://localhost:8023/v1/embeddings")
    os.environ.setdefault("EMBEDDING_HTTP_KEY", "sk-local-827ccb0eea8a706c4c34a16891f84e7b")
    os.environ.setdefault("EMBEDDING_MODEL_NAME", "Qwen3-Embedding")
    # Re-read the variables now that the defaults above are applied
    VLLM_HTTP_KEY = os.environ.get("VLLM_HTTP_KEY")
    VLLM_HTTP_URL = os.environ.get("VLLM_HTTP_URL")
    VLLM_CHAT_MODEL_NAME = os.environ.get("VLLM_CHAT_MODEL_NAME")
    MAIN_HTTP_URL = os.environ.get("MAIN_HTTP_URL")
    MAIN_HTTP_KEY = os.environ.get("MAIN_HTTP_KEY")
    MAIN_MODEL_NAME = os.environ.get("MAIN_MODEL_NAME")
    EMBEDDING_HTTP_URL = os.environ.get("EMBEDDING_HTTP_URL")
    EMBEDDING_HTTP_KEY = os.environ.get("EMBEDDING_HTTP_KEY")
    EMBEDDING_MODEL_NAME = os.environ.get("EMBEDDING_MODEL_NAME")
    test_prompt = "你好,你是谁,有什么功能,中文回复"  # "Hello, who are you, what can you do? Reply in Chinese."
    print("--- plain chat ---")
    text = asyncio.run(call_llm(test_prompt))
    print(text)
    # print("--- plain chat ---")
    # text = asyncio.run(call_llm(test_prompt))
    # print(text)
    print("--- chat finished ---\n")
    # print("--- streaming chat ---")
    # async def _test_stream():
    #     try:
    #         async for chunk in call_llm_stream(test_prompt):
    #             print(chunk, end="", flush=True)
    #     except Exception as e:
    #         print(f"_test_stream[ERROR]: {e}")
    # asyncio.run(_test_stream())
    print("--- streaming chat ---")
    print("--- embedding ---")
    async def _test_embedding():
        res = await get_embedding(test_prompt)
        print(res)
    async def _test_stream():
        try:
            async for chunk in call_llm_stream(test_prompt):
                print(chunk, end="", flush=True)
            print("\n--- streaming finished ---")
        except Exception as e:
            print(f"_test_stream[ERROR]: {e}")
    asyncio.run(_test_stream())
    asyncio.run(_test_embedding())
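
One small hardening worth flagging (a suggestion, not part of this commit): the messages=[] defaults above are mutable objects shared across calls; the usual idiom is a None default:

async def call_llm(prompt: str, messages=None, max_tokens: int = 512,
                   temperature: float = 0.0, timeout: int = 30):
    messages = messages or []  # fresh list per call, avoiding the shared-default pitfall
    ...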

0
src/tests/__init__.py Normal file

49
src/tests/test_nodes.py Normal file

@@ -0,0 +1,49 @@
import pytest
import asyncio
from sample_ai.nodes import ChunkDocumentsNode
from sample_ai.pocketflow import AsyncFlow

def test_chunk():
    async def _do():
        shared = {
            "texts": [
                # PocketFlow framework
                """Pocket Flow is a 100-line minimalist LLM framework
                Lightweight: Just 100 lines. Zero bloat, zero dependencies, zero vendor lock-in.
                Expressive: Everything you love - (Multi-)Agents, Workflow, RAG, and more.
                Agentic Coding: Let AI Agents (e.g., Cursor AI) build Agents - 10x productivity boost!
                To install, pip install pocketflow or just copy the source code (only 100 lines).""",
                # Fictional medical device
                """NeurAlign M7 is a revolutionary non-invasive neural alignment device.
                Targeted magnetic resonance technology increases neuroplasticity in specific brain regions.
                Clinical trials showed 72% improvement in PTSD treatment outcomes.
                Developed by Cortex Medical in 2024 as an adjunct to standard cognitive therapy.
                Portable design allows for in-home use with remote practitioner monitoring.""",
                # Made-up historical event
                """The Velvet Revolution of Caldonia (1967-1968) ended Generalissimo Verak's 40-year rule.
                Led by poet Eliza Markovian through underground literary societies.
                Culminated in the Great Silence Protest with 300,000 silent protesters.
                First democratic elections held in March 1968 with 94% voter turnout.
                Became a model for non-violent political transitions in neighboring regions.""",
                # Fictional technology
                """Q-Mesh is QuantumLeap Technologies' instantaneous data synchronization protocol.
                Utilizes directed acyclic graph consensus for 500,000 transactions per second.
                Consumes 95% less energy than traditional blockchain systems.
                Adopted by three central banks for secure financial data transfer.
                Released in February 2024 after five years of development in stealth mode.""",
                # Made-up scientific research
                """Harlow Institute's Mycelium Strain HI-271 removes 99.7% of PFAS from contaminated soil.
                Engineered fungi create symbiotic relationships with native soil bacteria.
                Breaks down "forever chemicals" into non-toxic compounds within 60 days.
                Field tests successfully remediated previously permanently contaminated industrial sites.
                Deployment costs 80% less than traditional chemical extraction methods.""",
            ]
        }
        chunk_node = ChunkDocumentsNode()
        flow = AsyncFlow(chunk_node)
        await flow.run_async(shared)
    asyncio.run(_do())
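
Since the commit's point is pytest, one possible refinement (an assumption; the commit does not add the plugin) is to let pytest-asyncio drive the coroutine instead of wrapping it in asyncio.run, and to assert on the output so a regression actually fails the test:

import pytest
from sample_ai.nodes import ChunkDocumentsNode
from sample_ai.pocketflow import AsyncFlow

@pytest.mark.asyncio  # requires the pytest-asyncio plugin
async def test_chunk_sizes():
    shared = {"texts": ["x" * 250]}  # one 250-character document
    await AsyncFlow(ChunkDocumentsNode()).run_async(shared)
    assert shared["texts"] == ["x" * 100, "x" * 100, "x" * 50]  # chunk_size=100 in exec_async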