362 changes: 362 additions & 0 deletions tests/engine/test_engine.py
@@ -0,0 +1,362 @@
# Copyright (c) 2025 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import time
import uuid
from types import SimpleNamespace

import numpy as np
import pytest

from fastdeploy.engine.engine import LLMEngine
from fastdeploy.utils import EngineError


def _make_cfg(**ov):
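    """Minimal config stand-in: a SimpleNamespace tree covering only the
    attributes LLMEngine reads; top-level fields are overridable via kwargs."""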
ns = SimpleNamespace
_j = lambda: "{}"
mc = ns(model="/fake", model_type="ernie", max_model_len=2048, num_hidden_layers=2, quantization="{}")
mc.runner, mc.convert, mc.override_pooler_config, mc.logprobs_mode = "default", None, None, "none"
mc.max_logprobs, mc.enable_logprob, mc.lm_head_fp32, mc.moe_gate_fp32 = 0, False, False, False
mc.enable_entropy, mc.model_impl = False, "default"
pc = ns(tensor_parallel_size=1, tensor_parallel_rank=0, device_ids="0", data_parallel_size=1)
pc.expert_parallel_size, pc.chunked_moe_size, pc.engine_worker_queue_port = 1, 0, [6778]
pc.enable_expert_parallel = pc.enable_chunked_moe = pc.disable_custom_all_reduce = False
pc.use_internode_ll_two_stage = pc.disable_sequence_parallel_moe = False
pc.shutdown_comm_group_if_worker_idle = False
pc.ep_prefill_use_worst_num_tokens = False
sc = ns(max_num_seqs=256, max_num_batched_tokens=4096, splitwise_role="mixed", name="local")
sc.enable_overlap_schedule = False
cc = ns(num_gpu_blocks_override=None, gpu_memory_utilization=0.9, block_size=16, enc_dec_block_num=0)
cc.enable_prefix_caching = cc.enable_chunked_prefill = False
cc.kv_cache_ratio, cc.kvcache_storage_backend, cc.num_cpu_blocks, cc.max_encoder_cache = 1.0, None, 0, 0
cc.cache_transfer_protocol, cc.total_block_num = "tcp", 100
lc = ns(load_strategy="auto", rsync_config={}, dynamic_load_weight=False, load_choices="auto")
soc = ns(guided_decoding_backend=None, logits_processors=None, reasoning_parser="none")
soc.disable_any_whitespace = False
cfg = ns(model_config=mc, parallel_config=pc, scheduler_config=sc, cache_config=cc, load_config=lc)
cfg.speculative_config = ns(model_type="main", to_json_string=_j)
cfg.graph_opt_config = cfg.early_stop_config = cfg.eplb_config = ns(to_json_string=_j)
cfg.routing_replay_config = cfg.plas_attention_config = ns(to_json_string=_j)
cfg.structured_outputs_config = soc
cfg.worker_num_per_node, cfg.master_ip, cfg.host_ip = 1, "127.0.0.1", "127.0.0.1"
cfg.ips, cfg.nnode, cfg.register_info, cfg.node_rank = None, 1, None, 0
cfg.print = lambda: None
for k, v in ov.items():
setattr(cfg, k, v)
return cfg


def _make_engine(**ov):
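    """Construct an LLMEngine without running __init__, attached to a stub
    EngineService and a config from _make_cfg; kwargs override config fields."""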
e = object.__new__(LLMEngine)
e.cfg = _make_cfg(**ov)
e.running, e.is_started, e.do_profile = True, False, 0
e.engine = SimpleNamespace(scheduler=SimpleNamespace(get_results=lambda: []))
e.guided_decoding_checker, e.ipc_signal_suffix = None, 6778
return e


def _make_request(token_count=10, max_tokens=100, min_tokens=0, stop_seqs_len=None, **ov):
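    """Request stub with prompt token ids, metrics, and get/set accessors;
    extra attributes are overridable via kwargs."""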
vals = {"max_tokens": max_tokens, "min_tokens": min_tokens, "request_id": "x", "stop_seqs_len": stop_seqs_len}
req = SimpleNamespace(prompt_token_ids=list(range(token_count)), prompt_token_ids_len=token_count)
req.need_prefill_tokens = token_count
req.metrics = SimpleNamespace(scheduler_recv_req_time=0, preprocess_start_time=0, preprocess_end_time=0)
req.get = lambda k: vals.get(k)
req.set = lambda k, v: setattr(req, k, v)
req.sampling_params = req.guided_json = req.guided_regex = req.guided_choice = None
req.structural_tag = req.guided_grammar = req.guided_json_object = None
for k, v in ov.items():
setattr(req, k, v)
return req


def _make_tokenizer(**kw):
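    """Tokenizer stub exposing the vocab/encode hooks the engine touches."""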
d = dict(vocab={"<pad>": 0, "hello": 1}, think_truncate_prompt="...", tokenize=lambda s: ["..."])
d["get_vocab"] = lambda: {"<think>": 5, "</think>": 6, "<|IMAGE_PLACEHOLDER|>": -1, "\n": 10}
d["encode"], d["convert_tokens_to_ids"] = (lambda s, add_special_tokens=False: [10]), (lambda t: [99])
d.update(kw)
return SimpleNamespace(**d)


class TestLLMEngineLifecycle:
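    """Engine startup, construction from engine args, shutdown, and profile-stop paths."""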
def test_start(self, monkeypatch):
ipc = lambda **kw: SimpleNamespace(
value=np.zeros(kw.get("array", np.zeros(1)).shape, dtype=kw.get("dtype", np.int32)), clear=lambda: None
)
monkeypatch.setattr("fastdeploy.engine.engine.IPCSignal", ipc)
monkeypatch.setattr("fastdeploy.engine.engine.current_platform.is_intel_hpu", lambda: False)
monkeypatch.setattr("fastdeploy.engine.engine.time.sleep", lambda s: None)
monkeypatch.setattr("fastdeploy.engine.engine.time.time", lambda: 1.0)
tok = _make_tokenizer()
dp = SimpleNamespace(tokenizer=tok, eos_token_id_len=1, pad_token_id=0)
e = _make_engine()
e.engine.start = lambda: None
e.engine.create_data_processor = lambda: None
e.engine.data_processor = dp
e.engine.start_zmq_service = lambda pid: None
e.engine.start_cache_service = lambda d, s: []
e.engine.mm_max_tokens_per_item = None
monkeypatch.setattr("fastdeploy.engine.engine.subprocess.Popen", lambda cmd, **kw: SimpleNamespace(pid=1))

# Simulate model loaded immediately and worker ready
def _fake_init_signals(self_arg=e):
self_arg.worker_ready_signal = SimpleNamespace(value=np.ones(1, dtype=np.int32), clear=lambda: None)
self_arg.loaded_model_signal = SimpleNamespace(value=np.array([1], dtype=np.int32), clear=lambda: None)

monkeypatch.setattr(LLMEngine, "_init_worker_signals", lambda s: _fake_init_signals(s))
monkeypatch.setattr(LLMEngine, "launch_components", lambda s: None)
monkeypatch.setattr(LLMEngine, "check_worker_initialize_status", lambda s: True)
monkeypatch.setattr("fastdeploy.engine.engine.envs.FD_ENABLE_INTERNAL_ADAPTER", False)
monkeypatch.setattr("fastdeploy.engine.engine.envs.ENABLE_V1_KVCACHE_SCHEDULER", True)
e.cfg.cache_config.num_gpu_blocks_override = 50
e.cfg.cache_config.num_cpu_blocks = 10
result = e.start(api_server_pid=999)
assert result is True
assert e.api_server_pid == 999

def test_from_engine_args(self, monkeypatch):
monkeypatch.setattr("fastdeploy.engine.engine.EngineService", lambda cfg: SimpleNamespace())
monkeypatch.setattr("fastdeploy.engine.engine.main_process_metrics.set_cache_config_info", lambda **kw: None)
monkeypatch.setattr("fastdeploy.engine.engine.tracing.trace_set_thread_info", lambda s: None)
args = SimpleNamespace(create_engine_config=lambda: _make_cfg())
assert LLMEngine.from_engine_args(args).do_profile == 1
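        # An explicit block override disables profiling.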
cfg2 = _make_cfg()
cfg2.cache_config.num_gpu_blocks_override = 100
assert LLMEngine.from_engine_args(SimpleNamespace(create_engine_config=lambda: cfg2)).do_profile == 0

def test_exit_sub_services(self, monkeypatch):
e = _make_engine()
e.worker_ready_signal = e.loaded_model_signal = SimpleNamespace(clear=lambda: None)
killed = []
monkeypatch.setattr("fastdeploy.engine.engine.os.getpgid", lambda pid: pid)
monkeypatch.setattr("fastdeploy.engine.engine.os.killpg", lambda pgid, sig: killed.append(pgid))
e.worker_proc = SimpleNamespace(pid=99)
_cm = SimpleNamespace(shm_cache_task_flag_broadcast=SimpleNamespace(clear=lambda: None))
_cm.cache_ready_signal = SimpleNamespace(clear=lambda: None)
e.engine.resource_manager = SimpleNamespace(cache_manager=_cm)
e.cache_manager_processes = [SimpleNamespace(pid=55)]
joined, closed = [], []
e.dp_processed = [SimpleNamespace(pid=77, join=lambda: joined.append(1))]
e.dp_engine_worker_queue_server = [SimpleNamespace(cleanup=lambda: None)]
e.zmq_server = SimpleNamespace(close=lambda: closed.append(1))
e.get_profile_block_num_signal = SimpleNamespace(clear=lambda: None)
e._exit_sub_services()
assert not e.running and 55 in killed and 99 in killed
assert len(joined) == 1 and len(closed) == 1

def test_stop_profile(self, monkeypatch):
e = _make_engine()
e.do_profile = 1
e.get_profile_block_num_signal = SimpleNamespace(value=np.array([100], dtype=np.int32))
reset_calls = []
e.engine.resource_manager = SimpleNamespace(reset_cache_config=lambda cfg: None)
e.cfg.cache_config = SimpleNamespace(reset=lambda n: reset_calls.append(n), enable_prefix_caching=False)
e.cfg.scheduler_config.splitwise_role = "mixed"
e._stop_profile()
assert e.do_profile == 0 and reset_calls == [100]
e2 = _make_engine()
e2.do_profile = 1
e2.get_profile_block_num_signal = SimpleNamespace(value=np.array([100], dtype=np.int32))
e2.engine.resource_manager = SimpleNamespace(reset_cache_config=lambda cfg: None)
e2.cfg.cache_config = SimpleNamespace(reset=lambda n: None, enable_prefix_caching=True)
e2.cfg.scheduler_config.splitwise_role = "mixed"
monkeypatch.setattr("fastdeploy.engine.engine.current_platform.is_intel_hpu", lambda: False)
e2.engine.start_cache_service = lambda d, s: [SimpleNamespace(pid=1)]
e2._stop_profile()
assert hasattr(e2, "cache_manager_processes")


class TestLLMEngineWorker:
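    """Worker signal setup, worker-service launch, component launch, and init-status polling."""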
def test_init_worker_signals(self, monkeypatch):
ipc = lambda **kw: SimpleNamespace(
value=np.zeros(kw.get("array", np.zeros(1)).shape, dtype=kw.get("dtype", np.int32)), clear=lambda: None
)
monkeypatch.setattr("fastdeploy.engine.engine.IPCSignal", ipc)
e = _make_engine()
e._init_worker_signals()
assert hasattr(e, "worker_ready_signal") and hasattr(e, "loaded_model_signal")
assert not hasattr(e, "launched_cache_manager_signal")
e2 = _make_engine()
e2.cfg.cache_config.enable_prefix_caching = True
e2._init_worker_signals()
assert hasattr(e2, "launched_cache_manager_signal")
e3 = _make_engine()
e3.cfg.parallel_config.data_parallel_size = 2
monkeypatch.setattr("fastdeploy.engine.engine.envs.FD_ENABLE_MULTI_API_SERVER", False)
e3._init_worker_signals()
assert hasattr(e3, "launched_expert_service_signal")
e4 = _make_engine()
e4.do_profile = 1
monkeypatch.setattr("fastdeploy.engine.engine.paddle.is_compiled_with_custom_device", lambda x: False)
e4._init_worker_signals()
assert hasattr(e4, "get_profile_block_num_signal")

def test_start_worker_service(self, monkeypatch):
captured = []
        def _popen(cmd, **kw):
            captured.append(cmd)
            return SimpleNamespace(pid=1)

monkeypatch.setattr("fastdeploy.engine.engine.subprocess.Popen", _popen)
monkeypatch.setattr("fastdeploy.engine.engine.current_platform.is_iluvatar", lambda: False)
e = _make_engine()
e.cfg.cache_config.num_gpu_blocks_override = 200
e.cfg.parallel_config.enable_expert_parallel = True
e.cfg.cache_config.enable_prefix_caching = True
e.cfg.cache_config.kvcache_storage_backend = "rocksdb"
tok = _make_tokenizer()
e.data_processor = SimpleNamespace(tokenizer=tok, eos_token_id_len=1, pad_token_id=0)
e.engine.data_processor = e.data_processor
e.engine.mm_max_tokens_per_item = None
e._start_worker_service()
cmd = captured[0]
assert "--max_model_len 2048" in cmd and "--enable_expert_parallel" in cmd and "--enable_prefix_caching" in cmd
assert "--num_gpu_blocks_override 200" in cmd and "--kvcache_storage_backend rocksdb" in cmd

def test_launch_components(self, monkeypatch):
e = _make_engine()
e.cfg.scheduler_config.splitwise_role = "prefill"
e.cfg.scheduler_config.name = "splitwise"
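        # A prefill role with the splitwise scheduler should spawn the receiver thread and start the scheduler.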
started = []
e.engine.split_connector = SimpleNamespace(start_receiver=lambda: None)
e.engine.scheduler = SimpleNamespace(start=lambda *a, **kw: started.append(1))
e.launch_components()
assert hasattr(e, "splitwise_receive_thread") and len(started) == 1

def test_check_worker_initialize_status(self, monkeypatch):
monkeypatch.setattr("fastdeploy.engine.engine.time.sleep", lambda s: None)
_th = lambda target, daemon: SimpleNamespace(start=lambda: target(), join=lambda **kw: None)
monkeypatch.setattr("fastdeploy.engine.engine.threading.Thread", _th)
_ctx = SimpleNamespace(n=0, update=lambda x: None, refresh=lambda: None)
_tq = type("T", (), {"__enter__": lambda s: _ctx, "__exit__": lambda s, *a: None})
monkeypatch.setattr("fastdeploy.engine.engine.tqdm", lambda total, desc: _tq())
# Success path with weight + layer loading progress
e = _make_engine()
e.worker_init_status = {}
e.worker_proc = SimpleNamespace(
stdout=iter([b"Loading checkpoint shards: 100\n", b"Start load layer 1\n"]),
poll=lambda: None,
)
e.worker_ready_signal = SimpleNamespace(value=np.ones(1, dtype=np.int32))
assert e.check_worker_initialize_status() is True
        # Failure path: poll() returns non-None while weights are still loading
e2 = _make_engine()
e2.worker_init_status = {}
e2.worker_proc = SimpleNamespace(stdout=iter([]), poll=lambda: 1)
e2.worker_ready_signal = SimpleNamespace(value=np.zeros(1, dtype=np.int32))
assert e2.check_worker_initialize_status() is False


class TestLLMEngineRequests:
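    """Request validation in add_requests, input formatting, and generate() paths."""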
def test_add_requests(self, monkeypatch):
monkeypatch.setattr("fastdeploy.engine.engine.Request.from_dict", lambda d: d["_req"])
e = _make_engine()
e.engine.data_processor = SimpleNamespace(process_request=lambda r, *a, **kw: r)
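        # Prompt alone (3000 tokens) exceeds max_model_len=2048.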
with pytest.raises(EngineError):
e.add_requests({"prompt": "x", "_req": _make_request(token_count=3000)})
# input_ids_len > max_model_len
with pytest.raises(EngineError):
e.add_requests({"prompt": "x", "_req": _make_request(token_count=2049)})
with pytest.raises(EngineError):
e.add_requests({"prompt": "x", "_req": _make_request(token_count=100, min_tokens=2000)})
monkeypatch.setattr("fastdeploy.engine.engine.envs.FD_MAX_STOP_SEQS_NUM", 10)
with pytest.raises(EngineError):
e.add_requests({"prompt": "x", "_req": _make_request(stop_seqs_len=list(range(200)))})
monkeypatch.setattr("fastdeploy.engine.engine.envs.FD_STOP_SEQS_MAX_LEN", 5)
with pytest.raises(EngineError):
e.add_requests({"prompt": "x", "_req": _make_request(stop_seqs_len=[20])})
with pytest.raises(EngineError):
e.add_requests({"prompt": "x", "_req": _make_request(guided_json='{"type":"object"}')})

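        # Happy path: the request reaches the scheduler and temperature 0.0 is bumped to 1e-06.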
put_calls = []
monkeypatch.setattr("fastdeploy.engine.engine.Request.from_dict", lambda d: _make_request())
monkeypatch.setattr("fastdeploy.engine.engine.asdict", lambda x: {"temperature": 0.0})
e.engine.scheduler = SimpleNamespace(put_requests=lambda reqs: put_calls.extend(reqs))
sp = SimpleNamespace(temperature=0.0)
e.add_requests({"prompt": "hi"}, sampling_params=sp)
assert len(put_calls) == 1 and sp.temperature == 1e-06

def test_format_and_add_data(self):
e = _make_engine()
e.add_requests = lambda t, **kw: None
prompts = {"prompt": "Hello"}
uuid.UUID(e._format_and_add_data(prompts))
assert prompts["max_tokens"] == 2048
assert e._format_and_add_data({"prompt": "Hi", "request_id": "my-id", "max_tokens": 50}) == "my-id"
roles = [("system", "H"), ("user", "Hi"), ("assistant", "Hey")]
ctx = {"context": [{"role": r, "utterance": u} for r, u in roles]}
e._format_and_add_data(ctx)
assert ctx["system"] == "H" and ctx["prompt"] == ["Hi", "Hey"]

def test_generate(self):
e = _make_engine()
e.add_requests = lambda t, **kw: None
e.engine.check_and_free_block_tables = lambda: None
_resp = SimpleNamespace(to_dict=lambda: {"outputs": {"text": "hi", "reasoning_content": ""}})
e.engine.data_processor = SimpleNamespace(process_response=lambda r: _resp)
# stream=True: one non-finished + one finished
results_s = [SimpleNamespace(finished=False), SimpleNamespace(finished=True)]
e._get_generated_tokens = lambda rid: iter(results_s)
out_s = list(e.generate({"prompt": "x"}, stream=True))
assert len(out_s) == 2 and out_s[1]["outputs"]["text"] == ""

# stream=False: offline path
e.engine.data_processor.process_response = lambda r: _resp
e._get_generated_tokens = lambda rid: iter([SimpleNamespace(finished=True)])
out = list(e.generate({"prompt": "x"}, stream=False))
assert len(out) == 1 and out[0]["outputs"]["text"] == "hi"
assert e._get_generated_result() == []
# Error path
        def _raise(*a, **kw):
            raise ValueError("bad")

        e.add_requests = _raise
with pytest.raises(EngineError):
list(e.generate({"prompt": "x"}, stream=False))


class TestLLMEngineUtils:
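    """Guided-input detection, environment-variable assembly, and health checks."""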
def test_has_guided_input(self):
e = _make_engine()
fields = "guided_json,guided_regex,guided_choice,structural_tag,guided_grammar,guided_json_object".split(",")
assert e._has_guided_input(SimpleNamespace(**{f: None for f in fields})) is False
for field in fields:
kw = {f: None for f in fields}
kw[field] = "value"
assert e._has_guided_input(SimpleNamespace(**kw)) is True

def test_setting_environ_variables(self, monkeypatch):
e = _make_engine()
result = e._setting_environ_variables()
assert "OMP_NUM_THREADS=" in result and "NCCL_ALGO=Ring" in result
assert "FLAGS_use_pd_disaggregation" not in result
e.cfg.scheduler_config.splitwise_role = "prefill"
assert "FLAGS_use_pd_disaggregation" in e._setting_environ_variables()
monkeypatch.setattr("fastdeploy.engine.engine.envs.ENABLE_V1_KVCACHE_SCHEDULER", True)
assert "FLAGS_use_pd_disaggregation_per_chunk" in e._setting_environ_variables()

def test_health_and_readiness(self):
e = _make_engine()
e.worker_ready_signal = SimpleNamespace(value=np.zeros(1, dtype=np.int32))
assert e._worker_processes_ready() is False
e.worker_ready_signal = SimpleNamespace(value=np.ones(1, dtype=np.int32))
assert e._worker_processes_ready() is True
e.cfg.worker_num_per_node = 3
e.worker_ready_signal = SimpleNamespace(value=np.array([1, 1, 0], dtype=np.int32))
assert e._worker_processes_ready() is False
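        # A 0.0 heartbeat still reports healthy (presumably "not yet reported").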
e.engine.worker_healthy_live_signal = SimpleNamespace(value=np.array([0.0]))
assert e.check_health()[0] is True
e.engine.worker_healthy_live_signal = SimpleNamespace(value=np.array([time.time()]))
assert e.check_health()[0] is True
e.engine.worker_healthy_live_signal = SimpleNamespace(value=np.array([time.time() - 60]))
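        # NB: the "threashold" spelling mirrors check_health's keyword argument.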
healthy, msg = e.check_health(time_interval_threashold=30)
assert healthy is False and "Not Healthy" in msg


if __name__ == "__main__":
pytest.main([__file__, "-v"])