[Feature] [PD Disaggregation] simplify configuration for pd-disaggregated deployment, and refactor post-init and usage for all ports (#5415)

* [feat] simplify configuration for pd-disaggregated deployment, and refactor post-init and usage for all ports

* [fix] fix some bugs

* [fix] fix rdma port for cache manager/messager

* [fix] temporarily disable port availability check to see if it can pass the CI test

* [feat] simplify args for multi api server

* [fix] fix dp

* [fix] fix port for xpu

* [fix] add tests for ports post processing & fix ci

* [test] fix test_multi_api_server

* [fix] fix rdma_comm_ports args for multi_api_server

* [fix] fix test_common_engine

* [fix] fix test_cache_transfer_manager

* [chore] automatically set FD_ENABLE_MULTI_API_SERVER

* [fix] prevent api server from creating engine_args twice

* [fix] fix test_run_batch

* [fix] fix test_metrics

* [fix] fix splitwise connector init

* [test] add test_rdma_transfer and test_expert_service

* [fix] fix code syntax

* [fix] fix test_rdma_transfer and build wheel with rdma script
This commit is contained in:
Yonghua Li
2025-12-17 15:50:42 +08:00
committed by GitHub
parent cdc0004894
commit 0c8c6369ed
34 changed files with 1323 additions and 409 deletions
+18 -11
View File
@@ -200,14 +200,26 @@ class TestCommonEngineAdditionalCoverage(unittest.TestCase):
and to drive specific code paths that were previously uncovered.
"""
def setUp(self):
patch("fastdeploy.engine.common_engine.EngineCacheQueue").start()
def _make_cfg(self, **kwargs):
# If DP > 1, we must provide enough engine_worker_queue_port for each dp index
dp = kwargs.get("data_parallel_size", 1)
nnode = len(kwargs.get("ips", ["127.0.0.1"]))
engine_worker_queue_port = int(os.getenv("FD_ENGINE_QUEUE_PORT", "6778"))
cache_queue_port = int(os.getenv("FD_CACHE_QUEUE_PORT", "6779"))
if dp and dp > 1:
engine_worker_queue_port = [engine_worker_queue_port + 20 + i for i in range(dp // nnode)]
cache_queue_port = [cache_queue_port + 20 + i for i in range(dp // nnode)]
args = EngineArgs(
model=MODEL_NAME,
max_model_len=128,
tensor_parallel_size=1,
# give unique ports to avoid collision with other tests
engine_worker_queue_port=str(int(os.getenv("FD_ENGINE_QUEUE_PORT", "6778")) + 20),
cache_queue_port=str(int(os.getenv("FD_CACHE_QUEUE_PORT", "6779")) + 20),
engine_worker_queue_port=engine_worker_queue_port,
cache_queue_port=cache_queue_port,
enable_prefix_caching=True,
**kwargs,
)
@@ -218,14 +230,7 @@ class TestCommonEngineAdditionalCoverage(unittest.TestCase):
# Always enable chunked prefill in tests to avoid another strict check
args.enable_chunked_prefill = True
# If DP > 1, we must provide enough engine_worker_queue_port for each dp index
dp = kwargs.get("data_parallel_size", args.data_parallel_size)
base = int(args.engine_worker_queue_port.split(",")[0])
if dp and dp > 1:
ports = ",".join(str(base + i) for i in range(dp))
args.engine_worker_queue_port = ports
return args.create_engine_config(port_availability_check=False)
return args.create_engine_config()
def _stub_processor(self):
class _Tok:
@@ -574,7 +579,9 @@ class TestCommonEngineAdditionalCoverage(unittest.TestCase):
def test_start_worker_service_cmd_build(self):
"""Cover 1517, 1526, 1568, 1592, 1595 by building the worker command with mocks."""
with patch("fastdeploy.config.get_host_ip", return_value="127.0.0.1"):
cfg = self._make_cfg(splitwise_role="mixed", num_gpu_blocks_override=4, ips=["127.0.0.1", "127.0.0.2"])
cfg = self._make_cfg(
splitwise_role="mixed", num_gpu_blocks_override=4, ips=["127.0.0.1", "127.0.0.2"], data_parallel_size=2
)
# Make model multi-modal so env var branch already covered above; here not required
cfg.structured_outputs_config.logits_processors = ["A", "B"]