Support MXFP4 for GPT-OSS (#5435)

* support mxfp4 in gpt-oss

* support mxfp4 in gpt-oss

* add scope for flashinfer

* remove torch code

* update envs.FD_MXFP4_BACKEND

* update process_weights_after_loading

* update env name

* support tp in gpt-oss, add e2e test

* add flashinfer-python-paddle in requirements

* fix import error

* add test

* add test

* add test

* add test
This commit is contained in:
Haonan Luo
2026-01-22 14:21:01 +08:00
committed by GitHub
parent 309c7d9764
commit 82057cb71f
13 changed files with 670 additions and 25 deletions
+2
View File
@@ -987,6 +987,8 @@ def initialize_fd_config(args, ranks: int = 1, local_rank: int = 0) -> FDConfig:
# Rank of this process within its expert-parallel group.
expert_parallel_rank = int(local_rank % parallel_config.expert_parallel_size)
# Resolve the total expert count from whichever config field is present,
# then pad with any redundant experts configured for EPLB:
#  - list-valued moe_num_experts: use the first entry
#    (presumably all entries agree — TODO confirm against config schema)
#  - num_local_experts set (e.g. GPT-OSS-style configs): use it directly
#  - otherwise moe_num_experts is assumed to be a plain integer
if isinstance(model_config.moe_num_experts, list):
    num_experts = model_config.moe_num_experts[0] + eplb_config.redundant_experts_num
elif hasattr(model_config, "num_local_experts") and model_config.num_local_experts is not None:
    num_experts = model_config.num_local_experts + eplb_config.redundant_experts_num
else:
    num_experts = model_config.moe_num_experts + eplb_config.redundant_experts_num
# Experts are sharded evenly across expert-parallel ranks; integer division
# assumes num_experts is divisible by expert_parallel_size — TODO confirm.
num_experts_per_rank = num_experts // parallel_config.expert_parallel_size