mirror of
https://github.com/PaddlePaddle/FastDeploy.git
synced 2026-04-23 00:17:25 +08:00
Support MXFP4 for GPT-OSS (#5435)
* support mxfp4 in gpt-oss * support mxfp4 in gpt-oss * add scope for flashinfer * remove torch code * update envs.FD_MXFP4_BACKEND * update process_weights_after_loading * update env name * support tp in gpt-oss, add e2e test * add flashinfer-python-paddle in requirements * fix import error * add test * add test * add test * add test
This commit is contained in:
@@ -987,6 +987,8 @@ def initialize_fd_config(args, ranks: int = 1, local_rank: int = 0) -> FDConfig:
    expert_parallel_rank = int(local_rank % parallel_config.expert_parallel_size)
    if isinstance(model_config.moe_num_experts, list):
        num_experts = model_config.moe_num_experts[0] + eplb_config.redundant_experts_num
    elif hasattr(model_config, "num_local_experts") and model_config.num_local_experts is not None:
        num_experts = model_config.num_local_experts + eplb_config.redundant_experts_num
    else:
        num_experts = model_config.moe_num_experts + eplb_config.redundant_experts_num
    num_experts_per_rank = num_experts // parallel_config.expert_parallel_size
Reference in New Issue
Block a user