Mirror of https://github.com/PaddlePaddle/FastDeploy.git, synced 2026-04-23 00:17:25 +08:00.
Support MXFP4 for GPT-OSS (#5435)
Changes: support mxfp4 in gpt-oss; support mxfp4 in gpt-oss; add scope for flashinfer; remove torch code; update envs.FD_MXFP4_BACKEND; update process_weights_after_loading; update env name; support tp in gpt-oss and add e2e test; add flashinfer-python-paddle to requirements; fix import error; add tests.
This commit is contained in:
@@ -23,6 +23,7 @@ from paddle import Tensor, nn
|
||||
from paddle.framework import in_dynamic_mode
|
||||
from scipy.linalg import block_diag
|
||||
|
||||
from fastdeploy.config import FDConfig
|
||||
from fastdeploy.platforms import current_platform
|
||||
|
||||
if current_platform.is_cuda() and current_platform.available():
|
||||
@@ -519,3 +520,20 @@ def vocab_range_from_per_partition_vocab_size(per_partition_vocab_size: int, ran
|
||||
def vocab_range_from_global_vocab_size(global_vocab_size: int, rank: int, world_size: int, offset: int = 0):
    """Compute the vocab index range owned by ``rank`` for an evenly split vocabulary.

    The global vocabulary is divided evenly across ``world_size`` partitions
    and the per-rank range is delegated to
    ``vocab_range_from_per_partition_vocab_size``.
    """
    partition_size = divide(global_vocab_size, world_size)
    return vocab_range_from_per_partition_vocab_size(partition_size, rank, offset=offset)
|
||||
|
||||
|
||||
def modules_to_convert(prefix: str, fd_config: "FDConfig") -> bool:
    """Return True if the module named ``prefix`` should be quantized (converted).

    A module is excluded (returns False) when the quantization config's
    ``modules_to_not_convert`` list contains a glob pattern that matches
    either ``prefix`` itself or any of its submodules (``pattern + ".*"``).

    Args:
        prefix: Dotted module path, e.g. ``"model.layers.0.mlp"``.
        fd_config: Config object; only ``model_config.quantization_config``
            (a mapping or None) is read.

    Returns:
        bool: True when the module should be converted, False otherwise.
    """
    import fnmatch

    # Equivalent to the original hasattr(...) and `is not None` pair.
    quant_config = getattr(fd_config.model_config, "quantization_config", None)
    if quant_config is None:
        # Bug fix: the original fell off the end of the function on this path
        # and implicitly returned None. Keep the falsy result, but make it an
        # explicit bool so all paths return True/False.
        return False
    if "modules_to_not_convert" not in quant_config:
        # Quantization configured with no exclusion list: convert everything.
        return True
    for pattern in quant_config["modules_to_not_convert"]:
        # A pattern excludes the module itself or any descendant of it.
        if fnmatch.fnmatch(prefix, pattern) or fnmatch.fnmatch(prefix, pattern + ".*"):
            return False
    return True
|
||||
|
||||
Reference in New Issue
Block a user