[RL] change glm rope_emb calculation (#7316)

* change glm rope_emb calculation

* glm without EnforceFmulRN

* fix ci
Author: JYChen
Date: 2026-04-11 18:36:28 +08:00
Committed by: GitHub
Parent: fcf8b1336d
Commit: 076ab07528
5 changed files with 47 additions and 38 deletions
@@ -20,6 +20,7 @@ from typing import Optional, Tuple
 import paddle
 from paddle import nn
+from fastdeploy import envs
 from fastdeploy.config import ModelConfig
 from fastdeploy.platforms import current_platform
@@ -87,8 +88,13 @@ class GlmRotaryEmbedding:
     def __call__(self, position_ids):
         bsz, max_seq_len = position_ids.shape[:2]
-        inv_freq = self.base ** (-paddle.arange(0, self.rotary_dim, 2, dtype="float32") / self.rotary_dim)
-        freqs = paddle.einsum("ij,k->ijk", position_ids.cast("float32"), inv_freq)
+        if envs.FD_ENABLE_RL == 1:
+            idx = paddle.arange(0, self.rotary_dim, 2, dtype=paddle.int64).astype(paddle.float32)
+            inv_freq = 1.0 / (self.base ** (idx / self.rotary_dim))
+            freqs = paddle.outer(position_ids.astype(inv_freq.dtype), inv_freq)
+        else:
+            inv_freq = self.base ** (-paddle.arange(0, self.rotary_dim, 2, dtype="float32") / self.rotary_dim)
+            freqs = paddle.einsum("ij,k->ijk", position_ids.cast("float32"), inv_freq)
         # shape: [B, S, D/2]
         rot_emb = paddle.zeros((2, bsz, max_seq_len, 1, self.rotary_dim // 2), dtype="float32")
         emb = paddle.stack([freqs], axis=-1).reshape((bsz, max_seq_len, self.rotary_dim // 2))
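The two branches compute the same frequencies in different ways: the original path uses a negative-exponent `inv_freq` with `paddle.einsum`, while the RL path (`FD_ENABLE_RL == 1`) uses a reciprocal `inv_freq` with `paddle.outer`, which flattens its inputs before taking the outer product. Below is a minimal sketch, not part of the commit, that checks the two formulations agree once both are reshaped to `[bsz, seq, rotary_dim // 2]`; the `base`, `rotary_dim`, and batch/sequence sizes are illustrative assumptions.

```python
import paddle

base, rotary_dim = 10000.0, 64
position_ids = paddle.arange(8, dtype="int64").reshape([2, 4])  # [bsz=2, seq=4]

# Original path: negative-exponent inv_freq + einsum, giving [bsz, seq, rotary_dim // 2].
inv_freq_a = base ** (-paddle.arange(0, rotary_dim, 2, dtype="float32") / rotary_dim)
freqs_a = paddle.einsum("ij,k->ijk", position_ids.cast("float32"), inv_freq_a)

# RL path: reciprocal inv_freq + outer product. paddle.outer flattens
# position_ids to 1-D, so the result is [bsz * seq, rotary_dim // 2].
idx = paddle.arange(0, rotary_dim, 2, dtype=paddle.int64).astype(paddle.float32)
inv_freq_b = 1.0 / (base ** (idx / rotary_dim))
freqs_b = paddle.outer(position_ids.astype(inv_freq_b.dtype), inv_freq_b)

# Both reshape to the same tensor, up to floating-point rounding between
# base ** (-x) and 1.0 / base ** x.
print(paddle.allclose(freqs_a, freqs_b.reshape(freqs_a.shape)))  # True
```

Per the commit message, the outer-product formulation is tied to running GLM without EnforceFmulRN under RL mode; the sketch only verifies the numerical equivalence, not that motivation.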