mirror of
https://github.com/PaddlePaddle/FastDeploy.git
synced 2026-04-23 17:11:21 +08:00
[XPU] Refactor get_padding_offset to single kernel. (#7029)
* [XPU] Refactor get_padding_offset to a single kernel.
* Add unit test.
* Fix code style.
* Remove cum_offsets_now.
* Remove max_len.
This commit is contained in:
@@ -21,8 +21,7 @@ np.random.seed(2023)
|
||||
|
||||
max_len = 10
|
||||
seq_lens = np.array([4, 3, 6], "int32").reshape(-1, 1)
|
||||
cum_offset = np.cumsum((max_len - seq_lens).flatten(), -1, "int32")
|
||||
token_num = np.sum(seq_lens)
|
||||
token_num = int(np.sum(seq_lens))
|
||||
bs = seq_lens.shape[0]
|
||||
input_ids = np.zeros([bs, max_len], "int64")
|
||||
for i in range(bs):
|
||||
@@ -32,34 +31,44 @@ for i in range(bs):
|
||||
(
|
||||
x_remove_padding,
|
||||
cum_offsets_out,
|
||||
padding_offset,
|
||||
batch_id_per_token,
|
||||
cu_seqlens_q,
|
||||
cu_seqlens_k,
|
||||
) = get_padding_offset(
|
||||
paddle.to_tensor(input_ids),
|
||||
paddle.to_tensor(cum_offset),
|
||||
paddle.to_tensor(token_num),
|
||||
paddle.to_tensor(seq_lens),
|
||||
paddle.to_tensor(seq_lens.flatten()),
|
||||
token_num,
|
||||
)
|
||||
|
||||
print("input_ids:\n", input_ids)
|
||||
print("cum_offset:\n", cum_offset)
|
||||
print("seq_lens:\n", seq_lens.flatten())
|
||||
print("token_num:\n", token_num)
|
||||
print("seq_lens:\n", seq_lens)
|
||||
print("x_remove_padding:\n", x_remove_padding)
|
||||
print("cum_offsets_out:\n", cum_offsets_out)
|
||||
print("padding_offset:\n", padding_offset)
|
||||
print("batch_id_per_token:\n", batch_id_per_token)
|
||||
print("cu_seqlens_q:\n", cu_seqlens_q)
|
||||
print("cu_seqlens_k:\n", cu_seqlens_k)
|
||||
|
||||
ref_x_remove_padding = np.array([8, 7, 8, 2, 4, 5, 5, 7, 6, 1, 7, 2, 6], "int64")
|
||||
ref_cum_offsets_out = np.array([0, 6, 13], "int32")
|
||||
ref_padding_offset = np.array([0, 0, 0, 0, 6, 6, 6, 13, 13, 13, 13, 13, 13], "int32")
|
||||
ref_batch_id_per_token = np.array([0, 0, 0, 0, 1, 1, 1, 2, 2, 2, 2, 2, 2], "int32")
|
||||
ref_cu_seqlens_q = np.array([0, 4, 7, 13], "int32")
|
||||
ref_cu_seqlens_k = np.array([0, 4, 7, 13], "int32")
|
||||
|
||||
assert sum(ref_x_remove_padding - x_remove_padding) == 0, "Check x_remove_padding failed."
|
||||
assert sum(ref_cum_offsets_out - cum_offsets_out) == 0, "Check cum_offsets_out failed."
|
||||
assert sum(ref_padding_offset - padding_offset) == 0, "Check padding_offset failed."
|
||||
assert sum(ref_cu_seqlens_q - cu_seqlens_q) == 0, "Check cu_seqlens_q failed."
|
||||
assert sum(ref_cu_seqlens_k - cu_seqlens_k) == 0, "Check cu_seqlens_k failed."
|
||||
assert (
|
||||
np.sum(np.abs(ref_x_remove_padding - x_remove_padding.numpy())) == 0
|
||||
), f"Check x_remove_padding failed.\nref: {ref_x_remove_padding}\ngot: {x_remove_padding.numpy()}"
|
||||
assert (
|
||||
np.sum(np.abs(ref_cum_offsets_out - cum_offsets_out.numpy())) == 0
|
||||
), f"Check cum_offsets_out failed.\nref: {ref_cum_offsets_out}\ngot: {cum_offsets_out.numpy()}"
|
||||
assert (
|
||||
np.sum(np.abs(ref_batch_id_per_token - batch_id_per_token.numpy())) == 0
|
||||
), f"Check batch_id_per_token failed.\nref: {ref_batch_id_per_token}\ngot: {batch_id_per_token.numpy()}"
|
||||
assert (
|
||||
np.sum(np.abs(ref_cu_seqlens_q - cu_seqlens_q.numpy())) == 0
|
||||
), f"Check cu_seqlens_q failed.\nref: {ref_cu_seqlens_q}\ngot: {cu_seqlens_q.numpy()}"
|
||||
assert (
|
||||
np.sum(np.abs(ref_cu_seqlens_k - cu_seqlens_k.numpy())) == 0
|
||||
), f"Check cu_seqlens_k failed.\nref: {ref_cu_seqlens_k}\ngot: {cu_seqlens_k.numpy()}"
|
||||
|
||||
print("\nAll checks passed!")
|
||||
|
||||
Reference in New Issue
Block a user