Co-authored-by: gongweibao <gognweibao@baidu.com>
Author: gongweibao (2026-03-04 21:55:31 +08:00), committed by GitHub
parent 5c8f5184d9
commit ddb06ff83f
306 changed files with 40627 additions and 34418 deletions
This file: +155 -125
@@ -1,35 +1,39 @@
// adapted from: https://github.com/vllm-project/vllm/blob/118ff921118cc81061a2af865a1e13840ceb6792/csrc/quantization/cutlass_w8a8/c3x/cutlass_gemm_caller.cuh
// adapted from:
// https://github.com/vllm-project/vllm/blob/118ff921118cc81061a2af865a1e13840ceb6792/csrc/quantization/cutlass_w8a8/c3x/cutlass_gemm_caller.cuh
#include "quantization/common.cuh"
// adapted from: https://github.com/sgl-project/sglang/blob/v0.5.2rc2/sgl-kernel/csrc/gemm/per_token_quant_fp8.cu
// adapted from:
// https://github.com/sgl-project/sglang/blob/v0.5.2rc2/sgl-kernel/csrc/gemm/per_token_quant_fp8.cu
// ---------------------------------------------------------------------------
// 1. Warp-local, no shared memory
// • One warp handles one token.
// • Eight tokens per 256-thread CTA.
// ---------------------------------------------------------------------------
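An editorial gloss on the two template knobs that follow: kTokensPerCTA pairs one warp with one token, so the default of 8 warps yields the 256-thread CTA the banner describes, and kVecSize == 16 lets Pass-2 write 16 one-byte fp8 values as a single 128-bit uint4 transaction (with kVecSize == 8 that store no longer applies, hence the element-wise fallback seen later in this diff). A compile-time restatement of those relationships, as a standalone sketch:

#include <cuda_fp8.h>      // __nv_fp8_e4m3 (one byte each)
#include <cuda_runtime.h>  // uint4

constexpr int kTokensPerCTA_sketch = 8;                           // default below
constexpr int kThreadsPerCTA_sketch = kTokensPerCTA_sketch * 32;  // one warp per token
static_assert(kThreadsPerCTA_sketch == 256, "eight warps per CTA");
static_assert(16 * sizeof(__nv_fp8_e4m3) == sizeof(uint4),
              "kVecSize == 16 fp8 outputs fill exactly one 128-bit uint4 store");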
template <typename T, typename DST_DTYPE, int kTokensPerCTA = 8, int kVecSize = 16>
__global__ void per_token_quant_fp8_kernel(
const T* __restrict__ input,
DST_DTYPE* __restrict__ output_q,
float* __restrict__ output_s,
const float scale_ub,
const int64_t hidden_size,
const int64_t num_tokens) {
template <typename T,
typename DST_DTYPE,
int kTokensPerCTA = 8,
int kVecSize = 16>
__global__ void per_token_quant_fp8_kernel(const T *__restrict__ input,
DST_DTYPE *__restrict__ output_q,
float *__restrict__ output_s,
const float scale_ub,
const int64_t hidden_size,
const int64_t num_tokens) {
const int warp_id = threadIdx.x / WARP_SIZE; // 0-7 (8 warps)
const int lane_id = threadIdx.x & (WARP_SIZE - 1); // 0-31
const int token_id = blockIdx.x * kTokensPerCTA + warp_id;
if (token_id >= num_tokens) return;
// Global tensors for this token
const T* token_input = input + token_id * hidden_size;
DST_DTYPE* token_output = output_q + token_id * hidden_size;
float* token_scale = output_s + token_id;
const T *token_input = input + token_id * hidden_size;
DST_DTYPE *token_output = output_q + token_id * hidden_size;
float *token_scale = output_s + token_id;
//
// Pass-1: Perform a warp reduce to find the max_value of a token's hidden_size
// Pass-1: Perform a warp reduce to find the max_value of a token's
// hidden_size
//
float max_value = 0.f;
using vec_t = AlignedVector<T, kVecSize>;
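Pass-1, continued in the next hunk, folds each warp's running maximum with warpReduceMax, a helper defined elsewhere in this repo. A minimal sketch of the butterfly-shuffle pattern it conventionally corresponds to, assuming a fully active 32-lane warp:

#ifndef WARP_SIZE
#define WARP_SIZE 32
#endif

__device__ __forceinline__ float warpReduceMax(float val) {
#pragma unroll
  for (int offset = WARP_SIZE / 2; offset > 0; offset >>= 1) {
    // exchange with the lane XOR offset away; after log2(32) steps
    // every lane holds the warp-wide maximum
    val = fmaxf(val, __shfl_xor_sync(0xffffffffu, val, offset));
  }
  return val;
}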
@@ -46,7 +50,7 @@ __global__ void per_token_quant_fp8_kernel(
}
float warp_max = warpReduceMax(max_value);
if (scale_ub > 0){
if (scale_ub > 0) {
warp_max = fminf(warp_max, scale_ub);
}
float scale;
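The lines elided between this hunk and the next derive the per-token scale from warp_max. A sketch of the formulation used in the SGLang kernel this file cites (the 1e-10f guard against all-zero tokens is an assumption about the elided code):

// sketch of the elided scale derivation, not a verbatim quote of this file
scale = fmaxf(warp_max, 1e-10f) / FP8_E4M3_MAX;  // avoid divide-by-zero rows
const float scale_inv = 1.0f / scale;            // reused in Pass-2 below
if (lane_id == 0) {
  *token_scale = scale;                          // one scale written per token
}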
@@ -71,7 +75,7 @@ __global__ void per_token_quant_fp8_kernel(
output_arr[j] = static_cast<DST_DTYPE>(val);
}
if constexpr (kVecSize == 16) {
*(uint4*)(token_output + i * kVecSize) = *(uint4*)output_arr;
*(uint4 *)(token_output + i * kVecSize) = *(uint4 *)output_arr;
} else {
// Use element-wise copy for vector size 8 to ensure correctness
for (int k = 0; k < kVecSize; ++k) {
@@ -86,9 +90,9 @@ __global__ void per_token_quant_fp8_kernel(
// ---------------------------------------------------------------------------
template <typename T, typename DST_DTYPE, int kVecSize = 16>
__global__ void per_token_quant_fp8_small_batch_kernel(
const T* __restrict__ input,
DST_DTYPE* __restrict__ output_q,
float* __restrict__ output_s,
const T *__restrict__ input,
DST_DTYPE *__restrict__ output_q,
float *__restrict__ output_s,
const float scale_ub,
const int64_t hidden_size,
const int64_t num_tokens) {
@@ -98,8 +102,8 @@ __global__ void per_token_quant_fp8_small_batch_kernel(
const int tid = threadIdx.x;
const int block_dim = blockDim.x;
const T* token_input = input + token_idx * hidden_size;
DST_DTYPE* token_output = output_q + token_idx * hidden_size;
const T *token_input = input + token_idx * hidden_size;
DST_DTYPE *token_output = output_q + token_idx * hidden_size;
float max_value = 0.0f;
@@ -120,7 +124,7 @@ __global__ void per_token_quant_fp8_small_batch_kernel(
}
max_value = blockReduceMax(max_value);
if (scale_ub > 0){
if (scale_ub > 0) {
max_value = fminf(max_value, scale_ub);
}
__shared__ float scale;
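blockReduceMax, like warpReduceMax above, lives elsewhere in the repo. A sketch of the conventional two-stage pattern, building on the warpReduceMax sketch given earlier (assumes blockDim.x <= 1024; seeding idle lanes with 0.f is safe here because the reduced values are absolute maxima, never negative):

__device__ float blockReduceMax(float val) {
  __shared__ float warp_max[32];                  // one slot per possible warp
  const int lane = threadIdx.x & (WARP_SIZE - 1);
  const int wid = threadIdx.x / WARP_SIZE;
  val = warpReduceMax(val);                       // stage 1: per-warp max
  if (lane == 0) warp_max[wid] = val;             // warp leaders publish
  __syncthreads();
  const int nwarps = (blockDim.x + WARP_SIZE - 1) / WARP_SIZE;
  val = (threadIdx.x < nwarps) ? warp_max[lane] : 0.f;
  if (wid == 0) val = warpReduceMax(val);         // stage 2: reduce the leaders
  return val;                                     // valid across warp 0
}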
@@ -140,12 +144,14 @@ __global__ void per_token_quant_fp8_small_batch_kernel(
DST_DTYPE output_arr[kVecSize];
#pragma unroll
for (uint32_t j = 0; j < kVecSize; ++j) {
float val = fmaxf(fminf(static_cast<float>(input_vec[j]) * scale_inv, FP8_E4M3_MAX), -FP8_E4M3_MAX);
float val = fmaxf(
fminf(static_cast<float>(input_vec[j]) * scale_inv, FP8_E4M3_MAX),
-FP8_E4M3_MAX);
output_arr[j] = static_cast<DST_DTYPE>(val);
}
if constexpr (kVecSize == 16) {
*(uint4*)(token_output + i * kVecSize) = *(uint4*)output_arr;
*(uint4 *)(token_output + i * kVecSize) = *(uint4 *)output_arr;
} else {
// Use element-wise copy for vector size 8 to ensure correctness
for (int k = 0; k < kVecSize; ++k) {
@@ -173,8 +179,11 @@ __global__ void scaled_fp8_quant_kernel(fp8_type *__restrict__ out,
template <typename scalar_t, typename fp8_type>
__global__ void dynamic_per_token_scaled_fp8_quant_kernel(
fp8_type *__restrict__ out, float *__restrict__ scale,
scalar_t const *__restrict__ input, float scale_ub, const int hidden_size) {
fp8_type *__restrict__ out,
float *__restrict__ scale,
scalar_t const *__restrict__ input,
float scale_ub,
const int hidden_size) {
int const tid = threadIdx.x;
int const token_idx = blockIdx.x;
@@ -228,11 +237,11 @@ __global__ void dynamic_per_token_scaled_fp8_quant_kernel(
}
}
} // namespace fastdeploy
} // namespace fastdeploy
void StaticScaledFp8Quant(paddle::Tensor &out, // [..., d]
paddle::Tensor const &input, // [..., d]
paddle::Tensor const &scale) // [1]
void StaticScaledFp8Quant(paddle::Tensor &out, // [..., d]
paddle::Tensor const &input, // [..., d]
paddle::Tensor const &scale) // [1]
{
PD_CHECK(out.dtype() == paddle::DataType::FLOAT8_E4M3FN);
using fp8_t = phi::dtype::float8_e4m3fn;
@@ -245,35 +254,41 @@ void StaticScaledFp8Quant(paddle::Tensor &out, // [..., d]
cudaStream_t stream = input.stream();
switch (input.dtype()) {
case paddle::DataType::FLOAT32: {
using scalar_t = float;
fastdeploy::scaled_fp8_quant_kernel<scalar_t, fp8_t>
<<<grid, block, 0, stream>>>(out.data<fp8_t>(), input.data<scalar_t>(),
scale.data<float>(), num_elems);
break;
}
case paddle::DataType::FLOAT16: {
using scalar_t = phi::dtype::float16;
fastdeploy::scaled_fp8_quant_kernel<scalar_t, fp8_t>
<<<grid, block, 0, stream>>>(out.data<fp8_t>(), input.data<scalar_t>(),
scale.data<float>(), num_elems);
break;
}
case paddle::DataType::BFLOAT16: {
using scalar_t = phi::dtype::bfloat16;
fastdeploy::scaled_fp8_quant_kernel<scalar_t, fp8_t>
<<<grid, block, 0, stream>>>(out.data<fp8_t>(), input.data<scalar_t>(),
scale.data<float>(), num_elems);
break;
}
default:
PD_THROW("Only supported attr of input type in [fp32, fp16, bf16].");
case paddle::DataType::FLOAT32: {
using scalar_t = float;
fastdeploy::scaled_fp8_quant_kernel<scalar_t, fp8_t>
<<<grid, block, 0, stream>>>(out.data<fp8_t>(),
input.data<scalar_t>(),
scale.data<float>(),
num_elems);
break;
}
case paddle::DataType::FLOAT16: {
using scalar_t = phi::dtype::float16;
fastdeploy::scaled_fp8_quant_kernel<scalar_t, fp8_t>
<<<grid, block, 0, stream>>>(out.data<fp8_t>(),
input.data<scalar_t>(),
scale.data<float>(),
num_elems);
break;
}
case paddle::DataType::BFLOAT16: {
using scalar_t = phi::dtype::bfloat16;
fastdeploy::scaled_fp8_quant_kernel<scalar_t, fp8_t>
<<<grid, block, 0, stream>>>(out.data<fp8_t>(),
input.data<scalar_t>(),
scale.data<float>(),
num_elems);
break;
}
default:
PD_THROW("Only supported attr of input type in [fp32, fp16, bf16].");
}
}
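The three case bodies above differ only in the choice of scalar_t. The file already owns a DISPATCH_FLOAT_FP6_DTYPE macro for exactly this job (used in DynamicPerTokenScaledFp8Quant below); a templated equivalent, sketched here with a hypothetical dispatch_float_dtype helper that is not part of this commit:

template <typename F>
void dispatch_float_dtype(paddle::DataType dtype, F &&body) {
  switch (dtype) {
    case paddle::DataType::FLOAT32:  body(float{});                break;
    case paddle::DataType::FLOAT16:  body(phi::dtype::float16{});  break;
    case paddle::DataType::BFLOAT16: body(phi::dtype::bfloat16{}); break;
    default:
      PD_THROW("Only supported attr of input type in [fp32, fp16, bf16].");
  }
}

// usage, collapsing the switch in StaticScaledFp8Quant above:
// dispatch_float_dtype(input.dtype(), [&](auto tag) {
//   using scalar_t = decltype(tag);
//   fastdeploy::scaled_fp8_quant_kernel<scalar_t, fp8_t>
//       <<<grid, block, 0, stream>>>(out.data<fp8_t>(),
//                                    input.data<scalar_t>(),
//                                    scale.data<float>(),
//                                    num_elems);
// });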
void DynamicScaledFp8Quant(paddle::Tensor &out, // [..., d]
paddle::Tensor const &input, // [..., d]
paddle::Tensor &scale) // [1]
void DynamicScaledFp8Quant(paddle::Tensor &out, // [..., d]
paddle::Tensor const &input, // [..., d]
paddle::Tensor &scale) // [1]
{
PD_CHECK(out.dtype() == paddle::DataType::FLOAT8_E4M3FN);
using fp8_t = phi::dtype::float8_e4m3fn;
@@ -286,44 +301,51 @@ void DynamicScaledFp8Quant(paddle::Tensor &out, // [..., d]
cudaStream_t stream = input.stream();
switch (input.dtype()) {
case paddle::DataType::FLOAT32: {
using scalar_t = float;
fastdeploy::segmented_max_reduction<scalar_t, fp8_t>
<<<grid, block, 0, stream>>>(scale.data<float>(),
input.data<scalar_t>(), num_elems);
fastdeploy::scaled_fp8_quant_kernel<scalar_t, fp8_t>
<<<grid, block, 0, stream>>>(out.data<fp8_t>(), input.data<scalar_t>(),
scale.data<float>(), num_elems);
break;
}
case paddle::DataType::FLOAT16: {
using scalar_t = phi::dtype::float16;
fastdeploy::segmented_max_reduction<scalar_t, fp8_t>
<<<grid, block, 0, stream>>>(scale.data<float>(),
input.data<scalar_t>(), num_elems);
fastdeploy::scaled_fp8_quant_kernel<scalar_t, fp8_t>
<<<grid, block, 0, stream>>>(out.data<fp8_t>(), input.data<scalar_t>(),
scale.data<float>(), num_elems);
break;
}
case paddle::DataType::BFLOAT16: {
using scalar_t = phi::dtype::bfloat16;
fastdeploy::segmented_max_reduction<scalar_t, fp8_t>
<<<grid, block, 0, stream>>>(scale.data<float>(),
input.data<scalar_t>(), num_elems);
fastdeploy::scaled_fp8_quant_kernel<scalar_t, fp8_t>
<<<grid, block, 0, stream>>>(out.data<fp8_t>(), input.data<scalar_t>(),
scale.data<float>(), num_elems);
break;
}
default:
PD_THROW("Only supported attr of input type in [fp32, fp16, bf16].");
case paddle::DataType::FLOAT32: {
using scalar_t = float;
fastdeploy::segmented_max_reduction<scalar_t, fp8_t>
<<<grid, block, 0, stream>>>(
scale.data<float>(), input.data<scalar_t>(), num_elems);
fastdeploy::scaled_fp8_quant_kernel<scalar_t, fp8_t>
<<<grid, block, 0, stream>>>(out.data<fp8_t>(),
input.data<scalar_t>(),
scale.data<float>(),
num_elems);
break;
}
case paddle::DataType::FLOAT16: {
using scalar_t = phi::dtype::float16;
fastdeploy::segmented_max_reduction<scalar_t, fp8_t>
<<<grid, block, 0, stream>>>(
scale.data<float>(), input.data<scalar_t>(), num_elems);
fastdeploy::scaled_fp8_quant_kernel<scalar_t, fp8_t>
<<<grid, block, 0, stream>>>(out.data<fp8_t>(),
input.data<scalar_t>(),
scale.data<float>(),
num_elems);
break;
}
case paddle::DataType::BFLOAT16: {
using scalar_t = phi::dtype::bfloat16;
fastdeploy::segmented_max_reduction<scalar_t, fp8_t>
<<<grid, block, 0, stream>>>(
scale.data<float>(), input.data<scalar_t>(), num_elems);
fastdeploy::scaled_fp8_quant_kernel<scalar_t, fp8_t>
<<<grid, block, 0, stream>>>(out.data<fp8_t>(),
input.data<scalar_t>(),
scale.data<float>(),
num_elems);
break;
}
default:
PD_THROW("Only supported attr of input type in [fp32, fp16, bf16].");
}
}
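DynamicScaledFp8Quant is a two-pass pipeline: segmented_max_reduction folds the global max |x| into the single-element scale tensor, then scaled_fp8_quant_kernel reuses the static path with that scale. The reduction kernel is defined earlier in this file (adapted from vLLM); a sketch of the idea, under the assumptions that scale is zero-initialized before launch and blockReduceMax behaves as sketched above:

template <typename scalar_t>
__global__ void segmented_max_reduction_sketch(float *__restrict__ scale,
                                               const scalar_t *__restrict__ input,
                                               int64_t num_elems) {
  float m = 0.f;
  // grid-stride loop: each thread folds its slice into a private max
  for (int64_t i = (int64_t)blockIdx.x * blockDim.x + threadIdx.x;
       i < num_elems; i += (int64_t)gridDim.x * blockDim.x) {
    m = fmaxf(m, fabsf(static_cast<float>(input[i])));
  }
  m = blockReduceMax(m);
  if (threadIdx.x == 0) {
    // for non-negative floats the IEEE-754 bit pattern is monotonic, so an
    // integer atomicMax implements a float max across blocks
    atomicMax(reinterpret_cast<int *>(scale),
              __float_as_int(m / FP8_E4M3_MAX));
  }
}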
void DynamicPerTokenScaledFp8Quant(paddle::Tensor &out, // [..., d]
paddle::Tensor const &input, // [..., d]
paddle::Tensor &scales, float scale_ub) {
void DynamicPerTokenScaledFp8Quant(paddle::Tensor &out, // [..., d]
paddle::Tensor const &input, // [..., d]
paddle::Tensor &scales,
float scale_ub) {
PD_CHECK(input.is_contiguous());
PD_CHECK(out.is_contiguous());
PD_CHECK(out.dtype() == paddle::DataType::FLOAT8_E4M3FN);
@@ -333,7 +355,7 @@ void DynamicPerTokenScaledFp8Quant(paddle::Tensor &out, // [..., d]
int const num_tokens = input.numel() / hidden_size;
cudaStream_t stream = input.stream();
if (hidden_size % 8 == 0){
if (hidden_size % 8 == 0) {
int device = 0;
cudaGetDevice(&device);
int sm_count = 0;
@@ -343,50 +365,58 @@ void DynamicPerTokenScaledFp8Quant(paddle::Tensor &out, // [..., d]
const bool use_vec16 = (hidden_size % 16 == 0);
DISPATCH_FLOAT_FP6_DTYPE(input.dtype(), scalar_t, {
if (use_warp_kernel) {
// -------- warp-local ---------------------------------------------------
// -------- warp-local
// ---------------------------------------------------
constexpr int THREADS = TOKENS_PER_CTA * WARP_SIZE; // 256
dim3 grid((num_tokens + TOKENS_PER_CTA - 1) / TOKENS_PER_CTA);
dim3 block(THREADS);
if (use_vec16) {
per_token_quant_fp8_kernel<scalar_t, __nv_fp8_e4m3, TOKENS_PER_CTA, 16><<<grid, block, 0, stream>>>(
reinterpret_cast<const scalar_t*>(input.data<scalar_t>()),
reinterpret_cast<__nv_fp8_e4m3*>(out.data<fp8_t>()),
reinterpret_cast<float*>(scales.data<float>()),
per_token_quant_fp8_kernel<scalar_t,
__nv_fp8_e4m3,
TOKENS_PER_CTA,
16><<<grid, block, 0, stream>>>(
reinterpret_cast<const scalar_t *>(input.data<scalar_t>()),
reinterpret_cast<__nv_fp8_e4m3 *>(out.data<fp8_t>()),
reinterpret_cast<float *>(scales.data<float>()),
scale_ub,
hidden_size,
num_tokens);
} else {
per_token_quant_fp8_kernel<scalar_t, __nv_fp8_e4m3, TOKENS_PER_CTA, 8><<<grid, block, 0, stream>>>(
reinterpret_cast<const scalar_t*>(input.data<scalar_t>()),
reinterpret_cast<__nv_fp8_e4m3*>(out.data<fp8_t>()),
reinterpret_cast<float*>(scales.data<float>()),
scale_ub,
hidden_size,
num_tokens);
per_token_quant_fp8_kernel<scalar_t, __nv_fp8_e4m3, TOKENS_PER_CTA, 8>
<<<grid, block, 0, stream>>>(
reinterpret_cast<const scalar_t *>(input.data<scalar_t>()),
reinterpret_cast<__nv_fp8_e4m3 *>(out.data<fp8_t>()),
reinterpret_cast<float *>(scales.data<float>()),
scale_ub,
hidden_size,
num_tokens);
}
} else {
// -------- baseline -----------------------------------------------------
// -------- baseline
// -----------------------------------------------------
constexpr int THREADS = 256;
dim3 grid(num_tokens);
dim3 block(THREADS);
if (use_vec16) {
per_token_quant_fp8_small_batch_kernel<scalar_t, __nv_fp8_e4m3, 16><<<grid, block, 0, stream>>>(
reinterpret_cast<const scalar_t*>(input.data<scalar_t>()),
reinterpret_cast<__nv_fp8_e4m3*>(out.data<fp8_t>()),
reinterpret_cast<float*>(scales.data<float>()),
scale_ub,
hidden_size,
num_tokens);
per_token_quant_fp8_small_batch_kernel<scalar_t, __nv_fp8_e4m3, 16>
<<<grid, block, 0, stream>>>(
reinterpret_cast<const scalar_t *>(input.data<scalar_t>()),
reinterpret_cast<__nv_fp8_e4m3 *>(out.data<fp8_t>()),
reinterpret_cast<float *>(scales.data<float>()),
scale_ub,
hidden_size,
num_tokens);
} else {
per_token_quant_fp8_small_batch_kernel<scalar_t, __nv_fp8_e4m3, 8><<<grid, block, 0, stream>>>(
reinterpret_cast<const scalar_t*>(input.data<scalar_t>()),
reinterpret_cast<__nv_fp8_e4m3*>(out.data<fp8_t>()),
reinterpret_cast<float*>(scales.data<float>()),
scale_ub,
hidden_size,
num_tokens);
per_token_quant_fp8_small_batch_kernel<scalar_t, __nv_fp8_e4m3, 8>
<<<grid, block, 0, stream>>>(
reinterpret_cast<const scalar_t *>(input.data<scalar_t>()),
reinterpret_cast<__nv_fp8_e4m3 *>(out.data<fp8_t>()),
reinterpret_cast<float *>(scales.data<float>()),
scale_ub,
hidden_size,
num_tokens);
}
}
});
@@ -398,11 +428,12 @@ void DynamicPerTokenScaledFp8Quant(paddle::Tensor &out, // [..., d]
DISPATCH_FLOAT_FP6_DTYPE(input.dtype(), scalar_t, {
fastdeploy::dynamic_per_token_scaled_fp8_quant_kernel<scalar_t, fp8_t>
<<<grid, block, 0, stream>>>(out.data<fp8_t>(), scales.data<float>(),
input.data<scalar_t>(), scale_ub,
<<<grid, block, 0, stream>>>(out.data<fp8_t>(),
scales.data<float>(),
input.data<scalar_t>(),
scale_ub,
hidden_size);
});
}
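For unit-testing either GPU path above, a host-side oracle helps. A hypothetical CPU reference, not part of this commit (fp32 input only; outputs stay in float rather than being cast to fp8, and the 1e-10f guard mirrors the scale derivation assumed earlier):

#include <algorithm>
#include <cmath>
#include <cstdint>

void per_token_quant_fp8_reference(const float *input, float *out_vals,
                                   float *out_scales, float scale_ub,
                                   int64_t hidden_size, int64_t num_tokens) {
  constexpr float kFp8Max = 448.f;  // finite max of e4m3
  for (int64_t t = 0; t < num_tokens; ++t) {
    const float *row = input + t * hidden_size;
    float m = 0.f;  // Pass-1: max |x| over this token's row
    for (int64_t i = 0; i < hidden_size; ++i)
      m = std::max(m, std::fabs(row[i]));
    if (scale_ub > 0) m = std::min(m, scale_ub);
    const float scale = std::max(m, 1e-10f) / kFp8Max;
    out_scales[t] = scale;
    for (int64_t i = 0; i < hidden_size; ++i)  // Pass-2: scale and clamp
      out_vals[t * hidden_size + i] =
          std::max(std::min(row[i] / scale, kFp8Max), -kFp8Max);
  }
}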
PD_BUILD_STATIC_OP(static_scaled_fp8_quant)
@@ -414,8 +445,7 @@ PD_BUILD_STATIC_OP(static_scaled_fp8_quant)
PD_BUILD_STATIC_OP(dynamic_scaled_fp8_quant)
.Inputs({"out", "input", "scale"})
.Outputs({"out_q", "out_scale"})
.SetInplaceMap({{"out", "out_q"},
{"scale", "out_scale"}})
.SetInplaceMap({{"out", "out_q"}, {"scale", "out_scale"}})
.SetKernelFn(PD_KERNEL(DynamicScaledFp8Quant));
PD_BUILD_STATIC_OP(dynamic_per_token_scaled_fp8_quant)