mirror of https://github.com/PaddlePaddle/FastDeploy.git (synced 2026-04-23 17:11:21 +08:00)
[Optimization] The pre- and post-processing pipelines do not perform dict conversion (#5494)
* to_request_for_infer initial commit
* refact to from_chat_completion_request
* preprocess use request initial commit
* bugfix
* processors refact to using request
* bug fix
* refact Request from_generic_request
* post process initial commit
* bugfix
* postprocess second commit
* bugfix
* serving_embedding initial commit
* serving_reward initial commit
* bugfix
* replace function name
* async_llm initial commit
* offline initial commit and fix bug
* bugfix
* fix async_llm
* remove add speculate_metrics into data
* fix logprobs bug
* fix echo bug
* fix bug
* fix reasoning_max_tokens
* bugfix
* bugfix and modify unittest
* bugfix and modify unit test
* bugfix
* bugfix
* bugfix
* modify unittest
* fix error when reasong_content is none for text_processor
* remove some unnessary logic
* revert removed logic
* implement add and set method for RequestOutput and refact code
* modify unit test
* modify unit test
* union process_request and process_request_obj
* remove a unit test
* union process_response and process_response_obj
* support qwen3_vl_processor
* modify unittest and remove comments
* fix prompt_logprobs
* fix codestyle
* add v1
* v1
* fix unit test
* fix unit test
* fix pre-commit
* fix
* add process request
* add process request
* fix
* fix
* fix unit test
* fix unit test
* fix unit test
* fix unit test
* fix unit test
* remove file
* add unit test
* add unit test
* add unit test
* fix unit test
* fix unit test
* fix
* fix
---------
Co-authored-by: Jiaxin Sui <95567040+plusNew001@users.noreply.github.com>
Co-authored-by: luukunn <981429396@qq.com>
Co-authored-by: luukunn <83932082+luukunn@users.noreply.github.com>
Co-authored-by: Zhang Yulong <35552275+ZhangYulongg@users.noreply.github.com>
@@ -0,0 +1,23 @@
"""
# Copyright (c) 2025 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""

from .process import DataProcessor
from .qwen3_vl_processor import Qwen3VLProcessor

__all__ = [
    "DataProcessor",
    "Qwen3VLProcessor",
]
@@ -0,0 +1,413 @@
"""
# Copyright (c) 2025 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""

import math
|
||||
from typing import List, Optional, Union
|
||||
|
||||
import numpy as np
|
||||
import paddle
|
||||
import PIL
|
||||
from paddleformers.transformers.feature_extraction_utils import BatchFeature
|
||||
from paddleformers.transformers.image_processing_utils import BaseImageProcessor
|
||||
from paddleformers.transformers.image_transforms import (
|
||||
normalize,
|
||||
rescale,
|
||||
resize,
|
||||
to_channel_dimension_format,
|
||||
)
|
||||
from paddleformers.transformers.image_utils import (
|
||||
ChannelDimension,
|
||||
ImageInput,
|
||||
PILImageResampling,
|
||||
get_image_size,
|
||||
infer_channel_dimension_format,
|
||||
make_list_of_images,
|
||||
to_numpy_array,
|
||||
valid_images,
|
||||
)
|
||||
from paddleformers.transformers.tokenizer_utils_base import TensorType
|
||||
from PIL import Image
|
||||
|
||||
from fastdeploy.utils import data_processor_logger

IMAGE_MEAN = [0.5, 0.5, 0.5]
IMAGE_STD = [0.5, 0.5, 0.5]

MIN_PIXELS = 65536
MAX_PIXELS = 16777216


VideoInput = Union[
|
||||
List["PIL.Image.Image"],
|
||||
"np.ndarray",
|
||||
"paddle.Tensor",
|
||||
List["np.ndarray"],
|
||||
List["paddle.Tensor"],
|
||||
List[List["PIL.Image.Image"]],
|
||||
List[List["np.ndarray"]],
|
||||
List[List["paddle.Tensor"]],
|
||||
]
|
||||
|
||||
|
||||
def round_by_factor(number: int, factor: int) -> int:
|
||||
return round(number / factor) * factor
|
||||
|
||||
|
||||
def ceil_by_factor(number: int, factor: int) -> int:
|
||||
return math.ceil(number / factor) * factor
|
||||
|
||||
|
||||
def floor_by_factor(number: int, factor: int) -> int:
|
||||
return math.floor(number / factor) * factor
|
||||
|
||||
|
||||
def smart_resize(height: int, width: int, factor: int, min_pixels: int, max_pixels: int, max_ratio: int = 200):
|
||||
"""
|
||||
Smart image resizing that maintains aspect ratio and respects constraints.
|
||||
|
||||
Args:
|
||||
height: Original image height
|
||||
width: Original image width
|
||||
factor: Patch size factor
|
||||
min_pixels: Minimum allowed pixels
|
||||
max_pixels: Maximum allowed pixels
|
||||
max_ratio: Maximum allowed aspect ratio
|
||||
|
||||
Returns:
|
||||
tuple: (new_height, new_width)
|
||||
|
||||
Raises:
|
||||
ValueError: If calculated dimensions are invalid
|
||||
"""
|
||||
if max(height, width) / min(height, width) > max_ratio:
|
||||
if height > width:
|
||||
new_width = max(factor, round_by_factor(width, factor))
|
||||
new_height = floor_by_factor(new_width * max_ratio, factor)
|
||||
else:
|
||||
new_height = max(factor, round_by_factor(height, factor))
|
||||
new_width = floor_by_factor(new_height * max_ratio, factor)
|
||||
|
||||
        data_processor_logger.info(
            f"absolute aspect ratio must be smaller than {max_ratio}, "
            f"got {max(height, width) / min(height, width)}, "
            f"resize to {max(new_height, new_width) / min(new_height, new_width)}"
        )
|
||||
|
||||
height = new_height
|
||||
width = new_width
|
||||
|
||||
h_bar = max(factor, round_by_factor(height, factor))
|
||||
w_bar = max(factor, round_by_factor(width, factor))
|
||||
if h_bar * w_bar > max_pixels:
|
||||
beta = math.sqrt((height * width) / max_pixels)
|
||||
h_bar = floor_by_factor(height / beta, factor)
|
||||
w_bar = floor_by_factor(width / beta, factor)
|
||||
elif h_bar * w_bar < min_pixels:
|
||||
beta = math.sqrt(min_pixels / (height * width))
|
||||
h_bar = ceil_by_factor(height * beta, factor)
|
||||
w_bar = ceil_by_factor(width * beta, factor)
|
||||
|
||||
if min_pixels > h_bar * w_bar or h_bar * w_bar > max_pixels:
|
||||
raise ValueError(f"encounter invalid h_bar: {h_bar}, w_bar: {w_bar}")
|
||||
|
||||
return h_bar, w_bar
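
# Illustrative sketch of smart_resize with the factor this module uses by default
# (patch_size * merge_size = 16 * 2 = 32) and the default pixel bounds; the numbers
# below are only an example, not part of the public API:
#   smart_resize(1080, 1920, factor=32, min_pixels=MIN_PIXELS, max_pixels=MAX_PIXELS)
#   -> (1088, 1920)   # each side rounded to a multiple of 32, already inside the bounds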
|
||||
|
||||
|
||||
def is_scaled_image(image: np.ndarray) -> bool:
|
||||
"""
|
||||
Check if image pixel values are already normalized to [0, 1] range.
|
||||
|
||||
Args:
|
||||
image: Input image array
|
||||
|
||||
Returns:
|
||||
bool: True if image is already scaled
|
||||
"""
|
||||
if image.dtype == np.uint8:
|
||||
return False
|
||||
|
||||
# It's possible the image has pixel values in [0, 255] but is of floating type
|
||||
return np.min(image) >= 0 and np.max(image) <= 1
|
||||
|
||||
|
||||
class ImageProcessor(BaseImageProcessor):
|
||||
"""
|
||||
Adaptive image processor for dynamic image resizing and preprocessing.
|
||||
|
||||
This processor handles image resizing, rescaling, normalization and format conversion.
|
||||
It dynamically adjusts image dimensions based on original size and specified constraints.
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
patch_size: int = 16,
|
||||
merge_size: int = 2,
|
||||
temporal_patch_size: int = 2,
|
||||
min_pixels: int = MIN_PIXELS,
|
||||
max_pixels: int = MAX_PIXELS,
|
||||
image_mean: Union[float, List[float]] = IMAGE_MEAN,
|
||||
image_std: Union[float, List[float]] = IMAGE_STD,
|
||||
rescale_factor: float = 1 / 255,
|
||||
do_rescale: bool = True,
|
||||
do_normalize: bool = True,
|
||||
resample: PILImageResampling = PILImageResampling.BICUBIC,
|
||||
**kwargs,
|
||||
) -> None:
|
||||
"""
|
||||
Initialize image processor with configuration parameters.
|
||||
|
||||
Args:
|
||||
patch_size (int): Spatial patch size for vision encoder
|
||||
merge_size (int): Merge size between vision and LLM encoders
|
||||
temporal_patch_size (int): Temporal patch size for video processing
|
||||
min_pixels (int): Minimum allowed pixels in resized image
|
||||
max_pixels (int): Maximum allowed pixels in resized image
|
||||
image_mean (float/list): Mean values for normalization per channel
|
||||
image_std (float/list): Std values for normalization per channel
|
||||
rescale_factor (float): Scaling factor for pixel values (default 1/255)
|
||||
do_rescale (bool): Whether to rescale images
|
||||
do_normalize (bool): Whether to normalize images
|
||||
resample: Resampling method for image resizing
|
||||
**kwargs: Additional base class arguments
|
||||
"""
|
||||
super().__init__(**kwargs)
|
||||
self.patch_size = patch_size
|
||||
self.merge_size = merge_size
|
||||
self.temporal_patch_size = temporal_patch_size
|
||||
|
||||
self.min_pixels = min_pixels
|
||||
self.max_pixels = max_pixels
|
||||
|
||||
self.image_mean = image_mean
|
||||
self.image_std = image_std
|
||||
self.rescale_factor = rescale_factor
|
||||
self.do_rescale = do_rescale
|
||||
self.do_normalize = do_normalize
|
||||
|
||||
self.resample = resample
|
||||
|
||||
def _preprocess(
|
||||
self,
|
||||
images: Union[ImageInput, VideoInput],
|
||||
min_pixels: int,
|
||||
max_pixels: int,
|
||||
image_mean: Optional[Union[float, List[float]]],
|
||||
image_std: Optional[Union[float, List[float]]],
|
||||
rescale_factor: float,
|
||||
do_rescale: bool,
|
||||
do_normalize: bool,
|
||||
resample: PILImageResampling,
|
||||
data_format: Optional[ChannelDimension],
|
||||
input_data_format: Optional[Union[str, ChannelDimension]],
|
||||
):
|
||||
"""
|
||||
Internal method for image preprocessing pipeline.
|
||||
|
||||
Args:
|
||||
images: Input image or batch of images
|
||||
min_pixels: Minimum allowed pixels in output
|
||||
max_pixels: Maximum allowed pixels in output
|
||||
image_mean: Normalization mean values
|
||||
image_std: Normalization std values
|
||||
rescale_factor: Pixel value scaling factor
|
||||
do_rescale: Whether to rescale pixel values
|
||||
do_normalize: Whether to normalize pixel values
|
||||
resample: Resampling method
|
||||
data_format: Output channel format
|
||||
input_data_format: Input channel format
|
||||
|
||||
Returns:
|
||||
tuple: (flatten_patches, grid_dimensions)
|
||||
- flatten_patches: Flattened image patches
|
||||
- grid_dimensions: Grid dimensions [t, h, w]
|
||||
"""
|
||||
images = make_list_of_images(images)
|
||||
|
||||
# All transformations expect numpy arrays.
|
||||
images = [to_numpy_array(image) for image in images]
|
||||
|
||||
if is_scaled_image(images[0]) and do_rescale:
|
||||
data_processor_logger.warning(
|
||||
"It looks like you are trying to rescale already rescaled images. If the input"
|
||||
" images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again."
|
||||
)
|
||||
if input_data_format is None:
|
||||
# We assume that all images have the same channel dimension format.
|
||||
input_data_format = infer_channel_dimension_format(images[0])
|
||||
|
||||
# Get original dimensions and calculate optimal resize dimensions
|
||||
height, width = get_image_size(images[0], channel_dim=input_data_format)
|
||||
resized_height, resized_width = smart_resize(
|
||||
height,
|
||||
width,
|
||||
factor=self.patch_size * self.merge_size, # Combine patch and merge factors
|
||||
min_pixels=min_pixels,
|
||||
max_pixels=max_pixels,
|
||||
)
|
||||
|
||||
processed_images = []
|
||||
for image in images:
|
||||
if height != resized_height or width != resized_width:
|
||||
# Convert to uint8 before resizing to avoid double scaling
|
||||
image = image.astype("uint8")
|
||||
# Convert to PIL Image and resize
|
||||
image = Image.fromarray(image)
|
||||
image = resize(
|
||||
image,
|
||||
size=(resized_height, resized_width),
|
||||
resample=resample,
|
||||
data_format=input_data_format,
|
||||
)
|
||||
|
||||
if do_rescale and do_normalize:
|
||||
# Adjust mean and std for combined rescale+normalize
|
||||
image_mean = np.array(image_mean, dtype=np.float32) * (1.0 / rescale_factor)
|
||||
image_std = np.array(image_std, dtype=np.float32) * (1.0 / rescale_factor)
|
||||
do_rescale = False # Skip separate rescale step
|
||||
|
||||
# mutual exclusion and upper branch
|
||||
if do_rescale:
|
||||
image = image.astype(np.float32)
|
||||
image = rescale(image, scale=rescale_factor, data_format=input_data_format)
|
||||
|
||||
if do_normalize:
|
||||
image = image.astype(np.float32)
|
||||
image = normalize(
|
||||
image=image,
|
||||
mean=image_mean,
|
||||
std=image_std,
|
||||
data_format=input_data_format,
|
||||
)
|
||||
|
||||
image = to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) # [C, H, W]
|
||||
processed_images.append(image)
|
||||
|
||||
# Convert processed images to numpy array
|
||||
patches = np.array(processed_images)
|
||||
|
||||
# Pad temporal dimension if needed
|
||||
if patches.shape[0] % self.temporal_patch_size != 0:
|
||||
repeats = np.repeat(
|
||||
patches[-1][np.newaxis],
|
||||
self.temporal_patch_size - (patches.shape[0] % self.temporal_patch_size),
|
||||
axis=0,
|
||||
)
|
||||
patches = np.concatenate([patches, repeats], axis=0)
|
||||
|
||||
# Convert to channels-first format if needed
|
||||
if data_format == ChannelDimension.LAST:
|
||||
patches = patches.transpose([0, 3, 1, 2]) # [N, H, W, C] -> [N, C, H, W]
|
||||
|
||||
grid_t, channel = patches.shape[:2]
|
||||
grid_t = grid_t // self.temporal_patch_size
|
||||
|
||||
grid_h, grid_w = (
|
||||
resized_height // self.patch_size,
|
||||
resized_width // self.patch_size,
|
||||
)
|
||||
# Reshape into hierarchical patch structure
|
||||
patches = patches.reshape(
|
||||
[
|
||||
grid_t,
|
||||
self.temporal_patch_size,
|
||||
channel,
|
||||
grid_h // self.merge_size,
|
||||
self.merge_size,
|
||||
self.patch_size,
|
||||
grid_w // self.merge_size,
|
||||
self.merge_size,
|
||||
self.patch_size,
|
||||
]
|
||||
)
|
||||
# Reorder dimensions for better memory access pattern
|
||||
# [grid_t, grid_h/merge_size, grid_w/merge_size, merge_size, merge_size, C, temporal_patch_size, psz, psz]
|
||||
patches = patches.transpose([0, 3, 6, 4, 7, 2, 1, 5, 8])
|
||||
|
||||
flatten_patches = patches.reshape(
|
||||
[
|
||||
grid_t * grid_h * grid_w,
|
||||
channel * self.temporal_patch_size * self.patch_size * self.patch_size,
|
||||
]
|
||||
)
|
||||
|
||||
return flatten_patches, np.array([grid_t, grid_h, grid_w])
|
||||
|
||||
def preprocess(
|
||||
self,
|
||||
images: Union[ImageInput, VideoInput],
|
||||
min_pixels: Optional[int] = None,
|
||||
max_pixels: Optional[int] = None,
|
||||
image_mean: Optional[Union[float, List[float]]] = None,
|
||||
image_std: Optional[Union[float, List[float]]] = None,
|
||||
rescale_factor: Optional[float] = None,
|
||||
do_rescale: Optional[bool] = None,
|
||||
do_normalize: Optional[bool] = None,
|
||||
resample: Optional[PILImageResampling] = None,
|
||||
return_tensors: Optional[Union[str, TensorType]] = None,
|
||||
data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
|
||||
input_data_format: Optional[Union[str, ChannelDimension]] = ChannelDimension.LAST,
|
||||
):
|
||||
"""
|
||||
Main preprocessing method for images/videos.
|
||||
|
||||
Args:
|
||||
images: Input image/video data
|
||||
min_pixels: Override for minimum pixels
|
||||
max_pixels: Override for maximum pixels
|
||||
image_mean: Override for normalization mean
|
||||
image_std: Override for normalization std
|
||||
rescale_factor: Override for rescaling factor
|
||||
do_rescale: Override for rescaling flag
|
||||
do_normalize: Override for normalization flag
|
||||
resample: Override for resampling method
|
||||
return_tensors: Desired output tensor format
|
||||
data_format: Output channel dimension format
|
||||
input_data_format: Input channel dimension format
|
||||
|
||||
Returns:
|
||||
BatchFeature: Processed features containing:
|
||||
- pixel_values: Preprocessed pixel data
|
||||
- grid_thw: Grid dimensions [temporal, height, width]
|
||||
|
||||
Raises:
|
||||
ValueError: For invalid image types or dimensions
|
||||
"""
|
||||
min_pixels = min_pixels if min_pixels is not None else self.min_pixels
|
||||
max_pixels = max_pixels if max_pixels is not None else self.max_pixels
|
||||
image_mean = image_mean if image_mean is not None else self.image_mean
|
||||
image_std = image_std if image_std is not None else self.image_std
|
||||
rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
|
||||
do_rescale = do_rescale if do_rescale is not None else self.do_rescale
|
||||
do_normalize = do_normalize if do_normalize is not None else self.do_normalize
|
||||
resample = resample if resample is not None else self.resample
|
||||
|
||||
if images is not None and not valid_images(images):
|
||||
raise ValueError("Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "paddle.Tensor.")
|
||||
|
||||
pixel_values, grid_thw = self._preprocess(
|
||||
images,
|
||||
min_pixels=min_pixels,
|
||||
max_pixels=max_pixels,
|
||||
image_mean=image_mean,
|
||||
image_std=image_std,
|
||||
rescale_factor=rescale_factor,
|
||||
do_rescale=do_rescale,
|
||||
do_normalize=do_normalize,
|
||||
resample=resample,
|
||||
data_format=data_format,
|
||||
input_data_format=input_data_format,
|
||||
)
|
||||
data = {"pixel_values": pixel_values, "grid_thw": grid_thw}
|
||||
return BatchFeature(data=data, tensor_type=return_tensors)
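
    # Usage sketch (illustrative, assuming `img` is a 1920x1080 PIL.Image already loaded
    # and the default constructor arguments):
    #   processor = ImageProcessor()
    #   feats = processor.preprocess(images=[img])
    #   feats["grid_thw"]            # -> array([1, 68, 120]) after smart_resize to 1088x1920
    #   feats["pixel_values"].shape  # -> (1 * 68 * 120, 3 * 2 * 16 * 16) = (8160, 1536)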
|
||||
@@ -0,0 +1,674 @@
"""
# Copyright (c) 2025 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""

import pickle
|
||||
from typing import Dict, List, Optional, Tuple, Union
|
||||
|
||||
import numpy as np
|
||||
import paddle
|
||||
import zmq
|
||||
from paddleformers.transformers import AutoTokenizer
|
||||
from PIL import Image
|
||||
|
||||
from fastdeploy.engine.request import ImagePosition, Request
|
||||
from fastdeploy.entrypoints.chat_utils import parse_chat_messages
|
||||
from fastdeploy.input.ernie4_5_vl_processor import read_video_decord
|
||||
from fastdeploy.input.mm_data_processor import MMBaseDataProcessor
|
||||
from fastdeploy.input.utils import IDS_TYPE_FLAG
|
||||
from fastdeploy.multimodal.hasher import MultimodalHasher
|
||||
from fastdeploy.utils import data_processor_logger
|
||||
|
||||
from .image_processor import ImageProcessor, ceil_by_factor, floor_by_factor

VIDEO_MIN_PIXELS = 128 * 28 * 28
VIDEO_MAX_PIXELS = 768 * 28 * 28
FRAME_FACTOR = 2
FPS = 2.0
FPS_MIN_FRAMES = 4
FPS_MAX_FRAMES = 768


def sample_frames(
|
||||
frame_factor: int,
|
||||
min_frames: int,
|
||||
max_frames: int,
|
||||
metadata: Optional[dict] = None,
|
||||
fps: Optional[Union[int, float]] = -1,
|
||||
num_frames: Optional[int] = -1,
|
||||
):
|
||||
"""
|
||||
Sample frames from video according to specified criteria.
|
||||
|
||||
Args:
|
||||
frame_factor: Ensure sampled frames are multiples of this factor
|
||||
min_frames: Minimum number of frames to sample
|
||||
max_frames: Maximum number of frames to sample
|
||||
metadata: Video metadata containing fps information
|
||||
fps: Target frames per second for sampling
|
||||
num_frames: Exact number of frames to sample
|
||||
|
||||
Returns:
|
||||
        np.ndarray: Indices of the frames to sample
|
||||
|
||||
Raises:
|
||||
ValueError: If both fps and num_frames are specified,
|
||||
or if required metadata is missing,
|
||||
or if requested frames exceed available frames
|
||||
"""
|
||||
if fps > 0 and num_frames > 0:
|
||||
raise ValueError("`num_frames` and `fps` are mutually exclusive arguments, please use only one!")
|
||||
|
||||
total_num_frames = metadata["num_of_frame"]
|
||||
|
||||
# If num_frames is not given but fps is, calculate num_frames from fps
|
||||
if num_frames > 0:
|
||||
num_frames = round(num_frames / frame_factor) * frame_factor
|
||||
elif fps > 0:
|
||||
if metadata is None:
|
||||
raise ValueError(
|
||||
"Asked to sample `fps` frames per second but no video metadata was provided which is required when sampling with `fps`. "
|
||||
"Please pass in `VideoMetadata` object or use a fixed `num_frames` per input video"
|
||||
)
|
||||
# max_frames = math.floor(min(max_frames, total_num_frames) / frame_factor) * frame_factor
|
||||
min_frames = ceil_by_factor(min_frames, frame_factor)
|
||||
max_frames = floor_by_factor(min(max_frames, total_num_frames), frame_factor)
|
||||
|
||||
num_frames = total_num_frames / metadata["fps"] * fps
|
||||
|
||||
if num_frames > total_num_frames:
|
||||
data_processor_logger.warning(f"smart_nframes: nframes[{num_frames}] > total_frames[{total_num_frames}]")
|
||||
|
||||
num_frames = min(min(max(num_frames, min_frames), max_frames), total_num_frames)
|
||||
num_frames = floor_by_factor(num_frames, frame_factor)
|
||||
|
||||
if num_frames > total_num_frames:
|
||||
raise ValueError(
|
||||
f"Video can't be sampled. The inferred `num_frames={num_frames}` exceeds `total_num_frames={total_num_frames}`. "
|
||||
"Decrease `num_frames` or `fps` for sampling."
|
||||
)
|
||||
|
||||
    # Hack: ensure num_frames is always divisible by 4,
    # because sched/resource_manager_v1.py does grid_thw.extend([[2, h, w]] * (t // 2))
|
||||
if num_frames > 2 and num_frames % 4 != 0:
|
||||
        num_frames = (num_frames // 4) * 4  # round down to a multiple of 4
|
||||
total_num_frames = (total_num_frames // 4) * 4
|
||||
num_frames = min(min(max(num_frames, min_frames), max_frames), total_num_frames)
|
||||
|
||||
# Calculate frame indices based on sampling strategy
|
||||
if num_frames > 0:
|
||||
# Evenly spaced sampling for target frame count
|
||||
indices = np.arange(0, total_num_frames, total_num_frames / num_frames).astype(np.int32)
|
||||
else:
|
||||
# Keep all frames if no sampling requested
|
||||
indices = np.arange(0, total_num_frames).astype(np.int32)
|
||||
|
||||
return indices
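
# Example (illustrative): a 100-frame clip recorded at 25 fps, sampled with the
# defaults (fps=2.0, num_frames=-1, frame_factor=2, min_frames=4, max_frames=768),
# keeps 100 / 25 * 2 = 8 evenly spaced frames:
#   sample_frames(2, 4, 768, metadata={"num_of_frame": 100, "fps": 25}, fps=2.0)
#   -> array([ 0, 12, 25, 37, 50, 62, 75, 87], dtype=int32)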
|
||||
|
||||
|
||||
class DataProcessor(MMBaseDataProcessor):
|
||||
"""
|
||||
Processes multimodal inputs (text, images, videos) into model-ready formats.
|
||||
|
||||
Handles:
|
||||
- Tokenization of text with special tokens for visual content
|
||||
- Image and video preprocessing
|
||||
- Generation of 3D positional embeddings
|
||||
- Conversion of chat messages to model inputs
|
||||
|
||||
Attributes:
|
||||
tokenizer: Text tokenizer instance
|
||||
image_processor: Image/video preprocessor
|
||||
image_token: Special token for image placeholders
|
||||
video_token: Special token for video placeholders
|
||||
vision_start: Token marking start of visual content
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
model_path: str,
|
||||
enable_processor_cache: bool = False,
|
||||
video_min_frames: int = FPS_MIN_FRAMES,
|
||||
video_max_frames: int = FPS_MAX_FRAMES,
|
||||
video_target_frames: int = -1,
|
||||
video_fps: int = FPS,
|
||||
tokens_per_second: int = 2,
|
||||
tokenizer=None,
|
||||
**kwargs,
|
||||
) -> None:
|
||||
"""
|
||||
Initialize the data processor.
|
||||
|
||||
Args:
|
||||
model_path: Path to pretrained model
|
||||
video_min_frames: Minimum frames to sample from videos
|
||||
video_max_frames: Maximum frames to sample from videos
|
||||
tokens_per_second: Temporal resolution for positional embeddings
|
||||
**kwargs: Additional configuration
|
||||
"""
|
||||
super().__init__()
|
||||
self.min_frames = video_min_frames
|
||||
self.max_frames = video_max_frames
|
||||
self.target_frames = video_target_frames
|
||||
self.fps = video_fps
|
||||
self.frame_factor = FRAME_FACTOR
|
||||
|
||||
# Initialize tokenizer with left padding and fast tokenizer
|
||||
if tokenizer is None:
|
||||
self.tokenizer = AutoTokenizer.from_pretrained(model_path, padding_side="left", use_fast=True)
|
||||
self.tokenizer.ignored_index = -100 # Set ignored index for loss calculation
|
||||
else:
|
||||
self.tokenizer = tokenizer
|
||||
|
||||
self.image_processor = ImageProcessor.from_pretrained(model_path) # Initialize image processor
|
||||
self.enable_processor_cache = enable_processor_cache
|
||||
|
||||
# Convolution sizes for patch aggregation
|
||||
self.spatial_conv_size = self.image_processor.merge_size
|
||||
self.temporal_conv_size = self.image_processor.temporal_patch_size
|
||||
|
||||
# Special tokens and IDs
|
||||
self.image_token = "<|image_pad|>"
|
||||
self.video_token = "<|video_pad|>"
|
||||
|
||||
self.image_token_id = self.tokenizer.convert_tokens_to_ids(self.image_token)
|
||||
self.video_token_id = self.tokenizer.convert_tokens_to_ids(self.video_token)
|
||||
|
||||
self.vision_start = "<|vision_start|>"
|
||||
self.vision_start_id = self.tokenizer.convert_tokens_to_ids(self.vision_start)
|
||||
|
||||
self.tokens_per_second = tokens_per_second
|
||||
|
||||
self.role_prefixes = {
|
||||
"system": "",
|
||||
"user": "User: ",
|
||||
"bot": "Assistant: ",
|
||||
"assistant": "Assistant: ",
|
||||
}
|
||||
|
||||
@staticmethod
|
||||
def mm_num_tokens(grid_thw: list | list[list[int]] | np.ndarray | paddle.Tensor) -> int | list[int]:
|
||||
"""
|
||||
Calculate the number of tokens in the multimodal input.
|
||||
"""
|
||||
if isinstance(grid_thw, paddle.Tensor):
|
||||
grid_thw = grid_thw.numpy()
|
||||
|
||||
if len(grid_thw) == 0:
|
||||
return 0
|
||||
|
||||
def calc_one(thw):
|
||||
t, h, w = map(int, thw)
|
||||
return t * h * w // 4
|
||||
|
||||
if isinstance(grid_thw[0], (list, tuple, np.ndarray)):
|
||||
return [calc_one(x) for x in grid_thw]
|
||||
|
||||
return calc_one(grid_thw)
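
    # Example (illustrative): each (t, h, w) entry contributes t * h * w // 4 tokens,
    # matching the 2x2 spatial merge of patches:
    #   DataProcessor.mm_num_tokens([1, 64, 64])                 -> 1024
    #   DataProcessor.mm_num_tokens([[1, 64, 64], [2, 32, 32]])  -> [1024, 512]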
|
||||
|
||||
def text2ids(self, text, images=None, videos=None, image_uuid=None, video_uuid=None):
|
||||
"""
|
||||
Convert text with image/video placeholders into model inputs.
|
||||
|
||||
Args:
|
||||
text: Input text with <|image@placeholder|> and <|video@placeholder|> markers
|
||||
images: List of PIL Images corresponding to image placeholders
|
||||
videos: List of video data corresponding to video placeholders
|
||||
image_uuid: List of unique identifiers for each image, used for caching or hashing.
|
||||
video_uuid: List of unique identifiers for each video, used for caching or hashing.
|
||||
|
||||
Returns:
|
||||
Dict containing:
|
||||
- input_ids: Token IDs
|
||||
- token_type_ids: Type identifiers (text/image/video)
|
||||
- position_ids: 3D positional embeddings
|
||||
- images: Preprocessed visual features
|
||||
- grid_thw: Spatial/temporal dimensions
|
||||
- image_type_ids: Visual content type (0=image, 1=video)
|
||||
"""
|
||||
|
||||
outputs = {
|
||||
"input_ids": [],
|
||||
"token_type_ids": [],
|
||||
"position_ids": [],
|
||||
"images": [],
|
||||
"grid_thw": [],
|
||||
"image_type_ids": [],
|
||||
"labels": [],
|
||||
"cur_position": 0,
|
||||
"video_cnt": 0,
|
||||
"num_input_image_tokens": 0,
|
||||
"num_input_video_tokens": 0,
|
||||
"fps": [],
|
||||
"mm_positions": [],
|
||||
"mm_hashes": [],
|
||||
}
|
||||
|
||||
# Define placeholders and their lengths
|
||||
IMAGE_PLACEHOLDER = "<|image_pad|>"
|
||||
VIDEO_PLACEHOLDER = "<|video_pad|>"
|
||||
IMAGE_PLACEHOLDER_LEN = len(IMAGE_PLACEHOLDER)
|
||||
VIDEO_PLACEHOLDER_LEN = len(VIDEO_PLACEHOLDER)
|
||||
|
||||
# Initialize tracking variables for text parsing
|
||||
st, image_idx, video_idx = 0, 0, 0 # Start position, image counter, video counter
|
||||
while st < len(text):
|
||||
# Find next image or video placeholder in text
|
||||
image_pos = text.find(IMAGE_PLACEHOLDER, st)
|
||||
image_pos = len(text) if image_pos == -1 else image_pos # Set to end if not found
|
||||
video_pos = text.find(VIDEO_PLACEHOLDER, st)
|
||||
video_pos = len(text) if video_pos == -1 else video_pos # Set to end if not found
|
||||
ed = min(image_pos, video_pos) # End position is first placeholder found
|
||||
|
||||
self._add_text(text[st:ed], outputs)
|
||||
if ed == len(text):
|
||||
break
|
||||
|
||||
if ed == image_pos:
|
||||
image = images[image_idx]
|
||||
uuid = image_uuid[image_idx] if image_uuid else None
|
||||
if not isinstance(image, tuple):
|
||||
self._add_image(image, outputs, uuid)
|
||||
else:
|
||||
self._add_processed_image(image, outputs, uuid)
|
||||
image_idx += 1
|
||||
st = ed + IMAGE_PLACEHOLDER_LEN
|
||||
else:
|
||||
item = videos[video_idx]
|
||||
uuid = video_uuid[video_idx] if video_uuid else None
|
||||
if not isinstance(item, tuple):
|
||||
if isinstance(item, dict):
|
||||
frames, meta = self._load_and_process_video(item["video"], item)
|
||||
else:
|
||||
frames, meta = self._load_and_process_video(item, {})
|
||||
self._add_video(frames, meta, outputs, uuid)
|
||||
else:
|
||||
# cached frames are already processed
|
||||
self._add_processed_video(item, outputs, uuid)
|
||||
video_idx += 1
|
||||
st = ed + VIDEO_PLACEHOLDER_LEN
|
||||
|
||||
return outputs
|
||||
|
||||
def request2ids(
|
||||
self, request: Request, tgts: List[str] = None
|
||||
) -> Dict[str, Union[np.ndarray, List[np.ndarray], None]]:
|
||||
"""
|
||||
Convert chat request with multimodal messages into model inputs.
|
||||
|
||||
Args:
|
||||
request: Request containing:
|
||||
- messages: List of chat messages with text/image/video content
|
||||
- request_id: Unique identifier for logging
|
||||
tgts: Optional target sequences
|
||||
|
||||
Returns:
|
||||
Dict with same structure as text2ids() output
|
||||
"""
|
||||
|
||||
# Parse and validate chat messages
|
||||
messages = parse_chat_messages(request.messages)
|
||||
mm_items = []
|
||||
for msg in messages:
|
||||
role = msg.get("role")
|
||||
assert role in self.role_prefixes, f"Unsupported role: {role}"
|
||||
|
||||
# Normalize content to list format
|
||||
content = msg.get("content")
|
||||
if not isinstance(content, list):
|
||||
content = [content]
|
||||
# Collect all visual content items
|
||||
for item in content:
|
||||
if item.get("type") in ["image", "video"]:
|
||||
mm_items.append(item)
|
||||
|
||||
missing_hashes, missing_idx = [], []
|
||||
for idx, item in enumerate(mm_items):
|
||||
if not item.get("data"):
|
||||
# raw data not provided, should be retrieved from processor cache
|
||||
missing_hashes.append(item.get("uuid"))
|
||||
missing_idx.append(idx)
|
||||
|
||||
if len(missing_hashes) > 0 and not self.enable_processor_cache:
|
||||
raise ValueError("Missing items cannot be retrieved without processor cache.")
|
||||
|
||||
if self.enable_processor_cache:
|
||||
context = zmq.Context()
|
||||
dealer = context.socket(zmq.DEALER)
|
||||
dealer.connect("ipc:///dev/shm/processor_cache.ipc")
|
||||
|
||||
missing_items = self.get_processor_cache(dealer, missing_hashes)
|
||||
for idx in range(len(missing_items)):
|
||||
if not missing_items[idx]:
|
||||
raise ValueError(f"Missing item {idx} not found in processor cache")
|
||||
mm_items[missing_idx[idx]]["data"] = missing_items[idx]
|
||||
|
||||
images, videos = [], []
|
||||
image_uuid, video_uuid = [], []
|
||||
for item in mm_items:
|
||||
if item.get("type") == "image":
|
||||
images.append(item["data"])
|
||||
image_uuid.append(item["uuid"])
|
||||
elif item.get("type") == "video":
|
||||
videos.append(item["data"])
|
||||
video_uuid.append(item["uuid"])
|
||||
else:
|
||||
raise ValueError(f"Unsupported multimodal type: {item.get('type')}")
|
||||
|
||||
if self.tokenizer.chat_template is None:
|
||||
raise ValueError("This model does not support chat template.")
|
||||
|
||||
chat_template_kwargs = request.chat_template_kwargs if request.chat_template_kwargs else {}
|
||||
prompt = self.tokenizer.apply_chat_template(
|
||||
messages,
|
||||
tokenize=False,
|
||||
add_generation_prompt=request.add_generation_prompt if request.add_generation_prompt is not None else True,
|
||||
**chat_template_kwargs,
|
||||
)
|
||||
request.prompt_tokens = prompt
|
||||
|
||||
outputs = self.text2ids(prompt, images, videos, image_uuid, video_uuid)
|
||||
|
||||
if self.enable_processor_cache:
|
||||
missing_idx = set(missing_idx)
|
||||
hashes_to_cache, items_to_cache = [], []
|
||||
for idx in range(len(mm_items)):
|
||||
if idx in missing_idx:
|
||||
continue
|
||||
meta = {}
|
||||
t, h, w = outputs["grid_thw"][idx]
|
||||
meta["thw"] = (t, h, w)
|
||||
meta["fps"] = outputs["fps"][idx]
|
||||
hashes_to_cache.append(outputs["mm_hashes"][idx])
|
||||
items_to_cache.append((outputs["images"][idx], meta))
|
||||
self.update_processor_cache(dealer, hashes_to_cache, items_to_cache)
|
||||
|
||||
return outputs
|
||||
|
||||
def _add_text(self, tokens, outputs: Dict) -> None:
|
||||
"""
|
||||
Add text tokens to model inputs dictionary.
|
||||
|
||||
Args:
|
||||
tokens: Text string or already tokenized IDs
|
||||
outputs: Dictionary accumulating model inputs
|
||||
|
||||
Note:
|
||||
- Handles both raw text and pre-tokenized inputs
|
||||
- Updates position IDs for 3D embeddings
|
||||
"""
|
||||
if not tokens:
|
||||
return None
|
||||
|
||||
if isinstance(tokens, str):
|
||||
tokens_str = self.tokenizer.tokenize(tokens)
|
||||
tokens = self.tokenizer.convert_tokens_to_ids(tokens_str)
|
||||
|
||||
num_tokens = len(tokens)
|
||||
outputs["input_ids"].extend(tokens)
|
||||
outputs["token_type_ids"].extend([IDS_TYPE_FLAG["text"]] * num_tokens)
|
||||
|
||||
pos_ids = self._compute_text_positions(outputs["cur_position"], num_tokens)
|
||||
outputs["position_ids"].append(pos_ids)
|
||||
outputs["cur_position"] = pos_ids.max() + 1
|
||||
|
||||
def _compute_text_positions(self, start_pos: int, num_tokens: int) -> np.ndarray:
|
||||
"""
|
||||
Generate 3D positional embeddings for text tokens.
|
||||
|
||||
Args:
|
||||
start_pos: Starting position index
|
||||
num_tokens: Number of tokens to generate positions for
|
||||
|
||||
Returns:
|
||||
numpy.ndarray: 3D position IDs shaped (3, num_tokens)
|
||||
"""
|
||||
text_array = np.arange(num_tokens).reshape(1, -1)
|
||||
text_index = np.broadcast_to(text_array, (3, num_tokens))
|
||||
position = text_index + start_pos
|
||||
return position
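
    # Example (illustrative): text tokens advance all three axes together, e.g.
    #   self._compute_text_positions(start_pos=5, num_tokens=3)
    #   -> array([[5, 6, 7],
    #             [5, 6, 7],
    #             [5, 6, 7]])   # shape (3, num_tokens)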
|
||||
|
||||
def _add_image(self, img, outputs: Dict, uuid: Optional[str]) -> None:
|
||||
"""
|
||||
Add image data to model inputs dictionary.
|
||||
|
||||
Args:
|
||||
img: PIL Image to process
|
||||
outputs: Dictionary accumulating model inputs
|
||||
|
||||
Note:
|
||||
- Preprocesses image and calculates spatial dimensions
|
||||
- Adds image token IDs and type markers
|
||||
- Generates appropriate position embeddings
|
||||
"""
|
||||
ret = self.image_processor.preprocess(images=[img.convert("RGB")])
|
||||
num_tokens = ret["grid_thw"].prod() // self.image_processor.merge_size**2
|
||||
grid_thw = ret["grid_thw"].tolist()
|
||||
|
||||
outputs["mm_positions"].append(ImagePosition(len(outputs["input_ids"]), num_tokens))
|
||||
outputs["input_ids"].extend([self.image_token_id] * num_tokens)
|
||||
outputs["token_type_ids"].extend([IDS_TYPE_FLAG["image"]] * num_tokens)
|
||||
outputs["num_input_image_tokens"] += int(num_tokens)
|
||||
|
||||
outputs["images"].append(ret["pixel_values"])
|
||||
if not uuid:
|
||||
outputs["mm_hashes"].append(MultimodalHasher.hash_features(ret["pixel_values"]))
|
||||
else:
|
||||
outputs["mm_hashes"].append(uuid)
|
||||
outputs["grid_thw"].append(grid_thw)
|
||||
outputs["image_type_ids"].append(0)
|
||||
|
||||
t, h, w = grid_thw
|
||||
pos_ids = self._compute_vision_positions(outputs["cur_position"], t, h, w, 0)
|
||||
|
||||
outputs["position_ids"].append(pos_ids)
|
||||
outputs["cur_position"] = pos_ids.max() + 1
|
||||
|
||||
outputs["fps"].append(0)
|
||||
|
||||
def _add_processed_image(self, img_cache: Tuple[np.ndarray, dict], outputs: Dict, uuid: str) -> None:
|
||||
img, meta = img_cache
|
||||
num_tokens = img.shape[0] // self.image_processor.merge_size**2
|
||||
|
||||
outputs["mm_positions"].append(ImagePosition(len(outputs["input_ids"]), num_tokens))
|
||||
outputs["input_ids"].extend([self.image_patch_id] * num_tokens)
|
||||
outputs["token_type_ids"].extend([IDS_TYPE_FLAG["image"]] * num_tokens)
|
||||
|
||||
_, h, w = meta["thw"]
|
||||
pos_ids = self._compute_vision_positions(outputs["cur_position"], 1, h, w, 0)
|
||||
outputs["position_ids"].append(pos_ids)
|
||||
outputs["cur_position"] = pos_ids.max() + 1
|
||||
|
||||
outputs["images"].append(img)
|
||||
outputs["mm_hashes"].append(uuid)
|
||||
outputs["grid_thw"].append(np.array([[1, h, w]]))
|
||||
outputs["image_type_ids"].append(0)
|
||||
|
||||
outputs["fps"].append(0)
|
||||
|
||||
def _add_video(self, frames, meta: Dict, outputs: Dict, uuid: Optional[str]) -> None:
|
||||
"""
|
||||
Add video data to model inputs dictionary.
|
||||
|
||||
Args:
|
||||
frames: Video frames as numpy array
|
||||
meta: Video metadata containing fps/duration
|
||||
outputs: Dictionary accumulating model inputs
|
||||
|
||||
Note:
|
||||
- Handles temporal dimension in position embeddings
|
||||
- Uses video-specific token IDs and type markers
|
||||
"""
|
||||
ret = self.image_processor.preprocess(
|
||||
images=frames,
|
||||
min_pixels=VIDEO_MIN_PIXELS,
|
||||
max_pixels=VIDEO_MAX_PIXELS,
|
||||
)
|
||||
|
||||
num_tokens = ret["grid_thw"].prod() // self.image_processor.merge_size**2
|
||||
grid_thw = ret["grid_thw"].tolist()
|
||||
|
||||
outputs["mm_positions"].append(ImagePosition(len(outputs["input_ids"]), num_tokens))
|
||||
        # Hack: to fit the current framework, only the image token can be passed here.
        # The correct approach would be to use [self.video_token_id] * num_tokens.
|
||||
outputs["input_ids"].extend([self.image_token_id] * num_tokens)
|
||||
outputs["token_type_ids"].extend([IDS_TYPE_FLAG["video"]] * num_tokens)
|
||||
outputs["num_input_video_tokens"] += int(num_tokens)
|
||||
|
||||
outputs["images"].append(ret["pixel_values"])
|
||||
if not uuid:
|
||||
outputs["mm_hashes"].append(MultimodalHasher.hash_features(ret["pixel_values"]))
|
||||
else:
|
||||
outputs["mm_hashes"].append(uuid)
|
||||
outputs["grid_thw"].append(grid_thw)
|
||||
outputs["image_type_ids"].extend([1] * grid_thw[0])
|
||||
|
||||
fps = meta["fps"]
|
||||
second_per_grid_t = self.temporal_conv_size / fps
|
||||
t, h, w = grid_thw
|
||||
pos_ids = self._compute_vision_positions(outputs["cur_position"], t, h, w, second_per_grid_t)
|
||||
|
||||
outputs["position_ids"].append(pos_ids)
|
||||
outputs["cur_position"] = pos_ids.max() + 1
|
||||
|
||||
outputs["fps"].append(fps)
|
||||
|
||||
def _add_processed_video(self, frames_cache: Tuple[np.ndarray, dict], outputs: Dict, uuid: str) -> None:
|
||||
frames, meta = frames_cache
|
||||
num_tokens = frames.shape[0] // self.image_processor.merge_size**2
|
||||
|
||||
t, h, w = meta["thw"]
|
||||
outputs["images"].append(frames)
|
||||
outputs["mm_hashes"].append(uuid)
|
||||
outputs["grid_thw"].append(np.array([[t, h, w]]))
|
||||
|
||||
outputs["mm_positions"].append(ImagePosition(len(outputs["input_ids"]), num_tokens))
|
||||
outputs["input_ids"].extend([self.image_patch_id] * num_tokens)
|
||||
outputs["token_type_ids"].extend([IDS_TYPE_FLAG["video"]] * num_tokens)
|
||||
outputs["image_type_ids"].extend([1] * t)
|
||||
|
||||
fps = meta["fps"]
|
||||
second_per_grid_t = self.temporal_conv_size / fps
|
||||
pos_ids = self._compute_vision_positions(outputs["cur_position"], t, h, w, second_per_grid_t)
|
||||
outputs["position_ids"].append(pos_ids)
|
||||
outputs["cur_position"] = pos_ids.max() + 1
|
||||
|
||||
outputs["fps"].append(fps)
|
||||
|
||||
def _compute_vision_positions(
|
||||
self, start_pos: int, t: int, h: int, w: int, second_per_grid_t: float
|
||||
) -> np.ndarray:
|
||||
"""
|
||||
Generate 3D position IDs for visual inputs.
|
||||
|
||||
Args:
|
||||
start_pos: Base position in sequence
|
||||
t: Temporal patches (1 for images)
|
||||
h: Height in patches
|
||||
w: Width in patches
|
||||
second_per_grid_t: Time per temporal patch
|
||||
|
||||
Returns:
|
||||
np.ndarray: Position IDs for [t,h,w] dimensions
|
||||
"""
|
||||
h //= self.spatial_conv_size
|
||||
w //= self.spatial_conv_size
|
||||
|
||||
tn = np.arange(t).reshape(-1, 1)
|
||||
tn = np.broadcast_to(tn, (t, h * w))
|
||||
tn = tn * int(second_per_grid_t) * self.tokens_per_second
|
||||
t_index = tn.flatten()
|
||||
|
||||
hn = np.arange(h).reshape(1, -1, 1)
|
||||
h_index = np.broadcast_to(hn, (t, h, w)).flatten()
|
||||
|
||||
wn = np.arange(w).reshape(1, 1, -1)
|
||||
w_index = np.broadcast_to(wn, (t, h, w)).flatten()
|
||||
|
||||
position = np.stack([t_index, h_index, w_index]) + start_pos
|
||||
return position
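
    # Example (illustrative sketch): a single image of 4x4 patches with the default
    # spatial_conv_size=2 collapses to a 2x2 grid; with start_pos=10 and
    # second_per_grid_t=0 this yields one position per merged patch:
    #   self._compute_vision_positions(10, t=1, h=4, w=4, second_per_grid_t=0)
    #   -> array([[10, 10, 10, 10],    # temporal axis (constant for images)
    #             [10, 10, 11, 11],    # height axis
    #             [10, 11, 10, 11]])   # width axis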
|
||||
|
||||
def _load_and_process_video(self, url: str, item: Dict) -> Tuple[np.ndarray, Dict]:
|
||||
"""
|
||||
Load and preprocess video into frames.
|
||||
|
||||
Args:
|
||||
url: Video file path or bytes
|
||||
item: Dictionary containing processing parameters
|
||||
|
||||
Returns:
|
||||
tuple: (frames, metadata) where:
|
||||
- frames: Processed video frames as numpy array
|
||||
- metadata: Updated video metadata dictionary
|
||||
"""
|
||||
reader, meta, _ = read_video_decord(url, save_to_disk=False)
|
||||
|
||||
# Apply frame sampling if fps or target_frames specified
|
||||
fps = item.get("fps", self.fps)
|
||||
num_frames = item.get("target_frames", self.target_frames)
|
||||
|
||||
frame_indices = list(range(meta["num_of_frame"]))
|
||||
if fps > 0 or num_frames > 0:
|
||||
# Get frame sampling constraints
|
||||
min_frames = item.get("min_frames", self.min_frames)
|
||||
max_frames = item.get("max_frames", self.max_frames)
|
||||
|
||||
# Sample frames according to specifications
|
||||
frame_indices = sample_frames(
|
||||
frame_factor=self.frame_factor, # Ensure divisible by temporal patch size
|
||||
min_frames=min_frames,
|
||||
max_frames=max_frames,
|
||||
metadata=meta,
|
||||
fps=fps,
|
||||
num_frames=num_frames,
|
||||
)
|
||||
|
||||
# Update metadata with new frame count and fps
|
||||
meta["num_of_frame"] = len(frame_indices)
|
||||
if fps is not None:
|
||||
meta["fps"] = fps # Use specified fps
|
||||
meta["duration"] = len(frame_indices) / fps
|
||||
else:
|
||||
meta["fps"] = len(frame_indices) / meta["duration"] # Calculate fps from sampled frames
|
||||
|
||||
frames = []
|
||||
for idx in frame_indices:
|
||||
frame = reader[idx].asnumpy()
|
||||
image = Image.fromarray(frame, "RGB")
|
||||
frames.append(image)
|
||||
frames = np.stack([np.array(f.convert("RGB")) for f in frames], axis=0)
|
||||
|
||||
return frames, meta
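
    # Example (illustrative): a 10 s clip at 25 fps processed with the defaults
    # (fps=2.0, target_frames=-1) keeps 20 evenly spaced frames, and the returned
    # metadata is updated to num_of_frame=20, fps=2.0, duration=10.0.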
|
||||
|
||||
def get_processor_cache(self, socket, mm_hashes: list[str]) -> list:
|
||||
"""
|
||||
        Get the cached items corresponding to the given hash values.
|
||||
"""
|
||||
req = pickle.dumps(mm_hashes)
|
||||
socket.send_multipart([b"", req])
|
||||
_, resp = socket.recv_multipart()
|
||||
mm_items = pickle.loads(resp)
|
||||
data_processor_logger.info(f"Get cache of mm_hashes: {mm_hashes}")
|
||||
|
||||
return mm_items
|
||||
|
||||
def update_processor_cache(self, socket, mm_hashes: list[str], mm_items):
|
||||
"""
|
||||
        Update the processor cache for the given hash values with the corresponding items.
|
||||
"""
|
||||
req = pickle.dumps((mm_hashes, mm_items))
|
||||
socket.send_multipart([b"", req])
|
||||
data_processor_logger.info(f"Update cache of mm_hashes: {mm_hashes}")
|
||||
@@ -0,0 +1,328 @@
"""
# Copyright (c) 2025 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""

import numpy as np
|
||||
|
||||
from fastdeploy.engine.request import Request
|
||||
from fastdeploy.input.v1.text_processor import DataProcessor as TextProcessor
|
||||
from fastdeploy.utils import data_processor_logger
|
||||
|
||||
from .process import DataProcessor
|
||||
|
||||
|
||||
class Qwen3VLProcessor(TextProcessor):
|
||||
"""
|
||||
Qwen Vision-Language processor for handling multimodal inputs.
|
||||
|
||||
This processor extends TextProcessor to support:
|
||||
- Image and video processing
|
||||
- Multimodal feature extraction
|
||||
- Tokenization and position encoding
|
||||
- Request processing and model input generation
|
||||
|
||||
Attributes:
|
||||
processor (DataProcessor): Underlying data processor instance
|
||||
tokenizer: Text tokenizer instance
|
||||
limit_mm_per_prompt (dict): Limits for multimodal inputs per prompt
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
config,
|
||||
model_name_or_path,
|
||||
limit_mm_per_prompt=None,
|
||||
mm_processor_kwargs=None,
|
||||
reasoning_parser_obj=None,
|
||||
tool_parser_obj=None,
|
||||
enable_processor_cache=False,
|
||||
):
|
||||
"""
|
||||
        Initialize Qwen3VLProcessor instance.
|
||||
|
||||
Args:
|
||||
config: Model configuration object
|
||||
model_name_or_path (str): Pretrained model name or path
|
||||
limit_mm_per_prompt (dict, optional): Limits for multimodal inputs
|
||||
mm_processor_kwargs (dict, optional): Multimodal processor arguments
|
||||
reasoning_parser_obj: Reasoning parser instance
|
||||
tool_parser_obj: Tool parser instance
|
||||
"""
|
||||
super().__init__(model_name_or_path, reasoning_parser_obj, tool_parser_obj)
|
||||
|
||||
data_processor_logger.info(f"model_name_or_path: {model_name_or_path}")
|
||||
processor_kwargs = self._parse_processor_kwargs(mm_processor_kwargs)
|
||||
self.processor = DataProcessor(
|
||||
model_path=model_name_or_path,
|
||||
enable_processor_cache=enable_processor_cache,
|
||||
# tokens_per_second=config.vision_config.tokens_per_second,
|
||||
tokenizer=self.tokenizer,
|
||||
**processor_kwargs,
|
||||
)
|
||||
self.image_patch_id = self.processor.image_token_id
|
||||
self.limit_mm_per_prompt = self._parse_limits(limit_mm_per_prompt)
|
||||
|
||||
def _parse_processor_kwargs(self, kwargs):
|
||||
"""
|
||||
Parse and validate multimodal processor arguments.
|
||||
|
||||
Args:
|
||||
kwargs (dict): Processor configuration arguments
|
||||
|
||||
Returns:
|
||||
dict: Validated processor arguments
|
||||
|
||||
Raises:
|
||||
ValueError: If arguments format is invalid
|
||||
"""
|
||||
if not kwargs:
|
||||
return {}
|
||||
|
||||
try:
|
||||
if not isinstance(kwargs, dict):
|
||||
raise ValueError("mm-processor-kwargs must be a dictionary")
|
||||
|
||||
# Validate kwargs types against expected schema
|
||||
data_processor_logger.info(f"Processing kwargs: {kwargs}")
|
||||
expected_types = {
|
||||
"video_max_frames": int, # Maximum video frames parameter
|
||||
"video_min_frames": int, # Minimum video frames parameter
|
||||
}
|
||||
|
||||
for key, value in kwargs.items():
|
||||
if key in expected_types and not isinstance(value, expected_types[key]):
|
||||
raise ValueError(
|
||||
f"Invalid type for {key}: expected {expected_types[key].__name__}, got {type(value).__name__}"
|
||||
)
|
||||
|
||||
return kwargs
|
||||
|
||||
except Exception as e:
|
||||
data_processor_logger.warning(f"Invalid mm-processor-kwargs format: {e}")
|
||||
return {}
|
||||
|
||||
def _parse_limits(self, limits):
|
||||
"""
|
||||
Parse and validate multimodal input limits.
|
||||
|
||||
Args:
|
||||
limits (dict): Input limits configuration
|
||||
|
||||
Returns:
|
||||
dict: Validated limits with defaults
|
||||
|
||||
Raises:
|
||||
ValueError: If limits format is invalid
|
||||
"""
|
||||
DEFAULT_LIMITS = {"image": 1, "video": 1, "audio": 1}
|
||||
|
||||
if not limits:
|
||||
return DEFAULT_LIMITS
|
||||
|
||||
try:
|
||||
if not isinstance(limits, dict):
|
||||
raise ValueError("limit-mm-per-prompt must be a dictionary")
|
||||
data_processor_logger.info(f"_parse_limits:{limits}")
|
||||
return {**DEFAULT_LIMITS, **limits}
|
||||
except Exception as e:
|
||||
data_processor_logger.warning(f"Invalid limit-mm-per-prompt format: {e}, using default limits")
|
||||
return DEFAULT_LIMITS
|
||||
|
||||
def _check_mm_limits(self, item):
|
||||
"""
|
||||
Validate multimodal inputs against configured limits.
|
||||
|
||||
Args:
|
||||
item: Input request item to validate
|
||||
|
||||
Raises:
|
||||
ValueError: If input exceeds configured limits
|
||||
"""
|
||||
if isinstance(item, dict):
|
||||
            # The request carries a prompt together with multi_modal_data
|
||||
mm_data = item
|
||||
else:
|
||||
            # The request carries chat messages
|
||||
mm_data = {"image": [], "video": []}
|
||||
|
||||
for message in item:
|
||||
if isinstance(message.get("content"), list):
|
||||
for part in message["content"]:
|
||||
if part.get("type") in ["image_url", "image"]:
|
||||
mm_data["image"].append(part)
|
||||
elif part.get("type") in ["video_url", "video"]:
|
||||
mm_data["video"].append(part)
|
||||
|
||||
for modality, data in mm_data.items():
|
||||
if modality in self.limit_mm_per_prompt:
|
||||
limit = self.limit_mm_per_prompt[modality]
|
||||
if len(data) > limit:
|
||||
raise ValueError(f"Too many {modality} items in prompt, " f"got {len(data)} but limit is {limit}")
|
||||
|
||||
def process_request(self, request, max_model_len=None, **kwargs):
|
||||
"""
|
||||
Process incoming request and generate model inputs.
|
||||
|
||||
Args:
|
||||
request: Input request object
|
||||
max_model_len (int, optional): Maximum context length
|
||||
**kwargs: Additional processing parameters
|
||||
|
||||
Returns:
|
||||
Request: Processed request with model inputs
|
||||
"""
|
||||
task = request.to_dict()
|
||||
task["enable_thinking"] = kwargs.get("enable_thinking", False)
|
||||
self.process_request_dict(task, max_model_len)
|
||||
request = Request.from_dict(task)
|
||||
request = self._apply_default_parameters(request)
|
||||
return request
|
||||
|
||||
def process_request_dict(self, request, max_model_len=None, **kwargs):
|
||||
"""
|
||||
Process request dictionary into model inputs.
|
||||
|
||||
Args:
|
||||
request Request: Input request dictionary
|
||||
max_model_len (int, optional): Maximum context length
|
||||
|
||||
Returns:
|
||||
Request: Processed request with model inputs
|
||||
|
||||
Raises:
|
||||
ValueError: If request format is invalid
|
||||
"""
|
||||
|
||||
request = self._apply_default_parameters(request)
|
||||
if not request.eos_token_ids:
|
||||
request.eos_token_ids = self.eos_token_ids
|
||||
|
||||
stop_sequences = request.sampling_params.stop
|
||||
if stop_sequences:
|
||||
stop_seqs, stop_seqs_len = self.update_stop_seq(stop_sequences)
|
||||
request.sampling_params.stop_token_ids = stop_seqs
|
||||
request.sampling_params.stop_seqs_len = stop_seqs_len
|
||||
|
||||
bad_words = request.sampling_params.bad_words
|
||||
bad_words_token_ids = request.sampling_params.bad_words_token_ids
|
||||
if bad_words:
|
||||
bad_words_token_ids = self.update_bad_words(bad_words, bad_words_token_ids)
|
||||
request.sampling_params.bad_words_token_ids = bad_words_token_ids
|
||||
|
||||
if request.prompt:
|
||||
multimodal_data = request.multimodal_data
|
||||
if multimodal_data is None:
|
||||
multimodal_data = {}
|
||||
self._check_mm_limits(multimodal_data)
|
||||
images = multimodal_data.get("image", None)
|
||||
videos = multimodal_data.get("video", None)
|
||||
outputs = self.processor.text2ids(request.prompt, images, videos)
|
||||
|
||||
elif request.messages:
|
||||
messages = request.messages
|
||||
self._check_mm_limits(messages)
|
||||
chat_template_kwargs = request.chat_template_kwargs
|
||||
if chat_template_kwargs:
|
||||
if isinstance(chat_template_kwargs, dict):
|
||||
for k, v in chat_template_kwargs.items():
|
||||
if getattr(request, k, None) is None:
|
||||
setattr(request, k, v)
|
||||
else:
|
||||
raise ValueError("Invalid input: chat_template_kwargs must be a dict")
|
||||
if request.enable_thinking is None:
|
||||
request.enable_thinking = kwargs.get("enable_thinking", False)
|
||||
outputs = self.processor.request2ids(request)
|
||||
delattr(request, "chat_template_kwargs")
|
||||
else:
|
||||
raise ValueError(f"Request must contain 'prompt', or 'messages': {request}")
|
||||
|
||||
# Handle continuation of previous generation by appending existing tokens
|
||||
if request.completion_token_ids:
|
||||
self.append_completion_tokens(outputs, request.completion_token_ids)
|
||||
|
||||
        # Thinking mode is not supported by this processor, so force-disable it
|
||||
request.enable_thinking = False
|
||||
|
||||
outputs = self.pack_outputs(outputs)
|
||||
|
||||
request.prompt_token_ids = outputs["input_ids"].tolist()
|
||||
request.prompt_token_ids_len = len(request.prompt_token_ids)
|
||||
request.multimodal_inputs = outputs
|
||||
|
||||
# Handle prompt truncation if exceeds model context length
|
||||
if max_model_len is not None and len(request.prompt_token_ids) > max_model_len:
|
||||
request.prompt_token_ids = request.prompt_token_ids[
|
||||
: max_model_len - 1
|
||||
] # Leave space for at least 1 new token
|
||||
|
||||
# Set default max_tokens if not specified
|
||||
if request.sampling_params.max_tokens is None:
|
||||
request.sampling_params.max_tokens = max(
|
||||
1, max_model_len - len(request.prompt_token_ids)
|
||||
) # Ensure at least 1 token
|
||||
data_processor_logger.info(f"Processed request {request}")
|
||||
|
||||
return request
|
||||
|
||||
def append_completion_tokens(self, multimodal_inputs, completion_token_ids):
|
||||
"""
|
||||
Append completion tokens to existing outputs.
|
||||
|
||||
Args:
|
||||
            multimodal_inputs: Accumulated multimodal model inputs to extend
|
||||
completion_token_ids: completion tokens to append
|
||||
"""
|
||||
|
||||
num_tokens = len(completion_token_ids)
|
||||
multimodal_inputs["input_ids"].extend(completion_token_ids)
|
||||
multimodal_inputs["token_type_ids"].extend([0] * num_tokens)
|
||||
|
||||
pos_ids = self.processor._compute_text_positions(multimodal_inputs["cur_position"], num_tokens)
|
||||
multimodal_inputs["position_ids"].append(pos_ids)
|
||||
multimodal_inputs["cur_position"] += num_tokens
|
||||
|
||||
def pack_outputs(self, outputs):
|
||||
"""
|
||||
Prepare final output dictionary for model.
|
||||
|
||||
Args:
|
||||
outputs: Intermediate processing outputs
|
||||
|
||||
Returns:
|
||||
dict: Packed output dictionary with all required fields
|
||||
"""
|
||||
if not outputs["images"]:
|
||||
outputs["images"] = None # No images case
|
||||
outputs["grid_thw"] = None # No spatial dimensions
|
||||
outputs["image_type_ids"] = None # No type IDs
|
||||
else:
|
||||
outputs["images"] = np.vstack(outputs["images"]) # Stack image features vertically
|
||||
outputs["grid_thw"] = np.vstack(outputs["grid_thw"]) # Stack spatial dimensions
|
||||
outputs["image_type_ids"] = np.array(outputs["image_type_ids"]) # Convert to numpy array
|
||||
|
||||
# Convert all outputs to numpy arrays with appropriate types
|
||||
outputs["input_ids"] = np.array(outputs["input_ids"], dtype=np.int64) # Token IDs as int64
|
||||
outputs["token_type_ids"] = np.array(outputs["token_type_ids"], dtype=np.int64) # Type IDs as int64
|
||||
outputs["position_ids"] = np.concatenate(
|
||||
outputs["position_ids"], axis=1, dtype=np.int64
|
||||
) # Concatenate position ID
|
||||
|
||||
outputs["image_patch_id"] = self.processor.image_token_id
|
||||
outputs["video_patch_id"] = self.processor.video_token_id
|
||||
outputs["position_ids"] = outputs["position_ids"].transpose(1, 0)
|
||||
|
||||
outputs["mm_num_token_func"] = self.processor.mm_num_tokens
|
||||
|
||||
return outputs
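
    # Sketch of the packed result (illustrative): for a prompt whose single image
    # contributes N visual tokens alongside M text tokens,
    #   outputs["input_ids"].shape       == (M + N,)       # int64
    #   outputs["token_type_ids"].shape  == (M + N,)       # int64
    #   outputs["position_ids"].shape    == (M + N, 3)     # transposed from (3, M + N)
    #   outputs["grid_thw"].shape        == (1, 3)
    #   outputs["images"].shape[0]       == N * merge_size**2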
|
||||