[Optimization] The pre- and post-processing pipelines no longer perform dict conversion (#5494)

* to_request_for_infer initial commit

* refactor to from_chat_completion_request

* preprocess: use request (initial commit)

* bugfix

* refactor processors to use request

* bug fix

* refactor Request.from_generic_request

* post process initial commit

* bugfix

* postprocess second commit

* bugfix

* serving_embedding initial commit

* serving_reward initial commit

* bugfix

* replace function name

* async_llm initial commit

* offline initial commit and fix bug

* bugfix

* fix async_llm

* stop adding speculate_metrics into data

* fix logprobs bug

* fix echo bug

* fix bug

* fix reasoning_max_tokens

* bugfix

* bugfix and modify unittest

* bugfix and modify unit test

* bugfix

* bugfix

* bugfix

* modify unittest

* fix error when reasoning_content is None for text_processor

* remove some unnecessary logic

* revert removed logic

* implement add and set methods for RequestOutput and refactor code

* modify unit test

* modify unit test

* unify process_request and process_request_obj

* remove a unit test

* unify process_response and process_response_obj

* support qwen3_vl_processor

* modify unittest and remove comments

* fix prompt_logprobs

* fix codestyle

* add v1

* v1

* fix unit test

* fix unit test

* fix pre-commit

* fix

* add process request

* add process request

* fix

* fix

* fix unit test

* fix unit test

* fix unit test

* fix unit test

* fix unit test

* remove file

* add unit test

* add unit test

* add unit test

* fix unit test

* fix unit test

* fix

* fix

---------

Co-authored-by: Jiaxin Sui <95567040+plusNew001@users.noreply.github.com>
Co-authored-by: luukunn <981429396@qq.com>
Co-authored-by: luukunn <83932082+luukunn@users.noreply.github.com>
Co-authored-by: Zhang Yulong <35552275+ZhangYulongg@users.noreply.github.com>
Authored by kxz2002 on 2026-01-22 00:50:52 +08:00, committed by GitHub
parent fe5ba4b509, commit 6e416c62dd
66 changed files with 16614 additions and 739 deletions
@@ -15,17 +15,20 @@
 """
 import base64
+import time
 from collections.abc import AsyncGenerator
 from typing import Literal, Union
 
 import numpy as np
 from typing_extensions import assert_never, override
 
+import fastdeploy.envs as envs
 from fastdeploy.engine.pooling_params import PoolingParams
 from fastdeploy.engine.request import (
     EmbeddingOutput,
     EmbeddingRequestOutput,
     PoolingRequestOutput,
+    Request,
 )
 from fastdeploy.entrypoints.openai.protocol import (
     EmbeddingCompletionRequest,
@@ -66,12 +69,25 @@ class OpenAIServingEmbedding(ZmqOpenAIServing):
     @override
     def _request_to_dict(self, ctx: ServeContext):
         request: EmbeddingRequest = ctx.request
-        request_dict = super()._request_to_dict(ctx)
-        if hasattr(request, "to_pooling_params"):
-            pooling_params: PoolingParams = request.to_pooling_params()
-            pooling_params.verify("embed", self.cfg.model_config)
-            request_dict["pooling_params"] = pooling_params.to_dict()
-        return request_dict
+        if not envs.ENABLE_V1_DATA_PROCESSOR:
+            request_dict = super()._request_to_dict(ctx)
+            if hasattr(request, "to_pooling_params"):
+                pooling_params: PoolingParams = request.to_pooling_params()
+                pooling_params.verify("embed", self.cfg.model_config)
+                request_dict["pooling_params"] = pooling_params.to_dict()
+            request_dict["metrics"] = {}
+            return request_dict
+        else:
+            request_obj = None
+            if hasattr(request, "to_pooling_params"):
+                pooling_params: PoolingParams = request.to_pooling_params()
+                pooling_params.verify("embed", self.cfg.model_config)
+                request_obj = Request.from_generic_request(
+                    req=request, request_id=ctx.request_id, pooling_params=pooling_params
+                )
+            request_obj.metrics.arrival_time = time.time()
+            super()._process_chat_template_kwargs(request_obj)
+            return request_obj
 
     @override
     def _request_to_batch_dicts(self, ctx: ServeContext):
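
For context, below is a minimal, self-contained sketch of the gating pattern this hunk introduces: the legacy path flattens the request into a plain dict before it enters the pipeline, while the V1 path keeps one structured request object end to end and skips the dict round-trip. All names here (a module-level ENABLE_V1_DATA_PROCESSOR flag, StructuredRequest, Metrics, request_to_payload) are hypothetical stand-ins for illustration, not FastDeploy's actual API.

import time
from dataclasses import dataclass, field

# Hypothetical stand-in for fastdeploy.envs.ENABLE_V1_DATA_PROCESSOR.
ENABLE_V1_DATA_PROCESSOR = True

@dataclass
class Metrics:
    arrival_time: float = 0.0

@dataclass
class StructuredRequest:
    # Hypothetical stand-in for fastdeploy.engine.request.Request.
    request_id: str
    prompt: str
    pooling_params: dict
    metrics: Metrics = field(default_factory=Metrics)

def request_to_payload(request_id: str, prompt: str, pooling_params: dict):
    """Legacy path returns a plain dict; V1 path returns a structured object."""
    if not ENABLE_V1_DATA_PROCESSOR:
        # Legacy path: serialize everything into a dict (the conversion the PR removes).
        return {
            "request_id": request_id,
            "prompt": prompt,
            "pooling_params": pooling_params,
            "metrics": {},
        }
    # V1 path: build one structured object and pass it through unchanged.
    request = StructuredRequest(
        request_id=request_id, prompt=prompt, pooling_params=pooling_params
    )
    request.metrics.arrival_time = time.time()
    return request

# Usage: the same call site works for both paths; only the payload type changes.
payload = request_to_payload("req-0", "hello", {"task": "embed"})
print(type(payload).__name__)

The design point mirrored here is that downstream processors receive typed attributes (payload.metrics.arrival_time) instead of string-keyed dict entries, which is what lets the pre- and post-processing pipeline avoid repeated dict conversion.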