Mirror of https://github.com/PaddlePaddle/FastDeploy.git
[Optimization] The pre- and post-processing pipeline does not perform dict conversion (#5494)
* to_request_for_infer initial commit
* refactor to from_chat_completion_request
* preprocess: use request, initial commit
* bugfix
* processors: refactor to use request
* bug fix
* refactor Request.from_generic_request
* post-process initial commit
* bugfix
* postprocess second commit
* bugfix
* serving_embedding initial commit
* serving_reward initial commit
* bugfix
* replace function name
* async_llm initial commit
* offline initial commit and fix bug
* bugfix
* fix async_llm
* remove adding speculate_metrics into data
* fix logprobs bug
* fix echo bug
* fix bug
* fix reasoning_max_tokens
* bugfix
* bugfix and modify unittest
* bugfix and modify unit test
* bugfix
* bugfix
* bugfix
* modify unittest
* fix error when reasoning_content is None for text_processor
* remove some unnecessary logic
* revert removed logic
* implement add and set methods for RequestOutput and refactor code
* modify unit test
* modify unit test
* unify process_request and process_request_obj
* remove a unit test
* unify process_response and process_response_obj
* support qwen3_vl_processor
* modify unittest and remove comments
* fix prompt_logprobs
* fix codestyle
* add v1
* v1
* fix unit test
* fix unit test
* fix pre-commit
* fix
* add process request
* add process request
* fix
* fix
* fix unit test
* fix unit test
* fix unit test
* fix unit test
* fix unit test
* remove file
* add unit test
* add unit test
* add unit test
* fix unit test
* fix unit test
* fix
* fix

---------

Co-authored-by: Jiaxin Sui <95567040+plusNew001@users.noreply.github.com>
Co-authored-by: luukunn <981429396@qq.com>
Co-authored-by: luukunn <83932082+luukunn@users.noreply.github.com>
Co-authored-by: Zhang Yulong <35552275+ZhangYulongg@users.noreply.github.com>
@@ -27,7 +27,7 @@ import numpy as np
 import fastdeploy.envs as envs
 import fastdeploy.metrics.trace as tracing
-from fastdeploy.engine.request import RequestOutput
+from fastdeploy.engine.request import Request, RequestOutput
 from fastdeploy.entrypoints.openai.protocol import (
     CompletionLogprobs,
     CompletionRequest,
@@ -178,8 +178,11 @@ class OpenAIServingCompletion:
         try:
             for idx, prompt in enumerate(request_prompts):
                 request_id_idx = f"{request_id}_{idx}"
-                current_req_dict = request.to_dict_for_infer(request_id_idx, prompt)
-                current_req_dict["arrival_time"] = time.time()
+                if not envs.ENABLE_V1_DATA_PROCESSOR:
+                    current_req_dict = request.to_dict_for_infer(request_id_idx, prompt)
+                else:
+                    current_req_dict = Request.from_generic_request(request, request_id=f"{request_id}_0")
+                current_req_dict["metrics"]["arrival_time"] = time.time()
                 prompt_token_ids = await self.engine_client.format_and_add_data(current_req_dict)  # tokenize
                 if isinstance(prompt_token_ids, np.ndarray):
                     prompt_token_ids = prompt_token_ids.tolist()
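The substantive change in this hunk is the branch on envs.ENABLE_V1_DATA_PROCESSOR: the legacy path still converts the incoming request to a plain dict via request.to_dict_for_infer, while the V1 path builds a Request object directly with Request.from_generic_request, skipping the dict round-trip the PR title refers to. Note that the V1 branch pins the id to f"{request_id}_0" rather than the per-prompt request_id_idx. Below is a minimal, self-contained sketch of the same dispatch pattern; ENABLE_V1_DATA_PROCESSOR, SimpleRequest, and build_engine_payload here are illustrative stand-ins, not FastDeploy's actual classes or env plumbing.

import os
import time

# Hypothetical stand-in: the real flag lives in fastdeploy.envs.
ENABLE_V1_DATA_PROCESSOR = os.getenv("ENABLE_V1_DATA_PROCESSOR", "0") == "1"

class SimpleRequest:
    """Illustrative stand-in for fastdeploy.engine.request.Request."""

    def __init__(self, request_id, prompt):
        self.request_id = request_id
        self.prompt = prompt
        self.metrics = {}

    @classmethod
    def from_generic_request(cls, generic, request_id):
        # V1 path: wrap the incoming request as a typed object,
        # with no dict round-trip.
        return cls(request_id, generic["prompt"])

def build_engine_payload(generic, request_id):
    if not ENABLE_V1_DATA_PROCESSOR:
        # Legacy path: serialize the request to a plain dict.
        payload = {"request_id": request_id, "prompt": generic["prompt"], "metrics": {}}
        payload["metrics"]["arrival_time"] = time.time()
    else:
        # V1 path: keep a typed request object end to end.
        payload = SimpleRequest.from_generic_request(generic, request_id)
        payload.metrics["arrival_time"] = time.time()
    return payload

print(build_engine_payload({"prompt": "hello"}, "req_0"))

Keeping a typed object end to end avoids a serialize/deserialize hop per prompt and lets downstream stages rely on attributes rather than string keys.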
@@ -562,7 +565,7 @@ class OpenAIServingCompletion:
                     draft_logprobs=draft_logprobs_res,
                     speculate_metrics=output_speculate_metrics,
                 )
-                if not res["finished"] and "delta_message" in output:
+                if not res["finished"] and output["enable_parser"]:
                     delta_message_output = output["delta_message"]
                     if delta_message_output is None:
                         continue
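The streaming gate now keys off an explicit output["enable_parser"] flag instead of probing for a "delta_message" key, and a chunk whose parser produced nothing (delta_message is None) is still skipped. A small sketch of that gating logic, assuming output and res are plain dicts shaped like the ones visible in this diff:

def next_delta(res: dict, output: dict):
    """Return the delta message to stream, or None to skip this chunk."""
    if res["finished"]:
        return None  # final chunk: no incremental delta left to parse
    if not output.get("enable_parser", False):
        return None  # parser disabled for this request
    delta_message = output.get("delta_message")
    if delta_message is None:
        return None  # parser enabled but produced no delta yet
    return delta_message

assert next_delta({"finished": False}, {"enable_parser": True, "delta_message": "hi"}) == "hi"
assert next_delta({"finished": False}, {"enable_parser": False}) is None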
@@ -737,7 +740,7 @@ class OpenAIServingCompletion:
                     else None
                 ),
                 reasoning_content=output.get("reasoning_content"),
-                tool_calls=output.get("tool_call"),
+                tool_calls=output.get("tool_call", None),
                 logprobs=aggregated_logprobs,
                 draft_logprobs=aggregated_draft_logprobs,
                 prompt_logprobs=clamp_prompt_logprobs(prompt_logprobs_res),
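The tool_calls change in this last hunk is cosmetic: dict.get already defaults to None, so output.get("tool_call", None) behaves identically to output.get("tool_call") and only makes the fallback explicit. A quick check:

# dict.get's default is already None; both spellings are equivalent.
output = {"reasoning_content": "because"}
assert output.get("tool_call") is None
assert output.get("tool_call", None) is None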