Mirror of https://github.com/PaddlePaddle/FastDeploy.git (synced 2026-04-23 00:17:25 +08:00)
[BugFix] rollback max_tokens and min_tokens when continue to infer (#5053)
* [BugFix] rollback max_tokens and min_tokens when continue to infer

Co-authored-by: liqinrui <liqinrui@baidu.com>
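The change being reverted had shrunk both sampling budgets by the number of tokens already generated whenever a request continued inference; this commit restores the original clamping. A minimal sketch of the min_tokens side, as a standalone illustration (helper names are hypothetical, not FastDeploy API):

```python
# Hypothetical standalone helpers illustrating the min_tokens revert;
# these are not part of the FastDeploy API.

def restored_min_tokens(min_tokens):
    # Behavior after this commit: min_tokens is taken as given, default 1.
    return min_tokens if min_tokens is not None else 1

def reverted_min_tokens(min_tokens, completion_token_len):
    # Behavior being removed: shrink min_tokens by the tokens already
    # generated in the previous round, flooring at 1.
    return max(1, min_tokens - completion_token_len)

assert restored_min_tokens(None) == 1
assert reverted_min_tokens(16, 200) == 1  # floored once already exceeded
```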
@@ -207,14 +207,7 @@ class EngineClient:
         task["prompt_token_ids_len"] = len(task["prompt_token_ids"])
         input_ids_len = task["prompt_token_ids_len"]
 
-        completion_token_len = len(task["completion_token_ids"]) if task.get("completion_token_ids") else 0
-        task["max_tokens"] = min(
-            self.max_model_len - input_ids_len, max(0, task.get("max_tokens") - completion_token_len)
-        )
-
-        if task.get("min_tokens") is not None:
-            task["min_tokens"] = max(1, task["min_tokens"] - completion_token_len)
-
+        task["max_tokens"] = min(self.max_model_len - input_ids_len, task.get("max_tokens"))
         min_tokens = task.get("min_tokens", 1)
         if "messages" in task:
             del task["messages"]
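The removed EngineClient lines subtracted the already-generated completion length from the user-supplied max_tokens; the restored line clamps it only against the remaining context window. A standalone sketch of the two behaviors (function names are hypothetical):

```python
# Hypothetical standalone helpers contrasting the two clamping rules;
# not FastDeploy API.

def restored_max_tokens(max_model_len, input_ids_len, max_tokens):
    # After this commit: cap by the remaining context window only.
    return min(max_model_len - input_ids_len, max_tokens)

def reverted_max_tokens(max_model_len, input_ids_len, max_tokens, completion_token_len):
    # Removed behavior: also subtract tokens generated in a previous
    # round ("continue to infer"), flooring at 0.
    return min(max_model_len - input_ids_len, max(0, max_tokens - completion_token_len))

# With an 8192-token context, a 100-token prompt, max_tokens=512, and 200
# completion tokens already produced, the removed code shrank the budget
# to 312, while the restored code keeps the full 512:
assert restored_max_tokens(8192, 100, 512) == 512
assert reverted_max_tokens(8192, 100, 512, 200) == 312
```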
@@ -250,9 +250,7 @@ class Ernie4_5_VLProcessor(Ernie4_5Processor):
         else:
             raise ValueError(f"Request must contain 'prompt', or 'messages': {request}")
 
-        completion_token_len = 0
         if request.get("completion_token_ids"):
-            completion_token_len = len(request.get("completion_token_ids"))
             self.append_completion_tokens(outputs, request["completion_token_ids"])
 
         outputs = self.pack_outputs(outputs)
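The processor still appends any completion_token_ids from the previous round to its outputs; it just no longer tracks their length for budget math. A simplified token-level stand-in for that append step (the real append_completion_tokens works on the processor's output structure, so this only illustrates the effect):

```python
# Simplified stand-in: a previous round's completion becomes part of the
# new input when inference continues. Not the real append_completion_tokens.

def append_completion_tokens(prompt_token_ids, completion_token_ids):
    return prompt_token_ids + completion_token_ids

assert append_completion_tokens([1, 5, 9], [42, 43]) == [1, 5, 9, 42, 43]
```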
@@ -264,16 +262,12 @@ class Ernie4_5_VLProcessor(Ernie4_5Processor):
         if max_model_len is not None and len(request["prompt_token_ids"]) > max_model_len:
             request["prompt_token_ids"] = request["prompt_token_ids"][: max_model_len - 1]
 
-        tmp_max_tokens = 0
         if request.get("max_tokens") is None:
             request["max_tokens"] = max(1, max_model_len - len(request["prompt_token_ids"]))
-            tmp_max_tokens = request["max_tokens"]
         else:
-            tmp_max_tokens = min(
-                max_model_len - len(request["prompt_token_ids"]), max(0, request["max_tokens"] - completion_token_len)
-            )
+            request["max_tokens"] = min(max_model_len - len(request["prompt_token_ids"]), request["max_tokens"])
         if request.get("reasoning_max_tokens") is None:
-            request["reasoning_max_tokens"] = max(int(tmp_max_tokens * 0.8), 1)
+            request["reasoning_max_tokens"] = max(int(request["max_tokens"] * 0.8), 1)
         data_processor_logger.info(f"Processed request {request}")
 
         return request
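With tmp_max_tokens gone, the default reasoning budget is derived directly from the already-clamped max_tokens. The rule as it reads after the patch, restated standalone:

```python
# Default reasoning budget after the patch: 80% of the clamped max_tokens,
# truncated to an int, with a floor of 1.

def default_reasoning_max_tokens(max_tokens):
    return max(int(max_tokens * 0.8), 1)

assert default_reasoning_max_tokens(512) == 409  # int(409.6) truncates
assert default_reasoning_max_tokens(1) == 1      # floor keeps at least 1
```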