mirror of https://github.com/PaddlePaddle/FastDeploy.git
synced 2026-04-23 00:17:25 +08:00
【Fix】 remove text_after_process & raw_prediction (#4421)
* remove text_after_process & raw_prediction
* remove text_after_process & raw_prediction
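The dropped fields were duplicates. In the old code below, prompt_tokens was filled with the same value as text_after_process (the prompt text after preprocessing) and completion_tokens with the same value as raw_prediction (the raw model output), so every response carried each value twice. This change keeps only prompt_tokens and completion_tokens on the response models, threads prompt_tokens through OpenAIServingChat and OpenAIServingCompletion in place of text_after_process, and reads completion_tokens from output.get("completion_tokens") instead of output.get("raw_prediction"). Clients that opted in via return_token_ids should read prompt_tokens / completion_tokens from now on.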
@@ -193,8 +193,6 @@ class ChatMessage(BaseModel):
     tool_calls: Optional[List[DeltaToolCall | ToolCall]] = None
     prompt_token_ids: Optional[List[int]] = None
     completion_token_ids: Optional[List[int]] = None
-    text_after_process: Optional[str] = None
-    raw_prediction: Optional[str] = None
     prompt_tokens: Optional[str] = None
     completion_tokens: Optional[str] = None
 
@@ -255,8 +253,6 @@ class DeltaMessage(BaseModel):
     completion_token_ids: Optional[List[int]] = None
     reasoning_content: Optional[str] = None
     tool_calls: Optional[List[DeltaToolCall | ToolCall]] = None
-    text_after_process: Optional[str] = None
-    raw_prediction: Optional[str] = None
     prompt_tokens: Optional[str] = None
     completion_tokens: Optional[str] = None
 
@@ -295,8 +291,6 @@ class CompletionResponseChoice(BaseModel):
     text: str
     prompt_token_ids: Optional[List[int]] = None
     completion_token_ids: Optional[List[int]] = None
-    text_after_process: Optional[str] = None
-    raw_prediction: Optional[str] = None
     prompt_tokens: Optional[str] = None
     completion_tokens: Optional[str] = None
     arrival_time: Optional[float] = None
@@ -341,8 +335,6 @@ class CompletionResponseStreamChoice(BaseModel):
     logprobs: Optional[CompletionLogprobs] = None
     prompt_token_ids: Optional[List[int]] = None
     completion_token_ids: Optional[List[int]] = None
-    text_after_process: Optional[str] = None
-    raw_prediction: Optional[str] = None
     prompt_tokens: Optional[str] = None
     completion_tokens: Optional[str] = None
     reasoning_content: Optional[str] = None
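After these protocol hunks, each response model keeps a single optional string per direction. A minimal sketch of the resulting field set, assuming pydantic as in the original models and limited to the fields visible in this diff (the real classes carry more; the class name here is hypothetical):

from typing import List, Optional

from pydantic import BaseModel


class TrimmedChoiceFields(BaseModel):
    # Illustrative subset of ChatMessage / DeltaMessage / CompletionResponseChoice
    # / CompletionResponseStreamChoice after the removal.
    prompt_token_ids: Optional[List[int]] = None
    completion_token_ids: Optional[List[int]] = None
    prompt_tokens: Optional[str] = None        # survives; text_after_process is gone
    completion_tokens: Optional[str] = None    # survives; raw_prediction is gone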
@@ -118,14 +118,14 @@ class OpenAIServingChat:
         else:
             request_id = f"chatcmpl-{uuid.uuid4()}"
         api_server_logger.info(f"create chat completion request: {request_id}")
-        text_after_process = None
+        prompt_tokens = None
         try:
             current_req_dict = request.to_dict_for_infer(request_id)
             if "chat_template" not in current_req_dict:
                 current_req_dict["chat_template"] = self.chat_template
             current_req_dict["arrival_time"] = time.time()
             prompt_token_ids = await self.engine_client.format_and_add_data(current_req_dict)
-            text_after_process = current_req_dict.get("text_after_process")
+            prompt_tokens = current_req_dict.get("prompt_tokens")
             if isinstance(prompt_token_ids, np.ndarray):
                 prompt_token_ids = prompt_token_ids.tolist()
         except ParameterError as e:
@@ -143,12 +143,12 @@ class OpenAIServingChat:
 
         if request.stream:
             return self.chat_completion_stream_generator(
-                request, request_id, request.model, prompt_token_ids, text_after_process
+                request, request_id, request.model, prompt_token_ids, prompt_tokens
             )
         else:
             try:
                 return await self.chat_completion_full_generator(
-                    request, request_id, request.model, prompt_token_ids, text_after_process
+                    request, request_id, request.model, prompt_token_ids, prompt_tokens
                 )
             except Exception as e:
                 error_msg = f"request[{request_id}]full generator error: {str(e)}, {str(traceback.format_exc())}"
@@ -175,7 +175,7 @@ class OpenAIServingChat:
         request_id: str,
         model_name: str,
         prompt_token_ids: list(),
-        text_after_process: str,
+        prompt_tokens: str,
     ):
         """
         Streaming chat completion generator.
@@ -289,8 +289,7 @@ class OpenAIServingChat:
 
                 if request.return_token_ids:
                     choice.delta.prompt_token_ids = list(prompt_token_ids)
-                    choice.delta.text_after_process = text_after_process
-                    choice.delta.prompt_tokens = text_after_process
+                    choice.delta.prompt_tokens = prompt_tokens
                 chunk = ChatCompletionStreamResponse(
                     id=request_id,
                     object=chunk_object_type,
@@ -368,8 +367,7 @@ class OpenAIServingChat:
                         choice.delta.multimodal_content[0]["completion_token_ids"] = list(output["token_ids"])
                     else:
                         choice.delta.completion_token_ids = list(output["token_ids"])
-                    choice.delta.raw_prediction = output.get("raw_prediction")
-                    choice.delta.completion_tokens = output.get("raw_prediction")
+                    choice.delta.completion_tokens = output.get("completion_tokens")
                 if include_continuous_usage:
                     chunk.usage = UsageInfo(
                         prompt_tokens=num_prompt_tokens,
@@ -419,7 +417,7 @@ class OpenAIServingChat:
         request_id: str,
         model_name: str,
         prompt_token_ids: list(),
-        text_after_process: str,
+        prompt_tokens: str,
     ):
         """
         Full chat completion generator.
@@ -509,10 +507,8 @@ class OpenAIServingChat:
                 tool_calls=output.get("tool_call"),
                 prompt_token_ids=prompt_token_ids if request.return_token_ids else None,
                 completion_token_ids=completion_token_ids if request.return_token_ids else None,
-                text_after_process=text_after_process if request.return_token_ids else None,
-                prompt_tokens=text_after_process if request.return_token_ids else None,
-                raw_prediction=output.get("raw_prediction") if request.return_token_ids else None,
-                completion_tokens=output.get("raw_prediction") if request.return_token_ids else None,
+                prompt_tokens=prompt_tokens if request.return_token_ids else None,
+                completion_tokens=output.get("completion_tokens") if request.return_token_ids else None,
             )
 
             if response_processor.enable_multimodal_content():
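The chat-serving hunks above all follow the same pattern: the handler pulls prompt_tokens out of current_req_dict, passes it to both generators, and attaches it (plus completion_tokens from the engine output) when request.return_token_ids is set. A self-contained mimic of that attachment step, with SimpleNamespace standing in for the delta object and the engine output dict shape assumed from the code above:

from types import SimpleNamespace


def attach_debug_fields(delta, return_token_ids, prompt_token_ids, prompt_tokens, output):
    # Mirrors the return_token_ids branches after the rename.
    if return_token_ids:
        delta.prompt_token_ids = list(prompt_token_ids)
        delta.prompt_tokens = prompt_tokens                         # was text_after_process
        delta.completion_token_ids = list(output["token_ids"])
        delta.completion_tokens = output.get("completion_tokens")   # was raw_prediction
    return delta


delta = attach_debug_fields(
    SimpleNamespace(),
    True,
    [1, 2, 3],
    "<prompt after preprocessing>",
    {"token_ids": [4, 5], "completion_tokens": "<raw model output>"},
)
print(delta.prompt_tokens, delta.completion_tokens)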
@@ -132,7 +132,7 @@ class OpenAIServingCompletion:
         num_choices = len(request_prompts)
         api_server_logger.info(f"Start preprocessing request: req_id={request_id}), num_choices={num_choices}")
         prompt_batched_token_ids = []
-        text_after_process_list = []
+        prompt_tokens_list = []
         try:
             if self.max_waiting_time < 0:
                 await self.engine_client.semaphore.acquire()
@@ -157,7 +157,7 @@ class OpenAIServingCompletion:
                 prompt_token_ids = await self.engine_client.format_and_add_data(current_req_dict) # tokenize
                 if isinstance(prompt_token_ids, np.ndarray):
                     prompt_token_ids = prompt_token_ids.tolist()
-                text_after_process_list.append(current_req_dict.get("text_after_process"))
+                prompt_tokens_list.append(current_req_dict.get("prompt_tokens"))
                 prompt_batched_token_ids.append(prompt_token_ids)
                 del current_req_dict
         except ParameterError as e:
@@ -180,7 +180,7 @@ class OpenAIServingCompletion:
                 created_time=created_time,
                 model_name=request.model,
                 prompt_batched_token_ids=prompt_batched_token_ids,
-                text_after_process_list=text_after_process_list,
+                prompt_tokens_list=prompt_tokens_list,
             )
         else:
             try:
@@ -191,7 +191,7 @@ class OpenAIServingCompletion:
                     created_time=created_time,
                     model_name=request.model,
                     prompt_batched_token_ids=prompt_batched_token_ids,
-                    text_after_process_list=text_after_process_list,
+                    prompt_tokens_list=prompt_tokens_list,
                 )
             except Exception as e:
                 error_msg = (
@@ -213,7 +213,7 @@ class OpenAIServingCompletion:
         created_time: int,
         model_name: str,
         prompt_batched_token_ids: list(),
-        text_after_process_list: list(),
+        prompt_tokens_list: list(),
     ):
         """
         Process the full completion request with multiple choices.
@@ -292,7 +292,7 @@ class OpenAIServingCompletion:
                 model_name=model_name,
                 prompt_batched_token_ids=prompt_batched_token_ids,
                 completion_batched_token_ids=completion_batched_token_ids,
-                text_after_process_list=text_after_process_list,
+                prompt_tokens_list=prompt_tokens_list,
             )
             api_server_logger.info(f"Completion response: {res.model_dump_json()}")
             return res
@@ -344,7 +344,7 @@ class OpenAIServingCompletion:
         created_time: int,
         model_name: str,
         prompt_batched_token_ids: list(),
-        text_after_process_list: list(),
+        prompt_tokens_list: list(),
     ):
         """
         Process the stream completion request.
@@ -408,8 +408,7 @@ class OpenAIServingCompletion:
                             index=idx,
                             text="",
                             prompt_token_ids=list(prompt_batched_token_ids[idx]),
-                            text_after_process=text_after_process_list[idx],
-                            prompt_tokens=text_after_process_list[idx],
+                            prompt_tokens=prompt_tokens_list[idx],
                             completion_token_ids=None,
                         )
                     ],
@@ -443,8 +442,7 @@ class OpenAIServingCompletion:
                             prompt_token_ids=None,
                             completion_token_ids=output.get("token_ids") if request.return_token_ids else None,
                             tool_calls=None,
-                            raw_prediction=output.get("raw_prediction") if request.return_token_ids else None,
-                            completion_tokens=output.get("raw_prediction") if request.return_token_ids else None,
+                            completion_tokens=output.get("completion_tokens") if request.return_token_ids else None,
                             reasoning_content="",
                             arrival_time=arrival_time,
                             logprobs=logprobs_res,
@@ -522,7 +520,7 @@ class OpenAIServingCompletion:
         model_name: str,
         prompt_batched_token_ids: list(),
         completion_batched_token_ids: list(),
-        text_after_process_list: list(),
+        prompt_tokens_list: list(),
     ) -> CompletionResponse:
         choices: List[CompletionResponseChoice] = []
         num_prompt_tokens = 0
@@ -556,10 +554,8 @@ class OpenAIServingCompletion:
                 text=output_text,
                 prompt_token_ids=prompt_token_ids if request.return_token_ids else None,
                 completion_token_ids=completion_token_ids if request.return_token_ids else None,
-                raw_prediction=output.get("raw_prediction") if request.return_token_ids else None,
-                completion_tokens=output.get("raw_prediction") if request.return_token_ids else None,
-                text_after_process=text_after_process_list[idx] if request.return_token_ids else None,
-                prompt_tokens=text_after_process_list[idx] if request.return_token_ids else None,
+                completion_tokens=output.get("completion_tokens") if request.return_token_ids else None,
+                prompt_tokens=prompt_tokens_list[idx] if request.return_token_ids else None,
                 reasoning_content=output.get("reasoning_content"),
                 tool_calls=output.get("tool_call"),
                 logprobs=aggregated_logprobs,
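For callers, the net effect is a field rename in the return_token_ids payload. A hedged usage sketch: the URL and model name are placeholders, and passing return_token_ids as a top-level request field is assumed from request.return_token_ids in the handlers above; only the response field names come from this diff:

import requests

payload = {
    "model": "default",
    "messages": [{"role": "user", "content": "Hello"}],
    "return_token_ids": True,  # opt into the token/debug fields, as in the handlers above
}
resp = requests.post("http://localhost:8000/v1/chat/completions", json=payload, timeout=60).json()
message = resp["choices"][0]["message"]

# text_after_process / raw_prediction are no longer present in the response:
print(message.get("prompt_token_ids"))
print(message.get("prompt_tokens"))      # previously duplicated as text_after_process
print(message.get("completion_tokens"))  # previously duplicated as raw_prediction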