Mirror of https://github.com/PaddlePaddle/FastDeploy.git, synced 2026-04-23 00:17:25 +08:00.
[Speculative Decoding] Add draft_logprobs Support for Speculative Decode MTP (#4467)
* feat: add draft_logprobs for Speculative Decode MTP
* fix: postprocess for speculative decode
* test: test_speculative_decoding_use_logprobs
* fix: test_completion_echo
* fix test_max_streaming_tokens

Co-authored-by: Jiang-Jia-Jun <163579578+Jiang-Jia-Jun@users.noreply.github.com>
@@ -205,6 +205,7 @@ class ChatCompletionResponseChoice(BaseModel):
     index: int
     message: ChatMessage
     logprobs: Optional[LogProbs] = None
+    draft_logprobs: Optional[LogProbs] = None
     finish_reason: Optional[Literal["stop", "length", "tool_calls", "recover_stop"]]

@@ -265,6 +266,7 @@ class ChatCompletionResponseStreamChoice(BaseModel):
     index: int
     delta: DeltaMessage
     logprobs: Optional[LogProbs] = None
+    draft_logprobs: Optional[LogProbs] = None
     finish_reason: Optional[Literal["stop", "length", "tool_calls"]] = None
     arrival_time: Optional[float] = None

@@ -295,6 +297,7 @@ class CompletionResponseChoice(BaseModel):
     completion_tokens: Optional[str] = None
     arrival_time: Optional[float] = None
     logprobs: Optional[CompletionLogprobs] = None
+    draft_logprobs: Optional[CompletionLogprobs] = None
     reasoning_content: Optional[str] = None
     finish_reason: Optional[Literal["stop", "length", "tool_calls"]]
     tool_calls: Optional[List[DeltaToolCall | ToolCall]] = None

@@ -333,6 +336,7 @@ class CompletionResponseStreamChoice(BaseModel):
     text: str
     arrival_time: float = None
     logprobs: Optional[CompletionLogprobs] = None
+    draft_logprobs: Optional[CompletionLogprobs] = None
     prompt_token_ids: Optional[List[int]] = None
     completion_token_ids: Optional[List[int]] = None
     prompt_tokens: Optional[str] = None
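All four response models pick up the same optional field, defaulting to None so serialized responses are unchanged for clients that do not opt in. A minimal standalone sketch of the pattern (LogProbs here is a simplified stand-in, not FastDeploy's real class):

from typing import Optional
from pydantic import BaseModel

class LogProbs(BaseModel):  # simplified stand-in for the real type
    content: Optional[list] = None

class ChatChoice(BaseModel):
    index: int
    logprobs: Optional[LogProbs] = None
    # Stays None unless the request sets include_draft_logprobs and the
    # engine is running with speculative decoding (MTP) enabled.
    draft_logprobs: Optional[LogProbs] = None

# Unset fields serialize as None, so existing clients are unaffected:
print(ChatChoice(index=0).draft_logprobs)  # None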
@@ -420,6 +424,7 @@ class CompletionRequest(BaseModel):
     echo: Optional[bool] = False
     frequency_penalty: Optional[float] = Field(default=None, ge=-2, le=2)
     logprobs: Optional[int] = None
+    include_draft_logprobs: Optional[bool] = False
     # For logits and logprobs post processing
     temp_scaled_logprobs: bool = False
     top_p_normalized_logprobs: bool = False

@@ -555,6 +560,7 @@ class ChatCompletionRequest(BaseModel):
     frequency_penalty: Optional[float] = Field(None, le=2, ge=-2)
     logprobs: Optional[bool] = False
     top_logprobs: Optional[int] = 0
+    include_draft_logprobs: Optional[bool] = False

     # For logits and logprobs post processing
     temp_scaled_logprobs: bool = False
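On the request side, both CompletionRequest and ChatCompletionRequest gain an include_draft_logprobs switch, off by default. A client-side sketch, assuming a local FastDeploy OpenAI-compatible server (host, port, and model name are placeholders for your deployment):

import requests

resp = requests.post(
    "http://localhost:8188/v1/chat/completions",  # hypothetical endpoint
    json={
        "model": "default",
        "messages": [{"role": "user", "content": "Hello"}],
        "logprobs": True,
        "top_logprobs": 5,
        "include_draft_logprobs": True,  # the new flag added by this PR
    },
)
choice = resp.json()["choices"][0]
print(choice["logprobs"])             # target-model logprobs
print(choice.get("draft_logprobs"))   # draft-model (MTP) logprobs, or None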
@@ -316,12 +316,18 @@ class OpenAIServingChat:
                 output = res["outputs"]
                 output_top_logprobs = output["top_logprobs"]
+                output_draft_top_logprobs = output["draft_top_logprobs"]
                 previous_num_tokens[idx] += len(output["token_ids"])
                 logprobs_res: Optional[LogProbs] = None
+                draft_logprobs_res: Optional[LogProbs] = None
                 if request.logprobs and output_top_logprobs is not None:
                     logprobs_res = self._create_chat_logprobs(
                         output_top_logprobs, request.logprobs, request.top_logprobs
                     )
+                if request.include_draft_logprobs and output_draft_top_logprobs is not None:
+                    draft_logprobs_res = self._create_chat_logprobs(
+                        output_draft_top_logprobs, request.logprobs, request.top_logprobs
+                    )

                 delta_message = DeltaMessage(
                     reasoning_content="",

@@ -348,6 +354,7 @@ class OpenAIServingChat:
                     index=idx,
                     delta=delta_message,
                     logprobs=logprobs_res,
+                    draft_logprobs=draft_logprobs_res,
                     arrival_time=arrival_time,
                 )
                 if res["finished"]:
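In the streaming chat path, each chunk builds draft_logprobs_res with the same _create_chat_logprobs helper and the same request.logprobs / request.top_logprobs arguments used for the target model, then attaches it to the stream choice. A consumer-side sketch of reading both fields off the SSE stream (transport details and the single-choice-per-chunk assumption are illustrative):

import json

def collect_stream_logprobs(sse_lines):
    """Collect logprobs and draft_logprobs from a chat completion SSE stream.

    `sse_lines` is an iterable of decoded "data: {...}" lines, e.g. from
    requests' iter_lines(); one choice per chunk is assumed here.
    """
    logprobs, draft_logprobs = [], []
    for line in sse_lines:
        payload = line.removeprefix("data: ").strip()
        if not payload or payload == "[DONE]":
            continue
        choice = json.loads(payload)["choices"][0]
        if choice.get("logprobs") is not None:
            logprobs.append(choice["logprobs"])
        if choice.get("draft_logprobs") is not None:
            draft_logprobs.append(choice["draft_logprobs"])
    return logprobs, draft_logprobs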
@@ -444,7 +451,9 @@ class OpenAIServingChat:
             dealer.write([b"", rid.encode("utf-8")])
             previous_num_tokens = [0] * num_choices
             current_waiting_time = 0
+
             logprob_contents = [[] for _ in range(num_choices)]
+            draft_logprob_contents = [[] for _ in range(num_choices)]
             completion_token_ids = [[] for _ in range(num_choices)]
             num_cached_tokens = [0] * num_choices
             response_processor = ChatResponseProcessor(

@@ -492,12 +501,23 @@ class OpenAIServingChat:
                     # The logprob for handling the response
                     output = data["outputs"]
                     output_top_logprobs = output["top_logprobs"]
+                    output_draft_top_logprobs = output["draft_top_logprobs"]
                     if output_top_logprobs is not None:
+                        # logprobs
                         logprobs_res = self._create_chat_logprobs(
                             output_top_logprobs, request.logprobs, request.top_logprobs
                         )
                         if logprobs_res and logprobs_res.content is not None:
                             logprob_contents[idx].extend(logprobs_res.content)
+
+                    # draft_logprobs
+                    if request.include_draft_logprobs and output_draft_top_logprobs is not None:
+                        draft_logprobs_res = self._create_chat_logprobs(
+                            output_draft_top_logprobs, request.logprobs, request.top_logprobs
+                        )
+                        if draft_logprobs_res and draft_logprobs_res.content is not None:
+                            draft_logprob_contents[idx].extend(draft_logprobs_res.content)

                     if data["finished"]:
                         num_choices -= 1
                         choice = await self._create_chat_completion_choice(
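The non-streaming chat path accumulates instead of emitting per chunk: it keeps one logprob_contents list and one draft_logprob_contents list per choice, extends them as chunks arrive, and hands the merged contents to _create_chat_completion_choice once a choice finishes. A self-contained reduction of that pattern (chunk keys and shape are illustrative, not the real wire format):

from typing import Any, List

def assemble_choices(num_choices: int, chunks) -> List[dict]:
    """Per-choice lists are extended chunk by chunk and attached to the
    choice when it finishes; `chunks` yields dicts with illustrative keys."""
    logprob_contents: List[List[Any]] = [[] for _ in range(num_choices)]
    draft_logprob_contents: List[List[Any]] = [[] for _ in range(num_choices)]
    finished: List[dict] = []
    for chunk in chunks:
        idx = chunk["index"]
        logprob_contents[idx].extend(chunk.get("logprobs", []))
        draft_logprob_contents[idx].extend(chunk.get("draft_logprobs", []))
        if chunk.get("finished"):
            finished.append({
                "index": idx,
                "logprobs": logprob_contents[idx],
                "draft_logprobs": draft_logprob_contents[idx],
            })
    return finished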
@@ -234,6 +234,7 @@ class OpenAIServingCompletion:
         valid_results = [dict()] * num_choices
         output_tokens = [0] * num_choices
         aggregated_top_logprobs = [[[], [], []] for _ in range(num_choices)]
+        aggregated_draft_top_logprobs = [[[], [], []] for _ in range(num_choices)]
         aggregated_token_ids = [[] for _ in range(num_choices)]
         completion_batched_token_ids = [[] for _ in range(num_choices)]
         current_waiting_time = 0

@@ -266,12 +267,19 @@ class OpenAIServingCompletion:
                         raise ValueError("{}".format(data["error_msg"]))

                     output = data["outputs"]
-                    output_top_logprobs = output["top_logprobs"]
+                    output_top_logprobs = output.get("top_logprobs") or None
+                    output_draft_top_logprobs = output.get("draft_top_logprobs") or None
                     if output_top_logprobs is not None:
                         aggregated_top_logprobs[rid][0].extend(output_top_logprobs[0])
                         aggregated_top_logprobs[rid][1].extend(output_top_logprobs[1])
                         aggregated_top_logprobs[rid][2].extend(output_top_logprobs[2])

+                    # draft logprobs
+                    if request.include_draft_logprobs and output_draft_top_logprobs is not None:
+                        aggregated_draft_top_logprobs[rid][0].extend(output_draft_top_logprobs[0])
+                        aggregated_draft_top_logprobs[rid][1].extend(output_draft_top_logprobs[1])
+                        aggregated_draft_top_logprobs[rid][2].extend(output_draft_top_logprobs[2])
+
                     aggregated_token_ids[rid].extend(data["outputs"]["token_ids"])

                     self.engine_client.data_processor.process_response_dict(
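For completions, the aggregate for each request id is a triple of parallel lists, and the new draft aggregate mirrors it index for index. The diff only shows sub-lists 0-2 being extended; what each sub-list holds (token ids, logprobs, top candidates, or similar) is an assumption about FastDeploy internals. Note also the switch to output.get(...) or None, which tolerates a missing key as well as an empty payload. A sketch of the merge step:

def merge_top_logprobs(aggregate, chunk):
    """Extend each of the three parallel sub-lists of a per-request
    aggregate with the matching sub-list from one streamed chunk. What
    the sub-lists hold is an assumption; the diff only shows indices 0-2."""
    for i in range(3):
        aggregate[i].extend(chunk[i])

agg = [[], [], []]
merge_top_logprobs(agg, [[101], [-0.11], [["the"]]])
merge_top_logprobs(agg, [[205], [-0.42], [["cat"]]])
print(agg)  # [[101, 205], [-0.11, -0.42], [['the'], ['cat']]]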
@@ -282,6 +290,7 @@ class OpenAIServingCompletion:
                     if data.get("finished", False):
                         data["output_token_ids"] = output_tokens[rid]
                         data["outputs"]["top_logprobs"] = aggregated_top_logprobs[rid]
+                        data["outputs"]["draft_top_logprobs"] = aggregated_draft_top_logprobs[rid]
                         data["outputs"]["token_ids"] = aggregated_token_ids[rid]
                         valid_results[rid] = data
                         num_choices -= 1

@@ -437,10 +446,17 @@ class OpenAIServingCompletion:
                     await self._process_echo_logic(request, idx, res["outputs"])
                     output = res["outputs"]
                     output_top_logprobs = output["top_logprobs"]
+                    output_draft_top_logprobs = output["draft_top_logprobs"]
                     logprobs_res: Optional[CompletionLogprobs] = None
+                    draft_logprobs_res: Optional[CompletionLogprobs] = None
                     if request.logprobs and output_top_logprobs is not None:
                         logprobs_res = self._create_completion_logprobs(output_top_logprobs, request.logprobs, 0)

+                    # draft logprobs
+                    if request.include_draft_logprobs and output_draft_top_logprobs is not None:
+                        draft_logprobs_res = self._create_completion_logprobs(
+                            output_draft_top_logprobs, request.logprobs, 0
+                        )
                     output_tokens[idx] += 1
                     delta_message = CompletionResponseStreamChoice(
                         index=idx,

@@ -452,6 +468,7 @@ class OpenAIServingCompletion:
                         reasoning_content="",
                         arrival_time=arrival_time,
                         logprobs=logprobs_res,
+                        draft_logprobs=draft_logprobs_res,
                     )
                     if not res["finished"] and "delta_message" in output:
                         delta_message_output = output["delta_message"]
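The streaming completion path mirrors the chat changes: a draft_logprobs_res is built per chunk and attached to CompletionResponseStreamChoice. One thing the exposed field enables, which this PR surfaces but does not itself compute, is measuring how often the MTP draft model agreed with the target model. A hypothetical post-hoc sketch (the flat per-position token lists are assumed to have been decoded from the two payloads by the caller):

def draft_agreement(target_tokens, draft_tokens):
    """Fraction of positions where the draft model's token matched the
    target model's. Input token lists are hypothetical, decoded from the
    logprobs/draft_logprobs payloads upstream."""
    pairs = list(zip(target_tokens, draft_tokens))
    if not pairs:
        return 0.0
    return sum(t == d for t, d in pairs) / len(pairs)

print(draft_agreement([1, 2, 3, 4], [1, 2, 9, 4]))  # 0.75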
@@ -541,15 +558,23 @@ class OpenAIServingCompletion:
             final_res = final_res_batch[idx]
             prompt_token_ids = prompt_batched_token_ids[idx // (1 if request.n is None else request.n)]
             assert prompt_token_ids is not None
             prompt_text = request.prompt
             completion_token_ids = completion_batched_token_ids[idx]

             output = final_res["outputs"]
-            output_top_logprobs = output["top_logprobs"]
+            output_top_logprobs = output.get("top_logprobs") or None
+            output_draft_top_logprobs = output.get("draft_top_logprobs") or None

             aggregated_logprobs: Optional[CompletionLogprobs] = None
             if output_top_logprobs is not None:
                 aggregated_logprobs = self._create_completion_logprobs(output_top_logprobs, request.logprobs, 0)

+            aggregated_draft_logprobs: Optional[CompletionLogprobs] = None
+            if output_draft_top_logprobs is not None:
+                aggregated_draft_logprobs = self._create_completion_logprobs(
+                    output_draft_top_logprobs, request.logprobs, 0
+                )
+
             if request.echo:
                 prompt_text = self._echo_back_prompt(request, idx // (1 if request.n is None else request.n))
                 token_ids = [*prompt_token_ids, *output["token_ids"]]

@@ -574,6 +599,7 @@ class OpenAIServingCompletion:
                 reasoning_content=output.get("reasoning_content"),
                 tool_calls=output.get("tool_call"),
                 logprobs=aggregated_logprobs,
+                draft_logprobs=aggregated_draft_logprobs,
                 finish_reason=finish_reason,
             )
             choices.append(choice_data)
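Both non-streaming paths now read the logprob payloads with output.get(...) or None instead of direct indexing, so a missing key no longer raises KeyError and an empty container is normalized to None before the later "is not None" checks. A two-line demonstration of that normalization:

output = {"top_logprobs": []}
print(output.get("top_logprobs") or None)        # None: empty list normalized away
print(output.get("draft_top_logprobs") or None)  # None: missing key does not raise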