mirror of
https://github.com/PaddlePaddle/FastDeploy.git
synced 2026-04-23 00:17:25 +08:00
【Fix】 remove text_after_process & raw_prediction (#4421)
* remove text_after_process & raw_prediction * remove text_after_process & raw_prediction
This commit is contained in:
@@ -14,10 +14,10 @@ from core import TEMPLATE, URL, build_request_payload, send_request
|
||||
COMPLETIONS_URL = URL.replace("/v1/chat/completions", "/v1/completions")
|
||||
|
||||
|
||||
def test_completion_stream_text_after_process_raw_prediction():
|
||||
def test_completion_stream_prompt_tokens_completion_tokens():
|
||||
"""
|
||||
/v1/completions接口, stream=True
|
||||
返回属性"text_after_process"和"reasoning_content"
|
||||
return "prompt_tokens"和"reasoning_content"
|
||||
"""
|
||||
data = {
|
||||
"prompt": "你是谁",
|
||||
@@ -39,55 +39,55 @@ def test_completion_stream_text_after_process_raw_prediction():
|
||||
|
||||
choice = response_data["choices"][0]
|
||||
if "prompt_token_ids" in choice and choice["prompt_token_ids"] is not None:
|
||||
text_after_process = choice["text_after_process"]
|
||||
assert data["prompt"] in text_after_process, "text_after_process取值结果不正确"
|
||||
prompt_tokens = choice["prompt_tokens"]
|
||||
assert data["prompt"] in prompt_tokens, "prompt_tokens取值结果不正确"
|
||||
else:
|
||||
raw_prediction = choice["raw_prediction"]
|
||||
completion_tokens = choice["completion_tokens"]
|
||||
reasoning_content = choice["reasoning_content"]
|
||||
text = choice["text"]
|
||||
assert reasoning_content or text in raw_prediction, "raw_prediction取值结果不正确"
|
||||
assert reasoning_content or text in completion_tokens, "completion_tokens取值结果不正确"
|
||||
if "finish_reason" in line.strip():
|
||||
break
|
||||
|
||||
|
||||
def test_completion_prompt_tokens_completion_tokens_return_token_ids():
    """
    Non-streaming /v1/completions request with return_token_ids=True.

    Verifies that the response choice echoes the request prompt inside
    "prompt_tokens" and that the generated content (reasoning content when
    present, otherwise the plain text) appears inside "completion_tokens".
    """
    data = {"stream": False, "prompt": "你是谁", "max_tokens": 50, "return_token_ids": True}
    payload = build_request_payload(TEMPLATE, data)
    resp = send_request(COMPLETIONS_URL, payload).json()

    choice = resp["choices"][0]

    prompt_tokens = choice["prompt_tokens"]
    assert data["prompt"] in prompt_tokens, "prompt_tokens取值结果不正确"

    completion_tokens = choice["completion_tokens"]
    reasoning_content = choice["reasoning_content"]
    text = choice["text"]
    # BUG FIX: the previous expression `reasoning_content or text in completion_tokens`
    # parsed as `reasoning_content or (text in completion_tokens)`, so the assertion
    # passed whenever reasoning_content was truthy regardless of completion_tokens.
    # Parenthesize to actually check containment of the generated content.
    assert (reasoning_content or text) in completion_tokens, "completion_tokens取值结果不正确"
|
||||
|
||||
|
||||
def test_completion_prompt_tokens_completion_tokens():
    """
    Non-streaming /v1/completions request WITHOUT return_token_ids.

    When return_token_ids is not requested, the "prompt_tokens" and
    "completion_tokens" attributes of the choice must both be null (None).
    """
    data = {"stream": False, "prompt": "你是谁", "max_tokens": 50}
    payload = build_request_payload(TEMPLATE, data)
    resp = send_request(COMPLETIONS_URL, payload).json()

    choice = resp["choices"][0]

    prompt_tokens = choice["prompt_tokens"]
    assert prompt_tokens is None, "prompt_tokens取值结果不正确"

    completion_tokens = choice["completion_tokens"]
    assert completion_tokens is None, "completion_tokens取值结果不正确"
|
||||
|
||||
|
||||
def test_stream_text_after_process_raw_prediction():
|
||||
def test_stream_prompt_tokens_completion_tokens():
|
||||
"""
|
||||
/v1/chat/completions接口,"stream": True
|
||||
返回属性"text_after_process"和"reasoning_content"
|
||||
返回属性"prompt_tokens"和"reasoning_content"
|
||||
"""
|
||||
data = {
|
||||
"messages": [{"role": "user", "content": "你是谁"}],
|
||||
@@ -109,21 +109,21 @@ def test_stream_text_after_process_raw_prediction():
|
||||
|
||||
choice = response_data["choices"][0]
|
||||
if "prompt_token_ids" in choice["delta"] and choice["delta"]["prompt_token_ids"] is not None:
|
||||
text_after_process = choice["delta"]["text_after_process"]
|
||||
assert data["messages"][0]["content"] in text_after_process, "text_after_process取值结果不正确"
|
||||
prompt_tokens = choice["delta"]["prompt_tokens"]
|
||||
assert data["messages"][0]["content"] in prompt_tokens, "prompt_tokens取值结果不正确"
|
||||
else:
|
||||
raw_prediction = choice["delta"]["raw_prediction"]
|
||||
completion_tokens = choice["delta"]["completion_tokens"]
|
||||
reasoning_content = choice["delta"]["reasoning_content"]
|
||||
content = choice["delta"]["content"]
|
||||
assert reasoning_content or content in raw_prediction, "raw_prediction取值结果不正确"
|
||||
assert reasoning_content or content in completion_tokens, "completion_tokens取值结果不正确"
|
||||
if "finish_reason" in line.strip():
|
||||
break
|
||||
|
||||
|
||||
def test_text_after_process_raw_prediction_return_token_ids():
|
||||
def test_prompt_tokens_completion_tokens_return_token_ids():
|
||||
"""
|
||||
/v1/chat/completions接口,非流式接口
|
||||
返回属性"text_after_process"和"reasoning_content"
|
||||
返回属性"prompt_tokens"和"reasoning_content"
|
||||
"""
|
||||
data = {
|
||||
"stream": False,
|
||||
@@ -136,19 +136,19 @@ def test_text_after_process_raw_prediction_return_token_ids():
|
||||
payload = build_request_payload(TEMPLATE, data)
|
||||
resp = send_request(URL, payload).json()
|
||||
|
||||
text_after_process = resp["choices"][0]["message"]["text_after_process"]
|
||||
assert data["messages"][0]["content"] in text_after_process, "text_after_process取值结果不正确"
|
||||
prompt_tokens = resp["choices"][0]["message"]["prompt_tokens"]
|
||||
assert data["messages"][0]["content"] in prompt_tokens, "prompt_tokens取值结果不正确"
|
||||
|
||||
raw_prediction = resp["choices"][0]["message"]["raw_prediction"]
|
||||
completion_tokens = resp["choices"][0]["message"]["completion_tokens"]
|
||||
reasoning_content = resp["choices"][0]["message"]["reasoning_content"]
|
||||
text = resp["choices"][0]["message"]["content"]
|
||||
assert reasoning_content or text in raw_prediction, "raw_prediction取值结果不正确"
|
||||
assert reasoning_content or text in completion_tokens, "completion_tokens取值结果不正确"
|
||||
|
||||
|
||||
def test_text_after_process_raw_prediction():
|
||||
def test_prompt_tokens_completion_tokens():
|
||||
"""
|
||||
/v1/chat/completions接口,无return_token_ids参数
|
||||
无return token ids 属性"text_after_process"和"reasoning_content"值为null
|
||||
无return token ids 属性"prompt_tokens"和"reasoning_content"值为null
|
||||
"""
|
||||
data = {
|
||||
"stream": False,
|
||||
@@ -160,8 +160,8 @@ def test_text_after_process_raw_prediction():
|
||||
payload = build_request_payload(TEMPLATE, data)
|
||||
resp = send_request(URL, payload).json()
|
||||
|
||||
text_after_process = resp["choices"][0]["message"]["text_after_process"]
|
||||
assert text_after_process is None, "text_after_process取值结果不正确"
|
||||
prompt_tokens = resp["choices"][0]["message"]["prompt_tokens"]
|
||||
assert prompt_tokens is None, "prompt_tokens取值结果不正确"
|
||||
|
||||
raw_prediction = resp["choices"][0]["message"]["raw_prediction"]
|
||||
assert raw_prediction is None, "raw_prediction取值结果不正确"
|
||||
completion_tokens = resp["choices"][0]["message"]["completion_tokens"]
|
||||
assert completion_tokens is None, "completion_tokens取值结果不正确"
|
||||
|
||||
Reference in New Issue
Block a user