【Fix】 remove text_after_process & raw_prediction (#4421)

* remove text_after_process & raw_prediction

* remove text_after_process & raw_prediction
This commit is contained in:
LiqinruiG
2025-10-16 19:00:18 +08:00
committed by GitHub
parent 8f77adc381
commit 4251ac5e95
20 changed files with 169 additions and 105 deletions
@@ -57,7 +57,7 @@ class TestCompletionEcho(unittest.IsolatedAsyncioTestCase):
model_name="test_model",
prompt_batched_token_ids=[[1, 2]],
completion_batched_token_ids=[[3, 4, 5]],
-text_after_process_list=["test prompt"],
+prompt_tokens_list=["test prompt"],
)
self.assertEqual(response.choices[0].text, "test prompt generated text")
@@ -90,7 +90,7 @@ class TestCompletionEcho(unittest.IsolatedAsyncioTestCase):
model_name="test_model",
prompt_batched_token_ids=[[1, 2]],
completion_batched_token_ids=[[3, 4, 5]],
-text_after_process_list=["test prompt"],
+prompt_tokens_list=["test prompt"],
)
self.assertEqual(response.choices[0].text, "decoded_[1, 2, 3] generated text")
@@ -123,7 +123,7 @@ class TestCompletionEcho(unittest.IsolatedAsyncioTestCase):
model_name="test_model",
prompt_batched_token_ids=[[1], [2]],
completion_batched_token_ids=[[1, 2], [3, 4]],
-text_after_process_list=["prompt1", "prompt2"],
+prompt_tokens_list=["prompt1", "prompt2"],
)
self.assertEqual(len(response.choices), 2)
@@ -159,7 +159,7 @@ class TestCompletionEcho(unittest.IsolatedAsyncioTestCase):
model_name="test_model",
prompt_batched_token_ids=[[1], [2]],
completion_batched_token_ids=[[1, 2], [3, 4]],
-text_after_process_list=["prompt1", "prompt2"],
+prompt_tokens_list=["prompt1", "prompt2"],
)
self.assertEqual(len(response.choices), 2)