Files
FastDeploy/tests/input/v1/test_tokenizer_client.py
T
kxz2002 6e416c62dd [Optimization] The pre- and post-processing pipeline do not perform dict conversion (#5494)
* to_request_for_infer initial commit

* refact to from_chat_completion_request

* preprocess use request initial commit

* bugfix

* processors refact to using request

* bug fix

* refact Request from_generic_request

* post process initial commit

* bugfix

* postprocess second commit

* bugfix

* serving_embedding initial commit

* serving_reward initial commit

* bugfix

* replace function name

* async_llm initial commit

* offline initial commit and fix bug

* bugfix

* fix async_llm

* remove add speculate_metrics into data

* fix logprobs bug

* fix echo bug

* fix bug

* fix reasoning_max_tokens

* bugfix

* bugfix and modify unittest

* bugfix and modify unit test

* bugfix

* bugfix

* bugfix

* modify unittest

* fix error when reasoning_content is none for text_processor

* remove some unnecessary logic

* revert removed logic

* implement add and set method for RequestOutput and refact code

* modify unit test

* modify unit test

* union process_request and process_request_obj

* remove a unit test

* union process_response and process_response_obj

* support qwen3_vl_processor

* modify unittest and remove comments

* fix prompt_logprobs

* fix codestyle

* add v1

* v1

* fix unit test

* fix unit test

* fix pre-commit

* fix

* add process request

* add process request

* fix

* fix

* fix unit test

* fix unit test

* fix unit test

* fix unit test

* fix unit test

* remove file

* add unit test

* add unit test

* add unit test

* fix unit test

* fix unit test

* fix

* fix

---------

Co-authored-by: Jiaxin Sui <95567040+plusNew001@users.noreply.github.com>
Co-authored-by: luukunn <981429396@qq.com>
Co-authored-by: luukunn <83932082+luukunn@users.noreply.github.com>
Co-authored-by: Zhang Yulong <35552275+ZhangYulongg@users.noreply.github.com>
2026-01-22 00:50:52 +08:00

102 lines
3.3 KiB
Python

"""
# Copyright (c) 2025 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
import httpx
import pytest
import respx
from fastdeploy.input.tokenzier_client import (
AsyncTokenizerClient,
ImageEncodeRequest,
VideoEncodeRequest,
)
@pytest.mark.asyncio
@respx.mock
async def test_encode_image_success():
    """Happy path: submit an image-encode task, poll once, get the finished result."""
    server = "http://testserver"
    tokenizer = AsyncTokenizerClient(base_url=server)

    # Mock the task-creation endpoint; it hands back the task tag used for polling.
    respx.post(f"{server}/image/encode").mock(
        return_value=httpx.Response(200, json={"code": 0, "task_tag": "task123"})
    )

    # Mock the polling endpoint to report the task as already finished.
    finished_payload = {
        "state": "Finished",
        "result": {"feature_url": "bos://host:port/key", "feature_shape": [80, 45, 1563]},
    }
    respx.get(f"{server}/encode/get").mock(return_value=httpx.Response(200, json=finished_payload))

    req = ImageEncodeRequest(
        version="v1", req_id="req_img_001", is_gen=False, resolution=512, image_url="http://example.com/image.jpg"
    )

    outcome = await tokenizer.encode_image(req)

    # The client should surface the "result" payload from the poll response.
    assert outcome["feature_url"] == "bos://host:port/key"
    assert outcome["feature_shape"] == [80, 45, 1563]
@pytest.mark.asyncio
@respx.mock
async def test_encode_video_failure():
    """A poll response in the Error state must surface as a RuntimeError."""
    server = "http://testserver"
    tokenizer = AsyncTokenizerClient(base_url=server, max_wait=1)

    # Task creation succeeds and yields a pollable task tag.
    respx.post(f"{server}/video/encode").mock(
        return_value=httpx.Response(200, json={"code": 0, "task_tag": "task_vid_001"})
    )

    # Simulate the polling endpoint reporting a failed encode.
    respx.get(f"{server}/encode/get").mock(
        return_value=httpx.Response(200, json={"state": "Error", "message": "Encode failed"})
    )

    req = VideoEncodeRequest(
        version="v1",
        req_id="req_vid_001",
        is_gen=True,
        resolution=720,
        video_url="http://example.com/video.mp4",
        start_ts=0.0,
        end_ts=10.0,
        frames=30,
        vit_merge=True,
    )

    # The error message from the server should be propagated in the exception.
    with pytest.raises(RuntimeError, match="Encode failed"):
        await tokenizer.encode_video(req)
@pytest.mark.asyncio
@respx.mock
async def test_encode_timeout():
    """A poll that never reaches a terminal state must raise TimeoutError once max_wait elapses."""
    server = "http://testserver"
    # Short max_wait / poll_interval keep the test fast while still exercising the timeout path.
    tokenizer = AsyncTokenizerClient(base_url=server, max_wait=1, poll_interval=0.1)

    respx.post(f"{server}/image/encode").mock(
        return_value=httpx.Response(200, json={"code": 0, "task_tag": "task_timeout"})
    )

    # The polling endpoint perpetually reports an in-progress state, so the client times out.
    respx.get(f"{server}/encode/get").mock(return_value=httpx.Response(200, json={"status": "processing"}))

    req = ImageEncodeRequest(
        version="v1", req_id="req_img_timeout", is_gen=False, resolution=256, image_url="http://example.com/image.jpg"
    )

    with pytest.raises(TimeoutError):
        await tokenizer.encode_image(req)