Files
FastDeploy/tests/conftest.py
gongweibao edd31e8849 [Feature] Add Deterministic Inference Support (#6476)
* [tests] Add Paddle attention determinism tests and refactor resource manager

Add comprehensive determinism tests for Paddle attention layer and refactor
resource manager for deterministic mode support.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>

* only in gpu

* add docs

* Update tests/ce/deterministic/test_determinism_verification.py

* Update tests/inter_communicator/test_ipc_signal.py

* Update tests/layers/test_paddle_attention_determinism.py

* Update tests/engine/test_sampling_params_determinism.py

* Update tests/layers/test_paddle_attention_determinism_standalone.py

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>

* fix allreduce large tensor bug

* mv log files

* add files
---------

Co-authored-by: Claude Opus 4.6 <noreply@anthropic.com>
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
2026-02-26 19:31:51 -08:00

120 lines · 3.6 KiB · Python

# Copyright (c) 2025 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest


def pytest_configure(config):
    config.addinivalue_line("markers", "gpu: mark test as requiring GPU platform")


def pytest_collection_modifyitems(config, items):
    """Skip GPU-marked tests when not on a GPU platform.

    IMPORTANT: Do NOT import paddle or fastdeploy here. This function runs
    during pytest collection (before fork). Importing paddle initializes the
    CUDA runtime, which makes forked child processes unable to re-initialize
    CUDA (OSError: CUDA error(3), initialization error).
    """
    import glob

    # Probe the NVIDIA device nodes directly instead of importing a GPU
    # framework to ask about devices.
    has_gpu = len(glob.glob("/dev/nvidia[0-9]*")) > 0
    if has_gpu:
        return
    skip_marker = pytest.mark.skip(reason="Test requires GPU platform, skipping on non-GPU")
    for item in items:
        if "gpu" in item.keywords:
            item.add_marker(skip_marker)
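# Illustrative example (not part of this conftest): a test opts into the GPU
# gate via the marker registered above, and defers its paddle import into the
# test body so that collection never initializes CUDA. The test name and the
# tiny matmul are assumptions for illustration only:
#
#     @pytest.mark.gpu
#     def test_matmul_on_gpu():
#         import paddle  # imported after fork, inside the test body
#
#         x = paddle.ones([2, 2])
#         assert (x @ x).shape == [2, 2]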
import time
from typing import Any

from e2e.utils.serving_utils import (  # noqa: E402
    FD_API_PORT,
    FD_CACHE_QUEUE_PORT,
    FD_ENGINE_QUEUE_PORT,
    clean_ports,
)
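# For reference, a minimal port-availability probe could look like the sketch
# below. This is an assumption for illustration; the real clean_ports lives in
# e2e.utils.serving_utils and may work differently:
#
#     import socket
#
#     def port_is_free(port: int) -> bool:
#         # Binding succeeds only when nothing is listening on the port.
#         with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
#             s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
#             try:
#                 s.bind(("127.0.0.1", port))
#                 return True
#             except OSError:
#                 return False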
class FDRunner:
    def __init__(
        self,
        model_name_or_path: str,
        tensor_parallel_size: int = 1,
        max_num_seqs: int = 1,
        max_model_len: int = 1024,
        load_choices: str = "default",
        quantization: str = "None",
        **kwargs,
    ) -> None:
        from fastdeploy.entrypoints.llm import LLM

        # Free any ports left over from a previous engine before launching a
        # new one, then give the OS a moment to release them.
        clean_ports()
        time.sleep(10)
        graph_optimization_config = {"use_cudagraph": False}
        self.llm = LLM(
            model=model_name_or_path,
            tensor_parallel_size=tensor_parallel_size,
            max_num_seqs=max_num_seqs,
            max_model_len=max_model_len,
            load_choices=load_choices,
            quantization=quantization,
            max_num_batched_tokens=max_model_len,
            graph_optimization_config=graph_optimization_config,
            port=FD_API_PORT,
            cache_queue_port=FD_CACHE_QUEUE_PORT,
            engine_worker_queue_port=FD_ENGINE_QUEUE_PORT,
            **kwargs,
        )
    def generate(
        self,
        prompts: list[str],
        sampling_params,
        **kwargs: Any,
    ) -> list[tuple[list[int], str]]:
        req_outputs = self.llm.generate(prompts, sampling_params=sampling_params, **kwargs)
        outputs: list[tuple[list[int], str]] = []
        for output in req_outputs:
            # One (token_ids, text) pair per prompt.
            outputs.append((output.outputs.token_ids, output.outputs.text))
        return outputs
    def generate_topp0(
        self,
        prompts: list[str],
        max_tokens: int,
        **kwargs: Any,
    ) -> list[tuple[list[int], str]]:
        from fastdeploy.engine.sampling_params import SamplingParams

        # temperature=0.0 and top_p=0 force greedy decoding, so repeated runs
        # over the same engine should produce identical outputs.
        topp_params = SamplingParams(temperature=0.0, top_p=0, max_tokens=max_tokens)
        return self.generate(prompts, topp_params, **kwargs)
    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        # Drop the engine reference so it can be cleaned up between tests.
        del self.llm


@pytest.fixture(scope="session")
def fd_runner():
    return FDRunner
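
A minimal sketch of how a test might use the session-scoped fd_runner fixture. The model path is a placeholder, and the determinism expectation (greedy decoding repeated on the same engine matches token-for-token) is the property the surrounding PR aims to provide, not something this file guarantees:

def test_greedy_decoding_repeats(fd_runner):
    # fd_runner yields the FDRunner class; calling it boots a fresh engine.
    with fd_runner("./models/example-model") as runner:  # hypothetical path
        first = runner.generate_topp0(["Hello, world"], max_tokens=16)
        second = runner.generate_topp0(["Hello, world"], max_tokens=16)
    # With temperature=0.0 and top_p=0, both runs should match exactly.
    assert first == second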