mirror of
https://github.com/PaddlePaddle/FastDeploy.git
synced 2026-04-23 00:17:25 +08:00
49c033a828
* [backend] Support XPU via Paddle Inference backend * [backend] Support XPU via Paddle Inference backend * [backend] Support XPU via Paddle Inference backend * [XPU] support XPU benchmark via paddle inference * [XPU] support XPU benchmark via paddle inference * [benchmark] add xpu paddle h2d config files
15 lines
278 B
Plaintext
Executable File
device: xpu
device_id: 0
cpu_thread_nums: 1
warmup: 200
repeat: 1000
backend: paddle
profile_mode: runtime
include_h2d_d2h: false
use_fp16: false
collect_memory_info: false
sampling_interval: 1
precision_compare: false
xpu_l3_cache: 0
result_path: benchmark_xpu_paddle_fp32.txt