From 301bb59683780a76485a106d980cd6da4aa06f0d Mon Sep 17 00:00:00 2001 From: jiangjiajun Date: Tue, 11 Oct 2022 02:06:36 +0000 Subject: [PATCH] Update new docs --- .new_docs/README.md | 39 +- .new_docs/README_CN.md | 40 ++ .new_docs/README_EN.md | 40 ++ .new_docs/build_and_install/index.rst | 13 - .new_docs/build_and_install/prebuilt.md | 31 -- .new_docs/cn/build_and_install/README.md | 8 + .new_docs/cn/build_and_install/android.md | 3 + .new_docs/{ => cn}/build_and_install/cpu.md | 3 + .new_docs/{ => cn}/build_and_install/gpu.md | 4 + .../{ => cn}/build_and_install/jetson.md | 2 + .new_docs/cn/build_and_install/prebuilt.md | 64 +++ .new_docs/cn/faq/build_on_win_with_gui.md | 134 +++++ .new_docs/cn/faq/develop_a_new_model.md | 3 + .new_docs/cn/faq/how_to_change_backend.md | 47 ++ .new_docs/cn/faq/use_sdk_on_android.md | 3 + .new_docs/cn/faq/use_sdk_on_windows.md | 505 ++++++++++++++++++ .new_docs/{ => cn}/quantize.md | 2 + .new_docs/{ => cn}/quick_start/models/cpp.md | 0 .../{ => cn}/quick_start/models/python.md | 5 +- .new_docs/{ => cn}/quick_start/runtime/cpp.md | 0 .../{ => cn}/quick_start/runtime/python.md | 0 .new_docs/en/build_and_install/README.md | 8 + .new_docs/en/build_and_install/android.md | 3 + .new_docs/en/build_and_install/cpu.md | 107 ++++ .new_docs/en/build_and_install/gpu.md | 135 +++++ .new_docs/en/build_and_install/jetson.md | 50 ++ .new_docs/en/build_and_install/prebuilt.md | 64 +++ .new_docs/en/faq/build_on_win_with_gui.md | 134 +++++ .new_docs/en/faq/develop_a_new_model.md | 3 + .new_docs/en/faq/how_to_change_backend.md | 47 ++ .new_docs/en/faq/use_sdk_on_android.md | 3 + .new_docs/en/faq/use_sdk_on_windows.md | 505 ++++++++++++++++++ .new_docs/en/quantize.md | 11 + .new_docs/en/quick_start/models/cpp.md | 116 ++++ .new_docs/en/quick_start/models/python.md | 44 ++ .new_docs/en/quick_start/runtime/cpp.md | 1 + .new_docs/en/quick_start/runtime/python.md | 1 + .new_docs/quick_start/index.rst | 12 - .new_docs/quick_start/models/index.rst | 12 - 
.new_docs/quick_start/runtime/index.rst | 12 - 40 files changed, 2093 insertions(+), 121 deletions(-) mode change 100644 => 120000 .new_docs/README.md create mode 100644 .new_docs/README_CN.md create mode 100644 .new_docs/README_EN.md delete mode 100644 .new_docs/build_and_install/index.rst delete mode 100644 .new_docs/build_and_install/prebuilt.md create mode 100644 .new_docs/cn/build_and_install/README.md create mode 100644 .new_docs/cn/build_and_install/android.md rename .new_docs/{ => cn}/build_and_install/cpu.md (94%) rename .new_docs/{ => cn}/build_and_install/gpu.md (95%) rename .new_docs/{ => cn}/build_and_install/jetson.md (94%) create mode 100644 .new_docs/cn/build_and_install/prebuilt.md create mode 100644 .new_docs/cn/faq/build_on_win_with_gui.md create mode 100644 .new_docs/cn/faq/develop_a_new_model.md create mode 100644 .new_docs/cn/faq/how_to_change_backend.md create mode 100644 .new_docs/cn/faq/use_sdk_on_android.md create mode 100644 .new_docs/cn/faq/use_sdk_on_windows.md rename .new_docs/{ => cn}/quantize.md (83%) rename .new_docs/{ => cn}/quick_start/models/cpp.md (100%) rename .new_docs/{ => cn}/quick_start/models/python.md (81%) rename .new_docs/{ => cn}/quick_start/runtime/cpp.md (100%) rename .new_docs/{ => cn}/quick_start/runtime/python.md (100%) create mode 100644 .new_docs/en/build_and_install/README.md create mode 100644 .new_docs/en/build_and_install/android.md create mode 100644 .new_docs/en/build_and_install/cpu.md create mode 100644 .new_docs/en/build_and_install/gpu.md create mode 100644 .new_docs/en/build_and_install/jetson.md create mode 100644 .new_docs/en/build_and_install/prebuilt.md create mode 100644 .new_docs/en/faq/build_on_win_with_gui.md create mode 100644 .new_docs/en/faq/develop_a_new_model.md create mode 100644 .new_docs/en/faq/how_to_change_backend.md create mode 100644 .new_docs/en/faq/use_sdk_on_android.md create mode 100644 .new_docs/en/faq/use_sdk_on_windows.md create mode 100644 .new_docs/en/quantize.md create 
mode 100644 .new_docs/en/quick_start/models/cpp.md create mode 100644 .new_docs/en/quick_start/models/python.md create mode 100644 .new_docs/en/quick_start/runtime/cpp.md create mode 100644 .new_docs/en/quick_start/runtime/python.md delete mode 100644 .new_docs/quick_start/index.rst delete mode 100644 .new_docs/quick_start/models/index.rst delete mode 100644 .new_docs/quick_start/runtime/index.rst diff --git a/.new_docs/README.md b/.new_docs/README.md deleted file mode 100644 index beafcf67cb..0000000000 --- a/.new_docs/README.md +++ /dev/null @@ -1,38 +0,0 @@ -# 使用文档 - -## 安装文档 - -- [预编译库下载安装](./build_and_install/prebuilt.md) -- [GPU部署环境编译安装](./build_and_install/gpu.md) -- [CPU部署环境编译安装](./build_and_install/cpu.md) -- [Jetson部署环境编译安装](./build_and_install/jetson.md) -- [Android平台部署环境编译安装](./build_and_install/android.md) - -## 快速使用 - -- [PP-YOLOE Python部署示例]() -- [PP-YOLOE C++部署示例]() -- [Runtime Python使用示例]() -- [Runtime C++使用示例]() - -## API文档 - -- [Python API文档]() -- [C++ API文档]() -- [Java API文档]() - -## 性能调优 - -- [量化加速](quantize.md) - -## 常见问题 - -- [1. 如何配置模型部署的推理后端]() -- [2. Windows上C++ SDK如何使用]() -- [3. Android上如何使用FastDeploy]() -- [4. 怎么测试Benchmark]() -- [5. 
如何增加新的模型]() - -## 更多FastDeploy部署模块 - -- [服务化部署]() diff --git a/.new_docs/README.md b/.new_docs/README.md new file mode 120000 index 0000000000..bacd3186b4 --- /dev/null +++ b/.new_docs/README.md @@ -0,0 +1 @@ +README_CN.md \ No newline at end of file diff --git a/.new_docs/README_CN.md b/.new_docs/README_CN.md new file mode 100644 index 0000000000..3f5d3eac5a --- /dev/null +++ b/.new_docs/README_CN.md @@ -0,0 +1,40 @@ +[English](README_EN.md) | 简体中文 + +# 使用文档 + +## 安装文档 + +- [预编译库下载安装](cn/build_and_install/prebuilt.md) +- [GPU部署环境编译安装](cn/build_and_install/gpu.md) +- [CPU部署环境编译安装](cn/build_and_install/cpu.md) +- [Jetson部署环境编译安装](cn/build_and_install/jetson.md) +- [Android平台部署环境编译安装](cn/build_and_install/android.md) + +## 快速使用 + +- [PP-YOLOE Python部署示例](cn/quick_start/models/python.md) +- [PP-YOLOE C++部署示例](cn/quick_start/models/cpp.md) +- [Runtime Python使用示例](cn/quick_start/runtime/python.md) +- [Runtime C++使用示例](cn/quick_start/runtime/cpp.md) + +## API文档 + +- [Python API文档]() +- [C++ API文档]() + +## 性能调优 + +- [量化加速](cn/quantize.md) +- [不同后端调优选项](backends)(进行中) + +## 常见问题 + +- [1. 如何配置模型部署的推理后端](cn/faq/how_to_change_backend.md) +- [2. Windows上C++ SDK如何使用](cn/faq/use_sdk_on_windows.md) +- [3. Android上如何使用FastDeploy](cn/faq/use_sdk_on_android.md)(进行中) +- [4. 
如何增加新的模型](cn/faq/develop_a_new_model.md)(进行中) + +## 更多FastDeploy部署模块 + +- [服务化部署](../serving) +- [Benchmark测试](../benchmark) diff --git a/.new_docs/README_EN.md b/.new_docs/README_EN.md new file mode 100644 index 0000000000..36e2688ed1 --- /dev/null +++ b/.new_docs/README_EN.md @@ -0,0 +1,40 @@ +English | [简体中文](README_CN.md) + +# 使用文档 + +## 安装文档 + +- [预编译库下载安装](cn/build_and_install/prebuilt.md) +- [GPU部署环境编译安装](cn/build_and_install/gpu.md) +- [CPU部署环境编译安装](cn/build_and_install/cpu.md) +- [Jetson部署环境编译安装](cn/build_and_install/jetson.md) +- [Android平台部署环境编译安装](cn/build_and_install/android.md) + +## 快速使用 + +- [PP-YOLOE Python部署示例]() +- [PP-YOLOE C++部署示例]() +- [Runtime Python使用示例]() +- [Runtime C++使用示例]() + +## API文档 + +- [Python API文档]() +- [C++ API文档]() +- [Java API文档]() + +## 性能调优 + +- [量化加速](cn/quantize.md) + +## 常见问题 + +- [1. 如何配置模型部署的推理后端]() +- [2. Windows上C++ SDK如何使用]() +- [3. Android上如何使用FastDeploy]() +- [4. 怎么测试Benchmark]() +- [5. 如何增加新的模型]() + +## 更多FastDeploy部署模块 + +- [服务化部署]() diff --git a/.new_docs/build_and_install/index.rst b/.new_docs/build_and_install/index.rst deleted file mode 100644 index 5315d5f310..0000000000 --- a/.new_docs/build_and_install/index.rst +++ /dev/null @@ -1,13 +0,0 @@ -编译与安装 -======================================= - -FastDeploy支持在多种平台与硬件的部署,提供预编译库,同时也支持开发者根据需求灵活编译 - -.. 
toctree:: - :caption: 编译与安装 - :maxdepth: 2 - :titlesonly: - - cpu.md - gpu.md - jetson.md diff --git a/.new_docs/build_and_install/prebuilt.md b/.new_docs/build_and_install/prebuilt.md deleted file mode 100644 index 4354f4f64d..0000000000 --- a/.new_docs/build_and_install/prebuilt.md +++ /dev/null @@ -1,31 +0,0 @@ -# 预编译库安装 - -FastDeploy提供各平台预编译库,供开发者直接下载安装使用。当然FastDeploy编译也非常容易,开发者也可根据自身需求编译FastDeploy。 - -## Python安装 - -### Nvidia GPU部署环境 - -#### 环境要求 -- CUDA >= 11.2 -- cuDNN >= 8.0 -- python >= 3.6 -- OS: Linux(x64)/Windows 10(x64) - -支持CPU和Nvidia GPU的部署,默认集成Paddle Inference、ONNX Runtime、OpenVINO以及TensorRT推理后端,Vision视觉模型模块,Text文本NLP模型模块 - -Release版本(当前最新0.2.1)安装 -``` -pip install fastdeploy-gpu-python -f https://www.paddlepaddle.org.cn/whl/fastdeploy.html -``` - -其中推荐使用Conda配置开发环境 -``` -conda config --add channels conda-forge && conda install cudatoolkit=11.2 cudnn=8.2 -``` - -### CPU部署环境 - -- python >= 3.6 -- OS: Linux(x64/aarch64)/Windows 10 x64/Mac OSX(x86/aarm64 - diff --git a/.new_docs/cn/build_and_install/README.md b/.new_docs/cn/build_and_install/README.md new file mode 100644 index 0000000000..3a2d3fcc2d --- /dev/null +++ b/.new_docs/cn/build_and_install/README.md @@ -0,0 +1,8 @@ +# FastDeploy安装 + +- [预编译库下载安装](prebuilt.md) +- [GPU部署环境编译安装](gpu.md) +- [CPU部署环境编译安装](cpu.md) +- [Jetson部署环境编译安装](jetson.md) +- [Android平台部署环境编译安装](android.md) + diff --git a/.new_docs/cn/build_and_install/android.md b/.new_docs/cn/build_and_install/android.md new file mode 100644 index 0000000000..c091b75d55 --- /dev/null +++ b/.new_docs/cn/build_and_install/android.md @@ -0,0 +1,3 @@ +# Android部署库编译 + +进行中... 
diff --git a/.new_docs/build_and_install/cpu.md b/.new_docs/cn/build_and_install/cpu.md similarity index 94% rename from .new_docs/build_and_install/cpu.md rename to .new_docs/cn/build_and_install/cpu.md index eef3bb6ffc..5a5647c510 100644 --- a/.new_docs/build_and_install/cpu.md +++ b/.new_docs/cn/build_and_install/cpu.md @@ -1,3 +1,5 @@ +[English](../../en/build_and_install/cpu.md) | 简体中文 + # CPU部署库编译 FastDeploy当前在CPU支持后端引擎如下 @@ -54,6 +56,7 @@ msbuild INSTALL.vcxproj /m /p:Configuration=Release /p:Platform=x64 编译完成后,即在`CMAKE_INSTALL_PREFIX`指定的目录下生成C++推理库 +如您使用CMake GUI可参考文档[Windows使用CMakeGUI + Visual Studio 2019 IDE编译](../faq/build_on_win_with_gui.md) ## Python编译安装 diff --git a/.new_docs/build_and_install/gpu.md b/.new_docs/cn/build_and_install/gpu.md similarity index 95% rename from .new_docs/build_and_install/gpu.md rename to .new_docs/cn/build_and_install/gpu.md index 54eb288134..7d6bc77092 100644 --- a/.new_docs/build_and_install/gpu.md +++ b/.new_docs/cn/build_and_install/gpu.md @@ -1,3 +1,5 @@ +[English](../../en/build_and_install/gpu.md) | 简体中文 + # GPU部署库编译 FastDeploy当前在GPU环境支持Paddle Inference、ONNX Runtime和TensorRT,但同时在Linux&Windows的GPU环境也同时支持CPU硬件,因此编译时也可以同步将CPU的推理后端OpenVINO编译集成 @@ -69,6 +71,8 @@ msbuild INSTALL.vcxproj /m /p:Configuration=Release /p:Platform=x64 编译完成后,即在`CMAKE_INSTALL_PREFIX`指定的目录下生成C++推理库 +如您使用CMake GUI可参考文档[Windows使用CMakeGUI + Visual Studio 2019 IDE编译](../faq/build_on_win_with_gui.md) + ## Python编译安装 diff --git a/.new_docs/build_and_install/jetson.md b/.new_docs/cn/build_and_install/jetson.md similarity index 94% rename from .new_docs/build_and_install/jetson.md rename to .new_docs/cn/build_and_install/jetson.md index 6901a88ecc..38eb5e26ad 100644 --- a/.new_docs/build_and_install/jetson.md +++ b/.new_docs/cn/build_and_install/jetson.md @@ -1,3 +1,5 @@ +[English](../../en/build_and_install/jetson.md) | 简体中文 + # Jetson部署库编译 FastDeploy当前在Jetson仅支持ONNX Runtime CPU和TensorRT GPU两种后端推理 diff --git a/.new_docs/cn/build_and_install/prebuilt.md 
b/.new_docs/cn/build_and_install/prebuilt.md new file mode 100644 index 0000000000..4d945ae208 --- /dev/null +++ b/.new_docs/cn/build_and_install/prebuilt.md @@ -0,0 +1,64 @@ +[English](../../en/build_and_install/prebuilt.md) | 简体中文 + +# 预编译库安装 + +FastDeploy提供各平台预编译库,供开发者直接下载安装使用。当然FastDeploy编译也非常容易,开发者也可根据自身需求编译FastDeploy。 + +## GPU部署环境 + +### 环境要求 +- CUDA >= 11.2 +- cuDNN >= 8.0 +- python >= 3.6 +- OS: Linux(x64)/Windows 10(x64) + +支持CPU和Nvidia GPU的部署,默认集成Paddle Inference、ONNX Runtime、OpenVINO以及TensorRT推理后端,Vision视觉模型模块,Text文本NLP模型模块 + +### Python安装 + +Release版本(当前最新0.2.1)安装 +``` +pip install fastdeploy-gpu-python -f https://www.paddlepaddle.org.cn/whl/fastdeploy.html +``` + +其中推荐使用Conda配置开发环境 +``` +conda config --add channels conda-forge && conda install cudatoolkit=11.2 cudnn=8.2 +``` + +### C++ SDK安装 + +Release版本(当前最新0.2.1) + +| 平台 | 文件 | 说明 | +| :--- | :--- | :---- | +| Linux x64 | [fastdeploy-linux-x64-gpu-0.2.1.tgz](https://bj.bcebos.com/fastdeploy/release/cpp/fastdeploy-linux-x64-gpu-0.2.1.tgz) | g++ 8.2, CUDA 11.2, cuDNN 8.2编译产出 | +| Windows x64 | [fastdeploy-win-x64-gpu-0.2.1.zip](https://bj.bcebos.com/fastdeploy/release/cpp/fastdeploy-win-x64-gpu-0.2.1.zip) | Visual Studio 16 2019, CUDA 11.2, cuDNN 8.2编译产出 | + +## CPU部署环境 + +### 环境要求 +- python >= 3.6 +- OS: Linux(x64/aarch64)/Windows 10 x64/Mac OSX(x86/arm64) + +仅支持CPU部署,默认集成Paddle Inference、ONNX Runtime、OpenVINO, Vision视觉模型模块(Linux aarch64和Mac OSX下仅集成ONNX Runtime模块), Text文本NLP模型模块。 + +### Python安装 + +Release版本(当前最新0.2.1)安装 +``` +pip install fastdeploy-python -f https://www.paddlepaddle.org.cn/whl/fastdeploy.html +``` + +### C++ SDK安装 + +Release版本(当前最新0.2.1) + +| 平台 | 文件 | 说明 | +| :--- | :--- | :---- | +| Linux x64 | [fastdeploy-linux-x64-0.2.1.tgz](https://bj.bcebos.com/fastdeploy/release/cpp/fastdeploy-linux-x64-0.2.1.tgz) | g++ 8.2编译产出 | +| Windows x64 | [fastdeploy-win-x64-0.2.1.zip](https://bj.bcebos.com/fastdeploy/release/cpp/fastdeploy-win-x64-0.2.1.zip) | Visual Studio 16 2019编译产出 | +| Mac OSX 
x64 | [fastdeploy-osx-x86_64-0.2.1.tgz](https://bj.bcebos.com/fastdeploy/release/cpp/fastdeploy-osx-x86_64-0.2.1.tgz) | - | +| Mac OSX arm64 | [fastdeploy-osx-arm64-0.2.1.tgz](https://bj.bcebos.com/fastdeploy/release/cpp/fastdeploy-osx-arm64-0.2.1.tgz) | - | +| Linux aarch64 | [fastdeploy-linux-aarch64-0.2.0.tgz](https://bj.bcebos.com/fastdeploy/release/cpp/fastdeploy-linux-aarch64-0.2.0.tgz) | g++ 6.3.0编译产出 | + diff --git a/.new_docs/cn/faq/build_on_win_with_gui.md b/.new_docs/cn/faq/build_on_win_with_gui.md new file mode 100644 index 0000000000..c7fdb76f5a --- /dev/null +++ b/.new_docs/cn/faq/build_on_win_with_gui.md @@ -0,0 +1,134 @@ +# CMakeGUI + VS 2019 IDE编译FastDeploy + +注:此方式仅支持编译FastDeploy C++ SDK + +## 目录 +- [使用CMake GUI进行基础配置](#CMakeGuiAndVS2019Basic) +- [编译CPU版本 C++ SDK设置](#CMakeGuiAndVS2019CPU) +- [编译GPU版本 C++ SDK设置](#CMakeGuiAndVS2019GPU) +- [使用Visual Studio 2019 IDE进行编译](#CMakeGuiAndVS2019Build) + +### 使用CMake GUI进行基础配置 +
+ +步骤一:首先,打开CMake GUI,先初始化FastDeploy工程: + +![image](https://user-images.githubusercontent.com/31974251/192094881-c5beb0e5-82ae-4a62-a88c-73f3d80f7936.png) + +步骤二:点击Configure后,在弹窗中设置编译"x64"架构: + +![image](https://user-images.githubusercontent.com/31974251/192094951-958a0a22-2090-4ab6-84f5-3573164d0835.png) + +初始化完成后,显示如下: + +![image](https://user-images.githubusercontent.com/31974251/192095053-874b9c73-fc0d-4325-b555-ac94ab9a9f38.png) + +步骤三:由于FastDeploy目前只支持Release版本,因此,先将"CMAKE_CONFIGURATION_TYPES"修改成"Release" + +![image](https://user-images.githubusercontent.com/31974251/192095175-3aeede95-a633-4b3c-81f8-067f0a0a44a3.png) + +接下来,用户可根据自己实际的开发需求开启对应的编译选项,并生成sln解决方案。以下,针对编译CPU和GPU版本SDK各举一个例子。 + +### 编译CPU版本 C++ SDK设置 + +
+ +步骤一:勾选CPU版本对应的编译选项。注意CPU版本,请`不要`勾选WITH_GPU和ENABLE_TRT_BACKEND + +![image](https://user-images.githubusercontent.com/31974251/192095848-b3cfdf19-e378-41e0-b44e-5edb49461eeb.png) + +这个示例中,我们开启ORT、Paddle、OpenVINO等推理后端,并且选择了需要编译TEXT和VISION的API + + +步骤二:自定义设置SDK安装路径,修改CMAKE_INSTALL_PREFIX + +![image](https://user-images.githubusercontent.com/31974251/192095961-5f6e348a-c30b-4473-8331-8beefb7cd2e6.png) + +由于默认的安装路径是C盘,我们可以修改CMAKE_INSTALL_PREFIX来指定自己的安装路径,这里我们将安装路径修改到`build\fastdeploy-win-x64-0.2.1`目录下。 + +![image](https://user-images.githubusercontent.com/31974251/192096055-8a276a9e-6017-4447-9ded-b95c5579d663.png) + + + +### 编译GPU版本 C++ SDK设置 +
+ +步骤一:勾选GPU版本对应的编译选项。注意GPU版本,请`需要`勾选WITH_GPU + +![image](https://user-images.githubusercontent.com/31974251/192099254-9f82abb0-8a29-41ce-a0ce-da6aacf23582.png) + +这个示例中,我们开启ORT、Paddle、OpenVINO和TRT等推理后端,并且选择了需要编译TEXT和VISION的API。并且,由于开启了GPU和TensorRT,此时需要额外指定CUDA_DIRECTORY和TRT_DIRECTORY,在GUI界面中找到这两个变量,点击右侧的选项框,分别选择您安装CUDA的路径和TensorRT的路径 + + +![image](https://user-images.githubusercontent.com/31974251/192098907-9dd9a49c-4a3e-4641-8e68-f25da1cafbba.png) + + +![image](https://user-images.githubusercontent.com/31974251/192098984-7fefd824-7e3b-4185-abba-bae5d8765e2a.png) + + +步骤二:自定义设置SDK安装路径,修改CMAKE_INSTALL_PREFIX + +![image](https://user-images.githubusercontent.com/31974251/192099125-81fc8217-e51f-4039-9421-ba7a09c0027c.png) + + +由于默认的安装路径是C盘,我们可以修改CMAKE_INSTALL_PREFIX来指定自己的安装路径,这里我们将安装路径修改到`build\fastdeploy-win-x64-gpu-0.2.1`目录下。 + + +### 使用Visual Studio 2019 IDE进行编译 + +
+ +步骤一:点击"Generate",生成sln解决方案,并用Visual Studio 2019打开 + +![image](https://user-images.githubusercontent.com/31974251/192096162-c05cbb11-f96e-4c82-afde-c7fc02cddf68.png) + +这个过程默认会从下载一些编译需要的资源,cmake的dev警告可以不用管。生成完成之后可以看到以下界面: + +CPU版本SDK: + +![image](https://user-images.githubusercontent.com/31974251/192096478-faa570bd-7569-43c3-ad79-cc6be5b605e3.png) + +GPU版本SDK: + +![image](https://user-images.githubusercontent.com/31974251/192099583-300e4680-1089-45cf-afaa-d2afda8fd436.png) + + +左侧界面,可以看到所有编译需要的include路径和lib路径已经被设置好了,用户可以考虑把这些路径记录下来方便后续的开发。右侧界面,可以看到已经生成fastdeploy.sln解决方案文件。接下来,我们使用Visual Studio 2019打开这个解决方案文件(理论上VS2022也可以编译,但目前建议使用VS2019)。 + +![image](https://user-images.githubusercontent.com/31974251/192096765-2aeadd68-47fb-4cd6-b083-4a478cf5e584.jpg) + + +步骤二:在Visual Studio 2019点击"ALL BUILD"->右键点击"生成"开始编译 + +![image](https://user-images.githubusercontent.com/31974251/192096893-5d6bc428-b824-4ffe-8930-0ec6d4dcfd02.png) + +CPU版本SDK编译成功! + +![image](https://user-images.githubusercontent.com/31974251/192097020-979bd7a3-1cdd-4fb5-a931-864c5372933d.png) + +GPU版本SDK编译成功! + +![image](https://user-images.githubusercontent.com/31974251/192099902-4b661f9a-7691-4f7f-b573-92ca9397a890.png) + + +步骤三:编译完成后,在Visual Studio 2019点击"INSTALL"->右键点击"生成"将编译好的SDK安装到先前指定的目录 + + +![image](https://user-images.githubusercontent.com/31974251/192097073-ce5236eb-1ed7-439f-8098-fef7a2d02779.png) + +![image](https://user-images.githubusercontent.com/31974251/192097122-d675ae39-35fb-4dbb-9c75-eefb0597ec2e.png) + +SDK成功安装到指定目录! 
+ +### 编译所有examples(可选) +可以在CMake GUI中勾选BUILD_EXAMPLES选项,连带编译所有的examples,编译完成后所有example的可执行文件保存在build/bin/Release目录下 + +![image](https://user-images.githubusercontent.com/31974251/192110769-a4f0940d-dea3-4524-831b-1c2a6ab8e871.png) + +![image](https://user-images.githubusercontent.com/31974251/192110930-e7e49bc6-c271-4076-be74-3d103f27bc78.png) + + +## 特别提示 + +如果是用户自行编译SDK,理论上支持Windows 10/11,VS 2019/2022,CUDA 11.x 以及 TensorRT 8.x等配置,但建议使用我们推荐的默认配置,即:Windows 10, VS 2019, CUDA 11.2 和 TensorRT 8.4.x版本。另外,如果编译过程中遇到中文字符的编码问题(如UIE example必须传入中文字符进行预测),可以参考Visual Studio的官方文档,设置源字符集为`/utf-8`解决: +- [/utf-8(将源字符集和执行字符集设置为 UTF-8)](https://learn.microsoft.com/zh-cn/cpp/build/reference/utf-8-set-source-and-executable-character-sets-to-utf-8?view=msvc-170) diff --git a/.new_docs/cn/faq/develop_a_new_model.md b/.new_docs/cn/faq/develop_a_new_model.md new file mode 100644 index 0000000000..b2b5ac9903 --- /dev/null +++ b/.new_docs/cn/faq/develop_a_new_model.md @@ -0,0 +1,3 @@ +# FastDeploy集成新模型流程 + +进行中... 
diff --git a/.new_docs/cn/faq/how_to_change_backend.md b/.new_docs/cn/faq/how_to_change_backend.md new file mode 100644 index 0000000000..0188d849d9 --- /dev/null +++ b/.new_docs/cn/faq/how_to_change_backend.md @@ -0,0 +1,47 @@ +# 如何切换模型推理后端 + +FastDeploy中各视觉模型可支持多种后端,包括 +- OpenVINO (支持Paddle/ONNX两种格式模型, 仅支持CPU上推理) +- ONNX Runtime (支持Paddle/ONNX两种格式模型, 支持CPU/GPU) +- TensorRT (支持Paddle/ONNX两种格式模型,仅支持GPU上推理) +- Paddle Inference(支持Paddle格式模型, 支持CPU/GPU) + +所有模型切换后端方式均通过RuntimeOption进行切换, + +**Python** +```python +import fastdeploy as fd +option = fd.RuntimeOption() + +# 切换使用CPU/GPU +option.use_cpu() +option.use_gpu() + +# 切换不同后端 +option.use_paddle_backend() # Paddle Inference +option.use_trt_backend() # TensorRT +option.use_openvino_backend() # OpenVINO +option.use_ort_backend() # ONNX Runtime + +``` + +**C++** +```C++ +fastdeploy::RuntimeOption option; + +// 切换使用CPU/GPU +option.UseCpu(); +option.UseGpu(); + +// 切换不同后端 +option.UsePaddleBackend(); // Paddle Inference +option.UseTrtBackend(); // TensorRT +option.UseOpenVINOBackend(); // OpenVINO +option.UseOrtBackend(); // ONNX Runtime +``` + +具体示例可参阅`FastDeploy/examples/vision`下不同模型的python或c++推理代码 + +更多`RuntimeOption`的配置方式查阅FastDeploy API文档 +- [Python API]() +- [C++ API]() diff --git a/.new_docs/cn/faq/use_sdk_on_android.md b/.new_docs/cn/faq/use_sdk_on_android.md new file mode 100644 index 0000000000..0add916cea --- /dev/null +++ b/.new_docs/cn/faq/use_sdk_on_android.md @@ -0,0 +1,3 @@ +# Android平台使用FastDeploy部署 + +进行中... diff --git a/.new_docs/cn/faq/use_sdk_on_windows.md b/.new_docs/cn/faq/use_sdk_on_windows.md new file mode 100644 index 0000000000..be1e1ab0a0 --- /dev/null +++ b/.new_docs/cn/faq/use_sdk_on_windows.md @@ -0,0 +1,505 @@ +# 在 Windows 使用 FastDeploy C++ SDK + +## 目录 +- [1. 环境依赖](#Environment) +- [2. 下载 FastDeploy Windows 10 C++ SDK](#Download) +- [3. 
Windows下多种方式使用 C++ SDK 的方式](#CommandLine) + - [3.1 命令行方式使用 C++ SDK](#CommandLine) + - [3.1.1 在 Windows 命令行终端 上编译 example](#CommandLine) + - [3.1.2 运行可执行文件获得推理结果](#CommandLine) + - [3.2 Visual Studio 2019 创建sln工程使用 C++ SDK](#VisualStudio2019Sln) + - [3.2.1 Visual Studio 2019 创建 sln 工程项目](#VisualStudio2019Sln1) + - [3.2.2 从examples中拷贝infer_ppyoloe.cc的代码到工程](#VisualStudio2019Sln2) + - [3.2.3 将工程配置设置成"Release x64"配置](#VisualStudio2019Sln3) + - [3.2.4 配置头文件include路径](#VisualStudio2019Sln4) + - [3.2.5 配置lib路径和添加库文件](#VisualStudio2019Sln5) + - [3.2.6 编译工程并运行获取结果](#VisualStudio2019Sln6) + - [3.3 Visual Studio 2019 创建CMake工程使用 C++ SDK](#VisualStudio2019) + - [3.3.1 Visual Studio 2019 创建CMake工程项目](#VisualStudio20191) + - [3.3.2 在CMakeLists中配置 FastDeploy C++ SDK](#VisualStudio20192) + - [3.3.3 生成工程缓存并修改CMakeSetting.json配置](#VisualStudio20193) + - [3.3.4 生成可执行文件,运行获取结果](#VisualStudio20194) +- [4. 多种方法配置exe运行时所需的依赖库](#CommandLineDeps1) + - [4.1 使用 fastdeploy_init.bat 进行配置(推荐)](#CommandLineDeps1) + - [4.1.1 fastdeploy_init.bat 使用说明](#CommandLineDeps11) + - [4.1.2 fastdeploy_init.bat 查看 SDK 中所有的 dll、lib 和 include 路径](#CommandLineDeps12) + - [4.1.3 fastdeploy_init.bat 安装 SDK 中所有的 dll 到指定的目录](#CommandLineDeps13) + - [4.1.4 fastdeploy_init.bat 配置 SDK 环境变量](#CommandLineDeps14) + - [4.2 修改 CMakeLists.txt,一行命令配置(推荐)](#CommandLineDeps2) + - [4.3 命令行设置环境变量](#CommandLineDeps3) + - [4.4 手动拷贝依赖库到exe的目录下](#CommandLineDeps4) + + +## 1. 环境依赖 +
+ +- cmake >= 3.12 +- Visual Studio 16 2019 +- cuda >= 11.2 (当WITH_GPU=ON) +- cudnn >= 8.0 (当WITH_GPU=ON) + +## 2. 下载 FastDeploy Windows 10 C++ SDK +
+ +### 2.1 下载预编译库或者从源码编译最新的SDK +可以从以下链接下载编译好的 FastDeploy Windows 10 C++ SDK,SDK中包含了examples代码。 +```text +https://bj.bcebos.com/fastdeploy/release/cpp/fastdeploy-win-x64-gpu-0.2.1.zip +``` +源码编译请参考: [build_and_install](../build_and_install) +### 2.2 准备模型文件和测试图片 +可以从以下链接下载模型文件和测试图片,并解压缩 +```text +https://bj.bcebos.com/paddlehub/fastdeploy/ppyoloe_crn_l_300e_coco.tgz # (下载后解压缩) +https://gitee.com/paddlepaddle/PaddleDetection/raw/release/2.4/demo/000000014439.jpg +``` + +## 3. Windows下多种方式使用 C++ SDK 的方式 +### 3.1 SDK使用方式一:命令行方式使用 C++ SDK +
+ +#### 3.1.1 在 Windows 上编译 PPYOLOE +Windows菜单打开`x64 Native Tools Command Prompt for VS 2019`命令工具,cd到ppyoloe的demo路径 +```bat +cd fastdeploy-win-x64-gpu-0.2.1\examples\vision\detection\paddledetection\cpp +``` +```bat +mkdir build && cd build +cmake .. -G "Visual Studio 16 2019" -A x64 -DFASTDEPLOY_INSTALL_DIR=%cd%\..\..\..\..\..\..\..\fastdeploy-win-x64-gpu-0.2.1 -DCUDA_DIRECTORY="C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v11.2" +``` +然后执行 +```bat +msbuild infer_demo.sln /m:4 /p:Configuration=Release /p:Platform=x64 +``` + +#### 3.1.2 运行 demo +```bat +cd Release +infer_ppyoloe_demo.exe ppyoloe_crn_l_300e_coco 000000014439.jpg 0 # CPU +infer_ppyoloe_demo.exe ppyoloe_crn_l_300e_coco 000000014439.jpg 1 # GPU +infer_ppyoloe_demo.exe ppyoloe_crn_l_300e_coco 000000014439.jpg 2 # GPU + TensorRT +``` + +特别说明,exe运行时所需要的依赖库配置方法,请参考章节: [多种方法配置exe运行时所需的依赖库](#CommandLineDeps) + +### 3.2 SDK使用方式二:Visual Studio 2019 创建 sln 工程使用 C++ SDK + +本章节针对非CMake用户,介绍如何在Visual Studio 2019 中创建 sln 工程使用 FastDeploy C++ SDK. CMake用户请直接看下一章节。另外,本章节内容特别感谢“梦醒南天”同学关于FastDeploy使用的文档教程:[如何在 Windows 上使用 FastDeploy C++ 部署 PaddleDetection 目标检测模型](https://www.bilibili.com/read/cv18807232) + +
+ +#### 3.2.1 步骤一:Visual Studio 2019 创建 sln 工程项目 + +
+ +(1) 打开Visual Studio 2019,点击"创建新项目"->点击"控制台程序",从而创建新的sln工程项目. + +![image](https://user-images.githubusercontent.com/31974251/192813386-cf9a93e0-ee42-42b3-b8bf-d03ae7171d4e.png) + +![image](https://user-images.githubusercontent.com/31974251/192816516-a4965b9c-21c9-4a01-bbb2-c648a8256fc9.png) + +(2)点击“创建”,便创建了一个空的sln工程。我们直接从examples里面拷贝infer_ppyoloe的代码这里。 + +![image](https://user-images.githubusercontent.com/31974251/192817382-643c8ca2-1f2a-412e-954e-576c22b4ea62.png) + +#### 3.2.2 步骤二:从examples中拷贝infer_ppyoloe.cc的代码到工程 + +
+ +(1)从examples中拷贝infer_ppyoloe.cc的代码到工程,直接替换即可,拷贝代码的路径为: +```bat +fastdeploy-win-x64-gpu-0.2.1\examples\vision\detection\paddledetection\cpp +``` + +![image](https://user-images.githubusercontent.com/31974251/192818456-21ca846c-ab52-4001-96d2-77c8174bff6b.png) + +#### 3.2.3 步骤三:将工程配置设置成"Release x64"配置 + +
+ +![image](https://user-images.githubusercontent.com/31974251/192818918-98d7a54c-4a60-4760-a3cb-ecacc38b7e7a.png) + +#### 3.2.4 步骤四:配置头文件include路径 + +
+ + +(1)配置头文件include路径:鼠标选择项目,然后单击右键即可弹出下来菜单,在其中单击“属性”。 + +![image](https://user-images.githubusercontent.com/31974251/192820573-23096aea-046c-4bb4-9929-c412718805cb.png) + + +(2)在弹出来的属性页中选择:C/C++ —> 常规 —> 附加包含目录,然后在添加 fastdeploy 和 opencv 的头文件路径。如: + +```bat + +D:\qiuyanjun\fastdeploy_build\built\fastdeploy-win-x64-gpu-0.2.1\include +D:\qiuyanjun\fastdeploy_build\built\fastdeploy-win-x64-gpu-0.2.1\third_libs\install\opencv-win-x64-3.4.16\build\include +``` +注意,如果是自行编译最新的SDK或版本>0.2.1,依赖库目录结构有所变动,opencv路径需要做出适当的修改。如: +```bat +D:\qiuyanjun\fastdeploy_build\built\fastdeploy-win-x64-gpu-0.2.1\third_libs\install\opencv\build\include +``` + +![image](https://user-images.githubusercontent.com/31974251/192824445-978c06ed-cc14-4d6a-8ccf-d4594ca11533.png) + +用户需要根据自己实际的sdk路径稍作修改。 + + +#### 3.2.5 步骤五:配置lib路径和添加库文件 + +
 + +(1)属性页中选择:链接器—>常规—> 附加库目录,然后在添加 fastdeploy 和 opencv 的lib路径。如: +```bat +D:\qiuyanjun\fastdeploy_build\built\fastdeploy-win-x64-gpu-0.2.1\lib +D:\qiuyanjun\fastdeploy_build\built\fastdeploy-win-x64-gpu-0.2.1\third_libs\install\opencv-win-x64-3.4.16\build\x64\vc15\lib +``` +注意,如果是自行编译最新的SDK或版本>0.2.1,依赖库目录结构有所变动,opencv路径需要做出适当的修改。如: +```bat +D:\qiuyanjun\fastdeploy_build\built\fastdeploy-win-x64-gpu-0.2.1\third_libs\install\opencv\build\x64\vc15\lib +``` + +![image](https://user-images.githubusercontent.com/31974251/192826130-fe28791f-317c-4e66-a6a5-133e60b726f0.png) + +(2)添加库文件:只需要 fastdeploy.lib 和 opencv_world3416.lib + + ![image](https://user-images.githubusercontent.com/31974251/192826884-44fc84a1-c57a-45f1-8ee2-30b7eaa3dce9.png) + +#### 3.2.6 步骤六:编译工程并运行获取结果 + +
+ + +(1)点击菜单栏“生成”->“生成解决方案” + +![image](https://user-images.githubusercontent.com/31974251/192827608-beb53685-2f94-44dc-aa28-49b09a4ab864.png) + +![image](https://user-images.githubusercontent.com/31974251/192827842-1f05d435-8a3e-492b-a3b7-d5e88f85f814.png) + +编译成功,可以看到exe保存在: +```bat +D:\qiuyanjun\fastdeploy_test\infer_ppyoloe\x64\Release\infer_ppyoloe.exe +``` + +(2)执行可执行文件,获得推理结果。 首先需要拷贝所有的dll到exe所在的目录下。同时,也需要把ppyoloe的模型文件和测试图片下载解压缩后,拷贝到exe所在的目录。 特别说明,exe运行时所需要的依赖库配置方法,请参考章节: [多种方法配置exe运行时所需的依赖库](#CommandLineDeps) + +![image](https://user-images.githubusercontent.com/31974251/192829545-3ea36bfc-9a54-492b-984b-2d5d39094d47.png) + + +### 3.3 SDK使用方式三:Visual Studio 2019 创建 CMake 工程使用 C++ SDK +
+ +本章节针对CMake用户,介绍如何在Visual Studio 2019 中创建 CMake 工程使用 FastDeploy C++ SDK. + +#### 3.3.1 步骤一:Visual Studio 2019 创建“CMake”工程项目 + +
+ +(1)打开Visual Studio 2019,点击"创建新项目"->点击"CMake",从而创建CMake工程项目。以PPYOLOE为例,来说明如何在Visual Studio 2019 IDE中使用FastDeploy C++ SDK. + +![image](https://user-images.githubusercontent.com/31974251/192143543-9f29e4cb-2307-45ca-a61a-bcfba5df19ff.png) + +![image](https://user-images.githubusercontent.com/31974251/192143640-39e79c65-8b50-4254-8da6-baa21bb23e3c.png) + + +![image](https://user-images.githubusercontent.com/31974251/192143713-be2e6490-4cab-4151-8463-8c367dbc451a.png) + +(2)打开工程发现,Visual Stuio 2019已经为我们生成了一些基本的文件,其中包括CMakeLists.txt。infer_ppyoloe.h头文件这里实际上用不到,我们可以直接删除。 + +![image](https://user-images.githubusercontent.com/31974251/192143930-db1655c2-66ee-448c-82cb-0103ca1ca2a0.png) + +#### 3.3.2 步骤二:在CMakeLists中配置 FastDeploy C++ SDK + +
+ +(1)在工程创建完成后,我们需要添加infer_ppyoloe推理源码,并修改CMakeLists.txt,修改如下: + +![image](https://user-images.githubusercontent.com/31974251/192144782-79bccf8f-65d0-4f22-9f41-81751c530319.png) + +(2)其中infer_ppyoloe.cpp的代码可以直接从examples中的代码拷贝过来: +- [examples/vision/detection/paddledetection/cpp/infer_ppyoloe.cc](../../examples/vision/detection/paddledetection/cpp/infer_ppyoloe.cc) + +(3)CMakeLists.txt主要包括配置FastDeploy C++ SDK的路径,如果是GPU版本的SDK,还需要配置CUDA_DIRECTORY为CUDA的安装路径,CMakeLists.txt的配置如下: + +```cmake +project(infer_ppyoloe_demo C CXX) +cmake_minimum_required(VERSION 3.12) + +# Only support "Release" mode now +set(CMAKE_BUILD_TYPE "Release") + +# Set FastDeploy install dir +set(FASTDEPLOY_INSTALL_DIR "D:/qiuyanjun/fastdeploy-win-x64-gpu-0.2.1" + CACHE PATH "Path to downloaded or built fastdeploy sdk.") + +# Set CUDA_DIRECTORY (CUDA 11.x) for GPU SDK +set(CUDA_DIRECTORY "C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v11.7" + CACHE PATH "Path to installed CUDA Toolkit.") + +include(${FASTDEPLOY_INSTALL_DIR}/FastDeploy.cmake) + +include_directories(${FASTDEPLOY_INCS}) + +add_executable(infer_ppyoloe_demo ${PROJECT_SOURCE_DIR}/infer_ppyoloe.cpp) +target_link_libraries(infer_ppyoloe_demo ${FASTDEPLOY_LIBS}) + +# Optional: install all DLLs to binary dir. +install_fastdeploy_libraries(${CMAKE_CURRENT_BINARY_DIR}/Release) +``` +注意,`install_fastdeploy_libraries`函数仅在最新的代码编译的SDK或版本>0.2.1下有效。 + +#### 3.3.3 步骤三:生成工程缓存并修改CMakeSetting.json配置 + +
 + +(1)点击"CMakeLists.txt"->右键点击"生成缓存": + +![image](https://user-images.githubusercontent.com/31974251/192145349-c78b110a-0e41-4ee5-8942-3bf70bd94a75.png) + +发现已经成功生成缓存了,但是由于打开工程时,默认是Debug模式,我们发现exe和缓存保存路径还是Debug模式下的。 我们可以先修改CMake的设置为Release. + +(2)点击"CMakeLists.txt"->右键点击"infer_ppyoloe_demo的cmake设置",进入CMakeSettings.json的设置面板,把其中的Debug设置修改为Release. + +![image](https://user-images.githubusercontent.com/31974251/192145242-01d37b44-e2fa-47df-82c1-c11c2ccbff99.png) + +同时设置CMake生成器为 "Visual Studio 16 2019 Win64" + +![image](https://user-images.githubusercontent.com/31974251/192147961-ac46d0f6-7349-4126-a123-914af2b63d95.jpg) + +(3)点击保存CMake缓存以切换为Release配置: + +![image](https://user-images.githubusercontent.com/31974251/192145974-b5a63341-9143-49a2-8bfe-94ac641b1670.png) + +(4):(4.1)点击"CMakeLists.txt"->右键"CMake缓存仅限x64-Release"->"点击删除缓存";(4.2)点击"CMakeLists.txt"->"生成缓存";(4.3)如果在步骤一发现删除缓存的选项是灰色的可以直接点击"CMakeLists.txt"->"生成",若生成失败则可以重复尝试(4.1)和(4.2) + +![image](https://user-images.githubusercontent.com/31974251/192146394-51fbf2b8-1cba-41ca-bb45-5f26890f64ce.jpg) + +最终可以看到,配置已经成功生成Release模式下的CMake缓存了。 + +![image](https://user-images.githubusercontent.com/31974251/192146239-a1eacd9e-034d-4373-a262-65b18ce25b87.png) + + +#### 3.3.4 步骤四:生成可执行文件,运行获取结果。 + +
+ +(1)点击"CMakeLists.txt"->"生成"。可以发现已经成功生成了infer_ppyoloe_demo.exe,并保存在`out/build/x64-Release/Release`目录下。 + +![image](https://user-images.githubusercontent.com/31974251/192146852-c64d2252-8c8f-4309-a950-908a5cb258b8.png) + +(2)执行可执行文件,获得推理结果。 首先需要拷贝所有的dll到exe所在的目录下,这里我们可以在CMakeLists.txt添加一下命令,可将FastDeploy中所有的dll安装到指定的目录。注意,该方式仅在最新的代码编译的SDK或版本>0.2.1下有效。其他配置方式,请参考章节: [多种方法配置exe运行时所需的依赖库](#CommandLineDeps) + +```cmake +install_fastdeploy_libraries(${CMAKE_CURRENT_BINARY_DIR}/Release) +``` +(3)同时,也需要把ppyoloe的模型文件和测试图片下载解压缩后,拷贝到exe所在的目录。 准备完成后,目录结构如下: + +![image](https://user-images.githubusercontent.com/31974251/192147505-054edb77-564b-405e-89ee-fd0d2e413e78.png) + +(4)最后,执行以下命令获得推理结果: + +```bat +D:\xxxinfer_ppyoloe\out\build\x64-Release\Release>infer_ppyoloe_demo.exe ppyoloe_crn_l_300e_coco 000000014439.jpg 0 +[INFO] fastdeploy/runtime.cc(304)::fastdeploy::Runtime::Init Runtime initialized with Backend::OPENVINO in Device::CPU. +DetectionResult: [xmin, ymin, xmax, ymax, score, label_id] +415.047180,89.311569, 506.009613, 283.863098, 0.950423, 0 +163.665710,81.914932, 198.585342, 166.760895, 0.896433, 0 +581.788635,113.027618, 612.623474, 198.521713, 0.842596, 0 +267.217224,89.777306, 298.796051, 169.361526, 0.837951, 0 +...... +153.301407,123.233757, 177.130539, 164.558350, 0.066697, 60 +505.887604,140.919601, 523.167236, 151.875336, 0.084912, 67 + +Visualized result saved in ./vis_result.jpg +``` + +打开保存的图片查看可视化结果: + +
+ +
+ +特别说明,exe运行时所需要的依赖库配置方法,请参考章节: [多种方法配置exe运行时所需的依赖库](#CommandLineDeps) + +## 4. 多种方法配置exe运行时所需的依赖库 +
+说明:对于使用的最新源码编译的SDK或SDK版本>0.2.1的用户,我们推荐使用(4.1)和(4.2)中的方式配置运行时的依赖库。如果使用的SDK版本<=0.2.1,请参考(4.3)和(4.4)中的方式进行配置。 + +### 4.1 方式一:使用 fastdeploy_init.bat 进行配置(推荐) +
+ +对于版本高于0.2.1的SDK,我们提供了 **fastdeploy_init.bat** 工具来管理FastDeploy中所有的依赖库。可以通过该脚本工具查看(show)、拷贝(install) 和 设置(init and setup) SDK中所有的dll,方便用户快速完成运行时环境配置。 + +#### 4.1.1 fastdeploy_init.bat 使用说明 +
+
+首先进入SDK的根目录,运行以下命令,可以查看 fastdeploy_init.bat 的用法说明
+```bat
+D:\path-to-your-fastdeploy-sdk-dir>fastdeploy_init.bat help
+------------------------------------------------------------------------------------------------------------------------------------------------------------
+[1] [help] print help information: fastdeploy_init.bat help
+[2] [show] show all dlls/libs/include paths: fastdeploy_init.bat show fastdeploy-sdk-dir
+[3] [init] init all dlls paths for current terminal: fastdeploy_init.bat init fastdeploy-sdk-dir [WARNING: need copy onnxruntime.dll manually]
+[4] [setup] setup path env for current terminal: fastdeploy_init.bat setup fastdeploy-sdk-dir [WARNING: need copy onnxruntime.dll manually]
+[5] [install] install all dlls to a specific dir: fastdeploy_init.bat install fastdeploy-sdk-dir another-dir-to-install-dlls **[RECOMMEND]**
+[6] [install] install all dlls with logging infos: fastdeploy_init.bat install fastdeploy-sdk-dir another-dir-to-install-dlls info
+------------------------------------------------------------------------------------------------------------------------------------------------------------
+```
+用法简要说明如下:
+- help: 打印所有的用法说明
+- show: 查看SDK中所有的 dll、lib 和 include 路径
+- init: 初始化所有dll路径信息,后续用于设置terminal环境变量(不推荐,请参考4.3中关于onnxruntime的说明)
+- setup: 在init之后运行,设置terminal环境变量(不推荐,请参考4.3中关于onnxruntime的说明)
+- install: 将SDK中所有的dll安装到某个指定的目录(推荐)
+#### 4.1.2 fastdeploy_init.bat 查看 SDK 中所有的 dll、lib 和 include 路径
+
+ +进入SDK的根目录,运行show命令,可以查看SDK中所有的 dll、lib 和 include 路径。以下命令中 %cd% 表示当前目录(SDK的根目录)。 +```bat +D:\path-to-fastdeploy-sdk-dir>fastdeploy_init.bat show %cd% +------------------------------------------------------------------------------------------------------------------------------------------------------------ +[SDK] D:\path-to-fastdeploy-sdk-dir +------------------------------------------------------------------------------------------------------------------------------------------------------------ +[DLL] D:\path-to-fastdeploy-sdk-dir\lib\fastdeploy.dll **[NEEDED]** +[DLL] D:\path-to-fastdeploy-sdk-dir\third_libs\install\faster_tokenizer\lib\core_tokenizers.dll **[NEEDED]** +[DLL] D:\path-to-fastdeploy-sdk-dir\third_libs\install\opencv\build\x64\vc15\bin\opencv_ffmpeg3416_64.dll **[NEEDED]** +...... +------------------------------------------------------------------------------------------------------------------------------------------------------------ +[Lib] D:\path-to-fastdeploy-sdk-dir\lib\fastdeploy.lib **[NEEDED][fastdeploy]** +[Lib] D:\path-to-fastdeploy-sdk-dir\third_libs\install\faster_tokenizer\lib\core_tokenizers.lib **[NEEDED][fastdeploy::text]** +[Lib] D:\path-to-fastdeploy-sdk-dir\third_libs\install\opencv\build\x64\vc15\lib\opencv_world3416.lib **[NEEDED][fastdeploy::vision]** +...... +------------------------------------------------------------------------------------------------------------------------------------------------------------ +[Include] D:\path-to-fastdeploy-sdk-dir\include **[NEEDED][fastdeploy]** +[Include] D:\path-to-fastdeploy-sdk-dir\third_libs\install\faster_tokenizer\include **[NEEDED][fastdeploy::text]** +[Include] D:\path-to-fastdeploy-sdk-dir\third_libs\install\opencv\build\include **[NEEDED][fastdeploy::vision]** +...... 
+------------------------------------------------------------------------------------------------------------------------------------------------------------ +[XML] D:\path-to-fastdeploy-sdk-dir\third_libs\install\openvino\runtime\bin\plugins.xml **[NEEDED]** +------------------------------------------------------------------------------------------------------------------------------------------------------------ +``` +可以看到该命令会根据您当前的SDK,输出对应的信息,包含 dll、lib 和 include 的路径信息。对于 dll,被标记为 `[NEEDED]`的,是运行时所需要的,如果包含OpenVINO后端,还需要将他的plugins.xml拷贝到exe所在的目录;对于 lib 和 include,被标记为`[NEEDED]`的,是开发时所需要配置的最小依赖。并且,我们还增加了对应的API Tag标记,如果您只使用vision API,则只需要配置标记为 `[NEEDED][fastdeploy::vision]` 的 lib 和 include 路径. + +#### 4.1.3 fastdeploy_init.bat 安装 SDK 中所有的 dll 到指定的目录 (推荐) +
+ +进入SDK的根目录,运行install命令,可以将SDK 中所有的 dll 安装到指定的目录(如exe所在的目录)。我们推荐这种方式来配置exe运行所需要的依赖库。比如,可以在SDK根目录下创建一个临时的bin目录备份所有的dll文件。以下命令中 %cd% 表示当前目录(SDK的根目录)。 +```bat +% info参数为可选参数,添加info参数后会打印详细的安装信息 % +D:\path-to-fastdeploy-sdk-dir>fastdeploy_init.bat install %cd% bin +D:\path-to-fastdeploy-sdk-dir>fastdeploy_init.bat install %cd% bin info +``` +```bat +D:\path-to-fastdeploy-sdk-dir>fastdeploy_init.bat install %cd% bin +[INFO] Do you want to install all FastDeploy dlls ? +[INFO] From: D:\path-to-fastdeploy-sdk-dir +[INFO] To: bin +Choose y means YES, n means NO: [y/n]y +YES. +请按任意键继续. . . +[INFO] Created bin done! +已复制 1 个文件。 +已复制 1 个文件。 +已复制 1 个文件。 +已复制 1 个文件。 +..... +已复制 1 个文件。 +已复制 1 个文件。 +已复制 1 个文件。 +已复制 1 个文件。 +..... +``` +#### 4.1.4 fastdeploy_init.bat 配置 SDK 环境变量 +
+ +您也可以选择通过配置环境变量的方式来设置运行时的依赖库环境,这种方式只在当前的terminal有效。如果您使用的SDK中包含了onnxruntime推理后端,我们不推荐这种方式,详细原因请参考(4.3)中关于onnxruntime配置的说明(需要手动拷贝onnxruntime所有的dll到exe所在的目录)。配置 SDK 环境变量的方式如下。以下命令中 %cd% 表示当前目录(SDK的根目录)。 +```bat +% 先运行 init 初始化当前SDK所有的dll文件路径 % +D:\path-to-fastdeploy-sdk-dir>fastdeploy_init.bat init %cd% +% 再运行 setup 完成 SDK 环境变量配置 % +D:\path-to-fastdeploy-sdk-dir>fastdeploy_init.bat setup %cd% +``` + +### 4.2 方式二:修改CMakeLists.txt,一行命令配置(推荐) +
+ +考虑到Windows下C++开发的特殊性,如经常需要拷贝所有的lib或dll文件到某个指定的目录,FastDeploy提供了`install_fastdeploy_libraries`的cmake函数,方便用户快速配置所有的dll。修改ppyoloe的CMakeLists.txt,添加: +```cmake +install_fastdeploy_libraries(${CMAKE_CURRENT_BINARY_DIR}/Release) +``` +注意,该方式仅在最新的代码编译的SDK或版本>0.2.1下有效。 + +### 4.3 方式三:命令行设置环境变量 +
+ +编译好的exe保存在Release目录下,在运行demo前,需要将模型和测试图片拷贝至该目录。另外,需要在终端指定DLL的搜索路径。请在build目录下执行以下命令。 +```bat +set FASTDEPLOY_HOME=%cd%\..\..\..\..\..\..\..\fastdeploy-win-x64-gpu-0.2.1 +set PATH=%FASTDEPLOY_HOME%\lib;%PATH% +set PATH=%FASTDEPLOY_HOME%\third_libs\install\onnxruntime\lib;%PATH% +set PATH=%FASTDEPLOY_HOME%\third_libs\install\opencv-win-x64-3.4.16\build\x64\vc15\bin;%PATH% +set PATH=%FASTDEPLOY_HOME%\third_libs\install\paddle_inference\paddle\lib;%PATH% +set PATH=%FASTDEPLOY_HOME%\third_libs\install\paddle_inference\third_party\install\mkldnn\lib;%PATH% +set PATH=%FASTDEPLOY_HOME%\third_libs\install\paddle_inference\third_party\install\mklml\lib;%PATH% +set PATH=%FASTDEPLOY_HOME%\third_libs\install\paddle2onnx\lib;%PATH% +set PATH=%FASTDEPLOY_HOME%\third_libs\install\tensorrt\lib;%PATH% +set PATH=%FASTDEPLOY_HOME%\third_libs\install\faster_tokenizer\lib;%PATH% +set PATH=%FASTDEPLOY_HOME%\third_libs\install\faster_tokenizer\third_party\lib;%PATH% +set PATH=%FASTDEPLOY_HOME%\third_libs\install\yaml-cpp\lib;%PATH% +set PATH=%FASTDEPLOY_HOME%\third_libs\install\openvino\bin;%PATH% +set PATH=%FASTDEPLOY_HOME%\third_libs\install\openvino\3rdparty\tbb\bin;%PATH% +``` +注意,需要拷贝onnxruntime.dll到exe所在的目录。 +```bat +copy /Y %FASTDEPLOY_HOME%\third_libs\install\onnxruntime\lib\onnxruntime* Release\ +``` +由于较新的Windows在System32系统目录下自带了onnxruntime.dll,因此就算设置了PATH,系统依然会出现onnxruntime的加载冲突。因此需要先拷贝demo用到的onnxruntime.dll到exe所在的目录。如下 +```bat +where onnxruntime.dll +C:\Windows\System32\onnxruntime.dll # windows自带的onnxruntime.dll +``` +另外,注意,如果是自行编译最新的SDK或版本>0.2.1,opencv和openvino目录结构有所改变,路径需要做出适当的修改。如: +```bat +set PATH=%FASTDEPLOY_HOME%\third_libs\install\opencv\build\x64\vc15\bin;%PATH% +set PATH=%FASTDEPLOY_HOME%\third_libs\install\openvino\runtime\bin;%PATH% +set PATH=%FASTDEPLOY_HOME%\third_libs\install\openvino\runtime\3rdparty\tbb\bin;%PATH% +``` +可以把上述命令拷贝并保存到build目录下的某个bat脚本文件中(包含copy onnxruntime),如`setup_fastdeploy_dll.bat`,方便多次使用。 +```bat +setup_fastdeploy_dll.bat +``` + +### 4.4 
方式四:手动拷贝依赖库到exe的目录下 + +
+ +手动拷贝,或者在build目录下执行以下命令: +```bat +set FASTDEPLOY_HOME=%cd%\..\..\..\..\..\..\..\fastdeploy-win-x64-gpu-0.2.1 +copy /Y %FASTDEPLOY_HOME%\lib\*.dll Release\ +copy /Y %FASTDEPLOY_HOME%\third_libs\install\onnxruntime\lib\*.dll Release\ +copy /Y %FASTDEPLOY_HOME%\third_libs\install\opencv-win-x64-3.4.16\build\x64\vc15\bin\*.dll Release\ +copy /Y %FASTDEPLOY_HOME%\third_libs\install\paddle_inference\paddle\lib\*.dll Release\ +copy /Y %FASTDEPLOY_HOME%\third_libs\install\paddle_inference\third_party\install\mkldnn\lib\*.dll Release\ +copy /Y %FASTDEPLOY_HOME%\third_libs\install\paddle_inference\third_party\install\mklml\lib\*.dll Release\ +copy /Y %FASTDEPLOY_HOME%\third_libs\install\paddle2onnx\lib\*.dll Release\ +copy /Y %FASTDEPLOY_HOME%\third_libs\install\tensorrt\lib\*.dll Release\ +copy /Y %FASTDEPLOY_HOME%\third_libs\install\faster_tokenizer\lib\*.dll Release\ +copy /Y %FASTDEPLOY_HOME%\third_libs\install\faster_tokenizer\third_party\lib\*.dll Release\ +copy /Y %FASTDEPLOY_HOME%\third_libs\install\yaml-cpp\lib\*.dll Release\ +copy /Y %FASTDEPLOY_HOME%\third_libs\install\openvino\bin\*.dll Release\ +copy /Y %FASTDEPLOY_HOME%\third_libs\install\openvino\bin\*.xml Release\ +copy /Y %FASTDEPLOY_HOME%\third_libs\install\openvino\3rdparty\tbb\bin\*.dll Release\ +``` +另外,注意,如果是自行编译最新的SDK或版本>0.2.1,opencv和openvino目录结构有所改变,路径需要做出适当的修改。如: +```bat +copy /Y %FASTDEPLOY_HOME%\third_libs\install\opencv\build\x64\vc15\bin\*.dll Release\ +copy /Y %FASTDEPLOY_HOME%\third_libs\install\openvino\runtime\bin\*.dll Release\ +copy /Y %FASTDEPLOY_HOME%\third_libs\install\openvino\runtime\3rdparty\tbb\bin\*.dll Release\ +``` +可以把上述命令拷贝并保存到build目录下的某个bat脚本文件中,如`copy_fastdeploy_dll.bat`,方便多次使用。 +```bat +copy_fastdeploy_dll.bat +``` +特别说明:上述的set和copy命令对应的依赖库路径,需要用户根据自己使用SDK中的依赖库进行适当地修改。比如,若是CPU版本的SDK,则不需要TensorRT相关的设置。 diff --git a/.new_docs/quantize.md b/.new_docs/cn/quantize.md similarity index 83% rename from .new_docs/quantize.md rename to .new_docs/cn/quantize.md index 
a022863dbd..eb626c6e6c 100644 --- a/.new_docs/quantize.md +++ b/.new_docs/cn/quantize.md @@ -1,3 +1,5 @@ +[English](../en/quantize.md) | 简体中文 + # 量化加速 简要介绍量化加速的原理。 diff --git a/.new_docs/quick_start/models/cpp.md b/.new_docs/cn/quick_start/models/cpp.md similarity index 100% rename from .new_docs/quick_start/models/cpp.md rename to .new_docs/cn/quick_start/models/cpp.md diff --git a/.new_docs/quick_start/models/python.md b/.new_docs/cn/quick_start/models/python.md similarity index 81% rename from .new_docs/quick_start/models/python.md rename to .new_docs/cn/quick_start/models/python.md index d5cd00f6a0..6cad7ccf1b 100644 --- a/.new_docs/quick_start/models/python.md +++ b/.new_docs/cn/quick_start/models/python.md @@ -2,7 +2,7 @@ 确认开发环境已安装FastDeploy,参考[FastDeploy安装](../../build_and_install/)安装预编译的FastDeploy,或根据自己需求进行编译安装。 -本文档以PaddleDetection目标检测模型PPYOLOE为例展示CPU上的推理示例 +本文档以PaddleDetection目标检测模型PicoDet为例展示CPU上的推理示例 ## 1. 获取模型和测试图像 @@ -10,7 +10,7 @@ import fastdeploy as fd model_url = "https://bj.bcebos.com/paddlehub/fastdeploy/ppyoloe_crn_l_300e_coco.tgz" -image_url - "https://gitee.com/paddlepaddle/PaddleDetection/raw/release/2.4/demo/000000014439.jpg" +image_url - "https://bj.bcebos.com/fastdeploy/tests/test_det.jpg" fd.download_and_decompress(model_url, path=".") fd.download(image_url, path=".") ``` @@ -18,7 +18,6 @@ fd.download(image_url, path=".") ## 2. 
加载模型 - 更多模型的示例可参考[FastDeploy/examples](https://github.com/PaddlePaddle/FastDeploy/tree/develop/examples) -- 模型API说明见API文档[FastDeploy模型Python API文档](../../apis/models/python/) ``` python model_file = "ppyoloe_crn_l_300e_coco/model.pdmodel" diff --git a/.new_docs/quick_start/runtime/cpp.md b/.new_docs/cn/quick_start/runtime/cpp.md similarity index 100% rename from .new_docs/quick_start/runtime/cpp.md rename to .new_docs/cn/quick_start/runtime/cpp.md diff --git a/.new_docs/quick_start/runtime/python.md b/.new_docs/cn/quick_start/runtime/python.md similarity index 100% rename from .new_docs/quick_start/runtime/python.md rename to .new_docs/cn/quick_start/runtime/python.md diff --git a/.new_docs/en/build_and_install/README.md b/.new_docs/en/build_and_install/README.md new file mode 100644 index 0000000000..3a2d3fcc2d --- /dev/null +++ b/.new_docs/en/build_and_install/README.md @@ -0,0 +1,8 @@ +# FastDeploy安装 + +- [预编译库下载安装](cn/build_and_install/prebuilt.md) +- [GPU部署环境编译安装](cn/build_and_install/gpu.md) +- [CPU部署环境编译安装](cn/build_and_install/cpu.md) +- [Jetson部署环境编译安装](cn/build_and_install/jetson.md) +- [Android平台部署环境编译安装](cn/build_and_install/android.md) + diff --git a/.new_docs/en/build_and_install/android.md b/.new_docs/en/build_and_install/android.md new file mode 100644 index 0000000000..c091b75d55 --- /dev/null +++ b/.new_docs/en/build_and_install/android.md @@ -0,0 +1,3 @@ +# Android部署库编译 + +进行中... diff --git a/.new_docs/en/build_and_install/cpu.md b/.new_docs/en/build_and_install/cpu.md new file mode 100644 index 0000000000..5a5647c510 --- /dev/null +++ b/.new_docs/en/build_and_install/cpu.md @@ -0,0 +1,107 @@ +[English](../../en/build_and_install/cpu.md) | 简体中文 + +# CPU部署库编译 + +FastDeploy当前在CPU支持后端引擎如下 + +| 后端 | 平台 | 支持模型格式 | 说明 | +| :--- | :---- | :----------- | :--- | +| Paddle Inference | Windows(x64)
Linux(x64) | Paddle | 编译开关`ENABLE_PADDLE_BACKEND`为ON或OFF控制, 默认OFF | +| ONNX Runtime | Windows(x64)
Linux(x64/aarch64)
Mac(x86/arm64) | Paddle/ONNX | 编译开关`ENABLE_ORT_BACKEND`为ON或OFF控制,默认OFF | +| OpenVINO | Windows(x64)
Linux(x64)
Mac(x86) | Paddle/ONNX | 编译开关`ENABLE_OPENVINO_BACKEND`为ON或OFF控制,默认OFF | + +## C++ SDK编译安装 + +### Linux & Mac + +Linux上编译需满足 +- gcc/g++ >= 5.4(推荐8.2) +- cmake >= 3.18.0 + +``` +git clone https://github.com/PaddlePaddle/FastDeploy.git +cd FastDeploy +mkdir build && cd build +cmake .. -DENABLE_ORT_BACKEND=ON \ + -DENABLE_PADDLE_BACKEND=ON \ + -DENABLE_OPENVINO_BACKEND=ON \ + -DCMAKE_INSTALL_PREFIX=${PWD}/compiled_fastdeploy_sdk \ + -DENABLE_VISION=ON +make -j12 +make install +``` + +### Windows + +Windows编译需要满足条件 + +- Windows 10/11 x64 +- Visual Studio 2019 + +在Windows菜单中,找到`x64 Native Tools Command Prompt for VS 2019`打开,执行如下命令 + +``` +git clone https://github.com/PaddlePaddle/FastDeploy.git +cd FastDeploy +mkdir build && cd build +cmake .. -G "Visual Studio 16 2019" -A x64 \ + -DENABLE_ORT_BACKEND=ON \ + -DENABLE_PADDLE_BACKEND=ON \ + -DENABLE_OPENVINO_BACKEND=ON \ + -DENABLE_VISION=ON \ + -DCMAKE_INSTALL_PREFIX="D:\Paddle\compiled_fastdeploy" +msbuild fastdeploy.sln /m /p:Configuration=Release /p:Platform=x64 +msbuild INSTALL.vcxproj /m /p:Configuration=Release /p:Platform=x64 +``` + +编译完成后,即在`CMAKE_INSTALL_PREFIX`指定的目录下生成C++推理库 + +如您使用CMake GUI可参考文档[Windows使用CMakeGUI + Visual Studio 2019 IDE编译](../faq/build_on_win_with_gui.md) + +## Python编译安装 + +编译过程同样需要满足 +- gcc/g++ >= 5.4(推荐8.2) +- cmake >= 3.18.0 +- python >= 3.6 + +所有编译选项通过环境变量导入 + +### Linux & Mac + +``` +git clone https://github.com/PaddlePaddle/FastDeploy.git +cd FastDeploy/python +export ENABLE_ORT_BACKEND=ON +export ENABLE_PADDLE_BACKEND=ON +export ENABLE_OPENVINO_BACKEND=ON +export ENABLE_VISION=ON + +python setup.py build +python setup.py bdist_wheel +``` + +### Windows + +编译过程同样需要满足 +- Windows 10/11 x64 +- Visual Studio 2019 +- python >= 3.6 + +在Windows菜单中,找到`x64 Native Tools Command Prompt for VS 2019`打开,执行如下命令 + +``` +git clone https://github.com/PaddlePaddle/FastDeploy.git +cd FastDeploy/python +set ENABLE_ORT_BACKEND=ON +set ENABLE_PADDLE_BACKEND=ON +set ENABLE_OPENVINO_BACKEND=ON +set 
ENABLE_VISION=ON + +python setup.py build +python setup.py bdist_wheel +``` + +编译完成即会在`FastDeploy/python/dist`目录下生成编译后的`wheel`包,直接pip install即可 + +编译过程中,如若修改编译参数,为避免带来缓存影响,可删除`FastDeploy/python`目录下的`build`和`.setuptools-cmake-build`两个子目录后再重新编译 diff --git a/.new_docs/en/build_and_install/gpu.md b/.new_docs/en/build_and_install/gpu.md new file mode 100644 index 0000000000..7d6bc77092 --- /dev/null +++ b/.new_docs/en/build_and_install/gpu.md @@ -0,0 +1,135 @@ +[English](../../en/build_and_install/gpu.md) | 简体中文 + +# GPU部署库编译 + +FastDeploy当前在GPU环境支持Paddle Inference、ONNX Runtime和TensorRT,但同时在Linux&Windows的GPU环境也同时支持CPU硬件,因此编译时也可以同步将CPU的推理后端OpenVINO编译集成 + +| 后端 | 平台 | 支持模型格式 | 说明 | +| :--- | :---- | :----------- | :--- | +| Paddle Inference | Windows(x64)
Linux(x64) | Paddle | 同时支持CPU/GPU,编译开关`ENABLE_PADDLE_BACKEND`为ON或OFF控制, 默认OFF | +| ONNX Runtime | Windows(x64)
Linux(x64/aarch64)
Mac(x86/arm64) | Paddle/ONNX | 同时支持CPU/GPU,编译开关`ENABLE_ORT_BACKEND`为ON或OFF控制,默认OFF | +| TensorRT | Windows(x64)
Linux(x64) | Paddle/ONNX | 仅支持GPU,编译开关`ENABLE_TRT_BACKEND`为ON或OFF控制,默认OFF | +| OpenVINO | Windows(x64)
Linux(x64) | Paddle/ONNX | 仅支持CPU,编译开关`ENABLE_OPENVINO_BACKEND`为ON或OFF控制,默认OFF | + +注意编译GPU环境时,需额外指定`WITH_GPU`为ON,设定`CUDA_DIRECTORY`,如若需集成TensorRT,还需同时设定`TRT_DIRECTORY` + +## C++ SDK编译安装 + +### Linux + +Linux上编译需满足 +- gcc/g++ >= 5.4(推荐8.2) +- cmake >= 3.18.0 +- cuda >= 11.2 +- cudnn >= 8.2 + +``` +git clone https://github.com/PaddlePaddle/FastDeploy.git +cd FastDeploy +mkdir build && cd build +cmake .. -DENABLE_ORT_BACKEND=ON \ + -DENABLE_PADDLE_BACKEND=ON \ + -DENABLE_OPENVINO_BACKEND=ON \ + -DENABLE_TRT_BACKEND=ON \ + -DWITH_GPU=ON \ + -DTRT_DIRECTORY=/Paddle/TensorRT-8.4.1.5 \ + -DCUDA_DIRECTORY=/usr/local/cuda \ + -DCMAKE_INSTALL_PREFIX=${PWD}/compiled_fastdeploy_sdk \ + -DENABLE_VISION=ON +make -j12 +make install +``` + +### Windows + +Windows编译需要满足条件 + +- Windows 10/11 x64 +- Visual Studio 2019 +- cuda >= 11.2 +- cudnn >= 8.2 + +在Windows菜单中,找到`x64 Native Tools Command Prompt for VS 2019`打开,执行如下命令 + +``` +git clone https://github.com/PaddlePaddle/FastDeploy.git +cd FastDeploy +mkdir build && cd build +cmake .. 
-G "Visual Studio 16 2019" -A x64 \ + -DENABLE_ORT_BACKEND=ON \ + -DENABLE_PADDLE_BACKEND=ON \ + -DENABLE_OPENVINO_BACKEND=ON \ + -DENABLE_TRT_BACKEND=ON + -DENABLE_VISION=ON \ + -DWITH_GPU=ON \ + -DTRT_DIRECTORY="D:\Paddle\TensorRT-8.4.1.5" \ + -DCUDA_DIRECTORY="C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v11.2" \ + -DCMAKE_INSTALL_PREFIX="D:\Paddle\compiled_fastdeploy" +msbuild fastdeploy.sln /m /p:Configuration=Release /p:Platform=x64 +msbuild INSTALL.vcxproj /m /p:Configuration=Release /p:Platform=x64 +``` + +编译完成后,即在`CMAKE_INSTALL_PREFIX`指定的目录下生成C++推理库 + +如您使用CMake GUI可参考文档[Windows使用CMakeGUI + Visual Studio 2019 IDE编译](../faq/build_on_win_with_gui.md) + +## Python编译安装 + + +### Linux + +编译过程需要满足 +- gcc/g++ >= 5.4(推荐8.2) +- cmake >= 3.18.0 +- python >= 3.6 +- cuda >= 11.2 +- cudnn >= 8.2 + +所有编译选项通过环境变量导入 + +``` +git clone https://github.com/PaddlePaddle/FastDeploy.git +cd FastDeploy/python +export ENABLE_ORT_BACKEND=ON +export ENABLE_PADDLE_BACKEND=ON +export ENABLE_OPENVINO_BACKEND=ON +export ENABLE_VISION=ON +export ENABLE_TRT_BACKEND=ON +export WITH_GPU=ON +export TRT_DIRECTORY=/Paddle/TensorRT-8.4.1.5 +export CUDA_DIRECTORY=/usr/local/cuda + +python setup.py build +python setup.py bdist_wheel +``` + +### Windows + +编译过程同样需要满足 +- Windows 10/11 x64 +- Visual Studio 2019 +- python >= 3.6 +- cuda >= 11.2 +- cudnn >= 8.2 + +在Windows菜单中,找到`x64 Native Tools Command Prompt for VS 2019`打开,执行如下命令 + +``` +git clone https://github.com/PaddlePaddle/FastDeploy.git +cd FastDeploy/python +export ENABLE_ORT_BACKEND=ON +export ENABLE_PADDLE_BACKEND=ON +export ENABLE_OPENVINO_BACKEND=ON +export ENABLE_VISION=ON +export ENABLE_TRT_BACKEND=ON +export WITH_GPU=ON +export TRT_DIRECTORY="D:\Paddle\TensorRT-8.4.1.5" +export CUDA_DIRECTORY="C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v11.2" + +python setup.py build +python setup.py bdist_wheel +``` + +编译完成即会在`FastDeploy/python/dist`目录下生成编译后的`wheel`包,直接pip install即可 + 
+编译过程中,如若修改编译参数,为避免带来缓存影响,可删除`FastDeploy/python`目录下的`build`和`.setuptools-cmake-build`两个子目录后再重新编译 diff --git a/.new_docs/en/build_and_install/jetson.md b/.new_docs/en/build_and_install/jetson.md new file mode 100644 index 0000000000..38eb5e26ad --- /dev/null +++ b/.new_docs/en/build_and_install/jetson.md @@ -0,0 +1,50 @@ +[English](../../en/build_and_install/jetson.md) | 简体中文 + +# Jetson部署库编译 + +FastDeploy当前在Jetson仅支持ONNX Runtime CPU和TensorRT GPU两种后端推理 + +## C++ SDK编译安装 + +编译需满足 +- gcc/g++ >= 5.4(推荐8.2) +- cmake >= 3.10.0 +- jetpack >= 4.6.1 + +``` +git clone https://github.com/PaddlePaddle/FastDeploy.git +cd FastDeploy +mkdir build && cd build +cmake .. -DBUILD_ON_JETSON=ON \ + -DENABLE_VISION=ON \ + -DCMAKE_INSTALL_PREFIX=${PWD}/installed_fastdeploy +make -j8 +make install +``` + +编译完成后,即在`CMAKE_INSTALL_PREFIX`指定的目录下生成C++推理库 + + +## Python编译安装 + +编译过程同样需要满足 +- gcc/g++ >= 5.4(推荐8.2) +- cmake >= 3.10.0 +- jetpack >= 4.6.1 +- python >= 3.6 + +所有编译选项通过环境变量导入 + +``` +git clone https://github.com/PaddlePaddle/FastDeploy.git +cd FastDeploy/python +export BUILD_ON_JETSON=ON +export ENABLE_VISION=ON + +python setup.py build +python setup.py bdist_wheel +``` + +编译完成即会在`FastDeploy/python/dist`目录下生成编译后的`wheel`包,直接pip install即可 + +编译过程中,如若修改编译参数,为避免带来缓存影响,可删除`FastDeploy/python`目录下的`build`和`.setuptools-cmake-build`两个子目录后再重新编译 diff --git a/.new_docs/en/build_and_install/prebuilt.md b/.new_docs/en/build_and_install/prebuilt.md new file mode 100644 index 0000000000..4d945ae208 --- /dev/null +++ b/.new_docs/en/build_and_install/prebuilt.md @@ -0,0 +1,64 @@ +[English](../../en/build_and_install/prebuilt.md) | 简体中文 + +# 预编译库安装 + +FastDeploy提供各平台预编译库,供开发者直接下载安装使用。当然FastDeploy编译也非常容易,开发者也可根据自身需求编译FastDeploy。 + +## GPU部署环境 + +### 环境要求 +- CUDA >= 11.2 +- cuDNN >= 8.0 +- python >= 3.6 +- OS: Linux(x64)/Windows 10(x64) + +支持CPU和Nvidia GPU的部署,默认集成Paddle Inference、ONNX Runtime、OpenVINO以及TensorRT推理后端,Vision视觉模型模块,Text文本NLP模型模块 + +### Python安装 + +Release版本(当前最新0.2.1)安装 +``` +pip install 
fastdeploy-gpu-python -f https://www.paddlepaddle.org.cn/whl/fastdeploy.html +``` + +其中推荐使用Conda配置开发环境 +``` +conda config --add channels conda-forge && conda install cudatoolkit=11.2 cudnn=8.2 +``` + +### C++ SDK安装 + +Release版本(当前最新0.2.1) + +| 平台 | 文件 | 说明 | +| :--- | :--- | :---- | +| Linux x64 | [fastdeploy-linux-x64-gpu-0.2.1.tgz](https://bj.bcebos.com/fastdeploy/release/cpp/fastdeploy-linux-x64-gpu-0.2.1.tgz) | g++ 8.2, CUDA 11.2, cuDNN 8.2编译产出 | +| Windows x64 | [fastdeploy-win-x64-gpu-0.2.1.zip](https://bj.bcebos.com/fastdeploy/release/cpp/fastdeploy-win-x64-gpu-0.2.1.zip) | Visual Studio 16 2019, CUDA 11.2, cuDNN 8.2编译产出 | + +## CPU部署环境 + +### 环境要求 +- python >= 3.6 +- OS: Linux(x64/aarch64)/Windows 10 x64/Mac OSX(x86/aarm64) + +仅支持CPU部署,默认集成Paddle Inference、ONNX Runtime、OpenVINO, Vision视觉模型模块(Linux aarch64和Mac OSX下仅集成ONNX Runtime模块), Text文本NLP模型模块。 + +### Python安装 + +Release版本(当前最新0.2.1)安装 +``` +pip install fastdeploy-python -f https://www.paddlepaddle.org.cn/whl/fastdeploy.html +``` + +## C++ SDK安装 + +Release版本(当前最新0.2.1) + +| 平台 | 文件 | 说明 | +| :--- | :--- | :---- | +| Linux x64 | [fastdeploy-linux-x64-0.2.1.tgz](https://bj.bcebos.com/fastdeploy/release/cpp/fastdeploy-linux-x64-0.2.1.tgz) | g++ 8.2编译产出 | +| Windows x64 | [fastdeploy-win-x64-0.2.1.zip](https://bj.bcebos.com/fastdeploy/release/cpp/fastdeploy-win-x64-0.2.1.zip) | Visual Studio 16 2019编译产出 | +| Mac OSX x64 | [fastdeploy-osx-x86_64-0.2.1.tgz](https://bj.bcebos.com/fastdeploy/release/cpp/fastdeploy-osx-x86_64-0.2.1.tgz) | - | +| Mac OSX arm64 | [fastdeploy-osx-arm64-0.2.1.tgz](https://bj.bcebos.com/fastdeploy/release/cpp/fastdeploy-osx-arm64-0.2.1.tgz) | - | +| Linux aarch64 | [fastdeploy-linux-aarch64-0.2.0.tgz](https://bj.bcebos.com/fastdeploy/release/cpp/fastdeploy-linux-aarch64-0.2.0.tgz) | g++ 6.3.0编译产出 | + diff --git a/.new_docs/en/faq/build_on_win_with_gui.md b/.new_docs/en/faq/build_on_win_with_gui.md new file mode 100644 index 0000000000..c7fdb76f5a --- /dev/null +++ 
b/.new_docs/en/faq/build_on_win_with_gui.md @@ -0,0 +1,134 @@ +# CMakeGUI + VS 2019 IDE编译FastDeploy + +注:此方式仅支持编译FastDeploy C++ SDK + +## 目录 +- [使用CMake GUI进行基础配置](#CMakeGuiAndVS2019Basic) +- [编译CPU版本 C++ SDK设置](#CMakeGuiAndVS2019CPU) +- [编译GPU版本 C++ SDK设置](#CMakeGuiAndVS2019GPU) +- [使用Visual Studio 2019 IDE进行编译](#CMakeGuiAndVS2019Build) + +### 使用CMake GUI进行基础配置 +
+ +步骤一:首先,打开CMake GUI,先初始化FastDeploy工程: + +![image](https://user-images.githubusercontent.com/31974251/192094881-c5beb0e5-82ae-4a62-a88c-73f3d80f7936.png) + +步骤二:点击Configure后,在弹窗中设置编译"x64"架构: + +![image](https://user-images.githubusercontent.com/31974251/192094951-958a0a22-2090-4ab6-84f5-3573164d0835.png) + +初始化完成后,显示如下: + +![image](https://user-images.githubusercontent.com/31974251/192095053-874b9c73-fc0d-4325-b555-ac94ab9a9f38.png) + +步骤三:由于FastDeploy目前只支持Release版本,因此,先将"CMAKE_CONFIGURATION_TYPES"修改成"Release" + +![image](https://user-images.githubusercontent.com/31974251/192095175-3aeede95-a633-4b3c-81f8-067f0a0a44a3.png) + +接下来,用户可根据自己实际的开发需求开启对应的编译选项,并生成sln解决方案。以下,针对编译CPU和GPU版本SDK各举一个例子。 + +### 编译CPU版本 C++ SDK设置 + +
+ +步骤一:勾选CPU版本对应的编译选项。注意CPU版本,请`不要`勾选WITH_GPU和ENABLE_TRT_BACKEND + +![image](https://user-images.githubusercontent.com/31974251/192095848-b3cfdf19-e378-41e0-b44e-5edb49461eeb.png) + +这个示例中,我们开启ORT、Paddle、OpenVINO等推理后端,并且选择了需要编译TEXT和VISION的API + + +步骤二:自定义设置SDK安装路径,修改CMAKE_INSTALL_PREFIX + +![image](https://user-images.githubusercontent.com/31974251/192095961-5f6e348a-c30b-4473-8331-8beefb7cd2e6.png) + +由于默认的安装路径是C盘,我们可以修改CMAKE_INSTALL_PREFIX来指定自己的安装路径,这里我们将安装路径修改到`build\fastdeploy-win-x64-0.2.1`目录下。 + +![image](https://user-images.githubusercontent.com/31974251/192096055-8a276a9e-6017-4447-9ded-b95c5579d663.png) + + + +### 编译GPU版本 C++ SDK设置 +
+ +步骤一:勾选GPU版本对应的编译选项。注意GPU版本,请`需要`勾选WITH_GPU + +![image](https://user-images.githubusercontent.com/31974251/192099254-9f82abb0-8a29-41ce-a0ce-da6aacf23582.png) + +这个示例中,我们开启ORT、Paddle、OpenVINO和TRT等推理后端,并且选择了需要编译TEXT和VISION的API。并且,由于开启了GPU和TensorRT,此时需要额外指定CUDA_DIRECTORY和TRT_DIRECTORY,在GUI界面中找到这两个变量,点击右侧的选项框,分别选择您安装CUDA的路径和TensorRT的路径 + + +![image](https://user-images.githubusercontent.com/31974251/192098907-9dd9a49c-4a3e-4641-8e68-f25da1cafbba.png) + + +![image](https://user-images.githubusercontent.com/31974251/192098984-7fefd824-7e3b-4185-abba-bae5d8765e2a.png) + + +步骤二:自定义设置SDK安装路径,修改CMAKE_INSTALL_PREFIX + +![image](https://user-images.githubusercontent.com/31974251/192099125-81fc8217-e51f-4039-9421-ba7a09c0027c.png) + + +由于默认的安装路径是C盘,我们可以修改CMAKE_INSTALL_PREFIX来指定自己的安装路径,这里我们将安装路径修改到`build\fastdeploy-win-x64-gpu-0.2.1`目录下。 + + +### 使用Visual Studio 2019 IDE进行编译 + +
+ +步骤一:点击"Generate",生成sln解决方案,并用Visual Studio 2019打开 + +![image](https://user-images.githubusercontent.com/31974251/192096162-c05cbb11-f96e-4c82-afde-c7fc02cddf68.png) + +这个过程默认会从下载一些编译需要的资源,cmake的dev警告可以不用管。生成完成之后可以看到以下界面: + +CPU版本SDK: + +![image](https://user-images.githubusercontent.com/31974251/192096478-faa570bd-7569-43c3-ad79-cc6be5b605e3.png) + +GPU版本SDK: + +![image](https://user-images.githubusercontent.com/31974251/192099583-300e4680-1089-45cf-afaa-d2afda8fd436.png) + + +左侧界面,可以看到所有编译需要的include路径和lib路径已经被设置好了,用户可以考虑把这些路径记录下来方便后续的开发。右侧界面,可以看到已经生成fastdeploy.sln解决方案文件。接下来,我们使用Visual Studio 2019打开这个解决方案文件(理论上VS2022也可以编译,但目前建议使用VS2019)。 + +![image](https://user-images.githubusercontent.com/31974251/192096765-2aeadd68-47fb-4cd6-b083-4a478cf5e584.jpg) + + +步骤二:在Visual Studio 2019点击"ALL BUILD"->右键点击"生成"开始编译 + +![image](https://user-images.githubusercontent.com/31974251/192096893-5d6bc428-b824-4ffe-8930-0ec6d4dcfd02.png) + +CPU版本SDK编译成功! + +![image](https://user-images.githubusercontent.com/31974251/192097020-979bd7a3-1cdd-4fb5-a931-864c5372933d.png) + +GPU版本SDK编译成功! + +![image](https://user-images.githubusercontent.com/31974251/192099902-4b661f9a-7691-4f7f-b573-92ca9397a890.png) + + +步骤三:编译完成后,在Visual Studio 2019点击"INSTALL"->右键点击"生成"将编译好的SDK安装到先前指定的目录 + + +![image](https://user-images.githubusercontent.com/31974251/192097073-ce5236eb-1ed7-439f-8098-fef7a2d02779.png) + +![image](https://user-images.githubusercontent.com/31974251/192097122-d675ae39-35fb-4dbb-9c75-eefb0597ec2e.png) + +SDK成功安装到指定目录! 
+ +### 编译所有examples(可选) +可以在CMake GUI中勾选BUILD_EXAMPLES选项,连带编译所有的examples,编译完成后所有example的可执行文件保存在build/bin/Release目录下 + +![image](https://user-images.githubusercontent.com/31974251/192110769-a4f0940d-dea3-4524-831b-1c2a6ab8e871.png) + +![image](https://user-images.githubusercontent.com/31974251/192110930-e7e49bc6-c271-4076-be74-3d103f27bc78.png) + + +## 特别提示 + +如果是用户自行编译SDK,理论上支持Windows 10/11,VS 2019/2022,CUDA 11.x 以及 TensorRT 8.x等配置,但建议使用我们推荐的默认配置,即:Windows 10, VS 2019, CUDA 11.2 和 TensorRT 8.4.x版本。另外,如果编译过程中遇到中文字符的编码问题(如UIE example必须传入中文字符进行预测),可以参考Visual Studio的官方文档,设置源字符集为`/utf-8`解决: +- [/utf-8(将源字符集和执行字符集设置为 UTF-8)](https://learn.microsoft.com/zh-cn/cpp/build/reference/utf-8-set-source-and-executable-character-sets-to-utf-8?view=msvc-170) diff --git a/.new_docs/en/faq/develop_a_new_model.md b/.new_docs/en/faq/develop_a_new_model.md new file mode 100644 index 0000000000..b2b5ac9903 --- /dev/null +++ b/.new_docs/en/faq/develop_a_new_model.md @@ -0,0 +1,3 @@ +# FastDeploy集成新模型流程 + +进行中... 
diff --git a/.new_docs/en/faq/how_to_change_backend.md b/.new_docs/en/faq/how_to_change_backend.md new file mode 100644 index 0000000000..0188d849d9 --- /dev/null +++ b/.new_docs/en/faq/how_to_change_backend.md @@ -0,0 +1,47 @@ +# 如何切换模型推理后端 + +FastDeploy中各视觉模型可支持多种后端,包括 +- OpenVINO (支持Paddle/ONNX两种格式模型, 仅支持CPU上推理) +- ONNX Runtime (支持Paddle/ONNX两种格式模型, 支持CPU/GPU) +- TensorRT (支持Paddle/ONNX两种格式模型,仅支持GPU上推理) +- Paddle Inference(支持Paddle格式模型, 支持CPU/GPU) + +所有模型切换后端方式均通过RuntimeOption进行切换, + +**Python** +```python +import fastdeploy as fd +option = fd.RuntimeOption() + +# 切换使用CPU/GPU +option.use_cpu() +option.use_gpu() + +# 切换不同后端 +option.use_paddle_backend() # Paddle Inference +option.use_trt_backend() # TensorRT +option.use_openvino_backend() # OpenVINO +option.use_ort_backend() # ONNX Runtime + +``` + +**C++** +```C++ +fastdeploy::RuntimeOption option; + +// 切换使用CPU/GPU +option.UseCpu(); +option.UseGpu(); + +// 切换不同后端 +option.UsePaddleBackend(); // Paddle Inference +option.UseTrtBackend(); // TensorRT +option.UseOpenVINOBackend(); // OpenVINO +option.UseOrtBackend(); // ONNX Runtime +``` + +具体示例可参阅`FastDeploy/examples/vision`下不同模型的python或c++推理代码 + +更多`RuntimeOption`的配置方式查阅FastDeploy API文档 +- [Python API]() +- [C++ API]() diff --git a/.new_docs/en/faq/use_sdk_on_android.md b/.new_docs/en/faq/use_sdk_on_android.md new file mode 100644 index 0000000000..0add916cea --- /dev/null +++ b/.new_docs/en/faq/use_sdk_on_android.md @@ -0,0 +1,3 @@ +# Android平台使用FastDeploy部署 + +进行中... diff --git a/.new_docs/en/faq/use_sdk_on_windows.md b/.new_docs/en/faq/use_sdk_on_windows.md new file mode 100644 index 0000000000..be1e1ab0a0 --- /dev/null +++ b/.new_docs/en/faq/use_sdk_on_windows.md @@ -0,0 +1,505 @@ +# 在 Windows 使用 FastDeploy C++ SDK + +## 目录 +- [1. 环境依赖](#Environment) +- [2. 下载 FastDeploy Windows 10 C++ SDK](#Download) +- [3. 
Windows下多种方式使用 C++ SDK 的方式](#CommandLine) + - [3.1 命令行方式使用 C++ SDK](#CommandLine) + - [3.1.1 在 Windows 命令行终端 上编译 example](#CommandLine) + - [3.1.2 运行可执行文件获得推理结果](#CommandLine) + - [3.2 Visual Studio 2019 创建sln工程使用 C++ SDK](#VisualStudio2019Sln) + - [3.2.1 Visual Studio 2019 创建 sln 工程项目](#VisualStudio2019Sln1) + - [3.2.2 从examples中拷贝infer_ppyoloe.cc的代码到工程](#VisualStudio2019Sln2) + - [3.2.3 将工程配置设置成"Release x64"配置](#VisualStudio2019Sln3) + - [3.2.4 配置头文件include路径](#VisualStudio2019Sln4) + - [3.2.5 配置lib路径和添加库文件](#VisualStudio2019Sln5) + - [3.2.6 编译工程并运行获取结果](#VisualStudio2019Sln6) + - [3.3 Visual Studio 2019 创建CMake工程使用 C++ SDK](#VisualStudio2019) + - [3.3.1 Visual Studio 2019 创建CMake工程项目](#VisualStudio20191) + - [3.3.2 在CMakeLists中配置 FastDeploy C++ SDK](#VisualStudio20192) + - [3.3.3 生成工程缓存并修改CMakeSetting.json配置](#VisualStudio20193) + - [3.3.4 生成可执行文件,运行获取结果](#VisualStudio20194) +- [4. 多种方法配置exe运行时所需的依赖库](#CommandLineDeps1) + - [4.1 使用 fastdeploy_init.bat 进行配置(推荐)](#CommandLineDeps1) + - [4.1.1 fastdeploy_init.bat 使用说明](#CommandLineDeps11) + - [4.1.2 fastdeploy_init.bat 查看 SDK 中所有的 dll、lib 和 include 路径](#CommandLineDeps12) + - [4.1.3 fastdeploy_init.bat 安装 SDK 中所有的 dll 到指定的目录](#CommandLineDeps13) + - [4.1.4 fastdeploy_init.bat 配置 SDK 环境变量](#CommandLineDeps14) + - [4.2 修改 CMakeLists.txt,一行命令配置(推荐)](#CommandLineDeps2) + - [4.3 命令行设置环境变量](#CommandLineDeps3) + - [4.4 手动拷贝依赖库到exe的目录下](#CommandLineDeps4) + + +## 1. 环境依赖 +
+ +- cmake >= 3.12 +- Visual Studio 16 2019 +- cuda >= 11.2 (当WITH_GPU=ON) +- cudnn >= 8.0 (当WITH_GPU=ON) + +## 2. 下载 FastDeploy Windows 10 C++ SDK +
+ +### 2.1 下载预编译库或者从源码编译最新的SDK +可以从以下链接下载编译好的 FastDeploy Windows 10 C++ SDK,SDK中包含了examples代码。 +```text +https://bj.bcebos.com/fastdeploy/release/cpp/fastdeploy-win-x64-gpu-0.2.1.zip +``` +源码编译请参考: [build_and_install](../build_and_install) +### 2.2 准备模型文件和测试图片 +可以从以下链接下载模型文件和测试图片,并解压缩 +```text +https://bj.bcebos.com/paddlehub/fastdeploy/ppyoloe_crn_l_300e_coco.tgz # (下载后解压缩) +https://gitee.com/paddlepaddle/PaddleDetection/raw/release/2.4/demo/000000014439.jpg +``` + +## 3. Windows下多种方式使用 C++ SDK 的方式 +### 3.1 SDK使用方式一:命令行方式使用 C++ SDK +
+ +#### 3.1.1 在 Windows 上编译 PPYOLOE +Windows菜单打开`x64 Native Tools Command Prompt for VS 2019`命令工具,cd到ppyoloe的demo路径 +```bat +cd fastdeploy-win-x64-gpu-0.2.1\examples\vision\detection\paddledetection\cpp +``` +```bat +mkdir build && cd build +cmake .. -G "Visual Studio 16 2019" -A x64 -DFASTDEPLOY_INSTALL_DIR=%cd%\..\..\..\..\..\..\..\fastdeploy-win-x64-gpu-0.2.1 -DCUDA_DIRECTORY="C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v11.2" +``` +然后执行 +```bat +msbuild infer_demo.sln /m:4 /p:Configuration=Release /p:Platform=x64 +``` + +#### 3.1.2 运行 demo +```bat +cd Release +infer_ppyoloe_demo.exe ppyoloe_crn_l_300e_coco 000000014439.jpg 0 # CPU +infer_ppyoloe_demo.exe ppyoloe_crn_l_300e_coco 000000014439.jpg 1 # GPU +infer_ppyoloe_demo.exe ppyoloe_crn_l_300e_coco 000000014439.jpg 2 # GPU + TensorRT +``` + +特别说明,exe运行时所需要的依赖库配置方法,请参考章节: [多种方法配置exe运行时所需的依赖库](#CommandLineDeps) + +### 3.2 SDK使用方式二:Visual Studio 2019 创建 sln 工程使用 C++ SDK + +本章节针对非CMake用户,介绍如何在Visual Studio 2019 中创建 sln 工程使用 FastDeploy C++ SDK. CMake用户请直接看下一章节。另外,本章节内容特别感谢“梦醒南天”同学关于FastDeploy使用的文档教程:[如何在 Windows 上使用 FastDeploy C++ 部署 PaddleDetection 目标检测模型](https://www.bilibili.com/read/cv18807232) + +
+ +#### 3.2.1 步骤一:Visual Studio 2019 创建 sln 工程项目 + +
+ +(1) 打开Visual Studio 2019,点击"创建新项目"->点击"控制台程序",从而创建新的sln工程项目. + +![image](https://user-images.githubusercontent.com/31974251/192813386-cf9a93e0-ee42-42b3-b8bf-d03ae7171d4e.png) + +![image](https://user-images.githubusercontent.com/31974251/192816516-a4965b9c-21c9-4a01-bbb2-c648a8256fc9.png) + +(2)点击“创建”,便创建了一个空的sln工程。我们直接从examples里面拷贝infer_ppyoloe的代码这里。 + +![image](https://user-images.githubusercontent.com/31974251/192817382-643c8ca2-1f2a-412e-954e-576c22b4ea62.png) + +#### 3.2.2 步骤二:从examples中拷贝infer_ppyoloe.cc的代码到工程 + +
+ +(1)从examples中拷贝infer_ppyoloe.cc的代码到工程,直接替换即可,拷贝代码的路径为: +```bat +fastdeploy-win-x64-gpu-0.2.1\examples\vision\detection\paddledetection\cpp +``` + +![image](https://user-images.githubusercontent.com/31974251/192818456-21ca846c-ab52-4001-96d2-77c8174bff6b.png) + +#### 3.2.3 步骤三:将工程配置设置成"Release x64"配置 + +
+ +![image](https://user-images.githubusercontent.com/31974251/192818918-98d7a54c-4a60-4760-a3cb-ecacc38b7e7a.png) + +#### 3.2.4 步骤四:配置头文件include路径 + +
+ + +(1)配置头文件include路径:鼠标选择项目,然后单击右键即可弹出下来菜单,在其中单击“属性”。 + +![image](https://user-images.githubusercontent.com/31974251/192820573-23096aea-046c-4bb4-9929-c412718805cb.png) + + +(2)在弹出来的属性页中选择:C/C++ —> 常规 —> 附加包含目录,然后在添加 fastdeploy 和 opencv 的头文件路径。如: + +```bat + +D:\qiuyanjun\fastdeploy_build\built\fastdeploy-win-x64-gpu-0.2.1\include +D:\qiuyanjun\fastdeploy_build\built\fastdeploy-win-x64-gpu-0.2.1\third_libs\install\opencv-win-x64-3.4.16\build\include +``` +注意,如果是自行编译最新的SDK或版本>0.2.1,依赖库目录结构有所变动,opencv路径需要做出适当的修改。如: +```bat +D:\qiuyanjun\fastdeploy_build\built\fastdeploy-win-x64-gpu-0.2.1\third_libs\install\opencv\build\include +``` + +![image](https://user-images.githubusercontent.com/31974251/192824445-978c06ed-cc14-4d6a-8ccf-d4594ca11533.png) + +用户需要根据自己实际的sdk路径稍作修改。 + + +#### 3.2.5 步骤五:配置lib路径和添加库文件 + +
+ +(1)属性页中选择:链接器—>常规—> 附加库目录,然后在添加 fastdeploy 和 opencv 的lib路径。如: +```bat +D:\qiuyanjun\fastdeploy_build\built\fastdeploy-win-x64-gpu-0.2.1\lib +D:\qiuyanjun\fastdeploy_build\built\fastdeploy-win-x64-gpu-0.2.1\third_libs\install\opencv-win-x64-3.4.16\build\x64\vc15\lib +``` +注意,如果是自行编译最新的SDK或版本>0.2.1,依赖库目录结构有所变动,opencv路径需要做出适当的修改。如: +```bat +D:\qiuyanjun\fastdeploy_build\built\fastdeploy-win-x64-gpu-0.2.1\third_libs\install\opencv\build\x64\vc15\lib +``` + +![image](https://user-images.githubusercontent.com/31974251/192826130-fe28791f-317c-4e66-a6a5-133e60b726f0.png) + +(2)添加库文件:只需要 fastdeploy.lib 和 opencv_world3416.lib + + ![image](https://user-images.githubusercontent.com/31974251/192826884-44fc84a1-c57a-45f1-8ee2-30b7eaa3dce9.png) + +#### 3.2.6 步骤六:编译工程并运行获取结果 + 
+ + +(1)点击菜单栏“生成”->“生成解决方案” + +![image](https://user-images.githubusercontent.com/31974251/192827608-beb53685-2f94-44dc-aa28-49b09a4ab864.png) + +![image](https://user-images.githubusercontent.com/31974251/192827842-1f05d435-8a3e-492b-a3b7-d5e88f85f814.png) + +编译成功,可以看到exe保存在: +```bat +D:\qiuyanjun\fastdeploy_test\infer_ppyoloe\x64\Release\infer_ppyoloe.exe +``` + +(2)执行可执行文件,获得推理结果。 首先需要拷贝所有的dll到exe所在的目录下。同时,也需要把ppyoloe的模型文件和测试图片下载解压缩后,拷贝到exe所在的目录。 特别说明,exe运行时所需要的依赖库配置方法,请参考章节: [多种方法配置exe运行时所需的依赖库](#CommandLineDeps) + +![image](https://user-images.githubusercontent.com/31974251/192829545-3ea36bfc-9a54-492b-984b-2d5d39094d47.png) + + +### 3.3 SDK使用方式三:Visual Studio 2019 创建 CMake 工程使用 C++ SDK +
+ +本章节针对CMake用户,介绍如何在Visual Studio 2019 中创建 CMake 工程使用 FastDeploy C++ SDK. + +#### 3.3.1 步骤一:Visual Studio 2019 创建“CMake”工程项目 + +
+ +(1)打开Visual Studio 2019,点击"创建新项目"->点击"CMake",从而创建CMake工程项目。以PPYOLOE为例,来说明如何在Visual Studio 2019 IDE中使用FastDeploy C++ SDK. + +![image](https://user-images.githubusercontent.com/31974251/192143543-9f29e4cb-2307-45ca-a61a-bcfba5df19ff.png) + +![image](https://user-images.githubusercontent.com/31974251/192143640-39e79c65-8b50-4254-8da6-baa21bb23e3c.png) + + +![image](https://user-images.githubusercontent.com/31974251/192143713-be2e6490-4cab-4151-8463-8c367dbc451a.png) + +(2)打开工程发现,Visual Stuio 2019已经为我们生成了一些基本的文件,其中包括CMakeLists.txt。infer_ppyoloe.h头文件这里实际上用不到,我们可以直接删除。 + +![image](https://user-images.githubusercontent.com/31974251/192143930-db1655c2-66ee-448c-82cb-0103ca1ca2a0.png) + +#### 3.3.2 步骤二:在CMakeLists中配置 FastDeploy C++ SDK + +
+ +(1)在工程创建完成后,我们需要添加infer_ppyoloe推理源码,并修改CMakeLists.txt,修改如下: + +![image](https://user-images.githubusercontent.com/31974251/192144782-79bccf8f-65d0-4f22-9f41-81751c530319.png) + +(2)其中infer_ppyoloe.cpp的代码可以直接从examples中的代码拷贝过来: +- [examples/vision/detection/paddledetection/cpp/infer_ppyoloe.cc](../../examples/vision/detection/paddledetection/cpp/infer_ppyoloe.cc) + +(3)CMakeLists.txt主要包括配置FastDeploy C++ SDK的路径,如果是GPU版本的SDK,还需要配置CUDA_DIRECTORY为CUDA的安装路径,CMakeLists.txt的配置如下: + +```cmake +project(infer_ppyoloe_demo C CXX) +cmake_minimum_required(VERSION 3.12) + +# Only support "Release" mode now +set(CMAKE_BUILD_TYPE "Release") + +# Set FastDeploy install dir +set(FASTDEPLOY_INSTALL_DIR "D:/qiuyanjun/fastdeploy-win-x64-gpu-0.2.1" + CACHE PATH "Path to downloaded or built fastdeploy sdk.") + +# Set CUDA_DIRECTORY (CUDA 11.x) for GPU SDK +set(CUDA_DIRECTORY "C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v11.7" + CACHE PATH "Path to installed CUDA Toolkit.") + +include(${FASTDEPLOY_INSTALL_DIR}/FastDeploy.cmake) + +include_directories(${FASTDEPLOY_INCS}) + +add_executable(infer_ppyoloe_demo ${PROJECT_SOURCE_DIR}/infer_ppyoloe.cpp) +target_link_libraries(infer_ppyoloe_demo ${FASTDEPLOY_LIBS}) + +# Optional: install all DLLs to binary dir. +install_fastdeploy_libraries(${CMAKE_CURRENT_BINARY_DIR}/Release) +``` +注意,`install_fastdeploy_libraries`函数仅在最新的代码编译的SDK或版本>0.2.1下有效。 + +#### 3.3.3 步骤三:生成工程缓存并修改CMakeSetting.json配置 + +
+ +(1)点击"CMakeLists.txt"->右键点击"生成缓存": + +![image](https://user-images.githubusercontent.com/31974251/192145349-c78b110a-0e41-4ee5-8942-3bf70bd94a75.png) + +发现已经成功生成缓存了,但是由于打开工程时,默认是Debug模式,我们发现exe和缓存保存路径还是Debug模式下的。 我们可以先修改CMake的设置为Release. + +(2)点击"CMakeLists.txt"->右键点击"infer_ppyoloe_demo的cmake设置",进入CMakeSettings.json的设置面板,把其中的Debug设置修改为Release. + +![image](https://user-images.githubusercontent.com/31974251/192145242-01d37b44-e2fa-47df-82c1-c11c2ccbff99.png) + +同时设置CMake生成器为 "Visual Studio 16 2019 Win64" + +![image](https://user-images.githubusercontent.com/31974251/192147961-ac46d0f6-7349-4126-a123-914af2b63d95.jpg) + +(3)点击保存CMake缓存以切换为Release配置: + +![image](https://user-images.githubusercontent.com/31974251/192145974-b5a63341-9143-49a2-8bfe-94ac641b1670.png) + +(4):(4.1)点击"CMakeLists.txt"->右键"CMake缓存仅限x64-Release"->"点击删除缓存";(4.2)点击"CMakeLists.txt"->"生成缓存";(4.3)如果在步骤一发现删除缓存的选项是灰色的可以直接点击"CMakeLists.txt"->"生成",若生成失败则可以重复尝试(4.1)和(4。2) + +![image](https://user-images.githubusercontent.com/31974251/192146394-51fbf2b8-1cba-41ca-bb45-5f26890f64ce.jpg) + +最终可以看到,配置已经成功生成Relase模式下的CMake缓存了。 + +![image](https://user-images.githubusercontent.com/31974251/192146239-a1eacd9e-034d-4373-a262-65b18ce25b87.png) + + +#### 3.3.4 步骤四:生成可执行文件,运行获取结果。 + +
+ +(1)点击"CMakeLists.txt"->"生成"。可以发现已经成功生成了infer_ppyoloe_demo.exe,并保存在`out/build/x64-Release/Release`目录下。 + +![image](https://user-images.githubusercontent.com/31974251/192146852-c64d2252-8c8f-4309-a950-908a5cb258b8.png) + +(2)执行可执行文件,获得推理结果。 首先需要拷贝所有的dll到exe所在的目录下,这里我们可以在CMakeLists.txt添加一下命令,可将FastDeploy中所有的dll安装到指定的目录。注意,该方式仅在最新的代码编译的SDK或版本>0.2.1下有效。其他配置方式,请参考章节: [多种方法配置exe运行时所需的依赖库](#CommandLineDeps) + +```cmake +install_fastdeploy_libraries(${CMAKE_CURRENT_BINARY_DIR}/Release) +``` +(3)同时,也需要把ppyoloe的模型文件和测试图片下载解压缩后,拷贝到exe所在的目录。 准备完成后,目录结构如下: + +![image](https://user-images.githubusercontent.com/31974251/192147505-054edb77-564b-405e-89ee-fd0d2e413e78.png) + +(4)最后,执行以下命令获得推理结果: + +```bat +D:\xxxinfer_ppyoloe\out\build\x64-Release\Release>infer_ppyoloe_demo.exe ppyoloe_crn_l_300e_coco 000000014439.jpg 0 +[INFO] fastdeploy/runtime.cc(304)::fastdeploy::Runtime::Init Runtime initialized with Backend::OPENVINO in Device::CPU. +DetectionResult: [xmin, ymin, xmax, ymax, score, label_id] +415.047180,89.311569, 506.009613, 283.863098, 0.950423, 0 +163.665710,81.914932, 198.585342, 166.760895, 0.896433, 0 +581.788635,113.027618, 612.623474, 198.521713, 0.842596, 0 +267.217224,89.777306, 298.796051, 169.361526, 0.837951, 0 +...... +153.301407,123.233757, 177.130539, 164.558350, 0.066697, 60 +505.887604,140.919601, 523.167236, 151.875336, 0.084912, 67 + +Visualized result saved in ./vis_result.jpg +``` + +打开保存的图片查看可视化结果: + +
+ +
+ +特别说明,exe运行时所需要的依赖库配置方法,请参考章节: [多种方法配置exe运行时所需的依赖库](#CommandLineDeps) + +## 4. 多种方法配置exe运行时所需的依赖库 +
+说明:对于使用的最新源码编译的SDK或SDK版本>0.2.1的用户,我们推荐使用(4.1)和(4.2)中的方式配置运行时的依赖库。如果使用的SDK版本<=0.2.1,请参考(4.3)和(4.4)中的方式进行配置。 + +### 4.1 方式一:使用 fastdeploy_init.bat 进行配置(推荐) +
+ +对于版本高于0.2.1的SDK,我们提供了 **fastdeploy_init.bat** 工具来管理FastDeploy中所有的依赖库。可以通过该脚本工具查看(show)、拷贝(install) 和 设置(init and setup) SDK中所有的dll,方便用户快速完成运行时环境配置。 + +#### 4.1.1 fastdeploy_init.bat 使用说明 +
+ +首先进入SDK的根目录,运行以下命令,可以查看 fastdeploy_init.bat 的用法说明 +```bat +D:\path-to-your-fastdeploy-sdk-dir>fastdeploy_init.bat help +------------------------------------------------------------------------------------------------------------------------------------------------------------ +[1] [help] print help information: fastdeploy_init.bat help +[2] [show] show all dlls/libs/include paths: fastdeploy_init.bat show fastdeploy-sdk-dir +[3] [init] init all dlls paths for current terminal: fastdeploy_init.bat init fastdeploy-sdk-dir [WARNING: need copy onnxruntime.dll manually] +[4] [setup] setup path env for current terminal: fastdeploy_init.bat setup fastdeploy-sdk-dir [WARNING: need copy onnxruntime.dll manually] +[5] [install] install all dlls to a specific dir: fastdeploy_init.bat install fastdeploy-sdk-dir another-dir-to-install-dlls **[RECOMMEND]** +[6] [install] install all dlls with logging infos: fastdeploy_init.bat install fastdeploy-sdk-dir another-dir-to-install-dlls info +------------------------------------------------------------------------------------------------------------------------------------------------------------ +``` +用法简要说明如下: +- help: 打印所有的用法说明 +- show: 查看SDK中所有的 dll、lib 和 include 路径 +- init: 初始化所有dll路径信息,后续用于设置terminal环境变量(不推荐,请参考4.3中关于onnxruntime的说明) +- setup: 在init之后运行,设置terminal环境变量(不推荐,请参考4.3中关于onnxruntime的说明) +- install: 将SDK中所有的dll安装到某个指定的目录(推荐) +#### 4.1.2 fastdeploy_init.bat 查看 SDK 中所有的 dll、lib 和 include 路径 +
+ +进入SDK的根目录,运行show命令,可以查看SDK中所有的 dll、lib 和 include 路径。以下命令中 %cd% 表示当前目录(SDK的根目录)。 +```bat +D:\path-to-fastdeploy-sdk-dir>fastdeploy_init.bat show %cd% +------------------------------------------------------------------------------------------------------------------------------------------------------------ +[SDK] D:\path-to-fastdeploy-sdk-dir +------------------------------------------------------------------------------------------------------------------------------------------------------------ +[DLL] D:\path-to-fastdeploy-sdk-dir\lib\fastdeploy.dll **[NEEDED]** +[DLL] D:\path-to-fastdeploy-sdk-dir\third_libs\install\faster_tokenizer\lib\core_tokenizers.dll **[NEEDED]** +[DLL] D:\path-to-fastdeploy-sdk-dir\third_libs\install\opencv\build\x64\vc15\bin\opencv_ffmpeg3416_64.dll **[NEEDED]** +...... +------------------------------------------------------------------------------------------------------------------------------------------------------------ +[Lib] D:\path-to-fastdeploy-sdk-dir\lib\fastdeploy.lib **[NEEDED][fastdeploy]** +[Lib] D:\path-to-fastdeploy-sdk-dir\third_libs\install\faster_tokenizer\lib\core_tokenizers.lib **[NEEDED][fastdeploy::text]** +[Lib] D:\path-to-fastdeploy-sdk-dir\third_libs\install\opencv\build\x64\vc15\lib\opencv_world3416.lib **[NEEDED][fastdeploy::vision]** +...... +------------------------------------------------------------------------------------------------------------------------------------------------------------ +[Include] D:\path-to-fastdeploy-sdk-dir\include **[NEEDED][fastdeploy]** +[Include] D:\path-to-fastdeploy-sdk-dir\third_libs\install\faster_tokenizer\include **[NEEDED][fastdeploy::text]** +[Include] D:\path-to-fastdeploy-sdk-dir\third_libs\install\opencv\build\include **[NEEDED][fastdeploy::vision]** +...... 
+------------------------------------------------------------------------------------------------------------------------------------------------------------ +[XML] D:\path-to-fastdeploy-sdk-dir\third_libs\install\openvino\runtime\bin\plugins.xml **[NEEDED]** +------------------------------------------------------------------------------------------------------------------------------------------------------------ +``` +可以看到该命令会根据您当前的SDK,输出对应的信息,包含 dll、lib 和 include 的路径信息。对于 dll,被标记为 `[NEEDED]`的,是运行时所需要的,如果包含OpenVINO后端,还需要将他的plugins.xml拷贝到exe所在的目录;对于 lib 和 include,被标记为`[NEEDED]`的,是开发时所需要配置的最小依赖。并且,我们还增加了对应的API Tag标记,如果您只使用vision API,则只需要配置标记为 `[NEEDED][fastdeploy::vision]` 的 lib 和 include 路径. + +#### 4.1.3 fastdeploy_init.bat 安装 SDK 中所有的 dll 到指定的目录 (推荐) +
+ +进入SDK的根目录,运行install命令,可以将SDK 中所有的 dll 安装到指定的目录(如exe所在的目录)。我们推荐这种方式来配置exe运行所需要的依赖库。比如,可以在SDK根目录下创建一个临时的bin目录备份所有的dll文件。以下命令中 %cd% 表示当前目录(SDK的根目录)。 +```bat +% info参数为可选参数,添加info参数后会打印详细的安装信息 % +D:\path-to-fastdeploy-sdk-dir>fastdeploy_init.bat install %cd% bin +D:\path-to-fastdeploy-sdk-dir>fastdeploy_init.bat install %cd% bin info +``` +```bat +D:\path-to-fastdeploy-sdk-dir>fastdeploy_init.bat install %cd% bin +[INFO] Do you want to install all FastDeploy dlls ? +[INFO] From: D:\path-to-fastdeploy-sdk-dir +[INFO] To: bin +Choose y means YES, n means NO: [y/n]y +YES. +请按任意键继续. . . +[INFO] Created bin done! +已复制 1 个文件。 +已复制 1 个文件。 +已复制 1 个文件。 +已复制 1 个文件。 +..... +已复制 1 个文件。 +已复制 1 个文件。 +已复制 1 个文件。 +已复制 1 个文件。 +..... +``` +#### 4.1.4 fastdeploy_init.bat 配置 SDK 环境变量 +
+ +您也可以选择通过配置环境变量的方式来设置运行时的依赖库环境,这种方式只在当前的terminal有效。如果您使用的SDK中包含了onnxruntime推理后端,我们不推荐这种方式,详细原因请参考(4.3)中关于onnxruntime配置的说明(需要手动拷贝onnxruntime所有的dll到exe所在的目录)。配置 SDK 环境变量的方式如下。以下命令中 %cd% 表示当前目录(SDK的根目录)。 +```bat +% 先运行 init 初始化当前SDK所有的dll文件路径 % +D:\path-to-fastdeploy-sdk-dir>fastdeploy_init.bat init %cd% +% 再运行 setup 完成 SDK 环境变量配置 % +D:\path-to-fastdeploy-sdk-dir>fastdeploy_init.bat setup %cd% +``` + +### 4.2 方式二:修改CMakeLists.txt,一行命令配置(推荐) +
+ +考虑到Windows下C++开发的特殊性,如经常需要拷贝所有的lib或dll文件到某个指定的目录,FastDeploy提供了`install_fastdeploy_libraries`的cmake函数,方便用户快速配置所有的dll。修改ppyoloe的CMakeLists.txt,添加: +```cmake +install_fastdeploy_libraries(${CMAKE_CURRENT_BINARY_DIR}/Release) +``` +注意,该方式仅在最新的代码编译的SDK或版本>0.2.1下有效。 + +### 4.3 方式三:命令行设置环境变量 +
+ +编译好的exe保存在Release目录下,在运行demo前,需要将模型和测试图片拷贝至该目录。另外,需要在终端指定DLL的搜索路径。请在build目录下执行以下命令。 +```bat +set FASTDEPLOY_HOME=%cd%\..\..\..\..\..\..\..\fastdeploy-win-x64-gpu-0.2.1 +set PATH=%FASTDEPLOY_HOME%\lib;%PATH% +set PATH=%FASTDEPLOY_HOME%\third_libs\install\onnxruntime\lib;%PATH% +set PATH=%FASTDEPLOY_HOME%\third_libs\install\opencv-win-x64-3.4.16\build\x64\vc15\bin;%PATH% +set PATH=%FASTDEPLOY_HOME%\third_libs\install\paddle_inference\paddle\lib;%PATH% +set PATH=%FASTDEPLOY_HOME%\third_libs\install\paddle_inference\third_party\install\mkldnn\lib;%PATH% +set PATH=%FASTDEPLOY_HOME%\third_libs\install\paddle_inference\third_party\install\mklml\lib;%PATH% +set PATH=%FASTDEPLOY_HOME%\third_libs\install\paddle2onnx\lib;%PATH% +set PATH=%FASTDEPLOY_HOME%\third_libs\install\tensorrt\lib;%PATH% +set PATH=%FASTDEPLOY_HOME%\third_libs\install\faster_tokenizer\lib;%PATH% +set PATH=%FASTDEPLOY_HOME%\third_libs\install\faster_tokenizer\third_party\lib;%PATH% +set PATH=%FASTDEPLOY_HOME%\third_libs\install\yaml-cpp\lib;%PATH% +set PATH=%FASTDEPLOY_HOME%\third_libs\install\openvino\bin;%PATH% +set PATH=%FASTDEPLOY_HOME%\third_libs\install\openvino\3rdparty\tbb\bin;%PATH% +``` +注意,需要拷贝onnxruntime.dll到exe所在的目录。 +```bat +copy /Y %FASTDEPLOY_HOME%\third_libs\install\onnxruntime\lib\onnxruntime* Release\ +``` +由于较新的Windows在System32系统目录下自带了onnxruntime.dll,因此就算设置了PATH,系统依然会出现onnxruntime的加载冲突。因此需要先拷贝demo用到的onnxruntime.dll到exe所在的目录。如下 +```bat +where onnxruntime.dll +C:\Windows\System32\onnxruntime.dll # windows自带的onnxruntime.dll +``` +另外,注意,如果是自行编译最新的SDK或版本>0.2.1,opencv和openvino目录结构有所改变,路径需要做出适当的修改。如: +```bat +set PATH=%FASTDEPLOY_HOME%\third_libs\install\opencv\build\x64\vc15\bin;%PATH% +set PATH=%FASTDEPLOY_HOME%\third_libs\install\openvino\runtime\bin;%PATH% +set PATH=%FASTDEPLOY_HOME%\third_libs\install\openvino\runtime\3rdparty\tbb\bin;%PATH% +``` +可以把上述命令拷贝并保存到build目录下的某个bat脚本文件中(包含copy onnxruntime),如`setup_fastdeploy_dll.bat`,方便多次使用。 +```bat +setup_fastdeploy_dll.bat +``` + +### 4.4 
方式四:手动拷贝依赖库到exe的目录下 + +
+ +手动拷贝,或者在build目录下执行以下命令: +```bat +set FASTDEPLOY_HOME=%cd%\..\..\..\..\..\..\..\fastdeploy-win-x64-gpu-0.2.1 +copy /Y %FASTDEPLOY_HOME%\lib\*.dll Release\ +copy /Y %FASTDEPLOY_HOME%\third_libs\install\onnxruntime\lib\*.dll Release\ +copy /Y %FASTDEPLOY_HOME%\third_libs\install\opencv-win-x64-3.4.16\build\x64\vc15\bin\*.dll Release\ +copy /Y %FASTDEPLOY_HOME%\third_libs\install\paddle_inference\paddle\lib\*.dll Release\ +copy /Y %FASTDEPLOY_HOME%\third_libs\install\paddle_inference\third_party\install\mkldnn\lib\*.dll Release\ +copy /Y %FASTDEPLOY_HOME%\third_libs\install\paddle_inference\third_party\install\mklml\lib\*.dll Release\ +copy /Y %FASTDEPLOY_HOME%\third_libs\install\paddle2onnx\lib\*.dll Release\ +copy /Y %FASTDEPLOY_HOME%\third_libs\install\tensorrt\lib\*.dll Release\ +copy /Y %FASTDEPLOY_HOME%\third_libs\install\faster_tokenizer\lib\*.dll Release\ +copy /Y %FASTDEPLOY_HOME%\third_libs\install\faster_tokenizer\third_party\lib\*.dll Release\ +copy /Y %FASTDEPLOY_HOME%\third_libs\install\yaml-cpp\lib\*.dll Release\ +copy /Y %FASTDEPLOY_HOME%\third_libs\install\openvino\bin\*.dll Release\ +copy /Y %FASTDEPLOY_HOME%\third_libs\install\openvino\bin\*.xml Release\ +copy /Y %FASTDEPLOY_HOME%\third_libs\install\openvino\3rdparty\tbb\bin\*.dll Release\ +``` +另外,注意,如果是自行编译最新的SDK或版本>0.2.1,opencv和openvino目录结构有所改变,路径需要做出适当的修改。如: +```bat +copy /Y %FASTDEPLOY_HOME%\third_libs\install\opencv\build\x64\vc15\bin\*.dll Release\ +copy /Y %FASTDEPLOY_HOME%\third_libs\install\openvino\runtime\bin\*.dll Release\ +copy /Y %FASTDEPLOY_HOME%\third_libs\install\openvino\runtime\3rdparty\tbb\bin\*.dll Release\ +``` +可以把上述命令拷贝并保存到build目录下的某个bat脚本文件中,如`copy_fastdeploy_dll.bat`,方便多次使用。 +```bat +copy_fastdeploy_dll.bat +``` +特别说明:上述的set和copy命令对应的依赖库路径,需要用户根据自己使用SDK中的依赖库进行适当地修改。比如,若是CPU版本的SDK,则不需要TensorRT相关的设置。 diff --git a/.new_docs/en/quantize.md b/.new_docs/en/quantize.md new file mode 100644 index 0000000000..eb626c6e6c --- /dev/null +++ b/.new_docs/en/quantize.md @@ -0,0 +1,11 
@@ +[简体中文](../cn/quantize.md) | English + +# 量化加速 + +简要介绍量化加速的原理。 + +目前量化支持在哪些硬件及后端的使用 + +## 量化示例 + +这里一个表格,展示目前支持的量化列表(跳转到相应的example下去),精度、性能 diff --git a/.new_docs/en/quick_start/models/cpp.md b/.new_docs/en/quick_start/models/cpp.md new file mode 100644 index 0000000000..a504d41d03 --- /dev/null +++ b/.new_docs/en/quick_start/models/cpp.md @@ -0,0 +1,116 @@ +# C++部署 + +确认开发环境已准备FastDeploy C++部署库,参考[FastDeploy安装](../../build_and_install/)安装预编译的FastDeploy,或根据自己需求进行编译安装。 + +本文档以PaddleDetection目标检测模型PPYOLOE为例展示CPU上的推理示例 + +## 1. 获取模型和测试图像 + +``` +wget https://bj.bcebos.com/paddlehub/fastdeploy/ppyoloe_crn_l_300e_coco.tgz +wget https://gitee.com/paddlepaddle/PaddleDetection/raw/release/2.4/demo/000000014439.jpg +tar xvf ppyoloe_crn_l_300e_coco.tgz +``` + +## 2. 准备C++推理代码 + +如下C++代码保存为`infer_demo.cc` + +``` c++ +#include "fastdeploy/vision.h" +int main() { + std::string model_file = "ppyoloe_crn_l_300e_coco/model.pdmodel"; + std::string params_file = "ppyoloe_crn_l_300e_coco/model.pdiparams"; + std::string infer_cfg_file = "ppyoloe_crn_l_300e_coco/infer_cfg.yml"; + auto model = fastdeploy::vision::detection::PPYOLOE(model_file, params_file, infer_cfg_file); + + assert(model.Initialized()); // 判断模型是否初始化成功 + + cv::Mat im = cv::imread("000000014439.jpg"); + fastdeploy::vision::DetectionResult result; + + assert(model.Predict(&im, &result)); // 判断是否预测成功 + + std::cout << result << std::endl; + + cv::Mat vis_im = fastdeploy::vision::Visualize::VisDetection(im, result, 0.5); + // 可视化结果保存到本地 + cv::imwrite("vis_result.jpg", vis_im); + + return 0; +} +``` + +## 3. 
准备CMakeLists.txt + +FastDeploy中包含多个依赖库,直接采用`g++`或编译器编译较为繁杂,推荐使用cmake进行编译配置。示例配置如下, + +假设下载或准备的FastDeploy C++ SDK在`/Paddle/Download`目录下,且目录名为`fastdeploy_cpp_sdk`,在开发者的项目中只需添加如下代码,即可引入`FASTDEPLOY_INCS`和`FASTDEPLOY_LIBS`两个变量,分别表示依赖的头文件和库文件 + +``` shell +include(/Paddle/Download/fastdeploy_cpp_sdk/FastDeploy.cmake) +``` + +``` +PROJECT(infer_demo C CXX) +CMAKE_MINIMUM_REQUIRED (VERSION 3.12) + +include(/Paddle/Download/fastdeploy_cpp_sdk/FastDeploy.cmake) + +# 添加FastDeploy依赖头文件 +include_directories(${FASTDEPLOY_INCS}) + +add_executable(infer_demo ${PROJECT_SOURCE_DIR}/infer_demo.cc) +target_link_libraries(infer_demo ${FASTDEPLOY_LIBS}) +``` + +## 4. 编译可执行程序 + +假设当前目录已经准备好`infer_demo.cc`和`CMakeLists.txt`两个文件,目录结构如下所示,即可进行编译 + +### Linux & Mac + +打开命令行终端,进入`infer_demo.cc`和`CMakeLists.txt`所在的目录,执行如下命令 + +``` +mkdir build && cd build +cmake .. +make -j +``` + +在执行`cmake`命令时,屏幕会输出FastDeploy编译信息以及Notice,其中如下提示是指引开发者将FastDeploy依赖库路径添加到环境变量,便于编译后执行二进制程序能链接到相应的库,开发者复制相应command在终端执行即可。 + +``` +======================= Notice ======================== +After compiled binary executable file, please add the following path to environment, execute the following command, + +export LD_LIBRARY_PATH=/Paddle/Download/fastdeploy_cpp_sdk/third_libs/install/paddle2onnx/lib:/Paddle/Download/fastdeploy_cpp_sdk/third_libs/install/opencv/lib:/Paddle/Download/fastdeploy_cpp_sdk/third_libs/install/onnxruntime/lib:/Paddle/Download/fastdeploy_cpp_sdk/lib:${LD_LIBRARY_PATH} +======================================================= +``` + +编译完成后,使用如下命令执行可得到预测结果 +``` +./infer_demo +``` + +### Windows + +在Windows菜单中,找到`x64 Native Tools Command Prompt for VS 2019`打开,进入`infer_demo.cc`和`CMakeLists.txt`所在目录,执行如下命令 +``` +mkdir build +cd build +cmake .. 
-G "Visual Studio 16 2019" -A x64 +msbuild infer_demo.sln /m /p:Configuration=Release /p:Platform=x64 +``` + +在执行`cmake`命令时,屏幕会输出FastDeploy编译信息以及Notice,其中如下提示是指引开发者将FastDeploy依赖库路径添加到环境变量,便于编译后执行exe能链接到相应的库,开发者复制相应command在终端执行即可。 + +``` +======================= Notice ======================== + +``` + +执行完后,即会在`build/Release`目录下生成`infer_demo.exe`程序,使用如下命令执行可得到预测结果 +``` +Release\infer_demo.exe +``` diff --git a/.new_docs/en/quick_start/models/python.md b/.new_docs/en/quick_start/models/python.md new file mode 100644 index 0000000000..6cad7ccf1b --- /dev/null +++ b/.new_docs/en/quick_start/models/python.md @@ -0,0 +1,44 @@ +# Python部署 + +确认开发环境已安装FastDeploy,参考[FastDeploy安装](../../build_and_install/)安装预编译的FastDeploy,或根据自己需求进行编译安装。 + +本文档以PaddleDetection目标检测模型PicoDet为例展示CPU上的推理示例 + +## 1. 获取模型和测试图像 + +``` python +import fastdeploy as fd + +model_url = "https://bj.bcebos.com/paddlehub/fastdeploy/ppyoloe_crn_l_300e_coco.tgz" +image_url - "https://bj.bcebos.com/fastdeploy/tests/test_det.jpg" +fd.download_and_decompress(model_url, path=".") +fd.download(image_url, path=".") +``` + +## 2. 加载模型 + +- 更多模型的示例可参考[FastDeploy/examples](https://github.com/PaddlePaddle/FastDeploy/tree/develop/examples) + +``` python +model_file = "ppyoloe_crn_l_300e_coco/model.pdmodel" +params_file = "ppyoloe_crn_l_300e_coco/model.pdiparams" +infer_cfg_file = "ppyoloe_crn_l_300e_coco/infer_cfg.yml" +model = fd.vision.detection.PPYOLOE(model_file, params_file, infer_cfg_file) +``` + +## 3. 预测图片检测结果 + +``` python +import cv2 +im = cv2.imread("000000014439.jpg") + +result = model.predict(im) +print(result) +``` + +## 4. 
可视化图片预测结果 + +``` python +vis_im = fd.vision.visualize.vis_detection(im, result, score_threshold=0.5) +cv2.imwrite("vis_image.jpg", vis_im) +``` diff --git a/.new_docs/en/quick_start/runtime/cpp.md b/.new_docs/en/quick_start/runtime/cpp.md new file mode 100644 index 0000000000..7d52d9b58b --- /dev/null +++ b/.new_docs/en/quick_start/runtime/cpp.md @@ -0,0 +1 @@ +# C++推理 diff --git a/.new_docs/en/quick_start/runtime/python.md b/.new_docs/en/quick_start/runtime/python.md new file mode 100644 index 0000000000..cb2c6efd22 --- /dev/null +++ b/.new_docs/en/quick_start/runtime/python.md @@ -0,0 +1 @@ +# Python推理 diff --git a/.new_docs/quick_start/index.rst b/.new_docs/quick_start/index.rst deleted file mode 100644 index 615e8b6d3a..0000000000 --- a/.new_docs/quick_start/index.rst +++ /dev/null @@ -1,12 +0,0 @@ -快速使用 -======================================= - -FastDeploy集成了计算机视觉、NLP以及语音等领域多种模型的端到端部署支持,同时也提供多后端的Runtime方便开发者以统一的API快速完成在不同硬件平台,不同后端的推理部署需求。 - -.. toctree:: - :caption: 目录 - :maxdepth: 2 - :titlesonly: - - models/index - runtime/index diff --git a/.new_docs/quick_start/models/index.rst b/.new_docs/quick_start/models/index.rst deleted file mode 100644 index dbaf483278..0000000000 --- a/.new_docs/quick_start/models/index.rst +++ /dev/null @@ -1,12 +0,0 @@ -模型端到端部署 -======================================= - -FastDeploy支持计算机视觉、NLP以及语音等领域100+模型的端到端部署支持,用户加载模型后即可一键完成从数据预处理、模型后端推理、结果后处理整个流程,得到最终的任务预测结果。 - -.. toctree:: - :caption: 目录 - :maxdepth: 2 - :titlesonly: - - python.md - cpp.md diff --git a/.new_docs/quick_start/runtime/index.rst b/.new_docs/quick_start/runtime/index.rst deleted file mode 100644 index f8d1e66b99..0000000000 --- a/.new_docs/quick_start/runtime/index.rst +++ /dev/null @@ -1,12 +0,0 @@ -Runtime推理 -======================================= - -FastDeploy Runtime集成了多种后端,支持Paddle/ONNX模型一键加载推理,满足快速切换不同硬件和后端的推理需求 - -.. toctree:: - :caption: 目录 - :maxdepth: 2 - :titlesonly: - - python.md - cpp.md