mirror of
https://github.com/PaddlePaddle/FastDeploy.git
synced 2026-04-22 16:07:51 +08:00
[Hackathon 181] Add TVM support for FastDeploy on macOS (#1969)
* update for tvm backend * update third_party * update third_party * update * update * update * update * update * update * update * update --------- Co-authored-by: DefTruth <31974251+DefTruth@users.noreply.github.com>
This commit is contained in:
@@ -0,0 +1,35 @@
|
||||
[English](README.md) | 简体中文
|
||||
|
||||
# PaddleDetection TVM部署示例
|
||||
|
||||
在TVM上已经通过测试的PaddleDetection模型如下:
|
||||
|
||||
* picodet
|
||||
* PPYOLOE
|
||||
|
||||
### Paddle模型转换为TVM模型
|
||||
|
||||
由于TVM不支持NMS算子,因此在转换模型前我们需要对PaddleDetection模型进行裁剪,将模型的输出节点改为NMS节点的输入节点。
|
||||
输入以下命令,你将得到一个裁剪后的PPYOLOE模型。
|
||||
|
||||
```bash
|
||||
git clone https://github.com/PaddlePaddle/Paddle2ONNX.git
|
||||
cd Paddle2ONNX/tools/paddle
|
||||
wget https://bj.bcebos.com/fastdeploy/models/ppyoloe_plus_crn_m_80e_coco.tgz
|
||||
tar xvf ppyoloe_plus_crn_m_80e_coco.tgz
|
||||
python prune_paddle_model.py --model_dir ppyoloe_plus_crn_m_80e_coco \
|
||||
--model_filename model.pdmodel \
|
||||
--params_filename model.pdiparams \
|
||||
--output_names tmp_17 concat_14.tmp_0 \
|
||||
--save_dir ppyoloe_plus_crn_m_80e_coco
|
||||
```
|
||||
|
||||
裁剪完模型后,我们就可以通过TVM的Python库完成模型编译。这里为了方便大家使用,提供了转换脚本。
|
||||
输入以下命令,你将得到转换过后的TVM模型。
|
||||
注意,FastDeploy在推理PPYOLOE时不仅依赖模型,还依赖yml文件,因此你还需要将对应的yml文件拷贝到模型目录下。
|
||||
|
||||
```bash
|
||||
python path/to/FastDeploy/tools/tvm/paddle2tvm.py --model_path=./ppyoloe_plus_crn_m_80e_coco/model \
|
||||
--shape_dict="{'image': [1, 3, 640, 640], 'scale_factor': [1, 2]}"
|
||||
cp ppyoloe_plus_crn_m_80e_coco/infer_cfg.yml tvm_save
|
||||
```
|
||||
@@ -0,0 +1,13 @@
|
||||
PROJECT(infer_demo C CXX)
CMAKE_MINIMUM_REQUIRED(VERSION 3.10)

# Path of the extracted FastDeploy SDK, supplied on the command line as
# -DFASTDEPLOY_INSTALL_DIR=/path/to/fastdeploy-sdk.
# NOTE: option() declares ON/OFF booleans only; a directory path must be a
# CACHE PATH entry, otherwise tools that validate cache types mishandle it.
set(FASTDEPLOY_INSTALL_DIR "" CACHE PATH "Path of downloaded fastdeploy sdk.")
if(NOT FASTDEPLOY_INSTALL_DIR)
  message(FATAL_ERROR "Please set -DFASTDEPLOY_INSTALL_DIR to the downloaded FastDeploy SDK directory.")
endif()

# Pulls in FASTDEPLOY_INCS / FASTDEPLOY_LIBS used below.
include(${FASTDEPLOY_INSTALL_DIR}/FastDeploy.cmake)

# FastDeploy dependency header search paths.
include_directories(${FASTDEPLOY_INCS})

add_executable(infer_ppyoloe_demo ${PROJECT_SOURCE_DIR}/infer_ppyoloe_demo.cc)
target_link_libraries(infer_ppyoloe_demo ${FASTDEPLOY_LIBS})
|
||||
@@ -0,0 +1,60 @@
|
||||
[English](README.md) | 简体中文
|
||||
|
||||
# PaddleDetection C++部署示例
|
||||
|
||||
本目录下提供`infer_ppyoloe_demo.cc`,用于快速完成PaddleDetection模型使用TVM加速部署的示例。
|
||||
|
||||
## 转换模型并运行
|
||||
|
||||
```bash
|
||||
# build example
|
||||
mkdir build
|
||||
cd build
|
||||
cmake .. -DFASTDEPLOY_INSTALL_DIR=/path/to/fastdeploy-sdk
|
||||
make -j
|
||||
wget https://gitee.com/paddlepaddle/PaddleDetection/raw/release/2.4/demo/000000014439.jpg
|
||||
./infer_ppyoloe_demo ../tvm_save 000000014439.jpg
|
||||
```
|
||||
|
||||
|
||||
## PaddleDetection C++接口
|
||||
|
||||
### 模型类
|
||||
|
||||
PaddleDetection目前支持多种模型系列,类名分别为`PPYOLOE`, `PicoDet`, `PaddleYOLOX`, `PPYOLO`, `FasterRCNN`,`SSD`,`PaddleYOLOv5`,`PaddleYOLOv6`,`PaddleYOLOv7`,`RTMDet`,`CascadeRCNN`,`PSSDet`,`RetinaNet`,`PPYOLOESOD`,`FCOS`,`TTFNet`,`TOOD`,`GFL`。所有类名的构造函数和预测函数在参数上完全一致。本文档以PPYOLOE为例讲解API
|
||||
```c++
|
||||
fastdeploy::vision::detection::PPYOLOE(
|
||||
const string& model_file,
|
||||
const string& params_file,
|
||||
const string& config_file,
|
||||
const RuntimeOption& runtime_option = RuntimeOption(),
|
||||
const ModelFormat& model_format = ModelFormat::PADDLE)
|
||||
```
|
||||
|
||||
PaddleDetection PPYOLOE模型加载和初始化,其中model_file为导出的ONNX模型格式。
|
||||
|
||||
**参数**
|
||||
|
||||
> * **model_file**(str): 模型文件路径
|
||||
> * **params_file**(str): 参数文件路径
|
||||
> * **config_file**(str): 配置文件路径,即PaddleDetection导出的部署yaml文件
|
||||
> * **runtime_option**(RuntimeOption): 后端推理配置,默认为None,即采用默认配置
|
||||
> * **model_format**(ModelFormat): 模型格式,默认为PADDLE格式
|
||||
|
||||
#### Predict函数
|
||||
|
||||
> ```c++
|
||||
> PPYOLOE::Predict(cv::Mat* im, DetectionResult* result)
|
||||
> ```
|
||||
>
|
||||
> 模型预测接口,输入图像直接输出检测结果。
|
||||
>
|
||||
> **参数**
|
||||
>
|
||||
> > * **im**: 输入图像,注意需为HWC,BGR格式
|
||||
> > * **result**: 检测结果,包括检测框,各个框的置信度, DetectionResult说明参考[视觉模型预测结果](../../../../../docs/api/vision_results/)
|
||||
|
||||
- [模型介绍](../../)
|
||||
- [Python部署](../python)
|
||||
- [视觉模型预测结果](../../../../../docs/api/vision_results/)
|
||||
- [如何切换模型推理后端引擎](../../../../../docs/cn/faq/how_to_change_backend.md)
|
||||
@@ -0,0 +1,57 @@
|
||||
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
#include "fastdeploy/vision.h"
|
||||
|
||||
void TVMInfer(const std::string& model_dir, const std::string& image_file) {
|
||||
auto model_file = model_dir + "/tvm_model";
|
||||
auto params_file = "";
|
||||
auto config_file = model_dir + "/infer_cfg.yml";
|
||||
|
||||
auto option = fastdeploy::RuntimeOption();
|
||||
option.UseCpu();
|
||||
option.UseTVMBackend();
|
||||
|
||||
auto format = fastdeploy::ModelFormat::TVMFormat;
|
||||
|
||||
auto model = fastdeploy::vision::detection::PPYOLOE(
|
||||
model_file, params_file, config_file, option, format);
|
||||
model.GetPostprocessor().ApplyNMS();
|
||||
|
||||
auto im = cv::imread(image_file);
|
||||
|
||||
fastdeploy::vision::DetectionResult res;
|
||||
if (!model.Predict(&im, &res)) {
|
||||
std::cerr << "Failed to predict." << std::endl;
|
||||
return;
|
||||
}
|
||||
|
||||
std::cout << res.Str() << std::endl;
|
||||
auto vis_im = fastdeploy::vision::VisDetection(im, res, 0.5);
|
||||
cv::imwrite("infer.jpg", vis_im);
|
||||
std::cout << "Visualized result saved in ./infer.jpg" << std::endl;
|
||||
}
|
||||
|
||||
// Entry point: expects a model directory and an image path on the
// command line, then runs TVM-backed inference on them.
int main(int argc, char* argv[]) {
  const bool has_required_args = argc >= 3;
  if (!has_required_args) {
    std::cout << "Usage: infer_demo path/to/model_dir path/to/image run_option, "
                 "e.g ./infer_model ./picodet_model_dir ./test.jpeg"
              << std::endl;
    return -1;
  }

  TVMInfer(argv[1], argv[2]);
  return 0;
}
|
||||
Reference in New Issue
Block a user