[Model] Support PP-StructureV2-Layout model (#1867)

* [Model] init pp-structurev2-layout code

* [Model] init pp-structurev2-layout code

* [Model] init pp-structurev2-layout code

* [Model] add structurev2_layout_preprocessor

* [PP-StructureV2] add postprocessor and layout detector class

* [PP-StructureV2] add postprocessor and layout detector class

* [PP-StructureV2] add postprocessor and layout detector class

* [PP-StructureV2] add postprocessor and layout detector class

* [PP-StructureV2] add postprocessor and layout detector class

* [pybind] add pp-structurev2-layout model pybind

* [pybind] add pp-structurev2-layout model pybind

* [Bug Fix] fixed code style

* [examples] add pp-structurev2-layout c++ examples

* [PP-StructureV2] add python example and docs

* [benchmark] add pp-structurev2-layout benchmark support
This commit is contained in:
DefTruth
2023-05-05 13:05:58 +08:00
committed by GitHub
parent 2c5fd91a7f
commit 6d0261e9e4
26 changed files with 1255 additions and 23 deletions
@@ -38,3 +38,8 @@ target_link_libraries(infer_rec ${FASTDEPLOY_LIBS})
add_executable(infer_structurev2_table ${PROJECT_SOURCE_DIR}/infer_structurev2_table.cc)
# Link the FastDeploy library into the table-recognition demo
target_link_libraries(infer_structurev2_table ${FASTDEPLOY_LIBS})
# Layout-analysis-only demo executable
add_executable(infer_structurev2_layout ${PROJECT_SOURCE_DIR}/infer_structurev2_layout.cc)
# Link the FastDeploy library into the layout-analysis demo
target_link_libraries(infer_structurev2_layout ${FASTDEPLOY_LIBS})
@@ -46,12 +46,18 @@ tar -xvf ch_PP-OCRv3_rec_infer.tar
# 下载PPStructureV2表格识别模型
wget https://paddleocr.bj.bcebos.com/ppstructure/models/slanet/ch_ppstructure_mobile_v2.0_SLANet_infer.tar
tar xf ch_ppstructure_mobile_v2.0_SLANet_infer.tar
# 下载PP-StructureV2版面分析模型
wget https://paddleocr.bj.bcebos.com/ppstructure/models/layout/picodet_lcnet_x1_0_fgd_layout_infer.tar
tar -xvf picodet_lcnet_x1_0_fgd_layout_infer.tar
# 下载预测图片与字典文件
wget https://gitee.com/paddlepaddle/PaddleOCR/raw/release/2.6/doc/imgs/12.jpg
wget https://gitee.com/paddlepaddle/PaddleOCR/raw/release/2.6/ppstructure/docs/table/table.jpg
wget https://gitee.com/paddlepaddle/PaddleOCR/raw/release/2.6/ppstructure/docs/table/layout.jpg
wget https://gitee.com/paddlepaddle/PaddleOCR/raw/release/2.6/ppocr/utils/ppocr_keys_v1.txt
wget https://gitee.com/paddlepaddle/PaddleOCR/raw/release/2.6/ppocr/utils/dict/table_structure_dict_ch.txt
wget https://gitee.com/paddlepaddle/PaddleOCR/raw/release/2.6/ppocr/utils/dict/layout_dict/layout_publaynet_dict.txt
wget https://gitee.com/paddlepaddle/PaddleOCR/raw/release/2.6/ppocr/utils/dict/layout_dict/layout_cdla_dict.txt
# 运行部署示例
# 在CPU上使用Paddle Inference推理
@@ -71,7 +77,7 @@ wget https://gitee.com/paddlepaddle/PaddleOCR/raw/release/2.6/ppocr/utils/dict/t
# 在GPU上使用Nvidia TensorRT推理
./infer_demo ./ch_PP-OCRv3_det_infer ./ch_ppocr_mobile_v2.0_cls_infer ./ch_PP-OCRv3_rec_infer ./ppocr_keys_v1.txt ./12.jpg 7
# 同时, FastDeploy提供文字检测,文字分类,文字识别三个模型的单独推理,
# 同时, FastDeploy提供文字检测,文字分类,文字识别,表格识别,版面分析等模型的单独推理,
# 有需要的用户, 请准备合适的图片, 同时根据自己的需求, 参考infer.cc来配置自定义硬件与推理后端.
# 在CPU上,单独使用文字检测模型部署
@@ -85,6 +91,9 @@ wget https://gitee.com/paddlepaddle/PaddleOCR/raw/release/2.6/ppocr/utils/dict/t
# 在CPU上,单独使用表格识别模型部署
./infer_structurev2_table ./ch_ppstructure_mobile_v2.0_SLANet_infer ./table_structure_dict_ch.txt ./table.jpg 0
# 在CPU上,单独使用版面分析模型部署
./infer_structurev2_layout ./picodet_lcnet_x1_0_fgd_layout_infer ./layout.jpg 0
```
运行完成可视化结果如下图所示
@@ -0,0 +1,87 @@
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "fastdeploy/vision.h"
#ifdef WIN32
const char sep = '\\';
#else
const char sep = '/';
#endif
// Load a PP-StructureV2 layout detection model from `layout_model_dir`,
// run layout detection on `image_file`, print the raw result and save a
// visualization to ./vis_result.jpg.
void InitAndInfer(const std::string &layout_model_dir,
                  const std::string &image_file,
                  const fastdeploy::RuntimeOption &option) {
  const auto model_path = layout_model_dir + sep + "model.pdmodel";
  const auto params_path = layout_model_dir + sep + "model.pdiparams";

  auto model = fastdeploy::vision::ocr::StructureV2Layout(model_path,
                                                          params_path, option);
  if (!model.Initialized()) {
    std::cerr << "Failed to initialize." << std::endl;
    return;
  }

  // 5 for publaynet, 10 for cdla
  model.GetPostprocessor().SetNumClass(5);

  cv::Mat image = cv::imread(image_file);
  fastdeploy::vision::DetectionResult result;
  if (!model.Predict(image, &result)) {
    std::cerr << "Failed to predict." << std::endl;
    return;
  }
  std::cout << result.Str() << std::endl;

  // Pick the label set matching the configured number of classes.
  std::vector<std::string> labels;
  if (model.GetPostprocessor().GetNumClass() == 10) {
    labels = {"text",      "title",         "figure", "figure_caption",
              "table",     "table_caption", "header", "footer",
              "reference", "equation"};
  } else {
    labels = {"text", "title", "list", "table", "figure"};
  }

  auto vis = fastdeploy::vision::VisDetection(image, result, labels, 0.3, 2,
                                              .5f, {255, 0, 0}, 2);
  cv::imwrite("vis_result.jpg", vis);
  std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl;
}
// Entry point: validate argv, build a RuntimeOption from the device flag
// (0 = CPU, 1 = GPU, anything else keeps the default option) and dispatch
// to InitAndInfer.
int main(int argc, char *argv[]) {
  if (argc < 4) {
    std::cout
        << "Usage: infer_demo path/to/layout_model path/to/image "
           "run_option, "
           "e.g ./infer_structurev2_layout picodet_lcnet_x1_0_fgd_layout_infer "
           "layout.png 0"
        << std::endl;
    std::cout << "The data type of run_option is int, 0: run with cpu; 1: run "
                 "with gpu;."
              << std::endl;
    return -1;
  }

  const std::string model_dir{argv[1]};
  const std::string image_path{argv[2]};
  const int device_flag = std::atoi(argv[3]);

  fastdeploy::RuntimeOption option;
  switch (device_flag) {
    case 0:
      option.UseCpu();
      break;
    case 1:
      option.UseGpu();
      break;
    default:
      break;  // keep the default runtime option for any other value
  }

  InitAndInfer(model_dir, image_path, option);
  return 0;
}
@@ -39,12 +39,18 @@ tar -xvf ch_PP-OCRv3_rec_infer.tar
# 下载PPStructureV2表格识别模型
wget https://paddleocr.bj.bcebos.com/ppstructure/models/slanet/ch_ppstructure_mobile_v2.0_SLANet_infer.tar
tar xf ch_ppstructure_mobile_v2.0_SLANet_infer.tar
# 下载PP-StructureV2版面分析模型
wget https://paddleocr.bj.bcebos.com/ppstructure/models/layout/picodet_lcnet_x1_0_fgd_layout_infer.tar
tar -xvf picodet_lcnet_x1_0_fgd_layout_infer.tar
# 下载预测图片与字典文件
wget https://gitee.com/paddlepaddle/PaddleOCR/raw/release/2.6/doc/imgs/12.jpg
wget https://gitee.com/paddlepaddle/PaddleOCR/raw/release/2.6/ppstructure/docs/table/table.jpg
wget https://gitee.com/paddlepaddle/PaddleOCR/raw/release/2.6/ppstructure/docs/table/layout.jpg
wget https://gitee.com/paddlepaddle/PaddleOCR/raw/release/2.6/ppocr/utils/ppocr_keys_v1.txt
wget https://gitee.com/paddlepaddle/PaddleOCR/raw/release/2.6/ppocr/utils/dict/table_structure_dict_ch.txt
wget https://gitee.com/paddlepaddle/PaddleOCR/raw/release/2.6/ppocr/utils/dict/layout_dict/layout_publaynet_dict.txt
wget https://gitee.com/paddlepaddle/PaddleOCR/raw/release/2.6/ppocr/utils/dict/layout_dict/layout_cdla_dict.txt
# 运行部署示例
# 在CPU上使用Paddle Inference推理
@@ -64,7 +70,7 @@ python infer.py --det_model ch_PP-OCRv3_det_infer --cls_model ch_ppocr_mobile_v2
# 在GPU上使用Nvidia TensorRT推理
python infer.py --det_model ch_PP-OCRv3_det_infer --cls_model ch_ppocr_mobile_v2.0_cls_infer --rec_model ch_PP-OCRv3_rec_infer --rec_label_file ppocr_keys_v1.txt --image 12.jpg --device gpu --backend trt
# 同时, FastDeploy提供文字检测,文字分类,文字识别三个模型的单独推理,
# 同时, FastDeploy提供文字检测,文字分类,文字识别,表格识别,版面分析等模型的单独推理,
# 有需要的用户, 请准备合适的图片, 同时根据自己的需求, 参考infer.py来配置自定义硬件与推理后端.
# 在CPU上,单独使用文字检测模型部署
@@ -76,8 +82,11 @@ python infer_cls.py --cls_model ch_ppocr_mobile_v2.0_cls_infer --image 12.jpg --
# 在CPU上,单独使用文字识别模型部署
python infer_rec.py --rec_model ch_PP-OCRv3_rec_infer --rec_label_file ppocr_keys_v1.txt --image 12.jpg --device cpu
# 在CPU上,单独使用文字识别模型部署
# 在CPU上,单独使用表格识别模型部署
python infer_structurev2_table.py --table_model ./ch_ppstructure_mobile_v2.0_SLANet_infer --table_char_dict_path ./table_structure_dict_ch.txt --image table.jpg --device cpu
# 在CPU上,单独使用版面分析模型部署
python infer_structurev2_layout.py --layout_model ./picodet_lcnet_x1_0_fgd_layout_infer --image layout.jpg --device cpu
```
运行完成可视化结果如下图所示
@@ -0,0 +1,91 @@
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import fastdeploy as fd
import cv2
import os
def parse_arguments():
    """Parse the command line flags of the layout detection demo.

    Returns:
        argparse.Namespace with layout_model, image, device and device_id.
    """
    import argparse

    parser = argparse.ArgumentParser()
    # Flag specs as (names, keyword options) pairs, registered in a loop.
    flag_specs = [
        (("--layout_model", ),
         dict(required=True,
              help="Path of Layout detection model of PP-StructureV2.")),
        (("--image", ),
         dict(type=str, required=True, help="Path of test image file.")),
        (("--device", ),
         dict(type=str,
              default='cpu',
              help="Type of inference device, support 'cpu' or 'gpu'.")),
        (("--device_id", ),
         dict(type=int,
              default=0,
              help="Define which GPU card used to run model.")),
    ]
    for names, options in flag_specs:
        parser.add_argument(*names, **options)
    return parser.parse_args()
def build_option(args):
    """Build the FastDeploy RuntimeOption from parsed args.

    Enables GPU inference on args.device_id when --device is 'gpu';
    otherwise returns the default (CPU) runtime option.
    """
    option = fd.RuntimeOption()
    wants_gpu = args.device.lower() == "gpu"
    if wants_gpu:
        option.use_gpu(args.device_id)
    return option
args = parse_arguments()

# Model files inside the exported PP-StructureV2 layout model directory.
layout_model_file = os.path.join(args.layout_model, "model.pdmodel")
layout_params_file = os.path.join(args.layout_model, "model.pdiparams")

# Set the runtime option
layout_option = build_option(args)

# Create the layout detection model
layout_model = fd.vision.ocr.StructureV2Layout(
    layout_model_file, layout_params_file, layout_option)
# 5 classes for the publaynet model, 10 for the cdla model.
layout_model.postprocessor.num_class = 5

# Read the image
im = cv2.imread(args.image)

# Predict and return the results
result = layout_model.predict(im)
print(result)

# Visualize the results with the label set matching num_class.
labels = ["text", "title", "list", "table", "figure"]
if layout_model.postprocessor.num_class == 10:
    labels = [
        "text", "title", "figure", "figure_caption", "table", "table_caption",
        "header", "footer", "reference", "equation"
    ]
vis_im = fd.vision.vis_detection(
    im,
    result,
    labels,
    score_threshold=0.5,
    font_color=[255, 0, 0],
    font_thickness=2)
cv2.imwrite("visualized_result.jpg", vis_im)
print("Visualized result saved in ./visualized_result.jpg")
@@ -23,7 +23,7 @@ def parse_arguments():
parser.add_argument(
"--table_model",
required=True,
help="Path of Table recognition model of PPOCR.")
help="Path of Table recognition model of PP-StructureV2.")
parser.add_argument(
"--table_char_dict_path",
type=str,