From 38a79ebfdcb78d6e6812e95547545c448a2ac1b8 Mon Sep 17 00:00:00 2001 From: jiangjiajun Date: Sat, 5 Nov 2022 07:20:47 +0000 Subject: [PATCH 01/30] Update model tests --- tests/eval_example/test_pfld.py | 42 --------- tests/eval_example/test_ppmatting.py | 109 ----------------------- tests/eval_example/test_pptinypose.py | 100 --------------------- tests/eval_example/test_pptracking.py | 89 ------------------ tests/eval_example/test_quantize_diff.py | 96 -------------------- tests/eval_example/test_rvm.py | 101 --------------------- tests/eval_example/test_yolov5cls.py | 49 ---------- 7 files changed, 586 deletions(-) delete mode 100644 tests/eval_example/test_pfld.py delete mode 100644 tests/eval_example/test_ppmatting.py delete mode 100644 tests/eval_example/test_pptinypose.py delete mode 100644 tests/eval_example/test_pptracking.py delete mode 100755 tests/eval_example/test_quantize_diff.py delete mode 100644 tests/eval_example/test_rvm.py delete mode 100755 tests/eval_example/test_yolov5cls.py diff --git a/tests/eval_example/test_pfld.py b/tests/eval_example/test_pfld.py deleted file mode 100644 index 62156c785b..0000000000 --- a/tests/eval_example/test_pfld.py +++ /dev/null @@ -1,42 +0,0 @@ -# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import fastdeploy as fd -import cv2 -import os -import numpy as np - - -def test_facealignment_pfld(): - model_url = "https://bj.bcebos.com/paddlehub/fastdeploy/pfld-106-lite.onnx" - input_url = "https://bj.bcebos.com/paddlehub/fastdeploy/facealign_input.png" - output_url = "https://bj.bcebos.com/paddlehub/fastdeploy/result_landmarks.npy" - fd.download(model_url, ".") - fd.download(input_url, ".") - fd.download(output_url, ".") - model_path = "pfld-106-lite.onnx" - # use ORT - runtime_option = fd.RuntimeOption() - runtime_option.use_ort_backend() - model = fd.vision.facealign.PFLD(model_path, runtime_option=runtime_option) - - # compare diff - im = cv2.imread("./facealign_input.png") - result = model.predict(im.copy()) - expect = np.load("./result_landmarks.npy") - - diff = np.fabs(np.array(result.landmarks) - expect) - thres = 1e-04 - assert diff.max() < thres, "The diff is %f, which is bigger than %f" % ( - diff.max(), thres) diff --git a/tests/eval_example/test_ppmatting.py b/tests/eval_example/test_ppmatting.py deleted file mode 100644 index f1f1883854..0000000000 --- a/tests/eval_example/test_ppmatting.py +++ /dev/null @@ -1,109 +0,0 @@ -# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import fastdeploy as fd -import cv2 -import os -import pickle -import numpy as np - - -def test_matting_ppmatting(): - model_url = "https://bj.bcebos.com/paddlehub/fastdeploy/PP-Matting-512.tgz" - input_url = "https://bj.bcebos.com/paddlehub/fastdeploy/matting_input.jpg" - fd.download_and_decompress(model_url, ".") - fd.download(input_url, ".") - model_path = "./PP-Matting-512" - # 配置runtime,加载模型 - runtime_option = fd.RuntimeOption() - model_file = os.path.join(model_path, "model.pdmodel") - params_file = os.path.join(model_path, "model.pdiparams") - config_file = os.path.join(model_path, "deploy.yaml") - model = fd.vision.matting.PPMatting( - model_file, params_file, config_file, runtime_option=runtime_option) - - # 预测图片抠图结果 - im = cv2.imread("./matting_input.jpg") - result = model.predict(im.copy()) - pkl_url = "https://bj.bcebos.com/fastdeploy/tests/ppmatting_result.pkl" - if pkl_url: - fd.download(pkl_url, ".") - with open("./ppmatting_result.pkl", "rb") as f: - baseline = pickle.load(f) - - diff = np.fabs(np.array(result.alpha) - np.array(baseline)) - thres = 1e-05 - assert diff.max() < thres, "The diff is %f, which is bigger than %f" % ( - diff.max(), thres) - - -def test_matting_ppmodnet(): - model_url = "https://bj.bcebos.com/paddlehub/fastdeploy/PPModnet_MobileNetV2.tgz" - input_url = "https://bj.bcebos.com/paddlehub/fastdeploy/matting_input.jpg" - fd.download_and_decompress(model_url, ".") - fd.download(input_url, ".") - model_path = "./PPModnet_MobileNetV2" - # 配置runtime,加载模型 - runtime_option = fd.RuntimeOption() - model_file = os.path.join(model_path, "model.pdmodel") - params_file = os.path.join(model_path, "model.pdiparams") - config_file = os.path.join(model_path, "deploy.yaml") - model = fd.vision.matting.PPMatting( - model_file, params_file, config_file, runtime_option=runtime_option) - - # 预测图片抠图结果 - im = cv2.imread("./matting_input.jpg") - result = model.predict(im.copy()) - - pkl_url = 
"https://bj.bcebos.com/fastdeploy/tests/ppmodnet_result.pkl" - if pkl_url: - fd.download(pkl_url, ".") - with open("./ppmodnet_result.pkl", "rb") as f: - baseline = pickle.load(f) - - diff = np.fabs(np.array(result.alpha) - np.array(baseline)) - thres = 1e-05 - assert diff.max() < thres, "The diff is %f, which is bigger than %f" % ( - diff.max(), thres) - - -def test_matting_pphumanmatting(): - model_url = "https://bj.bcebos.com/paddlehub/fastdeploy/PPHumanMatting.tgz" - input_url = "https://bj.bcebos.com/paddlehub/fastdeploy/matting_input.jpg" - fd.download_and_decompress(model_url, ".") - fd.download(input_url, ".") - model_path = "./PPHumanMatting" - # 配置runtime,加载模型 - runtime_option = fd.RuntimeOption() - model_file = os.path.join(model_path, "model.pdmodel") - params_file = os.path.join(model_path, "model.pdiparams") - config_file = os.path.join(model_path, "deploy.yaml") - model = fd.vision.matting.PPMatting( - model_file, params_file, config_file, runtime_option=runtime_option) - - # 预测图片抠图结果 - im = cv2.imread("./matting_input.jpg") - result = model.predict(im.copy()) - - pkl_url = "https://bj.bcebos.com/fastdeploy/tests/pphumanmatting_result.pkl" - if pkl_url: - fd.download(pkl_url, ".") - - with open("./pphumanmatting_result.pkl", "rb") as f: - baseline = pickle.load(f) - - diff = np.fabs(np.array(result.alpha) - np.array(baseline)) - thres = 1e-05 - assert diff.max() < thres, "The diff is %f, which is bigger than %f" % ( - diff.max(), thres) diff --git a/tests/eval_example/test_pptinypose.py b/tests/eval_example/test_pptinypose.py deleted file mode 100644 index 0da899293e..0000000000 --- a/tests/eval_example/test_pptinypose.py +++ /dev/null @@ -1,100 +0,0 @@ -# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import fastdeploy as fd -import cv2 -import os -import numpy as np - - -def test_keypointdetection_pptinypose(): - pp_tinypose_model_url = "https://bj.bcebos.com/fastdeploy/tests/PP_TinyPose_256x192_test.tgz" - fd.download_and_decompress(pp_tinypose_model_url, ".") - model_path = "./PP_TinyPose_256x192_test" - # 配置runtime,加载模型 - runtime_option = fd.RuntimeOption() - model_file = os.path.join(model_path, "model.pdmodel") - params_file = os.path.join(model_path, "model.pdiparams") - config_file = os.path.join(model_path, "infer_cfg.yml") - image_file = os.path.join(model_path, "hrnet_demo.jpg") - baseline_file = os.path.join(model_path, "baseline.npy") - model = fd.vision.keypointdetection.PPTinyPose( - model_file, params_file, config_file, runtime_option=runtime_option) - - # 预测图片关键点 - im = cv2.imread(image_file) - result = model.predict(im) - result = np.concatenate( - (np.array(result.keypoints), np.array(result.scores)[:, np.newaxis]), - axis=1) - baseline = np.load(baseline_file) - diff = np.fabs(result - np.array(baseline)) - thres = 1e-05 - assert diff.max() < thres, "The diff is %f, which is bigger than %f" % ( - diff.max(), thres) - print("No diff") - - -def test_keypointdetection_det_keypoint_unite(): - det_keypoint_unite_model_url = "https://bj.bcebos.com/fastdeploy/tests/PicoDet_320x320_TinyPose_256x192_test.tgz" - fd.download_and_decompress(det_keypoint_unite_model_url, ".") - model_path = "./PicoDet_320x320_TinyPose_256x192_test" - # 配置runtime,加载模型 - runtime_option = fd.RuntimeOption() - tinypose_model_file = os.path.join( - model_path, 
"PP_TinyPose_256x192_infer/model.pdmodel") - tinypose_params_file = os.path.join( - model_path, "PP_TinyPose_256x192_infer/model.pdiparams") - tinypose_config_file = os.path.join( - model_path, "PP_TinyPose_256x192_infer/infer_cfg.yml") - picodet_model_file = os.path.join( - model_path, "PP_PicoDet_V2_S_Pedestrian_320x320_infer/model.pdmodel") - picodet_params_file = os.path.join( - model_path, "PP_PicoDet_V2_S_Pedestrian_320x320_infer/model.pdiparams") - picodet_config_file = os.path.join( - model_path, "PP_PicoDet_V2_S_Pedestrian_320x320_infer/infer_cfg.yml") - image_file = os.path.join(model_path, "000000018491.jpg") - # image_file = os.path.join(model_path, "hrnet_demo.jpg") - - baseline_file = os.path.join(model_path, "baseline.npy") - - tinypose_model = fd.vision.keypointdetection.PPTinyPose( - tinypose_model_file, - tinypose_params_file, - tinypose_config_file, - runtime_option=runtime_option) - - det_model = fd.vision.detection.PicoDet( - picodet_model_file, - picodet_params_file, - picodet_config_file, - runtime_option=runtime_option) - - # 预测图片关键点 - im = cv2.imread(image_file) - pipeline = fd.pipeline.PPTinyPose(det_model, tinypose_model) - pipeline.detection_model_score_threshold = 0.5 - result = pipeline.predict(im) - print(result) - result = np.concatenate( - (np.array(result.keypoints), np.array(result.scores)[:, np.newaxis]), - axis=1) - print(result) - np.save("baseline.npy", result) - baseline = np.load(baseline_file) - diff = np.fabs(result - np.array(baseline)) - thres = 1e-05 - assert diff.max() < thres, "The diff is %f, which is bigger than %f" % ( - diff.max(), thres) - print("No diff") diff --git a/tests/eval_example/test_pptracking.py b/tests/eval_example/test_pptracking.py deleted file mode 100644 index ee1cb9bc51..0000000000 --- a/tests/eval_example/test_pptracking.py +++ /dev/null @@ -1,89 +0,0 @@ -# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import fastdeploy as fd -import cv2 -import os -import numpy as np -import pickle - - -def test_pptracking_cpu(): - model_url = "https://bj.bcebos.com/paddlehub/fastdeploy/pptracking.tgz" - input_url = "https://bj.bcebos.com/paddlehub/fastdeploy/person.mp4" - fd.download_and_decompress(model_url, ".") - fd.download(input_url, ".") - model_path = "pptracking/fairmot_hrnetv2_w18_dlafpn_30e_576x320" - # use default backend - runtime_option = fd.RuntimeOption() - model_file = os.path.join(model_path, "model.pdmodel") - params_file = os.path.join(model_path, "model.pdiparams") - config_file = os.path.join(model_path, "infer_cfg.yml") - model = fd.vision.tracking.PPTracking(model_file, params_file, config_file, runtime_option=runtime_option) - cap = cv2.VideoCapture("./person.mp4") - frame_id = 0 - while True: - _, frame = cap.read() - if frame is None: - break - result = model.predict(frame) - # compare diff - expect = pickle.load(open("pptracking/frame" + str(frame_id) + ".pkl", "rb")) - diff_boxes = np.fabs(np.array(expect["boxes"]) - np.array(result.boxes)) - diff_scores = np.fabs(np.array(expect["scores"]) - np.array(result.scores)) - diff = max(diff_boxes.max(), diff_scores.max()) - thres = 1e-05 - assert diff < thres, "The label diff is %f, which is bigger than %f" % (diff, thres) - frame_id = frame_id + 1 - cv2.waitKey(30) - if frame_id >= 10: - cap.release() - cv2.destroyAllWindows() - break - - -def 
test_pptracking_gpu(): - model_url = "https://bj.bcebos.com/paddlehub/fastdeploy/pptracking.tgz" - input_url = "https://bj.bcebos.com/paddlehub/fastdeploy/person.mp4" - fd.download_and_decompress(model_url, ".") - fd.download(input_url, ".") - model_path = "pptracking/fairmot_hrnetv2_w18_dlafpn_30e_576x320" - runtime_option = fd.RuntimeOption() - runtime_option.use_gpu() - # Not supported trt backend, up to now - # runtime_option.use_trt_backend() - model_file = os.path.join(model_path, "model.pdmodel") - params_file = os.path.join(model_path, "model.pdiparams") - config_file = os.path.join(model_path, "infer_cfg.yml") - model = fd.vision.tracking.PPTracking(model_file, params_file, config_file, runtime_option=runtime_option) - cap = cv2.VideoCapture("./person.mp4") - frame_id = 0 - while True: - _, frame = cap.read() - if frame is None: - break - result = model.predict(frame) - # compare diff - expect = pickle.load(open("pptracking/frame" + str(frame_id) + ".pkl", "rb")) - diff_boxes = np.fabs(np.array(expect["boxes"]) - np.array(result.boxes)) - diff_scores = np.fabs(np.array(expect["scores"]) - np.array(result.scores)) - diff = max(diff_boxes.max(), diff_scores.max()) - thres = 1e-05 - assert diff < thres, "The label diff is %f, which is bigger than %f" % (diff, thres) - frame_id = frame_id + 1 - cv2.waitKey(30) - if frame_id >= 10: - cap.release() - cv2.destroyAllWindows() - break diff --git a/tests/eval_example/test_quantize_diff.py b/tests/eval_example/test_quantize_diff.py deleted file mode 100755 index 8bc7b396a8..0000000000 --- a/tests/eval_example/test_quantize_diff.py +++ /dev/null @@ -1,96 +0,0 @@ -# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import fastdeploy as fd -import cv2 -import os -import pickle -import numpy as np - -model_url = "https://bj.bcebos.com/fastdeploy/tests/yolov6_quant.tgz" -fd.download_and_decompress(model_url, ".") - - -def test_quant_mkldnn(): - model_path = "./yolov6_quant" - model_file = os.path.join(model_path, "model.pdmodel") - params_file = os.path.join(model_path, "model.pdiparams") - - input_file = os.path.join(model_path, "input.npy") - output_file = os.path.join(model_path, "mkldnn_output.npy") - - option = fd.RuntimeOption() - option.use_paddle_backend() - option.use_cpu() - - option.set_model_path(model_file, params_file) - runtime = fd.Runtime(option) - input_name = runtime.get_input_info(0).name - data = np.load(input_file) - outs = runtime.infer({input_name: data}) - expected = np.load(output_file) - diff = np.fabs(outs[0] - expected) - thres = 1e-05 - assert diff.max() < thres, "The diff is %f, which is bigger than %f" % ( - diff.max(), thres) - - -def test_quant_ort(): - model_path = "./yolov6_quant" - model_file = os.path.join(model_path, "model.pdmodel") - params_file = os.path.join(model_path, "model.pdiparams") - - input_file = os.path.join(model_path, "input.npy") - output_file = os.path.join(model_path, "ort_output.npy") - - option = fd.RuntimeOption() - option.use_ort_backend() - option.use_cpu() - - option.set_ort_graph_opt_level(1) - - option.set_model_path(model_file, params_file) - runtime = fd.Runtime(option) - input_name = runtime.get_input_info(0).name - data = np.load(input_file) - outs = runtime.infer({input_name: data}) - expected = 
np.load(output_file) - diff = np.fabs(outs[0] - expected) - thres = 1e-05 - assert diff.max() < thres, "The diff is %f, which is bigger than %f" % ( - diff.max(), thres) - - -def test_quant_trt(): - model_path = "./yolov6_quant" - model_file = os.path.join(model_path, "model.pdmodel") - params_file = os.path.join(model_path, "model.pdiparams") - - input_file = os.path.join(model_path, "input.npy") - output_file = os.path.join(model_path, "trt_output.npy") - - option = fd.RuntimeOption() - option.use_trt_backend() - option.use_gpu() - - option.set_model_path(model_file, params_file) - runtime = fd.Runtime(option) - input_name = runtime.get_input_info(0).name - data = np.load(input_file) - outs = runtime.infer({input_name: data}) - expected = np.load(output_file) - diff = np.fabs(outs[0] - expected) - thres = 1e-05 - assert diff.max() < thres, "The diff is %f, which is bigger than %f" % ( - diff.max(), thres) diff --git a/tests/eval_example/test_rvm.py b/tests/eval_example/test_rvm.py deleted file mode 100644 index 4b8d5afe8d..0000000000 --- a/tests/eval_example/test_rvm.py +++ /dev/null @@ -1,101 +0,0 @@ -# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import fastdeploy as fd -import cv2 -import os -import pickle -import numpy as np - - -def test_matting_rvm_cpu(): - model_url = "https://bj.bcebos.com/paddlehub/fastdeploy/rvm.tgz" - input_url = "https://bj.bcebos.com/paddlehub/fastdeploy/video.mp4" - fd.download_and_decompress(model_url, ".") - fd.download(input_url, ".") - model_path = "rvm/rvm_mobilenetv3_fp32.onnx" - # use ORT - runtime_option = fd.RuntimeOption() - runtime_option.use_ort_backend() - model = fd.vision.matting.RobustVideoMatting( - model_path, runtime_option=runtime_option) - - cap = cv2.VideoCapture(input_url) - - frame_id = 0 - while True: - _, frame = cap.read() - if frame is None: - break - result = model.predict(frame) - # compare diff - expect_alpha = np.load("rvm/result_alpha_" + str(frame_id) + ".npy") - result_alpha = np.array(result.alpha).reshape(1920, 1080) - diff = np.fabs(expect_alpha - result_alpha) - thres = 1e-05 - assert diff.max( - ) < thres, "The label diff is %f, which is bigger than %f" % ( - diff.max(), thres) - frame_id = frame_id + 1 - cv2.waitKey(30) - if frame_id >= 10: - cap.release() - cv2.destroyAllWindows() - break - - -def test_matting_rvm_gpu_trt(): - model_url = "https://bj.bcebos.com/paddlehub/fastdeploy/rvm.tgz" - input_url = "https://bj.bcebos.com/paddlehub/fastdeploy/video.mp4" - fd.download_and_decompress(model_url, ".") - fd.download(input_url, ".") - model_path = "rvm/rvm_mobilenetv3_trt.onnx" - # use TRT - runtime_option = fd.RuntimeOption() - runtime_option.use_gpu() - runtime_option.use_trt_backend() - runtime_option.set_trt_input_shape("src", [1, 3, 1920, 1080]) - runtime_option.set_trt_input_shape("r1i", [1, 1, 1, 1], [1, 16, 240, 135], - [1, 16, 240, 135]) - runtime_option.set_trt_input_shape("r2i", [1, 1, 1, 1], [1, 20, 120, 68], - [1, 20, 120, 68]) - runtime_option.set_trt_input_shape("r3i", [1, 1, 1, 1], [1, 40, 60, 34], - [1, 40, 60, 34]) - runtime_option.set_trt_input_shape("r4i", [1, 1, 1, 1], [1, 64, 30, 17], - [1, 64, 30, 17]) - model = 
fd.vision.matting.RobustVideoMatting( - model_path, runtime_option=runtime_option) - - cap = cv2.VideoCapture("./video.mp4") - - frame_id = 0 - while True: - _, frame = cap.read() - if frame is None: - break - result = model.predict(frame) - # compare diff - expect_alpha = np.load("rvm/result_alpha_" + str(frame_id) + ".npy") - result_alpha = np.array(result.alpha).reshape(1920, 1080) - diff = np.fabs(expect_alpha - result_alpha) - thres = 1e-04 - assert diff.max( - ) < thres, "The label diff is %f, which is bigger than %f" % ( - diff.max(), thres) - frame_id = frame_id + 1 - cv2.waitKey(30) - if frame_id >= 10: - cap.release() - cv2.destroyAllWindows() - break diff --git a/tests/eval_example/test_yolov5cls.py b/tests/eval_example/test_yolov5cls.py deleted file mode 100755 index 50eefa36c5..0000000000 --- a/tests/eval_example/test_yolov5cls.py +++ /dev/null @@ -1,49 +0,0 @@ -# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import fastdeploy as fd -import cv2 -import os -import pickle -import numpy as np - - -def test_classification_yolov5cls(): - model_url = "https://bj.bcebos.com/paddlehub/fastdeploy/yolov5n-cls.tgz" - input_url = "https://gitee.com/paddlepaddle/PaddleClas/raw/release/2.4/deploy/images/ImageNet/ILSVRC2012_val_00000010.jpeg" - fd.download_and_decompress(model_url, ".") - fd.download(input_url, ".") - model_path = "yolov5n-cls/yolov5n-cls.onnx" - # use ORT - runtime_option = fd.RuntimeOption() - runtime_option.use_ort_backend() - model = fd.vision.classification.YOLOv5Cls( - model_path, runtime_option=runtime_option) - - # compare diff - im = cv2.imread("./ILSVRC2012_val_00000010.jpeg") - result = model.predict(im.copy(), topk=5) - with open("yolov5n-cls/result.pkl", "rb") as f: - expect = pickle.load(f) - - diff_label = np.fabs( - np.array(result.label_ids) - np.array(expect["labels"])) - diff_score = np.fabs(np.array(result.scores) - np.array(expect["scores"])) - thres = 1e-05 - assert diff_label.max( - ) < thres, "The label diff is %f, which is bigger than %f" % ( - diff_label.max(), thres) - assert diff_score.max( - ) < thres, "The score diff is %f, which is bigger than %f" % ( - diff_score.max(), thres) From f00212aa425b32d4aae10411fc895cb1bd006856 Mon Sep 17 00:00:00 2001 From: jiangjiajun Date: Sat, 5 Nov 2022 07:23:44 +0000 Subject: [PATCH 02/30] Update model tests --- tests/models/runtime_config.py | 4 ++ tests/models/test_pfld.py | 40 +++++++++++ tests/models/test_ppmatting.py | 105 +++++++++++++++++++++++++++++ tests/models/test_pptinypose.py | 100 +++++++++++++++++++++++++++ tests/models/test_pptracking.py | 54 +++++++++++++++ tests/models/test_quantize_diff.py | 96 ++++++++++++++++++++++++++ tests/models/test_rvm.py | 54 +++++++++++++++ tests/models/test_yolov5cls.py | 49 ++++++++++++++ 8 files changed, 502 insertions(+) create mode 100644 tests/models/runtime_config.py create mode 100644 tests/models/test_pfld.py create mode 100644 
tests/models/test_ppmatting.py create mode 100644 tests/models/test_pptinypose.py create mode 100644 tests/models/test_pptracking.py create mode 100755 tests/models/test_quantize_diff.py create mode 100644 tests/models/test_rvm.py create mode 100755 tests/models/test_yolov5cls.py diff --git a/tests/models/runtime_config.py b/tests/models/runtime_config.py new file mode 100644 index 0000000000..12383a7c24 --- /dev/null +++ b/tests/models/runtime_config.py @@ -0,0 +1,4 @@ +import fastdeploy as fd + + +test_option = fd.RuntimeOption() diff --git a/tests/models/test_pfld.py b/tests/models/test_pfld.py new file mode 100644 index 0000000000..ef1ba448e3 --- /dev/null +++ b/tests/models/test_pfld.py @@ -0,0 +1,40 @@ +# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import fastdeploy as fd +import cv2 +import os +import numpy as np +import runtime_config as rc + +def test_facealignment_pfld(): + model_url = "https://bj.bcebos.com/paddlehub/fastdeploy/pfld-106-lite.onnx" + input_url = "https://bj.bcebos.com/paddlehub/fastdeploy/facealign_input.png" + output_url = "https://bj.bcebos.com/paddlehub/fastdeploy/result_landmarks.npy" + fd.download(model_url, ".") + fd.download(input_url, ".") + fd.download(output_url, ".") + model_path = "pfld-106-lite.onnx" + # use ORT + model = fd.vision.facealign.PFLD(model_path, runtime_option=rc.test_option) + + # compare diff + im = cv2.imread("./facealign_input.png") + result = model.predict(im.copy()) + expect = np.load("./result_landmarks.npy") + + diff = np.fabs(np.array(result.landmarks) - expect) + thres = 1e-04 + assert diff.max() < thres, "The diff is %f, which is bigger than %f" % ( + diff.max(), thres) diff --git a/tests/models/test_ppmatting.py b/tests/models/test_ppmatting.py new file mode 100644 index 0000000000..78a085a5f3 --- /dev/null +++ b/tests/models/test_ppmatting.py @@ -0,0 +1,105 @@ +# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import fastdeploy as fd +import cv2 +import os +import pickle +import numpy as np +import runtime_config as rc + +def test_matting_ppmatting(): + model_url = "https://bj.bcebos.com/paddlehub/fastdeploy/PP-Matting-512.tgz" + input_url = "https://bj.bcebos.com/paddlehub/fastdeploy/matting_input.jpg" + fd.download_and_decompress(model_url, ".") + fd.download(input_url, ".") + model_path = "./PP-Matting-512" + model_file = os.path.join(model_path, "model.pdmodel") + params_file = os.path.join(model_path, "model.pdiparams") + config_file = os.path.join(model_path, "deploy.yaml") + model = fd.vision.matting.PPMatting( + model_file, params_file, config_file, runtime_option=rc.test_option) + + # 预测图片抠图结果 + im = cv2.imread("./matting_input.jpg") + result = model.predict(im.copy()) + pkl_url = "https://bj.bcebos.com/fastdeploy/tests/ppmatting_result.pkl" + if pkl_url: + fd.download(pkl_url, ".") + with open("./ppmatting_result.pkl", "rb") as f: + baseline = pickle.load(f) + + diff = np.fabs(np.array(result.alpha) - np.array(baseline)) + thres = 1e-05 + assert diff.max() < thres, "The diff is %f, which is bigger than %f" % ( + diff.max(), thres) + + +def test_matting_ppmodnet(): + model_url = "https://bj.bcebos.com/paddlehub/fastdeploy/PPModnet_MobileNetV2.tgz" + input_url = "https://bj.bcebos.com/paddlehub/fastdeploy/matting_input.jpg" + fd.download_and_decompress(model_url, ".") + fd.download(input_url, ".") + model_path = "./PPModnet_MobileNetV2" + model_file = os.path.join(model_path, "model.pdmodel") + params_file = os.path.join(model_path, "model.pdiparams") + config_file = os.path.join(model_path, "deploy.yaml") + model = fd.vision.matting.PPMatting( + model_file, params_file, config_file, runtime_option=rc.test_option) + + # 预测图片抠图结果 + im = cv2.imread("./matting_input.jpg") + result = model.predict(im.copy()) + + pkl_url = "https://bj.bcebos.com/fastdeploy/tests/ppmodnet_result.pkl" + if pkl_url: + fd.download(pkl_url, ".") + with open("./ppmodnet_result.pkl", 
"rb") as f: + baseline = pickle.load(f) + + diff = np.fabs(np.array(result.alpha) - np.array(baseline)) + thres = 1e-05 + assert diff.max() < thres, "The diff is %f, which is bigger than %f" % ( + diff.max(), thres) + + +def test_matting_pphumanmatting(): + model_url = "https://bj.bcebos.com/paddlehub/fastdeploy/PPHumanMatting.tgz" + input_url = "https://bj.bcebos.com/paddlehub/fastdeploy/matting_input.jpg" + fd.download_and_decompress(model_url, ".") + fd.download(input_url, ".") + model_path = "./PPHumanMatting" + # 配置runtime,加载模型 + runtime_option = fd.RuntimeOption() + model_file = os.path.join(model_path, "model.pdmodel") + params_file = os.path.join(model_path, "model.pdiparams") + config_file = os.path.join(model_path, "deploy.yaml") + model = fd.vision.matting.PPMatting( + model_file, params_file, config_file, runtime_option=rc.test_option) + + # 预测图片抠图结果 + im = cv2.imread("./matting_input.jpg") + result = model.predict(im.copy()) + + pkl_url = "https://bj.bcebos.com/fastdeploy/tests/pphumanmatting_result.pkl" + if pkl_url: + fd.download(pkl_url, ".") + + with open("./pphumanmatting_result.pkl", "rb") as f: + baseline = pickle.load(f) + + diff = np.fabs(np.array(result.alpha) - np.array(baseline)) + thres = 1e-05 + assert diff.max() < thres, "The diff is %f, which is bigger than %f" % ( + diff.max(), thres) diff --git a/tests/models/test_pptinypose.py b/tests/models/test_pptinypose.py new file mode 100644 index 0000000000..95cacdd5e1 --- /dev/null +++ b/tests/models/test_pptinypose.py @@ -0,0 +1,100 @@ +# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import fastdeploy as fd +import cv2 +import os +import numpy as np +import runtime_config as rc + +def test_keypointdetection_pptinypose(): + pp_tinypose_model_url = "https://bj.bcebos.com/fastdeploy/tests/PP_TinyPose_256x192_test.tgz" + fd.download_and_decompress(pp_tinypose_model_url, ".") + model_path = "./PP_TinyPose_256x192_test" + # 配置runtime,加载模型 + runtime_option = fd.RuntimeOption() + model_file = os.path.join(model_path, "model.pdmodel") + params_file = os.path.join(model_path, "model.pdiparams") + config_file = os.path.join(model_path, "infer_cfg.yml") + image_file = os.path.join(model_path, "hrnet_demo.jpg") + baseline_file = os.path.join(model_path, "baseline.npy") + model = fd.vision.keypointdetection.PPTinyPose( + model_file, params_file, config_file, runtime_option=rc.test_option) + + # 预测图片关键点 + im = cv2.imread(image_file) + result = model.predict(im) + result = np.concatenate( + (np.array(result.keypoints), np.array(result.scores)[:, np.newaxis]), + axis=1) + baseline = np.load(baseline_file) + diff = np.fabs(result - np.array(baseline)) + thres = 1e-05 + assert diff.max() < thres, "The diff is %f, which is bigger than %f" % ( + diff.max(), thres) + print("No diff") + + +def test_keypointdetection_det_keypoint_unite(): + det_keypoint_unite_model_url = "https://bj.bcebos.com/fastdeploy/tests/PicoDet_320x320_TinyPose_256x192_test.tgz" + fd.download_and_decompress(det_keypoint_unite_model_url, ".") + model_path = "./PicoDet_320x320_TinyPose_256x192_test" + # 配置runtime,加载模型 + runtime_option = fd.RuntimeOption() + tinypose_model_file = 
os.path.join( + model_path, "PP_TinyPose_256x192_infer/model.pdmodel") + tinypose_params_file = os.path.join( + model_path, "PP_TinyPose_256x192_infer/model.pdiparams") + tinypose_config_file = os.path.join( + model_path, "PP_TinyPose_256x192_infer/infer_cfg.yml") + picodet_model_file = os.path.join( + model_path, "PP_PicoDet_V2_S_Pedestrian_320x320_infer/model.pdmodel") + picodet_params_file = os.path.join( + model_path, "PP_PicoDet_V2_S_Pedestrian_320x320_infer/model.pdiparams") + picodet_config_file = os.path.join( + model_path, "PP_PicoDet_V2_S_Pedestrian_320x320_infer/infer_cfg.yml") + image_file = os.path.join(model_path, "000000018491.jpg") + # image_file = os.path.join(model_path, "hrnet_demo.jpg") + + baseline_file = os.path.join(model_path, "baseline.npy") + + tinypose_model = fd.vision.keypointdetection.PPTinyPose( + tinypose_model_file, + tinypose_params_file, + tinypose_config_file, + runtime_option=runtime_option) + + det_model = fd.vision.detection.PicoDet( + picodet_model_file, + picodet_params_file, + picodet_config_file, + runtime_option=rc.test_option) + + # 预测图片关键点 + im = cv2.imread(image_file) + pipeline = fd.pipeline.PPTinyPose(det_model, tinypose_model) + pipeline.detection_model_score_threshold = 0.5 + result = pipeline.predict(im) + print(result) + result = np.concatenate( + (np.array(result.keypoints), np.array(result.scores)[:, np.newaxis]), + axis=1) + print(result) + np.save("baseline.npy", result) + baseline = np.load(baseline_file) + diff = np.fabs(result - np.array(baseline)) + thres = 1e-05 + assert diff.max() < thres, "The diff is %f, which is bigger than %f" % ( + diff.max(), thres) + print("No diff") diff --git a/tests/models/test_pptracking.py b/tests/models/test_pptracking.py new file mode 100644 index 0000000000..42010705c3 --- /dev/null +++ b/tests/models/test_pptracking.py @@ -0,0 +1,54 @@ +# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import fastdeploy as fd +import cv2 +import os +import numpy as np +import pickle +import runtime_config as rc + + +def test_pptracking(): + model_url = "https://bj.bcebos.com/fastdeploy/tests/pptracking.tgz" + input_url = "https://bj.bcebos.com/paddlehub/fastdeploy/person.mp4" + fd.download_and_decompress(model_url, ".") + fd.download(input_url, ".") + model_path = "pptracking/fairmot_hrnetv2_w18_dlafpn_30e_576x320" + # use default backend + runtime_option = fd.RuntimeOption() + model_file = os.path.join(model_path, "model.pdmodel") + params_file = os.path.join(model_path, "model.pdiparams") + config_file = os.path.join(model_path, "infer_cfg.yml") + model = fd.vision.tracking.PPTracking(model_file, params_file, config_file, runtime_option=rc.test_option) + cap = cv2.VideoCapture("./person.mp4") + frame_id = 0 + while True: + _, frame = cap.read() + if frame is None: + break + result = model.predict(frame) + # compare diff + expect = pickle.load(open("pptracking/frame" + str(frame_id) + ".pkl", "rb")) + diff_boxes = np.fabs(np.array(expect["boxes"]) - np.array(result.boxes)) + diff_scores = np.fabs(np.array(expect["scores"]) - np.array(result.scores)) + diff = max(diff_boxes.max(), diff_scores.max()) + thres = 1e-05 + assert diff < thres, "The label diff is %f, which is bigger than %f" % (diff, thres) + frame_id = frame_id + 1 + cv2.waitKey(30) + if frame_id >= 10: + cap.release() + cv2.destroyAllWindows() + break 
diff --git a/tests/models/test_quantize_diff.py b/tests/models/test_quantize_diff.py new file mode 100755 index 0000000000..8bc7b396a8 --- /dev/null +++ b/tests/models/test_quantize_diff.py @@ -0,0 +1,96 @@ +# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import fastdeploy as fd +import cv2 +import os +import pickle +import numpy as np + +model_url = "https://bj.bcebos.com/fastdeploy/tests/yolov6_quant.tgz" +fd.download_and_decompress(model_url, ".") + + +def test_quant_mkldnn(): + model_path = "./yolov6_quant" + model_file = os.path.join(model_path, "model.pdmodel") + params_file = os.path.join(model_path, "model.pdiparams") + + input_file = os.path.join(model_path, "input.npy") + output_file = os.path.join(model_path, "mkldnn_output.npy") + + option = fd.RuntimeOption() + option.use_paddle_backend() + option.use_cpu() + + option.set_model_path(model_file, params_file) + runtime = fd.Runtime(option) + input_name = runtime.get_input_info(0).name + data = np.load(input_file) + outs = runtime.infer({input_name: data}) + expected = np.load(output_file) + diff = np.fabs(outs[0] - expected) + thres = 1e-05 + assert diff.max() < thres, "The diff is %f, which is bigger than %f" % ( + diff.max(), thres) + + +def test_quant_ort(): + model_path = "./yolov6_quant" + model_file = os.path.join(model_path, "model.pdmodel") + params_file = os.path.join(model_path, "model.pdiparams") + + input_file = 
os.path.join(model_path, "input.npy") + output_file = os.path.join(model_path, "ort_output.npy") + + option = fd.RuntimeOption() + option.use_ort_backend() + option.use_cpu() + + option.set_ort_graph_opt_level(1) + + option.set_model_path(model_file, params_file) + runtime = fd.Runtime(option) + input_name = runtime.get_input_info(0).name + data = np.load(input_file) + outs = runtime.infer({input_name: data}) + expected = np.load(output_file) + diff = np.fabs(outs[0] - expected) + thres = 1e-05 + assert diff.max() < thres, "The diff is %f, which is bigger than %f" % ( + diff.max(), thres) + + +def test_quant_trt(): + model_path = "./yolov6_quant" + model_file = os.path.join(model_path, "model.pdmodel") + params_file = os.path.join(model_path, "model.pdiparams") + + input_file = os.path.join(model_path, "input.npy") + output_file = os.path.join(model_path, "trt_output.npy") + + option = fd.RuntimeOption() + option.use_trt_backend() + option.use_gpu() + + option.set_model_path(model_file, params_file) + runtime = fd.Runtime(option) + input_name = runtime.get_input_info(0).name + data = np.load(input_file) + outs = runtime.infer({input_name: data}) + expected = np.load(output_file) + diff = np.fabs(outs[0] - expected) + thres = 1e-05 + assert diff.max() < thres, "The diff is %f, which is bigger than %f" % ( + diff.max(), thres) diff --git a/tests/models/test_rvm.py b/tests/models/test_rvm.py new file mode 100644 index 0000000000..23fd544c66 --- /dev/null +++ b/tests/models/test_rvm.py @@ -0,0 +1,54 @@ +# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import fastdeploy as fd +import cv2 +import os +import pickle +import numpy as np +import runtime_config as rc + +def test_matting_rvm_cpu(): + model_url = "https://bj.bcebos.com/paddlehub/fastdeploy/rvm.tgz" + input_url = "https://bj.bcebos.com/paddlehub/fastdeploy/video.mp4" + fd.download_and_decompress(model_url, ".") + fd.download(input_url, ".") + model_path = "rvm/rvm_mobilenetv3_fp32.onnx" + # use ORT + runtime_option.use_ort_backend() + model = fd.vision.matting.RobustVideoMatting( + model_path, runtime_option=rc.test_option) + + cap = cv2.VideoCapture(input_url) + + frame_id = 0 + while True: + _, frame = cap.read() + if frame is None: + break + result = model.predict(frame) + # compare diff + expect_alpha = np.load("rvm/result_alpha_" + str(frame_id) + ".npy") + result_alpha = np.array(result.alpha).reshape(1920, 1080) + diff = np.fabs(expect_alpha - result_alpha) + thres = 1e-05 + assert diff.max( + ) < thres, "The label diff is %f, which is bigger than %f" % ( + diff.max(), thres) + frame_id = frame_id + 1 + cv2.waitKey(30) + if frame_id >= 10: + cap.release() + cv2.destroyAllWindows() + break diff --git a/tests/models/test_yolov5cls.py b/tests/models/test_yolov5cls.py new file mode 100755 index 0000000000..aeafad5196 --- /dev/null +++ b/tests/models/test_yolov5cls.py @@ -0,0 +1,49 @@ +# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import fastdeploy as fd +import cv2 +import os +import pickle +import numpy as np +import runtime_config as rc + +def test_classification_yolov5cls(): + model_url = "https://bj.bcebos.com/paddlehub/fastdeploy/yolov5n-cls.tgz" + input_url = "https://gitee.com/paddlepaddle/PaddleClas/raw/release/2.4/deploy/images/ImageNet/ILSVRC2012_val_00000010.jpeg" + fd.download_and_decompress(model_url, ".") + fd.download(input_url, ".") + model_path = "yolov5n-cls/yolov5n-cls.onnx" + # use ORT + runtime_option = fd.RuntimeOption() + runtime_option.use_ort_backend() + model = fd.vision.classification.YOLOv5Cls( + model_path, runtime_option=rc.test_option) + + # compare diff + im = cv2.imread("./ILSVRC2012_val_00000010.jpeg") + result = model.predict(im.copy(), topk=5) + with open("yolov5n-cls/result.pkl", "rb") as f: + expect = pickle.load(f) + + diff_label = np.fabs( + np.array(result.label_ids) - np.array(expect["labels"])) + diff_score = np.fabs(np.array(result.scores) - np.array(expect["scores"])) + thres = 1e-05 + assert diff_label.max( + ) < thres, "The label diff is %f, which is bigger than %f" % ( + diff_label.max(), thres) + assert diff_score.max( + ) < thres, "The score diff is %f, which is bigger than %f" % ( + diff_score.max(), thres) From cf8f53e36d376dc0f6235d494cd08fb81225c749 Mon Sep 17 00:00:00 2001 From: jiangjiajun Date: Sat, 5 Nov 2022 07:54:16 +0000 Subject: [PATCH 03/30] Modify model tests --- tests/models/README.md | 14 +++++ tests/models/test_pfld.py | 25 +++++---- tests/models/test_ppmatting.py | 98 +++++++++++++++++---------------- 
tests/models/test_pptinypose.py | 10 ++-- tests/models/test_pptracking.py | 10 ++-- tests/models/test_rvm.py | 9 ++- tests/models/test_yolov5cls.py | 37 +++++++------ 7 files changed, 111 insertions(+), 92 deletions(-) create mode 100644 tests/models/README.md diff --git a/tests/models/README.md b/tests/models/README.md new file mode 100644 index 0000000000..c80ccc9ddf --- /dev/null +++ b/tests/models/README.md @@ -0,0 +1,14 @@ +# 添加模型单测 + + +所有模型统一使用`runtime_config.py`中的RuntimeOption进行配置 + +``` +import runtime_config as rc + + +model = fd.vision.XXX(..., runtime_option=rc.test_option) +``` + + +验证For循环跑2+次与Baseline结果符合预期 diff --git a/tests/models/test_pfld.py b/tests/models/test_pfld.py index ef1ba448e3..8d455b1652 100644 --- a/tests/models/test_pfld.py +++ b/tests/models/test_pfld.py @@ -22,19 +22,20 @@ def test_facealignment_pfld(): model_url = "https://bj.bcebos.com/paddlehub/fastdeploy/pfld-106-lite.onnx" input_url = "https://bj.bcebos.com/paddlehub/fastdeploy/facealign_input.png" output_url = "https://bj.bcebos.com/paddlehub/fastdeploy/result_landmarks.npy" - fd.download(model_url, ".") - fd.download(input_url, ".") - fd.download(output_url, ".") - model_path = "pfld-106-lite.onnx" + fd.download(model_url, "resources") + fd.download(input_url, "resources") + fd.download(output_url, "resources") + model_path = "resources/pfld-106-lite.onnx" # use ORT model = fd.vision.facealign.PFLD(model_path, runtime_option=rc.test_option) # compare diff - im = cv2.imread("./facealign_input.png") - result = model.predict(im.copy()) - expect = np.load("./result_landmarks.npy") - - diff = np.fabs(np.array(result.landmarks) - expect) - thres = 1e-04 - assert diff.max() < thres, "The diff is %f, which is bigger than %f" % ( - diff.max(), thres) + im = cv2.imread("resources/facealign_input.png") + for i in range(2): + result = model.predict(im) + expect = np.load("resources/result_landmarks.npy") + + diff = np.fabs(np.array(result.landmarks) - expect) + thres = 1e-04 + assert 
diff.max() < thres, "The diff is %f, which is bigger than %f" % ( + diff.max(), thres) diff --git a/tests/models/test_ppmatting.py b/tests/models/test_ppmatting.py index 78a085a5f3..8021f5b221 100644 --- a/tests/models/test_ppmatting.py +++ b/tests/models/test_ppmatting.py @@ -22,9 +22,9 @@ import runtime_config as rc def test_matting_ppmatting(): model_url = "https://bj.bcebos.com/paddlehub/fastdeploy/PP-Matting-512.tgz" input_url = "https://bj.bcebos.com/paddlehub/fastdeploy/matting_input.jpg" - fd.download_and_decompress(model_url, ".") - fd.download(input_url, ".") - model_path = "./PP-Matting-512" + fd.download_and_decompress(model_url, "resources") + fd.download(input_url, "resources") + model_path = "./resources/PP-Matting-512" model_file = os.path.join(model_path, "model.pdmodel") params_file = os.path.join(model_path, "model.pdiparams") config_file = os.path.join(model_path, "deploy.yaml") @@ -32,26 +32,27 @@ def test_matting_ppmatting(): model_file, params_file, config_file, runtime_option=rc.test_option) # 预测图片抠图结果 - im = cv2.imread("./matting_input.jpg") - result = model.predict(im.copy()) - pkl_url = "https://bj.bcebos.com/fastdeploy/tests/ppmatting_result.pkl" - if pkl_url: - fd.download(pkl_url, ".") - with open("./ppmatting_result.pkl", "rb") as f: - baseline = pickle.load(f) - - diff = np.fabs(np.array(result.alpha) - np.array(baseline)) - thres = 1e-05 - assert diff.max() < thres, "The diff is %f, which is bigger than %f" % ( - diff.max(), thres) + im = cv2.imread("./resources/matting_input.jpg") + for i in range(2): + result = model.predict(im) + pkl_url = "https://bj.bcebos.com/fastdeploy/tests/ppmatting_result.pkl" + if pkl_url: + fd.download(pkl_url, "resources") + with open("./resources/ppmatting_result.pkl", "rb") as f: + baseline = pickle.load(f) + + diff = np.fabs(np.array(result.alpha) - np.array(baseline)) + thres = 1e-05 + assert diff.max() < thres, "The diff is %f, which is bigger than %f" % ( + diff.max(), thres) def 
test_matting_ppmodnet(): model_url = "https://bj.bcebos.com/paddlehub/fastdeploy/PPModnet_MobileNetV2.tgz" input_url = "https://bj.bcebos.com/paddlehub/fastdeploy/matting_input.jpg" - fd.download_and_decompress(model_url, ".") - fd.download(input_url, ".") - model_path = "./PPModnet_MobileNetV2" + fd.download_and_decompress(model_url, "resources") + fd.download(input_url, "resources") + model_path = "./resources/PPModnet_MobileNetV2" model_file = os.path.join(model_path, "model.pdmodel") params_file = os.path.join(model_path, "model.pdiparams") config_file = os.path.join(model_path, "deploy.yaml") @@ -59,27 +60,29 @@ def test_matting_ppmodnet(): model_file, params_file, config_file, runtime_option=rc.test_option) # 预测图片抠图结果 - im = cv2.imread("./matting_input.jpg") - result = model.predict(im.copy()) + im = cv2.imread("./resources/matting_input.jpg") - pkl_url = "https://bj.bcebos.com/fastdeploy/tests/ppmodnet_result.pkl" - if pkl_url: - fd.download(pkl_url, ".") - with open("./ppmodnet_result.pkl", "rb") as f: - baseline = pickle.load(f) - - diff = np.fabs(np.array(result.alpha) - np.array(baseline)) - thres = 1e-05 - assert diff.max() < thres, "The diff is %f, which is bigger than %f" % ( - diff.max(), thres) + for i in range(2): + result = model.predict(im) + + pkl_url = "https://bj.bcebos.com/fastdeploy/tests/ppmodnet_result.pkl" + if pkl_url: + fd.download(pkl_url, "resources") + with open("./resources/ppmodnet_result.pkl", "rb") as f: + baseline = pickle.load(f) + + diff = np.fabs(np.array(result.alpha) - np.array(baseline)) + thres = 1e-05 + assert diff.max() < thres, "The diff is %f, which is bigger than %f" % ( + diff.max(), thres) def test_matting_pphumanmatting(): model_url = "https://bj.bcebos.com/paddlehub/fastdeploy/PPHumanMatting.tgz" input_url = "https://bj.bcebos.com/paddlehub/fastdeploy/matting_input.jpg" - fd.download_and_decompress(model_url, ".") - fd.download(input_url, ".") - model_path = "./PPHumanMatting" + 
fd.download_and_decompress(model_url, "resources") + fd.download(input_url, "resources") + model_path = "./resources/PPHumanMatting" # 配置runtime,加载模型 runtime_option = fd.RuntimeOption() model_file = os.path.join(model_path, "model.pdmodel") @@ -89,17 +92,18 @@ def test_matting_pphumanmatting(): model_file, params_file, config_file, runtime_option=rc.test_option) # 预测图片抠图结果 - im = cv2.imread("./matting_input.jpg") - result = model.predict(im.copy()) - - pkl_url = "https://bj.bcebos.com/fastdeploy/tests/pphumanmatting_result.pkl" - if pkl_url: - fd.download(pkl_url, ".") - - with open("./pphumanmatting_result.pkl", "rb") as f: - baseline = pickle.load(f) - - diff = np.fabs(np.array(result.alpha) - np.array(baseline)) - thres = 1e-05 - assert diff.max() < thres, "The diff is %f, which is bigger than %f" % ( - diff.max(), thres) + im = cv2.imread("./resources/matting_input.jpg") + for i in range(2): + result = model.predict(im) + + pkl_url = "https://bj.bcebos.com/fastdeploy/tests/pphumanmatting_result.pkl" + if pkl_url: + fd.download(pkl_url, "resources") + + with open("./resources/pphumanmatting_result.pkl", "rb") as f: + baseline = pickle.load(f) + + diff = np.fabs(np.array(result.alpha) - np.array(baseline)) + thres = 1e-05 + assert diff.max() < thres, "The diff is %f, which is bigger than %f" % ( + diff.max(), thres) diff --git a/tests/models/test_pptinypose.py b/tests/models/test_pptinypose.py index 95cacdd5e1..fe838c4902 100644 --- a/tests/models/test_pptinypose.py +++ b/tests/models/test_pptinypose.py @@ -20,8 +20,8 @@ import runtime_config as rc def test_keypointdetection_pptinypose(): pp_tinypose_model_url = "https://bj.bcebos.com/fastdeploy/tests/PP_TinyPose_256x192_test.tgz" - fd.download_and_decompress(pp_tinypose_model_url, ".") - model_path = "./PP_TinyPose_256x192_test" + fd.download_and_decompress(pp_tinypose_model_url, "resources") + model_path = "./resources/PP_TinyPose_256x192_test" # 配置runtime,加载模型 runtime_option = fd.RuntimeOption() model_file = 
os.path.join(model_path, "model.pdmodel") @@ -48,8 +48,8 @@ def test_keypointdetection_pptinypose(): def test_keypointdetection_det_keypoint_unite(): det_keypoint_unite_model_url = "https://bj.bcebos.com/fastdeploy/tests/PicoDet_320x320_TinyPose_256x192_test.tgz" - fd.download_and_decompress(det_keypoint_unite_model_url, ".") - model_path = "./PicoDet_320x320_TinyPose_256x192_test" + fd.download_and_decompress(det_keypoint_unite_model_url, "resources") + model_path = "./resources/PicoDet_320x320_TinyPose_256x192_test" # 配置runtime,加载模型 runtime_option = fd.RuntimeOption() tinypose_model_file = os.path.join( @@ -91,7 +91,7 @@ def test_keypointdetection_det_keypoint_unite(): (np.array(result.keypoints), np.array(result.scores)[:, np.newaxis]), axis=1) print(result) - np.save("baseline.npy", result) + np.save("resources/baseline.npy", result) baseline = np.load(baseline_file) diff = np.fabs(result - np.array(baseline)) thres = 1e-05 diff --git a/tests/models/test_pptracking.py b/tests/models/test_pptracking.py index 42010705c3..b8842c73f8 100644 --- a/tests/models/test_pptracking.py +++ b/tests/models/test_pptracking.py @@ -23,16 +23,16 @@ import runtime_config as rc def test_pptracking(): model_url = "https://bj.bcebos.com/fastdeploy/tests/pptracking.tgz" input_url = "https://bj.bcebos.com/paddlehub/fastdeploy/person.mp4" - fd.download_and_decompress(model_url, ".") - fd.download(input_url, ".") - model_path = "pptracking/fairmot_hrnetv2_w18_dlafpn_30e_576x320" + fd.download_and_decompress(model_url, "resources") + fd.download(input_url, "resources") + model_path = "resources/pptracking/fairmot_hrnetv2_w18_dlafpn_30e_576x320" # use default backend runtime_option = fd.RuntimeOption() model_file = os.path.join(model_path, "model.pdmodel") params_file = os.path.join(model_path, "model.pdiparams") config_file = os.path.join(model_path, "infer_cfg.yml") model = fd.vision.tracking.PPTracking(model_file, params_file, config_file, runtime_option=rc.test_option) - cap = 
cv2.VideoCapture("./person.mp4") + cap = cv2.VideoCapture("./resources/person.mp4") frame_id = 0 while True: _, frame = cap.read() @@ -40,7 +40,7 @@ def test_pptracking(): break result = model.predict(frame) # compare diff - expect = pickle.load(open("pptracking/frame" + str(frame_id) + ".pkl", "rb")) + expect = pickle.load(open("resources/pptracking/frame" + str(frame_id) + ".pkl", "rb")) diff_boxes = np.fabs(np.array(expect["boxes"]) - np.array(result.boxes)) diff_scores = np.fabs(np.array(expect["scores"]) - np.array(result.scores)) diff = max(diff_boxes.max(), diff_scores.max()) diff --git a/tests/models/test_rvm.py b/tests/models/test_rvm.py index 23fd544c66..10d680948f 100644 --- a/tests/models/test_rvm.py +++ b/tests/models/test_rvm.py @@ -22,11 +22,10 @@ import runtime_config as rc def test_matting_rvm_cpu(): model_url = "https://bj.bcebos.com/paddlehub/fastdeploy/rvm.tgz" input_url = "https://bj.bcebos.com/paddlehub/fastdeploy/video.mp4" - fd.download_and_decompress(model_url, ".") - fd.download(input_url, ".") - model_path = "rvm/rvm_mobilenetv3_fp32.onnx" + fd.download_and_decompress(model_url, "resources") + fd.download(input_url, "resources") + model_path = "resources/rvm/rvm_mobilenetv3_fp32.onnx" # use ORT - runtime_option.use_ort_backend() model = fd.vision.matting.RobustVideoMatting( model_path, runtime_option=rc.test_option) @@ -39,7 +38,7 @@ def test_matting_rvm_cpu(): break result = model.predict(frame) # compare diff - expect_alpha = np.load("rvm/result_alpha_" + str(frame_id) + ".npy") + expect_alpha = np.load("resources/rvm/result_alpha_" + str(frame_id) + ".npy") result_alpha = np.array(result.alpha).reshape(1920, 1080) diff = np.fabs(expect_alpha - result_alpha) thres = 1e-05 diff --git a/tests/models/test_yolov5cls.py b/tests/models/test_yolov5cls.py index aeafad5196..a7070de4d6 100755 --- a/tests/models/test_yolov5cls.py +++ b/tests/models/test_yolov5cls.py @@ -22,9 +22,9 @@ import runtime_config as rc def test_classification_yolov5cls(): 
model_url = "https://bj.bcebos.com/paddlehub/fastdeploy/yolov5n-cls.tgz" input_url = "https://gitee.com/paddlepaddle/PaddleClas/raw/release/2.4/deploy/images/ImageNet/ILSVRC2012_val_00000010.jpeg" - fd.download_and_decompress(model_url, ".") - fd.download(input_url, ".") - model_path = "yolov5n-cls/yolov5n-cls.onnx" + fd.download_and_decompress(model_url, "resources") + fd.download(input_url, "resources") + model_path = "resources/yolov5n-cls/yolov5n-cls.onnx" # use ORT runtime_option = fd.RuntimeOption() runtime_option.use_ort_backend() @@ -32,18 +32,19 @@ def test_classification_yolov5cls(): model_path, runtime_option=rc.test_option) # compare diff - im = cv2.imread("./ILSVRC2012_val_00000010.jpeg") - result = model.predict(im.copy(), topk=5) - with open("yolov5n-cls/result.pkl", "rb") as f: - expect = pickle.load(f) - - diff_label = np.fabs( - np.array(result.label_ids) - np.array(expect["labels"])) - diff_score = np.fabs(np.array(result.scores) - np.array(expect["scores"])) - thres = 1e-05 - assert diff_label.max( - ) < thres, "The label diff is %f, which is bigger than %f" % ( - diff_label.max(), thres) - assert diff_score.max( - ) < thres, "The score diff is %f, which is bigger than %f" % ( - diff_score.max(), thres) + im = cv2.imread("./resources/ILSVRC2012_val_00000010.jpeg") + for i in range(2): + result = model.predict(im, topk=5) + with open("resources/yolov5n-cls/result.pkl", "rb") as f: + expect = pickle.load(f) + + diff_label = np.fabs( + np.array(result.label_ids) - np.array(expect["labels"])) + diff_score = np.fabs(np.array(result.scores) - np.array(expect["scores"])) + thres = 1e-05 + assert diff_label.max( + ) < thres, "The label diff is %f, which is bigger than %f" % ( + diff_label.max(), thres) + assert diff_score.max( + ) < thres, "The score diff is %f, which is bigger than %f" % ( + diff_score.max(), thres) From 19a6941f0470a0eb95cc51e0cfbd8d0af07d8d00 Mon Sep 17 00:00:00 2001 From: Jason Date: Sat, 5 Nov 2022 17:48:50 +0800 Subject: [PATCH 
04/30] [Model] Move letter box resize code (#502) * Remove letter box resize code * Remove letter box resize code * Add model test for mobilenetv2 --- fastdeploy/vision/common/processors/base.h | 5 +- .../vision/common/processors/transform.h | 1 - .../pptracking/letter_box_resize.cc} | 29 ++++------- .../pptracking/letter_box_resize.h} | 8 +-- .../vision/tracking/pptracking/model.cc | 1 + tests/models/test_mobilenetv2.py | 50 +++++++++++++++++++ 6 files changed, 68 insertions(+), 26 deletions(-) rename fastdeploy/vision/{common/processors/letter_box.cc => tracking/pptracking/letter_box_resize.cc} (65%) rename fastdeploy/vision/{common/processors/letter_box.h => tracking/pptracking/letter_box_resize.h} (87%) create mode 100755 tests/models/test_mobilenetv2.py diff --git a/fastdeploy/vision/common/processors/base.h b/fastdeploy/vision/common/processors/base.h index bb414669af..6c67d10bca 100644 --- a/fastdeploy/vision/common/processors/base.h +++ b/fastdeploy/vision/common/processors/base.h @@ -41,7 +41,10 @@ class FASTDEPLOY_DECL Processor { virtual std::string Name() = 0; - virtual bool ImplByOpenCV(Mat* mat) = 0; + virtual bool ImplByOpenCV(Mat* mat) { + FDERROR << Name() << " Not Implement Yet." 
<< std::endl; + return false; + } virtual bool ImplByFlyCV(Mat* mat) { return ImplByOpenCV(mat); diff --git a/fastdeploy/vision/common/processors/transform.h b/fastdeploy/vision/common/processors/transform.h index 7952caca34..9054ade55f 100644 --- a/fastdeploy/vision/common/processors/transform.h +++ b/fastdeploy/vision/common/processors/transform.h @@ -20,7 +20,6 @@ #include "fastdeploy/vision/common/processors/convert.h" #include "fastdeploy/vision/common/processors/crop.h" #include "fastdeploy/vision/common/processors/hwc2chw.h" -#include "fastdeploy/vision/common/processors/letter_box.h" #include "fastdeploy/vision/common/processors/limit_by_stride.h" #include "fastdeploy/vision/common/processors/limit_long.h" #include "fastdeploy/vision/common/processors/limit_short.h" diff --git a/fastdeploy/vision/common/processors/letter_box.cc b/fastdeploy/vision/tracking/pptracking/letter_box_resize.cc similarity index 65% rename from fastdeploy/vision/common/processors/letter_box.cc rename to fastdeploy/vision/tracking/pptracking/letter_box_resize.cc index 423aef1d36..a2747def1f 100644 --- a/fastdeploy/vision/common/processors/letter_box.cc +++ b/fastdeploy/vision/tracking/pptracking/letter_box_resize.cc @@ -12,17 +12,18 @@ // See the License for the specific language governing permissions and // limitations under the License. -#include "fastdeploy/vision/common/processors/letter_box.h" +#include "fastdeploy/vision/tracking/pptracking/letter_box_resize.h" +#include "fastdeploy/vision/common/processors/transform.h" namespace fastdeploy{ namespace vision{ -bool LetterBoxResize::ImplByOpenCV(Mat* mat) { +bool LetterBoxResize::operator()(Mat* mat, ProcLib lib) { if (mat->Channels() != color_.size()) { - FDERROR << "Pad: Require input channels equals to size of padding value, " + FDERROR << "LetterBoxResize: Require input channels equals to size of color value, " "but now channels = " << mat->Channels() - << ", the size of padding values = " << color_.size() << "." 
+ << ", the size of color values = " << color_.size() << "." << std::endl; return false; } @@ -36,8 +37,8 @@ bool LetterBoxResize::ImplByOpenCV(Mat* mat) { float ratio_w = static_cast(target_w) / static_cast(origin_w); float resize_scale = std::min(ratio_h, ratio_w); // get_resized_shape - int new_shape_w = std::round(im->cols * resize_scale); - int new_shape_h = std::round(im->rows * resize_scale); + int new_shape_w = std::round(origin_w * resize_scale); + int new_shape_h = std::round(origin_h * resize_scale); // calculate pad float padw = (target_size_[1] - new_shape_w) / 2.; float padh = (target_size_[0] - new_shape_h) / 2.; @@ -45,20 +46,8 @@ bool LetterBoxResize::ImplByOpenCV(Mat* mat) { int bottom = std::round(padh + 0.1); int left = std::round(padw - 0.1); int right = std::round(padw + 0.1); - cv::resize(*im, *im, cv::Size(new_shape_w, new_shape_h), 0, 0, cv::INTER_AREA); - cv::Scalar color; - if (color_.size() == 1) { - color = cv::Scalar(color_[0]); - } else if (color_.size() == 2) { - color = cv::Scalar(color_[0], color_[1]); - } else if (color_.size() == 3) { - color = cv::Scalar(color_[0], color_[1], color_[2]); - } else { - color = cv::Scalar(color_[0], color_[1], color_[2], color_[3]); - } - cv::copyMakeBorder(*im, *im, top, bottom, left, right, cv::BORDER_CONSTANT, color); - mat->SetWidth(im->cols); - mat->SetHeight(im->rows); + Resize::Run(mat, new_shape_w, new_shape_h, -1.0, -1.0, 3, false, lib); + Pad::Run(mat, top, bottom, left, right, color_, lib); return true; } diff --git a/fastdeploy/vision/common/processors/letter_box.h b/fastdeploy/vision/tracking/pptracking/letter_box_resize.h similarity index 87% rename from fastdeploy/vision/common/processors/letter_box.h rename to fastdeploy/vision/tracking/pptracking/letter_box_resize.h index f69647e892..77e04557b8 100644 --- a/fastdeploy/vision/common/processors/letter_box.h +++ b/fastdeploy/vision/tracking/pptracking/letter_box_resize.h @@ -27,13 +27,13 @@ class LetterBoxResize : public Processor { 
color_ = color; } - bool ImplByOpenCV(Mat* mat); - std::string Name() { return "LetterBoxResize"; } + virtual bool operator()(Mat* mat, ProcLib lib = ProcLib::DEFAULT); + static bool Run(Mat* mat, const std::vector& target_size, - const std::vector& color, - ProcLib lib = ProcLib::DEFAULT); + const std::vector& color, + ProcLib lib = ProcLib::DEFAULT); private: std::vector target_size_; diff --git a/fastdeploy/vision/tracking/pptracking/model.cc b/fastdeploy/vision/tracking/pptracking/model.cc index 0ae550ad24..2047497b92 100644 --- a/fastdeploy/vision/tracking/pptracking/model.cc +++ b/fastdeploy/vision/tracking/pptracking/model.cc @@ -13,6 +13,7 @@ // limitations under the License. #include "fastdeploy/vision/tracking/pptracking/model.h" +#include "fastdeploy/vision/tracking/pptracking/letter_box_resize.h" #include "yaml-cpp/yaml.h" namespace fastdeploy { diff --git a/tests/models/test_mobilenetv2.py b/tests/models/test_mobilenetv2.py new file mode 100755 index 0000000000..c2cec02208 --- /dev/null +++ b/tests/models/test_mobilenetv2.py @@ -0,0 +1,50 @@ +# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import fastdeploy as fd +import cv2 +import os +import pickle +import numpy as np +import runtime_config as rc + + +def test_classification_mobilenetv2(): + model_url = "https://bj.bcebos.com/paddlehub/fastdeploy/MobileNetV1_x0_25_infer.tgz" + input_url = "https://gitee.com/paddlepaddle/PaddleClas/raw/release/2.4/deploy/images/ImageNet/ILSVRC2012_val_00000010.jpeg" + fd.download_and_decompress(model_url, "resources") + fd.download(input_url, "resources") + model_path = "resources/MobileNetV1_x0_25_infer" + + model_file = "resources/MobileNetV1_x0_25_infer/inference.pdmodel" + params_file = "resources/MobileNetV1_x0_25_infer/inference.pdiparams" + config_file = "resources/MobileNetV1_x0_25_infer/inference_cls.yaml" + model = fd.vision.classification.PaddleClasModel( + model_file, params_file, config_file, runtime_option=rc.test_option) + + expected_label_ids = [153, 333, 259, 338, 265, 154] + expected_scores = [ + 0.221088, 0.109457, 0.078668, 0.076814, 0.052401, 0.048206 + ] + # compare diff + im = cv2.imread("./resources/ILSVRC2012_val_00000010.jpeg") + for i in range(2): + result = model.predict(im, topk=6) + diff_label = np.fabs( + np.array(result.label_ids) - np.array(expected_label_ids)) + diff_scores = np.fabs( + np.array(result.scores) - np.array(expected_scores)) + assert diff_label.max() < 1e-06, "There's difference in classify label." + assert diff_scores.max( + ) < 1e-05, "There's difference in classify score." 
From 40e080a40cf6b6a42c08e7b9caaaaa627bc69bf5 Mon Sep 17 00:00:00 2001 From: leiqing <54695910+leiqing1@users.noreply.github.com> Date: Sun, 6 Nov 2022 07:22:05 +0800 Subject: [PATCH 05/30] Update README_CN.md --- README_CN.md | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/README_CN.md b/README_CN.md index ab6030aa23..4c61b6c4a6 100644 --- a/README_CN.md +++ b/README_CN.md @@ -28,6 +28,13 @@ ## 近期更新 +- 🔥 **2022.11.09 20:30~21:30,【直播分享】《覆盖云边端全场景,150+热门模型快速部署》。扫码报名** +- 🔥 **2022.11.10 20:30~21:30,【直播分享】《瑞芯微、晶晨、恩智浦等10+AI硬件部署,直达产业落地》。扫码报名** +- 🔥 **2022.11.10 19:00~20:00,【直播分享】《10+热门模型在RK3588、RK3568部署实战》。扫码报名** +-
+ +
+ - 🔥 **2022.10.15:Release FastDeploy [release v0.3.0](https://github.com/PaddlePaddle/FastDeploy/tree/release%2F0.3.0)**
- **New server-side deployment upgrade:更快的推理性能,一键量化,更多的视觉和NLP模型** - 集成 OpenVINO 推理引擎,并且保证了使用 OpenVINO 与 使用 TensorRT、ONNX Runtime、 Paddle Inference一致的开发体验; @@ -278,7 +285,7 @@ int main(int argc, char* argv[]) { - **加入社区👬:** 微信扫描二维码,进入**FastDeploy技术交流群**
- +
## Acknowledge From 295af8f4676d2a4945589334ac7e1581a58ff71f Mon Sep 17 00:00:00 2001 From: DefTruth <31974251+DefTruth@users.noreply.github.com> Date: Sun, 6 Nov 2022 13:29:46 +0800 Subject: [PATCH 06/30] [FlyCV] update vision processors with latest FlyCV API (#503) * [Other] Add static create methods to Mat * [Other] avoid field name conflicts * [FlyCV] update vision processors with latest FlyCV API * [FlyCV] update vision processors with latest FlyCV API * [Model] update rvm cv::Mat usage to FD Mat usage --- cmake/flycv.cmake | 2 +- fastdeploy/vision/common/processors/mat.cc | 17 ----- fastdeploy/vision/common/processors/mat.h | 1 - .../vision/common/processors/normalize.cc | 2 +- .../vision/common/processors/proc_lib.cc | 17 +++++ .../vision/common/processors/proc_lib.h | 2 + fastdeploy/vision/common/processors/utils.cc | 62 +++++++++---------- fastdeploy/vision/matting/contrib/rvm.cc | 14 +++-- 8 files changed, 60 insertions(+), 57 deletions(-) diff --git a/cmake/flycv.cmake b/cmake/flycv.cmake index 52a77fb806..9bb56df5fe 100644 --- a/cmake/flycv.cmake +++ b/cmake/flycv.cmake @@ -64,7 +64,7 @@ else() endif(WIN32) set(FLYCV_URL_BASE "https://bj.bcebos.com/fastdeploy/third_libs/") -set(FLYCV_VERSION "1.0.1025") +set(FLYCV_VERSION "1.3") if(WIN32) message(FATAL_ERROR "FlyCV is not supported on Windows now.") set(FLYCV_FILE "flycv-win-x64-${FLYCV_VERSION}.zip") diff --git a/fastdeploy/vision/common/processors/mat.cc b/fastdeploy/vision/common/processors/mat.cc index ba0eddcb1f..e2a64ea04e 100644 --- a/fastdeploy/vision/common/processors/mat.cc +++ b/fastdeploy/vision/common/processors/mat.cc @@ -101,23 +101,6 @@ FDDataType Mat::Type() { return OpenCVDataTypeToFD(cpu_mat.type()); } -std::ostream& operator<<(std::ostream& out, const ProcLib& p) { - switch (p) { - case ProcLib::DEFAULT: - out << "ProcLib::DEFAULT"; - break; - case ProcLib::OPENCV: - out << "ProcLib::OPENCV"; - break; - case ProcLib::FLYCV: - out << "ProcLib::FLYCV"; - break; - default: - 
FDASSERT(false, "Unknow type of ProcLib."); - } - return out; -} - Mat Mat::Create(const FDTensor& tensor) { if (DefaultProcLib::default_lib == ProcLib::FLYCV) { #ifdef ENABLE_FLYCV diff --git a/fastdeploy/vision/common/processors/mat.h b/fastdeploy/vision/common/processors/mat.h index cfec2fbd5e..5e618057ca 100644 --- a/fastdeploy/vision/common/processors/mat.h +++ b/fastdeploy/vision/common/processors/mat.h @@ -22,7 +22,6 @@ namespace vision { enum Layout { HWC, CHW }; -FASTDEPLOY_DECL std::ostream& operator<<(std::ostream& out, const ProcLib& p); struct FASTDEPLOY_DECL Mat { explicit Mat(const cv::Mat& mat) { diff --git a/fastdeploy/vision/common/processors/normalize.cc b/fastdeploy/vision/common/processors/normalize.cc index cf4888613c..726ba67a74 100644 --- a/fastdeploy/vision/common/processors/normalize.cc +++ b/fastdeploy/vision/common/processors/normalize.cc @@ -80,7 +80,7 @@ bool Normalize::ImplByFlyCV(Mat* mat) { mean[i] = -1 * beta_[i] * std[i]; } fcv::Mat new_im(im->width(), im->height(), - fcv::FCVImageType::PACKAGE_BGR_F32); + fcv::FCVImageType::PKG_BGR_F32); fcv::normalize_to_submean_to_reorder(*im, mean, std, std::vector(), new_im, true); mat->SetMat(new_im); diff --git a/fastdeploy/vision/common/processors/proc_lib.cc b/fastdeploy/vision/common/processors/proc_lib.cc index e5009d9a63..07f8e83fd0 100644 --- a/fastdeploy/vision/common/processors/proc_lib.cc +++ b/fastdeploy/vision/common/processors/proc_lib.cc @@ -19,5 +19,22 @@ namespace vision { ProcLib DefaultProcLib::default_lib = ProcLib::DEFAULT; +std::ostream& operator<<(std::ostream& out, const ProcLib& p) { + switch (p) { + case ProcLib::DEFAULT: + out << "ProcLib::DEFAULT"; + break; + case ProcLib::OPENCV: + out << "ProcLib::OPENCV"; + break; + case ProcLib::FLYCV: + out << "ProcLib::FLYCV"; + break; + default: + FDASSERT(false, "Unknow type of ProcLib."); + } + return out; +} + } // namespace vision } // namespace fastdeploy diff --git a/fastdeploy/vision/common/processors/proc_lib.h 
b/fastdeploy/vision/common/processors/proc_lib.h index 6db6b5177e..deee4b4f23 100644 --- a/fastdeploy/vision/common/processors/proc_lib.h +++ b/fastdeploy/vision/common/processors/proc_lib.h @@ -20,6 +20,8 @@ namespace vision { enum class FASTDEPLOY_DECL ProcLib { DEFAULT, OPENCV, FLYCV }; +FASTDEPLOY_DECL std::ostream& operator<<(std::ostream& out, const ProcLib& p); + struct FASTDEPLOY_DECL DefaultProcLib { // default_lib has the highest priority // all the function in `processor` will force to use diff --git a/fastdeploy/vision/common/processors/utils.cc b/fastdeploy/vision/common/processors/utils.cc index e38a1687c8..4d2c9a0d02 100644 --- a/fastdeploy/vision/common/processors/utils.cc +++ b/fastdeploy/vision/common/processors/utils.cc @@ -73,61 +73,61 @@ int CreateOpenCVDataType(FDDataType type, int channel) { FDDataType FlyCVDataTypeToFD(fcv::FCVImageType type) { if (type == fcv::FCVImageType::GRAY_U8) { return FDDataType::UINT8; - } else if (type == fcv::FCVImageType::PACKAGE_BGR_U8) { + } else if (type == fcv::FCVImageType::PKG_BGR_U8) { return FDDataType::UINT8; - } else if (type == fcv::FCVImageType::PACKAGE_RGB_U8) { + } else if (type == fcv::FCVImageType::PKG_RGB_U8) { return FDDataType::UINT8; - } else if (type == fcv::FCVImageType::PACKAGE_BGR_U8) { + } else if (type == fcv::FCVImageType::PKG_BGR_U8) { return FDDataType::UINT8; - } else if (type == fcv::FCVImageType::PACKAGE_RGB_U8) { + } else if (type == fcv::FCVImageType::PKG_RGB_U8) { return FDDataType::UINT8; - } else if (type == fcv::FCVImageType::PLANAR_BGR_U8) { + } else if (type == fcv::FCVImageType::PLA_BGR_U8) { return FDDataType::UINT8; - } else if (type == fcv::FCVImageType::PLANAR_RGB_U8) { + } else if (type == fcv::FCVImageType::PLA_RGB_U8) { return FDDataType::UINT8; - } else if (type == fcv::FCVImageType::PLANAR_BGRA_U8) { + } else if (type == fcv::FCVImageType::PLA_BGRA_U8) { return FDDataType::UINT8; - } else if (type == fcv::FCVImageType::PLANAR_RGBA_U8) { + } else if (type == 
fcv::FCVImageType::PLA_RGBA_U8) { return FDDataType::UINT8; - } else if (type == fcv::FCVImageType::PLANAR_BGR_F32) { + } else if (type == fcv::FCVImageType::PLA_BGR_F32) { return FDDataType::FP32; - } else if (type == fcv::FCVImageType::PLANAR_RGB_F32) { + } else if (type == fcv::FCVImageType::PLA_RGB_F32) { return FDDataType::FP32; - } else if (type == fcv::FCVImageType::PLANAR_BGRA_F32) { + } else if (type == fcv::FCVImageType::PLA_BGRA_F32) { return FDDataType::FP32; - } else if (type == fcv::FCVImageType::PLANAR_RGBA_F32) { + } else if (type == fcv::FCVImageType::PLA_RGBA_F32) { return FDDataType::FP32; - } else if (type == fcv::FCVImageType::PACKAGE_BGRA_U8) { + } else if (type == fcv::FCVImageType::PKG_BGRA_U8) { return FDDataType::UINT8; - } else if (type == fcv::FCVImageType::PACKAGE_RGBA_U8) { + } else if (type == fcv::FCVImageType::PKG_RGBA_U8) { return FDDataType::UINT8; - } else if (type == fcv::FCVImageType::PACKAGE_BGRA_U8) { + } else if (type == fcv::FCVImageType::PKG_BGRA_U8) { return FDDataType::UINT8; - } else if (type == fcv::FCVImageType::PACKAGE_RGBA_U8) { + } else if (type == fcv::FCVImageType::PKG_RGBA_U8) { return FDDataType::UINT8; - } else if (type == fcv::FCVImageType::PACKAGE_BGR565_U8) { + } else if (type == fcv::FCVImageType::PKG_BGR565_U8) { return FDDataType::UINT8; - } else if (type == fcv::FCVImageType::PACKAGE_RGB565_U8) { + } else if (type == fcv::FCVImageType::PKG_RGB565_U8) { return FDDataType::UINT8; } else if (type == fcv::FCVImageType::GRAY_S32) { return FDDataType::INT32; } else if (type == fcv::FCVImageType::GRAY_F32) { return FDDataType::FP32; - } else if (type == fcv::FCVImageType::PACKAGE_BGR_F32) { + } else if (type == fcv::FCVImageType::PKG_BGR_F32) { return FDDataType::FP32; - } else if (type == fcv::FCVImageType::PACKAGE_RGB_F32) { + } else if (type == fcv::FCVImageType::PKG_RGB_F32) { return FDDataType::FP32; - } else if (type == fcv::FCVImageType::PACKAGE_BGR_F32) { + } else if (type == 
fcv::FCVImageType::PKG_BGR_F32) { return FDDataType::FP32; - } else if (type == fcv::FCVImageType::PACKAGE_RGB_F32) { + } else if (type == fcv::FCVImageType::PKG_RGB_F32) { return FDDataType::FP32; - } else if (type == fcv::FCVImageType::PACKAGE_BGRA_F32) { + } else if (type == fcv::FCVImageType::PKG_BGRA_F32) { return FDDataType::FP32; - } else if (type == fcv::FCVImageType::PACKAGE_RGBA_F32) { + } else if (type == fcv::FCVImageType::PKG_RGBA_F32) { return FDDataType::FP32; - } else if (type == fcv::FCVImageType::PACKAGE_BGRA_F32) { + } else if (type == fcv::FCVImageType::PKG_BGRA_F32) { return FDDataType::FP32; - } else if (type == fcv::FCVImageType::PACKAGE_RGBA_F32) { + } else if (type == fcv::FCVImageType::PKG_RGBA_F32) { return FDDataType::FP32; } else if (type == fcv::FCVImageType::GRAY_F64) { return FDDataType::FP64; @@ -144,21 +144,21 @@ fcv::FCVImageType CreateFlyCVDataType(FDDataType type, int channel) { if (channel == 1) { return fcv::FCVImageType::GRAY_U8; } else if (channel == 3) { - return fcv::FCVImageType::PACKAGE_BGR_U8; + return fcv::FCVImageType::PKG_BGR_U8; } else { - return fcv::FCVImageType::PACKAGE_BGRA_U8; + return fcv::FCVImageType::PKG_BGRA_U8; } } else if (type == FDDataType::FP32) { if (channel == 1) { return fcv::FCVImageType::GRAY_F32; } else if (channel == 3) { - return fcv::FCVImageType::PACKAGE_BGR_F32; + return fcv::FCVImageType::PKG_BGR_F32; } else { - return fcv::FCVImageType::PACKAGE_BGRA_F32; + return fcv::FCVImageType::PKG_BGRA_F32; } } FDASSERT(false, "Data type of %s is not supported.", Str(type).c_str()); - return fcv::FCVImageType::PACKAGE_BGR_F32; + return fcv::FCVImageType::PKG_BGR_F32; } fcv::Mat ConvertOpenCVMatToFlyCV(cv::Mat& im) { diff --git a/fastdeploy/vision/matting/contrib/rvm.cc b/fastdeploy/vision/matting/contrib/rvm.cc index 6f48a38652..846db6bd60 100755 --- a/fastdeploy/vision/matting/contrib/rvm.cc +++ b/fastdeploy/vision/matting/contrib/rvm.cc @@ -120,19 +120,21 @@ bool RobustVideoMatting::Postprocess( // 
for alpha float* alpha_ptr = static_cast(alpha.Data()); - cv::Mat alpha_zero_copy_ref(out_h, out_w, CV_32FC1, alpha_ptr); - Mat alpha_resized(alpha_zero_copy_ref); // ref-only, zero copy. + // cv::Mat alpha_zero_copy_ref(out_h, out_w, CV_32FC1, alpha_ptr); + // Mat alpha_resized(alpha_zero_copy_ref); // ref-only, zero copy. + Mat alpha_resized = Mat::Create(out_h, out_w, 1, FDDataType::FP32, + alpha_ptr); // ref-only, zero copy. if ((out_h != in_h) || (out_w != in_w)) { - // already allocated a new continuous memory after resize. Resize::Run(&alpha_resized, in_w, in_h, -1, -1); } // for foreground float* fgr_ptr = static_cast(fgr.Data()); - cv::Mat fgr_zero_copy_ref(out_h, out_w, CV_32FC1, fgr_ptr); - Mat fgr_resized(fgr_zero_copy_ref); // ref-only, zero copy. + // cv::Mat fgr_zero_copy_ref(out_h, out_w, CV_32FC1, fgr_ptr); + // Mat fgr_resized(fgr_zero_copy_ref); // ref-only, zero copy. + Mat fgr_resized = Mat::Create(out_h, out_w, 1, FDDataType::FP32, + fgr_ptr); // ref-only, zero copy. if ((out_h != in_h) || (out_w != in_w)) { - // already allocated a new continuous memory after resize. 
Resize::Run(&fgr_resized, in_w, in_h, -1, -1); } From 6408af263a2abb976c4b064e974b4da67f328be1 Mon Sep 17 00:00:00 2001 From: Zheng_Bicheng <58363586+Zheng-Bicheng@users.noreply.github.com> Date: Sun, 6 Nov 2022 17:29:00 +0800 Subject: [PATCH 07/30] [Add Model]Add RKPicodet (#495) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * 11-02/14:35 * 新增输入数据format错误判断 * 优化推理过程,减少内存分配次数 * 支持多输入rknn模型 * rknn模型输出shape为三维时,输出将被强制对齐为4纬。现在将直接抹除rknn补充的shape,方便部分对输出shape进行判断的模型进行正确的后处理。 * 11-03/17:25 * 支持导出多输入RKNN模型 * 更新各种文档 * ppseg改用Fastdeploy中的模型进行转换 * 11-03/17:25 * 新增开源头 * 11-03/21:48 * 删除无用debug代码,补充注释 * 11-04/01:00 * 新增rkpicodet代码 * 11-04/13:13 * 提交编译缺少的文件 * 11-04/14:03 * 更新安装文档 * 11-04/14:21 * 更新picodet_s配置文件 * 11-04/14:21 * 更新picodet自适应输出结果 * 11-04/14:21 * 更新文档 * * 更新配置文件 * * 修正配置文件 * * 添加缺失的python文件 * * 修正文档 * * 修正代码格式问题0 * * 按照要求修改 * * 按照要求修改 * * 按照要求修改 * * 按照要求修改 * * 按照要求修改 * test --- docs/cn/faq/rknpu2/export.md | 6 +- .../paddledetection/rknpu2/README.md | 38 ++++ .../paddledetection/rknpu2/cpp/CMakeLists.txt | 37 ++++ .../paddledetection/rknpu2/cpp/README.md | 71 +++++++ .../rknpu2/cpp/infer_picodet.cc | 86 ++++++++ .../paddledetection/rknpu2/python/README.md | 35 +++ .../paddledetection/rknpu2/python/infer.py | 59 +++++ fastdeploy/vision.h | 1 + .../vision/detection/contrib/rknpu2/model.h | 16 ++ .../detection/contrib/rknpu2/rkdet_pybind.cc | 29 +++ .../detection/contrib/rknpu2/rkpicodet.cc | 201 ++++++++++++++++++ .../detection/contrib/rknpu2/rkpicodet.h | 46 ++++ .../vision/detection/detection_pybind.cc | 3 + .../fastdeploy/vision/detection/__init__.py | 1 + .../vision/detection/rknpu2/__init__.py | 44 ++++ .../RK3568/picodet_s_416_coco_lcnet.yaml | 7 + .../config/RK3568/picodet_s_416_coco_npu.yaml | 5 + .../RK3588/picodet_s_416_coco_lcnet.yaml | 7 + .../config/RK3588/picodet_s_416_coco_npu.yaml | 5 + 19 files changed, 694 insertions(+), 3 deletions(-) create mode 100644 
examples/vision/detection/paddledetection/rknpu2/README.md create mode 100644 examples/vision/detection/paddledetection/rknpu2/cpp/CMakeLists.txt create mode 100644 examples/vision/detection/paddledetection/rknpu2/cpp/README.md create mode 100644 examples/vision/detection/paddledetection/rknpu2/cpp/infer_picodet.cc create mode 100644 examples/vision/detection/paddledetection/rknpu2/python/README.md create mode 100644 examples/vision/detection/paddledetection/rknpu2/python/infer.py create mode 100644 fastdeploy/vision/detection/contrib/rknpu2/model.h create mode 100644 fastdeploy/vision/detection/contrib/rknpu2/rkdet_pybind.cc create mode 100644 fastdeploy/vision/detection/contrib/rknpu2/rkpicodet.cc create mode 100644 fastdeploy/vision/detection/contrib/rknpu2/rkpicodet.h create mode 100644 python/fastdeploy/vision/detection/rknpu2/__init__.py create mode 100644 tools/rknpu2/config/RK3568/picodet_s_416_coco_lcnet.yaml create mode 100644 tools/rknpu2/config/RK3568/picodet_s_416_coco_npu.yaml create mode 100644 tools/rknpu2/config/RK3588/picodet_s_416_coco_lcnet.yaml create mode 100644 tools/rknpu2/config/RK3588/picodet_s_416_coco_npu.yaml diff --git a/docs/cn/faq/rknpu2/export.md b/docs/cn/faq/rknpu2/export.md index 9399c78d5d..6992506cf6 100644 --- a/docs/cn/faq/rknpu2/export.md +++ b/docs/cn/faq/rknpu2/export.md @@ -22,8 +22,8 @@ model_path: ./portrait_pp_humansegv2_lite_256x144_pretrained.onnx output_folder: ./ target_platform: RK3588 normalize: - mean: [0.5,0.5,0.5] - std: [0.5,0.5,0.5] + mean: [[0.5,0.5,0.5]] + std: [[0.5,0.5,0.5]] outputs: None ``` @@ -45,4 +45,4 @@ python tools/export.py --config_path=./config.yaml ## 模型导出要注意的事项 -* 请不要导出带softmax和argmax的模型,这两个算子存在bug,请在外部进行运算 \ No newline at end of file +* 请不要导出带softmax和argmax的模型,这两个算子存在bug,请在外部进行运算 diff --git a/examples/vision/detection/paddledetection/rknpu2/README.md b/examples/vision/detection/paddledetection/rknpu2/README.md new file mode 100644 index 0000000000..32eff20a6f --- /dev/null +++ 
b/examples/vision/detection/paddledetection/rknpu2/README.md @@ -0,0 +1,38 @@ +# PaddleDetection RKNPU2部署示例 + +## 支持模型列表 + +目前FastDeploy支持如下模型的部署 +- [PicoDet系列模型](https://github.com/PaddlePaddle/PaddleDetection/tree/release/2.4/configs/picodet) + +## 准备PaddleDetection部署模型以及转换模型 +RKNPU部署模型前需要将Paddle模型转换成RKNN模型,具体步骤如下: +* Paddle动态图模型转换为ONNX模型,请参考[PaddleDetection导出模型](https://github.com/PaddlePaddle/PaddleDetection/blob/release/2.4/deploy/EXPORT_MODEL.md) + ,注意在转换时请设置**export.nms=True**. +* ONNX模型转换RKNN模型的过程,请参考[转换文档](../../../../../docs/cn/faq/rknpu2/export.md)进行转换。 + + +## 模型转换example +下面以Picodet-npu为例子,教大家如何转换PaddleDetection模型到RKNN模型。 +```bash +## 下载Paddle静态图模型并解压 +wget https://bj.bcebos.com/fastdeploy/models/rknn2/picodet_s_416_coco_npu.zip +unzip -qo picodet_s_416_coco_npu.zip + +# 静态图转ONNX模型,注意,这里的save_file请和压缩包名对齐 +paddle2onnx --model_dir picodet_s_416_coco_npu \ + --model_filename model.pdmodel \ + --params_filename model.pdiparams \ + --save_file picodet_s_416_coco_npu/picodet_s_416_coco_npu.onnx \ + --enable_dev_version True + +python -m paddle2onnx.optimize --input_model picodet_s_416_coco_npu/picodet_s_416_coco_npu.onnx \ + --output_model picodet_s_416_coco_npu/picodet_s_416_coco_npu.onnx \ + --input_shape_dict "{'image':[1,3,416,416]}" +# ONNX模型转RKNN模型 +# 转换模型,模型将生成在picodet_s_320_coco_lcnet_non_postprocess目录下 +python tools/rknpu2/export.py --config_path tools/rknpu2/config/RK3588/picodet_s_416_coco_npu.yaml +``` + +- [Python部署](./python) +- [视觉模型预测结果](../../../../../docs/api/vision_results/) diff --git a/examples/vision/detection/paddledetection/rknpu2/cpp/CMakeLists.txt b/examples/vision/detection/paddledetection/rknpu2/cpp/CMakeLists.txt new file mode 100644 index 0000000000..b4eca78ec5 --- /dev/null +++ b/examples/vision/detection/paddledetection/rknpu2/cpp/CMakeLists.txt @@ -0,0 +1,37 @@ +CMAKE_MINIMUM_REQUIRED(VERSION 3.10) +project(rknpu2_test) + +set(CMAKE_CXX_STANDARD 14) + +# 指定下载解压后的fastdeploy库路径 +set(FASTDEPLOY_INSTALL_DIR 
"thirdpartys/fastdeploy-0.0.3") + +include(${FASTDEPLOY_INSTALL_DIR}/FastDeployConfig.cmake) +include_directories(${FastDeploy_INCLUDE_DIRS}) + +add_executable(infer_picodet infer_picodet.cc) +target_link_libraries(infer_picodet ${FastDeploy_LIBS}) + + + +set(CMAKE_INSTALL_PREFIX ${CMAKE_SOURCE_DIR}/build/install) + +install(TARGETS infer_picodet DESTINATION ./) + +install(DIRECTORY model DESTINATION ./) +install(DIRECTORY images DESTINATION ./) + +file(GLOB FASTDEPLOY_LIBS ${FASTDEPLOY_INSTALL_DIR}/lib/*) +message("${FASTDEPLOY_LIBS}") +install(PROGRAMS ${FASTDEPLOY_LIBS} DESTINATION lib) + +file(GLOB ONNXRUNTIME_LIBS ${FASTDEPLOY_INSTALL_DIR}/third_libs/install/onnxruntime/lib/*) +install(PROGRAMS ${ONNXRUNTIME_LIBS} DESTINATION lib) + +install(DIRECTORY ${FASTDEPLOY_INSTALL_DIR}/third_libs/install/opencv/lib DESTINATION ./) + +file(GLOB PADDLETOONNX_LIBS ${FASTDEPLOY_INSTALL_DIR}/third_libs/install/paddle2onnx/lib/*) +install(PROGRAMS ${PADDLETOONNX_LIBS} DESTINATION lib) + +file(GLOB RKNPU2_LIBS ${FASTDEPLOY_INSTALL_DIR}/third_libs/install/rknpu2_runtime/RK3588/lib/*) +install(PROGRAMS ${RKNPU2_LIBS} DESTINATION lib) diff --git a/examples/vision/detection/paddledetection/rknpu2/cpp/README.md b/examples/vision/detection/paddledetection/rknpu2/cpp/README.md new file mode 100644 index 0000000000..d0b1319712 --- /dev/null +++ b/examples/vision/detection/paddledetection/rknpu2/cpp/README.md @@ -0,0 +1,71 @@ +# PaddleDetection C++部署示例 + +本目录下提供`infer_xxxxx.cc`快速完成PPDetection模型在Rockchip板子上上通过二代NPU加速部署的示例。 + +在部署前,需确认以下两个步骤: + +1. 软硬件环境满足要求 +2. 根据开发环境,下载预编译部署库或者从头编译FastDeploy仓库 + +以上步骤请参考[RK2代NPU部署库编译](../../../../../../docs/cn/build_and_install/rknpu2.md)实现 + +## 生成基本目录文件 + +该例程由以下几个部分组成 +```text +. 
+├── CMakeLists.txt +├── build # 编译文件夹 +├── image # 存放图片的文件夹 +├── infer_cpu_npu.cc +├── infer_cpu_npu.h +├── main.cc +├── model # 存放模型文件的文件夹 +└── thirdpartys # 存放sdk的文件夹 +``` + +首先需要先生成目录结构 +```bash +mkdir build +mkdir images +mkdir model +mkdir thirdpartys +``` + +## 编译 + +### 编译并拷贝SDK到thirdpartys文件夹 + +请参考[RK2代NPU部署库编译](../../../../../../docs/cn/build_and_install/rknpu2.md)仓库编译SDK,编译完成后,将在build目录下生成 +fastdeploy-0.0.3目录,请移动它至thirdpartys目录下. + +### 拷贝模型文件,以及配置文件至model文件夹 +在Paddle动态图模型 -> Paddle静态图模型 -> ONNX模型的过程中,将生成ONNX文件以及对应的yaml配置文件,请将配置文件存放到model文件夹内。 +转换为RKNN后的模型文件也需要拷贝至model。 + +### 准备测试图片至image文件夹 +```bash +wget https://gitee.com/paddlepaddle/PaddleDetection/raw/release/2.4/demo/000000014439.jpg +cp 000000014439.jpg ./images +``` + +### 编译example + +```bash +cd build +cmake .. +make -j8 +make install +``` + +## 运行例程 + +```bash +cd ./build/install +./rknpu_test +``` + + +- [模型介绍](../../) +- [Python部署](../python) +- [视觉模型预测结果](../../../../../../docs/api/vision_results/) diff --git a/examples/vision/detection/paddledetection/rknpu2/cpp/infer_picodet.cc b/examples/vision/detection/paddledetection/rknpu2/cpp/infer_picodet.cc new file mode 100644 index 0000000000..297fa52e52 --- /dev/null +++ b/examples/vision/detection/paddledetection/rknpu2/cpp/infer_picodet.cc @@ -0,0 +1,86 @@ +// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+#include +#include +#include "fastdeploy/vision.h" + +void InferPicodet(const std::string& device = "cpu"); + +int main() { + InferPicodet("npu"); + return 0; +} + +fastdeploy::RuntimeOption GetOption(const std::string& device) { + auto option = fastdeploy::RuntimeOption(); + if (device == "npu") { + option.UseRKNPU2(); + } else { + option.UseCpu(); + } + return option; +} + +fastdeploy::ModelFormat GetFormat(const std::string& device) { + auto format = fastdeploy::ModelFormat::ONNX; + if (device == "npu") { + format = fastdeploy::ModelFormat::RKNN; + } else { + format = fastdeploy::ModelFormat::ONNX; + } + return format; +} + +std::string GetModelPath(std::string& model_path, const std::string& device) { + if (device == "npu") { + model_path += "rknn"; + } else { + model_path += "onnx"; + } + return model_path; +} + +void InferPicodet(const std::string &device) { + std::string model_file = "./model/picodet_s_416_coco_npu/picodet_s_416_coco_npu_rk3588."; + std::string params_file; + std::string config_file = "./model/picodet_s_416_coco_npu/infer_cfg.yml"; + + fastdeploy::RuntimeOption option = GetOption(device); + fastdeploy::ModelFormat format = GetFormat(device); + model_file = GetModelPath(model_file, device); + auto model = fastdeploy::vision::detection::RKPicoDet( + model_file, params_file, config_file,option,format); + + if (!model.Initialized()) { + std::cerr << "Failed to initialize." << std::endl; + return; + } + auto image_file = "./images/000000014439.jpg"; + auto im = cv::imread(image_file); + + fastdeploy::vision::DetectionResult res; + clock_t start = clock(); + if (!model.Predict(&im, &res)) { + std::cerr << "Failed to predict." 
<< std::endl; + return; + } + clock_t end = clock(); + auto dur = static_cast(end - start); + printf("picodet_npu use time:%f\n", (dur / CLOCKS_PER_SEC)); + + std::cout << res.Str() << std::endl; + auto vis_im = fastdeploy::vision::VisDetection(im, res,0.5); + cv::imwrite("picodet_npu_result.jpg", vis_im); + std::cout << "Visualized result saved in ./picodet_npu_result.jpg" << std::endl; +} \ No newline at end of file diff --git a/examples/vision/detection/paddledetection/rknpu2/python/README.md b/examples/vision/detection/paddledetection/rknpu2/python/README.md new file mode 100644 index 0000000000..23b13cd3b9 --- /dev/null +++ b/examples/vision/detection/paddledetection/rknpu2/python/README.md @@ -0,0 +1,35 @@ +# PaddleDetection Python部署示例 + +在部署前,需确认以下两个步骤 + +- 1. 软硬件环境满足要求,参考[FastDeploy环境要求](../../../../../../docs/cn/build_and_install/rknpu2.md) + +本目录下提供`infer.py`快速完成Picodet在RKNPU上部署的示例。执行如下脚本即可完成 + +```bash +# 下载部署示例代码 +git clone https://github.com/PaddlePaddle/FastDeploy.git +cd FastDeploy/examples/vision/detection/paddledetection/rknpu2/python + +# 下载图片 +wget https://gitee.com/paddlepaddle/PaddleDetection/raw/release/2.4/demo/000000014439.jpg + +# copy model +cp -r ./picodet_s_416_coco_npu /path/to/FastDeploy/examples/vision/detection/rknpu2detection/paddledetection/python + +# 推理 +python3 infer.py --model_file ./picodet_s_416_coco_npu/picodet_s_416_coco_npu_3588.rknn \ + --config_file ./picodet_s_416_coco_npu/infer_cfg.yml \ + --image 000000014439.jpg +``` + + +## 注意事项 +RKNPU上对模型的输入要求是使用NHWC格式,且图片归一化操作会在转RKNN模型时,内嵌到模型中,因此我们在使用FastDeploy部署时, +需要先调用DisableNormalizePermute(C++)或`disable_normalize_permute(Python),在预处理阶段禁用归一化以及数据格式的转换。 +## 其它文档 + +- [PaddleDetection 模型介绍](..) 
+- [PaddleDetection C++部署](../cpp) +- [模型预测结果说明](../../../../../../docs/api/vision_results/) +- [转换PaddleDetection RKNN模型文档](../README.md) diff --git a/examples/vision/detection/paddledetection/rknpu2/python/infer.py b/examples/vision/detection/paddledetection/rknpu2/python/infer.py new file mode 100644 index 0000000000..ae2d8796a6 --- /dev/null +++ b/examples/vision/detection/paddledetection/rknpu2/python/infer.py @@ -0,0 +1,59 @@ +# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+import fastdeploy as fd +import cv2 +import os + + +def parse_arguments(): + import argparse + import ast + parser = argparse.ArgumentParser() + parser.add_argument( + "--model_file", required=True, help="Path of rknn model.") + parser.add_argument("--config_file", required=True, help="Path of config.") + parser.add_argument( + "--image", type=str, required=True, help="Path of test image file.") + return parser.parse_args() + + +def build_option(args): + option = fd.RuntimeOption() + option.use_rknpu2() + return option + + +args = parse_arguments() + +# 配置runtime,加载模型 +runtime_option = build_option(args) +model_file = args.model_file +params_file = "" +config_file = args.config_file +model = fd.vision.detection.RKPicoDet( + model_file, + params_file, + config_file, + runtime_option=runtime_option, + model_format=fd.ModelFormat.RKNN) + +# 预测图片分割结果 +im = cv2.imread(args.image) +result = model.predict(im.copy()) +print(result) + +# 可视化结果 +vis_im = fd.vision.vis_detection(im, result, score_threshold=0.5) +cv2.imwrite("visualized_result.jpg", vis_im) +print("Visualized result save in ./visualized_result.jpg") diff --git a/fastdeploy/vision.h b/fastdeploy/vision.h index d9ceb5dda1..44054ee937 100755 --- a/fastdeploy/vision.h +++ b/fastdeploy/vision.h @@ -29,6 +29,7 @@ #include "fastdeploy/vision/detection/contrib/yolov7end2end_trt.h" #include "fastdeploy/vision/detection/contrib/yolox.h" #include "fastdeploy/vision/detection/ppdet/model.h" +#include "fastdeploy/vision/detection/contrib/rknpu2/model.h" #include "fastdeploy/vision/facedet/contrib/retinaface.h" #include "fastdeploy/vision/facedet/contrib/scrfd.h" #include "fastdeploy/vision/facedet/contrib/ultraface.h" diff --git a/fastdeploy/vision/detection/contrib/rknpu2/model.h b/fastdeploy/vision/detection/contrib/rknpu2/model.h new file mode 100644 index 0000000000..f0f8616eed --- /dev/null +++ b/fastdeploy/vision/detection/contrib/rknpu2/model.h @@ -0,0 +1,16 @@ +// Copyright (c) 2022 PaddlePaddle Authors. 
All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once +#include "fastdeploy/vision/detection/contrib/rknpu2/rkpicodet.h" diff --git a/fastdeploy/vision/detection/contrib/rknpu2/rkdet_pybind.cc b/fastdeploy/vision/detection/contrib/rknpu2/rkdet_pybind.cc new file mode 100644 index 0000000000..6482ea6755 --- /dev/null +++ b/fastdeploy/vision/detection/contrib/rknpu2/rkdet_pybind.cc @@ -0,0 +1,29 @@ +// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+#include "fastdeploy/pybind/main.h" + +namespace fastdeploy { +void BindRKDet(pybind11::module& m) { + pybind11::class_(m, "RKPicoDet") + .def(pybind11::init()) + .def("predict", + [](vision::detection::RKPicoDet& self, pybind11::array& data) { + auto mat = PyArrayToCvMat(data); + vision::DetectionResult res; + self.Predict(&mat, &res); + return res; + }); +} +} // namespace fastdeploy diff --git a/fastdeploy/vision/detection/contrib/rknpu2/rkpicodet.cc b/fastdeploy/vision/detection/contrib/rknpu2/rkpicodet.cc new file mode 100644 index 0000000000..926214d86d --- /dev/null +++ b/fastdeploy/vision/detection/contrib/rknpu2/rkpicodet.cc @@ -0,0 +1,201 @@ +// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#include "fastdeploy/vision/detection/contrib/rknpu2/rkpicodet.h" +#include "yaml-cpp/yaml.h" +namespace fastdeploy { +namespace vision { +namespace detection { + +RKPicoDet::RKPicoDet(const std::string& model_file, + const std::string& params_file, + const std::string& config_file, + const RuntimeOption& custom_option, + const ModelFormat& model_format) { + config_file_ = config_file; + valid_cpu_backends = {Backend::ORT}; + valid_rknpu_backends = {Backend::RKNPU2}; + if ((model_format == ModelFormat::RKNN) || + (model_format == ModelFormat::ONNX)) { + has_nms_ = false; + } + runtime_option = custom_option; + runtime_option.model_format = model_format; + runtime_option.model_file = model_file; + runtime_option.params_file = params_file; + + // NMS parameters come from RKPicoDet_s_nms + background_label = -1; + keep_top_k = 100; + nms_eta = 1; + nms_threshold = 0.5; + nms_top_k = 1000; + normalized = true; + score_threshold = 0.3; + initialized = Initialize(); +} + +bool RKPicoDet::Initialize() { + if (!BuildPreprocessPipelineFromConfig()) { + FDERROR << "Failed to build preprocess pipeline from configuration file." + << std::endl; + return false; + } + if (!InitRuntime()) { + FDERROR << "Failed to initialize fastdeploy backend." << std::endl; + return false; + } + return true; +} + +bool RKPicoDet::Preprocess(Mat* mat, std::vector* outputs) { + int origin_w = mat->Width(); + int origin_h = mat->Height(); + for (size_t i = 0; i < processors_.size(); ++i) { + if (!(*(processors_[i].get()))(mat)) { + FDERROR << "Failed to process image data in " << processors_[i]->Name() + << "." 
<< std::endl; + return false; + } + } + + Cast::Run(mat, "float"); + + scale_factor.resize(2); + scale_factor[0] = mat->Height() * 1.0 / origin_h; + scale_factor[1] = mat->Width() * 1.0 / origin_w; + + outputs->resize(1); + (*outputs)[0].name = InputInfoOfRuntime(0).name; + mat->ShareWithTensor(&((*outputs)[0])); + // reshape to [1, c, h, w] + (*outputs)[0].shape.insert((*outputs)[0].shape.begin(), 1); + return true; +} + +bool RKPicoDet::BuildPreprocessPipelineFromConfig() { + processors_.clear(); + YAML::Node cfg; + try { + cfg = YAML::LoadFile(config_file_); + } catch (YAML::BadFile& e) { + FDERROR << "Failed to load yaml file " << config_file_ + << ", maybe you should check this file." << std::endl; + return false; + } + + processors_.push_back(std::make_shared()); + + for (const auto& op : cfg["Preprocess"]) { + std::string op_name = op["type"].as(); + if (op_name == "NormalizeImage") { + continue; + } else if (op_name == "Resize") { + bool keep_ratio = op["keep_ratio"].as(); + auto target_size = op["target_size"].as>(); + int interp = op["interp"].as(); + FDASSERT(target_size.size() == 2, + "Require size of target_size be 2, but now it's %lu.", + target_size.size()); + if (!keep_ratio) { + int width = target_size[1]; + int height = target_size[0]; + processors_.push_back( + std::make_shared(width, height, -1.0, -1.0, interp, false)); + } else { + int min_target_size = std::min(target_size[0], target_size[1]); + int max_target_size = std::max(target_size[0], target_size[1]); + std::vector max_size; + if (max_target_size > 0) { + max_size.push_back(max_target_size); + max_size.push_back(max_target_size); + } + processors_.push_back(std::make_shared( + min_target_size, interp, true, max_size)); + } + } else if (op_name == "Permute") { + continue; + } else if (op_name == "Pad") { + auto size = op["size"].as>(); + auto value = op["fill_value"].as>(); + processors_.push_back(std::make_shared("float")); + processors_.push_back( + std::make_shared(size[1], size[0], 
value)); + } else if (op_name == "PadStride") { + auto stride = op["stride"].as(); + processors_.push_back( + std::make_shared(stride, std::vector(3, 0))); + } else { + FDERROR << "Unexcepted preprocess operator: " << op_name << "." + << std::endl; + return false; + } + } + return true; +} + +bool RKPicoDet::Postprocess(std::vector& infer_result, + DetectionResult* result) { + FDASSERT(infer_result[1].shape[0] == 1, + "Only support batch = 1 in FastDeploy now."); + + if (!has_nms_) { + int boxes_index = 0; + int scores_index = 1; + if (infer_result[0].shape[1] == infer_result[1].shape[2]) { + boxes_index = 0; + scores_index = 1; + } else if (infer_result[0].shape[2] == infer_result[1].shape[1]) { + boxes_index = 1; + scores_index = 0; + } else { + FDERROR << "The shape of boxes and scores should be [batch, boxes_num, " + "4], [batch, classes_num, boxes_num]" + << std::endl; + return false; + } + + backend::MultiClassNMS nms; + nms.background_label = background_label; + nms.keep_top_k = keep_top_k; + nms.nms_eta = nms_eta; + nms.nms_threshold = nms_threshold; + nms.score_threshold = score_threshold; + nms.nms_top_k = nms_top_k; + nms.normalized = normalized; + nms.Compute(static_cast(infer_result[boxes_index].Data()), + static_cast(infer_result[scores_index].Data()), + infer_result[boxes_index].shape, + infer_result[scores_index].shape); + if (nms.out_num_rois_data[0] > 0) { + result->Reserve(nms.out_num_rois_data[0]); + } + for (size_t i = 0; i < nms.out_num_rois_data[0]; ++i) { + result->label_ids.push_back(nms.out_box_data[i * 6]); + result->scores.push_back(nms.out_box_data[i * 6 + 1]); + result->boxes.emplace_back( + std::array{nms.out_box_data[i * 6 + 2] / scale_factor[1], + nms.out_box_data[i * 6 + 3] / scale_factor[0], + nms.out_box_data[i * 6 + 4] / scale_factor[1], + nms.out_box_data[i * 6 + 5] / scale_factor[0]}); + } + } else { + FDERROR << "Picodet in Backend::RKNPU2 don't support NMS" << std::endl; + } + return true; +} + +} // namespace detection +} 
// namespace vision +} // namespace fastdeploy diff --git a/fastdeploy/vision/detection/contrib/rknpu2/rkpicodet.h b/fastdeploy/vision/detection/contrib/rknpu2/rkpicodet.h new file mode 100644 index 0000000000..dbb48c16d9 --- /dev/null +++ b/fastdeploy/vision/detection/contrib/rknpu2/rkpicodet.h @@ -0,0 +1,46 @@ +// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once +#include "fastdeploy/vision/detection/ppdet/ppyoloe.h" + +namespace fastdeploy { +namespace vision { +namespace detection { +class FASTDEPLOY_DECL RKPicoDet : public PPYOLOE { + public: + RKPicoDet(const std::string& model_file, + const std::string& params_file, + const std::string& config_file, + const RuntimeOption& custom_option = RuntimeOption(), + const ModelFormat& model_format = ModelFormat::RKNN); + + virtual std::string ModelName() const { return "RKPicoDet"; } + + protected: + /// Build the preprocess pipeline from the loaded model + virtual bool BuildPreprocessPipelineFromConfig(); + /// Preprocess an input image, and set the preprocessed results to `outputs` + virtual bool Preprocess(Mat* mat, std::vector* outputs); + + /// Postprocess the inferenced results, and set the final result to `result` + virtual bool Postprocess(std::vector& infer_result, + DetectionResult* result); + virtual bool Initialize(); + private: + std::vector scale_factor{1.0, 1.0}; +}; +} // namespace detection +} // namespace vision +} 
// namespace fastdeploy diff --git a/fastdeploy/vision/detection/detection_pybind.cc b/fastdeploy/vision/detection/detection_pybind.cc index b3a7a6ad94..f55bf68bf1 100644 --- a/fastdeploy/vision/detection/detection_pybind.cc +++ b/fastdeploy/vision/detection/detection_pybind.cc @@ -27,6 +27,8 @@ void BindNanoDetPlus(pybind11::module& m); void BindPPDet(pybind11::module& m); void BindYOLOv7End2EndTRT(pybind11::module& m); void BindYOLOv7End2EndORT(pybind11::module& m); +void BindRKDet(pybind11::module& m); + void BindDetection(pybind11::module& m) { auto detection_module = @@ -42,5 +44,6 @@ void BindDetection(pybind11::module& m) { BindNanoDetPlus(detection_module); BindYOLOv7End2EndTRT(detection_module); BindYOLOv7End2EndORT(detection_module); + BindRKDet(detection_module); } } // namespace fastdeploy diff --git a/python/fastdeploy/vision/detection/__init__.py b/python/fastdeploy/vision/detection/__init__.py index 89441f7a27..a4fe4c035b 100644 --- a/python/fastdeploy/vision/detection/__init__.py +++ b/python/fastdeploy/vision/detection/__init__.py @@ -24,3 +24,4 @@ from .contrib.yolov6 import YOLOv6 from .contrib.yolov7end2end_trt import YOLOv7End2EndTRT from .contrib.yolov7end2end_ort import YOLOv7End2EndORT from .ppdet import PPYOLOE, PPYOLO, PPYOLOv2, PaddleYOLOX, PicoDet, FasterRCNN, YOLOv3, MaskRCNN +from .rknpu2 import RKPicoDet diff --git a/python/fastdeploy/vision/detection/rknpu2/__init__.py b/python/fastdeploy/vision/detection/rknpu2/__init__.py new file mode 100644 index 0000000000..57fcecc64b --- /dev/null +++ b/python/fastdeploy/vision/detection/rknpu2/__init__.py @@ -0,0 +1,44 @@ +# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from __future__ import absolute_import +from typing import Union, List +import logging +from .... import FastDeployModel, ModelFormat +from .... import c_lib_wrap as C +from .. import PPYOLOE + + +class RKPicoDet(PPYOLOE): + def __init__(self, + model_file, + params_file, + config_file, + runtime_option=None, + model_format=ModelFormat.RKNN): + """Load a PicoDet model exported by PaddleDetection. + + :param model_file: (str)Path of model file, e.g picodet/model.pdmodel + :param params_file: (str)Path of parameters file, e.g picodet/model.pdiparams, if the model_fomat is ModelFormat.ONNX, this param will be ignored, can be set as empty string + :param config_file: (str)Path of configuration file for deployment, e.g ppyoloe/infer_cfg.yml + :param runtime_option: (fastdeploy.RuntimeOption)RuntimeOption for inference this model, if it's None, will use the default backend on CPU + :param model_format: (fastdeploy.ModelForamt)Model format of the loaded model + """ + + super(PPYOLOE, self).__init__(runtime_option) + + assert model_format == ModelFormat.RKNN, "RKPicoDet model only support model format of ModelFormat.RKNN now." + self._model = C.vision.detection.RKPicoDet( + model_file, params_file, config_file, self._runtime_option, + model_format) + assert self.initialized, "RKPicoDet model initialize failed." 
diff --git a/tools/rknpu2/config/RK3568/picodet_s_416_coco_lcnet.yaml b/tools/rknpu2/config/RK3568/picodet_s_416_coco_lcnet.yaml new file mode 100644 index 0000000000..9854891634 --- /dev/null +++ b/tools/rknpu2/config/RK3568/picodet_s_416_coco_lcnet.yaml @@ -0,0 +1,7 @@ +model_path: ./picodet_s_416_coco_lcnet/picodet_s_416_coco_lcnet.onnx +output_folder: ./picodet_s_416_coco_lcnet +target_platform: RK3568 +normalize: + mean: [[0.485,0.456,0.406]] + std: [[0.229,0.224,0.225]] +outputs: ['tmp_16','p2o.Concat.9'] diff --git a/tools/rknpu2/config/RK3568/picodet_s_416_coco_npu.yaml b/tools/rknpu2/config/RK3568/picodet_s_416_coco_npu.yaml new file mode 100644 index 0000000000..723acc8b55 --- /dev/null +++ b/tools/rknpu2/config/RK3568/picodet_s_416_coco_npu.yaml @@ -0,0 +1,5 @@ +model_path: ./picodet_s_416_coco_npu/picodet_s_416_coco_npu.onnx +output_folder: ./picodet_s_416_coco_npu +target_platform: RK3568 +normalize: None +outputs: ['tmp_16','p2o.Concat.17'] diff --git a/tools/rknpu2/config/RK3588/picodet_s_416_coco_lcnet.yaml b/tools/rknpu2/config/RK3588/picodet_s_416_coco_lcnet.yaml new file mode 100644 index 0000000000..6110e8c0f9 --- /dev/null +++ b/tools/rknpu2/config/RK3588/picodet_s_416_coco_lcnet.yaml @@ -0,0 +1,7 @@ +model_path: ./picodet_s_416_coco_lcnet/picodet_s_416_coco_lcnet.onnx +output_folder: ./picodet_s_416_coco_lcnet +target_platform: RK3588 +normalize: + mean: [[0.485,0.456,0.406]] + std: [[0.229,0.224,0.225]] +outputs: ['tmp_16','p2o.Concat.9'] diff --git a/tools/rknpu2/config/RK3588/picodet_s_416_coco_npu.yaml b/tools/rknpu2/config/RK3588/picodet_s_416_coco_npu.yaml new file mode 100644 index 0000000000..356fcfad88 --- /dev/null +++ b/tools/rknpu2/config/RK3588/picodet_s_416_coco_npu.yaml @@ -0,0 +1,5 @@ +model_path: ./picodet_s_416_coco_npu/picodet_s_416_coco_npu.onnx +output_folder: ./picodet_s_416_coco_npu +target_platform: RK3588 +normalize: None +outputs: ['tmp_16','p2o.Concat.17'] From 08a384f7c26b84510d754fd24acbe1e94c5cbb73 Mon Sep 17 
00:00:00 2001 From: Jack Zhou Date: Sun, 6 Nov 2022 20:17:35 +0800 Subject: [PATCH 08/30] [Other]Fix the fd tensor copy assignment (#506) Fix the fd tensor copy assignment --- fastdeploy/core/fd_tensor.cc | 22 ++++----- tests/core/test_fd_tensor.cc | 89 ++++++++++++++++++++++++++++++++++++ 2 files changed, 100 insertions(+), 11 deletions(-) create mode 100644 tests/core/test_fd_tensor.cc diff --git a/fastdeploy/core/fd_tensor.cc b/fastdeploy/core/fd_tensor.cc index e98a81e1b7..8b739d844a 100644 --- a/fastdeploy/core/fd_tensor.cc +++ b/fastdeploy/core/fd_tensor.cc @@ -89,8 +89,9 @@ void FDTensor::Squeeze(int64_t axis) { size_t ndim = shape.size(); FDASSERT(axis >= 0 && axis < ndim, "The allowed 'axis' must be in range of (0, %lu)!", ndim); - FDASSERT(shape[axis]==1, - "The No.%ld dimension of shape should be 1, but it is %ld!", (long)axis, (long)shape[axis]); + FDASSERT(shape[axis] == 1, + "The No.%ld dimension of shape should be 1, but it is %ld!", + (long)axis, (long)shape[axis]); shape.erase(shape.begin() + axis); } @@ -220,9 +221,9 @@ bool FDTensor::ReallocFn(size_t nbytes) { return buffer_ != nullptr; #else FDASSERT(false, - "The FastDeploy FDTensor allocator didn't compile under " - "-DWITH_GPU=ON," - "so this is an unexpected problem happend."); + "The FastDeploy FDTensor allocator didn't compile under " + "-DWITH_GPU=ON," + "so this is an unexpected problem happend."); #endif } buffer_ = realloc(buffer_, nbytes); @@ -316,16 +317,15 @@ FDTensor& FDTensor::operator=(const FDTensor& other) { if (other.buffer_ == nullptr) { FreeFn(); buffer_ = nullptr; + shape = other.shape; + name = other.name; + dtype = other.dtype; + device = other.device; } else { - Resize(other.shape); + Resize(other.shape, other.dtype, other.name, other.device); size_t nbytes = Nbytes(); CopyBuffer(buffer_, other.buffer_, nbytes); } - - shape = other.shape; - name = other.name; - dtype = other.dtype; - device = other.device; external_data_ptr = other.external_data_ptr; } return *this; diff 
--git a/tests/core/test_fd_tensor.cc b/tests/core/test_fd_tensor.cc new file mode 100644 index 0000000000..ad4d639e4e --- /dev/null +++ b/tests/core/test_fd_tensor.cc @@ -0,0 +1,89 @@ +// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include +#include +#include +#include "fastdeploy/core/fd_tensor.h" +#include "gtest/gtest.h" +#include "gtest_utils.h" + +namespace fastdeploy { + +TEST(fastdeploy, fd_tensor_constructor) { + CheckShape check_shape; + CheckData check_data; + + FDTensor tensor1; + check_shape(tensor1.shape, {0}); + ASSERT_EQ(tensor1.name, ""); + ASSERT_EQ(tensor1.dtype, FDDataType::INT8); + ASSERT_EQ(tensor1.device, Device::CPU); + + std::vector inputs = {2, 4, 3, 7, 1, 5}; + tensor1.SetExternalData({2, 3}, FDDataType::INT32, inputs.data()); + ASSERT_EQ(tensor1.dtype, FDDataType::INT32); + + FDTensor tensor2(tensor1); + check_shape(tensor1.shape, {2, 3}); + ASSERT_EQ(tensor2.name, ""); + ASSERT_EQ(tensor2.dtype, FDDataType::INT32); + ASSERT_EQ(tensor2.device, Device::CPU); + + FDTensor tensor3; + tensor3.Resize({2, 3}, FDDataType::INT32, "tensor3"); + check_shape(tensor3.shape, {2, 3}); + ASSERT_EQ(tensor3.Nbytes(), 24); + + // Copy constructor + FDTensor tensor4(tensor3); + check_shape(tensor4.shape, {2, 3}); + ASSERT_EQ(tensor3.Nbytes(), tensor4.Nbytes()); + check_data(reinterpret_cast(tensor3.Data()), + reinterpret_cast(tensor4.Data()), tensor4.Numel()); + + // Move 
constructor + ASSERT_NE(tensor1.external_data_ptr, nullptr); + FDTensor tensor5(std::move(tensor1)); + ASSERT_EQ(tensor1.external_data_ptr, nullptr); + ASSERT_EQ(tensor5.external_data_ptr, inputs.data()); + check_shape(tensor5.shape, {2, 3}); +} + +TEST(fastdeploy, fd_tensor_assignment) { + CheckShape check_shape; + CheckData check_data; + + FDTensor tensor1("T1"); + std::vector inputs = {2, 4, 3, 7, 1, 5}; + tensor1.SetExternalData({2, 3}, FDDataType::INT32, inputs.data()); + + FDTensor tensor2; + tensor2 = tensor1; + ASSERT_EQ(tensor2.name, "T1"); + ASSERT_EQ(tensor2.dtype, FDDataType::INT32); + ASSERT_EQ(tensor2.device, Device::CPU); + ASSERT_EQ(tensor2.Data(), inputs.data()); + check_shape(tensor2.shape, {2, 3}); + + FDTensor tensor3; + tensor3 = std::move(tensor1); + ASSERT_EQ(tensor3.name, "T1"); + ASSERT_EQ(tensor3.dtype, FDDataType::INT32); + ASSERT_EQ(tensor3.device, Device::CPU); + ASSERT_EQ(tensor3.Data(), inputs.data()); + ASSERT_EQ(tensor1.Data(), nullptr); +} + +} // namespace fastdeploy \ No newline at end of file From 25d0521c3e5048d63878d88890ede0a19f48fee5 Mon Sep 17 00:00:00 2001 From: leiqing <54695910+leiqing1@users.noreply.github.com> Date: Sun, 6 Nov 2022 23:09:58 +0800 Subject: [PATCH 09/30] Update README_EN.md --- docs/README_EN.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/README_EN.md b/docs/README_EN.md index fe958a6739..de2e2bef84 100644 --- a/docs/README_EN.md +++ b/docs/README_EN.md @@ -13,8 +13,8 @@ ## A Quick Start - Demos -- [PP-YOLOE Python Deployment Demo](en/quick_start/models/python.md) -- [PP-YOLOE C++ Deployment Demo](en/quick_start/models/cpp.md) +- [Python Deployment Demo](en/quick_start/models/python.md) +- [C++ Deployment Demo](en/quick_start/models/cpp.md) - [A Quick Start on Runtime Python](en/quick_start/runtime/python.md) - [A Quick Start on Runtime C++](en/quick_start/runtime/cpp.md) From faa4f9b048f81f79824f3113bc4b8951a1882bcc Mon Sep 17 00:00:00 2001 From: leiqing 
<54695910+leiqing1@users.noreply.github.com> Date: Sun, 6 Nov 2022 23:52:13 +0800 Subject: [PATCH 10/30] Update README_CN.md --- README_CN.md | 334 ++++++++++++++++++++++++++++++--------------------- 1 file changed, 197 insertions(+), 137 deletions(-) diff --git a/README_CN.md b/README_CN.md index 4c61b6c4a6..383f88b62e 100644 --- a/README_CN.md +++ b/README_CN.md @@ -16,56 +16,89 @@

-**⚡️FastDeploy**是一款**易用高效**的推理部署开发套件。覆盖业界🔥**热门AI模型**并提供📦**开箱即用**的部署体验,包括图像分类、目标检测、图像分割、人脸检测、人脸识别、人体关键点识别、文字识别、语义理解等多任务,满足开发者**多场景**,**多硬件**、**多平台**的产业部署需求。 +**⚡️FastDeploy**是一款**易用高效**的推理部署开发套件。覆盖业界🔥**热门CV、NLP、Speech的AI模型**并提供📦**开箱即用**的部署体验,包括图像分类、目标检测、图像分割、人脸检测、人脸识别、人体关键点识别、文字识别、语义理解等多任务,满足开发者**多场景**,**多硬件**、**多平台**的产业部署需求。 -| Potrait Segmentation | Image Matting | Semantic Segmentation | Real-Time Matting | +| [Object Detection](examples/vision) | [3D Object Detection](https://github.com/PaddlePaddle/FastDeploy/issues/6) | [Semantic Segmentation](examples/vision/segmentation/paddleseg) | [Potrait Segmentation](examples/vision/segmentation/paddleseg) | |:---------------------------------------------------------------------------------------------------------:|:---------------------------------------------------------------------------------------------------------:|:--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------:|:-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------:| -| | | | | -| **OCR** | **Behavior Recognition** | **Object Detection** |**Pose Estimation** -| | | | | -| **Face Alignment** | **3D Object Detection** | **Face Editing** | **Image Animation** -| | | | +| | | | | +| [**Image Matting**](examples/vision/matting) | [**Real-Time Matting**](examples/vision/matting) | [**OCR**](examples/vision/ocr) |[**Face Alignment**](examples/vision/ocr) +| | | | | +| [**Pose Estimation**](examples/vision/keypointdetection) | [**Behavior Recognition**](https://github.com/PaddlePaddle/FastDeploy/issues/6) | [**NLP**](examples/text) |[**Speech**](examples/audio/pp-tts) +| | | |

**input**:早上好,今天是2020
/10/29,最低温度是-3°C。

**output**: [](https://paddlespeech.bj.bcebos.com/Parakeet/docs/demos/parakeet_espnet_fs2_pwg_demo/tn_g2p/parakeet/001.wav)

| + ## 近期更新 -- 🔥 **2022.11.09 20:30~21:30,【直播分享】《覆盖云边端全场景,150+热门模型快速部署》。扫码报名** -- 🔥 **2022.11.10 20:30~21:30,【直播分享】《瑞芯微、晶晨、恩智浦等10+AI硬件部署,直达产业落地》。扫码报名** -- 🔥 **2022.11.10 19:00~20:00,【直播分享】《10+热门模型在RK3588、RK3568部署实战》。扫码报名** --
- +- 🔥 **【直播分享】2022.11.09 20:30~21:30,《覆盖云边端全场景,150+热门模型快速部署》。微信扫码报名** +- 🔥 **【直播分享】2022.11.10 20:30~21:30,《瑞芯微、晶晨、恩智浦等10+AI硬件部署,直达产业落地》。微信扫码报名** +- 🔥 **【直播分享】2022.11.10 19:00~20:00,《10+热门模型在RK3588、RK3568部署实战》。微信扫码报名** +
+
- -- 🔥 **2022.10.15:Release FastDeploy [release v0.3.0](https://github.com/PaddlePaddle/FastDeploy/tree/release%2F0.3.0)**
- - **New server-side deployment upgrade:更快的推理性能,一键量化,更多的视觉和NLP模型** - - 集成 OpenVINO 推理引擎,并且保证了使用 OpenVINO 与 使用 TensorRT、ONNX Runtime、 Paddle Inference一致的开发体验; - - 提供[一键模型量化工具](tools/quantization),支持YOLOv7、YOLOv6、YOLOv5等视觉模型,在CPU和GPU推理速度可提升1.5~2倍; - - 新增加 PP-OCRv3, PP-OCRv2, PP-Matting, PP-HumanMatting, ModNet 等视觉模型并提供[端到端部署示例](examples/vision); - - 新增加NLP信息抽取模型 UIE 并提供[端到端部署示例](examples/text/uie). - - -- 🔥 **2022.8.18:发布FastDeploy [release v0.2.0](https://github.com/PaddlePaddle/FastDeploy/tree/release%2F0.2.0)**
- - **服务端部署全新升级:更快的推理性能,更多的视觉模型支持** - - 发布基于x86 CPU、NVIDIA GPU的高性能推理引擎SDK,推理速度大幅提升 - - 集成Paddle Inference、ONNX Runtime、TensorRT等推理引擎并提供统一的部署体验 - - 支持YOLOv7、YOLOv6、YOLOv5、PP-YOLOE等全系列目标检测模型并提供[端到端部署示例](examples/vision/detection/) - - 支持人脸检测、人脸识别、实时人像抠图、图像分割等40+重点模型及[Demo示例](examples/vision/) - - 支持Python和C++两种语言部署 - - **边缘移动端部署新增瑞芯微、晶晨、恩智浦等NPU芯片部署能力** - - 发布轻量化目标检测[Picodet-NPU部署Demo](https://github.com/PaddlePaddle/Paddle-Lite-Demo/tree/develop/object_detection/linux/picodet_detection),提供极致的INT8全量化推理能力 +- 🔥 **2022.10.31:Release FastDeploy [release v0.5.0](https://github.com/PaddlePaddle/FastDeploy/tree/release/0.5.0)** + - **🖥️ 服务端部署:支持推理速度更快的后端,支持更多的模型** + - 集成 Paddle Inference TensorRT后端,并保证其使用与Paddle Inference、TensorRT、OpenVINO、ONNX Runtime、Paddle Lite等一致的开发体验; + - 支持并测试 Graphcore IPU 通过 Paddle Inference后端; + - 优化[一键模型量化工具](tools/quantization),支持YOLOv7、YOLOv6、YOLOv5等视觉模型,在CPU和GPU推理速度可提升1.5~2倍; + - 新增 [PP-Tracking](./examples/vision/tracking/pptracking) 和 [RobustVideoMatting](./examples/vision/matting) 等模型; + +- 🔥 **2022.10.24:Release FastDeploy [release v0.4.0](https://github.com/PaddlePaddle/FastDeploy/tree/release/0.4.0)** + - **🖥️ 服务端部署:推理速度大升级** + - 升级 GPU 端到端的优化,在YOLO系列上,模型推理速度从 43ms 提升到 25ms; + - 新增 [TinyPose](examples/vision/keypointdetection/tiny_pose) and [PicoDetji lianTinyPose](examples/vision/keypointdetection/det_keypoint_unite)Pipeline部署能力; + - **📲 移动端和端侧部署:移动端后端能力升级,支持更多的CV模型** + - 集成 Paddle Lite,并保证其使用与服务端常用推理引擎 Paddle Inference、TensorRT、OpenVINO、ONNX Runtime 等一致的开发体验; + - 新增 [轻量化目标检测模型](examples/vision/detection/paddledetection/android)和[分类模型](examples/vision/classification/paddleclas/android)的安卓端部署能力; + - ** Web和小程序部署:新增Web端部署能力** + - 集成 Paddle.js部署能力,新增 OCR、目标检测、人像分割背景替换、物体识别等Web端部署能力和Demo(examples/application/js); ## 目录 -* **服务端部署** + +*
📖 文档教程(点击可收缩) + + - 安装文档 + - [预编译库下载安装](docs/cn/build_and_install/download_prebuilt_libraries.md) + - [GPU部署环境编译安装](docs/cn/build_and_install/gpu.md) + - [CPU部署环境编译安装](docs/cn/build_and_install/cpu.md) + - [IPU部署环境编译安装](docs/cn/build_and_install/ipu.md) + - [Jetson部署环境编译安装](docs/cn/build_and_install/jetson.md) + - [Android平台部署环境编译安装](docs/cn/build_and_install/android.md) + - 快速使用 + - [Python部署示例](docs/cn/quick_start/models/python.md) + - [C++部署示例](docs/cn/quick_start/models/cpp.md) + - [Runtime Python使用示例](docs/cn/quick_start/runtime/python.md) + - [Runtime C++使用示例](docs/cn/quick_start/runtime/cpp.md) + - API文档(进行中) + - [Python API文档](https://www.paddlepaddle.org.cn/fastdeploy-api-doc/python/html/) + - [C++ API文档](https://www.paddlepaddle.org.cn/fastdeploy-api-doc/cpp/html/) + - 性能调优 + - [量化加速](docs/cn/quantize.md) + - 常见问题 + - [1. 如何配置模型部署的推理后端](docs/cn/faq/how_to_change_backend.md) + - [2. Windows上C++ SDK如何使用](docs/cn/faq/use_sdk_on_windows.md) + - [3. Android上如何使用FastDeploy](docs/cn/faq/use_sdk_on_android.md)(进行中) + - [4. TensorRT使用中的一些技巧](docs/cn/faq/tensorrt_tricks.md) + - [5. 如何增加新的模型](docs/cn/faq/develop_a_new_model.md)(进行中) + - 更多FastDeploy部署模块 + - [服务化部署](../serving) + - [Benchmark测试](../benchmark) +
+ +* **🖥️ 服务器端部署** * [Python SDK快速开始](#fastdeploy-quick-start-python) * [C++ SDK快速开始](#fastdeploy-quick-start-cpp) * [服务端模型支持列表](#fastdeploy-server-models) -* **端侧部署** +* **📲 移动端和端侧部署** * [Paddle Lite NPU部署](#fastdeploy-edge-sdk-npu) - * [端侧模型支持列表](#fastdeploy-edge-sdk) -* [社区交流](#fastdeploy-community) -* [Acknowledge](#fastdeploy-acknowledge) -* [License](#fastdeploy-license) + * [端侧模型支持列表](#fastdeploy-edge-models) +* ** Web和小程序部署** + * [Web端模型支持列表](#fastdeploy-web-models) +* [**社区交流**](#fastdeploy-community) +* [**Acknowledge**](#fastdeploy-acknowledge) +* [**License**](#fastdeploy-license) -## 服务端部署 +## 🖥️ 服务端部署 ### Python SDK快速开始
@@ -73,9 +106,7 @@ #### 快速安装 ##### 前置依赖 -- CUDA >= 11.2 -- cuDNN >= 8.0 -- python >= 3.6 +- CUDA >= 11.2、cuDNN >= 8.0、Python >= 3.6 - OS: Linux x86_64/macOS/Windows 10 ##### 安装GPU版本 @@ -119,12 +150,13 @@ print(result) vis_im = vision.vis_detection(im, result, score_threshold=0.5) cv2.imwrite("vis_image.jpg", vis_im) ``` -
-C++ SDK快速开始 - -### C++ SDK快速开始 +
+
+C++ SDK快速开始(点开查看详情) + + #### 安装 - 参考[C++预编译库下载](docs/quick_start/CPP_prebuilt_libraries.md)文档 @@ -164,69 +196,81 @@ int main(int argc, char* argv[]) { 更多部署案例请参考[视觉模型部署示例](examples/vision) . -### 服务端和web模型支持列表 🔥🔥🔥 +### 服务端模型支持列表 🔥🔥🔥🔥🔥
-符号说明: (1) ✅: 已经支持; (2) ❔: 未来支持; (3) ❌: 暂不支持; (4) --: 暂不考虑;
+符号说明: (1) ✅: 已经支持; (2) ❔: 正在进行中; (3) N/A: 暂不支持; (4) --: 暂不考虑;
链接说明:「模型列」会跳转到模型推理Demo代码 - -| 任务场景 | 模型 | API | Linux | Linux | Win | Win | Mac | Mac | Linux | Linux | Linux | [web_demo](examples/application/js/web_demo) | [mini_program](examples/application/js/mini_program) | -|:-----------------------------:|:---------------------------------------------------------------------------------------:|:---------------------------------------------------------------------------------------------------------------------------------:|:---------------------:|:------------------------:|:------------------------:|:------------------------:|:-----------------------:|:---------------------:|:--------------------------:|:---------------------------:|:--------------------------:|:---------------------------:|:---------------------------:| -| --- | --- | --- | X86 CPU | NVIDIA GPU | Intel CPU | NVIDIA GPU | Intel CPU | Arm CPU | AArch64 CPU | NVIDIA Jetson | Graphcore IPU |[Paddle.js](examples/application/js)| [Paddle.js](examples/application/js)| -| Classification | [PaddleClas/ResNet50](./examples/vision/classification/paddleclas) | [Python](./examples/vision/classification/paddleclas/python)/[C++](./examples/vision/classification/paddleclas/cpp) |✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |❔|❔| -| Classification | [PaddleClas/PP-LCNet](./examples/vision/classification/paddleclas) | [Python](./examples/vision/classification/paddleclas/python)/[C++](./examples/vision/classification/paddleclas/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |❔|❔| -| Classification | [PaddleClas/PP-LCNetv2](./examples/vision/classification/paddleclas) | [Python](./examples/vision/classification/paddleclas/python)/[C++](./examples/vision/classification/paddleclas/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |❔|❔| -| Classification | [PaddleClas/EfficientNet](./examples/vision/classification/paddleclas) | [Python](./examples/vision/classification/paddleclas/python)/[C++](./examples/vision/classification/paddleclas/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |❔|❔| -| Classification | 
[PaddleClas/GhostNet](./examples/vision/classification/paddleclas) | [Python](./examples/vision/classification/paddleclas/python)/[C++](./examples/vision/classification/paddleclas/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |❔|❔| -| Classification | [PaddleClas/MobileNetV1](./examples/vision/classification/paddleclas) | [Python](./examples/vision/classification/paddleclas/python)/[C++](./examples/vision/classification/paddleclas/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |❔|❔| -| Classification | [PaddleClas/MobileNetV2](./examples/vision/classification/paddleclas) | [Python](./examples/vision/classification/paddleclas/python)/[C++](./examples/vision/classification/paddleclas/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |✅|✅| -| Classification | [PaddleClas/MobileNetV3](./examples/vision/classification/paddleclas) | [Python](./examples/vision/classification/paddleclas/python)/[C++](./examples/vision/classification/paddleclas/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |❔|❔| -| Classification | [PaddleClas/ShuffleNetV2](./examples/vision/classification/paddleclas) | [Python](./examples/vision/classification/paddleclas/python)/[C++](./examples/vision/classification/paddleclas/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |❔|❔| -| Classification | [PaddleClas/SqueeezeNetV1.1](./examples/vision/classification/paddleclas) | [Python](./examples/vision/classification/paddleclas/python)/[C++](./examples/vision/classification/paddleclas/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |❔|❔| -| Classification | [PaddleClas/Inceptionv3](./examples/vision/classification/paddleclas) | [Python](./examples/vision/classification/paddleclas/python)/[C++](./examples/vision/classification/paddleclas/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❌ |❔|❔| -| Classification | [PaddleClas/PP-HGNet](./examples/vision/classification/paddleclas) | [Python](./examples/vision/classification/paddleclas/python)/[C++](./examples/vision/classification/paddleclas/cpp) |✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |❔|❔| -| Classification | 
[PaddleClas/SwinTransformer](./examples/vision/classification/paddleclas) | [Python](./examples/vision/classification/paddleclas/python)/[C++](./examples/vision/classification/paddleclas/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❌ |❔|❔| -| Detection | [PaddleDetection/PP-YOLOE](./examples/vision/detection/paddledetection) | [Python](./examples/vision/detection/paddledetection/python)/[C++](./examples/vision/detection/paddledetection/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❌ |❔|❔| -| Detection | [PaddleDetection/PicoDet](./examples/vision/detection/paddledetection) | [Python](./examples/vision/detection/paddledetection/python)/[C++](./examples/vision/detection/paddledetection/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❌ |❔|❔| -| Detection | [PaddleDetection/YOLOX](./examples/vision/detection/paddledetection) | [Python](./examples/vision/detection/paddledetection/python)/[C++](./examples/vision/detection/paddledetection/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❌ |❔|❔| -| Detection | [PaddleDetection/YOLOv3](./examples/vision/detection/paddledetection) | [Python](./examples/vision/detection/paddledetection/python)/[C++](./examples/vision/detection/paddledetection/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❌ |❔|❔| -| Detection | [PaddleDetection/PP-YOLO](./examples/vision/detection/paddledetection) | [Python](./examples/vision/detection/paddledetection/python)/[C++](./examples/vision/detection/paddledetection/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ❌ | ❌ | ❌ | ❌ |❔|❔| -| Detection | [PaddleDetection/PP-YOLOv2](./examples/vision/detection/paddledetection) | [Python](./examples/vision/detection/paddledetection/python)/[C++](./examples/vision/detection/paddledetection/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ❌ | ❌ | ❌ | ❌ |❔|❔| -| Detection | [PaddleDetection/FasterRCNN](./examples/vision/detection/paddledetection) | [Python](./examples/vision/detection/paddledetection/python)/[C++](./examples/vision/detection/paddledetection/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ❌ | ❌ | ❌ | ❌ |❔|❔| -| Detection | 
[Megvii-BaseDetection/YOLOX](./examples/vision/detection/yolox) | [Python](./examples/vision/detection/yolox/python)/[C++](./examples/vision/detection/yolox/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❌ |❔|❔| -| Detection | [WongKinYiu/YOLOv7](./examples/vision/detection/yolov7) | [Python](./examples/vision/detection/yolov7/python)/[C++](./examples/vision/detection/yolov7/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❌ |❔|❔| -| Detection | [meituan/YOLOv6](./examples/vision/detection/yolov6) | [Python](./examples/vision/detection/yolov6/python)/[C++](./examples/vision/detection/yolov6/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❌ |❔|❔| -| Detection | [ultralytics/YOLOv5](./examples/vision/detection/yolov5) | [Python](./examples/vision/detection/yolov5/python)/[C++](./examples/vision/detection/yolov5/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❌ |❔|❔| -| Detection | [WongKinYiu/YOLOR](./examples/vision/detection/yolor) | [Python](./examples/vision/detection/yolor/python)/[C++](./examples/vision/detection/yolor/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❌ |❔|❔| -| Detection | [WongKinYiu/ScaledYOLOv4](./examples/vision/detection/scaledyolov4) | [Python](./examples/vision/detection/scaledyolov4/python)/[C++](./examples/vision/detection/scaledyolov4/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❌ |❔|❔| -| Detection | [ppogg/YOLOv5Lite](./examples/vision/detection/yolov5lite) | [Python](./examples/vision/detection/yolov5lite/python)/[C++](./examples/vision/detection/yolov5lite/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❌ |❔|❔| -| Detection | [RangiLyu/NanoDetPlus](./examples/vision/detection/nanodet_plus) | [Python](./examples/vision/detection/nanodet_plus/python)/[C++](./examples/vision/detection/nanodet_plus/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅| ❌ |❔|❔| -| OCR | [PaddleOCR/PP-OCRv2](./examples/vision/ocr) | [Python](./examples/vision/detection/nanodet_plus/python)/[C++](./examples/vision/ocr/PP-OCRv3/cpp)| ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅| ❌ |❔|❔| -| OCR | [PaddleOCR/PP-OCRv3](./examples/vision/ocr) | 
[Python](./examples/vision/ocr/PP-OCRv3/python)/[C++](./examples/vision/ocr/PP-OCRv3/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅| ❌ |✅|✅| -| Segmentation | [PaddleSeg/PP-LiteSeg](./examples/vision/segmentation/paddleseg) | [Python](./examples/vision/segmentation/paddleseg/python)/[C++](./examples/vision/segmentation/paddleseg/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❌ |❔|❔| -| Segmentation | [PaddleSeg/PP-HumanSegLite](./examples/vision/segmentation/paddleseg) | [Python](./examples/vision/segmentation/paddleseg/python)/[C++](./examples/vision/segmentation/paddleseg/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❌ |❔|❔| -| Segmentation | [PaddleSeg/HRNet](./examples/vision/segmentation/paddleseg) | [Python](./examples/vision/segmentation/paddleseg/python)/[C++](./examples/vision/segmentation/paddleseg/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❌ |❔|❔| -| Segmentation | [PaddleSeg/PP-HumanSegServer](./examples/vision/segmentation/paddleseg) | [Python](./examples/vision/segmentation/paddleseg/python)/[C++](./examples/vision/segmentation/paddleseg/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❌ |✅|✅| -| Segmentation | [PaddleSeg/Unet](./examples/vision/segmentation/paddleseg) | [Python](./examples/vision/segmentation/paddleseg/python)/[C++](./examples/vision/segmentation/paddleseg/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❌ |❔|❔| -| Segmentation | [PaddleSeg/Deeplabv3](./examples/vision/segmentation/paddleseg) | [Python](./examples/vision/segmentation/paddleseg/python)/[C++](./examples/vision/segmentation/paddleseg/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❌ |❔|❔| -| Face Detection | [biubug6/RetinaFace](./examples/vision/facedet/retinaface) | [Python](./examples/vision/facedet/retinaface/python)/[C++](./examples/vision/facedet/retinaface/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❌ |❔|❔| -| Face Detection | [Linzaer/UltraFace](./examples/vision/facedet/ultraface) | [ Python](./examples/vision/facedet/ultraface/python)/[C++](./examples/vision/facedet/ultraface/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❌ |❔|❔| 
-| FaceDetection | [deepcam-cn/YOLOv5Face](./examples/vision/facedet/yolov5face) | [Python](./examples/vision/facedet/yolov5face/python)/[C++](./examples/vision/facedet/yolov5face/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❌ |❔|❔| -| Face Detection | [insightface/SCRFD](./examples/vision/facedet/scrfd) | [Python](./examples/vision/facedet/scrfd/python)/[C++](./examples/vision/facedet/scrfd/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❌ |❔|❔| -| Face Recognition | [insightface/ArcFace](./examples/vision/faceid/insightface) | [Python](./examples/vision/faceid/insightface/python)/[C++](./examples/vision/faceid/insightface/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❌ |❔|❔| -| Face Recognition | [insightface/CosFace](./examples/vision/faceid/insightface) | [Python](./examples/vision/faceid/insightface/python)/[C++](./examples/vision/faceid/insightface/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❌ |❔|❔| -| Face Recognition | [insightface/PartialFC](./examples/vision/faceid/insightface) | [Python](./examples/vision/faceid/insightface/python)/[C++](./examples/vision/faceid/insightface/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❌ |❔|❔| -| Face Recognition | [insightface/VPL](./examples/vision/faceid/insightface) | [Python](./examples/vision/faceid/insightface/python)/[C++](./examples/vision/faceid/insightface/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❌ |❔|❔| -| Matting | [ZHKKKe/MODNet](./examples/vision/matting/modnet) | [Python](./examples/vision/matting/modnet/python)/[C++](./examples/vision/matting/modnet/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅| ❌ |❔|❔| -| Matting | [PaddleSeg/PP-Matting](./examples/vision/matting/ppmatting) | [Python](./examples/vision/matting/ppmatting/python)/[C++](./examples/vision/matting/ppmatting/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅| ❌ |❔|❔| -| Matting | [PaddleSeg/PP-HumanMatting](./examples/vision/matting/modnet) | [Python](./examples/vision/matting/ppmatting/python)/[C++](./examples/vision/matting/ppmatting/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅| ❌ |❔|❔| -| Matting | 
[PaddleSeg/ModNet](./examples/vision/matting/modnet) | [Python](./examples/vision/matting/ppmatting/python)/[C++](./examples/vision/matting/ppmatting/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅| ❌ |❔|❔| -| Information Extraction | [PaddleNLP/UIE](./examples/text/uie) | [Python](./examples/text/uie/python)/[C++](./examples/text/uie/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅| ❌ |❔|❔| +| 任务场景 | 模型 | API | Linux | Linux | Win | Win | Mac | Mac | Linux | Linux | Linux | Linux | +|:----------------------:|:--------------------------------------------------------------------------------------------:|:-----------------------------------------------------------------------------------------------------------------------------------------:|:-------:|:----------:|:-------:|:----------:|:-------:|:-------:|:-----------:|:-------------:|:-------------:|:-------:| +| --- | --- | --- | X86 CPU | NVIDIA GPU | X86 CPU | NVIDIA GPU | X86 CPU | Arm CPU | AArch64 CPU | NVIDIA Jetson | Graphcore IPU | Serving | +| Classification | [PaddleClas/ResNet50](./examples/vision/classification/paddleclas) | [Python](./examples/vision/classification/paddleclas/python)/[C++](./examples/vision/classification/paddleclas/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | +| Classification | [TorchVision/ResNet](examples/vision/classification/resnet) | [Python](./examples/vision/classification/resnet/python)/[C++](./examples/vision/classification/resnet/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ | +| Classification | [ultralytics/YOLOv5Cls](examples/vision/classification/yolov5cls) | [Python](./examples/vision/classification/yolov5cls/python)/[C++](./examples/vision/classification/yolov5cls/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ | +| Classification | [PaddleClas/PP-LCNet](./examples/vision/classification/paddleclas) | [Python](./examples/vision/classification/paddleclas/python)/[C++](./examples/vision/classification/paddleclas/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | +| Classification | 
[PaddleClas/PP-LCNetv2](./examples/vision/classification/paddleclas) | [Python](./examples/vision/classification/paddleclas/python)/[C++](./examples/vision/classification/paddleclas/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | +| Classification | [PaddleClas/EfficientNet](./examples/vision/classification/paddleclas) | [Python](./examples/vision/classification/paddleclas/python)/[C++](./examples/vision/classification/paddleclas/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | +| Classification | [PaddleClas/GhostNet](./examples/vision/classification/paddleclas) | [Python](./examples/vision/classification/paddleclas/python)/[C++](./examples/vision/classification/paddleclas/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | +| Classification | [PaddleClas/MobileNetV1](./examples/vision/classification/paddleclas) | [Python](./examples/vision/classification/paddleclas/python)/[C++](./examples/vision/classification/paddleclas/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | +| Classification | [PaddleClas/MobileNetV2](./examples/vision/classification/paddleclas) | [Python](./examples/vision/classification/paddleclas/python)/[C++](./examples/vision/classification/paddleclas/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | +| Classification | [PaddleClas/MobileNetV3](./examples/vision/classification/paddleclas) | [Python](./examples/vision/classification/paddleclas/python)/[C++](./examples/vision/classification/paddleclas/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | +| Classification | [PaddleClas/ShuffleNetV2](./examples/vision/classification/paddleclas) | [Python](./examples/vision/classification/paddleclas/python)/[C++](./examples/vision/classification/paddleclas/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | +| Classification | [PaddleClas/SqueeezeNetV1.1](./examples/vision/classification/paddleclas) | [Python](./examples/vision/classification/paddleclas/python)/[C++](./examples/vision/classification/paddleclas/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | +| Classification 
| [PaddleClas/Inceptionv3](./examples/vision/classification/paddleclas) | [Python](./examples/vision/classification/paddleclas/python)/[C++](./examples/vision/classification/paddleclas/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ | +| Classification | [PaddleClas/PP-HGNet](./examples/vision/classification/paddleclas) | [Python](./examples/vision/classification/paddleclas/python)/[C++](./examples/vision/classification/paddleclas/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | +| Classification | [PaddleClas/SwinTransformer](./examples/vision/classification/paddleclas) | [Python](./examples/vision/classification/paddleclas/python)/[C++](./examples/vision/classification/paddleclas/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ | +| Detection | [PaddleDetection/PP-YOLOE](./examples/vision/detection/paddledetection) | [Python](./examples/vision/detection/paddledetection/python)/[C++](./examples/vision/detection/paddledetection/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ | +| Detection | [PaddleDetection/PicoDet](./examples/vision/detection/paddledetection) | [Python](./examples/vision/detection/paddledetection/python)/[C++](./examples/vision/detection/paddledetection/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ | +| Detection | [PaddleDetection/YOLOX](./examples/vision/detection/paddledetection) | [Python](./examples/vision/detection/paddledetection/python)/[C++](./examples/vision/detection/paddledetection/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ | +| Detection | [PaddleDetection/YOLOv3](./examples/vision/detection/paddledetection) | [Python](./examples/vision/detection/paddledetection/python)/[C++](./examples/vision/detection/paddledetection/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ | +| Detection | [PaddleDetection/PP-YOLO](./examples/vision/detection/paddledetection) | [Python](./examples/vision/detection/paddledetection/python)/[C++](./examples/vision/detection/paddledetection/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ | +| Detection | 
[PaddleDetection/PP-YOLOv2](./examples/vision/detection/paddledetection) | [Python](./examples/vision/detection/paddledetection/python)/[C++](./examples/vision/detection/paddledetection/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ | +| Detection | [PaddleDetection/Faster-RCNN](./examples/vision/detection/paddledetection) | [Python](./examples/vision/detection/paddledetection/python)/[C++](./examples/vision/detection/paddledetection/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ | +| Detection | [PaddleDetection/Mask-RCNN](./examples/vision/detection/paddledetection) | [Python](./examples/vision/detection/paddledetection/python)/[C++](./examples/vision/detection/paddledetection/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ | +| Detection | [Megvii-BaseDetection/YOLOX](./examples/vision/detection/yolox) | [Python](./examples/vision/detection/yolox/python)/[C++](./examples/vision/detection/yolox/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ | +| Detection | [WongKinYiu/YOLOv7](./examples/vision/detection/yolov7) | [Python](./examples/vision/detection/yolov7/python)/[C++](./examples/vision/detection/yolov7/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ | +| Detection | [WongKinYiu/YOLOv7end2end_trt](./examples/vision/detection/yolov7end2end_trt) | [Python](./examples/vision/detection/yolov7end2end_trt/python)/[C++](./examples/vision/detection/yolov7end2end_trt/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | | | +| Detection | [WongKinYiu/YOLOv7end2end_ort](./examples/vision/detection/yolov7end2end_ort) | [Python](./examples/vision/detection/yolov7end2end_ort/python)/[C++](./examples/vision/detection/yolov7end2end_ort/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | | | +| Detection | [meituan/YOLOv6](./examples/vision/detection/yolov6) | [Python](./examples/vision/detection/yolov6/python)/[C++](./examples/vision/detection/yolov6/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ | +| Detection | [ultralytics/YOLOv5](./examples/vision/detection/yolov5) | 
[Python](./examples/vision/detection/yolov5/python)/[C++](./examples/vision/detection/yolov5/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ | +| Detection | [WongKinYiu/YOLOR](./examples/vision/detection/yolor) | [Python](./examples/vision/detection/yolor/python)/[C++](./examples/vision/detection/yolor/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ | +| Detection | [WongKinYiu/ScaledYOLOv4](./examples/vision/detection/scaledyolov4) | [Python](./examples/vision/detection/scaledyolov4/python)/[C++](./examples/vision/detection/scaledyolov4/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ | +| Detection | [ppogg/YOLOv5Lite](./examples/vision/detection/yolov5lite) | [Python](./examples/vision/detection/yolov5lite/python)/[C++](./examples/vision/detection/yolov5lite/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ | +| Detection | [RangiLyu/NanoDetPlus](./examples/vision/detection/nanodet_plus) | [Python](./examples/vision/detection/nanodet_plus/python)/[C++](./examples/vision/detection/nanodet_plus/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ | +| KeyPoint | [PaddleDetection/TinyPose](./examples/vision/keypointdetection/tiny_pose) | [Python](./examples/vision/keypointdetection/tiny_pose/python)/[C++](./examples/vision/keypointdetection/tiny_pose/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ | +| KeyPoint | [PaddleDetection/PicoDet + TinyPose](./examples/vision/keypointdetection/det_keypoint_unite) | [Python](./examples/vision/keypointdetection/det_keypoint_unite/python)/[C++](./examples/vision/keypointdetection/det_keypoint_unite/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ | +| HeadPose | [omasaht/headpose](examples/vision/headpose) | [Python](./examples/vision/headpose/fsanet/python)/[C++](./examples/vision/headpose/fsanet/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ | +| Tracking | [PaddleDetection/PP-Tracking](examples/vision/tracking/pptracking) | [Python](examples/vision/tracking/pptracking/python)/[C++](examples/vision/tracking/pptracking/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ 
| ✅ | ✅ | ❔ | ❔ | +| OCR | [PaddleOCR/PP-OCRv2](./examples/vision/ocr) | [Python](./examples/vision/ocr/PP-OCRv2/python)/[C++](./examples/vision/ocr/PP-OCRv2/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ | +| OCR | [PaddleOCR/PP-OCRv3](./examples/vision/ocr) | [Python](./examples/vision/ocr/PP-OCRv3/python)/[C++](./examples/vision/ocr/PP-OCRv3/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ | +| Segmentation | [PaddleSeg/PP-LiteSeg](./examples/vision/segmentation/paddleseg) | [Python](./examples/vision/segmentation/paddleseg/python)/[C++](./examples/vision/segmentation/paddleseg/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ | +| Segmentation | [PaddleSeg/PP-HumanSegLite](./examples/vision/segmentation/paddleseg) | [Python](./examples/vision/segmentation/paddleseg/python)/[C++](./examples/vision/segmentation/paddleseg/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ | +| Segmentation | [PaddleSeg/HRNet](./examples/vision/segmentation/paddleseg) | [Python](./examples/vision/segmentation/paddleseg/python)/[C++](./examples/vision/segmentation/paddleseg/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ | +| Segmentation | [PaddleSeg/PP-HumanSegServer](./examples/vision/segmentation/paddleseg) | [Python](./examples/vision/segmentation/paddleseg/python)/[C++](./examples/vision/segmentation/paddleseg/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ | +| Segmentation | [PaddleSeg/Unet](./examples/vision/segmentation/paddleseg) | [Python](./examples/vision/segmentation/paddleseg/python)/[C++](./examples/vision/segmentation/paddleseg/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ | +| Segmentation | [PaddleSeg/Deeplabv3](./examples/vision/segmentation/paddleseg) | [Python](./examples/vision/segmentation/paddleseg/python)/[C++](./examples/vision/segmentation/paddleseg/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ | +| FaceDetection | [biubug6/RetinaFace](./examples/vision/facedet/retinaface) | 
[Python](./examples/vision/facedet/retinaface/python)/[C++](./examples/vision/facedet/retinaface/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ | +| FaceDetection | [Linzaer/UltraFace](./examples/vision/facedet/ultraface) | [ Python](./examples/vision/facedet/ultraface/python)/[C++](./examples/vision/facedet/ultraface/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ | +| FaceDetection | [deepcam-cn/YOLOv5Face](./examples/vision/facedet/yolov5face) | [Python](./examples/vision/facedet/yolov5face/python)/[C++](./examples/vision/facedet/yolov5face/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | +| FaceDetection | [insightface/SCRFD](./examples/vision/facedet/scrfd) | [Python](./examples/vision/facedet/scrfd/python)/[C++](./examples/vision/facedet/scrfd/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ | +| FaceAlign | [Hsintao/PFLD](examples/vision/facealign/pfld) | [Python](./examples/vision/facealign/pfld/python)/[C++](./examples/vision/facealign/pfld/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ | +| FaceRecognition | [insightface/ArcFace](./examples/vision/faceid/insightface) | [Python](./examples/vision/faceid/insightface/python)/[C++](./examples/vision/faceid/insightface/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ | +| FaceRecognition | [insightface/CosFace](./examples/vision/faceid/insightface) | [Python](./examples/vision/faceid/insightface/python)/[C++](./examples/vision/faceid/insightface/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ | +| FaceRecognition | [insightface/PartialFC](./examples/vision/faceid/insightface) | [Python](./examples/vision/faceid/insightface/python)/[C++](./examples/vision/faceid/insightface/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ | +| FaceRecognition | [insightface/VPL](./examples/vision/faceid/insightface) | [Python](./examples/vision/faceid/insightface/python)/[C++](./examples/vision/faceid/insightface/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ | +| Matting | [ZHKKKe/MODNet](./examples/vision/matting/modnet) | 
[Python](./examples/vision/matting/modnet/python)/[C++](./examples/vision/matting/modnet/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ | +| Matting | [PeterL1n/RobustVideoMatting](./examples/vision/matting/rvm) | [Python](./examples/vision/matting/rvm/python)/[C++](./examples/vision/matting/rvm/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ | +| Matting | [PaddleSeg/PP-Matting](./examples/vision/matting/ppmatting) | [Python](./examples/vision/matting/ppmatting/python)/[C++](./examples/vision/matting/ppmatting/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ | +| Matting | [PaddleSeg/PP-HumanMatting](./examples/vision/matting/modnet) | [Python](./examples/vision/matting/ppmatting/python)/[C++](./examples/vision/matting/ppmatting/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ | +| Matting | [PaddleSeg/ModNet](./examples/vision/matting/modnet) | [Python](./examples/vision/matting/ppmatting/python)/[C++](./examples/vision/matting/ppmatting/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ | +| Information Extraction | [PaddleNLP/UIE](./examples/text/uie) | [Python](./examples/text/uie/python)/[C++](./examples/text/uie/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ | +| NLP | [PaddleNLP/ERNIE-3.0](./examples/text/ernie-3.0) | Python/C++ | ❔ | ❔ | ❔ | ❔ | ❔ | ❔ | ❔ | ❔ | ❔ | ✅ | +| Speech | [PaddleSpeech/PP-TTS](./examples/audio/pp-tts) | [Python](examples/audio/pp-tts/python)/C++ | ❔ | ❔ | ❔ | ❔ | ❔ | ❔ | ❔ | ❔ | -- | ✅ | -## 端侧部署 +## 📲 移动端和端侧部署 🔥🔥🔥🔥
@@ -236,48 +280,64 @@ int main(int argc, char* argv[]) { - [瑞芯微-NPU/晶晨-NPU/恩智浦-NPU](https://github.com/PaddlePaddle/Paddle-Lite-Demo/tree/develop/object_detection/linux/picodet_detection) -### 端侧模型支持列表 +### 📲 端侧模型支持列表
-| 任务场景 | 模型 | 大小(MB) | Linux | Android | iOS |Linux | Linux | Linux |更新中...| -|:------------------:|:----------------------------:|:---------------------:|:---------------------:|:----------------------:|:---------------------:| :------------------:|:----------------------------:|:---------------------:|:---------------------:| -| --- | --- | --- | ARM CPU | ARM CPU | ARM CPU |瑞芯微NPU
RV1109
RV1126
RK1808 | 晶晨NPU
A311D
S905D
C308X | 恩智浦NPU
i.MX 8M Plus |更新中...| -| Classification | PP-LCNet | 11.9 | ✅ | ✅ | ✅ |-- | -- | -- |--| -| Classification | PP-LCNetv2 | 26.6 | ✅ | ✅ | ✅ |-- | -- | -- |--| -| Classification | EfficientNet | 31.4 | ✅ | ✅ | ✅ |-- | -- | -- |--| -| Classification | GhostNet | 20.8 | ✅ | ✅ | ✅ |-- | -- | -- |--| -| Classification | MobileNetV1 | 17 | ✅ | ✅ | ✅ |-- | -- | -- |--| -| Classification | MobileNetV2 | 14.2 | ✅ | ✅ | ✅ |-- | -- | -- |--| -| Classification | MobileNetV3 | 22 | ✅ | ✅ | ✅ |❔ | ❔ | ❔ |❔| -| Classification | ShuffleNetV2 | 9.2 | ✅ | ✅ | ✅ |-- | -- | -- |--| -| Classification | SqueezeNetV1.1 | 5 | ✅ | ✅ | ✅ | -| Classification | Inceptionv3 | 95.5 | ✅ | ✅ | ✅ |-- | -- | -- |--| -| Classification | PP-HGNet | 59 | ✅ | ✅ | ✅ |-- | -- | -- |--| -| Classification | SwinTransformer_224_win7 | 352.7 | ✅ | ✅ | ✅ |-- | -- | -- |--| -| Detection | PP-PicoDet_s_320_coco | 4.1 | ✅ | ✅ | ✅ |-- | -- | -- |--| -| Detection | PP-PicoDet_s_320_lcnet | 4.9 | ✅ | ✅ | ✅ |✅ | ✅ | ✅ | ❔| -| Detection | CenterNet | 4.8 | ✅ | ✅ | ✅ |-- | -- | -- |--| -| Detection | YOLOv3_MobileNetV3 | 94.6 | ✅ | ✅ | ✅ |-- | -- | -- |--| -| Detection | PP-YOLO_tiny_650e_coco | 4.4 | ✅ | ✅ | ✅ |-- | -- | -- |--| -| Detection | SSD_MobileNetV1_300_120e_voc | 23.3 | ✅ | ✅ | ✅ |-- | -- | -- |--| -| Detection | PP-YOLO_ResNet50vd | 188.5 | ✅ | ✅ | ✅ |-- | -- | -- |--| -| Detection | PP-YOLOv2_ResNet50vd | 218.7 | ✅ | ✅ | ✅ |-- | -- | -- |--| -| Detection | PP-YOLO_crn_l_300e_coco | 209.1 | ✅ | ✅ | ✅ |-- | -- | -- |--| -| Detection | YOLOv5s | 29.3 | ✅ | ✅ | ✅ |-- | -- | -- |--| -| Face Detection | BlazeFace | 1.5 | ✅ | ✅ | ✅ |-- | -- | -- |--| -| Face Detection | RetinaFace | 1.7 | ✅ | ❌ | ❌ |-- | -- | -- |--| -| Keypoint Detection | PP-TinyPose | 5.5 | ✅ | ✅ | ✅ |❔ | ❔ | ❔ |❔| -| Segmentation | PP-LiteSeg(STDC1) | 32.2 | ✅ | ✅ | ✅ |-- | -- | -- |--| -| Segmentation | PP-HumanSeg-Lite | 0.556 | ✅ | ✅ | ✅ |-- | -- | -- |--| -| Segmentation | HRNet-w18 | 38.7 | ✅ | ✅ | ✅ |-- | -- | -- |--| -| Segmentation | 
PP-HumanSeg-Server | 107.2 | ✅ | ✅ | ✅ |-- | -- | -- |--| -| Segmentation | Unet | 53.7 | ❌ | ✅ | ❌ |-- | -- | -- |--| -| OCR | PP-OCRv1 | 2.3+4.4 | ✅ | ✅ | ✅ |-- | -- | -- |--| -| OCR | PP-OCRv2 | 2.3+4.4 | ✅ | ✅ | ✅ |-- | -- | -- |--| -| OCR | PP-OCRv3 | 2.4+10.6 | ✅ | ✅ | ✅ |❔ | ❔ | ❔ |❔| -| OCR | PP-OCRv3-tiny | 2.4+10.7 | ✅ | ✅ | ✅ |-- | -- | -- |--| +| 任务场景 | 模型 | 大小(MB) | Linux | Android | iOS | Linux | Linux | Linux | Linux | 更新中... | +|:------------------:|:-----------------------------------------------------------------------------------------:|:--------:|:-------:|:-------:|:-------: |:------------------:|:------------------------------------:|:---------------------------------:|:------------------------:|:-------:| +| --- | --- | --- | ARM CPU | ARM CPU | ARM CPU | 瑞芯微NPU
RK3568/RK3588 | 瑞芯微NPU
RV1109/RV1126/RK1808 | 晶晨NPU
A311D/S905D/C308X | 恩智浦NPU
i.MX 8M Plus | 更新中...| | +| Classification | [PaddleClas/PP-LCNet](examples/vision/classification/paddleclas) | 11.9 | ✅ | ✅ | ❔ | ❔ | -- | -- | -- | -- | +| Classification | [PaddleClas/PP-LCNetv2](examples/vision/classification/paddleclas) | 26.6 | ✅ | ✅ | ❔ | ❔ | -- | -- | -- | -- | +| Classification | [PaddleClas/EfficientNet](examples/vision/classification/paddleclas) | 31.4 | ✅ | ✅ | ❔ | ❔ | -- | -- | -- | -- | +| Classification | [PaddleClas/GhostNet](examples/vision/classification/paddleclas) | 20.8 | ✅ | ✅ | ❔ | ❔ | -- | -- | -- | -- | +| Classification | [PaddleClas/MobileNetV1](examples/vision/classification/paddleclas) | 17 | ✅ | ✅ | ❔ | ❔ | -- | -- | -- | -- | +| Classification | [PaddleClas/MobileNetV2](examples/vision/classification/paddleclas) | 14.2 | ✅ | ✅ | ❔ | ❔ | -- | -- | -- | -- | +| Classification | [PaddleClas/MobileNetV3](examples/vision/classification/paddleclas) | 22 | ✅ | ✅ | ❔ | ❔ | ❔ | ❔ | ❔ | -- | +| Classification | [PaddleClas/ShuffleNetV2](examples/vision/classification/paddleclas) | 9.2 | ✅ | ✅ | ❔ | ❔ | -- | -- | -- | -- | +| Classification | [PaddleClas/SqueezeNetV1.1](examples/vision/classification/paddleclas) | 5 | ✅ | ✅ | ❔ | ❔ | -- | -- | -- | -- | +| Classification | [PaddleClas/Inceptionv3](examples/vision/classification/paddleclas) | 95.5 | ✅ | ✅ | ❔ | ❔ | -- | -- | -- | -- | +| Classification | [PaddleClas/PP-HGNet](examples/vision/classification/paddleclas) | 59 | ✅ | ✅ | ❔ | ❔ | -- | -- | -- | -- | +| Classification | [PaddleClas/SwinTransformer_224_win7](examples/vision/classification/paddleclas) | 352.7 | ✅ | ✅ | ❔ | ❔ | -- | -- | -- | -- | +| Detection | [PaddleDetection/PP-PicoDet_s_320_coco](examples/vision/detection/paddledetection) | 4.1 | ✅ | ✅ | ❔ | ❔ | -- | -- | -- | -- | +| Detection | [PaddleDetection/PP-PicoDet_s_320_lcnet](examples/vision/detection/paddledetection) | 4.9 | ✅ | ✅ | ❔ | ❔ | ✅ | ✅ | ✅ | -- | +| Detection | [PaddleDetection/CenterNet](examples/vision/detection/paddledetection) | 4.8 | ✅ | ❔ 
| ❔ | ❔ | -- | -- | -- | -- | +| Detection | [PaddleDetection/YOLOv3_MobileNetV3](examples/vision/detection/paddledetection) | 94.6 | ✅ | ❔ | ❔ | ❔ | -- | -- | -- | -- | +| Detection | [PaddleDetection/PP-YOLO_tiny_650e_coco](examples/vision/detection/paddledetection) | 4.4 | ✅ | ❔ | ❔ | ❔ | -- | -- | -- | -- | +| Detection | [PaddleDetection/SSD_MobileNetV1_300_120e_voc](examples/vision/detection/paddledetection) | 23.3 | ✅ | ❔ | ❔ | ❔ | -- | -- | -- | -- | +| Detection | [PaddleDetection/PP-YOLO_ResNet50vd](examples/vision/detection/paddledetection) | 188.5 | ✅ | ❔ | ❔ | ❔ | -- | -- | -- | -- | +| Detection | [PaddleDetection/PP-YOLOv2_ResNet50vd](examples/vision/detection/paddledetection) | 218.7 | ✅ | ❔ | ❔ | ❔ | -- | -- | -- | -- | +| Detection | [PaddleDetection/PP-YOLO_crn_l_300e_coco](examples/vision/detection/paddledetection) | 209.1 | ✅ | ❔ | ❔ | ❔ | -- | -- | -- | -- | +| Detection | YOLOv5s | 29.3 | ❔ | ❔ | ❔ | ❔ | -- | -- | -- | -- | +| Face Detection | BlazeFace | 1.5 | ❔ | ❔ | ❔ | ❔ | -- | -- | -- | -- | +| Face Detection | RetinaFace | 1.7 | ❔ | ❔ | ❔ | ❔ | -- | -- | -- | -- | +| Keypoint Detection | [PaddleDetection/PP-TinyPose](examples/vision/keypointdetection/tiny_pose) | 5.5 | ✅ | ❔ | ❔ | ❔ | ❔ | ❔ | ❔ | -- | +| Segmentation | [PaddleSeg/PP-LiteSeg(STDC1)](examples/vision/segmentation/paddleseg) | 32.2 | ✅ | ❔ | ❔ | ✅ | -- | -- | -- | -- | +| Segmentation | [PaddleSeg/PP-HumanSeg-Lite](examples/vision/segmentation/paddleseg) | 0.556 | ✅ | ❔ | ❔ | ✅ | -- | -- | -- | -- | +| Segmentation | [PaddleSeg/HRNet-w18](examples/vision/segmentation/paddleseg) | 38.7 | ✅ | ❔ | ❔ | ✅ | -- | -- | -- | -- | +| Segmentation | [PaddleSeg/PP-HumanSeg](examples/vision/segmentation/paddleseg) | 107.2 | ✅ | ❔ | ❔ | ✅ | -- | -- | -- | -- | +| Segmentation | [PaddleSeg/Unet](examples/vision/segmentation/paddleseg) | 53.7 | ✅ | ❔ | ❔ | ✅ | -- | -- | -- | -- | +| Segmentation | [PaddleSeg/Deeplabv3](examples/vision/segmentation/paddleseg) | 150 | ❔ | ❔ | ❔ | ✅ | | | | 
| +| OCR | PaddleOCR/PP-OCRv1 | 2.3+4.4 | ❔ | ❔ | ❔ | ❔ | -- | -- | -- | -- | +| OCR | [PaddleOCR/PP-OCRv2](examples/vision/ocr/PP-OCRv2) | 2.3+4.4 | ✅ | ❔ | ❔ | ❔ | -- | -- | -- | -- | +| OCR | [PaddleOCR/PP-OCRv3](examples/vision/ocr/PP-OCRv3) | 2.4+10.6 | ✅ | ❔ | ❔ | ❔ | ❔ | ❔ | ❔ | -- | +| OCR | PaddleOCR/PP-OCRv3-tiny | 2.4+10.7 | ❔ | ❔ | ❔ | ❔ | -- | -- | -- | -- | + + +## Web和小程序部署 +
+ +| 任务场景 | 模型 | [web_demo](examples/application/js/web_demo) | +|:------------------:|:-------------------------------------------------------------------------------------------:|:--------------------------------------------:| +| --- | --- | [Paddle.js](examples/application/js) | +| Detection | [FaceDetection](examples/application/js/web_demo/src/pages/cv/detection) | ✅ | +| Detection | [ScrewDetection](examples/application/js/web_demo/src/pages/cv/detection) | ✅ | +| Segmentation | [PaddleSeg/HumanSeg](./examples/application/js/web_demo/src/pages/cv/segmentation/HumanSeg) | ✅ | +| Object Recognition | [GestureRecognition](examples/application/js/web_demo/src/pages/cv/recognition) | ✅ | +| Object Recognition | [ItemIdentification](examples/application/js/web_demo/src/pages/cv/recognition) | ✅ | +| OCR | [PaddleOCR/PP-OCRv3](./examples/application/js/web_demo/src/pages/cv/ocr) | ✅ | + ## 社区交流
From 2c84f190982538e13b33b93d7c5c5bf2241c4fea Mon Sep 17 00:00:00 2001 From: leiqing <54695910+leiqing1@users.noreply.github.com> Date: Sun, 6 Nov 2022 23:53:22 +0800 Subject: [PATCH 11/30] Update README_EN.md --- README_EN.md | 326 +++++++++++++++++++++++++++++++-------------------- 1 file changed, 200 insertions(+), 126 deletions(-) diff --git a/README_EN.md b/README_EN.md index e03dc65408..b298bf9a86 100644 --- a/README_EN.md +++ b/README_EN.md @@ -18,49 +18,94 @@ English | [简体中文](README_CN.md) -**⚡️FastDeploy** is an **accessible and efficient** deployment Development Toolkit. It covers 🔥**critical AI models** in the industry and provides 📦**out-of-the-box** deployment experience. It covers image classification, object detection, image segmentation, face detection, face recognition, human keypoint detection, OCR, semantic understanding and other tasks to meet developers' industrial deployment needs for **multi-scenario**, **multi-hardware** and **multi-platform** . +**⚡️FastDeploy** is an **accessible and efficient** deployment Development Toolkit. It covers 🔥**critical CV、NLP、Speech AI models** in the industry and provides 📦**out-of-the-box** deployment experience. It covers image classification, object detection, image segmentation, face detection, face recognition, human keypoint detection, OCR, semantic understanding and other tasks to meet developers' industrial deployment needs for **multi-scenario**, **multi-hardware** and **multi-platform** . 
+ +| [Object Detection](examples/vision) | [3D Object Detection](https://github.com/PaddlePaddle/FastDeploy/issues/6) | [Semantic Segmentation](examples/vision/segmentation/paddleseg) | [Portrait Segmentation](examples/vision/segmentation/paddleseg) | +|:---------------------------------------------------------------------------------------------------------:|:---------------------------------------------------------------------------------------------------------:|:--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------:|:-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------:| +| | | | | +| [**Image Matting**](examples/vision/matting) | [**Real-Time Matting**](examples/vision/matting) | [**OCR**](examples/vision/ocr) |[**Face Alignment**](examples/vision/facealign) +| | | | | +| [**Pose Estimation**](examples/vision/keypointdetection) | [**Behavior Recognition**](https://github.com/PaddlePaddle/FastDeploy/issues/6) | [**NLP**](examples/text) |[**Speech**](examples/audio/pp-tts) +| | | |

**input**: 早上好,今天是2020/10/29,最低温度是-3°C。

**output**: [audio sample](https://paddlespeech.bj.bcebos.com/Parakeet/docs/demos/parakeet_espnet_fs2_pwg_demo/tn_g2p/parakeet/001.wav)

| -| Potrait Segmentation | Image Matting | Semantic Segmentation | Real-Time Matting | -|:----------------------------------------------------------------------------------------------------------------------------------------------:|:----------------------------------------------------------------------------------------------------------------------------------------------:|:------------------------------------------------------------------------------------------------------------------------------------------------:|:----------------------------------------------------------------------------------------------------------------------------------------------:| -| | | | | -| **OCR** | **Behavior Recognition** | **Object Detection** | **Pose Estimation** | -| | | | | -| **Face Alignment** | **3D Object Detection** | **Face Editing** | **Image Animation** | -| | | | | ## 📣 Recent Updates +- 🔥 **【Live Preview】2022.11.09 20:30~21:30,《Covering the full spectrum of cloud-side scenarios with 150+ popular models for rapid deployment》** +- 🔥 **【Live Preview】2022.11.10 20:30~21:30,《10+ AI hardware deployments from Rockchip, Amlogic, NXP and others, straight to industry landing》** +- 🔥 **【Live Preview】2022.11.10 19:00~20:00,《10+ popular models deployed in RK3588, RK3568 in action》** + - Scan the QR code below using WeChat, follow the PaddlePaddle official account and fill out the questionnaire to join the WeChat group +
+ +
+ + - 🔥 **2022.10.31:Release FastDeploy [release v0.5.0](https://github.com/PaddlePaddle/FastDeploy/tree/release/0.5.0)**
- - **New deployment upgrade: Support support more backend, support more cv models** + - **🖥️ Data Center and Cloud Deployment: Support more backend, Support more CV models** - Support Paddle Inference TensorRT, and provide a seamless deployment experience with other inference engines include TensorRT、OpenVINO、ONNX Runtime、Paddle Lite、Paddle Inference; - Support Graphcore IPU through paddle Inference; - - Support tracking model [PP-Tracking](./examples/vision/tracking/pptracking) and [RobustVideoMatting](./examples/vision/matting) model + - Support tracking model [PP-Tracking](./examples/vision/tracking/pptracking) and [RobustVideoMatting](./examples/vision/matting) model; + - Support [one-click model quantization](tools/quantization) to improve model inference speed by 1.5 to 2 times on CPU & GPU platform. The supported quantized model are YOLOv7, YOLOv6, YOLOv5, etc. - 🔥 **2022.10.24:Release FastDeploy [release v0.4.0](https://github.com/PaddlePaddle/FastDeploy/tree/release/0.4.0)**
- - **New server-side deployment upgrade: support more CV model and NLP model** - - Integrate Paddle Lite and provide a seamless deployment experience with other inference engines include TensorRT、OpenVINO、ONNX Runtime、Paddle Inference; - - Support [Lightweight Detection Model](examples/vision/detection/paddledetection/android) and [classification model](examples/vision/classification/paddleclas/android) on Android Platform,Download to try it out + - **🖥️ Data Center and Cloud Deployment: end-to-end optimization, Support more CV and NLP model** - end-to-end optimization on GPU, [YOLO series](examples/vision/detection) model end-to-end inference speedup from 43ms to 25ms;
- - Web deployment and Mini Program deployment New [OCR and other CV models](examples/application/js) capability; + - Support CV models include PP-OCRv3, PP-OCRv2, PP-TinyPose, PP-Matting, etc. and provides [end-to-end deployment demos](examples/vision/detection/) + - Support information extraction model is UIE, and provides [end-to-end deployment demos](examples/text/uie); - Support [TinyPose](examples/vision/keypointdetection/tiny_pose) and [PicoDet+TinyPose](examples/vision/keypointdetection/det_keypoint_unite)Pipeline deployment; - - New CV models include PP-OCRv3, PP-OCRv2, PP-TinyPose, PP-Matting, etc. and provides [end-to-end deployment demos](examples/vision/detection/) - - New information extraction model is UIE, and provides [end-to-end deployment demos](examples/text/uie). - + - **📲 Mobile and Edge Device Deployment: support new backend,support more CV model** + - Integrate Paddle Lite and provide a seamless deployment experience with other inference engines include TensorRT、OpenVINO、ONNX Runtime、Paddle Inference; + - Support [Lightweight Detection Model](examples/vision/detection/paddledetection/android) and [classification model](examples/vision/classification/paddleclas/android) on Android Platform,Download to try it out; + - **Web-Side Deployment: support more CV model** + - Web deployment and Mini Program deployment New [OCR and other CV models](examples/application/js) capability; + ## Contents -* **Data Center and Cloud Deployment** +*
📖 Tutorials(click to shrink) + + - Install + - [How to Install FastDeploy Prebuilt Libraries](en/build_and_install/download_prebuilt_libraries.md) + - [How to Build and Install FastDeploy Library on GPU Platform](en/build_and_install/gpu.md) + - [How to Build and Install FastDeploy Library on CPU Platform](en/build_and_install/cpu.md) + - [How to Build and Install FastDeploy Library on IPU Platform](en/build_and_install/ipu.md) + - [How to Build and Install FastDeploy Library on Nvidia Jetson Platform](en/build_and_install/jetson.md) + - [How to Build and Install FastDeploy Library on Android Platform](en/build_and_install/android.md) + - A Quick Start - Demos + - [Python Deployment Demo](en/quick_start/models/python.md) + - [C++ Deployment Demo](en/quick_start/models/cpp.md) + - [A Quick Start on Runtime Python](en/quick_start/runtime/python.md) + - [A Quick Start on Runtime C++](en/quick_start/runtime/cpp.md) + - API (To be continued) + - [Python API](https://baidu-paddle.github.io/fastdeploy-api/python/html/) + - [C++ API](https://baidu-paddle.github.io/fastdeploy-api/cpp/html/) + - Performance Optimization + - [Quantization Acceleration](en/quantize.md) + - Frequent Q&As + - [1. How to Change Inference Backends](en/faq/how_to_change_backend.md) + - [2. How to Use FastDeploy C++ SDK on Windows Platform](en/faq/use_sdk_on_windows.md) + - [3. How to Use FastDeploy C++ SDK on Android Platform](en/faq/use_sdk_on_android.md)(To be Continued) + - [4. Tricks of TensorRT](en/faq/tensorrt_tricks.md) + - [5. How to Develop a New Model](en/faq/develop_a_new_model.md)(To be Continued) + - More FastDeploy Deployment Module + - [deployment AI Model as a Service](../serving) + - [Benchmark Testing](../benchmark) +
+ +* **🖥️ Data Center and Cloud Deployment** * [A Quick Start for Python SDK](#fastdeploy-quick-start-python) * [A Quick Start for C++ SDK](#fastdeploy-quick-start-cpp) * [Supported Data Center and Cloud Model List](#fastdeploy-server-models) -* **Mobile and Edge Device Deployment** +* **📲 Mobile and Edge Device Deployment** * [Paddle Lite NPU Deployment](#fastdeploy-edge-sdk-npu) - * [Supported Mobile and Edge Model List](#fastdeploy-edge-sdk) -* [Community](#fastdeploy-community) -* [Acknowledge](#fastdeploy-acknowledge) -* [License](#fastdeploy-license) + * [Supported Mobile and Edge Model List](#fastdeploy-edge-models) +* **Web and Mini_Program Deployment** + * [Supported Web and Mini_Program Model List](#fastdeploy-web-models) +* [**Community**](#fastdeploy-community) +* [**Acknowledge**](#fastdeploy-acknowledge) +* [**License**](#fastdeploy-license) -## Data Center and Web Deployment +## 🖥️ Data Center and Web Deployment ### A Quick Start for Python SDK @@ -70,9 +115,7 @@ English | [简体中文](README_CN.md) ##### Prerequisites -- CUDA >= 11.2 -- cuDNN >= 8.0 -- python >= 3.6 +- CUDA >= 11.2 、cuDNN >= 8.0 、 Python >= 3.6 - OS: Linux x86_64/macOS/Windows 10 ##### Install Fastdeploy SDK with CPU&GPU support @@ -122,8 +165,12 @@ vis_im = vision.vis_detection(im, result, score_threshold=0.5) cv2.imwrite("vis_image.jpg", vis_im) ``` +
+
-A Quick Start for C++ SDK +A Quick Start for C++ SDK(click to expand) + +
#### Installation @@ -166,72 +213,82 @@ For more deployment models, please refer to [Vision Model Deployment Examples](e -### Supported Data Center and Web Model List🔥🔥🔥 +### Supported Data Center and Web Model List🔥🔥🔥🔥🔥
-Notes: ✅: already supported; ❔: to be supported in the future; ❌: not supported now; +Notes: ✅: already supported; ❔: to be supported in the future; N/A: not supported now; -| Task | Model | API | Linux | Linux | Win | Win | Mac | Mac | Linux | Linux | Linux | [web_demo](examples/application/js/web_demo) | [mini_program](examples/application/js/mini_program) | -|:-----------------------------:|:---------------------------------------------------------------------------------------:|:---------------------------------------------------------------------------------------------------------------------------------:|:---------------------:|:------------------------:|:------------------------:|:------------------------:|:-----------------------:|:---------------------:|:--------------------------:|:---------------------------:|:--------------------------:|:---------------------------:|:---------------------------:| -| --- | --- | --- | X86 CPU | NVIDIA GPU | Intel CPU | NVIDIA GPU | Intel CPU | Arm CPU | AArch64 CPU | NVIDIA Jetson | Graphcore IPU |[Paddle.js](examples/application/js)| [Paddle.js](examples/application/js)| -| Classification | [PaddleClas/ResNet50](./examples/vision/classification/paddleclas) | [Python](./examples/vision/classification/paddleclas/python)/[C++](./examples/vision/classification/paddleclas/cpp) |✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |❔|❔| -| Classification | [PaddleClas/PP-LCNet](./examples/vision/classification/paddleclas) | [Python](./examples/vision/classification/paddleclas/python)/[C++](./examples/vision/classification/paddleclas/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |❔|❔| -| Classification | [PaddleClas/PP-LCNetv2](./examples/vision/classification/paddleclas) | [Python](./examples/vision/classification/paddleclas/python)/[C++](./examples/vision/classification/paddleclas/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |❔|❔| -| Classification | [PaddleClas/EfficientNet](./examples/vision/classification/paddleclas) | 
[Python](./examples/vision/classification/paddleclas/python)/[C++](./examples/vision/classification/paddleclas/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |❔|❔| -| Classification | [PaddleClas/GhostNet](./examples/vision/classification/paddleclas) | [Python](./examples/vision/classification/paddleclas/python)/[C++](./examples/vision/classification/paddleclas/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |❔|❔| -| Classification | [PaddleClas/MobileNetV1](./examples/vision/classification/paddleclas) | [Python](./examples/vision/classification/paddleclas/python)/[C++](./examples/vision/classification/paddleclas/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |❔|❔| -| Classification | [PaddleClas/MobileNetV2](./examples/vision/classification/paddleclas) | [Python](./examples/vision/classification/paddleclas/python)/[C++](./examples/vision/classification/paddleclas/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |✅|✅| -| Classification | [PaddleClas/MobileNetV3](./examples/vision/classification/paddleclas) | [Python](./examples/vision/classification/paddleclas/python)/[C++](./examples/vision/classification/paddleclas/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |❔|❔| -| Classification | [PaddleClas/ShuffleNetV2](./examples/vision/classification/paddleclas) | [Python](./examples/vision/classification/paddleclas/python)/[C++](./examples/vision/classification/paddleclas/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |❔|❔| -| Classification | [PaddleClas/SqueeezeNetV1.1](./examples/vision/classification/paddleclas) | [Python](./examples/vision/classification/paddleclas/python)/[C++](./examples/vision/classification/paddleclas/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |❔|❔| -| Classification | [PaddleClas/Inceptionv3](./examples/vision/classification/paddleclas) | [Python](./examples/vision/classification/paddleclas/python)/[C++](./examples/vision/classification/paddleclas/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❌ |❔|❔| -| Classification | [PaddleClas/PP-HGNet](./examples/vision/classification/paddleclas) | 
[Python](./examples/vision/classification/paddleclas/python)/[C++](./examples/vision/classification/paddleclas/cpp) |✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |❔|❔| -| Classification | [PaddleClas/SwinTransformer](./examples/vision/classification/paddleclas) | [Python](./examples/vision/classification/paddleclas/python)/[C++](./examples/vision/classification/paddleclas/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❌ |❔|❔| -| Detection | [PaddleDetection/PP-YOLOE](./examples/vision/detection/paddledetection) | [Python](./examples/vision/detection/paddledetection/python)/[C++](./examples/vision/detection/paddledetection/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❌ |❔|❔| -| Detection | [PaddleDetection/PicoDet](./examples/vision/detection/paddledetection) | [Python](./examples/vision/detection/paddledetection/python)/[C++](./examples/vision/detection/paddledetection/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❌ |❔|❔| -| Detection | [PaddleDetection/YOLOX](./examples/vision/detection/paddledetection) | [Python](./examples/vision/detection/paddledetection/python)/[C++](./examples/vision/detection/paddledetection/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❌ |❔|❔| -| Detection | [PaddleDetection/YOLOv3](./examples/vision/detection/paddledetection) | [Python](./examples/vision/detection/paddledetection/python)/[C++](./examples/vision/detection/paddledetection/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❌ |❔|❔| -| Detection | [PaddleDetection/PP-YOLO](./examples/vision/detection/paddledetection) | [Python](./examples/vision/detection/paddledetection/python)/[C++](./examples/vision/detection/paddledetection/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ❌ | ❌ | ❌ | ❌ |❔|❔| -| Detection | [PaddleDetection/PP-YOLOv2](./examples/vision/detection/paddledetection) | [Python](./examples/vision/detection/paddledetection/python)/[C++](./examples/vision/detection/paddledetection/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ❌ | ❌ | ❌ | ❌ |❔|❔| -| Detection | [PaddleDetection/FasterRCNN](./examples/vision/detection/paddledetection) | 
[Python](./examples/vision/detection/paddledetection/python)/[C++](./examples/vision/detection/paddledetection/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ❌ | ❌ | ❌ | ❌ |❔|❔| -| Detection | [PaddleDetection/PP-Tracking](./examples/vision/tracking/pptracking) | [Python](./examples/vision/tracking/pptracking/python)/[C++](./examples/vision/tracking/pptracking/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ❌ | ❌ | ❌ | ❌ |❔|❔| -| Detection | [Megvii-BaseDetection/YOLOX](./examples/vision/detection/yolox) | [Python](./examples/vision/detection/yolox/python)/[C++](./examples/vision/detection/yolox/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❌ |❔|❔| -| Detection | [WongKinYiu/YOLOv7](./examples/vision/detection/yolov7) | [Python](./examples/vision/detection/yolov7/python)/[C++](./examples/vision/detection/yolov7/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❌ |❔|❔| -| Detection | [meituan/YOLOv6](./examples/vision/detection/yolov6) | [Python](./examples/vision/detection/yolov6/python)/[C++](./examples/vision/detection/yolov6/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❌ |❔|❔| -| Detection | [ultralytics/YOLOv5](./examples/vision/detection/yolov5) | [Python](./examples/vision/detection/yolov5/python)/[C++](./examples/vision/detection/yolov5/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❌ |❔|❔| -| Detection | [WongKinYiu/YOLOR](./examples/vision/detection/yolor) | [Python](./examples/vision/detection/yolor/python)/[C++](./examples/vision/detection/yolor/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❌ |❔|❔| -| Detection | [WongKinYiu/ScaledYOLOv4](./examples/vision/detection/scaledyolov4) | [Python](./examples/vision/detection/scaledyolov4/python)/[C++](./examples/vision/detection/scaledyolov4/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❌ |❔|❔| -| Detection | [ppogg/YOLOv5Lite](./examples/vision/detection/yolov5lite) | [Python](./examples/vision/detection/yolov5lite/python)/[C++](./examples/vision/detection/yolov5lite/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❌ |❔|❔| -| Detection | [RangiLyu/NanoDetPlus](./examples/vision/detection/nanodet_plus) | 
[Python](./examples/vision/detection/nanodet_plus/python)/[C++](./examples/vision/detection/nanodet_plus/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅| ❌ |❔|❔| -| OCR | [PaddleOCR/PP-OCRv2](./examples/vision/ocr) | [Python](./examples/vision/detection/nanodet_plus/python)/[C++](./examples/vision/ocr/PP-OCRv3/cpp)| ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅| ❌ |❔|❔| -| OCR | [PaddleOCR/PP-OCRv3](./examples/vision/ocr) | [Python](./examples/vision/ocr/PP-OCRv3/python)/[C++](./examples/vision/ocr/PP-OCRv3/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅| ❌ |✅|✅| -| Segmentation | [PaddleSeg/PP-LiteSeg](./examples/vision/segmentation/paddleseg) | [Python](./examples/vision/segmentation/paddleseg/python)/[C++](./examples/vision/segmentation/paddleseg/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❌ |❔|❔| -| Segmentation | [PaddleSeg/PP-HumanSegLite](./examples/vision/segmentation/paddleseg) | [Python](./examples/vision/segmentation/paddleseg/python)/[C++](./examples/vision/segmentation/paddleseg/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❌ |❔|❔| -| Segmentation | [PaddleSeg/HRNet](./examples/vision/segmentation/paddleseg) | [Python](./examples/vision/segmentation/paddleseg/python)/[C++](./examples/vision/segmentation/paddleseg/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❌ |❔|❔| -| Segmentation | [PaddleSeg/PP-HumanSegServer](./examples/vision/segmentation/paddleseg) | [Python](./examples/vision/segmentation/paddleseg/python)/[C++](./examples/vision/segmentation/paddleseg/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❌ |✅|✅| -| Segmentation | [PaddleSeg/Unet](./examples/vision/segmentation/paddleseg) | [Python](./examples/vision/segmentation/paddleseg/python)/[C++](./examples/vision/segmentation/paddleseg/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❌ |❔|❔| -| Segmentation | [PaddleSeg/Deeplabv3](./examples/vision/segmentation/paddleseg) | [Python](./examples/vision/segmentation/paddleseg/python)/[C++](./examples/vision/segmentation/paddleseg/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❌ |❔|❔| -| Face Detection | 
[biubug6/RetinaFace](./examples/vision/facedet/retinaface) | [Python](./examples/vision/facedet/retinaface/python)/[C++](./examples/vision/facedet/retinaface/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❌ |❔|❔| -| Face Detection | [Linzaer/UltraFace](./examples/vision/facedet/ultraface) | [ Python](./examples/vision/facedet/ultraface/python)/[C++](./examples/vision/facedet/ultraface/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❌ |❔|❔| -| FaceDetection | [deepcam-cn/YOLOv5Face](./examples/vision/facedet/yolov5face) | [Python](./examples/vision/facedet/yolov5face/python)/[C++](./examples/vision/facedet/yolov5face/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❌ |❔|❔| -| Face Detection | [insightface/SCRFD](./examples/vision/facedet/scrfd) | [Python](./examples/vision/facedet/scrfd/python)/[C++](./examples/vision/facedet/scrfd/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❌ |❔|❔| -| Face Recognition | [insightface/ArcFace](./examples/vision/faceid/insightface) | [Python](./examples/vision/faceid/insightface/python)/[C++](./examples/vision/faceid/insightface/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❌ |❔|❔| -| Face Recognition | [insightface/CosFace](./examples/vision/faceid/insightface) | [Python](./examples/vision/faceid/insightface/python)/[C++](./examples/vision/faceid/insightface/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❌ |❔|❔| -| Face Recognition | [insightface/PartialFC](./examples/vision/faceid/insightface) | [Python](./examples/vision/faceid/insightface/python)/[C++](./examples/vision/faceid/insightface/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❌ |❔|❔| -| Face Recognition | [insightface/VPL](./examples/vision/faceid/insightface) | [Python](./examples/vision/faceid/insightface/python)/[C++](./examples/vision/faceid/insightface/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❌ |❔|❔| -| Matting | [ZHKKKe/MODNet](./examples/vision/matting/modnet) | [Python](./examples/vision/matting/modnet/python)/[C++](./examples/vision/matting/modnet/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅| ❌ |❔|❔| -| Matting | 
[eterL1n/RobustVideoMatting](./examples/vision/matting/rvm) | [Python](./examples/vision/matting/rvm/python)/[C++](./examples/vision/matting/rvm/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅| ❌ |❔|❔| -| Matting | [PaddleSeg/PP-Matting](./examples/vision/matting/ppmatting) | [Python](./examples/vision/matting/ppmatting/python)/[C++](./examples/vision/matting/ppmatting/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅| ❌ |❔|❔| -| Matting | [PaddleSeg/PP-HumanMatting](./examples/vision/matting/modnet) | [Python](./examples/vision/matting/ppmatting/python)/[C++](./examples/vision/matting/ppmatting/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅| ❌ |❔|❔| -| Matting | [PaddleSeg/ModNet](./examples/vision/matting/modnet) | [Python](./examples/vision/matting/ppmatting/python)/[C++](./examples/vision/matting/ppmatting/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅| ❌ |❔|❔| -| Information Extraction | [PaddleNLP/UIE](./examples/text/uie) | [Python](./examples/text/uie/python)/[C++](./examples/text/uie/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅| ❌ |❔|❔| -| Text Classification | [PaddleNLP/Ernie-3.0](./examples/text/ernie-3.0) | Python/C++ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅| ❌ |❔|❔| -| Text-to-Speech| [PaddleSpeech/PP-TTS](./examples/audio/pp-tts) | [Python](./examples/audio/pp-tts/python)/C++ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅| ❌ |❔|❔| +| Task | Model | API | Linux | Linux | Win | Win | Mac | Mac | Linux | Linux | Linux | Linux | +|:-----------------------------:|:---------------------------------------------------------------------------------------:|:---------------------------------------------------------------------------------------------------------------------------------:|:---------------------:|:------------------------:|:------------------------:|:------------------------:|:-----------------------:|:---------------------:|:--------------------------:|:---------------------------:|:--------------------------:|:---------------------------:| +| --- | --- | --- | X86 CPU | NVIDIA GPU | Intel CPU | NVIDIA GPU | Intel CPU | 
Arm CPU | AArch64 CPU | NVIDIA Jetson | Graphcore IPU | Serving| +| Classification | [PaddleClas/ResNet50](./examples/vision/classification/paddleclas) | [Python](./examples/vision/classification/paddleclas/python)/[C++](./examples/vision/classification/paddleclas/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | +| Classification | [TorchVison/ResNet](examples/vision/classification/resnet) | [Python](./examples/vision/classification/resnet/python)/[C++](./examples/vision/classification/resnet/python/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ | +| Classification | [ltralytics/YOLOv5Cls](examples/vision/classification/yolov5cls) | [Python](./examples/vision/classification/yolov5cls/python)/[C++](./examples/vision/classification/yolov5cls/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ | +| Classification | [PaddleClas/PP-LCNet](./examples/vision/classification/paddleclas) | [Python](./examples/vision/classification/paddleclas/python)/[C++](./examples/vision/classification/paddleclas/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | +| Classification | [PaddleClas/PP-LCNetv2](./examples/vision/classification/paddleclas) | [Python](./examples/vision/classification/paddleclas/python)/[C++](./examples/vision/classification/paddleclas/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | +| Classification | [PaddleClas/EfficientNet](./examples/vision/classification/paddleclas) | [Python](./examples/vision/classification/paddleclas/python)/[C++](./examples/vision/classification/paddleclas/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | +| Classification | [PaddleClas/GhostNet](./examples/vision/classification/paddleclas) | [Python](./examples/vision/classification/paddleclas/python)/[C++](./examples/vision/classification/paddleclas/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | +| Classification | [PaddleClas/MobileNetV1](./examples/vision/classification/paddleclas) | [Python](./examples/vision/classification/paddleclas/python)/[C++](./examples/vision/classification/paddleclas/cpp) | ✅ 
| ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | +| Classification | [PaddleClas/MobileNetV2](./examples/vision/classification/paddleclas) | [Python](./examples/vision/classification/paddleclas/python)/[C++](./examples/vision/classification/paddleclas/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | +| Classification | [PaddleClas/MobileNetV3](./examples/vision/classification/paddleclas) | [Python](./examples/vision/classification/paddleclas/python)/[C++](./examples/vision/classification/paddleclas/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | +| Classification | [PaddleClas/ShuffleNetV2](./examples/vision/classification/paddleclas) | [Python](./examples/vision/classification/paddleclas/python)/[C++](./examples/vision/classification/paddleclas/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | +| Classification | [PaddleClas/SqueeezeNetV1.1](./examples/vision/classification/paddleclas) | [Python](./examples/vision/classification/paddleclas/python)/[C++](./examples/vision/classification/paddleclas/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | +| Classification | [PaddleClas/Inceptionv3](./examples/vision/classification/paddleclas) | [Python](./examples/vision/classification/paddleclas/python)/[C++](./examples/vision/classification/paddleclas/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ | +| Classification | [PaddleClas/PP-HGNet](./examples/vision/classification/paddleclas) | [Python](./examples/vision/classification/paddleclas/python)/[C++](./examples/vision/classification/paddleclas/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | +| Classification | [PaddleClas/SwinTransformer](./examples/vision/classification/paddleclas) | [Python](./examples/vision/classification/paddleclas/python)/[C++](./examples/vision/classification/paddleclas/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ | +| Detection | [PaddleDetection/PP-YOLOE](./examples/vision/detection/paddledetection) | [Python](./examples/vision/detection/paddledetection/python)/[C++](./examples/vision/detection/paddledetection/cpp) 
| ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ | +| Detection | [PaddleDetection/PicoDet](./examples/vision/detection/paddledetection) | [Python](./examples/vision/detection/paddledetection/python)/[C++](./examples/vision/detection/paddledetection/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ | +| Detection | [PaddleDetection/YOLOX](./examples/vision/detection/paddledetection) | [Python](./examples/vision/detection/paddledetection/python)/[C++](./examples/vision/detection/paddledetection/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ | +| Detection | [PaddleDetection/YOLOv3](./examples/vision/detection/paddledetection) | [Python](./examples/vision/detection/paddledetection/python)/[C++](./examples/vision/detection/paddledetection/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ | +| Detection | [PaddleDetection/PP-YOLO](./examples/vision/detection/paddledetection) | [Python](./examples/vision/detection/paddledetection/python)/[C++](./examples/vision/detection/paddledetection/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ | +| Detection | [PaddleDetection/PP-YOLOv2](./examples/vision/detection/paddledetection) | [Python](./examples/vision/detection/paddledetection/python)/[C++](./examples/vision/detection/paddledetection/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ | +| Detection | [PaddleDetection/Faster-RCNN](./examples/vision/detection/paddledetection) | [Python](./examples/vision/detection/paddledetection/python)/[C++](./examples/vision/detection/paddledetection/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ | +| Detection | [PaddleDetection/Mask-RCNN](./examples/vision/detection/paddledetection) | [Python](./examples/vision/detection/paddledetection/python)/[C++](./examples/vision/detection/paddledetection/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ | +| Detection | [Megvii-BaseDetection/YOLOX](./examples/vision/detection/yolox) | [Python](./examples/vision/detection/yolox/python)/[C++](./examples/vision/detection/yolox/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ | +| 
Detection | [WongKinYiu/YOLOv7](./examples/vision/detection/yolov7) | [Python](./examples/vision/detection/yolov7/python)/[C++](./examples/vision/detection/yolov7/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ | +| Detection | [WongKinYiu/YOLOv7end2end_trt](./examples/vision/detection/yolov7end2end_trt) | [Python](./examples/vision/detection/yolov7end2end_ort/python)/[C++](./examples/vision/detection/yolov7end2end_ort/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | | | +| Detection | [WongKinYiu/YOLOv7end2end_ort_](./examples/vision/detection/yolov7end2end_ort) | [Python](./examples/vision/detection/yolov7end2end_ort/python)/[C++](./examples/vision/detection/yolov7end2end_ort/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | | | +| Detection | [meituan/YOLOv6](./examples/vision/detection/yolov6) | [Python](./examples/vision/detection/yolov6/python)/[C++](./examples/vision/detection/yolov6/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ | +| Detection | [ultralytics/YOLOv5](./examples/vision/detection/yolov5) | [Python](./examples/vision/detection/yolov5/python)/[C++](./examples/vision/detection/yolov5/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ | +| Detection | [WongKinYiu/YOLOR](./examples/vision/detection/yolor) | [Python](./examples/vision/detection/yolor/python)/[C++](./examples/vision/detection/yolor/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ | +| Detection | [WongKinYiu/ScaledYOLOv4](./examples/vision/detection/scaledyolov4) | [Python](./examples/vision/detection/scaledyolov4/python)/[C++](./examples/vision/detection/scaledyolov4/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ | +| Detection | [ppogg/YOLOv5Lite](./examples/vision/detection/yolov5lite) | [Python](./examples/vision/detection/yolov5lite/python)/[C++](./examples/vision/detection/yolov5lite/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ | +| Detection | [RangiLyu/NanoDetPlus](./examples/vision/detection/nanodet_plus) | 
[Python](./examples/vision/detection/nanodet_plus/python)/[C++](./examples/vision/detection/nanodet_plus/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ | +| KeyPoint | [PaddleDetection/TinyPose](./examples/vision/keypointdetection/tiny_pose) | [Python](./examples/vision/keypointdetection/tiny_pose/python)/[C++](./examples/vision/keypointdetection/tiny_pose/python/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ | +| KeyPoint | [PaddleDetection/PicoDet + TinyPose](./examples/vision/keypointdetection/det_keypoint_unite) | [Python](./examples/vision/keypointdetection/det_keypoint_unite/python)/[C++](./examples/vision/keypointdetection/det_keypoint_unite/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ | +| HeadPose | [omasaht/headpose](examples/vision/headpose) | [Python](./xamples/vision/headpose/fsanet/python)/[C++](./xamples/vision/headpose/fsanet/cpp/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ | +| Tracking | [PaddleDetection/PP-Tracking](examples/vision/tracking/pptracking) | [Python](examples/vision/tracking/pptracking/python)/[C++](examples/vision/tracking/pptracking/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ | +| OCR | [PaddleOCR/PP-OCRv2](./examples/vision/ocr) | [Python](./examples/vision/detection/nanodet_plus/python)/[C++](./examples/vision/ocr/PP-OCRv3/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ | +| OCR | [PaddleOCR/PP-OCRv3](./examples/vision/ocr) | [Python](./examples/vision/ocr/PP-OCRv3/python)/[C++](./examples/vision/ocr/PP-OCRv3/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ | +| Segmentation | [PaddleSeg/PP-LiteSeg](./examples/vision/segmentation/paddleseg) | [Python](./examples/vision/segmentation/paddleseg/python)/[C++](./examples/vision/segmentation/paddleseg/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ | +| Segmentation | [PaddleSeg/PP-HumanSegLite](./examples/vision/segmentation/paddleseg) | [Python](./examples/vision/segmentation/paddleseg/python)/[C++](./examples/vision/segmentation/paddleseg/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ | +| 
Segmentation | [PaddleSeg/HRNet](./examples/vision/segmentation/paddleseg) | [Python](./examples/vision/segmentation/paddleseg/python)/[C++](./examples/vision/segmentation/paddleseg/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ | +| Segmentation | [PaddleSeg/PP-HumanSegServer](./examples/vision/segmentation/paddleseg) | [Python](./examples/vision/segmentation/paddleseg/python)/[C++](./examples/vision/segmentation/paddleseg/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ | +| Segmentation | [PaddleSeg/Unet](./examples/vision/segmentation/paddleseg) | [Python](./examples/vision/segmentation/paddleseg/python)/[C++](./examples/vision/segmentation/paddleseg/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ | +| Segmentation | [PaddleSeg/Deeplabv3](./examples/vision/segmentation/paddleseg) | [Python](./examples/vision/segmentation/paddleseg/python)/[C++](./examples/vision/segmentation/paddleseg/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ | +| FaceDetection | [biubug6/RetinaFace](./examples/vision/facedet/retinaface) | [Python](./examples/vision/facedet/retinaface/python)/[C++](./examples/vision/facedet/retinaface/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ | +| FaceDetection | [Linzaer/UltraFace](./examples/vision/facedet/ultraface) | [Python](./examples/vision/facedet/ultraface/python)/[C++](./examples/vision/facedet/ultraface/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ | +| FaceDetection | [deepcam-cn/YOLOv5Face](./examples/vision/facedet/yolov5face) | [Python](./examples/vision/facedet/yolov5face/python)/[C++](./examples/vision/facedet/yolov5face/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | +| FaceDetection | [insightface/SCRFD](./examples/vision/facedet/scrfd) | [Python](./examples/vision/facedet/scrfd/python)/[C++](./examples/vision/facedet/scrfd/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ | +| FaceAlign | [Hsintao/PFLD](examples/vision/facealign/pfld) | [Python](./examples/vision/facealign/pfld/python)/[C++](./examples/vision/facealign/pfld/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | 
✅ | ✅ | ✅ | ❔ | ❔ | +| FaceRecognition | [insightface/ArcFace](./examples/vision/faceid/insightface) | [Python](./examples/vision/faceid/insightface/python)/[C++](./examples/vision/faceid/insightface/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ | +| FaceRecognition | [insightface/CosFace](./examples/vision/faceid/insightface) | [Python](./examples/vision/faceid/insightface/python)/[C++](./examples/vision/faceid/insightface/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ | +| FaceRecognition | [insightface/PartialFC](./examples/vision/faceid/insightface) | [Python](./examples/vision/faceid/insightface/python)/[C++](./examples/vision/faceid/insightface/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ | +| FaceRecognition | [insightface/VPL](./examples/vision/faceid/insightface) | [Python](./examples/vision/faceid/insightface/python)/[C++](./examples/vision/faceid/insightface/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ | +| Matting | [ZHKKKe/MODNet](./examples/vision/matting/modnet) | [Python](./examples/vision/matting/modnet/python)/[C++](./examples/vision/matting/modnet/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ | +| Matting | [PeterL1n/RobustVideoMatting](./examples/vision/matting/rvm) | [Python](./examples/vision/matting/rvm/python)/[C++](./examples/vision/matting/rvm/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ | +| Matting | [PaddleSeg/PP-Matting](./examples/vision/matting/ppmatting) | [Python](./examples/vision/matting/ppmatting/python)/[C++](./examples/vision/matting/ppmatting/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ | +| Matting | [PaddleSeg/PP-HumanMatting](./examples/vision/matting/ppmatting) | [Python](./examples/vision/matting/ppmatting/python)/[C++](./examples/vision/matting/ppmatting/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ | +| Matting | [PaddleSeg/ModNet](./examples/vision/matting/modnet) | [Python](./examples/vision/matting/ppmatting/python)/[C++](./examples/vision/matting/ppmatting/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ | +| Information Extraction | 
[PaddleNLP/UIE](./examples/text/uie) | [Python](./examples/text/uie/python)/[C++](./examples/text/uie/cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ | +| NLP | [PaddleNLP/ERNIE-3.0](./examples/text/ernie-3.0) | Python/C++ | ❔ | ❔ | ❔ | ❔ | ❔ | ❔ | ❔ | ❔ | ❔ | ✅ | +| Speech | [PaddleSpeech/PP-TTS](./examples/audio/pp-tts) | [Python](examples/audio/pp-tts/python)/C++ | ❔ | ❔ | ❔ | ❔ | ❔ | ❔ | ❔ | ❔ | -- | ✅ | + -## Edge-Side Deployment +## 📲 Mobile and Edge Device Deployment 🔥🔥🔥🔥
@@ -243,46 +300,63 @@ Notes: ✅: already supported; ❔: to be supported in the future; ❌: not supp ### Supported Edge-Side Model List -
+
-| | Model | Size (MB) | Linux | Android | iOS | Linux | Linux | Linux | TBD... | -|:------------------:|:----------------------------:|:---------:|:-------:|:-------:|:-------:|:-----------------------------------------:|:---------------------------------------:|:------------------------:|:-------:| -| --- | --- | --- | ARM CPU | ARM CPU | ARM CPU | Rockchip-NPU
RV1109
RV1126
RK1808 | Amlogic-NPU
A311D
S905D
C308X | NXPNPU
i.MX 8M Plus | TBD...| | -| Classification | PP-LCNet | 11.9 | ✅ | ✅ | ✅ | -- | -- | -- | -- | -| Classification | PP-LCNetv2 | 26.6 | ✅ | ✅ | ✅ | -- | -- | -- | -- | -| Classification | EfficientNet | 31.4 | ✅ | ✅ | ✅ | -- | -- | -- | -- | -| Classification | GhostNet | 20.8 | ✅ | ✅ | ✅ | -- | -- | -- | -- | -| Classification | MobileNetV1 | 17 | ✅ | ✅ | ✅ | -- | -- | -- | -- | -| Classification | MobileNetV2 | 14.2 | ✅ | ✅ | ✅ | -- | -- | -- | -- | -| Classification | MobileNetV3 | 22 | ✅ | ✅ | ✅ | ❔ | ❔ | ❔ | ❔ | -| Classification | ShuffleNetV2 | 9.2 | ✅ | ✅ | ✅ | -- | -- | -- | -- | -| Classification | SqueezeNetV1.1 | 5 | ✅ | ✅ | ✅ | | | | | -| Classification | Inceptionv3 | 95.5 | ✅ | ✅ | ✅ | -- | -- | -- | -- | -| Classification | PP-HGNet | 59 | ✅ | ✅ | ✅ | -- | -- | -- | -- | -| Classification | SwinTransformer_224_win7 | 352.7 | ✅ | ✅ | ✅ | -- | -- | -- | -- | -| Detection | PP-PicoDet_s_320_coco | 4.1 | ✅ | ✅ | ✅ | -- | -- | -- | -- | -| Detection | PP-PicoDet_s_320_lcnet | 4.9 | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | -| Detection | CenterNet | 4.8 | ✅ | ✅ | ✅ | -- | -- | -- | -- | -| Detection | YOLOv3_MobileNetV3 | 94.6 | ✅ | ✅ | ✅ | -- | -- | -- | -- | -| Detection | PP-YOLO_tiny_650e_coco | 4.4 | ✅ | ✅ | ✅ | -- | -- | -- | -- | -| Detection | SSD_MobileNetV1_300_120e_voc | 23.3 | ✅ | ✅ | ✅ | -- | -- | -- | -- | -| Detection | PP-YOLO_ResNet50vd | 188.5 | ✅ | ✅ | ✅ | -- | -- | -- | -- | -| Detection | PP-YOLOv2_ResNet50vd | 218.7 | ✅ | ✅ | ✅ | -- | -- | -- | -- | -| Detection | PP-YOLO_crn_l_300e_coco | 209.1 | ✅ | ✅ | ✅ | -- | -- | -- | -- | -| Detection | YOLOv5s | 29.3 | ✅ | ✅ | ✅ | -- | -- | -- | -- | -| Face Detection | BlazeFace | 1.5 | ✅ | ✅ | ✅ | -- | -- | -- | -- | -| Face Detection | RetinaFace | 1.7 | ✅ | ❌ | ❌ | -- | -- | -- | -- | -| Keypoint Detection | PP-TinyPose | 5.5 | ✅ | ✅ | ✅ | ❔ | ❔ | ❔ | ❔ | -| Segmentation | PP-LiteSeg(STDC1) | 32.2 | ✅ | ✅ | ✅ | -- | -- | -- | -- | -| Segmentation | PP-HumanSeg-Lite | 0.556 | ✅ | ✅ | ✅ | -- | -- | -- | 
-- | -| Segmentation | HRNet-w18 | 38.7 | ✅ | ✅ | ✅ | -- | -- | -- | -- | -| Segmentation | PP-HumanSeg-Server | 107.2 | ✅ | ✅ | ✅ | -- | -- | -- | -- | -| Segmentation | Unet | 53.7 | ❌ | ✅ | ❌ | -- | -- | -- | -- | -| OCR | PP-OCRv1 | 2.3+4.4 | ✅ | ✅ | ✅ | -- | -- | -- | -- | -| OCR | PP-OCRv2 | 2.3+4.4 | ✅ | ✅ | ✅ | -- | -- | -- | -- | -| OCR | PP-OCRv3 | 2.4+10.6 | ✅ | ✅ | ✅ | ❔ | ❔ | ❔ | ❔ | -| OCR | PP-OCRv3-tiny | 2.4+10.7 | ✅ | ✅ | ✅ | -- | -- | -- | -- | +| Task | Model | Size (MB) | Linux | Android | iOS | Linux |Linux | Linux | Linux | TBD... | +|:------------------:|:----------------------------:|:---------:|:-------:|:-------:|:-------:|:-----------------------------------------:|:---------------------------------------:|:------------------------:|:-------:|:-------:| +| --- | --- | --- | ARM CPU | ARM CPU | ARM CPU |Rockchip-NPU
RK3568/RK3588 |Rockchip-NPU
RV1109/RV1126/RK1808 | Amlogic-NPU
A311D/S905D/C308X | NXP-NPU
i.MX 8M Plus | TBD...| | +| Classification | [PaddleClas/PP-LCNet](examples/vision/classification/paddleclas) | 11.9 | ✅ | ✅ | ❔ | ❔ | -- | -- | -- | -- | +| Classification | [PaddleClas/PP-LCNetv2](examples/vision/classification/paddleclas) | 26.6 | ✅ | ✅ | ❔ | ❔ | -- | -- | -- | -- | +| Classification | [PaddleClas/EfficientNet](examples/vision/classification/paddleclas) | 31.4 | ✅ | ✅ | ❔ | ❔ | -- | -- | -- | -- | +| Classification | [PaddleClas/GhostNet](examples/vision/classification/paddleclas) | 20.8 | ✅ | ✅ | ❔ | ❔ | -- | -- | -- | -- | +| Classification | [PaddleClas/MobileNetV1](examples/vision/classification/paddleclas) | 17 | ✅ | ✅ | ❔ | ❔ | -- | -- | -- | -- | +| Classification | [PaddleClas/MobileNetV2](examples/vision/classification/paddleclas) | 14.2 | ✅ | ✅ | ❔ | ❔ | -- | -- | -- | -- | +| Classification | [PaddleClas/MobileNetV3](examples/vision/classification/paddleclas) | 22 | ✅ | ✅ | ❔ | ❔ | ❔ | ❔ | ❔ | -- | +| Classification | [PaddleClas/ShuffleNetV2](examples/vision/classification/paddleclas) | 9.2 | ✅ | ✅ | ❔ | ❔ | -- | -- | -- | -- | +| Classification | [PaddleClas/SqueezeNetV1.1](examples/vision/classification/paddleclas) | 5 | ✅ | ✅ | ❔ | ❔ | -- | -- | -- | -- | +| Classification | [PaddleClas/Inceptionv3](examples/vision/classification/paddleclas) | 95.5 | ✅ | ✅ | ❔ | ❔ | -- | -- | -- | -- | +| Classification | [PaddleClas/PP-HGNet](examples/vision/classification/paddleclas) | 59 | ✅ | ✅ | ❔ | ❔ | -- | -- | -- | -- | +| Classification | [PaddleClas/SwinTransformer_224_win7](examples/vision/classification/paddleclas) | 352.7 | ✅ | ✅ | ❔ | ❔ | -- | -- | -- | -- | +| Detection | [PaddleDetection/PP-PicoDet_s_320_coco](examples/vision/detection/paddledetection) | 4.1 | ✅ | ✅ | ❔ | ❔ | -- | -- | -- | -- | +| Detection | [PaddleDetection/PP-PicoDet_s_320_lcnet](examples/vision/detection/paddledetection) | 4.9 | ✅ | ✅ | ❔ | ❔ | ✅ | ✅ | ✅ | -- | +| Detection | [PaddleDetection/CenterNet](examples/vision/detection/paddledetection) | 4.8 | ✅ | ❔ 
| ❔ | ❔ | -- | -- | -- | -- | +| Detection | [PaddleDetection/YOLOv3_MobileNetV3](examples/vision/detection/paddledetection) | 94.6 | ✅ | ❔ | ❔ | ❔ | -- | -- | -- | -- | +| Detection | [PaddleDetection/PP-YOLO_tiny_650e_coco](examples/vision/detection/paddledetection) | 4.4 | ✅ | ❔ | ❔ | ❔ | -- | -- | -- | -- | +| Detection | [PaddleDetection/SSD_MobileNetV1_300_120e_voc](examples/vision/detection/paddledetection) | 23.3 | ✅ | ❔ | ❔ | ❔ | -- | -- | -- | -- | +| Detection | [PaddleDetection/PP-YOLO_ResNet50vd](examples/vision/detection/paddledetection) | 188.5 | ✅ | ❔ | ❔ | ❔ | -- | -- | -- | -- | +| Detection | [PaddleDetection/PP-YOLOv2_ResNet50vd](examples/vision/detection/paddledetection) | 218.7 | ✅ | ❔ | ❔ | ❔ | -- | -- | -- | -- | +| Detection | [PaddleDetection/PP-YOLO_crn_l_300e_coco](examples/vision/detection/paddledetection) | 209.1 | ✅ | ❔ | ❔ | ❔ | -- | -- | -- | -- | +| Detection | YOLOv5s | 29.3 | ❔ | ❔ | ❔ | ❔ | -- | -- | -- | -- | +| Face Detection | BlazeFace | 1.5 | ❔ | ❔ | ❔ | ❔ | -- | -- | -- | -- | +| Face Detection | RetinaFace | 1.7 | ❔ | ❔ | ❔ | ❔ | -- | -- | -- | -- | +| Keypoint Detection | [PaddleDetection/PP-TinyPose](examples/vision/keypointdetection/tiny_pose) | 5.5 | ✅ | ❔ | ❔ | ❔ | ❔ | ❔ | ❔ | -- | +| Segmentation | [PaddleSeg/PP-LiteSeg(STDC1)](examples/vision/segmentation/paddleseg) | 32.2 | ✅ | ❔ | ❔ | ✅ | -- | -- | -- | -- | +| Segmentation | [PaddleSeg/PP-HumanSeg-Lite](examples/vision/segmentation/paddleseg) | 0.556 | ✅ | ❔ | ❔ | ✅ | -- | -- | -- | -- | +| Segmentation | [PaddleSeg/HRNet-w18](examples/vision/segmentation/paddleseg) | 38.7 | ✅ | ❔ | ❔ | ✅ | -- | -- | -- | -- | +| Segmentation | [PaddleSeg/PP-HumanSeg](examples/vision/segmentation/paddleseg) | 107.2 | ✅ | ❔ | ❔ | ✅ | -- | -- | -- | -- | +| Segmentation | [PaddleSeg/Unet](examples/vision/segmentation/paddleseg) | 53.7 | ✅ | ❔ | ❔ | ✅ | -- | -- | -- | -- | +| Segmentation | [PaddleSeg/Deeplabv3](examples/vision/segmentation/paddleseg) | 150 | ❔ | ❔ | ❔ | ✅ | | | | 
| +| OCR | PaddleOCR/PP-OCRv1 | 2.3+4.4 | ❔ | ❔ | ❔ | ❔ | -- | -- | -- | -- | +| OCR | [PaddleOCR/PP-OCRv2](examples/vision/ocr/PP-OCRv2) | 2.3+4.4 | ✅ | ❔ | ❔ | ❔ | -- | -- | -- | -- | +| OCR | [PaddleOCR/PP-OCRv3](examples/vision/ocr/PP-OCRv3) | 2.4+10.6 | ✅ | ❔ | ❔ | ❔ | ❔ | ❔ | ❔ | -- | +| OCR | PaddleOCR/PP-OCRv3-tiny | 2.4+10.7 | ❔ | ❔ | ❔ | ❔ | -- | -- | -- | -- | + +## Web and Mini Program Deployment + +
+ +| Task | Model | [web_demo](examples/application/js/web_demo) | +|:------------------:|:-------------------------------------------------------------------------------------------:|:--------------------------------------------:| +| --- | --- | [Paddle.js](examples/application/js) | +| Detection | [FaceDetection](examples/application/js/web_demo/src/pages/cv/detection) | ✅ | +| Detection | [ScrewDetection](examples/application/js/web_demo/src/pages/cv/detection) | ✅ | +| Segmentation | [PaddleSeg/HumanSeg](./examples/application/js/web_demo/src/pages/cv/segmentation/HumanSeg) | ✅ | +| Object Recognition | [GestureRecognition](examples/application/js/web_demo/src/pages/cv/recognition) | ✅ | +| Object Recognition | [ItemIdentification](examples/application/js/web_demo/src/pages/cv/recognition) | ✅ | +| OCR | [PaddleOCR/PP-OCRv3](./examples/application/js/web_demo/src/pages/cv/ocr) | ✅ | + + ## Community
@@ -293,7 +367,7 @@ Notes: ✅: already supported; ❔: to be supported in the future; ❌: not supp - **WeChat**:join our **WeChat community** and chat with other community members about ideas
- +
From bb0b251a10cd94f28e590b2191be524311465a7b Mon Sep 17 00:00:00 2001 From: leiqing <54695910+leiqing1@users.noreply.github.com> Date: Sun, 6 Nov 2022 23:54:11 +0800 Subject: [PATCH 12/30] Update README_EN.md --- README_EN.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README_EN.md b/README_EN.md index b298bf9a86..0142f22c29 100644 --- a/README_EN.md +++ b/README_EN.md @@ -26,7 +26,7 @@ English | [简体中文](README_CN.md) | [**Image Matting**](examples/vision/matting) | [**Real-Time Matting**](examples/vision/matting) | [**OCR**](examples/vision/ocr) |[**Face Alignment**](examples/vision/ocr) | | | | | | [**Pose Estimation**](examples/vision/keypointdetection) | [**Behavior Recognition**](https://github.com/PaddlePaddle/FastDeploy/issues/6) | [**NLP**](examples/text) |[**Speech**](examples/audio/pp-tts) -| | | |

**input** :早上好,今天是2020
/10/29,最低温度是-3°C。

**output**: [](https://paddlespeech.bj.bcebos.com/Parakeet/docs/demos/parakeet_espnet_fs2_pwg_demo/tn_g2p/parakeet/001.wav)

| +| | | |

**input** :早上好,今天是2020
/10/29,最低温度是-3°C。

**output**: [](https://paddlespeech.bj.bcebos.com/Parakeet/docs/demos/parakeet_espnet_fs2_pwg_demo/tn_g2p/parakeet/001.wav)

| ## 📣 Recent Updates From 74ff1d3c785973b28118661cd2aec3d58030f47e Mon Sep 17 00:00:00 2001 From: leiqing <54695910+leiqing1@users.noreply.github.com> Date: Sun, 6 Nov 2022 23:56:35 +0800 Subject: [PATCH 13/30] Update README_EN.md --- README_EN.md | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/README_EN.md b/README_EN.md index 0142f22c29..cf7f205c0d 100644 --- a/README_EN.md +++ b/README_EN.md @@ -107,10 +107,12 @@ English | [简体中文](README_CN.md) ## 🖥️ Data Center and Web Deployment -### A Quick Start for Python SDK -
+
+A Quick Start for Python SDK(click to shrink) + + #### Installation ##### Prerequisites @@ -164,7 +166,8 @@ print(result) vis_im = vision.vis_detection(im, result, score_threshold=0.5) cv2.imwrite("vis_image.jpg", vis_im) ``` - +
+
From 9673550d8dd52796f59a60b5d2b421b7c85d08cc Mon Sep 17 00:00:00 2001 From: leiqing <54695910+leiqing1@users.noreply.github.com> Date: Mon, 7 Nov 2022 00:01:11 +0800 Subject: [PATCH 14/30] Update README_CN.md --- README_CN.md | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/README_CN.md b/README_CN.md index 383f88b62e..1002435050 100644 --- a/README_CN.md +++ b/README_CN.md @@ -100,9 +100,10 @@ ## 🖥️ 服务端部署 -### Python SDK快速开始
+
Python SDK快速开始(点击可收缩) + #### 快速安装 ##### 前置依赖 @@ -151,6 +152,8 @@ vis_im = vision.vis_detection(im, result, score_threshold=0.5) cv2.imwrite("vis_image.jpg", vis_im) ``` +
+
From cd0daa39ffc2c448e25d6eadad07a0b9f0c52837 Mon Sep 17 00:00:00 2001 From: leiqing <54695910+leiqing1@users.noreply.github.com> Date: Mon, 7 Nov 2022 00:03:05 +0800 Subject: [PATCH 15/30] Update README_CN.md --- README_CN.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README_CN.md b/README_CN.md index 1002435050..4cd182f274 100644 --- a/README_CN.md +++ b/README_CN.md @@ -51,7 +51,7 @@ - 集成 Paddle Lite,并保证其使用与服务端常用推理引擎 Paddle Inference、TensorRT、OpenVINO、ONNX Runtime 等一致的开发体验; - 新增 [轻量化目标检测模型](examples/vision/detection/paddledetection/android)和[分类模型](examples/vision/classification/paddleclas/android)的安卓端部署能力; - ** Web和小程序部署:新增Web端部署能力** - - 集成 Paddle.js部署能力,新增 OCR、目标检测、人像分割背景替换、物体识别等Web端部署能力和Demo(examples/application/js); + - 集成 Paddle.js部署能力,新增 OCR、目标检测、人像分割背景替换、物体识别等Web端部署能力和[Demo](examples/application/js); ## 目录 @@ -285,7 +285,7 @@ int main(int argc, char* argv[]) { ### 📲 端侧模型支持列表 -
+
| 任务场景 | 模型 | 大小(MB) | Linux | Android | iOS | Linux | Linux | Linux | Linux | 更新中... | |:------------------:|:-----------------------------------------------------------------------------------------:|:--------:|:-------:|:-------:|:-------: |:------------------:|:------------------------------------:|:---------------------------------:|:------------------------:|:-------:| From 554a49c14d48b63091935f6cb3e586cf99e34aaf Mon Sep 17 00:00:00 2001 From: leiqing <54695910+leiqing1@users.noreply.github.com> Date: Mon, 7 Nov 2022 00:06:04 +0800 Subject: [PATCH 16/30] Update README_EN.md --- README_EN.md | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/README_EN.md b/README_EN.md index cf7f205c0d..98caa28300 100644 --- a/README_EN.md +++ b/README_EN.md @@ -172,8 +172,6 @@ cv2.imwrite("vis_image.jpg", vis_im)
A Quick Start for C++ SDK(click to expand) - -
#### Installation @@ -215,11 +213,10 @@ int main(int argc, char* argv[]) { For more deployment models, please refer to [Vision Model Deployment Examples](examples/vision) . +
### Supported Data Center and Web Model List🔥🔥🔥🔥🔥 -
- Notes: ✅: already supported; ❔: to be supported in the future; N/A: not supported now; @@ -291,19 +288,22 @@ Notes: ✅: already supported; ❔: to be supported in the future; N/A: not supp | Speech | [PaddleSpeech/PP-TTS](./examples/text/uie) | [Python](examples/audio/pp-tts/python)/C++ | ❔ | ❔ | ❔ | ❔ | ❔ | ❔ | ❔ | ❔ | -- | ✅ | +
+ ## 📲 Mobile and Edge Device Deployment 🔥🔥🔥🔥 -
- -### Paddle Lite NPU Deployment
+ +### Paddle Lite NPU Deployment -- [Rexchip-NPU / Amlogic-NPU / NXP-NPU](https://github.com/PaddlePaddle/Paddle-Lite-Demo/tree/develop/object_detection/linux/picodet_detection) - -### Supported Edge-Side Model List +- [Rockchip-NPU / Amlogic-NPU / NXP-NPU](https://github.com/PaddlePaddle/Paddle-Lite-Demo/tree/develop/object_detection/linux/picodet_detection)
+ +### Supported Edge-Side Model List + + | Task | Model | Size (MB) | Linux | Android | iOS | Linux |Linux | Linux | Linux | TBD... | |:------------------:|:----------------------------:|:---------:|:-------:|:-------:|:-------:|:-----------------------------------------:|:---------------------------------------:|:------------------------:|:-------:|:-------:| From 612836e8023c4130eccebfa13b59fd25c8db5080 Mon Sep 17 00:00:00 2001 From: leiqing <54695910+leiqing1@users.noreply.github.com> Date: Mon, 7 Nov 2022 00:07:10 +0800 Subject: [PATCH 17/30] Update README_EN.md --- README_EN.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README_EN.md b/README_EN.md index 98caa28300..616b17bef7 100644 --- a/README_EN.md +++ b/README_EN.md @@ -100,7 +100,7 @@ English | [简体中文](README_CN.md) * [Paddle Lite NPU Deployment](#fastdeploy-edge-sdk-npu) * [Supported Mobile and Edge Model List](#fastdeploy-edge-models) * **Web and Mini_Program Deployment** - * [Supported Web and Mini_Program Model List](#fastdeploy-web-models) + * [Supported Web and Mini Program Model List](#fastdeploy-web-models) * [**Community**](#fastdeploy-community) * [**Acknowledge**](#fastdeploy-acknowledge) * [**License**](#fastdeploy-license) From 9d025a594c5b1b11550f9d536352c265d33b9536 Mon Sep 17 00:00:00 2001 From: leiqing <54695910+leiqing1@users.noreply.github.com> Date: Mon, 7 Nov 2022 00:10:57 +0800 Subject: [PATCH 18/30] Update README_EN.md --- README_EN.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README_EN.md b/README_EN.md index 616b17bef7..a7b6fcde2f 100644 --- a/README_EN.md +++ b/README_EN.md @@ -26,7 +26,7 @@ English | [简体中文](README_CN.md) | [**Image Matting**](examples/vision/matting) | [**Real-Time Matting**](examples/vision/matting) | [**OCR**](examples/vision/ocr) |[**Face Alignment**](examples/vision/ocr) | | | | | | [**Pose Estimation**](examples/vision/keypointdetection) | [**Behavior 
Recognition**](https://github.com/PaddlePaddle/FastDeploy/issues/6) | [**NLP**](examples/text) |[**Speech**](examples/audio/pp-tts) -| | | |

**input** :早上好,今天是2020
/10/29,最低温度是-3°C。

**output**: [](https://paddlespeech.bj.bcebos.com/Parakeet/docs/demos/parakeet_espnet_fs2_pwg_demo/tn_g2p/parakeet/001.wav)

| +| | | |

**input** :Life was like a box of chocolates, you never know what you're gonna get.

**output**: [](https://paddlespeech.bj.bcebos.com/Parakeet/docs/demos/tacotron2_ljspeech_waveflow_samples_0.2/sentence_1.wav)

| ## 📣 Recent Updates From 276c9be0fe5f4ba1de56c8a3b3aa85742f50c363 Mon Sep 17 00:00:00 2001 From: leiqing <54695910+leiqing1@users.noreply.github.com> Date: Mon, 7 Nov 2022 00:17:38 +0800 Subject: [PATCH 19/30] Update README_EN.md --- README_EN.md | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/README_EN.md b/README_EN.md index a7b6fcde2f..8e127e6679 100644 --- a/README_EN.md +++ b/README_EN.md @@ -42,22 +42,22 @@ English | [简体中文](README_CN.md) - 🔥 **2022.10.31:Release FastDeploy [release v0.5.0](https://github.com/PaddlePaddle/FastDeploy/tree/release/0.5.0)**
- **🖥️ Data Center and Cloud Deployment: Support more backend, Support more CV models** - - Support Paddle Inference TensorRT, and provide a seamless deployment experience with other inference engines include TensorRT、OpenVINO、ONNX Runtime、Paddle Lite、Paddle Inference; + - Support Paddle Inference TensorRT, and provide a seamless deployment experience with other inference engines include Paddle Inference、Paddle Lite、TensorRT、OpenVINO、ONNX Runtime; - Support Graphcore IPU through paddle Inference; - Support tracking model [PP-Tracking](./examples/vision/tracking/pptracking) and [RobustVideoMatting](./examples/vision/matting) model; - Support [one-click model quantization](tools/quantization) to improve model inference speed by 1.5 to 2 times on CPU & GPU platform. The supported quantized model are YOLOv7, YOLOv6, YOLOv5, etc. - 🔥 **2022.10.24:Release FastDeploy [release v0.4.0](https://github.com/PaddlePaddle/FastDeploy/tree/release/0.4.0)**
- **🖥️ Data Center and Cloud Deployment: end-to-end optimization, Support more CV and NLP model** - - end-to-end optimization on GPU, [YOLO series](examples/vision/detection) model end-to-end inference speedup from 43ms to 25ms;
+ - end-to-end optimization on GPU, [YOLO series](examples/vision/detection) model end-to-end inference speedup from 43ms to 25ms; - Support CV models include PP-OCRv3, PP-OCRv2, PP-TinyPose, PP-Matting, etc. and provides [end-to-end deployment demos](examples/vision/detection/) - Support information extraction model is UIE, and provides [end-to-end deployment demos](examples/text/uie); - - Support [TinyPose](examples/vision/keypointdetection/tiny_pose) and [PicoDet+TinyPose](examples/vision/keypointdetection/det_keypoint_unite)Pipeline deployment; + - Support [TinyPose](examples/vision/keypointdetection/tiny_pose) and [PicoDet+TinyPose](examples/vision/keypointdetection/det_keypoint_unite)Pipeline deployment. - **📲 Mobile and Edge Device Deployment: support new backend,support more CV model** - Integrate Paddle Lite and provide a seamless deployment experience with other inference engines include TensorRT、OpenVINO、ONNX Runtime、Paddle Inference; - - Support [Lightweight Detection Model](examples/vision/detection/paddledetection/android) and [classification model](examples/vision/classification/paddleclas/android) on Android Platform,Download to try it out; + - Support [Lightweight Detection Model](examples/vision/detection/paddledetection/android) and [classification model](examples/vision/classification/paddleclas/android) on Android Platform,Download to try it out. - **Web-Side Deployment: support more CV model** - - Web deployment and Mini Program deployment New [OCR and other CV models](examples/application/js) capability; + - Web deployment and Mini Program deployment New [OCR and other CV models](examples/application/js) capability. 
## Contents From 628140dfcb07136ce2476baf687f3609bf2b6b30 Mon Sep 17 00:00:00 2001 From: leiqing <54695910+leiqing1@users.noreply.github.com> Date: Mon, 7 Nov 2022 00:18:54 +0800 Subject: [PATCH 20/30] Update README_EN.md --- README_EN.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README_EN.md b/README_EN.md index 8e127e6679..3930a5f816 100644 --- a/README_EN.md +++ b/README_EN.md @@ -50,7 +50,7 @@ English | [简体中文](README_CN.md) - 🔥 **2022.10.24:Release FastDeploy [release v0.4.0](https://github.com/PaddlePaddle/FastDeploy/tree/release/0.4.0)**
- **🖥️ Data Center and Cloud Deployment: end-to-end optimization, Support more CV and NLP model** - end-to-end optimization on GPU, [YOLO series](examples/vision/detection) model end-to-end inference speedup from 43ms to 25ms; - - Support CV models include PP-OCRv3, PP-OCRv2, PP-TinyPose, PP-Matting, etc. and provides [end-to-end deployment demos](examples/vision/detection/) + - Support CV models include PP-OCRv3, PP-OCRv2, PP-TinyPose, PP-Matting, etc. and provides [end-to-end deployment demos](examples/vision/detection/); - Support information extraction model is UIE, and provides [end-to-end deployment demos](examples/text/uie); - Support [TinyPose](examples/vision/keypointdetection/tiny_pose) and [PicoDet+TinyPose](examples/vision/keypointdetection/det_keypoint_unite)Pipeline deployment. - **📲 Mobile and Edge Device Deployment: support new backend,support more CV model** From e90aa3722380c7a50b931b07aa3d063323575e6b Mon Sep 17 00:00:00 2001 From: leiqing <54695910+leiqing1@users.noreply.github.com> Date: Mon, 7 Nov 2022 00:23:49 +0800 Subject: [PATCH 21/30] Update README_EN.md --- README_EN.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README_EN.md b/README_EN.md index 3930a5f816..bdf4fbd503 100644 --- a/README_EN.md +++ b/README_EN.md @@ -217,7 +217,7 @@ For more deployment models, please refer to [Vision Model Deployment Examples](e ### Supported Data Center and Web Model List🔥🔥🔥🔥🔥 -Notes: ✅: already supported; ❔: to be supported in the future; N/A: not supported now; +Notes: ✅: already supported; ❔: to be supported in the future; N/A: Not Available; From cccdf30941fcced410201ebac88b88ae6f4ae680 Mon Sep 17 00:00:00 2001 From: leiqing <54695910+leiqing1@users.noreply.github.com> Date: Mon, 7 Nov 2022 00:23:52 +0800 Subject: [PATCH 22/30] Update README_CN.md --- README_CN.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README_CN.md b/README_CN.md index 4cd182f274..8b380a224d 100644 --- a/README_CN.md +++ 
b/README_CN.md @@ -203,7 +203,7 @@ int main(int argc, char* argv[]) {
-符号说明: (1) ✅: 已经支持; (2) ❔: 正在进行中; (3) N/A: 暂不支持; (4) --: 暂不考虑;
+符号说明: (1) ✅: 已经支持; (2) ❔: 正在进行中; (3) N/A: 暂不支持;
链接说明:「模型列」会跳转到模型推理Demo代码 | 任务场景 | 模型 | API | Linux | Linux | Win | Win | Mac | Mac | Linux | Linux | Linux | Linux | From 4db162ed53f854fee063dc0cee4a24b531cc7314 Mon Sep 17 00:00:00 2001 From: leiqing <54695910+leiqing1@users.noreply.github.com> Date: Mon, 7 Nov 2022 00:26:43 +0800 Subject: [PATCH 23/30] Update README_EN.md --- README_EN.md | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/README_EN.md b/README_EN.md index bdf4fbd503..7976617cf0 100644 --- a/README_EN.md +++ b/README_EN.md @@ -219,7 +219,10 @@ For more deployment models, please refer to [Vision Model Deployment Examples](e Notes: ✅: already supported; ❔: to be supported in the future; N/A: Not Available; - +
+ +
+ | Task | Model | API | Linux | Linux | Win | Win | Mac | Mac | Linux | Linux | Linux | Linux | |:-----------------------------:|:---------------------------------------------------------------------------------------:|:---------------------------------------------------------------------------------------------------------------------------------:|:---------------------:|:------------------------:|:------------------------:|:------------------------:|:-----------------------:|:---------------------:|:--------------------------:|:---------------------------:|:--------------------------:|:---------------------------:| @@ -303,7 +306,9 @@ Notes: ✅: already supported; ❔: to be supported in the future; N/A: Not Ava ### Supported Edge-Side Model List - +
+ +
| Task | Model | Size (MB) | Linux | Android | iOS | Linux |Linux | Linux | Linux | TBD... | |:------------------:|:----------------------------:|:---------:|:-------:|:-------:|:-------:|:-----------------------------------------:|:---------------------------------------:|:------------------------:|:-------:|:-------:| From 672d50a0d131e3f9dea4f9b7d7afcbe9719ce26f Mon Sep 17 00:00:00 2001 From: leiqing <54695910+leiqing1@users.noreply.github.com> Date: Mon, 7 Nov 2022 00:28:37 +0800 Subject: [PATCH 24/30] Update README_CN.md --- README_CN.md | 28 +++++++++++++++++++--------- 1 file changed, 19 insertions(+), 9 deletions(-) diff --git a/README_CN.md b/README_CN.md index 8b380a224d..b8eed04585 100644 --- a/README_CN.md +++ b/README_CN.md @@ -199,13 +199,17 @@ int main(int argc, char* argv[]) { 更多部署案例请参考[视觉模型部署示例](examples/vision) . -### 服务端模型支持列表 🔥🔥🔥🔥🔥 -
+### 服务端模型支持列表 🔥🔥🔥🔥🔥 + 符号说明: (1) ✅: 已经支持; (2) ❔: 正在进行中; (3) N/A: 暂不支持;
链接说明:「模型列」会跳转到模型推理Demo代码 +
+ +
+ | 任务场景 | 模型 | API | Linux | Linux | Win | Win | Mac | Mac | Linux | Linux | Linux | Linux | |:----------------------:|:--------------------------------------------------------------------------------------------:|:-----------------------------------------------------------------------------------------------------------------------------------------:|:-------:|:----------:|:-------:|:----------:|:-------:|:-------:|:-----------:|:-------------:|:-------------:|:-------:| | --- | --- | --- | X86 CPU | NVIDIA GPU | X86 CPU | NVIDIA GPU | X86 CPU | Arm CPU | AArch64 CPU | NVIDIA Jetson | Graphcore IPU | Serving | @@ -273,19 +277,24 @@ int main(int argc, char* argv[]) { | Speech | [PaddleSpeech/PP-TTS](./examples/text/uie) | [Python](examples/audio/pp-tts/python)/C++ | ❔ | ❔ | ❔ | ❔ | ❔ | ❔ | ❔ | ❔ | -- | ✅ | -## 📲 移动端和端侧部署 🔥🔥🔥🔥 -
-### Paddle Lite NPU部署 +## 📲 移动端和端侧部署 🔥🔥🔥🔥 +
+### Paddle Lite NPU部署 + - [瑞芯微-NPU/晶晨-NPU/恩智浦-NPU](https://github.com/PaddlePaddle/Paddle-Lite-Demo/tree/develop/object_detection/linux/picodet_detection) +
+ ### 📲 端侧模型支持列表 -
+
+ +
| 任务场景 | 模型 | 大小(MB) | Linux | Android | iOS | Linux | Linux | Linux | Linux | 更新中... | |:------------------:|:-----------------------------------------------------------------------------------------:|:--------:|:-------:|:-------:|:-------: |:------------------:|:------------------------------------:|:---------------------------------:|:------------------------:|:-------:| @@ -341,20 +350,21 @@ int main(int argc, char* argv[]) { | Object Recognition | [ItemIdentification](examples/application/js/web_demo/src/pages/cv/recognition) | ✅ | | OCR | [PaddleOCR/PP-OCRv3](./examples/application/js/web_demo/src/pages/cv/ocr) | ✅ | -## 社区交流 -
+## 社区交流 + - **加入社区👬:** 微信扫描二维码,进入**FastDeploy技术交流群**
-## Acknowledge
+## Acknowledge + 本项目中SDK生成和下载使用了[EasyEdge](https://ai.baidu.com/easyedge/app/openSource)中的免费开放能力,在此表示感谢。 ## License From 5caded06a34654466f47fcf4c90db38598b1e8b6 Mon Sep 17 00:00:00 2001 From: leiqing <54695910+leiqing1@users.noreply.github.com> Date: Mon, 7 Nov 2022 00:34:43 +0800 Subject: [PATCH 25/30] Update README_EN.md --- README_EN.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README_EN.md b/README_EN.md index 7976617cf0..16ec336493 100644 --- a/README_EN.md +++ b/README_EN.md @@ -52,7 +52,7 @@ English | [简体中文](README_CN.md) - end-to-end optimization on GPU, [YOLO series](examples/vision/detection) model end-to-end inference speedup from 43ms to 25ms; - Support CV models include PP-OCRv3, PP-OCRv2, PP-TinyPose, PP-Matting, etc. and provides [end-to-end deployment demos](examples/vision/detection/); - Support information extraction model is UIE, and provides [end-to-end deployment demos](examples/text/uie); - - Support [TinyPose](examples/vision/keypointdetection/tiny_pose) and [PicoDet+TinyPose](examples/vision/keypointdetection/det_keypoint_unite)Pipeline deployment. + - Support [TinyPose](examples/vision/keypointdetection/tiny_pose) and [PicoDet and TinyPose](examples/vision/keypointdetection/det_keypoint_unite)Pipeline deployment. - **📲 Mobile and Edge Device Deployment: support new backend,support more CV model** - Integrate Paddle Lite and provide a seamless deployment experience with other inference engines include TensorRT、OpenVINO、ONNX Runtime、Paddle Inference; - Support [Lightweight Detection Model](examples/vision/detection/paddledetection/android) and [classification model](examples/vision/classification/paddleclas/android) on Android Platform,Download to try it out. 
From 70f664161fbd4243c62dfef606c96a32b67881d8 Mon Sep 17 00:00:00 2001 From: Jack Zhou Date: Mon, 7 Nov 2022 10:02:42 +0800 Subject: [PATCH 26/30] [Functions] Add fd tensor concat (#507) * Add fd tensor concat * fix comment --- fastdeploy/function/concat.cc | 124 ++++++++++++++++++++++++++++++++++ fastdeploy/function/concat.h | 30 ++++++++ tests/function/test_concat.cc | 80 ++++++++++++++++++++++ 3 files changed, 234 insertions(+) create mode 100644 fastdeploy/function/concat.cc create mode 100644 fastdeploy/function/concat.h create mode 100644 tests/function/test_concat.cc diff --git a/fastdeploy/function/concat.cc b/fastdeploy/function/concat.cc new file mode 100644 index 0000000000..c2b1f27444 --- /dev/null +++ b/fastdeploy/function/concat.cc @@ -0,0 +1,124 @@ +// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#include "fastdeploy/function/concat.h" + +#include +#include +#include +#include +#include "fastdeploy/utils/utils.h" + +namespace fastdeploy { + +std::string Str(const std::vector& shape) { + std::ostringstream oss; + oss << "[ " << shape[0]; + for (int i = 1; i < shape.size(); ++i) { + oss << " ," << shape[i]; + } + oss << " ]"; + return oss.str(); +} + +std::vector ComputeAndCheckConcatOutputShape( + const std::vector& input, int axis) { + const size_t n = input.size(); + auto out_dims = input[0].shape; + size_t in_zero_dims_size = out_dims.size(); + for (size_t i = 1; i < n; ++i) { + FDASSERT(input[i].shape.size() == out_dims.size(), + "The shape of input[0] and input[%d] is expected to be equal. But " + "received input[0]'s shape = %s, input[%d]'s shape = %s.", + i, Str(out_dims).c_str(), i, Str(input[i].shape).c_str()); + for (size_t j = 0; j < in_zero_dims_size; j++) { + if (j == axis) { + out_dims[axis] += input[i].shape[axis]; + } else { + FDASSERT( + input[0].shape[j] == input[i].shape[j], + "The %d-th dimension of input[0] and input[%d] is expected to be " + "equal." 
+ "But received input[0]'s shape = %s, input[%d]'s shape = %s.", + j, i, Str(input[0].shape).c_str(), i, Str(input[i].shape).c_str()); + } + } + } + return out_dims; +} + +template +struct ConcatFunctor { + void operator()(const std::vector& input, int axis, + FDTensor* output) { + size_t num = input.size(); + + int64_t rows = 1; + auto dim_0 = input[0].shape; + for (int i = 0; i < axis; ++i) { + rows *= dim_0[i]; + } + int64_t out_rows = rows, out_cols = 0; + + std::vector input_cols(num); + for (size_t i = 0; i < num; ++i) { + int64_t t_cols = input[i].Numel() / rows; + out_cols += t_cols; + input_cols[i] = t_cols; + } + + // computation + T* output_data = reinterpret_cast(output->Data()); + int64_t col_idx = 0; + for (size_t j = 0; j < num; ++j) { + int64_t col_len = input_cols[j]; + const T* input_data = reinterpret_cast(input[j].Data()); + for (int64_t k = 0; k < out_rows; ++k) { + std::memcpy(output_data + k * out_cols + col_idx, + input_data + k * col_len, sizeof(T) * col_len); + } + col_idx += col_len; + } + } +}; + +template +void ConcatKernel(const std::vector& input, FDTensor* output, + int axis) { + auto output_shape = ComputeAndCheckConcatOutputShape(input, axis); + output->Allocate(output_shape, TypeToDataType::dtype); + + ConcatFunctor functor; + functor(input, axis, output); +} + +void Concat(const std::vector& x, FDTensor* out, int axis) { + FDASSERT(x.size() > 0, + "The number of FDTensor array should be larger than 0, but the size " + "of input is %d", + x.size()); + int64_t rank = x[0].shape.size(); + FDASSERT(axis >= -rank && axis < rank, + "The axis is expected to be in range of [%d, %d), but got %d", -rank, + rank, axis); + if (axis < 0) { + axis += rank; + } + FDTensor out_temp; + FD_VISIT_ALL_TYPES(x[0].dtype, "Concat", + ([&] { ConcatKernel(x, &out_temp, axis); })); + *out = std::move(out_temp); +} + +} // namespace fastdeploy diff --git a/fastdeploy/function/concat.h b/fastdeploy/function/concat.h new file mode 100644 index 
0000000000..22e388b0fe --- /dev/null +++ b/fastdeploy/function/concat.h @@ -0,0 +1,30 @@ +// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once + +#include "fastdeploy/core/fd_tensor.h" + +namespace fastdeploy { + +/** Excute the concatenate operation for input FDTensor along given axis. + @param x The input tensor. + @param out The output tensor which stores the result. + @param axisi Axis which will be concatenated. +*/ + +FASTDEPLOY_DECL void Concat(const std::vector& x, FDTensor* out, + int axis = 0); + +} // namespace fastdeploy diff --git a/tests/function/test_concat.cc b/tests/function/test_concat.cc new file mode 100644 index 0000000000..4bdd49dd12 --- /dev/null +++ b/tests/function/test_concat.cc @@ -0,0 +1,80 @@ +// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#include +#include +#include "fastdeploy/core/fd_tensor.h" +#include "fastdeploy/function/concat.h" +#include "glog/logging.h" +#include "gtest/gtest.h" +#include "gtest_utils.h" + +namespace fastdeploy { + +TEST(fastdeploy, concat1) { + CheckShape check_shape; + std::vector inputs(3); + FDTensor output; + inputs[0].Allocate({5, 1, 4, 5}, FDDataType::FP32); + inputs[1].Allocate({5, 2, 4, 5}, FDDataType::FP32); + inputs[2].Allocate({5, 3, 4, 5}, FDDataType::FP32); + Concat(inputs, &output, 1); + check_shape(output.shape, {5, 6, 4, 5}); +} + +TEST(fastdeploy, concat2) { + CheckShape check_shape; + std::vector inputs(3); + FDTensor output; + inputs[0].Allocate({2, 3, 4, 5}, FDDataType::FP32); + inputs[1].Allocate({2, 3, 4, 5}, FDDataType::FP32); + inputs[2].Allocate({2, 3, 4, 5}, FDDataType::FP32); + Concat(inputs, &output, 1); + check_shape(output.shape, {2, 9, 4, 5}); +} + +TEST(fastdeploy, concat3) { + CheckShape check_shape; + std::vector inputs(3); + FDTensor output; + inputs[0].Allocate({1, 256, 170, 256}, FDDataType::FP32); + inputs[1].Allocate({1, 128, 170, 256}, FDDataType::FP32); + inputs[2].Allocate({1, 128, 170, 256}, FDDataType::FP32); + Concat(inputs, &output, 1); + check_shape(output.shape, {1, 512, 170, 256}); +} + +TEST(fastdeploy, concat4) { + CheckShape check_shape; + std::vector inputs(3); + FDTensor output; + inputs[0].Allocate({2, 3, 4, 5}, FDDataType::FP32); + inputs[1].Allocate({2, 3, 4, 5}, FDDataType::FP32); + inputs[2].Allocate({0, 3, 4, 5}, FDDataType::FP32); + Concat(inputs, &output, 0); + check_shape(output.shape, {4, 3, 4, 5}); +} + +TEST(fastdeploy, concat5) { + CheckShape check_shape; + std::vector inputs(3); + FDTensor output; + inputs[0].Allocate({5, 1, 4, 5}, FDDataType::FP32); + inputs[1].Allocate({5, 2, 4, 5}, FDDataType::FP32); + inputs[2].Allocate({5, 3, 4, 5}, FDDataType::FP32); + Concat(inputs, &output, -3); + check_shape(output.shape, {5, 6, 4, 5}); +} + +} // namespace fastdeploy From 
152b2770c8fad7a72a2c49e17e1a59032c32c388 Mon Sep 17 00:00:00 2001 From: leiqing <54695910+leiqing1@users.noreply.github.com> Date: Mon, 7 Nov 2022 10:07:31 +0800 Subject: [PATCH 27/30] Update README_EN.md --- README_EN.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/README_EN.md b/README_EN.md index 16ec336493..eb786cac8d 100644 --- a/README_EN.md +++ b/README_EN.md @@ -99,13 +99,13 @@ English | [简体中文](README_CN.md) * **📲 Mobile and Edge Device Deployment** * [Paddle Lite NPU Deployment](#fastdeploy-edge-sdk-npu) * [Supported Mobile and Edge Model List](#fastdeploy-edge-models) -* **Web and Mini_Program Deployment** +* **Web and Mini Program Deployment** * [Supported Web and Mini Program Model List](#fastdeploy-web-models) * [**Community**](#fastdeploy-community) * [**Acknowledge**](#fastdeploy-acknowledge) * [**License**](#fastdeploy-license) -## 🖥️ Data Center and Web Deployment +## 🖥️ Data Center and Cloud Deployment
@@ -293,7 +293,7 @@ Notes: ✅: already supported; ❔: to be supported in the future; N/A: Not Ava
-## 📲 Mobile and Edge Device Deployment 🔥🔥🔥🔥 +## 📲 Mobile and Edge Device Deployment
@@ -304,7 +304,7 @@ Notes: ✅: already supported; ❔: to be supported in the future; N/A: Not Ava
-### Supported Edge-Side Model List +### Supported Mobile and Edge Model List 🔥🔥🔥🔥
From bf5c1aec656d12865232a4adb853e8ef965c63b5 Mon Sep 17 00:00:00 2001 From: leiqing <54695910+leiqing1@users.noreply.github.com> Date: Mon, 7 Nov 2022 10:09:05 +0800 Subject: [PATCH 28/30] Update README_EN.md --- README_EN.md | 36 ++++++++++++++++++------------------ 1 file changed, 18 insertions(+), 18 deletions(-) diff --git a/README_EN.md b/README_EN.md index eb786cac8d..d30fd587ad 100644 --- a/README_EN.md +++ b/README_EN.md @@ -65,31 +65,31 @@ English | [简体中文](README_CN.md) *
📖 Tutorials(click to shrink) - Install - - [How to Install FastDeploy Prebuilt Libraries](en/build_and_install/download_prebuilt_libraries.md) - - [How to Build and Install FastDeploy Library on GPU Platform](en/build_and_install/gpu.md) - - [How to Build and Install FastDeploy Library on CPU Platform](en/build_and_install/cpu.md) - - [How to Build and Install FastDeploy Library on IPU Platform](en/build_and_install/ipu.md) - - [How to Build and Install FastDeploy Library on Nvidia Jetson Platform](en/build_and_install/jetson.md) - - [How to Build and Install FastDeploy Library on Android Platform](en/build_and_install/android.md) + - [How to Install FastDeploy Prebuilt Libraries](docs/en/build_and_install/download_prebuilt_libraries.md) + - [How to Build and Install FastDeploy Library on GPU Platform](docs/en/build_and_install/gpu.md) + - [How to Build and Install FastDeploy Library on CPU Platform](docs/en/build_and_install/cpu.md) + - [How to Build and Install FastDeploy Library on IPU Platform](docs/en/build_and_install/ipu.md) + - [How to Build and Install FastDeploy Library on Nvidia Jetson Platform](docs/en/build_and_install/jetson.md) + - [How to Build and Install FastDeploy Library on Android Platform](docs/en/build_and_install/android.md) - A Quick Start - Demos - - [Python Deployment Demo](en/quick_start/models/python.md) - - [C++ Deployment Demo](en/quick_start/models/cpp.md) - - [A Quick Start on Runtime Python](en/quick_start/runtime/python.md) - - [A Quick Start on Runtime C++](en/quick_start/runtime/cpp.md) + - [Python Deployment Demo](docs/en/quick_start/models/python.md) + - [C++ Deployment Demo](docs/en/quick_start/models/cpp.md) + - [A Quick Start on Runtime Python](docs/en/quick_start/runtime/python.md) + - [A Quick Start on Runtime C++](docs/en/quick_start/runtime/cpp.md) - API (To be continued) - [Python API](https://baidu-paddle.github.io/fastdeploy-api/python/html/) - [C++ API](https://baidu-paddle.github.io/fastdeploy-api/cpp/html/) - 
Performance Optimization - - [Quantization Acceleration](en/quantize.md) + - [Quantization Acceleration](docs/en/quantize.md) - Frequent Q&As - - [1. How to Change Inference Backends](en/faq/how_to_change_backend.md) - - [2. How to Use FastDeploy C++ SDK on Windows Platform](en/faq/use_sdk_on_windows.md) - - [3. How to Use FastDeploy C++ SDK on Android Platform](en/faq/use_sdk_on_android.md)(To be Continued) - - [4. Tricks of TensorRT](en/faq/tensorrt_tricks.md) - - [5. How to Develop a New Model](en/faq/develop_a_new_model.md)(To be Continued) + - [1. How to Change Inference Backends](docs/en/faq/how_to_change_backend.md) + - [2. How to Use FastDeploy C++ SDK on Windows Platform](docs/en/faq/use_sdk_on_windows.md) + - [3. How to Use FastDeploy C++ SDK on Android Platform](docs/en/faq/use_sdk_on_android.md)(To be Continued) + - [4. Tricks of TensorRT](docs/en/faq/tensorrt_tricks.md) + - [5. How to Develop a New Model](docs/en/faq/develop_a_new_model.md)(To be Continued) - More FastDeploy Deployment Module - - [deployment AI Model as a Service](../serving) - - [Benchmark Testing](../benchmark) + - [deployment AI Model as a Service](./serving) + - [Benchmark Testing](./benchmark)
* **🖥️ Data Center and Cloud Deployment** From 40b099ac99a26982e5abf1b3cb2064ac1ad49527 Mon Sep 17 00:00:00 2001 From: WJJ1995 Date: Mon, 7 Nov 2022 14:25:46 +0800 Subject: [PATCH 29/30] [Doc] Update api_docs in python (#511) * add paddle_trt in benchmark * update benchmark in device * update benchmark * update result doc * fixed for CI * update python api_docs --- docs/api_docs/python/face_alignment.md | 9 +++++++++ docs/api_docs/python/headpose.md | 9 +++++++++ docs/api_docs/python/matting.md | 8 ++++++++ 3 files changed, 26 insertions(+) create mode 100644 docs/api_docs/python/face_alignment.md create mode 100644 docs/api_docs/python/headpose.md diff --git a/docs/api_docs/python/face_alignment.md b/docs/api_docs/python/face_alignment.md new file mode 100644 index 0000000000..f0369b55af --- /dev/null +++ b/docs/api_docs/python/face_alignment.md @@ -0,0 +1,9 @@ +# Face Alignment API + +## fastdeploy.vision.facealign.PFLD + +```{eval-rst} +.. autoclass:: fastdeploy.vision.facealign.PFLD + :members: + :inherited-members: +``` diff --git a/docs/api_docs/python/headpose.md b/docs/api_docs/python/headpose.md new file mode 100644 index 0000000000..d1fba74f92 --- /dev/null +++ b/docs/api_docs/python/headpose.md @@ -0,0 +1,9 @@ +# Headpose API + +## fastdeploy.vision.headpose.FSANet + +```{eval-rst} +.. autoclass:: fastdeploy.vision.headpose.FSANet + :members: + :inherited-members: +``` diff --git a/docs/api_docs/python/matting.md b/docs/api_docs/python/matting.md index 7c121110ac..5e9c2a2273 100644 --- a/docs/api_docs/python/matting.md +++ b/docs/api_docs/python/matting.md @@ -15,3 +15,11 @@ :members: :inherited-members: ``` + +## fastdeploy.vision.matting.RobustVideoMatting + +```{eval-rst} +.. 
autoclass:: fastdeploy.vision.matting.RobustVideoMatting + :members: + :inherited-members: +``` From 6633fa3db90eb5f85bc3ec22f5a3c5280740b2cf Mon Sep 17 00:00:00 2001 From: Jason Date: Mon, 7 Nov 2022 15:09:00 +0800 Subject: [PATCH 30/30] [Model] [Part 1] Refactor PaddleClas module (#508) * Split PaddleClas Module refactor * Split PaddleClas Module refactor * fix bug --- fastdeploy/core/fd_tensor.cc | 1 + fastdeploy/core/fd_tensor.h | 6 + fastdeploy/pybind/fd_tensor.cc | 35 ++++++ fastdeploy/pybind/main.cc.in | 7 +- fastdeploy/pybind/main.h | 1 + fastdeploy/pybind/runtime.cc | 46 ++++---- .../vision/common/processors/limit_long.cc | 88 --------------- .../vision/common/processors/limit_long.h | 51 --------- .../vision/common/processors/limit_short.cc | 2 +- fastdeploy/vision/common/processors/mat.cc | 13 +++ fastdeploy/vision/common/processors/mat.h | 10 ++ fastdeploy/vision/common/processors/resize.cc | 2 +- .../vision/common/processors/transform.cc | 106 ++++++++++++++++++ .../vision/common/processors/transform.h | 88 +-------------- .../vision/tracking/pptracking/model.cc | 4 +- python/fastdeploy/__init__.py | 18 ++- python/setup.py | 18 ++- 17 files changed, 227 insertions(+), 269 deletions(-) create mode 100644 fastdeploy/pybind/fd_tensor.cc delete mode 100644 fastdeploy/vision/common/processors/limit_long.cc delete mode 100644 fastdeploy/vision/common/processors/limit_long.h create mode 100644 fastdeploy/vision/common/processors/transform.cc diff --git a/fastdeploy/core/fd_tensor.cc b/fastdeploy/core/fd_tensor.cc index 8b739d844a..ff4ae61aaa 100644 --- a/fastdeploy/core/fd_tensor.cc +++ b/fastdeploy/core/fd_tensor.cc @@ -131,6 +131,7 @@ void FDTensor::Resize(const std::vector& new_shape, const FDDataType& data_type, const std::string& tensor_name, const Device& new_device) { + external_data_ptr = nullptr; name = tensor_name; device = new_device; dtype = data_type; diff --git a/fastdeploy/core/fd_tensor.h b/fastdeploy/core/fd_tensor.h index 
1619fe2711..ef7fbff414 100644 --- a/fastdeploy/core/fd_tensor.h +++ b/fastdeploy/core/fd_tensor.h @@ -93,6 +93,12 @@ struct FASTDEPLOY_DECL FDTensor { // Total number of elements in this tensor int Numel() const; + // Get shape of FDTensor + std::vector Shape() const { return shape; } + + // Get dtype of FDTensor + FDDataType Dtype() const { return dtype; } + void Resize(size_t nbytes); void Resize(const std::vector& new_shape); diff --git a/fastdeploy/pybind/fd_tensor.cc b/fastdeploy/pybind/fd_tensor.cc new file mode 100644 index 0000000000..2e14b6d18a --- /dev/null +++ b/fastdeploy/pybind/fd_tensor.cc @@ -0,0 +1,35 @@ +// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#include "fastdeploy/fastdeploy_model.h" +#include "fastdeploy/pybind/main.h" + +namespace fastdeploy { + +void BindFDTensor(pybind11::module& m) { + pybind11::class_(m, "FDTensor") + .def(pybind11::init<>(), "Default Constructor") + .def_readwrite("name", &FDTensor::name) + .def_readonly("shape", &FDTensor::shape) + .def_readonly("dtype", &FDTensor::dtype) + .def_readonly("device", &FDTensor::device) + .def("numpy", [](FDTensor& self) { + return TensorToPyArray(self); + }) + .def("from_numpy", [](FDTensor& self, pybind11::array& pyarray, bool share_buffer = false) { + PyArrayToTensor(pyarray, &self, share_buffer); + }); +} + +} // namespace fastdeploy diff --git a/fastdeploy/pybind/main.cc.in b/fastdeploy/pybind/main.cc.in index 74fe90433b..97aafc64a2 100644 --- a/fastdeploy/pybind/main.cc.in +++ b/fastdeploy/pybind/main.cc.in @@ -16,6 +16,7 @@ namespace fastdeploy { +void BindFDTensor(pybind11::module&); void BindRuntime(pybind11::module&); void BindFDModel(pybind11::module&); void BindVision(pybind11::module&); @@ -70,7 +71,7 @@ void PyArrayToTensor(pybind11::array& pyarray, FDTensor* tensor, data_shape.insert(data_shape.begin(), pyarray.shape(), pyarray.shape() + pyarray.ndim()); if (share_buffer) { - tensor-> SetExternalData(data_shape, dtype, + tensor->SetExternalData(data_shape, dtype, pyarray.mutable_data()); } else { tensor->Resize(data_shape, dtype); @@ -80,6 +81,7 @@ void PyArrayToTensor(pybind11::array& pyarray, FDTensor* tensor, void PyArrayToTensorList(std::vector& pyarrays, std::vector* tensors, bool share_buffer) { + tensors->resize(pyarrays.size()); for(auto i = 0; i < pyarrays.size(); ++i) { PyArrayToTensor(pyarrays[i], &(*tensors)[i], share_buffer); } @@ -88,7 +90,7 @@ void PyArrayToTensorList(std::vector& pyarrays, std::vector #include #include +#include #include diff --git a/fastdeploy/pybind/runtime.cc b/fastdeploy/pybind/runtime.cc index a694be9703..11cf9bf4ed 100755 --- a/fastdeploy/pybind/runtime.cc +++ b/fastdeploy/pybind/runtime.cc @@ 
-162,6 +162,25 @@ void BindRuntime(pybind11::module& m) { } return results; }) + .def("infer", [](Runtime& self, std::map& data) { + std::vector inputs; + inputs.reserve(data.size()); + for (auto iter = data.begin(); iter != data.end(); ++iter) { + FDTensor tensor; + tensor.SetExternalData(iter->second.Shape(), iter->second.Dtype(), iter->second.Data(), iter->second.device); + tensor.name = iter->first; + inputs.push_back(tensor); + } + std::vector outputs; + if (!self.Infer(inputs, &outputs)) { + pybind11::eval("raise Exception('Failed to inference with Runtime.')"); + } + return outputs; + }) + .def("infer", [](Runtime& self, std::vector& inputs) { + std::vector outputs; + return self.Infer(inputs, &outputs); + }) .def("num_inputs", &Runtime::NumInputs) .def("num_outputs", &Runtime::NumOutputs) .def("get_input_info", &Runtime::GetInputInfo) @@ -202,33 +221,6 @@ void BindRuntime(pybind11::module& m) { .value("FP64", FDDataType::FP64) .value("UINT8", FDDataType::UINT8); - pybind11::class_(m, "FDTensor", pybind11::buffer_protocol()) - .def(pybind11::init()) - .def("cpu_data", - [](FDTensor& self) { - auto ptr = self.CpuData(); - auto numel = self.Numel(); - auto dtype = FDDataTypeToNumpyDataType(self.dtype); - auto base = pybind11::array(dtype, self.shape); - return pybind11::array(dtype, self.shape, ptr, base); - }) - .def("resize", static_cast(&FDTensor::Resize)) - .def("resize", - static_cast&)>( - &FDTensor::Resize)) - .def( - "resize", - [](FDTensor& self, const std::vector& shape, - const FDDataType& dtype, const std::string& name, - const Device& device) { self.Resize(shape, dtype, name, device); }) - .def("numel", &FDTensor::Numel) - .def("nbytes", &FDTensor::Nbytes) - .def_readwrite("name", &FDTensor::name) - .def_readwrite("is_pinned_memory", &FDTensor::is_pinned_memory) - .def_readonly("shape", &FDTensor::shape) - .def_readonly("dtype", &FDTensor::dtype) - .def_readonly("device", &FDTensor::device); - m.def("get_available_backends", []() { return 
GetAvailableBackends(); }); } diff --git a/fastdeploy/vision/common/processors/limit_long.cc b/fastdeploy/vision/common/processors/limit_long.cc deleted file mode 100644 index 7021f131b4..0000000000 --- a/fastdeploy/vision/common/processors/limit_long.cc +++ /dev/null @@ -1,88 +0,0 @@ -// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -#include "fastdeploy/vision/common/processors/limit_long.h" - -namespace fastdeploy { -namespace vision { - -bool LimitLong::ImplByOpenCV(Mat* mat) { - cv::Mat* im = mat->GetOpenCVMat(); - int origin_w = im->cols; - int origin_h = im->rows; - int im_size_max = std::max(origin_w, origin_h); - int target = im_size_max; - if (max_long_ > 0 && im_size_max > max_long_) { - target = max_long_; - } else if (min_long_ > 0 && im_size_max < min_long_) { - target = min_long_; - } - if (target != im_size_max) { - double scale = - static_cast(target) / static_cast(im_size_max); - cv::resize(*im, *im, cv::Size(), scale, scale, interp_); - mat->SetWidth(im->cols); - mat->SetHeight(im->rows); - } - return true; -} - -#ifdef ENABLE_FLYCV -bool LimitLong::ImplByFlyCV(Mat* mat) { - fcv::Mat* im = mat->GetFlyCVMat(); - int origin_w = im->width(); - int origin_h = im->height(); - int im_size_max = std::max(origin_w, origin_h); - int target = im_size_max; - if (max_long_ > 0 && im_size_max > max_long_) { - target = max_long_; - } else if (min_long_ > 0 && im_size_max < min_long_) { - 
target = min_long_; - } - if (target != im_size_max) { - double scale = - static_cast(target) / static_cast(im_size_max); - if (fabs(scale - 1.0) < 1e-06) { - return true; - } - auto interp_method = fcv::InterpolationType::INTER_LINEAR; - if (interp_ == 0) { - interp_method = fcv::InterpolationType::INTER_NEAREST; - } else if (interp_ == 1) { - interp_method = fcv::InterpolationType::INTER_LINEAR; - } else if (interp_ == 2) { - interp_method = fcv::InterpolationType::INTER_CUBIC; - } else { - FDERROR << "LimitLong: Only support interp_ be 0/1/2 with FlyCV, but " - "now it's " - << interp_ << "." << std::endl; - return false; - } - fcv::Mat new_im; - fcv::resize(*im, new_im, fcv::Size(), scale, scale, interp_method); - mat->SetMat(new_im); - mat->SetWidth(new_im.width()); - mat->SetHeight(new_im.height()); - } - return true; -} -#endif - -bool LimitLong::Run(Mat* mat, int max_long, int min_long, int interp, - ProcLib lib) { - auto l = LimitLong(max_long, min_long, interp); - return l(mat, lib); -} -} // namespace vision -} // namespace fastdeploy diff --git a/fastdeploy/vision/common/processors/limit_long.h b/fastdeploy/vision/common/processors/limit_long.h deleted file mode 100644 index 49055973d2..0000000000 --- a/fastdeploy/vision/common/processors/limit_long.h +++ /dev/null @@ -1,51 +0,0 @@ -// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -#pragma once - -#include "fastdeploy/vision/common/processors/base.h" - -namespace fastdeploy { -namespace vision { - -class FASTDEPLOY_DECL LimitLong : public Processor { - public: - explicit LimitLong(int max_long = -1, int min_long = -1, int interp = 1) { - max_long_ = max_long; - min_long_ = min_long; - interp_ = interp; - } - - // Limit the long edge of image. - // If the long edge is larger than max_long_, resize the long edge - // to max_long_, while scale the short edge proportionally. - // If the long edge is smaller than min_long_, resize the long edge - // to min_long_, while scale the short edge proportionally. - bool ImplByOpenCV(Mat* mat); -#ifdef ENABLE_FLYCV - bool ImplByFlyCV(Mat* mat); -#endif - std::string Name() { return "LimitLong"; } - - static bool Run(Mat* mat, int max_long = -1, int min_long = -1, - int interp = 1, ProcLib lib = ProcLib::DEFAULT); - int GetMaxLong() const { return max_long_; } - - private: - int max_long_; - int min_long_; - int interp_; -}; -} // namespace vision -} // namespace fastdeploy diff --git a/fastdeploy/vision/common/processors/limit_short.cc b/fastdeploy/vision/common/processors/limit_short.cc index d0f0697c8f..c4e9097448 100644 --- a/fastdeploy/vision/common/processors/limit_short.cc +++ b/fastdeploy/vision/common/processors/limit_short.cc @@ -65,7 +65,7 @@ bool LimitShort::ImplByFlyCV(Mat* mat) { } else if (interp_ == 2) { interp_method = fcv::InterpolationType::INTER_CUBIC; } else { - FDERROR << "LimitLong: Only support interp_ be 0/1/2 with FlyCV, but " + FDERROR << "LimitShort: Only support interp_ be 0/1/2 with FlyCV, but " "now it's " << interp_ << "." 
<< std::endl; return false; diff --git a/fastdeploy/vision/common/processors/mat.cc b/fastdeploy/vision/common/processors/mat.cc index e2a64ea04e..7ef4f9d703 100644 --- a/fastdeploy/vision/common/processors/mat.cc +++ b/fastdeploy/vision/common/processors/mat.cc @@ -174,5 +174,18 @@ Mat Mat::Create(int height, int width, int channels, return mat; } +FDMat WrapMat(const cv::Mat& image) { + FDMat mat(image); + return mat; +} + +std::vector WrapMat(const std::vector& images) { + std::vector mats; + for (size_t i = 0; i < images.size(); ++i) { + mats.emplace_back(FDMat(images[i])); + } + return mats; +} + } // namespace vision } // namespace fastdeploy diff --git a/fastdeploy/vision/common/processors/mat.h b/fastdeploy/vision/common/processors/mat.h index 5e618057ca..5253700432 100644 --- a/fastdeploy/vision/common/processors/mat.h +++ b/fastdeploy/vision/common/processors/mat.h @@ -147,5 +147,15 @@ struct FASTDEPLOY_DECL Mat { FDDataType type, void* data, ProcLib lib); }; +typedef Mat FDMat; +/* + * @brief Wrap a cv::Mat to FDMat, there's no memory copy, memory buffer is managed by user + */ +FASTDEPLOY_DECL FDMat WrapMat(const cv::Mat& image); +/* + * Warp a vector to vector, there's no memory copy, memory buffer is managed by user + */ +FASTDEPLOY_DECL std::vector WrapMat(const std::vector& images); + } // namespace vision } // namespace fastdeploy diff --git a/fastdeploy/vision/common/processors/resize.cc b/fastdeploy/vision/common/processors/resize.cc index 28488c2cd4..da9104059d 100644 --- a/fastdeploy/vision/common/processors/resize.cc +++ b/fastdeploy/vision/common/processors/resize.cc @@ -79,7 +79,7 @@ bool Resize::ImplByFlyCV(Mat* mat) { } else if (interp_ == 2) { interp_method = fcv::InterpolationType::INTER_CUBIC; } else { - FDERROR << "LimitLong: Only support interp_ be 0/1/2 with FlyCV, but " + FDERROR << "Resize: Only support interp_ be 0/1/2 with FlyCV, but " "now it's " << interp_ << "." 
<< std::endl; return false; diff --git a/fastdeploy/vision/common/processors/transform.cc b/fastdeploy/vision/common/processors/transform.cc new file mode 100644 index 0000000000..8d440b9c62 --- /dev/null +++ b/fastdeploy/vision/common/processors/transform.cc @@ -0,0 +1,106 @@ +// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "fastdeploy/vision/common/processors/transform.h" + +namespace fastdeploy { +namespace vision { + +void FuseNormalizeCast( + std::vector>* processors) { + // Fuse Normalize and Cast + int cast_index = -1; + for (size_t i = 0; i < processors->size(); ++i) { + if ((*processors)[i]->Name() == "Cast") { + if (i == 0) { + continue; + } + if ((*processors)[i - 1]->Name() != "Normalize" && + (*processors)[i - 1]->Name() != "NormalizeAndPermute") { + continue; + } + cast_index = i; + } + } + if (cast_index < 0) { + return; + } + + if (dynamic_cast((*processors)[cast_index].get())->GetDtype() != + "float") { + return; + } + processors->erase(processors->begin() + cast_index); + FDINFO << (*processors)[cast_index - 1]->Name() << " and Cast are fused to " + << (*processors)[cast_index - 1]->Name() + << " in preprocessing pipeline." 
<< std::endl; +} + +void FuseNormalizeHWC2CHW( + std::vector>* processors) { + // Fuse Normalize and HWC2CHW to NormalizeAndPermute + int hwc2chw_index = -1; + for (size_t i = 0; i < processors->size(); ++i) { + if ((*processors)[i]->Name() == "HWC2CHW") { + if (i == 0) { + continue; + } + if ((*processors)[i - 1]->Name() != "Normalize") { + continue; + } + hwc2chw_index = i; + } + } + + if (hwc2chw_index < 0) { + return; + } + + // Get alpha and beta of Normalize + std::vector alpha = + dynamic_cast((*processors)[hwc2chw_index - 1].get()) + ->GetAlpha(); + std::vector beta = + dynamic_cast((*processors)[hwc2chw_index - 1].get()) + ->GetBeta(); + + // Delete Normalize and HWC2CHW + processors->erase(processors->begin() + hwc2chw_index); + processors->erase(processors->begin() + hwc2chw_index - 1); + + // Add NormalizeAndPermute + std::vector mean({0.0, 0.0, 0.0}); + std::vector std({1.0, 1.0, 1.0}); + processors->push_back(std::make_shared(mean, std)); + + // Set alpha and beta + auto processor = dynamic_cast( + (*processors)[hwc2chw_index - 1].get()); + + processor->SetAlpha(alpha); + processor->SetBeta(beta); + FDINFO << "Normalize and HWC2CHW are fused to NormalizeAndPermute " + " in preprocessing pipeline." 
+ << std::endl; +} + +void FuseTransforms( + std::vector>* processors) { + FuseNormalizeCast(processors); + FuseNormalizeHWC2CHW(processors); +} + + +} // namespace vision +} // namespace fastdeploy diff --git a/fastdeploy/vision/common/processors/transform.h b/fastdeploy/vision/common/processors/transform.h index 9054ade55f..53f7ffd63f 100644 --- a/fastdeploy/vision/common/processors/transform.h +++ b/fastdeploy/vision/common/processors/transform.h @@ -21,7 +21,6 @@ #include "fastdeploy/vision/common/processors/crop.h" #include "fastdeploy/vision/common/processors/hwc2chw.h" #include "fastdeploy/vision/common/processors/limit_by_stride.h" -#include "fastdeploy/vision/common/processors/limit_long.h" #include "fastdeploy/vision/common/processors/limit_short.h" #include "fastdeploy/vision/common/processors/normalize.h" #include "fastdeploy/vision/common/processors/normalize_and_permute.h" @@ -36,89 +35,12 @@ namespace fastdeploy { namespace vision { -inline void FuseNormalizeCast( - std::vector>* processors) { - // Fuse Normalize and Cast - int cast_index = -1; - for (size_t i = 0; i < processors->size(); ++i) { - if ((*processors)[i]->Name() == "Cast") { - if (i == 0) { - continue; - } - if ((*processors)[i - 1]->Name() != "Normalize" && - (*processors)[i - 1]->Name() != "NormalizeAndPermute") { - continue; - } - cast_index = i; - } - } - if (cast_index < 0) { - return; - } +void FuseTransforms(std::vector>* processors); - if (dynamic_cast((*processors)[cast_index].get())->GetDtype() != - "float") { - return; - } - processors->erase(processors->begin() + cast_index); - FDINFO << (*processors)[cast_index - 1]->Name() << " and Cast are fused to " - << (*processors)[cast_index - 1]->Name() - << " in preprocessing pipeline." 
<< std::endl; -} - -inline void FuseNormalizeHWC2CHW( - std::vector>* processors) { - // Fuse Normalize and HWC2CHW to NormalizeAndPermute - int hwc2chw_index = -1; - for (size_t i = 0; i < processors->size(); ++i) { - if ((*processors)[i]->Name() == "HWC2CHW") { - if (i == 0) { - continue; - } - if ((*processors)[i - 1]->Name() != "Normalize") { - continue; - } - hwc2chw_index = i; - } - } - - if (hwc2chw_index < 0) { - return; - } - - // Get alpha and beta of Normalize - std::vector alpha = - dynamic_cast((*processors)[hwc2chw_index - 1].get()) - ->GetAlpha(); - std::vector beta = - dynamic_cast((*processors)[hwc2chw_index - 1].get()) - ->GetBeta(); - - // Delete Normalize and HWC2CHW - processors->erase(processors->begin() + hwc2chw_index); - processors->erase(processors->begin() + hwc2chw_index - 1); - - // Add NormalizeAndPermute - std::vector mean({0.0, 0.0, 0.0}); - std::vector std({1.0, 1.0, 1.0}); - processors->push_back(std::make_shared(mean, std)); - - // Set alpha and beta - auto processor = dynamic_cast( - (*processors)[hwc2chw_index - 1].get()); - - processor->SetAlpha(alpha); - processor->SetBeta(beta); - FDINFO << "Normalize and HWC2CHW are fused to NormalizeAndPermute " - " in preprocessing pipeline." 
- << std::endl; -} - -inline void FuseTransforms( - std::vector>* processors) { - FuseNormalizeCast(processors); - FuseNormalizeHWC2CHW(processors); -} +// Fuse Normalize + Cast(Float) to Normalize +void FuseNormalizeCast(std::vector>* processors); +// Fuse Normalize + HWC2CHW to NormalizeAndPermute +void FuseNormalizeHWC2CHW(std::vector>* processors); } // namespace vision } // namespace fastdeploy diff --git a/fastdeploy/vision/tracking/pptracking/model.cc b/fastdeploy/vision/tracking/pptracking/model.cc index 2047497b92..a4e6c175be 100644 --- a/fastdeploy/vision/tracking/pptracking/model.cc +++ b/fastdeploy/vision/tracking/pptracking/model.cc @@ -27,7 +27,7 @@ PPTracking::PPTracking(const std::string& model_file, const ModelFormat& model_format){ config_file_=config_file; valid_cpu_backends = {Backend::PDINFER, Backend::ORT}; - valid_gpu_backends = {Backend::PDINFER, Backend::ORT}; + valid_gpu_backends = {Backend::PDINFER, Backend::ORT, Backend::TRT}; runtime_option = custom_option; runtime_option.model_format = model_format; @@ -148,6 +148,8 @@ bool PPTracking::BuildPreprocessPipelineFromConfig(){ } } processors_.push_back(std::make_shared()); + + FuseTransforms(&processors_); return true; } diff --git a/python/fastdeploy/__init__.py b/python/fastdeploy/__init__.py index 36e52c9111..b767393f10 100644 --- a/python/fastdeploy/__init__.py +++ b/python/fastdeploy/__init__.py @@ -16,11 +16,19 @@ import logging import os import sys -from .c_lib_wrap import (ModelFormat, Backend, rknpu2, - FDDataType, TensorInfo, Device, - FDTensor, is_built_with_gpu, is_built_with_ort, - ModelFormat, is_built_with_paddle, is_built_with_trt, - get_default_cuda_directory, ) +from .c_lib_wrap import ( + ModelFormat, + Backend, + rknpu2, + FDDataType, + TensorInfo, + Device, + is_built_with_gpu, + is_built_with_ort, + ModelFormat, + is_built_with_paddle, + is_built_with_trt, + get_default_cuda_directory, ) from .runtime import Runtime, RuntimeOption from .model import FastDeployModel 
diff --git a/python/setup.py b/python/setup.py index 10f57a2cb5..2f3183222b 100755 --- a/python/setup.py +++ b/python/setup.py @@ -49,28 +49,28 @@ setup_configs = dict() setup_configs["ENABLE_PADDLE_FRONTEND"] = os.getenv("ENABLE_PADDLE_FRONTEND", "ON") setup_configs["ENABLE_RKNPU2_BACKEND"] = os.getenv("ENABLE_RKNPU2_BACKEND", - "OFF") + "OFF") setup_configs["ENABLE_ORT_BACKEND"] = os.getenv("ENABLE_ORT_BACKEND", "OFF") setup_configs["ENABLE_OPENVINO_BACKEND"] = os.getenv("ENABLE_OPENVINO_BACKEND", "OFF") setup_configs["ENABLE_PADDLE_BACKEND"] = os.getenv("ENABLE_PADDLE_BACKEND", "OFF") -setup_configs["ENABLE_POROS_BACKEND"] = os.getenv("ENABLE_POROS_BACKEND", - "OFF") +setup_configs["ENABLE_POROS_BACKEND"] = os.getenv("ENABLE_POROS_BACKEND", "OFF") +setup_configs["ENABLE_TRT_BACKEND"] = os.getenv("ENABLE_TRT_BACKEND", "OFF") +setup_configs["ENABLE_LITE_BACKEND"] = os.getenv("ENABLE_LITE_BACKEND", "OFF") setup_configs["ENABLE_VISION"] = os.getenv("ENABLE_VISION", "OFF") setup_configs["ENABLE_FLYCV"] = os.getenv("ENABLE_FLYCV", "OFF") setup_configs["ENABLE_TEXT"] = os.getenv("ENABLE_TEXT", "OFF") -setup_configs["ENABLE_TRT_BACKEND"] = os.getenv("ENABLE_TRT_BACKEND", "OFF") setup_configs["WITH_GPU"] = os.getenv("WITH_GPU", "OFF") setup_configs["WITH_IPU"] = os.getenv("WITH_IPU", "OFF") setup_configs["BUILD_ON_JETSON"] = os.getenv("BUILD_ON_JETSON", "OFF") setup_configs["TRT_DIRECTORY"] = os.getenv("TRT_DIRECTORY", "UNDEFINED") -setup_configs["CUDA_DIRECTORY"] = os.getenv("CUDA_DIRECTORY", - "/usr/local/cuda") +setup_configs["CUDA_DIRECTORY"] = os.getenv("CUDA_DIRECTORY", "/usr/local/cuda") setup_configs["LIBRARY_NAME"] = PACKAGE_NAME setup_configs["PY_LIBRARY_NAME"] = PACKAGE_NAME + "_main" setup_configs["OPENCV_DIRECTORY"] = os.getenv("OPENCV_DIRECTORY", "") setup_configs["ORT_DIRECTORY"] = os.getenv("ORT_DIRECTORY", "") + setup_configs["RKNN2_TARGET_SOC"] = os.getenv("RKNN2_TARGET_SOC", "") if setup_configs["WITH_GPU"] == "ON" or setup_configs[ @@ -99,8 +99,7 @@ 
extras_require = {} # Default value is set to TRUE\1 to keep the settings same as the current ones. # However going forward the recomemded way to is to set this to False\0 -USE_MSVC_STATIC_RUNTIME = bool( - os.getenv('USE_MSVC_STATIC_RUNTIME', '1') == '1') +USE_MSVC_STATIC_RUNTIME = bool(os.getenv('USE_MSVC_STATIC_RUNTIME', '1') == '1') ONNX_NAMESPACE = os.getenv('ONNX_NAMESPACE', 'paddle2onnx') ################################################################################ # Version @@ -130,8 +129,7 @@ assert CMAKE, 'Could not find "cmake" executable!' @contextmanager def cd(path): if not os.path.isabs(path): - raise RuntimeError('Can only cd to absolute path, got: {}'.format( - path)) + raise RuntimeError('Can only cd to absolute path, got: {}'.format(path)) orig_path = os.getcwd() os.chdir(path) try: