From 38a79ebfdcb78d6e6812e95547545c448a2ac1b8 Mon Sep 17 00:00:00 2001
From: jiangjiajun
Date: Sat, 5 Nov 2022 07:20:47 +0000
Subject: [PATCH 01/30] Update model tests
---
tests/eval_example/test_pfld.py | 42 ---------
tests/eval_example/test_ppmatting.py | 109 -----------------------
tests/eval_example/test_pptinypose.py | 100 ---------------------
tests/eval_example/test_pptracking.py | 89 ------------------
tests/eval_example/test_quantize_diff.py | 96 --------------------
tests/eval_example/test_rvm.py | 101 ---------------------
tests/eval_example/test_yolov5cls.py | 49 ----------
7 files changed, 586 deletions(-)
delete mode 100644 tests/eval_example/test_pfld.py
delete mode 100644 tests/eval_example/test_ppmatting.py
delete mode 100644 tests/eval_example/test_pptinypose.py
delete mode 100644 tests/eval_example/test_pptracking.py
delete mode 100755 tests/eval_example/test_quantize_diff.py
delete mode 100644 tests/eval_example/test_rvm.py
delete mode 100755 tests/eval_example/test_yolov5cls.py
diff --git a/tests/eval_example/test_pfld.py b/tests/eval_example/test_pfld.py
deleted file mode 100644
index 62156c785b..0000000000
--- a/tests/eval_example/test_pfld.py
+++ /dev/null
@@ -1,42 +0,0 @@
-# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import fastdeploy as fd
-import cv2
-import os
-import numpy as np
-
-
-def test_facealignment_pfld():
- model_url = "https://bj.bcebos.com/paddlehub/fastdeploy/pfld-106-lite.onnx"
- input_url = "https://bj.bcebos.com/paddlehub/fastdeploy/facealign_input.png"
- output_url = "https://bj.bcebos.com/paddlehub/fastdeploy/result_landmarks.npy"
- fd.download(model_url, ".")
- fd.download(input_url, ".")
- fd.download(output_url, ".")
- model_path = "pfld-106-lite.onnx"
- # use ORT
- runtime_option = fd.RuntimeOption()
- runtime_option.use_ort_backend()
- model = fd.vision.facealign.PFLD(model_path, runtime_option=runtime_option)
-
- # compare diff
- im = cv2.imread("./facealign_input.png")
- result = model.predict(im.copy())
- expect = np.load("./result_landmarks.npy")
-
- diff = np.fabs(np.array(result.landmarks) - expect)
- thres = 1e-04
- assert diff.max() < thres, "The diff is %f, which is bigger than %f" % (
- diff.max(), thres)
diff --git a/tests/eval_example/test_ppmatting.py b/tests/eval_example/test_ppmatting.py
deleted file mode 100644
index f1f1883854..0000000000
--- a/tests/eval_example/test_ppmatting.py
+++ /dev/null
@@ -1,109 +0,0 @@
-# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import fastdeploy as fd
-import cv2
-import os
-import pickle
-import numpy as np
-
-
-def test_matting_ppmatting():
- model_url = "https://bj.bcebos.com/paddlehub/fastdeploy/PP-Matting-512.tgz"
- input_url = "https://bj.bcebos.com/paddlehub/fastdeploy/matting_input.jpg"
- fd.download_and_decompress(model_url, ".")
- fd.download(input_url, ".")
- model_path = "./PP-Matting-512"
- # 配置runtime,加载模型
- runtime_option = fd.RuntimeOption()
- model_file = os.path.join(model_path, "model.pdmodel")
- params_file = os.path.join(model_path, "model.pdiparams")
- config_file = os.path.join(model_path, "deploy.yaml")
- model = fd.vision.matting.PPMatting(
- model_file, params_file, config_file, runtime_option=runtime_option)
-
- # 预测图片抠图结果
- im = cv2.imread("./matting_input.jpg")
- result = model.predict(im.copy())
- pkl_url = "https://bj.bcebos.com/fastdeploy/tests/ppmatting_result.pkl"
- if pkl_url:
- fd.download(pkl_url, ".")
- with open("./ppmatting_result.pkl", "rb") as f:
- baseline = pickle.load(f)
-
- diff = np.fabs(np.array(result.alpha) - np.array(baseline))
- thres = 1e-05
- assert diff.max() < thres, "The diff is %f, which is bigger than %f" % (
- diff.max(), thres)
-
-
-def test_matting_ppmodnet():
- model_url = "https://bj.bcebos.com/paddlehub/fastdeploy/PPModnet_MobileNetV2.tgz"
- input_url = "https://bj.bcebos.com/paddlehub/fastdeploy/matting_input.jpg"
- fd.download_and_decompress(model_url, ".")
- fd.download(input_url, ".")
- model_path = "./PPModnet_MobileNetV2"
- # 配置runtime,加载模型
- runtime_option = fd.RuntimeOption()
- model_file = os.path.join(model_path, "model.pdmodel")
- params_file = os.path.join(model_path, "model.pdiparams")
- config_file = os.path.join(model_path, "deploy.yaml")
- model = fd.vision.matting.PPMatting(
- model_file, params_file, config_file, runtime_option=runtime_option)
-
- # 预测图片抠图结果
- im = cv2.imread("./matting_input.jpg")
- result = model.predict(im.copy())
-
- pkl_url = "https://bj.bcebos.com/fastdeploy/tests/ppmodnet_result.pkl"
- if pkl_url:
- fd.download(pkl_url, ".")
- with open("./ppmodnet_result.pkl", "rb") as f:
- baseline = pickle.load(f)
-
- diff = np.fabs(np.array(result.alpha) - np.array(baseline))
- thres = 1e-05
- assert diff.max() < thres, "The diff is %f, which is bigger than %f" % (
- diff.max(), thres)
-
-
-def test_matting_pphumanmatting():
- model_url = "https://bj.bcebos.com/paddlehub/fastdeploy/PPHumanMatting.tgz"
- input_url = "https://bj.bcebos.com/paddlehub/fastdeploy/matting_input.jpg"
- fd.download_and_decompress(model_url, ".")
- fd.download(input_url, ".")
- model_path = "./PPHumanMatting"
- # 配置runtime,加载模型
- runtime_option = fd.RuntimeOption()
- model_file = os.path.join(model_path, "model.pdmodel")
- params_file = os.path.join(model_path, "model.pdiparams")
- config_file = os.path.join(model_path, "deploy.yaml")
- model = fd.vision.matting.PPMatting(
- model_file, params_file, config_file, runtime_option=runtime_option)
-
- # 预测图片抠图结果
- im = cv2.imread("./matting_input.jpg")
- result = model.predict(im.copy())
-
- pkl_url = "https://bj.bcebos.com/fastdeploy/tests/pphumanmatting_result.pkl"
- if pkl_url:
- fd.download(pkl_url, ".")
-
- with open("./pphumanmatting_result.pkl", "rb") as f:
- baseline = pickle.load(f)
-
- diff = np.fabs(np.array(result.alpha) - np.array(baseline))
- thres = 1e-05
- assert diff.max() < thres, "The diff is %f, which is bigger than %f" % (
- diff.max(), thres)
diff --git a/tests/eval_example/test_pptinypose.py b/tests/eval_example/test_pptinypose.py
deleted file mode 100644
index 0da899293e..0000000000
--- a/tests/eval_example/test_pptinypose.py
+++ /dev/null
@@ -1,100 +0,0 @@
-# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import fastdeploy as fd
-import cv2
-import os
-import numpy as np
-
-
-def test_keypointdetection_pptinypose():
- pp_tinypose_model_url = "https://bj.bcebos.com/fastdeploy/tests/PP_TinyPose_256x192_test.tgz"
- fd.download_and_decompress(pp_tinypose_model_url, ".")
- model_path = "./PP_TinyPose_256x192_test"
- # 配置runtime,加载模型
- runtime_option = fd.RuntimeOption()
- model_file = os.path.join(model_path, "model.pdmodel")
- params_file = os.path.join(model_path, "model.pdiparams")
- config_file = os.path.join(model_path, "infer_cfg.yml")
- image_file = os.path.join(model_path, "hrnet_demo.jpg")
- baseline_file = os.path.join(model_path, "baseline.npy")
- model = fd.vision.keypointdetection.PPTinyPose(
- model_file, params_file, config_file, runtime_option=runtime_option)
-
- # 预测图片关键点
- im = cv2.imread(image_file)
- result = model.predict(im)
- result = np.concatenate(
- (np.array(result.keypoints), np.array(result.scores)[:, np.newaxis]),
- axis=1)
- baseline = np.load(baseline_file)
- diff = np.fabs(result - np.array(baseline))
- thres = 1e-05
- assert diff.max() < thres, "The diff is %f, which is bigger than %f" % (
- diff.max(), thres)
- print("No diff")
-
-
-def test_keypointdetection_det_keypoint_unite():
- det_keypoint_unite_model_url = "https://bj.bcebos.com/fastdeploy/tests/PicoDet_320x320_TinyPose_256x192_test.tgz"
- fd.download_and_decompress(det_keypoint_unite_model_url, ".")
- model_path = "./PicoDet_320x320_TinyPose_256x192_test"
- # 配置runtime,加载模型
- runtime_option = fd.RuntimeOption()
- tinypose_model_file = os.path.join(
- model_path, "PP_TinyPose_256x192_infer/model.pdmodel")
- tinypose_params_file = os.path.join(
- model_path, "PP_TinyPose_256x192_infer/model.pdiparams")
- tinypose_config_file = os.path.join(
- model_path, "PP_TinyPose_256x192_infer/infer_cfg.yml")
- picodet_model_file = os.path.join(
- model_path, "PP_PicoDet_V2_S_Pedestrian_320x320_infer/model.pdmodel")
- picodet_params_file = os.path.join(
- model_path, "PP_PicoDet_V2_S_Pedestrian_320x320_infer/model.pdiparams")
- picodet_config_file = os.path.join(
- model_path, "PP_PicoDet_V2_S_Pedestrian_320x320_infer/infer_cfg.yml")
- image_file = os.path.join(model_path, "000000018491.jpg")
- # image_file = os.path.join(model_path, "hrnet_demo.jpg")
-
- baseline_file = os.path.join(model_path, "baseline.npy")
-
- tinypose_model = fd.vision.keypointdetection.PPTinyPose(
- tinypose_model_file,
- tinypose_params_file,
- tinypose_config_file,
- runtime_option=runtime_option)
-
- det_model = fd.vision.detection.PicoDet(
- picodet_model_file,
- picodet_params_file,
- picodet_config_file,
- runtime_option=runtime_option)
-
- # 预测图片关键点
- im = cv2.imread(image_file)
- pipeline = fd.pipeline.PPTinyPose(det_model, tinypose_model)
- pipeline.detection_model_score_threshold = 0.5
- result = pipeline.predict(im)
- print(result)
- result = np.concatenate(
- (np.array(result.keypoints), np.array(result.scores)[:, np.newaxis]),
- axis=1)
- print(result)
- np.save("baseline.npy", result)
- baseline = np.load(baseline_file)
- diff = np.fabs(result - np.array(baseline))
- thres = 1e-05
- assert diff.max() < thres, "The diff is %f, which is bigger than %f" % (
- diff.max(), thres)
- print("No diff")
diff --git a/tests/eval_example/test_pptracking.py b/tests/eval_example/test_pptracking.py
deleted file mode 100644
index ee1cb9bc51..0000000000
--- a/tests/eval_example/test_pptracking.py
+++ /dev/null
@@ -1,89 +0,0 @@
-# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import fastdeploy as fd
-import cv2
-import os
-import numpy as np
-import pickle
-
-
-def test_pptracking_cpu():
- model_url = "https://bj.bcebos.com/paddlehub/fastdeploy/pptracking.tgz"
- input_url = "https://bj.bcebos.com/paddlehub/fastdeploy/person.mp4"
- fd.download_and_decompress(model_url, ".")
- fd.download(input_url, ".")
- model_path = "pptracking/fairmot_hrnetv2_w18_dlafpn_30e_576x320"
- # use default backend
- runtime_option = fd.RuntimeOption()
- model_file = os.path.join(model_path, "model.pdmodel")
- params_file = os.path.join(model_path, "model.pdiparams")
- config_file = os.path.join(model_path, "infer_cfg.yml")
- model = fd.vision.tracking.PPTracking(model_file, params_file, config_file, runtime_option=runtime_option)
- cap = cv2.VideoCapture("./person.mp4")
- frame_id = 0
- while True:
- _, frame = cap.read()
- if frame is None:
- break
- result = model.predict(frame)
- # compare diff
- expect = pickle.load(open("pptracking/frame" + str(frame_id) + ".pkl", "rb"))
- diff_boxes = np.fabs(np.array(expect["boxes"]) - np.array(result.boxes))
- diff_scores = np.fabs(np.array(expect["scores"]) - np.array(result.scores))
- diff = max(diff_boxes.max(), diff_scores.max())
- thres = 1e-05
- assert diff < thres, "The label diff is %f, which is bigger than %f" % (diff, thres)
- frame_id = frame_id + 1
- cv2.waitKey(30)
- if frame_id >= 10:
- cap.release()
- cv2.destroyAllWindows()
- break
-
-
-def test_pptracking_gpu():
- model_url = "https://bj.bcebos.com/paddlehub/fastdeploy/pptracking.tgz"
- input_url = "https://bj.bcebos.com/paddlehub/fastdeploy/person.mp4"
- fd.download_and_decompress(model_url, ".")
- fd.download(input_url, ".")
- model_path = "pptracking/fairmot_hrnetv2_w18_dlafpn_30e_576x320"
- runtime_option = fd.RuntimeOption()
- runtime_option.use_gpu()
- # Not supported trt backend, up to now
- # runtime_option.use_trt_backend()
- model_file = os.path.join(model_path, "model.pdmodel")
- params_file = os.path.join(model_path, "model.pdiparams")
- config_file = os.path.join(model_path, "infer_cfg.yml")
- model = fd.vision.tracking.PPTracking(model_file, params_file, config_file, runtime_option=runtime_option)
- cap = cv2.VideoCapture("./person.mp4")
- frame_id = 0
- while True:
- _, frame = cap.read()
- if frame is None:
- break
- result = model.predict(frame)
- # compare diff
- expect = pickle.load(open("pptracking/frame" + str(frame_id) + ".pkl", "rb"))
- diff_boxes = np.fabs(np.array(expect["boxes"]) - np.array(result.boxes))
- diff_scores = np.fabs(np.array(expect["scores"]) - np.array(result.scores))
- diff = max(diff_boxes.max(), diff_scores.max())
- thres = 1e-05
- assert diff < thres, "The label diff is %f, which is bigger than %f" % (diff, thres)
- frame_id = frame_id + 1
- cv2.waitKey(30)
- if frame_id >= 10:
- cap.release()
- cv2.destroyAllWindows()
- break
diff --git a/tests/eval_example/test_quantize_diff.py b/tests/eval_example/test_quantize_diff.py
deleted file mode 100755
index 8bc7b396a8..0000000000
--- a/tests/eval_example/test_quantize_diff.py
+++ /dev/null
@@ -1,96 +0,0 @@
-# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import fastdeploy as fd
-import cv2
-import os
-import pickle
-import numpy as np
-
-model_url = "https://bj.bcebos.com/fastdeploy/tests/yolov6_quant.tgz"
-fd.download_and_decompress(model_url, ".")
-
-
-def test_quant_mkldnn():
- model_path = "./yolov6_quant"
- model_file = os.path.join(model_path, "model.pdmodel")
- params_file = os.path.join(model_path, "model.pdiparams")
-
- input_file = os.path.join(model_path, "input.npy")
- output_file = os.path.join(model_path, "mkldnn_output.npy")
-
- option = fd.RuntimeOption()
- option.use_paddle_backend()
- option.use_cpu()
-
- option.set_model_path(model_file, params_file)
- runtime = fd.Runtime(option)
- input_name = runtime.get_input_info(0).name
- data = np.load(input_file)
- outs = runtime.infer({input_name: data})
- expected = np.load(output_file)
- diff = np.fabs(outs[0] - expected)
- thres = 1e-05
- assert diff.max() < thres, "The diff is %f, which is bigger than %f" % (
- diff.max(), thres)
-
-
-def test_quant_ort():
- model_path = "./yolov6_quant"
- model_file = os.path.join(model_path, "model.pdmodel")
- params_file = os.path.join(model_path, "model.pdiparams")
-
- input_file = os.path.join(model_path, "input.npy")
- output_file = os.path.join(model_path, "ort_output.npy")
-
- option = fd.RuntimeOption()
- option.use_ort_backend()
- option.use_cpu()
-
- option.set_ort_graph_opt_level(1)
-
- option.set_model_path(model_file, params_file)
- runtime = fd.Runtime(option)
- input_name = runtime.get_input_info(0).name
- data = np.load(input_file)
- outs = runtime.infer({input_name: data})
- expected = np.load(output_file)
- diff = np.fabs(outs[0] - expected)
- thres = 1e-05
- assert diff.max() < thres, "The diff is %f, which is bigger than %f" % (
- diff.max(), thres)
-
-
-def test_quant_trt():
- model_path = "./yolov6_quant"
- model_file = os.path.join(model_path, "model.pdmodel")
- params_file = os.path.join(model_path, "model.pdiparams")
-
- input_file = os.path.join(model_path, "input.npy")
- output_file = os.path.join(model_path, "trt_output.npy")
-
- option = fd.RuntimeOption()
- option.use_trt_backend()
- option.use_gpu()
-
- option.set_model_path(model_file, params_file)
- runtime = fd.Runtime(option)
- input_name = runtime.get_input_info(0).name
- data = np.load(input_file)
- outs = runtime.infer({input_name: data})
- expected = np.load(output_file)
- diff = np.fabs(outs[0] - expected)
- thres = 1e-05
- assert diff.max() < thres, "The diff is %f, which is bigger than %f" % (
- diff.max(), thres)
diff --git a/tests/eval_example/test_rvm.py b/tests/eval_example/test_rvm.py
deleted file mode 100644
index 4b8d5afe8d..0000000000
--- a/tests/eval_example/test_rvm.py
+++ /dev/null
@@ -1,101 +0,0 @@
-# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import fastdeploy as fd
-import cv2
-import os
-import pickle
-import numpy as np
-
-
-def test_matting_rvm_cpu():
- model_url = "https://bj.bcebos.com/paddlehub/fastdeploy/rvm.tgz"
- input_url = "https://bj.bcebos.com/paddlehub/fastdeploy/video.mp4"
- fd.download_and_decompress(model_url, ".")
- fd.download(input_url, ".")
- model_path = "rvm/rvm_mobilenetv3_fp32.onnx"
- # use ORT
- runtime_option = fd.RuntimeOption()
- runtime_option.use_ort_backend()
- model = fd.vision.matting.RobustVideoMatting(
- model_path, runtime_option=runtime_option)
-
- cap = cv2.VideoCapture(input_url)
-
- frame_id = 0
- while True:
- _, frame = cap.read()
- if frame is None:
- break
- result = model.predict(frame)
- # compare diff
- expect_alpha = np.load("rvm/result_alpha_" + str(frame_id) + ".npy")
- result_alpha = np.array(result.alpha).reshape(1920, 1080)
- diff = np.fabs(expect_alpha - result_alpha)
- thres = 1e-05
- assert diff.max(
- ) < thres, "The label diff is %f, which is bigger than %f" % (
- diff.max(), thres)
- frame_id = frame_id + 1
- cv2.waitKey(30)
- if frame_id >= 10:
- cap.release()
- cv2.destroyAllWindows()
- break
-
-
-def test_matting_rvm_gpu_trt():
- model_url = "https://bj.bcebos.com/paddlehub/fastdeploy/rvm.tgz"
- input_url = "https://bj.bcebos.com/paddlehub/fastdeploy/video.mp4"
- fd.download_and_decompress(model_url, ".")
- fd.download(input_url, ".")
- model_path = "rvm/rvm_mobilenetv3_trt.onnx"
- # use TRT
- runtime_option = fd.RuntimeOption()
- runtime_option.use_gpu()
- runtime_option.use_trt_backend()
- runtime_option.set_trt_input_shape("src", [1, 3, 1920, 1080])
- runtime_option.set_trt_input_shape("r1i", [1, 1, 1, 1], [1, 16, 240, 135],
- [1, 16, 240, 135])
- runtime_option.set_trt_input_shape("r2i", [1, 1, 1, 1], [1, 20, 120, 68],
- [1, 20, 120, 68])
- runtime_option.set_trt_input_shape("r3i", [1, 1, 1, 1], [1, 40, 60, 34],
- [1, 40, 60, 34])
- runtime_option.set_trt_input_shape("r4i", [1, 1, 1, 1], [1, 64, 30, 17],
- [1, 64, 30, 17])
- model = fd.vision.matting.RobustVideoMatting(
- model_path, runtime_option=runtime_option)
-
- cap = cv2.VideoCapture("./video.mp4")
-
- frame_id = 0
- while True:
- _, frame = cap.read()
- if frame is None:
- break
- result = model.predict(frame)
- # compare diff
- expect_alpha = np.load("rvm/result_alpha_" + str(frame_id) + ".npy")
- result_alpha = np.array(result.alpha).reshape(1920, 1080)
- diff = np.fabs(expect_alpha - result_alpha)
- thres = 1e-04
- assert diff.max(
- ) < thres, "The label diff is %f, which is bigger than %f" % (
- diff.max(), thres)
- frame_id = frame_id + 1
- cv2.waitKey(30)
- if frame_id >= 10:
- cap.release()
- cv2.destroyAllWindows()
- break
diff --git a/tests/eval_example/test_yolov5cls.py b/tests/eval_example/test_yolov5cls.py
deleted file mode 100755
index 50eefa36c5..0000000000
--- a/tests/eval_example/test_yolov5cls.py
+++ /dev/null
@@ -1,49 +0,0 @@
-# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import fastdeploy as fd
-import cv2
-import os
-import pickle
-import numpy as np
-
-
-def test_classification_yolov5cls():
- model_url = "https://bj.bcebos.com/paddlehub/fastdeploy/yolov5n-cls.tgz"
- input_url = "https://gitee.com/paddlepaddle/PaddleClas/raw/release/2.4/deploy/images/ImageNet/ILSVRC2012_val_00000010.jpeg"
- fd.download_and_decompress(model_url, ".")
- fd.download(input_url, ".")
- model_path = "yolov5n-cls/yolov5n-cls.onnx"
- # use ORT
- runtime_option = fd.RuntimeOption()
- runtime_option.use_ort_backend()
- model = fd.vision.classification.YOLOv5Cls(
- model_path, runtime_option=runtime_option)
-
- # compare diff
- im = cv2.imread("./ILSVRC2012_val_00000010.jpeg")
- result = model.predict(im.copy(), topk=5)
- with open("yolov5n-cls/result.pkl", "rb") as f:
- expect = pickle.load(f)
-
- diff_label = np.fabs(
- np.array(result.label_ids) - np.array(expect["labels"]))
- diff_score = np.fabs(np.array(result.scores) - np.array(expect["scores"]))
- thres = 1e-05
- assert diff_label.max(
- ) < thres, "The label diff is %f, which is bigger than %f" % (
- diff_label.max(), thres)
- assert diff_score.max(
- ) < thres, "The score diff is %f, which is bigger than %f" % (
- diff_score.max(), thres)
From f00212aa425b32d4aae10411fc895cb1bd006856 Mon Sep 17 00:00:00 2001
From: jiangjiajun
Date: Sat, 5 Nov 2022 07:23:44 +0000
Subject: [PATCH 02/30] Update model tests
---
tests/models/runtime_config.py | 4 ++
tests/models/test_pfld.py | 40 +++++++++++
tests/models/test_ppmatting.py | 105 +++++++++++++++++++++++++++++
tests/models/test_pptinypose.py | 100 +++++++++++++++++++++++++++
tests/models/test_pptracking.py | 54 +++++++++++++++
tests/models/test_quantize_diff.py | 96 ++++++++++++++++++++++++++
tests/models/test_rvm.py | 54 +++++++++++++++
tests/models/test_yolov5cls.py | 49 ++++++++++++++
8 files changed, 502 insertions(+)
create mode 100644 tests/models/runtime_config.py
create mode 100644 tests/models/test_pfld.py
create mode 100644 tests/models/test_ppmatting.py
create mode 100644 tests/models/test_pptinypose.py
create mode 100644 tests/models/test_pptracking.py
create mode 100755 tests/models/test_quantize_diff.py
create mode 100644 tests/models/test_rvm.py
create mode 100755 tests/models/test_yolov5cls.py
diff --git a/tests/models/runtime_config.py b/tests/models/runtime_config.py
new file mode 100644
index 0000000000..12383a7c24
--- /dev/null
+++ b/tests/models/runtime_config.py
@@ -0,0 +1,4 @@
+import fastdeploy as fd
+
+
+test_option = fd.RuntimeOption()
diff --git a/tests/models/test_pfld.py b/tests/models/test_pfld.py
new file mode 100644
index 0000000000..ef1ba448e3
--- /dev/null
+++ b/tests/models/test_pfld.py
@@ -0,0 +1,40 @@
+# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import fastdeploy as fd
+import cv2
+import os
+import numpy as np
+import runtime_config as rc
+
+def test_facealignment_pfld():
+ model_url = "https://bj.bcebos.com/paddlehub/fastdeploy/pfld-106-lite.onnx"
+ input_url = "https://bj.bcebos.com/paddlehub/fastdeploy/facealign_input.png"
+ output_url = "https://bj.bcebos.com/paddlehub/fastdeploy/result_landmarks.npy"
+ fd.download(model_url, ".")
+ fd.download(input_url, ".")
+ fd.download(output_url, ".")
+ model_path = "pfld-106-lite.onnx"
+ # use ORT
+ model = fd.vision.facealign.PFLD(model_path, runtime_option=rc.test_option)
+
+ # compare diff
+ im = cv2.imread("./facealign_input.png")
+ result = model.predict(im.copy())
+ expect = np.load("./result_landmarks.npy")
+
+ diff = np.fabs(np.array(result.landmarks) - expect)
+ thres = 1e-04
+ assert diff.max() < thres, "The diff is %f, which is bigger than %f" % (
+ diff.max(), thres)
diff --git a/tests/models/test_ppmatting.py b/tests/models/test_ppmatting.py
new file mode 100644
index 0000000000..78a085a5f3
--- /dev/null
+++ b/tests/models/test_ppmatting.py
@@ -0,0 +1,105 @@
+# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import fastdeploy as fd
+import cv2
+import os
+import pickle
+import numpy as np
+import runtime_config as rc
+
+def test_matting_ppmatting():
+ model_url = "https://bj.bcebos.com/paddlehub/fastdeploy/PP-Matting-512.tgz"
+ input_url = "https://bj.bcebos.com/paddlehub/fastdeploy/matting_input.jpg"
+ fd.download_and_decompress(model_url, ".")
+ fd.download(input_url, ".")
+ model_path = "./PP-Matting-512"
+ model_file = os.path.join(model_path, "model.pdmodel")
+ params_file = os.path.join(model_path, "model.pdiparams")
+ config_file = os.path.join(model_path, "deploy.yaml")
+ model = fd.vision.matting.PPMatting(
+ model_file, params_file, config_file, runtime_option=rc.test_option)
+
+ # 预测图片抠图结果
+ im = cv2.imread("./matting_input.jpg")
+ result = model.predict(im.copy())
+ pkl_url = "https://bj.bcebos.com/fastdeploy/tests/ppmatting_result.pkl"
+ if pkl_url:
+ fd.download(pkl_url, ".")
+ with open("./ppmatting_result.pkl", "rb") as f:
+ baseline = pickle.load(f)
+
+ diff = np.fabs(np.array(result.alpha) - np.array(baseline))
+ thres = 1e-05
+ assert diff.max() < thres, "The diff is %f, which is bigger than %f" % (
+ diff.max(), thres)
+
+
+def test_matting_ppmodnet():
+ model_url = "https://bj.bcebos.com/paddlehub/fastdeploy/PPModnet_MobileNetV2.tgz"
+ input_url = "https://bj.bcebos.com/paddlehub/fastdeploy/matting_input.jpg"
+ fd.download_and_decompress(model_url, ".")
+ fd.download(input_url, ".")
+ model_path = "./PPModnet_MobileNetV2"
+ model_file = os.path.join(model_path, "model.pdmodel")
+ params_file = os.path.join(model_path, "model.pdiparams")
+ config_file = os.path.join(model_path, "deploy.yaml")
+ model = fd.vision.matting.PPMatting(
+ model_file, params_file, config_file, runtime_option=rc.test_option)
+
+ # 预测图片抠图结果
+ im = cv2.imread("./matting_input.jpg")
+ result = model.predict(im.copy())
+
+ pkl_url = "https://bj.bcebos.com/fastdeploy/tests/ppmodnet_result.pkl"
+ if pkl_url:
+ fd.download(pkl_url, ".")
+ with open("./ppmodnet_result.pkl", "rb") as f:
+ baseline = pickle.load(f)
+
+ diff = np.fabs(np.array(result.alpha) - np.array(baseline))
+ thres = 1e-05
+ assert diff.max() < thres, "The diff is %f, which is bigger than %f" % (
+ diff.max(), thres)
+
+
+def test_matting_pphumanmatting():
+ model_url = "https://bj.bcebos.com/paddlehub/fastdeploy/PPHumanMatting.tgz"
+ input_url = "https://bj.bcebos.com/paddlehub/fastdeploy/matting_input.jpg"
+ fd.download_and_decompress(model_url, ".")
+ fd.download(input_url, ".")
+ model_path = "./PPHumanMatting"
+ # 配置runtime,加载模型
+ runtime_option = fd.RuntimeOption()
+ model_file = os.path.join(model_path, "model.pdmodel")
+ params_file = os.path.join(model_path, "model.pdiparams")
+ config_file = os.path.join(model_path, "deploy.yaml")
+ model = fd.vision.matting.PPMatting(
+ model_file, params_file, config_file, runtime_option=rc.test_option)
+
+ # 预测图片抠图结果
+ im = cv2.imread("./matting_input.jpg")
+ result = model.predict(im.copy())
+
+ pkl_url = "https://bj.bcebos.com/fastdeploy/tests/pphumanmatting_result.pkl"
+ if pkl_url:
+ fd.download(pkl_url, ".")
+
+ with open("./pphumanmatting_result.pkl", "rb") as f:
+ baseline = pickle.load(f)
+
+ diff = np.fabs(np.array(result.alpha) - np.array(baseline))
+ thres = 1e-05
+ assert diff.max() < thres, "The diff is %f, which is bigger than %f" % (
+ diff.max(), thres)
diff --git a/tests/models/test_pptinypose.py b/tests/models/test_pptinypose.py
new file mode 100644
index 0000000000..95cacdd5e1
--- /dev/null
+++ b/tests/models/test_pptinypose.py
@@ -0,0 +1,100 @@
+# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import fastdeploy as fd
+import cv2
+import os
+import numpy as np
+import runtime_config as rc
+
+def test_keypointdetection_pptinypose():
+ pp_tinypose_model_url = "https://bj.bcebos.com/fastdeploy/tests/PP_TinyPose_256x192_test.tgz"
+ fd.download_and_decompress(pp_tinypose_model_url, ".")
+ model_path = "./PP_TinyPose_256x192_test"
+ # 配置runtime,加载模型
+ runtime_option = fd.RuntimeOption()
+ model_file = os.path.join(model_path, "model.pdmodel")
+ params_file = os.path.join(model_path, "model.pdiparams")
+ config_file = os.path.join(model_path, "infer_cfg.yml")
+ image_file = os.path.join(model_path, "hrnet_demo.jpg")
+ baseline_file = os.path.join(model_path, "baseline.npy")
+ model = fd.vision.keypointdetection.PPTinyPose(
+ model_file, params_file, config_file, runtime_option=rc.test_option)
+
+ # 预测图片关键点
+ im = cv2.imread(image_file)
+ result = model.predict(im)
+ result = np.concatenate(
+ (np.array(result.keypoints), np.array(result.scores)[:, np.newaxis]),
+ axis=1)
+ baseline = np.load(baseline_file)
+ diff = np.fabs(result - np.array(baseline))
+ thres = 1e-05
+ assert diff.max() < thres, "The diff is %f, which is bigger than %f" % (
+ diff.max(), thres)
+ print("No diff")
+
+
+def test_keypointdetection_det_keypoint_unite():
+ det_keypoint_unite_model_url = "https://bj.bcebos.com/fastdeploy/tests/PicoDet_320x320_TinyPose_256x192_test.tgz"
+ fd.download_and_decompress(det_keypoint_unite_model_url, ".")
+ model_path = "./PicoDet_320x320_TinyPose_256x192_test"
+ # 配置runtime,加载模型
+ runtime_option = fd.RuntimeOption()
+ tinypose_model_file = os.path.join(
+ model_path, "PP_TinyPose_256x192_infer/model.pdmodel")
+ tinypose_params_file = os.path.join(
+ model_path, "PP_TinyPose_256x192_infer/model.pdiparams")
+ tinypose_config_file = os.path.join(
+ model_path, "PP_TinyPose_256x192_infer/infer_cfg.yml")
+ picodet_model_file = os.path.join(
+ model_path, "PP_PicoDet_V2_S_Pedestrian_320x320_infer/model.pdmodel")
+ picodet_params_file = os.path.join(
+ model_path, "PP_PicoDet_V2_S_Pedestrian_320x320_infer/model.pdiparams")
+ picodet_config_file = os.path.join(
+ model_path, "PP_PicoDet_V2_S_Pedestrian_320x320_infer/infer_cfg.yml")
+ image_file = os.path.join(model_path, "000000018491.jpg")
+ # image_file = os.path.join(model_path, "hrnet_demo.jpg")
+
+ baseline_file = os.path.join(model_path, "baseline.npy")
+
+ tinypose_model = fd.vision.keypointdetection.PPTinyPose(
+ tinypose_model_file,
+ tinypose_params_file,
+ tinypose_config_file,
+ runtime_option=runtime_option)
+
+ det_model = fd.vision.detection.PicoDet(
+ picodet_model_file,
+ picodet_params_file,
+ picodet_config_file,
+ runtime_option=rc.test_option)
+
+ # 预测图片关键点
+ im = cv2.imread(image_file)
+ pipeline = fd.pipeline.PPTinyPose(det_model, tinypose_model)
+ pipeline.detection_model_score_threshold = 0.5
+ result = pipeline.predict(im)
+ print(result)
+ result = np.concatenate(
+ (np.array(result.keypoints), np.array(result.scores)[:, np.newaxis]),
+ axis=1)
+ print(result)
+ np.save("baseline.npy", result)
+ baseline = np.load(baseline_file)
+ diff = np.fabs(result - np.array(baseline))
+ thres = 1e-05
+ assert diff.max() < thres, "The diff is %f, which is bigger than %f" % (
+ diff.max(), thres)
+ print("No diff")
diff --git a/tests/models/test_pptracking.py b/tests/models/test_pptracking.py
new file mode 100644
index 0000000000..42010705c3
--- /dev/null
+++ b/tests/models/test_pptracking.py
@@ -0,0 +1,54 @@
+# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import fastdeploy as fd
+import cv2
+import os
+import numpy as np
+import pickle
+import runtime_config as rc
+
+
+def test_pptracking():
+ model_url = "https://bj.bcebos.com/fastdeploy/tests/pptracking.tgz"
+ input_url = "https://bj.bcebos.com/paddlehub/fastdeploy/person.mp4"
+ fd.download_and_decompress(model_url, ".")
+ fd.download(input_url, ".")
+ model_path = "pptracking/fairmot_hrnetv2_w18_dlafpn_30e_576x320"
+ # use default backend
+ runtime_option = fd.RuntimeOption()
+ model_file = os.path.join(model_path, "model.pdmodel")
+ params_file = os.path.join(model_path, "model.pdiparams")
+ config_file = os.path.join(model_path, "infer_cfg.yml")
+ model = fd.vision.tracking.PPTracking(model_file, params_file, config_file, runtime_option=rc.test_option)
+ cap = cv2.VideoCapture("./person.mp4")
+ frame_id = 0
+ while True:
+ _, frame = cap.read()
+ if frame is None:
+ break
+ result = model.predict(frame)
+ # compare diff
+ expect = pickle.load(open("pptracking/frame" + str(frame_id) + ".pkl", "rb"))
+ diff_boxes = np.fabs(np.array(expect["boxes"]) - np.array(result.boxes))
+ diff_scores = np.fabs(np.array(expect["scores"]) - np.array(result.scores))
+ diff = max(diff_boxes.max(), diff_scores.max())
+ thres = 1e-05
+ assert diff < thres, "The label diff is %f, which is bigger than %f" % (diff, thres)
+ frame_id = frame_id + 1
+ cv2.waitKey(30)
+ if frame_id >= 10:
+ cap.release()
+ cv2.destroyAllWindows()
+ break
diff --git a/tests/models/test_quantize_diff.py b/tests/models/test_quantize_diff.py
new file mode 100755
index 0000000000..8bc7b396a8
--- /dev/null
+++ b/tests/models/test_quantize_diff.py
@@ -0,0 +1,96 @@
+# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import fastdeploy as fd
+import cv2
+import os
+import pickle
+import numpy as np
+
+model_url = "https://bj.bcebos.com/fastdeploy/tests/yolov6_quant.tgz"
+fd.download_and_decompress(model_url, ".")
+
+
+def test_quant_mkldnn():
+ model_path = "./yolov6_quant"
+ model_file = os.path.join(model_path, "model.pdmodel")
+ params_file = os.path.join(model_path, "model.pdiparams")
+
+ input_file = os.path.join(model_path, "input.npy")
+ output_file = os.path.join(model_path, "mkldnn_output.npy")
+
+ option = fd.RuntimeOption()
+ option.use_paddle_backend()
+ option.use_cpu()
+
+ option.set_model_path(model_file, params_file)
+ runtime = fd.Runtime(option)
+ input_name = runtime.get_input_info(0).name
+ data = np.load(input_file)
+ outs = runtime.infer({input_name: data})
+ expected = np.load(output_file)
+ diff = np.fabs(outs[0] - expected)
+ thres = 1e-05
+ assert diff.max() < thres, "The diff is %f, which is bigger than %f" % (
+ diff.max(), thres)
+
+
+def test_quant_ort():
+ model_path = "./yolov6_quant"
+ model_file = os.path.join(model_path, "model.pdmodel")
+ params_file = os.path.join(model_path, "model.pdiparams")
+
+ input_file = os.path.join(model_path, "input.npy")
+ output_file = os.path.join(model_path, "ort_output.npy")
+
+ option = fd.RuntimeOption()
+ option.use_ort_backend()
+ option.use_cpu()
+
+ option.set_ort_graph_opt_level(1)
+
+ option.set_model_path(model_file, params_file)
+ runtime = fd.Runtime(option)
+ input_name = runtime.get_input_info(0).name
+ data = np.load(input_file)
+ outs = runtime.infer({input_name: data})
+ expected = np.load(output_file)
+ diff = np.fabs(outs[0] - expected)
+ thres = 1e-05
+ assert diff.max() < thres, "The diff is %f, which is bigger than %f" % (
+ diff.max(), thres)
+
+
+def test_quant_trt():
+ model_path = "./yolov6_quant"
+ model_file = os.path.join(model_path, "model.pdmodel")
+ params_file = os.path.join(model_path, "model.pdiparams")
+
+ input_file = os.path.join(model_path, "input.npy")
+ output_file = os.path.join(model_path, "trt_output.npy")
+
+ option = fd.RuntimeOption()
+ option.use_trt_backend()
+ option.use_gpu()
+
+ option.set_model_path(model_file, params_file)
+ runtime = fd.Runtime(option)
+ input_name = runtime.get_input_info(0).name
+ data = np.load(input_file)
+ outs = runtime.infer({input_name: data})
+ expected = np.load(output_file)
+ diff = np.fabs(outs[0] - expected)
+ thres = 1e-05
+ assert diff.max() < thres, "The diff is %f, which is bigger than %f" % (
+ diff.max(), thres)
diff --git a/tests/models/test_rvm.py b/tests/models/test_rvm.py
new file mode 100644
index 0000000000..23fd544c66
--- /dev/null
+++ b/tests/models/test_rvm.py
@@ -0,0 +1,54 @@
+# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import fastdeploy as fd
+import cv2
+import os
+import pickle
+import numpy as np
+import runtime_config as rc
+
+def test_matting_rvm_cpu():
+ model_url = "https://bj.bcebos.com/paddlehub/fastdeploy/rvm.tgz"
+ input_url = "https://bj.bcebos.com/paddlehub/fastdeploy/video.mp4"
+ fd.download_and_decompress(model_url, ".")
+ fd.download(input_url, ".")
+ model_path = "rvm/rvm_mobilenetv3_fp32.onnx"
+ # use ORT
+ runtime_option.use_ort_backend()
+ model = fd.vision.matting.RobustVideoMatting(
+ model_path, runtime_option=rc.test_option)
+
+ cap = cv2.VideoCapture(input_url)
+
+ frame_id = 0
+ while True:
+ _, frame = cap.read()
+ if frame is None:
+ break
+ result = model.predict(frame)
+ # compare diff
+ expect_alpha = np.load("rvm/result_alpha_" + str(frame_id) + ".npy")
+ result_alpha = np.array(result.alpha).reshape(1920, 1080)
+ diff = np.fabs(expect_alpha - result_alpha)
+ thres = 1e-05
+ assert diff.max(
+ ) < thres, "The label diff is %f, which is bigger than %f" % (
+ diff.max(), thres)
+ frame_id = frame_id + 1
+ cv2.waitKey(30)
+ if frame_id >= 10:
+ cap.release()
+ cv2.destroyAllWindows()
+ break
diff --git a/tests/models/test_yolov5cls.py b/tests/models/test_yolov5cls.py
new file mode 100755
index 0000000000..aeafad5196
--- /dev/null
+++ b/tests/models/test_yolov5cls.py
@@ -0,0 +1,49 @@
+# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import fastdeploy as fd
+import cv2
+import os
+import pickle
+import numpy as np
+import runtime_config as rc
+
+def test_classification_yolov5cls():
+ model_url = "https://bj.bcebos.com/paddlehub/fastdeploy/yolov5n-cls.tgz"
+ input_url = "https://gitee.com/paddlepaddle/PaddleClas/raw/release/2.4/deploy/images/ImageNet/ILSVRC2012_val_00000010.jpeg"
+ fd.download_and_decompress(model_url, ".")
+ fd.download(input_url, ".")
+ model_path = "yolov5n-cls/yolov5n-cls.onnx"
+ # use ORT
+ runtime_option = fd.RuntimeOption()
+ runtime_option.use_ort_backend()
+ model = fd.vision.classification.YOLOv5Cls(
+ model_path, runtime_option=rc.test_option)
+
+ # compare diff
+ im = cv2.imread("./ILSVRC2012_val_00000010.jpeg")
+ result = model.predict(im.copy(), topk=5)
+ with open("yolov5n-cls/result.pkl", "rb") as f:
+ expect = pickle.load(f)
+
+ diff_label = np.fabs(
+ np.array(result.label_ids) - np.array(expect["labels"]))
+ diff_score = np.fabs(np.array(result.scores) - np.array(expect["scores"]))
+ thres = 1e-05
+ assert diff_label.max(
+ ) < thres, "The label diff is %f, which is bigger than %f" % (
+ diff_label.max(), thres)
+ assert diff_score.max(
+ ) < thres, "The score diff is %f, which is bigger than %f" % (
+ diff_score.max(), thres)
From cf8f53e36d376dc0f6235d494cd08fb81225c749 Mon Sep 17 00:00:00 2001
From: jiangjiajun
Date: Sat, 5 Nov 2022 07:54:16 +0000
Subject: [PATCH 03/30] Modify model tests
---
tests/models/README.md | 14 +++++
tests/models/test_pfld.py | 25 +++++----
tests/models/test_ppmatting.py | 98 +++++++++++++++++----------------
tests/models/test_pptinypose.py | 10 ++--
tests/models/test_pptracking.py | 10 ++--
tests/models/test_rvm.py | 9 ++-
tests/models/test_yolov5cls.py | 37 +++++++------
7 files changed, 111 insertions(+), 92 deletions(-)
create mode 100644 tests/models/README.md
diff --git a/tests/models/README.md b/tests/models/README.md
new file mode 100644
index 0000000000..c80ccc9ddf
--- /dev/null
+++ b/tests/models/README.md
@@ -0,0 +1,14 @@
+# 添加模型单测
+
+
+所有模型统一使用`runtime_config.py`中的RuntimeOption进行配置
+
+```
+import runtime_config as rc
+
+
+model = fd.vision.XXX(..., runtime_option=rc.test_option)
+```
+
+
+验证For循环跑2+次与Baseline结果符合预期
diff --git a/tests/models/test_pfld.py b/tests/models/test_pfld.py
index ef1ba448e3..8d455b1652 100644
--- a/tests/models/test_pfld.py
+++ b/tests/models/test_pfld.py
@@ -22,19 +22,20 @@ def test_facealignment_pfld():
model_url = "https://bj.bcebos.com/paddlehub/fastdeploy/pfld-106-lite.onnx"
input_url = "https://bj.bcebos.com/paddlehub/fastdeploy/facealign_input.png"
output_url = "https://bj.bcebos.com/paddlehub/fastdeploy/result_landmarks.npy"
- fd.download(model_url, ".")
- fd.download(input_url, ".")
- fd.download(output_url, ".")
- model_path = "pfld-106-lite.onnx"
+ fd.download(model_url, "resources")
+ fd.download(input_url, "resources")
+ fd.download(output_url, "resources")
+ model_path = "resources/pfld-106-lite.onnx"
# use ORT
model = fd.vision.facealign.PFLD(model_path, runtime_option=rc.test_option)
# compare diff
- im = cv2.imread("./facealign_input.png")
- result = model.predict(im.copy())
- expect = np.load("./result_landmarks.npy")
-
- diff = np.fabs(np.array(result.landmarks) - expect)
- thres = 1e-04
- assert diff.max() < thres, "The diff is %f, which is bigger than %f" % (
- diff.max(), thres)
+ im = cv2.imread("resources/facealign_input.png")
+ for i in range(2):
+ result = model.predict(im)
+ expect = np.load("resources/result_landmarks.npy")
+
+ diff = np.fabs(np.array(result.landmarks) - expect)
+ thres = 1e-04
+ assert diff.max() < thres, "The diff is %f, which is bigger than %f" % (
+ diff.max(), thres)
diff --git a/tests/models/test_ppmatting.py b/tests/models/test_ppmatting.py
index 78a085a5f3..8021f5b221 100644
--- a/tests/models/test_ppmatting.py
+++ b/tests/models/test_ppmatting.py
@@ -22,9 +22,9 @@ import runtime_config as rc
def test_matting_ppmatting():
model_url = "https://bj.bcebos.com/paddlehub/fastdeploy/PP-Matting-512.tgz"
input_url = "https://bj.bcebos.com/paddlehub/fastdeploy/matting_input.jpg"
- fd.download_and_decompress(model_url, ".")
- fd.download(input_url, ".")
- model_path = "./PP-Matting-512"
+ fd.download_and_decompress(model_url, "resources")
+ fd.download(input_url, "resources")
+ model_path = "./resources/PP-Matting-512"
model_file = os.path.join(model_path, "model.pdmodel")
params_file = os.path.join(model_path, "model.pdiparams")
config_file = os.path.join(model_path, "deploy.yaml")
@@ -32,26 +32,27 @@ def test_matting_ppmatting():
model_file, params_file, config_file, runtime_option=rc.test_option)
# 预测图片抠图结果
- im = cv2.imread("./matting_input.jpg")
- result = model.predict(im.copy())
- pkl_url = "https://bj.bcebos.com/fastdeploy/tests/ppmatting_result.pkl"
- if pkl_url:
- fd.download(pkl_url, ".")
- with open("./ppmatting_result.pkl", "rb") as f:
- baseline = pickle.load(f)
-
- diff = np.fabs(np.array(result.alpha) - np.array(baseline))
- thres = 1e-05
- assert diff.max() < thres, "The diff is %f, which is bigger than %f" % (
- diff.max(), thres)
+ im = cv2.imread("./resources/matting_input.jpg")
+ for i in range(2):
+ result = model.predict(im)
+ pkl_url = "https://bj.bcebos.com/fastdeploy/tests/ppmatting_result.pkl"
+ if pkl_url:
+ fd.download(pkl_url, "resources")
+ with open("./resources/ppmatting_result.pkl", "rb") as f:
+ baseline = pickle.load(f)
+
+ diff = np.fabs(np.array(result.alpha) - np.array(baseline))
+ thres = 1e-05
+ assert diff.max() < thres, "The diff is %f, which is bigger than %f" % (
+ diff.max(), thres)
def test_matting_ppmodnet():
model_url = "https://bj.bcebos.com/paddlehub/fastdeploy/PPModnet_MobileNetV2.tgz"
input_url = "https://bj.bcebos.com/paddlehub/fastdeploy/matting_input.jpg"
- fd.download_and_decompress(model_url, ".")
- fd.download(input_url, ".")
- model_path = "./PPModnet_MobileNetV2"
+ fd.download_and_decompress(model_url, "resources")
+ fd.download(input_url, "resources")
+ model_path = "./resources/PPModnet_MobileNetV2"
model_file = os.path.join(model_path, "model.pdmodel")
params_file = os.path.join(model_path, "model.pdiparams")
config_file = os.path.join(model_path, "deploy.yaml")
@@ -59,27 +60,29 @@ def test_matting_ppmodnet():
model_file, params_file, config_file, runtime_option=rc.test_option)
# 预测图片抠图结果
- im = cv2.imread("./matting_input.jpg")
- result = model.predict(im.copy())
+ im = cv2.imread("./resources/matting_input.jpg")
- pkl_url = "https://bj.bcebos.com/fastdeploy/tests/ppmodnet_result.pkl"
- if pkl_url:
- fd.download(pkl_url, ".")
- with open("./ppmodnet_result.pkl", "rb") as f:
- baseline = pickle.load(f)
-
- diff = np.fabs(np.array(result.alpha) - np.array(baseline))
- thres = 1e-05
- assert diff.max() < thres, "The diff is %f, which is bigger than %f" % (
- diff.max(), thres)
+ for i in range(2):
+ result = model.predict(im)
+
+ pkl_url = "https://bj.bcebos.com/fastdeploy/tests/ppmodnet_result.pkl"
+ if pkl_url:
+ fd.download(pkl_url, "resources")
+ with open("./resources/ppmodnet_result.pkl", "rb") as f:
+ baseline = pickle.load(f)
+
+ diff = np.fabs(np.array(result.alpha) - np.array(baseline))
+ thres = 1e-05
+ assert diff.max() < thres, "The diff is %f, which is bigger than %f" % (
+ diff.max(), thres)
def test_matting_pphumanmatting():
model_url = "https://bj.bcebos.com/paddlehub/fastdeploy/PPHumanMatting.tgz"
input_url = "https://bj.bcebos.com/paddlehub/fastdeploy/matting_input.jpg"
- fd.download_and_decompress(model_url, ".")
- fd.download(input_url, ".")
- model_path = "./PPHumanMatting"
+ fd.download_and_decompress(model_url, "resources")
+ fd.download(input_url, "resources")
+ model_path = "./resources/PPHumanMatting"
# 配置runtime,加载模型
runtime_option = fd.RuntimeOption()
model_file = os.path.join(model_path, "model.pdmodel")
@@ -89,17 +92,18 @@ def test_matting_pphumanmatting():
model_file, params_file, config_file, runtime_option=rc.test_option)
# 预测图片抠图结果
- im = cv2.imread("./matting_input.jpg")
- result = model.predict(im.copy())
-
- pkl_url = "https://bj.bcebos.com/fastdeploy/tests/pphumanmatting_result.pkl"
- if pkl_url:
- fd.download(pkl_url, ".")
-
- with open("./pphumanmatting_result.pkl", "rb") as f:
- baseline = pickle.load(f)
-
- diff = np.fabs(np.array(result.alpha) - np.array(baseline))
- thres = 1e-05
- assert diff.max() < thres, "The diff is %f, which is bigger than %f" % (
- diff.max(), thres)
+ im = cv2.imread("./resources/matting_input.jpg")
+ for i in range(2):
+ result = model.predict(im)
+
+ pkl_url = "https://bj.bcebos.com/fastdeploy/tests/pphumanmatting_result.pkl"
+ if pkl_url:
+ fd.download(pkl_url, "resources")
+
+ with open("./resources/pphumanmatting_result.pkl", "rb") as f:
+ baseline = pickle.load(f)
+
+ diff = np.fabs(np.array(result.alpha) - np.array(baseline))
+ thres = 1e-05
+ assert diff.max() < thres, "The diff is %f, which is bigger than %f" % (
+ diff.max(), thres)
diff --git a/tests/models/test_pptinypose.py b/tests/models/test_pptinypose.py
index 95cacdd5e1..fe838c4902 100644
--- a/tests/models/test_pptinypose.py
+++ b/tests/models/test_pptinypose.py
@@ -20,8 +20,8 @@ import runtime_config as rc
def test_keypointdetection_pptinypose():
pp_tinypose_model_url = "https://bj.bcebos.com/fastdeploy/tests/PP_TinyPose_256x192_test.tgz"
- fd.download_and_decompress(pp_tinypose_model_url, ".")
- model_path = "./PP_TinyPose_256x192_test"
+ fd.download_and_decompress(pp_tinypose_model_url, "resources")
+ model_path = "./resources/PP_TinyPose_256x192_test"
# 配置runtime,加载模型
runtime_option = fd.RuntimeOption()
model_file = os.path.join(model_path, "model.pdmodel")
@@ -48,8 +48,8 @@ def test_keypointdetection_pptinypose():
def test_keypointdetection_det_keypoint_unite():
det_keypoint_unite_model_url = "https://bj.bcebos.com/fastdeploy/tests/PicoDet_320x320_TinyPose_256x192_test.tgz"
- fd.download_and_decompress(det_keypoint_unite_model_url, ".")
- model_path = "./PicoDet_320x320_TinyPose_256x192_test"
+ fd.download_and_decompress(det_keypoint_unite_model_url, "resources")
+ model_path = "./resources/PicoDet_320x320_TinyPose_256x192_test"
# 配置runtime,加载模型
runtime_option = fd.RuntimeOption()
tinypose_model_file = os.path.join(
@@ -91,7 +91,7 @@ def test_keypointdetection_det_keypoint_unite():
(np.array(result.keypoints), np.array(result.scores)[:, np.newaxis]),
axis=1)
print(result)
- np.save("baseline.npy", result)
+ np.save("resources/baseline.npy", result)
baseline = np.load(baseline_file)
diff = np.fabs(result - np.array(baseline))
thres = 1e-05
diff --git a/tests/models/test_pptracking.py b/tests/models/test_pptracking.py
index 42010705c3..b8842c73f8 100644
--- a/tests/models/test_pptracking.py
+++ b/tests/models/test_pptracking.py
@@ -23,16 +23,16 @@ import runtime_config as rc
def test_pptracking():
model_url = "https://bj.bcebos.com/fastdeploy/tests/pptracking.tgz"
input_url = "https://bj.bcebos.com/paddlehub/fastdeploy/person.mp4"
- fd.download_and_decompress(model_url, ".")
- fd.download(input_url, ".")
- model_path = "pptracking/fairmot_hrnetv2_w18_dlafpn_30e_576x320"
+ fd.download_and_decompress(model_url, "resources")
+ fd.download(input_url, "resources")
+ model_path = "resources/pptracking/fairmot_hrnetv2_w18_dlafpn_30e_576x320"
# use default backend
runtime_option = fd.RuntimeOption()
model_file = os.path.join(model_path, "model.pdmodel")
params_file = os.path.join(model_path, "model.pdiparams")
config_file = os.path.join(model_path, "infer_cfg.yml")
model = fd.vision.tracking.PPTracking(model_file, params_file, config_file, runtime_option=rc.test_option)
- cap = cv2.VideoCapture("./person.mp4")
+ cap = cv2.VideoCapture("./resources/person.mp4")
frame_id = 0
while True:
_, frame = cap.read()
@@ -40,7 +40,7 @@ def test_pptracking():
break
result = model.predict(frame)
# compare diff
- expect = pickle.load(open("pptracking/frame" + str(frame_id) + ".pkl", "rb"))
+ expect = pickle.load(open("resources/pptracking/frame" + str(frame_id) + ".pkl", "rb"))
diff_boxes = np.fabs(np.array(expect["boxes"]) - np.array(result.boxes))
diff_scores = np.fabs(np.array(expect["scores"]) - np.array(result.scores))
diff = max(diff_boxes.max(), diff_scores.max())
diff --git a/tests/models/test_rvm.py b/tests/models/test_rvm.py
index 23fd544c66..10d680948f 100644
--- a/tests/models/test_rvm.py
+++ b/tests/models/test_rvm.py
@@ -22,11 +22,10 @@ import runtime_config as rc
def test_matting_rvm_cpu():
model_url = "https://bj.bcebos.com/paddlehub/fastdeploy/rvm.tgz"
input_url = "https://bj.bcebos.com/paddlehub/fastdeploy/video.mp4"
- fd.download_and_decompress(model_url, ".")
- fd.download(input_url, ".")
- model_path = "rvm/rvm_mobilenetv3_fp32.onnx"
+ fd.download_and_decompress(model_url, "resources")
+ fd.download(input_url, "resources")
+ model_path = "resources/rvm/rvm_mobilenetv3_fp32.onnx"
# use ORT
- runtime_option.use_ort_backend()
model = fd.vision.matting.RobustVideoMatting(
model_path, runtime_option=rc.test_option)
@@ -39,7 +38,7 @@ def test_matting_rvm_cpu():
break
result = model.predict(frame)
# compare diff
- expect_alpha = np.load("rvm/result_alpha_" + str(frame_id) + ".npy")
+ expect_alpha = np.load("resources/rvm/result_alpha_" + str(frame_id) + ".npy")
result_alpha = np.array(result.alpha).reshape(1920, 1080)
diff = np.fabs(expect_alpha - result_alpha)
thres = 1e-05
diff --git a/tests/models/test_yolov5cls.py b/tests/models/test_yolov5cls.py
index aeafad5196..a7070de4d6 100755
--- a/tests/models/test_yolov5cls.py
+++ b/tests/models/test_yolov5cls.py
@@ -22,9 +22,9 @@ import runtime_config as rc
def test_classification_yolov5cls():
model_url = "https://bj.bcebos.com/paddlehub/fastdeploy/yolov5n-cls.tgz"
input_url = "https://gitee.com/paddlepaddle/PaddleClas/raw/release/2.4/deploy/images/ImageNet/ILSVRC2012_val_00000010.jpeg"
- fd.download_and_decompress(model_url, ".")
- fd.download(input_url, ".")
- model_path = "yolov5n-cls/yolov5n-cls.onnx"
+ fd.download_and_decompress(model_url, "resources")
+ fd.download(input_url, "resources")
+ model_path = "resources/yolov5n-cls/yolov5n-cls.onnx"
# use ORT
runtime_option = fd.RuntimeOption()
runtime_option.use_ort_backend()
@@ -32,18 +32,19 @@ def test_classification_yolov5cls():
model_path, runtime_option=rc.test_option)
# compare diff
- im = cv2.imread("./ILSVRC2012_val_00000010.jpeg")
- result = model.predict(im.copy(), topk=5)
- with open("yolov5n-cls/result.pkl", "rb") as f:
- expect = pickle.load(f)
-
- diff_label = np.fabs(
- np.array(result.label_ids) - np.array(expect["labels"]))
- diff_score = np.fabs(np.array(result.scores) - np.array(expect["scores"]))
- thres = 1e-05
- assert diff_label.max(
- ) < thres, "The label diff is %f, which is bigger than %f" % (
- diff_label.max(), thres)
- assert diff_score.max(
- ) < thres, "The score diff is %f, which is bigger than %f" % (
- diff_score.max(), thres)
+ im = cv2.imread("./resources/ILSVRC2012_val_00000010.jpeg")
+ for i in range(2):
+ result = model.predict(im, topk=5)
+ with open("resources/yolov5n-cls/result.pkl", "rb") as f:
+ expect = pickle.load(f)
+
+ diff_label = np.fabs(
+ np.array(result.label_ids) - np.array(expect["labels"]))
+ diff_score = np.fabs(np.array(result.scores) - np.array(expect["scores"]))
+ thres = 1e-05
+ assert diff_label.max(
+ ) < thres, "The label diff is %f, which is bigger than %f" % (
+ diff_label.max(), thres)
+ assert diff_score.max(
+ ) < thres, "The score diff is %f, which is bigger than %f" % (
+ diff_score.max(), thres)
From 19a6941f0470a0eb95cc51e0cfbd8d0af07d8d00 Mon Sep 17 00:00:00 2001
From: Jason
Date: Sat, 5 Nov 2022 17:48:50 +0800
Subject: [PATCH 04/30] [Model] Move letter box resize code (#502)
* Remove letter box resize code
* Remove letter box resize code
* Add model test for mobilenetv2
---
fastdeploy/vision/common/processors/base.h | 5 +-
.../vision/common/processors/transform.h | 1 -
.../pptracking/letter_box_resize.cc} | 29 ++++-------
.../pptracking/letter_box_resize.h} | 8 +--
.../vision/tracking/pptracking/model.cc | 1 +
tests/models/test_mobilenetv2.py | 50 +++++++++++++++++++
6 files changed, 68 insertions(+), 26 deletions(-)
rename fastdeploy/vision/{common/processors/letter_box.cc => tracking/pptracking/letter_box_resize.cc} (65%)
rename fastdeploy/vision/{common/processors/letter_box.h => tracking/pptracking/letter_box_resize.h} (87%)
create mode 100755 tests/models/test_mobilenetv2.py
diff --git a/fastdeploy/vision/common/processors/base.h b/fastdeploy/vision/common/processors/base.h
index bb414669af..6c67d10bca 100644
--- a/fastdeploy/vision/common/processors/base.h
+++ b/fastdeploy/vision/common/processors/base.h
@@ -41,7 +41,10 @@ class FASTDEPLOY_DECL Processor {
virtual std::string Name() = 0;
- virtual bool ImplByOpenCV(Mat* mat) = 0;
+ virtual bool ImplByOpenCV(Mat* mat) {
+ FDERROR << Name() << " Not Implement Yet." << std::endl;
+ return false;
+ }
virtual bool ImplByFlyCV(Mat* mat) {
return ImplByOpenCV(mat);
diff --git a/fastdeploy/vision/common/processors/transform.h b/fastdeploy/vision/common/processors/transform.h
index 7952caca34..9054ade55f 100644
--- a/fastdeploy/vision/common/processors/transform.h
+++ b/fastdeploy/vision/common/processors/transform.h
@@ -20,7 +20,6 @@
#include "fastdeploy/vision/common/processors/convert.h"
#include "fastdeploy/vision/common/processors/crop.h"
#include "fastdeploy/vision/common/processors/hwc2chw.h"
-#include "fastdeploy/vision/common/processors/letter_box.h"
#include "fastdeploy/vision/common/processors/limit_by_stride.h"
#include "fastdeploy/vision/common/processors/limit_long.h"
#include "fastdeploy/vision/common/processors/limit_short.h"
diff --git a/fastdeploy/vision/common/processors/letter_box.cc b/fastdeploy/vision/tracking/pptracking/letter_box_resize.cc
similarity index 65%
rename from fastdeploy/vision/common/processors/letter_box.cc
rename to fastdeploy/vision/tracking/pptracking/letter_box_resize.cc
index 423aef1d36..a2747def1f 100644
--- a/fastdeploy/vision/common/processors/letter_box.cc
+++ b/fastdeploy/vision/tracking/pptracking/letter_box_resize.cc
@@ -12,17 +12,18 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-#include "fastdeploy/vision/common/processors/letter_box.h"
+#include "fastdeploy/vision/tracking/pptracking/letter_box_resize.h"
+#include "fastdeploy/vision/common/processors/transform.h"
namespace fastdeploy{
namespace vision{
-bool LetterBoxResize::ImplByOpenCV(Mat* mat) {
+bool LetterBoxResize::operator()(Mat* mat, ProcLib lib) {
if (mat->Channels() != color_.size()) {
- FDERROR << "Pad: Require input channels equals to size of padding value, "
+ FDERROR << "LetterBoxResize: Require input channels equals to size of color value, "
"but now channels = "
<< mat->Channels()
- << ", the size of padding values = " << color_.size() << "."
+ << ", the size of color values = " << color_.size() << "."
<< std::endl;
return false;
}
@@ -36,8 +37,8 @@ bool LetterBoxResize::ImplByOpenCV(Mat* mat) {
   float ratio_w = static_cast<float>(target_w) / static_cast<float>(origin_w);
float resize_scale = std::min(ratio_h, ratio_w);
// get_resized_shape
- int new_shape_w = std::round(im->cols * resize_scale);
- int new_shape_h = std::round(im->rows * resize_scale);
+ int new_shape_w = std::round(origin_w * resize_scale);
+ int new_shape_h = std::round(origin_h * resize_scale);
// calculate pad
float padw = (target_size_[1] - new_shape_w) / 2.;
float padh = (target_size_[0] - new_shape_h) / 2.;
@@ -45,20 +46,8 @@ bool LetterBoxResize::ImplByOpenCV(Mat* mat) {
int bottom = std::round(padh + 0.1);
int left = std::round(padw - 0.1);
int right = std::round(padw + 0.1);
- cv::resize(*im, *im, cv::Size(new_shape_w, new_shape_h), 0, 0, cv::INTER_AREA);
- cv::Scalar color;
- if (color_.size() == 1) {
- color = cv::Scalar(color_[0]);
- } else if (color_.size() == 2) {
- color = cv::Scalar(color_[0], color_[1]);
- } else if (color_.size() == 3) {
- color = cv::Scalar(color_[0], color_[1], color_[2]);
- } else {
- color = cv::Scalar(color_[0], color_[1], color_[2], color_[3]);
- }
- cv::copyMakeBorder(*im, *im, top, bottom, left, right, cv::BORDER_CONSTANT, color);
- mat->SetWidth(im->cols);
- mat->SetHeight(im->rows);
+ Resize::Run(mat, new_shape_w, new_shape_h, -1.0, -1.0, 3, false, lib);
+ Pad::Run(mat, top, bottom, left, right, color_, lib);
return true;
}
diff --git a/fastdeploy/vision/common/processors/letter_box.h b/fastdeploy/vision/tracking/pptracking/letter_box_resize.h
similarity index 87%
rename from fastdeploy/vision/common/processors/letter_box.h
rename to fastdeploy/vision/tracking/pptracking/letter_box_resize.h
index f69647e892..77e04557b8 100644
--- a/fastdeploy/vision/common/processors/letter_box.h
+++ b/fastdeploy/vision/tracking/pptracking/letter_box_resize.h
@@ -27,13 +27,13 @@ class LetterBoxResize : public Processor {
color_ = color;
}
- bool ImplByOpenCV(Mat* mat);
-
std::string Name() { return "LetterBoxResize"; }
+ virtual bool operator()(Mat* mat, ProcLib lib = ProcLib::DEFAULT);
+
static bool Run(Mat* mat, const std::vector& target_size,
- const std::vector& color,
- ProcLib lib = ProcLib::DEFAULT);
+ const std::vector& color,
+ ProcLib lib = ProcLib::DEFAULT);
private:
std::vector target_size_;
diff --git a/fastdeploy/vision/tracking/pptracking/model.cc b/fastdeploy/vision/tracking/pptracking/model.cc
index 0ae550ad24..2047497b92 100644
--- a/fastdeploy/vision/tracking/pptracking/model.cc
+++ b/fastdeploy/vision/tracking/pptracking/model.cc
@@ -13,6 +13,7 @@
// limitations under the License.
#include "fastdeploy/vision/tracking/pptracking/model.h"
+#include "fastdeploy/vision/tracking/pptracking/letter_box_resize.h"
#include "yaml-cpp/yaml.h"
namespace fastdeploy {
diff --git a/tests/models/test_mobilenetv2.py b/tests/models/test_mobilenetv2.py
new file mode 100755
index 0000000000..c2cec02208
--- /dev/null
+++ b/tests/models/test_mobilenetv2.py
@@ -0,0 +1,50 @@
+# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import fastdeploy as fd
+import cv2
+import os
+import pickle
+import numpy as np
+import runtime_config as rc
+
+
+def test_classification_mobilenetv2():
+ model_url = "https://bj.bcebos.com/paddlehub/fastdeploy/MobileNetV1_x0_25_infer.tgz"
+ input_url = "https://gitee.com/paddlepaddle/PaddleClas/raw/release/2.4/deploy/images/ImageNet/ILSVRC2012_val_00000010.jpeg"
+ fd.download_and_decompress(model_url, "resources")
+ fd.download(input_url, "resources")
+ model_path = "resources/MobileNetV1_x0_25_infer"
+
+ model_file = "resources/MobileNetV1_x0_25_infer/inference.pdmodel"
+ params_file = "resources/MobileNetV1_x0_25_infer/inference.pdiparams"
+ config_file = "resources/MobileNetV1_x0_25_infer/inference_cls.yaml"
+ model = fd.vision.classification.PaddleClasModel(
+ model_file, params_file, config_file, runtime_option=rc.test_option)
+
+ expected_label_ids = [153, 333, 259, 338, 265, 154]
+ expected_scores = [
+ 0.221088, 0.109457, 0.078668, 0.076814, 0.052401, 0.048206
+ ]
+ # compare diff
+ im = cv2.imread("./resources/ILSVRC2012_val_00000010.jpeg")
+ for i in range(2):
+ result = model.predict(im, topk=6)
+ diff_label = np.fabs(
+ np.array(result.label_ids) - np.array(expected_label_ids))
+ diff_scores = np.fabs(
+ np.array(result.scores) - np.array(expected_scores))
+ assert diff_label.max() < 1e-06, "There's difference in classify label."
+ assert diff_scores.max(
+ ) < 1e-05, "There's difference in classify score."
From 40e080a40cf6b6a42c08e7b9caaaaa627bc69bf5 Mon Sep 17 00:00:00 2001
From: leiqing <54695910+leiqing1@users.noreply.github.com>
Date: Sun, 6 Nov 2022 07:22:05 +0800
Subject: [PATCH 05/30] Update README_CN.md
---
README_CN.md | 9 ++++++++-
1 file changed, 8 insertions(+), 1 deletion(-)
diff --git a/README_CN.md b/README_CN.md
index ab6030aa23..4c61b6c4a6 100644
--- a/README_CN.md
+++ b/README_CN.md
@@ -28,6 +28,13 @@
## 近期更新
+- 🔥 **2022.11.09 20:30~21:30,【直播分享】《覆盖云边端全场景,150+热门模型快速部署》。扫码报名**
+- 🔥 **2022.11.10 20:30~21:30,【直播分享】《瑞芯微、晶晨、恩智浦等10+AI硬件部署,直达产业落地》。扫码报名**
+- 🔥 **2022.11.10 19:00~20:00,【直播分享】《10+热门模型在RK3588、RK3568部署实战》。扫码报名**
+-
+

+
+
- 🔥 **2022.10.15:Release FastDeploy [release v0.3.0](https://github.com/PaddlePaddle/FastDeploy/tree/release%2F0.3.0)**
- **New server-side deployment upgrade:更快的推理性能,一键量化,更多的视觉和NLP模型**
- 集成 OpenVINO 推理引擎,并且保证了使用 OpenVINO 与 使用 TensorRT、ONNX Runtime、 Paddle Inference一致的开发体验;
@@ -278,7 +285,7 @@ int main(int argc, char* argv[]) {
- **加入社区👬:** 微信扫描二维码,进入**FastDeploy技术交流群**
-

+
## Acknowledge
From 295af8f4676d2a4945589334ac7e1581a58ff71f Mon Sep 17 00:00:00 2001
From: DefTruth <31974251+DefTruth@users.noreply.github.com>
Date: Sun, 6 Nov 2022 13:29:46 +0800
Subject: [PATCH 06/30] [FlyCV] update vision processors with latest FlyCV API
(#503)
* [Other] Add static create methods to Mat
* [Other] avoid field name conflicts
* [FlyCV] update vision processors with latest FlyCV API
* [FlyCV] update vision processors with latest FlyCV API
* [Model] update rvm cv::Mat usage to FD Mat usage
---
cmake/flycv.cmake | 2 +-
fastdeploy/vision/common/processors/mat.cc | 17 -----
fastdeploy/vision/common/processors/mat.h | 1 -
.../vision/common/processors/normalize.cc | 2 +-
.../vision/common/processors/proc_lib.cc | 17 +++++
.../vision/common/processors/proc_lib.h | 2 +
fastdeploy/vision/common/processors/utils.cc | 62 +++++++++----------
fastdeploy/vision/matting/contrib/rvm.cc | 14 +++--
8 files changed, 60 insertions(+), 57 deletions(-)
diff --git a/cmake/flycv.cmake b/cmake/flycv.cmake
index 52a77fb806..9bb56df5fe 100644
--- a/cmake/flycv.cmake
+++ b/cmake/flycv.cmake
@@ -64,7 +64,7 @@ else()
endif(WIN32)
set(FLYCV_URL_BASE "https://bj.bcebos.com/fastdeploy/third_libs/")
-set(FLYCV_VERSION "1.0.1025")
+set(FLYCV_VERSION "1.3")
if(WIN32)
message(FATAL_ERROR "FlyCV is not supported on Windows now.")
set(FLYCV_FILE "flycv-win-x64-${FLYCV_VERSION}.zip")
diff --git a/fastdeploy/vision/common/processors/mat.cc b/fastdeploy/vision/common/processors/mat.cc
index ba0eddcb1f..e2a64ea04e 100644
--- a/fastdeploy/vision/common/processors/mat.cc
+++ b/fastdeploy/vision/common/processors/mat.cc
@@ -101,23 +101,6 @@ FDDataType Mat::Type() {
return OpenCVDataTypeToFD(cpu_mat.type());
}
-std::ostream& operator<<(std::ostream& out, const ProcLib& p) {
- switch (p) {
- case ProcLib::DEFAULT:
- out << "ProcLib::DEFAULT";
- break;
- case ProcLib::OPENCV:
- out << "ProcLib::OPENCV";
- break;
- case ProcLib::FLYCV:
- out << "ProcLib::FLYCV";
- break;
- default:
- FDASSERT(false, "Unknow type of ProcLib.");
- }
- return out;
-}
-
Mat Mat::Create(const FDTensor& tensor) {
if (DefaultProcLib::default_lib == ProcLib::FLYCV) {
#ifdef ENABLE_FLYCV
diff --git a/fastdeploy/vision/common/processors/mat.h b/fastdeploy/vision/common/processors/mat.h
index cfec2fbd5e..5e618057ca 100644
--- a/fastdeploy/vision/common/processors/mat.h
+++ b/fastdeploy/vision/common/processors/mat.h
@@ -22,7 +22,6 @@ namespace vision {
enum Layout { HWC, CHW };
-FASTDEPLOY_DECL std::ostream& operator<<(std::ostream& out, const ProcLib& p);
struct FASTDEPLOY_DECL Mat {
explicit Mat(const cv::Mat& mat) {
diff --git a/fastdeploy/vision/common/processors/normalize.cc b/fastdeploy/vision/common/processors/normalize.cc
index cf4888613c..726ba67a74 100644
--- a/fastdeploy/vision/common/processors/normalize.cc
+++ b/fastdeploy/vision/common/processors/normalize.cc
@@ -80,7 +80,7 @@ bool Normalize::ImplByFlyCV(Mat* mat) {
mean[i] = -1 * beta_[i] * std[i];
}
fcv::Mat new_im(im->width(), im->height(),
- fcv::FCVImageType::PACKAGE_BGR_F32);
+ fcv::FCVImageType::PKG_BGR_F32);
fcv::normalize_to_submean_to_reorder(*im, mean, std, std::vector(),
new_im, true);
mat->SetMat(new_im);
diff --git a/fastdeploy/vision/common/processors/proc_lib.cc b/fastdeploy/vision/common/processors/proc_lib.cc
index e5009d9a63..07f8e83fd0 100644
--- a/fastdeploy/vision/common/processors/proc_lib.cc
+++ b/fastdeploy/vision/common/processors/proc_lib.cc
@@ -19,5 +19,22 @@ namespace vision {
ProcLib DefaultProcLib::default_lib = ProcLib::DEFAULT;
+std::ostream& operator<<(std::ostream& out, const ProcLib& p) {
+ switch (p) {
+ case ProcLib::DEFAULT:
+ out << "ProcLib::DEFAULT";
+ break;
+ case ProcLib::OPENCV:
+ out << "ProcLib::OPENCV";
+ break;
+ case ProcLib::FLYCV:
+ out << "ProcLib::FLYCV";
+ break;
+ default:
+ FDASSERT(false, "Unknow type of ProcLib.");
+ }
+ return out;
+}
+
} // namespace vision
} // namespace fastdeploy
diff --git a/fastdeploy/vision/common/processors/proc_lib.h b/fastdeploy/vision/common/processors/proc_lib.h
index 6db6b5177e..deee4b4f23 100644
--- a/fastdeploy/vision/common/processors/proc_lib.h
+++ b/fastdeploy/vision/common/processors/proc_lib.h
@@ -20,6 +20,8 @@ namespace vision {
enum class FASTDEPLOY_DECL ProcLib { DEFAULT, OPENCV, FLYCV };
+FASTDEPLOY_DECL std::ostream& operator<<(std::ostream& out, const ProcLib& p);
+
struct FASTDEPLOY_DECL DefaultProcLib {
// default_lib has the highest priority
// all the function in `processor` will force to use
diff --git a/fastdeploy/vision/common/processors/utils.cc b/fastdeploy/vision/common/processors/utils.cc
index e38a1687c8..4d2c9a0d02 100644
--- a/fastdeploy/vision/common/processors/utils.cc
+++ b/fastdeploy/vision/common/processors/utils.cc
@@ -73,61 +73,61 @@ int CreateOpenCVDataType(FDDataType type, int channel) {
FDDataType FlyCVDataTypeToFD(fcv::FCVImageType type) {
if (type == fcv::FCVImageType::GRAY_U8) {
return FDDataType::UINT8;
- } else if (type == fcv::FCVImageType::PACKAGE_BGR_U8) {
+ } else if (type == fcv::FCVImageType::PKG_BGR_U8) {
return FDDataType::UINT8;
- } else if (type == fcv::FCVImageType::PACKAGE_RGB_U8) {
+ } else if (type == fcv::FCVImageType::PKG_RGB_U8) {
return FDDataType::UINT8;
- } else if (type == fcv::FCVImageType::PACKAGE_BGR_U8) {
+ } else if (type == fcv::FCVImageType::PKG_BGR_U8) {
return FDDataType::UINT8;
- } else if (type == fcv::FCVImageType::PACKAGE_RGB_U8) {
+ } else if (type == fcv::FCVImageType::PKG_RGB_U8) {
return FDDataType::UINT8;
- } else if (type == fcv::FCVImageType::PLANAR_BGR_U8) {
+ } else if (type == fcv::FCVImageType::PLA_BGR_U8) {
return FDDataType::UINT8;
- } else if (type == fcv::FCVImageType::PLANAR_RGB_U8) {
+ } else if (type == fcv::FCVImageType::PLA_RGB_U8) {
return FDDataType::UINT8;
- } else if (type == fcv::FCVImageType::PLANAR_BGRA_U8) {
+ } else if (type == fcv::FCVImageType::PLA_BGRA_U8) {
return FDDataType::UINT8;
- } else if (type == fcv::FCVImageType::PLANAR_RGBA_U8) {
+ } else if (type == fcv::FCVImageType::PLA_RGBA_U8) {
return FDDataType::UINT8;
- } else if (type == fcv::FCVImageType::PLANAR_BGR_F32) {
+ } else if (type == fcv::FCVImageType::PLA_BGR_F32) {
return FDDataType::FP32;
- } else if (type == fcv::FCVImageType::PLANAR_RGB_F32) {
+ } else if (type == fcv::FCVImageType::PLA_RGB_F32) {
return FDDataType::FP32;
- } else if (type == fcv::FCVImageType::PLANAR_BGRA_F32) {
+ } else if (type == fcv::FCVImageType::PLA_BGRA_F32) {
return FDDataType::FP32;
- } else if (type == fcv::FCVImageType::PLANAR_RGBA_F32) {
+ } else if (type == fcv::FCVImageType::PLA_RGBA_F32) {
return FDDataType::FP32;
- } else if (type == fcv::FCVImageType::PACKAGE_BGRA_U8) {
+ } else if (type == fcv::FCVImageType::PKG_BGRA_U8) {
return FDDataType::UINT8;
- } else if (type == fcv::FCVImageType::PACKAGE_RGBA_U8) {
+ } else if (type == fcv::FCVImageType::PKG_RGBA_U8) {
return FDDataType::UINT8;
- } else if (type == fcv::FCVImageType::PACKAGE_BGRA_U8) {
+ } else if (type == fcv::FCVImageType::PKG_BGRA_U8) {
return FDDataType::UINT8;
- } else if (type == fcv::FCVImageType::PACKAGE_RGBA_U8) {
+ } else if (type == fcv::FCVImageType::PKG_RGBA_U8) {
return FDDataType::UINT8;
- } else if (type == fcv::FCVImageType::PACKAGE_BGR565_U8) {
+ } else if (type == fcv::FCVImageType::PKG_BGR565_U8) {
return FDDataType::UINT8;
- } else if (type == fcv::FCVImageType::PACKAGE_RGB565_U8) {
+ } else if (type == fcv::FCVImageType::PKG_RGB565_U8) {
return FDDataType::UINT8;
} else if (type == fcv::FCVImageType::GRAY_S32) {
return FDDataType::INT32;
} else if (type == fcv::FCVImageType::GRAY_F32) {
return FDDataType::FP32;
- } else if (type == fcv::FCVImageType::PACKAGE_BGR_F32) {
+ } else if (type == fcv::FCVImageType::PKG_BGR_F32) {
return FDDataType::FP32;
- } else if (type == fcv::FCVImageType::PACKAGE_RGB_F32) {
+ } else if (type == fcv::FCVImageType::PKG_RGB_F32) {
return FDDataType::FP32;
- } else if (type == fcv::FCVImageType::PACKAGE_BGR_F32) {
+ } else if (type == fcv::FCVImageType::PKG_BGR_F32) {
return FDDataType::FP32;
- } else if (type == fcv::FCVImageType::PACKAGE_RGB_F32) {
+ } else if (type == fcv::FCVImageType::PKG_RGB_F32) {
return FDDataType::FP32;
- } else if (type == fcv::FCVImageType::PACKAGE_BGRA_F32) {
+ } else if (type == fcv::FCVImageType::PKG_BGRA_F32) {
return FDDataType::FP32;
- } else if (type == fcv::FCVImageType::PACKAGE_RGBA_F32) {
+ } else if (type == fcv::FCVImageType::PKG_RGBA_F32) {
return FDDataType::FP32;
- } else if (type == fcv::FCVImageType::PACKAGE_BGRA_F32) {
+ } else if (type == fcv::FCVImageType::PKG_BGRA_F32) {
return FDDataType::FP32;
- } else if (type == fcv::FCVImageType::PACKAGE_RGBA_F32) {
+ } else if (type == fcv::FCVImageType::PKG_RGBA_F32) {
return FDDataType::FP32;
} else if (type == fcv::FCVImageType::GRAY_F64) {
return FDDataType::FP64;
@@ -144,21 +144,21 @@ fcv::FCVImageType CreateFlyCVDataType(FDDataType type, int channel) {
if (channel == 1) {
return fcv::FCVImageType::GRAY_U8;
} else if (channel == 3) {
- return fcv::FCVImageType::PACKAGE_BGR_U8;
+ return fcv::FCVImageType::PKG_BGR_U8;
} else {
- return fcv::FCVImageType::PACKAGE_BGRA_U8;
+ return fcv::FCVImageType::PKG_BGRA_U8;
}
} else if (type == FDDataType::FP32) {
if (channel == 1) {
return fcv::FCVImageType::GRAY_F32;
} else if (channel == 3) {
- return fcv::FCVImageType::PACKAGE_BGR_F32;
+ return fcv::FCVImageType::PKG_BGR_F32;
} else {
- return fcv::FCVImageType::PACKAGE_BGRA_F32;
+ return fcv::FCVImageType::PKG_BGRA_F32;
}
}
FDASSERT(false, "Data type of %s is not supported.", Str(type).c_str());
- return fcv::FCVImageType::PACKAGE_BGR_F32;
+ return fcv::FCVImageType::PKG_BGR_F32;
}
fcv::Mat ConvertOpenCVMatToFlyCV(cv::Mat& im) {
diff --git a/fastdeploy/vision/matting/contrib/rvm.cc b/fastdeploy/vision/matting/contrib/rvm.cc
index 6f48a38652..846db6bd60 100755
--- a/fastdeploy/vision/matting/contrib/rvm.cc
+++ b/fastdeploy/vision/matting/contrib/rvm.cc
@@ -120,19 +120,21 @@ bool RobustVideoMatting::Postprocess(
// for alpha
float* alpha_ptr = static_cast<float*>(alpha.Data());
- cv::Mat alpha_zero_copy_ref(out_h, out_w, CV_32FC1, alpha_ptr);
- Mat alpha_resized(alpha_zero_copy_ref); // ref-only, zero copy.
+ // cv::Mat alpha_zero_copy_ref(out_h, out_w, CV_32FC1, alpha_ptr);
+ // Mat alpha_resized(alpha_zero_copy_ref); // ref-only, zero copy.
+ Mat alpha_resized = Mat::Create(out_h, out_w, 1, FDDataType::FP32,
+ alpha_ptr); // ref-only, zero copy.
if ((out_h != in_h) || (out_w != in_w)) {
- // already allocated a new continuous memory after resize.
Resize::Run(&alpha_resized, in_w, in_h, -1, -1);
}
// for foreground
float* fgr_ptr = static_cast<float*>(fgr.Data());
- cv::Mat fgr_zero_copy_ref(out_h, out_w, CV_32FC1, fgr_ptr);
- Mat fgr_resized(fgr_zero_copy_ref); // ref-only, zero copy.
+ // cv::Mat fgr_zero_copy_ref(out_h, out_w, CV_32FC1, fgr_ptr);
+ // Mat fgr_resized(fgr_zero_copy_ref); // ref-only, zero copy.
+ Mat fgr_resized = Mat::Create(out_h, out_w, 1, FDDataType::FP32,
+ fgr_ptr); // ref-only, zero copy.
if ((out_h != in_h) || (out_w != in_w)) {
- // already allocated a new continuous memory after resize.
Resize::Run(&fgr_resized, in_w, in_h, -1, -1);
}
From 6408af263a2abb976c4b064e974b4da67f328be1 Mon Sep 17 00:00:00 2001
From: Zheng_Bicheng <58363586+Zheng-Bicheng@users.noreply.github.com>
Date: Sun, 6 Nov 2022 17:29:00 +0800
Subject: [PATCH 07/30] [Add Model]Add RKPicodet (#495)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
* 11-02/14:35
* 新增输入数据format错误判断
* 优化推理过程,减少内存分配次数
* 支持多输入rknn模型
* rknn模型输出shape为三维时,输出将被强制对齐为4纬。现在将直接抹除rknn补充的shape,方便部分对输出shape进行判断的模型进行正确的后处理。
* 11-03/17:25
* 支持导出多输入RKNN模型
* 更新各种文档
* ppseg改用Fastdeploy中的模型进行转换
* 11-03/17:25
* 新增开源头
* 11-03/21:48
* 删除无用debug代码,补充注释
* 11-04/01:00
* 新增rkpicodet代码
* 11-04/13:13
* 提交编译缺少的文件
* 11-04/14:03
* 更新安装文档
* 11-04/14:21
* 更新picodet_s配置文件
* 11-04/14:21
* 更新picodet自适应输出结果
* 11-04/14:21
* 更新文档
* * 更新配置文件
* * 修正配置文件
* * 添加缺失的python文件
* * 修正文档
* * 修正代码格式问题0
* * 按照要求修改
* * 按照要求修改
* * 按照要求修改
* * 按照要求修改
* * 按照要求修改
* test
---
docs/cn/faq/rknpu2/export.md | 6 +-
.../paddledetection/rknpu2/README.md | 38 ++++
.../paddledetection/rknpu2/cpp/CMakeLists.txt | 37 ++++
.../paddledetection/rknpu2/cpp/README.md | 71 +++++++
.../rknpu2/cpp/infer_picodet.cc | 86 ++++++++
.../paddledetection/rknpu2/python/README.md | 35 +++
.../paddledetection/rknpu2/python/infer.py | 59 +++++
fastdeploy/vision.h | 1 +
.../vision/detection/contrib/rknpu2/model.h | 16 ++
.../detection/contrib/rknpu2/rkdet_pybind.cc | 29 +++
.../detection/contrib/rknpu2/rkpicodet.cc | 201 ++++++++++++++++++
.../detection/contrib/rknpu2/rkpicodet.h | 46 ++++
.../vision/detection/detection_pybind.cc | 3 +
.../fastdeploy/vision/detection/__init__.py | 1 +
.../vision/detection/rknpu2/__init__.py | 44 ++++
.../RK3568/picodet_s_416_coco_lcnet.yaml | 7 +
.../config/RK3568/picodet_s_416_coco_npu.yaml | 5 +
.../RK3588/picodet_s_416_coco_lcnet.yaml | 7 +
.../config/RK3588/picodet_s_416_coco_npu.yaml | 5 +
19 files changed, 694 insertions(+), 3 deletions(-)
create mode 100644 examples/vision/detection/paddledetection/rknpu2/README.md
create mode 100644 examples/vision/detection/paddledetection/rknpu2/cpp/CMakeLists.txt
create mode 100644 examples/vision/detection/paddledetection/rknpu2/cpp/README.md
create mode 100644 examples/vision/detection/paddledetection/rknpu2/cpp/infer_picodet.cc
create mode 100644 examples/vision/detection/paddledetection/rknpu2/python/README.md
create mode 100644 examples/vision/detection/paddledetection/rknpu2/python/infer.py
create mode 100644 fastdeploy/vision/detection/contrib/rknpu2/model.h
create mode 100644 fastdeploy/vision/detection/contrib/rknpu2/rkdet_pybind.cc
create mode 100644 fastdeploy/vision/detection/contrib/rknpu2/rkpicodet.cc
create mode 100644 fastdeploy/vision/detection/contrib/rknpu2/rkpicodet.h
create mode 100644 python/fastdeploy/vision/detection/rknpu2/__init__.py
create mode 100644 tools/rknpu2/config/RK3568/picodet_s_416_coco_lcnet.yaml
create mode 100644 tools/rknpu2/config/RK3568/picodet_s_416_coco_npu.yaml
create mode 100644 tools/rknpu2/config/RK3588/picodet_s_416_coco_lcnet.yaml
create mode 100644 tools/rknpu2/config/RK3588/picodet_s_416_coco_npu.yaml
diff --git a/docs/cn/faq/rknpu2/export.md b/docs/cn/faq/rknpu2/export.md
index 9399c78d5d..6992506cf6 100644
--- a/docs/cn/faq/rknpu2/export.md
+++ b/docs/cn/faq/rknpu2/export.md
@@ -22,8 +22,8 @@ model_path: ./portrait_pp_humansegv2_lite_256x144_pretrained.onnx
output_folder: ./
target_platform: RK3588
normalize:
- mean: [0.5,0.5,0.5]
- std: [0.5,0.5,0.5]
+ mean: [[0.5,0.5,0.5]]
+ std: [[0.5,0.5,0.5]]
outputs: None
```
@@ -45,4 +45,4 @@ python tools/export.py --config_path=./config.yaml
## 模型导出要注意的事项
-* 请不要导出带softmax和argmax的模型,这两个算子存在bug,请在外部进行运算
\ No newline at end of file
+* 请不要导出带softmax和argmax的模型,这两个算子存在bug,请在外部进行运算
diff --git a/examples/vision/detection/paddledetection/rknpu2/README.md b/examples/vision/detection/paddledetection/rknpu2/README.md
new file mode 100644
index 0000000000..32eff20a6f
--- /dev/null
+++ b/examples/vision/detection/paddledetection/rknpu2/README.md
@@ -0,0 +1,38 @@
+# PaddleDetection RKNPU2部署示例
+
+## 支持模型列表
+
+目前FastDeploy支持如下模型的部署
+- [PicoDet系列模型](https://github.com/PaddlePaddle/PaddleDetection/tree/release/2.4/configs/picodet)
+
+## 准备PaddleDetection部署模型以及转换模型
+RKNPU部署模型前需要将Paddle模型转换成RKNN模型,具体步骤如下:
+* Paddle动态图模型转换为ONNX模型,请参考[PaddleDetection导出模型](https://github.com/PaddlePaddle/PaddleDetection/blob/release/2.4/deploy/EXPORT_MODEL.md)
+ ,注意在转换时请设置**export.nms=True**.
+* ONNX模型转换RKNN模型的过程,请参考[转换文档](../../../../../docs/cn/faq/rknpu2/export.md)进行转换。
+
+
+## 模型转换example
+下面以Picodet-npu为例子,教大家如何转换PaddleDetection模型到RKNN模型。
+```bash
+## 下载Paddle静态图模型并解压
+wget https://bj.bcebos.com/fastdeploy/models/rknn2/picodet_s_416_coco_npu.zip
+unzip -qo picodet_s_416_coco_npu.zip
+
+# 静态图转ONNX模型,注意,这里的save_file请和压缩包名对齐
+paddle2onnx --model_dir picodet_s_416_coco_npu \
+ --model_filename model.pdmodel \
+ --params_filename model.pdiparams \
+ --save_file picodet_s_416_coco_npu/picodet_s_416_coco_npu.onnx \
+ --enable_dev_version True
+
+python -m paddle2onnx.optimize --input_model picodet_s_416_coco_npu/picodet_s_416_coco_npu.onnx \
+ --output_model picodet_s_416_coco_npu/picodet_s_416_coco_npu.onnx \
+ --input_shape_dict "{'image':[1,3,416,416]}"
+# ONNX模型转RKNN模型
+# 转换模型,模型将生成在picodet_s_416_coco_npu目录下
+python tools/rknpu2/export.py --config_path tools/rknpu2/config/RK3588/picodet_s_416_coco_npu.yaml
+```
+
+- [Python部署](./python)
+- [视觉模型预测结果](../../../../../docs/api/vision_results/)
diff --git a/examples/vision/detection/paddledetection/rknpu2/cpp/CMakeLists.txt b/examples/vision/detection/paddledetection/rknpu2/cpp/CMakeLists.txt
new file mode 100644
index 0000000000..b4eca78ec5
--- /dev/null
+++ b/examples/vision/detection/paddledetection/rknpu2/cpp/CMakeLists.txt
@@ -0,0 +1,37 @@
+CMAKE_MINIMUM_REQUIRED(VERSION 3.10)
+project(rknpu2_test)
+
+set(CMAKE_CXX_STANDARD 14)
+
+# 指定下载解压后的fastdeploy库路径
+set(FASTDEPLOY_INSTALL_DIR "thirdpartys/fastdeploy-0.0.3")
+
+include(${FASTDEPLOY_INSTALL_DIR}/FastDeployConfig.cmake)
+include_directories(${FastDeploy_INCLUDE_DIRS})
+
+add_executable(infer_picodet infer_picodet.cc)
+target_link_libraries(infer_picodet ${FastDeploy_LIBS})
+
+
+
+set(CMAKE_INSTALL_PREFIX ${CMAKE_SOURCE_DIR}/build/install)
+
+install(TARGETS infer_picodet DESTINATION ./)
+
+install(DIRECTORY model DESTINATION ./)
+install(DIRECTORY images DESTINATION ./)
+
+file(GLOB FASTDEPLOY_LIBS ${FASTDEPLOY_INSTALL_DIR}/lib/*)
+message("${FASTDEPLOY_LIBS}")
+install(PROGRAMS ${FASTDEPLOY_LIBS} DESTINATION lib)
+
+file(GLOB ONNXRUNTIME_LIBS ${FASTDEPLOY_INSTALL_DIR}/third_libs/install/onnxruntime/lib/*)
+install(PROGRAMS ${ONNXRUNTIME_LIBS} DESTINATION lib)
+
+install(DIRECTORY ${FASTDEPLOY_INSTALL_DIR}/third_libs/install/opencv/lib DESTINATION ./)
+
+file(GLOB PADDLETOONNX_LIBS ${FASTDEPLOY_INSTALL_DIR}/third_libs/install/paddle2onnx/lib/*)
+install(PROGRAMS ${PADDLETOONNX_LIBS} DESTINATION lib)
+
+file(GLOB RKNPU2_LIBS ${FASTDEPLOY_INSTALL_DIR}/third_libs/install/rknpu2_runtime/RK3588/lib/*)
+install(PROGRAMS ${RKNPU2_LIBS} DESTINATION lib)
diff --git a/examples/vision/detection/paddledetection/rknpu2/cpp/README.md b/examples/vision/detection/paddledetection/rknpu2/cpp/README.md
new file mode 100644
index 0000000000..d0b1319712
--- /dev/null
+++ b/examples/vision/detection/paddledetection/rknpu2/cpp/README.md
@@ -0,0 +1,71 @@
+# PaddleDetection C++部署示例
+
+本目录下提供`infer_xxxxx.cc`快速完成PPDetection模型在Rockchip板子上通过二代NPU加速部署的示例。
+
+在部署前,需确认以下两个步骤:
+
+1. 软硬件环境满足要求
+2. 根据开发环境,下载预编译部署库或者从头编译FastDeploy仓库
+
+以上步骤请参考[RK2代NPU部署库编译](../../../../../../docs/cn/build_and_install/rknpu2.md)实现
+
+## 生成基本目录文件
+
+该例程由以下几个部分组成
+```text
+.
+├── CMakeLists.txt
+├── build # 编译文件夹
+├── images # 存放图片的文件夹
+├── infer_cpu_npu.cc
+├── infer_cpu_npu.h
+├── main.cc
+├── model # 存放模型文件的文件夹
+└── thirdpartys # 存放sdk的文件夹
+```
+
+首先需要先生成目录结构
+```bash
+mkdir build
+mkdir images
+mkdir model
+mkdir thirdpartys
+```
+
+## 编译
+
+### 编译并拷贝SDK到thirdpartys文件夹
+
+请参考[RK2代NPU部署库编译](../../../../../../docs/cn/build_and_install/rknpu2.md)仓库编译SDK,编译完成后,将在build目录下生成
+fastdeploy-0.0.3目录,请移动它至thirdpartys目录下.
+
+### 拷贝模型文件,以及配置文件至model文件夹
+在Paddle动态图模型 -> Paddle静态图模型 -> ONNX模型的过程中,将生成ONNX文件以及对应的yaml配置文件,请将配置文件存放到model文件夹内。
+转换为RKNN后的模型文件也需要拷贝至model。
+
+### 准备测试图片至images文件夹
+```bash
+wget https://gitee.com/paddlepaddle/PaddleDetection/raw/release/2.4/demo/000000014439.jpg
+cp 000000014439.jpg ./images
+```
+
+### 编译example
+
+```bash
+cd build
+cmake ..
+make -j8
+make install
+```
+
+## 运行例程
+
+```bash
+cd ./build/install
+./infer_picodet
+```
+
+
+- [模型介绍](../../)
+- [Python部署](../python)
+- [视觉模型预测结果](../../../../../../docs/api/vision_results/)
diff --git a/examples/vision/detection/paddledetection/rknpu2/cpp/infer_picodet.cc b/examples/vision/detection/paddledetection/rknpu2/cpp/infer_picodet.cc
new file mode 100644
index 0000000000..297fa52e52
--- /dev/null
+++ b/examples/vision/detection/paddledetection/rknpu2/cpp/infer_picodet.cc
@@ -0,0 +1,86 @@
+// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+#include <iostream>
+#include <string>
+#include "fastdeploy/vision.h"
+
+void InferPicodet(const std::string& device = "cpu");
+
+int main() {
+ InferPicodet("npu");
+ return 0;
+}
+
+fastdeploy::RuntimeOption GetOption(const std::string& device) {
+ auto option = fastdeploy::RuntimeOption();
+ if (device == "npu") {
+ option.UseRKNPU2();
+ } else {
+ option.UseCpu();
+ }
+ return option;
+}
+
+fastdeploy::ModelFormat GetFormat(const std::string& device) {
+ auto format = fastdeploy::ModelFormat::ONNX;
+ if (device == "npu") {
+ format = fastdeploy::ModelFormat::RKNN;
+ } else {
+ format = fastdeploy::ModelFormat::ONNX;
+ }
+ return format;
+}
+
+std::string GetModelPath(std::string& model_path, const std::string& device) {
+ if (device == "npu") {
+ model_path += "rknn";
+ } else {
+ model_path += "onnx";
+ }
+ return model_path;
+}
+
+void InferPicodet(const std::string &device) {
+ std::string model_file = "./model/picodet_s_416_coco_npu/picodet_s_416_coco_npu_rk3588.";
+ std::string params_file;
+ std::string config_file = "./model/picodet_s_416_coco_npu/infer_cfg.yml";
+
+ fastdeploy::RuntimeOption option = GetOption(device);
+ fastdeploy::ModelFormat format = GetFormat(device);
+ model_file = GetModelPath(model_file, device);
+ auto model = fastdeploy::vision::detection::RKPicoDet(
+ model_file, params_file, config_file,option,format);
+
+ if (!model.Initialized()) {
+ std::cerr << "Failed to initialize." << std::endl;
+ return;
+ }
+ auto image_file = "./images/000000014439.jpg";
+ auto im = cv::imread(image_file);
+
+ fastdeploy::vision::DetectionResult res;
+ clock_t start = clock();
+ if (!model.Predict(&im, &res)) {
+ std::cerr << "Failed to predict." << std::endl;
+ return;
+ }
+ clock_t end = clock();
+ auto dur = static_cast<double>(end - start);
+ printf("picodet_npu use time:%f\n", (dur / CLOCKS_PER_SEC));
+
+ std::cout << res.Str() << std::endl;
+ auto vis_im = fastdeploy::vision::VisDetection(im, res,0.5);
+ cv::imwrite("picodet_npu_result.jpg", vis_im);
+ std::cout << "Visualized result saved in ./picodet_npu_result.jpg" << std::endl;
+}
\ No newline at end of file
diff --git a/examples/vision/detection/paddledetection/rknpu2/python/README.md b/examples/vision/detection/paddledetection/rknpu2/python/README.md
new file mode 100644
index 0000000000..23b13cd3b9
--- /dev/null
+++ b/examples/vision/detection/paddledetection/rknpu2/python/README.md
@@ -0,0 +1,35 @@
+# PaddleDetection Python部署示例
+
+在部署前,需确认以下两个步骤
+
+- 1. 软硬件环境满足要求,参考[FastDeploy环境要求](../../../../../../docs/cn/build_and_install/rknpu2.md)
+
+本目录下提供`infer.py`快速完成Picodet在RKNPU上部署的示例。执行如下脚本即可完成
+
+```bash
+# 下载部署示例代码
+git clone https://github.com/PaddlePaddle/FastDeploy.git
+cd FastDeploy/examples/vision/detection/paddledetection/rknpu2/python
+
+# 下载图片
+wget https://gitee.com/paddlepaddle/PaddleDetection/raw/release/2.4/demo/000000014439.jpg
+
+# copy model
+cp -r ./picodet_s_416_coco_npu /path/to/FastDeploy/examples/vision/detection/paddledetection/rknpu2/python
+
+# 推理
+python3 infer.py --model_file ./picodet_s_416_coco_npu/picodet_s_416_coco_npu_3588.rknn \
+ --config_file ./picodet_s_416_coco_npu/infer_cfg.yml \
+ --image 000000014439.jpg
+```
+
+
+## 注意事项
+RKNPU上对模型的输入要求是使用NHWC格式,且图片归一化操作会在转RKNN模型时,内嵌到模型中,因此我们在使用FastDeploy部署时,
+需要先调用`DisableNormalizePermute`(C++)或`disable_normalize_permute`(Python),在预处理阶段禁用归一化以及数据格式的转换。
+## 其它文档
+
+- [PaddleDetection 模型介绍](..)
+- [PaddleDetection C++部署](../cpp)
+- [模型预测结果说明](../../../../../../docs/api/vision_results/)
+- [转换PaddleDetection RKNN模型文档](../README.md)
diff --git a/examples/vision/detection/paddledetection/rknpu2/python/infer.py b/examples/vision/detection/paddledetection/rknpu2/python/infer.py
new file mode 100644
index 0000000000..ae2d8796a6
--- /dev/null
+++ b/examples/vision/detection/paddledetection/rknpu2/python/infer.py
@@ -0,0 +1,59 @@
+# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import fastdeploy as fd
+import cv2
+import os
+
+
+def parse_arguments():
+ import argparse
+ import ast
+ parser = argparse.ArgumentParser()
+ parser.add_argument(
+ "--model_file", required=True, help="Path of rknn model.")
+ parser.add_argument("--config_file", required=True, help="Path of config.")
+ parser.add_argument(
+ "--image", type=str, required=True, help="Path of test image file.")
+ return parser.parse_args()
+
+
+def build_option(args):
+ option = fd.RuntimeOption()
+ option.use_rknpu2()
+ return option
+
+
+args = parse_arguments()
+
+# 配置runtime,加载模型
+runtime_option = build_option(args)
+model_file = args.model_file
+params_file = ""
+config_file = args.config_file
+model = fd.vision.detection.RKPicoDet(
+ model_file,
+ params_file,
+ config_file,
+ runtime_option=runtime_option,
+ model_format=fd.ModelFormat.RKNN)
+
+# 预测图片分割结果
+im = cv2.imread(args.image)
+result = model.predict(im.copy())
+print(result)
+
+# 可视化结果
+vis_im = fd.vision.vis_detection(im, result, score_threshold=0.5)
+cv2.imwrite("visualized_result.jpg", vis_im)
+print("Visualized result save in ./visualized_result.jpg")
diff --git a/fastdeploy/vision.h b/fastdeploy/vision.h
index d9ceb5dda1..44054ee937 100755
--- a/fastdeploy/vision.h
+++ b/fastdeploy/vision.h
@@ -29,6 +29,7 @@
#include "fastdeploy/vision/detection/contrib/yolov7end2end_trt.h"
#include "fastdeploy/vision/detection/contrib/yolox.h"
#include "fastdeploy/vision/detection/ppdet/model.h"
+#include "fastdeploy/vision/detection/contrib/rknpu2/model.h"
#include "fastdeploy/vision/facedet/contrib/retinaface.h"
#include "fastdeploy/vision/facedet/contrib/scrfd.h"
#include "fastdeploy/vision/facedet/contrib/ultraface.h"
diff --git a/fastdeploy/vision/detection/contrib/rknpu2/model.h b/fastdeploy/vision/detection/contrib/rknpu2/model.h
new file mode 100644
index 0000000000..f0f8616eed
--- /dev/null
+++ b/fastdeploy/vision/detection/contrib/rknpu2/model.h
@@ -0,0 +1,16 @@
+// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#pragma once
+#include "fastdeploy/vision/detection/contrib/rknpu2/rkpicodet.h"
diff --git a/fastdeploy/vision/detection/contrib/rknpu2/rkdet_pybind.cc b/fastdeploy/vision/detection/contrib/rknpu2/rkdet_pybind.cc
new file mode 100644
index 0000000000..6482ea6755
--- /dev/null
+++ b/fastdeploy/vision/detection/contrib/rknpu2/rkdet_pybind.cc
@@ -0,0 +1,29 @@
+// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+#include "fastdeploy/pybind/main.h"
+
+namespace fastdeploy {
+void BindRKDet(pybind11::module& m) {
+  pybind11::class_<vision::detection::RKPicoDet, FastDeployModel>(m, "RKPicoDet")
+      .def(pybind11::init<std::string, std::string, std::string, RuntimeOption, ModelFormat>())
+ .def("predict",
+ [](vision::detection::RKPicoDet& self, pybind11::array& data) {
+ auto mat = PyArrayToCvMat(data);
+ vision::DetectionResult res;
+ self.Predict(&mat, &res);
+ return res;
+ });
+}
+} // namespace fastdeploy
diff --git a/fastdeploy/vision/detection/contrib/rknpu2/rkpicodet.cc b/fastdeploy/vision/detection/contrib/rknpu2/rkpicodet.cc
new file mode 100644
index 0000000000..926214d86d
--- /dev/null
+++ b/fastdeploy/vision/detection/contrib/rknpu2/rkpicodet.cc
@@ -0,0 +1,201 @@
+// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "fastdeploy/vision/detection/contrib/rknpu2/rkpicodet.h"
+#include "yaml-cpp/yaml.h"
+namespace fastdeploy {
+namespace vision {
+namespace detection {
+
+RKPicoDet::RKPicoDet(const std::string& model_file,
+ const std::string& params_file,
+ const std::string& config_file,
+ const RuntimeOption& custom_option,
+ const ModelFormat& model_format) {
+ config_file_ = config_file;
+ valid_cpu_backends = {Backend::ORT};
+ valid_rknpu_backends = {Backend::RKNPU2};
+ if ((model_format == ModelFormat::RKNN) ||
+ (model_format == ModelFormat::ONNX)) {
+ has_nms_ = false;
+ }
+ runtime_option = custom_option;
+ runtime_option.model_format = model_format;
+ runtime_option.model_file = model_file;
+ runtime_option.params_file = params_file;
+
+ // NMS parameters come from RKPicoDet_s_nms
+ background_label = -1;
+ keep_top_k = 100;
+ nms_eta = 1;
+ nms_threshold = 0.5;
+ nms_top_k = 1000;
+ normalized = true;
+ score_threshold = 0.3;
+ initialized = Initialize();
+}
+
+bool RKPicoDet::Initialize() {
+ if (!BuildPreprocessPipelineFromConfig()) {
+ FDERROR << "Failed to build preprocess pipeline from configuration file."
+ << std::endl;
+ return false;
+ }
+ if (!InitRuntime()) {
+ FDERROR << "Failed to initialize fastdeploy backend." << std::endl;
+ return false;
+ }
+ return true;
+}
+
+bool RKPicoDet::Preprocess(Mat* mat, std::vector<FDTensor>* outputs) {
+ int origin_w = mat->Width();
+ int origin_h = mat->Height();
+ for (size_t i = 0; i < processors_.size(); ++i) {
+ if (!(*(processors_[i].get()))(mat)) {
+ FDERROR << "Failed to process image data in " << processors_[i]->Name()
+ << "." << std::endl;
+ return false;
+ }
+ }
+
+ Cast::Run(mat, "float");
+
+ scale_factor.resize(2);
+ scale_factor[0] = mat->Height() * 1.0 / origin_h;
+ scale_factor[1] = mat->Width() * 1.0 / origin_w;
+
+ outputs->resize(1);
+ (*outputs)[0].name = InputInfoOfRuntime(0).name;
+ mat->ShareWithTensor(&((*outputs)[0]));
+ // reshape to [1, c, h, w]
+ (*outputs)[0].shape.insert((*outputs)[0].shape.begin(), 1);
+ return true;
+}
+
+bool RKPicoDet::BuildPreprocessPipelineFromConfig() {
+ processors_.clear();
+ YAML::Node cfg;
+ try {
+ cfg = YAML::LoadFile(config_file_);
+ } catch (YAML::BadFile& e) {
+ FDERROR << "Failed to load yaml file " << config_file_
+ << ", maybe you should check this file." << std::endl;
+ return false;
+ }
+
+  processors_.push_back(std::make_shared<BGR2RGB>());
+
+ for (const auto& op : cfg["Preprocess"]) {
+    std::string op_name = op["type"].as<std::string>();
+ if (op_name == "NormalizeImage") {
+ continue;
+ } else if (op_name == "Resize") {
+      bool keep_ratio = op["keep_ratio"].as<bool>();
+      auto target_size = op["target_size"].as<std::vector<int>>();
+      int interp = op["interp"].as<int>();
+ FDASSERT(target_size.size() == 2,
+ "Require size of target_size be 2, but now it's %lu.",
+ target_size.size());
+ if (!keep_ratio) {
+ int width = target_size[1];
+ int height = target_size[0];
+        processors_.push_back(
+            std::make_shared<Resize>(width, height, -1.0, -1.0, interp, false));
+ } else {
+ int min_target_size = std::min(target_size[0], target_size[1]);
+ int max_target_size = std::max(target_size[0], target_size[1]);
+        std::vector<int> max_size;
+ if (max_target_size > 0) {
+ max_size.push_back(max_target_size);
+ max_size.push_back(max_target_size);
+ }
+        processors_.push_back(std::make_shared<ResizeByShort>(
+            min_target_size, interp, true, max_size));
+ }
+ } else if (op_name == "Permute") {
+ continue;
+ } else if (op_name == "Pad") {
+      auto size = op["size"].as<std::vector<int>>();
+      auto value = op["fill_value"].as<std::vector<float>>();
+      processors_.push_back(std::make_shared<Cast>("float"));
+      processors_.push_back(
+          std::make_shared<PadToSize>(size[1], size[0], value));
+ } else if (op_name == "PadStride") {
+      auto stride = op["stride"].as<int>();
+      processors_.push_back(
+          std::make_shared<StridePad>(stride, std::vector<float>(3, 0)));
+ } else {
+ FDERROR << "Unexcepted preprocess operator: " << op_name << "."
+ << std::endl;
+ return false;
+ }
+ }
+ return true;
+}
+
+bool RKPicoDet::Postprocess(std::vector<FDTensor>& infer_result,
+ DetectionResult* result) {
+ FDASSERT(infer_result[1].shape[0] == 1,
+ "Only support batch = 1 in FastDeploy now.");
+
+ if (!has_nms_) {
+ int boxes_index = 0;
+ int scores_index = 1;
+ if (infer_result[0].shape[1] == infer_result[1].shape[2]) {
+ boxes_index = 0;
+ scores_index = 1;
+ } else if (infer_result[0].shape[2] == infer_result[1].shape[1]) {
+ boxes_index = 1;
+ scores_index = 0;
+ } else {
+ FDERROR << "The shape of boxes and scores should be [batch, boxes_num, "
+ "4], [batch, classes_num, boxes_num]"
+ << std::endl;
+ return false;
+ }
+
+ backend::MultiClassNMS nms;
+ nms.background_label = background_label;
+ nms.keep_top_k = keep_top_k;
+ nms.nms_eta = nms_eta;
+ nms.nms_threshold = nms_threshold;
+ nms.score_threshold = score_threshold;
+ nms.nms_top_k = nms_top_k;
+ nms.normalized = normalized;
+    nms.Compute(static_cast<float*>(infer_result[boxes_index].Data()),
+                static_cast<float*>(infer_result[scores_index].Data()),
+ infer_result[boxes_index].shape,
+ infer_result[scores_index].shape);
+ if (nms.out_num_rois_data[0] > 0) {
+ result->Reserve(nms.out_num_rois_data[0]);
+ }
+ for (size_t i = 0; i < nms.out_num_rois_data[0]; ++i) {
+ result->label_ids.push_back(nms.out_box_data[i * 6]);
+ result->scores.push_back(nms.out_box_data[i * 6 + 1]);
+ result->boxes.emplace_back(
+          std::array<float, 4>{nms.out_box_data[i * 6 + 2] / scale_factor[1],
+ nms.out_box_data[i * 6 + 3] / scale_factor[0],
+ nms.out_box_data[i * 6 + 4] / scale_factor[1],
+ nms.out_box_data[i * 6 + 5] / scale_factor[0]});
+ }
+ } else {
+ FDERROR << "Picodet in Backend::RKNPU2 don't support NMS" << std::endl;
+ }
+ return true;
+}
+
+} // namespace detection
+} // namespace vision
+} // namespace fastdeploy
diff --git a/fastdeploy/vision/detection/contrib/rknpu2/rkpicodet.h b/fastdeploy/vision/detection/contrib/rknpu2/rkpicodet.h
new file mode 100644
index 0000000000..dbb48c16d9
--- /dev/null
+++ b/fastdeploy/vision/detection/contrib/rknpu2/rkpicodet.h
@@ -0,0 +1,46 @@
+// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#pragma once
+#include "fastdeploy/vision/detection/ppdet/ppyoloe.h"
+
+namespace fastdeploy {
+namespace vision {
+namespace detection {
+class FASTDEPLOY_DECL RKPicoDet : public PPYOLOE {
+ public:
+ RKPicoDet(const std::string& model_file,
+ const std::string& params_file,
+ const std::string& config_file,
+ const RuntimeOption& custom_option = RuntimeOption(),
+ const ModelFormat& model_format = ModelFormat::RKNN);
+
+ virtual std::string ModelName() const { return "RKPicoDet"; }
+
+ protected:
+ /// Build the preprocess pipeline from the loaded model
+ virtual bool BuildPreprocessPipelineFromConfig();
+ /// Preprocess an input image, and set the preprocessed results to `outputs`
+  virtual bool Preprocess(Mat* mat, std::vector<FDTensor>* outputs);
+
+ /// Postprocess the inferenced results, and set the final result to `result`
+  virtual bool Postprocess(std::vector<FDTensor>& infer_result,
+ DetectionResult* result);
+ virtual bool Initialize();
+ private:
+  std::vector<float> scale_factor{1.0, 1.0};
+};
+} // namespace detection
+} // namespace vision
+} // namespace fastdeploy
diff --git a/fastdeploy/vision/detection/detection_pybind.cc b/fastdeploy/vision/detection/detection_pybind.cc
index b3a7a6ad94..f55bf68bf1 100644
--- a/fastdeploy/vision/detection/detection_pybind.cc
+++ b/fastdeploy/vision/detection/detection_pybind.cc
@@ -27,6 +27,8 @@ void BindNanoDetPlus(pybind11::module& m);
void BindPPDet(pybind11::module& m);
void BindYOLOv7End2EndTRT(pybind11::module& m);
void BindYOLOv7End2EndORT(pybind11::module& m);
+void BindRKDet(pybind11::module& m);
+
void BindDetection(pybind11::module& m) {
auto detection_module =
@@ -42,5 +44,6 @@ void BindDetection(pybind11::module& m) {
BindNanoDetPlus(detection_module);
BindYOLOv7End2EndTRT(detection_module);
BindYOLOv7End2EndORT(detection_module);
+ BindRKDet(detection_module);
}
} // namespace fastdeploy
diff --git a/python/fastdeploy/vision/detection/__init__.py b/python/fastdeploy/vision/detection/__init__.py
index 89441f7a27..a4fe4c035b 100644
--- a/python/fastdeploy/vision/detection/__init__.py
+++ b/python/fastdeploy/vision/detection/__init__.py
@@ -24,3 +24,4 @@ from .contrib.yolov6 import YOLOv6
from .contrib.yolov7end2end_trt import YOLOv7End2EndTRT
from .contrib.yolov7end2end_ort import YOLOv7End2EndORT
from .ppdet import PPYOLOE, PPYOLO, PPYOLOv2, PaddleYOLOX, PicoDet, FasterRCNN, YOLOv3, MaskRCNN
+from .rknpu2 import RKPicoDet
diff --git a/python/fastdeploy/vision/detection/rknpu2/__init__.py b/python/fastdeploy/vision/detection/rknpu2/__init__.py
new file mode 100644
index 0000000000..57fcecc64b
--- /dev/null
+++ b/python/fastdeploy/vision/detection/rknpu2/__init__.py
@@ -0,0 +1,44 @@
+# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from __future__ import absolute_import
+from typing import Union, List
+import logging
+from .... import FastDeployModel, ModelFormat
+from .... import c_lib_wrap as C
+from .. import PPYOLOE
+
+
+class RKPicoDet(PPYOLOE):
+ def __init__(self,
+ model_file,
+ params_file,
+ config_file,
+ runtime_option=None,
+ model_format=ModelFormat.RKNN):
+ """Load a PicoDet model exported by PaddleDetection.
+
+ :param model_file: (str)Path of model file, e.g picodet/model.pdmodel
+ :param params_file: (str)Path of parameters file, e.g picodet/model.pdiparams, if the model_fomat is ModelFormat.ONNX, this param will be ignored, can be set as empty string
+ :param config_file: (str)Path of configuration file for deployment, e.g ppyoloe/infer_cfg.yml
+ :param runtime_option: (fastdeploy.RuntimeOption)RuntimeOption for inference this model, if it's None, will use the default backend on CPU
+ :param model_format: (fastdeploy.ModelForamt)Model format of the loaded model
+ """
+
+ super(PPYOLOE, self).__init__(runtime_option)
+
+ assert model_format == ModelFormat.RKNN, "RKPicoDet model only support model format of ModelFormat.RKNN now."
+ self._model = C.vision.detection.RKPicoDet(
+ model_file, params_file, config_file, self._runtime_option,
+ model_format)
+ assert self.initialized, "RKPicoDet model initialize failed."
diff --git a/tools/rknpu2/config/RK3568/picodet_s_416_coco_lcnet.yaml b/tools/rknpu2/config/RK3568/picodet_s_416_coco_lcnet.yaml
new file mode 100644
index 0000000000..9854891634
--- /dev/null
+++ b/tools/rknpu2/config/RK3568/picodet_s_416_coco_lcnet.yaml
@@ -0,0 +1,7 @@
+model_path: ./picodet_s_416_coco_lcnet/picodet_s_416_coco_lcnet.onnx
+output_folder: ./picodet_s_416_coco_lcnet
+target_platform: RK3568
+normalize:
+ mean: [[0.485,0.456,0.406]]
+ std: [[0.229,0.224,0.225]]
+outputs: ['tmp_16','p2o.Concat.9']
diff --git a/tools/rknpu2/config/RK3568/picodet_s_416_coco_npu.yaml b/tools/rknpu2/config/RK3568/picodet_s_416_coco_npu.yaml
new file mode 100644
index 0000000000..723acc8b55
--- /dev/null
+++ b/tools/rknpu2/config/RK3568/picodet_s_416_coco_npu.yaml
@@ -0,0 +1,5 @@
+model_path: ./picodet_s_416_coco_npu/picodet_s_416_coco_npu.onnx
+output_folder: ./picodet_s_416_coco_npu
+target_platform: RK3568
+normalize: None
+outputs: ['tmp_16','p2o.Concat.17']
diff --git a/tools/rknpu2/config/RK3588/picodet_s_416_coco_lcnet.yaml b/tools/rknpu2/config/RK3588/picodet_s_416_coco_lcnet.yaml
new file mode 100644
index 0000000000..6110e8c0f9
--- /dev/null
+++ b/tools/rknpu2/config/RK3588/picodet_s_416_coco_lcnet.yaml
@@ -0,0 +1,7 @@
+model_path: ./picodet_s_416_coco_lcnet/picodet_s_416_coco_lcnet.onnx
+output_folder: ./picodet_s_416_coco_lcnet
+target_platform: RK3588
+normalize:
+ mean: [[0.485,0.456,0.406]]
+ std: [[0.229,0.224,0.225]]
+outputs: ['tmp_16','p2o.Concat.9']
diff --git a/tools/rknpu2/config/RK3588/picodet_s_416_coco_npu.yaml b/tools/rknpu2/config/RK3588/picodet_s_416_coco_npu.yaml
new file mode 100644
index 0000000000..356fcfad88
--- /dev/null
+++ b/tools/rknpu2/config/RK3588/picodet_s_416_coco_npu.yaml
@@ -0,0 +1,5 @@
+model_path: ./picodet_s_416_coco_npu/picodet_s_416_coco_npu.onnx
+output_folder: ./picodet_s_416_coco_npu
+target_platform: RK3588
+normalize: None
+outputs: ['tmp_16','p2o.Concat.17']
From 08a384f7c26b84510d754fd24acbe1e94c5cbb73 Mon Sep 17 00:00:00 2001
From: Jack Zhou
Date: Sun, 6 Nov 2022 20:17:35 +0800
Subject: [PATCH 08/30] [Other]Fix the fd tensor copy assignment (#506)
Fix the fd tensor copy assignment
---
fastdeploy/core/fd_tensor.cc | 22 ++++-----
tests/core/test_fd_tensor.cc | 89 ++++++++++++++++++++++++++++++++++++
2 files changed, 100 insertions(+), 11 deletions(-)
create mode 100644 tests/core/test_fd_tensor.cc
diff --git a/fastdeploy/core/fd_tensor.cc b/fastdeploy/core/fd_tensor.cc
index e98a81e1b7..8b739d844a 100644
--- a/fastdeploy/core/fd_tensor.cc
+++ b/fastdeploy/core/fd_tensor.cc
@@ -89,8 +89,9 @@ void FDTensor::Squeeze(int64_t axis) {
size_t ndim = shape.size();
FDASSERT(axis >= 0 && axis < ndim,
"The allowed 'axis' must be in range of (0, %lu)!", ndim);
- FDASSERT(shape[axis]==1,
- "The No.%ld dimension of shape should be 1, but it is %ld!", (long)axis, (long)shape[axis]);
+ FDASSERT(shape[axis] == 1,
+ "The No.%ld dimension of shape should be 1, but it is %ld!",
+ (long)axis, (long)shape[axis]);
shape.erase(shape.begin() + axis);
}
@@ -220,9 +221,9 @@ bool FDTensor::ReallocFn(size_t nbytes) {
return buffer_ != nullptr;
#else
FDASSERT(false,
- "The FastDeploy FDTensor allocator didn't compile under "
- "-DWITH_GPU=ON,"
- "so this is an unexpected problem happend.");
+ "The FastDeploy FDTensor allocator didn't compile under "
+ "-DWITH_GPU=ON,"
+ "so this is an unexpected problem happend.");
#endif
}
buffer_ = realloc(buffer_, nbytes);
@@ -316,16 +317,15 @@ FDTensor& FDTensor::operator=(const FDTensor& other) {
if (other.buffer_ == nullptr) {
FreeFn();
buffer_ = nullptr;
+ shape = other.shape;
+ name = other.name;
+ dtype = other.dtype;
+ device = other.device;
} else {
- Resize(other.shape);
+ Resize(other.shape, other.dtype, other.name, other.device);
size_t nbytes = Nbytes();
CopyBuffer(buffer_, other.buffer_, nbytes);
}
-
- shape = other.shape;
- name = other.name;
- dtype = other.dtype;
- device = other.device;
external_data_ptr = other.external_data_ptr;
}
return *this;
diff --git a/tests/core/test_fd_tensor.cc b/tests/core/test_fd_tensor.cc
new file mode 100644
index 0000000000..ad4d639e4e
--- /dev/null
+++ b/tests/core/test_fd_tensor.cc
@@ -0,0 +1,89 @@
+// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include
+#include
+#include
+#include "fastdeploy/core/fd_tensor.h"
+#include "gtest/gtest.h"
+#include "gtest_utils.h"
+
+namespace fastdeploy {
+
+TEST(fastdeploy, fd_tensor_constructor) {
+ CheckShape check_shape;
+ CheckData check_data;
+
+ FDTensor tensor1;
+ check_shape(tensor1.shape, {0});
+ ASSERT_EQ(tensor1.name, "");
+ ASSERT_EQ(tensor1.dtype, FDDataType::INT8);
+ ASSERT_EQ(tensor1.device, Device::CPU);
+
+ std::vector inputs = {2, 4, 3, 7, 1, 5};
+ tensor1.SetExternalData({2, 3}, FDDataType::INT32, inputs.data());
+ ASSERT_EQ(tensor1.dtype, FDDataType::INT32);
+
+ FDTensor tensor2(tensor1);
+ check_shape(tensor1.shape, {2, 3});
+ ASSERT_EQ(tensor2.name, "");
+ ASSERT_EQ(tensor2.dtype, FDDataType::INT32);
+ ASSERT_EQ(tensor2.device, Device::CPU);
+
+ FDTensor tensor3;
+ tensor3.Resize({2, 3}, FDDataType::INT32, "tensor3");
+ check_shape(tensor3.shape, {2, 3});
+ ASSERT_EQ(tensor3.Nbytes(), 24);
+
+ // Copy constructor
+ FDTensor tensor4(tensor3);
+ check_shape(tensor4.shape, {2, 3});
+ ASSERT_EQ(tensor3.Nbytes(), tensor4.Nbytes());
+ check_data(reinterpret_cast(tensor3.Data()),
+ reinterpret_cast(tensor4.Data()), tensor4.Numel());
+
+ // Move constructor
+ ASSERT_NE(tensor1.external_data_ptr, nullptr);
+ FDTensor tensor5(std::move(tensor1));
+ ASSERT_EQ(tensor1.external_data_ptr, nullptr);
+ ASSERT_EQ(tensor5.external_data_ptr, inputs.data());
+ check_shape(tensor5.shape, {2, 3});
+}
+
+TEST(fastdeploy, fd_tensor_assignment) {
+ CheckShape check_shape;
+ CheckData check_data;
+
+ FDTensor tensor1("T1");
+ std::vector inputs = {2, 4, 3, 7, 1, 5};
+ tensor1.SetExternalData({2, 3}, FDDataType::INT32, inputs.data());
+
+ FDTensor tensor2;
+ tensor2 = tensor1;
+ ASSERT_EQ(tensor2.name, "T1");
+ ASSERT_EQ(tensor2.dtype, FDDataType::INT32);
+ ASSERT_EQ(tensor2.device, Device::CPU);
+ ASSERT_EQ(tensor2.Data(), inputs.data());
+ check_shape(tensor2.shape, {2, 3});
+
+ FDTensor tensor3;
+ tensor3 = std::move(tensor1);
+ ASSERT_EQ(tensor3.name, "T1");
+ ASSERT_EQ(tensor3.dtype, FDDataType::INT32);
+ ASSERT_EQ(tensor3.device, Device::CPU);
+ ASSERT_EQ(tensor3.Data(), inputs.data());
+ ASSERT_EQ(tensor1.Data(), nullptr);
+}
+
+} // namespace fastdeploy
\ No newline at end of file
From 25d0521c3e5048d63878d88890ede0a19f48fee5 Mon Sep 17 00:00:00 2001
From: leiqing <54695910+leiqing1@users.noreply.github.com>
Date: Sun, 6 Nov 2022 23:09:58 +0800
Subject: [PATCH 09/30] Update README_EN.md
---
docs/README_EN.md | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/docs/README_EN.md b/docs/README_EN.md
index fe958a6739..de2e2bef84 100644
--- a/docs/README_EN.md
+++ b/docs/README_EN.md
@@ -13,8 +13,8 @@
## A Quick Start - Demos
-- [PP-YOLOE Python Deployment Demo](en/quick_start/models/python.md)
-- [PP-YOLOE C++ Deployment Demo](en/quick_start/models/cpp.md)
+- [Python Deployment Demo](en/quick_start/models/python.md)
+- [C++ Deployment Demo](en/quick_start/models/cpp.md)
- [A Quick Start on Runtime Python](en/quick_start/runtime/python.md)
- [A Quick Start on Runtime C++](en/quick_start/runtime/cpp.md)
From faa4f9b048f81f79824f3113bc4b8951a1882bcc Mon Sep 17 00:00:00 2001
From: leiqing <54695910+leiqing1@users.noreply.github.com>
Date: Sun, 6 Nov 2022 23:52:13 +0800
Subject: [PATCH 10/30] Update README_CN.md
---
README_CN.md | 334 ++++++++++++++++++++++++++++++---------------------
1 file changed, 197 insertions(+), 137 deletions(-)
diff --git a/README_CN.md b/README_CN.md
index 4c61b6c4a6..383f88b62e 100644
--- a/README_CN.md
+++ b/README_CN.md
@@ -16,56 +16,89 @@
-**⚡️FastDeploy**是一款**易用高效**的推理部署开发套件。覆盖业界🔥**热门AI模型**并提供📦**开箱即用**的部署体验,包括图像分类、目标检测、图像分割、人脸检测、人脸识别、人体关键点识别、文字识别、语义理解等多任务,满足开发者**多场景**,**多硬件**、**多平台**的产业部署需求。
+**⚡️FastDeploy**是一款**易用高效**的推理部署开发套件。覆盖业界🔥**热门CV、NLP、Speech的AI模型**并提供📦**开箱即用**的部署体验,包括图像分类、目标检测、图像分割、人脸检测、人脸识别、人体关键点识别、文字识别、语义理解等多任务,满足开发者**多场景**,**多硬件**、**多平台**的产业部署需求。
-| Potrait Segmentation | Image Matting | Semantic Segmentation | Real-Time Matting |
+| [Object Detection](examples/vision) | [3D Object Detection](https://github.com/PaddlePaddle/FastDeploy/issues/6) | [Semantic Segmentation](examples/vision/segmentation/paddleseg) | [Potrait Segmentation](examples/vision/segmentation/paddleseg) |
|:---------------------------------------------------------------------------------------------------------:|:---------------------------------------------------------------------------------------------------------:|:--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------:|:-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------:|
-|
|
|
|
|
-| **OCR** | **Behavior Recognition** | **Object Detection** |**Pose Estimation**
-|
|
|
|
|
-| **Face Alignment** | **3D Object Detection** | **Face Editing** | **Image Animation**
-|
|
|
|
+|
|
|
|
|
+| [**Image Matting**](examples/vision/matting) | [**Real-Time Matting**](examples/vision/matting) | [**OCR**](examples/vision/ocr) |[**Face Alignment**](examples/vision/ocr)
+|
|
|
|
|
+| [**Pose Estimation**](examples/vision/keypointdetection) | [**Behavior Recognition**](https://github.com/PaddlePaddle/FastDeploy/issues/6) | [**NLP**](examples/text) |[**Speech**](examples/audio/pp-tts)
+|
|
|
| **input** :早上好,今天是2020
/10/29,最低温度是-3°C。
**output**: [
](https://paddlespeech.bj.bcebos.com/Parakeet/docs/demos/parakeet_espnet_fs2_pwg_demo/tn_g2p/parakeet/001.wav)
|
+
## 近期更新
-- 🔥 **2022.11.09 20:30~21:30,【直播分享】《覆盖云边端全场景,150+热门模型快速部署》。扫码报名**
-- 🔥 **2022.11.10 20:30~21:30,【直播分享】《瑞芯微、晶晨、恩智浦等10+AI硬件部署,直达产业落地》。扫码报名**
-- 🔥 **2022.11.10 19:00~20:00,【直播分享】《10+热门模型在RK3588、RK3568部署实战》。扫码报名**
--
-

+- 🔥 **【直播分享】2022.11.09 20:30~21:30,《覆盖云边端全场景,150+热门模型快速部署》。微信扫码报名**
+- 🔥 **【直播分享】2022.11.10 20:30~21:30,《瑞芯微、晶晨、恩智浦等10+AI硬件部署,直达产业落地》。微信扫码报名**
+- 🔥 **【直播分享】2022.11.10 19:00~20:00,《10+热门模型在RK3588、RK3568部署实战》。微信扫码报名**
+
+
-
-- 🔥 **2022.10.15:Release FastDeploy [release v0.3.0](https://github.com/PaddlePaddle/FastDeploy/tree/release%2F0.3.0)**
- - **New server-side deployment upgrade:更快的推理性能,一键量化,更多的视觉和NLP模型**
- - 集成 OpenVINO 推理引擎,并且保证了使用 OpenVINO 与 使用 TensorRT、ONNX Runtime、 Paddle Inference一致的开发体验;
- - 提供[一键模型量化工具](tools/quantization),支持YOLOv7、YOLOv6、YOLOv5等视觉模型,在CPU和GPU推理速度可提升1.5~2倍;
- - 新增加 PP-OCRv3, PP-OCRv2, PP-Matting, PP-HumanMatting, ModNet 等视觉模型并提供[端到端部署示例](examples/vision);
- - 新增加NLP信息抽取模型 UIE 并提供[端到端部署示例](examples/text/uie).
- -
-- 🔥 **2022.8.18:发布FastDeploy [release v0.2.0](https://github.com/PaddlePaddle/FastDeploy/tree/release%2F0.2.0)**
- - **服务端部署全新升级:更快的推理性能,更多的视觉模型支持**
- - 发布基于x86 CPU、NVIDIA GPU的高性能推理引擎SDK,推理速度大幅提升
- - 集成Paddle Inference、ONNX Runtime、TensorRT等推理引擎并提供统一的部署体验
- - 支持YOLOv7、YOLOv6、YOLOv5、PP-YOLOE等全系列目标检测模型并提供[端到端部署示例](examples/vision/detection/)
- - 支持人脸检测、人脸识别、实时人像抠图、图像分割等40+重点模型及[Demo示例](examples/vision/)
- - 支持Python和C++两种语言部署
- - **边缘移动端部署新增瑞芯微、晶晨、恩智浦等NPU芯片部署能力**
- - 发布轻量化目标检测[Picodet-NPU部署Demo](https://github.com/PaddlePaddle/Paddle-Lite-Demo/tree/develop/object_detection/linux/picodet_detection),提供极致的INT8全量化推理能力
+- 🔥 **2022.10.31:Release FastDeploy [release v0.5.0](https://github.com/PaddlePaddle/FastDeploy/tree/release/0.5.0)**
+ - **🖥️ 服务端部署:支持推理速度更快的后端,支持更多的模型**
+ - 集成 Paddle Inference TensorRT后端,并保证其使用与Paddle Inference、TensorRT、OpenVINO、ONNX Runtime、Paddle Lite等一致的开发体验;
+ - 支持并测试 Graphcore IPU 通过 Paddle Inference后端;
+ - 优化[一键模型量化工具](tools/quantization),支持YOLOv7、YOLOv6、YOLOv5等视觉模型,在CPU和GPU推理速度可提升1.5~2倍;
+ - 新增 [PP-Tracking](./examples/vision/tracking/pptracking) 和 [RobustVideoMatting](./examples/vision/matting) 等模型;
+
+- 🔥 **2022.10.24:Release FastDeploy [release v0.4.0](https://github.com/PaddlePaddle/FastDeploy/tree/release/0.4.0)**
+ - **🖥️ 服务端部署:推理速度大升级**
+ - 升级 GPU 端到端的优化,在YOLO系列上,模型推理速度从 43ms 提升到 25ms;
+ - 新增 [TinyPose](examples/vision/keypointdetection/tiny_pose) and [PicoDetji lianTinyPose](examples/vision/keypointdetection/det_keypoint_unite)Pipeline部署能力;
+ - **📲 移动端和端侧部署:移动端后端能力升级,支持更多的CV模型**
+ - 集成 Paddle Lite,并保证其使用与服务端常用推理引擎 Paddle Inference、TensorRT、OpenVINO、ONNX Runtime 等一致的开发体验;
+ - 新增 [轻量化目标检测模型](examples/vision/detection/paddledetection/android)和[分类模型](examples/vision/classification/paddleclas/android)的安卓端部署能力;
+ - **

Web和小程序部署:新增Web端部署能力**
+ - 集成 Paddle.js部署能力,新增 OCR、目标检测、人像分割背景替换、物体识别等Web端部署能力和Demo(examples/application/js);
## 目录
-* **服务端部署**
+
+*