mirror of
https://github.com/PaddlePaddle/FastDeploy.git
synced 2026-04-22 16:07:51 +08:00
[LLM] First commit the llm deployment code
This commit is contained in:
@@ -1,98 +0,0 @@
|
||||
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# Derive the namespace and class names of an example from its source path.
# Expected layout: .../<namespace>/<class>/cpp/<file>.cc
#   NAME_SPACE - output variable receiving the namespace directory name
#   CLASS_NAME - output variable receiving the class directory name
#   CC_FILE    - path to the example .cc source file
function(get_fastdeploy_example_names NAME_SPACE CLASS_NAME CC_FILE)
  get_filename_component(cpp_dir "${CC_FILE}" DIRECTORY)
  get_filename_component(class_dir "${cpp_dir}" DIRECTORY)
  get_filename_component(namespace_dir "${class_dir}" DIRECTORY)
  get_filename_component(class_name "${class_dir}" NAME)
  get_filename_component(namespace_name "${namespace_dir}" NAME)
  # Return both values to the caller's scope.
  set(${NAME_SPACE} "${namespace_name}" PARENT_SCOPE)
  set(${CLASS_NAME} "${class_name}" PARENT_SCOPE)
endfunction()
|
||||
|
||||
# Apply platform-specific link flags to an example executable target.
#   TARGET_NAME - the executable target to configure
# Only Android needs special handling (symbol hiding / section GC); on all
# other platforms this function is a no-op.
function(config_fastdeploy_executable_link_flags TARGET_NAME)
  if(ANDROID)
    # Hide symbols coming from the static libraries we link on Android.
    set(COMMON_LINK_FLAGS "-Wl,-exclude-libs,ALL")
    # Release/MinSizeRel builds additionally strip symbols and drop unused sections.
    set(COMMON_LINK_FLAGS_REL "-Wl,-s,--gc-sections,-exclude-libs,ALL")
    if(WITH_OPENCV_STATIC OR WITH_LITE_STATIC)
      # Static OpenCV / Paddle-Lite may define the same symbols more than once.
      string(APPEND COMMON_LINK_FLAGS ",--allow-multiple-definition")
      string(APPEND COMMON_LINK_FLAGS_REL ",--allow-multiple-definition")
    endif()
    # Fix: quote the expansions — an unquoted value containing ';' would be
    # split into multiple arguments and corrupt the property value. Also set
    # all three properties in a single call.
    set_target_properties(${TARGET_NAME} PROPERTIES
      LINK_FLAGS "${COMMON_LINK_FLAGS}"
      LINK_FLAGS_RELEASE "${COMMON_LINK_FLAGS_REL}"
      LINK_FLAGS_MINSIZEREL "${COMMON_LINK_FLAGS_REL}")
  endif()
endfunction()
|
||||
|
||||
set(EXAMPLES_NUM 0)
|
||||
# Register one FastDeploy example executable built from CC_FILE.
#   FIELD   - example category (e.g. vision, text); becomes part of the target name
#   CC_FILE - path to the example .cc source file
# The target is only created when the source file exists AND the fastdeploy
# library target is defined. Increments EXAMPLES_NUM in the caller's scope.
function(add_fastdeploy_executable FIELD CC_FILE)
  # temp target name/file var in function scope
  set(TEMP_TARGET_FILE ${CC_FILE})
  get_filename_component(FILE_NAME ${CC_FILE} NAME)
  # Fix: anchor the extension pattern. The previous 'REGEX REPLACE ".cc"' was
  # unanchored and '.' matched ANY character, so a name containing e.g. "acc"
  # would be mangled ("accuracy.cc" -> "uracy").
  string(REGEX REPLACE "\\.cc$" "" FILE_NAME ${FILE_NAME})
  get_fastdeploy_example_names(NAME_SPACE CLASS_NAME ${CC_FILE})
  set(TEMP_TARGET_NAME ${FIELD}_${NAME_SPACE}_${CLASS_NAME}_${FILE_NAME})
  if(EXISTS ${TEMP_TARGET_FILE} AND TARGET fastdeploy)
    add_executable(${TEMP_TARGET_NAME} ${TEMP_TARGET_FILE})
    target_link_libraries(${TEMP_TARGET_NAME} PUBLIC fastdeploy)
    target_link_libraries(${TEMP_TARGET_NAME} PRIVATE ${GFLAGS_LIBRARIES})
    config_fastdeploy_executable_link_flags(${TEMP_TARGET_NAME})
    math(EXPR _EXAMPLES_NUM "${EXAMPLES_NUM} + 1")
    set(EXAMPLES_NUM ${_EXAMPLES_NUM} PARENT_SCOPE)
    # Fix: display the post-increment count. set(... PARENT_SCOPE) does not
    # update the local EXAMPLES_NUM, so the old code printed a value one
    # behind the real count (starting at [000]).
    # Zero-pad to three digits for aligned status output.
    string(LENGTH "${_EXAMPLES_NUM}" num_len)
    if(num_len EQUAL 1)
      set(MESSAGE_HEAD "[00${_EXAMPLES_NUM}]")
    elseif(num_len EQUAL 2)
      set(MESSAGE_HEAD "[0${_EXAMPLES_NUM}]")
    else()
      set(MESSAGE_HEAD "[${_EXAMPLES_NUM}]")
    endif()
    message(STATUS " ${MESSAGE_HEAD} Added FastDeploy Executable : ${TEMP_TARGET_NAME}")
  endif()
  unset(TEMP_TARGET_FILE)
  unset(TEMP_TARGET_NAME)
endfunction()
|
||||
|
||||
# vision examples
|
||||
# Vision examples: discover every cpp example under examples/vision and
# register each one as an executable target.
if(BUILD_EXAMPLES AND ENABLE_VISION)
  if(EXISTS ${PROJECT_SOURCE_DIR}/examples/vision)
    message(STATUS "")
    message(STATUS "*************FastDeploy Vision Examples Summary**********")
    file(GLOB_RECURSE ALL_VISION_EXAMPLE_SRCS
         ${PROJECT_SOURCE_DIR}/examples/vision/*/*/cpp/*.cc)
    if(ANDROID)
      # Tracking examples are excluded from the Android build.
      file(GLOB_RECURSE TRACKING_SRCS
           ${PROJECT_SOURCE_DIR}/examples/vision/tracking/*/cpp/*.cc)
      list(REMOVE_ITEM ALL_VISION_EXAMPLE_SRCS ${TRACKING_SRCS})
    endif()
    foreach(vision_cc_file IN LISTS ALL_VISION_EXAMPLE_SRCS)
      add_fastdeploy_executable(vision ${vision_cc_file})
    endforeach()
    message(STATUS " [FastDeploy Executable Path] : ${EXECUTABLE_OUTPUT_PATH}")
  endif()
endif()
|
||||
|
||||
# text examples
|
||||
# Text examples: discover every .cc example under examples/text and
# register each one as an executable target.
if(BUILD_EXAMPLES AND ENABLE_TEXT)
  if(EXISTS ${PROJECT_SOURCE_DIR}/examples/text)
    message(STATUS "")
    message(STATUS "*************FastDeploy Text Examples Summary**********")
    file(GLOB_RECURSE ALL_TEXT_EXAMPLE_SRCS
         ${PROJECT_SOURCE_DIR}/examples/text/*.cc)
    foreach(text_cc_file IN LISTS ALL_TEXT_EXAMPLE_SRCS)
      add_fastdeploy_executable(text ${text_cc_file})
    endforeach()
    message(STATUS " [FastDeploy Executable Path] : ${EXECUTABLE_OUTPUT_PATH}")
  endif()
endif()
|
||||
|
||||
# other examples ...
|
||||
@@ -1,57 +0,0 @@
|
||||
English | [简体中文](README_CN.md)
|
||||
# PaddleDetection Golang Deployment Example
|
||||
|
||||
This directory provides examples that `infer.go` uses CGO to call FastDeploy C API and fast finish the deployment of PaddleDetection models, including PPYOLOE on CPU/GPU.
|
||||
|
||||
Before deployment, two steps require confirmation
|
||||
|
||||
- 1. Software and hardware should meet the requirements. Please refer to [FastDeploy Environment Requirements](../../../../../docs/en/build_and_install/download_prebuilt_libraries.md)
|
||||
- 2. Download the precompiled deployment library and samples code according to your development environment. Refer to [FastDeploy Precompiled Library](../../../../../docs/en/build_and_install/download_prebuilt_libraries.md)
|
||||
|
||||
Taking inference on Linux as an example, the compilation test can be completed by executing the following commands in this directory. FastDeploy version 1.0.4 or above (x.x.x > 1.0.4), or the develop version (x.x.x = 0.0.0), is required to support this model.
|
||||
|
||||
### Use Golang and CGO to deploy PPYOLOE model
|
||||
|
||||
Download the FastDeploy precompiled library. Users can choose your appropriate version in the `FastDeploy Precompiled Library` mentioned above.
|
||||
```bash
|
||||
wget https://fastdeploy.bj.bcebos.com/dev/cpp/fastdeploy-linux-x64-0.0.0.tgz
|
||||
tar xvf fastdeploy-linux-x64-0.0.0.tgz
|
||||
```
|
||||
|
||||
Copy FastDeploy C APIs from precompiled library to the current directory.
|
||||
```bash
|
||||
cp -r fastdeploy-linux-x64-0.0.0/include/fastdeploy_capi .
|
||||
```
|
||||
|
||||
Download the PPYOLOE model file and test images.
|
||||
```bash
|
||||
wget https://bj.bcebos.com/paddlehub/fastdeploy/ppyoloe_crn_l_300e_coco.tgz
|
||||
wget https://gitee.com/paddlepaddle/PaddleDetection/raw/release/2.4/demo/000000014439.jpg
|
||||
tar xvf ppyoloe_crn_l_300e_coco.tgz
|
||||
```
|
||||
|
||||
Configure the `cgo CFLAGS: -I` to FastDeploy C API directory path and the `cgo LDFLAGS: -L` to FastDeploy dynamic library path. The FastDeploy dynamic library is located in the `/lib` directory.
|
||||
```bash
|
||||
cgo CFLAGS: -I./fastdeploy_capi
|
||||
cgo LDFLAGS: -L./fastdeploy-linux-x64-0.0.0/lib -lfastdeploy
|
||||
```
|
||||
|
||||
Use the following command to add Fastdeploy library path to the environment variable.
|
||||
```bash
|
||||
source /Path/to/fastdeploy-linux-x64-0.0.0/fastdeploy_init.sh
|
||||
```
|
||||
|
||||
Compile the Go file `infer.go`.
|
||||
```bash
|
||||
go build infer.go
|
||||
```
|
||||
|
||||
After compiling, use the following command to obtain the predicted results.
|
||||
```bash
|
||||
# CPU inference
|
||||
./infer -model ./ppyoloe_crn_l_300e_coco -image 000000014439.jpg -device 0
|
||||
# GPU inference
|
||||
./infer -model ./ppyoloe_crn_l_300e_coco -image 000000014439.jpg -device 1
|
||||
```
|
||||
|
||||
The visualized detection result is then saved as the local image `vis_result.jpg`.
|
||||
@@ -1,56 +0,0 @@
|
||||
[English](README.md) | 简体中文
|
||||
# PaddleDetection Golang 部署示例
|
||||
|
||||
本目录下提供`infer.go`, 使用CGO调用FastDeploy C API快速完成PaddleDetection模型PPYOLOE在CPU/GPU上部署的示例
|
||||
|
||||
在部署前,需确认以下两个步骤
|
||||
|
||||
- 1. 软硬件环境满足要求,参考[FastDeploy环境要求](../../../../../docs/cn/build_and_install/download_prebuilt_libraries.md)
|
||||
- 2. 根据开发环境,下载预编译部署库和samples代码,参考[FastDeploy预编译库](../../../../../docs/cn/build_and_install/download_prebuilt_libraries.md)
|
||||
|
||||
以Linux上推理为例,在本目录执行如下命令即可完成编译测试,支持此模型需保证FastDeploy版本1.0.4以上(x.x.x>1.0.4)或FastDeploy的Develop版本(x.x.x=0.0.0)
|
||||
### 使用Golang和CGO工具进行PPYOLOE模型推理部署
|
||||
|
||||
在当前目录下,下载FastDeploy预编译库,用户可在上文提到的`FastDeploy预编译库`中自行选择合适的版本使用
|
||||
```bash
|
||||
wget https://fastdeploy.bj.bcebos.com/dev/cpp/fastdeploy-linux-x64-0.0.0.tgz
|
||||
tar xvf fastdeploy-linux-x64-0.0.0.tgz
|
||||
```
|
||||
|
||||
将FastDeploy C API文件拷贝至当前目录
|
||||
```bash
|
||||
cp -r fastdeploy-linux-x64-0.0.0/include/fastdeploy_capi .
|
||||
```
|
||||
|
||||
下载PPYOLOE模型文件和测试图片
|
||||
```bash
|
||||
wget https://bj.bcebos.com/paddlehub/fastdeploy/ppyoloe_crn_l_300e_coco.tgz
|
||||
wget https://gitee.com/paddlepaddle/PaddleDetection/raw/release/2.4/demo/000000014439.jpg
|
||||
tar xvf ppyoloe_crn_l_300e_coco.tgz
|
||||
```
|
||||
|
||||
配置`infer.go`中的`cgo CFLAGS: -I`参数配置为C API文件路径,`cgo LDFLAGS: -L`参数配置为FastDeploy的动态库路径,动态库位于预编译库的`/lib`目录中
|
||||
```bash
|
||||
cgo CFLAGS: -I./fastdeploy_capi
|
||||
cgo LDFLAGS: -L./fastdeploy-linux-x64-0.0.0/lib -lfastdeploy
|
||||
```
|
||||
|
||||
将FastDeploy的库路径添加到环境变量
|
||||
```bash
|
||||
source /Path/to/fastdeploy-linux-x64-0.0.0/fastdeploy_init.sh
|
||||
```
|
||||
|
||||
编译Go文件`infer.go`
|
||||
```bash
|
||||
go build infer.go
|
||||
```
|
||||
|
||||
编译完成后,使用如下命令执行可得到预测结果
|
||||
```bash
|
||||
# CPU推理
|
||||
./infer -model ./ppyoloe_crn_l_300e_coco -image 000000014439.jpg -device 0
|
||||
# GPU推理
|
||||
./infer -model ./ppyoloe_crn_l_300e_coco -image 000000014439.jpg -device 1
|
||||
```
|
||||
|
||||
可视化的检测结果图片保存在本地`vis_result.jpg`
|
||||
@@ -1,185 +0,0 @@
|
||||
// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package main
|
||||
|
||||
// #cgo CFLAGS: -I./fastdeploy_capi
|
||||
// #cgo LDFLAGS: -L./fastdeploy-linux-x64-0.0.0/lib -lfastdeploy
|
||||
// #include <fastdeploy_capi/vision.h>
|
||||
// #include <stdio.h>
|
||||
// #include <stdbool.h>
|
||||
// #include <stdlib.h>
|
||||
/*
|
||||
#include <stdio.h>
|
||||
#ifdef WIN32
|
||||
const char sep = '\\';
|
||||
#else
|
||||
const char sep = '/';
|
||||
#endif
|
||||
|
||||
char* GetModelFilePath(char* model_dir, char* model_file, int max_size){
|
||||
snprintf(model_file, max_size, "%s%c%s", model_dir, sep, "model.pdmodel");
|
||||
return model_file;
|
||||
}
|
||||
|
||||
char* GetParametersFilePath(char* model_dir, char* params_file, int max_size){
|
||||
snprintf(params_file, max_size, "%s%c%s", model_dir, sep, "model.pdiparams");
|
||||
return params_file;
|
||||
}
|
||||
|
||||
char* GetConfigFilePath(char* model_dir, char* config_file, int max_size){
|
||||
snprintf(config_file, max_size, "%s%c%s", model_dir, sep, "infer_cfg.yml");
|
||||
return config_file;
|
||||
}
|
||||
*/
|
||||
import "C"
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
func FDBooleanToGo(b C.FD_C_Bool) bool {
|
||||
var cFalse C.FD_C_Bool
|
||||
if b != cFalse {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func CpuInfer(modelDir *C.char, imageFile *C.char) {
|
||||
|
||||
var modelFile = (*C.char)(C.malloc(C.size_t(100)))
|
||||
var paramsFile = (*C.char)(C.malloc(C.size_t(100)))
|
||||
var configFile = (*C.char)(C.malloc(C.size_t(100)))
|
||||
var maxSize = 99
|
||||
|
||||
modelFile = C.GetModelFilePath(modelDir, modelFile, C.int(maxSize))
|
||||
paramsFile = C.GetParametersFilePath(modelDir, paramsFile, C.int(maxSize))
|
||||
configFile = C.GetConfigFilePath(modelDir, configFile, C.int(maxSize))
|
||||
|
||||
var option *C.FD_C_RuntimeOptionWrapper = C.FD_C_CreateRuntimeOptionWrapper()
|
||||
C.FD_C_RuntimeOptionWrapperUseCpu(option)
|
||||
|
||||
var model *C.FD_C_PPYOLOEWrapper = C.FD_C_CreatePPYOLOEWrapper(
|
||||
modelFile, paramsFile, configFile, option, C.FD_C_ModelFormat_PADDLE)
|
||||
|
||||
if !FDBooleanToGo(C.FD_C_PPYOLOEWrapperInitialized(model)) {
|
||||
fmt.Printf("Failed to initialize.\n")
|
||||
C.FD_C_DestroyRuntimeOptionWrapper(option)
|
||||
C.FD_C_DestroyPPYOLOEWrapper(model)
|
||||
return
|
||||
}
|
||||
|
||||
var image C.FD_C_Mat = C.FD_C_Imread(imageFile)
|
||||
|
||||
var result *C.FD_C_DetectionResult = C.FD_C_CreateDetectionResult()
|
||||
|
||||
if !FDBooleanToGo(C.FD_C_PPYOLOEWrapperPredict(model, image, result)) {
|
||||
fmt.Printf("Failed to predict.\n")
|
||||
C.FD_C_DestroyRuntimeOptionWrapper(option)
|
||||
C.FD_C_DestroyPPYOLOEWrapper(model)
|
||||
C.FD_C_DestroyMat(image)
|
||||
C.free(unsafe.Pointer(result))
|
||||
return
|
||||
}
|
||||
|
||||
var visImage C.FD_C_Mat = C.FD_C_VisDetection(image, result, 0.5, 1, 0.5)
|
||||
|
||||
C.FD_C_Imwrite(C.CString("vis_result.jpg"), visImage)
|
||||
fmt.Printf("Visualized result saved in ./vis_result.jpg\n")
|
||||
|
||||
C.FD_C_DestroyRuntimeOptionWrapper(option)
|
||||
C.FD_C_DestroyPPYOLOEWrapper(model)
|
||||
C.FD_C_DestroyDetectionResult(result)
|
||||
C.FD_C_DestroyMat(image)
|
||||
C.FD_C_DestroyMat(visImage)
|
||||
}
|
||||
|
||||
func GpuInfer(modelDir *C.char, imageFile *C.char) {
|
||||
|
||||
var modelFile = (*C.char)(C.malloc(C.size_t(100)))
|
||||
var paramsFile = (*C.char)(C.malloc(C.size_t(100)))
|
||||
var configFile = (*C.char)(C.malloc(C.size_t(100)))
|
||||
var maxSize = 99
|
||||
|
||||
modelFile = C.GetModelFilePath(modelDir, modelFile, C.int(maxSize))
|
||||
paramsFile = C.GetParametersFilePath(modelDir, paramsFile, C.int(maxSize))
|
||||
configFile = C.GetConfigFilePath(modelDir, configFile, C.int(maxSize))
|
||||
|
||||
var option *C.FD_C_RuntimeOptionWrapper = C.FD_C_CreateRuntimeOptionWrapper()
|
||||
C.FD_C_RuntimeOptionWrapperUseGpu(option, 0)
|
||||
|
||||
var model *C.FD_C_PPYOLOEWrapper = C.FD_C_CreatePPYOLOEWrapper(
|
||||
modelFile, paramsFile, configFile, option, C.FD_C_ModelFormat_PADDLE)
|
||||
|
||||
if !FDBooleanToGo(C.FD_C_PPYOLOEWrapperInitialized(model)) {
|
||||
fmt.Printf("Failed to initialize.\n")
|
||||
C.FD_C_DestroyRuntimeOptionWrapper(option)
|
||||
C.FD_C_DestroyPPYOLOEWrapper(model)
|
||||
return
|
||||
}
|
||||
|
||||
var image C.FD_C_Mat = C.FD_C_Imread(imageFile)
|
||||
|
||||
var result *C.FD_C_DetectionResult = C.FD_C_CreateDetectionResult()
|
||||
|
||||
if !FDBooleanToGo(C.FD_C_PPYOLOEWrapperPredict(model, image, result)) {
|
||||
fmt.Printf("Failed to predict.\n")
|
||||
C.FD_C_DestroyRuntimeOptionWrapper(option)
|
||||
C.FD_C_DestroyPPYOLOEWrapper(model)
|
||||
C.FD_C_DestroyMat(image)
|
||||
C.free(unsafe.Pointer(result))
|
||||
return
|
||||
}
|
||||
|
||||
var visImage C.FD_C_Mat = C.FD_C_VisDetection(image, result, 0.5, 1, 0.5)
|
||||
|
||||
C.FD_C_Imwrite(C.CString("vis_result.jpg"), visImage)
|
||||
fmt.Printf("Visualized result saved in ./vis_result.jpg\n")
|
||||
|
||||
C.FD_C_DestroyRuntimeOptionWrapper(option)
|
||||
C.FD_C_DestroyPPYOLOEWrapper(model)
|
||||
C.FD_C_DestroyDetectionResult(result)
|
||||
C.FD_C_DestroyMat(image)
|
||||
C.FD_C_DestroyMat(visImage)
|
||||
}
|
||||
|
||||
var (
|
||||
modelDir string
|
||||
imageFile string
|
||||
deviceType int
|
||||
)
|
||||
|
||||
func init() {
|
||||
flag.StringVar(&modelDir, "model", "", "paddle detection model to use")
|
||||
flag.StringVar(&imageFile, "image", "", "image to predict")
|
||||
flag.IntVar(&deviceType, "device", 0, "The data type of run_option is int, 0: run with cpu; 1: run with gpu")
|
||||
}
|
||||
|
||||
func main() {
|
||||
flag.Parse()
|
||||
|
||||
if modelDir != "" && imageFile != "" {
|
||||
if deviceType == 0 {
|
||||
CpuInfer(C.CString(modelDir), C.CString(imageFile))
|
||||
} else if deviceType == 1 {
|
||||
GpuInfer(C.CString(modelDir), C.CString(imageFile))
|
||||
}
|
||||
} else {
|
||||
fmt.Printf("Usage: ./infer -model path/to/model_dir -image path/to/image -device run_option \n")
|
||||
fmt.Printf("e.g ./infer -model ./ppyoloe_crn_l_300e_coco -image 000000014439.jpg -device 0 \n")
|
||||
}
|
||||
|
||||
}
|
||||
@@ -1,56 +0,0 @@
|
||||
English | [简体中文](README_CN.md)
|
||||
# YOLOv5 Golang Deployment Example
|
||||
|
||||
This directory provides examples that `infer.go` uses CGO to call FastDeploy C API and finish the deployment of YOLOv5 model on CPU/GPU.
|
||||
|
||||
Before deployment, two steps require confirmation
|
||||
|
||||
- 1. Software and hardware should meet the requirements. Please refer to [FastDeploy Environment Requirements](../../../../../docs/en/build_and_install/download_prebuilt_libraries.md)
|
||||
- 2. Download the precompiled deployment library and samples code according to your development environment. Refer to [FastDeploy Precompiled Library](../../../../../docs/en/build_and_install/download_prebuilt_libraries.md)
|
||||
|
||||
Taking inference on Linux as an example, the compilation test can be completed by executing the following command in this directory. FastDeploy version 1.0.4 above (x.x.x>1.0.4) or develop version (x.x.x=0.0.0) is required to support this model.
|
||||
|
||||
### Use Golang and CGO to deploy YOLOv5 model
|
||||
|
||||
Download the FastDeploy precompiled library. Users can choose your appropriate version in the `FastDeploy Precompiled Library` mentioned above.
|
||||
```bash
|
||||
wget https://fastdeploy.bj.bcebos.com/dev/cpp/fastdeploy-linux-x64-0.0.0.tgz
|
||||
tar xvf fastdeploy-linux-x64-0.0.0.tgz
|
||||
```
|
||||
|
||||
Copy FastDeploy C APIs from precompiled library to the current directory.
|
||||
```bash
|
||||
cp -r fastdeploy-linux-x64-0.0.0/include/fastdeploy_capi .
|
||||
```
|
||||
|
||||
Download the YOLOv5 ONNX model file and test images
|
||||
```bash
|
||||
wget https://bj.bcebos.com/paddlehub/fastdeploy/yolov5s.onnx
|
||||
wget https://gitee.com/paddlepaddle/PaddleDetection/raw/release/2.4/demo/000000014439.jpg
|
||||
```
|
||||
|
||||
Configure the `cgo CFLAGS: -I` to FastDeploy C API directory path and the `cgo LDFLAGS: -L` to FastDeploy dynamic library path. The FastDeploy dynamic library is located in the `/lib` directory.
|
||||
```bash
|
||||
cgo CFLAGS: -I./fastdeploy_capi
|
||||
cgo LDFLAGS: -L./fastdeploy-linux-x64-0.0.0/lib -lfastdeploy
|
||||
```
|
||||
|
||||
Use the following command to add Fastdeploy library path to the environment variable.
|
||||
```bash
|
||||
source /Path/to/fastdeploy-linux-x64-0.0.0/fastdeploy_init.sh
|
||||
```
|
||||
|
||||
Compile the Go file `infer.go`.
|
||||
```bash
|
||||
go build infer.go
|
||||
```
|
||||
|
||||
After compiling, use the following command to obtain the predicted results.
|
||||
```bash
|
||||
# CPU inference
|
||||
./infer -model yolov5s.onnx -image 000000014439.jpg -device 0
|
||||
# GPU inference
|
||||
./infer -model yolov5s.onnx -image 000000014439.jpg -device 1
|
||||
```
|
||||
|
||||
The visualized detection result is then saved as the local image `vis_result.jpg`.
|
||||
@@ -1,55 +0,0 @@
|
||||
[English](README.md) | 简体中文
|
||||
# YOLOv5 Golang 部署示例
|
||||
|
||||
本目录下提供`infer.go`, 使用CGO调用FastDeploy C API快速完成YOLOv5模型在CPU/GPU上部署的示例
|
||||
|
||||
在部署前,需确认以下两个步骤
|
||||
|
||||
- 1. 软硬件环境满足要求,参考[FastDeploy环境要求](../../../../../docs/cn/build_and_install/download_prebuilt_libraries.md)
|
||||
- 2. 根据开发环境,下载预编译部署库和samples代码,参考[FastDeploy预编译库](../../../../../docs/cn/build_and_install/download_prebuilt_libraries.md)
|
||||
|
||||
以Linux上推理为例,在本目录执行如下命令即可完成编译测试,支持此模型需保证FastDeploy版本1.0.4以上(x.x.x>1.0.4)或FastDeploy的Develop版本(x.x.x=0.0.0)
|
||||
### 使用Golang和CGO工具进行YOLOv5模型推理部署
|
||||
|
||||
在当前目录下,下载FastDeploy预编译库,用户可在上文提到的`FastDeploy预编译库`中自行选择合适的版本使用
|
||||
```bash
|
||||
wget https://fastdeploy.bj.bcebos.com/dev/cpp/fastdeploy-linux-x64-0.0.0.tgz
|
||||
tar xvf fastdeploy-linux-x64-0.0.0.tgz
|
||||
```
|
||||
|
||||
将FastDeploy C API文件拷贝至当前目录
|
||||
```bash
|
||||
cp -r fastdeploy-linux-x64-0.0.0/include/fastdeploy_capi .
|
||||
```
|
||||
|
||||
下载官方转换好的 YOLOv5 ONNX 模型文件和测试图片
|
||||
```bash
|
||||
wget https://bj.bcebos.com/paddlehub/fastdeploy/yolov5s.onnx
|
||||
wget https://gitee.com/paddlepaddle/PaddleDetection/raw/release/2.4/demo/000000014439.jpg
|
||||
```
|
||||
|
||||
配置`infer.go`中的`cgo CFLAGS: -I`参数配置为C API文件路径,`cgo LDFLAGS: -L`参数配置为FastDeploy的动态库路径,动态库位于预编译库的`/lib`目录中
|
||||
```bash
|
||||
cgo CFLAGS: -I./fastdeploy_capi
|
||||
cgo LDFLAGS: -L./fastdeploy-linux-x64-0.0.0/lib -lfastdeploy
|
||||
```
|
||||
|
||||
将FastDeploy的库路径添加到环境变量
|
||||
```bash
|
||||
source /Path/to/fastdeploy-linux-x64-0.0.0/fastdeploy_init.sh
|
||||
```
|
||||
|
||||
编译Go文件`infer.go`
|
||||
```bash
|
||||
go build infer.go
|
||||
```
|
||||
|
||||
编译完成后,使用如下命令执行可得到预测结果
|
||||
```bash
|
||||
# CPU推理
|
||||
./infer -model yolov5s.onnx -image 000000014439.jpg -device 0
|
||||
# GPU推理
|
||||
./infer -model yolov5s.onnx -image 000000014439.jpg -device 1
|
||||
```
|
||||
|
||||
可视化的检测结果图片保存在本地`vis_result.jpg`
|
||||
@@ -1,144 +0,0 @@
|
||||
// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package main
|
||||
|
||||
// #cgo CFLAGS: -I./fastdeploy_capi
|
||||
// #cgo LDFLAGS: -L./fastdeploy-linux-x64-0.0.0/lib -lfastdeploy
|
||||
// #include <fastdeploy_capi/vision.h>
|
||||
// #include <stdio.h>
|
||||
// #include <stdbool.h>
|
||||
// #include <stdlib.h>
|
||||
import "C"
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
func FDBooleanToGo(b C.FD_C_Bool) bool {
|
||||
var cFalse C.FD_C_Bool
|
||||
if b != cFalse {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func CpuInfer(modelFile *C.char, imageFile *C.char) {
|
||||
|
||||
var option *C.FD_C_RuntimeOptionWrapper = C.FD_C_CreateRuntimeOptionWrapper()
|
||||
C.FD_C_RuntimeOptionWrapperUseCpu(option)
|
||||
|
||||
var model *C.FD_C_YOLOv5Wrapper = C.FD_C_CreateYOLOv5Wrapper(
|
||||
modelFile, C.CString(""), option, C.FD_C_ModelFormat_ONNX)
|
||||
|
||||
if !FDBooleanToGo(C.FD_C_YOLOv5WrapperInitialized(model)) {
|
||||
fmt.Printf("Failed to initialize.\n")
|
||||
C.FD_C_DestroyRuntimeOptionWrapper(option)
|
||||
C.FD_C_DestroyYOLOv5Wrapper(model)
|
||||
return
|
||||
}
|
||||
|
||||
var image C.FD_C_Mat = C.FD_C_Imread(imageFile)
|
||||
|
||||
var result *C.FD_C_DetectionResult = C.FD_C_CreateDetectionResult()
|
||||
|
||||
if !FDBooleanToGo(C.FD_C_YOLOv5WrapperPredict(model, image, result)) {
|
||||
fmt.Printf("Failed to predict.\n")
|
||||
C.FD_C_DestroyRuntimeOptionWrapper(option)
|
||||
C.FD_C_DestroyYOLOv5Wrapper(model)
|
||||
C.FD_C_DestroyMat(image)
|
||||
C.free(unsafe.Pointer(result))
|
||||
return
|
||||
}
|
||||
|
||||
var visImage C.FD_C_Mat = C.FD_C_VisDetection(image, result, 0.5, 1, 0.5)
|
||||
|
||||
C.FD_C_Imwrite(C.CString("vis_result.jpg"), visImage)
|
||||
fmt.Printf("Visualized result saved in ./vis_result.jpg\n")
|
||||
|
||||
C.FD_C_DestroyRuntimeOptionWrapper(option)
|
||||
C.FD_C_DestroyYOLOv5Wrapper(model)
|
||||
C.FD_C_DestroyDetectionResult(result)
|
||||
C.FD_C_DestroyMat(image)
|
||||
C.FD_C_DestroyMat(visImage)
|
||||
}
|
||||
|
||||
func GpuInfer(modelFile *C.char, imageFile *C.char) {
|
||||
|
||||
var option *C.FD_C_RuntimeOptionWrapper = C.FD_C_CreateRuntimeOptionWrapper()
|
||||
C.FD_C_RuntimeOptionWrapperUseGpu(option, 0)
|
||||
|
||||
var model *C.FD_C_YOLOv5Wrapper = C.FD_C_CreateYOLOv5Wrapper(
|
||||
modelFile, C.CString(""), option, C.FD_C_ModelFormat_ONNX)
|
||||
|
||||
if !FDBooleanToGo(C.FD_C_YOLOv5WrapperInitialized(model)) {
|
||||
fmt.Printf("Failed to initialize.\n")
|
||||
C.FD_C_DestroyRuntimeOptionWrapper(option)
|
||||
C.FD_C_DestroyYOLOv5Wrapper(model)
|
||||
return
|
||||
}
|
||||
|
||||
var image C.FD_C_Mat = C.FD_C_Imread(imageFile)
|
||||
|
||||
var result *C.FD_C_DetectionResult = C.FD_C_CreateDetectionResult()
|
||||
|
||||
if !FDBooleanToGo(C.FD_C_YOLOv5WrapperPredict(model, image, result)) {
|
||||
fmt.Printf("Failed to predict.\n")
|
||||
C.FD_C_DestroyRuntimeOptionWrapper(option)
|
||||
C.FD_C_DestroyYOLOv5Wrapper(model)
|
||||
C.FD_C_DestroyMat(image)
|
||||
C.free(unsafe.Pointer(result))
|
||||
return
|
||||
}
|
||||
|
||||
var visImage C.FD_C_Mat = C.FD_C_VisDetection(image, result, 0.5, 1, 0.5)
|
||||
|
||||
C.FD_C_Imwrite(C.CString("vis_result.jpg"), visImage)
|
||||
fmt.Printf("Visualized result saved in ./vis_result.jpg\n")
|
||||
|
||||
C.FD_C_DestroyRuntimeOptionWrapper(option)
|
||||
C.FD_C_DestroyYOLOv5Wrapper(model)
|
||||
C.FD_C_DestroyDetectionResult(result)
|
||||
C.FD_C_DestroyMat(image)
|
||||
C.FD_C_DestroyMat(visImage)
|
||||
}
|
||||
|
||||
var (
|
||||
modelFile string
|
||||
imageFile string
|
||||
deviceType int
|
||||
)
|
||||
|
||||
func init() {
|
||||
flag.StringVar(&modelFile, "model", "", "paddle detection model to use")
|
||||
flag.StringVar(&imageFile, "image", "", "image to predict")
|
||||
flag.IntVar(&deviceType, "device", 0, "The data type of run_option is int, 0: run with cpu; 1: run with gpu")
|
||||
}
|
||||
|
||||
func main() {
|
||||
flag.Parse()
|
||||
|
||||
if modelFile != "" && imageFile != "" {
|
||||
if deviceType == 0 {
|
||||
CpuInfer(C.CString(modelFile), C.CString(imageFile))
|
||||
} else if deviceType == 1 {
|
||||
GpuInfer(C.CString(modelFile), C.CString(imageFile))
|
||||
}
|
||||
} else {
|
||||
fmt.Printf("Usage: ./infer -model path/to/model_dir -image path/to/image -device run_option \n")
|
||||
fmt.Printf("e.g ./infer -model yolov5s.onnx -image 000000014439.jpg -device 0 \n")
|
||||
}
|
||||
|
||||
}
|
||||
@@ -1,56 +0,0 @@
|
||||
English | [简体中文](README_CN.md)
|
||||
# YOLOv8 Golang Deployment Example
|
||||
|
||||
This directory provides examples that `infer.go` uses CGO to call FastDeploy C API and finish the deployment of YOLOv8 model on CPU/GPU.
|
||||
|
||||
Before deployment, two steps require confirmation
|
||||
|
||||
- 1. Software and hardware should meet the requirements. Please refer to [FastDeploy Environment Requirements](../../../../../docs/en/build_and_install/download_prebuilt_libraries.md)
|
||||
- 2. Download the precompiled deployment library and samples code according to your development environment. Refer to [FastDeploy Precompiled Library](../../../../../docs/en/build_and_install/download_prebuilt_libraries.md)
|
||||
|
||||
Taking inference on Linux as an example, the compilation test can be completed by executing the following command in this directory. FastDeploy version 1.0.4 above (x.x.x>1.0.4) or develop version (x.x.x=0.0.0) is required to support this model.
|
||||
|
||||
### Use Golang and CGO to deploy YOLOv8 model
|
||||
|
||||
Download the FastDeploy precompiled library. Users can choose your appropriate version in the `FastDeploy Precompiled Library` mentioned above.
|
||||
```bash
|
||||
wget https://fastdeploy.bj.bcebos.com/dev/cpp/fastdeploy-linux-x64-0.0.0.tgz
|
||||
tar xvf fastdeploy-linux-x64-0.0.0.tgz
|
||||
```
|
||||
|
||||
Copy FastDeploy C APIs from precompiled library to the current directory.
|
||||
```bash
|
||||
cp -r fastdeploy-linux-x64-0.0.0/include/fastdeploy_capi .
|
||||
```
|
||||
|
||||
Download the YOLOv8 ONNX model file and test images
|
||||
```bash
|
||||
wget https://bj.bcebos.com/paddlehub/fastdeploy/yolov8s.onnx
|
||||
wget https://gitee.com/paddlepaddle/PaddleDetection/raw/release/2.4/demo/000000014439.jpg
|
||||
```
|
||||
|
||||
Configure the `cgo CFLAGS: -I` to FastDeploy C API directory path and the `cgo LDFLAGS: -L` to FastDeploy dynamic library path. The FastDeploy dynamic library is located in the `/lib` directory.
|
||||
```bash
|
||||
cgo CFLAGS: -I./fastdeploy_capi
|
||||
cgo LDFLAGS: -L./fastdeploy-linux-x64-0.0.0/lib -lfastdeploy
|
||||
```
|
||||
|
||||
Use the following command to add Fastdeploy library path to the environment variable.
|
||||
```bash
|
||||
source /Path/to/fastdeploy-linux-x64-0.0.0/fastdeploy_init.sh
|
||||
```
|
||||
|
||||
Compile the Go file `infer.go`.
|
||||
```bash
|
||||
go build infer.go
|
||||
```
|
||||
|
||||
After compiling, use the following command to obtain the predicted results.
|
||||
```bash
|
||||
# CPU inference
|
||||
./infer -model yolov8s.onnx -image 000000014439.jpg -device 0
|
||||
# GPU inference
|
||||
./infer -model yolov8s.onnx -image 000000014439.jpg -device 1
|
||||
```
|
||||
|
||||
The visualized detection result is then saved as the local image `vis_result.jpg`.
|
||||
@@ -1,55 +0,0 @@
|
||||
[English](README.md) | 简体中文
|
||||
# YOLOv8 Golang 部署示例
|
||||
|
||||
本目录下提供`infer.go`, 使用CGO调用FastDeploy C API快速完成YOLOv8模型在CPU/GPU上部署的示例
|
||||
|
||||
在部署前,需确认以下两个步骤
|
||||
|
||||
- 1. 软硬件环境满足要求,参考[FastDeploy环境要求](../../../../../docs/cn/build_and_install/download_prebuilt_libraries.md)
|
||||
- 2. 根据开发环境,下载预编译部署库和samples代码,参考[FastDeploy预编译库](../../../../../docs/cn/build_and_install/download_prebuilt_libraries.md)
|
||||
|
||||
以Linux上推理为例,在本目录执行如下命令即可完成编译测试,支持此模型需保证FastDeploy版本1.0.4以上(x.x.x>1.0.4)或FastDeploy的Develop版本(x.x.x=0.0.0)
|
||||
### 使用Golang和CGO工具进行YOLOv8模型推理部署
|
||||
|
||||
在当前目录下,下载FastDeploy预编译库,用户可在上文提到的`FastDeploy预编译库`中自行选择合适的版本使用
|
||||
```bash
|
||||
wget https://fastdeploy.bj.bcebos.com/dev/cpp/fastdeploy-linux-x64-0.0.0.tgz
|
||||
tar xvf fastdeploy-linux-x64-0.0.0.tgz
|
||||
```
|
||||
|
||||
将FastDeploy C API文件拷贝至当前目录
|
||||
```bash
|
||||
cp -r fastdeploy-linux-x64-0.0.0/include/fastdeploy_capi .
|
||||
```
|
||||
|
||||
下载官方转换好的 YOLOv8 ONNX 模型文件和测试图片
|
||||
```bash
|
||||
wget https://bj.bcebos.com/paddlehub/fastdeploy/yolov8s.onnx
|
||||
wget https://gitee.com/paddlepaddle/PaddleDetection/raw/release/2.4/demo/000000014439.jpg
|
||||
```
|
||||
|
||||
配置`infer.go`中的`cgo CFLAGS: -I`参数配置为C API文件路径,`cgo LDFLAGS: -L`参数配置为FastDeploy的动态库路径,动态库位于预编译库的`/lib`目录中
|
||||
```bash
|
||||
cgo CFLAGS: -I./fastdeploy_capi
|
||||
cgo LDFLAGS: -L./fastdeploy-linux-x64-0.0.0/lib -lfastdeploy
|
||||
```
|
||||
|
||||
将FastDeploy的库路径添加到环境变量
|
||||
```bash
|
||||
source /Path/to/fastdeploy-linux-x64-0.0.0/fastdeploy_init.sh
|
||||
```
|
||||
|
||||
编译Go文件`infer.go`
|
||||
```bash
|
||||
go build infer.go
|
||||
```
|
||||
|
||||
编译完成后,使用如下命令执行可得到预测结果
|
||||
```bash
|
||||
# CPU推理
|
||||
./infer -model yolov8s.onnx -image 000000014439.jpg -device 0
|
||||
# GPU推理
|
||||
./infer -model yolov8s.onnx -image 000000014439.jpg -device 1
|
||||
```
|
||||
|
||||
可视化的检测结果图片保存在本地`vis_result.jpg`
|
||||
@@ -1,144 +0,0 @@
|
||||
// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package main
|
||||
|
||||
// #cgo CFLAGS: -I./fastdeploy_capi
|
||||
// #cgo LDFLAGS: -L./fastdeploy-linux-x64-0.0.0/lib -lfastdeploy
|
||||
// #include <fastdeploy_capi/vision.h>
|
||||
// #include <stdio.h>
|
||||
// #include <stdbool.h>
|
||||
// #include <stdlib.h>
|
||||
import "C"
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
func FDBooleanToGo(b C.FD_C_Bool) bool {
|
||||
var cFalse C.FD_C_Bool
|
||||
if b != cFalse {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func CpuInfer(modelFile *C.char, imageFile *C.char) {
|
||||
|
||||
var option *C.FD_C_RuntimeOptionWrapper = C.FD_C_CreateRuntimeOptionWrapper()
|
||||
C.FD_C_RuntimeOptionWrapperUseCpu(option)
|
||||
|
||||
var model *C.FD_C_YOLOv8Wrapper = C.FD_C_CreateYOLOv8Wrapper(
|
||||
modelFile, C.CString(""), option, C.FD_C_ModelFormat_ONNX)
|
||||
|
||||
if !FDBooleanToGo(C.FD_C_YOLOv8WrapperInitialized(model)) {
|
||||
fmt.Printf("Failed to initialize.\n")
|
||||
C.FD_C_DestroyRuntimeOptionWrapper(option)
|
||||
C.FD_C_DestroyYOLOv8Wrapper(model)
|
||||
return
|
||||
}
|
||||
|
||||
var image C.FD_C_Mat = C.FD_C_Imread(imageFile)
|
||||
|
||||
var result *C.FD_C_DetectionResult = C.FD_C_CreateDetectionResult()
|
||||
|
||||
if !FDBooleanToGo(C.FD_C_YOLOv8WrapperPredict(model, image, result)) {
|
||||
fmt.Printf("Failed to predict.\n")
|
||||
C.FD_C_DestroyRuntimeOptionWrapper(option)
|
||||
C.FD_C_DestroyYOLOv8Wrapper(model)
|
||||
C.FD_C_DestroyMat(image)
|
||||
C.free(unsafe.Pointer(result))
|
||||
return
|
||||
}
|
||||
|
||||
var visImage C.FD_C_Mat = C.FD_C_VisDetection(image, result, 0.5, 1, 0.5)
|
||||
|
||||
C.FD_C_Imwrite(C.CString("vis_result.jpg"), visImage)
|
||||
fmt.Printf("Visualized result saved in ./vis_result.jpg\n")
|
||||
|
||||
C.FD_C_DestroyRuntimeOptionWrapper(option)
|
||||
C.FD_C_DestroyYOLOv8Wrapper(model)
|
||||
C.FD_C_DestroyDetectionResult(result)
|
||||
C.FD_C_DestroyMat(image)
|
||||
C.FD_C_DestroyMat(visImage)
|
||||
}
|
||||
|
||||
func GpuInfer(modelFile *C.char, imageFile *C.char) {
|
||||
|
||||
var option *C.FD_C_RuntimeOptionWrapper = C.FD_C_CreateRuntimeOptionWrapper()
|
||||
C.FD_C_RuntimeOptionWrapperUseGpu(option, 0)
|
||||
|
||||
var model *C.FD_C_YOLOv8Wrapper = C.FD_C_CreateYOLOv8Wrapper(
|
||||
modelFile, C.CString(""), option, C.FD_C_ModelFormat_ONNX)
|
||||
|
||||
if !FDBooleanToGo(C.FD_C_YOLOv8WrapperInitialized(model)) {
|
||||
fmt.Printf("Failed to initialize.\n")
|
||||
C.FD_C_DestroyRuntimeOptionWrapper(option)
|
||||
C.FD_C_DestroyYOLOv8Wrapper(model)
|
||||
return
|
||||
}
|
||||
|
||||
var image C.FD_C_Mat = C.FD_C_Imread(imageFile)
|
||||
|
||||
var result *C.FD_C_DetectionResult = C.FD_C_CreateDetectionResult()
|
||||
|
||||
if !FDBooleanToGo(C.FD_C_YOLOv8WrapperPredict(model, image, result)) {
|
||||
fmt.Printf("Failed to predict.\n")
|
||||
C.FD_C_DestroyRuntimeOptionWrapper(option)
|
||||
C.FD_C_DestroyYOLOv8Wrapper(model)
|
||||
C.FD_C_DestroyMat(image)
|
||||
C.free(unsafe.Pointer(result))
|
||||
return
|
||||
}
|
||||
|
||||
var visImage C.FD_C_Mat = C.FD_C_VisDetection(image, result, 0.5, 1, 0.5)
|
||||
|
||||
C.FD_C_Imwrite(C.CString("vis_result.jpg"), visImage)
|
||||
fmt.Printf("Visualized result saved in ./vis_result.jpg\n")
|
||||
|
||||
C.FD_C_DestroyRuntimeOptionWrapper(option)
|
||||
C.FD_C_DestroyYOLOv8Wrapper(model)
|
||||
C.FD_C_DestroyDetectionResult(result)
|
||||
C.FD_C_DestroyMat(image)
|
||||
C.FD_C_DestroyMat(visImage)
|
||||
}
|
||||
|
||||
var (
|
||||
modelFile string
|
||||
imageFile string
|
||||
deviceType int
|
||||
)
|
||||
|
||||
func init() {
|
||||
flag.StringVar(&modelFile, "model", "", "paddle detection model to use ")
|
||||
flag.StringVar(&imageFile, "image", "", "image to predict")
|
||||
flag.IntVar(&deviceType, "device", 0, "The data type of run_option is int, 0: run with cpu; 1: run with gpu")
|
||||
}
|
||||
|
||||
func main() {
|
||||
flag.Parse()
|
||||
|
||||
if modelFile != "" && imageFile != "" {
|
||||
if deviceType == 0 {
|
||||
CpuInfer(C.CString(modelFile), C.CString(imageFile))
|
||||
} else if deviceType == 1 {
|
||||
GpuInfer(C.CString(modelFile), C.CString(imageFile))
|
||||
}
|
||||
} else {
|
||||
fmt.Printf("Usage: ./infer -model path/to/model_dir -image path/to/image -device run_option \n")
|
||||
fmt.Printf("e.g ./infer -model yolov8s.onnx -image 000000014439.jpg -device 0 \n")
|
||||
}
|
||||
|
||||
}
|
||||
@@ -1,55 +0,0 @@
|
||||
English | [简体中文](README_CN.md)
|
||||
# PPYOLOE Java Deployment Example
|
||||
|
||||
This directory provides examples that `java/InferDemo.java` uses `Java` to call FastDeploy `C++` API and finish the deployment of `PPYOLOE` model。
|
||||
|
||||
|
||||
Before deployment, two steps require confirmation
|
||||
|
||||
- 1. Software and hardware should meet the requirements. Please refer to [FastDeploy Environment Requirements](../../../../../docs/en/build_and_install/download_prebuilt_libraries.md)
|
||||
- 2. Download the precompiled deployment library and samples code according to your development environment. Refer to [FastDeploy Precompiled Library](../../../../../docs/en/build_and_install/download_prebuilt_libraries.md)
|
||||
|
||||
|
||||
Using `Java` to call `C++` API can be divided into two steps:
|
||||
* Generate dynamic link library in `C++` side.
|
||||
* Call the dynamic link library in `Java` side.
|
||||
|
||||
## Generates dynamic link library
|
||||
First, switch the path to `cpp` directory and copy `jni.h` and `jni_md.h` which in `jdk` directory to current directory `cpp`.
|
||||
```shell
|
||||
cp /PathJdk/jdk-17.0.6/include/jni.h ./
|
||||
cp /Pathjdk/jdk-17.0.6/include/linux/jni_md.h ./
|
||||
```
|
||||
|
||||
Then, execute the following command in the `cpp` directory to compile and generate the dynamic link library.
|
||||
> Note: you will need to specify the location of the FASTDEPLOY_INSTALL_DIR pre-compile library at compile time, but also the location of your own compiled FastDeploy library.
|
||||
```shell
|
||||
mkdir build && cd build
|
||||
cmake .. -FASTDEPLOY_INSTALL_DIR /fast-deploy-path
|
||||
make -j
|
||||
```
|
||||
After successful compilation, the dynamic link library will be stored in the `cpp/build` directory, ending in `.so` under `Linux` and `.dll` under `Windows`.
|
||||
|
||||
## Invoke dynamic link libraries using JAVA
|
||||
Switch the path to the `java` directory and use the following command to add Fastdeploy library path to the environment variable. Note the path of the `FastDeploy` library replaced with your own.
|
||||
|
||||
```bash
|
||||
source /Path/to/fastdeploy-linux-x64-1.0.4/fastdeploy_init.sh
|
||||
```
|
||||
Download the `PPYOLOE` model file and test images.
|
||||
```bash
|
||||
wget https://bj.bcebos.com/paddlehub/fastdeploy/ppyoloe_crn_l_300e_coco.tgz
|
||||
wget https://gitee.com/paddlepaddle/PaddleDetection/raw/release/2.4/demo/000000014439.jpg
|
||||
tar xvf ppyoloe_crn_l_300e_coco.tgz
|
||||
```
|
||||
|
||||
Compiling Java files.
|
||||
```shell
|
||||
javac InferDemo.java
|
||||
```
|
||||
|
||||
After compiling, run the following command to get the predicted result, where the first parameter indicates the path of the downloaded model and the second parameter indicates the path of the test image.
|
||||
```shell
|
||||
java InferDemo ./ppyoloe_crn_l_300e_coco ./000000014439.jpg
|
||||
```
|
||||
Then visualized inspection result is saved in the local image `vis_result.jpg`.
|
||||
@@ -1,57 +0,0 @@
|
||||
[English](README.md) | 简体中文
|
||||
# PPYOLOE Java 部署示例
|
||||
|
||||
本目录下提供`java/InferDemo.java`, 使用`Java`调用`C++`API快速完成`PaddleDetection`模型`PPYOLOE`在Linux上部署的示例。
|
||||
|
||||
在部署前,需确认以下两个步骤
|
||||
|
||||
- 1. 软硬件环境满足要求,参考[FastDeploy环境要求](../../../../../docs/cn/build_and_install/download_prebuilt_libraries.md)
|
||||
- 2. 根据开发环境,下载预编译部署库和samples代码,参考[FastDeploy预编译库](../../../../../docs/cn/build_and_install/download_prebuilt_libraries.md)
|
||||
|
||||
|
||||
|
||||
使用`Java`调用`C++` API 可以分为两步:
|
||||
|
||||
* 在`C++`端生成动态链接库。
|
||||
* 在`Java`端调用动态链接库。
|
||||
|
||||
## C++端生成动态链接库
|
||||
首先,切换路径到`cpp`目录,将`jdk`目录下的`jni.h`和`jni_md.h`拷贝到当前`cpp`目录下。
|
||||
```shell
|
||||
cp /PathJdk/jdk-17.0.6/include/jni.h ./
|
||||
cp /Pathjdk/jdk-17.0.6/include/linux/jni_md.h ./
|
||||
```
|
||||
|
||||
接着,在`cpp`目录下执行以下命令,进行编译,生成动态链接库。
|
||||
> 注意:编译时需要通过`FASTDEPLOY_INSTALL_DIR`选项指明`FastDeploy`预编译库位置, 当然也可以是自己编译的`FastDeploy`库位置。
|
||||
```shell
|
||||
mkdir build && cd build
|
||||
cmake .. -FASTDEPLOY_INSTALL_DIR /fast-deploy-path
|
||||
make -j
|
||||
```
|
||||
编译成功后,动态链接库会存放在`cpp/build`目录下,`Linux`下以`.so`结尾,`Windows`下以`.dll`结尾。
|
||||
|
||||
## 使用JAVA调用动态链接库
|
||||
切换路径到`java`目录下,将`FastDeploy`的库路径添加到环境变量,注意替换为自己的`FastDeploy`库所在路径。
|
||||
```bash
|
||||
source /Path/to/fastdeploy-linux-x64-0.0.0/fastdeploy_init.sh
|
||||
```
|
||||
|
||||
下载PPYOLOE模型文件和测试图片
|
||||
```bash
|
||||
wget https://bj.bcebos.com/paddlehub/fastdeploy/ppyoloe_crn_l_300e_coco.tgz
|
||||
wget https://gitee.com/paddlepaddle/PaddleDetection/raw/release/2.4/demo/000000014439.jpg
|
||||
tar xvf ppyoloe_crn_l_300e_coco.tgz
|
||||
```
|
||||
|
||||
编译Java文件
|
||||
```shell
|
||||
javac InferDemo.java
|
||||
```
|
||||
|
||||
编译完成后,执行如下命令可得到预测结果,其中第一个参数指明下载的模型路径,第二个参数指明测试图片路径。
|
||||
```shell
|
||||
java InferDemo ./ppyoloe_crn_l_300e_coco ./000000014439.jpg
|
||||
```
|
||||
可视化的检测结果图片保存在本地`vis_result.jpg`。
|
||||
|
||||
@@ -1,23 +0,0 @@
|
||||
# Sets the minimum version of CMake required to build the native library.
|
||||
cmake_minimum_required(VERSION 3.22.1)
|
||||
|
||||
# Declares and names the project.
|
||||
project("inferDemo")
|
||||
|
||||
option(FASTDEPLOY_INSTALL_DIR "Path of downloaded fastdeploy sdk.")
|
||||
set(FastDeploy_DIR ${FASTDEPLOY_INSTALL_DIR})
|
||||
find_package(FastDeploy REQUIRED)
|
||||
|
||||
include_directories(${CMAKE_CURRENT_SOURCE_DIR})
|
||||
include_directories(${FastDeploy_INCLUDE_DIRS})
|
||||
|
||||
add_library(
|
||||
inferDemo
|
||||
SHARED
|
||||
infer_demo.cc
|
||||
)
|
||||
|
||||
target_link_libraries(
|
||||
inferDemo
|
||||
${FASTDEPLOY_LIBS}
|
||||
)
|
||||
@@ -1,59 +0,0 @@
|
||||
#include "infer_demo.h"
|
||||
|
||||
#include "fastdeploy/vision.h"
|
||||
|
||||
std::string ConvertTo(JNIEnv *env, jstring jstr) {
|
||||
if (!jstr) {
|
||||
return "";
|
||||
}
|
||||
const jclass jstring_clazz = env->GetObjectClass(jstr);
|
||||
const jmethodID getBytesID =
|
||||
env->GetMethodID(jstring_clazz, "getBytes", "(Ljava/lang/String;)[B");
|
||||
const jbyteArray jstring_bytes = reinterpret_cast<jbyteArray>(
|
||||
env->CallObjectMethod(jstr, getBytesID, env->NewStringUTF("UTF-8")));
|
||||
|
||||
size_t length = static_cast<size_t>(env->GetArrayLength(jstring_bytes));
|
||||
jbyte *jstring_bytes_ptr = env->GetByteArrayElements(jstring_bytes, NULL);
|
||||
|
||||
std::string res =
|
||||
std::string(reinterpret_cast<char *>(jstring_bytes_ptr), length);
|
||||
env->ReleaseByteArrayElements(jstring_bytes, jstring_bytes_ptr, JNI_ABORT);
|
||||
|
||||
env->DeleteLocalRef(jstring_bytes);
|
||||
env->DeleteLocalRef(jstring_clazz);
|
||||
return res;
|
||||
}
|
||||
|
||||
JNIEXPORT void JNICALL Java_InferDemo_infer(JNIEnv *env, jobject thiz,
|
||||
jstring modelPath,
|
||||
jstring imagePath) {
|
||||
std::string model_path = ConvertTo(env, modelPath);
|
||||
if (model_path[model_path.length() - 1] != '/') {
|
||||
model_path += "/";
|
||||
}
|
||||
std::string model_file = model_path + "model.pdmodel";
|
||||
std::string params_file = model_path + "model.pdiparams";
|
||||
std::string infer_cfg_file = model_path + "infer_cfg.yml";
|
||||
|
||||
// Configuration information for model inference
|
||||
fastdeploy::RuntimeOption option;
|
||||
auto model = fastdeploy::vision::detection::PPYOLOE(model_file, params_file,
|
||||
infer_cfg_file, option);
|
||||
|
||||
assert(model.Initialized()); // Check whether the model is successfully
|
||||
// initialized
|
||||
|
||||
std::string image_path = ConvertTo(env, imagePath);
|
||||
cv::Mat im = cv::imread(image_path);
|
||||
fastdeploy::vision::DetectionResult result;
|
||||
|
||||
assert(model.Predict(&im,
|
||||
&result)); // Check whether the prediction is successful
|
||||
|
||||
std::cout << result.Str() << std::endl;
|
||||
|
||||
cv::Mat vis_im = fastdeploy::vision::Visualize::VisDetection(im, result, 0.5);
|
||||
// sava the visual results
|
||||
cv::imwrite("vis_result.jpg", vis_im);
|
||||
std::cout << "Visualized result save in vis_result.jpg" << std::endl;
|
||||
}
|
||||
@@ -1,21 +0,0 @@
|
||||
/* DO NOT EDIT THIS FILE - it is machine generated */
|
||||
#include <jni.h>
|
||||
/* Header for class InferDemo */
|
||||
|
||||
#ifndef EXAMPLES_APPLICATION_JAVA_PPYOLOE_CPP_INFER_DEMO_H_
|
||||
#define EXAMPLES_APPLICATION_JAVA_PPYOLOE_CPP_INFER_DEMO_H_
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
/*
|
||||
* Class: InferDemo
|
||||
* Method: infer
|
||||
* Signature: (Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;)V
|
||||
*/
|
||||
JNIEXPORT void JNICALL Java_InferDemo_infer
|
||||
(JNIEnv *env, jobject thiz, jstring modelPath, jstring imagePath);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
#endif // EXAMPLES_APPLICATION_JAVA_PPYOLOE_CPP_INFER_DEMO_H_
|
||||
@@ -1,24 +0,0 @@
|
||||
public class InferDemo {
|
||||
|
||||
private native void infer(String modelPath, String imagePath);
|
||||
|
||||
private final static String JNI_LIB_NAME = "../cpp/build/libinferDemo.so";
|
||||
|
||||
static {
|
||||
System.load(InferDemo.class.getResource("/").getPath() + JNI_LIB_NAME);
|
||||
}
|
||||
|
||||
public static void main(String[] args) {
|
||||
if (args.length < 2) {
|
||||
System.out.println("Please input enough params. e.g. java test.java param-dir image-path");
|
||||
return;
|
||||
}
|
||||
String modelPath = args[0];
|
||||
String imagePath = args[1];
|
||||
|
||||
InferDemo inferDemo = new InferDemo();
|
||||
|
||||
inferDemo.infer(modelPath, imagePath);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,54 +0,0 @@
|
||||
English | [简体中文](README_CN.md)
|
||||
# YOLOv5 Java Deployment Example
|
||||
|
||||
This directory provides examples that `java/InferDemo.java` uses `Java` to call FastDeploy `C++` API and finish the deployment of `YOLOv5` model。
|
||||
|
||||
|
||||
Before deployment, two steps require confirmation
|
||||
|
||||
- 1. Software and hardware should meet the requirements. Please refer to [FastDeploy Environment Requirements](../../../../../docs/en/build_and_install/download_prebuilt_libraries.md)
|
||||
- 2. Download the precompiled deployment library and samples code according to your development environment. Refer to [FastDeploy Precompiled Library](../../../../../docs/en/build_and_install/download_prebuilt_libraries.md)
|
||||
|
||||
|
||||
Using `Java` to call `C++` API can be divided into two steps:
|
||||
* Generate dynamic link library in `C++` side.
|
||||
* Call the dynamic link library in `Java` side.
|
||||
|
||||
## Generates dynamic link library
|
||||
First, switch the path to `app` directory and copy `jni.h` and `jni_md.h` which in `jdk` directory to current directory `cpp`.
|
||||
```shell
|
||||
cp /PathJdk/jdk-17.0.6/include/jni.h ./
|
||||
cp /Pathjdk/jdk-17.0.6/include/linux/jni_md.h ./
|
||||
```
|
||||
|
||||
Then, execute the following command in the `cpp` directory to compile and generate the dynamic link library.
|
||||
> Note: you will need to specify the location of the FASTDEPLOY_INSTALL_DIR pre-compile library at compile time, but also the location of your own compiled FastDeploy library.
|
||||
```shell
|
||||
mkdir build && cd build
|
||||
cmake .. -FASTDEPLOY_INSTALL_DIR /fast-deploy-path
|
||||
make -j
|
||||
```
|
||||
After successful compilation, the dynamic link library will be stored in the `cpp/build` directory, ending in `.so` under `Linux` and `.dll` under `Windows`.
|
||||
|
||||
## Invoke dynamic link libraries using JAVA
|
||||
Switch the path to the `java` directory and use the following command to add Fastdeploy library path to the environment variable. Note the path of the `FastDeploy` library replaced with your own.
|
||||
|
||||
```bash
|
||||
source /Path/to/fastdeploy-linux-x64-1.0.4/fastdeploy_init.sh
|
||||
```
|
||||
Download the `YOLOv5` model file and test images.
|
||||
```bash
|
||||
wget https://bj.bcebos.com/paddlehub/fastdeploy/yolov5s.onnx
|
||||
wget https://gitee.com/paddlepaddle/PaddleDetection/raw/release/2.4/demo/000000014439.jpg
|
||||
```
|
||||
|
||||
Compiling Java files.
|
||||
```shell
|
||||
javac InferDemo.java
|
||||
```
|
||||
|
||||
After compiling, run the following command to get the predicted result, where the first parameter indicates the path of the downloaded model and the second parameter indicates the path of the test image.
|
||||
```shell
|
||||
java InferDemo ./yolov5s.onnx ./000000014439.jpg
|
||||
```
|
||||
Then visualized inspection result is saved in the local image `vis_result.jpg`.
|
||||
@@ -1,55 +0,0 @@
|
||||
[English](README.md) | 简体中文
|
||||
# YOLOv5 Java 部署示例
|
||||
|
||||
本目录下提供`java/InferDemo.java`, 使用`Java`调用`C++`API快速完成`PaddleDetection`模型`YOLOv5`在Linux上部署的示例。
|
||||
|
||||
在部署前,需确认以下两个步骤
|
||||
|
||||
- 1. 软硬件环境满足要求,参考[FastDeploy环境要求](../../../../../docs/cn/build_and_install/download_prebuilt_libraries.md)
|
||||
- 2. 根据开发环境,下载预编译部署库和samples代码,参考[FastDeploy预编译库](../../../../../docs/cn/build_and_install/download_prebuilt_libraries.md)
|
||||
|
||||
|
||||
|
||||
使用`Java`调用`C++` API 可以分为两步:
|
||||
|
||||
* 在`C++`端生成动态链接库。
|
||||
* 在`Java`端调用动态链接库。
|
||||
|
||||
## C++端生成动态链接库
|
||||
首先,切换路径到`cpp`目录,将`jdk`目录下的`jni.h`和`jni_md.h`拷贝到当前`cpp`目录下。
|
||||
```shell
|
||||
cp /PathJdk/jdk-17.0.6/include/jni.h ./
|
||||
cp /Pathjdk/jdk-17.0.6/include/linux/jni_md.h ./
|
||||
```
|
||||
|
||||
接着,在`cpp`目录下执行以下命令,进行编译,生成动态链接库。
|
||||
> 注意:编译时需要通过`FASTDEPLOY_INSTALL_DIR`选项指明`FastDeploy`预编译库位置, 当然也可以是自己编译的`FastDeploy`库位置。
|
||||
```shell
|
||||
mkdir build && cd build
|
||||
cmake .. -FASTDEPLOY_INSTALL_DIR /fast-deploy-path
|
||||
make -j
|
||||
```
|
||||
编译成功后,动态链接库会存放在`cpp/build`目录下,`Linux`下以`.so`结尾,`Windows`下以`.dll`结尾。
|
||||
|
||||
## 使用JAVA调用动态链接库
|
||||
|
||||
将`FastDeploy`的库路径添加到环境变量,注意替换为自己的`FastDeploy`库所在路径。
|
||||
```bash
|
||||
source /Path/to/fastdeploy-linux-x64-1.0.4/fastdeploy_init.sh
|
||||
```
|
||||
下载YOLOv5模型文件和测试图片
|
||||
```bash
|
||||
wget https://bj.bcebos.com/paddlehub/fastdeploy/yolov5s.onnx
|
||||
wget https://gitee.com/paddlepaddle/PaddleDetection/raw/release/2.4/demo/000000014439.jpg
|
||||
```
|
||||
|
||||
编译Java文件
|
||||
```shell
|
||||
javac InferDemo.java
|
||||
```
|
||||
|
||||
编译完成后,执行如下命令可得到预测结果,其中第一个参数指明下载的模型路径,第二个参数指明测试图片路径。
|
||||
```shell
|
||||
java InferDemo ./yolov5s.onnx ./000000014439.jpg
|
||||
```
|
||||
可视化的检测结果图片保存在本地`vis_result.jpg`。
|
||||
@@ -1,24 +0,0 @@
|
||||
# Sets the minimum version of CMake required to build the native library.
|
||||
cmake_minimum_required(VERSION 3.22.1)
|
||||
|
||||
# Declares and names the project.
|
||||
project("inferDemo")
|
||||
|
||||
option(FASTDEPLOY_INSTALL_DIR "Path of downloaded fastdeploy sdk.")
|
||||
set(FastDeploy_DIR ${FASTDEPLOY_INSTALL_DIR})
|
||||
find_package(FastDeploy REQUIRED)
|
||||
|
||||
|
||||
include_directories(${CMAKE_CURRENT_SOURCE_DIR})
|
||||
include_directories(${FastDeploy_INCLUDE_DIRS})
|
||||
|
||||
add_library(
|
||||
inferDemo
|
||||
SHARED
|
||||
infer_demo.cc
|
||||
)
|
||||
|
||||
target_link_libraries(
|
||||
inferDemo
|
||||
${FASTDEPLOY_LIBS}
|
||||
)
|
||||
@@ -1,53 +0,0 @@
|
||||
#include "infer_demo.h"
|
||||
|
||||
#include "fastdeploy/vision.h"
|
||||
|
||||
std::string ConvertTo(JNIEnv *env, jstring jstr) {
|
||||
if (!jstr) {
|
||||
return "";
|
||||
}
|
||||
const jclass jstring_clazz = env->GetObjectClass(jstr);
|
||||
const jmethodID getBytesID =
|
||||
env->GetMethodID(jstring_clazz, "getBytes", "(Ljava/lang/String;)[B");
|
||||
const jbyteArray jstring_bytes = reinterpret_cast<jbyteArray>(
|
||||
env->CallObjectMethod(jstr, getBytesID, env->NewStringUTF("UTF-8")));
|
||||
|
||||
size_t length = static_cast<size_t>(env->GetArrayLength(jstring_bytes));
|
||||
jbyte *jstring_bytes_ptr = env->GetByteArrayElements(jstring_bytes, NULL);
|
||||
|
||||
std::string res =
|
||||
std::string(reinterpret_cast<char *>(jstring_bytes_ptr), length);
|
||||
env->ReleaseByteArrayElements(jstring_bytes, jstring_bytes_ptr, JNI_ABORT);
|
||||
|
||||
env->DeleteLocalRef(jstring_bytes);
|
||||
env->DeleteLocalRef(jstring_clazz);
|
||||
return res;
|
||||
}
|
||||
|
||||
JNIEXPORT void JNICALL Java_InferDemo_infer(JNIEnv *env, jobject thiz,
|
||||
jstring modelPath,
|
||||
jstring imagePath) {
|
||||
std::string model_path = ConvertTo(env, modelPath);
|
||||
|
||||
// Configuration information for model inference
|
||||
fastdeploy::RuntimeOption option;
|
||||
auto model = fastdeploy::vision::detection::YOLOv5(
|
||||
model_path, "", option, fastdeploy::ModelFormat::ONNX);
|
||||
|
||||
assert(model.Initialized()); // Check whether the model is successfully
|
||||
// initialized
|
||||
|
||||
std::string image_path = ConvertTo(env, imagePath);
|
||||
cv::Mat im = cv::imread(image_path);
|
||||
fastdeploy::vision::DetectionResult result;
|
||||
|
||||
assert(model.Predict(&im,
|
||||
&result)); // Check whether the prediction is successful
|
||||
|
||||
std::cout << result.Str() << std::endl;
|
||||
|
||||
cv::Mat vis_im = fastdeploy::vision::Visualize::VisDetection(im, result, 0.5);
|
||||
// sava the visual results
|
||||
cv::imwrite("vis_result.jpg", vis_im);
|
||||
std::cout << "Visualized result save in vis_result.jpg" << std::endl;
|
||||
}
|
||||
@@ -1,21 +0,0 @@
|
||||
/* DO NOT EDIT THIS FILE - it is machine generated */
|
||||
#include <jni.h>
|
||||
/* Header for class InferDemo */
|
||||
|
||||
#ifndef EXAMPLES_APPLICATION_JAVA_YOLOV5_CPP_INFER_DEMO_H_
|
||||
#define EXAMPLES_APPLICATION_JAVA_YOLOV5_CPP_INFER_DEMO_H_
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
/*
|
||||
* Class: InferDemo
|
||||
* Method: infer
|
||||
* Signature: (Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;)V
|
||||
*/
|
||||
JNIEXPORT void JNICALL Java_InferDemo_infer
|
||||
(JNIEnv *env, jobject thiz, jstring modelPath, jstring imagePath);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
#endif // EXAMPLES_APPLICATION_JAVA_YOLOV5_CPP_INFER_DEMO_H_
|
||||
@@ -1,24 +0,0 @@
|
||||
public class InferDemo {
|
||||
|
||||
private native void infer(String modelPath, String imagePath);
|
||||
|
||||
private final static String JNI_LIB_NAME = "../cpp/build/libinferDemo.so";
|
||||
|
||||
static {
|
||||
System.load(InferDemo.class.getResource("/").getPath() + JNI_LIB_NAME);
|
||||
}
|
||||
|
||||
public static void main(String[] args) {
|
||||
if (args.length < 2) {
|
||||
System.out.println("Please input enough params. e.g. java test.java param-dir image-path");
|
||||
return;
|
||||
}
|
||||
String modelPath = args[0];
|
||||
String imagePath = args[1];
|
||||
|
||||
InferDemo inferDemo = new InferDemo();
|
||||
|
||||
inferDemo.infer(modelPath, imagePath);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,54 +0,0 @@
|
||||
English | [简体中文](README_CN.md)
|
||||
# YOLOv8 Java Deployment Example
|
||||
|
||||
This directory provides examples that `java/InferDemo.java` uses `Java` to call FastDeploy `C++` API and finish the deployment of `YOLOv8` model。
|
||||
|
||||
|
||||
Before deployment, two steps require confirmation
|
||||
|
||||
- 1. Software and hardware should meet the requirements. Please refer to [FastDeploy Environment Requirements](../../../../../docs/en/build_and_install/download_prebuilt_libraries.md)
|
||||
- 2. Download the precompiled deployment library and samples code according to your development environment. Refer to [FastDeploy Precompiled Library](../../../../../docs/en/build_and_install/download_prebuilt_libraries.md)
|
||||
|
||||
|
||||
Using `Java` to call `C++` API can be divided into two steps:
|
||||
* Generate dynamic link library in `C++` side.
|
||||
* Call the dynamic link library in `Java` side.
|
||||
|
||||
## Generates dynamic link library
|
||||
First, switch the path to the `cpp` directory and copy `jni.h` and `jni_md.h` which in `jdk` directory to current directory `cpp`.
|
||||
```shell
|
||||
cp /PathJdk/jdk-17.0.6/include/jni.h ./
|
||||
cp /Pathjdk/jdk-17.0.6/include/linux/jni_md.h ./
|
||||
```
|
||||
|
||||
Then, execute the following command in the `cpp` directory to compile and generate the dynamic link library.
|
||||
> Note: you will need to specify the location of the FASTDEPLOY_INSTALL_DIR pre-compile library at compile time, but also the location of your own compiled FastDeploy library.
|
||||
```shell
|
||||
mkdir build && cd build
|
||||
cmake .. -FASTDEPLOY_INSTALL_DIR /fast-deploy-path
|
||||
make -j
|
||||
```
|
||||
After successful compilation, the dynamic link library will be stored in the `cpp/build` directory, ending in `.so` under `Linux` and `.dll` under `Windows`.
|
||||
|
||||
## Invoke dynamic link libraries using JAVA
|
||||
Switch the path to the `java` directory and use the following command to add Fastdeploy library path to the environment variable. Note the path of the `FastDeploy` library replaced with your own.
|
||||
|
||||
```bash
|
||||
source /Path/to/fastdeploy-linux-x64-1.0.4/fastdeploy_init.sh
|
||||
```
|
||||
Download the `YOLOv8` model file and test images.
|
||||
```bash
|
||||
wget https://bj.bcebos.com/paddlehub/fastdeploy/yolov8s.onnx
|
||||
wget https://gitee.com/paddlepaddle/PaddleDetection/raw/release/2.4/demo/000000014439.jpg
|
||||
```
|
||||
|
||||
Compiling Java files.
|
||||
```shell
|
||||
javac InferDemo.java
|
||||
```
|
||||
|
||||
After compiling, run the following command to get the predicted result, where the first parameter indicates the path of the downloaded model and the second parameter indicates the path of the test image.
|
||||
```shell
|
||||
java InferDemo ./yolov8s.onnx ./000000014439.jpg
|
||||
```
|
||||
Then visualized inspection result is saved in the local image `vis_result.jpg`.
|
||||
@@ -1,55 +0,0 @@
|
||||
[English](README.md) | 简体中文
|
||||
# YOLOv8 Java 部署示例
|
||||
|
||||
本目录下提供`java/InferDemo.java`, 使用`Java`调用`C++`API快速完成`PaddleDetection`模型`YOLOv8`在Linux上部署的示例。
|
||||
|
||||
在部署前,需确认以下两个步骤
|
||||
|
||||
- 1. 软硬件环境满足要求,参考[FastDeploy环境要求](../../../../../docs/cn/build_and_install/download_prebuilt_libraries.md)
|
||||
- 2. 根据开发环境,下载预编译部署库和samples代码,参考[FastDeploy预编译库](../../../../../docs/cn/build_and_install/download_prebuilt_libraries.md)
|
||||
|
||||
|
||||
|
||||
使用`Java`调用`C++` API 可以分为两步:
|
||||
|
||||
* 在`C++`端生成动态链接库。
|
||||
* 在`Java`端调用动态链接库。
|
||||
|
||||
## C++端生成动态链接库
|
||||
首先,切换路径到`cpp`目录,将`jdk`目录下的`jni.h`和`jni_md.h`拷贝到当前`cpp`目录下。
|
||||
```shell
|
||||
cp /PathJdk/jdk-17.0.6/include/jni.h ./
|
||||
cp /Pathjdk/jdk-17.0.6/include/linux/jni_md.h ./
|
||||
```
|
||||
|
||||
接着,在`cpp`目录下执行以下命令,进行编译,生成动态链接库。
|
||||
> 注意:编译时需要通过`FASTDEPLOY_INSTALL_DIR`选项指明`FastDeploy`预编译库位置, 当然也可以是自己编译的`FastDeploy`库位置。
|
||||
```shell
|
||||
mkdir build && cd build
|
||||
cmake .. -FASTDEPLOY_INSTALL_DIR /fast-deploy-path
|
||||
make -j
|
||||
```
|
||||
编译成功后,动态链接库会存放在`cpp/build`目录下,`Linux`下以`.so`结尾,`Windows`下以`.dll`结尾。
|
||||
|
||||
## 使用JAVA调用动态链接库
|
||||
切换路径到`java`目录下,将`FastDeploy`的库路径添加到环境变量,注意替换为自己的`FastDeploy`库所在路径。
|
||||
```bash
|
||||
source /Path/to/fastdeploy-linux-x64-0.0.0/fastdeploy_init.sh
|
||||
```
|
||||
|
||||
下载YOLOv8模型文件和测试图片
|
||||
```bash
|
||||
wget https://bj.bcebos.com/paddlehub/fastdeploy/yolov8s.onnx
|
||||
wget https://gitee.com/paddlepaddle/PaddleDetection/raw/release/2.4/demo/000000014439.jpg
|
||||
```
|
||||
|
||||
编译Java文件
|
||||
```shell
|
||||
javac InferDemo.java
|
||||
```
|
||||
|
||||
编译完成后,执行如下命令可得到预测结果,其中第一个参数指明下载的模型路径,第二个参数指明测试图片路径。
|
||||
```shell
|
||||
java InferDemo ./yolov8s.onnx ./000000014439.jpg
|
||||
```
|
||||
可视化的检测结果图片保存在本地`vis_result.jpg`。
|
||||
@@ -1,23 +0,0 @@
|
||||
# Sets the minimum version of CMake required to build the native library.
|
||||
cmake_minimum_required(VERSION 3.22.1)
|
||||
|
||||
# Declares and names the project.
|
||||
project("inferDemo")
|
||||
|
||||
option(FASTDEPLOY_INSTALL_DIR "Path of downloaded fastdeploy sdk.")
|
||||
set(FastDeploy_DIR ${FASTDEPLOY_INSTALL_DIR})
|
||||
find_package(FastDeploy REQUIRED)
|
||||
|
||||
include_directories(${CMAKE_CURRENT_SOURCE_DIR})
|
||||
include_directories(${FastDeploy_INCLUDE_DIRS})
|
||||
|
||||
add_library(
|
||||
inferDemo
|
||||
SHARED
|
||||
infer_demo.cc
|
||||
)
|
||||
|
||||
target_link_libraries(
|
||||
inferDemo
|
||||
${FASTDEPLOY_LIBS}
|
||||
)
|
||||
@@ -1,53 +0,0 @@
|
||||
#include "infer_demo.h"
|
||||
|
||||
#include "fastdeploy/vision.h"
|
||||
|
||||
std::string ConvertTo(JNIEnv *env, jstring jstr) {
|
||||
if (!jstr) {
|
||||
return "";
|
||||
}
|
||||
const jclass jstring_clazz = env->GetObjectClass(jstr);
|
||||
const jmethodID getBytesID =
|
||||
env->GetMethodID(jstring_clazz, "getBytes", "(Ljava/lang/String;)[B");
|
||||
const jbyteArray jstring_bytes = reinterpret_cast<jbyteArray>(
|
||||
env->CallObjectMethod(jstr, getBytesID, env->NewStringUTF("UTF-8")));
|
||||
|
||||
size_t length = static_cast<size_t>(env->GetArrayLength(jstring_bytes));
|
||||
jbyte *jstring_bytes_ptr = env->GetByteArrayElements(jstring_bytes, NULL);
|
||||
|
||||
std::string res =
|
||||
std::string(reinterpret_cast<char *>(jstring_bytes_ptr), length);
|
||||
env->ReleaseByteArrayElements(jstring_bytes, jstring_bytes_ptr, JNI_ABORT);
|
||||
|
||||
env->DeleteLocalRef(jstring_bytes);
|
||||
env->DeleteLocalRef(jstring_clazz);
|
||||
return res;
|
||||
}
|
||||
|
||||
JNIEXPORT void JNICALL Java_InferDemo_infer(JNIEnv *env, jobject thiz,
|
||||
jstring modelPath,
|
||||
jstring imagePath) {
|
||||
std::string model_path = ConvertTo(env, modelPath);
|
||||
|
||||
// Configuration information for model inference
|
||||
fastdeploy::RuntimeOption option;
|
||||
auto model = fastdeploy::vision::detection::YOLOv8(
|
||||
model_path, "", option, fastdeploy::ModelFormat::ONNX);
|
||||
|
||||
assert(model.Initialized()); // Check whether the model is successfully
|
||||
// initialized
|
||||
|
||||
std::string image_path = ConvertTo(env, imagePath);
|
||||
cv::Mat im = cv::imread(image_path);
|
||||
fastdeploy::vision::DetectionResult result;
|
||||
|
||||
assert(model.Predict(im,
|
||||
&result)); // Check whether the prediction is successful
|
||||
|
||||
std::cout << result.Str() << std::endl;
|
||||
|
||||
cv::Mat vis_im = fastdeploy::vision::Visualize::VisDetection(im, result, 0.5);
|
||||
// sava the visual results
|
||||
cv::imwrite("vis_result.jpg", vis_im);
|
||||
std::cout << "Visualized result save in vis_result.jpg" << std::endl;
|
||||
}
|
||||
@@ -1,21 +0,0 @@
|
||||
/* DO NOT EDIT THIS FILE - it is machine generated */
|
||||
#include <jni.h>
|
||||
/* Header for class InferDemo */
|
||||
|
||||
#ifndef EXAMPLES_APPLICATION_JAVA_YOLOV8_CPP_INFER_DEMO_H_
|
||||
#define EXAMPLES_APPLICATION_JAVA_YOLOV8_CPP_INFER_DEMO_H_
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
/*
|
||||
* Class: InferDemo
|
||||
* Method: infer
|
||||
* Signature: (Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;)V
|
||||
*/
|
||||
JNIEXPORT void JNICALL Java_InferDemo_infer
|
||||
(JNIEnv *env, jobject thiz, jstring modelPath, jstring imagePath);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
#endif // EXAMPLES_APPLICATION_JAVA_YOLOV8_CPP_INFER_DEMO_H_
|
||||
@@ -1,24 +0,0 @@
|
||||
public class InferDemo {
|
||||
|
||||
private native void infer(String modelPath, String imagePath);
|
||||
|
||||
private final static String JNI_LIB_NAME = "../cpp/build/libinferDemo.so";
|
||||
|
||||
static {
|
||||
System.load(InferDemo.class.getResource("/").getPath() + JNI_LIB_NAME);
|
||||
}
|
||||
|
||||
public static void main(String[] args) {
|
||||
if (args.length < 2) {
|
||||
System.out.println("Please input enough params. e.g. java test.java param-dir image-path");
|
||||
return;
|
||||
}
|
||||
String modelPath = args[0];
|
||||
String imagePath = args[1];
|
||||
|
||||
InferDemo inferDemo = new InferDemo();
|
||||
|
||||
inferDemo.infer(modelPath, imagePath);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,40 +0,0 @@
|
||||
English | [简体中文](README_CN.md)
|
||||
|
||||
# Front-end AI application
|
||||
|
||||
The development of artificial intelligence technology has led to industrial upgrading in the fields of computer vision(CV) and natural language processing(NLP). In addition, the deployment of AI models in browsers to achieve front-end intelligence has already provided good basic conditions with the steady increase in computing power on PCs and mobile devices, iterative updates of model compression technologies, and the continuous emergence of various innovative needs.
|
||||
In response to the difficulty of deploying AI deep learning models on the front-end, Baidu has open-sourced the Paddle.js front-end deep learning model deployment framework, which can easily deploy deep learning models into front-end projects.
|
||||
|
||||
# Introduction of Paddle.js
|
||||
|
||||
[Paddle.js](https://github.com/PaddlePaddle/Paddle.js) is a web sub-project of Baidu `PaddlePaddle`, an open source deep learning framework running in the browser. `Paddle.js` can load the deep learning model trained by `PaddlePaddle`, and convert it into a browser-friendly model through the model conversion tool `paddlejs-converter` of `Paddle.js`, which is easy to use for online reasoning and prediction. `Paddle.js` supports running in browsers of `WebGL/WebGPU/WebAssembly`, and can also run in the environment of Baidu applet and WeChat applet.
|
||||
|
||||
Finally, we can launch AI functions in front-end application scenarios such as browsers and mini-program using `Paddle.js`, including but not limited to AI capabilities such as object detection, image segmentation, OCR, and item classification.
|
||||
|
||||
## Web Demo
|
||||
|
||||
Refer to this [document](./WebDemo.md) for steps to run computer vision demo in the browser.
|
||||
|
||||
|demo|web demo directory|visualization|
|
||||
|-|-|-|
|
||||
|object detection|[ScrewDetection、FaceDetection](./web_demo/src/pages/cv/detection/)| <img src="https://user-images.githubusercontent.com/26592129/196874536-b7fa2c0a-d71f-4271-8c40-f9088bfad3c9.png" height="200px">|
|
||||
|human segmentation|[HumanSeg](./web_demo/src/pages/cv/segmentation/HumanSeg)|<img src="https://user-images.githubusercontent.com/26592129/196874452-4ef2e770-fbb3-4a35-954b-f871716d6669.png" height="200px">|
|
||||
|classification|[GestureRecognition、ItemIdentification](./web_demo/src/pages/cv/recognition/)|<img src="https://user-images.githubusercontent.com/26592129/196874416-454e6bb0-4ebd-4b51-a88a-8c40614290ae.png" height="200px">|
|
||||
|OCR|[TextDetection、TextRecognition](./web_demo/src/pages/cv/ocr/)|<img src="https://user-images.githubusercontent.com/26592129/196874354-1b5eecb0-f273-403c-aa6c-4463bf6d78db.png" height="200px">|
|
||||
|
||||
|
||||
## Wechat Mini-program
|
||||
|
||||
Run the official demo reference in the WeChat mini-program [document](./mini_program/README.md)
|
||||
|
||||
|Name|Directory|
|
||||
|-|-|
|
||||
|OCR Text Detection| [ocrdetecXcx](./mini_program/ocrdetectXcx/) |
|
||||
|OCR Text Recognition| [ocrXcx](./mini_program/ocrXcx/) |
|
||||
|object detection| coming soon |
|
||||
|Image segmentation | coming soon |
|
||||
|Item Category| coming soon |
|
||||
|
||||
## Contributor
|
||||
|
||||
Thanks to Paddle Paddle Developer Expert (PPDE) Chen Qianhe (github: [chenqianhe](https://github.com/chenqianhe)) for the Web demo, mini-program.
|
||||
@@ -1,42 +0,0 @@
|
||||
[English](README.md) | 简体中文
|
||||
|
||||
# 前端AI应用
|
||||
|
||||
人工智能技术的快速发展带动了计算机视觉、自然语言处理领域的产业升级。另外,随着PC和移动设备上算力的稳步增强、模型压缩技术迭代更新以及各种创新需求的不断催生,在浏览器中部署AI模型实现前端智能已经具备了良好的基础条件。
|
||||
针对前端部署AI深度学习模型困难的问题,百度开源了Paddle.js前端深度学习模型部署框架,可以很容易的将深度学习模型部署到前端项目中。
|
||||
|
||||
## Paddle.js简介
|
||||
|
||||
[Paddle.js](https://github.com/PaddlePaddle/Paddle.js)是百度`PaddlePaddle`的web方向子项目,是一个运行在浏览器中的开源深度学习框架。`Paddle.js`可以加载`PaddlePaddle`动转静的模型,经过`Paddle.js`的模型转换工具`paddlejs-converter`转换成浏览器友好的模型,易于在线推理预测使用。`Paddle.js`支持`WebGL/WebGPU/WebAssembly`的浏览器中运行,也可以在百度小程序和微信小程序环境下运行。
|
||||
|
||||
简言之,利用Paddle.js,我们可以在浏览器、小程序等前端应用场景上线AI功能,包括但不限于目标检测,图像分割,OCR,物品分类等AI能力。
|
||||
|
||||
## Web Demo使用
|
||||
|
||||
在浏览器中直接运行官方demo参考[文档](./WebDemo.md)
|
||||
|
||||
|demo名称|web demo目录|可视化|
|
||||
|-|-|-|
|
||||
|目标检测|[ScrewDetection、FaceDetection](./web_demo/src/pages/cv/detection/)| <img src="https://user-images.githubusercontent.com/26592129/196874536-b7fa2c0a-d71f-4271-8c40-f9088bfad3c9.png" height="200px">|
|
||||
|人像分割背景替换|[HumanSeg](./web_demo/src/pages/cv/segmentation/HumanSeg)|<img src="https://user-images.githubusercontent.com/26592129/196874452-4ef2e770-fbb3-4a35-954b-f871716d6669.png" height="200px">|
|
||||
|物体识别|[GestureRecognition、ItemIdentification](./web_demo/src/pages/cv/recognition/)|<img src="https://user-images.githubusercontent.com/26592129/196874416-454e6bb0-4ebd-4b51-a88a-8c40614290ae.png" height="200px">|
|
||||
|OCR|[TextDetection、TextRecognition](./web_demo/src/pages/cv/ocr/)|<img src="https://user-images.githubusercontent.com/26592129/196874354-1b5eecb0-f273-403c-aa6c-4463bf6d78db.png" height="200px">|
|
||||
|
||||
|
||||
## 微信小程序Demo使用
|
||||
|
||||
在微信小程序运行官方demo参考[文档](./mini_program/README.md)
|
||||
|
||||
|名称|目录|
|
||||
|-|-|
|
||||
|OCR文本检测| [ocrdetecXcx](./mini_program/ocrdetectXcx/) |
|
||||
|OCR文本识别| [ocrXcx](./mini_program/ocrXcx/) |
|
||||
|目标检测| coming soon |
|
||||
|图像分割| coming soon |
|
||||
|物品分类| coming soon |
|
||||
|
||||
|
||||
## Contributor
|
||||
|
||||
感谢飞桨开发者专家(PPDE) 陈千鹤(github: [chenqianhe](https://github.com/chenqianhe))贡献的Web demo, 小程序。
|
||||
|
||||
@@ -1,176 +0,0 @@
|
||||
English | [简体中文](WebDemo_CN.md)
|
||||
|
||||
# Introduction to Web Demo
|
||||
|
||||
- [Introduction](#0)
|
||||
- [1. Quick Start](#1)
|
||||
- [2. npm package call](#2)
|
||||
- [3. Model Replacement](#3)
|
||||
- [4. custom hyperparameters](#4)
|
||||
- [5. Other](#5)
|
||||
|
||||
<a name="0"></a>
|
||||
## Introduction
|
||||
|
||||
Based on [Paddle.js](https://github.com/PaddlePaddle/Paddle.js), this project implements computer vision tasks such as target detection, portrait segmentation, OCR, and item classification in the browser.
|
||||
|
||||
|
||||
|demo name|web demo component|source directory|npm package|
|
||||
|-|-|-|-|
|
||||
|Face Detection|[FaceDetection](./web_demo/src/pages/cv/detection/FaceDetection/)| [facedetect](./package/packages/paddlejs-models/facedetect)|[@paddle-js-models/ facedetect](https://www.npmjs.com/package/@paddle-js-models/facedetect)|
|
||||
|Screw Detection|[ScrewDetection](./web_demo/src/pages/cv/detection/ScrewDetection)| [detect](./package/packages/paddlejs-models/detect)|[@paddle-js-models/detect](https://www.npmjs.com/package/@paddle-js-models/detect)|
|
||||
|Portrait segmentation background replacement|[HumanSeg](./web_demo/src/pages/cv/segmentation/HumanSeg)|[humanseg](./package/packages/paddlejs-models/humanseg)|[@paddle-js-models/ humanseg](https://www.npmjs.com/package/@paddle-js-models/humanseg)|
|
||||
|Gesture Recognition AI Guessing Shell|[GestureRecognition](./web_demo/src/pages/cv/recognition/GestureRecognition)|[gesture](./package/packages/paddlejs-models/gesture)|[@paddle-js- models/gesture](https://www.npmjs.com/package/@paddle-js-models/gesture)|
|
||||
|1000 Item Identification|[ItemIdentification](./web_demo/src/pages/cv/recognition/ItemIdentification)|[mobilenet](./package/packages/paddlejs-models/mobilenet)|[@paddle-js-models/ mobilenet](https://www.npmjs.com/package/@paddle-js-models/mobilenet)|
|
||||
|Text Detection|[TextDetection](./web_demo/src/pages/cv/ocr/TextDetection)|[ocrdetection](./package/packages/paddlejs-models/ocrdetection)|[@paddle-js-models/ocrdet](https://www.npmjs.com/package/@paddle-js-models/ocrdet)|
|
||||
|Text Recognition|[TextRecognition](./web_demo/src/pages/cv/ocr/TextRecognition)|[ocr](./package/packages/paddlejs-models/ocr)|[@paddle-js-models/ocr](https://www.npmjs.com/package/@paddle-js-models/ocr)|
|
||||
|
||||
|
||||
<a name="1"></a>
|
||||
## 1. Quick Start
|
||||
|
||||
This section describes how to run the official demo directly in the browser.
|
||||
|
||||
**1. Install Node.js**
|
||||
|
||||
Download the `Node.js` installation package suitable for your platform from the `Node.js` official website https://nodejs.org/en/download/ and install it.
|
||||
|
||||
**2. Install demo dependencies and start**
|
||||
Execute the following command in the `./web_demo` directory:
|
||||
|
||||
````
|
||||
# install dependencies
|
||||
npm install
|
||||
# start demo
|
||||
npm run dev
|
||||
````
|
||||
|
||||
Open the URL `http://localhost:5173/main/index.html` in the browser to quickly experience running computer vision tasks in the browser.
|
||||
|
||||

|
||||
|
||||
|
||||
<a name="2"></a>
|
||||
## 2. npm package call
|
||||
|
||||
This section introduces how to use npm packages. Each demo provides an easy-to-use interface. Users only need to initialize and upload images to get the results. The steps are as follows:
|
||||
1. Call the module
|
||||
2. Initialize the model
|
||||
3. Pass in input, perform prediction
|
||||
|
||||
Taking OCR as an example, in a front-end project, the `@paddle-js-models/ocr` package is used as follows:
|
||||
|
||||
````
|
||||
// 1. Call the ocr module
|
||||
import * as ocr from '@paddle-js-models/ocr';
|
||||
|
||||
// 2. Initialize the ocr model
|
||||
await ocr.init();
|
||||
|
||||
// 3. Pass in an image of type HTMLImageElement as input and get the result
|
||||
const res = await ocr.recognize(img);
|
||||
|
||||
// Print the text coordinates and text content obtained by the OCR model
|
||||
console.log(res.text);
|
||||
console.log(res.points);
|
||||
````
|
||||
|
||||
<a name="3"></a>
|
||||
## 3. Model replacement
|
||||
|
||||
Due to the limitations of the front-end environment and computing resources, when deploying deep learning models on the front-end, we have stricter requirements on the performance of the models. In short, the models need to be lightweight enough. In theory, the smaller the input shape of the model and the smaller the model size, the smaller the flops of the corresponding model, and the smoother the front-end operation. Based on experience, the model storage deployed with `Paddle.js` should not exceed *5M* as much as possible, and the actual situation depends on the hardware and computing resources.
|
||||
|
||||
In practical applications, models are often customized according to vertical scenarios, and the official demo supports modifying incoming parameters to replace models.
|
||||
|
||||
Take the OCR demo as an example, [ocr.init()function](https://github.com/PaddlePaddle/FastDeploy/tree/develop/examples/application/js/package/packages/paddlejs-models/ocr/src/index.ts#L52), contains the default initialization model link, if you want to replace the model, please refer to the following steps.
|
||||
|
||||
Step 1: Convert the model to js format:
|
||||
````
|
||||
# Install paddlejsconverter
|
||||
pip3 install paddlejsconverter
|
||||
# Convert the model format, the input model is the inference model
|
||||
paddlejsconverter --modelPath=./inference.pdmodel --paramPath=./inference.pdiparams --outputDir=./ --useGPUOpt=True
|
||||
# Note: The useGPUOpt option is not enabled by default. If the model is used on the gpu backend (webgl/webgpu), enable useGPUOpt. If the model is running on (wasm/plain js), do not enable it.
|
||||
````
|
||||
|
||||
After the export is successful, files such as `model.json chunk_1.dat` will appear in the local directory, which are the network structure and model parameter binary files corresponding to the js model.
|
||||
|
||||
Step 2: Upload the exported js model to a server that supports cross-domain access. For the CORS configuration of the server, refer to the following image:
|
||||

|
||||
|
||||
|
||||
Step 3: Modify the code to replace the default model. Take the OCR demo as an example, modify the [model initialization code](https://github.com/PaddlePaddle/FastDeploy/tree/develop/examples/application/js/web_demo/src/pages/cv/ocr/TextRecognition/TextRecognition.vue#L64) in the OCR web demo , i.e.
|
||||
|
||||
````
|
||||
await ocr.init();
|
||||
change into:
|
||||
await ocr.init({modelPath: "https://js-models.bj.bcebos.com/PaddleOCR/PP-OCRv3/ch_PP-OCRv3_det_infer_js_960/model.json"}); # The first parameter passes in the new text Check dictionary type parameter
|
||||
````
|
||||
|
||||
Re-execute the following command in the demo directory to experience the new model effect.
|
||||
````
|
||||
npm run dev
|
||||
````
|
||||
|
||||
<a name="4"></a>
|
||||
## 4. custom hyperparameters
|
||||
|
||||
**Custom preprocessing parameters**
|
||||
|
||||
In different computer vision tasks, different models may have different preprocessing parameters, such as mean, std, keep_ratio and other parameters. After replacing the model, the preprocessing parameters also need to be modified. A simple solution for customizing preprocessing parameters is provided in the npm package published by paddle.js. You only need to pass in custom parameters when calling the model initialization function.
|
||||
|
||||
````
|
||||
# Default parameter initialization
|
||||
await model.init();
|
||||
|
||||
Custom parameter initialization
|
||||
const Config = {mean: [0.5, 0.5, 0.5], std: [0.5, 0.5, 0.5], keepratio: false};
|
||||
await model.init(Config);
|
||||
````
|
||||
|
||||
Taking the OCR text detection demo as an example, to modify the mean and std parameters of the model preprocessing, you only need to pass in the custom mean and std parameters when the model is initialized.
|
||||
````
|
||||
await ocr.init();
|
||||
change into:
|
||||
const detConfig = {mean: [0.5, 0.5, 0.5], std: [0.5, 0.5, 0.5]};
|
||||
await ocr.init(detConfig); # The first parameter passes in the new text detection model link
|
||||
````
|
||||
|
||||
**Custom postprocessing parameters**
|
||||
|
||||
Similarly, the npm package published by paddle.js also provides a custom solution for post-processing parameters.
|
||||
|
||||
````
|
||||
# run with default parameters
|
||||
await model.predict();
|
||||
|
||||
# custom post-processing parameters
|
||||
const postConfig = {thresh: 0.5};
|
||||
await model.predict(Config);
|
||||
````
|
||||
|
||||
Take the OCR text detection demo as an example, modify the parameters of the text detection post-processing to achieve the effect of expanding the text detection frame, and modify the OCR web demo to execute the [model prediction code](https://github.com/PaddlePaddle/FastDeploy/tree/develop/examples/application/js/web_demo/src/pages/cv/ocr/TextRecognition/TextRecognition.vue#L99), ie:
|
||||
|
||||
````
|
||||
const res = await ocr.recognize(img, { canvas: canvas.value });
|
||||
change into:
|
||||
// Define hyperparameters, increase the unclip_ratio parameter from 1.5 to 3.5
|
||||
const detConfig = {shape: 960, thresh: 0.3, box_thresh: 0.6, unclip_ratio:3.5};
|
||||
const res = await ocr.recognize(img, { canvas: canvas.value }, detConfig);
|
||||
````
|
||||
|
||||
Note: Different tasks have different post-processing parameters. For detailed parameters, please refer to the API in the npm package.
|
||||
|
||||
<a name="5"></a>
|
||||
## 5. Others
|
||||
|
||||
The converted model of `Paddle.js` can not only be used in the browser, but also can be run in the Baidu mini-program and WeChat mini-program environment.
|
||||
|
||||
|Name|Directory|
|
||||
|-|-|
|
||||
|OCR Text Detection| [ocrdetecXcx](./mini_program/ocrdetectXcx/) |
|
||||
|OCR Text Recognition| [ocrXcx](./mini_program/ocrXcx/) |
|
||||
|target detection| coming soon |
|
||||
| Image segmentation | coming soon |
|
||||
|Item Category| coming soon |
|
||||
|
||||
@@ -1,174 +0,0 @@
|
||||
[English](WebDemo.md) | 简体中文
|
||||
|
||||
# Web Demo介绍
|
||||
|
||||
- [简介](#0)
|
||||
- [1. 快速开始](#1)
|
||||
- [2. npm包调用](#2)
|
||||
- [3. 模型替换](#3)
|
||||
- [4. 自定义前后处理参数](#4)
|
||||
- [5. 其他](#5)
|
||||
|
||||
<a name="0"></a>
|
||||
## 简介
|
||||
|
||||
本项目基于[Paddle.js](https://github.com/PaddlePaddle/Paddle.js)在浏览器中实现目标检测,人像分割,OCR,物品分类等计算机视觉任务。
|
||||
|
||||
|
||||
|demo名称|web demo组件|源码目录|npm包|
|
||||
|-|-|-|-|
|
||||
|人脸检测|[FaceDetection](./web_demo/src/pages/cv/detection/FaceDetection/)| [facedetect](./package/packages/paddlejs-models/facedetect)|[@paddle-js-models/facedetect](https://www.npmjs.com/package/@paddle-js-models/facedetect)|
|
||||
|螺丝钉检测|[ScrewDetection](./web_demo/src/pages/cv/detection/ScrewDetection)| [detect](./package/packages/paddlejs-models/detect)|[@paddle-js-models/detect](https://www.npmjs.com/package/@paddle-js-models/detect)|
|
||||
|人像分割背景替换|[HumanSeg](./web_demo/src/pages/cv/segmentation/HumanSeg)|[humanseg](./package/packages/paddlejs-models/humanseg)|[@paddle-js-models/humanseg](https://www.npmjs.com/package/@paddle-js-models/humanseg)|
|
||||
|手势识别AI猜丁壳|[GestureRecognition](./web_demo/src/pages/cv/recognition/GestureRecognition)|[gesture](./package/packages/paddlejs-models/gesture)|[@paddle-js-models/gesture](https://www.npmjs.com/package/@paddle-js-models/gesture)|
|
||||
|1000种物品识别|[ItemIdentification](./web_demo/src/pages/cv/recognition/ItemIdentification)|[mobilenet](./package/packages/paddlejs-models/mobilenet)|[@paddle-js-models/mobilenet](https://www.npmjs.com/package/@paddle-js-models/mobilenet)|
|
||||
|文本检测|[TextDetection](./web_demo/src/pages/cv/ocr/TextDetection)|[ocrdetection](./package/packages/paddlejs-models/ocrdetection)|[@paddle-js-models/ocrdet](https://www.npmjs.com/package/@paddle-js-models/ocrdet)|
|
||||
|文本识别|[TextRecognition](./web_demo/src/pages/cv/ocr/TextRecognition)|[ocr](./package/packages/paddlejs-models/ocr)|[@paddle-js-models/ocr](https://www.npmjs.com/package/@paddle-js-models/ocr)|
|
||||
|
||||
|
||||
<a name="1"></a>
|
||||
## 1. 快速开始
|
||||
|
||||
本节介绍如何在浏览器中直接运行官方demo。
|
||||
|
||||
**1. 安装Node.js**
|
||||
|
||||
从`Node.js`官网https://nodejs.org/en/download/ 下载适合自己平台的`Node.js`安装包并安装。
|
||||
|
||||
**2. 安装demo依赖并启动**
|
||||
在`./web_demo`目录下执行如下指令:
|
||||
|
||||
```
|
||||
# 安装依赖
|
||||
npm install
|
||||
# 启动demo
|
||||
npm run dev
|
||||
```
|
||||
|
||||
在浏览器中打开网址 `http://localhost:5173/main/index.html` 即可快速体验在浏览器中运行计算机视觉任务。
|
||||
|
||||

|
||||
|
||||
<a name="2"></a>
|
||||
## 2. npm包调用
|
||||
|
||||
本节介绍npm包的使用方式,每个demo均提供简单易用的接口,用户只需初始化上传图片即可获得结果,使用步骤如下:
|
||||
1. 调用模块
|
||||
2. 初始化模型
|
||||
3. 传入输入,执行预测
|
||||
|
||||
以 OCR 为例,在前端项目中,`@paddle-js-models/ocr`包的使用方式如下:
|
||||
|
||||
```
|
||||
// 1. 调用ocr模块
|
||||
import * as ocr from '@paddle-js-models/ocr';
|
||||
|
||||
// 2. 初始化ocr模型
|
||||
await ocr.init();
|
||||
|
||||
// 3. 传入HTMLImageElement类型的图像作为输入并获得结果
|
||||
const res = await ocr.recognize(img);
|
||||
|
||||
// 打印OCR模型得到的文本坐标以及文本内容
|
||||
console.log(res.text);
|
||||
console.log(res.points);
|
||||
```
|
||||
|
||||
<a name="3"></a>
|
||||
## 3. 模型替换
|
||||
|
||||
由于前端环境和计算资源限制,在前端部署深度学习模型时,我们对模型的性能有着更严格的要求,简单来说,模型需要足够轻量化。理论上模型的输入shape越小、模型大小越小,则对应的模型的flops越小,在前端运行也能更流畅。经验总结,使用`Paddle.js`部署的模型存储尽量不超过*5M*,实际情况根据硬件和计算资源情况决定。
|
||||
|
||||
在实际应用中,常常根据垂类的场景定制化模型,官方的demo支持修改传入参数替换模型。
|
||||
|
||||
以OCR demo为例,[ocr.init()函数](https://github.com/PaddlePaddle/FastDeploy/tree/develop/examples/application/js/package/packages/paddlejs-models/ocr/src/index.ts#L52)中,包含默认初始化的模型链接,如果要替换模型参考下述步骤。
|
||||
|
||||
步骤1:将模型转成js格式:
|
||||
```
|
||||
# 安装paddlejsconverter
|
||||
pip3 install paddlejsconverter
|
||||
# 转换模型格式,输入模型为inference模型
|
||||
paddlejsconverter --modelPath=./inference.pdmodel --paramPath=./inference.pdiparams --outputDir=./ --useGPUOpt=True
|
||||
# 注意:useGPUOpt 选项默认不开启,如果模型用在 gpu backend(webgl/webgpu),则开启 useGPUOpt,如果模型运行在(wasm/plain js)则不要开启。
|
||||
```
|
||||
|
||||
导出成功后,本地目录下会出现 `model.json chunk_1.dat`等文件,分别是对应js模型的网络结构、模型参数二进制文件。
|
||||
|
||||
步骤2:将导出的js模型上传到支持跨域访问的服务器,服务器的CORS配置参考下图:
|
||||

|
||||
|
||||
|
||||
步骤3:修改代码替换默认的模型。以OCR demo为例,修改OCR web demo中[模型初始化代码](https://github.com/PaddlePaddle/FastDeploy/tree/develop/examples/application/js/web_demo/src/pages/cv/ocr/TextRecognition/TextRecognition.vue#L64),即
|
||||
|
||||
```
|
||||
await ocr.init();
|
||||
修改为:
|
||||
await ocr.init({modelPath: "https://js-models.bj.bcebos.com/PaddleOCR/PP-OCRv3/ch_PP-OCRv3_det_infer_js_960/model.json"}); # 第一个参数传入新的文本检测字典类型参数
|
||||
```
|
||||
|
||||
重新在demo目录下执行下述命令,即可体验新的模型效果。
|
||||
```
|
||||
npm run dev
|
||||
```
|
||||
|
||||
<a name="4"></a>
|
||||
## 4. 自定义前后处理参数
|
||||
|
||||
**自定义前处理参数**
|
||||
|
||||
在不同计算机视觉任务中,不同的模型可能有不同的预处理参数,比如mean,std,keep_ratio等参数,替换模型后也需要对预处理参数进行修改。paddle.js发布的npm包中提供了自定义预处理参数的简单方案。只需要在调用模型初始化函数时,传入自定义的参数即可。
|
||||
|
||||
```
|
||||
# 默认参数初始化
|
||||
await model.init();
|
||||
|
||||
自定义参数初始化
|
||||
const Config = {mean: [0.5, 0.5, 0.5], std: [0.5, 0.5, 0.5], keepratio: false};
|
||||
await model.init(Config);
|
||||
```
|
||||
|
||||
以OCR文本检测demo为例,修改模型前处理的mean和std参数,只需要在模型初始化时传入自定义的mean和std参数。
|
||||
```
|
||||
await ocr.init();
|
||||
修改为:
|
||||
const detConfig = {mean: [0.5, 0.5, 0.5], std: [0.5, 0.5, 0.5]};
|
||||
await ocr.init(detConfig); # 第一个参数传入新的文本检测模型链接
|
||||
```
|
||||
|
||||
**自定义后处理参数**
|
||||
|
||||
同理,paddle.js发布的npm包也提供了后处理参数的自定义方案。
|
||||
|
||||
```
|
||||
# 默认参数运行
|
||||
await model.predict();
|
||||
|
||||
# 自定义后处理参数
|
||||
const postConfig = {thresh: 0.5};
|
||||
await model.predict(Config);
|
||||
```
|
||||
|
||||
以OCR文本检测 demo为例,修改文本检测后处理的参数实现扩大文本检测框的效果,修改OCR web demo中执行[模型预测代码](https://github.com/PaddlePaddle/FastDeploy/tree/develop/examples/application/js/web_demo/src/pages/cv/ocr/TextRecognition/TextRecognition.vue#L99),即:
|
||||
|
||||
```
|
||||
const res = await ocr.recognize(img, { canvas: canvas.value });
|
||||
修改为:
|
||||
// 定义超参数,将unclip_ratio参数从1.5 增大为3.5
|
||||
const detConfig = {shape: 960, thresh: 0.3, box_thresh: 0.6, unclip_ratio:3.5};
|
||||
const res = await ocr.recognize(img, { canvas: canvas.value }, detConfig);
|
||||
```
|
||||
|
||||
注:不同的任务有不同的后处理参数,详细参数参考npm包中的API。
|
||||
|
||||
<a name="5"></a>
|
||||
## 5. 其他
|
||||
|
||||
`Paddle.js`转换后的模型不仅支持浏览器中使用,也可以在百度小程序和微信小程序环境下运行。
|
||||
|
||||
|名称|目录|
|
||||
|-|-|
|
||||
|OCR文本检测| [ocrdetecXcx](./mini_program/ocrdetectXcx/) |
|
||||
|OCR文本识别| [ocrXcx](./mini_program/ocrXcx/) |
|
||||
|目标检测| coming soon |
|
||||
|图像分割| coming soon |
|
||||
|物品分类| coming soon |
|
||||
@@ -1,73 +0,0 @@
|
||||
English | [简体中文](DEVELOPMENT_CN.md)
|
||||
# paddlejs-converter
|
||||
|
||||
paddlejs-converter is a model transformation tool for Paddle.js. Its role is to convert PaddlePaddle models (also known as fluid models) into a browser-friendly format that Paddle.js can use to load and predict usage in browsers as well as other environments. In addition, paddlejs-converter provides powerful model optimization capabilities to help developers optimize the model structure and improve runtime performance.
|
||||
|
||||
## 1. Tutorial
|
||||
|
||||
### 1.1. Environment Construction
|
||||
#### Python Version
|
||||
Confirm whether the python environment and version of the running platform meet the requirements. If Python 3 is used, you may need to change the `python` in subsequent commands to `python3`:
|
||||
- Python3: 3.5.1+ / 3.6 / 3.7
|
||||
- Python2: 2.7.15+
|
||||
|
||||
#### Install Virtual Environment
|
||||
*Since the development environment may have multiple versions of Python installed, there may be different versions of dependent packages. In order to avoid conflicts, it is strongly recommended to use Python virtual environment to execute the commands required by the conversion tool to avoid various problems. If you are not using a virtual environment or if you have a virtual environment installed, you can skip this step.*
|
||||
|
||||
Take Anaconda as an example:
|
||||
Go to [Anaconda](https://www.anaconda.com/) main page,Select the corresponding platform and python version of anaconda and install it according to the official prompts;
|
||||
|
||||
After installation, execute the following command on the command line to create a python virtual environment:
|
||||
``` bash
|
||||
conda create --name <your_env_name>
|
||||
```
|
||||
|
||||
Execute the following command to switch to the virtual environment
|
||||
``` bash
|
||||
# Linux or macOS
|
||||
source activate <your_env_name>
|
||||
|
||||
# Windows
|
||||
activate <your_env_name>
|
||||
```
|
||||
|
||||
#### Installation Dependency
|
||||
- If you don't need to optimize model, execute the command:
|
||||
``` bash
|
||||
python -m pip install paddlepaddle -i https://mirror.baidu.com/pypi/simple
|
||||
```
|
||||
- Otherwise,execute the command:
|
||||
``` bash
|
||||
python -m pip install paddlepaddle paddlelite==2.6.0 -i https://mirror.baidu.com/pypi/simple
|
||||
```
|
||||
|
||||
### 1.2. Get Start
|
||||
- If the weight file of fluid model to be converted is merged format which means one model corresponds to one weight file, then execute:
|
||||
``` bash
|
||||
python convertToPaddleJSModel.py --modelPath=<fluid_model_file_path> --paramPath=<fluid_param_file_path> --outputDir=<paddlejs_model_directory>
|
||||
```
|
||||
- Otherwise,execute:
|
||||
``` bash
|
||||
# Note that in this way, you need to ensure that the model file name '__ model__ ' in the inputDir
|
||||
python convertToPaddleJSModel.py --inputDir=<fluid_model_directory> --outputDir=<paddlejs_model_directory>
|
||||
````
|
||||
The model converter generates the following two types of files for Paddle.js:
|
||||
|
||||
- model.json (Contains the model structure and parameter list)
|
||||
- chunk_\*.dat (The collection of binary weight files)
|
||||
|
||||
## 2. Detailed Documentation
|
||||
|
||||
Parameter | description
|
||||
:-: | :-:
|
||||
--inputDir | The fluid model directory, If and only if weight files are not merged format, `modelPath` and `paramPath` below will be ignored,and the model file name should be `__model__`.
|
||||
--modelPath | The model file path, used when the weight file is merged.
|
||||
--paramPath | The weight file path,used when the weight file is merged.
|
||||
--outputDir | `Necessary`, the output model directory generated after converting.
|
||||
--disableOptimize | Whether to disable optimize model, `1`is to disable, `0`is use optimize(need to install Paddle Lite), default 0.
|
||||
--logModelInfo | Whether to print model structure information, `0` means not to print, `1` means to print, default 0.
|
||||
--sliceDataSize | Shard size (in KB) of each weight file. Default size is 4096.
|
||||
--useGPUOpt | Whether to use gpu opt, default is False.
|
||||
|
||||
## 3. Other information
|
||||
If the model to be converted is in `tensorflow / Cafe / onnx` format, there is [X2Paddle](https://github.com/PaddlePaddle/X2Paddle) tool in PaddlePaddle program for converting other models with different formats to fluid model, and then you can use paddlejs-converter to get a Paddle.js model.
|
||||
@@ -1,73 +0,0 @@
|
||||
[English](DEVELOPMENT.md) | 简体中文
|
||||
# paddlejs-converter
|
||||
|
||||
paddlejs-converter 是适用于 Paddle.js 的模型转换工具,其作用是将 PaddlePaddle 模型(或称为 fluid 模型)转化为浏览器友好的格式,以供Paddle.js在浏览器等环境中加载预测使用。此外,paddlejs-converter 还提供了强大的模型优化能力,帮助开发者对模型结构进行优化,提高运行时性能。
|
||||
|
||||
## 1. 使用教程
|
||||
|
||||
### 1.1. 环境搭建
|
||||
#### Python 版本确认
|
||||
确认运行平台的 Python 环境与版本是否满足要求,若使用 Python3 ,则可能需要将后续命令中的 `python` 换成 `python3`:
|
||||
- Python3: 3.5.1+ / 3.6 / 3.7
|
||||
- Python2: 2.7.15+
|
||||
|
||||
#### 安装虚拟环境
|
||||
*由于开发环境可能安装了多个版本的 Python,相关依赖包可能存在不同的版本,为避免产生冲突,**强烈建议**使用 Python 虚拟环境执行转换工具所需的各项命令,以免产生各种问题。若不使用虚拟环境或已安装虚拟环境,可跳过该步骤。*
|
||||
|
||||
以 Anaconda 为例:
|
||||
前往 [Anaconda](https://www.anaconda.com/) 主页,选择对应平台、Python 版本的 Anaconda 按照官方提示,进行安装;
|
||||
|
||||
安装完毕后,在命令行执行以下命令,创建Python 虚拟环境:
|
||||
``` bash
|
||||
conda create --name <your_env_name>
|
||||
```
|
||||
|
||||
执行以下命令,切换至虚拟环境
|
||||
``` bash
|
||||
# Linux 或 macOS下请执行
|
||||
source activate <your_env_name>
|
||||
|
||||
# Windows 下请执行
|
||||
activate <your_env_name>
|
||||
```
|
||||
|
||||
#### 安装依赖
|
||||
- 如果`不需要`使用优化模型的能力,执行命令:
|
||||
``` bash
|
||||
python -m pip install paddlepaddle -i https://mirror.baidu.com/pypi/simple
|
||||
```
|
||||
- 如果`需要`使用优化模型的能力,执行命令:
|
||||
``` bash
|
||||
python -m pip install paddlepaddle paddlelite==2.6.0 -i https://mirror.baidu.com/pypi/simple
|
||||
```
|
||||
|
||||
### 1.2. 快速上手
|
||||
- 如果待转换的 fluid 模型为`合并参数文件`,即一个模型对应一个参数文件:
|
||||
``` bash
|
||||
python convertToPaddleJSModel.py --modelPath=<fluid_model_file_path> --paramPath=<fluid_param_file_path> --outputDir=<paddlejs_model_directory>
|
||||
```
|
||||
- 如果待转换的 fluid 模型为`分片参数文件`,即一个模型文件对应多个参数文件:
|
||||
``` bash
|
||||
# 注意,使用这种方式调用转换器,需要保证 inputDir 中,模型文件名为'__model__'
|
||||
python convertToPaddleJSModel.py --inputDir=<fluid_model_directory> --outputDir=<paddlejs_model_directory>
|
||||
````
|
||||
模型转换器将生成以下两种类型的文件以供 Paddle.js 使用:
|
||||
|
||||
- model.json (模型结构与参数清单)
|
||||
- chunk_\*.dat (二进制参数文件集合)
|
||||
|
||||
## 2. 详细文档
|
||||
参数 | 描述
|
||||
:-: | :-:
|
||||
--inputDir | fluid 模型所在目录,当且仅当使用分片参数文件时使用该参数,将忽略 `modelPath` 和 `paramPath` 参数,且模型文件名必须为`__model__`
|
||||
--modelPath | fluid 模型文件所在路径,使用合并参数文件时使用该参数
|
||||
--paramPath | fluid 参数文件所在路径,使用合并参数文件时使用该参数
|
||||
--outputDir | `必要参数`, Paddle.js 模型输出路径
|
||||
--disableOptimize | 是否关闭模型优化, `1` 为关闭优化,`0` 为开启优化(需安装 Paddle Lite ),默认执行优化
|
||||
--logModelInfo | 是否打印模型结构信息, `0` 为不打印, `1` 为打印,默认不打印
|
||||
--sliceDataSize | 分片输出 Paddle.js 参数文件时,每片文件的大小,单位:KB,默认 4096
|
||||
--useGPUOpt | 是否开启模型 GPU 优化,默认不开启(当模型准备运行在 webgl/webgpu 计算方案时,可以设置为 True 开启,在 wasm/plainjs 方案,则不用开启)
|
||||
|
||||
## 3. 其他信息
|
||||
若需要转换的模型为 `TensorFlow/Caffe/ONNX` 格式,可使用 PaddlePaddle 项目下的 `X2Paddle`工具,将其他格式的模型转为 fluid 模型后,再使用本工具转化为 Paddle.js 模型。
|
||||
详细请参考 [X2Paddle 项目](https://github.com/PaddlePaddle/X2Paddle)
|
||||
@@ -1,30 +0,0 @@
|
||||
English | [简体中文](README_CN.md)
|
||||
# PaddleJsConverter
|
||||
|
||||
## Installation
|
||||
|
||||
System Requirements:
|
||||
|
||||
* paddlepaddle >= 2.0.0
|
||||
* paddlejslite >= 0.0.2
|
||||
* Python3: 3.5.1+ / 3.6 / 3.7
|
||||
* Python2: 2.7.15+
|
||||
|
||||
#### Install PaddleJsConverter
|
||||
|
||||
<img src="https://img.shields.io/pypi/v/paddlejsconverter" alt="version">
|
||||
|
||||
```shell
|
||||
pip install paddlejsconverter
|
||||
|
||||
# or
|
||||
pip3 install paddlejsconverter
|
||||
```
|
||||
|
||||
|
||||
## Usage
|
||||
|
||||
```shell
|
||||
paddlejsconverter --modelPath=user_model_path --paramPath=user_model_params_path --outputDir=model_saved_path --useGPUOpt=True
|
||||
```
|
||||
Note: The option useGPUOpt is not turned on by default. Turn on useGPUOpt if the model is used on gpu backend (webgl/webgpu), don't turn on if is running on (wasm/plain js).
|
||||
@@ -1,30 +0,0 @@
|
||||
简体中文 | [English](README.md)
|
||||
# PaddleJsConverter
|
||||
|
||||
## Installation
|
||||
|
||||
System Requirements:
|
||||
|
||||
* paddlepaddle >= 2.0.0
|
||||
* paddlejslite >= 0.0.2
|
||||
* Python3: 3.5.1+ / 3.6 / 3.7
|
||||
* Python2: 2.7.15+
|
||||
|
||||
#### Install PaddleJsConverter
|
||||
|
||||
<img src="https://img.shields.io/pypi/v/paddlejsconverter" alt="version">
|
||||
|
||||
```shell
|
||||
pip install paddlejsconverter
|
||||
|
||||
# or
|
||||
pip3 install paddlejsconverter
|
||||
```
|
||||
|
||||
|
||||
## Usage
|
||||
|
||||
```shell
|
||||
paddlejsconverter --modelPath=user_model_path --paramPath=user_model_params_path --outputDir=model_saved_path --useGPUOpt=True
|
||||
```
|
||||
注意:useGPUOpt 选项默认不开启,如果模型用在 gpu backend(webgl/webgpu),则开启 useGPUOpt,如果模型运行在(wasm/plain js)则不要开启。
|
||||
@@ -1,80 +0,0 @@
|
||||
English | [简体中文](RNN_CN.md)
|
||||
# The computation process of RNN operator
|
||||
|
||||
## 1. Understanding of RNN
|
||||
|
||||
**RNN** is a recurrent neural network, including an input layer, a hidden layer and an output layer, which is specialized in processing sequential data.
|
||||
|
||||

|
||||
paddle official document: https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/nn/RNN_cn.html#rnn
|
||||
|
||||
paddle source code implementation: https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/fluid/operators/rnn_op.h#L812
|
||||
|
||||
## 2. How to compute RNN
|
||||
|
||||
At moment t, the input layer is , hidden layer is , output layer is . As the picture above, isn't just decided by ,it is also related to . The formula is as follows.:
|
||||
|
||||

|
||||
|
||||
## 3. RNN operator implementation in pdjs
|
||||
|
||||
Because the gradient disappearance problem exists in RNN, and more contextual information cannot be obtained, **LSTM (Long Short Term Memory)** is used in CRNN, which is a special kind of RNN that can preserve long-term dependencies.
|
||||
|
||||
Based on the image sequence, the two directions of context are mutually useful and complementary. Since the LSTM is unidirectional, two LSTMs, one forward and one backward, are combined into a **bidirectional LSTM**. In addition, multiple layers of bidirectional LSTMs can be stacked. ch_PP-OCRv2_rec_infer recognition model is using a two-layer bidirectional LSTM structure. The calculation process is shown as follows.
|
||||
|
||||
#### Take ch_ppocr_mobile_v2.0_rec_infer model, rnn operator as an example
|
||||
```javascript
|
||||
{
|
||||
Attr: {
|
||||
mode: 'LSTM'
|
||||
// Whether bidirectional, if true, it is necessary to traverse both forward and reverse.
|
||||
is_bidirec: true
|
||||
// Number of hidden layers, representing the number of loops.
|
||||
num_layers: 2
|
||||
}
|
||||
|
||||
Input: [
|
||||
transpose_1.tmp_0[25, 1, 288]
|
||||
]
|
||||
|
||||
PreState: [
|
||||
fill_constant_batch_size_like_0.tmp_0[4, 1, 48],
|
||||
fill_constant_batch_size_like_1.tmp_0[4, 1, 48]
|
||||
]
|
||||
|
||||
WeightList: [
|
||||
lstm_cell_0.w_0[192, 288], lstm_cell_0.w_1[192, 48],
|
||||
lstm_cell_1.w_0[192, 288], lstm_cell_1.w_1[192, 48],
|
||||
lstm_cell_2.w_0[192, 96], lstm_cell_2.w_1[192, 48],
|
||||
lstm_cell_3.w_0[192, 96], lstm_cell_3.w_1[192, 48],
|
||||
lstm_cell_0.b_0[192], lstm_cell_0.b_1[192],
|
||||
lstm_cell_1.b_0[192], lstm_cell_1.b_1[192],
|
||||
lstm_cell_2.b_0[192], lstm_cell_2.b_1[192],
|
||||
lstm_cell_3.b_0[192], lstm_cell_3.b_1[192]
|
||||
]
|
||||
|
||||
Output: [
|
||||
lstm_0.tmp_0[25, 1, 96]
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
#### Overall computation process
|
||||

|
||||
#### Add op in rnn calculation
|
||||
1) rnn_origin
|
||||
Formula: blas.MatMul(Input, WeightList_ih, blas_ih) + blas.MatMul(PreState, WeightList_hh, blas_hh)
|
||||
|
||||
2) rnn_matmul
|
||||
Formula: rnn_matmul = rnn_origin + Matmul( $ S_{t-1} $, WeightList_hh)
|
||||
|
||||
3) rnn_cell
|
||||
Method: Split the rnn_matmul op output into 4 copies, each copy performs a different activation function calculation, and finally outputs lstm_x_y.tmp_c[1, 1, 48]. x∈[0, 3], y∈[0, 24].
|
||||
For details, please refer to [rnn_cell](https://github.com/PaddlePaddle/Paddle.js/blob/release/v2.2.5/packages/paddlejs-backend-webgl/src/ops/shader/rnn/rnn_cell.ts).
|
||||
|
||||
|
||||
4) rnn_hidden
|
||||
Split the rnn_matmul op output into 4 copies, each copy performs a different activation function calculation, and finally outputs lstm_x_y.tmp_h[1, 1, 48]. x∈[0, 3], y∈[0, 24].
|
||||
For details, please refer to [rnn_hidden](https://github.com/PaddlePaddle/Paddle.js/blob/release/v2.2.5/packages/paddlejs-backend-webgl/src/ops/shader/rnn/rnn_hidden.ts).
|
||||
|
||||
|
||||
@@ -1,83 +0,0 @@
|
||||
简体中文 | [English](RNN.md)
|
||||
# RNN算子计算过程
|
||||
|
||||
## 一、RNN理解
|
||||
|
||||
**RNN** 是循环神经网络,由输入层、隐藏层和输出层组成,擅长对序列数据进行处理。
|
||||
|
||||

|
||||
paddle官网文档:https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/nn/RNN_cn.html#rnn
|
||||
|
||||
paddle源码实现:https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/fluid/operators/rnn_op.h#L812
|
||||
|
||||
##二、RNN计算方式
|
||||
|
||||
t 时刻,输入层为  ,隐藏层为  ,输出层为  。由上图可知, 的值不仅仅取决于  ,还取决于  。计算公式如下:
|
||||
|
||||

|
||||
|
||||
## 三、pdjs中RNN算子实现
|
||||
|
||||
因为 RNN 有梯度消失问题,不能获取更多上下文信息,所以 CRNN 中使用的是 **LSTM(Long Short Term Memory)**,LSTM 是一种特殊的 RNN,能够保存长期的依赖关系。
|
||||
|
||||
基于图像的序列,两个方向的上下文是相互有用且互补的。由于 LSTM 是单向的,所以将两个 LSTM,一个向前和一个向后组合到一个**双向 LSTM** 中。此外,可以堆叠多层双向 LSTM。ch_PP-OCRv2_rec_infer 识别模型就是使用的双层双向 LSTM 结构。计算过程如下图所示:
|
||||
|
||||
#### 以ch_ppocr_mobile_v2.0_rec_infer 模型 rnn算子为例:
|
||||
```javascript
|
||||
{
|
||||
Attr: {
|
||||
mode: 'LSTM'
|
||||
// 是否双向,为true则正向反向都需要遍历
|
||||
is_bidirec: true
|
||||
// 隐藏层层数,代表循环次数
|
||||
num_layers: 2
|
||||
}
|
||||
|
||||
Input: [
|
||||
transpose_1.tmp_0[25, 1, 288]
|
||||
]
|
||||
|
||||
PreState: [
|
||||
fill_constant_batch_size_like_0.tmp_0[4, 1, 48],
|
||||
fill_constant_batch_size_like_1.tmp_0[4, 1, 48]
|
||||
]
|
||||
|
||||
WeightList: [
|
||||
lstm_cell_0.w_0[192, 288], lstm_cell_0.w_1[192, 48],
|
||||
lstm_cell_1.w_0[192, 288], lstm_cell_1.w_1[192, 48],
|
||||
lstm_cell_2.w_0[192, 96], lstm_cell_2.w_1[192, 48],
|
||||
lstm_cell_3.w_0[192, 96], lstm_cell_3.w_1[192, 48],
|
||||
lstm_cell_0.b_0[192], lstm_cell_0.b_1[192],
|
||||
lstm_cell_1.b_0[192], lstm_cell_1.b_1[192],
|
||||
lstm_cell_2.b_0[192], lstm_cell_2.b_1[192],
|
||||
lstm_cell_3.b_0[192], lstm_cell_3.b_1[192]
|
||||
]
|
||||
|
||||
Output: [
|
||||
lstm_0.tmp_0[25, 1, 96]
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
#### 整体计算过程
|
||||

|
||||
#### rnn 计算中新增op:
|
||||
1)rnn_origin
|
||||
|
||||
计算公式: blas.MatMul(Input, WeightList_ih, blas_ih) + blas.MatMul(PreState, WeightList_hh, blas_hh)
|
||||
|
||||
2)rnn_matmul
|
||||
|
||||
计算公式:rnn_matmul = rnn_origin + Matmul( $ S_{t-1} $, WeightList_hh)
|
||||
|
||||
3)rnn_cell
|
||||
|
||||
计算方式:将rnn_matmul op输出结果分割成4份,每份执行不同激活函数计算,最后输出lstm_x_y.tmp_c[1, 1, 48]。x∈[0, 3],y∈[0, 24]。
|
||||
详见算子实现:[rnn_cell](https://github.com/PaddlePaddle/Paddle.js/blob/release/v2.2.5/packages/paddlejs-backend-webgl/src/ops/shader/rnn/rnn_cell.ts)
|
||||
|
||||
|
||||
4)rnn_hidden
|
||||
计算方式:将rnn_matmul op输出结果分割成4份,每份执行不同激活函数计算,最后输出lstm_x_y.tmp_h[1, 1, 48]。x∈[0, 3],y∈[0, 24]。
|
||||
详见算子实现:[rnn_hidden](https://github.com/PaddlePaddle/Paddle.js/blob/release/v2.2.5/packages/paddlejs-backend-webgl/src/ops/shader/rnn/rnn_hidden.ts)
|
||||
|
||||
|
||||
@@ -1,558 +0,0 @@
|
||||
#!/usr/bin/env python
|
||||
# -*- coding: UTF-8 -*-
|
||||
|
||||
import json
|
||||
import collections
|
||||
import math
|
||||
import sys
|
||||
import os
|
||||
import struct
|
||||
import argparse
|
||||
import shutil
|
||||
import stat
|
||||
import traceback
|
||||
import numpy as np
|
||||
import paddle.fluid as fluid
|
||||
import paddle as paddle
|
||||
import copy
|
||||
from functools import reduce
|
||||
import rnn
|
||||
from pruningModel import pruningNoSenseTensor
|
||||
from fuseOps import opListFuse
|
||||
|
||||
|
||||
# 输入模型所在目录
|
||||
modelDir = None
|
||||
# 输入模型名
|
||||
modelName = None
|
||||
# 输入参数名,当且仅当所有模型参数被保存在一个单独的二进制文件中,它才需要被指定,若为分片模型,请设置为None
|
||||
paramsName = None
|
||||
# 是否打印模型信息
|
||||
enableLogModelInfo = False
|
||||
# 输出模型目录
|
||||
outputDir = None
|
||||
# 分片文件大小,单位:KB
|
||||
sliceDataSize = 4 * 1024
|
||||
# paddlepaddle运行程序实例
|
||||
program = None
|
||||
# 存放模型结构
|
||||
modelInfo = {"vars": {}, "ops": [], "chunkNum": 0, "dataLayout": "nchw", "feedShape": None}
|
||||
# 存放参数数值(未排序)
|
||||
paramValuesDict = {}
|
||||
|
||||
# 有一些后置算子适合在cpu中运行,所以单独统计
|
||||
postOps = []
|
||||
# 在转换过程中新生成的、需要添加到vars中的variable
|
||||
appendedVarList = []
|
||||
# rnn op索引列表
|
||||
rnnList = []
|
||||
|
||||
# 转换模型中需要过滤掉的参数
|
||||
needFilterAttributes = ['op_callstack', 'col', 'op_role', 'op_namescope', 'op_role_var',
|
||||
'data_format', 'is_test', 'use_mkldnn', 'use_cudnn', 'use_quantizer', 'workspace_size_MB',
|
||||
'mkldnn_data_type', 'op_device', '__@kernel_type_attr@__']
|
||||
|
||||
|
||||
class ObjDict(dict):
|
||||
"""
|
||||
Makes a dictionary behave like an object,with attribute-style access.
|
||||
"""
|
||||
def __getattr__(self,name):
|
||||
try:
|
||||
return self[name]
|
||||
except:
|
||||
raise AttributeError(name)
|
||||
def __setattr__(self,name,value):
|
||||
self[name]=value
|
||||
|
||||
def validateShape(shape, name):
|
||||
"""检验shape长度,超过4则截断"""
|
||||
if len(shape) > 4:
|
||||
newShape = shape[-4:]
|
||||
print('\033[31m ' + name + ' tensor shape length > 4, 处理为丢弃头部shape \033[0m')
|
||||
return newShape
|
||||
return shape
|
||||
|
||||
def splitLargeNum(x):
|
||||
"""将x拆分成两个因数相乘"""
|
||||
# 获取最小值
|
||||
num = math.floor(math.sqrt(x))
|
||||
while (num):
|
||||
if x % num == 0:
|
||||
return [num, int(x / num)]
|
||||
num -= 1
|
||||
|
||||
return [1, x]
|
||||
|
||||
def logModel(info):
|
||||
""" 打印信息 """
|
||||
if enableLogModelInfo:
|
||||
print(info)
|
||||
|
||||
def sortDict(oldDict, reverse=False):
|
||||
""" 对字典进行排序,返回有序字典,默认升序 """
|
||||
# 获得排序后的key list
|
||||
keys = sorted(oldDict.keys(), reverse=reverse)
|
||||
orderDict = collections.OrderedDict()
|
||||
# 遍历 key 列表
|
||||
for key in keys:
|
||||
orderDict[key] = oldDict[key]
|
||||
return orderDict
|
||||
|
||||
def dumpModelToJsonFile(outputDir):
|
||||
""" 导出模型数据到json文件 """
|
||||
print("Dumping model structure to json file...")
|
||||
if not os.path.exists(outputDir):
|
||||
os.makedirs(outputDir)
|
||||
outputModelPath = os.path.join(outputDir, "model.json")
|
||||
with open(outputModelPath, 'w') as outputFile:
|
||||
json.dump(modelInfo, outputFile, indent=4, separators=(", ", ": "), sort_keys=True)
|
||||
print("Dumping model structure to json file successfully")
|
||||
|
||||
def sliceDataToBinaryFile(paramValueList, outputDir):
|
||||
""" 将参数数据分片输出到文件,默认分片策略为按4M分片 """
|
||||
totalParamValuesCount = len(paramValueList)
|
||||
countPerSlice = int(sliceDataSize * 1024 / 4)
|
||||
|
||||
if not os.path.exists(outputDir):
|
||||
os.makedirs(outputDir)
|
||||
currentChunkIndex = 0
|
||||
currentParamDataIndex = 0
|
||||
|
||||
while currentParamDataIndex < totalParamValuesCount - 1:
|
||||
remainCount = totalParamValuesCount - currentParamDataIndex
|
||||
if remainCount < countPerSlice:
|
||||
countPerSlice = remainCount
|
||||
chunkPath = os.path.join(outputDir, 'chunk_%s.dat' % (currentChunkIndex + 1))
|
||||
file = open(chunkPath, 'wb')
|
||||
for i in paramValueList[currentParamDataIndex : currentParamDataIndex + countPerSlice]:
|
||||
byte = struct.pack('f', float(i))
|
||||
file.write(byte)
|
||||
file.close()
|
||||
currentParamDataIndex = currentParamDataIndex + countPerSlice
|
||||
currentChunkIndex = currentChunkIndex + 1
|
||||
print("Output No." + str(currentChunkIndex)+ " binary file, remain " + str(totalParamValuesCount - currentParamDataIndex) + " param values.")
|
||||
print("Slicing data to binary files successfully. (" + str(currentChunkIndex)+ " output files and " + str(currentParamDataIndex) + " param values)")
|
||||
|
||||
def reorderParamsValue():
|
||||
""" 对参数文件中的数值,按照variable.name字母序排序,返回排序后组合完成的value list """
|
||||
paramValuesOrderDict = sortDict(paramValuesDict)
|
||||
paramValues = []
|
||||
for value in paramValuesOrderDict.values():
|
||||
paramValues += value
|
||||
return paramValues
|
||||
|
||||
|
||||
def mapToPaddleJSTypeName(fluidOPName):
|
||||
""" 处理fluid的OP type与PaddleJS的OP type不对应情况 """
|
||||
if fluidOPName == "batch_norm":
|
||||
return "batchnorm"
|
||||
return fluidOPName
|
||||
|
||||
def excludeNegativeShape(shape):
|
||||
varShape = list(shape)
|
||||
varShapeExcludeNegativeOne = []
|
||||
for s in varShape:
|
||||
# 模型中 ?会自动转为 -1,需要单独处理成 1
|
||||
if s == -1:
|
||||
s = 1
|
||||
varShapeExcludeNegativeOne.append(s)
|
||||
return varShapeExcludeNegativeOne
|
||||
|
||||
def organizeModelVariableInfo(result):
|
||||
""" 组织参数信息 """
|
||||
print("Organizing model variables info...")
|
||||
index = 0
|
||||
# 存放var信息(未排序)
|
||||
varInfoDict = {}
|
||||
# 获取program中所有的var,遍历并获取所有未排序的var信息和参数数值
|
||||
vars = list(program.list_vars())
|
||||
for v in vars:
|
||||
# 跳过feed和fetch
|
||||
if "feed" == v.name:
|
||||
continue
|
||||
if "fetch" == v.name:
|
||||
continue
|
||||
|
||||
varShape = excludeNegativeShape(v.shape)
|
||||
# FIXME:end
|
||||
|
||||
# 存放variable信息,在dump成json时排序
|
||||
varInfo = {}
|
||||
varInfo["shape"] = varShape
|
||||
# 数据是否是持久化数据,如tensor为持久化数据,op的output不是持久化数据
|
||||
# 只输出持久化数据,paddlejs中也仅读取持久化数据
|
||||
varInfo["persistable"] = v.persistable
|
||||
varInfoDict[v.name] = varInfo
|
||||
|
||||
logModel("[Var index:" + str(index) + " name:" + v.name + "]")
|
||||
jsonDumpsIndentStr = json.dumps(varInfo, indent=2)
|
||||
logModel(jsonDumpsIndentStr)
|
||||
logModel("")
|
||||
index += 1
|
||||
|
||||
# persistable数据存入paramValuesDict,等待排序
|
||||
if v.persistable:
|
||||
tensor = np.array(fluid.global_scope().find_var(v.name).get_tensor())
|
||||
data = tensor.flatten().tolist()
|
||||
paramValuesDict[v.name] = data
|
||||
|
||||
# shape推断校正
|
||||
feed_target_names = result[1]
|
||||
fetch_targets = result[2]
|
||||
# 获取输入shape
|
||||
feedData = {}
|
||||
feeded_vars = [program.global_block().vars[varname] for varname in feed_target_names]
|
||||
for feedItem in feeded_vars:
|
||||
curShape = feedItem.shape
|
||||
feedName = feedItem.name
|
||||
feedData[feedName] = np.full(excludeNegativeShape(curShape), 1.0, "float32")
|
||||
|
||||
for v in program.list_vars():
|
||||
if not v.persistable:
|
||||
v.persistable = True
|
||||
exe.run(program, feed=feedData, fetch_list=fetch_targets, return_numpy=False)
|
||||
|
||||
for varKey in varInfoDict:
|
||||
var = fluid.global_scope().find_var(varKey)
|
||||
varData = np.array(var.get_tensor())
|
||||
varShape = list(varData.shape)
|
||||
varInfoDict[varKey]['shape'] = validateShape(varShape, varKey)
|
||||
|
||||
# vars追加
|
||||
vars = modelInfo['vars']
|
||||
for appendedVar in appendedVarList:
|
||||
appendedName = appendedVar['name']
|
||||
newName = appendedVar['new']
|
||||
for curVarKey in varInfoDict:
|
||||
if curVarKey == appendedName:
|
||||
newVar = copy.deepcopy(varInfoDict[curVarKey])
|
||||
varInfoDict[newName] = newVar
|
||||
break
|
||||
# 对var信息dict,按照key(var名)进行字母顺序排序
|
||||
varInfoOrderDict = sortDict(varInfoDict)
|
||||
# 将var信息按照顺序,添加到model info的vars中
|
||||
for key, value in varInfoOrderDict.items():
|
||||
value["name"] = key
|
||||
modelInfo["vars"][key] = value
|
||||
print("Organizing model variables info successfully.")
|
||||
|
||||
def organizeModelOpInfo():
|
||||
""" 组织模型OP结构信息 """
|
||||
print("Organizing model operators info...")
|
||||
ops = program.current_block().ops
|
||||
feedOutputName = None
|
||||
index = 0
|
||||
for op in ops:
|
||||
opInfo = {}
|
||||
|
||||
# 获取OP type,需要映射到PaddleJS的名字
|
||||
opInfo["type"] = mapToPaddleJSTypeName(op.type)
|
||||
|
||||
opInputs = op.input_names
|
||||
opOutputs = op.output_names
|
||||
|
||||
# 获取OP input
|
||||
inputs = {}
|
||||
for name in opInputs:
|
||||
value = op.input(name)
|
||||
if len(value) <= 0:
|
||||
continue
|
||||
if value[0] == feedOutputName:
|
||||
# FIXME:workaround,PaddleJSfeed 输入必须是image,且为单输入,这里修改feed后面的OP的input为image,建立前后关联
|
||||
inputs[name] = ["image"]
|
||||
else:
|
||||
inputs[name] = value
|
||||
opInfo["inputs"] = inputs
|
||||
|
||||
# 获取OP output
|
||||
outputs = {}
|
||||
# 将outputs转换为数组
|
||||
if op.type == 'density_prior_box' or op.type == 'prior_box' or op.type == 'box_coder':
|
||||
outputs['Out'] = []
|
||||
for name in opOutputs:
|
||||
value = op.output(name)
|
||||
if len(value) <= 0:
|
||||
continue
|
||||
outputs['Out'].append(value[0])
|
||||
else:
|
||||
for name in opOutputs:
|
||||
value = op.output(name)
|
||||
if len(value) <= 0:
|
||||
continue
|
||||
if op.type == "feed":
|
||||
# FIXME:workaround,PaddleJSfeed 输入必须是image,且为单输入,这里保存原始的输出名,以便映射
|
||||
feedOutputName = value[0]
|
||||
outputs[name] = ["image"]
|
||||
else:
|
||||
outputs[name] = value
|
||||
|
||||
opInfo["outputs"] = outputs
|
||||
|
||||
# 收敛outputs[name]
|
||||
if "Output" in opInfo["outputs"]:
|
||||
opInfo["outputs"]["Out"] = opInfo["outputs"]["Output"]
|
||||
del opInfo["outputs"]["Output"]
|
||||
|
||||
elif "Y" in opInfo["outputs"]:
|
||||
opInfo["outputs"]["Out"] = opInfo["outputs"]["Y"]
|
||||
del opInfo["outputs"]["Y"]
|
||||
|
||||
if "Out" not in opInfo["outputs"]:
|
||||
print("\033[31moutputs[name] not exist Out.\033[0m")
|
||||
sys.exit(1)
|
||||
|
||||
# 有的模型如人脸关键点,会出现两个算子合并的情况,如lmk_demo,elementwise_add后接了relu算子,relu的输入输出相等,兼容一下
|
||||
# inputs与outputs只有一个,名称相等,则,输入加后缀,改上一层算子。
|
||||
if 'X' in inputs and 'Out' in outputs:
|
||||
curInputs = inputs['X']
|
||||
curOutputs = outputs['Out']
|
||||
if len(curInputs) == 1 and len(curOutputs) == 1 and curInputs[0] == curOutputs[0] and index > 1:
|
||||
originName = curInputs[0]
|
||||
changedName = inputs['X'][0] = curInputs[0] = originName + '_changed'
|
||||
opInfo["inputs"]['X'] = curInputs
|
||||
# 获取上一层算子
|
||||
prevOpOutputs = modelInfo["ops"][index - 1]['outputs']
|
||||
for name in prevOpOutputs:
|
||||
values = prevOpOutputs[name]
|
||||
for i, curName in enumerate(values):
|
||||
if (curName == originName):
|
||||
modelInfo["ops"][index - 1]['outputs'][name][i] = changedName
|
||||
appendedVarList.append({'name': originName, 'new': changedName})
|
||||
|
||||
# 获取OP attribute
|
||||
attrs = {}
|
||||
for name in op.attr_names:
|
||||
# 过滤不需要的参数
|
||||
if name in needFilterAttributes:
|
||||
continue
|
||||
value = op.attr(name)
|
||||
attrs[name] = value
|
||||
opInfo["attrs"] = attrs
|
||||
|
||||
if (op.type == 'rnn'):
|
||||
global rnnList
|
||||
rnnList.append(index)
|
||||
|
||||
# multiclass_nms 单独处理
|
||||
if (op.type.startswith('multiclass_nms')):
|
||||
opInfo["type"] = 'multiclass_nms'
|
||||
postOps.append(opInfo)
|
||||
else:
|
||||
# 存入modelInfo
|
||||
modelInfo["ops"].append(opInfo)
|
||||
logModel("[OP index:" + str(index) + " type:" + op.type + "]")
|
||||
jsonDumpsIndentStr = json.dumps(opInfo, indent=2)
|
||||
logModel(jsonDumpsIndentStr)
|
||||
logModel("")
|
||||
index += 1
|
||||
print("Organizing model operators info successfully.")
|
||||
|
||||
def addChunkNumToJson(paramValueList):
|
||||
totalParamValuesCount = len(paramValueList)
|
||||
countPerSlice = int(sliceDataSize * 1024 / 4)
|
||||
count = totalParamValuesCount / countPerSlice
|
||||
modelInfo["chunkNum"] = math.ceil(count)
|
||||
print("Model chunkNum set successfully.")
|
||||
|
||||
def appendConnectOp(fetch_targets):
|
||||
targets = []
|
||||
inputNames = []
|
||||
totalShape = 0
|
||||
|
||||
# 从fetch_targets中提取输出算子信息
|
||||
for target in fetch_targets:
|
||||
name = target.name
|
||||
curVar = fluid.global_scope().find_var(name)
|
||||
curTensor = np.array(curVar.get_tensor())
|
||||
shape = list(curTensor.shape)
|
||||
totalShape += reduce(lambda x, y: x * y, shape)
|
||||
targets.append({'name': name, 'shape': excludeNegativeShape(shape)})
|
||||
inputNames.append(name)
|
||||
|
||||
# 构造connect算子
|
||||
op = {
|
||||
'attrs': {},
|
||||
'inputs': {'X': inputNames},
|
||||
'outputs': {'Out': ['connect_result']},
|
||||
'type': 'connect'
|
||||
}
|
||||
# 构造输出var
|
||||
outputVar = {'name': 'connect_result', 'shape': splitLargeNum(totalShape)}
|
||||
|
||||
ops = modelInfo['ops']
|
||||
vars = modelInfo['vars']
|
||||
|
||||
# 收集要删除的算子index
|
||||
delList = []
|
||||
for index, item in enumerate(ops):
|
||||
if item['type'] == 'fetch':
|
||||
delList.append(index)
|
||||
|
||||
# 去除fetch算子
|
||||
delCount = 0
|
||||
for delIndex in delList:
|
||||
del ops[delIndex - delCount]
|
||||
delCount += 1
|
||||
|
||||
fetchOp = {
|
||||
"attrs": {},
|
||||
"inputs": {
|
||||
"X": [
|
||||
"connect_result"
|
||||
]
|
||||
},
|
||||
"outputs": {
|
||||
"Out": [
|
||||
"fetch"
|
||||
]
|
||||
},
|
||||
"type": "fetch"
|
||||
}
|
||||
ops.append(op)
|
||||
ops.append(fetchOp)
|
||||
|
||||
vars['connect_result'] = outputVar
|
||||
modelInfo['multiOutputs'] = targets
|
||||
return targets
|
||||
|
||||
def genModelFeedShape(feed):
|
||||
if len(feed) != 1:
|
||||
print("\033[33;1mModel has more than one input feed.\033[0m")
|
||||
return
|
||||
|
||||
originFeedShape = modelInfo['vars'][feed[0]]['shape']
|
||||
feedShape = {}
|
||||
if len(originFeedShape) == 3:
|
||||
feedShape['fc'] = originFeedShape[0]
|
||||
feedShape['fh'] = originFeedShape[1]
|
||||
feedShape['fw'] = originFeedShape[2]
|
||||
elif len(originFeedShape) == 4:
|
||||
feedShape['fc'] = originFeedShape[1]
|
||||
feedShape['fh'] = originFeedShape[2]
|
||||
feedShape['fw'] = originFeedShape[3]
|
||||
elif len(originFeedShape) == 2:
|
||||
feedShape['fh'] = originFeedShape[0]
|
||||
feedShape['fw'] = originFeedShape[1]
|
||||
else:
|
||||
print("\033[33;1mFeedShape length is " + str(len(originFeedShape)) + ".\033[0m")
|
||||
return
|
||||
|
||||
modelInfo['feedShape'] = feedShape
|
||||
print("\033[32mModel FeedShape set successfully.\033[0m")
|
||||
|
||||
def convertToPaddleJSModel(modelDir, modelName, paramsName, outputDir, useGPUOpt):
|
||||
""" 转换fluid modle为paddleJS model """
|
||||
|
||||
|
||||
#In PaddlePaddle 2.x, we turn on dynamic graph mode by default, and 'load_inference_model()' is only supported in static graph mode. So call 'paddle.enable_static()' before this api to enter static graph mode.
|
||||
paddle.enable_static()
|
||||
|
||||
# 初始化fluid运行环境和配置
|
||||
global exe
|
||||
exe = fluid.Executor(fluid.CPUPlace())
|
||||
result = fluid.io.load_inference_model(dirname=modelDir, executor=exe, model_filename=modelName, params_filename=paramsName)
|
||||
global program
|
||||
program = result[0]
|
||||
fetch_targets = result[2]
|
||||
feed_target_names = result[1]
|
||||
|
||||
# 获取program中所有的op,按op顺序加入到model info
|
||||
organizeModelOpInfo()
|
||||
|
||||
# 获取program中所有的var,按照字母顺序加入到model info,同时读取参数数值
|
||||
organizeModelVariableInfo(result)
|
||||
|
||||
# 拆分rnn op
|
||||
if len(rnnList):
|
||||
for index in rnnList:
|
||||
rnn.splice_rnn_op(modelInfo, index)
|
||||
|
||||
if useGPUOpt:
|
||||
# 算子融合
|
||||
modelInfo['gpuOpt'] = True
|
||||
opListFuse(modelInfo['ops'])
|
||||
|
||||
# 对多输出模型追加connect算子
|
||||
if len(fetch_targets) > 1:
|
||||
appendConnectOp(fetch_targets)
|
||||
|
||||
if (postOps and len(postOps) > 0):
|
||||
for op in postOps:
|
||||
if (op['type'].startswith('multiclass_nms')):
|
||||
inputNames = []
|
||||
for input, value in op['inputs'].items():
|
||||
if len(value) <= 0:
|
||||
continue
|
||||
cur = ObjDict()
|
||||
cur.name = value[0]
|
||||
inputNames.append(cur)
|
||||
targets = appendConnectOp(inputNames)
|
||||
# op['inputs'] = targets
|
||||
keys = op['inputs'].keys()
|
||||
for i, val in enumerate(keys):
|
||||
op['inputs'][val] = targets[i]
|
||||
|
||||
|
||||
modelInfo['postOps'] = postOps
|
||||
|
||||
# 对参数数值dict,按照key(参数名)进行字母顺序排序,并组合到一起
|
||||
paramValues = reorderParamsValue()
|
||||
|
||||
# model.json 设置分片参数
|
||||
addChunkNumToJson(paramValues)
|
||||
|
||||
# model.json 设置 feedShape 输入 shape 信息
|
||||
genModelFeedShape(feed_target_names)
|
||||
|
||||
# 去掉无意义的 tensor 和对应 op
|
||||
pruningNoSenseTensor(modelInfo)
|
||||
|
||||
# 导出模型文件到json
|
||||
dumpModelToJsonFile(outputDir)
|
||||
|
||||
# 导出分片参数文件
|
||||
sliceDataToBinaryFile(paramValues, outputDir)
|
||||
|
||||
def main():
|
||||
|
||||
global sliceDataSize
|
||||
global enableLogModelInfo
|
||||
|
||||
try:
|
||||
p = argparse.ArgumentParser(description='模型转换参数解析')
|
||||
p.add_argument('--inputDir', help='fluid模型所在目录。当且仅当使用分片参数文件时使用该参数。将过滤modelPath和paramsPath参数,且模型文件名必须为`__model__`', required=False)
|
||||
p.add_argument('--modelPath', help='fluid模型文件所在路径,使用合并参数文件时使用该参数', required=False)
|
||||
p.add_argument('--paramPath', help='fluid参数文件所在路径,使用合并参数文件时使用该参数', required=False)
|
||||
p.add_argument("--outputDir", help='paddleJS模型输出路径,必要参数', required=True)
|
||||
p.add_argument("--logModelInfo", type=int, default=0, help='是否输出模型结构信息,非必要参数,0为不输出,1为输出,默认不输出', required=False)
|
||||
p.add_argument("--sliceDataSize", type=int, default=4096, help='分片输出参数文件时,每片文件的大小,单位:KB,非必要参数,默认4096KB', required=False)
|
||||
p.add_argument('--useGPUOpt', help='转换模型是否执行GPU优化方法', required=False)
|
||||
|
||||
args = p.parse_args()
|
||||
modelDir = args.inputDir
|
||||
modelPath = args.modelPath
|
||||
paramPath = args.paramPath
|
||||
useGPUOpt = args.useGPUOpt
|
||||
|
||||
if not modelDir:
|
||||
modelDir, modelName = os.path.split(modelPath)
|
||||
paramDir, paramsName = os.path.split(paramPath)
|
||||
if paramDir != modelDir:
|
||||
print("\033[31mModel and param file should be put in a same directory!\033[0m")
|
||||
raise Exception()
|
||||
outputDir = args.outputDir
|
||||
sliceDataSize = args.sliceDataSize
|
||||
|
||||
if args.logModelInfo == 1:
|
||||
enableLogModelInfo = True
|
||||
|
||||
convertToPaddleJSModel(modelDir, modelName, paramsName, outputDir, useGPUOpt)
|
||||
|
||||
except Exception as identifier:
|
||||
print("\033[31mA fetal error occured. Failed to convert model.\033[0m")
|
||||
print(traceback.format_exc())
|
||||
sys.exit(1)
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
@@ -1,165 +0,0 @@
|
||||
#!/usr/bin/env python
|
||||
# -*- coding: UTF-8 -*-
|
||||
|
||||
import sys
|
||||
import os
|
||||
import argparse
|
||||
import shutil
|
||||
import stat
|
||||
import traceback
|
||||
import copy
|
||||
|
||||
|
||||
def cleanTempModel(optimizedModelTempDir):
|
||||
""" 清理opt优化完的临时模型文件 """
|
||||
if os.path.exists(optimizedModelTempDir):
|
||||
print("Cleaning optimized temporary model...")
|
||||
shutil.rmtree(optimizedModelTempDir, onerror=grantWritePermission)
|
||||
|
||||
|
||||
def grantWritePermission(func, path, execinfo):
|
||||
""" 文件授权 """
|
||||
os.chmod(path, stat.S_IWRITE)
|
||||
func(path)
|
||||
|
||||
|
||||
def main():
|
||||
"""
|
||||
Example:
|
||||
'python convertToPaddleJSModel.py --modelPath=../infer_model/MobileNetV2/model --paramPath=../infer_model/MobileNetV2/params --outputDir=../jsmodel'
|
||||
"""
|
||||
try:
|
||||
p = argparse.ArgumentParser(description='转化为PaddleJS模型参数解析')
|
||||
p.add_argument(
|
||||
'--inputDir',
|
||||
help='fluid模型所在目录。当且仅当使用分片参数文件时使用该参数。将过滤modelPath和paramsPath参数,且模型文件名必须为`__model__`',
|
||||
required=False)
|
||||
p.add_argument(
|
||||
'--modelPath', help='fluid模型文件所在路径,使用合并参数文件时使用该参数', required=False)
|
||||
p.add_argument(
|
||||
'--paramPath', help='fluid参数文件所在路径,使用合并参数文件时使用该参数', required=False)
|
||||
p.add_argument(
|
||||
"--outputDir", help='paddleJS模型输出路径,必要参数', required=True)
|
||||
p.add_argument(
|
||||
"--disableOptimize",
|
||||
type=int,
|
||||
default=0,
|
||||
help='是否关闭模型优化,非必要参数,1为关闭优化,0为开启优化,默认开启优化',
|
||||
required=False)
|
||||
p.add_argument(
|
||||
"--logModelInfo",
|
||||
type=int,
|
||||
default=0,
|
||||
help='是否输出模型结构信息,非必要参数,0为不输出,1为输出,默认不输出',
|
||||
required=False)
|
||||
p.add_argument(
|
||||
"--sliceDataSize",
|
||||
type=int,
|
||||
default=4096,
|
||||
help='分片输出参数文件时,每片文件的大小,单位:KB,非必要参数,默认4096KB',
|
||||
required=False)
|
||||
p.add_argument('--useGPUOpt', help='转换模型是否执行GPU优化方法', required=False)
|
||||
|
||||
args = p.parse_args()
|
||||
# 获取当前用户使用的 python 解释器 bin 位置
|
||||
pythonCmd = sys.executable
|
||||
|
||||
# TODO: 由于Paddle Lite和PaddlePaddle存在包冲突,因此将整个模型转换工具拆成两个python文件,由一个入口python文件通过命令行调用
|
||||
# 区分本地执行和命令行执行
|
||||
if os.path.exists("optimizeModel.py"):
|
||||
optimizeCmd = pythonCmd + " optimizeModel.py"
|
||||
else:
|
||||
optimizeCmd = "pdjsOptimizeModel"
|
||||
|
||||
if os.path.exists("convertModel.py"):
|
||||
convertCmd = pythonCmd + " convertModel.py"
|
||||
else:
|
||||
convertCmd = "pdjsConvertModel"
|
||||
|
||||
inputDir = args.inputDir
|
||||
modelPath = args.modelPath
|
||||
paramPath = args.paramPath
|
||||
outputDir = args.outputDir
|
||||
disableOptimization = args.disableOptimize
|
||||
args.disableOptimize = None
|
||||
enableLogModelInfo = args.logModelInfo
|
||||
sliceDataSize = args.sliceDataSize
|
||||
|
||||
optArgs = copy.deepcopy(args)
|
||||
|
||||
enableOptimization = 1 - disableOptimization
|
||||
optimizedModelTempDir = None
|
||||
if enableOptimization == 1:
|
||||
optimizedModelTempDir = os.path.join(outputDir, "optimize")
|
||||
optArgs.outputDir = optimizedModelTempDir
|
||||
if inputDir:
|
||||
args.inputDir = optimizedModelTempDir
|
||||
else:
|
||||
args.modelPath = os.path.join(optimizedModelTempDir, "model")
|
||||
args.paramPath = os.path.join(optimizedModelTempDir, "params")
|
||||
|
||||
print("============Convert Model Args=============")
|
||||
if inputDir:
|
||||
print("inputDir: " + inputDir)
|
||||
else:
|
||||
print("modelPath: " + modelPath)
|
||||
print("paramPath: " + paramPath)
|
||||
print("outputDir: " + outputDir)
|
||||
print("enableOptimizeModel: " + str(enableOptimization))
|
||||
print("enableLogModelInfo: " + str(enableLogModelInfo))
|
||||
print("sliceDataSize:" + str(sliceDataSize))
|
||||
|
||||
print("Starting...")
|
||||
if enableOptimization:
|
||||
print("Optimizing model...")
|
||||
for param in ["inputDir", "modelPath", "paramPath", "outputDir"]:
|
||||
if optArgs.__dict__[param]:
|
||||
# 用""框起命令参数值,解决路径中的空格问题
|
||||
optimizeCmd += " --" + param + "=" + '"' + str(
|
||||
optArgs.__dict__[param]) + '"'
|
||||
os.system(optimizeCmd)
|
||||
try:
|
||||
os.listdir(optimizedModelTempDir)
|
||||
except Exception as identifier:
|
||||
print("\n\033[31mOptimizing model failed.\033[0m")
|
||||
# restore inputDir or modelPath paramPath from optimize
|
||||
if inputDir:
|
||||
args.inputDir = inputDir
|
||||
else:
|
||||
args.modelPath = modelPath
|
||||
args.paramPath = paramPath
|
||||
else:
|
||||
print("\n\033[32mOptimizing model successfully.\033[0m")
|
||||
else:
|
||||
print(
|
||||
"\033[33mYou choosed not to optimize model, consequently, optimizing model is skiped.\033[0m"
|
||||
)
|
||||
|
||||
print("\nConverting model...")
|
||||
for param in args.__dict__:
|
||||
if args.__dict__[param]:
|
||||
# 用""框起参数,解决路径中的空格问题
|
||||
convertCmd += " --" + param + "=" + '"' + str(args.__dict__[
|
||||
param]) + '"'
|
||||
os.system(convertCmd)
|
||||
try:
|
||||
file = os.listdir(outputDir)
|
||||
if len(file) == 0:
|
||||
raise Exception
|
||||
except Exception as identifier:
|
||||
print("\033[31m============ALL DONE============\033[0m")
|
||||
else:
|
||||
if enableOptimization:
|
||||
cleanTempModel(optimizedModelTempDir)
|
||||
print("Temporary files has been deleted successfully.")
|
||||
print("\033[32mConverting model successfully.\033[0m")
|
||||
print("\033[32m============ALL DONE============\033[0m")
|
||||
|
||||
except Exception as identifier:
|
||||
print("\033[31mA fetal error occured. Failed to convert model.\033[0m")
|
||||
print(traceback.format_exc())
|
||||
exit(1)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
@@ -1,69 +0,0 @@
|
||||
#!/usr/bin/env python
|
||||
# -*- coding: UTF-8 -*-
|
||||
|
||||
|
||||
def opListFuse(ops):
|
||||
""" 算子融合 """
|
||||
fuseOpList = [
|
||||
'relu', 'relu6', 'leaky_relu', 'scale', 'sigmoid', 'hard_sigmoid',
|
||||
'pow', 'sqrt', 'tanh', 'hard_swish', 'dropout'
|
||||
]
|
||||
|
||||
# 判断op是否为单节点
|
||||
def opExistSingleNode(opName):
|
||||
name = opName
|
||||
if name:
|
||||
nodeNum = 0
|
||||
for i in range(len(ops)):
|
||||
op = ops[i]
|
||||
if 'X' not in op['inputs']:
|
||||
continue
|
||||
|
||||
inputName = op['inputs']['X']
|
||||
for x in inputName:
|
||||
if x == name:
|
||||
nodeNum = nodeNum + 1
|
||||
|
||||
return True if nodeNum == 1 else False
|
||||
|
||||
else:
|
||||
return False
|
||||
|
||||
for index in reversed(range(len(ops))):
|
||||
if index > 0:
|
||||
op = ops[index]
|
||||
|
||||
# 兼容 Paddle Lite 算子融合字段
|
||||
if 'act_type' in op['attrs']:
|
||||
name = op['attrs']['act_type']
|
||||
op['attrs']['fuse_opt'] = {}
|
||||
op['attrs']['fuse_opt'][name] = {}
|
||||
|
||||
if name == 'hard_swish':
|
||||
op['attrs']['fuse_opt'][name]['offset'] = op['attrs'][
|
||||
'hard_swish_offset']
|
||||
op['attrs']['fuse_opt'][name]['scale'] = op['attrs'][
|
||||
'hard_swish_scale']
|
||||
op['attrs']['fuse_opt'][name]['threshold'] = op['attrs'][
|
||||
'hard_swish_threshold']
|
||||
|
||||
if name == 'relu6':
|
||||
op['attrs']['fuse_opt'][name]['threshold'] = op['attrs'][
|
||||
'fuse_brelu_threshold']
|
||||
|
||||
for fuse in fuseOpList:
|
||||
if op['type'] == fuse:
|
||||
prevOp = ops[index - 1]
|
||||
|
||||
if opExistSingleNode(prevOp['outputs']['Out'][0]) and len(
|
||||
prevOp['outputs']['Out']) == 1:
|
||||
prevOp['attrs']['fuse_opt'] = {}
|
||||
if 'fuse_opt' in op['attrs']:
|
||||
prevOp['attrs']['fuse_opt'] = op['attrs'][
|
||||
'fuse_opt']
|
||||
del op['attrs']['fuse_opt']
|
||||
|
||||
prevOp['attrs']['fuse_opt'][fuse] = op['attrs']
|
||||
prevOp['outputs']['Out'] = op['outputs']['Out']
|
||||
|
||||
del ops[index]
|
||||
@@ -1,53 +0,0 @@
|
||||
#!/usr/bin/env python
|
||||
# -*- coding: UTF-8 -*-
|
||||
|
||||
import collections
|
||||
import argparse
|
||||
import traceback
|
||||
from paddlejslite import lite
|
||||
import pkg_resources
|
||||
from packaging import version
|
||||
|
||||
lite_version = pkg_resources.get_distribution("paddlelite").version
|
||||
|
||||
def optimizeModel(inputDir, modelPath, paramPath, outputDir):
|
||||
""" 使用opt python接口执行模型优化 """
|
||||
opt = lite.Opt()
|
||||
if inputDir:
|
||||
# 分片参数文件优化
|
||||
opt.set_model_dir(inputDir)
|
||||
else:
|
||||
# 合并参数文件优化
|
||||
opt.set_model_file(modelPath)
|
||||
opt.set_param_file(paramPath)
|
||||
|
||||
opt.set_valid_places("arm")
|
||||
opt.set_model_type("protobuf")
|
||||
opt.set_optimize_out(outputDir)
|
||||
opt.run()
|
||||
|
||||
|
||||
def main():
|
||||
try:
|
||||
p = argparse.ArgumentParser('模型优化参数解析')
|
||||
p.add_argument('--inputDir', help='fluid模型所在目录。当且仅当使用分片参数文件时使用该参数。将过滤modelPath和paramsPath参数,且模型文件名必须为`__model__`', required=False)
|
||||
p.add_argument('--modelPath', help='fluid模型文件所在路径,使用合并参数文件时使用该参数', required=False)
|
||||
p.add_argument('--paramPath', help='fluid参数文件所在路径,使用合并参数文件时使用该参数', required=False)
|
||||
p.add_argument("--outputDir", help='优化后fluid模型目录,必要参数', required=True)
|
||||
|
||||
args = p.parse_args()
|
||||
inputDir = args.inputDir
|
||||
modelPath = args.modelPath
|
||||
paramPath = args.paramPath
|
||||
outputDir = args.outputDir
|
||||
|
||||
optimizeModel(inputDir, modelPath, paramPath, outputDir)
|
||||
|
||||
except Exception as identifier:
|
||||
print("\033[31mA fetal error occured. Failed to optimize model.\033[0m")
|
||||
print(traceback.format_exc())
|
||||
pass
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
@@ -1,64 +0,0 @@
|
||||
#!/usr/bin/env python
|
||||
# -*- coding: UTF-8 -*-
|
||||
|
||||
|
||||
# pruning op tensor and relatad op with no sense, like ShapeTensor and OutSize
|
||||
def pruningNoSenseTensor(model):
|
||||
global ops
|
||||
ops = model["ops"]
|
||||
global vars
|
||||
vars = model["vars"]
|
||||
for op in ops[:]:
|
||||
shapeTensor = op["inputs"].get("ShapeTensor")
|
||||
outSizeTensor = op["inputs"].get("OutSize")
|
||||
|
||||
noSenseTensor = shapeTensor or outSizeTensor
|
||||
if not noSenseTensor:
|
||||
continue
|
||||
|
||||
print(noSenseTensor)
|
||||
if shapeTensor:
|
||||
del op["inputs"]["ShapeTensor"]
|
||||
if outSizeTensor:
|
||||
del op["inputs"]["OutSize"]
|
||||
|
||||
for tensorId in noSenseTensor:
|
||||
delLeafOpWithoutChildren(tensorId)
|
||||
|
||||
|
||||
# delete leaf op which has no child
|
||||
def delLeafOpWithoutChildren(tensorId):
|
||||
# judge if there is an op which used the tensor
|
||||
for op in ops[:]:
|
||||
inputsTensor = op["inputs"]
|
||||
input = inputsTensor.get("Input") or inputsTensor.get("X")
|
||||
|
||||
if input and (tensorId in input):
|
||||
return
|
||||
|
||||
op = getOpByOutputTensor(tensorId)
|
||||
if not op:
|
||||
return
|
||||
|
||||
# del op
|
||||
ops.remove(op)
|
||||
# del vars
|
||||
del vars[tensorId]
|
||||
|
||||
# upward recursion
|
||||
delOpinputsTensor = op["inputs"]
|
||||
input = delOpinputsTensor.get("Input") or delOpinputsTensor.get("X")
|
||||
if not input:
|
||||
return
|
||||
for inputTensorId in input:
|
||||
delLeafOpWithoutChildren(inputTensorId)
|
||||
|
||||
|
||||
|
||||
# find op by output tensor id
|
||||
def getOpByOutputTensor(tensorId):
|
||||
for op in ops[:]:
|
||||
outputTensor = op["outputs"]
|
||||
out = outputTensor.get("Out") or outputTensor.get("Output") or outputTensor.get("Y")
|
||||
if out[0] == tensorId:
|
||||
return op
|
||||
@@ -1,35 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
set -o errexit
|
||||
set -o nounset
|
||||
shopt -s extglob
|
||||
|
||||
|
||||
|
||||
if [ $# == 0 ];then
|
||||
ENV="test"
|
||||
else
|
||||
ENV=$1
|
||||
fi
|
||||
|
||||
# clear dist
|
||||
rm -rf dist
|
||||
|
||||
# build
|
||||
python3 setup.py sdist bdist_wheel
|
||||
|
||||
|
||||
# publish
|
||||
|
||||
if [ ${ENV} == 'production' ];then
|
||||
echo "publish to https://pypi.org"
|
||||
python3 -m twine upload dist/*
|
||||
else
|
||||
echo "publish to https://test.pypi.org/"
|
||||
python3 -m twine upload --repository-url https://test.pypi.org/legacy/ dist/*
|
||||
fi
|
||||
|
||||
# del egg-info and build
|
||||
rm -rf paddlejsconverter.egg-info
|
||||
rm -rf build
|
||||
|
||||
@@ -1,273 +0,0 @@
|
||||
#!/usr/bin/env python
|
||||
# -*- coding: UTF-8 -*-
|
||||
|
||||
def splice_rnn_op(model_info, rnn_index):
|
||||
|
||||
global input_shape
|
||||
global weight_0_shape
|
||||
global weight_1_shape
|
||||
global rnn_input_name
|
||||
ops = model_info['ops']
|
||||
vars = model_info['vars']
|
||||
op = ops[rnn_index]
|
||||
|
||||
rnn_input_name = op['inputs']['Input'][0]
|
||||
rnn_output_name = op['outputs']['Out'][0]
|
||||
|
||||
is_bidirec = 2 if op['attrs']['is_bidirec'] else 1
|
||||
num_layers = op['attrs']['num_layers']
|
||||
hidden_size = op['attrs']['hidden_size']
|
||||
layer_num = num_layers * is_bidirec
|
||||
# concat input最大值
|
||||
max_concat_num = 15
|
||||
|
||||
def concat_mul(index, list, num):
|
||||
global rnn_input_name
|
||||
end = len(list)
|
||||
|
||||
if end < max_concat_num:
|
||||
concat_output_name = 'lstm_' + str(index - 1) + '_' + str(num) + '.tmp_concat'
|
||||
# 非最后一层遍历,将concat作为下一层输入
|
||||
if index < is_bidirec * num_layers - 1:
|
||||
rnn_input_name = concat_output_name
|
||||
|
||||
# 最后一层遍历,将rnn_output_name赋给最后一个concat
|
||||
else:
|
||||
concat_output_name = rnn_output_name
|
||||
|
||||
concat_op = {
|
||||
'attrs': {
|
||||
'axis': 0
|
||||
},
|
||||
'inputs': {
|
||||
'X': []
|
||||
},
|
||||
'outputs': {'Out': [concat_output_name]},
|
||||
'type': 'concat'
|
||||
}
|
||||
|
||||
concat_output_shape = 0
|
||||
|
||||
for x in range(0, end):
|
||||
x_input_name = 'lstm_' + str(index - 1) + '_' + str(list[x]) + '.tmp_concat'
|
||||
concat_op['inputs']['X'].append(x_input_name)
|
||||
concat_output_shape += vars[x_input_name]['shape'][0]
|
||||
|
||||
concat_var = {
|
||||
'name': concat_output_name,
|
||||
'persistable': False,
|
||||
'shape': [concat_output_shape, 1, weight_1_shape[1] * 2]
|
||||
}
|
||||
|
||||
ops.append(concat_op)
|
||||
|
||||
if index < is_bidirec * num_layers - 1:
|
||||
vars[concat_output_name] = concat_var
|
||||
return
|
||||
|
||||
# concat新列表
|
||||
new_list = []
|
||||
|
||||
for i in range(0, end, max_concat_num):
|
||||
if i + max_concat_num > end:
|
||||
for n in range(i, end):
|
||||
new_list.append(list[n])
|
||||
break
|
||||
|
||||
concat_output_name = 'lstm_' + str(index - 1) + '_' + str(num) + '.tmp_concat'
|
||||
# concat_list长度为max_concat_num && 最后一层遍历,将rnn_output_name赋给最后一个concat
|
||||
if end == max_concat_num and index == is_bidirec * num_layers - 1:
|
||||
concat_output_name = rnn_output_name
|
||||
|
||||
concat_op = {
|
||||
'attrs': {
|
||||
'axis': 0
|
||||
},
|
||||
'inputs': {
|
||||
'X': []
|
||||
},
|
||||
'outputs': {'Out': [concat_output_name]},
|
||||
'type': 'concat'
|
||||
}
|
||||
|
||||
concat_output_shape = 0
|
||||
|
||||
for x in range(0, max_concat_num):
|
||||
x_input_name = 'lstm_' + str(index - 1) + '_' + str(list[i + x]) + '.tmp_concat'
|
||||
concat_op['inputs']['X'].append(x_input_name)
|
||||
concat_output_shape += vars[x_input_name]['shape'][0]
|
||||
|
||||
concat_var = {
|
||||
'name': concat_output_name,
|
||||
'persistable': False,
|
||||
'shape': [concat_output_shape, 1, weight_1_shape[1] * 2]
|
||||
}
|
||||
|
||||
ops.append(concat_op)
|
||||
vars[concat_output_name] = concat_var
|
||||
new_list.append(num)
|
||||
|
||||
# 若concat_list长度为max_concat_num,在下一次递归时直接修改rnn_input_name,结束递归,num无需+1
|
||||
if end != max_concat_num:
|
||||
num += 1
|
||||
|
||||
concat_mul(index, new_list, num)
|
||||
|
||||
for index in range(layer_num):
|
||||
last_hidden = op['inputs']['PreState'][0]
|
||||
last_cell = op['inputs']['PreState'][1]
|
||||
weight_list_0 = op['inputs']['WeightList'][index * 2]
|
||||
weight_list_1 = op['inputs']['WeightList'][index * 2 + 1]
|
||||
weight_list_2 = op['inputs']['WeightList'][(index + num_layers * is_bidirec) * 2]
|
||||
weight_list_3 = op['inputs']['WeightList'][(index + num_layers * is_bidirec) * 2 + 1]
|
||||
output_name = 'rnn_origin_' + str(index)
|
||||
input_shape = vars[rnn_input_name]['shape']
|
||||
batch = input_shape[0]
|
||||
|
||||
if vars[weight_list_0]:
|
||||
weight_0_shape = vars[weight_list_0]['shape']
|
||||
|
||||
if vars[weight_list_1]:
|
||||
weight_1_shape = vars[weight_list_1]['shape']
|
||||
|
||||
if batch == 0:
|
||||
continue
|
||||
|
||||
origin_op = {
|
||||
'attrs': {
|
||||
'state_axis': index
|
||||
},
|
||||
'inputs': {
|
||||
'Input': [rnn_input_name],
|
||||
'PreState': [last_hidden],
|
||||
'WeightList': [
|
||||
weight_list_0,
|
||||
weight_list_1,
|
||||
weight_list_2,
|
||||
weight_list_3
|
||||
]
|
||||
},
|
||||
'outputs': {'Out': [output_name]},
|
||||
'type': 'rnn_origin'
|
||||
}
|
||||
|
||||
origin_var = {
|
||||
'name': output_name,
|
||||
'persistable': False,
|
||||
'shape': [input_shape[0], input_shape[1], weight_0_shape[0]]
|
||||
}
|
||||
ops.append(origin_op)
|
||||
vars[output_name] = origin_var
|
||||
|
||||
for bat in range(batch):
|
||||
matmul_output_name = 'lstm_' + str(index) + '_' + str(bat) + '.tmp_matmul'
|
||||
cell_output_name = 'lstm_' + str(index) + '_' + str(bat) + '.tmp_c'
|
||||
hidden_output_name = 'lstm_' + str(index) + '_' + str(bat) + '.tmp_h'
|
||||
|
||||
matmul_op = {
|
||||
'attrs': {
|
||||
'input_axis': bat,
|
||||
'state_axis': index if bat == 0 else 0,
|
||||
'batch': batch,
|
||||
'reverse': False if index % 2 == 0 else True
|
||||
},
|
||||
'inputs': {
|
||||
'Input': [output_name],
|
||||
'PreState': [last_hidden],
|
||||
'WeightList': [weight_list_1]
|
||||
},
|
||||
'outputs': {'Out': [matmul_output_name]},
|
||||
'type': 'rnn_matmul'
|
||||
}
|
||||
|
||||
matmul_var = {
|
||||
'name': matmul_output_name,
|
||||
'persistable': False,
|
||||
'shape': [1, 1, weight_0_shape[0]]
|
||||
}
|
||||
|
||||
ops.append(matmul_op)
|
||||
vars[matmul_output_name] = matmul_var
|
||||
|
||||
cell_op = {
|
||||
'attrs': {
|
||||
'state_axis': index if bat == 0 else 0,
|
||||
'hidden_size': hidden_size
|
||||
},
|
||||
'inputs': {
|
||||
'X': [matmul_output_name],
|
||||
'Y': [last_cell]
|
||||
},
|
||||
'outputs': {'Out': [cell_output_name]},
|
||||
'type': 'rnn_cell'
|
||||
}
|
||||
|
||||
cell_var = {
|
||||
'name': cell_output_name,
|
||||
'persistable': False,
|
||||
'shape': [1, 1, weight_1_shape[1]]
|
||||
}
|
||||
|
||||
ops.append(cell_op)
|
||||
vars[cell_output_name] = cell_var
|
||||
|
||||
hidden_op = {
|
||||
'attrs': {
|
||||
'state_axis': index if bat == 0 else 0,
|
||||
'hidden_size': hidden_size
|
||||
},
|
||||
'inputs': {
|
||||
'X': [matmul_output_name],
|
||||
'Y': [last_cell]
|
||||
},
|
||||
'outputs': {'Out': [hidden_output_name]},
|
||||
'type': 'rnn_hidden'
|
||||
}
|
||||
|
||||
hidden_var = {
|
||||
'name': hidden_output_name,
|
||||
'persistable': False,
|
||||
'shape': [1, 1, weight_1_shape[1]]
|
||||
}
|
||||
|
||||
ops.append(hidden_op)
|
||||
vars[hidden_output_name] = hidden_var
|
||||
|
||||
last_hidden = hidden_output_name
|
||||
last_cell = cell_output_name
|
||||
|
||||
# concat
|
||||
if index % 2 == 1:
|
||||
|
||||
concat_list = []
|
||||
concat_num = 0
|
||||
# concat forword and backword
|
||||
for bat in range(batch):
|
||||
x_input_name_0 = 'lstm_' + str(index - 1) + '_' + str(bat) + '.tmp_h'
|
||||
x_input_name_1 = 'lstm_' + str(index) + '_' + str(batch - bat - 1) + '.tmp_h'
|
||||
concat_output_name = 'lstm_' + str(index - 1) + '_' + str(bat) + '.tmp_concat'
|
||||
concat_op = {
|
||||
'attrs': {
|
||||
'axis': 2
|
||||
},
|
||||
'inputs': {
|
||||
'X': [x_input_name_0, x_input_name_1]
|
||||
},
|
||||
'outputs': {'Out': [concat_output_name]},
|
||||
'type': 'concat'
|
||||
}
|
||||
|
||||
concat_var = {
|
||||
'name': concat_output_name,
|
||||
'persistable': False,
|
||||
'shape': [1, 1, weight_1_shape[1] * 2]
|
||||
}
|
||||
ops.append(concat_op)
|
||||
vars[concat_output_name] = concat_var
|
||||
concat_list.append(bat)
|
||||
concat_num += 1
|
||||
|
||||
concat_mul(index, concat_list, concat_num)
|
||||
|
||||
# 删除rnn op
|
||||
del ops[rnn_index]
|
||||
@@ -1,37 +0,0 @@
|
||||
import setuptools
|
||||
|
||||
PY_MODILES = ["convertToPaddleJSModel", "convertModel", "optimizeModel", "pruningModel", "rnn", "fuseOps"]
|
||||
|
||||
with open("README.md", "r") as fh:
|
||||
long_description = fh.read()
|
||||
|
||||
setuptools.setup(
|
||||
name="paddlejsconverter",
|
||||
version="1.0.7",
|
||||
author="paddlejs",
|
||||
author_email="382248373@qq.com",
|
||||
description="Paddlejs model converter",
|
||||
long_description=long_description,
|
||||
long_description_content_type="text/markdown",
|
||||
url="https://github.com/PaddlePaddle/Paddle.js",
|
||||
py_modules=PY_MODILES,
|
||||
classifiers=[
|
||||
"Programming Language :: Python",
|
||||
"Programming Language :: Python :: 3",
|
||||
"License :: OSI Approved :: MIT License",
|
||||
"Operating System :: OS Independent",
|
||||
],
|
||||
python_requires='>=3.6',
|
||||
install_requires=[
|
||||
"paddlepaddle >= 2.0.0",
|
||||
"paddlejslite >= 0.0.2",
|
||||
"numpy"
|
||||
],
|
||||
entry_points={
|
||||
"console_scripts": [
|
||||
"paddlejsconverter = convertToPaddleJSModel:main",
|
||||
"pdjsConvertModel = convertModel:main",
|
||||
"pdjsOptimizeModel = optimizeModel:main"
|
||||
]
|
||||
}
|
||||
)
|
||||
@@ -1,125 +0,0 @@
|
||||
English | [简体中文](README_CN.md)
|
||||
|
||||
# Paddle.js WeChat mini-program Demo
|
||||
|
||||
- [1. Introduction](#1)
|
||||
- [2. Project Start](#2)
|
||||
* [2.1 Preparations](#21)
|
||||
* [2.2 Startup steps](#22)
|
||||
* [2.3 visualization](#23)
|
||||
- [3. Model inference pipeline](#3)
|
||||
- [4. FAQ](#4)
|
||||
|
||||
<a name="1"></a>
|
||||
## 1 Introduction
|
||||
|
||||
|
||||
This directory contains the text detection, text recognition mini-program demo, by using [Paddle.js](https://github.com/PaddlePaddle/Paddle.js) and [Paddle.js WeChat mini-program plugin](https://mp.weixin.qq.com/wxopen/plugindevdoc?appid=wx7138a7bb793608c3&token=956931339&lang=zh_CN) to complete the text detection frame selection effect on the mini-program using the computing power of the user terminal.
|
||||
|
||||
<a name="2"></a>
|
||||
## 2. Project start
|
||||
|
||||
<a name="21"></a>
|
||||
### 2.1 Preparations
|
||||
* [Apply for a WeChat mini-program account](https://mp.weixin.qq.com/)
|
||||
* [WeChat Mini Program Developer Tools](https://developers.weixin.qq.com/miniprogram/dev/devtools/download.html)
|
||||
* Front-end development environment preparation: node, npm
|
||||
* Configure the server domain name in the mini-program management background, or open the developer tool [do not verify the legal domain name]
|
||||
|
||||
For details, please refer to [document.](https://mp.weixin.qq.com/wxamp/devprofile/get_profile?token=1132303404&lang=zh_CN)
|
||||
|
||||
<a name="22"></a>
|
||||
### 2.2 Startup steps
|
||||
|
||||
#### **1. Clone the demo code**
|
||||
````sh
|
||||
git clone https://github.com/PaddlePaddle/FastDeploy
|
||||
cd FastDeploy/examples/application/js/mini_program
|
||||
````
|
||||
|
||||
#### **2. Enter the mini-program directory and install dependencies**
|
||||
|
||||
````sh
|
||||
# Run the text recognition demo and enter the ocrXcx directory
|
||||
cd ./ocrXcx && npm install
|
||||
# Run the text detection demo and enter the ocrdetectXcx directory
|
||||
# cd ./ocrdetectXcx && npm install
|
||||
````
|
||||
|
||||
#### **3. WeChat mini-program import code**
|
||||
Open WeChat Developer Tools --> Import --> Select a directory and enter relevant information
|
||||
|
||||
#### **4. Add Paddle.js WeChat mini-program plugin**
|
||||
Mini Program Management Interface --> Settings --> Third Party Settings --> Plugin Management --> Add Plugins --> Search for `wx7138a7bb793608c3` and add
|
||||
[Reference document](https://developers.weixin.qq.com/miniprogram/dev/framework/plugin/using.html)
|
||||
|
||||
#### **5. Build dependencies**
|
||||
Click on the menu bar in the developer tools: Tools --> Build npm
|
||||
|
||||
Reason: The node_modules directory will not be involved in compiling, uploading and packaging. If a small program wants to use npm packages, it must go through the process of "building npm". After the construction is completed, a miniprogram_npm directory will be generated, which will store the built and packaged npm packages. It is the npm package that the mini-program actually uses. *
|
||||
[Reference Documentation](https://developers.weixin.qq.com/miniprogram/dev/devtools/npm.html)
|
||||
|
||||
<a name="23"></a>
|
||||
### 2.3 visualization
|
||||
|
||||
<img src="https://user-images.githubusercontent.com/43414102/157648579-cdbbee61-9866-4364-9edd-a97ac0eda0c1.png" width="300px">
|
||||
|
||||
<a name="3"></a>
|
||||
## 3. Model inference pipeline
|
||||
|
||||
```typescript
|
||||
// Introduce paddlejs and paddlejs-plugin, register the mini-program environment variables and the appropriate backend
|
||||
import * as paddlejs from '@paddlejs/paddlejs-core';
|
||||
import '@paddlejs/paddlejs-backend-webgl';
|
||||
const plugin = requirePlugin('paddlejs-plugin');
|
||||
plugin.register(paddlejs, wx);
|
||||
|
||||
// Initialize the inference engine
|
||||
const runner = new paddlejs.Runner({modelPath, feedShape, mean, std});
|
||||
await runner.init();
|
||||
|
||||
// get image information
|
||||
wx.canvasGetImageData({
|
||||
canvasId: canvasId,
|
||||
x: 0,
|
||||
y: 0,
|
||||
width: canvas.width,
|
||||
height: canvas.height,
|
||||
success(res) {
|
||||
// inference prediction
|
||||
runner.predict({
|
||||
data: res.data,
|
||||
width: canvas.width,
|
||||
height: canvas.height,
|
||||
}, function (data) {
|
||||
// get the inference result
|
||||
console.log(data)
|
||||
});
|
||||
}
|
||||
});
|
||||
````
|
||||
|
||||
<a name="4"></a>
|
||||
## 4. FAQ
|
||||
|
||||
- 4.1 An error occurs `Invalid context type [webgl2] for Canvas#getContext`
|
||||
|
||||
**A:** You can leave it alone, it will not affect the normal code operation and demo function
|
||||
|
||||
- 4.2 Preview can't see the result
|
||||
|
||||
**A:** It is recommended to try real machine debugging
|
||||
|
||||
- 4.3 A black screen appears in the WeChat developer tool, and then there are too many errors
|
||||
|
||||
**A:** Restart WeChat Developer Tools
|
||||
|
||||
- 4.4 The debugging results of the simulation and the real machine are inconsistent; the simulation cannot detect the text, etc.
|
||||
|
||||
**A:** The real machine can prevail;
|
||||
If the simulation cannot detect the text, etc., you can try to change the code at will (add, delete, newline, etc.) and then click to compile
|
||||
|
||||
|
||||
- 4.5 Prompts such as no response for a long time appear when the phone is debugged or running
|
||||
|
||||
**A:** Please continue to wait, model inference will take some time
|
||||
@@ -1,126 +0,0 @@
|
||||
[English](README.md) | 简体中文
|
||||
|
||||
# Paddle.js微信小程序Demo
|
||||
|
||||
- [1.简介](#1)
|
||||
- [2. 项目启动](#2)
|
||||
* [2.1 准备工作](#21)
|
||||
* [2.2 启动步骤](#22)
|
||||
* [2.3 效果展示](#23)
|
||||
- [3. 模型推理pipeline](#3)
|
||||
- [4. 常见问题](#4)
|
||||
|
||||
<a name="1"></a>
|
||||
## 1.简介
|
||||
|
||||
|
||||
本目录下包含文本检测、文本识别小程序demo,通过使用 [Paddle.js](https://github.com/PaddlePaddle/Paddle.js) 以及 [Paddle.js微信小程序插件](https://mp.weixin.qq.com/wxopen/plugindevdoc?appid=wx7138a7bb793608c3&token=956931339&lang=zh_CN) 完成在小程序上利用用户终端算力实现文本检测框选效果。
|
||||
|
||||
<a name="2"></a>
|
||||
## 2. 项目启动
|
||||
|
||||
<a name="21"></a>
|
||||
### 2.1 准备工作
|
||||
* [申请微信小程序账号](https://mp.weixin.qq.com/)
|
||||
* [微信小程序开发者工具](https://developers.weixin.qq.com/miniprogram/dev/devtools/download.html)
|
||||
* 前端开发环境准备:node、npm
|
||||
* 小程序管理后台配置服务器域名,或打开开发者工具【不校验合法域名】
|
||||
|
||||
详情参考:https://mp.weixin.qq.com/wxamp/devprofile/get_profile?token=1132303404&lang=zh_CN)
|
||||
|
||||
<a name="22"></a>
|
||||
### 2.2 启动步骤
|
||||
|
||||
#### **1. 克隆Demo代码**
|
||||
```sh
|
||||
git clone https://github.com/PaddlePaddle/FastDeploy
|
||||
cd FastDeploy/examples/application/js/mini_program
|
||||
```
|
||||
|
||||
#### **2. 进入小程序目录,安装依赖**
|
||||
|
||||
```sh
|
||||
# 运行文本识别demo,进入到ocrXcx目录
|
||||
cd ./ocrXcx && npm install
|
||||
# 运行文本检测demo,进入到ocrdetectXcx目录
|
||||
# cd ./ocrdetectXcx && npm install
|
||||
```
|
||||
|
||||
#### **3. 微信小程序导入代码**
|
||||
打开微信开发者工具 --> 导入 --> 选定目录,输入相关信息
|
||||
|
||||
#### **4. 添加 Paddle.js微信小程序插件**
|
||||
小程序管理界面 --> 设置 --> 第三方设置 --> 插件管理 --> 添加插件 --> 搜索 `wx7138a7bb793608c3` 并添加
|
||||
[参考文档](https://developers.weixin.qq.com/miniprogram/dev/framework/plugin/using.html)
|
||||
|
||||
#### **5. 构建依赖**
|
||||
点击开发者工具中的菜单栏:工具 --> 构建 npm
|
||||
|
||||
原因:node_modules 目录不会参与编译、上传和打包中,小程序想要使用 npm 包必须走一遍“构建 npm”的过程,构建完成会生成一个 miniprogram_npm 目录,里面会存放构建打包后的 npm 包,也就是小程序真正使用的 npm 包。*
|
||||
[参考文档](https://developers.weixin.qq.com/miniprogram/dev/devtools/npm.html)
|
||||
|
||||
<a name="23"></a>
|
||||
### 2.3 效果展示
|
||||
|
||||
<img src="https://user-images.githubusercontent.com/43414102/157648579-cdbbee61-9866-4364-9edd-a97ac0eda0c1.png" width="300px">
|
||||
|
||||
<a name="3"></a>
|
||||
## 3. 模型推理pipeline
|
||||
|
||||
```typescript
|
||||
// 引入 paddlejs 和 paddlejs-plugin,注册小程序环境变量和合适的 backend
|
||||
import * as paddlejs from '@paddlejs/paddlejs-core';
|
||||
import '@paddlejs/paddlejs-backend-webgl';
|
||||
const plugin = requirePlugin('paddlejs-plugin');
|
||||
plugin.register(paddlejs, wx);
|
||||
|
||||
// 初始化推理引擎
|
||||
const runner = new paddlejs.Runner({modelPath, feedShape, mean, std});
|
||||
await runner.init();
|
||||
|
||||
// 获取图像信息
|
||||
wx.canvasGetImageData({
|
||||
canvasId: canvasId,
|
||||
x: 0,
|
||||
y: 0,
|
||||
width: canvas.width,
|
||||
height: canvas.height,
|
||||
success(res) {
|
||||
// 推理预测
|
||||
runner.predict({
|
||||
data: res.data,
|
||||
width: canvas.width,
|
||||
height: canvas.height,
|
||||
}, function (data) {
|
||||
// 获取推理结果
|
||||
console.log(data)
|
||||
});
|
||||
}
|
||||
});
|
||||
```
|
||||
|
||||
<a name="4"></a>
|
||||
## 4. 常见问题
|
||||
|
||||
- 4.1 出现报错 `Invalid context type [webgl2] for Canvas#getContext`
|
||||
|
||||
**答:** 可以不管,不影响正常代码运行和demo功能
|
||||
|
||||
- 4.2 预览看不到结果
|
||||
|
||||
**答:** 建议尝试真机调试
|
||||
|
||||
- 4.3 微信开发者工具出现黑屏,然后出现超多报错
|
||||
|
||||
**答:** 重启微信开发者工具
|
||||
|
||||
- 4.4 模拟和真机调试结果不一致;模拟检测不到文本等
|
||||
|
||||
**答:** 可以以真机为准;模拟检测不到文本等可以尝试随意改动下代码(增删换行等)再点击编译
|
||||
|
||||
|
||||
- 4.5 手机调试或运行时出现 长时间无反应等提示
|
||||
|
||||
**答:** 请继续等待,模型推理需要一定时间
|
||||
|
||||
|
||||
@@ -1,12 +0,0 @@
|
||||
/* global wx, App */
|
||||
import * as paddlejs from '@paddlejs/paddlejs-core';
|
||||
import '@paddlejs/paddlejs-backend-webgl';
|
||||
// eslint-disable-next-line no-undef
|
||||
const plugin = requirePlugin('paddlejs-plugin');
|
||||
plugin.register(paddlejs, wx);
|
||||
|
||||
App({
|
||||
globalData: {
|
||||
Paddlejs: paddlejs.Runner
|
||||
}
|
||||
});
|
||||
@@ -1,12 +0,0 @@
|
||||
{
|
||||
"pages": [
|
||||
"pages/index/index"
|
||||
],
|
||||
"plugins": {
|
||||
"paddlejs-plugin": {
|
||||
"version": "2.0.1",
|
||||
"provider": "wx7138a7bb793608c3"
|
||||
}
|
||||
},
|
||||
"sitemapLocation": "sitemap.json"
|
||||
}
|
||||
@@ -1,72 +0,0 @@
|
||||
{
|
||||
"name": "paddlejs-demo",
|
||||
"version": "0.0.1",
|
||||
"lockfileVersion": 2,
|
||||
"requires": true,
|
||||
"packages": {
|
||||
"": {
|
||||
"name": "paddlejs-demo",
|
||||
"version": "0.0.1",
|
||||
"license": "ISC",
|
||||
"dependencies": {
|
||||
"@paddlejs/paddlejs-backend-webgl": "^1.2.0",
|
||||
"@paddlejs/paddlejs-core": "^2.1.18",
|
||||
"d3-polygon": "2.0.0",
|
||||
"js-clipper": "1.0.1",
|
||||
"number-precision": "1.5.2"
|
||||
}
|
||||
},
|
||||
"node_modules/@paddlejs/paddlejs-backend-webgl": {
|
||||
"version": "1.2.9",
|
||||
"resolved": "https://registry.npmjs.org/@paddlejs/paddlejs-backend-webgl/-/paddlejs-backend-webgl-1.2.9.tgz",
|
||||
"integrity": "sha512-cVDa0/Wbw2EyfsYqdYUPhFeqKsET79keEUWjyhYQmQkJfWg8j1qdR6yp7g6nx9qAGrqFvwuj1s0EqkYA1dok6A=="
|
||||
},
|
||||
"node_modules/@paddlejs/paddlejs-core": {
|
||||
"version": "2.2.0",
|
||||
"resolved": "https://registry.npmjs.org/@paddlejs/paddlejs-core/-/paddlejs-core-2.2.0.tgz",
|
||||
"integrity": "sha512-P3rPkF9fFHtq8uSte5gA7fJQwBNl9Ytsvj6aTcfQSsirnBO/HxMNu0gJyh7+lItvEtF92PR15eI0eOwJYfZDhQ=="
|
||||
},
|
||||
"node_modules/d3-polygon": {
|
||||
"version": "2.0.0",
|
||||
"resolved": "https://registry.npmjs.org/d3-polygon/-/d3-polygon-2.0.0.tgz",
|
||||
"integrity": "sha512-MsexrCK38cTGermELs0cO1d79DcTsQRN7IWMJKczD/2kBjzNXxLUWP33qRF6VDpiLV/4EI4r6Gs0DAWQkE8pSQ=="
|
||||
},
|
||||
"node_modules/js-clipper": {
|
||||
"version": "1.0.1",
|
||||
"resolved": "https://registry.npmjs.org/js-clipper/-/js-clipper-1.0.1.tgz",
|
||||
"integrity": "sha512-0XYAS0ZoCki5K0fWwj8j8ug4mgxHXReW3ayPbVqr4zXPJuIs2pyvemL1sALadsEiAywZwW5Ify1XfU4bNJvokg=="
|
||||
},
|
||||
"node_modules/number-precision": {
|
||||
"version": "1.5.2",
|
||||
"resolved": "https://registry.npmjs.org/number-precision/-/number-precision-1.5.2.tgz",
|
||||
"integrity": "sha512-q7C1ZW3FyjsJ+IpGB6ykX8OWWa5+6M+hEY0zXBlzq1Sq1IPY9GeI3CQ9b2i6CMIYoeSuFhop2Av/OhCxClXqag=="
|
||||
}
|
||||
},
|
||||
"dependencies": {
|
||||
"@paddlejs/paddlejs-backend-webgl": {
|
||||
"version": "1.2.9",
|
||||
"resolved": "https://registry.npmjs.org/@paddlejs/paddlejs-backend-webgl/-/paddlejs-backend-webgl-1.2.9.tgz",
|
||||
"integrity": "sha512-cVDa0/Wbw2EyfsYqdYUPhFeqKsET79keEUWjyhYQmQkJfWg8j1qdR6yp7g6nx9qAGrqFvwuj1s0EqkYA1dok6A=="
|
||||
},
|
||||
"@paddlejs/paddlejs-core": {
|
||||
"version": "2.2.0",
|
||||
"resolved": "https://registry.npmjs.org/@paddlejs/paddlejs-core/-/paddlejs-core-2.2.0.tgz",
|
||||
"integrity": "sha512-P3rPkF9fFHtq8uSte5gA7fJQwBNl9Ytsvj6aTcfQSsirnBO/HxMNu0gJyh7+lItvEtF92PR15eI0eOwJYfZDhQ=="
|
||||
},
|
||||
"d3-polygon": {
|
||||
"version": "2.0.0",
|
||||
"resolved": "https://registry.npmjs.org/d3-polygon/-/d3-polygon-2.0.0.tgz",
|
||||
"integrity": "sha512-MsexrCK38cTGermELs0cO1d79DcTsQRN7IWMJKczD/2kBjzNXxLUWP33qRF6VDpiLV/4EI4r6Gs0DAWQkE8pSQ=="
|
||||
},
|
||||
"js-clipper": {
|
||||
"version": "1.0.1",
|
||||
"resolved": "https://registry.npmjs.org/js-clipper/-/js-clipper-1.0.1.tgz",
|
||||
"integrity": "sha512-0XYAS0ZoCki5K0fWwj8j8ug4mgxHXReW3ayPbVqr4zXPJuIs2pyvemL1sALadsEiAywZwW5Ify1XfU4bNJvokg=="
|
||||
},
|
||||
"number-precision": {
|
||||
"version": "1.5.2",
|
||||
"resolved": "https://registry.npmjs.org/number-precision/-/number-precision-1.5.2.tgz",
|
||||
"integrity": "sha512-q7C1ZW3FyjsJ+IpGB6ykX8OWWa5+6M+hEY0zXBlzq1Sq1IPY9GeI3CQ9b2i6CMIYoeSuFhop2Av/OhCxClXqag=="
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,19 +0,0 @@
|
||||
{
|
||||
"name": "paddlejs-demo",
|
||||
"version": "0.0.1",
|
||||
"description": "",
|
||||
"main": "app.js",
|
||||
"dependencies": {
|
||||
"@paddlejs/paddlejs-backend-webgl": "^1.2.0",
|
||||
"@paddlejs/paddlejs-core": "^2.1.18",
|
||||
"d3-polygon": "2.0.0",
|
||||
"js-clipper": "1.0.1",
|
||||
"number-precision": "1.5.2"
|
||||
},
|
||||
"scripts": {
|
||||
"test": "echo \"Error: no test specified\" && exit 1"
|
||||
},
|
||||
"keywords": [],
|
||||
"author": "",
|
||||
"license": "ISC"
|
||||
}
|
||||
Binary file not shown.
|
Before Width: | Height: | Size: 222 KiB |
@@ -1,578 +0,0 @@
|
||||
/* global wx, Page */
|
||||
import * as paddlejs from '@paddlejs/paddlejs-core';
|
||||
import '@paddlejs/paddlejs-backend-webgl';
|
||||
import clipper from 'js-clipper';
|
||||
import { divide, enableBoundaryChecking, plus } from 'number-precision';
|
||||
|
||||
import { recDecode } from 'recPostprocess.js';
|
||||
// eslint-disable-next-line no-undef
|
||||
const plugin = requirePlugin('paddlejs-plugin');
|
||||
const Polygon = require('d3-polygon');
|
||||
|
||||
global.wasm_url = 'pages/index/wasm/opencv_js.wasm.br';
|
||||
const CV = require('./wasm/opencv.js');
|
||||
|
||||
plugin.register(paddlejs, wx);
|
||||
|
||||
let DETSHAPE = 960;
|
||||
let RECWIDTH = 320;
|
||||
const RECHEIGHT = 32;
|
||||
|
||||
// 声明后续图像变换要用到的canvas;此时未绑定
|
||||
let canvas_det;
|
||||
let canvas_rec;
|
||||
let my_canvas;
|
||||
let my_canvas_ctx;
|
||||
|
||||
|
||||
const imgList = [
|
||||
'https://paddlejs.bj.bcebos.com/xcx/ocr.png'
|
||||
];
|
||||
|
||||
// eslint-disable-next-line max-lines-per-function
|
||||
const outputBox = (res) => {
|
||||
const thresh = 0.3;
|
||||
const box_thresh = 0.5;
|
||||
const max_candidates = 1000;
|
||||
const min_size = 3;
|
||||
const width = 960;
|
||||
const height = 960;
|
||||
const pred = res;
|
||||
const segmentation = [];
|
||||
pred.forEach(item => {
|
||||
segmentation.push(item > thresh ? 255 : 0);
|
||||
});
|
||||
|
||||
function get_mini_boxes(contour) {
|
||||
// 生成最小外接矩形
|
||||
const bounding_box = CV.minAreaRect(contour);
|
||||
const points = [];
|
||||
const mat = new CV.Mat();
|
||||
// 获取矩形的四个顶点坐标
|
||||
CV.boxPoints(bounding_box, mat);
|
||||
for (let i = 0; i < mat.data32F.length; i += 2) {
|
||||
const arr = [];
|
||||
arr[0] = mat.data32F[i];
|
||||
arr[1] = mat.data32F[i + 1];
|
||||
points.push(arr);
|
||||
}
|
||||
|
||||
function sortNumber(a, b) {
|
||||
return a[0] - b[0];
|
||||
}
|
||||
points.sort(sortNumber);
|
||||
let index_1 = 0;
|
||||
let index_2 = 1;
|
||||
let index_3 = 2;
|
||||
let index_4 = 3;
|
||||
if (points[1][1] > points[0][1]) {
|
||||
index_1 = 0;
|
||||
index_4 = 1;
|
||||
}
|
||||
else {
|
||||
index_1 = 1;
|
||||
index_4 = 0;
|
||||
}
|
||||
|
||||
if (points[3][1] > points[2][1]) {
|
||||
index_2 = 2;
|
||||
index_3 = 3;
|
||||
}
|
||||
else {
|
||||
index_2 = 3;
|
||||
index_3 = 2;
|
||||
}
|
||||
const box = [
|
||||
points[index_1],
|
||||
points[index_2],
|
||||
points[index_3],
|
||||
points[index_4]
|
||||
];
|
||||
const side = Math.min(bounding_box.size.height, bounding_box.size.width);
|
||||
mat.delete();
|
||||
return {
|
||||
points: box,
|
||||
side
|
||||
};
|
||||
}
|
||||
|
||||
function box_score_fast(bitmap, _box) {
|
||||
const h = height;
|
||||
const w = width;
|
||||
const box = JSON.parse(JSON.stringify(_box));
|
||||
const x = [];
|
||||
const y = [];
|
||||
box.forEach(item => {
|
||||
x.push(item[0]);
|
||||
y.push(item[1]);
|
||||
});
|
||||
// clip这个函数将将数组中的元素限制在a_min, a_max之间,大于a_max的就使得它等于 a_max,小于a_min,的就使得它等于a_min。
|
||||
const xmin = clip(Math.floor(Math.min(...x)), 0, w - 1);
|
||||
const xmax = clip(Math.ceil(Math.max(...x)), 0, w - 1);
|
||||
const ymin = clip(Math.floor(Math.min(...y)), 0, h - 1);
|
||||
const ymax = clip(Math.ceil(Math.max(...y)), 0, h - 1);
|
||||
// eslint-disable-next-line new-cap
|
||||
const mask = new CV.Mat.zeros(ymax - ymin + 1, xmax - xmin + 1, CV.CV_8UC1);
|
||||
box.forEach(item => {
|
||||
item[0] = Math.max(item[0] - xmin, 0);
|
||||
item[1] = Math.max(item[1] - ymin, 0);
|
||||
});
|
||||
const npts = 4;
|
||||
const point_data = new Uint8Array(box.flat());
|
||||
const points = CV.matFromArray(npts, 1, CV.CV_32SC2, point_data);
|
||||
const pts = new CV.MatVector();
|
||||
pts.push_back(points);
|
||||
const color = new CV.Scalar(255);
|
||||
// 多个多边形填充
|
||||
CV.fillPoly(mask, pts, color, 1);
|
||||
const sliceArr = [];
|
||||
for (let i = ymin; i < ymax + 1; i++) {
|
||||
sliceArr.push(...bitmap.slice(960 * i + xmin, 960 * i + xmax + 1));
|
||||
}
|
||||
const mean = num_mean(sliceArr, mask.data);
|
||||
mask.delete();
|
||||
points.delete();
|
||||
pts.delete();
|
||||
return mean;
|
||||
}
|
||||
|
||||
function clip(data, min, max) {
|
||||
return data < min ? min : data > max ? max : data;
|
||||
}
|
||||
|
||||
function unclip(box) {
|
||||
const unclip_ratio = 1.6;
|
||||
const area = Math.abs(Polygon.polygonArea(box));
|
||||
const length = Polygon.polygonLength(box);
|
||||
const distance = area * unclip_ratio / length;
|
||||
const tmpArr = [];
|
||||
box.forEach(item => {
|
||||
const obj = {
|
||||
X: 0,
|
||||
Y: 0
|
||||
};
|
||||
obj.X = item[0];
|
||||
obj.Y = item[1];
|
||||
tmpArr.push(obj);
|
||||
});
|
||||
const offset = new clipper.ClipperOffset();
|
||||
offset.AddPath(tmpArr, clipper.JoinType.jtRound, clipper.EndType.etClosedPolygon);
|
||||
const expanded = [];
|
||||
offset.Execute(expanded, distance);
|
||||
let expandedArr = [];
|
||||
expanded[0] && expanded[0].forEach(item => {
|
||||
expandedArr.push([item.X, item.Y]);
|
||||
});
|
||||
expandedArr = [].concat(...expandedArr);
|
||||
return expandedArr;
|
||||
}
|
||||
|
||||
function num_mean(data, mask) {
|
||||
let sum = 0;
|
||||
let length = 0;
|
||||
for (let i = 0; i < data.length; i++) {
|
||||
if (mask[i]) {
|
||||
sum = plus(sum, data[i]);
|
||||
length++;
|
||||
}
|
||||
}
|
||||
return divide(sum, length);
|
||||
}
|
||||
|
||||
// eslint-disable-next-line new-cap
|
||||
const src = new CV.matFromArray(960, 960, CV.CV_8UC1, segmentation);
|
||||
const contours = new CV.MatVector();
|
||||
const hierarchy = new CV.Mat();
|
||||
// 获取轮廓
|
||||
CV.findContours(src, contours, hierarchy, CV.RETR_LIST, CV.CHAIN_APPROX_SIMPLE);
|
||||
const num_contours = Math.min(contours.size(), max_candidates);
|
||||
const boxes = [];
|
||||
const scores = [];
|
||||
const arr = [];
|
||||
for (let i = 0; i < num_contours; i++) {
|
||||
const contour = contours.get(i);
|
||||
let {
|
||||
points,
|
||||
side
|
||||
} = get_mini_boxes(contour);
|
||||
if (side < min_size) {
|
||||
continue;
|
||||
}
|
||||
const score = box_score_fast(pred, points);
|
||||
if (box_thresh > score) {
|
||||
continue;
|
||||
}
|
||||
let box = unclip(points);
|
||||
// eslint-disable-next-line new-cap
|
||||
const boxMap = new CV.matFromArray(box.length / 2, 1, CV.CV_32SC2, box);
|
||||
const resultObj = get_mini_boxes(boxMap);
|
||||
box = resultObj.points;
|
||||
side = resultObj.side;
|
||||
if (side < min_size + 2) {
|
||||
continue;
|
||||
}
|
||||
box.forEach(item => {
|
||||
item[0] = clip(Math.round(item[0]), 0, 960);
|
||||
item[1] = clip(Math.round(item[1]), 0, 960);
|
||||
});
|
||||
boxes.push(box);
|
||||
scores.push(score);
|
||||
arr.push(i);
|
||||
boxMap.delete();
|
||||
}
|
||||
src.delete();
|
||||
contours.delete();
|
||||
hierarchy.delete();
|
||||
return {
|
||||
boxes,
|
||||
scores
|
||||
};
|
||||
};
|
||||
|
||||
const sorted_boxes = (box) => {
|
||||
function sortNumber(a, b) {
|
||||
return a[0][1] - b[0][1];
|
||||
}
|
||||
|
||||
const boxes = box.sort(sortNumber);
|
||||
const num_boxes = boxes.length;
|
||||
for (let i = 0; i < num_boxes - 1; i++) {
|
||||
if (Math.abs(boxes[i + 1][0][1] - boxes[i][0][1]) < 10
|
||||
&& boxes[i + 1][0][0] < boxes[i][0][0]) {
|
||||
const tmp = boxes[i];
|
||||
boxes[i] = boxes[i + 1];
|
||||
boxes[i + 1] = tmp;
|
||||
}
|
||||
}
|
||||
return boxes;
|
||||
}
|
||||
|
||||
function flatten(arr) {
|
||||
return arr.toString().split(',').map(item => +item);
|
||||
}
|
||||
|
||||
function int(num) {
|
||||
return num > 0 ? Math.floor(num) : Math.ceil(num);
|
||||
}
|
||||
|
||||
function clip(data, min, max) {
|
||||
return data < min ? min : data > max ? max : data;
|
||||
}
|
||||
|
||||
function get_rotate_crop_image(img, points) {
|
||||
const img_crop_width = int(Math.max(
|
||||
linalg_norm(points[0], points[1]),
|
||||
linalg_norm(points[2], points[3])
|
||||
));
|
||||
const img_crop_height = int(Math.max(
|
||||
linalg_norm(points[0], points[3]),
|
||||
linalg_norm(points[1], points[2])
|
||||
));
|
||||
const pts_std = [
|
||||
[0, 0],
|
||||
[img_crop_width, 0],
|
||||
[img_crop_width, img_crop_height],
|
||||
[0, img_crop_height]
|
||||
];
|
||||
const srcTri = CV.matFromArray(4, 1, CV.CV_32FC2, flatten(points));
|
||||
const dstTri = CV.matFromArray(4, 1, CV.CV_32FC2, flatten(pts_std));
|
||||
// 获取到目标矩阵
|
||||
const M = CV.getPerspectiveTransform(srcTri, dstTri);
|
||||
const src = CV.imread(img);
|
||||
const dst = new CV.Mat();
|
||||
const dsize = new CV.Size(img_crop_width, img_crop_height);
|
||||
// 透视转换
|
||||
CV.warpPerspective(src, dst, M, dsize, CV.INTER_CUBIC, CV.BORDER_REPLICATE, new CV.Scalar());
|
||||
|
||||
const dst_img_height = dst.rows;
|
||||
const dst_img_width = dst.cols;
|
||||
let dst_rot;
|
||||
// 图像旋转
|
||||
if (dst_img_height / dst_img_width >= 1.5) {
|
||||
dst_rot = new CV.Mat();
|
||||
const dsize_rot = new CV.Size(dst.rows, dst.cols);
|
||||
const center = new CV.Point(dst.cols / 2, dst.cols / 2);
|
||||
const M = CV.getRotationMatrix2D(center, 90, 1);
|
||||
CV.warpAffine(dst, dst_rot, M, dsize_rot, CV.INTER_CUBIC, CV.BORDER_REPLICATE, new CV.Scalar());
|
||||
}
|
||||
|
||||
const dst_resize = new CV.Mat();
|
||||
const dsize_resize = new CV.Size(0, 0);
|
||||
let scale;
|
||||
if (dst_rot) {
|
||||
scale = RECHEIGHT / dst_rot.rows;
|
||||
CV.resize(dst_rot, dst_resize, dsize_resize, scale, scale, CV.INTER_AREA);
|
||||
dst_rot.delete();
|
||||
}
|
||||
else {
|
||||
scale = RECHEIGHT / dst_img_height;
|
||||
CV.resize(dst, dst_resize, dsize_resize, scale, scale, CV.INTER_AREA);
|
||||
}
|
||||
|
||||
canvas_det.width = dst_resize.cols;
|
||||
canvas_det.height = dst_resize.rows;
|
||||
canvas_det.getContext('2d').clearRect(0, 0, canvas_det.width, canvas_det.height);
|
||||
CV.imshow(canvas_det, dst_resize);
|
||||
|
||||
src.delete();
|
||||
dst.delete();
|
||||
dst_resize.delete();
|
||||
srcTri.delete();
|
||||
dstTri.delete();
|
||||
}
|
||||
|
||||
function linalg_norm(x, y) {
|
||||
return Math.sqrt(Math.pow(x[0] - y[0], 2) + Math.pow(x[1] - y[1], 2));
|
||||
}
|
||||
|
||||
function resize_norm_img_splice(
|
||||
image,
|
||||
origin_width,
|
||||
origin_height,
|
||||
index = 0
|
||||
) {
|
||||
canvas_rec.width = RECWIDTH;
|
||||
canvas_rec.height = RECHEIGHT;
|
||||
const ctx = canvas_rec.getContext('2d');
|
||||
ctx.fillStyle = '#fff';
|
||||
ctx.clearRect(0, 0, canvas_rec.width, canvas_rec.height);
|
||||
// ctx.drawImage(image, -index * RECWIDTH, 0, origin_width, origin_height);
|
||||
ctx.putImageData(image, -index * RECWIDTH, 0);
|
||||
}
|
||||
|
||||
// 声明检测和识别Runner;未初始化
|
||||
let detectRunner;
|
||||
let recRunner;
|
||||
|
||||
Page({
|
||||
data: {
|
||||
photo_src:'',
|
||||
imgList: imgList,
|
||||
imgInfo: {},
|
||||
result: '',
|
||||
select_mode: false,
|
||||
loaded: false
|
||||
},
|
||||
switch_choose(){
|
||||
this.setData({
|
||||
select_mode: true
|
||||
})
|
||||
},
|
||||
switch_example(){
|
||||
this.setData({
|
||||
select_mode: false
|
||||
})
|
||||
},
|
||||
chose_photo:function(evt){
|
||||
let _this = this
|
||||
wx.chooseImage({
|
||||
count: 1,
|
||||
sizeType: ['original', 'compressed'],
|
||||
sourceType: ['album', 'camera'],
|
||||
success(res) {
|
||||
console.log(res.tempFilePaths) //一个数组,每个元素都是“http://...”图片地址
|
||||
_this.setData({
|
||||
photo_src: res.tempFilePaths[0]
|
||||
})
|
||||
}
|
||||
})
|
||||
},
|
||||
reselect:function(evt){
|
||||
let _this = this
|
||||
wx.chooseImage({
|
||||
count: 1,
|
||||
sizeType: ['original', 'compressed'],
|
||||
sourceType: ['album', 'camera'],
|
||||
success(res) {
|
||||
_this.setData({
|
||||
photo_src: res.tempFilePaths[0]
|
||||
})
|
||||
}
|
||||
})
|
||||
},
|
||||
photo_preview:function(evt){
|
||||
let _this = this;
|
||||
let imgs = [];
|
||||
imgs.push(_this.data.photo_src);
|
||||
wx.previewImage({
|
||||
urls:imgs
|
||||
})
|
||||
},
|
||||
|
||||
predect_choose_img() {
|
||||
console.log(this.data.photo_src)
|
||||
this.getImageInfo(this.data.photo_src);
|
||||
},
|
||||
|
||||
onLoad() {
|
||||
enableBoundaryChecking(false);
|
||||
// 绑定canvas;该操作是异步,因此最好加延迟保证后续使用时已完成绑定
|
||||
wx.createSelectorQuery()
|
||||
.select('#canvas_det')
|
||||
.fields({ node: true, size: true })
|
||||
.exec(async(res) => {
|
||||
canvas_det = res[0].node;
|
||||
});
|
||||
|
||||
wx.createSelectorQuery()
|
||||
.select('#canvas_rec')
|
||||
.fields({ node: true, size: true })
|
||||
.exec(async(res) => {
|
||||
canvas_rec = res[0].node;
|
||||
});
|
||||
|
||||
wx.createSelectorQuery()
|
||||
.select('#myCanvas')
|
||||
.fields({ node: true, size: true })
|
||||
.exec((res) => {
|
||||
my_canvas = res[0].node;
|
||||
my_canvas_ctx = my_canvas.getContext('2d');
|
||||
});
|
||||
|
||||
const me = this;
|
||||
// 初始化Runner
|
||||
detectRunner = new paddlejs.Runner({
|
||||
modelPath: 'https://paddleocr.bj.bcebos.com/PaddleJS/PP-OCRv3/ch/ch_PP-OCRv3_det_infer_js_960/model.json',
|
||||
mean: [0.485, 0.456, 0.406],
|
||||
std: [0.229, 0.224, 0.225],
|
||||
bgr: true,
|
||||
webglFeedProcess: true
|
||||
});
|
||||
recRunner = new paddlejs.Runner({
|
||||
modelPath: 'https://paddleocr.bj.bcebos.com/PaddleJS/PP-OCRv3/ch/ch_PP-OCRv3_rec_infer_js/model.json',
|
||||
fill: '#000',
|
||||
mean: [0.5, 0.5, 0.5],
|
||||
std: [0.5, 0.5, 0.5],
|
||||
bgr: true,
|
||||
webglFeedProcess: true
|
||||
});
|
||||
// 等待模型数据全部加载完成
|
||||
Promise.all([detectRunner.init(), recRunner.init()]).then(_ => {
|
||||
me.setData({
|
||||
loaded: true
|
||||
});
|
||||
});
|
||||
|
||||
},
|
||||
|
||||
selectImage(event) {
|
||||
const imgPath = this.data.imgList[event.target.dataset.index];
|
||||
this.getImageInfo(imgPath);
|
||||
},
|
||||
|
||||
getImageInfo(imgPath) {
|
||||
const me = this;
|
||||
wx.getImageInfo({
|
||||
src: imgPath,
|
||||
success: (imgInfo) => {
|
||||
const {
|
||||
path,
|
||||
width,
|
||||
height
|
||||
} = imgInfo;
|
||||
const canvasPath = imgPath.includes('http') ? path : imgPath;
|
||||
|
||||
let sw = 960;
|
||||
let sh = 960;
|
||||
let x = 0;
|
||||
let y = 0;
|
||||
|
||||
if (height / width >= 1) {
|
||||
sw = Math.round(sh * width / height);
|
||||
x = Math.floor((960 - sw) / 2);
|
||||
}
|
||||
else {
|
||||
sh = Math.round(sw * height / width);
|
||||
y = Math.floor((960 - sh) / 2);
|
||||
}
|
||||
my_canvas.width = sw;
|
||||
my_canvas.height = sh;
|
||||
|
||||
// 微信上canvas输入图片
|
||||
const image = my_canvas.createImage();
|
||||
image.src = canvasPath;
|
||||
image.onload = () => {
|
||||
my_canvas_ctx.clearRect(0, 0, my_canvas.width, my_canvas.height);
|
||||
my_canvas_ctx.drawImage(image, x, y, sw, sh);
|
||||
const imageData = my_canvas_ctx.getImageData(0, 0, sw, sh);
|
||||
// 开始识别
|
||||
me.recognize({
|
||||
data: imageData.data,
|
||||
width: 960,
|
||||
height: 960
|
||||
}, {canvasPath, sw, sh, x, y});
|
||||
}
|
||||
}
|
||||
});
|
||||
},
|
||||
|
||||
async recognize(res, img) {
|
||||
const me = this;
|
||||
// 文本框选坐标点
|
||||
let points;
|
||||
await detectRunner.predict(res, function (detectRes) {
|
||||
points = outputBox(detectRes);
|
||||
});
|
||||
|
||||
// 绘制文本框
|
||||
me.drawCanvasPoints(img, points.boxes);
|
||||
|
||||
// 排序,使得最后结果输出尽量按照从上到下的顺序
|
||||
const boxes = sorted_boxes(points.boxes);
|
||||
|
||||
const text_list = [];
|
||||
|
||||
for (let i = 0; i < boxes.length; i++) {
|
||||
const tmp_box = JSON.parse(JSON.stringify(boxes[i]));
|
||||
// 获取tmp_box对应图片到canvas_det
|
||||
get_rotate_crop_image(res, tmp_box);
|
||||
// 这里是计算要识别文字的图片片段是否大于识别模型要求的输入宽度;超过了的话会分成多次识别,再拼接结果
|
||||
const width_num = Math.ceil(canvas_det.width / RECWIDTH);
|
||||
|
||||
let text_list_tmp = '';
|
||||
for (let j = 0; j < width_num; j++) {
|
||||
// 根据原图的宽度进行裁剪拼接,超出指定宽度会被截断;然后再次识别,最后拼接起来
|
||||
resize_norm_img_splice(canvas_det.getContext('2d').getImageData(0, 0, canvas_det.width, canvas_det.height), canvas_det.width, canvas_det.height, j);
|
||||
|
||||
const imgData = canvas_rec.getContext('2d').getImageData(0, 0, canvas_rec.width, canvas_rec.height);
|
||||
|
||||
await recRunner.predict(imgData, function(output){
|
||||
// 将输出向量转化为idx再传化为对应字符
|
||||
const text = recDecode(output);
|
||||
text_list_tmp = text_list_tmp.concat(text.text);
|
||||
});
|
||||
}
|
||||
text_list.push(text_list_tmp);
|
||||
}
|
||||
me.setData({
|
||||
result: JSON.stringify(boxes) + JSON.stringify(text_list)
|
||||
});
|
||||
},
|
||||
|
||||
drawCanvasPoints(img, points) {
|
||||
// 设置线条
|
||||
my_canvas_ctx.strokeStyle = 'blue';
|
||||
my_canvas_ctx.lineWidth = 5;
|
||||
|
||||
// 先绘制图片
|
||||
const image = my_canvas.createImage();
|
||||
image.src = img.canvasPath;
|
||||
image.onload = () => {
|
||||
my_canvas_ctx.clearRect(0, 0, my_canvas_ctx.width, my_canvas_ctx.height);
|
||||
my_canvas_ctx.drawImage(image, img.x, img.y, img.sw, img.sh);
|
||||
// 绘制线框
|
||||
points.length && points.forEach(point => {
|
||||
my_canvas_ctx.beginPath();
|
||||
// 设置路径起点坐标
|
||||
my_canvas_ctx.moveTo(point[0][0], point[0][1]);
|
||||
my_canvas_ctx.lineTo(point[1][0], point[1][1]);
|
||||
my_canvas_ctx.lineTo(point[2][0], point[2][1]);
|
||||
my_canvas_ctx.lineTo(point[3][0], point[3][1]);
|
||||
my_canvas_ctx.lineTo(point[0][0], point[0][1]);
|
||||
my_canvas_ctx.stroke();
|
||||
my_canvas_ctx.closePath();
|
||||
});
|
||||
}
|
||||
|
||||
}
|
||||
});
|
||||
@@ -1,2 +0,0 @@
|
||||
{
|
||||
}
|
||||
@@ -1,56 +0,0 @@
|
||||
<view>
|
||||
<view>
|
||||
<button bindtap="switch_choose" wx:if="{{!select_mode}}">切换选择本地图片模式</button>
|
||||
<button bindtap="switch_example" wx:else>切换示例图片模式</button>
|
||||
</view>
|
||||
<view class="photo_box" wx:if="{{select_mode}}">
|
||||
<view wx:if="{{photo_src != ''}}" class="photo_preview">
|
||||
<image src="{{photo_src}}" mode="aspectFit" bindtap="photo_preview"></image>
|
||||
<view class="reselect" bindtap="reselect">重新选择</view>
|
||||
</view>
|
||||
<view wx:else class="photo_text" bindtap="chose_photo">点击拍照或上传本地照片</view>
|
||||
<button bindtap="predect_choose_img">检测</button>
|
||||
</view>
|
||||
<view wx:else>
|
||||
<text class="title">点击图片进行预测</text>
|
||||
<scroll-view class="imgWrapper" scroll-x="true">
|
||||
<image
|
||||
class="img {{selectedIndex == index ? 'selected' : ''}}"
|
||||
wx:for="{{imgList}}"
|
||||
wx:key="index"
|
||||
src="{{item}}"
|
||||
mode="aspectFit"
|
||||
bindtap="selectImage"
|
||||
data-index="{{index}}"
|
||||
></image>
|
||||
</scroll-view>
|
||||
</view>
|
||||
<view class="img-view">
|
||||
<scroll-view class="imgWrapper" scroll-x="true" style="width: 960px; height: 960px;">
|
||||
<canvas
|
||||
id="myCanvas"
|
||||
type="2d"
|
||||
style="width: 960px; height: 960px;"
|
||||
></canvas>
|
||||
</scroll-view>
|
||||
<scroll-view class="imgWrapper" scroll-x="true" style="width: 960px; height: 960px;">
|
||||
<canvas
|
||||
id="canvas_det"
|
||||
type="2d"
|
||||
style="width: 960px; height: 960px;"
|
||||
></canvas>
|
||||
</scroll-view>
|
||||
<scroll-view class="imgWrapper" scroll-x="true" style="width: 960px; height: 960px;">
|
||||
<canvas
|
||||
id="canvas_rec"
|
||||
type="2d"
|
||||
style="width: 960px; height: 960px;"
|
||||
></canvas>
|
||||
</scroll-view>
|
||||
<text class="result" wx:if="{{result}}" style="height: 300rpx;">文本框选坐标:{{result}}</text>
|
||||
</view>
|
||||
</view>
|
||||
|
||||
<view class="mask" wx:if="{{!loaded}}">
|
||||
<text class="loading">loading…</text>
|
||||
</view>
|
||||
@@ -1,78 +0,0 @@
|
||||
.photo_box{
|
||||
width: 750rpx;
|
||||
border: 1px solid #cccccc;
|
||||
box-sizing: border-box;
|
||||
}
|
||||
.photo_text{
|
||||
width: 100%;
|
||||
line-height: 500rpx;
|
||||
text-align: center;
|
||||
}
|
||||
.photo_preview image{
|
||||
width: 750rpx;
|
||||
}
|
||||
.photo_preview .reselect{
|
||||
width: 750rpx;
|
||||
height: 100rpx;
|
||||
background-color: #3F8EFF;
|
||||
text-align: center;
|
||||
line-height: 100rpx;
|
||||
border-top: 1px solid #cccccc;
|
||||
}
|
||||
|
||||
text {
|
||||
display: block;
|
||||
}
|
||||
|
||||
.title {
|
||||
margin-top: 10px;
|
||||
font-size: 16px;
|
||||
line-height: 32px;
|
||||
font-weight: bold;
|
||||
}
|
||||
|
||||
.imgWrapper {
|
||||
margin: 10px 10px 0;
|
||||
white-space: nowrap;
|
||||
}
|
||||
.img {
|
||||
width: 960px;
|
||||
height: 960px;
|
||||
border: 1px solid #f1f1f1;
|
||||
}
|
||||
|
||||
.result {
|
||||
margin-top: 5px;
|
||||
}
|
||||
|
||||
.selected {
|
||||
border: 1px solid #999;
|
||||
}
|
||||
|
||||
.select-btn {
|
||||
margin-top: 20px;
|
||||
width: 60%;
|
||||
}
|
||||
|
||||
.mask {
|
||||
position: absolute;
|
||||
top: 0;
|
||||
left: 0;
|
||||
right: 0;
|
||||
bottom: 0;
|
||||
background-color: rgba(0, 0, 0, .7);
|
||||
}
|
||||
|
||||
.loading {
|
||||
color: #fff;
|
||||
font-size: 20px;
|
||||
position: absolute;
|
||||
top: 50%;
|
||||
left: 50%;
|
||||
transform: translate(-50%, -50%);
|
||||
}
|
||||
|
||||
.img-view {
|
||||
padding-bottom: 20px;
|
||||
border-bottom: 1px solid #f1f1f1;
|
||||
}
|
||||
File diff suppressed because it is too large
Load Diff
File diff suppressed because one or more lines are too long
@@ -1,63 +0,0 @@
|
||||
import { character } from 'ppocr_keys_v1.js';
|
||||
|
||||
const ocr_character = character;
|
||||
let preds_idx = [];
|
||||
let preds_prob = [];
|
||||
|
||||
function init(preds) {
|
||||
preds_idx = [];
|
||||
preds_prob = [];
|
||||
// preds: [1, ?, 6625]
|
||||
const pred_len = 6625;
|
||||
for (let i = 0; i < preds.length; i += pred_len) {
|
||||
const tmpArr = preds.slice(i, i + pred_len - 1);
|
||||
const tmpMax = Math.max(...tmpArr);
|
||||
const tmpIdx = tmpArr.indexOf(tmpMax);
|
||||
preds_prob.push(tmpMax);
|
||||
preds_idx.push(tmpIdx);
|
||||
}
|
||||
}
|
||||
|
||||
function get_ignored_tokens() {
|
||||
return [0];
|
||||
}
|
||||
|
||||
function decode(text_index, text_prob, is_remove_duplicate = false) {
|
||||
const ignored_tokens = get_ignored_tokens();
|
||||
const char_list = [];
|
||||
const conf_list = [];
|
||||
for (let idx = 0; idx < text_index.length; idx++) {
|
||||
if (text_index[idx] in ignored_tokens) {
|
||||
continue;
|
||||
}
|
||||
if (is_remove_duplicate) {
|
||||
if (idx > 0 && text_index[idx - 1] === text_index[idx]) {
|
||||
continue;
|
||||
}
|
||||
}
|
||||
char_list.push(ocr_character[text_index[idx] - 1]);
|
||||
if (text_prob) {
|
||||
conf_list.push(text_prob[idx]);
|
||||
}
|
||||
else {
|
||||
conf_list.push(1);
|
||||
}
|
||||
}
|
||||
let text = '';
|
||||
let mean = 0;
|
||||
|
||||
if (char_list.length) {
|
||||
text = char_list.join('');
|
||||
let sum = 0;
|
||||
conf_list.forEach(item => {
|
||||
sum += item;
|
||||
});
|
||||
mean = sum / conf_list.length;
|
||||
}
|
||||
return { text, mean };
|
||||
}
|
||||
|
||||
export function recDecode(preds) {
|
||||
init(preds);
|
||||
return decode(preds_idx, preds_prob, true);
|
||||
}
|
||||
File diff suppressed because it is too large
Load Diff
Binary file not shown.
@@ -1,58 +0,0 @@
|
||||
{
|
||||
"description": "项目配置文件,详见文档:https://developers.weixin.qq.com/miniprogram/dev/devtools/projectconfig.html",
|
||||
"packOptions": {
|
||||
"ignore": [],
|
||||
"include": []
|
||||
},
|
||||
"setting": {
|
||||
"urlCheck": false,
|
||||
"es6": true,
|
||||
"enhance": true,
|
||||
"postcss": true,
|
||||
"preloadBackgroundData": false,
|
||||
"minified": true,
|
||||
"newFeature": false,
|
||||
"coverView": true,
|
||||
"nodeModules": true,
|
||||
"autoAudits": false,
|
||||
"showShadowRootInWxmlPanel": true,
|
||||
"scopeDataCheck": false,
|
||||
"uglifyFileName": false,
|
||||
"checkInvalidKey": true,
|
||||
"checkSiteMap": true,
|
||||
"uploadWithSourceMap": true,
|
||||
"compileHotReLoad": false,
|
||||
"lazyloadPlaceholderEnable": false,
|
||||
"useMultiFrameRuntime": true,
|
||||
"useApiHook": true,
|
||||
"useApiHostProcess": true,
|
||||
"babelSetting": {
|
||||
"ignore": [],
|
||||
"disablePlugins": [],
|
||||
"outputPath": ""
|
||||
},
|
||||
"enableEngineNative": false,
|
||||
"useIsolateContext": true,
|
||||
"userConfirmedBundleSwitch": false,
|
||||
"packNpmManually": false,
|
||||
"packNpmRelationList": [],
|
||||
"minifyWXSS": true,
|
||||
"disableUseStrict": false,
|
||||
"minifyWXML": true,
|
||||
"showES6CompileOption": false,
|
||||
"useCompilerPlugins": false,
|
||||
"useStaticServer": true,
|
||||
"ignoreUploadUnusedFiles": false
|
||||
},
|
||||
"compileType": "miniprogram",
|
||||
"libVersion": "2.22.1",
|
||||
"appid": "wx78461a9c81d1234c",
|
||||
"projectname": "mobilenet",
|
||||
"simulatorType": "wechat",
|
||||
"simulatorPluginLibVersion": {},
|
||||
"condition": {},
|
||||
"editorSetting": {
|
||||
"tabIndent": "insertSpaces",
|
||||
"tabSize": 2
|
||||
}
|
||||
}
|
||||
@@ -1,8 +0,0 @@
|
||||
{
|
||||
"projectname": "ocrXcx",
|
||||
"setting": {
|
||||
"compileHotReLoad": true
|
||||
},
|
||||
"description": "项目私有配置文件。此文件中的内容将覆盖 project.config.json 中的相同字段。项目的改动优先同步到此文件中。详见文档:https://developers.weixin.qq.com/miniprogram/dev/devtools/projectconfig.html",
|
||||
"libVersion": "2.23.4"
|
||||
}
|
||||
@@ -1,7 +0,0 @@
|
||||
{
|
||||
"desc": "关于本文件的更多信息,请参考文档 https://developers.weixin.qq.com/miniprogram/dev/framework/sitemap.html",
|
||||
"rules": [{
|
||||
"action": "allow",
|
||||
"page": "*"
|
||||
}]
|
||||
}
|
||||
@@ -1,12 +0,0 @@
|
||||
/* global wx, App */
|
||||
import * as paddlejs from '@paddlejs/paddlejs-core';
|
||||
import '@paddlejs/paddlejs-backend-webgl';
|
||||
// eslint-disable-next-line no-undef
|
||||
const plugin = requirePlugin('paddlejs-plugin');
|
||||
plugin.register(paddlejs, wx);
|
||||
|
||||
App({
|
||||
globalData: {
|
||||
Paddlejs: paddlejs.Runner
|
||||
}
|
||||
});
|
||||
@@ -1,12 +0,0 @@
|
||||
{
|
||||
"pages": [
|
||||
"pages/index/index"
|
||||
],
|
||||
"plugins": {
|
||||
"paddlejs-plugin": {
|
||||
"version": "2.0.1",
|
||||
"provider": "wx7138a7bb793608c3"
|
||||
}
|
||||
},
|
||||
"sitemapLocation": "sitemap.json"
|
||||
}
|
||||
@@ -1,33 +0,0 @@
|
||||
{
|
||||
"name": "paddlejs-demo",
|
||||
"version": "0.0.1",
|
||||
"lockfileVersion": 1,
|
||||
"requires": true,
|
||||
"dependencies": {
|
||||
"@paddlejs/paddlejs-backend-webgl": {
|
||||
"version": "1.2.0",
|
||||
"resolved": "https://registry.npmjs.org/@paddlejs/paddlejs-backend-webgl/-/paddlejs-backend-webgl-1.2.0.tgz",
|
||||
"integrity": "sha512-bKJKJkGldC3NPOuJyk+372z0XW1dd1D9lR0f9OHqWQboY0Mkah+gX+8tkerrNg+QjYz88IW0iJaRKB0jm+6d9g=="
|
||||
},
|
||||
"@paddlejs/paddlejs-core": {
|
||||
"version": "2.1.18",
|
||||
"resolved": "https://registry.npmjs.org/@paddlejs/paddlejs-core/-/paddlejs-core-2.1.18.tgz",
|
||||
"integrity": "sha512-QrXxwaHm4llp1sxbUq/oCCqlYx4ciXanBn/Lfq09UqR4zkYi5SptacQlIxgJ70HOO6RWIxjWN4liQckMwa2TkA=="
|
||||
},
|
||||
"d3-polygon": {
|
||||
"version": "2.0.0",
|
||||
"resolved": "https://registry.npmjs.org/d3-polygon/-/d3-polygon-2.0.0.tgz",
|
||||
"integrity": "sha512-MsexrCK38cTGermELs0cO1d79DcTsQRN7IWMJKczD/2kBjzNXxLUWP33qRF6VDpiLV/4EI4r6Gs0DAWQkE8pSQ=="
|
||||
},
|
||||
"js-clipper": {
|
||||
"version": "1.0.1",
|
||||
"resolved": "https://registry.npmjs.org/js-clipper/-/js-clipper-1.0.1.tgz",
|
||||
"integrity": "sha1-TWsHQ0pECOfBKeMiAc5m0hR07SE="
|
||||
},
|
||||
"number-precision": {
|
||||
"version": "1.5.2",
|
||||
"resolved": "https://registry.npmjs.org/number-precision/-/number-precision-1.5.2.tgz",
|
||||
"integrity": "sha512-q7C1ZW3FyjsJ+IpGB6ykX8OWWa5+6M+hEY0zXBlzq1Sq1IPY9GeI3CQ9b2i6CMIYoeSuFhop2Av/OhCxClXqag=="
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,19 +0,0 @@
|
||||
{
|
||||
"name": "paddlejs-demo",
|
||||
"version": "0.0.1",
|
||||
"description": "",
|
||||
"main": "app.js",
|
||||
"dependencies": {
|
||||
"@paddlejs/paddlejs-backend-webgl": "^1.2.0",
|
||||
"@paddlejs/paddlejs-core": "^2.1.18",
|
||||
"d3-polygon": "2.0.0",
|
||||
"js-clipper": "1.0.1",
|
||||
"number-precision": "1.5.2"
|
||||
},
|
||||
"scripts": {
|
||||
"test": "echo \"Error: no test specified\" && exit 1"
|
||||
},
|
||||
"keywords": [],
|
||||
"author": "",
|
||||
"license": "ISC"
|
||||
}
|
||||
Binary file not shown.
|
Before Width: | Height: | Size: 222 KiB |
@@ -1,337 +0,0 @@
|
||||
/* global wx, Page */
|
||||
import * as paddlejs from '@paddlejs/paddlejs-core';
|
||||
import '@paddlejs/paddlejs-backend-webgl';
|
||||
import clipper from 'js-clipper';
|
||||
import { divide, enableBoundaryChecking, plus } from 'number-precision';
|
||||
// eslint-disable-next-line no-undef
|
||||
const plugin = requirePlugin('paddlejs-plugin');
|
||||
const Polygon = require('d3-polygon');
|
||||
|
||||
global.wasm_url = 'pages/index/wasm/opencv_js.wasm.br';
|
||||
const CV = require('./wasm/opencv.js');
|
||||
|
||||
plugin.register(paddlejs, wx);
|
||||
|
||||
const imgList = [
|
||||
'https://paddlejs.bj.bcebos.com/xcx/ocr.png',
|
||||
'./img/width.png'
|
||||
];
|
||||
|
||||
// eslint-disable-next-line max-lines-per-function
|
||||
const outputBox = res => {
|
||||
const thresh = 0.3;
|
||||
const box_thresh = 0.5;
|
||||
const max_candidates = 1000;
|
||||
const min_size = 3;
|
||||
const width = 960;
|
||||
const height = 960;
|
||||
const pred = res;
|
||||
const segmentation = [];
|
||||
pred.forEach(item => {
|
||||
segmentation.push(item > thresh ? 255 : 0);
|
||||
});
|
||||
|
||||
function get_mini_boxes(contour) {
|
||||
// 生成最小外接矩形
|
||||
const bounding_box = CV.minAreaRect(contour);
|
||||
const points = [];
|
||||
const mat = new CV.Mat();
|
||||
// 获取矩形的四个顶点坐标
|
||||
CV.boxPoints(bounding_box, mat);
|
||||
for (let i = 0; i < mat.data32F.length; i += 2) {
|
||||
const arr = [];
|
||||
arr[0] = mat.data32F[i];
|
||||
arr[1] = mat.data32F[i + 1];
|
||||
points.push(arr);
|
||||
}
|
||||
|
||||
function sortNumber(a, b) {
|
||||
return a[0] - b[0];
|
||||
}
|
||||
points.sort(sortNumber);
|
||||
let index_1 = 0;
|
||||
let index_2 = 1;
|
||||
let index_3 = 2;
|
||||
let index_4 = 3;
|
||||
if (points[1][1] > points[0][1]) {
|
||||
index_1 = 0;
|
||||
index_4 = 1;
|
||||
}
|
||||
else {
|
||||
index_1 = 1;
|
||||
index_4 = 0;
|
||||
}
|
||||
|
||||
if (points[3][1] > points[2][1]) {
|
||||
index_2 = 2;
|
||||
index_3 = 3;
|
||||
}
|
||||
else {
|
||||
index_2 = 3;
|
||||
index_3 = 2;
|
||||
}
|
||||
const box = [
|
||||
points[index_1],
|
||||
points[index_2],
|
||||
points[index_3],
|
||||
points[index_4]
|
||||
];
|
||||
const side = Math.min(bounding_box.size.height, bounding_box.size.width);
|
||||
mat.delete();
|
||||
return {
|
||||
points: box,
|
||||
side
|
||||
};
|
||||
}
|
||||
|
||||
function box_score_fast(bitmap, _box) {
|
||||
const h = height;
|
||||
const w = width;
|
||||
const box = JSON.parse(JSON.stringify(_box));
|
||||
const x = [];
|
||||
const y = [];
|
||||
box.forEach(item => {
|
||||
x.push(item[0]);
|
||||
y.push(item[1]);
|
||||
});
|
||||
// clip这个函数将将数组中的元素限制在a_min, a_max之间,大于a_max的就使得它等于 a_max,小于a_min,的就使得它等于a_min。
|
||||
const xmin = clip(Math.floor(Math.min(...x)), 0, w - 1);
|
||||
const xmax = clip(Math.ceil(Math.max(...x)), 0, w - 1);
|
||||
const ymin = clip(Math.floor(Math.min(...y)), 0, h - 1);
|
||||
const ymax = clip(Math.ceil(Math.max(...y)), 0, h - 1);
|
||||
// eslint-disable-next-line new-cap
|
||||
const mask = new CV.Mat.zeros(ymax - ymin + 1, xmax - xmin + 1, CV.CV_8UC1);
|
||||
box.forEach(item => {
|
||||
item[0] = Math.max(item[0] - xmin, 0);
|
||||
item[1] = Math.max(item[1] - ymin, 0);
|
||||
});
|
||||
const npts = 4;
|
||||
const point_data = new Uint8Array(box.flat());
|
||||
const points = CV.matFromArray(npts, 1, CV.CV_32SC2, point_data);
|
||||
const pts = new CV.MatVector();
|
||||
pts.push_back(points);
|
||||
const color = new CV.Scalar(255);
|
||||
// 多个多边形填充
|
||||
CV.fillPoly(mask, pts, color, 1);
|
||||
const sliceArr = [];
|
||||
for (let i = ymin; i < ymax + 1; i++) {
|
||||
sliceArr.push(...bitmap.slice(960 * i + xmin, 960 * i + xmax + 1));
|
||||
}
|
||||
const mean = num_mean(sliceArr, mask.data);
|
||||
mask.delete();
|
||||
points.delete();
|
||||
pts.delete();
|
||||
return mean;
|
||||
}
|
||||
|
||||
function clip(data, min, max) {
|
||||
return data < min ? min : data > max ? max : data;
|
||||
}
|
||||
|
||||
function unclip(box) {
|
||||
const unclip_ratio = 1.6;
|
||||
const area = Math.abs(Polygon.polygonArea(box));
|
||||
const length = Polygon.polygonLength(box);
|
||||
const distance = area * unclip_ratio / length;
|
||||
const tmpArr = [];
|
||||
box.forEach(item => {
|
||||
const obj = {
|
||||
X: 0,
|
||||
Y: 0
|
||||
};
|
||||
obj.X = item[0];
|
||||
obj.Y = item[1];
|
||||
tmpArr.push(obj);
|
||||
});
|
||||
const offset = new clipper.ClipperOffset();
|
||||
offset.AddPath(tmpArr, clipper.JoinType.jtRound, clipper.EndType.etClosedPolygon);
|
||||
const expanded = [];
|
||||
offset.Execute(expanded, distance);
|
||||
let expandedArr = [];
|
||||
expanded[0] && expanded[0].forEach(item => {
|
||||
expandedArr.push([item.X, item.Y]);
|
||||
});
|
||||
expandedArr = [].concat(...expandedArr);
|
||||
return expandedArr;
|
||||
}
|
||||
|
||||
function num_mean(data, mask) {
|
||||
let sum = 0;
|
||||
let length = 0;
|
||||
for (let i = 0; i < data.length; i++) {
|
||||
if (mask[i]) {
|
||||
sum = plus(sum, data[i]);
|
||||
length++;
|
||||
}
|
||||
}
|
||||
return divide(sum, length);
|
||||
}
|
||||
|
||||
// eslint-disable-next-line new-cap
|
||||
const src = new CV.matFromArray(960, 960, CV.CV_8UC1, segmentation);
|
||||
const contours = new CV.MatVector();
|
||||
const hierarchy = new CV.Mat();
|
||||
// 获取轮廓
|
||||
CV.findContours(src, contours, hierarchy, CV.RETR_LIST, CV.CHAIN_APPROX_SIMPLE);
|
||||
const num_contours = Math.min(contours.size(), max_candidates);
|
||||
const boxes = [];
|
||||
const scores = [];
|
||||
const arr = [];
|
||||
for (let i = 0; i < num_contours; i++) {
|
||||
const contour = contours.get(i);
|
||||
let {
|
||||
points,
|
||||
side
|
||||
} = get_mini_boxes(contour);
|
||||
if (side < min_size) {
|
||||
continue;
|
||||
}
|
||||
const score = box_score_fast(pred, points);
|
||||
if (box_thresh > score) {
|
||||
continue;
|
||||
}
|
||||
let box = unclip(points);
|
||||
// eslint-disable-next-line new-cap
|
||||
const boxMap = new CV.matFromArray(box.length / 2, 1, CV.CV_32SC2, box);
|
||||
const resultObj = get_mini_boxes(boxMap);
|
||||
box = resultObj.points;
|
||||
side = resultObj.side;
|
||||
if (side < min_size + 2) {
|
||||
continue;
|
||||
}
|
||||
box.forEach(item => {
|
||||
item[0] = clip(Math.round(item[0]), 0, 960);
|
||||
item[1] = clip(Math.round(item[1]), 0, 960);
|
||||
});
|
||||
boxes.push(box);
|
||||
scores.push(score);
|
||||
arr.push(i);
|
||||
boxMap.delete();
|
||||
}
|
||||
src.delete();
|
||||
contours.delete();
|
||||
hierarchy.delete();
|
||||
return {
|
||||
boxes,
|
||||
scores
|
||||
};
|
||||
};
|
||||
|
||||
let detectRunner;
|
||||
|
||||
Page({
|
||||
data: {
|
||||
imgList: imgList,
|
||||
imgInfo: {},
|
||||
result: '',
|
||||
loaded: false
|
||||
},
|
||||
|
||||
onLoad() {
|
||||
enableBoundaryChecking(false);
|
||||
const me = this;
|
||||
detectRunner = new paddlejs.Runner({
|
||||
modelPath: 'https://paddleocr.bj.bcebos.com/PaddleJS/PP-OCRv3/ch/ch_PP-OCRv3_det_infer_js_960/model.json',
|
||||
mean: [0.485, 0.456, 0.406],
|
||||
std: [0.229, 0.224, 0.225],
|
||||
bgr: true,
|
||||
webglFeedProcess: true
|
||||
});
|
||||
detectRunner.init().then(_ => {
|
||||
me.setData({
|
||||
loaded: true
|
||||
});
|
||||
});
|
||||
},
|
||||
|
||||
selectImage(event) {
|
||||
const imgPath = this.data.imgList[event.target.dataset.index];
|
||||
this.getImageInfo(imgPath);
|
||||
},
|
||||
|
||||
getImageInfo(imgPath) {
|
||||
const me = this;
|
||||
wx.getImageInfo({
|
||||
src: imgPath,
|
||||
success: imgInfo => {
|
||||
const {
|
||||
path,
|
||||
width,
|
||||
height
|
||||
} = imgInfo;
|
||||
|
||||
const canvasPath = imgPath.includes('http') ? path : imgPath;
|
||||
const canvasId = 'myCanvas';
|
||||
const ctx = wx.createCanvasContext(canvasId);
|
||||
let sw = 960;
|
||||
let sh = 960;
|
||||
let x = 0;
|
||||
let y = 0;
|
||||
if (height / width >= 1) {
|
||||
sw = Math.round(sh * width / height);
|
||||
x = Math.floor((960 - sw) / 2);
|
||||
}
|
||||
else {
|
||||
sh = Math.round(sw * height / width);
|
||||
y = Math.floor((960 - sh) / 2);
|
||||
}
|
||||
ctx.drawImage(canvasPath, x, y, sw, sh);
|
||||
ctx.draw(false, () => {
|
||||
// API 1.9.0 获取图像数据
|
||||
wx.canvasGetImageData({
|
||||
canvasId: canvasId,
|
||||
x: 0,
|
||||
y: 0,
|
||||
width: 960,
|
||||
height: 960,
|
||||
success(res) {
|
||||
me.predict({
|
||||
data: res.data,
|
||||
width: 960,
|
||||
height: 960
|
||||
}, {
|
||||
canvasPath,
|
||||
sw,
|
||||
sh,
|
||||
x,
|
||||
y
|
||||
});
|
||||
}
|
||||
});
|
||||
});
|
||||
}
|
||||
});
|
||||
},
|
||||
|
||||
predict(res, img) {
|
||||
const me = this;
|
||||
detectRunner.predict(res, function (data) {
|
||||
// 获取坐标
|
||||
const points = outputBox(data);
|
||||
me.drawCanvasPoints(img, points.boxes);
|
||||
me.setData({
|
||||
result: JSON.stringify(points.boxes)
|
||||
});
|
||||
});
|
||||
},
|
||||
|
||||
drawCanvasPoints(img, points) {
|
||||
const canvasId = 'result';
|
||||
const ctx = wx.createCanvasContext(canvasId);
|
||||
ctx.drawImage(img.canvasPath, img.x, img.y, img.sw, img.sh);
|
||||
points.length && points.forEach(point => {
|
||||
// 开始一个新的绘制路径
|
||||
ctx.beginPath();
|
||||
// 设置线条颜色为蓝色
|
||||
ctx.strokeStyle = 'blue';
|
||||
// 设置路径起点坐标
|
||||
ctx.moveTo(point[0][0], point[0][1]);
|
||||
ctx.lineTo(point[1][0], point[1][1]);
|
||||
ctx.lineTo(point[2][0], point[2][1]);
|
||||
ctx.lineTo(point[3][0], point[3][1]);
|
||||
ctx.closePath();
|
||||
ctx.stroke();
|
||||
});
|
||||
ctx.draw();
|
||||
}
|
||||
});
|
||||
@@ -1,2 +0,0 @@
|
||||
{
|
||||
}
|
||||
@@ -1,29 +0,0 @@
|
||||
<view>
|
||||
<view class="img-view">
|
||||
<text class="title">点击图片进行预测</text>
|
||||
<scroll-view class="imgWrapper" scroll-x="true">
|
||||
<image
|
||||
class="img {{selectedIndex == index ? 'selected' : ''}}"
|
||||
wx:for="{{imgList}}"
|
||||
wx:key="index"
|
||||
src="{{item}}"
|
||||
mode="aspectFit"
|
||||
bindtap="selectImage"
|
||||
data-index="{{index}}"
|
||||
></image>
|
||||
<canvas
|
||||
canvas-id="myCanvas"
|
||||
style="width: 960px; height: 960px; position: absolute; z-index: -1"
|
||||
></canvas>
|
||||
<canvas
|
||||
canvas-id="result"
|
||||
style="width: 960px; height: 960px;"
|
||||
></canvas>
|
||||
</scroll-view>
|
||||
<text class="result" wx:if="{{result}}" style="height: 300rpx;">文本框选坐标:{{result}}</text>
|
||||
</view>
|
||||
</view>
|
||||
|
||||
<view class="mask" wx:if="{{!loaded}}">
|
||||
<text class="loading">loading…</text>
|
||||
</view>
|
||||
@@ -1,56 +0,0 @@
|
||||
text {
|
||||
display: block;
|
||||
}
|
||||
|
||||
.title {
|
||||
margin-top: 10px;
|
||||
font-size: 16px;
|
||||
line-height: 32px;
|
||||
font-weight: bold;
|
||||
}
|
||||
|
||||
.imgWrapper {
|
||||
margin: 10px 10px 0;
|
||||
white-space: nowrap;
|
||||
}
|
||||
.img {
|
||||
width: 960px;
|
||||
height: 960px;
|
||||
border: 1px solid #f1f1f1;
|
||||
}
|
||||
|
||||
.result {
|
||||
margin-top: 5px;
|
||||
}
|
||||
|
||||
.selected {
|
||||
border: 1px solid #999;
|
||||
}
|
||||
|
||||
.select-btn {
|
||||
margin-top: 20px;
|
||||
width: 60%;
|
||||
}
|
||||
|
||||
.mask {
|
||||
position: absolute;
|
||||
top: 0;
|
||||
left: 0;
|
||||
right: 0;
|
||||
bottom: 0;
|
||||
background-color: rgba(0, 0, 0, .7);
|
||||
}
|
||||
|
||||
.loading {
|
||||
color: #fff;
|
||||
font-size: 20px;
|
||||
position: absolute;
|
||||
top: 50%;
|
||||
left: 50%;
|
||||
transform: translate(-50%, -50%);
|
||||
}
|
||||
|
||||
.img-view {
|
||||
padding-bottom: 20px;
|
||||
border-bottom: 1px solid #f1f1f1;
|
||||
}
|
||||
File diff suppressed because it is too large
Load Diff
Binary file not shown.
@@ -1,75 +0,0 @@
|
||||
{
|
||||
"description": "项目配置文件",
|
||||
"packOptions": {
|
||||
"ignore": []
|
||||
},
|
||||
"setting": {
|
||||
"urlCheck": false,
|
||||
"es6": true,
|
||||
"enhance": true,
|
||||
"postcss": true,
|
||||
"preloadBackgroundData": false,
|
||||
"minified": true,
|
||||
"newFeature": false,
|
||||
"coverView": true,
|
||||
"nodeModules": true,
|
||||
"autoAudits": false,
|
||||
"showShadowRootInWxmlPanel": true,
|
||||
"scopeDataCheck": false,
|
||||
"uglifyFileName": false,
|
||||
"checkInvalidKey": true,
|
||||
"checkSiteMap": true,
|
||||
"uploadWithSourceMap": true,
|
||||
"compileHotReLoad": false,
|
||||
"lazyloadPlaceholderEnable": false,
|
||||
"useMultiFrameRuntime": true,
|
||||
"useApiHook": true,
|
||||
"useApiHostProcess": true,
|
||||
"babelSetting": {
|
||||
"ignore": [],
|
||||
"disablePlugins": [],
|
||||
"outputPath": ""
|
||||
},
|
||||
"enableEngineNative": false,
|
||||
"useIsolateContext": true,
|
||||
"userConfirmedBundleSwitch": false,
|
||||
"packNpmManually": false,
|
||||
"packNpmRelationList": [],
|
||||
"minifyWXSS": true,
|
||||
"disableUseStrict": false,
|
||||
"minifyWXML": true,
|
||||
"showES6CompileOption": false,
|
||||
"useCompilerPlugins": false
|
||||
},
|
||||
"compileType": "miniprogram",
|
||||
"libVersion": "2.22.1",
|
||||
"appid": "wxc43cbd2fafe0aec2",
|
||||
"projectname": "mobilenet",
|
||||
"debugOptions": {
|
||||
"hidedInDevtools": []
|
||||
},
|
||||
"scripts": {},
|
||||
"isGameTourist": false,
|
||||
"simulatorType": "wechat",
|
||||
"simulatorPluginLibVersion": {},
|
||||
"condition": {
|
||||
"search": {
|
||||
"list": []
|
||||
},
|
||||
"conversation": {
|
||||
"list": []
|
||||
},
|
||||
"game": {
|
||||
"list": []
|
||||
},
|
||||
"plugin": {
|
||||
"list": []
|
||||
},
|
||||
"gamePlugin": {
|
||||
"list": []
|
||||
},
|
||||
"miniprogram": {
|
||||
"list": []
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,7 +0,0 @@
|
||||
{
|
||||
"desc": "关于本文件的更多信息,请参考文档 https://developers.weixin.qq.com/miniprogram/dev/framework/sitemap.html",
|
||||
"rules": [{
|
||||
"action": "allow",
|
||||
"page": "*"
|
||||
}]
|
||||
}
|
||||
@@ -1,41 +0,0 @@
|
||||
English | [简体中文](README_CN.md)
|
||||
|
||||
# Introduction to Paddle.js Demo Module
|
||||
|
||||
This part is a model library developed based on Paddle.js, which mainly provides the ability to directly introduce and use models on the web side.
|
||||
|
||||
| demo name | source directory | npm package |
|
||||
| - | - | - |
|
||||
| face detection | [facedetect](./packages/paddlejs-models/facedetect) | [@paddle-js-models/facedetect](https://www.npmjs.com/package/@paddle-js-models/facedetect) |
|
||||
| Screw detection | [detect](./packages/paddlejs-models/detect) | [@paddle-js-models/detect](https://www.npmjs.com/package/@paddle-js-models/detect ) |
|
||||
| Portrait segmentation background replacement | [humanseg](./packages/paddlejs-models/humanseg) | [@paddle-js-models/humanseg](https://www.npmjs.com/package/@paddle-js-models/humanseg) |
|
||||
| Gesture Recognition AI Guessing Shell | [gesture](./packages/paddlejs-models/gesture) | [@paddle-js-models/gesture](https://www.npmjs.com/package/@paddle-js-models/gesture) |
|
||||
| 1000 Item Recognition | [mobilenet](./packages/paddlejs-models/mobilenet) | [@paddle-js-models/mobilenet](https://www.npmjs.com/package/@paddle-js-models/mobilenet) |
|
||||
| Text Detection | [ocrdetection](./packages/paddlejs-models/ocrdetection) | [@paddle-js-models/ocrdet](https://www.npmjs.com/package/@paddle-js-models/ocrdet ) |
|
||||
| Text Recognition | [ocr](./packages/paddlejs-models/ocr) | [@paddle-js-models/ocr](https://www.npmjs.com/package/@paddle-js-models/ocr) |
|
||||
|
||||
## Usage
|
||||
|
||||
This part is Menorepo built with `pnpm`
|
||||
|
||||
### Install dependencies
|
||||
|
||||
````sh
|
||||
pnpm i
|
||||
````
|
||||
|
||||
### Development
|
||||
See Package.json for development testing with `yalc`.
|
||||
|
||||
````sh
|
||||
pnpm run dev:xxx
|
||||
````
|
||||
|
||||
### Overall Introduction
|
||||
|
||||
1. Use rollup to package the code of commonjs and es specifications at one time; at the same time, it is extensible; at present, there are some problems with the dependent cv library; there is no configuration for umd packaging.
|
||||
2. The d.ts file is generated based on api-extractor during packaging, and the introduction of ts is supported to generate our package
|
||||
3. Support testing based on jest and display test related coverage, etc.
|
||||
4. Maintain code style based on ts and eslint to ensure better code development
|
||||
5. Generate custom keywords based on conventional-changelog-cli and generate changelog accordingly
|
||||
6. Implement local packaging development and testing based on yalc
|
||||
@@ -1,43 +0,0 @@
|
||||
[English](README.md) | 简体中文
|
||||
|
||||
# Paddle.js Model Module介绍
|
||||
|
||||
该部分是基于 Paddle.js 进行开发的模型库,主要提供 Web 端可直接引入使用模型的能力。
|
||||
|
||||
| demo名称 | 源码目录 | npm包 |
|
||||
| ---------------- | ------------------------------------------------------ | ------------------------------------------------------------ |
|
||||
| 人脸检测 | [facedetect](./packages/paddlejs-models/facedetect) | [@paddle-js-models/facedetect](https://www.npmjs.com/package/@paddle-js-models/facedetect) |
|
||||
| 螺丝钉检测 | [detect](./packages/paddlejs-models/detect) | [@paddle-js-models/detect](https://www.npmjs.com/package/@paddle-js-models/detect) |
|
||||
| 人像分割背景替换 | [humanseg](./packages/paddlejs-models/humanseg) | [@paddle-js-models/humanseg](https://www.npmjs.com/package/@paddle-js-models/humanseg) |
|
||||
| 手势识别AI猜丁壳 | [gesture](./packages/paddlejs-models/gesture) | [@paddle-js-models/gesture](https://www.npmjs.com/package/@paddle-js-models/gesture) |
|
||||
| 1000种物品识别 | [mobilenet](./packages/paddlejs-models/mobilenet) | [@paddle-js-models/mobilenet](https://www.npmjs.com/package/@paddle-js-models/mobilenet) |
|
||||
| 文本检测 | [ocrdetection](./packages/paddlejs-models/ocrdetection) | [@paddle-js-models/ocrdet](https://www.npmjs.com/package/@paddle-js-models/ocrdet) |
|
||||
| 文本识别 | [ocr](./packages/paddlejs-models/ocr) | [@paddle-js-models/ocr](https://www.npmjs.com/package/@paddle-js-models/ocr) |
|
||||
|
||||
## 开发使用
|
||||
|
||||
该部分是使用 `pnpm` 搭建的 Menorepo
|
||||
|
||||
### 安装依赖
|
||||
|
||||
```sh
|
||||
pnpm i
|
||||
```
|
||||
|
||||
### 开发
|
||||
参考 Package.json 使用 `yalc` 进行开发测试。
|
||||
|
||||
```sh
|
||||
pnpm run dev:xxx
|
||||
```
|
||||
|
||||
### 整体简介
|
||||
|
||||
1. 使用 rollup 一次性打包生成 commonjs 和 es 规范的代码;同时具有可扩展性;目前由于依赖的cv库有些问题;就没有配置umd打包。
|
||||
2. 打包时基于 api-extractor 实现 d.ts 文件生成,实现支持 ts 引入生成我们的包
|
||||
3. 基于 jest 支持测试并显示测试相关覆盖率等
|
||||
4. 基于 ts 和 eslint 维护代码风格,保证代码更好开发
|
||||
5. 基于 conventional-changelog-cli 实现自定义关键词生成对应生成changelog
|
||||
6. 基于 yalc 实现本地打包开发测试
|
||||
|
||||
|
||||
-6927
File diff suppressed because it is too large
Load Diff
@@ -1,51 +0,0 @@
|
||||
{
|
||||
"name": "paddle-js",
|
||||
"version": "3.0.0",
|
||||
"description": "paddlejs",
|
||||
"keywords": [
|
||||
"paddlejs",
|
||||
"web AI",
|
||||
"typescript"
|
||||
],
|
||||
"author": "",
|
||||
"license": "ISC",
|
||||
"scripts": {
|
||||
"dev:@paddle-js-models/detect": "pnpm -F @paddle-js-models/detect dev",
|
||||
"dev:@paddle-js-models/facedetect": "pnpm -F @paddle-js-models/facedetect dev",
|
||||
"dev:@paddle-js-models/ocrdet": "pnpm -F @paddle-js-models/ocrdet dev",
|
||||
"dev:@paddle-js-models/ocr": "pnpm -F @paddle-js-models/ocr dev",
|
||||
"dev:@paddle-js-models/gesture": "pnpm -F @paddle-js-models/gesture dev",
|
||||
"dev:@paddle-js-models/humanseg": "pnpm -F @paddle-js-models/humanseg dev",
|
||||
"dev:@paddle-js-models/humanseg_gpu": "pnpm -F @paddle-js-models/humanseg_gpu dev",
|
||||
"dev:@paddle-js-models/mobilenet": "pnpm -F @paddle-js-models/mobilenet dev",
|
||||
"build:@paddle-js-models/detect": "pnpm -F @paddle-js-models/detect build",
|
||||
"build:@paddle-js-models/facedetect": "pnpm -F @paddle-js-models/facedetect build",
|
||||
"build:@paddle-js-models/ocrdet": "pnpm -F @paddle-js-models/ocrdet build",
|
||||
"build:@paddle-js-models/ocr": "pnpm -F @paddle-js-models/ocr build",
|
||||
"build:@paddle-js-models/gesture": "pnpm -F @paddle-js-models/gesture build",
|
||||
"build:@paddle-js-models/humanseg": "pnpm -F @paddle-js-models/humanseg build",
|
||||
"build:@paddle-js-models/humanseg_gpu": "pnpm -F @paddle-js-models/humanseg_gpu build",
|
||||
"build:@paddle-js-models/mobilenet": "pnpm -F @paddle-js-models/mobilenet build",
|
||||
"publish:@paddle-js-models/detect": "pnpm -F @paddle-js-models/detect publish",
|
||||
"publish:@paddle-js-models/facedetect": "pnpm -F @paddle-js-models/facedetect publish",
|
||||
"publish:@paddle-js-models/ocrdet": "pnpm -F @paddle-js-models/ocrdet publish",
|
||||
"publish:@paddle-js-models/ocr": "pnpm -F @paddle-js-models/ocr publish",
|
||||
"publish:@paddle-js-models/gesture": "pnpm -F @paddle-js-models/gesture publish",
|
||||
"publish:@paddle-js-models/humanseg": "pnpm -F @paddle-js-models/humanseg publish",
|
||||
"publish:@paddle-js-models/humanseg_gpu": "pnpm -F @paddle-js-models/humanseg_gpu publish",
|
||||
"publish:@paddle-js-models/mobilenet": "pnpm -F @paddle-js-models/mobilenet publish",
|
||||
"lint": "eslint --ext .js,.ts packages --fix",
|
||||
"test": "echo \"Error: no test specified\" && exit 1"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@commitlint/cli": "^17.1.2",
|
||||
"@commitlint/config-conventional": "^17.1.0",
|
||||
"@typescript-eslint/eslint-plugin": "^5.37.0",
|
||||
"@typescript-eslint/parser": "^5.37.0",
|
||||
"commitlint": "^17.1.2",
|
||||
"eslint": "8.22.0",
|
||||
"husky": "^8.0.1",
|
||||
"lint-staged": "^13.0.3",
|
||||
"typescript": "^4.8.3"
|
||||
}
|
||||
}
|
||||
@@ -1,10 +0,0 @@
|
||||
{
|
||||
"presets": [
|
||||
[
|
||||
"@babel/preset-env",
|
||||
{
|
||||
"modules": false
|
||||
}
|
||||
]
|
||||
]
|
||||
}
|
||||
@@ -1,3 +0,0 @@
|
||||
module.exports = {
|
||||
extends: ['@commitlint/config-conventional'], // 使用预设的配置 https://github.com/conventional-changelog/commitlint/blob/master/@commitlint/config-conventional/index.js
|
||||
}
|
||||
@@ -1 +0,0 @@
|
||||
node_modules
|
||||
@@ -1,27 +0,0 @@
|
||||
module.exports = {
|
||||
parser: '@typescript-eslint/parser', // 使用 ts 解析器
|
||||
extends: [
|
||||
'eslint:recommended', // eslint 推荐规则
|
||||
'plugin:@typescript-eslint/recommended', // ts 推荐规则
|
||||
'plugin:jest/recommended',
|
||||
],
|
||||
plugins: [
|
||||
'@typescript-eslint',
|
||||
'jest',
|
||||
],
|
||||
env: {
|
||||
browser: true,
|
||||
node: true,
|
||||
es6: true,
|
||||
},
|
||||
parserOptions: {
|
||||
project: 'tsconfig.eslint.json',
|
||||
ecmaVersion: 2019,
|
||||
sourceType: 'module',
|
||||
ecmaFeatures: {
|
||||
experimentalObjectRestSpread: true
|
||||
}
|
||||
},
|
||||
rules: {}
|
||||
}
|
||||
|
||||
@@ -1,30 +0,0 @@
|
||||
# Logs
|
||||
logs
|
||||
*.log
|
||||
npm-debug.log*
|
||||
yarn-debug.log*
|
||||
yarn-error.log*
|
||||
pnpm-debug.log*
|
||||
lerna-debug.log*
|
||||
|
||||
node_modules
|
||||
.DS_Store
|
||||
dist
|
||||
etc
|
||||
lib
|
||||
dist-ssr
|
||||
coverage
|
||||
*.local
|
||||
|
||||
/cypress/videos/
|
||||
/cypress/screenshots/
|
||||
|
||||
# Editor directories and files
|
||||
.vscode/*
|
||||
!.vscode/extensions.json
|
||||
.idea
|
||||
*.suo
|
||||
*.ntvs*
|
||||
*.njsproj
|
||||
*.sln
|
||||
*.sw?
|
||||
@@ -1 +0,0 @@
|
||||
auto-install-peers=true
|
||||
@@ -1,38 +0,0 @@
|
||||
[中文版](./README_cn.md)
|
||||
|
||||
# detect
|
||||
|
||||
detect model is used to detect the position of label frame in the image.
|
||||
|
||||
<img src="https://img.shields.io/npm/v/@paddle-js-models/detect?color=success" alt="version"> <img src="https://img.shields.io/bundlephobia/min/@paddle-js-models/detect" alt="size"> <img src="https://img.shields.io/npm/dm/@paddle-js-models/detect?color=orange" alt="downloads"> <img src="https://img.shields.io/npm/dt/@paddle-js-models/detect" alt="downloads">
|
||||
|
||||
# Usage
|
||||
|
||||
```js
|
||||
import * as det from '@paddle-js-models/detect';
|
||||
|
||||
// Load model
|
||||
await det.load();
|
||||
|
||||
// Get label index, confidence and coordinates
|
||||
const res = await det.detect(img);
|
||||
|
||||
res.forEach(item => {
|
||||
// Get label index
|
||||
console.log(item[0]);
|
||||
// Get label confidence
|
||||
console.log(item[1]);
|
||||
// Get label left coordinates
|
||||
console.log(item[2]);
|
||||
// Get label top coordinates
|
||||
console.log(item[3]);
|
||||
// Get label right coordinates
|
||||
console.log(item[4]);
|
||||
// Get label bottom coordinates
|
||||
console.log(item[5]);
|
||||
});
|
||||
```
|
||||
|
||||
# effect
|
||||

|
||||
|
||||
@@ -1,38 +0,0 @@
|
||||
[English](./README.md)
|
||||
|
||||
# detect
|
||||
|
||||
detect模型用于检测图像中label框选位置。
|
||||
|
||||
<img src="https://img.shields.io/npm/v/@paddle-js-models/detect?color=success" alt="version"> <img src="https://img.shields.io/bundlephobia/min/@paddle-js-models/detect" alt="size"> <img src="https://img.shields.io/npm/dm/@paddle-js-models/detect?color=orange" alt="downloads"> <img src="https://img.shields.io/npm/dt/@paddle-js-models/detect" alt="downloads">
|
||||
|
||||
# 使用
|
||||
|
||||
```js
|
||||
import * as det from '@paddle-js-models/detect';
|
||||
|
||||
// 模型加载
|
||||
await det.load();
|
||||
|
||||
// 获取label对应索引、置信度、检测框选坐标
|
||||
const res = await det.detect(img);
|
||||
|
||||
res.forEach(item => {
|
||||
// 获取label对应索引
|
||||
console.log(item[0]);
|
||||
// 获取label置信度
|
||||
console.log(item[1]);
|
||||
// 获取检测框选left顶点
|
||||
console.log(item[2]);
|
||||
// 获取检测框选top顶点
|
||||
console.log(item[3]);
|
||||
// 获取检测框选right顶点
|
||||
console.log(item[4]);
|
||||
// 获取检测框选bottom顶点
|
||||
console.log(item[5]);
|
||||
});
|
||||
```
|
||||
|
||||
# 效果
|
||||

|
||||
|
||||
@@ -1,15 +0,0 @@
|
||||
{
|
||||
"$schema": "https://developer.microsoft.com/json-schemas/api-extractor/v7/api-extractor.schema.json",
|
||||
"mainEntryPointFilePath": "./lib/index.d.ts",
|
||||
"bundledPackages": [],
|
||||
"docModel": {
|
||||
"enabled": true
|
||||
},
|
||||
"apiReport": {
|
||||
"enabled": true
|
||||
},
|
||||
"dtsRollup": {
|
||||
"enabled": true,
|
||||
"untrimmedFilePath": "./lib/index.d.ts"
|
||||
}
|
||||
}
|
||||
@@ -1,17 +0,0 @@
|
||||
import path from "path";
|
||||
import chalk from "chalk";
|
||||
|
||||
export const paths = {
|
||||
root: path.join(__dirname, '../'),
|
||||
input: path.join(__dirname, '../src/index.ts'),
|
||||
lib: path.join(__dirname, '../lib'),
|
||||
}
|
||||
|
||||
export const log = {
|
||||
progress: (text: string) => {
|
||||
console.log(chalk.green(text))
|
||||
},
|
||||
error: (text: string) => {
|
||||
console.log(chalk.red(text))
|
||||
},
|
||||
}
|
||||
@@ -1,160 +0,0 @@
|
||||
import path from 'path'
|
||||
import fse from 'fs-extra'
|
||||
import { series } from "gulp"
|
||||
import { paths, log } from "./build_package/util"
|
||||
import rollupConfig from './rollup.config'
|
||||
import { rollup } from 'rollup'
|
||||
import {
|
||||
Extractor,
|
||||
ExtractorConfig,
|
||||
ExtractorResult,
|
||||
} from '@microsoft/api-extractor'
|
||||
/**
|
||||
* 这里是由于 'conventional-changelog' 未提供类型文件
|
||||
*/
|
||||
// eslint-disable-next-line @typescript-eslint/ban-ts-comment
|
||||
// @ts-ignore
|
||||
import conventionalChangelog from 'conventional-changelog'
|
||||
|
||||
interface TaskFunc {
|
||||
// eslint-disable-next-line @typescript-eslint/ban-types
|
||||
(cb: Function): void
|
||||
}
|
||||
|
||||
const CHANGE_TRACE = ['paddlejs-models/detect', 'paddle-js-models/detect', 'paddlejs-models', 'paddle-js-models', 'all']
|
||||
|
||||
/**
|
||||
* 删除 lib 文件
|
||||
* @param cb
|
||||
* @returns {Promise<void>}
|
||||
*/
|
||||
const clearLibFile: TaskFunc = async (cb) => {
|
||||
fse.removeSync(paths.lib)
|
||||
log.progress('Deleted lib file')
|
||||
cb()
|
||||
}
|
||||
|
||||
/**
|
||||
* rollup 打包
|
||||
* @param cb
|
||||
*/
|
||||
const buildByRollup: TaskFunc = async (cb) => {
|
||||
const inputOptions = {
|
||||
input: rollupConfig.input,
|
||||
external: rollupConfig.external,
|
||||
plugins: rollupConfig.plugins,
|
||||
}
|
||||
const outOptions = rollupConfig.output
|
||||
const bundle = await rollup(inputOptions)
|
||||
|
||||
// 写入需要遍历输出配置
|
||||
if (Array.isArray(outOptions)) {
|
||||
for (const outOption of outOptions) {
|
||||
await bundle.write(outOption)
|
||||
}
|
||||
cb()
|
||||
log.progress('Rollup built successfully')
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* api-extractor 整理 .d.ts 文件
|
||||
* @param cb
|
||||
*/
|
||||
const apiExtractorGenerate: TaskFunc = async (cb) => {
|
||||
const apiExtractorJsonPath: string = path.join(__dirname, './api-extractor.json')
|
||||
// 加载并解析 api-extractor.json 文件
|
||||
const extractorConfig: ExtractorConfig = await ExtractorConfig.loadFileAndPrepare(apiExtractorJsonPath)
|
||||
// 判断是否存在 index.d.ts 文件,这里必须异步先访问一边,不然后面找不到会报错
|
||||
const isdtxExist: boolean = await fse.pathExists(extractorConfig.mainEntryPointFilePath)
|
||||
// 判断是否存在 etc 目录,api-extractor需要该目录存在
|
||||
const isEtcExist: boolean = await fse.pathExists('./etc')
|
||||
|
||||
if (!isdtxExist) {
|
||||
log.error('API Extractor not find index.d.ts')
|
||||
return
|
||||
}
|
||||
|
||||
if (!isEtcExist) {
|
||||
fse.mkdirSync('etc');
|
||||
log.progress('Create folder etc for API Extractor')
|
||||
}
|
||||
|
||||
// 调用 API
|
||||
const extractorResult: ExtractorResult = await Extractor.invoke(extractorConfig, {
|
||||
localBuild: true,
|
||||
// 在输出中显示信息
|
||||
showVerboseMessages: true,
|
||||
})
|
||||
|
||||
if (extractorResult.succeeded) {
|
||||
// 删除多余的 .d.ts 文件
|
||||
const libFiles: string[] = await fse.readdir(paths.lib)
|
||||
for (const file of libFiles) {
|
||||
if (file.endsWith('.d.ts') && !file.includes('index')) {
|
||||
await fse.remove(path.join(paths.lib, file))
|
||||
}
|
||||
}
|
||||
log.progress('API Extractor completed successfully')
|
||||
// api-extractor 会生成 temp 文件夹,完成后进行删除
|
||||
fse.ensureDirSync('temp')
|
||||
fse.removeSync('temp')
|
||||
cb()
|
||||
} else {
|
||||
log.error(`API Extractor completed with ${extractorResult.errorCount} errors`
|
||||
+ ` and ${extractorResult.warningCount} warnings`)
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* 完成
|
||||
* @param cb
|
||||
*/
|
||||
const complete: TaskFunc = (cb) => {
|
||||
log.progress('---- end ----')
|
||||
cb()
|
||||
}
|
||||
|
||||
/**
|
||||
* 生成 CHANGELOG
|
||||
* @param cb
|
||||
*/
|
||||
export const changelog: TaskFunc = async (cb) => {
|
||||
const checkTrace = (chunk: string) => {
|
||||
for (const keyWord of CHANGE_TRACE) {
|
||||
if (chunk.includes(keyWord)) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
const changelogPath: string = path.join(paths.root, 'CHANGELOG.md')
|
||||
// 对命令 conventional-changelog -p angular -i CHANGELOG.md -w -r 0
|
||||
const changelogPipe = await conventionalChangelog({
|
||||
preset: 'angular',
|
||||
releaseCount: 0,
|
||||
})
|
||||
changelogPipe.setEncoding('utf8')
|
||||
|
||||
const resultArray = ['# 更新日志\n\n']
|
||||
changelogPipe.on('data', (chunk) => {
|
||||
// 原来的 commits 路径是进入提交列表
|
||||
chunk = chunk.replace(/\/commits\//g, '/commit/')
|
||||
/**
|
||||
* title 或 指定跟踪 才会写入CHANGELOG
|
||||
*/
|
||||
for (const log of chunk.split("\n")) {
|
||||
if (log.includes('# ') || log.includes('### ') || checkTrace(log)) {
|
||||
resultArray.push(log+"\n\n")
|
||||
}
|
||||
}
|
||||
})
|
||||
changelogPipe.on('end', async () => {
|
||||
fse.createWriteStream(changelogPath).write(resultArray.join(''))
|
||||
cb()
|
||||
log.progress('CHANGELOG generation completed')
|
||||
})
|
||||
}
|
||||
|
||||
|
||||
exports.build = series(clearLibFile, buildByRollup, apiExtractorGenerate, complete)
|
||||
@@ -1,4 +0,0 @@
|
||||
module.exports = {
|
||||
preset: 'ts-jest',
|
||||
testEnvironment: 'node',
|
||||
}
|
||||
@@ -1,77 +0,0 @@
|
||||
{
|
||||
"name": "@paddle-js-models/detect",
|
||||
"version": "3.0.1",
|
||||
"description": "",
|
||||
"main": "lib/index.js",
|
||||
"module": "lib/index.esm.js",
|
||||
"typings": "lib/index.d.js",
|
||||
"files": [
|
||||
"lib",
|
||||
"LICENSE",
|
||||
"CHANGELOG.md",
|
||||
"README.md",
|
||||
"README_cn.md"
|
||||
],
|
||||
"keywords": [],
|
||||
"author": "",
|
||||
"license": "ISC",
|
||||
"publishConfig": {
|
||||
"access": "public",
|
||||
"registry": "https://registry.npmjs.org/"
|
||||
},
|
||||
"scripts": {
|
||||
"dev": "yalc publish --push",
|
||||
"prepublish": "pnpm lint & pnpm test",
|
||||
"prepublishOnly": "pnpm build",
|
||||
"build": "gulp build",
|
||||
"lint": "eslint --ext .js,.ts src --fix",
|
||||
"api": "api-extractor run",
|
||||
"test": "jest --coverage --verbose -u",
|
||||
"changelog": "gulp changelog"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@babel/core": "^7.19.0",
|
||||
"@babel/preset-env": "^7.19.0",
|
||||
"@commitlint/cli": "^17.1.2",
|
||||
"@commitlint/config-conventional": "^17.1.0",
|
||||
"@microsoft/api-extractor": "^7.30.0",
|
||||
"@types/d3-polygon": "^3.0.0",
|
||||
"@types/fs-extra": "^9.0.13",
|
||||
"@types/gulp": "^4.0.9",
|
||||
"@types/jest": "^29.0.1",
|
||||
"@types/node": "^18.7.16",
|
||||
"@typescript-eslint/eslint-plugin": "^5.36.2",
|
||||
"@typescript-eslint/parser": "^5.36.2",
|
||||
"browserify": "^17.0.0",
|
||||
"chalk": "4.1.2",
|
||||
"commitlint": "^17.1.2",
|
||||
"conventional-changelog-cli": "^2.2.2",
|
||||
"eslint": "8.22.0",
|
||||
"eslint-plugin-jest": "^27.0.4",
|
||||
"fs-extra": "^10.1.0",
|
||||
"gulp": "^4.0.2",
|
||||
"gulp-clean": "^0.4.0",
|
||||
"gulp-typescript": "6.0.0-alpha.1",
|
||||
"gulp-uglify": "^3.0.2",
|
||||
"husky": "^8.0.1",
|
||||
"jest": "^29.0.3",
|
||||
"lint-staged": "^13.0.3",
|
||||
"rollup": "^2.79.0",
|
||||
"rollup-plugin-babel": "^4.4.0",
|
||||
"rollup-plugin-commonjs": "^10.1.0",
|
||||
"rollup-plugin-eslint": "^7.0.0",
|
||||
"rollup-plugin-node-resolve": "^5.2.0",
|
||||
"rollup-plugin-string": "^3.0.0",
|
||||
"rollup-plugin-typescript2": "^0.34.0",
|
||||
"ts-jest": "^29.0.0",
|
||||
"ts-node": "^10.9.1",
|
||||
"tsify": "^5.0.4",
|
||||
"typescript": "^4.8.3",
|
||||
"vinyl-buffer": "^1.0.1",
|
||||
"vinyl-source-stream": "^2.0.0"
|
||||
},
|
||||
"dependencies": {
|
||||
"@paddlejs/paddlejs-backend-webgl": "^1.2.9",
|
||||
"@paddlejs/paddlejs-core": "^2.2.0"
|
||||
}
|
||||
}
|
||||
@@ -1,71 +0,0 @@
|
||||
import path from 'path'
|
||||
import { RollupOptions } from 'rollup'
|
||||
import rollupTypescript from 'rollup-plugin-typescript2'
|
||||
import babel from 'rollup-plugin-babel'
|
||||
import resolve from 'rollup-plugin-node-resolve'
|
||||
import commonjs from 'rollup-plugin-commonjs'
|
||||
import { eslint } from 'rollup-plugin-eslint'
|
||||
import { DEFAULT_EXTENSIONS } from '@babel/core'
|
||||
|
||||
import pkg from './package.json'
|
||||
import { paths } from "./build_package/util";
|
||||
|
||||
|
||||
// rollup 配置项
|
||||
const rollupConfig: RollupOptions = {
|
||||
input: paths.input,
|
||||
output: [
|
||||
// 输出 commonjs 规范的代码
|
||||
{
|
||||
file: path.join(paths.lib, 'index.js'),
|
||||
format: 'cjs',
|
||||
name: pkg.name,
|
||||
},
|
||||
// 输出 es 规范的代码
|
||||
{
|
||||
file: path.join(paths.lib, 'index.esm.js'),
|
||||
format: 'es',
|
||||
name: pkg.name,
|
||||
},
|
||||
],
|
||||
external: ['@paddlejs-mediapipe/opencv',
|
||||
'@paddlejs/paddlejs-backend-webgl',
|
||||
'@paddlejs/paddlejs-core',
|
||||
'@types/node',
|
||||
'd3-polygon',
|
||||
'js-clipper',
|
||||
'number-precision'],
|
||||
// plugins 需要注意引用顺序
|
||||
plugins: [
|
||||
eslint({
|
||||
throwOnError: true,
|
||||
throwOnWarning: false,
|
||||
include: ['src/**/*.ts'],
|
||||
exclude: ['node_modules/**', 'lib/**', '*.js'],
|
||||
}),
|
||||
|
||||
// 使得 rollup 支持 commonjs 规范,识别 commonjs 规范的依赖
|
||||
commonjs(),
|
||||
|
||||
// 配合 commnjs 解析第三方模块
|
||||
resolve({
|
||||
// 将自定义选项传递给解析插件
|
||||
customResolveOptions: {
|
||||
moduleDirectory: 'node_modules',
|
||||
},
|
||||
}),
|
||||
rollupTypescript(),
|
||||
babel({
|
||||
runtimeHelpers: true,
|
||||
// 只转换源代码,不运行外部依赖
|
||||
exclude: 'node_modules/**',
|
||||
// babel 默认不支持 ts 需要手动添加
|
||||
extensions: [
|
||||
...DEFAULT_EXTENSIONS,
|
||||
'.ts',
|
||||
],
|
||||
}),
|
||||
],
|
||||
}
|
||||
|
||||
export default rollupConfig
|
||||
@@ -1,29 +0,0 @@
|
||||
/**
|
||||
* @file detect model
|
||||
*/
|
||||
|
||||
import { Runner } from '@paddlejs/paddlejs-core';
|
||||
import '@paddlejs/paddlejs-backend-webgl';
|
||||
|
||||
let detectRunner = null as Runner;
|
||||
|
||||
export async function init() {
|
||||
detectRunner = new Runner({
|
||||
modelPath: 'https://paddlejs.bj.bcebos.com/models/fuse/detect/detect_fuse_activation/model.json',
|
||||
fill: '#fff',
|
||||
mean: [0.5, 0.5, 0.5],
|
||||
std: [0.5, 0.5, 0.5],
|
||||
bgr: true,
|
||||
keepRatio: false,
|
||||
webglFeedProcess: true
|
||||
});
|
||||
|
||||
await detectRunner.init();
|
||||
}
|
||||
|
||||
export async function detect(image) {
|
||||
const output = await detectRunner.predict(image);
|
||||
// 阈值
|
||||
const thresh = 0.3;
|
||||
return output.filter(item => item[1] > thresh);
|
||||
}
|
||||
@@ -1,12 +0,0 @@
|
||||
import assert from 'assert'
|
||||
|
||||
describe('Example:', () => {
|
||||
/**
|
||||
* Example
|
||||
*/
|
||||
describe('ExampleTest', () => {
|
||||
test('Hello World!', () => {
|
||||
assert.strictEqual('Hello World!', 'Hello World!')
|
||||
})
|
||||
})
|
||||
})
|
||||
@@ -1,11 +0,0 @@
|
||||
{
|
||||
"compilerOptions": {
|
||||
"baseUrl": "./",
|
||||
"resolveJsonModule": true
|
||||
},
|
||||
"include": [
|
||||
"**/*.ts",
|
||||
"**/*.js",
|
||||
".eslintrc.js"
|
||||
]
|
||||
}
|
||||
@@ -1,31 +0,0 @@
|
||||
{
|
||||
"compilerOptions": {
|
||||
/* Visit https://aka.ms/tsconfig to read more about this file */
|
||||
"target": "ESNext", /* Set the JavaScript language version for emitted JavaScript and include compatible library declarations. */
|
||||
"lib": [
|
||||
"ESNext",
|
||||
"DOM"
|
||||
], /* Specify a set of bundled library declaration files that describe the target runtime environment. */
|
||||
"moduleResolution": "node", /* Specify how TypeScript looks up a file from a given module specifier. */
|
||||
"baseUrl": "./", /* Specify the base directory to resolve non-relative module names. */
|
||||
"resolveJsonModule": true, /* Enable importing .json files. */
|
||||
"allowJs": true, /* Allow JavaScript files to be a part of your program. Use the 'checkJS' option to get errors from these files. */
|
||||
|
||||
"declaration": true, /* Generate .d.ts files from TypeScript and JavaScript files in your project. */
|
||||
"declarationMap": true, /* Create sourcemaps for d.ts files. */
|
||||
"sourceMap": true, /* Create source map files for emitted JavaScript files. */
|
||||
"outDir": "./lib", /* Specify an output folder for all emitted files. */
|
||||
"removeComments": false, /* Disable emitting comments. */
|
||||
|
||||
"allowSyntheticDefaultImports": true, /* Allow 'import x from y' when a module doesn't have a default export. */
|
||||
"esModuleInterop": true, /* Emit additional JavaScript to ease support for importing CommonJS modules. This enables 'allowSyntheticDefaultImports' for type compatibility. */
|
||||
"forceConsistentCasingInFileNames": true, /* Ensure that casing is correct in imports. */
|
||||
|
||||
"strict": true, /* Enable all strict type-checking options. */
|
||||
"noImplicitAny": false, /* Enable error reporting for expressions and declarations with an implied 'any' type. */
|
||||
"skipLibCheck": true /* Skip type checking all .d.ts files. */
|
||||
},
|
||||
"include": [
|
||||
"src"
|
||||
]
|
||||
}
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user