[Other] Change all XPU to KunlunXin (#973)

* [FlyCV] Bump up FlyCV -> official release 1.0.0

* XPU to KunlunXin

* update

* update model link

* update doc

* update device

* update code

* Remove useless code

Co-authored-by: DefTruth <qiustudent_r@163.com>
Co-authored-by: DefTruth <31974251+DefTruth@users.noreply.github.com>
This commit is contained in:
yeliang2258
2022-12-27 10:02:02 +08:00
committed by GitHub
parent 6078bd9657
commit 45865c8724
111 changed files with 369 additions and 368 deletions
+4 -4
View File
@@ -52,10 +52,10 @@ void CpuInfer(const std::string &model_file, const std::string &params_file,
<< ", Cosine 02:" << cosine02 << std::endl;
}
void XpuInfer(const std::string &model_file, const std::string &params_file,
void KunlunXinInfer(const std::string &model_file, const std::string &params_file,
const std::vector<std::string> &image_file) {
auto option = fastdeploy::RuntimeOption();
option.UseXpu();
option.UseKunlunXin();
auto model = fastdeploy::vision::faceid::AdaFace(model_file, params_file);
if (!model.Initialized()) {
std::cerr << "Failed to initialize." << std::endl;
@@ -182,7 +182,7 @@ int main(int argc, char *argv[]) {
"test_lite_focal_AdaFace_2.JPG 0"
<< std::endl;
std::cout << "The data type of run_option is int, 0: run with cpu; 1: run "
"with gpu; 2: run with gpu and use tensorrt backend; 3: run with xpu."
"with gpu; 2: run with gpu and use tensorrt backend; 3: run with kunlunxin."
<< std::endl;
return -1;
}
@@ -196,7 +196,7 @@ int main(int argc, char *argv[]) {
} else if (std::atoi(argv[6]) == 2) {
TrtInfer(argv[1], argv[2], image_files);
} else if (std::atoi(argv[6]) == 3) {
CpuInfer(argv[1], argv[2], image_files);
KunlunXinInfer(argv[1], argv[2], image_files);
}
return 0;
}