[Other] FastDeploy TensorRT && ONNX backend support to load model from memory (#1130)

* Update all backends load model from buffer

* Delete redundant code

* Format code style

* Format code style

* Delete redundant code

* Delete redundant code

* Add some FDASSERTs

* Update load model from memory when cloning engine

* Update clone engine code

* Update set_model_buffer api parameters with char pointer

* Release memory buffer variables after finish init backends

* Fix conflict

* Fix bug
This commit is contained in:
huangjianhui
2023-02-01 11:36:09 +08:00
committed by GitHub
parent 5b7728e898
commit 76df90afc3
17 changed files with 201 additions and 154 deletions
+6 -14
View File
@@ -38,29 +38,21 @@ void RuntimeOption::SetModelPath(const std::string& model_path,
}
}
void RuntimeOption::SetModelBuffer(const char* model_buffer,
size_t model_buffer_size,
const char* params_buffer,
size_t params_buffer_size,
void RuntimeOption::SetModelBuffer(const std::string& model_buffer,
const std::string& params_buffer,
const ModelFormat& format) {
model_buffer_size_ = model_buffer_size;
params_buffer_size_ = params_buffer_size;
model_from_memory_ = true;
if (format == ModelFormat::PADDLE) {
model_buffer_ = std::string(model_buffer, model_buffer + model_buffer_size);
params_buffer_ =
std::string(params_buffer, params_buffer + params_buffer_size);
model_buffer_ = model_buffer;
params_buffer_ = params_buffer;
model_format = ModelFormat::PADDLE;
} else if (format == ModelFormat::ONNX) {
model_buffer_ = std::string(model_buffer, model_buffer + model_buffer_size);
model_buffer_ = model_buffer;
model_format = ModelFormat::ONNX;
} else if (format == ModelFormat::TORCHSCRIPT) {
model_buffer_ = std::string(model_buffer, model_buffer + model_buffer_size);
model_format = ModelFormat::TORCHSCRIPT;
} else {
FDASSERT(false,
"The model format only can be "
"ModelFormat::PADDLE/ModelFormat::ONNX/ModelFormat::TORCHSCRIPT.");
"ModelFormat::PADDLE/ModelFormat::ONNX.");
}
}