// Copyright 2021 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef COMPONENTS_OPTIMIZATION_GUIDE_CORE_BASE_MODEL_EXECUTOR_H_
#define COMPONENTS_OPTIMIZATION_GUIDE_CORE_BASE_MODEL_EXECUTOR_H_

#include "components/optimization_guide/core/base_model_executor_helpers.h"
#include "components/optimization_guide/core/execution_status.h"
#include "components/optimization_guide/core/optimization_guide_features.h"
#include "components/optimization_guide/core/tflite_model_executor.h"
#include "components/optimization_guide/core/tflite_op_resolver.h"
#include "third_party/tflite_support/src/tensorflow_lite_support/cc/task/core/base_task_api.h"

namespace optimization_guide {

// A ModelExecutor that executes models with arbitrary input and output types.
// Note that callers will need to give an implementation of this class to a
// |ModelHandler|, whereas the handler is the actual class that calling code
// would own and call into.
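//
// A minimal sketch of a subclass (|MyExecutor| and the float-vector I/O types
// here are hypothetical, chosen only for illustration; PopulateTensor() and
// PopulateVector() come from tflite_support's task_utils.h):
//
//   class MyExecutor
//       : public BaseModelExecutor<std::vector<float>,
//                                  const std::vector<float>&> {
//    protected:
//     bool Preprocess(const std::vector<TfLiteTensor*>& input_tensors,
//                     const std::vector<float>& input) override {
//       return tflite::task::core::PopulateTensor<float>(input,
//                                                        input_tensors[0])
//           .ok();
//     }
//
//     absl::optional<std::vector<float>> Postprocess(
//         const std::vector<const TfLiteTensor*>& output_tensors) override {
//       std::vector<float> output;
//       if (!tflite::task::core::PopulateVector<float>(output_tensors[0],
//                                                      &output)
//                .ok()) {
//         return absl::nullopt;
//       }
//       return output;
//     }
//   };
//
// An instance of such a subclass would then be passed to a |ModelHandler| of
// matching output/input types, which owns the executor and is what calling
// code interacts with.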
template <class OutputType, class... InputTypes>
class BaseModelExecutor : public TFLiteModelExecutor<OutputType, InputTypes...>,
                          public InferenceDelegate<OutputType, InputTypes...> {
 public:
  using ModelExecutionTask =
      tflite::task::core::BaseTaskApi<OutputType, InputTypes...>;

  BaseModelExecutor() = default;
  ~BaseModelExecutor() override = default;

  BaseModelExecutor(const BaseModelExecutor&) = delete;
  BaseModelExecutor& operator=(const BaseModelExecutor&) = delete;

  // TFLiteModelExecutor:
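  // Caches the per-optimization-target thread count override (if any) before
  // handing initialization off to the base class.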
  void InitializeAndMoveToExecutionThread(
      absl::optional<base::TimeDelta> model_inference_timeout,
      proto::OptimizationTarget optimization_target,
      scoped_refptr<base::SequencedTaskRunner> execution_task_runner,
      scoped_refptr<base::SequencedTaskRunner> reply_task_runner) override {
    num_threads_ =
        features::OverrideNumThreadsForOptTarget(optimization_target)
            .value_or(-1);
    TFLiteModelExecutor<OutputType, InputTypes...>::
        InitializeAndMoveToExecutionThread(
            model_inference_timeout, optimization_target,
            execution_task_runner, reply_task_runner);
  }

 protected:
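  // Downcasts to the GenericModelExecutionTask built by
  // BuildModelExecutionTask() below and forwards the call; that task calls
  // back into this delegate's Preprocess() and Postprocess().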
  absl::optional<OutputType> Execute(ModelExecutionTask* execution_task,
                                     ExecutionStatus* out_status,
                                     InputTypes... args) override {
    return static_cast<GenericModelExecutionTask<OutputType, InputTypes...>*>(
               execution_task)
        ->Execute(out_status, args...);
  }

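  // Loads the (already memory-mapped) model flatbuffer into a TfLiteEngine,
  // applies the CPU thread count from above, and wraps the engine in a
  // GenericModelExecutionTask with |this| as the inference delegate.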
  std::unique_ptr<ModelExecutionTask> BuildModelExecutionTask(
      base::MemoryMappedFile* model_file,
      ExecutionStatus* out_status) override {
    std::unique_ptr<tflite::task::core::TfLiteEngine> tflite_engine =
        std::make_unique<tflite::task::core::TfLiteEngine>(
            std::make_unique<TFLiteOpResolver>());
    absl::Status model_load_status = tflite_engine->BuildModelFromFlatBuffer(
        reinterpret_cast<const char*>(model_file->data()),
        model_file->length());
    if (!model_load_status.ok()) {
      DLOG(ERROR) << "Failed to load model: " << model_load_status.ToString();
      *out_status = ExecutionStatus::kErrorModelFileNotValid;
      return nullptr;
    }

    auto compute_settings = tflite::proto::ComputeSettings();
    compute_settings.mutable_tflite_settings()
        ->mutable_cpu_settings()
        ->set_num_threads(num_threads_);
    absl::Status interpreter_status =
        tflite_engine->InitInterpreter(compute_settings);
    if (!interpreter_status.ok()) {
      DLOG(ERROR) << "Failed to initialize model interpreter: "
                  << interpreter_status.ToString();
      *out_status = ExecutionStatus::kErrorUnknown;
      return nullptr;
    }

    return std::make_unique<
        GenericModelExecutionTask<OutputType, InputTypes...>>(
        std::move(tflite_engine), this);
  }

  // InferenceDelegate:
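  // Subclasses implement these to convert between the caller's types and the
  // model's tensors: Preprocess() fills the input tensors from |input| and
  // returns false on failure; Postprocess() reads the output tensors into an
  // OutputType, with absl::nullopt signaling failure.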
  bool Preprocess(const std::vector<TfLiteTensor*>& input_tensors,
                  InputTypes... input) override = 0;
  absl::optional<OutputType> Postprocess(
      const std::vector<const TfLiteTensor*>& output_tensors) override = 0;

 private:
  // -1 tells TFLite to use its own default number of threads.
  int num_threads_ = -1;
};

}  // namespace optimization_guide

#endif  // COMPONENTS_OPTIMIZATION_GUIDE_CORE_BASE_MODEL_EXECUTOR_H_