// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#pragma once

#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <memory>
#include <random>
#include <vector>

#include <gtest/gtest.h>

#include "xnnpack.h"
#include "xnnpack/subgraph.h"
#include "subgraph-tester.h"

namespace xnnpack {

class RuntimeTester : public SubgraphTester {
 public:
  using SubgraphTester::SubgraphTester;

  // Creates and runs the runtime with operator fusion enabled, then returns a
  // copy of the output tensor.
  template <typename T>
  xnnpack::Buffer<T> RunWithFusion() {
    Run();
    xnnpack::Buffer<char>& tensor = this->external_tensors_.at(this->output_id_);
    xnnpack::Buffer<T> output(tensor.size() / sizeof(T));
    std::memcpy(output.data(), tensor.data(), tensor.size());
    return output;
  }

  // Creates and runs the runtime with operator fusion disabled, then returns a
  // copy of the output tensor.
  template <typename T>
  xnnpack::Buffer<T> RunWithoutFusion() {
    Run(XNN_FLAG_NO_OPERATOR_FUSION);
    xnnpack::Buffer<char>& tensor = this->external_tensors_.at(this->output_id_);
    xnnpack::Buffer<T> output(tensor.size() / sizeof(T));
    std::memcpy(output.data(), tensor.data(), tensor.size());
    return output;
  }

  // Invokes the previously created runtime again and returns a copy of the
  // output tensor.
  template <typename T>
  xnnpack::Buffer<T> RepeatRun() {
    xnnpack::Buffer<char>& tensor = this->external_tensors_.at(this->output_id_);
    xnn_invoke_runtime(Runtime());
    xnnpack::Buffer<T> output(tensor.size() / sizeof(T));
    std::memcpy(output.data(), tensor.data(), tensor.size());
    return output;
  }

  void CreateRuntime(uint32_t flags = 0) {
    xnn_runtime_t runtime = nullptr;
    ASSERT_EQ(xnn_status_success,
              xnn_create_runtime_v3(this->subgraph_.get(), nullptr, nullptr, flags, &runtime));
    ASSERT_NE(nullptr, runtime);
    runtime_.reset(runtime);
  }

  void SetupRuntime() {
    std::vector<xnn_external_value> externals;
    for (auto it = this->external_tensors_.begin(); it != this->external_tensors_.end(); ++it) {
      if (it->first == this->output_id_) {
        // Scramble output tensor.
        std::fill(it->second.begin(), it->second.end(), 0xA8);
      }
      externals.push_back(xnn_external_value{it->first, it->second.data()});
    }
    ASSERT_EQ(xnn_status_success,
              xnn_setup_runtime(Runtime(), externals.size(), externals.data()));
    externals_ = externals;
  }

  void SetupRuntimeV2() {
    std::vector<xnn_external_value> externals;
    for (auto it = this->external_tensors_.begin(); it != this->external_tensors_.end(); ++it) {
      if (it->first == this->output_id_) {
        // Scramble output tensor.
        std::fill(it->second.begin(), it->second.end(), 0xA8);
      }
      externals.push_back(xnn_external_value{it->first, it->second.data()});
    }
    ASSERT_EQ(xnn_status_success,
              xnn_setup_runtime_v2(Runtime(), externals.size(), externals.data()));
    externals_ = externals;
  }

  // Counts the operators that remain after fusion, i.e. those with a non-null
  // operator object.
  size_t NumOperators() {
    size_t count = 0;
    for (size_t i = 0; i < runtime_->num_ops; i++) {
      if (runtime_->opdata[i].operator_objects[0] != nullptr) {
        count++;
      }
    }
    return count;
  }

  xnn_runtime_t Runtime() const { return runtime_.get(); }

  void ReshapeInput(const std::vector<size_t>& dims, uint32_t external_id) {
    xnn_status status = xnn_reshape_external_value(Runtime(), external_id, dims.size(), dims.data());
    EXPECT_EQ(status, xnn_status_success);
    size_t num_elements = NumElements(dims);
    xnnpack::Buffer<char> input(num_elements * sizeof(float) + XNN_EXTRA_BYTES * sizeof(char));
    std::generate(input.begin(), input.end(), [&]() { return f32dist(rng_); });
    external_tensors_[external_id] = std::move(input);
  }

  void ReshapeRuntime() {
    xnn_status status = xnn_reshape_runtime(Runtime());
    EXPECT_EQ(status, xnn_status_success);
    std::vector<size_t> output_dims(XNN_MAX_TENSOR_DIMS);
    size_t num_dims;
    status = xnn_get_external_value_shape(Runtime(), output_id_, &num_dims, output_dims.data());
    output_dims.resize(num_dims);
    EXPECT_EQ(status, xnn_status_success);
    external_tensors_[output_id_] = xnnpack::Buffer<char>(NumElements(output_dims) * sizeof(float));
  }

 private:
  void Run(uint32_t flags = 0) {
    CreateRuntime(flags);
    SetupRuntime();
    ASSERT_EQ(xnn_status_success,
              xnn_setup_runtime(Runtime(), externals_.size(), externals_.data()));
    ASSERT_EQ(xnn_status_success, xnn_invoke_runtime(Runtime()));
  }

  std::unique_ptr<xnn_runtime, decltype(&xnn_delete_runtime)> runtime_{nullptr, xnn_delete_runtime};
  std::vector<xnn_external_value> externals_;
};

}  // namespace xnnpack
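// Illustrative usage sketch (not part of the tester itself). The graph-building
// helpers shown here (the constructor argument, AddInputTensorF32,
// AddOutputTensorF32, AddAddition) are assumed to be provided by the
// SubgraphTester base class declared in subgraph-tester.h.
//
//   RuntimeTester tester(/*external_value_ids=*/3);
//   tester.AddInputTensorF32({1, 8, 8, 3}, /*external_id=*/0)
//         .AddInputTensorF32({1, 8, 8, 3}, /*external_id=*/1)
//         .AddOutputTensorF32({1, 8, 8, 3}, /*external_id=*/2)
//         .AddAddition(0, 1, 2);
//   // Run the same graph with and without operator fusion and compare outputs.
//   xnnpack::Buffer<float> without_fusion = tester.RunWithoutFusion<float>();
//   xnnpack::Buffer<float> with_fusion = tester.RunWithFusion<float>();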