// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <functional>
#include <limits>
#include <memory>
#include <numeric>
#include <random>
#include <vector>

#include <gtest/gtest.h>

#include "xnnpack.h"
#include "xnnpack/math.h"
#include "xnnpack/node-type.h"
#include "xnnpack/subgraph.h"
#include "replicable_random_device.h"

template <class T>
class GlobalAveragePooling2DTest : public ::testing::Test {
 protected:
  GlobalAveragePooling2DTest() {
    shape_dist = std::uniform_int_distribution<size_t>(3, XNN_MAX_TENSOR_DIMS);
    dim_dist = std::uniform_int_distribution<size_t>(1, 9);
    f32dist = std::uniform_real_distribution<float>();
    i8dist = std::uniform_int_distribution<int32_t>(
        std::numeric_limits<int8_t>::min(), std::numeric_limits<int8_t>::max());
    u8dist = std::uniform_int_distribution<int32_t>(
        std::numeric_limits<uint8_t>::min(), std::numeric_limits<uint8_t>::max());
    scale_dist = std::uniform_real_distribution<float>(0.1f, 5.0f);

    input_dims = RandomShape();
    output_dims = input_dims;
    // Global average pooling reduces the spatial (H, W) dimensions to 1x1.
    output_dims[output_dims.size() - 3] = 1;
    output_dims[output_dims.size() - 2] = 1;
    batch_size = 1;
    for (size_t i = 0; i < input_dims.size() - 3; i++) {
      batch_size *= input_dims[i];
    }
    input_width =
        input_dims[input_dims.size() - 3] * input_dims[input_dims.size() - 2];
    channels = input_dims[input_dims.size() - 1];
  }

  std::vector<size_t> RandomShape() {
    std::vector<size_t> dims(shape_dist(rng));
    std::generate(dims.begin(), dims.end(), [&] { return dim_dist(rng); });
    return dims;
  }

  size_t NumElements(std::vector<size_t>& dims) {
    return std::accumulate(
        dims.begin(), dims.end(), size_t(1), std::multiplies<size_t>());
  }

  xnnpack::ReplicableRandomDevice rng;
  std::uniform_int_distribution<size_t> shape_dist;
  std::uniform_int_distribution<size_t> dim_dist;
  std::uniform_real_distribution<float> f32dist;
  std::uniform_real_distribution<float> scale_dist;
  std::uniform_int_distribution<int32_t> i8dist;
  std::uniform_int_distribution<int32_t> u8dist;

  float output_min = -std::numeric_limits<float>::infinity();
  float output_max = std::numeric_limits<float>::infinity();

  size_t batch_size;
  size_t input_width;
  size_t channels;
  std::vector<size_t> input_dims;
  std::vector<size_t> output_dims;
};

using GlobalAveragePooling2DTestQS8 = GlobalAveragePooling2DTest<int8_t>;
using GlobalAveragePooling2DTestQU8 = GlobalAveragePooling2DTest<uint8_t>;
// FP16 values are handled via their uint16_t bit representation in these tests.
using GlobalAveragePooling2DTestF16 = GlobalAveragePooling2DTest<uint16_t>;
using GlobalAveragePooling2DTestF32 = GlobalAveragePooling2DTest<float>;

TEST_F(GlobalAveragePooling2DTestQS8, define) {
  ASSERT_EQ(xnn_status_success, xnn_initialize(/*allocator=*/nullptr));

  xnn_subgraph_t subgraph = nullptr;
  ASSERT_EQ(xnn_status_success, xnn_create_subgraph(2, /*flags=*/0, &subgraph));
  std::unique_ptr<xnn_subgraph, decltype(&xnn_delete_subgraph)> auto_subgraph(
      subgraph, xnn_delete_subgraph);

  uint32_t input_id = XNN_INVALID_NODE_ID;
  ASSERT_EQ(
      xnn_status_success,
      xnn_define_quantized_tensor_value(
          subgraph, xnn_datatype_qint8, 0, 1.0f, input_dims.size(),
          input_dims.data(), nullptr, /*external_id=*/0, /*flags=*/0,
          &input_id));
  ASSERT_NE(input_id, XNN_INVALID_NODE_ID);

  uint32_t output_id = XNN_INVALID_NODE_ID;
  ASSERT_EQ(
      xnn_status_success,
      xnn_define_quantized_tensor_value(
          subgraph, xnn_datatype_qint8, 0, 1.0f, output_dims.size(),
          output_dims.data(), nullptr, /*external_id=*/1, /*flags=*/0,
          &output_id));
  ASSERT_NE(output_id, XNN_INVALID_NODE_ID);

  ASSERT_EQ(
      xnn_status_success,
      xnn_define_global_average_pooling_2d(
          subgraph, output_min, output_max, input_id, output_id, /*flags=*/0));

  // The global average pooling node is lowered to a static mean over the two
  // spatial axes (H and W).
  ASSERT_EQ(subgraph->num_nodes, 1);
  const struct xnn_node* node = &subgraph->nodes[0];
  ASSERT_EQ(node->type, xnn_node_type_static_mean);
  ASSERT_EQ(node->num_inputs, 1);
  ASSERT_EQ(node->inputs[0], input_id);
  ASSERT_EQ(node->num_outputs, 1);
  ASSERT_EQ(node->outputs[0], output_id);
  ASSERT_EQ(node->params.reduce.num_reduction_axes, 2);
  ASSERT_EQ(node->params.reduce.reduction_axes[0], input_dims.size() - 3);
  ASSERT_EQ(node->params.reduce.reduction_axes[1], input_dims.size() - 2);
  ASSERT_EQ(node->flags, 0);
}

TEST_F(GlobalAveragePooling2DTestQU8, define) {
  ASSERT_EQ(xnn_status_success, xnn_initialize(/*allocator=*/nullptr));

  xnn_subgraph_t subgraph = nullptr;
  ASSERT_EQ(xnn_status_success, xnn_create_subgraph(2, /*flags=*/0, &subgraph));
  std::unique_ptr<xnn_subgraph, decltype(&xnn_delete_subgraph)> auto_subgraph(
      subgraph, xnn_delete_subgraph);

  uint32_t input_id = XNN_INVALID_NODE_ID;
  ASSERT_EQ(
      xnn_status_success,
      xnn_define_quantized_tensor_value(
          subgraph, xnn_datatype_quint8, 0, 1.0f, input_dims.size(),
          input_dims.data(), nullptr, /*external_id=*/0, /*flags=*/0,
          &input_id));
  ASSERT_NE(input_id, XNN_INVALID_NODE_ID);

  uint32_t output_id = XNN_INVALID_NODE_ID;
  ASSERT_EQ(
      xnn_status_success,
      xnn_define_quantized_tensor_value(
          subgraph, xnn_datatype_quint8, 0, 1.0f, output_dims.size(),
          output_dims.data(), nullptr, /*external_id=*/1, /*flags=*/0,
          &output_id));
  ASSERT_NE(output_id, XNN_INVALID_NODE_ID);

  ASSERT_EQ(
      xnn_status_success,
      xnn_define_global_average_pooling_2d(
          subgraph, output_min, output_max, input_id, output_id, /*flags=*/0));

  ASSERT_EQ(subgraph->num_nodes, 1);
  const struct xnn_node* node = &subgraph->nodes[0];
  ASSERT_EQ(node->type, xnn_node_type_static_mean);
  ASSERT_EQ(node->num_inputs, 1);
  ASSERT_EQ(node->inputs[0], input_id);
  ASSERT_EQ(node->num_outputs, 1);
  ASSERT_EQ(node->outputs[0], output_id);
  ASSERT_EQ(node->params.reduce.num_reduction_axes, 2);
  ASSERT_EQ(node->params.reduce.reduction_axes[0], input_dims.size() - 3);
  ASSERT_EQ(node->params.reduce.reduction_axes[1], input_dims.size() - 2);
  ASSERT_EQ(node->flags, 0);
}

TEST_F(GlobalAveragePooling2DTestF16, define) {
  ASSERT_EQ(xnn_status_success, xnn_initialize(/*allocator=*/nullptr));

  xnn_subgraph_t subgraph = nullptr;
  ASSERT_EQ(xnn_status_success, xnn_create_subgraph(2, /*flags=*/0, &subgraph));
  std::unique_ptr<xnn_subgraph, decltype(&xnn_delete_subgraph)> auto_subgraph(
      subgraph, xnn_delete_subgraph);

  uint32_t input_id = XNN_INVALID_NODE_ID;
  ASSERT_EQ(
      xnn_status_success,
      xnn_define_tensor_value(
          subgraph, xnn_datatype_fp16, input_dims.size(), input_dims.data(),
          nullptr, /*external_id=*/0, /*flags=*/0, &input_id));
  ASSERT_NE(input_id, XNN_INVALID_NODE_ID);

  uint32_t output_id = XNN_INVALID_NODE_ID;
  ASSERT_EQ(
      xnn_status_success,
      xnn_define_tensor_value(
          subgraph, xnn_datatype_fp16, output_dims.size(), output_dims.data(),
          nullptr, /*external_id=*/1, /*flags=*/0, &output_id));
  ASSERT_NE(output_id, XNN_INVALID_NODE_ID);

  ASSERT_EQ(
      xnn_status_success,
      xnn_define_global_average_pooling_2d(
          subgraph, output_min, output_max, input_id, output_id, /*flags=*/0));

  ASSERT_EQ(subgraph->num_nodes, 1);
  const struct xnn_node* node = &subgraph->nodes[0];
  ASSERT_EQ(node->type, xnn_node_type_static_mean);
  ASSERT_EQ(node->num_inputs, 1);
  ASSERT_EQ(node->inputs[0], input_id);
  ASSERT_EQ(node->num_outputs, 1);
  ASSERT_EQ(node->outputs[0], output_id);
  ASSERT_EQ(node->params.reduce.num_reduction_axes, 2);
  ASSERT_EQ(node->params.reduce.reduction_axes[0], input_dims.size() - 3);
  ASSERT_EQ(node->params.reduce.reduction_axes[1], input_dims.size() - 2);
  ASSERT_EQ(node->flags, 0);
}

TEST_F(GlobalAveragePooling2DTestF32, define) {
  ASSERT_EQ(xnn_status_success, xnn_initialize(/*allocator=*/nullptr));

  xnn_subgraph_t subgraph = nullptr;
  ASSERT_EQ(xnn_status_success, xnn_create_subgraph(2, /*flags=*/0, &subgraph));
  std::unique_ptr<xnn_subgraph, decltype(&xnn_delete_subgraph)> auto_subgraph(
      subgraph, xnn_delete_subgraph);

  uint32_t input_id = XNN_INVALID_NODE_ID;
  ASSERT_EQ(
      xnn_status_success,
      xnn_define_tensor_value(
          subgraph, xnn_datatype_fp32, input_dims.size(), input_dims.data(),
          nullptr, /*external_id=*/0, /*flags=*/0, &input_id));
  ASSERT_NE(input_id, XNN_INVALID_NODE_ID);

  uint32_t output_id = XNN_INVALID_NODE_ID;
  ASSERT_EQ(
      xnn_status_success,
      xnn_define_tensor_value(
          subgraph, xnn_datatype_fp32, output_dims.size(), output_dims.data(),
          nullptr, /*external_id=*/1, /*flags=*/0, &output_id));
  ASSERT_NE(output_id, XNN_INVALID_NODE_ID);

  ASSERT_EQ(
      xnn_status_success,
      xnn_define_global_average_pooling_2d(
          subgraph, output_min, output_max, input_id, output_id, /*flags=*/0));

  ASSERT_EQ(subgraph->num_nodes, 1);
  const struct xnn_node* node = &subgraph->nodes[0];
  ASSERT_EQ(node->type, xnn_node_type_static_mean);
  ASSERT_EQ(node->num_inputs, 1);
  ASSERT_EQ(node->inputs[0], input_id);
  ASSERT_EQ(node->num_outputs, 1);
  ASSERT_EQ(node->outputs[0], output_id);
  ASSERT_EQ(node->params.reduce.num_reduction_axes, 2);
  ASSERT_EQ(node->params.reduce.reduction_axes[0], input_dims.size() - 3);
  ASSERT_EQ(node->params.reduce.reduction_axes[1], input_dims.size() - 2);
  ASSERT_EQ(node->flags, 0);
}
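
// The define-only tests above check the node that is recorded in the
// subgraph but never execute it. The test below is a minimal end-to-end
// sketch (not a copy of the upstream runtime tests) showing how the same
// F32 subgraph could be built and run through the public XNNPACK runtime
// API (xnn_create_runtime_v2, xnn_setup_runtime, xnn_invoke_runtime) and
// checked against a straightforward reference mean. Unlike the define
// tests, the tensors here must be declared as external inputs/outputs so
// their buffers can be bound at setup time; the test name and the error
// tolerance are illustrative choices.
TEST_F(GlobalAveragePooling2DTestF32, runtime_smoke_sketch) {
  ASSERT_EQ(xnn_status_success, xnn_initialize(/*allocator=*/nullptr));

  xnn_subgraph_t subgraph = nullptr;
  ASSERT_EQ(xnn_status_success, xnn_create_subgraph(2, /*flags=*/0, &subgraph));
  std::unique_ptr<xnn_subgraph, decltype(&xnn_delete_subgraph)> auto_subgraph(
      subgraph, xnn_delete_subgraph);

  uint32_t input_id = XNN_INVALID_NODE_ID;
  ASSERT_EQ(
      xnn_status_success,
      xnn_define_tensor_value(
          subgraph, xnn_datatype_fp32, input_dims.size(), input_dims.data(),
          nullptr, /*external_id=*/0, XNN_VALUE_FLAG_EXTERNAL_INPUT,
          &input_id));

  uint32_t output_id = XNN_INVALID_NODE_ID;
  ASSERT_EQ(
      xnn_status_success,
      xnn_define_tensor_value(
          subgraph, xnn_datatype_fp32, output_dims.size(), output_dims.data(),
          nullptr, /*external_id=*/1, XNN_VALUE_FLAG_EXTERNAL_OUTPUT,
          &output_id));

  ASSERT_EQ(
      xnn_status_success,
      xnn_define_global_average_pooling_2d(
          subgraph, output_min, output_max, input_id, output_id, /*flags=*/0));

  xnn_runtime_t runtime = nullptr;
  ASSERT_EQ(
      xnn_status_success,
      xnn_create_runtime_v2(subgraph, /*threadpool=*/nullptr, /*flags=*/0,
                            &runtime));
  std::unique_ptr<xnn_runtime, decltype(&xnn_delete_runtime)> auto_runtime(
      runtime, xnn_delete_runtime);

  // Fill the input with random values in [0, 1) and bind external buffers.
  std::vector<float> input(NumElements(input_dims));
  std::vector<float> output(NumElements(output_dims));
  std::generate(input.begin(), input.end(), [&] { return f32dist(rng); });

  const std::vector<xnn_external_value> external_values{
      {input_id, input.data()}, {output_id, output.data()}};
  ASSERT_EQ(
      xnn_status_success,
      xnn_setup_runtime(runtime, external_values.size(),
                        external_values.data()));
  ASSERT_EQ(xnn_status_success, xnn_invoke_runtime(runtime));

  // Reference: the mean over the H x W window for every (batch, channel)
  // pair of the NHWC input.
  for (size_t b = 0; b < batch_size; b++) {
    for (size_t c = 0; c < channels; c++) {
      double sum = 0.0;
      for (size_t i = 0; i < input_width; i++) {
        sum += input[(b * input_width + i) * channels + c];
      }
      const float expected = static_cast<float>(sum / input_width);
      ASSERT_NEAR(output[b * channels + c], expected, 1.0e-5f);
    }
  }
}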