// Copyright 2024 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
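
// Test template for the per-architecture f16 SIMD wrappers. This file is
// processed by XNNPACK's `xngen` template generator; `ARCH`, `ARCH_MACRO`,
// and `TEST_REQUIRES` are template variables supplied per target
// architecture.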
$TESTNAME = f"F16Simd{ARCH.upper()}Test"

$if ARCH_MACRO:
  // This header needs to go first for the arch test macros.
  #include "xnnpack/common.h"

  #if ${ARCH_MACRO}

#include <algorithm>
#include <cmath>
#include <cstddef>
#include <cstdint>
#include <random>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "xnnpack/isa-checks.h"
#include "xnnpack/simd/f16-${ARCH}.h"
#include "xnnpack/fp16.h"
#include "replicable_random_device.h"
namespace xnnpack {
class ${TESTNAME} : public ::testing::Test {
protected:
void SetUp() override {
$if TEST_REQUIRES:
  ${TEST_REQUIRES};
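// Three vectors of random inputs (the ternary fma tests consume all three)
// and one vector of outputs.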
inputs_.resize(3 * xnn_simd_size_f16);
output_.resize(xnn_simd_size_f16);
std::uniform_real_distribution<float> f32dist(-10.0f, 10.0f);
std::generate(inputs_.begin(), inputs_.end(),
[&]() { return fp16_ieee_from_fp32_value(f32dist(rng_)); });
}
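// Converts a vector of f16 bit patterns to f32 values for comparisons.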
static std::vector<float> ToFloat32(const std::vector<uint16_t> &values) {
std::vector<float> result;
result.reserve(values.size());
for (const uint16_t &value : values) {
result.push_back(fp16_ieee_to_fp32_value(value));
}
return result;
}
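// Rounds an f32 value to f16 precision and back, yielding the reference
// result at f16 precision.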
static float TruncToF16(float value) {
return fp16_ieee_to_fp32_value(fp16_ieee_from_fp32_value(value));
}
xnnpack::ReplicableRandomDevice rng_;
std::vector<uint16_t> inputs_;
std::vector<uint16_t> output_;
};
TEST_F(${TESTNAME}, SetZero) {
xnn_storeu_f16(output_.data(), xnn_zero_f16());
EXPECT_THAT(ToFloat32(output_), testing::Each(testing::Eq(0.0f)));
}
TEST_F(${TESTNAME}, Add) {
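// The reference result is the f32 sum rounded back to f16 precision; the
// other arithmetic tests follow the same pattern.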
const xnn_simd_f16_t a = xnn_loadu_f16(inputs_.data());
const xnn_simd_f16_t b = xnn_loadu_f16(inputs_.data() + xnn_simd_size_f16);
const xnn_simd_f16_t res = xnn_add_f16(a, b);
xnn_storeu_f16(output_.data(), res);
std::vector<float> output_f32 = ToFloat32(output_);
std::vector<float> inputs_f32 = ToFloat32(inputs_);
for (size_t k = 0; k < xnn_simd_size_f16; k++) {
ASSERT_EQ(output_f32[k],
TruncToF16(inputs_f32[k] + inputs_f32[k + xnn_simd_size_f16]));
}
}
TEST_F(${TESTNAME}, Mul) {
const xnn_simd_f16_t a = xnn_loadu_f16(inputs_.data());
const xnn_simd_f16_t b = xnn_loadu_f16(inputs_.data() + xnn_simd_size_f16);
const xnn_simd_f16_t res = xnn_mul_f16(a, b);
xnn_storeu_f16(output_.data(), res);
std::vector<float> output_f32 = ToFloat32(output_);
std::vector<float> inputs_f32 = ToFloat32(inputs_);
for (size_t k = 0; k < xnn_simd_size_f16; k++) {
ASSERT_EQ(output_f32[k],
TruncToF16(inputs_f32[k] * inputs_f32[k + xnn_simd_size_f16]));
}
}
TEST_F(${TESTNAME}, Fmadd) {
const xnn_simd_f16_t a = xnn_loadu_f16(inputs_.data());
const xnn_simd_f16_t b = xnn_loadu_f16(inputs_.data() + xnn_simd_size_f16);
const xnn_simd_f16_t c =
xnn_loadu_f16(inputs_.data() + 2 * xnn_simd_size_f16);
const xnn_simd_f16_t res = xnn_fmadd_f16(a, b, c);
xnn_storeu_f16(output_.data(), res);
std::vector<float> output_f32 = ToFloat32(output_);
std::vector<float> inputs_f32 = ToFloat32(inputs_);
for (size_t k = 0; k < xnn_simd_size_f16; k++) {
ASSERT_EQ(output_f32[k],
TruncToF16(inputs_f32[k] * inputs_f32[k + xnn_simd_size_f16] +
inputs_f32[k + 2 * xnn_simd_size_f16]));
}
}
TEST_F(${TESTNAME}, Fmsub) {
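// Fmsub computes `a * b - c`.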
const xnn_simd_f16_t a = xnn_loadu_f16(inputs_.data());
const xnn_simd_f16_t b = xnn_loadu_f16(inputs_.data() + xnn_simd_size_f16);
const xnn_simd_f16_t c =
xnn_loadu_f16(inputs_.data() + 2 * xnn_simd_size_f16);
const xnn_simd_f16_t res = xnn_fmsub_f16(a, b, c);
xnn_storeu_f16(output_.data(), res);
std::vector<float> output_f32 = ToFloat32(output_);
std::vector<float> inputs_f32 = ToFloat32(inputs_);
for (size_t k = 0; k < xnn_simd_size_f16; k++) {
ASSERT_EQ(output_f32[k],
TruncToF16(inputs_f32[k] * inputs_f32[k + xnn_simd_size_f16] -
inputs_f32[k + 2 * xnn_simd_size_f16]));
}
}
TEST_F(${TESTNAME}, Fnmadd) {
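// Fnmadd computes `-(a * b) + c`.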
const xnn_simd_f16_t a = xnn_loadu_f16(inputs_.data());
const xnn_simd_f16_t b = xnn_loadu_f16(inputs_.data() + xnn_simd_size_f16);
const xnn_simd_f16_t c =
xnn_loadu_f16(inputs_.data() + 2 * xnn_simd_size_f16);
const xnn_simd_f16_t res = xnn_fnmadd_f16(a, b, c);
xnn_storeu_f16(output_.data(), res);
std::vector<float> output_f32 = ToFloat32(output_);
std::vector<float> inputs_f32 = ToFloat32(inputs_);
for (size_t k = 0; k < xnn_simd_size_f16; k++) {
ASSERT_EQ(output_f32[k],
TruncToF16(-inputs_f32[k] * inputs_f32[k + xnn_simd_size_f16] +
inputs_f32[k + 2 * xnn_simd_size_f16]));
}
}
TEST_F(${TESTNAME}, Sub) {
const xnn_simd_f16_t a = xnn_loadu_f16(inputs_.data());
const xnn_simd_f16_t b = xnn_loadu_f16(inputs_.data() + xnn_simd_size_f16);
const xnn_simd_f16_t res = xnn_sub_f16(a, b);
xnn_storeu_f16(output_.data(), res);
std::vector<float> output_f32 = ToFloat32(output_);
std::vector<float> inputs_f32 = ToFloat32(inputs_);
for (size_t k = 0; k < xnn_simd_size_f16; k++) {
ASSERT_EQ(output_f32[k],
TruncToF16(inputs_f32[k] - inputs_f32[k + xnn_simd_size_f16]));
}
}
TEST_F(${TESTNAME}, Div) {
const xnn_simd_f16_t a = xnn_loadu_f16(inputs_.data());
const xnn_simd_f16_t b = xnn_loadu_f16(inputs_.data() + xnn_simd_size_f16);
const xnn_simd_f16_t res = xnn_div_f16(a, b);
xnn_storeu_f16(output_.data(), res);
std::vector<float> output_f32 = ToFloat32(output_);
std::vector<float> inputs_f32 = ToFloat32(inputs_);
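// One f16 ulp is about 2^-10 (9.77e-4); allow two ulps of relative error,
// since division may not be exactly rounded on every architecture.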
for (size_t k = 0; k < xnn_simd_size_f16; k++) {
ASSERT_NEAR(output_f32[k],
inputs_f32[k] / inputs_f32[k + xnn_simd_size_f16],
2 * 9.77e-04 * std::abs(output_f32[k]));
}
}
TEST_F(${TESTNAME}, Max) {
const xnn_simd_f16_t a = xnn_loadu_f16(inputs_.data());
const xnn_simd_f16_t b = xnn_loadu_f16(inputs_.data() + xnn_simd_size_f16);
const xnn_simd_f16_t res = xnn_max_f16(a, b);
xnn_storeu_f16(output_.data(), res);
std::vector<float> output_f32 = ToFloat32(output_);
std::vector<float> inputs_f32 = ToFloat32(inputs_);
for (size_t k = 0; k < xnn_simd_size_f16; k++) {
ASSERT_EQ(output_f32[k],
std::max(inputs_f32[k], inputs_f32[k + xnn_simd_size_f16]));
}
}
TEST_F(${TESTNAME}, Min) {
const xnn_simd_f16_t a = xnn_loadu_f16(inputs_.data());
const xnn_simd_f16_t b = xnn_loadu_f16(inputs_.data() + xnn_simd_size_f16);
const xnn_simd_f16_t res = xnn_min_f16(a, b);
xnn_storeu_f16(output_.data(), res);
std::vector<float> output_f32 = ToFloat32(output_);
std::vector<float> inputs_f32 = ToFloat32(inputs_);
for (size_t k = 0; k < xnn_simd_size_f16; k++) {
ASSERT_EQ(output_f32[k],
std::min(inputs_f32[k], inputs_f32[k + xnn_simd_size_f16]));
}
}
TEST_F(${TESTNAME}, Abs) {
const xnn_simd_f16_t a = xnn_loadu_f16(inputs_.data());
const xnn_simd_f16_t res = xnn_abs_f16(a);
xnn_storeu_f16(output_.data(), res);
std::vector<float> output_f32 = ToFloat32(output_);
std::vector<float> inputs_f32 = ToFloat32(inputs_);
for (size_t k = 0; k < xnn_simd_size_f16; k++) {
ASSERT_EQ(output_f32[k], std::abs(inputs_f32[k]));
}
}
TEST_F(${TESTNAME}, Neg) {
const xnn_simd_f16_t a = xnn_loadu_f16(inputs_.data());
const xnn_simd_f16_t res = xnn_neg_f16(a);
xnn_storeu_f16(output_.data(), res);
std::vector<float> output_f32 = ToFloat32(output_);
std::vector<float> inputs_f32 = ToFloat32(inputs_);
for (size_t k = 0; k < xnn_simd_size_f16; k++) {
ASSERT_EQ(output_f32[k], -inputs_f32[k]);
}
}
TEST_F(${TESTNAME}, And) {
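// The bitwise tests (And/Or/Xor) compare raw f16 bit patterns, not converted
// values.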
const xnn_simd_f16_t a = xnn_loadu_f16(inputs_.data());
const xnn_simd_f16_t b = xnn_loadu_f16(inputs_.data() + xnn_simd_size_f16);
const xnn_simd_f16_t res = xnn_and_f16(a, b);
xnn_storeu_f16(output_.data(), res);
for (size_t k = 0; k < xnn_simd_size_f16; k++) {
ASSERT_EQ(output_[k], inputs_[k] & inputs_[k + xnn_simd_size_f16]);
}
}
TEST_F(${TESTNAME}, Or) {
const xnn_simd_f16_t a = xnn_loadu_f16(inputs_.data());
const xnn_simd_f16_t b = xnn_loadu_f16(inputs_.data() + xnn_simd_size_f16);
const xnn_simd_f16_t res = xnn_or_f16(a, b);
xnn_storeu_f16(output_.data(), res);
for (size_t k = 0; k < xnn_simd_size_f16; k++) {
ASSERT_EQ(output_[k], inputs_[k] | inputs_[k + xnn_simd_size_f16]);
}
}
TEST_F(${TESTNAME}, Xor) {
const xnn_simd_f16_t a = xnn_loadu_f16(inputs_.data());
const xnn_simd_f16_t b = xnn_loadu_f16(inputs_.data() + xnn_simd_size_f16);
const xnn_simd_f16_t res = xnn_xor_f16(a, b);
xnn_storeu_f16(output_.data(), res);
for (size_t k = 0; k < xnn_simd_size_f16; k++) {
ASSERT_EQ(output_[k], inputs_[k] ^ inputs_[k + xnn_simd_size_f16]);
}
}
TEST_F(${TESTNAME}, ShiftLeft) {
const xnn_simd_f16_t a = xnn_loadu_f16(inputs_.data());
// Not using a loop since the `bits` parameter needs to be a compile-time
// constant, e.g. for `neon`.
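// Note that `inputs_[k]` is promoted to `int` before the shift, so the
// `& 0xFFFF` mask truncates the result back to 16 bits.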
$for BITS in range(1, 16):
  {
    const xnn_simd_f16_t res = xnn_sll_f16(a, ${BITS});
    xnn_storeu_f16(output_.data(), res);
    for (size_t k = 0; k < xnn_simd_size_f16; k++) {
      ASSERT_EQ(output_[k], (inputs_[k] << ${BITS}) & 0xFFFF);
    }
  }
}
TEST_F(${TESTNAME}, ShiftRight) {
const xnn_simd_f16_t a = xnn_loadu_f16(inputs_.data());
// Not using a loop since the `bits` parameter needs to be a compile-time
// constant, e.g. for `neon`.
$for BITS in range(1, 16):
  {
    const xnn_simd_f16_t res = xnn_srl_f16(a, ${BITS});
    xnn_storeu_f16(output_.data(), res);
    for (size_t k = 0; k < xnn_simd_size_f16; k++) {
      ASSERT_EQ(output_[k], inputs_[k] >> ${BITS});
    }
  }
}
TEST_F(${TESTNAME}, ShiftRightSigned) {
const xnn_simd_f16_t a = xnn_loadu_f16(inputs_.data());
// Not using a loop since the `bits` parameter needs to be a compile-time
// constant, e.g. for `neon`.
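// Unlike the logical shift above, the arithmetic shift replicates the sign
// bit, so the reference is computed on a signed reinterpretation of the bits.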
$for BITS in range(1, 16):
  {
    const xnn_simd_f16_t res = xnn_sra_f16(a, ${BITS});
    xnn_storeu_f16(output_.data(), res);
    for (size_t k = 0; k < xnn_simd_size_f16; k++) {
      ASSERT_EQ(output_[k], static_cast<uint16_t>(
                                static_cast<int16_t>(inputs_[k]) >> ${BITS}));
    }
  }
}
TEST_F(${TESTNAME}, CmpEq) {
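// Force roughly half of the lanes to compare equal so that both outcomes are
// exercised.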
for (size_t k = 0; k < xnn_simd_size_f16; k++) {
if (rng_() & 1) {
inputs_[k + xnn_simd_size_f16] = inputs_[k];
}
}
const xnn_simd_f16_t a = xnn_loadu_f16(inputs_.data());
const xnn_simd_f16_t b = xnn_loadu_f16(inputs_.data() + xnn_simd_size_f16);
const xnn_simd_f16_t res = xnn_cmpeq_f16(a, b);
xnn_storeu_f16(output_.data(), res);
for (size_t k = 0; k < xnn_simd_size_f16; k++) {
ASSERT_EQ(output_[k],
inputs_[k] == inputs_[k + xnn_simd_size_f16] ? 0xFFFF : 0);
}
}
TEST_F(${TESTNAME}, GetExp) {
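// xnn_getexp_f16 extracts the unbiased exponent of its input; std::logb of
// the f32 value is the reference.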
const xnn_simd_f16_t a = xnn_loadu_f16(inputs_.data());
const xnn_simd_f16_t res = xnn_getexp_f16(a);
xnn_storeu_f16(output_.data(), res);
std::vector<float> output_f32 = ToFloat32(output_);
std::vector<float> inputs_f32 = ToFloat32(inputs_);
for (size_t k = 0; k < xnn_simd_size_f16; k++) {
ASSERT_EQ(output_f32[k], std::logb(inputs_f32[k]));
}
}
TEST_F(${TESTNAME}, StoreTail) {
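// xnn_store_tail_f16 stores the first `num_elements` lanes; lanes past that
// are expected to stay zero (`output_` is zero-initialized by `resize`).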
const xnn_simd_f16_t a = xnn_loadu_f16(inputs_.data());
std::vector<float> inputs_f32 = ToFloat32(inputs_);
for (size_t num_elements = 1; num_elements < xnn_simd_size_f16;
num_elements++) {
xnn_store_tail_f16(output_.data(), a, num_elements);
std::vector<float> output_f32 = ToFloat32(output_);
for (size_t k = 0; k < num_elements; k++) {
ASSERT_EQ(output_f32[k], inputs_f32[k]);
}
for (size_t k = num_elements; k < xnn_simd_size_f16; k++) {
ASSERT_EQ(output_f32[k], 0.0f);
}
}
}
} // namespace xnnpack
$if ARCH_MACRO:
  #endif  // ${ARCH_MACRO}