// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#pragma once

#include <gtest/gtest.h>

#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdlib>
#include <functional>
#include <random>
#include <vector>

#include <xnnpack.h>
#include <xnnpack/AlignedAllocator.h>
#include <xnnpack/pack.h>
#include <xnnpack/params-init.h>
#include <xnnpack/params.h>


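// Tester for f32 VMULCADDC micro-kernels, which compute
// y[i][c] = x[i][c] * scale[c] + bias[c] for every row i and channel c,
// with the result clamped to bounds derived from qmin/qmax.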
class VMulCAddCMicrokernelTester {
 public:
  enum class Variant {
    Native,
    Scalar,
  };

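  // Channel tile of the micro-kernel under test, i.e. the number of channels
  // packed together by xnn_pack_f32_vmulcaddc_w.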
  inline VMulCAddCMicrokernelTester& channel_tile(size_t channel_tile) {
    this->channel_tile_ = channel_tile;
    return *this;
  }

  inline size_t channel_tile() const {
    return this->channel_tile_;
  }

  inline VMulCAddCMicrokernelTester& channels(size_t channels) {
    assert(channels != 0);
    this->channels_ = channels;
    return *this;
  }

  inline size_t channels() const {
    return this->channels_;
  }

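  // Number of channels rounded up to the next multiple of the channel tile;
  // determines the size of the packed scale/bias buffer.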
  inline size_t packed_channels() const {
    return channels() % channel_tile() == 0 ? channels() : (channels() / channel_tile() + 1) * channel_tile();
  }

  inline VMulCAddCMicrokernelTester& rows(size_t rows) {
    assert(rows != 0);
    this->rows_ = rows;
    return *this;
  }

  inline size_t rows() const {
    return this->rows_;
  }

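  // Stride, in elements, between consecutive input rows (0 means "use channels()").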
  inline VMulCAddCMicrokernelTester& input_stride(size_t input_stride) {
    this->input_stride_ = input_stride;
    return *this;
  }

  inline size_t input_stride() const {
    return this->input_stride_ == 0 ? channels() : this->input_stride_;
  }

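  // Stride, in elements, between consecutive output rows (0 means "use channels()").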
  inline VMulCAddCMicrokernelTester& output_stride(size_t output_stride) {
    this->output_stride_ = output_stride;
    return *this;
  }

  inline size_t output_stride() const {
    return this->output_stride_ == 0 ? channels() : this->output_stride_;
  }

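  // When true, the micro-kernel reads and writes the same buffer;
  // this requires input_stride() == output_stride().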
  inline VMulCAddCMicrokernelTester& inplace(bool inplace) {
    this->inplace_ = inplace;
    return *this;
  }

  inline bool inplace() const {
    return this->inplace_;
  }

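  // Clamping parameters: qmin/qmax select how much of the observed output
  // range is clipped when computing the y_min/y_max output bounds.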
  inline VMulCAddCMicrokernelTester& qmin(uint8_t qmin) {
    this->qmin_ = qmin;
    return *this;
  }

  inline uint8_t qmin() const {
    return this->qmin_;
  }

  inline VMulCAddCMicrokernelTester& qmax(uint8_t qmax) {
    this->qmax_ = qmax;
    return *this;
  }

  inline uint8_t qmax() const {
    return this->qmax_;
  }

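  // Number of test iterations, each with freshly generated random inputs.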
  inline VMulCAddCMicrokernelTester& iterations(size_t iterations) {
    this->iterations_ = iterations;
    return *this;
  }

  inline size_t iterations() const {
    return this->iterations_;
  }

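  // Runs the micro-kernel against a scalar reference implementation and
  // verifies the results element-wise within a relative tolerance.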
  void Test(xnn_f32_vmulcaddc_ukernel_function vmulcaddc, Variant variant = Variant::Native) const {
    std::random_device random_device;
    auto rng = std::mt19937(random_device());
    auto f32rng = std::bind(std::uniform_real_distribution<float>(0.0f, 1.0f), rng);

    if (inplace()) {
      ASSERT_EQ(input_stride(), output_stride());
    }

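    // Allocate buffers; the input is padded by XNN_EXTRA_BYTES because
    // micro-kernels may read slightly past the last element.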
    std::vector<float> x((rows() - 1) * input_stride() + channels() + XNN_EXTRA_BYTES / sizeof(float));
    std::vector<float> scale(channels());
    std::vector<float> bias(channels());
    std::vector<float, AlignedAllocator<float, 64>> packed_w(packed_channels() * 2);
    std::vector<float> y((rows() - 1) * output_stride() + channels() + (inplace() ? XNN_EXTRA_BYTES / sizeof(float) : 0));
    std::vector<float> y_ref(rows() * channels());
    for (size_t iteration = 0; iteration < iterations(); iteration++) {
      std::generate(scale.begin(), scale.end(), std::ref(f32rng));
      std::generate(bias.begin(), bias.end(), std::ref(f32rng));
      std::generate(x.begin(), x.end(), std::ref(f32rng));
      if (inplace()) {
        std::copy(x.cbegin(), x.cend(), y.begin());
      } else {
        std::fill(y.begin(), y.end(), nanf(""));
      }
      const float* x_data = inplace() ? y.data() : x.data();

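      // Fill the packed weights with NaN to catch unwritten entries, then pack
      // scale and bias into the layout expected by the micro-kernel.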
      std::fill(packed_w.begin(), packed_w.end(), nanf(""));
      xnn_pack_f32_vmulcaddc_w(channels(), channel_tile(),
        scale.data(), bias.data(), packed_w.data());

      // Compute reference results.
      for (size_t i = 0; i < rows(); i++) {
        for (size_t j = 0; j < channels(); j++) {
          y_ref[i * channels() + j] = x_data[i * input_stride() + j] * scale[j] + bias[j];
        }
      }
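      // Derive the clamping bounds from qmin/qmax as fractions of the observed
      // output range, then clamp the reference results.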
      const float accumulated_min = *std::min_element(y_ref.cbegin(), y_ref.cend());
      const float accumulated_max = *std::max_element(y_ref.cbegin(), y_ref.cend());
      const float accumulated_range = accumulated_max - accumulated_min;
      const float y_max = accumulated_max - accumulated_range / 255.0f * float(255 - qmax());
      const float y_min = accumulated_min + accumulated_range / 255.0f * float(qmin());
      for (float& y_value : y_ref) {
        y_value = std::max<float>(std::min<float>(y_value, y_max), y_min);
      }

      // Prepare output parameters.
      xnn_f32_output_params output_params = { };
      switch (variant) {
        case Variant::Native:
          output_params = xnn_init_f32_output_params(y_min, y_max);
          break;
        case Variant::Scalar:
          output_params = xnn_init_scalar_f32_output_params(y_min, y_max);
          break;
      }

      // Call optimized micro-kernel.
      vmulcaddc(rows(), channels() * sizeof(float),
        x_data, input_stride() * sizeof(float),
        packed_w.data(),
        y.data(), output_stride() * sizeof(float),
        &output_params);

      // Verify results.
      for (size_t i = 0; i < rows(); i++) {
        for (size_t j = 0; j < channels(); j++) {
          ASSERT_NEAR(y[i * output_stride() + j], y_ref[i * channels() + j], std::abs(y_ref[i * channels() + j]) * 1.0e-6f)
            << "at pixel " << i << " / " << rows()
            << ", channel = " << j << " / " << channels();
        }
      }
    }
  }

 private:
  size_t channel_tile_{1};
  size_t channels_{1};
  size_t rows_{1};
  size_t input_stride_{0};
  size_t output_stride_{0};
  bool inplace_{false};
  uint8_t qmin_{0};
  uint8_t qmax_{255};
  size_t iterations_{15};
};