blob: 6297259ef4cff5180f0551a5f86493224d3883c7 [file] [log] [blame]
Marat Dukhanc07cb7f2019-11-14 15:32:05 -08001// Copyright 2019 Google LLC
2//
3// This source code is licensed under the BSD-style license found in the
4// LICENSE file in the root directory of this source tree.
5
6#pragma once
7
#include <gtest/gtest.h>

#include <algorithm>
#include <cassert>
#include <cmath>
#include <cstddef>
#include <cstdint>
#include <cstdlib>
#include <functional>
#include <limits>
#include <random>
#include <vector>

#include <fp16.h>

#include <xnnpack.h>
#include <xnnpack/params-init.h>
#include <xnnpack/params.h>
23
24
25class VBinOpCMicrokernelTester {
26 public:
27 enum class OpType {
28 AddC,
Marat Dukhan77ca6302019-12-06 12:48:15 -080029 DivC,
30 RDivC,
Marat Dukhan403b7d42019-12-05 12:49:11 -080031 MaxC,
32 MinC,
Marat Dukhanc07cb7f2019-11-14 15:32:05 -080033 MulC,
34 SubC,
35 RSubC,
36 };
37
38 enum class Variant {
39 Native,
40 Scalar,
41 };
42
43 inline VBinOpCMicrokernelTester& batch_size(size_t batch_size) {
44 assert(batch_size != 0);
45 this->batch_size_ = batch_size;
46 return *this;
47 }
48
49 inline size_t batch_size() const {
50 return this->batch_size_;
51 }
52
53 inline VBinOpCMicrokernelTester& inplace(bool inplace) {
54 this->inplace_ = inplace;
55 return *this;
56 }
57
58 inline bool inplace() const {
59 return this->inplace_;
60 }
61
62 inline VBinOpCMicrokernelTester& qmin(uint8_t qmin) {
63 this->qmin_ = qmin;
64 return *this;
65 }
66
67 inline uint8_t qmin() const {
68 return this->qmin_;
69 }
70
71 inline VBinOpCMicrokernelTester& qmax(uint8_t qmax) {
72 this->qmax_ = qmax;
73 return *this;
74 }
75
76 inline uint8_t qmax() const {
77 return this->qmax_;
78 }
79
80 inline VBinOpCMicrokernelTester& iterations(size_t iterations) {
81 this->iterations_ = iterations;
82 return *this;
83 }
84
85 inline size_t iterations() const {
86 return this->iterations_;
87 }
88
Frank Barchardbf31e3f2020-05-12 14:00:07 -070089 void Test(xnn_f16_vbinary_ukernel_function vbinaryc, OpType op_type) const {
Frank Barchardd793f6c2020-05-08 13:37:43 -070090 std::random_device random_device;
91 auto rng = std::mt19937(random_device());
92 auto f32rng = std::bind(std::uniform_real_distribution<float>(0.01f, 1.0f), rng);
93 auto f16rng = std::bind(fp16_ieee_from_fp32_value, f32rng);
94
95 std::vector<uint16_t> a(batch_size() + XNN_EXTRA_BYTES / sizeof(uint16_t));
96 const uint16_t b = f16rng();
97 std::vector<uint16_t> y(batch_size() + (inplace() ? XNN_EXTRA_BYTES / sizeof(uint16_t) : 0));
98 std::vector<float> y_ref(batch_size());
99 for (size_t iteration = 0; iteration < iterations(); iteration++) {
100 std::generate(a.begin(), a.end(), std::ref(f16rng));
101 if (inplace()) {
102 std::generate(y.begin(), y.end(), std::ref(f16rng));
103 } else {
104 std::fill(y.begin(), y.end(), nanf(""));
105 }
106 const uint16_t* a_data = inplace() ? y.data() : a.data();
107
108 // Compute reference results.
109 for (size_t i = 0; i < batch_size(); i++) {
110 switch (op_type) {
111 case OpType::AddC:
112 y_ref[i] = fp16_ieee_to_fp32_value(a_data[i]) + fp16_ieee_to_fp32_value(b);
113 break;
114 case OpType::DivC:
115 y_ref[i] = fp16_ieee_to_fp32_value(a_data[i]) / fp16_ieee_to_fp32_value(b);
116 break;
117 case OpType::RDivC:
118 y_ref[i] = fp16_ieee_to_fp32_value(b) / fp16_ieee_to_fp32_value(a_data[i]);
119 break;
120 case OpType::MaxC:
121 y_ref[i] = std::max<float>(fp16_ieee_to_fp32_value(a_data[i]), fp16_ieee_to_fp32_value(b));
122 break;
123 case OpType::MinC:
124 y_ref[i] = std::min<float>(fp16_ieee_to_fp32_value(a_data[i]), fp16_ieee_to_fp32_value(b));
125 break;
126 case OpType::MulC:
127 y_ref[i] = fp16_ieee_to_fp32_value(a_data[i]) * fp16_ieee_to_fp32_value(b);
128 break;
129 case OpType::SubC:
130 y_ref[i] = fp16_ieee_to_fp32_value(a_data[i]) - fp16_ieee_to_fp32_value(b);
131 break;
132 case OpType::RSubC:
133 y_ref[i] = fp16_ieee_to_fp32_value(b) - fp16_ieee_to_fp32_value(a_data[i]);
134 break;
135 }
136 }
137 // Call optimized micro-kernel.
138 vbinaryc(batch_size() * sizeof(uint16_t), a_data, &b, y.data(), nullptr);
139
140 // Verify results.
141 for (size_t i = 0; i < batch_size(); i++) {
142 ASSERT_NEAR(fp16_ieee_to_fp32_value(y[i]), y_ref[i], std::abs(y_ref[i]) * 1.0e-2f)
143 << "at " << i << " / " << batch_size();
144 }
145 }
146 }
147
Frank Barchardbf31e3f2020-05-12 14:00:07 -0700148 void Test(xnn_f16_vbinary_minmax_ukernel_function vbinaryc_minmax, OpType op_type) const {
Frank Barchardd793f6c2020-05-08 13:37:43 -0700149 std::random_device random_device;
150 auto rng = std::mt19937(random_device());
151 auto f32rng = std::bind(std::uniform_real_distribution<float>(0.01f, 1.0f), rng);
152 auto f16rng = std::bind(fp16_ieee_from_fp32_value, f32rng);
153
154 std::vector<uint16_t> a(batch_size() + XNN_EXTRA_BYTES / sizeof(uint16_t));
155 const uint16_t b = f16rng();
156 std::vector<uint16_t> y(batch_size() + (inplace() ? XNN_EXTRA_BYTES / sizeof(uint16_t) : 0));
157 std::vector<float> y_ref(batch_size());
158 for (size_t iteration = 0; iteration < iterations(); iteration++) {
159 std::generate(a.begin(), a.end(), std::ref(f16rng));
160 if (inplace()) {
161 std::generate(y.begin(), y.end(), std::ref(f16rng));
162 } else {
163 std::fill(y.begin(), y.end(), nanf(""));
164 }
165 const uint16_t* a_data = inplace() ? y.data() : a.data();
166
167 // Compute reference results.
168 for (size_t i = 0; i < batch_size(); i++) {
169 switch (op_type) {
170 case OpType::AddC:
171 y_ref[i] = fp16_ieee_to_fp32_value(a_data[i]) + fp16_ieee_to_fp32_value(b);
172 break;
173 case OpType::DivC:
174 y_ref[i] = fp16_ieee_to_fp32_value(a_data[i]) / fp16_ieee_to_fp32_value(b);
175 break;
176 case OpType::RDivC:
177 y_ref[i] = fp16_ieee_to_fp32_value(b) / fp16_ieee_to_fp32_value(a_data[i]);
178 break;
179 case OpType::MaxC:
180 y_ref[i] = std::max<float>(fp16_ieee_to_fp32_value(a_data[i]), fp16_ieee_to_fp32_value(b));
181 break;
182 case OpType::MinC:
183 y_ref[i] = std::min<float>(fp16_ieee_to_fp32_value(a_data[i]), fp16_ieee_to_fp32_value(b));
184 break;
185 case OpType::MulC:
186 y_ref[i] = fp16_ieee_to_fp32_value(a_data[i]) * fp16_ieee_to_fp32_value(b);
187 break;
188 case OpType::SubC:
189 y_ref[i] = fp16_ieee_to_fp32_value(a_data[i]) - fp16_ieee_to_fp32_value(b);
190 break;
191 case OpType::RSubC:
192 y_ref[i] = fp16_ieee_to_fp32_value(b) - fp16_ieee_to_fp32_value(a_data[i]);
193 break;
194 }
195 }
196 const float accumulated_min = *std::min_element(y_ref.cbegin(), y_ref.cend());
197 const float accumulated_max = *std::max_element(y_ref.cbegin(), y_ref.cend());
198 const float accumulated_range = accumulated_max - accumulated_min;
199 const float y_max = fp16_ieee_to_fp32_value(fp16_ieee_from_fp32_value(accumulated_range > 0.0f ?
200 (accumulated_max - accumulated_range / 255.0f * float(255 - qmax())) :
201 +std::numeric_limits<float>::infinity()));
202 const float y_min = fp16_ieee_to_fp32_value(fp16_ieee_from_fp32_value(accumulated_range > 0.0f ?
203 (accumulated_min + accumulated_range / 255.0f * float(qmin())) :
204 -std::numeric_limits<float>::infinity()));
205 for (size_t i = 0; i < batch_size(); i++) {
206 y_ref[i] = std::max<float>(std::min<float>(y_ref[i], y_max), y_min);
207 }
208
Frank Barchard9f3a8432020-06-02 13:59:35 -0700209 // Prepare parameters.
Frank Barchardbf31e3f2020-05-12 14:00:07 -0700210 xnn_f16_minmax_params params = xnn_init_f16_minmax_params(
Frank Barchardd793f6c2020-05-08 13:37:43 -0700211 fp16_ieee_from_fp32_value(y_min),
212 fp16_ieee_from_fp32_value(y_max));
213
214 // Call optimized micro-kernel.
215 vbinaryc_minmax(batch_size() * sizeof(uint16_t), a_data, &b, y.data(), &params);
216
217 // Verify results.
218 for (size_t i = 0; i < batch_size(); i++) {
219 ASSERT_NEAR(fp16_ieee_to_fp32_value(y[i]), y_ref[i], std::abs(y_ref[i]) * 1.0e-2f)
220 << "at " << i << " / " << batch_size();
221 }
222 }
223 }
224
Marat Dukhan1e782c42019-11-21 17:02:40 -0800225 void Test(xnn_f32_vbinary_ukernel_function vbinaryc, OpType op_type, Variant variant = Variant::Native) const {
Marat Dukhanc07cb7f2019-11-14 15:32:05 -0800226 std::random_device random_device;
227 auto rng = std::mt19937(random_device());
228 auto f32rng = std::bind(std::uniform_real_distribution<float>(0.0f, 1.0f), rng);
229
230 std::vector<float> a(batch_size() + XNN_EXTRA_BYTES / sizeof(float));
231 const float b = f32rng();
232 std::vector<float> y(batch_size() + (inplace() ? XNN_EXTRA_BYTES / sizeof(float) : 0));
233 std::vector<float> y_ref(batch_size());
234 for (size_t iteration = 0; iteration < iterations(); iteration++) {
235 std::generate(a.begin(), a.end(), std::ref(f32rng));
236 if (inplace()) {
237 std::generate(y.begin(), y.end(), std::ref(f32rng));
238 } else {
239 std::fill(y.begin(), y.end(), nanf(""));
240 }
241 const float* a_data = inplace() ? y.data() : a.data();
242
243 // Compute reference results.
244 for (size_t i = 0; i < batch_size(); i++) {
245 switch (op_type) {
246 case OpType::AddC:
247 y_ref[i] = a_data[i] + b;
248 break;
Marat Dukhan77ca6302019-12-06 12:48:15 -0800249 case OpType::DivC:
250 y_ref[i] = a_data[i] / b;
251 break;
252 case OpType::RDivC:
253 y_ref[i] = b / a_data[i];
254 break;
Marat Dukhan403b7d42019-12-05 12:49:11 -0800255 case OpType::MaxC:
256 y_ref[i] = std::max<float>(a_data[i], b);
257 break;
258 case OpType::MinC:
259 y_ref[i] = std::min<float>(a_data[i], b);
260 break;
Marat Dukhanc07cb7f2019-11-14 15:32:05 -0800261 case OpType::MulC:
262 y_ref[i] = a_data[i] * b;
263 break;
264 case OpType::SubC:
265 y_ref[i] = a_data[i] - b;
266 break;
267 case OpType::RSubC:
268 y_ref[i] = b - a_data[i];
269 break;
270 }
271 }
Marat Dukhan91cd2b72020-04-09 23:57:31 -0700272 // Call optimized micro-kernel.
273 vbinaryc(batch_size() * sizeof(float), a_data, &b, y.data(), nullptr);
274
275 // Verify results.
276 for (size_t i = 0; i < batch_size(); i++) {
277 ASSERT_NEAR(y[i], y_ref[i], std::abs(y_ref[i]) * 1.0e-6f)
278 << "at " << i << " / " << batch_size();
279 }
280 }
281 }
282
283 void Test(xnn_f32_vbinary_minmax_ukernel_function vbinaryc_minmax, OpType op_type, Variant variant = Variant::Native) const {
284 std::random_device random_device;
285 auto rng = std::mt19937(random_device());
286 auto f32rng = std::bind(std::uniform_real_distribution<float>(0.0f, 1.0f), rng);
287
288 std::vector<float> a(batch_size() + XNN_EXTRA_BYTES / sizeof(float));
289 const float b = f32rng();
290 std::vector<float> y(batch_size() + (inplace() ? XNN_EXTRA_BYTES / sizeof(float) : 0));
291 std::vector<float> y_ref(batch_size());
292 for (size_t iteration = 0; iteration < iterations(); iteration++) {
293 std::generate(a.begin(), a.end(), std::ref(f32rng));
294 if (inplace()) {
295 std::generate(y.begin(), y.end(), std::ref(f32rng));
296 } else {
297 std::fill(y.begin(), y.end(), nanf(""));
298 }
299 const float* a_data = inplace() ? y.data() : a.data();
300
301 // Compute reference results.
302 for (size_t i = 0; i < batch_size(); i++) {
303 switch (op_type) {
304 case OpType::AddC:
305 y_ref[i] = a_data[i] + b;
306 break;
307 case OpType::DivC:
308 y_ref[i] = a_data[i] / b;
309 break;
310 case OpType::RDivC:
311 y_ref[i] = b / a_data[i];
312 break;
313 case OpType::MaxC:
314 y_ref[i] = std::max<float>(a_data[i], b);
315 break;
316 case OpType::MinC:
317 y_ref[i] = std::min<float>(a_data[i], b);
318 break;
319 case OpType::MulC:
320 y_ref[i] = a_data[i] * b;
321 break;
322 case OpType::SubC:
323 y_ref[i] = a_data[i] - b;
324 break;
325 case OpType::RSubC:
326 y_ref[i] = b - a_data[i];
327 break;
328 }
329 }
Marat Dukhanc07cb7f2019-11-14 15:32:05 -0800330 const float accumulated_min = *std::min_element(y_ref.cbegin(), y_ref.cend());
331 const float accumulated_max = *std::max_element(y_ref.cbegin(), y_ref.cend());
332 const float accumulated_range = accumulated_max - accumulated_min;
333 const float y_max = accumulated_range > 0.0f ?
334 (accumulated_max - accumulated_range / 255.0f * float(255 - qmax())) :
335 +std::numeric_limits<float>::infinity();
336 const float y_min = accumulated_range > 0.0f ?
337 (accumulated_min + accumulated_range / 255.0f * float(qmin())) :
338 -std::numeric_limits<float>::infinity();
339 for (size_t i = 0; i < batch_size(); i++) {
340 y_ref[i] = std::max<float>(std::min<float>(y_ref[i], y_max), y_min);
341 }
342
Frank Barchard9f3a8432020-06-02 13:59:35 -0700343 // Prepare parameters.
Frank Barcharde70dbeb2020-05-01 15:46:41 -0700344 xnn_f32_minmax_params params = { };
Marat Dukhanc07cb7f2019-11-14 15:32:05 -0800345 switch (variant) {
346 case Variant::Native:
Frank Barcharde70dbeb2020-05-01 15:46:41 -0700347 params = xnn_init_f32_minmax_params(y_min, y_max);
Marat Dukhanc07cb7f2019-11-14 15:32:05 -0800348 break;
349 case Variant::Scalar:
Frank Barcharde70dbeb2020-05-01 15:46:41 -0700350 params = xnn_init_scalar_f32_minmax_params(y_min, y_max);
Marat Dukhanc07cb7f2019-11-14 15:32:05 -0800351 break;
352 }
353
354 // Call optimized micro-kernel.
Frank Barcharde70dbeb2020-05-01 15:46:41 -0700355 vbinaryc_minmax(batch_size() * sizeof(float), a_data, &b, y.data(), &params);
Marat Dukhanc07cb7f2019-11-14 15:32:05 -0800356
357 // Verify results.
358 for (size_t i = 0; i < batch_size(); i++) {
359 ASSERT_NEAR(y[i], y_ref[i], std::abs(y_ref[i]) * 1.0e-6f)
360 << "at " << i << " / " << batch_size();
361 }
362 }
363 }
364
365 private:
366 size_t batch_size_{1};
367 bool inplace_{false};
368 uint8_t qmin_{0};
369 uint8_t qmax_{255};
370 size_t iterations_{15};
371};