blob: f7fc0e96b8d84e82bde85be16d42938fc0d1e300 [file] [log] [blame]
Marat Dukhanc07cb7f2019-11-14 15:32:05 -08001// Copyright 2019 Google LLC
2//
3// This source code is licensed under the BSD-style license found in the
4// LICENSE file in the root directory of this source tree.
5
#pragma once

#include <gtest/gtest.h>

#include <algorithm>
#include <cassert>
#include <cmath>
#include <cstddef>
#include <cstdint>
#include <cstdlib>
#include <functional>
#include <limits>
#include <random>
#include <vector>

#include <fp16.h>

#include <xnnpack.h>
#include <xnnpack/params-init.h>
#include <xnnpack/params.h>
23
24
25class VBinOpMicrokernelTester {
26 public:
27 enum class OpType {
28 Add,
Marat Dukhan77ca6302019-12-06 12:48:15 -080029 Div,
Marat Dukhan403b7d42019-12-05 12:49:11 -080030 Max,
31 Min,
Marat Dukhanc07cb7f2019-11-14 15:32:05 -080032 Mul,
33 Sub,
Marat Dukhan13bafb02020-06-05 00:43:11 -070034 SqrDiff,
Marat Dukhanc07cb7f2019-11-14 15:32:05 -080035 };
36
37 enum class Variant {
38 Native,
39 Scalar,
40 };
41
42 inline VBinOpMicrokernelTester& batch_size(size_t batch_size) {
43 assert(batch_size != 0);
44 this->batch_size_ = batch_size;
45 return *this;
46 }
47
48 inline size_t batch_size() const {
49 return this->batch_size_;
50 }
51
52 inline VBinOpMicrokernelTester& inplace_a(bool inplace_a) {
53 this->inplace_a_ = inplace_a;
54 return *this;
55 }
56
57 inline bool inplace_a() const {
58 return this->inplace_a_;
59 }
60
61 inline VBinOpMicrokernelTester& inplace_b(bool inplace_b) {
62 this->inplace_b_ = inplace_b;
63 return *this;
64 }
65
66 inline bool inplace_b() const {
67 return this->inplace_b_;
68 }
69
70 inline VBinOpMicrokernelTester& qmin(uint8_t qmin) {
71 this->qmin_ = qmin;
72 return *this;
73 }
74
75 inline uint8_t qmin() const {
76 return this->qmin_;
77 }
78
79 inline VBinOpMicrokernelTester& qmax(uint8_t qmax) {
80 this->qmax_ = qmax;
81 return *this;
82 }
83
84 inline uint8_t qmax() const {
85 return this->qmax_;
86 }
87
88 inline VBinOpMicrokernelTester& iterations(size_t iterations) {
89 this->iterations_ = iterations;
90 return *this;
91 }
92
93 inline size_t iterations() const {
94 return this->iterations_;
95 }
96
Frank Barchardbf31e3f2020-05-12 14:00:07 -070097 void Test(xnn_f16_vbinary_ukernel_function vbinary, OpType op_type) const {
Frank Barchardd793f6c2020-05-08 13:37:43 -070098 std::random_device random_device;
99 auto rng = std::mt19937(random_device());
100 auto f32rng = std::bind(std::uniform_real_distribution<float>(0.01f, 1.0f), rng);
101 auto f16rng = std::bind(fp16_ieee_from_fp32_value, f32rng);
102
103 std::vector<uint16_t> a(batch_size() + XNN_EXTRA_BYTES / sizeof(uint16_t));
104 std::vector<uint16_t> b(batch_size() + XNN_EXTRA_BYTES / sizeof(uint16_t));
105 std::vector<uint16_t> y(batch_size() + (inplace_a() || inplace_b() ? XNN_EXTRA_BYTES / sizeof(uint16_t) : 0));
106 std::vector<float> y_ref(batch_size());
107 for (size_t iteration = 0; iteration < iterations(); iteration++) {
108 std::generate(a.begin(), a.end(), std::ref(f16rng));
109 std::generate(b.begin(), b.end(), std::ref(f16rng));
110 if (inplace_a() || inplace_b()) {
111 std::generate(y.begin(), y.end(), std::ref(f16rng));
112 } else {
113 std::fill(y.begin(), y.end(), nanf(""));
114 }
115 const uint16_t* a_data = inplace_a() ? y.data() : a.data();
116 const uint16_t* b_data = inplace_b() ? y.data() : b.data();
117
118 // Compute reference results.
119 for (size_t i = 0; i < batch_size(); i++) {
120 switch (op_type) {
121 case OpType::Add:
122 y_ref[i] = fp16_ieee_to_fp32_value(a_data[i]) + fp16_ieee_to_fp32_value(b_data[i]);
123 break;
124 case OpType::Div:
125 y_ref[i] = fp16_ieee_to_fp32_value(a_data[i]) / fp16_ieee_to_fp32_value(b_data[i]);
126 break;
127 case OpType::Max:
128 y_ref[i] = std::max<float>(fp16_ieee_to_fp32_value(a_data[i]), fp16_ieee_to_fp32_value(b_data[i]));
129 break;
130 case OpType::Min:
131 y_ref[i] = std::min<float>(fp16_ieee_to_fp32_value(a_data[i]), fp16_ieee_to_fp32_value(b_data[i]));
132 break;
133 case OpType::Mul:
134 y_ref[i] = fp16_ieee_to_fp32_value(a_data[i]) * fp16_ieee_to_fp32_value(b_data[i]);
135 break;
Marat Dukhan13bafb02020-06-05 00:43:11 -0700136 case OpType::SqrDiff:
137 {
138 const float diff = fp16_ieee_to_fp32_value(a_data[i]) - fp16_ieee_to_fp32_value(b_data[i]);
139 y_ref[i] = diff * diff;
140 break;
141 }
Frank Barchardd793f6c2020-05-08 13:37:43 -0700142 case OpType::Sub:
143 y_ref[i] = fp16_ieee_to_fp32_value(a_data[i]) - fp16_ieee_to_fp32_value(b_data[i]);
144 break;
145 }
146 }
147
148 // Call optimized micro-kernel.
149 vbinary(batch_size() * sizeof(uint16_t), a_data, b_data, y.data(), nullptr);
150
151 // Verify results.
152 for (size_t i = 0; i < batch_size(); i++) {
153 ASSERT_NEAR(fp16_ieee_to_fp32_value(y[i]), y_ref[i], std::abs(y_ref[i]) * 1.0e-2f)
154 << "at " << i << " / " << batch_size();
155 }
156 }
157 }
158
Frank Barchardbf31e3f2020-05-12 14:00:07 -0700159 void Test(xnn_f16_vbinary_minmax_ukernel_function vbinary_minmax, OpType op_type) const {
Frank Barchardd793f6c2020-05-08 13:37:43 -0700160 std::random_device random_device;
161 auto rng = std::mt19937(random_device());
162 auto f32rng = std::bind(std::uniform_real_distribution<float>(0.01f, 1.0f), rng);
163 auto f16rng = std::bind(fp16_ieee_from_fp32_value, f32rng);
164
165 std::vector<uint16_t> a(batch_size() + XNN_EXTRA_BYTES / sizeof(uint16_t));
166 std::vector<uint16_t> b(batch_size() + XNN_EXTRA_BYTES / sizeof(uint16_t));
167 std::vector<uint16_t> y(batch_size() + (inplace_a() || inplace_b() ? XNN_EXTRA_BYTES / sizeof(uint16_t) : 0));
168 std::vector<float> y_ref(batch_size());
169 for (size_t iteration = 0; iteration < iterations(); iteration++) {
170 std::generate(a.begin(), a.end(), std::ref(f16rng));
171 std::generate(b.begin(), b.end(), std::ref(f16rng));
172 if (inplace_a() || inplace_b()) {
173 std::generate(y.begin(), y.end(), std::ref(f16rng));
174 } else {
175 std::fill(y.begin(), y.end(), nanf(""));
176 }
177 const uint16_t* a_data = inplace_a() ? y.data() : a.data();
178 const uint16_t* b_data = inplace_b() ? y.data() : b.data();
179
180 // Compute reference results.
181 for (size_t i = 0; i < batch_size(); i++) {
182 switch (op_type) {
183 case OpType::Add:
184 y_ref[i] = fp16_ieee_to_fp32_value(a_data[i]) + fp16_ieee_to_fp32_value(b_data[i]);
185 break;
186 case OpType::Div:
187 y_ref[i] = fp16_ieee_to_fp32_value(a_data[i]) / fp16_ieee_to_fp32_value(b_data[i]);
188 break;
189 case OpType::Max:
190 y_ref[i] = std::max<float>(fp16_ieee_to_fp32_value(a_data[i]), fp16_ieee_to_fp32_value(b_data[i]));
191 break;
192 case OpType::Min:
193 y_ref[i] = std::min<float>(fp16_ieee_to_fp32_value(a_data[i]), fp16_ieee_to_fp32_value(b_data[i]));
194 break;
195 case OpType::Mul:
196 y_ref[i] = fp16_ieee_to_fp32_value(a_data[i]) * fp16_ieee_to_fp32_value(b_data[i]);
197 break;
Marat Dukhan13bafb02020-06-05 00:43:11 -0700198 case OpType::SqrDiff:
199 {
200 const float diff = fp16_ieee_to_fp32_value(a_data[i]) - fp16_ieee_to_fp32_value(b_data[i]);
201 y_ref[i] = diff * diff;
202 break;
203 }
Frank Barchardd793f6c2020-05-08 13:37:43 -0700204 case OpType::Sub:
205 y_ref[i] = fp16_ieee_to_fp32_value(a_data[i]) - fp16_ieee_to_fp32_value(b_data[i]);
206 break;
207 }
208 }
209
210 const float accumulated_min = *std::min_element(y_ref.cbegin(), y_ref.cend());
211 const float accumulated_max = *std::max_element(y_ref.cbegin(), y_ref.cend());
212 const float accumulated_range = accumulated_max - accumulated_min;
213 const float y_max = fp16_ieee_to_fp32_value(fp16_ieee_from_fp32_value(accumulated_range > 0.0f ?
214 (accumulated_max - accumulated_range / 255.0f * float(255 - qmax())) :
215 +std::numeric_limits<float>::infinity()));
216 const float y_min = fp16_ieee_to_fp32_value(fp16_ieee_from_fp32_value(accumulated_range > 0.0f ?
217 (accumulated_min + accumulated_range / 255.0f * float(qmin())) :
218 -std::numeric_limits<float>::infinity()));
219 for (size_t i = 0; i < batch_size(); i++) {
220 y_ref[i] = std::max<float>(std::min<float>(y_ref[i], y_max), y_min);
221 }
222
Frank Barchard9f3a8432020-06-02 13:59:35 -0700223 // Prepare parameters.
Frank Barchardbf31e3f2020-05-12 14:00:07 -0700224 xnn_f16_minmax_params params = xnn_init_f16_minmax_params(
Frank Barchardd793f6c2020-05-08 13:37:43 -0700225 fp16_ieee_from_fp32_value(y_min),
226 fp16_ieee_from_fp32_value(y_max));
227
228 // Call optimized micro-kernel.
229 vbinary_minmax(batch_size() * sizeof(uint16_t), a_data, b_data, y.data(), &params);
230
231 // Verify results.
232 for (size_t i = 0; i < batch_size(); i++) {
233 ASSERT_NEAR(fp16_ieee_to_fp32_value(y[i]), y_ref[i], std::abs(y_ref[i]) * 1.0e-2f)
234 << "at " << i << " / " << batch_size();
235 }
236 }
237 }
238
Marat Dukhan1e782c42019-11-21 17:02:40 -0800239 void Test(xnn_f32_vbinary_ukernel_function vbinary, OpType op_type, Variant variant = Variant::Native) const {
Marat Dukhanc07cb7f2019-11-14 15:32:05 -0800240 std::random_device random_device;
241 auto rng = std::mt19937(random_device());
Marat Dukhan77ca6302019-12-06 12:48:15 -0800242 auto f32rng = std::bind(std::uniform_real_distribution<float>(0.01f, 1.0f), rng);
Marat Dukhanc07cb7f2019-11-14 15:32:05 -0800243
244 std::vector<float> a(batch_size() + XNN_EXTRA_BYTES / sizeof(float));
245 std::vector<float> b(batch_size() + XNN_EXTRA_BYTES / sizeof(float));
246 std::vector<float> y(batch_size() + (inplace_a() || inplace_b() ? XNN_EXTRA_BYTES / sizeof(float) : 0));
247 std::vector<float> y_ref(batch_size());
248 for (size_t iteration = 0; iteration < iterations(); iteration++) {
249 std::generate(a.begin(), a.end(), std::ref(f32rng));
250 std::generate(b.begin(), b.end(), std::ref(f32rng));
251 if (inplace_a() || inplace_b()) {
252 std::generate(y.begin(), y.end(), std::ref(f32rng));
253 } else {
254 std::fill(y.begin(), y.end(), nanf(""));
255 }
256 const float* a_data = inplace_a() ? y.data() : a.data();
257 const float* b_data = inplace_b() ? y.data() : b.data();
258
259 // Compute reference results.
260 for (size_t i = 0; i < batch_size(); i++) {
261 switch (op_type) {
262 case OpType::Add:
263 y_ref[i] = a_data[i] + b_data[i];
264 break;
Marat Dukhan77ca6302019-12-06 12:48:15 -0800265 case OpType::Div:
266 y_ref[i] = a_data[i] / b_data[i];
267 break;
Marat Dukhan403b7d42019-12-05 12:49:11 -0800268 case OpType::Max:
269 y_ref[i] = std::max<float>(a_data[i], b_data[i]);
270 break;
271 case OpType::Min:
272 y_ref[i] = std::min<float>(a_data[i], b_data[i]);
273 break;
Marat Dukhanc07cb7f2019-11-14 15:32:05 -0800274 case OpType::Mul:
275 y_ref[i] = a_data[i] * b_data[i];
276 break;
Marat Dukhan13bafb02020-06-05 00:43:11 -0700277 case OpType::SqrDiff:
278 {
279 const float diff = a_data[i] - b_data[i];
280 y_ref[i] = diff * diff;
281 break;
282 }
Marat Dukhanc07cb7f2019-11-14 15:32:05 -0800283 case OpType::Sub:
284 y_ref[i] = a_data[i] - b_data[i];
285 break;
286 }
287 }
Marat Dukhan91cd2b72020-04-09 23:57:31 -0700288
289 // Call optimized micro-kernel.
290 vbinary(batch_size() * sizeof(float), a_data, b_data, y.data(), nullptr);
291
292 // Verify results.
293 for (size_t i = 0; i < batch_size(); i++) {
294 ASSERT_NEAR(y[i], y_ref[i], std::abs(y_ref[i]) * 1.0e-6f)
295 << "at " << i << " / " << batch_size();
296 }
297 }
298 }
299
300 void Test(xnn_f32_vbinary_minmax_ukernel_function vbinary_minmax, OpType op_type, Variant variant = Variant::Native) const {
301 std::random_device random_device;
302 auto rng = std::mt19937(random_device());
303 auto f32rng = std::bind(std::uniform_real_distribution<float>(0.01f, 1.0f), rng);
304
305 std::vector<float> a(batch_size() + XNN_EXTRA_BYTES / sizeof(float));
306 std::vector<float> b(batch_size() + XNN_EXTRA_BYTES / sizeof(float));
307 std::vector<float> y(batch_size() + (inplace_a() || inplace_b() ? XNN_EXTRA_BYTES / sizeof(float) : 0));
308 std::vector<float> y_ref(batch_size());
309 for (size_t iteration = 0; iteration < iterations(); iteration++) {
310 std::generate(a.begin(), a.end(), std::ref(f32rng));
311 std::generate(b.begin(), b.end(), std::ref(f32rng));
312 if (inplace_a() || inplace_b()) {
313 std::generate(y.begin(), y.end(), std::ref(f32rng));
314 } else {
315 std::fill(y.begin(), y.end(), nanf(""));
316 }
317 const float* a_data = inplace_a() ? y.data() : a.data();
318 const float* b_data = inplace_b() ? y.data() : b.data();
319
320 // Compute reference results.
321 for (size_t i = 0; i < batch_size(); i++) {
322 switch (op_type) {
323 case OpType::Add:
324 y_ref[i] = a_data[i] + b_data[i];
325 break;
326 case OpType::Div:
327 y_ref[i] = a_data[i] / b_data[i];
328 break;
329 case OpType::Max:
330 y_ref[i] = std::max<float>(a_data[i], b_data[i]);
331 break;
332 case OpType::Min:
333 y_ref[i] = std::min<float>(a_data[i], b_data[i]);
334 break;
335 case OpType::Mul:
336 y_ref[i] = a_data[i] * b_data[i];
337 break;
Marat Dukhan13bafb02020-06-05 00:43:11 -0700338 case OpType::SqrDiff:
339 {
340 const float diff = a_data[i] - b_data[i];
341 y_ref[i] = diff * diff;
342 break;
343 }
Marat Dukhan91cd2b72020-04-09 23:57:31 -0700344 case OpType::Sub:
345 y_ref[i] = a_data[i] - b_data[i];
346 break;
347 }
348 }
Marat Dukhanc07cb7f2019-11-14 15:32:05 -0800349 const float accumulated_min = *std::min_element(y_ref.cbegin(), y_ref.cend());
350 const float accumulated_max = *std::max_element(y_ref.cbegin(), y_ref.cend());
351 const float accumulated_range = accumulated_max - accumulated_min;
352 const float y_max = accumulated_range > 0.0f ?
353 (accumulated_max - accumulated_range / 255.0f * float(255 - qmax())) :
354 +std::numeric_limits<float>::infinity();
355 const float y_min = accumulated_range > 0.0f ?
356 (accumulated_min + accumulated_range / 255.0f * float(qmin())) :
357 -std::numeric_limits<float>::infinity();
358 for (size_t i = 0; i < batch_size(); i++) {
359 y_ref[i] = std::max<float>(std::min<float>(y_ref[i], y_max), y_min);
360 }
361
Frank Barchard9f3a8432020-06-02 13:59:35 -0700362 // Prepare parameters.
Frank Barcharde70dbeb2020-05-01 15:46:41 -0700363 xnn_f32_minmax_params params = { };
Marat Dukhanc07cb7f2019-11-14 15:32:05 -0800364 switch (variant) {
365 case Variant::Native:
Frank Barcharde70dbeb2020-05-01 15:46:41 -0700366 params = xnn_init_f32_minmax_params(y_min, y_max);
Marat Dukhanc07cb7f2019-11-14 15:32:05 -0800367 break;
368 case Variant::Scalar:
Frank Barcharde70dbeb2020-05-01 15:46:41 -0700369 params = xnn_init_scalar_f32_minmax_params(y_min, y_max);
Marat Dukhanc07cb7f2019-11-14 15:32:05 -0800370 break;
371 }
372
373 // Call optimized micro-kernel.
Frank Barcharde70dbeb2020-05-01 15:46:41 -0700374 vbinary_minmax(batch_size() * sizeof(float), a_data, b_data, y.data(), &params);
Marat Dukhanc07cb7f2019-11-14 15:32:05 -0800375
376 // Verify results.
377 for (size_t i = 0; i < batch_size(); i++) {
378 ASSERT_NEAR(y[i], y_ref[i], std::abs(y_ref[i]) * 1.0e-6f)
379 << "at " << i << " / " << batch_size();
380 }
381 }
382 }
383
384 private:
385 size_t batch_size_{1};
386 bool inplace_a_{false};
387 bool inplace_b_{false};
388 uint8_t qmin_{0};
389 uint8_t qmax_{255};
390 size_t iterations_{15};
391};