// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <algorithm>
#include <cfloat>
#include <cmath>
#include <cstdint>
#include <functional>
#include <limits>
#include <random>
#include <string>
#include <vector>

#include <xnnpack.h>

#include <benchmark/benchmark.h>
#ifdef BENCHMARK_TENSORFLOW_LITE
#include "flatbuffers/include/flatbuffers/flatbuffers.h"
#include "tensorflow/lite/interpreter.h"
#include "tensorflow/lite/kernels/register.h"
#include "tensorflow/lite/model.h"
#include "tensorflow/lite/schema/schema_generated.h"
#include "tensorflow/lite/version.h"
#endif // BENCHMARK_TENSORFLOW_LITE
#include "bench/utils.h"

#ifndef XNN_NO_QU8_OPERATORS
void xnnpack_deconvolution_qu8(benchmark::State& state, const char* net) {
  const size_t batch_size = state.range(0);
  const size_t input_height = state.range(1);
  const size_t input_width = state.range(2);
  const size_t kernel_height = state.range(3);
  const size_t kernel_width = state.range(4);
  const size_t padding = state.range(5);
  const size_t adjustment = state.range(6);
  const size_t stride = state.range(7);
  const size_t dilation = state.range(8);
  const size_t groups = state.range(9);
  const size_t group_input_channels = state.range(10);
  const size_t group_output_channels = state.range(11);

  std::random_device random_device;
  auto rng = std::mt19937(random_device());
  auto i32rng = std::bind(std::uniform_int_distribution<int32_t>(-10000, 10000), std::ref(rng));
  auto u8rng = std::bind(std::uniform_int_distribution<uint32_t>(0, std::numeric_limits<uint8_t>::max()), std::ref(rng));

  const size_t output_pixel_stride = groups * group_output_channels;
  const size_t input_pixel_stride = groups * group_input_channels;
  const size_t effective_kernel_height = (kernel_height - 1) * dilation + 1;
  const size_t effective_kernel_width = (kernel_width - 1) * dilation + 1;
  const size_t padding_left = padding / 2;
  const size_t padding_top = padding / 2;
  const size_t padding_right = padding - padding_left;
  const size_t padding_bottom = padding - padding_top;
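  // Transposed-convolution output size: stride * (input - 1) + adjustment +
  // effective kernel extent, minus total padding, clamped so it cannot underflow.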
  const size_t output_height = std::max(stride * (input_height - 1) + adjustment + effective_kernel_height, padding) - padding;
  const size_t output_width = std::max(stride * (input_width - 1) + adjustment + effective_kernel_width, padding) - padding;

  std::vector<uint8_t> input(batch_size * input_height * input_width * input_pixel_stride);
  std::generate(input.begin(), input.end(), std::ref(u8rng));
  std::vector<uint8_t> kernel(groups * group_output_channels * kernel_height * kernel_width * group_input_channels);
  std::generate(kernel.begin(), kernel.end(), std::ref(u8rng));
  std::vector<int32_t> bias(groups * group_output_channels);
  std::generate(bias.begin(), bias.end(), std::ref(i32rng));
  const size_t output_elements = batch_size * output_height * output_width * output_pixel_stride;

  xnn_status status = xnn_initialize(nullptr /* allocator */);
  if (status != xnn_status_success) {
    state.SkipWithError("failed to initialize XNNPACK");
    return;
  }

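  // Size the working set to overflow the last-level cache, so every timed run
  // touches cold output memory; sizeof(float) is a conservative per-element
  // bound even for this quantized path.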
  const size_t num_buffers = 1 +
    benchmark::utils::DivideRoundUp<size_t>(benchmark::utils::GetMaxCacheSize(),
      sizeof(float) * (kernel.size() + bias.size() + output_elements));
  std::vector<uint8_t> output(output_elements * num_buffers);

  std::vector<xnn_operator_t> deconvolution_operators(num_buffers);
  for (xnn_operator_t& deconvolution_op : deconvolution_operators) {
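    // Representative quantization parameters for benchmarking: zero point 127
    // and scale 0.5 for the input, kernel, and output, with the output clamped
    // to the full [0, 255] range.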
    status = xnn_create_deconvolution2d_nhwc_qu8(
      padding_top, padding_right, padding_bottom, padding_left,
      kernel_height, kernel_width,
      stride, stride,
      dilation, dilation,
      groups, group_input_channels, group_output_channels,
      input_pixel_stride, output_pixel_stride,
      127, 0.5f, 127, 0.5f,
      kernel.data(), bias.data(),
      127, 0.5f, 0, 255,
      0 /* flags */,
      &deconvolution_op);
    if (status != xnn_status_success) {
      state.SkipWithError("failed to create QU8 Deconvolution operator");
      return;
    }
  }

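  // Setup binds the input shape and a distinct output buffer to each operator;
  // the buffers must remain valid until the runs below complete.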
  for (size_t i = 0; i < deconvolution_operators.size(); i++) {
    status = xnn_setup_deconvolution2d_nhwc_qu8(
      deconvolution_operators[i],
      batch_size, input_height, input_width,
      0 /* height adjustment */, 0 /* width adjustment */,
      input.data(), output.data() + i * output_elements,
      nullptr /* thread pool */);
    if (status != xnn_status_success) {
      state.SkipWithError("failed to setup QU8 Deconvolution operator");
      return;
    }
  }

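  // Rotate through the operators outside the timed region: the input is
  // prefetched into L1 while each iteration writes to a cold output buffer.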
  size_t buffer_index = 0;
  for (auto _ : state) {
    state.PauseTiming();
    benchmark::utils::PrefetchToL1(input.data(), input.size() * sizeof(uint8_t));
    buffer_index = (buffer_index + 1) % num_buffers;
    state.ResumeTiming();

    status = xnn_run_operator(deconvolution_operators[buffer_index], nullptr /* thread pool */);
    if (status != xnn_status_success) {
      state.SkipWithError("failed to run QU8 Deconvolution operator");
      return;
    }
  }

  for (xnn_operator_t& deconvolution_op : deconvolution_operators) {
    status = xnn_delete_operator(deconvolution_op);
    if (status != xnn_status_success) {
      state.SkipWithError("failed to delete QU8 Deconvolution operator");
      return;
    }
    deconvolution_op = nullptr;
  }

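  // The OPS rate counts two operations (multiply + accumulate) per MAC.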
  state.counters["Freq"] = benchmark::utils::GetCurrentCpuFrequency();
  state.counters["OPS"] = benchmark::Counter(
    uint64_t(state.iterations()) * 2 *
    batch_size * input_height * input_width *
    groups * group_input_channels * group_output_channels *
    kernel_height * kernel_width,
    benchmark::Counter::kIsRate);
}
#endif // XNN_NO_QU8_OPERATORS

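// FP32 variant: same benchmark flow as the QU8 path above, without
// quantization parameters.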
void xnnpack_deconvolution_f32(benchmark::State& state, const char* net) {
  const size_t batch_size = state.range(0);
  const size_t input_height = state.range(1);
  const size_t input_width = state.range(2);
  const size_t kernel_height = state.range(3);
  const size_t kernel_width = state.range(4);
  const size_t padding = state.range(5);
  const size_t adjustment = state.range(6);
  const size_t stride = state.range(7);
  const size_t dilation = state.range(8);
  const size_t groups = state.range(9);
  const size_t group_input_channels = state.range(10);
  const size_t group_output_channels = state.range(11);

  std::random_device random_device;
  auto rng = std::mt19937(random_device());
  auto f32rng = std::bind(std::uniform_real_distribution<float>(0.0f, 1.0f), std::ref(rng));

  const size_t output_pixel_stride = groups * group_output_channels;
  const size_t input_pixel_stride = groups * group_input_channels;
  const size_t effective_kernel_height = (kernel_height - 1) * dilation + 1;
  const size_t effective_kernel_width = (kernel_width - 1) * dilation + 1;
  const size_t padding_left = padding / 2;
  const size_t padding_top = padding / 2;
  const size_t padding_right = padding - padding_left;
  const size_t padding_bottom = padding - padding_top;
  const size_t output_height = std::max(stride * (input_height - 1) + adjustment + effective_kernel_height, padding) - padding;
  const size_t output_width = std::max(stride * (input_width - 1) + adjustment + effective_kernel_width, padding) - padding;

  std::vector<float> input(batch_size * input_height * input_width * input_pixel_stride + XNN_EXTRA_BYTES / sizeof(float));
  std::generate(input.begin(), input.end(), std::ref(f32rng));
  std::vector<float> kernel(groups * group_output_channels * kernel_height * kernel_width * group_input_channels);
  std::generate(kernel.begin(), kernel.end(), std::ref(f32rng));
  std::vector<float> bias(groups * group_output_channels);
  std::generate(bias.begin(), bias.end(), std::ref(f32rng));
  const size_t output_elements = batch_size * output_height * output_width * output_pixel_stride;

  xnn_status status = xnn_initialize(nullptr /* allocator */);
  if (status != xnn_status_success) {
    state.SkipWithError("failed to initialize XNNPACK");
    return;
  }

  const size_t num_buffers = 1 +
    benchmark::utils::DivideRoundUp<size_t>(benchmark::utils::GetMaxCacheSize(),
      sizeof(float) * (kernel.size() + bias.size() + output_elements));
  std::vector<float> output(output_elements * num_buffers);

  std::vector<xnn_operator_t> deconvolution_operators(num_buffers);
  for (xnn_operator_t& deconvolution_op : deconvolution_operators) {
    status = xnn_create_deconvolution2d_nhwc_f32(
      padding_top, padding_right, padding_bottom, padding_left,
      kernel_height, kernel_width,
      stride, stride,
      dilation, dilation,
      groups, group_input_channels, group_output_channels,
      input_pixel_stride, output_pixel_stride,
      kernel.data(), bias.data(),
      -std::numeric_limits<float>::infinity(), +std::numeric_limits<float>::infinity(),
      0 /* flags */,
      &deconvolution_op);
    if (status != xnn_status_success) {
      state.SkipWithError("failed to create FP32 Deconvolution operator");
      return;
    }
  }

  for (size_t i = 0; i < deconvolution_operators.size(); i++) {
    status = xnn_setup_deconvolution2d_nhwc_f32(
      deconvolution_operators[i],
      batch_size, input_height, input_width,
      0 /* height adjustment */, 0 /* width adjustment */,
      input.data(), output.data() + i * output_elements,
      nullptr /* thread pool */);
    if (status != xnn_status_success) {
      state.SkipWithError("failed to setup FP32 Deconvolution operator");
      return;
    }
  }

  size_t buffer_index = 0;
  for (auto _ : state) {
    state.PauseTiming();
    benchmark::utils::PrefetchToL1(input.data(), input.size() * sizeof(float));
    buffer_index = (buffer_index + 1) % num_buffers;
    state.ResumeTiming();

    status = xnn_run_operator(deconvolution_operators[buffer_index], nullptr /* thread pool */);
    if (status != xnn_status_success) {
      state.SkipWithError("failed to run FP32 Deconvolution operator");
      return;
    }
  }

  for (xnn_operator_t& deconvolution_op : deconvolution_operators) {
    status = xnn_delete_operator(deconvolution_op);
    if (status != xnn_status_success) {
      state.SkipWithError("failed to delete FP32 Deconvolution operator");
      return;
    }
    deconvolution_op = nullptr;
  }

  state.counters["Freq"] = benchmark::utils::GetCurrentCpuFrequency();
  state.counters["FLOPS"] = benchmark::Counter(
    uint64_t(state.iterations()) * 2 *
    batch_size * input_height * input_width *
    groups * group_input_channels * group_output_channels *
    kernel_height * kernel_width,
    benchmark::Counter::kIsRate);
}

#ifdef BENCHMARK_TENSORFLOW_LITE
void tflite_deconvolution_f32(benchmark::State& state, const char* net) {
  const size_t batch_size = state.range(0);
  const size_t input_height = state.range(1);
  const size_t input_width = state.range(2);
  const size_t kernel_height = state.range(3);
  const size_t kernel_width = state.range(4);
  const size_t padding = state.range(5);
  const size_t adjustment = state.range(6);
  const size_t stride = state.range(7);
  const size_t dilation = state.range(8);
  const size_t groups = state.range(9);
  const size_t input_channels = state.range(10);
  const size_t output_channels = state.range(11);

  if (groups != 1) {
    state.SkipWithError("grouped deconvolution is not supported");
    return;
  }
  if (dilation != 1) {
    state.SkipWithError("dilated deconvolution is not supported");
    return;
  }

  std::random_device random_device;
  auto rng = std::mt19937(random_device());
  auto f32rng = std::bind(std::uniform_real_distribution<float>(0.0f, 1.0f), std::ref(rng));

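  // TFLite only models SAME and VALID padding; map the symmetric padding
  // argument accordingly and skip configurations it cannot express.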
  tflite::Padding tf_padding = tflite::Padding_VALID;
  if (padding == (kernel_width - 1) && padding == (kernel_height - 1)) {
    tf_padding = tflite::Padding_SAME;
  } else if (padding == 0) {
    tf_padding = tflite::Padding_VALID;
  } else {
    state.SkipWithError("unsupported padding");
    return;
  }

  const size_t output_height = std::max(stride * (input_height - 1) + adjustment + kernel_height, padding) - padding;
  const size_t output_width = std::max(stride * (input_width - 1) + adjustment + kernel_width, padding) - padding;

  std::vector<float> kernel(output_channels * kernel_height * kernel_width * input_channels);
  std::generate(kernel.begin(), kernel.end(), std::ref(f32rng));

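  // Build a one-operator TRANSPOSE_CONV model in memory with the FlatBuffers
  // API instead of loading a .tflite file from disk.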
  flatbuffers::FlatBufferBuilder builder;
  flatbuffers::Offset<tflite::OperatorCode> operator_code =
    CreateOperatorCode(builder, tflite::BuiltinOperator_TRANSPOSE_CONV, 0);

  flatbuffers::Offset<tflite::TransposeConvOptions> transpose_conv_options = CreateTransposeConvOptions(
    builder,
    tf_padding,
    static_cast<int32_t>(stride), static_cast<int32_t>(stride));

  const int32_t input_shape[4] = {
    static_cast<int32_t>(batch_size),
    static_cast<int32_t>(input_height),
    static_cast<int32_t>(input_width),
    static_cast<int32_t>(input_channels)
  };
  const int32_t output_shape[4] = {
    static_cast<int32_t>(batch_size),
    static_cast<int32_t>(output_height),
    static_cast<int32_t>(output_width),
    static_cast<int32_t>(output_channels)
  };
  const int32_t filter_shape[4] = {
    static_cast<int32_t>(output_channels),
    static_cast<int32_t>(kernel_height),
    static_cast<int32_t>(kernel_width),
    static_cast<int32_t>(input_channels)
  };
  const int32_t output_shape_shape[1] = { 4 };

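  // Buffer 0 is the empty sentinel the TFLite schema reserves for tensors
  // without static data; buffer 1 holds the filter weights and buffer 2 the
  // static output shape.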
  flatbuffers::Offset<tflite::Buffer> buffers[3] = {
    tflite::CreateBuffer(builder, builder.CreateVector({})),
    tflite::CreateBuffer(builder, builder.CreateVector(
      reinterpret_cast<const uint8_t*>(kernel.data()),
      sizeof(float) * kernel.size())),
    tflite::CreateBuffer(builder, builder.CreateVector(
      reinterpret_cast<const uint8_t*>(output_shape),
      sizeof(output_shape))),
  };

  flatbuffers::Offset<tflite::Tensor> tensors[4] = {
    tflite::CreateTensor(builder,
      builder.CreateVector<int32_t>(output_shape_shape, 1),
      tflite::TensorType_INT32,
      2 /* buffer id */,
      builder.CreateString("output_shape")),
    tflite::CreateTensor(builder,
      builder.CreateVector<int32_t>(filter_shape, 4),
      tflite::TensorType_FLOAT32,
      1 /* buffer id */,
      builder.CreateString("filter")),
    tflite::CreateTensor(builder,
      builder.CreateVector<int32_t>(input_shape, 4),
      tflite::TensorType_FLOAT32,
      0 /* buffer id */,
      builder.CreateString("input")),
    tflite::CreateTensor(builder,
      builder.CreateVector<int32_t>(output_shape, 4),
      tflite::TensorType_FLOAT32,
      0 /* buffer id */,
      builder.CreateString("output")),
  };

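  // TRANSPOSE_CONV takes its inputs in the order: output shape, filter, input.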
  const int32_t op_inputs[3] = { 0, 1, 2 };
  const int32_t op_outputs[1] = { 3 };
  flatbuffers::Offset<tflite::Operator> op = CreateOperator(
    builder,
    0 /* opcode_index */,
    builder.CreateVector<int32_t>(op_inputs, 3),
    builder.CreateVector<int32_t>(op_outputs, 1),
    tflite::BuiltinOptions_TransposeConvOptions,
    transpose_conv_options.Union());

  const int32_t graph_inputs[1] = { 2 };
  const int32_t graph_outputs[1] = { 3 };
  flatbuffers::Offset<tflite::SubGraph> subgraph = CreateSubGraph(
    builder,
    builder.CreateVector(tensors, 4),
    builder.CreateVector<int32_t>(graph_inputs, 1),
    builder.CreateVector<int32_t>(graph_outputs, 1),
    builder.CreateVector(&op, 1),
    builder.CreateString("TransposeConv subgraph"));

  flatbuffers::Offset<flatbuffers::String> description = builder.CreateString("TransposeConv model");

  flatbuffers::Offset<tflite::Model> model_buffer = tflite::CreateModel(builder,
    TFLITE_SCHEMA_VERSION,
    builder.CreateVector(&operator_code, 1),
    builder.CreateVector(&subgraph, 1),
    description,
    builder.CreateVector(buffers, 3));

  builder.Finish(model_buffer);

  const tflite::Model* model = tflite::GetModel(builder.GetBufferPointer());
  tflite::ops::builtin::BuiltinOpResolver resolver;
  tflite::InterpreterBuilder interpreterBuilder(model, resolver);
  std::unique_ptr<tflite::Interpreter> interpreter;
  if (interpreterBuilder(&interpreter) != kTfLiteOk) {
    state.SkipWithError("failed to create TFLite interpreter");
    return;
  }
  if (interpreter == nullptr) {
    state.SkipWithError("TFLite interpreter is null");
    return;
  }
  interpreter->SetNumThreads(1);

  if (interpreter->AllocateTensors() != kTfLiteOk) {
    state.SkipWithError("failed to allocate tensors");
    return;
  }

  std::generate(
    interpreter->typed_tensor<float>(2),
    interpreter->typed_tensor<float>(2) + batch_size * input_channels * input_height * input_width,
    std::ref(f32rng));

  for (auto _ : state) {
    state.PauseTiming();
    benchmark::utils::WipeCache();
    benchmark::utils::PrefetchToL1(
      interpreter->typed_tensor<float>(2),
      batch_size * input_channels * input_height * input_width * sizeof(float));
    state.ResumeTiming();

    if (interpreter->Invoke() != kTfLiteOk) {
      state.SkipWithError("failed to invoke TFLite interpreter");
      return;
    }
  }

  state.counters["Freq"] = benchmark::utils::GetCurrentCpuFrequency();
  state.counters["FLOPS"] = benchmark::Counter(
    uint64_t(state.iterations()) * 2 *
    batch_size * input_height * input_width *
    input_channels * output_channels *
    kernel_height * kernel_width,
    benchmark::Counter::kIsRate);

  interpreter.reset();
}
#endif // BENCHMARK_TENSORFLOW_LITE

// FCN-32 model (PASCAL VOC version).
// We assume a CIF image (352x288) at the model input/output.
static void FCN32(benchmark::internal::Benchmark* b) {
  b->ArgNames({"N", "H", "W", "KH", "KW", "P", "A", "S", "D", "G", "GCin", "GCout"});

  /*       N   H   W  KH  KW  P  A   S  D  G  GCin  GCout */
  b->Args({1,  9, 11, 64, 64, 0, 0, 32, 1, 1,   21,    21});
}

// FCN-16 model (PASCAL VOC version).
// We assume a CIF image (352x288) at the model input/output.
static void FCN16(benchmark::internal::Benchmark* b) {
  b->ArgNames({"N", "H", "W", "KH", "KW", "P", "A", "S", "D", "G", "GCin", "GCout"});

  /*       N   H   W  KH  KW  P  A   S  D  G  GCin  GCout */
  b->Args({1,  9, 11,  4,  4, 0, 0,  2, 1, 1,   21,    21});
  b->Args({1, 18, 22, 32, 32, 0, 0, 16, 1, 1,   21,    21});
}

// FCN-8 model (PASCAL VOC version).
// We assume a CIF image (352x288) at the model input/output.
static void FCN8(benchmark::internal::Benchmark* b) {
  b->ArgNames({"N", "H", "W", "KH", "KW", "P", "A", "S", "D", "G", "GCin", "GCout"});

  /*       N   H   W  KH  KW  P  A  S  D  G  GCin  GCout */
  b->Args({1,  9, 11,  4,  4, 0, 0, 2, 1, 1,   21,    21});
  b->Args({1, 18, 22,  4,  4, 0, 0, 2, 1, 1,   21,    21});
  b->Args({1, 36, 44, 16, 16, 0, 0, 8, 1, 1,   21,    21});
}

static void ENet(benchmark::internal::Benchmark* b) {
  b->ArgNames({"N", "H", "W", "KH", "KW", "P", "A", "S", "D", "G", "GCin", "GCout"});

  /********************* Bottleneck 4.0 ********************/
  /*       N    H    W  KH  KW  P  A  S  D  G  GCin  GCout */
  b->Args({1,  64,  64,  3,  3, 2, 1, 2, 1, 1,   32,    32});
  /********************* Bottleneck 5.0 ********************/
  /*       N    H    W  KH  KW  P  A  S  D  G  GCin  GCout */
  b->Args({1, 128, 128,  3,  3, 2, 1, 2, 1, 1,   16,    16});
  /***************** Final Full Convolution ****************/
  /*       N    H    W  KH  KW  P  A  S  D  G  GCin  GCout */
  b->Args({1, 256, 256,  2,  2, 0, 0, 2, 1, 1,   16,    12});
}

static void ESPNet(benchmark::internal::Benchmark* b) {
  b->ArgNames({"N", "H", "W", "KH", "KW", "P", "A", "S", "D", "G", "GCin", "GCout"});

  /*       N    H    W  KH  KW  P  A  S  D  G  GCin  GCout */
  b->Args({1,  64, 128,  2,  2, 0, 0, 2, 1, 1,   20,    20});
  b->Args({1, 128, 256,  2,  2, 0, 0, 2, 1, 1,   20,    20});
  b->Args({1, 256, 512,  2,  2, 0, 0, 2, 1, 1,   20,    20});
}

BENCHMARK_CAPTURE(xnnpack_deconvolution_f32, fcn32, "FCN-32")->Apply(FCN32)->UseRealTime();
BENCHMARK_CAPTURE(xnnpack_deconvolution_f32, fcn16, "FCN-16")->Apply(FCN16)->UseRealTime();
BENCHMARK_CAPTURE(xnnpack_deconvolution_f32, fcn8, "FCN-8")->Apply(FCN8)->UseRealTime();
BENCHMARK_CAPTURE(xnnpack_deconvolution_f32, enet, "ENet")->Apply(ENet)->UseRealTime();
BENCHMARK_CAPTURE(xnnpack_deconvolution_f32, espnet, "ESPNet")->Apply(ESPNet)->UseRealTime();

#ifndef XNN_NO_QU8_OPERATORS
BENCHMARK_CAPTURE(xnnpack_deconvolution_qu8, fcn32, "FCN-32")->Apply(FCN32)->UseRealTime();
BENCHMARK_CAPTURE(xnnpack_deconvolution_qu8, fcn16, "FCN-16")->Apply(FCN16)->UseRealTime();
BENCHMARK_CAPTURE(xnnpack_deconvolution_qu8, fcn8, "FCN-8")->Apply(FCN8)->UseRealTime();
BENCHMARK_CAPTURE(xnnpack_deconvolution_qu8, enet, "ENet")->Apply(ENet)->UseRealTime();
BENCHMARK_CAPTURE(xnnpack_deconvolution_qu8, espnet, "ESPNet")->Apply(ESPNet)->UseRealTime();
#endif // XNN_NO_QU8_OPERATORS

#ifdef BENCHMARK_TENSORFLOW_LITE
  BENCHMARK_CAPTURE(tflite_deconvolution_f32, fcn32, "FCN-32")->Apply(FCN32)->UseRealTime();
  BENCHMARK_CAPTURE(tflite_deconvolution_f32, fcn16, "FCN-16")->Apply(FCN16)->UseRealTime();
  BENCHMARK_CAPTURE(tflite_deconvolution_f32, fcn8, "FCN-8")->Apply(FCN8)->UseRealTime();
  BENCHMARK_CAPTURE(tflite_deconvolution_f32, enet, "ENet")->Apply(ENet)->UseRealTime();
  BENCHMARK_CAPTURE(tflite_deconvolution_f32, espnet, "ESPNet")->Apply(ESPNet)->UseRealTime();
#endif // BENCHMARK_TENSORFLOW_LITE

#ifndef XNNPACK_BENCHMARK_NO_MAIN
BENCHMARK_MAIN();
#endif