// Copyright (c) Facebook, Inc. and its affiliates.
// All rights reserved.
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <algorithm>
#include <cassert>
#include <cfloat>
#include <cmath>
#include <cstddef>
#include <cstdint>
#include <functional>
#include <limits>
#include <memory>
#include <ostream>
#include <random>
#include <sstream>
#include <string>
#include <vector>

#include <cpuinfo.h>
#include <xnnpack.h>

#ifdef BENCHMARK_ARM_COMPUTE_LIBRARY
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/Tensor.h"
#include "arm_compute/runtime/CPP/CPPScheduler.h"
#include "arm_compute/runtime/NEON/functions/NEDepthwiseConvolutionLayer.h"
#include "arm_compute/runtime/NEON/functions/NEConvolutionLayer.h"
#endif  // BENCHMARK_ARM_COMPUTE_LIBRARY
#include <benchmark/benchmark.h>
#ifdef BENCHMARK_TENSORFLOW_LITE
#include "flatbuffers/include/flatbuffers/flatbuffers.h"
#include "tensorflow/lite/interpreter.h"
#include "tensorflow/lite/kernels/register.h"
#include "tensorflow/lite/model.h"
#include "tensorflow/lite/schema/schema_generated.h"
#include "tensorflow/lite/version.h"
#endif  // BENCHMARK_TENSORFLOW_LITE
#include "bench/utils.h"
XNNPACK Teamb455b122019-09-27 18:10:33 -070039
40
41void xnnpack_convolution_q8(benchmark::State& state, const char* net) {
42 const size_t batch_size = state.range(0);
43 const size_t input_height = state.range(1);
44 const size_t input_width = state.range(2);
45 const size_t kernel_height = state.range(3);
46 const size_t kernel_width = state.range(4);
47 const size_t padding_height = state.range(5);
48 const size_t padding_width = state.range(6);
49 const size_t subsampling = state.range(7);
50 const size_t dilation = state.range(8);
51 const size_t groups = state.range(9);
52 const size_t group_input_channels = state.range(10);
53 const size_t group_output_channels = state.range(11);
54
55 std::random_device random_device;
56 auto rng = std::mt19937(random_device());
57 auto s32rng = std::bind(std::uniform_int_distribution<int32_t>(-10000, 10000), rng);
Marat Dukhan5ce30d92020-04-14 03:31:26 -070058 auto u8rng = std::bind(std::uniform_int_distribution<uint32_t>(0, std::numeric_limits<uint8_t>::max()), rng);
XNNPACK Teamb455b122019-09-27 18:10:33 -070059
60 const size_t output_pixel_stride = groups * group_output_channels;
61 const size_t input_pixel_stride = groups * group_input_channels;
62 const size_t effective_kernel_height = (kernel_height - 1) * dilation + 1;
63 const size_t effective_kernel_width = (kernel_width - 1) * dilation + 1;
64 const size_t padding_left = padding_width / 2;
65 const size_t padding_top = padding_height / 2;
66 const size_t padding_right = padding_width - padding_left;
67 const size_t padding_bottom = padding_height - padding_top;
68 const size_t output_height = (input_height + padding_height - effective_kernel_height) / subsampling + 1;
69 const size_t output_width = (input_width + padding_width - effective_kernel_width) / subsampling + 1;
70
71 std::vector<uint8_t> input(batch_size * input_height * input_width * input_pixel_stride);
72 std::generate(input.begin(), input.end(), std::ref(u8rng));
73 std::vector<uint8_t> kernel(groups * group_output_channels * kernel_height * kernel_width * group_input_channels);
74 std::generate(kernel.begin(), kernel.end(), std::ref(u8rng));
75 std::vector<int32_t> bias(groups * group_output_channels);
76 std::generate(bias.begin(), bias.end(), std::ref(s32rng));
77 const size_t output_elements = batch_size * output_height * output_width * output_pixel_stride;
78
Marat Dukhan04f03be2019-11-19 12:36:47 -080079 xnn_status status = xnn_initialize(nullptr /* allocator */);
XNNPACK Teamb455b122019-09-27 18:10:33 -070080 if (status != xnn_status_success) {
81 state.SkipWithError("failed to initialize XNNPACK");
82 return;
83 }
84
85 if (!cpuinfo_initialize()) {
86 state.SkipWithError("cpuinfo initialization failed");
87 return;
88 }
89 const size_t num_buffers = 1 +
Marat Dukhan42323232019-10-23 02:09:02 -070090 benchmark::utils::DivideRoundUp<size_t>(benchmark::utils::GetMaxCacheSize(),
XNNPACK Teamb455b122019-09-27 18:10:33 -070091 sizeof(uint8_t) * kernel.size() + sizeof(int32_t) * bias.size() + sizeof(uint8_t) * output_elements);
92 std::vector<uint8_t> output(output_elements * num_buffers);
93
94 std::vector<xnn_operator_t> convolution_operators(num_buffers);
95 for (xnn_operator_t& convolution_op : convolution_operators) {
96 status = xnn_create_convolution2d_nhwc_q8(
97 padding_top, padding_right, padding_bottom, padding_left,
98 kernel_height, kernel_width,
99 subsampling, subsampling,
100 dilation, dilation,
101 groups, group_input_channels, group_output_channels,
102 input_pixel_stride, output_pixel_stride,
103 127, 0.5f,
104 127, 0.5f,
105 kernel.data(), bias.data(),
106 127, 0.5f, 0, 255,
107 0 /* flags */, &convolution_op);
108 if (status != xnn_status_success) {
109 state.SkipWithError("failed to create QINT8 Convolution operator");
110 return;
111 }
112 }
113
114 for (size_t i = 0; i < convolution_operators.size(); i++) {
115 status = xnn_setup_convolution2d_nhwc_q8(
116 convolution_operators[i],
117 batch_size, input_height, input_width,
118 input.data(), output.data() + i * output_elements,
119 nullptr /* thread pool */);
120 if (status != xnn_status_success) {
121 state.SkipWithError("failed to setup QINT8 Convolution operator");
122 return;
123 }
124 }
125
126 size_t buffer_index = 0;
127 for (auto _ : state) {
128 state.PauseTiming();
Marat Dukhan42323232019-10-23 02:09:02 -0700129 benchmark::utils::PrefetchToL1(input.data(), input.size() * sizeof(uint8_t));
XNNPACK Teamb455b122019-09-27 18:10:33 -0700130 buffer_index = (buffer_index + 1) % num_buffers;
131 state.ResumeTiming();
132
133 status = xnn_run_operator(convolution_operators[buffer_index],
134 nullptr /* thread pool */);
135 if (status != xnn_status_success) {
136 state.SkipWithError("failed to run QINT8 Convolution operator");
137 return;
138 }
139 }
140
141 for (xnn_operator_t& convolution_op : convolution_operators) {
142 status = xnn_delete_operator(convolution_op);
143 if (status != xnn_status_success) {
144 state.SkipWithError("failed to delete QINT8 Convolution operator");
145 return;
146 }
147 convolution_op = nullptr;
148 }
149
Frank Barchardbb4c18b2019-09-30 11:05:52 -0700150 state.counters["Freq"] = benchmark::utils::GetCurrentCpuFrequency();
XNNPACK Teamb455b122019-09-27 18:10:33 -0700151 state.counters["OPS"] = benchmark::Counter(
152 uint64_t(state.iterations()) * 2 *
153 batch_size * output_height * output_width *
154 groups * group_input_channels * group_output_channels *
155 kernel_height * kernel_width,
156 benchmark::Counter::kIsRate);
157}
158
159void xnnpack_convolution_f32(benchmark::State& state, const char* net) {
160 const size_t batch_size = state.range(0);
161 const size_t input_height = state.range(1);
162 const size_t input_width = state.range(2);
163 const size_t kernel_height = state.range(3);
164 const size_t kernel_width = state.range(4);
165 const size_t padding_height = state.range(5);
166 const size_t padding_width = state.range(6);
167 const size_t subsampling = state.range(7);
168 const size_t dilation = state.range(8);
169 const size_t groups = state.range(9);
170 const size_t group_input_channels = state.range(10);
171 const size_t group_output_channels = state.range(11);
172
173 std::random_device random_device;
174 auto rng = std::mt19937(random_device());
175 auto f32rng = std::bind(std::uniform_real_distribution<float>(0.0f, 1.0f), rng);
176
177 const size_t output_pixel_stride = groups * group_output_channels;
178 const size_t input_pixel_stride = groups * group_input_channels;
179 const size_t effective_kernel_height = (kernel_height - 1) * dilation + 1;
180 const size_t effective_kernel_width = (kernel_width - 1) * dilation + 1;
181 const size_t padding_left = padding_width / 2;
182 const size_t padding_top = padding_height / 2;
183 const size_t padding_right = padding_width - padding_left;
184 const size_t padding_bottom = padding_height - padding_top;
185 const size_t output_height = (input_height + padding_height - effective_kernel_height) / subsampling + 1;
186 const size_t output_width = (input_width + padding_width - effective_kernel_width) / subsampling + 1;
187
188 std::vector<float> input(batch_size * input_height * input_width * input_pixel_stride + XNN_EXTRA_BYTES / sizeof(float));
189 std::generate(input.begin(), input.end(), std::ref(f32rng));
190 std::vector<float> kernel(groups * group_output_channels * kernel_height * kernel_width * group_input_channels);
191 std::generate(kernel.begin(), kernel.end(), std::ref(f32rng));
192 std::vector<float> bias(groups * group_output_channels);
193 std::generate(bias.begin(), bias.end(), std::ref(f32rng));
194 const size_t output_elements = batch_size * output_height * output_width * output_pixel_stride;
195
Marat Dukhan04f03be2019-11-19 12:36:47 -0800196 xnn_status status = xnn_initialize(nullptr /* allocator */);
XNNPACK Teamb455b122019-09-27 18:10:33 -0700197 if (status != xnn_status_success) {
198 state.SkipWithError("failed to initialize XNNPACK");
199 return;
200 }
201
202 if (!cpuinfo_initialize()) {
203 state.SkipWithError("cpuinfo initialization failed");
204 return;
205 }
206 const size_t num_buffers = 1 +
Marat Dukhan42323232019-10-23 02:09:02 -0700207 benchmark::utils::DivideRoundUp<size_t>(benchmark::utils::GetMaxCacheSize(),
XNNPACK Teamb455b122019-09-27 18:10:33 -0700208 sizeof(float) * (kernel.size() + bias.size() + output_elements));
209 std::vector<float> output(output_elements * num_buffers);
210
211 std::vector<xnn_operator_t> convolution_operators(num_buffers);
212 for (xnn_operator_t& convolution_op : convolution_operators) {
213 status = xnn_create_convolution2d_nhwc_f32(
214 padding_top, padding_right, padding_bottom, padding_left,
215 kernel_height, kernel_width,
216 subsampling, subsampling,
217 dilation, dilation,
218 groups, group_input_channels, group_output_channels,
219 input_pixel_stride, output_pixel_stride,
220 kernel.data(), bias.data(),
221 -std::numeric_limits<float>::infinity(), +std::numeric_limits<float>::infinity(),
222 0 /* flags */, &convolution_op);
223 if (status != xnn_status_success) {
224 state.SkipWithError("failed to create FP32 Convolution operator");
225 return;
226 }
227 }
228
229 for (size_t i = 0; i < convolution_operators.size(); i++) {
230 status = xnn_setup_convolution2d_nhwc_f32(
231 convolution_operators[i],
232 batch_size, input_height, input_width,
233 input.data(), output.data() + i * output_elements,
234 nullptr /* thread pool */);
235 if (status != xnn_status_success) {
236 state.SkipWithError("failed to setup FP32 Convolution operator");
237 return;
238 }
239 }
240
241 size_t buffer_index = 0;
242 for (auto _ : state) {
243 state.PauseTiming();
Marat Dukhan42323232019-10-23 02:09:02 -0700244 benchmark::utils::PrefetchToL1(input.data(), input.size() * sizeof(float));
XNNPACK Teamb455b122019-09-27 18:10:33 -0700245 buffer_index = (buffer_index + 1) % num_buffers;
246 state.ResumeTiming();
247
248 status = xnn_run_operator(convolution_operators[buffer_index], nullptr /* thread pool */);
249 if (status != xnn_status_success) {
250 state.SkipWithError("failed to run FP32 Convolution operator");
251 return;
252 }
253 }
254
255 for (xnn_operator_t& convolution_op : convolution_operators) {
256 status = xnn_delete_operator(convolution_op);
257 if (status != xnn_status_success) {
258 state.SkipWithError("failed to delete FP32 Convolution operator");
259 return;
260 }
261 convolution_op = nullptr;
262 }
263
264 state.counters["Freq"] = benchmark::utils::GetCurrentCpuFrequency();
265 state.counters["FLOPS"] = benchmark::Counter(
266 uint64_t(state.iterations()) * 2 *
267 batch_size * output_height * output_width *
268 groups * group_input_channels * group_output_channels *
269 kernel_height * kernel_width,
270 benchmark::Counter::kIsRate);
271}
272
273#ifdef BENCHMARK_TENSORFLOW_LITE
// Benchmarks TensorFlow Lite 2D convolution on the same parameter set as the
// XNNPACK benchmarks: a single-operator model is serialized into an in-memory
// FlatBuffer and executed through the TFLite interpreter with one thread.
void tflite_convolution_f32(benchmark::State& state, const char* net) {
  // Benchmark arguments, in the same order as the XNNPACK variants.
  const size_t batch_size = state.range(0);
  const size_t input_height = state.range(1);
  const size_t input_width = state.range(2);
  const size_t kernel_height = state.range(3);
  const size_t kernel_width = state.range(4);
  const size_t padding_height = state.range(5);
  const size_t padding_width = state.range(6);
  const size_t subsampling = state.range(7);
  const size_t dilation = state.range(8);
  const size_t groups = state.range(9);
  const size_t group_input_channels = state.range(10);
  const size_t group_output_channels = state.range(11);

  // TFLite has no generic grouped convolution; only the depthwise special
  // case (one input channel per group) maps onto DEPTHWISE_CONV_2D.
  bool is_depthwise = false;
  if (groups != 1) {
    if (group_input_channels == 1) {
      is_depthwise = true;
    } else {
      state.SkipWithError("grouped convolution is not supported");
      return;
    }
  }

  std::random_device random_device;
  auto rng = std::mt19937(random_device());
  auto f32rng = std::bind(std::uniform_real_distribution<float>(0.0f, 1.0f), rng);

  const size_t effective_kernel_height = (kernel_height - 1) * dilation + 1;
  const size_t effective_kernel_width = (kernel_width - 1) * dilation + 1;

  // TFLite only models SAME/VALID padding: map the explicit padding totals
  // onto one of those, and skip configurations that fit neither.
  tflite::Padding padding = tflite::Padding_VALID;
  if (padding_width == (effective_kernel_width - 1) && padding_height == (effective_kernel_height - 1)) {
    padding = tflite::Padding_SAME;
  } else if (padding_width == 0 && padding_height == 0) {
    padding = tflite::Padding_VALID;
  } else {
    state.SkipWithError("unsupported padding");
    return;
  }

  const size_t output_height = (input_height + padding_height - effective_kernel_height) / subsampling + 1;
  const size_t output_width = (input_width + padding_width - effective_kernel_width) / subsampling + 1;

  std::vector<float> kernel(groups * group_output_channels * kernel_height * kernel_width * group_input_channels);
  std::generate(kernel.begin(), kernel.end(), std::ref(f32rng));
  std::vector<float> bias(groups * group_output_channels);
  std::generate(bias.begin(), bias.end(), std::ref(f32rng));

  flatbuffers::FlatBufferBuilder builder;
  flatbuffers::Offset<tflite::OperatorCode> operator_code =
    CreateOperatorCode(
      builder,
      is_depthwise ? tflite::BuiltinOperator_DEPTHWISE_CONV_2D : tflite::BuiltinOperator_CONV_2D,
      0);

  flatbuffers::Offset<tflite::Conv2DOptions> conv2d_options = CreateConv2DOptions(
    builder,
    padding,
    static_cast<int32_t>(subsampling), static_cast<int32_t>(subsampling),
    tflite::ActivationFunctionType_NONE,
    static_cast<int32_t>(dilation), static_cast<int32_t>(dilation));

  // NOTE(review): depth_multiplier is set to group_output_channels — this
  // assumes the depthwise input has exactly `groups` channels; confirm
  // against the registered benchmark shapes.
  flatbuffers::Offset<tflite::DepthwiseConv2DOptions> dwconv2d_options = CreateDepthwiseConv2DOptions(
    builder,
    padding,
    static_cast<int32_t>(subsampling), static_cast<int32_t>(subsampling),
    static_cast<int32_t>(group_output_channels),
    tflite::ActivationFunctionType_NONE,
    static_cast<int32_t>(dilation), static_cast<int32_t>(dilation));

  // Buffer 0 is the empty sentinel (used by tensors with no static data);
  // buffers 1 and 2 hold the filter and bias constants, respectively.
  flatbuffers::Offset<tflite::Buffer> buffers[3] = {
    tflite::CreateBuffer(builder, builder.CreateVector({})),
    tflite::CreateBuffer(builder, builder.CreateVector(
      reinterpret_cast<const uint8_t*>(kernel.data()),
      sizeof(float) * kernel.size())),
    tflite::CreateBuffer(builder, builder.CreateVector(
      reinterpret_cast<const uint8_t*>(bias.data()),
      sizeof(float) * bias.size())),
  };

  const int32_t input_shape[4] = {
    static_cast<int32_t>(batch_size),
    static_cast<int32_t>(input_height),
    static_cast<int32_t>(input_width),
    static_cast<int32_t>(groups * group_input_channels)
  };
  const int32_t output_shape[4] = {
    static_cast<int32_t>(batch_size),
    static_cast<int32_t>(output_height),
    static_cast<int32_t>(output_width),
    static_cast<int32_t>(groups * group_output_channels)
  };
  const int32_t filter_shape[4] = {
    static_cast<int32_t>(group_output_channels),
    static_cast<int32_t>(kernel_height),
    static_cast<int32_t>(kernel_width),
    static_cast<int32_t>(groups * group_input_channels)
  };
  const int32_t bias_shape[1] = {
    static_cast<int32_t>(groups * group_output_channels)
  };

  // Tensors 0..3 = input, filter, bias, output. Input and output reference
  // the empty buffer 0: their contents are provided/produced at run time.
  flatbuffers::Offset<tflite::Tensor> tensors[4] = {
    tflite::CreateTensor(builder,
                         builder.CreateVector<int32_t>(input_shape, 4),
                         tflite::TensorType_FLOAT32,
                         0 /* buffer id */,
                         builder.CreateString("input")),
    tflite::CreateTensor(builder,
                         builder.CreateVector<int32_t>(filter_shape, 4),
                         tflite::TensorType_FLOAT32,
                         1 /* buffer id */,
                         builder.CreateString("filter")),
    tflite::CreateTensor(builder,
                         builder.CreateVector<int32_t>(bias_shape, 1),
                         tflite::TensorType_FLOAT32,
                         2 /* buffer id */,
                         builder.CreateString("bias")),
    tflite::CreateTensor(builder,
                         builder.CreateVector<int32_t>(output_shape, 4),
                         tflite::TensorType_FLOAT32,
                         0 /* buffer id */,
                         builder.CreateString("output")),
  };

  const int32_t op_inputs[3] = { 0, 1, 2 };
  const int32_t op_outputs[1] = { 3 };
  // Attach the options union matching the selected builtin operator.
  flatbuffers::Offset<tflite::Operator> op = CreateOperator(
    builder,
    0 /* opcode_index */,
    builder.CreateVector<int32_t>(op_inputs, 3),
    builder.CreateVector<int32_t>(op_outputs, 1),
    is_depthwise ? tflite::BuiltinOptions_DepthwiseConv2DOptions : tflite::BuiltinOptions_Conv2DOptions,
    is_depthwise ? dwconv2d_options.Union() : conv2d_options.Union(),
    /*custom_options */ 0,
    tflite::CustomOptionsFormat_FLEXBUFFERS);

  const int32_t graph_inputs[1] = { 0 };
  const int32_t graph_outputs[1] = { 3 };
  flatbuffers::Offset<tflite::SubGraph> subgraph = CreateSubGraph(
    builder,
    builder.CreateVector(tensors, 4),
    builder.CreateVector<int32_t>(graph_inputs, 1),
    builder.CreateVector<int32_t>(graph_outputs, 1),
    builder.CreateVector(&op, 1),
    builder.CreateString("Conv2D subgraph"));

  flatbuffers::Offset<flatbuffers::String> description = builder.CreateString("Conv2D model");

  flatbuffers::Offset<tflite::Model> model_buffer = tflite::CreateModel(builder,
    TFLITE_SCHEMA_VERSION,
    builder.CreateVector(&operator_code, 1),
    builder.CreateVector(&subgraph, 1),
    description,
    builder.CreateVector(buffers, 3));

  builder.Finish(model_buffer);

  const tflite::Model* model = tflite::GetModel(builder.GetBufferPointer());
  tflite::ops::builtin::BuiltinOpResolver resolver;
  tflite::InterpreterBuilder interpreterBuilder(model, resolver);
  std::unique_ptr<tflite::Interpreter> interpreter;
  if (interpreterBuilder(&interpreter) != kTfLiteOk) {
    state.SkipWithError("failed to create TFLite interpreter");
    return;
  }
  if (interpreter == nullptr) {
    state.SkipWithError("TFLite interpreter is null");
    return;
  }
  // Single-threaded execution, matching the XNNPACK benchmarks.
  interpreter->SetNumThreads(1);

  if (interpreter->AllocateTensors() != kTfLiteOk) {
    state.SkipWithError("failed to allocate tensors");
    return;
  }

  // Fill the input tensor (tensor index 0) with random data once, up front.
  std::generate(
    interpreter->typed_tensor<float>(0),
    interpreter->typed_tensor<float>(0) + batch_size * groups * group_input_channels * input_height * input_width,
    std::ref(f32rng));

  for (auto _ : state) {
    state.PauseTiming();
    // Evict everything, then stage only the input in L1, so each timed
    // iteration starts from a comparable cache state.
    benchmark::utils::WipeCache();
    benchmark::utils::PrefetchToL1(
      interpreter->typed_tensor<float>(0),
      batch_size * groups * group_input_channels * input_height * input_width * sizeof(float));
    state.ResumeTiming();

    if (interpreter->Invoke() != kTfLiteOk) {
      state.SkipWithError("failed to invoke TFLite interpreter");
      return;
    }
  }

  state.counters["Freq"] = benchmark::utils::GetCurrentCpuFrequency();
  // 2 FLOPs per MAC (one multiply + one add) per output element.
  state.counters["FLOPS"] = benchmark::Counter(
    uint64_t(state.iterations()) * 2 *
    batch_size * output_height * output_width *
    groups * group_input_channels * group_output_channels *
    kernel_height * kernel_width,
    benchmark::Counter::kIsRate);

  interpreter.reset();
}
481#endif // BENCHMARK_TENSORFLOW_LITE
482
483#ifdef BENCHMARK_ARM_COMPUTE_LIBRARY
// Computes a scalar reference convolution over the given inputs and compares
// it element-wise against `output`. Returns an empty string on success, or a
// human-readable description of the first mismatching element.
// The geometry (shapes, padding, stride, dilation) is re-derived from the
// benchmark arguments in `state`, exactly as in armcl_convolution_f32.
static std::string compare_with_convolution_f32_reference_output(
  const benchmark::State& state, const float* input, size_t input_size,
  const float* kernel, size_t kernel_size, const float* bias, size_t bias_size,
  const float* output, size_t output_size)
{
  const size_t batch_size = state.range(0);
  const size_t input_height = state.range(1);
  const size_t input_width = state.range(2);
  const size_t kernel_height = state.range(3);
  const size_t kernel_width = state.range(4);
  const size_t padding_height = state.range(5);
  const size_t padding_width = state.range(6);
  const size_t subsampling = state.range(7);
  const size_t dilation = state.range(8);
  const size_t groups = state.range(9);
  const size_t group_input_channels = state.range(10);
  const size_t group_output_channels = state.range(11);

  const size_t effective_kernel_height = (kernel_height - 1) * dilation + 1;
  const size_t effective_kernel_width = (kernel_width - 1) * dilation + 1;
  const size_t output_height = (input_height + padding_height - effective_kernel_height) / subsampling + 1;
  const size_t output_width = (input_width + padding_width - effective_kernel_width) / subsampling + 1;
  const size_t input_pixel_stride = groups * group_input_channels;
  const size_t padding_left = padding_width / 2;
  const size_t padding_top = padding_height / 2;

  // Sanity-check that caller-supplied buffer sizes match the derived geometry.
  assert(input_size == batch_size * input_height * input_width * groups * group_input_channels);

  assert(kernel_size == group_output_channels * kernel_height * kernel_width * groups * group_input_channels);

  assert(bias_size == groups * group_output_channels);

  assert(output_size == batch_size * output_height * output_width * groups * group_output_channels);

  // Pass 1: seed every reference output element with its bias value.
  std::vector<float> output_ref(output_size);
  for (size_t i = 0; i < batch_size; i++) {
    for (size_t oy = 0; oy < output_height; oy++) {
      for (size_t ox = 0; ox < output_width; ox++) {
        for (size_t g = 0; g < groups; g++) {
          for (size_t oc = 0; oc < group_output_channels; oc++) {
            output_ref[(((i * output_height + oy) * output_width + ox) * groups + g) * group_output_channels + oc] =
              bias[g * group_output_channels + oc];
          }
        }
      }
    }
  }
  // Pass 2: accumulate input * kernel products for every in-bounds tap.
  for (size_t i = 0; i < batch_size; i++) {
    for (size_t oy = 0; oy < output_height; oy++) {
      for (size_t ox = 0; ox < output_width; ox++) {
        for (size_t ky = 0; ky < kernel_height; ky++) {
          const size_t iy = oy * subsampling + ky * dilation - padding_top;
          // size_t arithmetic: a "negative" coordinate wraps to a huge value,
          // so this single comparison rejects both out-of-bounds directions.
          if (iy < input_height) {
            for (size_t kx = 0; kx < kernel_width; kx++) {
              const size_t ix = ox * subsampling + kx * dilation - padding_left;
              if (ix < input_width) {
                for (size_t g = 0; g < groups; g++) {
                  for (size_t oc = 0; oc < group_output_channels; oc++) {
                    for (size_t ic = 0; ic < group_input_channels; ic++) {
                      output_ref[(((i * output_height + oy) * output_width + ox) * groups + g) * group_output_channels + oc] +=
                        input[((i * input_height + iy) * input_width + ix) * input_pixel_stride + g * group_input_channels + ic] *
                        kernel[(((oc * kernel_height + ky) * kernel_width + kx) * groups + g) * group_input_channels + ic];
                    } // group_input_channels loop
                  } // group_output_channels loop
                } // groups loop
              }
            } // kernel_width loop
          }
        } // kernel_height loop
      } // output_width loop
    } // output_height loop
  } // batch_size loop

  // Compare with a relative tolerance, floored at machine epsilon so that
  // reference values near zero do not demand impossible precision.
  const float relative_error_tolerance = 1e-4;
  for (size_t i = 0; i < batch_size; i++) {
    for (size_t y = 0; y < output_height; y++) {
      for (size_t x = 0; x < output_width; x++) {
        for (size_t g = 0; g < groups; g++) {
          for (size_t c = 0; c < group_output_channels; c++) {
            const size_t idx = (((i * output_height + y) * output_width + x) * groups + g) * group_output_channels + c;
            const float value_ref = output_ref[idx];
            const float value = output[idx];
            if (std::abs(value - value_ref) > std::max(std::abs(value_ref) * relative_error_tolerance, std::numeric_limits<float>::epsilon())) {
              std::ostringstream error_stream;
              error_stream << "(x, y) = (" << x << ", " << y << "), group = " << g
                << ", channel = " << c << ", refValue = " << value_ref
                << ", actualValue = " << value
                << ", absDiff=" << std::abs(value - value_ref);
              return error_stream.str();
            }
          }
        }
      }
    }
  }
  return "";
}
581
// Benchmarks Arm Compute Library (NEON) 2D convolution on the same parameter
// set as the XNNPACK benchmarks, then validates ACL's output against the
// scalar reference implementation above.
void armcl_convolution_f32(benchmark::State& state, const char* net) {
  const size_t batch_size = state.range(0);
  const size_t input_height = state.range(1);
  const size_t input_width = state.range(2);
  const size_t kernel_height = state.range(3);
  const size_t kernel_width = state.range(4);
  const size_t padding_height = state.range(5);
  const size_t padding_width = state.range(6);
  const size_t subsampling = state.range(7);
  const size_t dilation = state.range(8);
  const size_t groups = state.range(9);
  const size_t group_input_channels = state.range(10);
  const size_t group_output_channels = state.range(11);

  const size_t effective_kernel_height = (kernel_height - 1) * dilation + 1;
  const size_t effective_kernel_width = (kernel_width - 1) * dilation + 1;
  // Split total padding across the two edges; the odd unit goes right/bottom.
  const size_t padding_left = padding_width / 2;
  const size_t padding_top = padding_height / 2;
  const size_t padding_right = padding_width - padding_left;
  const size_t padding_bottom = padding_height - padding_top;
  const size_t output_height = (input_height + padding_height - effective_kernel_height) / subsampling + 1;
  const size_t output_width = (input_width + padding_width - effective_kernel_width) / subsampling + 1;

  arm_compute::PadStrideInfo pad_stride_info(
    subsampling /* stride height */,
    subsampling /* stride width */,
    padding_left, padding_right, padding_top, padding_bottom,
    arm_compute::DimensionRoundingType::FLOOR);
  arm_compute::Size2D dilation_info(dilation, dilation);
  // Note: activation is disabled by default.
  arm_compute::ActivationLayerInfo activation_info;

  // Note: no batch size and reverse order of dimensions, i.e. CWHN for NHWC.
  arm_compute::TensorShape input_shape(
    /* C */ groups * group_input_channels,
    /* W */ input_width,
    /* H */ input_height,
    /* N */ batch_size);
  arm_compute::TensorInfo input_info(
    input_shape,
    1 /* number of channels per element (!) */,
    arm_compute::DataType::F32);
  input_info.set_data_layout(arm_compute::DataLayout::NHWC);
  arm_compute::Tensor input_tensor;
  input_tensor.allocator()->init(input_info);
  input_tensor.allocator()->allocate();

  // Note: reverse order of dimensions, i.e. for IWHO for OHWI.
  arm_compute::TensorShape kernel_shape(
    /* I */ groups * group_input_channels,
    /* W */ kernel_width,
    /* H */ kernel_height,
    /* O */ group_output_channels);
  arm_compute::TensorInfo kernel_info(
    kernel_shape,
    1 /* number of channels per element (!) */,
    arm_compute::DataType::F32);
  kernel_info.set_data_layout(arm_compute::DataLayout::NHWC);
  arm_compute::Tensor kernelTensor;
  kernelTensor.allocator()->init(kernel_info);
  kernelTensor.allocator()->allocate();

  arm_compute::TensorShape bias_shape(groups * group_output_channels);
  arm_compute::TensorInfo bias_info(
    bias_shape,
    1 /* number of channels per element (!) */,
    arm_compute::DataType::F32);
  bias_info.set_data_layout(arm_compute::DataLayout::NHWC);
  arm_compute::Tensor bias_tensor;
  bias_tensor.allocator()->init(bias_info);
  bias_tensor.allocator()->allocate();

  // Note: no batch size and reverse order of dimensions, i.e. CWHN for NHWC.
  arm_compute::TensorShape output_shape(
    /* C */ groups * group_output_channels,
    /* W */ output_width,
    /* H */ output_height,
    /* N */ batch_size);
  arm_compute::TensorInfo output_info(
    output_shape,
    1 /* number of channels per element (!) */,
    arm_compute::DataType::F32);
  output_info.set_data_layout(arm_compute::DataLayout::NHWC);
  arm_compute::Tensor output_tensor;
  output_tensor.allocator()->init(output_info);
  output_tensor.allocator()->allocate();

  std::random_device random_device;
  auto rng = std::mt19937(random_device());
  auto f32rng = std::bind(std::uniform_real_distribution<float>(0.0f, 1.0f), rng);

  // Fill all tensors — including the output, which will be overwritten —
  // with random data directly through the allocated buffers.
  std::generate(
    reinterpret_cast<float*>(input_tensor.buffer()),
    reinterpret_cast<float*>(input_tensor.buffer()) + input_shape.total_size(),
    std::ref(f32rng));
  std::generate(
    reinterpret_cast<float*>(kernelTensor.buffer()),
    reinterpret_cast<float*>(kernelTensor.buffer()) + kernel_shape.total_size(),
    std::ref(f32rng));
  std::generate(
    reinterpret_cast<float*>(bias_tensor.buffer()),
    reinterpret_cast<float*>(bias_tensor.buffer()) + bias_shape.total_size(),
    std::ref(f32rng));
  std::generate(
    reinterpret_cast<float*>(output_tensor.buffer()),
    reinterpret_cast<float*>(output_tensor.buffer()) + output_shape.total_size(),
    std::ref(f32rng));

  bool is_depthwise = false;
  if (groups != 1) {
    // NEConvolutionLayer uses NEGEMMConvolutionLayer by default, which doesn't support grouped convolution.
    // However, depthwise convolution is supported via NEDepthwiseConvolutionLayer.
    if (group_input_channels == 1) {
      is_depthwise = true;
    } else {
      state.SkipWithError("grouped convolution is not supported");
      return;
    }
  }

  // Owning pointer to whichever ACL layer implementation is selected below.
  std::shared_ptr<arm_compute::IFunction> layer;
  if (is_depthwise) {
    if (dilation != 1) {
      state.SkipWithError("dilated depthwise convolution is not supported");
      return;
    }

    // Avoid NEDepthwiseConvolutionLayer3x3 when stride isn't 2 in order to pass the output verification.
    // TODO(b/130206370) This looks like a bug and needs further investigation.
    if (kernel_height == 3 && kernel_width == 3 && subsampling == 2) {
      // The raw pointer is kept only to call the derived-class configure()
      // and validate(); ownership transfers to `layer` immediately.
      auto* depthwise_3x3_convolution_layer = new arm_compute::NEDepthwiseConvolutionLayer3x3();
      layer.reset(depthwise_3x3_convolution_layer);
      depthwise_3x3_convolution_layer->configure(
        &input_tensor, &kernelTensor, &bias_tensor, &output_tensor,
        pad_stride_info, group_output_channels, activation_info);

      // NOTE(review): validate() is invoked after configure(); ACL documents
      // validate() as a pre-configure check — confirm the intended order.
      if (!depthwise_3x3_convolution_layer->validate(
        &input_info, &kernel_info, &bias_info, &output_info,
        pad_stride_info, group_output_channels, activation_info))
      {
        state.SkipWithError("validation failed");
        return;
      }
    } else {
      auto* depthwise_convolution_layer = new arm_compute::NEDepthwiseConvolutionLayer();
      layer.reset(depthwise_convolution_layer);
      depthwise_convolution_layer->configure(
        &input_tensor, &kernelTensor, &bias_tensor, &output_tensor,
        pad_stride_info, group_output_channels, activation_info);

      if (!depthwise_convolution_layer->validate(
        &input_info, &kernel_info, &bias_info, &output_info,
        pad_stride_info, group_output_channels, activation_info))
      {
        state.SkipWithError("validation failed");
        return;
      }
    }
  } else {
    auto* convolution_layer = new arm_compute::NEConvolutionLayer();
    layer.reset(convolution_layer);
    convolution_layer->configure(
      &input_tensor, &kernelTensor, &bias_tensor, &output_tensor,
      pad_stride_info, arm_compute::WeightsInfo(), dilation_info, activation_info,
      true /* enable fast math */, groups);

    if (!convolution_layer->validate(
      &input_info, &kernel_info, &bias_info, &output_info,
      pad_stride_info, arm_compute::WeightsInfo(), dilation_info, activation_info,
      true /* enable fast math */, groups))
    {
      state.SkipWithError("validation failed");
      return;
    }
  }

  // Dry run to let ACL do one-time initializations.
  arm_compute::CPPScheduler::get().set_num_threads(1);
  layer->run();

  for (auto _ : state) {
    state.PauseTiming();
    // Evict everything, then stage only the input in L1, so each timed
    // iteration starts from a comparable cache state.
    benchmark::utils::WipeCache();
    benchmark::utils::PrefetchToL1(
      input_tensor.buffer(),
      batch_size * groups * group_input_channels * input_height * input_width * sizeof(float));
    state.ResumeTiming();

    layer->run();
  }

  // Validate outputs.
  const std::string error_string = compare_with_convolution_f32_reference_output(
    state, reinterpret_cast<const float*>(input_tensor.buffer()),
    input_shape.total_size(),
    reinterpret_cast<const float*>(kernelTensor.buffer()),
    kernel_shape.total_size(),
    reinterpret_cast<const float*>(bias_tensor.buffer()),
    bias_shape.total_size(),
    reinterpret_cast<const float*>(output_tensor.buffer()),
    output_shape.total_size());

  if (!error_string.empty()) {
    state.SkipWithError(("validation failed: " + error_string).c_str());
    return;
  }

  input_tensor.allocator()->free();
  kernelTensor.allocator()->free();
  bias_tensor.allocator()->free();
  output_tensor.allocator()->free();

  state.counters["Freq"] = benchmark::utils::GetCurrentCpuFrequency();
  // 2 FLOPs per MAC (one multiply + one add) per output element.
  state.counters["FLOPS"] = benchmark::Counter(
    uint64_t(state.iterations()) * 2 *
    batch_size * output_height * output_width *
    groups * group_input_channels * group_output_channels *
    kernel_height * kernel_width,
    benchmark::Counter::kIsRate);
}
802#endif // BENCHMARK_ARM_COMPUTE_LIBRARY
803
804// ShuffleNet v1 with 1 group.
805static void ShuffleNetV1G1(benchmark::internal::Benchmark* b) {
806 b->ArgNames({"N", "H", "W", "KH", "KW", "PH", "PW", "S", "D", "G", "GCin", "GCout"});
807
808 /*************************** Conv 1 **************************/
809 /* N H W KH KW PH PW S D G GCin GCout */
810 b->Args({1, 224, 224, 3, 3, 2, 2, 2, 1, 1, 3, 24});
811 /******************* Stage 2: stride-2 unit ******************/
812 /* N H W KH KW PH PW S D G GCin GCout */
813 b->Args({1, 56, 56, 1, 1, 0, 0, 1, 1, 1, 24, 36});
814 b->Args({1, 56, 56, 3, 3, 2, 2, 2, 1, 36, 1, 1});
815 b->Args({1, 28, 28, 1, 1, 0, 0, 1, 1, 1, 36, 120});
816 /******************* Stage 2: stride-1 units *****************/
817 /* N H W KH KW PH PW S D G GCin GCout */
818 b->Args({1, 28, 28, 1, 1, 0, 0, 1, 1, 1, 144, 36});
819 b->Args({1, 28, 28, 3, 3, 2, 2, 2, 1, 36, 1, 1});
820 b->Args({1, 28, 28, 1, 1, 0, 0, 1, 1, 1, 36, 144});
821 /******************* Stage 3: stride-2 unit ******************/
822 /* N H W KH KW PH PW S D G GCin GCout */
823 b->Args({1, 28, 28, 1, 1, 0, 0, 1, 1, 1, 144, 72});
824 b->Args({1, 28, 28, 3, 3, 2, 2, 2, 1, 72, 1, 1});
825 b->Args({1, 14, 14, 1, 1, 0, 0, 1, 1, 1, 72, 144});
826 /******************* Stage 3: stride-1 units *****************/
827 /* N H W KH KW PH PW S D G GCin GCout */
828 b->Args({1, 14, 14, 1, 1, 0, 0, 1, 1, 1, 288, 72});
829 b->Args({1, 14, 14, 3, 3, 2, 2, 2, 1, 72, 1, 1});
830 b->Args({1, 14, 14, 1, 1, 0, 0, 1, 1, 1, 72, 288});
831 /******************* Stage 4: stride-2 unit ******************/
832 /* N H W KH KW PH PW S D G GCin GCout */
833 b->Args({1, 14, 14, 1, 1, 0, 0, 1, 1, 1, 288, 144});
834 b->Args({1, 14, 14, 3, 3, 2, 2, 2, 1, 144, 1, 1});
835 b->Args({1, 7, 7, 1, 1, 0, 0, 1, 1, 1, 144, 288});
836 /******************* Stage 4: stride-1 units *****************/
837 /* N H W KH KW PH PW S D G GCin GCout */
838 b->Args({1, 7, 7, 1, 1, 0, 0, 1, 1, 1, 576, 144});
839 b->Args({1, 7, 7, 3, 3, 2, 2, 2, 1, 144, 1, 1});
840 b->Args({1, 7, 7, 1, 1, 0, 0, 1, 1, 1, 144, 576});
841}
842
843// ShuffleNet v1 with 2 groups.
844static void ShuffleNetV1G2(benchmark::internal::Benchmark* b) {
845 b->ArgNames({"N", "H", "W", "KH", "KW", "PH", "PW", "S", "D", "G", "GCin", "GCout"});
846
847 /*************************** Conv 1 **************************/
848 /* N H W KH KW PH PW S D G GCin GCout */
849 b->Args({1, 224, 224, 3, 3, 2, 2, 2, 1, 1, 3, 24});
850 /******************* Stage 2: stride-2 unit ******************/
851 /* N H W KH KW PH PW S D G GCin GCout */
852 b->Args({1, 56, 56, 1, 1, 0, 0, 1, 1, 1, 24, 50});
853 b->Args({1, 56, 56, 3, 3, 2, 2, 2, 1, 50, 1, 1});
854 b->Args({1, 28, 28, 1, 1, 0, 0, 1, 1, 2, 25, 88});
855 /******************* Stage 2: stride-1 units *****************/
856 /* N H W KH KW PH PW S D G GCin GCout */
857 b->Args({1, 28, 28, 1, 1, 0, 0, 1, 1, 2, 100, 25});
858 b->Args({1, 28, 28, 3, 3, 2, 2, 2, 1, 50, 1, 1});
859 b->Args({1, 28, 28, 1, 1, 0, 0, 1, 1, 2, 25, 100});
860 /******************* Stage 3: stride-2 unit ******************/
861 /* N H W KH KW PH PW S D G GCin GCout */
862 b->Args({1, 28, 28, 1, 1, 0, 0, 1, 1, 2, 100, 50});
863 b->Args({1, 28, 28, 3, 3, 2, 2, 2, 1, 100, 1, 1});
864 b->Args({1, 14, 14, 1, 1, 0, 0, 1, 1, 2, 50, 100});
865 /******************* Stage 3: stride-1 units *****************/
866 /* N H W KH KW PH PW S D G GCin GCout */
867 b->Args({1, 14, 14, 1, 1, 0, 0, 1, 1, 2, 200, 50});
868 b->Args({1, 14, 14, 3, 3, 2, 2, 2, 1, 100, 1, 1});
869 b->Args({1, 14, 14, 1, 1, 0, 0, 1, 1, 2, 50, 200});
870 /******************* Stage 4: stride-2 unit ******************/
871 /* N H W KH KW PH PW S D G GCin GCout */
872 b->Args({1, 14, 14, 1, 1, 0, 0, 1, 1, 2, 200, 100});
873 b->Args({1, 14, 14, 3, 3, 2, 2, 2, 1, 200, 1, 1});
874 b->Args({1, 7, 7, 1, 1, 0, 0, 1, 1, 2, 100, 200});
875 /******************* Stage 4: stride-1 units *****************/
876 /* N H W KH KW PH PW S D G GCin GCout */
877 b->Args({1, 7, 7, 1, 1, 0, 0, 1, 1, 2, 400, 100});
878 b->Args({1, 7, 7, 3, 3, 2, 2, 2, 1, 200, 1, 1});
879 b->Args({1, 7, 7, 1, 1, 0, 0, 1, 1, 2, 100, 400});
880}
881
882// ShuffleNet v1 with 3 groups.
883static void ShuffleNetV1G3(benchmark::internal::Benchmark* b) {
884 b->ArgNames({"N", "H", "W", "KH", "KW", "PH", "PW", "S", "D", "G", "GCin", "GCout"});
885
886 /*************************** Conv 1 **************************/
887 /* N H W KH KW PH PW S D G GCin GCout */
888 b->Args({1, 224, 224, 3, 3, 2, 2, 2, 1, 1, 3, 24});
889 /******************* Stage 2: stride-2 unit ******************/
890 /* N H W KH KW PH PW S D G GCin GCout */
891 b->Args({1, 56, 56, 1, 1, 0, 0, 1, 1, 1, 24, 60});
892 b->Args({1, 56, 56, 3, 3, 2, 2, 2, 1, 60, 1, 1});
893 b->Args({1, 28, 28, 1, 1, 0, 0, 1, 1, 3, 20, 72});
894 /******************* Stage 2: stride-1 units *****************/
895 /* N H W KH KW PH PW S D G GCin GCout */
896 b->Args({1, 28, 28, 1, 1, 0, 0, 1, 1, 3, 80, 20});
897 b->Args({1, 28, 28, 3, 3, 2, 2, 2, 1, 60, 1, 1});
898 b->Args({1, 28, 28, 1, 1, 0, 0, 1, 1, 3, 20, 80});
899 /******************* Stage 3: stride-2 unit ******************/
900 /* N H W KH KW PH PW S D G GCin GCout */
901 b->Args({1, 28, 28, 1, 1, 0, 0, 1, 1, 3, 80, 40});
902 b->Args({1, 28, 28, 3, 3, 2, 2, 2, 1, 120, 1, 1});
903 b->Args({1, 14, 14, 1, 1, 0, 0, 1, 1, 3, 40, 80});
904 /******************* Stage 3: stride-1 units *****************/
905 /* N H W KH KW PH PW S D G GCin GCout */
906 b->Args({1, 14, 14, 1, 1, 0, 0, 1, 1, 3, 160, 40});
907 b->Args({1, 14, 14, 3, 3, 2, 2, 2, 1, 120, 1, 1});
908 b->Args({1, 14, 14, 1, 1, 0, 0, 1, 1, 3, 40, 160});
909 /******************* Stage 4: stride-2 unit ******************/
910 /* N H W KH KW PH PW S D G GCin GCout */
911 b->Args({1, 14, 14, 1, 1, 0, 0, 1, 1, 3, 160, 80});
912 b->Args({1, 14, 14, 3, 3, 2, 2, 2, 1, 240, 1, 1});
913 b->Args({1, 7, 7, 1, 1, 0, 0, 1, 1, 3, 80, 160});
914 /******************* Stage 4: stride-1 units *****************/
915 /* N H W KH KW PH PW S D G GCin GCout */
916 b->Args({1, 7, 7, 1, 1, 0, 0, 1, 1, 3, 320, 80});
917 b->Args({1, 7, 7, 3, 3, 2, 2, 2, 1, 240, 1, 1});
918 b->Args({1, 7, 7, 1, 1, 0, 0, 1, 1, 3, 80, 320});
919}
920
921// ShuffleNet v1 with 4 groups.
922static void ShuffleNetV1G4(benchmark::internal::Benchmark* b) {
923 b->ArgNames({"N", "H", "W", "KH", "KW", "PH", "PW", "S", "D", "G", "GCin", "GCout"});
924
925 /*************************** Conv 1 **************************/
926 /* N H W KH KW PH PW S D G GCin GCout */
927 b->Args({1, 224, 224, 3, 3, 2, 2, 2, 1, 1, 3, 24});
928 /******************* Stage 2: stride-2 unit ******************/
929 /* N H W KH KW PH PW S D G GCin GCout */
930 b->Args({1, 56, 56, 1, 1, 0, 0, 1, 1, 1, 24, 68});
931 b->Args({1, 56, 56, 3, 3, 2, 2, 2, 1, 68, 1, 1});
932 b->Args({1, 28, 28, 1, 1, 0, 0, 1, 1, 4, 17, 62});
933 /******************* Stage 2: stride-1 units *****************/
934 /* N H W KH KW PH PW S D G GCin GCout */
935 b->Args({1, 28, 28, 1, 1, 0, 0, 1, 1, 4, 68, 17});
936 b->Args({1, 28, 28, 3, 3, 2, 2, 2, 1, 68, 1, 1});
937 b->Args({1, 28, 28, 1, 1, 0, 0, 1, 1, 4, 17, 68});
938 /******************* Stage 3: stride-2 unit ******************/
939 /* N H W KH KW PH PW S D G GCin GCout */
940 b->Args({1, 28, 28, 1, 1, 0, 0, 1, 1, 4, 68, 34});
941 b->Args({1, 28, 28, 3, 3, 2, 2, 2, 1, 136, 1, 1});
942 b->Args({1, 14, 14, 1, 1, 0, 0, 1, 1, 4, 34, 68});
943 /******************* Stage 3: stride-1 units *****************/
944 /* N H W KH KW PH PW S D G GCin GCout */
945 b->Args({1, 14, 14, 1, 1, 0, 0, 1, 1, 4, 136, 34});
946 b->Args({1, 14, 14, 3, 3, 2, 2, 2, 1, 136, 1, 1});
947 b->Args({1, 14, 14, 1, 1, 0, 0, 1, 1, 4, 34, 136});
948 /******************* Stage 4: stride-2 unit ******************/
949 /* N H W KH KW PH PW S D G GCin GCout */
950 b->Args({1, 14, 14, 1, 1, 0, 0, 1, 1, 4, 136, 68});
951 b->Args({1, 14, 14, 3, 3, 2, 2, 2, 1, 272, 1, 1});
952 b->Args({1, 7, 7, 1, 1, 0, 0, 1, 1, 4, 68, 136});
953 /******************* Stage 4: stride-1 units *****************/
954 /* N H W KH KW PH PW S D G GCin GCout */
955 b->Args({1, 7, 7, 1, 1, 0, 0, 1, 1, 4, 272, 68});
956 b->Args({1, 7, 7, 3, 3, 2, 2, 2, 1, 272, 1, 1});
957 b->Args({1, 7, 7, 1, 1, 0, 0, 1, 1, 4, 68, 272});
958}
959
960// ShuffleNet v1 with 8 groups.
961static void ShuffleNetV1G8(benchmark::internal::Benchmark* b) {
962 b->ArgNames({"N", "H", "W", "KH", "KW", "PH", "PW", "S", "D", "G", "GCin", "GCout"});
963
964 /*************************** Conv 1 **************************/
965 /* N H W KH KW PH PW S D G GCin GCout */
966 b->Args({1, 224, 224, 3, 3, 2, 2, 2, 1, 1, 3, 24});
967 /******************* Stage 2: stride-2 unit ******************/
968 /* N H W KH KW PH PW S D G GCin GCout */
969 b->Args({1, 56, 56, 1, 1, 0, 0, 1, 1, 1, 24, 96});
970 b->Args({1, 56, 56, 3, 3, 2, 2, 2, 1, 96, 1, 1});
971 b->Args({1, 28, 28, 1, 1, 0, 0, 1, 1, 8, 12, 45});
972 /******************* Stage 2: stride-1 units *****************/
973 /* N H W KH KW PH PW S D G GCin GCout */
974 b->Args({1, 28, 28, 1, 1, 0, 0, 1, 1, 8, 48, 12});
975 b->Args({1, 28, 28, 3, 3, 2, 2, 2, 1, 96, 1, 1});
976 b->Args({1, 28, 28, 1, 1, 0, 0, 1, 1, 8, 12, 48});
977 /******************* Stage 3: stride-2 unit ******************/
978 /* N H W KH KW PH PW S D G GCin GCout */
979 b->Args({1, 28, 28, 1, 1, 0, 0, 1, 1, 8, 48, 24});
980 b->Args({1, 28, 28, 3, 3, 2, 2, 2, 1, 192, 1, 1});
981 b->Args({1, 14, 14, 1, 1, 0, 0, 1, 1, 8, 24, 48});
982 /******************* Stage 3: stride-1 units *****************/
983 /* N H W KH KW PH PW S D G GCin GCout */
984 b->Args({1, 14, 14, 1, 1, 0, 0, 1, 1, 8, 96, 24});
985 b->Args({1, 14, 14, 3, 3, 2, 2, 2, 1, 192, 1, 1});
986 b->Args({1, 14, 14, 1, 1, 0, 0, 1, 1, 8, 24, 96});
987 /******************* Stage 4: stride-2 unit ******************/
988 /* N H W KH KW PH PW S D G GCin GCout */
989 b->Args({1, 14, 14, 1, 1, 0, 0, 1, 1, 8, 96, 48});
990 b->Args({1, 14, 14, 3, 3, 2, 2, 2, 1, 384, 1, 1});
991 b->Args({1, 7, 7, 1, 1, 0, 0, 1, 1, 8, 48, 96});
992 /******************* Stage 4: stride-1 units *****************/
993 /* N H W KH KW PH PW S D G GCin GCout */
994 b->Args({1, 7, 7, 1, 1, 0, 0, 1, 1, 8, 192, 48});
995 b->Args({1, 7, 7, 3, 3, 2, 2, 2, 1, 384, 1, 1});
996 b->Args({1, 7, 7, 1, 1, 0, 0, 1, 1, 8, 48, 192});
997}
998
999// ShuffleNet v2 (0.5X scale)
1000static void ShuffleNetV2X05(benchmark::internal::Benchmark* b) {
1001 b->ArgNames({"N", "H", "W", "KH", "KW", "PH", "PW", "S", "D", "G", "GCin", "GCout"});
1002
1003 /*************************** Conv 1 **************************/
1004 /* N H W KH KW PH PW S D G GCin GCout */
1005 b->Args({1, 224, 224, 3, 3, 2, 2, 2, 1, 1, 3, 24});
1006 /************************** Stage 2 **************************/
1007 /* N H W KH KW PH PW S D G GCin GCout */
1008 b->Args({1, 56, 56, 3, 3, 2, 2, 2, 1, 24, 1, 1});
1009 b->Args({1, 28, 28, 1, 1, 0, 0, 1, 1, 1, 24, 24});
1010 b->Args({1, 56, 56, 1, 1, 0, 0, 1, 1, 1, 24, 24});
1011 b->Args({1, 28, 28, 3, 3, 2, 2, 1, 1, 24, 1, 1});
1012 /************************** Stage 3 **************************/
1013 /* N H W KH KW PH PW S D G GCin GCout */
1014 b->Args({1, 28, 28, 3, 3, 2, 2, 2, 1, 48, 1, 1});
1015 b->Args({1, 14, 14, 1, 1, 0, 0, 1, 1, 1, 48, 48});
1016 b->Args({1, 28, 28, 1, 1, 0, 0, 1, 1, 1, 48, 48});
1017 b->Args({1, 14, 14, 3, 3, 2, 2, 1, 1, 48, 1, 1});
1018 /************************** Stage 4 **************************/
1019 /* N H W KH KW PH PW S D G GCin GCout */
1020 b->Args({1, 14, 14, 3, 3, 2, 2, 2, 1, 96, 1, 1});
1021 b->Args({1, 7, 7, 1, 1, 0, 0, 1, 1, 1, 96, 96});
1022 b->Args({1, 14, 14, 1, 1, 0, 0, 1, 1, 1, 96, 96});
1023 b->Args({1, 7, 7, 3, 3, 2, 2, 1, 1, 96, 1, 1});
1024 /*************************** Conv 5 **************************/
1025 /* N H W KH KW PH PW S D G GCin GCout */
1026 b->Args({1, 7, 7, 1, 1, 0, 0, 1, 1, 1, 192, 1024});
1027}
1028
1029// ShuffleNet v2 (1.0X scale)
1030static void ShuffleNetV2X10(benchmark::internal::Benchmark* b) {
1031 b->ArgNames({"N", "H", "W", "KH", "KW", "PH", "PW", "S", "D", "G", "GCin", "GCout"});
1032
1033 /*************************** Conv 1 **************************/
1034 /* N H W KH KW PH PW S D G GCin GCout */
1035 b->Args({1, 224, 224, 3, 3, 2, 2, 2, 1, 1, 3, 24});
1036 /************************** Stage 2 **************************/
1037 /* N H W KH KW PH PW S D G GCin GCout */
1038 b->Args({1, 56, 56, 3, 3, 2, 2, 2, 1, 24, 1, 1});
1039 b->Args({1, 28, 28, 1, 1, 0, 0, 1, 1, 1, 24, 58});
1040 b->Args({1, 56, 56, 1, 1, 0, 0, 1, 1, 1, 24, 58});
1041 b->Args({1, 56, 56, 3, 3, 2, 2, 2, 1, 58, 1, 1});
1042 b->Args({1, 28, 28, 1, 1, 0, 0, 1, 1, 1, 58, 58});
1043 b->Args({1, 28, 28, 3, 3, 2, 2, 1, 1, 58, 1, 1});
1044 /************************** Stage 3 **************************/
1045 /* N H W KH KW PH PW S D G GCin GCout */
1046 b->Args({1, 28, 28, 3, 3, 2, 2, 2, 1, 116, 1, 1});
1047 b->Args({1, 14, 14, 1, 1, 0, 0, 1, 1, 1, 116, 116});
1048 b->Args({1, 28, 28, 1, 1, 0, 0, 1, 1, 1, 116, 116});
1049 b->Args({1, 14, 14, 3, 3, 2, 2, 1, 1, 116, 1, 1});
1050 /************************** Stage 4 **************************/
1051 /* N H W KH KW PH PW S D G GCin GCout */
1052 b->Args({1, 14, 14, 3, 3, 2, 2, 2, 1, 232, 1, 1});
1053 b->Args({1, 7, 7, 1, 1, 0, 0, 1, 1, 1, 232, 232});
1054 b->Args({1, 14, 14, 1, 1, 0, 0, 1, 1, 1, 232, 232});
1055 b->Args({1, 7, 7, 3, 3, 2, 2, 1, 1, 232, 1, 1});
1056 /*************************** Conv 5 **************************/
1057 /* N H W KH KW PH PW S D G GCin GCout */
1058 b->Args({1, 7, 7, 1, 1, 0, 0, 1, 1, 1, 464, 1024});
1059}
1060
1061// ShuffleNet v2 (1.5X scale)
1062static void ShuffleNetV2X15(benchmark::internal::Benchmark* b) {
1063 b->ArgNames({"N", "H", "W", "KH", "KW", "PH", "PW", "S", "D", "G", "GCin", "GCout"});
1064
1065 /*************************** Conv 1 **************************/
1066 /* N H W KH KW PH PW S D G GCin GCout */
1067 b->Args({1, 224, 224, 3, 3, 2, 2, 2, 1, 1, 3, 24});
1068 /************************** Stage 2 **************************/
1069 /* N H W KH KW PH PW S D G GCin GCout */
1070 b->Args({1, 56, 56, 3, 3, 2, 2, 2, 1, 24, 1, 1});
1071 b->Args({1, 28, 28, 1, 1, 0, 0, 1, 1, 1, 24, 88});
1072 b->Args({1, 56, 56, 1, 1, 0, 0, 1, 1, 1, 24, 88});
1073 b->Args({1, 56, 56, 3, 3, 2, 2, 2, 1, 88, 1, 1});
1074 b->Args({1, 28, 28, 1, 1, 0, 0, 1, 1, 1, 88, 88});
1075 b->Args({1, 28, 28, 3, 3, 2, 2, 1, 1, 88, 1, 1});
1076 /************************** Stage 3 **************************/
1077 /* N H W KH KW PH PW S D G GCin GCout */
1078 b->Args({1, 28, 28, 3, 3, 2, 2, 2, 1, 176, 1, 1});
1079 b->Args({1, 14, 14, 1, 1, 0, 0, 1, 1, 1, 176, 176});
1080 b->Args({1, 28, 28, 1, 1, 0, 0, 1, 1, 1, 176, 176});
1081 b->Args({1, 14, 14, 3, 3, 2, 2, 1, 1, 176, 1, 1});
1082 /************************** Stage 4 **************************/
1083 /* N H W KH KW PH PW S D G GCin GCout */
1084 b->Args({1, 14, 14, 3, 3, 2, 2, 2, 1, 352, 1, 1});
1085 b->Args({1, 7, 7, 1, 1, 0, 0, 1, 1, 1, 352, 352});
1086 b->Args({1, 14, 14, 1, 1, 0, 0, 1, 1, 1, 352, 352});
1087 b->Args({1, 7, 7, 3, 3, 2, 2, 1, 1, 352, 1, 1});
1088 /*************************** Conv 5 **************************/
1089 /* N H W KH KW PH PW S D G GCin GCout */
1090 b->Args({1, 7, 7, 1, 1, 0, 0, 1, 1, 1, 704, 1024});
1091}
1092
1093// ShuffleNet v2 (2.0X scale)
1094static void ShuffleNetV2X20(benchmark::internal::Benchmark* b) {
1095 b->ArgNames({"N", "H", "W", "KH", "KW", "PH", "PW", "S", "D", "G", "GCin", "GCout"});
1096
1097 /*************************** Conv 1 **************************/
1098 /* N H W KH KW PH PW S D G GCin GCout */
1099 b->Args({1, 224, 224, 3, 3, 2, 2, 2, 1, 1, 3, 24});
1100 /************************** Stage 2 **************************/
1101 /* N H W KH KW PH PW S D G GCin GCout */
1102 b->Args({1, 56, 56, 3, 3, 2, 2, 2, 1, 24, 1, 1});
1103 b->Args({1, 28, 28, 1, 1, 0, 0, 1, 1, 1, 24, 122});
1104 b->Args({1, 56, 56, 1, 1, 0, 0, 1, 1, 1, 24, 122});
1105 b->Args({1, 56, 56, 3, 3, 2, 2, 2, 1, 122, 1, 1});
1106 b->Args({1, 28, 28, 1, 1, 0, 0, 1, 1, 1, 122, 122});
1107 b->Args({1, 28, 28, 3, 3, 2, 2, 1, 1, 122, 1, 1});
1108 /************************** Stage 3 **************************/
1109 /* N H W KH KW PH PW S D G GCin GCout */
1110 b->Args({1, 28, 28, 3, 3, 2, 2, 2, 1, 244, 1, 1});
1111 b->Args({1, 14, 14, 1, 1, 0, 0, 1, 1, 1, 244, 244});
1112 b->Args({1, 28, 28, 1, 1, 0, 0, 1, 1, 1, 244, 244});
1113 b->Args({1, 14, 14, 3, 3, 2, 2, 1, 1, 244, 1, 1});
1114 /************************** Stage 4 **************************/
1115 /* N H W KH KW PH PW S D G GCin GCout */
1116 b->Args({1, 14, 14, 3, 3, 2, 2, 2, 1, 488, 1, 1});
1117 b->Args({1, 7, 7, 1, 1, 0, 0, 1, 1, 1, 488, 488});
1118 b->Args({1, 14, 14, 1, 1, 0, 0, 1, 1, 1, 488, 488});
1119 b->Args({1, 7, 7, 3, 3, 2, 2, 1, 1, 488, 1, 1});
1120 /*************************** Conv 5 **************************/
1121 /* N H W KH KW PH PW S D G GCin GCout */
1122 b->Args({1, 7, 7, 1, 1, 0, 0, 1, 1, 1, 976, 2048});
1123}
1124
1125static void MobileNetV1(benchmark::internal::Benchmark* b) {
1126 b->ArgNames({"N", "H", "W", "KH", "KW", "PH", "PW", "S", "D", "G", "GCin", "GCout"});
1127
1128 /* N H W KH KW PH PW S D G GCin GCout */
1129 b->Args({1, 224, 224, 3, 3, 2, 2, 2, 1, 1, 3, 32});
1130 b->Args({1, 112, 112, 3, 3, 2, 2, 1, 1, 32, 1, 1});
1131 b->Args({1, 112, 112, 1, 1, 0, 0, 1, 1, 1, 32, 64});
1132 b->Args({1, 112, 112, 3, 3, 2, 2, 2, 1, 64, 1, 1});
1133 b->Args({1, 56, 56, 1, 1, 0, 0, 1, 1, 1, 64, 128});
1134 b->Args({1, 56, 56, 3, 3, 2, 2, 1, 1, 128, 1, 1});
1135 b->Args({1, 56, 56, 1, 1, 0, 0, 1, 1, 1, 128, 128});
1136 b->Args({1, 56, 56, 3, 3, 2, 2, 2, 1, 128, 1, 1});
1137 b->Args({1, 28, 28, 1, 1, 0, 0, 1, 1, 1, 128, 256});
1138 b->Args({1, 28, 28, 3, 3, 2, 2, 1, 1, 256, 1, 1});
1139 b->Args({1, 28, 28, 1, 1, 0, 0, 1, 1, 1, 256, 256});
1140 b->Args({1, 28, 28, 3, 3, 2, 2, 2, 1, 256, 1, 1});
1141 b->Args({1, 14, 14, 1, 1, 0, 0, 1, 1, 1, 256, 512});
1142 b->Args({1, 14, 14, 3, 3, 2, 2, 1, 1, 512, 1, 1});
1143 b->Args({1, 14, 14, 1, 1, 0, 0, 1, 1, 1, 512, 512});
1144 b->Args({1, 14, 14, 3, 3, 2, 2, 2, 1, 512, 1, 1});
1145 b->Args({1, 7, 7, 1, 1, 0, 0, 1, 1, 1, 512, 1024});
1146 b->Args({1, 7, 7, 3, 3, 2, 2, 1, 1, 1024, 1, 1});
1147 b->Args({1, 7, 7, 1, 1, 0, 0, 1, 1, 1, 1024, 1024});
1148}
1149
1150static void MobileNetV2(benchmark::internal::Benchmark* b) {
1151 b->ArgNames({"N", "H", "W", "KH", "KW", "PH", "PW", "S", "D", "G", "GCin", "GCout"});
1152
1153 /* N H W KH KW PH PW S D G GCin GCout */
1154 b->Args({1, 224, 224, 3, 3, 2, 2, 2, 1, 1, 3, 32});
1155
1156 /************************ Bottleneck 1 ***********************/
1157 /* N H W KH KW PH PW S D G GCin GCout */
1158 b->Args({1, 112, 112, 3, 3, 2, 2, 1, 1, 32, 1, 1});
1159 b->Args({1, 112, 112, 1, 1, 0, 0, 1, 1, 1, 32, 16});
1160
1161 /************************ Bottleneck 2 ***********************/
1162 /* N H W KH KW PH PW S D G GCin GCout */
1163 b->Args({1, 112, 112, 1, 1, 0, 0, 1, 1, 1, 16, 96});
1164 b->Args({1, 112, 112, 3, 3, 2, 2, 2, 1, 96, 1, 1});
1165 b->Args({1, 56, 56, 1, 1, 0, 0, 1, 1, 1, 96, 24});
1166 b->Args({1, 56, 56, 1, 1, 0, 0, 1, 1, 1, 24, 144});
1167 b->Args({1, 56, 56, 3, 3, 2, 2, 1, 1, 144, 1, 1});
1168 b->Args({1, 56, 56, 1, 1, 0, 0, 1, 1, 1, 144, 24});
1169
1170 /************************ Bottleneck 3 ***********************/
1171 /* N H W KH KW PH PW S D G GCin GCout */
1172//b->Args({1, 56, 56, 1, 1, 0, 0, 1, 1, 1, 24, 144});
1173 b->Args({1, 56, 56, 3, 3, 2, 2, 2, 1, 144, 1, 1});
1174 b->Args({1, 28, 28, 1, 1, 0, 0, 1, 1, 1, 144, 32});
1175 b->Args({1, 28, 28, 1, 1, 0, 0, 1, 1, 1, 32, 192});
1176 b->Args({1, 28, 28, 3, 3, 2, 2, 1, 1, 192, 1, 1});
1177 b->Args({1, 28, 28, 1, 1, 0, 0, 1, 1, 1, 192, 32});
1178//b->Args({1, 28, 28, 1, 1, 0, 0, 1, 1, 1, 32, 192});
1179//b->Args({1, 28, 28, 3, 3, 2, 2, 1, 1, 192, 1, 1});
1180//b->Args({1, 28, 28, 1, 1, 0, 0, 1, 1, 1, 192, 32});
1181
1182 /************************ Bottleneck 4 ***********************/
1183 /* N H W KH KW PH PW S D G GCin GCout */
1184//b->Args({1, 28, 28, 1, 1, 0, 0, 1, 1, 1, 32, 192});
1185 b->Args({1, 28, 28, 3, 3, 2, 2, 2, 1, 192, 1, 1});
1186 b->Args({1, 14, 14, 1, 1, 0, 0, 1, 1, 1, 192, 64});
1187 b->Args({1, 14, 14, 1, 1, 0, 0, 1, 1, 1, 64, 384});
1188 b->Args({1, 14, 14, 3, 3, 2, 2, 1, 1, 384, 1, 1});
1189 b->Args({1, 14, 14, 1, 1, 0, 0, 1, 1, 1, 384, 64});
1190//b->Args({1, 14, 14, 1, 1, 0, 0, 1, 1, 1, 64, 384});
1191//b->Args({1, 14, 14, 3, 3, 2, 2, 1, 1, 384, 1, 1});
1192//b->Args({1, 14, 14, 1, 1, 0, 0, 1, 1, 1, 384, 64});
1193//b->Args({1, 14, 14, 1, 1, 0, 0, 1, 1, 1, 64, 384});
1194//b->Args({1, 14, 14, 3, 3, 2, 2, 1, 1, 384, 1, 1});
1195//b->Args({1, 14, 14, 1, 1, 0, 0, 1, 1, 1, 384, 64});
1196
1197 /************************ Bottleneck 5 ***********************/
1198 /* N H W KH KW PH PW S D G GCin GCout */
1199//b->Args({1, 14, 14, 1, 1, 0, 0, 1, 1, 1, 64, 384});
1200//b->Args({1, 14, 14, 3, 3, 2, 2, 1, 1, 384, 1, 1});
1201 b->Args({1, 14, 14, 1, 1, 0, 0, 1, 1, 1, 384, 96});
1202 b->Args({1, 14, 14, 1, 1, 0, 0, 1, 1, 1, 96, 576});
1203 b->Args({1, 14, 14, 3, 3, 2, 2, 1, 1, 576, 1, 1});
1204 b->Args({1, 14, 14, 1, 1, 0, 0, 1, 1, 1, 576, 96});
1205//b->Args({1, 14, 14, 1, 1, 0, 0, 1, 1, 1, 96, 576});
1206//b->Args({1, 14, 14, 3, 3, 2, 2, 1, 1, 576, 1, 1});
1207//b->Args({1, 14, 14, 1, 1, 0, 0, 1, 1, 1, 576, 96});
1208
1209 /************************ Bottleneck 6 ***********************/
1210 /* N H W KH KW PH PW S D G GCin GCout */
1211//b->Args({1, 14, 14, 1, 1, 0, 0, 1, 1, 1, 96, 576});
1212 b->Args({1, 14, 14, 3, 3, 2, 2, 2, 1, 576, 1, 1});
1213 b->Args({1, 7, 7, 1, 1, 0, 0, 1, 1, 1, 576, 160});
1214 b->Args({1, 7, 7, 1, 1, 0, 0, 1, 1, 1, 160, 960});
1215 b->Args({1, 7, 7, 3, 3, 2, 2, 1, 1, 960, 1, 1});
1216 b->Args({1, 7, 7, 1, 1, 0, 0, 1, 1, 1, 960, 160});
1217//b->Args({1, 7, 7, 1, 1, 0, 0, 1, 1, 1, 160, 960});
1218//b->Args({1, 7, 7, 3, 3, 2, 2, 1, 1, 960, 1, 1});
1219//b->Args({1, 7, 7, 1, 1, 0, 0, 1, 1, 1, 960, 160});
1220
1221 /************************ Bottleneck 7 ***********************/
1222 /* N H W KH KW PH PW S D G GCin GCout */
1223//b->Args({1, 7, 7, 1, 1, 0, 0, 1, 1, 1, 160, 960});
1224//b->Args({1, 7, 7, 3, 3, 2, 2, 1, 1, 960, 1, 1});
1225 b->Args({1, 7, 7, 1, 1, 0, 0, 1, 1, 1, 960, 320});
1226
1227 /******************** Pre-pooling Conv2D *********************/
1228 /* N H W KH KW PH PW S D G GCin GCout */
1229 b->Args({1, 7, 7, 1, 1, 0, 0, 1, 1, 1, 320, 1280});
1230 /******************** Post-pooling Conv2D ********************/
1231 /* N H W KH KW PH PW S D G GCin GCout */
1232 b->Args({1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1280, 1000});
1233}
1234
1235static void MobileNetV3Small(benchmark::internal::Benchmark* b) {
1236 b->ArgNames({"N", "H", "W", "KH", "KW", "PH", "PW", "S", "D", "G", "GCin", "GCout"});
1237
1238 /*********************** Initial Stage ***********************/
1239 /* N H W KH KW PH PW S D G GCin GCout */
1240 b->Args({1, 224, 224, 3, 3, 2, 2, 2, 1, 1, 3, 16});
1241 /*********************** Bottleneck 1 ************************/
1242 /* N H W KH KW PH PW S D G GCin GCout */
1243 b->Args({1, 112, 112, 3, 3, 2, 2, 2, 1, 16, 1, 1});
1244 b->Args({1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 16, 8});
1245 b->Args({1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 8, 16});
1246 b->Args({1, 56, 56, 1, 1, 0, 0, 1, 1, 1, 16, 16});
1247 /*********************** Bottleneck 2 ************************/
1248 /* N H W KH KW PH PW S D G GCin GCout */
1249 b->Args({1, 56, 56, 1, 1, 0, 0, 1, 1, 1, 16, 72});
1250 b->Args({1, 56, 56, 3, 3, 2, 2, 2, 1, 72, 1, 1});
1251 b->Args({1, 28, 28, 1, 1, 0, 0, 1, 1, 1, 72, 24});
1252 /*********************** Bottleneck 3 ************************/
1253 /* N H W KH KW PH PW S D G GCin GCout */
1254 b->Args({1, 28, 28, 1, 1, 0, 0, 1, 1, 1, 24, 88});
1255 b->Args({1, 28, 28, 3, 3, 2, 2, 1, 1, 88, 1, 1});
1256 b->Args({1, 28, 28, 1, 1, 0, 0, 1, 1, 1, 88, 24});
1257 /*********************** Bottleneck 4 ************************/
1258 /* N H W KH KW PH PW S D G GCin GCout */
1259 b->Args({1, 28, 28, 1, 1, 0, 0, 1, 1, 1, 24, 96});
1260 b->Args({1, 28, 28, 5, 5, 4, 4, 2, 1, 96, 1, 1});
1261 b->Args({1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 96, 24});
1262 b->Args({1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 24, 96});
1263 b->Args({1, 14, 14, 1, 1, 0, 0, 1, 1, 1, 96, 40});
1264 /*********************** Bottleneck 5 ************************/
1265 /* N H W KH KW PH PW S D G GCin GCout */
1266 b->Args({1, 14, 14, 1, 1, 0, 0, 1, 1, 1, 40, 240});
1267 b->Args({1, 14, 14, 5, 5, 4, 4, 1, 1, 240, 1, 1});
1268 b->Args({1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 240, 64});
1269 b->Args({1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 64, 240});
1270 b->Args({1, 14, 14, 1, 1, 0, 0, 1, 1, 1, 240, 40});
1271 /*********************** Bottleneck 6 ************************/
1272 /* N H W KH KW PH PW S D G GCin GCout */
1273//b->Args({1, 14, 14, 1, 1, 0, 0, 1, 1, 1, 40, 240});
1274//b->Args({1, 14, 14, 5, 5, 4, 4, 1, 1, 240, 1, 1});
1275//b->Args({1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 240, 64});
1276//b->Args({1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 64, 240});
1277//b->Args({1, 14, 14, 1, 1, 0, 0, 1, 1, 1, 240, 40});
1278 /*********************** Bottleneck 7 ************************/
1279 /* N H W KH KW PH PW S D G GCin GCout */
1280 b->Args({1, 14, 14, 1, 1, 0, 0, 1, 1, 1, 40, 120});
1281 b->Args({1, 14, 14, 5, 5, 4, 4, 1, 1, 120, 1, 1});
1282 b->Args({1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 120, 32});
1283 b->Args({1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 32, 120});
1284 b->Args({1, 14, 14, 1, 1, 0, 0, 1, 1, 1, 120, 48});
1285 /*********************** Bottleneck 8 ************************/
1286 /* N H W KH KW PH PW S D G GCin GCout */
1287 b->Args({1, 14, 14, 1, 1, 0, 0, 1, 1, 1, 48, 144});
1288 b->Args({1, 14, 14, 5, 5, 4, 4, 1, 1, 144, 1, 1});
1289 b->Args({1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 144, 40});
1290 b->Args({1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 40, 144});
1291 b->Args({1, 14, 14, 1, 1, 0, 0, 1, 1, 1, 144, 48});
1292 /*********************** Bottleneck 9 ************************/
1293 /* N H W KH KW PH PW S D G GCin GCout */
1294 b->Args({1, 14, 14, 1, 1, 0, 0, 1, 1, 1, 48, 288});
1295 b->Args({1, 14, 14, 5, 5, 4, 4, 2, 1, 288, 1, 1});
1296 b->Args({1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 288, 72});
1297 b->Args({1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 72, 288});
1298 b->Args({1, 7, 7, 1, 1, 0, 0, 1, 1, 1, 288, 96});
1299 /*********************** Bottleneck 10 ***********************/
1300 /* N H W KH KW PH PW S D G GCin GCout */
1301 b->Args({1, 7, 7, 1, 1, 0, 0, 1, 1, 1, 96, 576});
1302 b->Args({1, 7, 7, 5, 5, 4, 4, 1, 1, 576, 1, 1});
1303 b->Args({1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 576, 144});
1304 b->Args({1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 144, 576});
1305 b->Args({1, 7, 7, 1, 1, 0, 0, 1, 1, 1, 576, 96});
1306 /*********************** Bottleneck 11 ***********************/
1307 /* N H W KH KW PH PW S D G GCin GCout */
1308//b->Args({1, 7, 7, 1, 1, 0, 0, 1, 1, 1, 96, 576});
1309//b->Args({1, 7, 7, 5, 5, 4, 4, 1, 1, 576, 1, 1});
1310//b->Args({1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 576, 144});
1311//b->Args({1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 144, 576});
1312//b->Args({1, 7, 7, 1, 1, 0, 0, 1, 1, 1, 576, 96});
1313 /************************ Last Stage ************************/
1314 /* N H W KH KW PH PW S D G GCin GCout */
1315//b->Args({1, 7, 7, 1, 1, 0, 0, 1, 1, 1, 96, 576});
1316 b->Args({1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 576, 1024});
1317 b->Args({1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1024, 1001});
1318}
1319
1320static void MobileNetV3Large(benchmark::internal::Benchmark* b) {
1321 b->ArgNames({"N", "H", "W", "KH", "KW", "PH", "PW", "S", "D", "G", "GCin", "GCout"});
1322
1323 /*********************** Initial Stage ***********************/
1324 /* N H W KH KW PH PW S D G GCin GCout */
1325 b->Args({1, 224, 224, 3, 3, 2, 2, 2, 1, 1, 3, 16});
1326 /*********************** Bottleneck 1 ************************/
1327 /* N H W KH KW PH PW S D G GCin GCout */
1328 b->Args({1, 112, 112, 3, 3, 2, 2, 1, 1, 16, 1, 1});
1329 b->Args({1, 112, 112, 1, 1, 0, 0, 1, 1, 1, 16, 16});
1330 /*********************** Bottleneck 2 ************************/
1331 /* N H W KH KW PH PW S D G GCin GCout */
1332 b->Args({1, 112, 112, 1, 1, 0, 0, 1, 1, 1, 16, 64});
1333 b->Args({1, 112, 112, 3, 3, 2, 2, 2, 1, 64, 1, 1});
1334 b->Args({1, 56, 56, 1, 1, 0, 0, 1, 1, 1, 64, 24});
1335 /*********************** Bottleneck 3 ************************/
1336 /* N H W KH KW PH PW S D G GCin GCout */
1337 b->Args({1, 56, 56, 1, 1, 0, 0, 1, 1, 1, 24, 72});
1338 b->Args({1, 56, 56, 3, 3, 2, 2, 1, 1, 72, 1, 1});
1339 b->Args({1, 56, 56, 1, 1, 0, 0, 1, 1, 1, 72, 24});
1340 /*********************** Bottleneck 4 ************************/
1341 /* N H W KH KW PH PW S D G GCin GCout */
1342//b->Args({1, 56, 56, 1, 1, 0, 0, 1, 1, 1, 24, 72});
1343 b->Args({1, 56, 56, 5, 5, 4, 4, 2, 1, 72, 1, 1});
1344 b->Args({1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 72, 24});
1345 b->Args({1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 24, 72});
1346 b->Args({1, 28, 28, 1, 1, 0, 0, 1, 1, 1, 72, 40});
1347 /*********************** Bottleneck 5 ************************/
1348 /* N H W KH KW PH PW S D G GCin GCout */
1349 b->Args({1, 28, 28, 1, 1, 0, 0, 1, 1, 1, 40, 120});
1350 b->Args({1, 28, 28, 5, 5, 4, 4, 1, 1, 120, 1, 1});
1351 b->Args({1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 120, 32});
1352 b->Args({1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 32, 120});
1353 b->Args({1, 28, 28, 1, 1, 0, 0, 1, 1, 1, 120, 40});
1354 /*********************** Bottleneck 6 ************************/
1355 /* N H W KH KW PH PW S D G GCin GCout */
1356//b->Args({1, 28, 28, 1, 1, 0, 0, 1, 1, 1, 40, 120});
1357//b->Args({1, 28, 28, 5, 5, 4, 4, 1, 1, 120, 1, 1});
1358//b->Args({1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 120, 32});
1359//b->Args({1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 32, 120});
1360//b->Args({1, 28, 28, 1, 1, 0, 0, 1, 1, 1, 120, 40});
1361 /*********************** Bottleneck 7 ************************/
1362 /* N H W KH KW PH PW S D G GCin GCout */
1363 b->Args({1, 28, 28, 1, 1, 0, 0, 1, 1, 1, 40, 240});
1364 b->Args({1, 28, 28, 3, 3, 2, 2, 2, 1, 240, 1, 1});
1365 b->Args({1, 14, 14, 1, 1, 0, 0, 1, 1, 1, 240, 80});
1366 /*********************** Bottleneck 8 ************************/
1367 /* N H W KH KW PH PW S D G GCin GCout */
1368 b->Args({1, 14, 14, 1, 1, 0, 0, 1, 1, 1, 80, 200});
1369 b->Args({1, 14, 14, 3, 3, 2, 2, 1, 1, 200, 1, 1});
1370 b->Args({1, 14, 14, 1, 1, 0, 0, 1, 1, 1, 200, 80});
1371 /*********************** Bottleneck 9 ************************/
1372 /* N H W KH KW PH PW S D G GCin GCout */
1373 b->Args({1, 14, 14, 1, 1, 0, 0, 1, 1, 1, 80, 184});
1374 b->Args({1, 14, 14, 3, 3, 2, 2, 1, 1, 184, 1, 1});
1375 b->Args({1, 14, 14, 1, 1, 0, 0, 1, 1, 1, 184, 80});
1376 /********************** Bottleneck 10 ***********************/
1377 /* N H W KH KW PH PW S D G GCin GCout */
1378//b->Args({1, 14, 14, 1, 1, 0, 0, 1, 1, 1, 80, 184});
1379//b->Args({1, 14, 14, 3, 3, 2, 2, 1, 1, 184, 1, 1});
1380//b->Args({1, 14, 14, 1, 1, 0, 0, 1, 1, 1, 184, 80});
1381 /********************** Bottleneck 11 ***********************/
1382 /* N H W KH KW PH PW S D G GCin GCout */
1383 b->Args({1, 14, 14, 1, 1, 0, 0, 1, 1, 1, 80, 480});
1384 b->Args({1, 14, 14, 3, 3, 2, 2, 1, 1, 480, 1, 1});
1385 b->Args({1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 480, 120});
1386 b->Args({1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 120, 480});
1387 b->Args({1, 14, 14, 1, 1, 0, 0, 1, 1, 1, 480, 112});
1388 /********************** Bottleneck 12 ***********************/
1389 /* N H W KH KW PH PW S D G GCin GCout */
1390 b->Args({1, 14, 14, 1, 1, 0, 0, 1, 1, 1, 112, 672});
1391 b->Args({1, 14, 14, 3, 3, 2, 2, 1, 1, 672, 1, 1});
1392 b->Args({1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 672, 168});
1393 b->Args({1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 168, 672});
1394 b->Args({1, 14, 14, 1, 1, 0, 0, 1, 1, 1, 672, 112});
1395 /********************** Bottleneck 13 ***********************/
1396 /* N H W KH KW PH PW S D G GCin GCout */
1397//b->Args({1, 14, 14, 1, 1, 0, 0, 1, 1, 1, 112, 672});
1398 b->Args({1, 14, 14, 5, 5, 4, 4, 2, 1, 672, 1, 1});
1399 b->Args({1, 7, 7, 1, 1, 0, 0, 1, 1, 1, 672, 160});
1400 /********************** Bottleneck 14 ***********************/
1401 /* N H W KH KW PH PW S D G GCin GCout */
1402 b->Args({1, 7, 7, 1, 1, 0, 0, 1, 1, 1, 160, 960});
1403 b->Args({1, 7, 7, 5, 5, 4, 4, 1, 1, 960, 1, 1});
1404 b->Args({1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 960, 240});
1405 b->Args({1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 240, 960});
1406 b->Args({1, 7, 7, 1, 1, 0, 0, 1, 1, 1, 960, 160});
1407 /********************** Bottleneck 15 ***********************/
1408 /* N H W KH KW PH PW S D G GCin GCout */
1409//b->Args({1, 7, 7, 1, 1, 0, 0, 1, 1, 1, 160, 960});
1410//b->Args({1, 7, 7, 5, 5, 4, 4, 1, 1, 960, 1, 1});
1411//b->Args({1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 960, 240});
1412//b->Args({1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 240, 960});
1413//b->Args({1, 7, 7, 1, 1, 0, 0, 1, 1, 1, 960, 160});
1414 /************************ Last Stage ***********************/
1415 /* N H W KH KW PH PW S D G GCin GCout */
1416//b->Args({1, 7, 7, 1, 1, 0, 0, 1, 1, 1, 160, 960});
1417 b->Args({1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 960, 1280});
1418 b->Args({1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1280, 1001});
1419}
1420
1421// SqueezeNet 1.0
1422static void SqueezeNetV10(benchmark::internal::Benchmark* b) {
1423 b->ArgNames({"N", "H", "W", "KH", "KW", "PH", "PW", "S", "D", "G", "GCin", "GCout"});
1424
1425 /************************** Conv 1 *************************/
1426 /* N H W KH KW PH PW S D G GCin GCout */
1427 b->Args({1, 224, 224, 7, 7, 6, 6, 2, 1, 1, 3, 96});
1428 /************************** Fire 2 *************************/
1429 /* N H W KH KW PH PW S D G GCin GCout */
1430 b->Args({1, 55, 55, 1, 1, 0, 0, 1, 1, 1, 96, 16});
1431 b->Args({1, 55, 55, 1, 1, 0, 0, 1, 1, 1, 16, 64});
1432 b->Args({1, 55, 55, 3, 3, 2, 2, 1, 1, 1, 16, 64});
1433 /************************** Fire 3 *************************/
1434 /* N H W KH KW PH PW S D G GCin GCout */
1435 b->Args({1, 56, 55, 1, 1, 0, 0, 1, 1, 1, 128, 16});
1436//b->Args({1, 55, 55, 1, 1, 0, 0, 1, 1, 1, 16, 64});
1437//b->Args({1, 55, 55, 3, 3, 2, 2, 1, 1, 1, 16, 64});
1438 /************************** Fire 4 *************************/
1439 /* N H W KH KW PH PW S D G GCin GCout */
1440 b->Args({1, 55, 55, 1, 1, 0, 0, 1, 1, 1, 128, 32});
1441 b->Args({1, 55, 55, 1, 1, 0, 0, 1, 1, 1, 32, 128});
1442 b->Args({1, 55, 55, 3, 3, 2, 2, 1, 1, 1, 32, 128});
1443 /************************** Fire 5 *************************/
1444 /* N H W KH KW PH PW S D G GCin GCout */
1445 b->Args({1, 27, 27, 1, 1, 0, 0, 1, 1, 1, 256, 32});
1446 b->Args({1, 27, 27, 1, 1, 0, 0, 1, 1, 1, 32, 128});
1447 b->Args({1, 27, 27, 3, 3, 2, 2, 1, 1, 1, 32, 128});
1448 /************************** Fire 6 *************************/
1449 /* N H W KH KW PH PW S D G GCin GCout */
1450 b->Args({1, 27, 27, 1, 1, 0, 0, 1, 1, 1, 256, 48});
1451 b->Args({1, 27, 27, 1, 1, 0, 0, 1, 1, 1, 48, 192});
1452 b->Args({1, 27, 27, 3, 3, 2, 2, 1, 1, 1, 48, 192});
1453 /************************** Fire 7 *************************/
1454 /* N H W KH KW PH PW S D G GCin GCout */
1455 b->Args({1, 27, 27, 1, 1, 0, 0, 1, 1, 1, 384, 48});
1456//b->Args({1, 27, 27, 1, 1, 0, 0, 1, 1, 1, 48, 192});
1457//b->Args({1, 27, 27, 3, 3, 2, 2, 1, 1, 1, 48, 192});
1458 /************************** Fire 8 *************************/
1459 /* N H W KH KW PH PW S D G GCin GCout */
1460 b->Args({1, 27, 27, 1, 1, 0, 0, 1, 1, 1, 384, 64});
1461 b->Args({1, 27, 27, 1, 1, 0, 0, 1, 1, 1, 64, 256});
1462 b->Args({1, 27, 27, 3, 3, 2, 2, 1, 1, 1, 64, 256});
1463 /************************** Fire 9 *************************/
1464 /* N H W KH KW PH PW S D G GCin GCout */
1465 b->Args({1, 13, 13, 1, 1, 0, 0, 1, 1, 1, 512, 64});
1466 b->Args({1, 13, 13, 1, 1, 0, 0, 1, 1, 1, 64, 256});
1467 b->Args({1, 13, 13, 3, 3, 2, 2, 1, 1, 1, 64, 256});
1468 /************************* Conv 10 *************************/
1469 /* N H W KH KW PH PW S D G GCin GCout */
1470 b->Args({1, 13, 13, 1, 1, 0, 0, 1, 1, 1, 512, 1000});
1471}
1472
1473// SqueezeNet 1.1
1474static void SqueezeNetV11(benchmark::internal::Benchmark* b) {
1475 b->ArgNames({"N", "H", "W", "KH", "KW", "PH", "PW", "S", "D", "G", "GCin", "GCout"});
1476
1477 /************************** Conv 1 *************************/
1478 /* N H W KH KW PH PW S D G GCin GCout */
1479 b->Args({1, 224, 224, 3, 3, 2, 2, 2, 1, 1, 3, 64});
1480 /************************** Fire 2 *************************/
1481 /* N H W KH KW PH PW S D G GCin GCout */
1482 b->Args({1, 55, 55, 1, 1, 0, 0, 1, 1, 1, 64, 16});
1483 b->Args({1, 55, 55, 1, 1, 0, 0, 1, 1, 1, 16, 64});
1484 b->Args({1, 55, 55, 3, 3, 2, 2, 1, 1, 1, 16, 64});
1485 /************************** Fire 3 *************************/
1486 /* N H W KH KW PH PW S D G GCin GCout */
1487 b->Args({1, 55, 55, 1, 1, 0, 0, 1, 1, 1, 128, 16});
1488//b->Args({1, 55, 55, 1, 1, 0, 0, 1, 1, 1, 16, 64});
1489//b->Args({1, 55, 55, 3, 3, 2, 2, 1, 1, 1, 16, 64});
1490 /************************** Fire 4 *************************/
1491 /* N H W KH KW PH PW S D G GCin GCout */
1492 b->Args({1, 27, 27, 1, 1, 0, 0, 1, 1, 1, 128, 32});
1493 b->Args({1, 27, 27, 1, 1, 0, 0, 1, 1, 1, 32, 128});
1494 b->Args({1, 27, 27, 3, 3, 2, 2, 1, 1, 1, 32, 128});
1495 /************************** Fire 5 *************************/
1496 /* N H W KH KW PH PW S D G GCin GCout */
1497 b->Args({1, 27, 27, 1, 1, 0, 0, 1, 1, 1, 256, 32});
1498//b->Args({1, 27, 27, 1, 1, 0, 0, 1, 1, 1, 32, 128});
1499//b->Args({1, 27, 27, 3, 3, 2, 2, 1, 1, 1, 32, 128});
1500 /************************** Fire 6 *************************/
1501 /* N H W KH KW PH PW S D G GCin GCout */
1502 b->Args({1, 13, 13, 1, 1, 0, 0, 1, 1, 1, 256, 48});
1503 b->Args({1, 13, 13, 1, 1, 0, 0, 1, 1, 1, 48, 192});
1504 b->Args({1, 13, 13, 3, 3, 2, 2, 1, 1, 1, 48, 192});
1505 /************************** Fire 7 *************************/
1506 /* N H W KH KW PH PW S D G GCin GCout */
1507 b->Args({1, 13, 13, 1, 1, 0, 0, 1, 1, 1, 384, 48});
1508//b->Args({1, 13, 13, 1, 1, 0, 0, 1, 1, 1, 48, 192});
1509//b->Args({1, 13, 13, 3, 3, 2, 2, 1, 1, 1, 48, 192});
1510 /************************** Fire 8 *************************/
1511 /* N H W KH KW PH PW S D G GCin GCout */
1512 b->Args({1, 13, 13, 1, 1, 0, 0, 1, 1, 1, 384, 64});
1513 b->Args({1, 13, 13, 1, 1, 0, 0, 1, 1, 1, 64, 256});
1514 b->Args({1, 13, 13, 3, 3, 2, 2, 1, 1, 1, 64, 256});
1515 /************************** Fire 9 *************************/
1516 /* N H W KH KW PH PW S D G GCin GCout */
1517 b->Args({1, 13, 13, 1, 1, 0, 0, 1, 1, 1, 512, 64});
1518//b->Args({1, 13, 13, 1, 1, 0, 0, 1, 1, 1, 64, 256});
1519//b->Args({1, 13, 13, 3, 3, 2, 2, 1, 1, 1, 64, 256});
1520 /************************* Conv 10 *************************/
1521 /* N H W KH KW PH PW S D G GCin GCout */
1522 b->Args({1, 13, 13, 1, 1, 0, 0, 1, 1, 1, 512, 1000});
1523}
1524
// Inception v3
// Registers the distinct convolution shapes of Inception v3 (299x299 input).
// Each row is {N, H, W, KH, KW, PH, PW, S, D, G, GCin, GCout}: batch, input
// height/width, kernel height/width, total (left+right) padding, stride,
// dilation, groups, and per-group input/output channels. Duplicate shapes
// across repeated inception modules are listed only once.
static void InceptionV3(benchmark::internal::Benchmark* b) {
  b->ArgNames({"N", "H", "W", "KH", "KW", "PH", "PW", "S", "D", "G", "GCin", "GCout"});

  /* N H W KH KW PH PW S D G GCin GCout */
  // Stem: unpadded 3x3 convolutions reducing 299x299 down to 35x35.
  b->Args({1, 299, 299, 3, 3, 0, 0, 2, 1, 1, 3, 32});
  b->Args({1, 149, 149, 3, 3, 0, 0, 1, 1, 1, 32, 32});
  b->Args({1, 147, 147, 3, 3, 2, 2, 1, 1, 1, 32, 64});
  b->Args({1, 73, 73, 1, 1, 0, 0, 1, 1, 1, 64, 80});
  b->Args({1, 73, 73, 3, 3, 0, 0, 1, 1, 1, 80, 192});
  // 35x35 modules: 1x1, 5x5, and double-3x3 branches.
  b->Args({1, 35, 35, 1, 1, 0, 0, 1, 1, 1, 192, 64});
  b->Args({1, 35, 35, 1, 1, 0, 0, 1, 1, 1, 192, 48});
  b->Args({1, 35, 35, 5, 5, 4, 4, 1, 1, 1, 48, 64});
  b->Args({1, 35, 35, 3, 3, 2, 2, 1, 1, 1, 64, 96});
  b->Args({1, 35, 35, 3, 3, 2, 2, 1, 1, 1, 96, 96});
  b->Args({1, 35, 35, 1, 1, 0, 0, 1, 1, 1, 192, 32});
  b->Args({1, 35, 35, 1, 1, 0, 0, 1, 1, 1, 256, 64});
  b->Args({1, 35, 35, 1, 1, 0, 0, 1, 1, 1, 256, 48});
  b->Args({1, 35, 35, 1, 1, 0, 0, 1, 1, 1, 288, 64});
  b->Args({1, 35, 35, 1, 1, 0, 0, 1, 1, 1, 288, 48});
  // Strided (S=2), unpadded 3x3 grid reduction: 35x35 -> 17x17.
  b->Args({1, 35, 35, 3, 3, 0, 0, 2, 1, 1, 288, 384});
  b->Args({1, 35, 35, 3, 3, 0, 0, 2, 1, 1, 96, 96});
  // 17x17 modules: asymmetric 1x7 / 7x1 factorized convolutions.
  b->Args({1, 17, 17, 1, 1, 0, 0, 1, 1, 1, 768, 192});
  b->Args({1, 17, 17, 1, 1, 0, 0, 1, 1, 1, 768, 128});
  b->Args({1, 17, 17, 1, 7, 0, 6, 1, 1, 1, 128, 128});
  b->Args({1, 17, 17, 7, 1, 6, 0, 1, 1, 1, 128, 192});
  b->Args({1, 17, 17, 7, 1, 6, 0, 1, 1, 1, 128, 128});
  b->Args({1, 17, 17, 1, 7, 0, 6, 1, 1, 1, 128, 192});
  b->Args({1, 17, 17, 1, 1, 0, 0, 1, 1, 1, 768, 160});
  b->Args({1, 17, 17, 1, 7, 0, 6, 1, 1, 1, 160, 160});
  b->Args({1, 17, 17, 7, 1, 6, 0, 1, 1, 1, 160, 192});
  b->Args({1, 17, 17, 7, 1, 6, 0, 1, 1, 1, 160, 160});
  b->Args({1, 17, 17, 1, 7, 0, 6, 1, 1, 1, 160, 192});
  b->Args({1, 17, 17, 1, 7, 0, 6, 1, 1, 1, 192, 192});
  b->Args({1, 17, 17, 7, 1, 6, 0, 1, 1, 1, 192, 192});
  // Strided (S=2), unpadded 3x3 grid reduction: 17x17 -> 8x8.
  b->Args({1, 17, 17, 3, 3, 0, 0, 2, 1, 1, 192, 320});
  b->Args({1, 17, 17, 3, 3, 0, 0, 2, 1, 1, 192, 192});
  // 8x8 modules: 1x1, 1x3/3x1, and 3x3 branches.
  b->Args({1, 8, 8, 1, 1, 0, 0, 1, 1, 1, 1280, 320});
  b->Args({1, 8, 8, 1, 1, 0, 0, 1, 1, 1, 1280, 384});
  b->Args({1, 8, 8, 1, 3, 0, 2, 1, 1, 1, 384, 384});
  b->Args({1, 8, 8, 3, 1, 2, 0, 1, 1, 1, 384, 384});
  b->Args({1, 8, 8, 1, 1, 0, 0, 1, 1, 1, 1280, 448});
  b->Args({1, 8, 8, 3, 3, 2, 2, 1, 1, 1, 448, 384});
  b->Args({1, 8, 8, 1, 1, 0, 0, 1, 1, 1, 1280, 192});
  b->Args({1, 8, 8, 1, 1, 0, 0, 1, 1, 1, 2048, 320});
  b->Args({1, 8, 8, 1, 1, 0, 0, 1, 1, 1, 2048, 384});
  b->Args({1, 8, 8, 1, 1, 0, 0, 1, 1, 1, 2048, 448});
  b->Args({1, 8, 8, 1, 1, 0, 0, 1, 1, 1, 2048, 192});
  // Final 1x1 convolution on pooled 1x1 features producing 1001 outputs
  // (presumably the classifier head — 1000 classes + background).
  b->Args({1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 2048, 1001});
}
1574
1575static void ResNet18(benchmark::internal::Benchmark* b) {
1576 b->ArgNames({"N", "H", "W", "KH", "KW", "PH", "PW", "S", "D", "G", "GCin", "GCout"});
1577
1578 /************************* Conv 1 *************************/
1579 /* N H W KH KW PH PW S D G GCin GCout */
1580 b->Args({1, 224, 224, 7, 7, 6, 6, 2, 1, 1, 3, 64});
1581 /************************ Conv 2.X ************************/
1582 /* N H W KH KW PH PW S D G GCin GCout */
1583 b->Args({1, 56, 56, 3, 3, 2, 2, 1, 1, 1, 64, 64});
1584 /************************ Conv 3.X ************************/
1585 /* N H W KH KW PH PW S D G GCin GCout */
1586 b->Args({1, 56, 56, 3, 3, 2, 2, 2, 1, 1, 64, 128});
1587 b->Args({1, 28, 28, 3, 3, 2, 2, 1, 1, 1, 128, 128});
1588 b->Args({1, 56, 56, 1, 1, 0, 0, 2, 1, 1, 64, 128});
1589 /************************ Conv 4.X ************************/
1590 /* N H W KH KW PH PW S D G GCin GCout */
1591 b->Args({1, 28, 28, 3, 3, 2, 2, 2, 1, 1, 128, 256});
1592 b->Args({1, 14, 14, 3, 3, 2, 2, 1, 1, 1, 256, 256});
1593 b->Args({1, 28, 28, 1, 1, 0, 0, 2, 1, 1, 128, 256});
1594 /************************ Conv 5.X ************************/
1595 /* N H W KH KW PH PW S D G GCin GCout */
1596 b->Args({1, 14, 14, 3, 3, 2, 2, 2, 1, 1, 256, 512});
1597 b->Args({1, 7, 7, 3, 3, 2, 2, 1, 1, 1, 512, 512});
1598 b->Args({1, 14, 14, 1, 1, 0, 0, 2, 1, 1, 256, 512});
1599}
1600
// ResNet-50
// Registers the distinct convolution shapes of ResNet-50's bottleneck blocks.
// Each row is {N, H, W, KH, KW, PH, PW, S, D, G, GCin, GCout}: batch, input
// height/width, kernel height/width, total (left+right) padding, stride,
// dilation, groups, and per-group input/output channels. Rows that a later
// block would repeat are registered once; commented-out rows record the
// duplicates.
static void ResNet50(benchmark::internal::Benchmark* b) {
  b->ArgNames({"N", "H", "W", "KH", "KW", "PH", "PW", "S", "D", "G", "GCin", "GCout"});

  /************************* Conv 1 *************************/
  /* N H W KH KW PH PW S D G GCin GCout */
  b->Args({1, 224, 224, 7, 7, 6, 6, 2, 1, 1, 3, 64});
  /************************ Conv 2.1 ************************/
  /* N H W KH KW PH PW S D G GCin GCout */
  b->Args({1, 56, 56, 1, 1, 0, 0, 1, 1, 1, 64, 64});
  b->Args({1, 56, 56, 3, 3, 2, 2, 1, 1, 1, 64, 64});
  b->Args({1, 56, 56, 1, 1, 0, 0, 1, 1, 1, 64, 256});
//b->Args({1, 56, 56, 1, 1, 0, 0, 1, 1, 1, 64, 256});
  /************************ Conv 2.X ************************/
  /* N H W KH KW PH PW S D G GCin GCout */
  // Later Conv 2 blocks: only the first 1x1 (256 -> 64) is new.
  b->Args({1, 56, 56, 1, 1, 0, 0, 1, 1, 1, 256, 64});
//b->Args({1, 56, 56, 3, 3, 2, 2, 1, 1, 1, 64, 64});
//b->Args({1, 56, 56, 1, 1, 0, 0, 1, 1, 1, 64, 256});
  /************************ Conv 3.1 ************************/
  /* N H W KH KW PH PW S D G GCin GCout */
  // First Conv 3 block: stride-2 3x3 plus a stride-2 1x1 projection shortcut.
  b->Args({1, 56, 56, 1, 1, 0, 0, 1, 1, 1, 256, 128});
  b->Args({1, 56, 56, 3, 3, 2, 2, 2, 1, 1, 128, 128});
  b->Args({1, 28, 28, 1, 1, 0, 0, 1, 1, 1, 128, 512});
  b->Args({1, 56, 56, 1, 1, 0, 0, 2, 1, 1, 256, 512});
  /************************ Conv 3.X ************************/
  /* N H W KH KW PH PW S D G GCin GCout */
  b->Args({1, 28, 28, 1, 1, 0, 0, 1, 1, 1, 512, 128});
  b->Args({1, 28, 28, 3, 3, 2, 2, 1, 1, 1, 128, 128});
//b->Args({1, 28, 28, 1, 1, 0, 0, 1, 1, 1, 128, 512});
  /************************ Conv 4.1 ************************/
  /* N H W KH KW PH PW S D G GCin GCout */
  b->Args({1, 28, 28, 1, 1, 0, 0, 1, 1, 1, 512, 256});
  b->Args({1, 28, 28, 3, 3, 2, 2, 2, 1, 1, 256, 256});
  b->Args({1, 14, 14, 1, 1, 0, 0, 1, 1, 1, 256, 1024});
  b->Args({1, 28, 28, 1, 1, 0, 0, 2, 1, 1, 512, 1024});
  /************************ Conv 4.X ************************/
  /* N H W KH KW PH PW S D G GCin GCout */
  b->Args({1, 14, 14, 1, 1, 0, 0, 1, 1, 1, 1024, 256});
  b->Args({1, 14, 14, 3, 3, 2, 2, 1, 1, 1, 256, 256});
//b->Args({1, 14, 14, 1, 1, 0, 0, 1, 1, 1, 256, 1024});
  /************************ Conv 5.1 ************************/
  /* N H W KH KW PH PW S D G GCin GCout */
  b->Args({1, 14, 14, 1, 1, 0, 0, 1, 1, 1, 1024, 512});
  b->Args({1, 14, 14, 3, 3, 2, 2, 2, 1, 1, 512, 512});
  b->Args({1, 7, 7, 1, 1, 0, 0, 1, 1, 1, 512, 2048});
  b->Args({1, 14, 14, 1, 1, 0, 0, 2, 1, 1, 1024, 2048});
  /************************ Conv 5.X ************************/
  /* N H W KH KW PH PW S D G GCin GCout */
  b->Args({1, 7, 7, 1, 1, 0, 0, 1, 1, 1, 2048, 512});
  b->Args({1, 7, 7, 3, 3, 2, 2, 1, 1, 1, 512, 512});
//b->Args({1, 7, 7, 1, 1, 0, 0, 1, 1, 1, 512, 2048});
}
1652
1653static void VGG(benchmark::internal::Benchmark* b) {
1654 b->ArgNames({"N", "H", "W", "KH", "KW", "PH", "PW", "S", "D", "G", "GCin", "GCout"});
1655
1656 /************************* Conv 1.1 ************************/
1657 /* N H W KH KW PH PW S D G GCin GCout */
1658 b->Args({1, 224, 224, 3, 3, 2, 2, 1, 1, 1, 3, 64});
1659 /************************* Conv 1.2 ************************/
1660 /* N H W KH KW PH PW S D G GCin GCout */
1661 b->Args({1, 224, 224, 3, 3, 2, 2, 1, 1, 1, 64, 64});
1662
1663 /************************* Conv 2.1 ************************/
1664 /* N H W KH KW PH PW S D G GCin GCout */
1665 b->Args({1, 112, 112, 3, 3, 2, 2, 1, 1, 1, 64, 128});
1666 /************************* Conv 2.2 ************************/
1667 /* N H W KH KW PH PW S D G GCin GCout */
1668 b->Args({1, 112, 112, 3, 3, 2, 2, 1, 1, 1, 128, 128});
1669
1670 /************************* Conv 3.1 ************************/
1671 /* N H W KH KW PH PW S D G GCin GCout */
1672 b->Args({1, 56, 56, 3, 3, 2, 2, 1, 1, 1, 128, 256});
1673 /************************* Conv 3.2 ************************/
1674 /* N H W KH KW PH PW S D G GCin GCout */
1675 b->Args({1, 56, 56, 3, 3, 2, 2, 1, 1, 1, 256, 256});
1676 /************************* Conv 3.3 ************************/
1677 /* N H W KH KW PH PW S D G GCin GCout */
1678 b->Args({1, 56, 56, 1, 1, 0, 0, 1, 1, 1, 256, 256});
1679
1680 /************************* Conv 4.1 ************************/
1681 /* N H W KH KW PH PW S D G GCin GCout */
1682 b->Args({1, 28, 28, 3, 3, 2, 2, 1, 1, 1, 256, 512});
1683 /************************* Conv 4.2 ************************/
1684 /* N H W KH KW PH PW S D G GCin GCout */
1685 b->Args({1, 28, 28, 3, 3, 2, 2, 1, 1, 1, 512, 512});
1686 /************************* Conv 4.3 ************************/
1687 /* N H W KH KW PH PW S D G GCin GCout */
1688 b->Args({1, 28, 28, 1, 1, 0, 0, 1, 1, 1, 512, 512});
1689
1690 /************************* Conv 5.X ************************/
1691 /* N H W KH KW PH PW S D G GCin GCout */
1692 b->Args({1, 14, 14, 3, 3, 2, 2, 1, 1, 1, 512, 512});
1693 /************************* Conv 5.3 ************************/
1694 /* N H W KH KW PH PW S D G GCin GCout */
1695 b->Args({1, 14, 14, 1, 1, 0, 0, 1, 1, 1, 512, 512});
1696}
1697
1698// SRCNN (9-1-5)
1699static void SRCNN915(benchmark::internal::Benchmark* b) {
1700 b->ArgNames({"N", "H", "W", "KH", "KW", "PH", "PW", "S", "D", "G", "GCin", "GCout"});
1701
1702 /* N H W KH KW PH PW S D G GCin GCout */
1703 b->Args({1, 384, 384, 9, 9, 0, 0, 1, 1, 1, 1, 64});
1704 b->Args({1, 376, 376, 1, 1, 0, 0, 1, 1, 1, 64, 32});
1705 b->Args({1, 376, 376, 5, 5, 0, 0, 1, 1, 1, 32, 1});
1706}
1707
1708// SRCNN (9-3-5)
1709static void SRCNN935(benchmark::internal::Benchmark* b) {
1710 b->ArgNames({"N", "H", "W", "KH", "KW", "PH", "PW", "S", "D", "G", "GCin", "GCout"});
1711
1712 /* N H W KH KW PH PW S D G GCin GCout */
1713 b->Args({1, 384, 384, 9, 9, 0, 0, 1, 1, 1, 1, 64});
1714 b->Args({1, 376, 376, 3, 3, 0, 0, 1, 1, 1, 64, 32});
1715 b->Args({1, 374, 374, 5, 5, 0, 0, 1, 1, 1, 32, 1});
1716}
1717
1718// SRCNN (9-5-5)
1719static void SRCNN955(benchmark::internal::Benchmark* b) {
1720 b->ArgNames({"N", "H", "W", "KH", "KW", "PH", "PW", "S", "D", "G", "GCin", "GCout"});
1721
1722 /* N H W KH KW PH PW S D G GCin GCout */
1723 b->Args({1, 384, 384, 9, 9, 0, 0, 1, 1, 1, 1, 64});
1724 b->Args({1, 376, 376, 5, 5, 0, 0, 1, 1, 1, 64, 32});
1725 b->Args({1, 372, 372, 5, 5, 0, 0, 1, 1, 1, 32, 1});
1726}
1727
// Register the XNNPACK single-precision (f32) convolution benchmark for each
// model topology defined above. UseRealTime() makes google-benchmark report
// wall-clock time rather than per-thread CPU time.
BENCHMARK_CAPTURE(xnnpack_convolution_f32, mobilenet_v1, "MobileNet v1")->Apply(MobileNetV1)->UseRealTime();
BENCHMARK_CAPTURE(xnnpack_convolution_f32, mobilenet_v2, "MobileNet v2")->Apply(MobileNetV2)->UseRealTime();
BENCHMARK_CAPTURE(xnnpack_convolution_f32, mobilenet_v3_small, "MobileNet v3 Small")->Apply(MobileNetV3Small)->UseRealTime();
BENCHMARK_CAPTURE(xnnpack_convolution_f32, mobilenet_v3_large, "MobileNet v3 Large")->Apply(MobileNetV3Large)->UseRealTime();
BENCHMARK_CAPTURE(xnnpack_convolution_f32, shufflenet_v1_g1, "ShuffleNet v1 (1 group)")->Apply(ShuffleNetV1G1)->UseRealTime();
BENCHMARK_CAPTURE(xnnpack_convolution_f32, shufflenet_v1_g2, "ShuffleNet v1 (2 groups)")->Apply(ShuffleNetV1G2)->UseRealTime();
BENCHMARK_CAPTURE(xnnpack_convolution_f32, shufflenet_v1_g3, "ShuffleNet v1 (3 groups)")->Apply(ShuffleNetV1G3)->UseRealTime();
BENCHMARK_CAPTURE(xnnpack_convolution_f32, shufflenet_v1_g4, "ShuffleNet v1 (4 groups)")->Apply(ShuffleNetV1G4)->UseRealTime();
BENCHMARK_CAPTURE(xnnpack_convolution_f32, shufflenet_v1_g8, "ShuffleNet v1 (8 groups)")->Apply(ShuffleNetV1G8)->UseRealTime();
BENCHMARK_CAPTURE(xnnpack_convolution_f32, shufflenet_v2_x05, "ShuffleNet v2 0.5X")->Apply(ShuffleNetV2X05)->UseRealTime();
BENCHMARK_CAPTURE(xnnpack_convolution_f32, shufflenet_v2_x10, "ShuffleNet v2 1.0X")->Apply(ShuffleNetV2X10)->UseRealTime();
BENCHMARK_CAPTURE(xnnpack_convolution_f32, shufflenet_v2_x15, "ShuffleNet v2 1.5X")->Apply(ShuffleNetV2X15)->UseRealTime();
BENCHMARK_CAPTURE(xnnpack_convolution_f32, shufflenet_v2_x20, "ShuffleNet v2 2.0X")->Apply(ShuffleNetV2X20)->UseRealTime();
BENCHMARK_CAPTURE(xnnpack_convolution_f32, squeezenet_v10, "SqueezeNet 1.0")->Apply(SqueezeNetV10)->UseRealTime();
BENCHMARK_CAPTURE(xnnpack_convolution_f32, squeezenet_v11, "SqueezeNet 1.1")->Apply(SqueezeNetV11)->UseRealTime();
BENCHMARK_CAPTURE(xnnpack_convolution_f32, inception_v3, "Inception v3")->Apply(InceptionV3)->UseRealTime();
BENCHMARK_CAPTURE(xnnpack_convolution_f32, resnet18, "ResNet-18")->Apply(ResNet18)->UseRealTime();
BENCHMARK_CAPTURE(xnnpack_convolution_f32, resnet50, "ResNet-50")->Apply(ResNet50)->UseRealTime();
BENCHMARK_CAPTURE(xnnpack_convolution_f32, vgg, "VGG")->Apply(VGG)->UseRealTime();
BENCHMARK_CAPTURE(xnnpack_convolution_f32, srcnn915, "SRCNN (9-1-5)")->Apply(SRCNN915)->UseRealTime();
BENCHMARK_CAPTURE(xnnpack_convolution_f32, srcnn935, "SRCNN (9-3-5)")->Apply(SRCNN935)->UseRealTime();
BENCHMARK_CAPTURE(xnnpack_convolution_f32, srcnn955, "SRCNN (9-5-5)")->Apply(SRCNN955)->UseRealTime();

// Register the q8 (quantized 8-bit) XNNPACK convolution benchmark for the
// same set of model topologies.
BENCHMARK_CAPTURE(xnnpack_convolution_q8, mobilenet_v1, "MobileNet v1")->Apply(MobileNetV1)->UseRealTime();
BENCHMARK_CAPTURE(xnnpack_convolution_q8, mobilenet_v2, "MobileNet v2")->Apply(MobileNetV2)->UseRealTime();
BENCHMARK_CAPTURE(xnnpack_convolution_q8, mobilenet_v3_small, "MobileNet v3 Small")->Apply(MobileNetV3Small)->UseRealTime();
BENCHMARK_CAPTURE(xnnpack_convolution_q8, mobilenet_v3_large, "MobileNet v3 Large")->Apply(MobileNetV3Large)->UseRealTime();
BENCHMARK_CAPTURE(xnnpack_convolution_q8, shufflenet_v1_g1, "ShuffleNet v1 (1 group)")->Apply(ShuffleNetV1G1)->UseRealTime();
BENCHMARK_CAPTURE(xnnpack_convolution_q8, shufflenet_v1_g2, "ShuffleNet v1 (2 groups)")->Apply(ShuffleNetV1G2)->UseRealTime();
BENCHMARK_CAPTURE(xnnpack_convolution_q8, shufflenet_v1_g3, "ShuffleNet v1 (3 groups)")->Apply(ShuffleNetV1G3)->UseRealTime();
BENCHMARK_CAPTURE(xnnpack_convolution_q8, shufflenet_v1_g4, "ShuffleNet v1 (4 groups)")->Apply(ShuffleNetV1G4)->UseRealTime();
BENCHMARK_CAPTURE(xnnpack_convolution_q8, shufflenet_v1_g8, "ShuffleNet v1 (8 groups)")->Apply(ShuffleNetV1G8)->UseRealTime();
BENCHMARK_CAPTURE(xnnpack_convolution_q8, shufflenet_v2_x05, "ShuffleNet v2 0.5X")->Apply(ShuffleNetV2X05)->UseRealTime();
BENCHMARK_CAPTURE(xnnpack_convolution_q8, shufflenet_v2_x10, "ShuffleNet v2 1.0X")->Apply(ShuffleNetV2X10)->UseRealTime();
BENCHMARK_CAPTURE(xnnpack_convolution_q8, shufflenet_v2_x15, "ShuffleNet v2 1.5X")->Apply(ShuffleNetV2X15)->UseRealTime();
BENCHMARK_CAPTURE(xnnpack_convolution_q8, shufflenet_v2_x20, "ShuffleNet v2 2.0X")->Apply(ShuffleNetV2X20)->UseRealTime();
BENCHMARK_CAPTURE(xnnpack_convolution_q8, squeezenet_v10, "SqueezeNet 1.0")->Apply(SqueezeNetV10)->UseRealTime();
BENCHMARK_CAPTURE(xnnpack_convolution_q8, squeezenet_v11, "SqueezeNet 1.1")->Apply(SqueezeNetV11)->UseRealTime();
BENCHMARK_CAPTURE(xnnpack_convolution_q8, inception_v3, "Inception v3")->Apply(InceptionV3)->UseRealTime();
BENCHMARK_CAPTURE(xnnpack_convolution_q8, resnet18, "ResNet-18")->Apply(ResNet18)->UseRealTime();
BENCHMARK_CAPTURE(xnnpack_convolution_q8, resnet50, "ResNet-50")->Apply(ResNet50)->UseRealTime();
BENCHMARK_CAPTURE(xnnpack_convolution_q8, vgg, "VGG")->Apply(VGG)->UseRealTime();
BENCHMARK_CAPTURE(xnnpack_convolution_q8, srcnn915, "SRCNN (9-1-5)")->Apply(SRCNN915)->UseRealTime();
BENCHMARK_CAPTURE(xnnpack_convolution_q8, srcnn935, "SRCNN (9-3-5)")->Apply(SRCNN935)->UseRealTime();
BENCHMARK_CAPTURE(xnnpack_convolution_q8, srcnn955, "SRCNN (9-5-5)")->Apply(SRCNN955)->UseRealTime();
1773
// TensorFlow Lite baselines over the same model shapes, compiled in only when
// the build defines BENCHMARK_TENSORFLOW_LITE.
#ifdef BENCHMARK_TENSORFLOW_LITE
  BENCHMARK_CAPTURE(tflite_convolution_f32, mobilenet_v1, "MobileNet v1")->Apply(MobileNetV1)->UseRealTime();
  BENCHMARK_CAPTURE(tflite_convolution_f32, mobilenet_v2, "MobileNet v2")->Apply(MobileNetV2)->UseRealTime();
  BENCHMARK_CAPTURE(tflite_convolution_f32, mobilenet_v3_small, "MobileNet v3 Small")->Apply(MobileNetV3Small)->UseRealTime();
  BENCHMARK_CAPTURE(tflite_convolution_f32, mobilenet_v3_large, "MobileNet v3 Large")->Apply(MobileNetV3Large)->UseRealTime();
  BENCHMARK_CAPTURE(tflite_convolution_f32, shufflenet_v1_g1, "ShuffleNet v1 (1 group)")->Apply(ShuffleNetV1G1)->UseRealTime();
  BENCHMARK_CAPTURE(tflite_convolution_f32, shufflenet_v1_g2, "ShuffleNet v1 (2 groups)")->Apply(ShuffleNetV1G2)->UseRealTime();
  BENCHMARK_CAPTURE(tflite_convolution_f32, shufflenet_v1_g3, "ShuffleNet v1 (3 groups)")->Apply(ShuffleNetV1G3)->UseRealTime();
  BENCHMARK_CAPTURE(tflite_convolution_f32, shufflenet_v1_g4, "ShuffleNet v1 (4 groups)")->Apply(ShuffleNetV1G4)->UseRealTime();
  BENCHMARK_CAPTURE(tflite_convolution_f32, shufflenet_v1_g8, "ShuffleNet v1 (8 groups)")->Apply(ShuffleNetV1G8)->UseRealTime();
  BENCHMARK_CAPTURE(tflite_convolution_f32, shufflenet_v2_x05, "ShuffleNet v2 0.5X")->Apply(ShuffleNetV2X05)->UseRealTime();
  BENCHMARK_CAPTURE(tflite_convolution_f32, shufflenet_v2_x10, "ShuffleNet v2 1.0X")->Apply(ShuffleNetV2X10)->UseRealTime();
  BENCHMARK_CAPTURE(tflite_convolution_f32, shufflenet_v2_x15, "ShuffleNet v2 1.5X")->Apply(ShuffleNetV2X15)->UseRealTime();
  BENCHMARK_CAPTURE(tflite_convolution_f32, shufflenet_v2_x20, "ShuffleNet v2 2.0X")->Apply(ShuffleNetV2X20)->UseRealTime();
  BENCHMARK_CAPTURE(tflite_convolution_f32, squeezenet_v10, "SqueezeNet 1.0")->Apply(SqueezeNetV10)->UseRealTime();
  BENCHMARK_CAPTURE(tflite_convolution_f32, squeezenet_v11, "SqueezeNet 1.1")->Apply(SqueezeNetV11)->UseRealTime();
  BENCHMARK_CAPTURE(tflite_convolution_f32, inception_v3, "Inception v3")->Apply(InceptionV3)->UseRealTime();
  BENCHMARK_CAPTURE(tflite_convolution_f32, resnet18, "ResNet-18")->Apply(ResNet18)->UseRealTime();
  BENCHMARK_CAPTURE(tflite_convolution_f32, resnet50, "ResNet-50")->Apply(ResNet50)->UseRealTime();
  BENCHMARK_CAPTURE(tflite_convolution_f32, vgg, "VGG")->Apply(VGG)->UseRealTime();
  BENCHMARK_CAPTURE(tflite_convolution_f32, srcnn915, "SRCNN (9-1-5)")->Apply(SRCNN915)->UseRealTime();
  BENCHMARK_CAPTURE(tflite_convolution_f32, srcnn935, "SRCNN (9-3-5)")->Apply(SRCNN935)->UseRealTime();
  BENCHMARK_CAPTURE(tflite_convolution_f32, srcnn955, "SRCNN (9-5-5)")->Apply(SRCNN955)->UseRealTime();
#endif // BENCHMARK_TENSORFLOW_LITE
1798
// Arm Compute Library baselines, compiled in only when the build defines
// BENCHMARK_ARM_COMPUTE_LIBRARY. Note this list is a subset of the XNNPACK
// one: the MobileNet v3 topologies are not registered here.
#ifdef BENCHMARK_ARM_COMPUTE_LIBRARY
  BENCHMARK_CAPTURE(armcl_convolution_f32, mobilenet_v1, "MobileNet v1")->Apply(MobileNetV1)->UseRealTime();
  BENCHMARK_CAPTURE(armcl_convolution_f32, mobilenet_v2, "MobileNet v2")->Apply(MobileNetV2)->UseRealTime();
  BENCHMARK_CAPTURE(armcl_convolution_f32, shufflenet_v1_g1, "ShuffleNet v1 (1 group)")->Apply(ShuffleNetV1G1)->UseRealTime();
  BENCHMARK_CAPTURE(armcl_convolution_f32, shufflenet_v1_g2, "ShuffleNet v1 (2 groups)")->Apply(ShuffleNetV1G2)->UseRealTime();
  BENCHMARK_CAPTURE(armcl_convolution_f32, shufflenet_v1_g3, "ShuffleNet v1 (3 groups)")->Apply(ShuffleNetV1G3)->UseRealTime();
  BENCHMARK_CAPTURE(armcl_convolution_f32, shufflenet_v1_g4, "ShuffleNet v1 (4 groups)")->Apply(ShuffleNetV1G4)->UseRealTime();
  BENCHMARK_CAPTURE(armcl_convolution_f32, shufflenet_v1_g8, "ShuffleNet v1 (8 groups)")->Apply(ShuffleNetV1G8)->UseRealTime();
  BENCHMARK_CAPTURE(armcl_convolution_f32, shufflenet_v2_x05, "ShuffleNet v2 0.5X")->Apply(ShuffleNetV2X05)->UseRealTime();
  BENCHMARK_CAPTURE(armcl_convolution_f32, shufflenet_v2_x10, "ShuffleNet v2 1.0X")->Apply(ShuffleNetV2X10)->UseRealTime();
  BENCHMARK_CAPTURE(armcl_convolution_f32, shufflenet_v2_x15, "ShuffleNet v2 1.5X")->Apply(ShuffleNetV2X15)->UseRealTime();
  BENCHMARK_CAPTURE(armcl_convolution_f32, shufflenet_v2_x20, "ShuffleNet v2 2.0X")->Apply(ShuffleNetV2X20)->UseRealTime();
  BENCHMARK_CAPTURE(armcl_convolution_f32, squeezenet_v10, "SqueezeNet 1.0")->Apply(SqueezeNetV10)->UseRealTime();
  BENCHMARK_CAPTURE(armcl_convolution_f32, squeezenet_v11, "SqueezeNet 1.1")->Apply(SqueezeNetV11)->UseRealTime();
  BENCHMARK_CAPTURE(armcl_convolution_f32, inception_v3, "Inception v3")->Apply(InceptionV3)->UseRealTime();
  BENCHMARK_CAPTURE(armcl_convolution_f32, resnet18, "ResNet-18")->Apply(ResNet18)->UseRealTime();
  BENCHMARK_CAPTURE(armcl_convolution_f32, resnet50, "ResNet-50")->Apply(ResNet50)->UseRealTime();
  BENCHMARK_CAPTURE(armcl_convolution_f32, vgg, "VGG")->Apply(VGG)->UseRealTime();
  BENCHMARK_CAPTURE(armcl_convolution_f32, srcnn915, "SRCNN (9-1-5)")->Apply(SRCNN915)->UseRealTime();
  BENCHMARK_CAPTURE(armcl_convolution_f32, srcnn935, "SRCNN (9-3-5)")->Apply(SRCNN935)->UseRealTime();
  BENCHMARK_CAPTURE(armcl_convolution_f32, srcnn955, "SRCNN (9-5-5)")->Apply(SRCNN955)->UseRealTime();
#endif // BENCHMARK_ARM_COMPUTE_LIBRARY
1821
// Emit a main() entry point unless the embedding build supplies its own
// benchmark driver.
#ifndef XNNPACK_BENCHMARK_NO_MAIN
BENCHMARK_MAIN();
#endif