// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <algorithm>
#include <cfloat>
#include <cmath>
#include <functional>
#include <limits>
#include <random>
#include <vector>

#include <cpuinfo.h>

#include <benchmark/benchmark.h>
#include "bench/conv.h"
#include "bench/utils.h"
#include <xnnpack/AlignedAllocator.h>
#include <xnnpack/igemm.h>
#include <xnnpack/indirection.h>
#include <xnnpack/operator.h>
#include <xnnpack/pack.h>
#include <xnnpack/params.h>
#include <xnnpack/requantization.h>

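// Benchmarks one f32 IGEMM (indirect GEMM) micro-kernel on a convolution shape
// taken from the benchmark arguments. mr x nr is the micro-kernel's output
// tile; kr and sr describe the packing layout the kernel expects its weights in.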
static void IGEMMBenchmark(benchmark::State& state,
  xnn_f32_igemm_ukernel_function f32_igemm,
  uint32_t mr, uint32_t nr, uint32_t kr, uint32_t sr)
{
  if (!cpuinfo_initialize()) {
    state.SkipWithError("cpuinfo initialization failed");
    return;
  }

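  // Convolution shape, as packed into the benchmark arguments by the
  // BENCHMARK_CONV registrations below (see bench/conv.h).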
  const size_t input_height = state.range(0);
  const size_t input_width = state.range(1);
  const size_t kernel_height = state.range(2);
  const size_t kernel_width = state.range(3);
  const size_t kernel_size = kernel_height * kernel_width;
  const size_t padding_height = state.range(4);
  const size_t padding_width = state.range(5);
  const size_t subsampling = state.range(6);
  const size_t dilation = state.range(7);
  const size_t group_input_channels = state.range(8);
  const size_t group_output_channels = state.range(9);

  std::random_device random_device;
  auto rng = std::mt19937(random_device());
  auto f32rng = std::bind(std::uniform_real_distribution<float>(0.0f, 1.0f), rng);

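  // Derive the output size from the standard convolution arithmetic: dilation
  // stretches the kernel, padding extends the input, and subsampling (stride)
  // divides the number of output positions.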
  const size_t output_pixel_stride = group_output_channels;
  const size_t input_pixel_stride = group_input_channels;
  const size_t effective_kernel_height = (kernel_height - 1) * dilation + 1;
  const size_t effective_kernel_width = (kernel_width - 1) * dilation + 1;
  const size_t padding_left = padding_width / 2;
  const size_t padding_top = padding_height / 2;
  const size_t output_height = (input_height + padding_height - effective_kernel_height) / subsampling + 1;
  const size_t output_width = (input_width + padding_width - effective_kernel_width) / subsampling + 1;
  const size_t output_size = output_height * output_width;

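  // Round the GEMM dimensions up to whole micro-kernel tiles: the packed
  // weights and the indirection buffer are laid out in full tiles.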
  const size_t mc_stride = benchmark::utils::roundUp<size_t>(output_size, mr);
  const size_t nc_stride = benchmark::utils::roundUp<size_t>(group_output_channels, nr);
  const size_t kc_stride = benchmark::utils::roundUp<size_t>(group_input_channels, kr);

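  // Random input (a), kernel (k), and bias (b); z is the zero buffer that
  // padded entries of the indirection buffer point into.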
  std::vector<float> a(input_height * input_width * input_pixel_stride);
  std::generate(a.begin(), a.end(), std::ref(f32rng));
  std::vector<float> k(group_output_channels * kernel_height * kernel_width * group_input_channels);
  std::generate(k.begin(), k.end(), std::ref(f32rng));
  std::vector<float> b(group_output_channels);
  std::generate(b.begin(), b.end(), std::ref(f32rng));

  std::vector<float> z(group_input_channels);

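  // Allocate enough copies of the packed weights, indirection pointers, and
  // outputs to overflow the last-level cache, and rotate through them so that
  // each iteration starts with cold caches.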
  const size_t w_elements = (kernel_size * kc_stride + 1) * nc_stride;
  const size_t i_elements = mc_stride * kernel_size;
  const size_t c_elements = output_height * output_width * output_pixel_stride;
  const size_t num_buffers = 1 +
    benchmark::utils::divideRoundUp<size_t>(cpuinfo_get_max_cache_size(),
      sizeof(float) * (w_elements + c_elements) + sizeof(void*) * i_elements);

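  // Pack weights and bias into the GOKI (groups, output channels, kernel,
  // input channels) layout that the IGEMM micro-kernels consume.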
  std::vector<float, AlignedAllocator<float, 32>> w(w_elements * num_buffers);
  std::fill(w.begin(), w.end(), 0.0f);
  xnn_pack_f32_conv_goki_w(
    1 /* groups */, group_output_channels, kernel_size, group_input_channels,
    nr, kr, sr, k.data(), b.data(), w.data());
  for (size_t n = 1; n < num_buffers; n++) {
    std::copy(w.cbegin(), w.cbegin() + w_elements, w.begin() + n * w_elements);
  }

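  // Set up a minimal xnn_operator so that xnn_indirection_init_conv2d can fill
  // the indirection buffer with pointers to input rows (or to the zero buffer
  // for padded positions).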
  std::vector<const float*> i(i_elements * num_buffers);
  xnn_operator convolution_op = { };
  convolution_op.indirection_buffer = reinterpret_cast<const void**>(i.data());
  convolution_op.input = a.data();
  convolution_op.input_pixel_stride = input_pixel_stride;
  convolution_op.zero_buffer = z.data();
  convolution_op.groups = 1;
  convolution_op.group_input_channels = group_input_channels;
  convolution_op.batch_size = 1;
  convolution_op.input_height = input_height;
  convolution_op.input_width = input_width;
  convolution_op.output_height = output_height;
  convolution_op.output_width = output_width;
  convolution_op.kernel_height = kernel_height;
  convolution_op.kernel_width = kernel_width;
  convolution_op.stride_height = subsampling;
  convolution_op.stride_width = subsampling;
  convolution_op.dilation_height = dilation;
  convolution_op.dilation_width = dilation;
  convolution_op.padding_top = padding_top;
  convolution_op.padding_left = padding_left;
  xnn_indirection_init_conv2d(&convolution_op, mr, 2 /* log2(sizeof(float)) */);
  for (size_t n = 1; n < num_buffers; n++) {
    std::copy(i.cbegin(), i.cbegin() + i_elements, i.begin() + n * i_elements);
  }

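  // Prefill the output with NaNs so that any elements the micro-kernel fails
  // to write would be easy to spot.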
  std::vector<float> c(c_elements * num_buffers);
  std::fill(c.begin(), c.end(), std::nanf(""));

  xnn_f32_output_params output_params =
    xnn_compute_f32_output_params(-std::numeric_limits<float>::infinity(), +std::numeric_limits<float>::infinity());

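  // Outside the timed region, prefetch the input into L1 and advance to the
  // next buffer copy, keeping the packed weights and outputs cache-cold.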
  size_t buffer_index = 0;
  for (auto _ : state) {
    state.PauseTiming();
    benchmark::utils::prefetchToL1(a.data(), a.size() * sizeof(float));
    buffer_index = (buffer_index + 1) % num_buffers;
    state.ResumeTiming();

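    // Tile the output: each micro-kernel call produces one mb x nb block, with
    // partial tiles on the last row/column of tiles.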
    for (uint32_t m = 0; m < output_size; m += mr) {
      const uint32_t mb = std::min<uint32_t>(output_size - m, mr);
      for (uint32_t n = 0; n < group_output_channels; n += nr) {
        const uint32_t nb = std::min<uint32_t>(group_output_channels - n, nr);
        f32_igemm(
          mb, nb, group_input_channels * sizeof(float), kernel_size * mr * sizeof(void*),
          i.data() + buffer_index * i_elements + m,
          w.data() + buffer_index * w_elements + n * (kc_stride * kernel_size + 1),
          c.data() + buffer_index * c_elements + m * group_output_channels + n, group_output_channels * sizeof(float), nr * sizeof(float),
          0, z.data(), &output_params);
      }
    }
  }

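  // Each output element takes kernel_size * group_input_channels multiply-adds;
  // counting 2 FLOPs per multiply-add yields the effective FLOP/s rate.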
  state.counters["Freq"] = benchmark::utils::GetCurrentCpuFrequency();
  state.counters["FLOPS"] = benchmark::Counter(
    uint64_t(state.iterations()) * 2 *
      output_height * output_width *
      group_input_channels * group_output_channels *
      kernel_height * kernel_width,
    benchmark::Counter::kIsRate);
}

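// Per-architecture wrappers: each binds one micro-kernel and its tile
// parameters, and BENCHMARK_CONV registers it over the standard set of
// convolution shapes. Names encode the output tile (MRxNR) and the ISA variant.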
#if CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64
  static void f32_igemm_4x2__neon_ld64(benchmark::State& state, const char* net) {
    IGEMMBenchmark(state, xnn_f32_igemm_ukernel_4x2__neon_ld64, 4, 2, 1, 1);
  }

  static void f32_igemm_4x4__neon_ld64(benchmark::State& state, const char* net) {
    IGEMMBenchmark(state, xnn_f32_igemm_ukernel_4x4__neon_ld64, 4, 4, 1, 1);
  }

  static void f32_igemm_4x8__neon_ld128(benchmark::State& state, const char* net) {
    IGEMMBenchmark(state, xnn_f32_igemm_ukernel_4x8__neon_ld128, 4, 8, 1, 1);
  }

  static void f32_igemm_4x8__neon_ld64(benchmark::State& state, const char* net) {
    IGEMMBenchmark(state, xnn_f32_igemm_ukernel_4x8__neon_ld64, 4, 8, 1, 1);
  }

  static void f32_igemm_4x12__neon_ld64(benchmark::State& state, const char* net) {
    IGEMMBenchmark(state, xnn_f32_igemm_ukernel_4x12__neon_ld64, 4, 12, 1, 1);
  }

  static void f32_igemm_6x8__neon_ld64(benchmark::State& state, const char* net) {
    IGEMMBenchmark(state, xnn_f32_igemm_ukernel_6x8__neon_ld64, 6, 8, 1, 1);
  }

  static void f32_igemm_4x2__neonfma_ld64(benchmark::State& state, const char* net) {
    IGEMMBenchmark(state, xnn_f32_igemm_ukernel_4x2__neonfma_ld64, 4, 2, 1, 1);
  }

  static void f32_igemm_4x4__neonfma_ld64(benchmark::State& state, const char* net) {
    IGEMMBenchmark(state, xnn_f32_igemm_ukernel_4x4__neonfma_ld64, 4, 4, 1, 1);
  }

  static void f32_igemm_4x8__neonfma_ld128(benchmark::State& state, const char* net) {
    IGEMMBenchmark(state, xnn_f32_igemm_ukernel_4x8__neonfma_ld128, 4, 8, 1, 1);
  }

  static void f32_igemm_4x8__neonfma_ld64(benchmark::State& state, const char* net) {
    IGEMMBenchmark(state, xnn_f32_igemm_ukernel_4x8__neonfma_ld64, 4, 8, 1, 1);
  }

  static void f32_igemm_4x12__neonfma_ld64(benchmark::State& state, const char* net) {
    IGEMMBenchmark(state, xnn_f32_igemm_ukernel_4x12__neonfma_ld64, 4, 12, 1, 1);
  }

  static void f32_igemm_6x8__neonfma_ld64(benchmark::State& state, const char* net) {
    IGEMMBenchmark(state, xnn_f32_igemm_ukernel_6x8__neonfma_ld64, 6, 8, 1, 1);
  }

  BENCHMARK_CONV(f32_igemm_4x12__neon_ld64)
  BENCHMARK_CONV(f32_igemm_4x12__neonfma_ld64)
  BENCHMARK_CONV(f32_igemm_4x2__neon_ld64)
  BENCHMARK_CONV(f32_igemm_4x2__neonfma_ld64)
  BENCHMARK_CONV(f32_igemm_4x4__neon_ld64)
  BENCHMARK_CONV(f32_igemm_4x4__neonfma_ld64)
  BENCHMARK_CONV(f32_igemm_4x8__neon_ld128)
  BENCHMARK_CONV(f32_igemm_4x8__neon_ld64)
  BENCHMARK_CONV(f32_igemm_4x8__neonfma_ld128)
  BENCHMARK_CONV(f32_igemm_4x8__neonfma_ld64)
  BENCHMARK_CONV(f32_igemm_6x8__neon_ld64)
  BENCHMARK_CONV(f32_igemm_6x8__neonfma_ld64)
#endif /* CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64 */

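// AArch64-specific micro-kernels, each tuned for the pipeline of the Cortex
// core named in the suffix.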
#if CPUINFO_ARCH_ARM64
  static void f32_igemm_1x12__aarch64_neonfma_cortex_a53(benchmark::State& state, const char* net) {
    IGEMMBenchmark(state, xnn_f32_igemm_ukernel_1x12__aarch64_neonfma_cortex_a53, 1, 12, 1, 1);
  }

  static void f32_igemm_1x8__aarch64_neonfma_cortex_a57(benchmark::State& state, const char* net) {
    IGEMMBenchmark(state, xnn_f32_igemm_ukernel_1x8__aarch64_neonfma_cortex_a57, 1, 8, 1, 1);
  }

  static void f32_igemm_1x8__aarch64_neonfma_cortex_a75(benchmark::State& state, const char* net) {
    IGEMMBenchmark(state, xnn_f32_igemm_ukernel_1x8__aarch64_neonfma_cortex_a75, 1, 8, 1, 1);
  }

  static void f32_igemm_4x8__aarch64_neonfma_cortex_a75(benchmark::State& state, const char* net) {
    IGEMMBenchmark(state, xnn_f32_igemm_ukernel_4x8__aarch64_neonfma_cortex_a75, 4, 8, 1, 1);
  }

  static void f32_igemm_5x8__aarch64_neonfma_cortex_a75(benchmark::State& state, const char* net) {
    IGEMMBenchmark(state, xnn_f32_igemm_ukernel_5x8__aarch64_neonfma_cortex_a75, 5, 8, 1, 1);
  }

  static void f32_igemm_4x12__aarch64_neonfma_cortex_a53(benchmark::State& state, const char* net) {
    IGEMMBenchmark(state, xnn_f32_igemm_ukernel_4x12__aarch64_neonfma_cortex_a53, 4, 12, 1, 1);
  }

  static void f32_igemm_6x8__aarch64_neonfma_cortex_a57(benchmark::State& state, const char* net) {
    IGEMMBenchmark(state, xnn_f32_igemm_ukernel_6x8__aarch64_neonfma_cortex_a57, 6, 8, 1, 1);
  }

  static void f32_igemm_6x8__aarch64_neonfma_cortex_a73(benchmark::State& state, const char* net) {
    IGEMMBenchmark(state, xnn_f32_igemm_ukernel_6x8__aarch64_neonfma_cortex_a73, 6, 8, 1, 1);
  }

  static void f32_igemm_6x8__aarch64_neonfma_cortex_a75(benchmark::State& state, const char* net) {
    IGEMMBenchmark(state, xnn_f32_igemm_ukernel_6x8__aarch64_neonfma_cortex_a75, 6, 8, 1, 1);
  }

  BENCHMARK_CONV(f32_igemm_1x12__aarch64_neonfma_cortex_a53)
  BENCHMARK_CONV(f32_igemm_1x8__aarch64_neonfma_cortex_a57)
  BENCHMARK_CONV(f32_igemm_1x8__aarch64_neonfma_cortex_a75)
  BENCHMARK_CONV(f32_igemm_4x12__aarch64_neonfma_cortex_a53)
  BENCHMARK_CONV(f32_igemm_4x8__aarch64_neonfma_cortex_a75)
  BENCHMARK_CONV(f32_igemm_5x8__aarch64_neonfma_cortex_a75)
  BENCHMARK_CONV(f32_igemm_6x8__aarch64_neonfma_cortex_a57)
  BENCHMARK_CONV(f32_igemm_6x8__aarch64_neonfma_cortex_a73)
  BENCHMARK_CONV(f32_igemm_6x8__aarch64_neonfma_cortex_a75)
#endif /* CPUINFO_ARCH_ARM64 */

#if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
  static void f32_igemm_1x8__sse_load1(benchmark::State& state, const char* net) {
    IGEMMBenchmark(state, xnn_f32_igemm_ukernel_1x8__sse_load1, 1, 8, 1, 1);
  }

  static void f32_igemm_4x8__sse_load1(benchmark::State& state, const char* net) {
    IGEMMBenchmark(state, xnn_f32_igemm_ukernel_4x8__sse_load1, 4, 8, 1, 1);
  }

  static void f32_igemm_1x8__sse_dup(benchmark::State& state, const char* net) {
    IGEMMBenchmark(state, xnn_f32_igemm_ukernel_1x8__sse_dup, 1, 8, 1, 1);
  }

  static void f32_igemm_4x8__sse_dup(benchmark::State& state, const char* net) {
    IGEMMBenchmark(state, xnn_f32_igemm_ukernel_4x8__sse_dup, 4, 8, 1, 1);
  }

  static void f32_igemm_1x8s4__sse(benchmark::State& state, const char* net) {
    IGEMMBenchmark(state, xnn_f32_igemm_ukernel_1x8s4__sse, 1, 8, 1, 4);
  }

  static void f32_igemm_4x8s4__sse(benchmark::State& state, const char* net) {
    IGEMMBenchmark(state, xnn_f32_igemm_ukernel_4x8s4__sse, 4, 8, 1, 4);
  }

  BENCHMARK_CONV(f32_igemm_1x8__sse_load1)
  BENCHMARK_CONV(f32_igemm_4x8__sse_load1)
  BENCHMARK_CONV(f32_igemm_1x8__sse_dup)
  BENCHMARK_CONV(f32_igemm_4x8__sse_dup)
  BENCHMARK_CONV(f32_igemm_1x8s4__sse)
  BENCHMARK_CONV(f32_igemm_4x8s4__sse)
#endif /* CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 */

#if !CPUINFO_ARCH_WASM && !CPUINFO_ARCH_ASMJS
  static void f32_igemm_1x8__psimd_loadsplat(benchmark::State& state, const char* net) {
    IGEMMBenchmark(state, xnn_f32_igemm_ukernel_1x8__psimd_loadsplat, 1, 8, 1, 1);
  }

  static void f32_igemm_4x8__psimd_loadsplat(benchmark::State& state, const char* net) {
    IGEMMBenchmark(state, xnn_f32_igemm_ukernel_4x8__psimd_loadsplat, 4, 8, 1, 1);
  }

  static void f32_igemm_6x8__psimd_loadsplat(benchmark::State& state, const char* net) {
    IGEMMBenchmark(state, xnn_f32_igemm_ukernel_6x8__psimd_loadsplat, 6, 8, 1, 1);
  }

  static void f32_igemm_1x8__psimd_splat(benchmark::State& state, const char* net) {
    IGEMMBenchmark(state, xnn_f32_igemm_ukernel_1x8__psimd_splat, 1, 8, 1, 1);
  }

  static void f32_igemm_4x8__psimd_splat(benchmark::State& state, const char* net) {
    IGEMMBenchmark(state, xnn_f32_igemm_ukernel_4x8__psimd_splat, 4, 8, 1, 1);
  }

  static void f32_igemm_6x8__psimd_splat(benchmark::State& state, const char* net) {
    IGEMMBenchmark(state, xnn_f32_igemm_ukernel_6x8__psimd_splat, 6, 8, 1, 1);
  }

  static void f32_igemm_1x8s4__psimd(benchmark::State& state, const char* net) {
    IGEMMBenchmark(state, xnn_f32_igemm_ukernel_1x8s4__psimd, 1, 8, 1, 4);
  }

  static void f32_igemm_4x8s4__psimd(benchmark::State& state, const char* net) {
    IGEMMBenchmark(state, xnn_f32_igemm_ukernel_4x8s4__psimd, 4, 8, 1, 4);
  }

  static void f32_igemm_6x8s4__psimd(benchmark::State& state, const char* net) {
    IGEMMBenchmark(state, xnn_f32_igemm_ukernel_6x8s4__psimd, 6, 8, 1, 4);
  }

  BENCHMARK_CONV(f32_igemm_1x8__psimd_loadsplat)
  BENCHMARK_CONV(f32_igemm_4x8__psimd_loadsplat)
  BENCHMARK_CONV(f32_igemm_6x8__psimd_loadsplat)

  BENCHMARK_CONV(f32_igemm_1x8__psimd_splat)
  BENCHMARK_CONV(f32_igemm_4x8__psimd_splat)
  BENCHMARK_CONV(f32_igemm_6x8__psimd_splat)

  BENCHMARK_CONV(f32_igemm_1x8s4__psimd)
  BENCHMARK_CONV(f32_igemm_4x8s4__psimd)
  BENCHMARK_CONV(f32_igemm_6x8s4__psimd)
#endif /* !CPUINFO_ARCH_WASM && !CPUINFO_ARCH_ASMJS */

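// Portable scalar micro-kernels: the baseline available on every architecture,
// so they are registered unconditionally.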
static void f32_igemm_1x4__scalar(benchmark::State& state, const char* net) {
  IGEMMBenchmark(state, xnn_f32_igemm_ukernel_1x4__scalar, 1, 4, 1, 1);
}

static void f32_igemm_2x4__scalar(benchmark::State& state, const char* net) {
  IGEMMBenchmark(state, xnn_f32_igemm_ukernel_2x4__scalar, 2, 4, 1, 1);
}

static void f32_igemm_4x4__scalar(benchmark::State& state, const char* net) {
  IGEMMBenchmark(state, xnn_f32_igemm_ukernel_4x4__scalar, 4, 4, 1, 1);
}

BENCHMARK_CONV(f32_igemm_1x4__scalar)
BENCHMARK_CONV(f32_igemm_2x4__scalar)
BENCHMARK_CONV(f32_igemm_4x4__scalar)


#ifndef XNNPACK_BENCHMARK_NO_MAIN
BENCHMARK_MAIN();
#endif