// Copyright (c) Facebook, Inc. and its affiliates.
// All rights reserved.
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <algorithm>
#include <cfloat>
#include <chrono>
#include <cmath>
#include <cstdint>
#include <functional>
#include <limits>
#include <mutex>
#include <random>
#include <vector>

#include <cpuinfo.h>

#include <benchmark/benchmark.h>
#ifdef BENCHMARK_RUY
#include "tensorflow/lite/experimental/ruy/ruy.h"
#endif  // BENCHMARK_RUY
#include "bench/gemm.h"
#include "bench/utils.h"
#include <xnnpack/AlignedAllocator.h>
#include <xnnpack/common.h>
#include <xnnpack/gemm.h>
#include <xnnpack/pack.h>
#include <xnnpack/packx.h>
#include <xnnpack/params-init.h>
#include <xnnpack/params.h>
#include <xnnpack/ppmm.h>

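// Benchmarks a single f32 GEMM microkernel on an mc x nc x kc problem taken
// from state.range(). mr x nr is the output tile computed per microkernel
// call; kr and sr describe, roughly, how K-dimension elements are unrolled
// and shuffled in the packed-weight layout (xnn_pack_f32_gemm_goi_w is the
// authoritative definition). An optional isa_check skips the benchmark on
// CPUs that lack the required extension.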
static void GEMMBenchmark(benchmark::State& state,
  xnn_f32_gemm_ukernel_function gemm,
  size_t mr, size_t nr, size_t kr, size_t sr,
  benchmark::utils::IsaCheckFunction isa_check = nullptr)
{
  if (!cpuinfo_initialize()) {
    state.SkipWithError("cpuinfo initialization failed");
    return;
  }
  if (isa_check && !isa_check(state)) {
    return;
  }

  const size_t mc = state.range(0);
  const size_t nc = state.range(1);
  const size_t kc = state.range(2);

  const size_t nc_stride = benchmark::utils::RoundUp(nc, nr);
  const size_t kc_stride = benchmark::utils::RoundUp(kc, kr);

  std::random_device random_device;
  auto rng = std::mt19937(random_device());
  auto f32rng = std::bind(std::uniform_real_distribution<float>(), rng);

  std::vector<float> a(mc * kc);
  std::generate(a.begin(), a.end(), std::ref(f32rng));
  std::vector<float> k(nc * kc);
  std::generate(k.begin(), k.end(), std::ref(f32rng));
  std::vector<float> b(nc);
  std::generate(b.begin(), b.end(), std::ref(f32rng));

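  // The packed weight buffer holds nc_stride * kc_stride weights plus
  // nc_stride biases per copy. num_buffers copies of W and C are rotated
  // through so that together they exceed the last-level cache and never stay
  // cache-resident.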
  const size_t w_elements = nc_stride * kc_stride + nc_stride;
  const size_t c_elements = mc * nc;
  const size_t num_buffers = 1 +
    benchmark::utils::DivideRoundUp<size_t>(benchmark::utils::GetMaxCacheSize(),
      sizeof(float) * (w_elements + c_elements));

  std::vector<float, AlignedAllocator<float, 32>> w(w_elements * num_buffers);
  std::fill(w.begin(), w.end(), 0.0f);
  xnn_pack_f32_gemm_goi_w(1 /* groups */, nc, kc, nr, kr, sr, k.data(), b.data(), w.data());
  std::vector<float> c(c_elements * num_buffers);
  std::fill(c.begin(), c.end(), std::nanf(""));

  xnn_f32_output_params output_params =
    xnn_init_f32_output_params(-std::numeric_limits<float>::infinity(), +std::numeric_limits<float>::infinity());

  size_t buffer_index = 0;
  for (auto _ : state) {
    // Use circular buffers (exceeding cache size) and prefetch to control cache state:
    // - A is always in L1 cache (if it fits; otherwise L2, L3, etc.)
    // - W is not in cache (for any cache level)
    // - C is not in cache (for any cache level)
    state.PauseTiming();
    benchmark::utils::PrefetchToL1(a.data(), a.size() * sizeof(float));
    buffer_index = (buffer_index + 1) % num_buffers;
    state.ResumeTiming();

    for (uint32_t m = 0; m < mc; m += mr) {
      const uint32_t mb = std::min(mc - m, mr);
      gemm(
        mb, nc, kc * sizeof(float),
        a.data() + m * kc, kc * sizeof(float),
        w.data() + buffer_index * nc_stride * (kc_stride + 1),
        c.data() + (buffer_index * mc + m) * nc, nc * sizeof(float), nr * sizeof(float),
        &output_params);
    }
  }

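  // FLOPS counts 2 floating-point operations (a multiply and an add) per
  // element of the mc x nc x kc multiply-accumulate space.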
  state.counters["Freq"] = benchmark::utils::GetCurrentCpuFrequency();
  state.counters["FLOPS"] = benchmark::Counter(
    uint64_t(state.iterations()) * 2 * mc * nc * kc, benchmark::Counter::kIsRate);
}

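// Benchmarks an f32 PPMM (pre-packed matrix multiplication) microkernel in
// "unipass" mode: inside the timed region, each mr-row panel of A is packed
// by the packx microkernel immediately before the corresponding ppmm call,
// so packing cost is part of the measurement.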
static void PPMM1PBenchmark(benchmark::State& state,
  xnn_f32_ppmm_ukernel_function ppmm,
  xnn_x32_packx_ukernel_function packx,
  size_t mr, size_t nr,
  benchmark::utils::IsaCheckFunction isa_check = nullptr)
{
  if (!cpuinfo_initialize()) {
    state.SkipWithError("cpuinfo initialization failed");
    return;
  }
  if (isa_check && !isa_check(state)) {
    return;
  }

  const size_t mc = state.range(0);
  const size_t nc = state.range(1);
  const size_t kc = state.range(2);

  const size_t nc_stride = benchmark::utils::RoundUp(nc, nr);

  std::random_device random_device;
  auto rng = std::mt19937(random_device());
  auto f32rng = std::bind(std::uniform_real_distribution<float>(), rng);

  std::vector<float> a(mc * kc);
  std::generate(a.begin(), a.end(), std::ref(f32rng));
  std::vector<float> k(nc * kc);
  std::generate(k.begin(), k.end(), std::ref(f32rng));
  std::vector<float> b(nc);
  std::generate(b.begin(), b.end(), std::ref(f32rng));

  std::vector<uint32_t, AlignedAllocator<uint32_t, 32>> t(mr * kc);

  const size_t w_elements = nc_stride * kc + nc_stride;
  const size_t c_elements = mc * nc;
  const size_t num_buffers = 1 +
    benchmark::utils::DivideRoundUp<size_t>(benchmark::utils::GetMaxCacheSize(),
      sizeof(float) * (w_elements + c_elements));

  std::vector<float, AlignedAllocator<float, 32>> w(w_elements * num_buffers);
  std::fill(w.begin(), w.end(), 0.0f);
  xnn_pack_f32_gemm_goi_w(1 /* groups */, nc, kc, nr, 1 /* kr */, 1 /* sr */, k.data(), b.data(), w.data());
  std::vector<float> c(c_elements * num_buffers);
  std::fill(c.begin(), c.end(), std::nanf(""));

  xnn_f32_output_params output_params =
    xnn_init_f32_output_params(-std::numeric_limits<float>::infinity(), +std::numeric_limits<float>::infinity());

  size_t buffer_index = 0;
  for (auto _ : state) {
    // Use circular buffers (exceeding cache size) and prefetch to control cache state:
    // - A is always in L1 cache (if it fits; otherwise L2, L3, etc.)
    // - W is not in cache (for any cache level)
    // - C is not in cache (for any cache level)
    state.PauseTiming();
    benchmark::utils::PrefetchToL1(a.data(), a.size() * sizeof(float));
    buffer_index = (buffer_index + 1) % num_buffers;
    state.ResumeTiming();

    for (uint32_t m = 0; m < mc; m += mr) {
      const uint32_t mb = std::min(mc - m, mr);
      packx(mb, kc, reinterpret_cast<const uint32_t*>(a.data() + m * kc), kc, t.data());
      ppmm(
        mb, nc, kc * sizeof(float),
        reinterpret_cast<const float*>(t.data()),
        w.data() + nc_stride * buffer_index * (kc + 1),
        c.data() + (mc * buffer_index + m) * nc, nc * sizeof(float), nr * sizeof(float),
        &output_params);
    }
  }

  state.counters["Freq"] = benchmark::utils::GetCurrentCpuFrequency();
  state.counters["FLOPS"] = benchmark::Counter(
    uint64_t(state.iterations()) * 2 * mc * nc * kc, benchmark::Counter::kIsRate);
}

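// Benchmarks an f32 PPMM microkernel in "twopass" mode: the timed region
// first packs all mr-row panels of A into the temporary buffer t (sized for
// the full mc_stride x kc matrix), then issues all ppmm calls, modeling a
// pack-everything-first schedule.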
static void PPMM2PBenchmark(benchmark::State& state,
  xnn_f32_ppmm_ukernel_function ppmm,
  xnn_x32_packx_ukernel_function packx,
  size_t mr, size_t nr,
  benchmark::utils::IsaCheckFunction isa_check = nullptr)
{
  if (!cpuinfo_initialize()) {
    state.SkipWithError("cpuinfo initialization failed");
    return;
  }
  if (isa_check && !isa_check(state)) {
    return;
  }

  const size_t mc = state.range(0);
  const size_t nc = state.range(1);
  const size_t kc = state.range(2);

  const size_t mc_stride = benchmark::utils::RoundUp(mc, mr);
  const size_t nc_stride = benchmark::utils::RoundUp(nc, nr);

  std::random_device random_device;
  auto rng = std::mt19937(random_device());
  auto f32rng = std::bind(std::uniform_real_distribution<float>(), rng);

  std::vector<float> a(mc * kc);
  std::generate(a.begin(), a.end(), std::ref(f32rng));
  std::vector<float> k(nc * kc);
  std::generate(k.begin(), k.end(), std::ref(f32rng));
  std::vector<float> b(nc);
  std::generate(b.begin(), b.end(), std::ref(f32rng));

  std::vector<uint32_t, AlignedAllocator<uint32_t, 32>> t(mc_stride * kc);

  const size_t w_elements = nc_stride * kc + nc_stride;
  const size_t c_elements = mc * nc;
  const size_t num_buffers = 1 +
    benchmark::utils::DivideRoundUp<size_t>(benchmark::utils::GetMaxCacheSize(),
      sizeof(float) * (w_elements + c_elements));

  std::vector<float, AlignedAllocator<float, 32>> w(w_elements * num_buffers);
  std::fill(w.begin(), w.end(), 0.0f);
  xnn_pack_f32_gemm_goi_w(1 /* groups */, nc, kc, nr, 1 /* kr */, 1 /* sr */, k.data(), b.data(), w.data());
  std::vector<float> c(c_elements * num_buffers);
  std::fill(c.begin(), c.end(), std::nanf(""));

  xnn_f32_output_params output_params =
    xnn_init_f32_output_params(-std::numeric_limits<float>::infinity(), +std::numeric_limits<float>::infinity());

  size_t buffer_index = 0;
  for (auto _ : state) {
    // Use circular buffers (exceeding cache size) and prefetch to control cache state:
    // - A is always in L1 cache (if it fits; otherwise L2, L3, etc.)
    // - W is not in cache (for any cache level)
    // - C is not in cache (for any cache level)
    state.PauseTiming();
    benchmark::utils::PrefetchToL1(a.data(), a.size() * sizeof(float));
    buffer_index = (buffer_index + 1) % num_buffers;
    state.ResumeTiming();

    for (uint32_t m = 0; m < mc; m += mr) {
      const uint32_t mb = std::min(mc - m, mr);
      packx(mb, kc, reinterpret_cast<const uint32_t*>(a.data() + m * kc), kc, t.data() + m * kc);
    }
    for (uint32_t m = 0; m < mc; m += mr) {
      const uint32_t mb = std::min(mc - m, mr);
      ppmm(
        mb, nc, kc * sizeof(float),
        reinterpret_cast<const float*>(t.data() + m * kc),
        w.data() + nc_stride * buffer_index * (kc + 1),
        c.data() + (mc * buffer_index + m) * nc, nc * sizeof(float), nr * sizeof(float),
        &output_params);
    }
  }

  state.counters["Freq"] = benchmark::utils::GetCurrentCpuFrequency();
  state.counters["FLOPS"] = benchmark::Counter(
    uint64_t(state.iterations()) * 2 * mc * nc * kc, benchmark::Counter::kIsRate);
}

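// Baseline measurement of the same problem sizes through ruy, the
// matrix-multiplication library used by TensorFlow Lite.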
#ifdef BENCHMARK_RUY
static void RuyBenchmark(benchmark::State& state, uint32_t threads)
{
  std::random_device random_device;
  auto rng = std::mt19937(random_device());
  auto f32rng = std::bind(std::uniform_real_distribution<float>(), rng);

  const size_t mc = state.range(0);
  const size_t nc = state.range(1);
  const size_t kc = state.range(2);

  const size_t num_buffers = 1 +
    benchmark::utils::DivideRoundUp<size_t>(benchmark::utils::GetMaxCacheSize(),
      sizeof(float) * (nc * (mc + kc + 1)));

  std::vector<float> a(mc * kc);
  std::generate(a.begin(), a.end(), std::ref(f32rng));
  std::vector<float> k(num_buffers * nc * kc);
  std::generate(k.begin(), k.end(), std::ref(f32rng));
  std::vector<float> b(num_buffers * nc);
  std::generate(b.begin(), b.end(), std::ref(f32rng));
  std::vector<float> c(num_buffers * nc * mc);
  std::fill(c.begin(), c.end(), std::nanf(""));

  // Note: context must be static to avoid the cost of re-creating it for each benchmark.
  static ruy::Context context;
  context.max_num_threads = threads;

  ruy::Matrix<float> ruy_a;
  ruy::MakeSimpleLayout(nc, kc, ruy::Order::kRowMajor, &ruy_a.layout);
  ruy::Matrix<float> ruy_b;
  ruy::MakeSimpleLayout(kc, mc, ruy::Order::kColMajor, &ruy_b.layout);
  ruy_b.data = a.data();
  ruy::Matrix<float> ruy_c;
  ruy::MakeSimpleLayout(nc, mc, ruy::Order::kColMajor, &ruy_c.layout);

  ruy::BasicSpec<float, float> spec;

  // ruy::Context uses deferred initialization, which affects perceived GEMM performance. Initialization happens during
  // the first GEMM calls, and per Benoit Jacob it takes up to ~250 milliseconds for performance to stabilize.
  // Thus, on the first benchmark, we compute GEMM for 500 milliseconds (to be safe) without recording performance, and
  // keep the ruy::Context object initialized (by being static) between subsequent benchmarks.
  static std::once_flag warmup;
  std::call_once(warmup, [&](){
    auto start = std::chrono::steady_clock::now();
    do {
      ruy_a.data = k.data();
      ruy_c.data = c.data();
      spec.bias = b.data();

      ruy::Mul<ruy::kAllPaths>(ruy_a, ruy_b, spec, &context, &ruy_c);
    } while (std::chrono::duration<double>(std::chrono::steady_clock::now() - start).count() < 0.5);
  });

  size_t buffer_index = 0;
  for (auto _ : state) {
    // Use circular buffers (exceeding cache size) and prefetch to control cache state:
    // - A is always in L1 cache (if it fits; otherwise L2, L3, etc.)
    // - K is not in cache (for any cache level)
    // - B is not in cache (for any cache level)
    // - C is not in cache (for any cache level)
    state.PauseTiming();
    benchmark::utils::PrefetchToL1(a.data(), a.size() * sizeof(float));
    buffer_index = (buffer_index + 1) % num_buffers;
    state.ResumeTiming();

    ruy_a.data = k.data() + buffer_index * nc * kc;
    ruy_c.data = c.data() + buffer_index * mc * nc;
    spec.bias = b.data() + buffer_index * nc;

    ruy::Mul<ruy::kAllPaths>(ruy_a, ruy_b, spec, &context, &ruy_c);
  }

  state.counters["Freq"] = benchmark::utils::GetCurrentCpuFrequency();
  state.counters["FLOPS"] = benchmark::Counter(
    uint64_t(state.iterations()) * 2 * mc * nc * kc, benchmark::Counter::kIsRate);
}

static void ruy_st(benchmark::State& state, const char* net)
{
  RuyBenchmark(state, 1);
}
#endif  // BENCHMARK_RUY


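// The wrappers below adapt each microkernel to the
// (benchmark::State&, const char* net) signature expected by the
// BENCHMARK_GEMM registration macro from bench/gemm.h, grouped by the
// architecture and ISA extensions the microkernel requires.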
#if XNN_ARCH_ARM64 && XNN_ENABLE_ASSEMBLY
  static void f32_gemm_1x12__aarch64_neonfma_cortex_a53(benchmark::State& state, const char* net) {
    GEMMBenchmark(state, xnn_f32_gemm_ukernel_1x12__aarch64_neonfma_cortex_a53, 1, 12, 1, 1);
  }
  static void f32_gemm_1x8__aarch64_neonfma_cortex_a53(benchmark::State& state, const char* net) {
    GEMMBenchmark(state, xnn_f32_gemm_ukernel_1x8__aarch64_neonfma_cortex_a53, 1, 8, 1, 1);
  }
  static void f32_gemm_1x8__aarch64_neonfma_cortex_a57(benchmark::State& state, const char* net) {
    GEMMBenchmark(state, xnn_f32_gemm_ukernel_1x8__aarch64_neonfma_cortex_a57, 1, 8, 1, 1);
  }
  static void f32_gemm_1x8__aarch64_neonfma_cortex_a75(benchmark::State& state, const char* net) {
    GEMMBenchmark(state, xnn_f32_gemm_ukernel_1x8__aarch64_neonfma_cortex_a75, 1, 8, 1, 1);
  }
  static void f32_gemm_4x12__aarch64_neonfma_cortex_a53(benchmark::State& state, const char* net) {
    GEMMBenchmark(state, xnn_f32_gemm_ukernel_4x12__aarch64_neonfma_cortex_a53, 4, 12, 1, 1);
  }
  static void f32_gemm_4x8__aarch64_neonfma_cortex_a53(benchmark::State& state, const char* net) {
    GEMMBenchmark(state, xnn_f32_gemm_ukernel_4x8__aarch64_neonfma_cortex_a53, 4, 8, 1, 1);
  }
  static void f32_gemm_4x8__aarch64_neonfma_cortex_a57(benchmark::State& state, const char* net) {
    GEMMBenchmark(state, xnn_f32_gemm_ukernel_4x8__aarch64_neonfma_cortex_a57, 4, 8, 1, 1);
  }
  static void f32_gemm_4x8__aarch64_neonfma_cortex_a75(benchmark::State& state, const char* net) {
    GEMMBenchmark(state, xnn_f32_gemm_ukernel_4x8__aarch64_neonfma_cortex_a75, 4, 8, 1, 1);
  }
  static void f32_gemm_4x8__aarch64_neonfma_ld64(benchmark::State& state, const char* net) {
    GEMMBenchmark(state, xnn_f32_gemm_ukernel_4x8__aarch64_neonfma_ld64, 4, 8, 1, 1);
  }
  static void f32_gemm_4x8__aarch64_neonfma_ld128(benchmark::State& state, const char* net) {
    GEMMBenchmark(state, xnn_f32_gemm_ukernel_4x8__aarch64_neonfma_ld128, 4, 8, 1, 1);
  }
  static void f32_gemm_5x8__aarch64_neonfma_cortex_a75(benchmark::State& state, const char* net) {
    GEMMBenchmark(state, xnn_f32_gemm_ukernel_5x8__aarch64_neonfma_cortex_a75, 5, 8, 1, 1);
  }
  static void f32_gemm_6x8__aarch64_neonfma_ld64(benchmark::State& state, const char* net) {
    GEMMBenchmark(state, xnn_f32_gemm_ukernel_6x8__aarch64_neonfma_ld64, 6, 8, 1, 1);
  }
  static void f32_gemm_6x8__aarch64_neonfma_ld128(benchmark::State& state, const char* net) {
    GEMMBenchmark(state, xnn_f32_gemm_ukernel_6x8__aarch64_neonfma_ld128, 6, 8, 1, 1);
  }
  static void f32_gemm_6x8__aarch64_neonfma_cortex_a53(benchmark::State& state, const char* net) {
    GEMMBenchmark(state, xnn_f32_gemm_ukernel_6x8__aarch64_neonfma_cortex_a53, 6, 8, 1, 1);
  }
  static void f32_gemm_6x8__aarch64_neonfma_cortex_a57(benchmark::State& state, const char* net) {
    GEMMBenchmark(state, xnn_f32_gemm_ukernel_6x8__aarch64_neonfma_cortex_a57, 6, 8, 1, 1);
  }
  static void f32_gemm_6x8__aarch64_neonfma_cortex_a73(benchmark::State& state, const char* net) {
    GEMMBenchmark(state, xnn_f32_gemm_ukernel_6x8__aarch64_neonfma_cortex_a73, 6, 8, 1, 1);
  }
  static void f32_gemm_6x8__aarch64_neonfma_cortex_a75(benchmark::State& state, const char* net) {
    GEMMBenchmark(state, xnn_f32_gemm_ukernel_6x8__aarch64_neonfma_cortex_a75, 6, 8, 1, 1);
  }
  static void f32_gemm_1x8__neonfma_lane_ld64(benchmark::State& state, const char* net) {
    GEMMBenchmark(state, xnn_f32_gemm_ukernel_1x8__neonfma_lane_ld64, 1, 8, 1, 1);
  }
  static void f32_gemm_4x8__neonfma_lane_ld64(benchmark::State& state, const char* net) {
    GEMMBenchmark(state, xnn_f32_gemm_ukernel_4x8__neonfma_lane_ld64, 4, 8, 1, 1);
  }
  static void f32_gemm_4x8__neonfma_lane_ld128(benchmark::State& state, const char* net) {
    GEMMBenchmark(state, xnn_f32_gemm_ukernel_4x8__neonfma_lane_ld128, 4, 8, 1, 1);
  }
  static void f32_gemm_5x8__neonfma_lane_ld64(benchmark::State& state, const char* net) {
    GEMMBenchmark(state, xnn_f32_gemm_ukernel_5x8__neonfma_lane_ld64, 5, 8, 1, 1);
  }
  static void f32_gemm_6x8__neonfma_lane_ld64(benchmark::State& state, const char* net) {
    GEMMBenchmark(state, xnn_f32_gemm_ukernel_6x8__neonfma_lane_ld64, 6, 8, 1, 1);
  }
  static void f32_gemm_1x8__neonfma_dup_ld64(benchmark::State& state, const char* net) {
    GEMMBenchmark(state, xnn_f32_gemm_ukernel_1x8__neonfma_dup_ld64, 1, 8, 1, 1);
  }
  static void f32_gemm_4x8__neonfma_dup_ld64(benchmark::State& state, const char* net) {
    GEMMBenchmark(state, xnn_f32_gemm_ukernel_4x8__neonfma_dup_ld64, 4, 8, 1, 1);
  }
  static void f32_gemm_4x8__neonfma_dup_ld128(benchmark::State& state, const char* net) {
    GEMMBenchmark(state, xnn_f32_gemm_ukernel_4x8__neonfma_dup_ld128, 4, 8, 1, 1);
  }
  static void f32_gemm_6x8__neonfma_dup_ld64(benchmark::State& state, const char* net) {
    GEMMBenchmark(state, xnn_f32_gemm_ukernel_6x8__neonfma_dup_ld64, 6, 8, 1, 1);
  }
  BENCHMARK_GEMM(f32_gemm_1x12__aarch64_neonfma_cortex_a53)
  BENCHMARK_GEMM(f32_gemm_1x8__aarch64_neonfma_cortex_a53)
  BENCHMARK_GEMM(f32_gemm_1x8__aarch64_neonfma_cortex_a57)
  BENCHMARK_GEMM(f32_gemm_1x8__aarch64_neonfma_cortex_a75)
  BENCHMARK_GEMM(f32_gemm_4x12__aarch64_neonfma_cortex_a53)
  BENCHMARK_GEMM(f32_gemm_4x8__aarch64_neonfma_cortex_a53)
  BENCHMARK_GEMM(f32_gemm_4x8__aarch64_neonfma_cortex_a57)
  BENCHMARK_GEMM(f32_gemm_4x8__aarch64_neonfma_cortex_a75)
  BENCHMARK_GEMM(f32_gemm_4x8__aarch64_neonfma_ld128)
  BENCHMARK_GEMM(f32_gemm_4x8__aarch64_neonfma_ld64)
  BENCHMARK_GEMM(f32_gemm_5x8__aarch64_neonfma_cortex_a75)
  BENCHMARK_GEMM(f32_gemm_6x8__aarch64_neonfma_cortex_a53)
  BENCHMARK_GEMM(f32_gemm_6x8__aarch64_neonfma_cortex_a57)
  BENCHMARK_GEMM(f32_gemm_6x8__aarch64_neonfma_cortex_a73)
  BENCHMARK_GEMM(f32_gemm_6x8__aarch64_neonfma_cortex_a75)
  BENCHMARK_GEMM(f32_gemm_6x8__aarch64_neonfma_ld64)
  BENCHMARK_GEMM(f32_gemm_6x8__aarch64_neonfma_ld128)
  BENCHMARK_GEMM(f32_gemm_1x8__neonfma_lane_ld64)
  BENCHMARK_GEMM(f32_gemm_4x8__neonfma_lane_ld128)
  BENCHMARK_GEMM(f32_gemm_4x8__neonfma_lane_ld64)
  BENCHMARK_GEMM(f32_gemm_5x8__neonfma_lane_ld64)
  BENCHMARK_GEMM(f32_gemm_6x8__neonfma_lane_ld64)
  BENCHMARK_GEMM(f32_gemm_1x8__neonfma_dup_ld64)
  BENCHMARK_GEMM(f32_gemm_4x8__neonfma_dup_ld128)
  BENCHMARK_GEMM(f32_gemm_4x8__neonfma_dup_ld64)
  BENCHMARK_GEMM(f32_gemm_6x8__neonfma_dup_ld64)

#endif  // XNN_ARCH_ARM64 && XNN_ENABLE_ASSEMBLY

#if XNN_ARCH_ARM || XNN_ARCH_ARM64
  static void f32_gemm_1x8__neon_lane_ld64(benchmark::State& state, const char* net) {
    GEMMBenchmark(state, xnn_f32_gemm_ukernel_1x8__neon_lane_ld64, 1, 8, 1, 1, benchmark::utils::CheckNEON);
  }

  static void f32_gemm_4x8__neon_lane_ld64(benchmark::State& state, const char* net) {
    GEMMBenchmark(state, xnn_f32_gemm_ukernel_4x8__neon_lane_ld64, 4, 8, 1, 1, benchmark::utils::CheckNEON);
  }

  static void f32_gemm_4x8__neon_lane_ld128(benchmark::State& state, const char* net) {
    GEMMBenchmark(state, xnn_f32_gemm_ukernel_4x8__neon_lane_ld128, 4, 8, 1, 1, benchmark::utils::CheckNEON);
  }

  static void f32_gemm_5x8__neon_lane_ld64(benchmark::State& state, const char* net) {
    GEMMBenchmark(state, xnn_f32_gemm_ukernel_5x8__neon_lane_ld64, 5, 8, 1, 1, benchmark::utils::CheckNEON);
  }

  static void f32_gemm_6x8__neon_lane_ld64(benchmark::State& state, const char* net) {
    GEMMBenchmark(state, xnn_f32_gemm_ukernel_6x8__neon_lane_ld64, 6, 8, 1, 1, benchmark::utils::CheckNEON);
  }

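  // The "s4" kernels consume weights packed with shuffle factor sr = 4;
  // broadly, they rotate vector lanes between multiply-accumulates instead of
  // broadcasting one scalar at a time. See xnn_pack_f32_gemm_goi_w for the
  // authoritative layout.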
  static void f32_gemm_1x8s4__neon(benchmark::State& state, const char* net) {
    GEMMBenchmark(state, xnn_f32_gemm_ukernel_1x8s4__neon, 1, 8, 1, 4, benchmark::utils::CheckNEON);
  }

  static void f32_gemm_1x8s4__neonfma(benchmark::State& state, const char* net) {
    GEMMBenchmark(state, xnn_f32_gemm_ukernel_1x8s4__neonfma, 1, 8, 1, 4, benchmark::utils::CheckNEONFMA);
  }

  static void f32_gemm_4x8s4__neon(benchmark::State& state, const char* net) {
    GEMMBenchmark(state, xnn_f32_gemm_ukernel_4x8s4__neon, 4, 8, 1, 4, benchmark::utils::CheckNEON);
  }

  static void f32_gemm_4x8s4__neonfma(benchmark::State& state, const char* net) {
    GEMMBenchmark(state, xnn_f32_gemm_ukernel_4x8s4__neonfma, 4, 8, 1, 4, benchmark::utils::CheckNEONFMA);
  }

  static void f32_gemm_6x8s4__neon(benchmark::State& state, const char* net) {
    GEMMBenchmark(state, xnn_f32_gemm_ukernel_6x8s4__neon, 6, 8, 1, 4, benchmark::utils::CheckNEON);
  }

  static void f32_gemm_6x8s4__neonfma(benchmark::State& state, const char* net) {
    GEMMBenchmark(state, xnn_f32_gemm_ukernel_6x8s4__neonfma, 6, 8, 1, 4, benchmark::utils::CheckNEONFMA);
  }

  static void f32_gemm_8x8s4__neon(benchmark::State& state, const char* net) {
    GEMMBenchmark(state, xnn_f32_gemm_ukernel_8x8s4__neon, 8, 8, 1, 4, benchmark::utils::CheckNEON);
  }

  static void f32_gemm_8x8s4__neonfma(benchmark::State& state, const char* net) {
    GEMMBenchmark(state, xnn_f32_gemm_ukernel_8x8s4__neonfma, 8, 8, 1, 4, benchmark::utils::CheckNEONFMA);
  }

  static void f32_ppmm_4x8_unipass__neonfma(benchmark::State& state, const char* net) {
    PPMM1PBenchmark(state, xnn_f32_ppmm_ukernel_4x8__neonfma, xnn_x32_packx_ukernel_4x__neon_st4, 4, 8, benchmark::utils::CheckNEONFMA);
  }

  static void f32_ppmm_4x8_twopass__neonfma(benchmark::State& state, const char* net) {
    PPMM2PBenchmark(state, xnn_f32_ppmm_ukernel_4x8__neonfma, xnn_x32_packx_ukernel_4x__neon_st4, 4, 8, benchmark::utils::CheckNEONFMA);
  }

  BENCHMARK_GEMM(f32_gemm_1x8__neon_lane_ld64)
  BENCHMARK_GEMM(f32_gemm_4x8__neon_lane_ld128)
  BENCHMARK_GEMM(f32_gemm_4x8__neon_lane_ld64)
  BENCHMARK_GEMM(f32_gemm_5x8__neon_lane_ld64)
  BENCHMARK_GEMM(f32_gemm_6x8__neon_lane_ld64)
  BENCHMARK_GEMM(f32_gemm_1x8s4__neon)
  BENCHMARK_GEMM(f32_gemm_1x8s4__neonfma)
  BENCHMARK_GEMM(f32_gemm_4x8s4__neon)
  BENCHMARK_GEMM(f32_gemm_4x8s4__neonfma)
  BENCHMARK_GEMM(f32_gemm_6x8s4__neon)
  BENCHMARK_GEMM(f32_gemm_6x8s4__neonfma)
  BENCHMARK_GEMM(f32_gemm_8x8s4__neon)
  BENCHMARK_GEMM(f32_gemm_8x8s4__neonfma)
  BENCHMARK_GEMM(f32_ppmm_4x8_unipass__neonfma)
  BENCHMARK_GEMM(f32_ppmm_4x8_twopass__neonfma)
#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64

#if XNN_ARCH_X86 || XNN_ARCH_X86_64
  static void f32_gemm_1x8__sse_load1(benchmark::State& state, const char* net) {
    GEMMBenchmark(state, xnn_f32_gemm_ukernel_1x8__sse_load1, 1, 8, 1, 1);
  }

  static void f32_gemm_4x8__sse_load1(benchmark::State& state, const char* net) {
    GEMMBenchmark(state, xnn_f32_gemm_ukernel_4x8__sse_load1, 4, 8, 1, 1);
  }

  static void f32_gemm_1x8__sse_dup(benchmark::State& state, const char* net) {
    GEMMBenchmark(state, xnn_f32_gemm_ukernel_1x8__sse_dup, 1, 8, 1, 1);
  }

  static void f32_gemm_4x8__sse_dup(benchmark::State& state, const char* net) {
    GEMMBenchmark(state, xnn_f32_gemm_ukernel_4x8__sse_dup, 4, 8, 1, 1);
  }

  static void f32_gemm_1x8s4__sse(benchmark::State& state, const char* net) {
    GEMMBenchmark(state, xnn_f32_gemm_ukernel_1x8s4__sse, 1, 8, 1, 4);
  }

  static void f32_gemm_4x8s4__sse(benchmark::State& state, const char* net) {
    GEMMBenchmark(state, xnn_f32_gemm_ukernel_4x8s4__sse, 4, 8, 1, 4);
  }

  static void f32_ppmm_4x8_unipass__sse(benchmark::State& state, const char* net) {
    PPMM1PBenchmark(state, xnn_f32_ppmm_ukernel_4x8__sse, xnn_x32_packx_ukernel_4x__sse, 4, 8);
  }

  static void f32_ppmm_4x8_twopass__sse(benchmark::State& state, const char* net) {
    PPMM2PBenchmark(state, xnn_f32_ppmm_ukernel_4x8__sse, xnn_x32_packx_ukernel_4x__sse, 4, 8);
  }

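  // The SSE kernels above run without an isa_check; the AVX and FMA3 kernels
  // below pass CheckAVX/CheckFMA3 so they are skipped (rather than crash) on
  // CPUs without those extensions.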
  static void f32_gemm_1x8__avx_broadcast(benchmark::State& state, const char* net) {
    GEMMBenchmark(state, xnn_f32_gemm_ukernel_1x8__avx_broadcast, 1, 8, 1, 1, benchmark::utils::CheckAVX);
  }

  static void f32_gemm_4x8__avx_broadcast(benchmark::State& state, const char* net) {
    GEMMBenchmark(state, xnn_f32_gemm_ukernel_4x8__avx_broadcast, 4, 8, 1, 1, benchmark::utils::CheckAVX);
  }

  static void f32_gemm_5x8__avx_broadcast(benchmark::State& state, const char* net) {
    GEMMBenchmark(state, xnn_f32_gemm_ukernel_5x8__avx_broadcast, 5, 8, 1, 1, benchmark::utils::CheckAVX);
  }

  static void f32_gemm_6x8__avx_broadcast(benchmark::State& state, const char* net) {
    GEMMBenchmark(state, xnn_f32_gemm_ukernel_6x8__avx_broadcast, 6, 8, 1, 1, benchmark::utils::CheckAVX);
  }

  static void f32_gemm_7x8__avx_broadcast(benchmark::State& state, const char* net) {
    GEMMBenchmark(state, xnn_f32_gemm_ukernel_7x8__avx_broadcast, 7, 8, 1, 1, benchmark::utils::CheckAVX);
  }

  static void f32_gemm_1x8__fma3_broadcast(benchmark::State& state, const char* net) {
    GEMMBenchmark(state, xnn_f32_gemm_ukernel_1x8__fma3_broadcast, 1, 8, 1, 1, benchmark::utils::CheckFMA3);
  }

  static void f32_gemm_4x8__fma3_broadcast(benchmark::State& state, const char* net) {
    GEMMBenchmark(state, xnn_f32_gemm_ukernel_4x8__fma3_broadcast, 4, 8, 1, 1, benchmark::utils::CheckFMA3);
  }

  static void f32_gemm_5x8__fma3_broadcast(benchmark::State& state, const char* net) {
    GEMMBenchmark(state, xnn_f32_gemm_ukernel_5x8__fma3_broadcast, 5, 8, 1, 1, benchmark::utils::CheckFMA3);
  }

  static void f32_gemm_6x8__fma3_broadcast(benchmark::State& state, const char* net) {
    GEMMBenchmark(state, xnn_f32_gemm_ukernel_6x8__fma3_broadcast, 6, 8, 1, 1, benchmark::utils::CheckFMA3);
  }

  static void f32_gemm_7x8__fma3_broadcast(benchmark::State& state, const char* net) {
    GEMMBenchmark(state, xnn_f32_gemm_ukernel_7x8__fma3_broadcast, 7, 8, 1, 1, benchmark::utils::CheckFMA3);
  }

  static void f32_gemm_8x8__fma3_broadcast(benchmark::State& state, const char* net) {
    GEMMBenchmark(state, xnn_f32_gemm_ukernel_8x8__fma3_broadcast, 8, 8, 1, 1, benchmark::utils::CheckFMA3);
  }

  BENCHMARK_GEMM(f32_gemm_1x8__sse_load1)
  BENCHMARK_GEMM(f32_gemm_4x8__sse_load1)
  BENCHMARK_GEMM(f32_gemm_1x8__sse_dup)
  BENCHMARK_GEMM(f32_gemm_4x8__sse_dup)
  BENCHMARK_GEMM(f32_gemm_1x8s4__sse)
  BENCHMARK_GEMM(f32_gemm_4x8s4__sse)
  BENCHMARK_GEMM(f32_ppmm_4x8_unipass__sse)
  BENCHMARK_GEMM(f32_ppmm_4x8_twopass__sse)
  BENCHMARK_GEMM(f32_gemm_1x8__avx_broadcast)
  BENCHMARK_GEMM(f32_gemm_4x8__avx_broadcast)
  BENCHMARK_GEMM(f32_gemm_5x8__avx_broadcast)
  BENCHMARK_GEMM(f32_gemm_6x8__avx_broadcast)
  BENCHMARK_GEMM(f32_gemm_7x8__avx_broadcast)
  BENCHMARK_GEMM(f32_gemm_1x8__fma3_broadcast)
  BENCHMARK_GEMM(f32_gemm_4x8__fma3_broadcast)
  BENCHMARK_GEMM(f32_gemm_5x8__fma3_broadcast)
  BENCHMARK_GEMM(f32_gemm_6x8__fma3_broadcast)
  BENCHMARK_GEMM(f32_gemm_7x8__fma3_broadcast)
  BENCHMARK_GEMM(f32_gemm_8x8__fma3_broadcast)
#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64

#if !XNN_ARCH_WASM && !XNN_ARCH_ASMJS
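  // psimd kernels are written against a portable SIMD abstraction and are
  // built for every target except WebAssembly/asm.js.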
  static void f32_gemm_4x8__psimd_loadsplat(benchmark::State& state, const char* net) {
    GEMMBenchmark(state, xnn_f32_gemm_ukernel_4x8__psimd_loadsplat, 4, 8, 1, 1);
  }

  static void f32_gemm_6x8__psimd_loadsplat(benchmark::State& state, const char* net) {
    GEMMBenchmark(state, xnn_f32_gemm_ukernel_6x8__psimd_loadsplat, 6, 8, 1, 1);
  }

  static void f32_gemm_4x8__psimd_splat(benchmark::State& state, const char* net) {
    GEMMBenchmark(state, xnn_f32_gemm_ukernel_4x8__psimd_splat, 4, 8, 1, 1);
  }

  static void f32_gemm_6x8__psimd_splat(benchmark::State& state, const char* net) {
    GEMMBenchmark(state, xnn_f32_gemm_ukernel_6x8__psimd_splat, 6, 8, 1, 1);
  }

  static void f32_gemm_4x8s4__psimd(benchmark::State& state, const char* net) {
    GEMMBenchmark(state, xnn_f32_gemm_ukernel_4x8s4__psimd, 4, 8, 1, 4);
  }

  static void f32_gemm_6x8s4__psimd(benchmark::State& state, const char* net) {
    GEMMBenchmark(state, xnn_f32_gemm_ukernel_6x8s4__psimd, 6, 8, 1, 4);
  }

  static void f32_ppmm_4x8_unipass__psimd(benchmark::State& state, const char* net) {
    PPMM1PBenchmark(state, xnn_f32_ppmm_ukernel_4x8__psimd, xnn_x32_packx_ukernel_4x__psimd, 4, 8);
  }

  static void f32_ppmm_4x8_twopass__psimd(benchmark::State& state, const char* net) {
    PPMM2PBenchmark(state, xnn_f32_ppmm_ukernel_4x8__psimd, xnn_x32_packx_ukernel_4x__psimd, 4, 8);
  }

  BENCHMARK_GEMM(f32_gemm_4x8__psimd_loadsplat)
  BENCHMARK_GEMM(f32_gemm_6x8__psimd_loadsplat)
  BENCHMARK_GEMM(f32_gemm_4x8__psimd_splat)
  BENCHMARK_GEMM(f32_gemm_6x8__psimd_splat)
  BENCHMARK_GEMM(f32_gemm_4x8s4__psimd)
  BENCHMARK_GEMM(f32_gemm_6x8s4__psimd)
  BENCHMARK_GEMM(f32_ppmm_4x8_unipass__psimd)
  BENCHMARK_GEMM(f32_ppmm_4x8_twopass__psimd)
#endif  // !XNN_ARCH_WASM && !XNN_ARCH_ASMJS

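// Scalar kernels are plain C fallbacks with no ISA requirements, so they are
// built and registered on every architecture.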
static void f32_gemm_1x4__scalar(benchmark::State& state, const char* net) {
  GEMMBenchmark(state, xnn_f32_gemm_ukernel_1x4__scalar, 1, 4, 1, 1);
}

static void f32_gemm_2x4__scalar(benchmark::State& state, const char* net) {
  GEMMBenchmark(state, xnn_f32_gemm_ukernel_2x4__scalar, 2, 4, 1, 1);
}

static void f32_gemm_4x4__scalar(benchmark::State& state, const char* net) {
  GEMMBenchmark(state, xnn_f32_gemm_ukernel_4x4__scalar, 4, 4, 1, 1);
}

static void f32_ppmm_2x4_unipass__scalar(benchmark::State& state, const char* net) {
  PPMM1PBenchmark(state, xnn_f32_ppmm_ukernel_2x4__scalar, xnn_x32_packx_ukernel_2x__scalar, 2, 4);
}

static void f32_ppmm_4x2_unipass__scalar(benchmark::State& state, const char* net) {
  PPMM1PBenchmark(state, xnn_f32_ppmm_ukernel_4x2__scalar, xnn_x32_packx_ukernel_4x__scalar, 4, 2);
}

static void f32_ppmm_4x4_unipass__scalar(benchmark::State& state, const char* net) {
  PPMM1PBenchmark(state, xnn_f32_ppmm_ukernel_4x4__scalar, xnn_x32_packx_ukernel_4x__scalar, 4, 4);
}

static void f32_ppmm_3x3_unipass__scalar(benchmark::State& state, const char* net) {
  PPMM1PBenchmark(state, xnn_f32_ppmm_ukernel_3x3__scalar, xnn_x32_packx_ukernel_3x__scalar, 3, 3);
}

static void f32_ppmm_2x4_twopass__scalar(benchmark::State& state, const char* net) {
  PPMM2PBenchmark(state, xnn_f32_ppmm_ukernel_2x4__scalar, xnn_x32_packx_ukernel_2x__scalar, 2, 4);
}

static void f32_ppmm_4x2_twopass__scalar(benchmark::State& state, const char* net) {
  PPMM2PBenchmark(state, xnn_f32_ppmm_ukernel_4x2__scalar, xnn_x32_packx_ukernel_4x__scalar, 4, 2);
}

static void f32_ppmm_4x4_twopass__scalar(benchmark::State& state, const char* net) {
  PPMM2PBenchmark(state, xnn_f32_ppmm_ukernel_4x4__scalar, xnn_x32_packx_ukernel_4x__scalar, 4, 4);
}

static void f32_ppmm_3x3_twopass__scalar(benchmark::State& state, const char* net) {
  PPMM2PBenchmark(state, xnn_f32_ppmm_ukernel_3x3__scalar, xnn_x32_packx_ukernel_3x__scalar, 3, 3);
}

BENCHMARK_GEMM(f32_gemm_1x4__scalar)
BENCHMARK_GEMM(f32_gemm_2x4__scalar)
BENCHMARK_GEMM(f32_gemm_4x4__scalar)

BENCHMARK_GEMM(f32_ppmm_2x4_unipass__scalar)
BENCHMARK_GEMM(f32_ppmm_4x2_unipass__scalar)
BENCHMARK_GEMM(f32_ppmm_4x4_unipass__scalar)
BENCHMARK_GEMM(f32_ppmm_3x3_unipass__scalar)

BENCHMARK_GEMM(f32_ppmm_2x4_twopass__scalar)
BENCHMARK_GEMM(f32_ppmm_4x2_twopass__scalar)
BENCHMARK_GEMM(f32_ppmm_4x4_twopass__scalar)
BENCHMARK_GEMM(f32_ppmm_3x3_twopass__scalar)


#ifdef BENCHMARK_RUY
BENCHMARK_GEMM(ruy_st)
#endif  // BENCHMARK_RUY

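// When built as a standalone binary, the standard Google Benchmark flags
// apply, e.g. (the binary name below is build-dependent):
//   ./f32-gemm --benchmark_filter=f32_gemm_4x8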
#ifndef XNNPACK_BENCHMARK_NO_MAIN
BENCHMARK_MAIN();
#endif