// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <algorithm>
#include <cfloat>
#include <chrono>
#include <cmath>
#include <functional>
#include <limits>
#include <mutex>
#include <random>
#include <vector>

#include <cpuinfo.h>

#include <benchmark/benchmark.h>
#include "bench/gemm.h"
#include "bench/utils.h"
#include <xnnpack/AlignedAllocator.h>
#include <xnnpack/common.h>
#include <xnnpack/gemm.h>
#include <xnnpack/pack.h>
#include <xnnpack/params-init.h>
#include <xnnpack/params.h>


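// Benchmarks a single QS8 GEMM microkernel on an mc x nc x kc problem,
// tiling M in steps of mr and N in steps of nr.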
static void GEMMBenchmark(benchmark::State& state,
  xnn_qs8_gemm_ukernel_function gemm,
  size_t mr, size_t nr, size_t kr, size_t sr)
{
  if (!cpuinfo_initialize()) {
    state.SkipWithError("cpuinfo initialization failed");
    return;
  }

  const size_t mc = state.range(0);
  const size_t nc = state.range(1);
  const size_t kc = state.range(2);

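  // Round N and K up to the microkernel's tile boundaries so the packed
  // weights cover whole nr x kr tiles.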
  const size_t nc_stride = benchmark::utils::RoundUp(nc, nr);
  const size_t kc_stride = benchmark::utils::RoundUp(kc, kr);

  std::random_device random_device;
  auto rng = std::mt19937(random_device());
  auto s32rng = std::bind(std::uniform_int_distribution<int32_t>(-10000, 10000), rng);
  // Note: std::uniform_int_distribution is not defined for int8_t, so draw
  // int32_t values in the int8_t range and narrow on assignment.
  auto s8rng = std::bind(
    std::uniform_int_distribution<int32_t>(std::numeric_limits<int8_t>::min(), std::numeric_limits<int8_t>::max()), rng);

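  // a: activations (mc x kc), k: weights (nc x kc, OI layout), b: per-channel int32 biases.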
  std::vector<int8_t> a(mc * kc);
  std::generate(a.begin(), a.end(), std::ref(s8rng));
  std::vector<int8_t> k(nc * kc);
  std::generate(k.begin(), k.end(), std::ref(s8rng));
  std::vector<int32_t> b(nc);
  std::generate(b.begin(), b.end(), std::ref(s32rng));

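  // Packed weights hold the int8 weights plus one int32 bias per output
  // channel; the size is expressed in int8 elements.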
  const size_t w_elements = kc_stride * nc_stride + nc_stride * sizeof(int32_t) / sizeof(int8_t);
  const size_t c_elements = mc * nc;
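  // Allocate enough copies of the weights and output to exceed the last-level
  // cache, so rotating through them keeps W and C out of cache.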
  const size_t num_buffers = 1 +
    benchmark::utils::DivideRoundUp<size_t>(benchmark::utils::GetMaxCacheSize(),
      sizeof(int8_t) * (w_elements + c_elements));

  std::vector<int8_t, AlignedAllocator<int8_t, 32>> w(w_elements * num_buffers);
  std::fill(w.begin(), w.end(), 0);
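  // Pack weights and biases into the layout the microkernel expects;
  // 127 is the input (activation) zero point.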
  const xnn_qs8_packing_params packing_params = { 127 };
  xnn_pack_qs8_gemm_goi_w(1 /* groups */, nc, kc, nr, kr, sr, k.data(), b.data(), w.data(), &packing_params);
  std::vector<int8_t> c(c_elements * num_buffers);
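  // Fill the output with a 0xA5 canary so overwritten bytes are distinguishable.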
  std::fill(c.begin(), c.end(), 0xA5);

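  // Quantization parameters: scale 0.75, output zero point 127,
  // output range [-127, 126].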
  union xnn_qs8_gemm_params quantization_params = xnn_init_qs8_gemm_params(0.75f, 127, -127, 126);

  size_t buffer_index = 0;
  for (auto _ : state) {
    // Use circular buffers (exceeding cache size) and prefetch to control cache state:
    // - A is always in L1 cache (if it fits; otherwise L2, L3, etc.)
    // - W is not in cache (for any cache level)
    // - C is not in cache (for any cache level)
    state.PauseTiming();
    benchmark::utils::PrefetchToL1(a.data(), a.size() * sizeof(int8_t));
    buffer_index = (buffer_index + 1) % num_buffers;
    state.ResumeTiming();

    for (uint32_t m = 0; m < mc; m += mr) {
      const uint32_t mb = std::min(mc - m, mr);
      for (uint32_t n = 0; n < nc; n += nr) {
        const uint32_t nb = std::min(nc - n, nr);
        gemm(
          mb, nb, kc * sizeof(int8_t),
          a.data() + m * kc, kc * sizeof(int8_t),
          w.data() + (w_elements * buffer_index + n * (kc_stride + sizeof(int32_t))) / sizeof(int8_t),
          c.data() + (mc * buffer_index + m) * nc + n, nc * sizeof(int8_t), nr * sizeof(int8_t),
          &quantization_params);
      }
    }
  }

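  // Report the measured CPU frequency and the int8 operation rate
  // (2 ops per multiply-accumulate).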
  state.counters["Freq"] = benchmark::utils::GetCurrentCpuFrequency();
  state.counters["OPS"] = benchmark::Counter(
    uint64_t(state.iterations()) * 2 * mc * nc * kc, benchmark::Counter::kIsRate);
}


#if XNN_ARCH_X86 || XNN_ARCH_X86_64
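// Microkernel names encode the tile shape: e.g. 4x4c2 means mr=4, nr=4, kr=2,
// matching the mr/nr/kr arguments passed to GEMMBenchmark below.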
static void qs8_gemm_4x4c2__sse2_ld64(benchmark::State& state, const char* net) {
  GEMMBenchmark(state, xnn_qs8_gemm_minmax_ukernel_4x4c2__sse2_ld64, 4, 4, 2, 1);
}
static void qs8_gemm_4x4c2__ssse3_ld64(benchmark::State& state, const char* net) {
  GEMMBenchmark(state, xnn_qs8_gemm_minmax_ukernel_4x4c2__ssse3_ld64, 4, 4, 2, 1);
}
static void qs8_gemm_4x4c2__sse41_ld64(benchmark::State& state, const char* net) {
  GEMMBenchmark(state, xnn_qs8_gemm_minmax_ukernel_4x4c2__sse41_ld64, 4, 4, 2, 1);
}

static void qs8_gemm_2x4c8__sse2_ld64(benchmark::State& state, const char* net) {
  GEMMBenchmark(state, xnn_qs8_gemm_minmax_ukernel_2x4c8__sse2_ld64, 2, 4, 8, 1);
}
static void qs8_gemm_2x4c8__ssse3_ld64(benchmark::State& state, const char* net) {
  GEMMBenchmark(state, xnn_qs8_gemm_minmax_ukernel_2x4c8__ssse3_ld64, 2, 4, 8, 1);
}
static void qs8_gemm_2x4c8__sse41_ld64(benchmark::State& state, const char* net) {
  GEMMBenchmark(state, xnn_qs8_gemm_minmax_ukernel_2x4c8__sse41_ld64, 2, 4, 8, 1);
}

BENCHMARK_GEMM(qs8_gemm_4x4c2__sse2_ld64)
BENCHMARK_GEMM(qs8_gemm_4x4c2__ssse3_ld64)
BENCHMARK_GEMM(qs8_gemm_4x4c2__sse41_ld64)

BENCHMARK_GEMM(qs8_gemm_2x4c8__sse2_ld64)
BENCHMARK_GEMM(qs8_gemm_2x4c8__ssse3_ld64)
BENCHMARK_GEMM(qs8_gemm_2x4c8__sse41_ld64)
#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64

#ifndef XNNPACK_BENCHMARK_NO_MAIN
BENCHMARK_MAIN();
#endif