Hitoshi Mitake | 827f3b4 | 2009-11-18 00:20:09 +0900 | [diff] [blame] | 1 | /* |
| 2 | * mem-memcpy.c |
| 3 | * |
Ingo Molnar | 13839ec | 2015-10-19 10:04:17 +0200 | [diff] [blame] | 4 | * Simple memcpy() and memset() benchmarks |
Hitoshi Mitake | 827f3b4 | 2009-11-18 00:20:09 +0900 | [diff] [blame] | 5 | * |
| 6 | * Written by Hitoshi Mitake <mitake@dcl.info.waseda.ac.jp> |
| 7 | */ |
Hitoshi Mitake | 827f3b4 | 2009-11-18 00:20:09 +0900 | [diff] [blame] | 8 | |
| 9 | #include "../perf.h" |
| 10 | #include "../util/util.h" |
| 11 | #include "../util/parse-options.h" |
Hitoshi Mitake | 827f3b4 | 2009-11-18 00:20:09 +0900 | [diff] [blame] | 12 | #include "../util/header.h" |
Yann Droneaud | 57480d2 | 2014-06-30 22:28:47 +0200 | [diff] [blame] | 13 | #include "../util/cloexec.h" |
Hitoshi Mitake | 827f3b4 | 2009-11-18 00:20:09 +0900 | [diff] [blame] | 14 | #include "bench.h" |
Hitoshi Mitake | 49ce8fc | 2010-11-25 16:04:52 +0900 | [diff] [blame] | 15 | #include "mem-memcpy-arch.h" |
Rabin Vincent | 5bce1a5 | 2014-12-02 16:50:40 +0100 | [diff] [blame] | 16 | #include "mem-memset-arch.h" |
Hitoshi Mitake | 827f3b4 | 2009-11-18 00:20:09 +0900 | [diff] [blame] | 17 | |
| 18 | #include <stdio.h> |
| 19 | #include <stdlib.h> |
| 20 | #include <string.h> |
| 21 | #include <sys/time.h> |
| 22 | #include <errno.h> |
| 23 | |
/* 1024; unit step for human-readable throughput scaling in print_bps(): */
#define K 1024

/* Defaults for the command-line options declared below: */
static const char *size_str = "1MB";
static const char *routine_str = "all";
static int iterations = 1;
/* Measure in CPU cycles via a perf event instead of gettimeofday(): */
static bool use_cycles;
/* fd of the cycles counter opened by init_cycles(), read by get_cycles(): */
static int cycles_fd;
Hitoshi Mitake | 827f3b4 | 2009-11-18 00:20:09 +0900 | [diff] [blame] | 31 | |
| 32 | static const struct option options[] = { |
Ingo Molnar | a69b4f7 | 2015-10-19 10:04:25 +0200 | [diff] [blame] | 33 | OPT_STRING('l', "size", &size_str, "1MB", |
| 34 | "Specify the size of the memory buffers. " |
Ingo Molnar | 13b1fdc | 2015-10-19 10:04:26 +0200 | [diff] [blame^] | 35 | "Available units: B, KB, MB, GB and TB (case insensitive)"), |
| 36 | |
Ingo Molnar | e815e32 | 2015-10-19 10:04:24 +0200 | [diff] [blame] | 37 | OPT_STRING('r', "routine", &routine_str, "all", |
Ingo Molnar | 13b1fdc | 2015-10-19 10:04:26 +0200 | [diff] [blame^] | 38 | "Specify the routine to run, \"all\" runs all available routines, \"help\" lists them"), |
| 39 | |
Jan Beulich | e3e877e | 2012-01-18 13:29:59 +0000 | [diff] [blame] | 40 | OPT_INTEGER('i', "iterations", &iterations, |
Ingo Molnar | 13b1fdc | 2015-10-19 10:04:26 +0200 | [diff] [blame^] | 41 | "Repeat the function this number of times"), |
| 42 | |
Ingo Molnar | b14f2d3 | 2015-10-19 10:04:23 +0200 | [diff] [blame] | 43 | OPT_BOOLEAN('c', "cycles", &use_cycles, |
| 44 | "Use a cycles event instead of gettimeofday() to measure performance"), |
Ingo Molnar | 13b1fdc | 2015-10-19 10:04:26 +0200 | [diff] [blame^] | 45 | |
Hitoshi Mitake | 827f3b4 | 2009-11-18 00:20:09 +0900 | [diff] [blame] | 46 | OPT_END() |
| 47 | }; |
| 48 | |
/* Signatures of the two families of benchmarked functions: */
typedef void *(*memcpy_t)(void *, const void *, size_t);
typedef void *(*memset_t)(void *, int, size_t);

/*
 * One benchmarkable routine; 'fn' holds either a memcpy or a memset
 * variant — which member is valid depends on the table the entry sits in.
 */
struct routine {
	const char *name;
	const char *desc;
	union {
		memcpy_t memcpy;
		memset_t memset;
	} fn;
};
| 60 | |
/*
 * Table of memcpy() implementations to benchmark, terminated by a NULL name.
 * On x86-64, arch-specific variants are appended via the MEMCPY_FN x-macro
 * include of mem-memcpy-x86-64-asm-def.h.
 */
struct routine memcpy_routines[] = {
	{ .name = "default",
	  .desc = "Default memcpy() provided by glibc",
	  .fn.memcpy = memcpy },

#ifdef HAVE_ARCH_X86_64_SUPPORT
# define MEMCPY_FN(_fn, _name, _desc) {.name = _name, .desc = _desc, .fn.memcpy = _fn},
# include "mem-memcpy-x86-64-asm-def.h"
# undef MEMCPY_FN
#endif

	{ NULL, }
};
| 74 | |
static const char * const bench_mem_memcpy_usage[] = {
	"perf bench mem memcpy <options>",
	NULL
};

/* HW CPU-cycles event attribute, used when -c/--cycles is given: */
static struct perf_event_attr cycle_attr = {
	.type		= PERF_TYPE_HARDWARE,
	.config		= PERF_COUNT_HW_CPU_CYCLES
};
| 84 | |
Ingo Molnar | b14f2d3 | 2015-10-19 10:04:23 +0200 | [diff] [blame] | 85 | static void init_cycles(void) |
Hitoshi Mitake | 827f3b4 | 2009-11-18 00:20:09 +0900 | [diff] [blame] | 86 | { |
Ingo Molnar | b14f2d3 | 2015-10-19 10:04:23 +0200 | [diff] [blame] | 87 | cycles_fd = sys_perf_event_open(&cycle_attr, getpid(), -1, -1, perf_event_open_cloexec_flag()); |
Hitoshi Mitake | 12eac0b | 2009-11-20 12:37:17 +0900 | [diff] [blame] | 88 | |
Ingo Molnar | b14f2d3 | 2015-10-19 10:04:23 +0200 | [diff] [blame] | 89 | if (cycles_fd < 0 && errno == ENOSYS) |
Hitoshi Mitake | 12eac0b | 2009-11-20 12:37:17 +0900 | [diff] [blame] | 90 | die("No CONFIG_PERF_EVENTS=y kernel support configured?\n"); |
| 91 | else |
Ingo Molnar | b14f2d3 | 2015-10-19 10:04:23 +0200 | [diff] [blame] | 92 | BUG_ON(cycles_fd < 0); |
Hitoshi Mitake | 827f3b4 | 2009-11-18 00:20:09 +0900 | [diff] [blame] | 93 | } |
| 94 | |
Ingo Molnar | b14f2d3 | 2015-10-19 10:04:23 +0200 | [diff] [blame] | 95 | static u64 get_cycles(void) |
Hitoshi Mitake | 827f3b4 | 2009-11-18 00:20:09 +0900 | [diff] [blame] | 96 | { |
| 97 | int ret; |
| 98 | u64 clk; |
| 99 | |
Ingo Molnar | b14f2d3 | 2015-10-19 10:04:23 +0200 | [diff] [blame] | 100 | ret = read(cycles_fd, &clk, sizeof(u64)); |
Hitoshi Mitake | 827f3b4 | 2009-11-18 00:20:09 +0900 | [diff] [blame] | 101 | BUG_ON(ret != sizeof(u64)); |
| 102 | |
| 103 | return clk; |
| 104 | } |
| 105 | |
/* Convert a timeval into seconds, expressed as a double. */
static double timeval2double(struct timeval *ts)
{
	double secs  = (double)ts->tv_sec;
	double usecs = (double)ts->tv_usec;

	return secs + usecs / 1000000.0;
}
| 110 | |
/*
 * Print a bytes/sec throughput value 'x' (a double) with a human-readable
 * unit, scaling down by powers of K (1024) to KB/MB/GB as appropriate.
 *
 * Fix: the KB branch's format string was " %14lfd KB/sec\n" — the stray
 * literal 'd' after the %14lf conversion printed e.g. "512.0d KB/sec".
 */
#define print_bps(x) do {						\
		if (x < K)						\
			printf(" %14lf bytes/sec\n", x);		\
		else if (x < K * K)					\
			printf(" %14lf KB/sec\n", x / K);		\
		else if (x < K * K * K)					\
			printf(" %14lf MB/sec\n", x / K / K);		\
		else							\
			printf(" %14lf GB/sec\n", x / K / K / K);	\
	} while (0)
| 121 | |
/*
 * Per-benchmark (memcpy vs. memset) description: the routine table,
 * the two measurement methods (cycles vs. wall-clock) and the usage text.
 */
struct bench_mem_info {
	const struct routine *routines;
	u64 (*do_cycles)(const struct routine *r, size_t size);
	double (*do_gettimeofday)(const struct routine *r, size_t size);
	const char *const *usage;
};
| 128 | |
Ingo Molnar | a69b4f7 | 2015-10-19 10:04:25 +0200 | [diff] [blame] | 129 | static void __bench_mem_routine(struct bench_mem_info *info, int r_idx, size_t size, double size_total) |
Hitoshi Mitake | 827f3b4 | 2009-11-18 00:20:09 +0900 | [diff] [blame] | 130 | { |
Borislav Petkov | 515e23f | 2015-02-26 18:51:37 +0100 | [diff] [blame] | 131 | const struct routine *r = &info->routines[r_idx]; |
Ingo Molnar | 6db175c | 2015-10-19 10:04:21 +0200 | [diff] [blame] | 132 | double result_bps = 0.0; |
Ingo Molnar | b14f2d3 | 2015-10-19 10:04:23 +0200 | [diff] [blame] | 133 | u64 result_cycles = 0; |
Hitoshi Mitake | 49ce8fc | 2010-11-25 16:04:52 +0900 | [diff] [blame] | 134 | |
Ingo Molnar | 13b1fdc | 2015-10-19 10:04:26 +0200 | [diff] [blame^] | 135 | printf("# Routine '%s' (%s)\n", r->name, r->desc); |
Hitoshi Mitake | 827f3b4 | 2009-11-18 00:20:09 +0900 | [diff] [blame] | 136 | |
Hitoshi Mitake | 49ce8fc | 2010-11-25 16:04:52 +0900 | [diff] [blame] | 137 | if (bench_format == BENCH_FORMAT_DEFAULT) |
Ingo Molnar | 13b1fdc | 2015-10-19 10:04:26 +0200 | [diff] [blame^] | 138 | printf("# Copying %s bytes ...\n\n", size_str); |
Hitoshi Mitake | 12eac0b | 2009-11-20 12:37:17 +0900 | [diff] [blame] | 139 | |
Ingo Molnar | b14f2d3 | 2015-10-19 10:04:23 +0200 | [diff] [blame] | 140 | if (use_cycles) { |
Ingo Molnar | a69b4f7 | 2015-10-19 10:04:25 +0200 | [diff] [blame] | 141 | result_cycles = info->do_cycles(r, size); |
Hitoshi Mitake | 12eac0b | 2009-11-20 12:37:17 +0900 | [diff] [blame] | 142 | } else { |
Ingo Molnar | a69b4f7 | 2015-10-19 10:04:25 +0200 | [diff] [blame] | 143 | result_bps = info->do_gettimeofday(r, size); |
Hitoshi Mitake | 827f3b4 | 2009-11-18 00:20:09 +0900 | [diff] [blame] | 144 | } |
| 145 | |
| 146 | switch (bench_format) { |
| 147 | case BENCH_FORMAT_DEFAULT: |
Ingo Molnar | b14f2d3 | 2015-10-19 10:04:23 +0200 | [diff] [blame] | 148 | if (use_cycles) { |
Ingo Molnar | 13b1fdc | 2015-10-19 10:04:26 +0200 | [diff] [blame^] | 149 | printf(" %14lf cycles/byte\n", (double)result_cycles/size_total); |
Hitoshi Mitake | 49ce8fc | 2010-11-25 16:04:52 +0900 | [diff] [blame] | 150 | } else { |
Ingo Molnar | 6db175c | 2015-10-19 10:04:21 +0200 | [diff] [blame] | 151 | print_bps(result_bps); |
| 152 | } |
| 153 | break; |
Hitoshi Mitake | 49ce8fc | 2010-11-25 16:04:52 +0900 | [diff] [blame] | 154 | |
Hitoshi Mitake | 827f3b4 | 2009-11-18 00:20:09 +0900 | [diff] [blame] | 155 | case BENCH_FORMAT_SIMPLE: |
Ingo Molnar | b14f2d3 | 2015-10-19 10:04:23 +0200 | [diff] [blame] | 156 | if (use_cycles) { |
Ingo Molnar | a69b4f7 | 2015-10-19 10:04:25 +0200 | [diff] [blame] | 157 | printf("%lf\n", (double)result_cycles/size_total); |
Hitoshi Mitake | 49ce8fc | 2010-11-25 16:04:52 +0900 | [diff] [blame] | 158 | } else { |
Ingo Molnar | 6db175c | 2015-10-19 10:04:21 +0200 | [diff] [blame] | 159 | printf("%lf\n", result_bps); |
Hitoshi Mitake | 49ce8fc | 2010-11-25 16:04:52 +0900 | [diff] [blame] | 160 | } |
Hitoshi Mitake | 827f3b4 | 2009-11-18 00:20:09 +0900 | [diff] [blame] | 161 | break; |
Ingo Molnar | 6db175c | 2015-10-19 10:04:21 +0200 | [diff] [blame] | 162 | |
Hitoshi Mitake | 827f3b4 | 2009-11-18 00:20:09 +0900 | [diff] [blame] | 163 | default: |
Ingo Molnar | 6db175c | 2015-10-19 10:04:21 +0200 | [diff] [blame] | 164 | BUG_ON(1); |
Hitoshi Mitake | 827f3b4 | 2009-11-18 00:20:09 +0900 | [diff] [blame] | 165 | break; |
| 166 | } |
Borislav Petkov | 515e23f | 2015-02-26 18:51:37 +0100 | [diff] [blame] | 167 | } |
| 168 | |
/*
 * Common driver for the memcpy and memset benchmarks: parse options,
 * validate the size, then run either all routines or the one selected
 * with -r. Returns 0 on success, 1 on bad size / unknown routine.
 */
static int bench_mem_common(int argc, const char **argv, struct bench_mem_info *info)
{
	int i;
	size_t size;
	double size_total;

	argc = parse_options(argc, argv, options, info->usage, 0);

	if (use_cycles)
		init_cycles();

	size = (size_t)perf_atoll((char *)size_str);
	size_total = (double)size * iterations;

	/* perf_atoll() signals a parse failure with a negative value: */
	if ((s64)size <= 0) {
		fprintf(stderr, "Invalid size:%s\n", size_str);
		return 1;
	}

	/*
	 * NOTE(review): strncmp() matches any string starting with "all"
	 * (e.g. "allx") — confirm the prefix match is intended.
	 */
	if (!strncmp(routine_str, "all", 3)) {
		for (i = 0; info->routines[i].name; i++)
			__bench_mem_routine(info, i, size, size_total);
		return 0;
	}

	/* Look up the routine named by -r in the NULL-terminated table: */
	for (i = 0; i < info->routines[i].name; i++) {
		if (!strcmp(info->routines[i].name, routine_str))
			break;
	}
	if (!info->routines[i].name) {
		/* "help"/"h" asks for the listing; anything else is an error: */
		if (strcmp(routine_str, "help") && strcmp(routine_str, "h"))
			printf("Unknown routine: %s\n", routine_str);
		printf("Available routines:\n");
		for (i = 0; info->routines[i].name; i++) {
			printf("\t%s ... %s\n",
			       info->routines[i].name, info->routines[i].desc);
		}
		return 1;
	}

	__bench_mem_routine(info, i, size, size_total);

	return 0;
}
Rabin Vincent | 308197b | 2014-12-02 16:50:39 +0100 | [diff] [blame] | 213 | |
/*
 * Allocate the destination and source buffers for the memcpy benchmark
 * (dies on failure) and touch every source page up front.
 */
static void memcpy_alloc_mem(void **dst, void **src, size_t size)
{
	void **bufs[2] = { dst, src };
	int i;

	for (i = 0; i < 2; i++) {
		*bufs[i] = zalloc(size);
		if (!*bufs[i])
			die("memory allocation failed - maybe size is too large?\n");
	}

	/* Make sure to always prefault zero pages even if MMAP_THRESH is crossed: */
	memset(*src, 0, size);
}
| 227 | |
Ingo Molnar | a69b4f7 | 2015-10-19 10:04:25 +0200 | [diff] [blame] | 228 | static u64 do_memcpy_cycles(const struct routine *r, size_t size) |
Rabin Vincent | 308197b | 2014-12-02 16:50:39 +0100 | [diff] [blame] | 229 | { |
| 230 | u64 cycle_start = 0ULL, cycle_end = 0ULL; |
| 231 | void *src = NULL, *dst = NULL; |
| 232 | memcpy_t fn = r->fn.memcpy; |
| 233 | int i; |
| 234 | |
Ingo Molnar | a69b4f7 | 2015-10-19 10:04:25 +0200 | [diff] [blame] | 235 | memcpy_alloc_mem(&dst, &src, size); |
Rabin Vincent | 308197b | 2014-12-02 16:50:39 +0100 | [diff] [blame] | 236 | |
Ingo Molnar | 6db175c | 2015-10-19 10:04:21 +0200 | [diff] [blame] | 237 | /* |
| 238 | * We prefault the freshly allocated memory range here, |
| 239 | * to not measure page fault overhead: |
| 240 | */ |
Ingo Molnar | a69b4f7 | 2015-10-19 10:04:25 +0200 | [diff] [blame] | 241 | fn(dst, src, size); |
Rabin Vincent | 308197b | 2014-12-02 16:50:39 +0100 | [diff] [blame] | 242 | |
Ingo Molnar | b14f2d3 | 2015-10-19 10:04:23 +0200 | [diff] [blame] | 243 | cycle_start = get_cycles(); |
Rabin Vincent | 308197b | 2014-12-02 16:50:39 +0100 | [diff] [blame] | 244 | for (i = 0; i < iterations; ++i) |
Ingo Molnar | a69b4f7 | 2015-10-19 10:04:25 +0200 | [diff] [blame] | 245 | fn(dst, src, size); |
Ingo Molnar | b14f2d3 | 2015-10-19 10:04:23 +0200 | [diff] [blame] | 246 | cycle_end = get_cycles(); |
Rabin Vincent | 308197b | 2014-12-02 16:50:39 +0100 | [diff] [blame] | 247 | |
| 248 | free(src); |
| 249 | free(dst); |
| 250 | return cycle_end - cycle_start; |
| 251 | } |
| 252 | |
Ingo Molnar | a69b4f7 | 2015-10-19 10:04:25 +0200 | [diff] [blame] | 253 | static double do_memcpy_gettimeofday(const struct routine *r, size_t size) |
Rabin Vincent | 308197b | 2014-12-02 16:50:39 +0100 | [diff] [blame] | 254 | { |
| 255 | struct timeval tv_start, tv_end, tv_diff; |
| 256 | memcpy_t fn = r->fn.memcpy; |
| 257 | void *src = NULL, *dst = NULL; |
| 258 | int i; |
| 259 | |
Ingo Molnar | a69b4f7 | 2015-10-19 10:04:25 +0200 | [diff] [blame] | 260 | memcpy_alloc_mem(&dst, &src, size); |
Rabin Vincent | 308197b | 2014-12-02 16:50:39 +0100 | [diff] [blame] | 261 | |
Ingo Molnar | 6db175c | 2015-10-19 10:04:21 +0200 | [diff] [blame] | 262 | /* |
| 263 | * We prefault the freshly allocated memory range here, |
| 264 | * to not measure page fault overhead: |
| 265 | */ |
Ingo Molnar | a69b4f7 | 2015-10-19 10:04:25 +0200 | [diff] [blame] | 266 | fn(dst, src, size); |
Rabin Vincent | 308197b | 2014-12-02 16:50:39 +0100 | [diff] [blame] | 267 | |
| 268 | BUG_ON(gettimeofday(&tv_start, NULL)); |
| 269 | for (i = 0; i < iterations; ++i) |
Ingo Molnar | a69b4f7 | 2015-10-19 10:04:25 +0200 | [diff] [blame] | 270 | fn(dst, src, size); |
Rabin Vincent | 308197b | 2014-12-02 16:50:39 +0100 | [diff] [blame] | 271 | BUG_ON(gettimeofday(&tv_end, NULL)); |
| 272 | |
| 273 | timersub(&tv_end, &tv_start, &tv_diff); |
| 274 | |
| 275 | free(src); |
| 276 | free(dst); |
Ingo Molnar | 6db175c | 2015-10-19 10:04:21 +0200 | [diff] [blame] | 277 | |
Ingo Molnar | a69b4f7 | 2015-10-19 10:04:25 +0200 | [diff] [blame] | 278 | return (double)(((double)size * iterations) / timeval2double(&tv_diff)); |
Rabin Vincent | 308197b | 2014-12-02 16:50:39 +0100 | [diff] [blame] | 279 | } |
| 280 | |
/*
 * Entry point for 'perf bench mem memcpy': wires the memcpy routine table
 * and measurement callbacks into the common mem-bench driver.
 */
int bench_mem_memcpy(int argc, const char **argv, const char *prefix __maybe_unused)
{
	struct bench_mem_info info = {
		.routines		= memcpy_routines,
		.do_cycles		= do_memcpy_cycles,
		.do_gettimeofday	= do_memcpy_gettimeofday,
		.usage			= bench_mem_memcpy_usage,
	};

	return bench_mem_common(argc, argv, &info);
}
Rabin Vincent | 5bce1a5 | 2014-12-02 16:50:40 +0100 | [diff] [blame] | 292 | |
/* Allocate the destination buffer for the memset benchmark; dies on failure. */
static void memset_alloc_mem(void **dst, size_t size)
{
	void *buf = zalloc(size);

	if (!buf)
		die("memory allocation failed - maybe size is too large?\n");

	*dst = buf;
}
| 299 | |
Ingo Molnar | a69b4f7 | 2015-10-19 10:04:25 +0200 | [diff] [blame] | 300 | static u64 do_memset_cycles(const struct routine *r, size_t size) |
Rabin Vincent | 5bce1a5 | 2014-12-02 16:50:40 +0100 | [diff] [blame] | 301 | { |
| 302 | u64 cycle_start = 0ULL, cycle_end = 0ULL; |
| 303 | memset_t fn = r->fn.memset; |
| 304 | void *dst = NULL; |
| 305 | int i; |
| 306 | |
Ingo Molnar | a69b4f7 | 2015-10-19 10:04:25 +0200 | [diff] [blame] | 307 | memset_alloc_mem(&dst, size); |
Rabin Vincent | 5bce1a5 | 2014-12-02 16:50:40 +0100 | [diff] [blame] | 308 | |
Ingo Molnar | 6db175c | 2015-10-19 10:04:21 +0200 | [diff] [blame] | 309 | /* |
| 310 | * We prefault the freshly allocated memory range here, |
| 311 | * to not measure page fault overhead: |
| 312 | */ |
Ingo Molnar | a69b4f7 | 2015-10-19 10:04:25 +0200 | [diff] [blame] | 313 | fn(dst, -1, size); |
Rabin Vincent | 5bce1a5 | 2014-12-02 16:50:40 +0100 | [diff] [blame] | 314 | |
Ingo Molnar | b14f2d3 | 2015-10-19 10:04:23 +0200 | [diff] [blame] | 315 | cycle_start = get_cycles(); |
Rabin Vincent | 5bce1a5 | 2014-12-02 16:50:40 +0100 | [diff] [blame] | 316 | for (i = 0; i < iterations; ++i) |
Ingo Molnar | a69b4f7 | 2015-10-19 10:04:25 +0200 | [diff] [blame] | 317 | fn(dst, i, size); |
Ingo Molnar | b14f2d3 | 2015-10-19 10:04:23 +0200 | [diff] [blame] | 318 | cycle_end = get_cycles(); |
Rabin Vincent | 5bce1a5 | 2014-12-02 16:50:40 +0100 | [diff] [blame] | 319 | |
| 320 | free(dst); |
| 321 | return cycle_end - cycle_start; |
| 322 | } |
| 323 | |
Ingo Molnar | a69b4f7 | 2015-10-19 10:04:25 +0200 | [diff] [blame] | 324 | static double do_memset_gettimeofday(const struct routine *r, size_t size) |
Rabin Vincent | 5bce1a5 | 2014-12-02 16:50:40 +0100 | [diff] [blame] | 325 | { |
| 326 | struct timeval tv_start, tv_end, tv_diff; |
| 327 | memset_t fn = r->fn.memset; |
| 328 | void *dst = NULL; |
| 329 | int i; |
| 330 | |
Ingo Molnar | a69b4f7 | 2015-10-19 10:04:25 +0200 | [diff] [blame] | 331 | memset_alloc_mem(&dst, size); |
Rabin Vincent | 5bce1a5 | 2014-12-02 16:50:40 +0100 | [diff] [blame] | 332 | |
Ingo Molnar | 6db175c | 2015-10-19 10:04:21 +0200 | [diff] [blame] | 333 | /* |
| 334 | * We prefault the freshly allocated memory range here, |
| 335 | * to not measure page fault overhead: |
| 336 | */ |
Ingo Molnar | a69b4f7 | 2015-10-19 10:04:25 +0200 | [diff] [blame] | 337 | fn(dst, -1, size); |
Rabin Vincent | 5bce1a5 | 2014-12-02 16:50:40 +0100 | [diff] [blame] | 338 | |
| 339 | BUG_ON(gettimeofday(&tv_start, NULL)); |
| 340 | for (i = 0; i < iterations; ++i) |
Ingo Molnar | a69b4f7 | 2015-10-19 10:04:25 +0200 | [diff] [blame] | 341 | fn(dst, i, size); |
Rabin Vincent | 5bce1a5 | 2014-12-02 16:50:40 +0100 | [diff] [blame] | 342 | BUG_ON(gettimeofday(&tv_end, NULL)); |
| 343 | |
| 344 | timersub(&tv_end, &tv_start, &tv_diff); |
| 345 | |
| 346 | free(dst); |
Ingo Molnar | a69b4f7 | 2015-10-19 10:04:25 +0200 | [diff] [blame] | 347 | return (double)(((double)size * iterations) / timeval2double(&tv_diff)); |
Rabin Vincent | 5bce1a5 | 2014-12-02 16:50:40 +0100 | [diff] [blame] | 348 | } |
| 349 | |
static const char * const bench_mem_memset_usage[] = {
	"perf bench mem memset <options>",
	NULL
};

/*
 * Table of memset() implementations to benchmark, terminated by a NULL name.
 * On x86-64, arch-specific variants are appended via the MEMSET_FN x-macro
 * include of mem-memset-x86-64-asm-def.h.
 */
static const struct routine memset_routines[] = {
	{ .name = "default",
	  .desc = "Default memset() provided by glibc",
	  .fn.memset = memset },

#ifdef HAVE_ARCH_X86_64_SUPPORT
# define MEMSET_FN(_fn, _name, _desc) { .name = _name, .desc = _desc, .fn.memset = _fn },
# include "mem-memset-x86-64-asm-def.h"
# undef MEMSET_FN
#endif

	{ NULL, }
};
| 368 | |
/*
 * Entry point for 'perf bench mem memset': wires the memset routine table
 * and measurement callbacks into the common mem-bench driver.
 */
int bench_mem_memset(int argc, const char **argv, const char *prefix __maybe_unused)
{
	struct bench_mem_info info = {
		.routines		= memset_routines,
		.do_cycles		= do_memset_cycles,
		.do_gettimeofday	= do_memset_gettimeofday,
		.usage			= bench_mem_memset_usage,
	};

	return bench_mem_common(argc, argv, &info);
}