#ifndef FIO_STAT_H
#define FIO_STAT_H

#include "iolog.h"

struct group_run_stats {
	uint64_t max_run[DDIR_RWDIR_CNT], min_run[DDIR_RWDIR_CNT];
	uint64_t max_bw[DDIR_RWDIR_CNT], min_bw[DDIR_RWDIR_CNT];
	uint64_t io_kb[DDIR_RWDIR_CNT];
	uint64_t agg[DDIR_RWDIR_CNT];
	uint32_t kb_base;
	uint32_t unit_base;
	uint32_t groupid;
	uint32_t unified_rw_rep;
} __attribute__((packed));

/*
 * How many depth levels to log
 */
#define FIO_IO_U_MAP_NR		7
#define FIO_IO_U_LAT_U_NR	10
#define FIO_IO_U_LAT_M_NR	12

/*
 * Aggregate clat samples to report percentile(s) of them.
 *
 * EXECUTIVE SUMMARY
 *
 * FIO_IO_U_PLAT_BITS determines the maximum statistical error on the
 * value of resulting percentiles. The error will be approximately
 * 1/2^(FIO_IO_U_PLAT_BITS+1) of the value.
 *
 * FIO_IO_U_PLAT_GROUP_NR and FIO_IO_U_PLAT_BITS determine the maximum
 * range being tracked for latency samples. The maximum value tracked
 * accurately will be 2^(GROUP_NR + PLAT_BITS - 1) microseconds.
 *
 * FIO_IO_U_PLAT_GROUP_NR and FIO_IO_U_PLAT_BITS determine the memory
 * requirement of storing those aggregate counts. The memory used will
 * be (FIO_IO_U_PLAT_GROUP_NR * 2^FIO_IO_U_PLAT_BITS) * sizeof(int)
 * bytes.
 *
 * FIO_IO_U_PLAT_NR is the total number of buckets.
 *
 * DETAILS
 *
 * Suppose the clat varies from 0 to 999 (usec); the straightforward
 * method is to keep an array of (999 + 1) buckets, in which a counter
 * keeps the count of samples that fall in the bucket, e.g.,
 * {[0],[1],...,[999]}. However, this consumes a huge amount of space,
 * and can be avoided if an approximation is acceptable.
 *
 * One such method is to let the range of a bucket be greater than
 * one. This method has low accuracy when the value is small. For
 * example, let the buckets be {[0,99],[100,199],...,[900,999]}, and
 * let the represented value of each bucket be the mean of its range.
 * Then a value of 0 has a round-off error of 49.5. To improve on
 * this, we use buckets with non-uniform ranges, while bounding the
 * error of each bucket within a ratio of the sample value. A simple
 * example: with error_bound = 0.005, the buckets are {
 * {[0],[1],...,[99]}, {[100,101],[102,103],...,[198,199]},..,
 * {[900,909],[910,919]...} }. The total range is partitioned into
 * groups with different ranges, then into buckets with uniform ranges
 * within each group. An upper bound of the error is
 * (range_of_bucket/2)/value_of_bucket.
 *
 * For better efficiency, we implement this using base two. We group
 * samples by their Most Significant Bit (MSB), extract the next M
 * bits as an index within the group, and discard the rest of the
 * bits.
 *
 * E.g., assume a sample 'x' whose MSB is bit n (starting from bit 0),
 * and use M bits for indexing
 *
 *        | n | M bits | bit (n-M-1) ... bit 0 |
 *
 * Because x is at least 2^n, and bits 0 to (n-M-1) contribute at most
 * (2^(n-M) - 1), discarding bits 0 to (n-M-1) makes the round-off
 * error
 *
 *            2^(n-M)-1    2^(n-M)     1
 *       e <= --------- <= ------- =  ---
 *               2^n         2^n      2^M
 *
 * Furthermore, if we use the mean of the range to represent the
 * bucket, the error e can be lowered by half to 1 / 2^(M+1). By using
 * M bits as the index, each group must contain 2^M buckets.
 *
 * E.g. Let M (FIO_IO_U_PLAT_BITS) be 6
 *      Error bound is 1/2^(6+1) = 0.0078125 (< 1%)
 *
 *	Group	MSB	#discarded	range of		#buckets
 *			error_bits	value
 *	----------------------------------------------------------------
 *	0*	0~5	0		[0,63]			64
 *	1*	6	0		[64,127]		64
 *	2	7	1		[128,255]		64
 *	3	8	2		[256,511]		64
 *	4	9	3		[512,1023]		64
 *	...	...	...		[...,...]		...
 *	18	23	17		[8388608,+inf]**	64
 *
 * * Special cases (the starred groups, i.e. n < M or n == M): the
 *   value cannot be rounded off, so all bits of the sample are used
 *   as the index.
 *
 * ** If a sample's MSB is greater than 23, it will be counted as 23.
 */

#define FIO_IO_U_PLAT_BITS	6
#define FIO_IO_U_PLAT_VAL	(1 << FIO_IO_U_PLAT_BITS)
#define FIO_IO_U_PLAT_GROUP_NR	19
#define FIO_IO_U_PLAT_NR	(FIO_IO_U_PLAT_GROUP_NR * FIO_IO_U_PLAT_VAL)
#define FIO_IO_U_LIST_MAX_LEN	20 /* The size of the default and user-specified
					list of percentiles */

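/*
 * Illustrative sketch only, not part of fio's exported API: a hypothetical
 * helper showing how a latency sample (in usec) would map to a bucket index
 * under the grouping scheme described above. fio's real binning lives in
 * stat.c; the helper name and the final clamping detail here are assumptions
 * made purely to make the group/offset arithmetic concrete.
 */
static inline unsigned int __plat_idx_sketch(unsigned int val)
{
	unsigned int msb, error_bits, base, offset, idx;

	/* Most significant bit of the sample, counting from bit 0 */
	if (val == 0)
		msb = 0;
	else
		msb = (sizeof(val) * 8) - __builtin_clz(val) - 1;

	/*
	 * Starred groups in the table above: the value cannot be rounded
	 * off, so all bits of the sample are used as the index.
	 */
	if (msb <= FIO_IO_U_PLAT_BITS)
		return val;

	/* Low-order bits that are discarded (the round-off error bits) */
	error_bits = msb - FIO_IO_U_PLAT_BITS;

	/* Number of buckets that precede this group */
	base = (error_bits + 1) << FIO_IO_U_PLAT_BITS;

	/* The next FIO_IO_U_PLAT_BITS bits pick the bucket within the group */
	offset = (val >> error_bits) & (FIO_IO_U_PLAT_VAL - 1);

	/* Samples with MSB beyond the last group land in its final bucket */
	idx = base + offset;
	if (idx > FIO_IO_U_PLAT_NR - 1)
		idx = FIO_IO_U_PLAT_NR - 1;

	return idx;
}
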
#define MAX_PATTERN_SIZE	512
#define FIO_JOBNAME_SIZE	128
#define FIO_JOBDESC_SIZE	256
#define FIO_VERROR_SIZE		128

struct thread_stat {
	char name[FIO_JOBNAME_SIZE];
	char verror[FIO_VERROR_SIZE];
	uint32_t error;
	uint32_t thread_number;
	uint32_t groupid;
	uint32_t pid;
	char description[FIO_JOBDESC_SIZE];
	uint32_t members;
	uint32_t unified_rw_rep;

	/*
	 * bandwidth and latency stats
	 */
	struct io_stat clat_stat[DDIR_RWDIR_CNT]; /* completion latency */
	struct io_stat slat_stat[DDIR_RWDIR_CNT]; /* submission latency */
	struct io_stat lat_stat[DDIR_RWDIR_CNT]; /* total latency */
	struct io_stat bw_stat[DDIR_RWDIR_CNT]; /* bandwidth stats */
	struct io_stat iops_stat[DDIR_RWDIR_CNT]; /* IOPS stats */

	/*
	 * fio system usage accounting
	 */
	uint64_t usr_time;
	uint64_t sys_time;
	uint64_t ctx;
	uint64_t minf, majf;

	/*
	 * IO depth and latency stats
	 */
	uint64_t clat_percentiles;
	uint64_t percentile_precision;
	fio_fp64_t percentile_list[FIO_IO_U_LIST_MAX_LEN];

	uint32_t io_u_map[FIO_IO_U_MAP_NR];
	uint32_t io_u_submit[FIO_IO_U_MAP_NR];
	uint32_t io_u_complete[FIO_IO_U_MAP_NR];
	uint32_t io_u_lat_u[FIO_IO_U_LAT_U_NR];
	uint32_t io_u_lat_m[FIO_IO_U_LAT_M_NR];
	uint32_t io_u_plat[DDIR_RWDIR_CNT][FIO_IO_U_PLAT_NR];
	uint64_t total_io_u[3];
	uint64_t short_io_u[3];
	uint64_t total_submit;
	uint64_t total_complete;

	uint64_t io_bytes[DDIR_RWDIR_CNT];
	uint64_t runtime[DDIR_RWDIR_CNT];
	uint64_t total_run_time;

	/*
	 * IO Error related stats
	 */
	uint16_t continue_on_error;
	uint64_t total_err_count;
	uint32_t first_error;

	uint32_t kb_base;
	uint32_t unit_base;

	uint32_t latency_depth;
	uint64_t latency_target;
	fio_fp64_t latency_percentile;
	uint64_t latency_window;
} __attribute__((packed));

struct jobs_eta {
	uint32_t nr_running;
	uint32_t nr_ramp;

	uint32_t nr_pending;
	uint32_t nr_setting_up;

	uint32_t files_open;

	uint32_t m_rate[DDIR_RWDIR_CNT], t_rate[DDIR_RWDIR_CNT];
	uint32_t m_iops[DDIR_RWDIR_CNT], t_iops[DDIR_RWDIR_CNT];
	uint32_t rate[DDIR_RWDIR_CNT];
	uint32_t iops[DDIR_RWDIR_CNT];
	uint64_t elapsed_sec;
	uint64_t eta_sec;
	uint32_t is_pow2;
	uint32_t unit_base;

	/*
	 * Network 'copy' of run_str[]
	 */
	uint32_t nr_threads;
	uint8_t run_str[];
} __attribute__((packed));

extern struct jobs_eta *get_jobs_eta(int force, size_t *size);

extern void stat_init(void);
extern void stat_exit(void);

extern struct json_object *show_thread_status(struct thread_stat *ts, struct group_run_stats *rs);
extern void show_group_stats(struct group_run_stats *rs);
extern int calc_thread_status(struct jobs_eta *je, int force);
extern void display_thread_status(struct jobs_eta *je);
extern void show_run_stats(void);
extern void __show_run_stats(void);
extern void show_running_run_stats(void);
extern void check_for_running_stats(void);
extern void sum_thread_stats(struct thread_stat *dst, struct thread_stat *src, int nr);
extern void sum_group_stats(struct group_run_stats *dst, struct group_run_stats *src);
extern void init_thread_stat(struct thread_stat *ts);
extern void init_group_run_stat(struct group_run_stats *gs);
extern void eta_to_str(char *str, unsigned long eta_sec);
extern int calc_lat(struct io_stat *is, unsigned long *min, unsigned long *max, double *mean, double *dev);
extern unsigned int calc_clat_percentiles(unsigned int *io_u_plat, unsigned long nr, fio_fp64_t *plist, unsigned int **output, unsigned int *maxv, unsigned int *minv);
extern void stat_calc_lat_m(struct thread_stat *ts, double *io_u_lat);
extern void stat_calc_lat_u(struct thread_stat *ts, double *io_u_lat);
extern void stat_calc_dist(unsigned int *map, unsigned long total, double *io_u_dist);
extern void reset_io_stats(struct thread_data *);

static inline int usec_to_msec(unsigned long *min, unsigned long *max,
			       double *mean, double *dev)
{
	if (*min > 1000 && *max > 1000 && *mean > 1000.0 && *dev > 1000.0) {
		*min /= 1000;
		*max /= 1000;
		*mean /= 1000.0;
		*dev /= 1000.0;
		return 0;
	}

	return 1;
}

/*
 * Worst level condensing would be 1:5, so allow enough room for that
 */
#define __THREAD_RUNSTR_SZ(nr)	((nr) * 5)
#define THREAD_RUNSTR_SZ	__THREAD_RUNSTR_SZ(thread_number)

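/*
 * Illustrative sketch only: a hypothetical helper showing how a buffer for
 * struct jobs_eta and its flexible run_str[] member would be sized, using
 * the per-thread allowance defined above. The helper name is an assumption
 * and is not part of fio's API.
 */
static inline size_t __jobs_eta_size_sketch(unsigned int nr_threads)
{
	/* fixed packed header plus one condensed run_str slot per thread */
	return sizeof(struct jobs_eta) + __THREAD_RUNSTR_SZ(nr_threads);
}
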
#endif