Add group reporting
Still needs some testing, so the option isn't documented yet. The
standard deviation averaging also isn't mathematically sound.
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
diff --git a/fio.c b/fio.c
index ab84b60..48d65e0 100644
--- a/fio.c
+++ b/fio.c
@@ -661,7 +661,7 @@
struct fio_file *f;
int i;
- td->ts->stat_io_bytes[0] = td->ts->stat_io_bytes[1] = 0;
+ td->ts.stat_io_bytes[0] = td->ts.stat_io_bytes[1] = 0;
td->this_io_bytes[0] = td->this_io_bytes[1] = 0;
td->zone_bytes = 0;
@@ -751,15 +751,15 @@
fio_gettime(&td->epoch, NULL);
memcpy(&td->timeout_end, &td->epoch, sizeof(td->epoch));
- getrusage(RUSAGE_SELF, &td->ts->ru_start);
+ getrusage(RUSAGE_SELF, &td->ts.ru_start);
runtime[0] = runtime[1] = 0;
while (td->loops--) {
fio_gettime(&td->start, NULL);
- memcpy(&td->ts->stat_sample_time, &td->start, sizeof(td->start));
+ memcpy(&td->ts.stat_sample_time, &td->start, sizeof(td->start));
if (td->ratemin)
- memcpy(&td->lastrate, &td->ts->stat_sample_time, sizeof(td->lastrate));
+ memcpy(&td->lastrate, &td->ts.stat_sample_time, sizeof(td->lastrate));
clear_io_state(td);
prune_io_piece_log(td);
@@ -792,16 +792,18 @@
}
update_rusage_stat(td);
- fio_gettime(&td->end_time, NULL);
- td->runtime[0] = runtime[0] / 1000;
- td->runtime[1] = runtime[1] / 1000;
+ td->ts.runtime[0] = runtime[0] / 1000;
+ td->ts.runtime[1] = runtime[1] / 1000;
+ td->ts.total_run_time = mtime_since_now(&td->epoch);
+ td->ts.io_bytes[0] = td->io_bytes[0];
+ td->ts.io_bytes[1] = td->io_bytes[1];
- if (td->ts->bw_log)
- finish_log(td, td->ts->bw_log, "bw");
- if (td->ts->slat_log)
- finish_log(td, td->ts->slat_log, "slat");
- if (td->ts->clat_log)
- finish_log(td, td->ts->clat_log, "clat");
+ if (td->ts.bw_log)
+ finish_log(td, td->ts.bw_log, "bw");
+ if (td->ts.slat_log)
+ finish_log(td, td->ts.slat_log, "slat");
+ if (td->ts.clat_log)
+ finish_log(td, td->ts.clat_log, "clat");
if (td->write_iolog_file)
write_iolog_close(td);
if (td->exec_postrun) {
diff --git a/fio.h b/fio.h
index d1d02b0..1b933ca 100644
--- a/fio.h
+++ b/fio.h
@@ -255,6 +255,13 @@
#define FIO_IO_U_LAT_NR 12
struct thread_stat {
+ char *name;
+ char *verror;
+ int error;
+ int groupid;
+ pid_t pid;
+ char *description;
+
struct io_log *slat_log;
struct io_log *clat_log;
struct io_log *bw_log;
@@ -284,6 +291,10 @@
unsigned int io_u_map[FIO_IO_U_MAP_NR];
unsigned int io_u_lat[FIO_IO_U_LAT_NR];
unsigned long total_io_u;
+
+ unsigned long long io_bytes[2];
+ unsigned long runtime[2];
+ unsigned long total_run_time;
};
/*
@@ -299,8 +310,7 @@
pthread_t thread;
int thread_number;
int groupid;
- struct thread_stat *ts;
- struct thread_stat __ts;
+ struct thread_stat ts;
enum fio_filetype filetype;
struct fio_file *files;
unsigned int nr_files;
@@ -410,7 +420,6 @@
unsigned long rate_bytes;
struct timeval lastrate;
- unsigned long runtime[2]; /* msec */
unsigned long long io_size;
unsigned long long total_file_size;
unsigned long long start_offset;
@@ -436,7 +445,6 @@
struct timeval start; /* start of this loop */
struct timeval epoch; /* time job was started */
- struct timeval end_time;/* time job ended */
/*
* read/write mixed workload state
diff --git a/init.c b/init.c
index 6dcd801..bbf6f2a 100644
--- a/init.c
+++ b/init.c
@@ -784,12 +784,6 @@
if (td == &def_thread)
return 0;
- /*
- * this will later be shared between thread for group reporting
- */
- td->ts = &td->__ts;
- memset(td->ts, 0, sizeof(*td->ts));
-
assert(td->io_ops);
if (td->odirect)
@@ -856,9 +850,9 @@
fio_sem_init(&td->mutex, 0);
- td->ts->clat_stat[0].min_val = td->ts->clat_stat[1].min_val = ULONG_MAX;
- td->ts->slat_stat[0].min_val = td->ts->slat_stat[1].min_val = ULONG_MAX;
- td->ts->bw_stat[0].min_val = td->ts->bw_stat[1].min_val = ULONG_MAX;
+ td->ts.clat_stat[0].min_val = td->ts.clat_stat[1].min_val = ULONG_MAX;
+ td->ts.slat_stat[0].min_val = td->ts.slat_stat[1].min_val = ULONG_MAX;
+ td->ts.bw_stat[0].min_val = td->ts.bw_stat[1].min_val = ULONG_MAX;
if (td->stonewall && td->thread_number > 1)
groupid++;
@@ -869,11 +863,11 @@
goto err;
if (td->write_lat_log) {
- setup_log(&td->ts->slat_log);
- setup_log(&td->ts->clat_log);
+ setup_log(&td->ts.slat_log);
+ setup_log(&td->ts.clat_log);
}
if (td->write_bw_log)
- setup_log(&td->ts->bw_log);
+ setup_log(&td->ts.bw_log);
if (!td->name)
td->name = strdup(jobname);
diff --git a/io_u.c b/io_u.c
index 5b5802c..79a0ce8 100644
--- a/io_u.c
+++ b/io_u.c
@@ -286,8 +286,8 @@
break;
}
- td->ts->io_u_map[index]++;
- td->ts->total_io_u++;
+ td->ts.io_u_map[index]++;
+ td->ts.total_io_u++;
}
static void io_u_mark_latency(struct thread_data *td, unsigned long msec)
@@ -321,7 +321,7 @@
break;
}
- td->ts->io_u_lat[index]++;
+ td->ts.io_u_lat[index]++;
}
/*
diff --git a/stat.c b/stat.c
index 576e5d2..ccab0c7 100644
--- a/stat.c
+++ b/stat.c
@@ -6,6 +6,7 @@
#include <dirent.h>
#include <libgen.h>
#include <math.h>
+#include <assert.h>
#include "fio.h"
@@ -322,7 +323,7 @@
void update_rusage_stat(struct thread_data *td)
{
- struct thread_stat *ts = td->ts;
+ struct thread_stat *ts = &td->ts;
getrusage(RUSAGE_SELF, &ts->ru_end);
@@ -412,29 +413,27 @@
}
}
-static void show_ddir_status(struct thread_data *td, struct group_run_stats *rs,
+static void show_ddir_status(struct group_run_stats *rs, struct thread_stat *ts,
int ddir)
{
const char *ddir_str[] = { "read ", "write" };
- struct thread_stat *ts;
unsigned long min, max;
unsigned long long bw;
double mean, dev;
char *io_p, *bw_p;
- if (!td->runtime[ddir])
+ if (!ts->runtime[ddir])
return;
- bw = td->io_bytes[ddir] / td->runtime[ddir];
- io_p = num2str(td->io_bytes[ddir] >> 10, 6, 1);
+ bw = ts->io_bytes[ddir] / ts->runtime[ddir];
+ io_p = num2str(ts->io_bytes[ddir] >> 10, 6, 1);
bw_p = num2str(bw, 6, 1);
- fprintf(f_out, " %s: io=%siB, bw=%siB/s, runt=%6lumsec\n", ddir_str[ddir], io_p, bw_p, td->runtime[ddir]);
+ fprintf(f_out, " %s: io=%siB, bw=%siB/s, runt=%6lumsec\n", ddir_str[ddir], io_p, bw_p, ts->runtime[ddir]);
free(io_p);
free(bw_p);
- ts = td->ts;
if (calc_lat(&ts->slat_stat[ddir], &min, &max, &mean, &dev))
fprintf(f_out, " slat (msec): min=%5lu, max=%5lu, avg=%5.02f, stdev=%5.02f\n", min, max, mean, dev);
@@ -449,7 +448,7 @@
}
}
-static void show_thread_status(struct thread_data *td,
+static void show_thread_status(struct thread_stat *ts,
struct group_run_stats *rs)
{
double usr_cpu, sys_cpu;
@@ -458,37 +457,37 @@
double io_u_lat[FIO_IO_U_LAT_NR];
int i;
- if (!(td->io_bytes[0] + td->io_bytes[1]))
+ if (!(ts->io_bytes[0] + ts->io_bytes[1]))
return;
- if (!td->error)
- fprintf(f_out, "%s: (groupid=%d): err=%2d: pid=%d\n",td->name, td->groupid, td->error, td->pid);
+ if (!ts->error)
+ fprintf(f_out, "%s: (groupid=%d): err=%2d: pid=%d\n", ts->name, ts->groupid, ts->error, ts->pid);
else
- fprintf(f_out, "%s: (groupid=%d): err=%2d (%s): pid=%d\n",td->name, td->groupid, td->error, td->verror, td->pid);
+ fprintf(f_out, "%s: (groupid=%d): err=%2d (%s): pid=%d\n", ts->name, ts->groupid, ts->error, ts->verror, ts->pid);
- if (td_read(td))
- show_ddir_status(td, rs, DDIR_READ);
- if (td_write(td))
- show_ddir_status(td, rs, DDIR_WRITE);
+ if (ts->io_bytes[DDIR_READ])
+ show_ddir_status(rs, ts, DDIR_READ);
+ if (ts->io_bytes[DDIR_WRITE])
+ show_ddir_status(rs, ts, DDIR_WRITE);
- runtime = mtime_since(&td->epoch, &td->end_time);
+ runtime = ts->total_run_time;
if (runtime) {
double runt = (double) runtime;
- usr_cpu = (double) td->ts->usr_time * 100 / runt;
- sys_cpu = (double) td->ts->sys_time * 100 / runt;
+ usr_cpu = (double) ts->usr_time * 100 / runt;
+ sys_cpu = (double) ts->sys_time * 100 / runt;
} else {
usr_cpu = 0;
sys_cpu = 0;
}
- fprintf(f_out, " cpu : usr=%3.2f%%, sys=%3.2f%%, ctx=%lu\n", usr_cpu, sys_cpu, td->ts->ctx);
+ fprintf(f_out, " cpu : usr=%3.2f%%, sys=%3.2f%%, ctx=%lu\n", usr_cpu, sys_cpu, ts->ctx);
/*
* Do depth distribution calculations
*/
for (i = 0; i < FIO_IO_U_MAP_NR; i++) {
- io_u_dist[i] = (double) td->ts->io_u_map[i] / (double) td->ts->total_io_u;
+ io_u_dist[i] = (double) ts->io_u_map[i] / (double) ts->total_io_u;
io_u_dist[i] *= 100.0;
}
@@ -498,30 +497,29 @@
* Do latency distribution calculations
*/
for (i = 0; i < FIO_IO_U_LAT_NR; i++) {
- io_u_lat[i] = (double) td->ts->io_u_lat[i] / (double) td->ts->total_io_u;
+ io_u_lat[i] = (double) ts->io_u_lat[i] / (double) ts->total_io_u;
io_u_lat[i] *= 100.0;
}
fprintf(f_out, " lat (msec): 2=%3.1f%%, 4=%3.1f%%, 10=%3.1f%%, 20=%3.1f%%, 50=%3.1f%%, 100=%3.1f%%\n", io_u_lat[0], io_u_lat[1], io_u_lat[2], io_u_lat[3], io_u_lat[4], io_u_lat[5]);
fprintf(f_out, " lat (msec): 250=%3.1f%%, 500=%3.1f%%, 750=%3.1f%%, 1000=%3.1f%%, >=2000=%3.1f%%\n", io_u_lat[6], io_u_lat[7], io_u_lat[8], io_u_lat[9], io_u_lat[10]);
- if (td->description)
- fprintf(f_out, "%s\n", td->description);
+ if (ts->description)
+ fprintf(f_out, "%s\n", ts->description);
}
-static void show_ddir_status_terse(struct thread_data *td,
+static void show_ddir_status_terse(struct thread_stat *ts,
struct group_run_stats *rs, int ddir)
{
- struct thread_stat *ts = td->ts;
unsigned long min, max;
unsigned long long bw;
double mean, dev;
bw = 0;
- if (td->runtime[ddir])
- bw = td->io_bytes[ddir] / td->runtime[ddir];
+ if (ts->runtime[ddir])
+ bw = ts->io_bytes[ddir] / ts->runtime[ddir];
- fprintf(f_out, ",%llu,%llu,%lu", td->io_bytes[ddir] >> 10, bw, td->runtime[ddir]);
+ fprintf(f_out, ",%llu,%llu,%lu", ts->io_bytes[ddir] >> 10, bw, ts->runtime[ddir]);
if (calc_lat(&ts->slat_stat[ddir], &min, &max, &mean, &dev))
fprintf(f_out, ",%lu,%lu,%f,%f", min, max, mean, dev);
@@ -540,38 +538,68 @@
fprintf(f_out, ",%lu,%lu,%f%%,%f,%f", min, max, p_of_agg, mean, dev);
} else
fprintf(f_out, ",%lu,%lu,%f%%,%f,%f", 0UL, 0UL, 0.0, 0.0, 0.0);
-
}
-static void show_thread_status_terse(struct thread_data *td,
+static void show_thread_status_terse(struct thread_stat *ts,
struct group_run_stats *rs)
{
double usr_cpu, sys_cpu;
- fprintf(f_out, "%s,%d,%d",td->name, td->groupid, td->error);
+ fprintf(f_out, "%s,%d,%d", ts->name, ts->groupid, ts->error);
- show_ddir_status_terse(td, rs, 0);
- show_ddir_status_terse(td, rs, 1);
+ show_ddir_status_terse(ts, rs, 0);
+ show_ddir_status_terse(ts, rs, 1);
- if (td->runtime[0] + td->runtime[1]) {
- double runt = (double) (td->runtime[0] + td->runtime[1]);
+ if (ts->total_run_time) {
+ double runt = (double) ts->total_run_time;
- usr_cpu = (double) td->ts->usr_time * 100 / runt;
- sys_cpu = (double) td->ts->sys_time * 100 / runt;
+ usr_cpu = (double) ts->usr_time * 100 / runt;
+ sys_cpu = (double) ts->sys_time * 100 / runt;
} else {
usr_cpu = 0;
sys_cpu = 0;
}
- fprintf(f_out, ",%f%%,%f%%,%lu\n", usr_cpu, sys_cpu, td->ts->ctx);
+ fprintf(f_out, ",%f%%,%f%%,%lu\n", usr_cpu, sys_cpu, ts->ctx);
+}
+
+static void __sum_stat(struct io_stat *dst, struct io_stat *src, int nr)
+{
+ double mean, S;
+
+ dst->min_val = min(dst->min_val, src->min_val);
+ dst->max_val = max(dst->max_val, src->max_val);
+ dst->samples += src->samples;
+
+	/*
+	 * Needs a new method for calculating stddev; we cannot just
+	 * average the S values like we do below for nr > 1
+	 */
+	if (nr == 1) {
+		mean = src->mean;
+		S = src->S;
+	} else {
+		mean = ((dst->mean * ((double) nr - 1.0)) + src->mean) / (double) nr;
+		S = ((dst->S * ((double) nr - 1.0)) + src->S) / (double) nr;
+	}
+
+ dst->mean = mean;
+ dst->S = S;
+}
+
+static void sum_stat(struct io_stat *dst, struct io_stat *src, int nr)
+{
+ __sum_stat(&dst[DDIR_READ], &src[DDIR_READ], nr);
+ __sum_stat(&dst[DDIR_WRITE], &src[DDIR_WRITE], nr);
}
void show_run_stats(void)
{
struct group_run_stats *runstats, *rs;
struct thread_data *td;
- int i;
+ struct thread_stat *threadstats, *ts;
+ int i, j, k, nr_ts, last_ts, members;
runstats = malloc(sizeof(struct group_run_stats) * (groupid + 1));
@@ -583,25 +611,118 @@
rs->min_bw[1] = rs->min_run[1] = ~0UL;
}
+ /*
+ * find out how many threads stats we need. if group reporting isn't
+ * enabled, it's one-per-td.
+ */
+ nr_ts = 0;
+ last_ts = -1;
for_each_td(td, i) {
+ if (!td->group_reporting) {
+ nr_ts++;
+ continue;
+ }
+ if (last_ts == td->groupid)
+ continue;
+
+ last_ts = td->groupid;
+ nr_ts++;
+ }
+
+ threadstats = malloc(nr_ts * sizeof(struct thread_stat));
+
+ for (i = 0; i < nr_ts; i++) {
+ ts = &threadstats[i];
+
+ memset(ts, 0, sizeof(*ts));
+ ts->clat_stat[0].min_val = -1UL;
+ ts->clat_stat[1].min_val = -1UL;
+ ts->slat_stat[0].min_val = -1UL;
+ ts->slat_stat[1].min_val = -1UL;
+ ts->bw_stat[0].min_val = -1UL;
+ ts->bw_stat[1].min_val = -1UL;
+ }
+
+	j = 0;
+	last_ts = -1;
+	members = 0;
+	for_each_td(td, i) {
+		/*
+		 * If this thread starts a new group, or doesn't take
+		 * part in group reporting at all, advance to the next
+		 * result slot before summing anything into it. Doing
+		 * the advance after the sum (as a loop footer) would
+		 * fold the first member of a new group into the
+		 * previous group's stats.
+		 */
+		if (members &&
+		    (!td->group_reporting || last_ts != td->groupid)) {
+			members = 0;
+			j++;
+		}
+
+		last_ts = td->groupid;
+		ts = &threadstats[j];
+
+		members++;
+
+		if (members == 1) {
+			ts->name = td->name;
+			ts->description = td->description;
+			ts->error = td->error;
+			ts->groupid = td->groupid;
+			ts->pid = td->pid;
+			ts->verror = td->verror;
+		}
+
+		sum_stat(ts->clat_stat, td->ts.clat_stat, members);
+		sum_stat(ts->slat_stat, td->ts.slat_stat, members);
+		sum_stat(ts->bw_stat, td->ts.bw_stat, members);
+
+		ts->stat_io_bytes[0] += td->ts.stat_io_bytes[0];
+		ts->stat_io_bytes[1] += td->ts.stat_io_bytes[1];
+
+		ts->usr_time += td->ts.usr_time;
+		ts->sys_time += td->ts.sys_time;
+		ts->ctx += td->ts.ctx;
+
+		for (k = 0; k < FIO_IO_U_MAP_NR; k++)
+			ts->io_u_map[k] += td->ts.io_u_map[k];
+		for (k = 0; k < FIO_IO_U_LAT_NR; k++)
+			ts->io_u_lat[k] += td->ts.io_u_lat[k];
+
+		ts->total_io_u += td->ts.total_io_u;
+		ts->io_bytes[0] += td->ts.io_bytes[0];
+		ts->io_bytes[1] += td->ts.io_bytes[1];
+
+		if (ts->runtime[0] < td->ts.runtime[0])
+			ts->runtime[0] = td->ts.runtime[0];
+		if (ts->runtime[1] < td->ts.runtime[1])
+			ts->runtime[1] = td->ts.runtime[1];
+
+		ts->total_run_time += td->ts.total_run_time;
+	}
+
+ for (i = 0; i < nr_ts; i++) {
unsigned long long rbw, wbw;
- rs = &runstats[td->groupid];
+ ts = &threadstats[i];
+ rs = &runstats[ts->groupid];
- if (td->runtime[0] < rs->min_run[0] || !rs->min_run[0])
- rs->min_run[0] = td->runtime[0];
- if (td->runtime[0] > rs->max_run[0])
- rs->max_run[0] = td->runtime[0];
- if (td->runtime[1] < rs->min_run[1] || !rs->min_run[1])
- rs->min_run[1] = td->runtime[1];
- if (td->runtime[1] > rs->max_run[1])
- rs->max_run[1] = td->runtime[1];
+ if (ts->runtime[0] < rs->min_run[0] || !rs->min_run[0])
+ rs->min_run[0] = ts->runtime[0];
+ if (ts->runtime[0] > rs->max_run[0])
+ rs->max_run[0] = ts->runtime[0];
+ if (ts->runtime[1] < rs->min_run[1] || !rs->min_run[1])
+ rs->min_run[1] = ts->runtime[1];
+ if (ts->runtime[1] > rs->max_run[1])
+ rs->max_run[1] = ts->runtime[1];
rbw = wbw = 0;
- if (td->runtime[0])
- rbw = td->io_bytes[0] / (unsigned long long) td->runtime[0];
- if (td->runtime[1])
- wbw = td->io_bytes[1] / (unsigned long long) td->runtime[1];
+ if (ts->runtime[0])
+ rbw = ts->io_bytes[0] / (unsigned long long) ts->runtime[0];
+ if (ts->runtime[1])
+ wbw = ts->io_bytes[1] / (unsigned long long) ts->runtime[1];
if (rbw < rs->min_bw[0])
rs->min_bw[0] = rbw;
@@ -612,8 +733,8 @@
if (wbw > rs->max_bw[1])
rs->max_bw[1] = wbw;
- rs->io_kb[0] += td->io_bytes[0] >> 10;
- rs->io_kb[1] += td->io_bytes[1] >> 10;
+ rs->io_kb[0] += ts->io_bytes[0] >> 10;
+ rs->io_kb[1] += ts->io_bytes[1] >> 10;
}
for (i = 0; i < groupid + 1; i++) {
@@ -631,13 +752,14 @@
if (!terse_output)
printf("\n");
- for_each_td(td, i) {
- rs = &runstats[td->groupid];
+ for (i = 0; i < nr_ts; i++) {
+ ts = &threadstats[i];
+ rs = &runstats[ts->groupid];
if (terse_output)
- show_thread_status_terse(td, rs);
+ show_thread_status_terse(ts, rs);
else
- show_thread_status(td, rs);
+ show_thread_status(ts, rs);
}
if (!terse_output) {
@@ -648,6 +770,7 @@
}
free(runstats);
+ free(threadstats);
}
static inline void add_stat_sample(struct io_stat *is, unsigned long data)
@@ -700,7 +823,7 @@
void add_clat_sample(struct thread_data *td, enum fio_ddir ddir,
unsigned long msec)
{
- struct thread_stat *ts = td->ts;
+ struct thread_stat *ts = &td->ts;
add_stat_sample(&ts->clat_stat[ddir], msec);
@@ -711,7 +834,7 @@
void add_slat_sample(struct thread_data *td, enum fio_ddir ddir,
unsigned long msec)
{
- struct thread_stat *ts = td->ts;
+ struct thread_stat *ts = &td->ts;
add_stat_sample(&ts->slat_stat[ddir], msec);
@@ -722,7 +845,7 @@
void add_bw_sample(struct thread_data *td, enum fio_ddir ddir,
struct timeval *t)
{
- struct thread_stat *ts = td->ts;
+ struct thread_stat *ts = &td->ts;
unsigned long spent = mtime_since(&ts->stat_sample_time[ddir], t);
unsigned long rate;