Fix rate option with iodepth > 1
The rate option currently doesn't work when used with the libaio engine.
The current math calculates the interval from t1 (when the io_u unit was
created) to t2 (when the I/O completed) as the time taken by that I/O,
and the bandwidth for the rate calculation is derived from that.
This math works correctly for a sync engine, since only one I/O is in
progress at a time. For the libaio engine, however, when multiple I/Os
are queued, the same interval (from t1 to t2) is also attributed to the
other in-flight I/Os, so the actual bandwidth is higher than computed.
I have a patch, though it is more brute force: I take the total bytes
read/written divided by the time since I/O started to calculate the
bandwidth, and from that decide how much time (if any) needs to be spent
sleeping. This is a little more heavyweight than the previous math. There
are probably simpler/cleaner solutions, but this is the current patch I
have for it.
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
diff --git a/fio.h b/fio.h
index 3b2dd6e..119dc09 100644
--- a/fio.h
+++ b/fio.h
@@ -359,7 +359,7 @@
/*
* Rate state
*/
- unsigned long rate_usec_cycle[2];
+ unsigned long rate_nsec_cycle[2];
long rate_pending_usleep[2];
unsigned long rate_bytes[2];
unsigned long rate_blocks[2];
diff --git a/gettime.c b/gettime.c
index c0d27e4..8ec70b9 100644
--- a/gettime.c
+++ b/gettime.c
@@ -10,7 +10,7 @@
#include "hash.h"
-static int clock_gettime_works;
+static int clock_gettime_works = 0;
static struct timeval last_tv;
static int last_tv_valid;
diff --git a/init.c b/init.c
index 6ae78be..b9dee19 100644
--- a/init.c
+++ b/init.c
@@ -205,21 +205,19 @@
static int __setup_rate(struct thread_data *td, enum fio_ddir ddir)
{
unsigned int bs = td->o.min_bs[ddir];
- unsigned long long rate;
- unsigned long ios_per_msec;
+ unsigned long long bytes_per_sec;
- if (td->o.rate[ddir]) {
- rate = td->o.rate[ddir];
- ios_per_msec = (rate * 1000LL) / bs;
- } else
- ios_per_msec = td->o.rate_iops[ddir] * 1000UL;
+ if (td->o.rate[ddir])
+ bytes_per_sec = td->o.rate[ddir];
+ else
+ bytes_per_sec = td->o.rate_iops[ddir] * bs;
- if (!ios_per_msec) {
+ if (!bytes_per_sec) {
log_err("rate lower than supported\n");
return -1;
}
- td->rate_usec_cycle[ddir] = 1000000000ULL / ios_per_msec;
+ td->rate_nsec_cycle[ddir] = 1000000000ULL / bytes_per_sec;
td->rate_pending_usleep[ddir] = 0;
return 0;
}
diff --git a/io_u.c b/io_u.c
index 4be958d..1845d3b 100644
--- a/io_u.c
+++ b/io_u.c
@@ -984,6 +984,7 @@
if (!io_u->error) {
unsigned int bytes = io_u->buflen - io_u->resid;
const enum fio_ddir idx = io_u->ddir;
+ const enum fio_ddir odx = io_u->ddir ^ 1;
int ret;
td->io_blocks[idx]++;
@@ -992,15 +993,10 @@
if (ramp_time_over(td)) {
unsigned long uninitialized_var(lusec);
- unsigned long uninitialized_var(rusec);
if (!td->o.disable_clat || !td->o.disable_bw)
lusec = utime_since(&io_u->issue_time,
&icd->time);
- if (__should_check_rate(td, idx) ||
- __should_check_rate(td, idx ^ 1))
- rusec = utime_since(&io_u->start_time,
- &icd->time);
if (!td->o.disable_clat) {
add_clat_sample(td, idx, lusec, bytes);
@@ -1009,11 +1005,16 @@
if (!td->o.disable_bw)
add_bw_sample(td, idx, bytes, &icd->time);
if (__should_check_rate(td, idx)) {
- td->rate_pending_usleep[idx] +=
- (long) td->rate_usec_cycle[idx] - rusec;
+ td->rate_pending_usleep[idx] =
+ ((td->this_io_bytes[idx] *
+ td->rate_nsec_cycle[idx]) / 1000 -
+ utime_since_now(&td->start));
}
if (__should_check_rate(td, idx ^ 1))
- td->rate_pending_usleep[idx ^ 1] -= rusec;
+ td->rate_pending_usleep[odx] =
+ ((td->this_io_bytes[odx] *
+ td->rate_nsec_cycle[odx]) / 1000 -
+ utime_since_now(&td->start));
}
if (td_write(td) && idx == DDIR_WRITE &&