verify: stop on the actual number of bytes that need to be verified
If we don't use LFSR or a random map, we don't get told when
to stop by the random generator or map. So track it on the
side, using the same mechanism as do_io().
Signed-off-by: Jens Axboe <axboe@kernel.dk>
diff --git a/backend.c b/backend.c
index 902414e..e025dbf 100644
--- a/backend.c
+++ b/backend.c
@@ -216,7 +216,7 @@
}
static int check_min_rate(struct thread_data *td, struct timeval *now,
- unsigned long *bytes_done)
+ uint64_t *bytes_done)
{
int ret = 0;
@@ -393,8 +393,9 @@
* The main verify engine. Runs over the writes we previously submitted,
* reads the blocks back in, and checks the crc/md5 of the data.
*/
-static void do_verify(struct thread_data *td)
+static void do_verify(struct thread_data *td, uint64_t verify_bytes)
{
+ uint64_t bytes_done[DDIR_RWDIR_CNT] = { 0, 0, 0 };
struct fio_file *f;
struct io_u *io_u;
int ret, min_events;
@@ -453,6 +454,9 @@
break;
}
} else {
+ if (ddir_rw_sum(bytes_done) + td->o.rw_min_bs > verify_bytes)
+ break;
+
while ((io_u = get_io_u(td)) != NULL) {
/*
* We are only interested in the places where
@@ -523,7 +527,7 @@
requeue_io_u(td, &io_u);
} else {
sync_done:
- ret = io_u_sync_complete(td, io_u, NULL);
+ ret = io_u_sync_complete(td, io_u, bytes_done);
if (ret < 0)
break;
}
@@ -566,7 +570,7 @@
* and do the verification on them through
* the callback handler
*/
- if (io_u_queued_complete(td, min_events, NULL) < 0) {
+ if (io_u_queued_complete(td, min_events, bytes_done) < 0) {
ret = -1;
break;
}
@@ -608,9 +612,12 @@
/*
* Main IO worker function. It retrieves io_u's to process and queues
* and reaps them, checking for rate and errors along the way.
+ *
+ * Returns number of bytes written and trimmed.
*/
-static void do_io(struct thread_data *td)
+static uint64_t do_io(struct thread_data *td)
{
+ uint64_t bytes_done[DDIR_RWDIR_CNT] = { 0, 0, 0 };
unsigned int i;
int ret = 0;
@@ -623,7 +630,6 @@
(!flist_empty(&td->trim_list)) || !io_bytes_exceeded(td) ||
td->o.time_based) {
struct timeval comp_time;
- unsigned long bytes_done[DDIR_RWDIR_CNT] = { 0, 0, 0 };
int min_evts = 0;
struct io_u *io_u;
int ret2, full;
@@ -827,6 +833,8 @@
*/
if (!ddir_rw_sum(td->this_io_bytes))
td->done = 1;
+
+ return bytes_done[DDIR_WRITE] + bytes_done[DDIR_TRIM];
}
static void cleanup_io_u(struct thread_data *td)
@@ -1206,6 +1214,8 @@
clear_state = 0;
while (keep_running(td)) {
+ uint64_t verify_bytes;
+
fio_gettime(&td->start, NULL);
memcpy(&td->bw_sample_time, &td->start, sizeof(td->start));
memcpy(&td->iops_sample_time, &td->start, sizeof(td->start));
@@ -1226,7 +1236,7 @@
prune_io_piece_log(td);
- do_io(td);
+ verify_bytes = do_io(td);
clear_state = 1;
@@ -1255,7 +1265,7 @@
fio_gettime(&td->start, NULL);
- do_verify(td);
+ do_verify(td, verify_bytes);
td->ts.runtime[DDIR_READ] += utime_since_now(&td->start);
diff --git a/io_u.c b/io_u.c
index 8567e11..6ae3eae 100644
--- a/io_u.c
+++ b/io_u.c
@@ -16,7 +16,7 @@
int nr; /* input */
int error; /* output */
- unsigned long bytes_done[DDIR_RWDIR_CNT]; /* output */
+ uint64_t bytes_done[DDIR_RWDIR_CNT]; /* output */
struct timeval time; /* output */
};
@@ -1493,7 +1493,7 @@
* Complete a single io_u for the sync engines.
*/
int io_u_sync_complete(struct thread_data *td, struct io_u *io_u,
- unsigned long *bytes)
+ uint64_t *bytes)
{
struct io_completion_data icd;
@@ -1522,7 +1522,7 @@
* Called to complete min_events number of io for the async engines.
*/
int io_u_queued_complete(struct thread_data *td, int min_evts,
- unsigned long *bytes)
+ uint64_t *bytes)
{
struct io_completion_data icd;
struct timespec *tvp = NULL;
diff --git a/ioengine.h b/ioengine.h
index d5a0dc9..7299636 100644
--- a/ioengine.h
+++ b/ioengine.h
@@ -185,8 +185,8 @@
extern void put_io_u(struct thread_data *, struct io_u *);
extern void clear_io_u(struct thread_data *, struct io_u *);
extern void requeue_io_u(struct thread_data *, struct io_u **);
-extern int __must_check io_u_sync_complete(struct thread_data *, struct io_u *, unsigned long *);
-extern int __must_check io_u_queued_complete(struct thread_data *, int, unsigned long *);
+extern int __must_check io_u_sync_complete(struct thread_data *, struct io_u *, uint64_t *);
+extern int __must_check io_u_queued_complete(struct thread_data *, int, uint64_t *);
extern void io_u_queued(struct thread_data *, struct io_u *);
extern void io_u_log_error(struct thread_data *, struct io_u *);
extern void io_u_mark_depth(struct thread_data *, unsigned int);