#include <unistd.h>
#include <fcntl.h>
#include <string.h>
#include <signal.h>
#include <time.h>
#include <assert.h>

#include "fio.h"
#include "hash.h"
#include "verify.h"
#include "trim.h"
#include "lib/rand.h"
#include "lib/axmap.h"
#include "err.h"

struct io_completion_data {
	int nr;				/* input */

	int error;			/* output */
	uint64_t bytes_done[DDIR_RWDIR_CNT];	/* output */
	struct timeval time;		/* output */
};

/*
 * The ->io_axmap contains a map of blocks we have or have not done io
 * to yet. Used to make sure we cover the entire range in a fair fashion.
 */
static int random_map_free(struct fio_file *f, const uint64_t block)
{
	return !axmap_isset(f->io_axmap, block);
}

/*
 * Mark a given offset as used in the map.
 */
static void mark_random_map(struct thread_data *td, struct io_u *io_u)
{
	unsigned int min_bs = td->o.rw_min_bs;
	struct fio_file *f = io_u->file;
	unsigned int nr_blocks;
	uint64_t block;

	block = (io_u->offset - f->file_offset) / (uint64_t) min_bs;
	nr_blocks = (io_u->buflen + min_bs - 1) / min_bs;

	if (!(io_u->flags & IO_U_F_BUSY_OK))
		nr_blocks = axmap_set_nr(f->io_axmap, block, nr_blocks);

	if ((nr_blocks * min_bs) < io_u->buflen)
		io_u->buflen = nr_blocks * min_bs;
}

static uint64_t last_block(struct thread_data *td, struct fio_file *f,
			   enum fio_ddir ddir)
{
	uint64_t max_blocks;
	uint64_t max_size;

	assert(ddir_rw(ddir));

	/*
	 * Hmm, should we make sure that ->io_size <= ->real_file_size?
	 */
	max_size = f->io_size;
	if (max_size > f->real_file_size)
		max_size = f->real_file_size;

	if (td->o.zone_range)
		max_size = td->o.zone_range;

	if (td->o.min_bs[ddir] > td->o.ba[ddir])
		max_size -= td->o.min_bs[ddir] - td->o.ba[ddir];

	max_blocks = max_size / (uint64_t) td->o.ba[ddir];
	if (!max_blocks)
		return 0;

	return max_blocks;
}

struct rand_off {
	struct flist_head list;
	uint64_t off;
};

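/*
 * Pick the next random block offset. With the tausworthe generator we
 * scale a random 64-bit value into [0, lastb); with the LFSR generator
 * the LFSR walks the block space for us instead. If a random map is
 * kept, a busy block is moved on to the next free one in the axmap.
 */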
static int __get_next_rand_offset(struct thread_data *td, struct fio_file *f,
				  enum fio_ddir ddir, uint64_t *b)
{
	uint64_t r;

	if (td->o.random_generator == FIO_RAND_GEN_TAUSWORTHE) {
		uint64_t lastb;

		lastb = last_block(td, f, ddir);
		if (!lastb)
			return 1;

		r = __rand(&td->random_state);

		dprint(FD_RANDOM, "off rand %llu\n", (unsigned long long) r);

		*b = lastb * (r / ((uint64_t) FRAND_MAX + 1.0));
	} else {
		uint64_t off = 0;

		assert(fio_file_lfsr(f));

		if (lfsr_next(&f->lfsr, &off))
			return 1;

		*b = off;
	}

	/*
	 * if we are not maintaining a random map, we are done.
	 */
	if (!file_randommap(td, f))
		goto ret;

	/*
	 * calculate map offset and check if it's free
	 */
	if (random_map_free(f, *b))
		goto ret;

	dprint(FD_RANDOM, "get_next_rand_offset: offset %llu busy\n",
		(unsigned long long) *b);

	*b = axmap_next_free(f->io_axmap, *b);
	if (*b == (uint64_t) -1ULL)
		return 1;
ret:
	return 0;
}

static int __get_next_rand_offset_zipf(struct thread_data *td,
				       struct fio_file *f, enum fio_ddir ddir,
				       uint64_t *b)
{
	*b = zipf_next(&f->zipf);
	return 0;
}

static int __get_next_rand_offset_pareto(struct thread_data *td,
					 struct fio_file *f, enum fio_ddir ddir,
					 uint64_t *b)
{
	*b = pareto_next(&f->zipf);
	return 0;
}

static int flist_cmp(void *data, struct flist_head *a, struct flist_head *b)
{
	struct rand_off *r1 = flist_entry(a, struct rand_off, list);
	struct rand_off *r2 = flist_entry(b, struct rand_off, list);

	return r1->off - r2->off;
}

static int get_off_from_method(struct thread_data *td, struct fio_file *f,
			       enum fio_ddir ddir, uint64_t *b)
{
	if (td->o.random_distribution == FIO_RAND_DIST_RANDOM)
		return __get_next_rand_offset(td, f, ddir, b);
	else if (td->o.random_distribution == FIO_RAND_DIST_ZIPF)
		return __get_next_rand_offset_zipf(td, f, ddir, b);
	else if (td->o.random_distribution == FIO_RAND_DIST_PARETO)
		return __get_next_rand_offset_pareto(td, f, ddir, b);

	log_err("fio: unknown random distribution: %d\n", td->o.random_distribution);
	return 1;
}

/*
 * Sort the reads for a verify phase in batches of verifysort_nr, if
 * specified.
 */
static inline int should_sort_io(struct thread_data *td)
{
	if (!td->o.verifysort_nr || !td->o.do_verify)
		return 0;
	if (!td_random(td))
		return 0;
	if (td->runstate != TD_VERIFYING)
		return 0;
	if (td->o.random_generator == FIO_RAND_GEN_TAUSWORTHE)
		return 0;

	return 1;
}

static int should_do_random(struct thread_data *td, enum fio_ddir ddir)
{
	unsigned int v;
	unsigned long r;

	if (td->o.perc_rand[ddir] == 100)
		return 1;

	r = __rand(&td->seq_rand_state[ddir]);
	v = 1 + (int) (100.0 * (r / (FRAND_MAX + 1.0)));

	return v <= td->o.perc_rand[ddir];
}

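/*
 * Front end for the offset generators. During a verify phase (see
 * should_sort_io()), offsets are pulled in batches of verifysort_nr,
 * sorted in ascending order and then handed out one at a time, so the
 * verify reads are issued in a more drive friendly order.
 */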
static int get_next_rand_offset(struct thread_data *td, struct fio_file *f,
				enum fio_ddir ddir, uint64_t *b)
{
	struct rand_off *r;
	int i, ret = 1;

	if (!should_sort_io(td))
		return get_off_from_method(td, f, ddir, b);

	if (!flist_empty(&td->next_rand_list)) {
fetch:
		r = flist_first_entry(&td->next_rand_list, struct rand_off, list);
		flist_del(&r->list);
		*b = r->off;
		free(r);
		return 0;
	}

	for (i = 0; i < td->o.verifysort_nr; i++) {
		r = malloc(sizeof(*r));

		ret = get_off_from_method(td, f, ddir, &r->off);
		if (ret) {
			free(r);
			break;
		}

		flist_add(&r->list, &td->next_rand_list);
	}

	if (ret && !i)
		return ret;

	assert(!flist_empty(&td->next_rand_list));
	flist_sort(NULL, &td->next_rand_list, flist_cmp);
	goto fetch;
}

static int get_next_rand_block(struct thread_data *td, struct fio_file *f,
			       enum fio_ddir ddir, uint64_t *b)
{
	if (!get_next_rand_offset(td, f, ddir, b))
		return 0;

	if (td->o.time_based) {
		fio_file_reset(td, f);
		if (!get_next_rand_offset(td, f, ddir, b))
			return 0;
	}

	dprint(FD_IO, "%s: rand offset failed, last=%llu, size=%llu\n",
			f->file_name, (unsigned long long) f->last_pos[ddir],
			(unsigned long long) f->real_file_size);
	return 1;
}

static int get_next_seq_offset(struct thread_data *td, struct fio_file *f,
			       enum fio_ddir ddir, uint64_t *offset)
{
	struct thread_options *o = &td->o;

	assert(ddir_rw(ddir));

	if (f->last_pos[ddir] >= f->io_size + get_start_offset(td, f) &&
	    o->time_based)
		f->last_pos[ddir] = f->last_pos[ddir] - f->io_size;

	if (f->last_pos[ddir] < f->real_file_size) {
		uint64_t pos;

		if (f->last_pos[ddir] == f->file_offset && o->ddir_seq_add < 0)
			f->last_pos[ddir] = f->real_file_size;

		pos = f->last_pos[ddir] - f->file_offset;
		if (pos && o->ddir_seq_add) {
			pos += o->ddir_seq_add;

			/*
			 * If we reach beyond the end of the file
			 * with holed IO, wrap around to the
			 * beginning again.
			 */
			if (pos >= f->real_file_size)
				pos = f->file_offset;
		}

		*offset = pos;
		return 0;
	}

	return 1;
}

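/*
 * Fill in the block/offset for the next io_u. On an rw_seq hit a random
 * job may still be forced sequential (and a failed sequential offset
 * falls back to a random block), depending on td->o.perc_rand[] and
 * td->o.rw_seq.
 */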
static int get_next_block(struct thread_data *td, struct io_u *io_u,
			  enum fio_ddir ddir, int rw_seq,
			  unsigned int *is_random)
{
	struct fio_file *f = io_u->file;
	uint64_t b, offset;
	int ret;

	assert(ddir_rw(ddir));

	b = offset = -1ULL;

	if (rw_seq) {
		if (td_random(td)) {
			if (should_do_random(td, ddir)) {
				ret = get_next_rand_block(td, f, ddir, &b);
				*is_random = 1;
			} else {
				*is_random = 0;
				io_u->flags |= IO_U_F_BUSY_OK;
				ret = get_next_seq_offset(td, f, ddir, &offset);
				if (ret)
					ret = get_next_rand_block(td, f, ddir, &b);
			}
		} else {
			*is_random = 0;
			ret = get_next_seq_offset(td, f, ddir, &offset);
		}
	} else {
		io_u->flags |= IO_U_F_BUSY_OK;
		*is_random = 0;

		if (td->o.rw_seq == RW_SEQ_SEQ) {
			ret = get_next_seq_offset(td, f, ddir, &offset);
			if (ret) {
				ret = get_next_rand_block(td, f, ddir, &b);
				*is_random = 0;
			}
		} else if (td->o.rw_seq == RW_SEQ_IDENT) {
			if (f->last_start[ddir] != -1ULL)
				offset = f->last_start[ddir] - f->file_offset;
			else
				offset = 0;
			ret = 0;
		} else {
			log_err("fio: unknown rw_seq=%d\n", td->o.rw_seq);
			ret = 1;
		}
	}

	if (!ret) {
		if (offset != -1ULL)
			io_u->offset = offset;
		else if (b != -1ULL)
			io_u->offset = b * td->o.ba[ddir];
		else {
			log_err("fio: bug in offset generation: offset=%llu, b=%llu\n", (unsigned long long) offset, (unsigned long long) b);
			ret = 1;
		}
	}

	return ret;
}

/*
 * For random io, generate a random new block and see if it's used. Repeat
 * until we find a free one. For sequential io, just return the end of
 * the last io issued.
 */
static int __get_next_offset(struct thread_data *td, struct io_u *io_u,
			     unsigned int *is_random)
{
	struct fio_file *f = io_u->file;
	enum fio_ddir ddir = io_u->ddir;
	int rw_seq_hit = 0;

	assert(ddir_rw(ddir));

	if (td->o.ddir_seq_nr && !--td->ddir_seq_nr) {
		rw_seq_hit = 1;
		td->ddir_seq_nr = td->o.ddir_seq_nr;
	}

	if (get_next_block(td, io_u, ddir, rw_seq_hit, is_random))
		return 1;

	if (io_u->offset >= f->io_size) {
		dprint(FD_IO, "get_next_offset: offset %llu >= io_size %llu\n",
					(unsigned long long) io_u->offset,
					(unsigned long long) f->io_size);
		return 1;
	}

	io_u->offset += f->file_offset;
	if (io_u->offset >= f->real_file_size) {
		dprint(FD_IO, "get_next_offset: offset %llu >= size %llu\n",
					(unsigned long long) io_u->offset,
					(unsigned long long) f->real_file_size);
		return 1;
	}

	return 0;
}

static int get_next_offset(struct thread_data *td, struct io_u *io_u,
			   unsigned int *is_random)
{
	if (td->flags & TD_F_PROFILE_OPS) {
		struct prof_io_ops *ops = &td->prof_io_ops;

		if (ops->fill_io_u_off)
			return ops->fill_io_u_off(td, io_u, is_random);
	}

	return __get_next_offset(td, io_u, is_random);
}

static inline int io_u_fits(struct thread_data *td, struct io_u *io_u,
			    unsigned int buflen)
{
	struct fio_file *f = io_u->file;

	return io_u->offset + buflen <= f->io_size + get_start_offset(td, f);
}

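/*
 * Pick the buffer length for the next io_u: either uniformly between
 * min_bs and max_bs, or according to the bssplit table if one is given.
 * The result is rounded to the verify interval and to min_bs where
 * applicable, and must fit within the file (io_u_fits()).
 */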
static unsigned int __get_next_buflen(struct thread_data *td, struct io_u *io_u,
				      unsigned int is_random)
{
	int ddir = io_u->ddir;
	unsigned int buflen = 0;
	unsigned int minbs, maxbs;
	unsigned long r;

	assert(ddir_rw(ddir));

	if (td->o.bs_is_seq_rand)
		ddir = is_random ? DDIR_WRITE: DDIR_READ;

	minbs = td->o.min_bs[ddir];
	maxbs = td->o.max_bs[ddir];

	if (minbs == maxbs)
		return minbs;

	/*
	 * If we can't satisfy the min block size from here, then fail
	 */
	if (!io_u_fits(td, io_u, minbs))
		return 0;

	do {
		r = __rand(&td->bsrange_state);

		if (!td->o.bssplit_nr[ddir]) {
			buflen = 1 + (unsigned int) ((double) maxbs *
					(r / (FRAND_MAX + 1.0)));
			if (buflen < minbs)
				buflen = minbs;
		} else {
			long perc = 0;
			unsigned int i;

			for (i = 0; i < td->o.bssplit_nr[ddir]; i++) {
				struct bssplit *bsp = &td->o.bssplit[ddir][i];

				buflen = bsp->bs;
				perc += bsp->perc;
				if ((r <= ((FRAND_MAX / 100L) * perc)) &&
				    io_u_fits(td, io_u, buflen))
					break;
			}
		}

		if (td->o.do_verify && td->o.verify != VERIFY_NONE)
			buflen = (buflen + td->o.verify_interval - 1) &
				~(td->o.verify_interval - 1);

		if (!td->o.bs_unaligned && is_power_of_2(minbs))
			buflen = (buflen + minbs - 1) & ~(minbs - 1);

	} while (!io_u_fits(td, io_u, buflen));

	return buflen;
}

static unsigned int get_next_buflen(struct thread_data *td, struct io_u *io_u,
				    unsigned int is_random)
{
	if (td->flags & TD_F_PROFILE_OPS) {
		struct prof_io_ops *ops = &td->prof_io_ops;

		if (ops->fill_io_u_size)
			return ops->fill_io_u_size(td, io_u, is_random);
	}

	return __get_next_buflen(td, io_u, is_random);
}

static void set_rwmix_bytes(struct thread_data *td)
{
	unsigned int diff;

	/*
	 * we do time or byte based switch. this is needed because
	 * buffered writes may issue a lot quicker than they complete,
	 * whereas reads do not.
	 */
	diff = td->o.rwmix[td->rwmix_ddir ^ 1];
	td->rwmix_issues = (td->io_issues[td->rwmix_ddir] * diff) / 100;
}

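/*
 * Roll a number in [1, 100] and compare it against the read percentage
 * of the rwmix setting to decide between a read and a write.
 */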
static inline enum fio_ddir get_rand_ddir(struct thread_data *td)
{
	unsigned int v;
	unsigned long r;

	r = __rand(&td->rwmix_state);
	v = 1 + (int) (100.0 * (r / (FRAND_MAX + 1.0)));

	if (v <= td->o.rwmix[DDIR_READ])
		return DDIR_READ;

	return DDIR_WRITE;
}

void io_u_quiesce(struct thread_data *td)
{
	/*
	 * We are going to sleep, ensure that we flush anything pending as
	 * not to skew our latency numbers.
	 *
	 * Changed to only monitor 'in flight' requests here instead of the
	 * td->cur_depth, b/c td->cur_depth does not accurately represent
	 * io's that have been actually submitted to an async engine,
	 * and cur_depth is meaningless for sync engines.
	 */
	if (td->io_u_queued || td->cur_depth) {
		int fio_unused ret;

		ret = td_io_commit(td);
	}

	while (td->io_u_in_flight) {
		int fio_unused ret;

		ret = io_u_queued_complete(td, 1, NULL);
	}
}

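/*
 * If rate limiting has accrued pending sleep time for this direction,
 * either switch to the other direction or quiesce and sleep off the
 * smaller of the two debts before continuing in the chosen direction.
 */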
static enum fio_ddir rate_ddir(struct thread_data *td, enum fio_ddir ddir)
{
	enum fio_ddir odir = ddir ^ 1;
	long usec;

	assert(ddir_rw(ddir));

	if (td->rate_pending_usleep[ddir] <= 0)
		return ddir;

	/*
	 * We have too much pending sleep in this direction. See if we
	 * should switch.
	 */
	if (td_rw(td) && td->o.rwmix[odir]) {
		/*
		 * Other direction does not have too much pending, switch
		 */
		if (td->rate_pending_usleep[odir] < 100000)
			return odir;

		/*
		 * Both directions have pending sleep. Sleep the minimum time
		 * and deduct from both.
		 */
		if (td->rate_pending_usleep[ddir] <=
			td->rate_pending_usleep[odir]) {
			usec = td->rate_pending_usleep[ddir];
		} else {
			usec = td->rate_pending_usleep[odir];
			ddir = odir;
		}
	} else
		usec = td->rate_pending_usleep[ddir];

	io_u_quiesce(td);

	usec = usec_sleep(td, usec);

	td->rate_pending_usleep[ddir] -= usec;

	odir = ddir ^ 1;
	if (td_rw(td) && __should_check_rate(td, odir))
		td->rate_pending_usleep[odir] -= usec;

	if (ddir == DDIR_TRIM)
		return DDIR_TRIM;

	return ddir;
}

/*
 * Return the data direction for the next io_u. If the job is a
 * mixed read/write workload, check the rwmix cycle and switch if
 * necessary.
 */
static enum fio_ddir get_rw_ddir(struct thread_data *td)
{
	enum fio_ddir ddir;

	/*
	 * see if it's time to fsync
	 */
	if (td->o.fsync_blocks &&
	   !(td->io_issues[DDIR_WRITE] % td->o.fsync_blocks) &&
	     td->io_issues[DDIR_WRITE] && should_fsync(td))
		return DDIR_SYNC;

	/*
	 * see if it's time to fdatasync
	 */
	if (td->o.fdatasync_blocks &&
	   !(td->io_issues[DDIR_WRITE] % td->o.fdatasync_blocks) &&
	     td->io_issues[DDIR_WRITE] && should_fsync(td))
		return DDIR_DATASYNC;

	/*
	 * see if it's time to sync_file_range
	 */
	if (td->sync_file_range_nr &&
	   !(td->io_issues[DDIR_WRITE] % td->sync_file_range_nr) &&
	     td->io_issues[DDIR_WRITE] && should_fsync(td))
		return DDIR_SYNC_FILE_RANGE;

	if (td_rw(td)) {
		/*
		 * Check if it's time to seed a new data direction.
		 */
		if (td->io_issues[td->rwmix_ddir] >= td->rwmix_issues) {
			/*
			 * Put a top limit on how many bytes we do for
			 * one data direction, to avoid overflowing the
			 * ranges too much
			 */
			ddir = get_rand_ddir(td);

			if (ddir != td->rwmix_ddir)
				set_rwmix_bytes(td);

			td->rwmix_ddir = ddir;
		}
		ddir = td->rwmix_ddir;
	} else if (td_read(td))
		ddir = DDIR_READ;
	else if (td_write(td))
		ddir = DDIR_WRITE;
	else
		ddir = DDIR_TRIM;

	td->rwmix_ddir = rate_ddir(td, ddir);
	return td->rwmix_ddir;
}

static void set_rw_ddir(struct thread_data *td, struct io_u *io_u)
{
	io_u->ddir = io_u->acct_ddir = get_rw_ddir(td);

	if (io_u->ddir == DDIR_WRITE && (td->io_ops->flags & FIO_BARRIER) &&
	    td->o.barrier_blocks &&
	   !(td->io_issues[DDIR_WRITE] % td->o.barrier_blocks) &&
	     td->io_issues[DDIR_WRITE])
		io_u->flags |= IO_U_F_BARRIER;
}

void put_file_log(struct thread_data *td, struct fio_file *f)
{
	unsigned int ret = put_file(td, f);

	if (ret)
		td_verror(td, ret, "file close");
}

void put_io_u(struct thread_data *td, struct io_u *io_u)
{
	td_io_u_lock(td);

	if (io_u->file && !(io_u->flags & IO_U_F_NO_FILE_PUT))
		put_file_log(td, io_u->file);

	io_u->file = NULL;
	io_u->flags |= IO_U_F_FREE;

	if (io_u->flags & IO_U_F_IN_CUR_DEPTH)
		td->cur_depth--;
	io_u_qpush(&td->io_u_freelist, io_u);
	td_io_u_unlock(td);
	td_io_u_free_notify(td);
}

void clear_io_u(struct thread_data *td, struct io_u *io_u)
{
	io_u->flags &= ~IO_U_F_FLIGHT;
	put_io_u(td, io_u);
}

void requeue_io_u(struct thread_data *td, struct io_u **io_u)
{
	struct io_u *__io_u = *io_u;
	enum fio_ddir ddir = acct_ddir(__io_u);

	dprint(FD_IO, "requeue %p\n", __io_u);

	td_io_u_lock(td);

	__io_u->flags |= IO_U_F_FREE;
	if ((__io_u->flags & IO_U_F_FLIGHT) && ddir_rw(ddir))
		td->io_issues[ddir]--;

	__io_u->flags &= ~IO_U_F_FLIGHT;
	if (__io_u->flags & IO_U_F_IN_CUR_DEPTH)
		td->cur_depth--;

	io_u_rpush(&td->io_u_requeues, __io_u);
	td_io_u_unlock(td);
	*io_u = NULL;
}

static int fill_io_u(struct thread_data *td, struct io_u *io_u)
{
	unsigned int is_random;

	if (td->io_ops->flags & FIO_NOIO)
		goto out;

	set_rw_ddir(td, io_u);

	/*
	 * fsync() or fdatasync() or trim etc, we are done
	 */
	if (!ddir_rw(io_u->ddir))
		goto out;

	/*
	 * See if it's time to switch to a new zone
	 */
	if (td->zone_bytes >= td->o.zone_size && td->o.zone_skip) {
		struct fio_file *f = io_u->file;

		td->zone_bytes = 0;
		f->file_offset += td->o.zone_range + td->o.zone_skip;

		/*
		 * Wrap from the beginning, if we exceed the file size
		 */
		if (f->file_offset >= f->real_file_size)
			f->file_offset = f->real_file_size - f->file_offset;
		f->last_pos[io_u->ddir] = f->file_offset;
		td->io_skip_bytes += td->o.zone_skip;
	}

	/*
	 * No log, let the seq/rand engine retrieve the next buflen and
	 * position.
	 */
	if (get_next_offset(td, io_u, &is_random)) {
		dprint(FD_IO, "io_u %p, failed getting offset\n", io_u);
		return 1;
	}

	io_u->buflen = get_next_buflen(td, io_u, is_random);
	if (!io_u->buflen) {
		dprint(FD_IO, "io_u %p, failed getting buflen\n", io_u);
		return 1;
	}

	if (io_u->offset + io_u->buflen > io_u->file->real_file_size) {
		dprint(FD_IO, "io_u %p, offset too large\n", io_u);
		dprint(FD_IO, "  off=%llu/%lu > %llu\n",
			(unsigned long long) io_u->offset, io_u->buflen,
			(unsigned long long) io_u->file->real_file_size);
		return 1;
	}

	/*
	 * mark entry before potentially trimming io_u
	 */
	if (td_random(td) && file_randommap(td, io_u->file))
		mark_random_map(td, io_u);

out:
	dprint_io_u(io_u, "fill_io_u");
	td->zone_bytes += io_u->buflen;
	return 0;
}

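/*
 * Depth/submit/complete histogram helper: bucket 'nr' into the ranges
 * 0, 1-4, 5-8, 9-16, 17-32, 33-64 and 65+.
 */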
static void __io_u_mark_map(unsigned int *map, unsigned int nr)
{
	int idx = 0;

	switch (nr) {
	default:
		idx = 6;
		break;
	case 33 ... 64:
		idx = 5;
		break;
	case 17 ... 32:
		idx = 4;
		break;
	case 9 ... 16:
		idx = 3;
		break;
	case 5 ... 8:
		idx = 2;
		break;
	case 1 ... 4:
		idx = 1;
	case 0:
		break;
	}

	map[idx]++;
}

void io_u_mark_submit(struct thread_data *td, unsigned int nr)
{
	__io_u_mark_map(td->ts.io_u_submit, nr);
	td->ts.total_submit++;
}

void io_u_mark_complete(struct thread_data *td, unsigned int nr)
{
	__io_u_mark_map(td->ts.io_u_complete, nr);
	td->ts.total_complete++;
}

void io_u_mark_depth(struct thread_data *td, unsigned int nr)
{
	int idx = 0;

	switch (td->cur_depth) {
	default:
		idx = 6;
		break;
	case 32 ... 63:
		idx = 5;
		break;
	case 16 ... 31:
		idx = 4;
		break;
	case 8 ... 15:
		idx = 3;
		break;
	case 4 ... 7:
		idx = 2;
		break;
	case 2 ... 3:
		idx = 1;
	case 1:
		break;
	}

	td->ts.io_u_map[idx] += nr;
}

static void io_u_mark_lat_usec(struct thread_data *td, unsigned long usec)
{
	int idx = 0;

	assert(usec < 1000);

	switch (usec) {
	case 750 ... 999:
		idx = 9;
		break;
	case 500 ... 749:
		idx = 8;
		break;
	case 250 ... 499:
		idx = 7;
		break;
	case 100 ... 249:
		idx = 6;
		break;
	case 50 ... 99:
		idx = 5;
		break;
	case 20 ... 49:
		idx = 4;
		break;
	case 10 ... 19:
		idx = 3;
		break;
	case 4 ... 9:
		idx = 2;
		break;
	case 2 ... 3:
		idx = 1;
	case 0 ... 1:
		break;
	}

	assert(idx < FIO_IO_U_LAT_U_NR);
	td->ts.io_u_lat_u[idx]++;
}

static void io_u_mark_lat_msec(struct thread_data *td, unsigned long msec)
{
	int idx = 0;

	switch (msec) {
	default:
		idx = 11;
		break;
	case 1000 ... 1999:
		idx = 10;
		break;
	case 750 ... 999:
		idx = 9;
		break;
	case 500 ... 749:
		idx = 8;
		break;
	case 250 ... 499:
		idx = 7;
		break;
	case 100 ... 249:
		idx = 6;
		break;
	case 50 ... 99:
		idx = 5;
		break;
	case 20 ... 49:
		idx = 4;
		break;
	case 10 ... 19:
		idx = 3;
		break;
	case 4 ... 9:
		idx = 2;
		break;
	case 2 ... 3:
		idx = 1;
	case 0 ... 1:
		break;
	}

	assert(idx < FIO_IO_U_LAT_M_NR);
	td->ts.io_u_lat_m[idx]++;
}

static void io_u_mark_latency(struct thread_data *td, unsigned long usec)
{
	if (usec < 1000)
		io_u_mark_lat_usec(td, usec);
	else
		io_u_mark_lat_msec(td, usec / 1000);
}

/*
 * Get next file to service by choosing one at random
 */
static struct fio_file *get_next_file_rand(struct thread_data *td,
					   enum fio_file_flags goodf,
					   enum fio_file_flags badf)
{
	struct fio_file *f;
	int fno;

	do {
		int opened = 0;
		unsigned long r;

		r = __rand(&td->next_file_state);
		fno = (unsigned int) ((double) td->o.nr_files
				* (r / (FRAND_MAX + 1.0)));

		f = td->files[fno];
		if (fio_file_done(f))
			continue;

		if (!fio_file_open(f)) {
			int err;

			if (td->nr_open_files >= td->o.open_files)
				return ERR_PTR(-EBUSY);

			err = td_io_open_file(td, f);
			if (err)
				continue;
			opened = 1;
		}

		if ((!goodf || (f->flags & goodf)) && !(f->flags & badf)) {
			dprint(FD_FILE, "get_next_file_rand: %p\n", f);
			return f;
		}
		if (opened)
			td_io_close_file(td, f);
	} while (1);
}

/*
 * Get next file to service by doing round robin between all available ones
 */
static struct fio_file *get_next_file_rr(struct thread_data *td, int goodf,
					 int badf)
{
	unsigned int old_next_file = td->next_file;
	struct fio_file *f;

	do {
		int opened = 0;

		f = td->files[td->next_file];

		td->next_file++;
		if (td->next_file >= td->o.nr_files)
			td->next_file = 0;

		dprint(FD_FILE, "trying file %s %x\n", f->file_name, f->flags);
		if (fio_file_done(f)) {
			f = NULL;
			continue;
		}

		if (!fio_file_open(f)) {
			int err;

			if (td->nr_open_files >= td->o.open_files)
				return ERR_PTR(-EBUSY);

			err = td_io_open_file(td, f);
			if (err) {
				dprint(FD_FILE, "error %d on open of %s\n",
					err, f->file_name);
				f = NULL;
				continue;
			}
			opened = 1;
		}

		dprint(FD_FILE, "goodf=%x, badf=%x, ff=%x\n", goodf, badf,
							f->flags);
		if ((!goodf || (f->flags & goodf)) && !(f->flags & badf))
			break;

		if (opened)
			td_io_close_file(td, f);

		f = NULL;
	} while (td->next_file != old_next_file);

	dprint(FD_FILE, "get_next_file_rr: %p\n", f);
	return f;
}

static struct fio_file *__get_next_file(struct thread_data *td)
{
	struct fio_file *f;

	assert(td->o.nr_files <= td->files_index);

	if (td->nr_done_files >= td->o.nr_files) {
		dprint(FD_FILE, "get_next_file: nr_open=%d, nr_done=%d,"
				" nr_files=%d\n", td->nr_open_files,
						  td->nr_done_files,
						  td->o.nr_files);
		return NULL;
	}

	f = td->file_service_file;
	if (f && fio_file_open(f) && !fio_file_closing(f)) {
		if (td->o.file_service_type == FIO_FSERVICE_SEQ)
			goto out;
		if (td->file_service_left--)
			goto out;
	}

	if (td->o.file_service_type == FIO_FSERVICE_RR ||
	    td->o.file_service_type == FIO_FSERVICE_SEQ)
		f = get_next_file_rr(td, FIO_FILE_open, FIO_FILE_closing);
	else
		f = get_next_file_rand(td, FIO_FILE_open, FIO_FILE_closing);

	if (IS_ERR(f))
		return f;

	td->file_service_file = f;
	td->file_service_left = td->file_service_nr - 1;
out:
	if (f)
		dprint(FD_FILE, "get_next_file: %p [%s]\n", f, f->file_name);
	else
		dprint(FD_FILE, "get_next_file: NULL\n");
	return f;
}

static struct fio_file *get_next_file(struct thread_data *td)
{
	if (td->flags & TD_F_PROFILE_OPS) {
		struct prof_io_ops *ops = &td->prof_io_ops;

		if (ops->get_next_file)
			return ops->get_next_file(td);
	}

	return __get_next_file(td);
}

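/*
 * Attach a file to the io_u, cycling through the available files until
 * fill_io_u() succeeds. Files that can no longer serve IO are marked
 * done and closed along the way.
 */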
static long set_io_u_file(struct thread_data *td, struct io_u *io_u)
{
	struct fio_file *f;

	do {
		f = get_next_file(td);
		if (IS_ERR_OR_NULL(f))
			return PTR_ERR(f);

		io_u->file = f;
		get_file(f);

		if (!fill_io_u(td, io_u))
			break;

		put_file_log(td, f);
		td_io_close_file(td, f);
		io_u->file = NULL;
		fio_file_set_done(f);
		td->nr_done_files++;
		dprint(FD_FILE, "%s: is done (%d of %d)\n", f->file_name,
					td->nr_done_files, td->o.nr_files);
	} while (1);

	return 0;
}

static void lat_fatal(struct thread_data *td, struct io_completion_data *icd,
		      unsigned long tusec, unsigned long max_usec)
{
	if (!td->error)
		log_err("fio: latency of %lu usec exceeds specified max (%lu usec)\n", tusec, max_usec);
	td_verror(td, ETIMEDOUT, "max latency exceeded");
	icd->error = ETIMEDOUT;
}

static void lat_new_cycle(struct thread_data *td)
{
	fio_gettime(&td->latency_ts, NULL);
	td->latency_ios = ddir_rw_sum(td->io_blocks);
	td->latency_failed = 0;
}

/*
 * We had an IO outside the latency target. Reduce the queue depth. If we
 * are at QD=1, then it's time to give up.
 */
static int __lat_target_failed(struct thread_data *td)
{
	if (td->latency_qd == 1)
		return 1;

	td->latency_qd_high = td->latency_qd;

	if (td->latency_qd == td->latency_qd_low)
		td->latency_qd_low--;

	td->latency_qd = (td->latency_qd + td->latency_qd_low) / 2;

	dprint(FD_RATE, "Ramped down: %d %d %d\n", td->latency_qd_low, td->latency_qd, td->latency_qd_high);

	/*
	 * When we ramp QD down, quiesce existing IO to prevent
	 * a storm of ramp downs due to pending higher depth.
	 */
	io_u_quiesce(td);
	lat_new_cycle(td);
	return 0;
}

static int lat_target_failed(struct thread_data *td)
{
	if (td->o.latency_percentile.u.f == 100.0)
		return __lat_target_failed(td);

	td->latency_failed++;
	return 0;
}

void lat_target_init(struct thread_data *td)
{
	td->latency_end_run = 0;

	if (td->o.latency_target) {
		dprint(FD_RATE, "Latency target=%llu\n", td->o.latency_target);
		fio_gettime(&td->latency_ts, NULL);
		td->latency_qd = 1;
		td->latency_qd_high = td->o.iodepth;
		td->latency_qd_low = 1;
		td->latency_ios = ddir_rw_sum(td->io_blocks);
	} else
		td->latency_qd = td->o.iodepth;
}

void lat_target_reset(struct thread_data *td)
{
	if (!td->latency_end_run)
		lat_target_init(td);
}

static void lat_target_success(struct thread_data *td)
{
	const unsigned int qd = td->latency_qd;
	struct thread_options *o = &td->o;

	td->latency_qd_low = td->latency_qd;

	/*
	 * If we haven't failed yet, we double up to a failing value instead
	 * of bisecting from highest possible queue depth. If we have set
	 * a limit other than td->o.iodepth, bisect between that.
	 */
	if (td->latency_qd_high != o->iodepth)
		td->latency_qd = (td->latency_qd + td->latency_qd_high) / 2;
	else
		td->latency_qd *= 2;

	if (td->latency_qd > o->iodepth)
		td->latency_qd = o->iodepth;

	dprint(FD_RATE, "Ramped up: %d %d %d\n", td->latency_qd_low, td->latency_qd, td->latency_qd_high);

	/*
	 * Same as last one, we are done. Let it run a latency cycle, so
	 * we get only the results from the targeted depth.
	 */
	if (td->latency_qd == qd) {
		if (td->latency_end_run) {
			dprint(FD_RATE, "We are done\n");
			td->done = 1;
		} else {
			dprint(FD_RATE, "Quiesce and final run\n");
			io_u_quiesce(td);
			td->latency_end_run = 1;
			reset_all_stats(td);
			reset_io_stats(td);
		}
	}

	lat_new_cycle(td);
}

/*
 * Check if we can bump the queue depth
 */
void lat_target_check(struct thread_data *td)
{
	uint64_t usec_window;
	uint64_t ios;
	double success_ios;

	usec_window = utime_since_now(&td->latency_ts);
	if (usec_window < td->o.latency_window)
		return;

	ios = ddir_rw_sum(td->io_blocks) - td->latency_ios;
	success_ios = (double) (ios - td->latency_failed) / (double) ios;
	success_ios *= 100.0;

	dprint(FD_RATE, "Success rate: %.2f%% (target %.2f%%)\n", success_ios, td->o.latency_percentile.u.f);

	if (success_ios >= td->o.latency_percentile.u.f)
		lat_target_success(td);
	else
		__lat_target_failed(td);
}

/*
 * If latency target is enabled, we might be ramping up or down and not
 * using the full queue depth available.
 */
int queue_full(const struct thread_data *td)
{
	const int qempty = io_u_qempty(&td->io_u_freelist);

	if (qempty)
		return 1;
	if (!td->o.latency_target)
		return 0;

	return td->cur_depth >= td->latency_qd;
}

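/*
 * Grab a free io_u: requeued io_us are served first, then the freelist
 * (unless the effective queue depth, possibly reduced by the latency
 * target logic, is already full). With verify_async we may have to wait
 * for the verify threads to hand one back.
 */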
Jens Axboe10ba5352006-10-20 11:39:27 +02001291struct io_u *__get_io_u(struct thread_data *td)
1292{
Jens Axboe0cae66f2014-03-03 13:55:32 -07001293 struct io_u *io_u = NULL;
Jens Axboe10ba5352006-10-20 11:39:27 +02001294
Jens Axboede54cfd2014-11-10 20:34:00 -07001295 if (td->stop_io)
1296 return NULL;
1297
Jens Axboee8462bd2009-07-06 12:59:04 +02001298 td_io_u_lock(td);
1299
1300again:
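	/*
	 * Prefer requeued io_u's: they already carry a file, offset and
	 * buffer from the earlier submission attempt.
	 */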
Jens Axboe2ae0b202013-05-28 14:16:55 +02001301 if (!io_u_rempty(&td->io_u_requeues))
1302 io_u = io_u_rpop(&td->io_u_requeues);
Jens Axboe3e260a42013-12-09 12:38:53 -07001303 else if (!queue_full(td)) {
Jens Axboe2ae0b202013-05-28 14:16:55 +02001304 io_u = io_u_qpop(&td->io_u_freelist);
Jens Axboe10ba5352006-10-20 11:39:27 +02001305
Jens Axboe225ba9e2014-02-26 14:31:15 -08001306 io_u->file = NULL;
Jens Axboe6040dab2006-10-24 19:38:15 +02001307 io_u->buflen = 0;
Jens Axboe10ba5352006-10-20 11:39:27 +02001308 io_u->resid = 0;
Jens Axboed7762cf2007-02-23 12:34:57 +01001309 io_u->end_io = NULL;
Jens Axboe755200a2007-02-19 13:08:12 +01001310 }
1311
1312 if (io_u) {
Jens Axboe0c6e7512007-02-22 11:19:39 +01001313 assert(io_u->flags & IO_U_F_FREE);
Jens Axboee69fdf72014-07-23 16:11:43 +02001314 io_u->flags &= ~(IO_U_F_FREE | IO_U_F_NO_FILE_PUT |
1315 IO_U_F_TRIMMED | IO_U_F_BARRIER |
1316 IO_U_F_VER_LIST);
Jens Axboe0c6e7512007-02-22 11:19:39 +01001317
Jens Axboe755200a2007-02-19 13:08:12 +01001318 io_u->error = 0;
Jens Axboebcd5abf2013-01-23 09:27:25 -07001319 io_u->acct_ddir = -1;
Jens Axboe10ba5352006-10-20 11:39:27 +02001320 td->cur_depth++;
Radha Ramachandran0c412142009-11-03 21:45:31 +01001321 io_u->flags |= IO_U_F_IN_CUR_DEPTH;
Jens Axboef9401282014-02-06 12:17:37 -07001322 io_u->ipo = NULL;
Jens Axboe1dec3e02010-03-19 10:33:39 +01001323 } else if (td->o.verify_async) {
1324 /*
1325 * We ran out, wait for async verify threads to finish and
1326 * return one
1327 */
1328 pthread_cond_wait(&td->free_cond, &td->io_u_lock);
1329 goto again;
Jens Axboe10ba5352006-10-20 11:39:27 +02001330 }
1331
Jens Axboee8462bd2009-07-06 12:59:04 +02001332 td_io_u_unlock(td);
Jens Axboe10ba5352006-10-20 11:39:27 +02001333 return io_u;
1334}
1335
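/*
 * With trim_backlog in use, completed writes are queued up and trimmed in
 * batches: every trim_backlog entries in the history, switch to issuing
 * trim_batch trims before going back to normal IO.
 */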
Jens Axboe0d29de82010-09-01 13:54:15 +02001336static int check_get_trim(struct thread_data *td, struct io_u *io_u)
Jens Axboe10ba5352006-10-20 11:39:27 +02001337{
Jens Axboed72be542012-11-30 19:37:46 +01001338 if (!(td->flags & TD_F_TRIM_BACKLOG))
1339 return 0;
1340
1341 if (td->trim_entries) {
Jens Axboe0d29de82010-09-01 13:54:15 +02001342 int get_trim = 0;
Jens Axboe10ba5352006-10-20 11:39:27 +02001343
Jens Axboe0d29de82010-09-01 13:54:15 +02001344 if (td->trim_batch) {
1345 td->trim_batch--;
1346 get_trim = 1;
1347 } else if (!(td->io_hist_len % td->o.trim_backlog) &&
1348 td->last_ddir != DDIR_READ) {
1349 td->trim_batch = td->o.trim_batch;
1350 if (!td->trim_batch)
1351 td->trim_batch = td->o.trim_backlog;
1352 get_trim = 1;
1353 }
1354
1355 if (get_trim && !get_next_trim(td, io_u))
1356 return 1;
Jens Axboe2ba1c292008-02-01 13:16:38 +01001357 }
Jens Axboe10ba5352006-10-20 11:39:27 +02001358
Jens Axboe0d29de82010-09-01 13:54:15 +02001359 return 0;
1360}
1361
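/*
 * Same idea as the trim backlog above, but for verifies: every
 * verify_backlog writes, interleave a batch of verify reads of the
 * previously written blocks.
 */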
1362static int check_get_verify(struct thread_data *td, struct io_u *io_u)
1363{
Jens Axboed72be542012-11-30 19:37:46 +01001364 if (!(td->flags & TD_F_VER_BACKLOG))
1365 return 0;
1366
1367 if (td->io_hist_len) {
Jens Axboe9e144182010-06-15 14:25:36 +02001368 int get_verify = 0;
1369
Jens Axboed1ece0c2012-03-07 09:32:58 +01001370 if (td->verify_batch)
Jens Axboe9e144182010-06-15 14:25:36 +02001371 get_verify = 1;
Jens Axboed1ece0c2012-03-07 09:32:58 +01001372 else if (!(td->io_hist_len % td->o.verify_backlog) &&
Jens Axboe9e144182010-06-15 14:25:36 +02001373 td->last_ddir != DDIR_READ) {
1374 td->verify_batch = td->o.verify_batch;
Jens Axboef8a75c92010-06-15 14:27:28 +02001375 if (!td->verify_batch)
1376 td->verify_batch = td->o.verify_backlog;
Jens Axboe9e144182010-06-15 14:25:36 +02001377 get_verify = 1;
1378 }
1379
Jens Axboed1ece0c2012-03-07 09:32:58 +01001380 if (get_verify && !get_next_verify(td, io_u)) {
1381 td->verify_batch--;
Jens Axboe0d29de82010-09-01 13:54:15 +02001382 return 1;
Jens Axboed1ece0c2012-03-07 09:32:58 +01001383 }
Jens Axboe9e144182010-06-15 14:25:36 +02001384 }
1385
Jens Axboe0d29de82010-09-01 13:54:15 +02001386 return 0;
1387}
1388
1389/*
Jens Axboede789762011-09-16 22:11:23 +02001390 * Fill the offset and start time into the buffer content, so the data
Jens Axboe23f394d2011-09-16 22:45:27 +02001391 * isn't trivially compressible or dedupable. Do this for every
 1392 * 512b block in the range, since that should be the smallest block size
 1393 * we can expect from a device.
Jens Axboede789762011-09-16 22:11:23 +02001394 */
1395static void small_content_scramble(struct io_u *io_u)
1396{
Jens Axboe23f394d2011-09-16 22:45:27 +02001397 unsigned int i, nr_blocks = io_u->buflen / 512;
Jens Axboe1ae83d42013-01-12 01:44:15 -07001398 uint64_t boffset;
Jens Axboe23f394d2011-09-16 22:45:27 +02001399 unsigned int offset;
1400 void *p, *end;
Jens Axboede789762011-09-16 22:11:23 +02001401
Jens Axboe23f394d2011-09-16 22:45:27 +02001402 if (!nr_blocks)
1403 return;
1404
1405 p = io_u->xfer_buf;
Jens Axboefba76ee2011-09-27 14:27:48 -06001406 boffset = io_u->offset;
Jens Axboe81f03662012-02-02 09:20:09 +01001407 io_u->buf_filled_len = 0;
Jens Axboefad82f72011-09-19 11:33:30 +02001408
Jens Axboe23f394d2011-09-16 22:45:27 +02001409 for (i = 0; i < nr_blocks; i++) {
1410 /*
 1411		 * Fill this block's byte offset into a "random", u64-aligned
 1412		 * spot within the 512b block, derived by xor'ing the usec part
 1413		 * of the start time with the block offset itself.
1414 */
Jens Axboefad82f72011-09-19 11:33:30 +02001415 offset = (io_u->start_time.tv_usec ^ boffset) & 511;
Jens Axboe1ae83d42013-01-12 01:44:15 -07001416 offset &= ~(sizeof(uint64_t) - 1);
1417 if (offset >= 512 - sizeof(uint64_t))
1418 offset -= sizeof(uint64_t);
Jens Axboefba76ee2011-09-27 14:27:48 -06001419 memcpy(p + offset, &boffset, sizeof(boffset));
Jens Axboe23f394d2011-09-16 22:45:27 +02001420
1421 end = p + 512 - sizeof(io_u->start_time);
1422 memcpy(end, &io_u->start_time, sizeof(io_u->start_time));
1423 p += 512;
Jens Axboefad82f72011-09-19 11:33:30 +02001424 boffset += 512;
Jens Axboe23f394d2011-09-16 22:45:27 +02001425 }
Jens Axboede789762011-09-16 22:11:23 +02001426}
1427
1428/*
Jens Axboe0d29de82010-09-01 13:54:15 +02001429 * Return an io_u to be processed. Gets a buflen and offset, sets direction,
1430 * etc. The returned io_u is fully ready to be prepped and submitted.
1431 */
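/*
 * Note on the return convention: NULL means no io_u could be obtained
 * (e.g. stop_io is set), while setup failures come back as an ERR_PTR()
 * encoded error. A rough caller sketch (simplified; the real loop lives
 * in backend.c):
 *
 *	io_u = get_io_u(td);
 *	if (IS_ERR(io_u) || !io_u)
 *		bail out or handle the error;
 *	ret = td_io_queue(td, io_u);
 *	then handle FIO_Q_COMPLETED / FIO_Q_QUEUED / FIO_Q_BUSY
 */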
1432struct io_u *get_io_u(struct thread_data *td)
1433{
1434 struct fio_file *f;
1435 struct io_u *io_u;
Jens Axboede789762011-09-16 22:11:23 +02001436 int do_scramble = 0;
Jens Axboe002fe732014-02-11 08:31:13 -07001437 long ret = 0;
Jens Axboe0d29de82010-09-01 13:54:15 +02001438
1439 io_u = __get_io_u(td);
1440 if (!io_u) {
1441 dprint(FD_IO, "__get_io_u failed\n");
1442 return NULL;
1443 }
1444
1445 if (check_get_verify(td, io_u))
1446 goto out;
1447 if (check_get_trim(td, io_u))
1448 goto out;
1449
Jens Axboe755200a2007-02-19 13:08:12 +01001450 /*
 1451	 * If this io_u came from a requeue, it is already set up
1452 */
1453 if (io_u->file)
Jens Axboe77f392b2007-02-19 20:13:09 +01001454 goto out;
Jens Axboe755200a2007-02-19 13:08:12 +01001455
Jens Axboe429f6672007-07-23 10:38:43 +02001456 /*
1457 * If using an iolog, grab next piece if any available.
1458 */
Jens Axboed72be542012-11-30 19:37:46 +01001459 if (td->flags & TD_F_READ_IOLOG) {
Jens Axboe429f6672007-07-23 10:38:43 +02001460 if (read_iolog_get(td, io_u))
1461 goto err_put;
Jens Axboe2ba1c292008-02-01 13:16:38 +01001462 } else if (set_io_u_file(td, io_u)) {
Jens Axboe002fe732014-02-11 08:31:13 -07001463 ret = -EBUSY;
Jens Axboe2ba1c292008-02-01 13:16:38 +01001464 dprint(FD_IO, "io_u %p, setting file failed\n", io_u);
Jens Axboe429f6672007-07-23 10:38:43 +02001465 goto err_put;
Jens Axboe2ba1c292008-02-01 13:16:38 +01001466 }
Jens Axboe5ec10ea2008-03-06 15:42:00 +01001467
Jens Axboe429f6672007-07-23 10:38:43 +02001468 f = io_u->file;
Jens Axboe002fe732014-02-11 08:31:13 -07001469 if (!f) {
1470 dprint(FD_IO, "io_u %p, setting file failed\n", io_u);
1471 goto err_put;
1472 }
1473
Jens Axboed6aed792009-06-03 08:41:15 +02001474 assert(fio_file_open(f));
Jens Axboe97af62c2007-05-22 11:12:13 +02001475
Jens Axboeff58fce2010-08-25 12:02:08 +02001476 if (ddir_rw(io_u->ddir)) {
Jens Axboed0656a92008-02-01 18:33:23 +01001477 if (!io_u->buflen && !(td->io_ops->flags & FIO_NOIO)) {
Jens Axboe2ba1c292008-02-01 13:16:38 +01001478 dprint(FD_IO, "get_io_u: zero buflen on %p\n", io_u);
Jens Axboe429f6672007-07-23 10:38:43 +02001479 goto err_put;
Jens Axboe2ba1c292008-02-01 13:16:38 +01001480 }
Jens Axboe87dc1ab2006-10-24 14:41:26 +02001481
Jens Axboe08a99be2014-12-14 19:01:24 -07001482 f->last_start[io_u->ddir] = io_u->offset;
1483 f->last_pos[io_u->ddir] = io_u->offset + io_u->buflen;
Jens Axboe87dc1ab2006-10-24 14:41:26 +02001484
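		/*
		 * For writes, decide how the payload is prepared: refill it
		 * with fresh (possibly compressible/dedupable) data, lightly
		 * scramble the existing contents, or generate verify data,
		 * depending on the job options.
		 */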
Jens Axboefd684182011-09-19 09:24:44 +02001485 if (io_u->ddir == DDIR_WRITE) {
Jens Axboed72be542012-11-30 19:37:46 +01001486 if (td->flags & TD_F_REFILL_BUFFERS) {
Jens Axboe9c426842012-03-02 21:02:12 +01001487 io_u_fill_buffer(td, io_u,
Jens Axboe8e0aa162014-09-26 15:04:58 -06001488 td->o.min_bs[DDIR_WRITE],
1489 io_u->xfer_buflen);
Jens Axboebedc9dc2014-03-17 12:51:09 -06001490 } else if ((td->flags & TD_F_SCRAMBLE_BUFFERS) &&
1491 !(td->flags & TD_F_COMPRESS))
Jens Axboefd684182011-09-19 09:24:44 +02001492 do_scramble = 1;
Jens Axboed72be542012-11-30 19:37:46 +01001493 if (td->flags & TD_F_VER_NONE) {
Jens Axboe629f1d72012-03-09 19:02:01 +01001494 populate_verify_io_u(td, io_u);
1495 do_scramble = 0;
1496 }
Jens Axboefd684182011-09-19 09:24:44 +02001497 } else if (io_u->ddir == DDIR_READ) {
Radha Ramachandrancbe8d752010-07-14 08:36:07 +02001498 /*
 1499			 * Reset the buf_filled parameters so that the buffer
 1500			 * is refilled the next time it is used for a write.
1501 */
Radha Ramachandrancbe8d752010-07-14 08:36:07 +02001502 io_u->buf_filled_len = 0;
1503 }
Jens Axboe10ba5352006-10-20 11:39:27 +02001504 }
1505
Jens Axboe165faf12007-02-07 11:30:37 +01001506 /*
1507 * Set io data pointers.
1508 */
Jens Axboecec6b552007-02-06 20:15:38 +01001509 io_u->xfer_buf = io_u->buf;
1510 io_u->xfer_buflen = io_u->buflen;
Jens Axboe5973caf2008-05-21 19:52:35 +02001511
Jens Axboe6ac7a332008-03-01 15:22:32 +01001512out:
Jens Axboe0d29de82010-09-01 13:54:15 +02001513 assert(io_u->file);
Jens Axboe429f6672007-07-23 10:38:43 +02001514 if (!td_io_prep(td, io_u)) {
Jens Axboe993bf482008-11-14 13:04:53 +01001515 if (!td->o.disable_slat)
1516 fio_gettime(&io_u->start_time, NULL);
Jens Axboede789762011-09-16 22:11:23 +02001517 if (do_scramble)
1518 small_content_scramble(io_u);
Jens Axboe429f6672007-07-23 10:38:43 +02001519 return io_u;
Jens Axboe36167d82007-02-18 05:41:31 +01001520 }
Jens Axboe429f6672007-07-23 10:38:43 +02001521err_put:
Jens Axboe2ba1c292008-02-01 13:16:38 +01001522 dprint(FD_IO, "get_io_u failed\n");
Jens Axboe429f6672007-07-23 10:38:43 +02001523 put_io_u(td, io_u);
Jens Axboe002fe732014-02-11 08:31:13 -07001524 return ERR_PTR(ret);
Jens Axboe10ba5352006-10-20 11:39:27 +02001525}
1526
Jens Axboe54517922007-03-05 10:06:06 +01001527void io_u_log_error(struct thread_data *td, struct io_u *io_u)
1528{
Dmitry Monakhov8b28bd42012-09-23 15:46:09 +04001529 enum error_type_bit eb = td_error_type(io_u->ddir, io_u->error);
Jens Axboe825f8182010-08-25 10:47:18 +02001530
Dmitry Monakhov8b28bd42012-09-23 15:46:09 +04001531 if (td_non_fatal_error(td, eb, io_u->error) && !td->o.error_dump)
1532 return;
Jens Axboe54517922007-03-05 10:06:06 +01001533
Robert Elliott2cbdcdb2014-09-16 17:09:48 -05001534 log_err("fio: io_u error%s%s: %s: %s offset=%llu, buflen=%lu\n",
1535 io_u->file ? " on file " : "",
1536 io_u->file ? io_u->file->file_name : "",
1537 strerror(io_u->error),
1538 io_ddir_name(io_u->ddir),
1539 io_u->offset, io_u->xfer_buflen);
Jens Axboe54517922007-03-05 10:06:06 +01001540
1541 if (!td->error)
1542 td_verror(td, io_u->error, "io_u error");
1543}
1544
Jens Axboeaba6c952014-02-13 19:59:56 -07001545static inline int gtod_reduce(struct thread_data *td)
1546{
Jens Axboe729fe3a2014-02-14 08:46:35 -07001547 return td->o.disable_clat && td->o.disable_lat && td->o.disable_slat
Jens Axboeb74b8202014-02-13 20:04:02 -07001548 && td->o.disable_bw;
Jens Axboeaba6c952014-02-13 19:59:56 -07001549}
1550
Jens Axboec8eeb9d2011-10-05 14:02:22 +02001551static void account_io_completion(struct thread_data *td, struct io_u *io_u,
1552 struct io_completion_data *icd,
1553 const enum fio_ddir idx, unsigned int bytes)
1554{
Jens Axboe24d23ca2012-11-13 08:31:24 -07001555 unsigned long lusec = 0;
Jens Axboec8eeb9d2011-10-05 14:02:22 +02001556
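	/*
	 * lusec is the completion latency (issue -> completion); tusec below
	 * is the total latency (io_u start -> completion). Submission latency
	 * is logged separately at submit time in io_u_queued().
	 */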
Jens Axboeaba6c952014-02-13 19:59:56 -07001557 if (!gtod_reduce(td))
Jens Axboec8eeb9d2011-10-05 14:02:22 +02001558 lusec = utime_since(&io_u->issue_time, &icd->time);
1559
1560 if (!td->o.disable_lat) {
1561 unsigned long tusec;
1562
1563 tusec = utime_since(&io_u->start_time, &icd->time);
Jens Axboeccefd5f2014-06-30 20:59:03 -06001564 add_lat_sample(td, idx, tusec, bytes, io_u->offset);
Jens Axboe15501532012-10-24 16:37:45 +02001565
Jens Axboed4afedf2013-05-22 22:21:29 +02001566 if (td->flags & TD_F_PROFILE_OPS) {
1567 struct prof_io_ops *ops = &td->prof_io_ops;
1568
1569 if (ops->io_u_lat)
1570 icd->error = ops->io_u_lat(td, tusec);
1571 }
1572
Jens Axboe3e260a42013-12-09 12:38:53 -07001573 if (td->o.max_latency && tusec > td->o.max_latency)
1574 lat_fatal(td, icd, tusec, td->o.max_latency);
1575 if (td->o.latency_target && tusec > td->o.latency_target) {
1576 if (lat_target_failed(td))
1577 lat_fatal(td, icd, tusec, td->o.latency_target);
Jens Axboe15501532012-10-24 16:37:45 +02001578 }
Jens Axboec8eeb9d2011-10-05 14:02:22 +02001579 }
1580
1581 if (!td->o.disable_clat) {
Jens Axboeccefd5f2014-06-30 20:59:03 -06001582 add_clat_sample(td, idx, lusec, bytes, io_u->offset);
Jens Axboec8eeb9d2011-10-05 14:02:22 +02001583 io_u_mark_latency(td, lusec);
1584 }
1585
1586 if (!td->o.disable_bw)
1587 add_bw_sample(td, idx, bytes, &icd->time);
1588
Jens Axboeaba6c952014-02-13 19:59:56 -07001589 if (!gtod_reduce(td))
1590 add_iops_sample(td, idx, bytes, &icd->time);
Jens Axboec8eeb9d2011-10-05 14:02:22 +02001591}
1592
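/*
 * How long, in usec, the IO done so far in this direction should have
 * taken at the configured rate. Comparing this against the actual elapsed
 * time tells us how far ahead of the rate limit we are.
 */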
Steven Lang1b8dbf22011-11-09 13:48:01 +01001593static long long usec_for_io(struct thread_data *td, enum fio_ddir ddir)
1594{
Jens Axboe1ae83d42013-01-12 01:44:15 -07001595 uint64_t secs, remainder, bps, bytes;
1596
Steven Lang1b8dbf22011-11-09 13:48:01 +01001597 bytes = td->this_io_bytes[ddir];
1598 bps = td->rate_bps[ddir];
1599 secs = bytes / bps;
1600 remainder = bytes % bps;
1601 return remainder * 1000000 / bps + secs * 1000000;
1602}
1603
Jens Axboee69fdf72014-07-23 16:11:43 +02001604static void io_completed(struct thread_data *td, struct io_u **io_u_ptr,
Jens Axboe97601022007-02-18 12:47:29 +01001605 struct io_completion_data *icd)
Jens Axboe10ba5352006-10-20 11:39:27 +02001606{
Jens Axboee69fdf72014-07-23 16:11:43 +02001607 struct io_u *io_u = *io_u_ptr;
1608 enum fio_ddir ddir = io_u->ddir;
1609 struct fio_file *f = io_u->file;
Jens Axboe10ba5352006-10-20 11:39:27 +02001610
Jens Axboe2ba1c292008-02-01 13:16:38 +01001611 dprint_io_u(io_u, "io complete");
1612
Jens Axboe2ecc1b52009-11-04 20:58:09 +01001613 td_io_u_lock(td);
Jens Axboe0c6e7512007-02-22 11:19:39 +01001614 assert(io_u->flags & IO_U_F_FLIGHT);
Jens Axboe38dad622010-07-20 14:46:00 -06001615 io_u->flags &= ~(IO_U_F_FLIGHT | IO_U_F_BUSY_OK);
Jens Axboef9401282014-02-06 12:17:37 -07001616
1617 /*
1618 * Mark IO ok to verify
1619 */
1620 if (io_u->ipo) {
Jens Axboe890b6652014-05-06 19:06:51 -06001621 /*
1622 * Remove errored entry from the verification list
1623 */
1624 if (io_u->error)
1625 unlog_io_piece(td, io_u);
1626 else {
1627 io_u->ipo->flags &= ~IP_F_IN_FLIGHT;
1628 write_barrier();
1629 }
Jens Axboef9401282014-02-06 12:17:37 -07001630 }
1631
Jens Axboe2ecc1b52009-11-04 20:58:09 +01001632 td_io_u_unlock(td);
Jens Axboe0c6e7512007-02-22 11:19:39 +01001633
Jens Axboee69fdf72014-07-23 16:11:43 +02001634 if (ddir_sync(ddir)) {
Jens Axboe87dc1ab2006-10-24 14:41:26 +02001635 td->last_was_sync = 1;
Jens Axboe44f29692010-03-09 20:09:44 +01001636 if (f) {
1637 f->first_write = -1ULL;
1638 f->last_write = -1ULL;
1639 }
Jens Axboe87dc1ab2006-10-24 14:41:26 +02001640 return;
1641 }
1642
1643 td->last_was_sync = 0;
Jens Axboee69fdf72014-07-23 16:11:43 +02001644 td->last_ddir = ddir;
Jens Axboe87dc1ab2006-10-24 14:41:26 +02001645
Jens Axboee69fdf72014-07-23 16:11:43 +02001646 if (!io_u->error && ddir_rw(ddir)) {
Jens Axboe10ba5352006-10-20 11:39:27 +02001647 unsigned int bytes = io_u->buflen - io_u->resid;
Jens Axboee69fdf72014-07-23 16:11:43 +02001648 const enum fio_ddir oddir = ddir ^ 1;
Jens Axboeb29ee5b2008-09-11 10:17:26 +02001649 int ret;
Jens Axboe10ba5352006-10-20 11:39:27 +02001650
Jens Axboee69fdf72014-07-23 16:11:43 +02001651 td->io_blocks[ddir]++;
1652 td->this_io_blocks[ddir]++;
1653 td->io_bytes[ddir] += bytes;
webeeae2fafc2012-03-23 13:41:41 +01001654
1655 if (!(io_u->flags & IO_U_F_VER_LIST))
Jens Axboee69fdf72014-07-23 16:11:43 +02001656 td->this_io_bytes[ddir] += bytes;
Jens Axboe10ba5352006-10-20 11:39:27 +02001657
Jens Axboede54cfd2014-11-10 20:34:00 -07001658 if (ddir == DDIR_WRITE) {
1659 if (f) {
1660 if (f->first_write == -1ULL ||
1661 io_u->offset < f->first_write)
1662 f->first_write = io_u->offset;
1663 if (f->last_write == -1ULL ||
1664 ((io_u->offset + bytes) > f->last_write))
1665 f->last_write = io_u->offset + bytes;
1666 }
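			/*
			 * If enabled, remember completed write offsets in a
			 * small ring buffer (presumably backing fio's verify
			 * state / trigger support).
			 */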
1667 if (td->last_write_comp) {
1668 int idx = td->last_write_idx++;
1669
1670 td->last_write_comp[idx] = io_u->offset;
1671 if (td->last_write_idx == td->o.iodepth)
1672 td->last_write_idx = 0;
1673 }
Jens Axboe44f29692010-03-09 20:09:44 +01001674 }
1675
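		/*
		 * Past the ramp time, account the completion for the stats
		 * and, if rate limiting is active, work out how far ahead of
		 * the target rate we are so the main loop can sleep it off.
		 */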
Steven Lang6b1190f2012-02-07 09:42:59 +01001676 if (ramp_time_over(td) && (td->runstate == TD_RUNNING ||
1677 td->runstate == TD_VERIFYING)) {
Jens Axboee69fdf72014-07-23 16:11:43 +02001678 account_io_completion(td, io_u, icd, ddir, bytes);
Jens Axboe40e1a6f2009-06-11 10:55:39 +02001679
Jens Axboee69fdf72014-07-23 16:11:43 +02001680 if (__should_check_rate(td, ddir)) {
1681 td->rate_pending_usleep[ddir] =
1682 (usec_for_io(td, ddir) -
Radha Ramachandranba3e4e02009-12-09 22:31:44 +01001683 utime_since_now(&td->start));
Jens Axboeb23b6a22009-06-11 22:06:23 +02001684 }
Jens Axboee69fdf72014-07-23 16:11:43 +02001685 if (ddir != DDIR_TRIM &&
1686 __should_check_rate(td, oddir)) {
1687 td->rate_pending_usleep[oddir] =
1688 (usec_for_io(td, oddir) -
Radha Ramachandranba3e4e02009-12-09 22:31:44 +01001689 utime_since_now(&td->start));
Jens Axboee69fdf72014-07-23 16:11:43 +02001690 }
Jens Axboe721938a2008-09-10 09:46:16 +02001691 }
Jens Axboe10ba5352006-10-20 11:39:27 +02001692
Jens Axboee69fdf72014-07-23 16:11:43 +02001693 icd->bytes_done[ddir] += bytes;
Jens Axboe3af6ef32007-02-18 06:57:43 +01001694
Jens Axboed7762cf2007-02-23 12:34:57 +01001695 if (io_u->end_io) {
Jens Axboee69fdf72014-07-23 16:11:43 +02001696 ret = io_u->end_io(td, io_u_ptr);
1697 io_u = *io_u_ptr;
Jens Axboe3af6ef32007-02-18 06:57:43 +01001698 if (ret && !icd->error)
1699 icd->error = ret;
1700 }
Jens Axboeff58fce2010-08-25 12:02:08 +02001701 } else if (io_u->error) {
Jens Axboe10ba5352006-10-20 11:39:27 +02001702 icd->error = io_u->error;
Jens Axboe54517922007-03-05 10:06:06 +01001703 io_u_log_error(td, io_u);
1704 }
Dmitry Monakhov8b28bd42012-09-23 15:46:09 +04001705 if (icd->error) {
Jens Axboee69fdf72014-07-23 16:11:43 +02001706 enum error_type_bit eb = td_error_type(ddir, icd->error);
1707
Dmitry Monakhov8b28bd42012-09-23 15:46:09 +04001708 if (!td_non_fatal_error(td, eb, icd->error))
1709 return;
Jens Axboee69fdf72014-07-23 16:11:43 +02001710
Radha Ramachandranf2bba182009-06-15 08:40:16 +02001711 /*
1712 * If there is a non_fatal error, then add to the error count
1713 * and clear all the errors.
1714 */
1715 update_error_count(td, icd->error);
1716 td_clear_error(td);
1717 icd->error = 0;
Jens Axboee69fdf72014-07-23 16:11:43 +02001718 if (io_u)
1719 io_u->error = 0;
Radha Ramachandranf2bba182009-06-15 08:40:16 +02001720 }
Jens Axboe10ba5352006-10-20 11:39:27 +02001721}
1722
Jens Axboe9520ebb2008-10-16 21:03:27 +02001723static void init_icd(struct thread_data *td, struct io_completion_data *icd,
1724 int nr)
Jens Axboe36167d82007-02-18 05:41:31 +01001725{
Shaohua Li6eaf09d2012-09-14 08:49:43 +02001726 int ddir;
Jens Axboeaba6c952014-02-13 19:59:56 -07001727
1728 if (!gtod_reduce(td))
Jens Axboe9520ebb2008-10-16 21:03:27 +02001729 fio_gettime(&icd->time, NULL);
Jens Axboe36167d82007-02-18 05:41:31 +01001730
Jens Axboe3af6ef32007-02-18 06:57:43 +01001731 icd->nr = nr;
1732
Jens Axboe36167d82007-02-18 05:41:31 +01001733 icd->error = 0;
Shaohua Li6eaf09d2012-09-14 08:49:43 +02001734 for (ddir = DDIR_READ; ddir < DDIR_RWDIR_CNT; ddir++)
1735 icd->bytes_done[ddir] = 0;
Jens Axboe36167d82007-02-18 05:41:31 +01001736}
1737
Jens Axboe97601022007-02-18 12:47:29 +01001738static void ios_completed(struct thread_data *td,
1739 struct io_completion_data *icd)
Jens Axboe10ba5352006-10-20 11:39:27 +02001740{
1741 struct io_u *io_u;
1742 int i;
1743
Jens Axboe10ba5352006-10-20 11:39:27 +02001744 for (i = 0; i < icd->nr; i++) {
1745 io_u = td->io_ops->event(td, i);
1746
Jens Axboee69fdf72014-07-23 16:11:43 +02001747 io_completed(td, &io_u, icd);
Jens Axboee8462bd2009-07-06 12:59:04 +02001748
Jens Axboee69fdf72014-07-23 16:11:43 +02001749 if (io_u)
Jens Axboee8462bd2009-07-06 12:59:04 +02001750 put_io_u(td, io_u);
Jens Axboe10ba5352006-10-20 11:39:27 +02001751 }
1752}
Jens Axboe97601022007-02-18 12:47:29 +01001753
Jens Axboee7e6cfb2007-02-20 10:58:34 +01001754/*
1755 * Complete a single io_u for the sync engines.
1756 */
Jens Axboe581e7142009-06-09 12:47:16 +02001757int io_u_sync_complete(struct thread_data *td, struct io_u *io_u,
Jens Axboe100f49f2013-01-23 10:15:57 -07001758 uint64_t *bytes)
Jens Axboe97601022007-02-18 12:47:29 +01001759{
1760 struct io_completion_data icd;
1761
Jens Axboe9520ebb2008-10-16 21:03:27 +02001762 init_icd(td, &icd, 1);
Jens Axboee69fdf72014-07-23 16:11:43 +02001763 io_completed(td, &io_u, &icd);
Jens Axboee8462bd2009-07-06 12:59:04 +02001764
Jens Axboee69fdf72014-07-23 16:11:43 +02001765 if (io_u)
Jens Axboee8462bd2009-07-06 12:59:04 +02001766 put_io_u(td, io_u);
Jens Axboe97601022007-02-18 12:47:29 +01001767
Jens Axboe581e7142009-06-09 12:47:16 +02001768 if (icd.error) {
1769 td_verror(td, icd.error, "io_u_sync_complete");
1770 return -1;
1771 }
Jens Axboe97601022007-02-18 12:47:29 +01001772
Jens Axboe581e7142009-06-09 12:47:16 +02001773 if (bytes) {
Shaohua Li6eaf09d2012-09-14 08:49:43 +02001774 int ddir;
1775
1776 for (ddir = DDIR_READ; ddir < DDIR_RWDIR_CNT; ddir++)
1777 bytes[ddir] += icd.bytes_done[ddir];
Jens Axboe581e7142009-06-09 12:47:16 +02001778 }
1779
1780 return 0;
Jens Axboe97601022007-02-18 12:47:29 +01001781}
1782
Jens Axboee7e6cfb2007-02-20 10:58:34 +01001783/*
1784 * Called to complete min_events number of io for the async engines.
1785 */
Jens Axboe581e7142009-06-09 12:47:16 +02001786int io_u_queued_complete(struct thread_data *td, int min_evts,
Jens Axboe100f49f2013-01-23 10:15:57 -07001787 uint64_t *bytes)
Jens Axboe97601022007-02-18 12:47:29 +01001788{
Jens Axboe97601022007-02-18 12:47:29 +01001789 struct io_completion_data icd;
Jens Axboe00de55e2007-02-20 10:45:57 +01001790 struct timespec *tvp = NULL;
Jens Axboe97601022007-02-18 12:47:29 +01001791 int ret;
Davide Libenzi4d06a332007-03-22 07:43:50 +01001792 struct timespec ts = { .tv_sec = 0, .tv_nsec = 0, };
Jens Axboe97601022007-02-18 12:47:29 +01001793
Jens Axboe49504212008-06-05 09:03:30 +02001794 dprint(FD_IO, "io_u_queued_completed: min=%d\n", min_evts);
Jens Axboeb271fe62008-02-04 10:49:41 +01001795
Jens Axboe49504212008-06-05 09:03:30 +02001796 if (!min_evts)
Jens Axboe00de55e2007-02-20 10:45:57 +01001797 tvp = &ts;
Robert Elliott05074832014-09-04 13:51:05 -06001798 else if (min_evts > td->cur_depth)
1799 min_evts = td->cur_depth;
Jens Axboe97601022007-02-18 12:47:29 +01001800
Jens Axboe49504212008-06-05 09:03:30 +02001801 ret = td_io_getevents(td, min_evts, td->o.iodepth_batch_complete, tvp);
Jens Axboe97601022007-02-18 12:47:29 +01001802 if (ret < 0) {
Jens Axboee1161c32007-02-22 19:36:48 +01001803 td_verror(td, -ret, "td_io_getevents");
Jens Axboe97601022007-02-18 12:47:29 +01001804 return ret;
1805 } else if (!ret)
1806 return ret;
1807
Jens Axboe9520ebb2008-10-16 21:03:27 +02001808 init_icd(td, &icd, ret);
Jens Axboe97601022007-02-18 12:47:29 +01001809 ios_completed(td, &icd);
Jens Axboe581e7142009-06-09 12:47:16 +02001810 if (icd.error) {
1811 td_verror(td, icd.error, "io_u_queued_complete");
1812 return -1;
1813 }
Jens Axboe97601022007-02-18 12:47:29 +01001814
Jens Axboe581e7142009-06-09 12:47:16 +02001815 if (bytes) {
Shaohua Li6eaf09d2012-09-14 08:49:43 +02001816 int ddir;
1817
1818 for (ddir = DDIR_READ; ddir < DDIR_RWDIR_CNT; ddir++)
1819 bytes[ddir] += icd.bytes_done[ddir];
Jens Axboe581e7142009-06-09 12:47:16 +02001820 }
1821
1822 return 0;
Jens Axboe97601022007-02-18 12:47:29 +01001823}
Jens Axboe7e77dd02007-02-20 10:57:34 +01001824
1825/*
1826 * Call when io_u is really queued, to update the submission latency.
1827 */
1828void io_u_queued(struct thread_data *td, struct io_u *io_u)
1829{
Jens Axboe9520ebb2008-10-16 21:03:27 +02001830 if (!td->o.disable_slat) {
1831 unsigned long slat_time;
Jens Axboe7e77dd02007-02-20 10:57:34 +01001832
Jens Axboe9520ebb2008-10-16 21:03:27 +02001833 slat_time = utime_since(&io_u->start_time, &io_u->issue_time);
Jens Axboeccefd5f2014-06-30 20:59:03 -06001834 add_slat_sample(td, io_u->ddir, slat_time, io_u->xfer_buflen,
1835 io_u->offset);
Jens Axboe9520ebb2008-10-16 21:03:27 +02001836 }
Jens Axboe7e77dd02007-02-20 10:57:34 +01001837}
Jens Axboe433afcb2007-02-22 10:39:01 +01001838
Jens Axboee66dac22014-09-22 10:02:07 -06001839/*
1840 * See if we should reuse the last seed, if dedupe is enabled
1841 */
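/*
 * With dedupe_percentage set, roughly that percentage of buffers are
 * generated from the previous buffer's random state and so repeat earlier
 * contents; at 100%, every buffer repeats the previous one.
 */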
1842static struct frand_state *get_buf_state(struct thread_data *td)
1843{
1844 unsigned int v;
1845 unsigned long r;
1846
1847 if (!td->o.dedupe_percentage)
1848 return &td->buf_state;
Jens Axboe64d3bab2014-09-22 14:20:05 -06001849 else if (td->o.dedupe_percentage == 100)
1850 return &td->buf_state_prev;
Jens Axboee66dac22014-09-22 10:02:07 -06001851
1852 r = __rand(&td->dedupe_state);
1853 v = 1 + (int) (100.0 * (r / (FRAND_MAX + 1.0)));
1854
1855 if (v <= td->o.dedupe_percentage)
1856 return &td->buf_state_prev;
1857
1858 return &td->buf_state;
1859}
1860
1861static void save_buf_state(struct thread_data *td, struct frand_state *rs)
1862{
1863 if (rs == &td->buf_state)
1864 frand_copy(&td->buf_state_prev, rs);
1865}
1866
Jens Axboecc86c392013-05-03 15:12:33 +02001867void fill_io_buffer(struct thread_data *td, void *buf, unsigned int min_write,
1868 unsigned int max_bs)
Jens Axboe5973caf2008-05-21 19:52:35 +02001869{
Jens Axboefd1583f2014-12-03 19:55:33 -07001870 struct thread_options *o = &td->o;
1871
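	/*
	 * Fill in min_write sized chunks. For each chunk, pick a random
	 * state (possibly the previous one, for dedupe) and generate either
	 * partially compressible data, fully random data, or a fixed
	 * pattern, depending on the options.
	 */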
Vasily Tarasovefd633f2015-01-28 09:10:30 -07001872 if (o->compress_percentage || o->dedupe_percentage) {
Jens Axboe9c426842012-03-02 21:02:12 +01001873 unsigned int perc = td->o.compress_percentage;
Jens Axboee66dac22014-09-22 10:02:07 -06001874 struct frand_state *rs;
Jens Axboe8e0aa162014-09-26 15:04:58 -06001875 unsigned int left = max_bs;
Jens Axboee66dac22014-09-22 10:02:07 -06001876
Jens Axboe8e0aa162014-09-26 15:04:58 -06001877 do {
1878 rs = get_buf_state(td);
Jens Axboe9c426842012-03-02 21:02:12 +01001879
Jens Axboe8e0aa162014-09-26 15:04:58 -06001880 min_write = min(min_write, left);
Jens Axboef97a43a2012-03-09 19:06:24 +01001881
Jens Axboe8e0aa162014-09-26 15:04:58 -06001882 if (perc) {
1883 unsigned int seg = min_write;
Jens Axboecc86c392013-05-03 15:12:33 +02001884
Jens Axboe8e0aa162014-09-26 15:04:58 -06001885 seg = min(min_write, td->o.compress_chunk);
1886 if (!seg)
1887 seg = min_write;
1888
1889 fill_random_buf_percentage(rs, buf, perc, seg,
Jens Axboefd1583f2014-12-03 19:55:33 -07001890 min_write, o->buffer_pattern,
1891 o->buffer_pattern_bytes);
Jens Axboe8e0aa162014-09-26 15:04:58 -06001892 } else
1893 fill_random_buf(rs, buf, min_write);
1894
1895 buf += min_write;
1896 left -= min_write;
Jens Axboee66dac22014-09-22 10:02:07 -06001897 save_buf_state(td, rs);
Jens Axboe8e0aa162014-09-26 15:04:58 -06001898 } while (left);
Jens Axboefd1583f2014-12-03 19:55:33 -07001899 } else if (o->buffer_pattern_bytes)
1900 fill_buffer_pattern(td, buf, max_bs);
1901 else
Jens Axboecc86c392013-05-03 15:12:33 +02001902 memset(buf, 0, max_bs);
1903}
1904
1905/*
1906 * "randomly" fill the buffer contents
1907 */
1908void io_u_fill_buffer(struct thread_data *td, struct io_u *io_u,
1909 unsigned int min_write, unsigned int max_bs)
1910{
1911 io_u->buf_filled_len = 0;
1912 fill_io_buffer(td, io_u->buf, min_write, max_bs);
Jens Axboe5973caf2008-05-21 19:52:35 +02001913}