#include <unistd.h>
#include <fcntl.h>
#include <string.h>
#include <signal.h>
#include <time.h>
#include <assert.h>

#include "fio.h"
#include "hash.h"
#include "verify.h"
#include "trim.h"
#include "lib/rand.h"
#include "lib/axmap.h"
#include "err.h"

struct io_completion_data {
	int nr;				/* input */

	int error;			/* output */
	uint64_t bytes_done[DDIR_RWDIR_CNT];	/* output */
	struct timeval time;		/* output */
};

/*
 * The ->io_axmap contains a map of blocks we have or have not done io
 * to yet. Used to make sure we cover the entire range in a fair fashion.
 */
static int random_map_free(struct fio_file *f, const uint64_t block)
{
	return !axmap_isset(f->io_axmap, block);
}

/*
 * Mark a given offset as used in the map.
 */
static void mark_random_map(struct thread_data *td, struct io_u *io_u)
{
	unsigned int min_bs = td->o.rw_min_bs;
	struct fio_file *f = io_u->file;
	unsigned int nr_blocks;
	uint64_t block;

	block = (io_u->offset - f->file_offset) / (uint64_t) min_bs;
	nr_blocks = (io_u->buflen + min_bs - 1) / min_bs;

	if (!(io_u->flags & IO_U_F_BUSY_OK))
		nr_blocks = axmap_set_nr(f->io_axmap, block, nr_blocks);

	if ((nr_blocks * min_bs) < io_u->buflen)
		io_u->buflen = nr_blocks * min_bs;
}

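/*
 * Return the number of blocks available for IO in this file and direction,
 * capped by the real file size and, if set, the zone range.
 */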
static uint64_t last_block(struct thread_data *td, struct fio_file *f,
			   enum fio_ddir ddir)
{
	uint64_t max_blocks;
	uint64_t max_size;

	assert(ddir_rw(ddir));

	/*
	 * Hmm, should we make sure that ->io_size <= ->real_file_size?
	 */
	max_size = f->io_size;
	if (max_size > f->real_file_size)
		max_size = f->real_file_size;

	if (td->o.zone_range)
		max_size = td->o.zone_range;

	max_blocks = max_size / (uint64_t) td->o.ba[ddir];
	if (!max_blocks)
		return 0;

	return max_blocks;
}

struct rand_off {
	struct flist_head list;
	uint64_t off;
};

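/*
 * Pick the next random block offset, using either the tausworthe PRNG or
 * the LFSR generator. If a random map is being maintained, advance to the
 * next free block when the chosen one is already busy.
 */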
static int __get_next_rand_offset(struct thread_data *td, struct fio_file *f,
				  enum fio_ddir ddir, uint64_t *b)
{
	uint64_t r, lastb;

	lastb = last_block(td, f, ddir);
	if (!lastb)
		return 1;

	if (td->o.random_generator == FIO_RAND_GEN_TAUSWORTHE) {
		r = __rand(&td->random_state);

		dprint(FD_RANDOM, "off rand %llu\n", (unsigned long long) r);

		*b = lastb * (r / ((uint64_t) FRAND_MAX + 1.0));
	} else {
		uint64_t off = 0;

		if (lfsr_next(&f->lfsr, &off, lastb))
			return 1;

		*b = off;
	}

	/*
	 * if we are not maintaining a random map, we are done.
	 */
	if (!file_randommap(td, f))
		goto ret;

	/*
	 * calculate map offset and check if it's free
	 */
	if (random_map_free(f, *b))
		goto ret;

	dprint(FD_RANDOM, "get_next_rand_offset: offset %llu busy\n",
		(unsigned long long) *b);

	*b = axmap_next_free(f->io_axmap, *b);
	if (*b == (uint64_t) -1ULL)
		return 1;
ret:
	return 0;
}

static int __get_next_rand_offset_zipf(struct thread_data *td,
				       struct fio_file *f, enum fio_ddir ddir,
				       uint64_t *b)
{
	*b = zipf_next(&f->zipf);
	return 0;
}

static int __get_next_rand_offset_pareto(struct thread_data *td,
					 struct fio_file *f, enum fio_ddir ddir,
					 uint64_t *b)
{
	*b = pareto_next(&f->zipf);
	return 0;
}

static int flist_cmp(void *data, struct flist_head *a, struct flist_head *b)
{
	struct rand_off *r1 = flist_entry(a, struct rand_off, list);
	struct rand_off *r2 = flist_entry(b, struct rand_off, list);

	return r1->off - r2->off;
}

static int get_off_from_method(struct thread_data *td, struct fio_file *f,
			       enum fio_ddir ddir, uint64_t *b)
{
	if (td->o.random_distribution == FIO_RAND_DIST_RANDOM)
		return __get_next_rand_offset(td, f, ddir, b);
	else if (td->o.random_distribution == FIO_RAND_DIST_ZIPF)
		return __get_next_rand_offset_zipf(td, f, ddir, b);
	else if (td->o.random_distribution == FIO_RAND_DIST_PARETO)
		return __get_next_rand_offset_pareto(td, f, ddir, b);

	log_err("fio: unknown random distribution: %d\n", td->o.random_distribution);
	return 1;
}

/*
 * Sort the reads for a verify phase in batches of verifysort_nr, if
 * specified.
 */
static inline int should_sort_io(struct thread_data *td)
{
	if (!td->o.verifysort_nr || !td->o.do_verify)
		return 0;
	if (!td_random(td))
		return 0;
	if (td->runstate != TD_VERIFYING)
		return 0;
	if (td->o.random_generator == FIO_RAND_GEN_TAUSWORTHE)
		return 0;

	return 1;
}

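/*
 * For jobs that mix sequential and random IO by percentage, decide whether
 * this particular IO in the given direction should be random.
 */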
static int should_do_random(struct thread_data *td, enum fio_ddir ddir)
{
	unsigned int v;
	unsigned long r;

	if (td->o.perc_rand[ddir] == 100)
		return 1;

	r = __rand(&td->seq_rand_state[ddir]);
	v = 1 + (int) (100.0 * (r / (FRAND_MAX + 1.0)));

	return v <= td->o.perc_rand[ddir];
}

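/*
 * Generate the next random offset. When verify sorting is in effect, batch
 * up verifysort_nr offsets, sort them, and hand them out in ascending order.
 */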
static int get_next_rand_offset(struct thread_data *td, struct fio_file *f,
				enum fio_ddir ddir, uint64_t *b)
{
	struct rand_off *r;
	int i, ret = 1;

	if (!should_sort_io(td))
		return get_off_from_method(td, f, ddir, b);

	if (!flist_empty(&td->next_rand_list)) {
fetch:
		r = flist_first_entry(&td->next_rand_list, struct rand_off, list);
		flist_del(&r->list);
		*b = r->off;
		free(r);
		return 0;
	}

	for (i = 0; i < td->o.verifysort_nr; i++) {
		r = malloc(sizeof(*r));

		ret = get_off_from_method(td, f, ddir, &r->off);
		if (ret) {
			free(r);
			break;
		}

		flist_add(&r->list, &td->next_rand_list);
	}

	if (ret && !i)
		return ret;

	assert(!flist_empty(&td->next_rand_list));
	flist_sort(NULL, &td->next_rand_list, flist_cmp);
	goto fetch;
}

static int get_next_rand_block(struct thread_data *td, struct fio_file *f,
			       enum fio_ddir ddir, uint64_t *b)
{
	if (!get_next_rand_offset(td, f, ddir, b))
		return 0;

	if (td->o.time_based) {
		fio_file_reset(td, f);
		if (!get_next_rand_offset(td, f, ddir, b))
			return 0;
	}

	dprint(FD_IO, "%s: rand offset failed, last=%llu, size=%llu\n",
			f->file_name, (unsigned long long) f->last_pos,
			(unsigned long long) f->real_file_size);
	return 1;
}

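/*
 * Compute the next sequential offset for this file, honoring ddir_seq_add
 * for holed IO and wrapping back to the start for time_based jobs.
 */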
static int get_next_seq_offset(struct thread_data *td, struct fio_file *f,
			       enum fio_ddir ddir, uint64_t *offset)
{
	struct thread_options *o = &td->o;

	assert(ddir_rw(ddir));

	if (f->last_pos >= f->io_size + get_start_offset(td, f) &&
	    o->time_based)
		f->last_pos = f->last_pos - f->io_size;

	if (f->last_pos < f->real_file_size) {
		uint64_t pos;

		if (f->last_pos == f->file_offset && o->ddir_seq_add < 0)
			f->last_pos = f->real_file_size;

		pos = f->last_pos - f->file_offset;
		if (pos && o->ddir_seq_add) {
			pos += o->ddir_seq_add;

			/*
			 * If we reach beyond the end of the file
			 * with holed IO, wrap around to the
			 * beginning again.
			 */
			if (pos >= f->real_file_size)
				pos = f->file_offset;
		}

		*offset = pos;
		return 0;
	}

	return 1;
}

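/*
 * Pick the next block/offset for an io_u, dispatching to the random or
 * sequential generators depending on the job mix and the rw_seq setting,
 * and convert the result into a byte offset.
 */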
static int get_next_block(struct thread_data *td, struct io_u *io_u,
			  enum fio_ddir ddir, int rw_seq,
			  unsigned int *is_random)
{
	struct fio_file *f = io_u->file;
	uint64_t b, offset;
	int ret;

	assert(ddir_rw(ddir));

	b = offset = -1ULL;

	if (rw_seq) {
		if (td_random(td)) {
			if (should_do_random(td, ddir)) {
				ret = get_next_rand_block(td, f, ddir, &b);
				*is_random = 1;
			} else {
				*is_random = 0;
				io_u->flags |= IO_U_F_BUSY_OK;
				ret = get_next_seq_offset(td, f, ddir, &offset);
				if (ret)
					ret = get_next_rand_block(td, f, ddir, &b);
			}
		} else {
			*is_random = 0;
			ret = get_next_seq_offset(td, f, ddir, &offset);
		}
	} else {
		io_u->flags |= IO_U_F_BUSY_OK;
		*is_random = 0;

		if (td->o.rw_seq == RW_SEQ_SEQ) {
			ret = get_next_seq_offset(td, f, ddir, &offset);
			if (ret) {
				ret = get_next_rand_block(td, f, ddir, &b);
				*is_random = 0;
			}
		} else if (td->o.rw_seq == RW_SEQ_IDENT) {
			if (f->last_start != -1ULL)
				offset = f->last_start - f->file_offset;
			else
				offset = 0;
			ret = 0;
		} else {
			log_err("fio: unknown rw_seq=%d\n", td->o.rw_seq);
			ret = 1;
		}
	}

	if (!ret) {
		if (offset != -1ULL)
			io_u->offset = offset;
		else if (b != -1ULL)
			io_u->offset = b * td->o.ba[ddir];
		else {
			log_err("fio: bug in offset generation: offset=%llu, b=%llu\n", (unsigned long long) offset, (unsigned long long) b);
			ret = 1;
		}
	}

	return ret;
}

/*
 * For random io, generate a random new block and see if it's used. Repeat
 * until we find a free one. For sequential io, just return the end of
 * the last io issued.
 */
static int __get_next_offset(struct thread_data *td, struct io_u *io_u,
			     unsigned int *is_random)
{
	struct fio_file *f = io_u->file;
	enum fio_ddir ddir = io_u->ddir;
	int rw_seq_hit = 0;

	assert(ddir_rw(ddir));

	if (td->o.ddir_seq_nr && !--td->ddir_seq_nr) {
		rw_seq_hit = 1;
		td->ddir_seq_nr = td->o.ddir_seq_nr;
	}

	if (get_next_block(td, io_u, ddir, rw_seq_hit, is_random))
		return 1;

	if (io_u->offset >= f->io_size) {
		dprint(FD_IO, "get_next_offset: offset %llu >= io_size %llu\n",
					(unsigned long long) io_u->offset,
					(unsigned long long) f->io_size);
		return 1;
	}

	io_u->offset += f->file_offset;
	if (io_u->offset >= f->real_file_size) {
		dprint(FD_IO, "get_next_offset: offset %llu >= size %llu\n",
					(unsigned long long) io_u->offset,
					(unsigned long long) f->real_file_size);
		return 1;
	}

	return 0;
}

static int get_next_offset(struct thread_data *td, struct io_u *io_u,
			   unsigned int *is_random)
{
	if (td->flags & TD_F_PROFILE_OPS) {
		struct prof_io_ops *ops = &td->prof_io_ops;

		if (ops->fill_io_u_off)
			return ops->fill_io_u_off(td, io_u, is_random);
	}

	return __get_next_offset(td, io_u, is_random);
}

static inline int io_u_fits(struct thread_data *td, struct io_u *io_u,
			    unsigned int buflen)
{
	struct fio_file *f = io_u->file;

	return io_u->offset + buflen <= f->io_size + get_start_offset(td, f);
}

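/*
 * Pick the next buffer length, either uniformly between the min and max
 * block size or according to the bssplit table, then align it to the
 * verify interval and block size as required.
 */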
static unsigned int __get_next_buflen(struct thread_data *td, struct io_u *io_u,
				      unsigned int is_random)
{
	int ddir = io_u->ddir;
	unsigned int buflen = 0;
	unsigned int minbs, maxbs;
	unsigned long r;

	assert(ddir_rw(ddir));

	if (td->o.bs_is_seq_rand)
		ddir = is_random ? DDIR_WRITE: DDIR_READ;

	minbs = td->o.min_bs[ddir];
	maxbs = td->o.max_bs[ddir];

	if (minbs == maxbs)
		return minbs;

	/*
	 * If we can't satisfy the min block size from here, then fail
	 */
	if (!io_u_fits(td, io_u, minbs))
		return 0;

	do {
		r = __rand(&td->bsrange_state);

		if (!td->o.bssplit_nr[ddir]) {
			buflen = 1 + (unsigned int) ((double) maxbs *
					(r / (FRAND_MAX + 1.0)));
			if (buflen < minbs)
				buflen = minbs;
		} else {
			long perc = 0;
			unsigned int i;

			for (i = 0; i < td->o.bssplit_nr[ddir]; i++) {
				struct bssplit *bsp = &td->o.bssplit[ddir][i];

				buflen = bsp->bs;
				perc += bsp->perc;
				if ((r <= ((FRAND_MAX / 100L) * perc)) &&
				    io_u_fits(td, io_u, buflen))
					break;
			}
		}

		if (td->o.do_verify && td->o.verify != VERIFY_NONE)
			buflen = (buflen + td->o.verify_interval - 1) &
				~(td->o.verify_interval - 1);

		if (!td->o.bs_unaligned && is_power_of_2(minbs))
			buflen = (buflen + minbs - 1) & ~(minbs - 1);

	} while (!io_u_fits(td, io_u, buflen));

	return buflen;
}

static unsigned int get_next_buflen(struct thread_data *td, struct io_u *io_u,
				    unsigned int is_random)
{
	if (td->flags & TD_F_PROFILE_OPS) {
		struct prof_io_ops *ops = &td->prof_io_ops;

		if (ops->fill_io_u_size)
			return ops->fill_io_u_size(td, io_u, is_random);
	}

	return __get_next_buflen(td, io_u, is_random);
}

static void set_rwmix_bytes(struct thread_data *td)
{
	unsigned int diff;

	/*
	 * we do time or byte based switch. this is needed because
	 * buffered writes may issue a lot quicker than they complete,
	 * whereas reads do not.
	 */
	diff = td->o.rwmix[td->rwmix_ddir ^ 1];
	td->rwmix_issues = (td->io_issues[td->rwmix_ddir] * diff) / 100;
}

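/*
 * Pick a data direction at random, weighted by the rwmix read percentage.
 */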
static inline enum fio_ddir get_rand_ddir(struct thread_data *td)
{
	unsigned int v;
	unsigned long r;

	r = __rand(&td->rwmix_state);
	v = 1 + (int) (100.0 * (r / (FRAND_MAX + 1.0)));

	if (v <= td->o.rwmix[DDIR_READ])
		return DDIR_READ;

	return DDIR_WRITE;
}

void io_u_quiesce(struct thread_data *td)
{
	/*
	 * We are going to sleep, ensure that we flush anything pending as
	 * not to skew our latency numbers.
	 *
	 * Changed to only monitor 'in flight' requests here instead of the
	 * td->cur_depth, b/c td->cur_depth does not accurately represent
	 * io's that have been actually submitted to an async engine,
	 * and cur_depth is meaningless for sync engines.
	 */
	while (td->io_u_in_flight) {
		int fio_unused ret;

		ret = io_u_queued_complete(td, 1, NULL);
	}
}

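/*
 * If rate limiting has built up pending sleep for this direction, sleep it
 * off here, possibly switching to the other direction first for mixed
 * read/write workloads.
 */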
static enum fio_ddir rate_ddir(struct thread_data *td, enum fio_ddir ddir)
{
	enum fio_ddir odir = ddir ^ 1;
	struct timeval t;
	long usec;

	assert(ddir_rw(ddir));

	if (td->rate_pending_usleep[ddir] <= 0)
		return ddir;

	/*
	 * We have too much pending sleep in this direction. See if we
	 * should switch.
	 */
	if (td_rw(td) && td->o.rwmix[odir]) {
		/*
		 * Other direction does not have too much pending, switch
		 */
		if (td->rate_pending_usleep[odir] < 100000)
			return odir;

		/*
		 * Both directions have pending sleep. Sleep the minimum time
		 * and deduct from both.
		 */
		if (td->rate_pending_usleep[ddir] <=
			td->rate_pending_usleep[odir]) {
			usec = td->rate_pending_usleep[ddir];
		} else {
			usec = td->rate_pending_usleep[odir];
			ddir = odir;
		}
	} else
		usec = td->rate_pending_usleep[ddir];

	io_u_quiesce(td);

	fio_gettime(&t, NULL);
	usec_sleep(td, usec);
	usec = utime_since_now(&t);

	td->rate_pending_usleep[ddir] -= usec;

	odir = ddir ^ 1;
	if (td_rw(td) && __should_check_rate(td, odir))
		td->rate_pending_usleep[odir] -= usec;

	if (ddir == DDIR_TRIM)
		return DDIR_TRIM;

	return ddir;
}

/*
 * Return the data direction for the next io_u. If the job is a
 * mixed read/write workload, check the rwmix cycle and switch if
 * necessary.
 */
static enum fio_ddir get_rw_ddir(struct thread_data *td)
{
	enum fio_ddir ddir;

	/*
	 * see if it's time to fsync
	 */
	if (td->o.fsync_blocks &&
	   !(td->io_issues[DDIR_WRITE] % td->o.fsync_blocks) &&
	     td->io_issues[DDIR_WRITE] && should_fsync(td))
		return DDIR_SYNC;

	/*
	 * see if it's time to fdatasync
	 */
	if (td->o.fdatasync_blocks &&
	   !(td->io_issues[DDIR_WRITE] % td->o.fdatasync_blocks) &&
	     td->io_issues[DDIR_WRITE] && should_fsync(td))
		return DDIR_DATASYNC;

	/*
	 * see if it's time to sync_file_range
	 */
	if (td->sync_file_range_nr &&
	   !(td->io_issues[DDIR_WRITE] % td->sync_file_range_nr) &&
	     td->io_issues[DDIR_WRITE] && should_fsync(td))
		return DDIR_SYNC_FILE_RANGE;

	if (td_rw(td)) {
		/*
		 * Check if it's time to seed a new data direction.
		 */
		if (td->io_issues[td->rwmix_ddir] >= td->rwmix_issues) {
			/*
			 * Put a top limit on how many bytes we do for
			 * one data direction, to avoid overflowing the
			 * ranges too much
			 */
			ddir = get_rand_ddir(td);

			if (ddir != td->rwmix_ddir)
				set_rwmix_bytes(td);

			td->rwmix_ddir = ddir;
		}
		ddir = td->rwmix_ddir;
	} else if (td_read(td))
		ddir = DDIR_READ;
	else if (td_write(td))
		ddir = DDIR_WRITE;
	else
		ddir = DDIR_TRIM;

	td->rwmix_ddir = rate_ddir(td, ddir);
	return td->rwmix_ddir;
}

static void set_rw_ddir(struct thread_data *td, struct io_u *io_u)
{
	io_u->ddir = io_u->acct_ddir = get_rw_ddir(td);

	if (io_u->ddir == DDIR_WRITE && (td->io_ops->flags & FIO_BARRIER) &&
	    td->o.barrier_blocks &&
	   !(td->io_issues[DDIR_WRITE] % td->o.barrier_blocks) &&
	     td->io_issues[DDIR_WRITE])
		io_u->flags |= IO_U_F_BARRIER;
}

void put_file_log(struct thread_data *td, struct fio_file *f)
{
	unsigned int ret = put_file(td, f);

	if (ret)
		td_verror(td, ret, "file close");
}

void put_io_u(struct thread_data *td, struct io_u *io_u)
{
	td_io_u_lock(td);

	if (io_u->file && !(io_u->flags & IO_U_F_NO_FILE_PUT))
		put_file_log(td, io_u->file);

	io_u->file = NULL;
	io_u->flags |= IO_U_F_FREE;

	if (io_u->flags & IO_U_F_IN_CUR_DEPTH)
		td->cur_depth--;
	io_u_qpush(&td->io_u_freelist, io_u);
	td_io_u_unlock(td);
	td_io_u_free_notify(td);
}

void clear_io_u(struct thread_data *td, struct io_u *io_u)
{
	io_u->flags &= ~IO_U_F_FLIGHT;
	put_io_u(td, io_u);
}

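/*
 * Put an io_u back on the requeue list, undoing the issue accounting so
 * it can be submitted again later.
 */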
void requeue_io_u(struct thread_data *td, struct io_u **io_u)
{
	struct io_u *__io_u = *io_u;
	enum fio_ddir ddir = acct_ddir(__io_u);

	dprint(FD_IO, "requeue %p\n", __io_u);

	td_io_u_lock(td);

	__io_u->flags |= IO_U_F_FREE;
	if ((__io_u->flags & IO_U_F_FLIGHT) && ddir_rw(ddir))
		td->io_issues[ddir]--;

	__io_u->flags &= ~IO_U_F_FLIGHT;
	if (__io_u->flags & IO_U_F_IN_CUR_DEPTH)
		td->cur_depth--;

	io_u_rpush(&td->io_u_requeues, __io_u);
	td_io_u_unlock(td);
	*io_u = NULL;
}

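/*
 * Fill in direction, offset and buffer length for an io_u, handling zone
 * switching and marking the random map along the way.
 */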
static int fill_io_u(struct thread_data *td, struct io_u *io_u)
{
	unsigned int is_random;

	if (td->io_ops->flags & FIO_NOIO)
		goto out;

	set_rw_ddir(td, io_u);

	/*
	 * fsync() or fdatasync() or trim etc, we are done
	 */
	if (!ddir_rw(io_u->ddir))
		goto out;

	/*
	 * See if it's time to switch to a new zone
	 */
	if (td->zone_bytes >= td->o.zone_size && td->o.zone_skip) {
		struct fio_file *f = io_u->file;

		td->zone_bytes = 0;
		f->file_offset += td->o.zone_range + td->o.zone_skip;

		/*
		 * Wrap from the beginning, if we exceed the file size
		 */
		if (f->file_offset >= f->real_file_size)
			f->file_offset = f->real_file_size - f->file_offset;
		f->last_pos = f->file_offset;
		td->io_skip_bytes += td->o.zone_skip;
	}

	/*
	 * No log, let the seq/rand engine retrieve the next buflen and
	 * position.
	 */
	if (get_next_offset(td, io_u, &is_random)) {
		dprint(FD_IO, "io_u %p, failed getting offset\n", io_u);
		return 1;
	}

	io_u->buflen = get_next_buflen(td, io_u, is_random);
	if (!io_u->buflen) {
		dprint(FD_IO, "io_u %p, failed getting buflen\n", io_u);
		return 1;
	}

	if (io_u->offset + io_u->buflen > io_u->file->real_file_size) {
		dprint(FD_IO, "io_u %p, offset too large\n", io_u);
		dprint(FD_IO, "  off=%llu/%lu > %llu\n",
			(unsigned long long) io_u->offset, io_u->buflen,
			(unsigned long long) io_u->file->real_file_size);
		return 1;
	}

	/*
	 * mark entry before potentially trimming io_u
	 */
	if (td_random(td) && file_randommap(td, io_u->file))
		mark_random_map(td, io_u);

out:
	dprint_io_u(io_u, "fill_io_u");
	td->zone_bytes += io_u->buflen;
	return 0;
}

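/*
 * Bump the histogram bucket matching how many io_u's were submitted or
 * completed in one batch.
 */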
static void __io_u_mark_map(unsigned int *map, unsigned int nr)
{
	int idx = 0;

	switch (nr) {
	default:
		idx = 6;
		break;
	case 33 ... 64:
		idx = 5;
		break;
	case 17 ... 32:
		idx = 4;
		break;
	case 9 ... 16:
		idx = 3;
		break;
	case 5 ... 8:
		idx = 2;
		break;
	case 1 ... 4:
		idx = 1;
	case 0:
		break;
	}

	map[idx]++;
}

void io_u_mark_submit(struct thread_data *td, unsigned int nr)
{
	__io_u_mark_map(td->ts.io_u_submit, nr);
	td->ts.total_submit++;
}

void io_u_mark_complete(struct thread_data *td, unsigned int nr)
{
	__io_u_mark_map(td->ts.io_u_complete, nr);
	td->ts.total_complete++;
}

void io_u_mark_depth(struct thread_data *td, unsigned int nr)
{
	int idx = 0;

	switch (td->cur_depth) {
	default:
		idx = 6;
		break;
	case 32 ... 63:
		idx = 5;
		break;
	case 16 ... 31:
		idx = 4;
		break;
	case 8 ... 15:
		idx = 3;
		break;
	case 4 ... 7:
		idx = 2;
		break;
	case 2 ... 3:
		idx = 1;
	case 1:
		break;
	}

	td->ts.io_u_map[idx] += nr;
}

static void io_u_mark_lat_usec(struct thread_data *td, unsigned long usec)
{
	int idx = 0;

	assert(usec < 1000);

	switch (usec) {
	case 750 ... 999:
		idx = 9;
		break;
	case 500 ... 749:
		idx = 8;
		break;
	case 250 ... 499:
		idx = 7;
		break;
	case 100 ... 249:
		idx = 6;
		break;
	case 50 ... 99:
		idx = 5;
		break;
	case 20 ... 49:
		idx = 4;
		break;
	case 10 ... 19:
		idx = 3;
		break;
	case 4 ... 9:
		idx = 2;
		break;
	case 2 ... 3:
		idx = 1;
	case 0 ... 1:
		break;
	}

	assert(idx < FIO_IO_U_LAT_U_NR);
	td->ts.io_u_lat_u[idx]++;
}

static void io_u_mark_lat_msec(struct thread_data *td, unsigned long msec)
{
	int idx = 0;

	switch (msec) {
	default:
		idx = 11;
		break;
	case 1000 ... 1999:
		idx = 10;
		break;
	case 750 ... 999:
		idx = 9;
		break;
	case 500 ... 749:
		idx = 8;
		break;
	case 250 ... 499:
		idx = 7;
		break;
	case 100 ... 249:
		idx = 6;
		break;
	case 50 ... 99:
		idx = 5;
		break;
	case 20 ... 49:
		idx = 4;
		break;
	case 10 ... 19:
		idx = 3;
		break;
	case 4 ... 9:
		idx = 2;
		break;
	case 2 ... 3:
		idx = 1;
	case 0 ... 1:
		break;
	}

	assert(idx < FIO_IO_U_LAT_M_NR);
	td->ts.io_u_lat_m[idx]++;
}

static void io_u_mark_latency(struct thread_data *td, unsigned long usec)
{
	if (usec < 1000)
		io_u_mark_lat_usec(td, usec);
	else
		io_u_mark_lat_msec(td, usec / 1000);
}

/*
 * Get next file to service by choosing one at random
 */
static struct fio_file *get_next_file_rand(struct thread_data *td,
					   enum fio_file_flags goodf,
					   enum fio_file_flags badf)
{
	struct fio_file *f;
	int fno;

	do {
		int opened = 0;
		unsigned long r;

		r = __rand(&td->next_file_state);
		fno = (unsigned int) ((double) td->o.nr_files
				* (r / (FRAND_MAX + 1.0)));

		f = td->files[fno];
		if (fio_file_done(f))
			continue;

		if (!fio_file_open(f)) {
			int err;

			if (td->nr_open_files >= td->o.open_files)
				return ERR_PTR(-EBUSY);

			err = td_io_open_file(td, f);
			if (err)
				continue;
			opened = 1;
		}

		if ((!goodf || (f->flags & goodf)) && !(f->flags & badf)) {
			dprint(FD_FILE, "get_next_file_rand: %p\n", f);
			return f;
		}
		if (opened)
			td_io_close_file(td, f);
	} while (1);
}

/*
 * Get next file to service by doing round robin between all available ones
 */
static struct fio_file *get_next_file_rr(struct thread_data *td, int goodf,
					 int badf)
{
	unsigned int old_next_file = td->next_file;
	struct fio_file *f;

	do {
		int opened = 0;

		f = td->files[td->next_file];

		td->next_file++;
		if (td->next_file >= td->o.nr_files)
			td->next_file = 0;

		dprint(FD_FILE, "trying file %s %x\n", f->file_name, f->flags);
		if (fio_file_done(f)) {
			f = NULL;
			continue;
		}

		if (!fio_file_open(f)) {
			int err;

			if (td->nr_open_files >= td->o.open_files)
				return ERR_PTR(-EBUSY);

			err = td_io_open_file(td, f);
			if (err) {
				dprint(FD_FILE, "error %d on open of %s\n",
					err, f->file_name);
				f = NULL;
				continue;
			}
			opened = 1;
		}

		dprint(FD_FILE, "goodf=%x, badf=%x, ff=%x\n", goodf, badf,
								f->flags);
		if ((!goodf || (f->flags & goodf)) && !(f->flags & badf))
			break;

		if (opened)
			td_io_close_file(td, f);

		f = NULL;
	} while (td->next_file != old_next_file);

	dprint(FD_FILE, "get_next_file_rr: %p\n", f);
	return f;
}

static struct fio_file *__get_next_file(struct thread_data *td)
{
	struct fio_file *f;

	assert(td->o.nr_files <= td->files_index);

	if (td->nr_done_files >= td->o.nr_files) {
		dprint(FD_FILE, "get_next_file: nr_open=%d, nr_done=%d,"
				" nr_files=%d\n", td->nr_open_files,
						  td->nr_done_files,
						  td->o.nr_files);
		return NULL;
	}

	f = td->file_service_file;
	if (f && fio_file_open(f) && !fio_file_closing(f)) {
		if (td->o.file_service_type == FIO_FSERVICE_SEQ)
			goto out;
		if (td->file_service_left--)
			goto out;
	}

	if (td->o.file_service_type == FIO_FSERVICE_RR ||
	    td->o.file_service_type == FIO_FSERVICE_SEQ)
		f = get_next_file_rr(td, FIO_FILE_open, FIO_FILE_closing);
	else
		f = get_next_file_rand(td, FIO_FILE_open, FIO_FILE_closing);

	if (IS_ERR(f))
		return f;

	td->file_service_file = f;
	td->file_service_left = td->file_service_nr - 1;
out:
	if (f)
		dprint(FD_FILE, "get_next_file: %p [%s]\n", f, f->file_name);
	else
		dprint(FD_FILE, "get_next_file: NULL\n");
	return f;
}

static struct fio_file *get_next_file(struct thread_data *td)
{
	if (td->flags & TD_F_PROFILE_OPS) {
		struct prof_io_ops *ops = &td->prof_io_ops;

		if (ops->get_next_file)
			return ops->get_next_file(td);
	}

	return __get_next_file(td);
}

static long set_io_u_file(struct thread_data *td, struct io_u *io_u)
{
	struct fio_file *f;

	do {
		f = get_next_file(td);
		if (IS_ERR_OR_NULL(f))
			return PTR_ERR(f);

		io_u->file = f;
		get_file(f);

		if (!fill_io_u(td, io_u))
			break;

		put_file_log(td, f);
		td_io_close_file(td, f);
		io_u->file = NULL;
		fio_file_set_done(f);
		td->nr_done_files++;
		dprint(FD_FILE, "%s: is done (%d of %d)\n", f->file_name,
					td->nr_done_files, td->o.nr_files);
	} while (1);

	return 0;
}

static void lat_fatal(struct thread_data *td, struct io_completion_data *icd,
		      unsigned long tusec, unsigned long max_usec)
{
	if (!td->error)
		log_err("fio: latency of %lu usec exceeds specified max (%lu usec)\n", tusec, max_usec);
	td_verror(td, ETIMEDOUT, "max latency exceeded");
	icd->error = ETIMEDOUT;
}

static void lat_new_cycle(struct thread_data *td)
{
	fio_gettime(&td->latency_ts, NULL);
	td->latency_ios = ddir_rw_sum(td->io_blocks);
	td->latency_failed = 0;
}

/*
 * We had an IO outside the latency target. Reduce the queue depth. If we
 * are at QD=1, then it's time to give up.
 */
static int __lat_target_failed(struct thread_data *td)
{
	if (td->latency_qd == 1)
		return 1;

	td->latency_qd_high = td->latency_qd;

	if (td->latency_qd == td->latency_qd_low)
		td->latency_qd_low--;

	td->latency_qd = (td->latency_qd + td->latency_qd_low) / 2;

	dprint(FD_RATE, "Ramped down: %d %d %d\n", td->latency_qd_low, td->latency_qd, td->latency_qd_high);

	/*
	 * When we ramp QD down, quiesce existing IO to prevent
	 * a storm of ramp downs due to pending higher depth.
	 */
	io_u_quiesce(td);
	lat_new_cycle(td);
	return 0;
}

static int lat_target_failed(struct thread_data *td)
{
	if (td->o.latency_percentile.u.f == 100.0)
		return __lat_target_failed(td);

	td->latency_failed++;
	return 0;
}

void lat_target_init(struct thread_data *td)
{
	td->latency_end_run = 0;

	if (td->o.latency_target) {
		dprint(FD_RATE, "Latency target=%llu\n", td->o.latency_target);
		fio_gettime(&td->latency_ts, NULL);
		td->latency_qd = 1;
		td->latency_qd_high = td->o.iodepth;
		td->latency_qd_low = 1;
		td->latency_ios = ddir_rw_sum(td->io_blocks);
	} else
		td->latency_qd = td->o.iodepth;
}

void lat_target_reset(struct thread_data *td)
{
	if (!td->latency_end_run)
		lat_target_init(td);
}

static void lat_target_success(struct thread_data *td)
{
	const unsigned int qd = td->latency_qd;
	struct thread_options *o = &td->o;

	td->latency_qd_low = td->latency_qd;

	/*
	 * If we haven't failed yet, we double up to a failing value instead
	 * of bisecting from highest possible queue depth. If we have set
	 * a limit other than td->o.iodepth, bisect between that.
	 */
	if (td->latency_qd_high != o->iodepth)
		td->latency_qd = (td->latency_qd + td->latency_qd_high) / 2;
	else
		td->latency_qd *= 2;

	if (td->latency_qd > o->iodepth)
		td->latency_qd = o->iodepth;

	dprint(FD_RATE, "Ramped up: %d %d %d\n", td->latency_qd_low, td->latency_qd, td->latency_qd_high);

	/*
	 * Same as last one, we are done. Let it run a latency cycle, so
	 * we get only the results from the targeted depth.
	 */
	if (td->latency_qd == qd) {
		if (td->latency_end_run) {
			dprint(FD_RATE, "We are done\n");
			td->done = 1;
		} else {
			dprint(FD_RATE, "Quiesce and final run\n");
			io_u_quiesce(td);
			td->latency_end_run = 1;
			reset_all_stats(td);
			reset_io_stats(td);
		}
	}

	lat_new_cycle(td);
}

/*
 * Check if we can bump the queue depth
 */
void lat_target_check(struct thread_data *td)
{
	uint64_t usec_window;
	uint64_t ios;
	double success_ios;

	usec_window = utime_since_now(&td->latency_ts);
	if (usec_window < td->o.latency_window)
		return;

	ios = ddir_rw_sum(td->io_blocks) - td->latency_ios;
	success_ios = (double) (ios - td->latency_failed) / (double) ios;
	success_ios *= 100.0;

	dprint(FD_RATE, "Success rate: %.2f%% (target %.2f%%)\n", success_ios, td->o.latency_percentile.u.f);

	if (success_ios >= td->o.latency_percentile.u.f)
		lat_target_success(td);
	else
		__lat_target_failed(td);
}

/*
 * If latency target is enabled, we might be ramping up or down and not
 * using the full queue depth available.
 */
int queue_full(const struct thread_data *td)
{
	const int qempty = io_u_qempty(&td->io_u_freelist);

	if (qempty)
		return 1;
	if (!td->o.latency_target)
		return 0;

	return td->cur_depth >= td->latency_qd;
}

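/*
 * Grab an io_u from the requeue list or the free list and initialize it
 * for a new IO. Returns NULL if the job is stopping or none are available.
 */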
struct io_u *__get_io_u(struct thread_data *td)
{
	struct io_u *io_u = NULL;

	if (td->stop_io)
		return NULL;

	td_io_u_lock(td);

again:
	if (!io_u_rempty(&td->io_u_requeues))
		io_u = io_u_rpop(&td->io_u_requeues);
	else if (!queue_full(td)) {
		io_u = io_u_qpop(&td->io_u_freelist);

		io_u->file = NULL;
		io_u->buflen = 0;
		io_u->resid = 0;
		io_u->end_io = NULL;
	}

	if (io_u) {
		assert(io_u->flags & IO_U_F_FREE);
		io_u->flags &= ~(IO_U_F_FREE | IO_U_F_NO_FILE_PUT |
				 IO_U_F_TRIMMED | IO_U_F_BARRIER |
				 IO_U_F_VER_LIST);

		io_u->error = 0;
		io_u->acct_ddir = -1;
		td->cur_depth++;
		io_u->flags |= IO_U_F_IN_CUR_DEPTH;
		io_u->ipo = NULL;
	} else if (td->o.verify_async) {
		/*
		 * We ran out, wait for async verify threads to finish and
		 * return one
		 */
		pthread_cond_wait(&td->free_cond, &td->io_u_lock);
		goto again;
	}

	td_io_u_unlock(td);
	return io_u;
}

Jens Axboe0d29de82010-09-01 13:54:15 +02001326static int check_get_trim(struct thread_data *td, struct io_u *io_u)
Jens Axboe10ba5352006-10-20 11:39:27 +02001327{
Jens Axboed72be542012-11-30 19:37:46 +01001328 if (!(td->flags & TD_F_TRIM_BACKLOG))
1329 return 0;
1330
1331 if (td->trim_entries) {
Jens Axboe0d29de82010-09-01 13:54:15 +02001332 int get_trim = 0;
Jens Axboe10ba5352006-10-20 11:39:27 +02001333
Jens Axboe0d29de82010-09-01 13:54:15 +02001334 if (td->trim_batch) {
1335 td->trim_batch--;
1336 get_trim = 1;
1337 } else if (!(td->io_hist_len % td->o.trim_backlog) &&
1338 td->last_ddir != DDIR_READ) {
1339 td->trim_batch = td->o.trim_batch;
1340 if (!td->trim_batch)
1341 td->trim_batch = td->o.trim_backlog;
1342 get_trim = 1;
1343 }
1344
1345 if (get_trim && !get_next_trim(td, io_u))
1346 return 1;
Jens Axboe2ba1c292008-02-01 13:16:38 +01001347 }
Jens Axboe10ba5352006-10-20 11:39:27 +02001348
Jens Axboe0d29de82010-09-01 13:54:15 +02001349 return 0;
1350}
1351
1352static int check_get_verify(struct thread_data *td, struct io_u *io_u)
1353{
Jens Axboed72be542012-11-30 19:37:46 +01001354 if (!(td->flags & TD_F_VER_BACKLOG))
1355 return 0;
1356
1357 if (td->io_hist_len) {
Jens Axboe9e144182010-06-15 14:25:36 +02001358 int get_verify = 0;
1359
Jens Axboed1ece0c2012-03-07 09:32:58 +01001360 if (td->verify_batch)
Jens Axboe9e144182010-06-15 14:25:36 +02001361 get_verify = 1;
Jens Axboed1ece0c2012-03-07 09:32:58 +01001362 else if (!(td->io_hist_len % td->o.verify_backlog) &&
Jens Axboe9e144182010-06-15 14:25:36 +02001363 td->last_ddir != DDIR_READ) {
1364 td->verify_batch = td->o.verify_batch;
Jens Axboef8a75c92010-06-15 14:27:28 +02001365 if (!td->verify_batch)
1366 td->verify_batch = td->o.verify_backlog;
Jens Axboe9e144182010-06-15 14:25:36 +02001367 get_verify = 1;
1368 }
1369
Jens Axboed1ece0c2012-03-07 09:32:58 +01001370 if (get_verify && !get_next_verify(td, io_u)) {
1371 td->verify_batch--;
Jens Axboe0d29de82010-09-01 13:54:15 +02001372 return 1;
Jens Axboed1ece0c2012-03-07 09:32:58 +01001373 }
Jens Axboe9e144182010-06-15 14:25:36 +02001374 }
1375
Jens Axboe0d29de82010-09-01 13:54:15 +02001376 return 0;
1377}
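
/*
 * To illustrate the backlog batching above (example values): with
 * verify_backlog=32 and verify_batch=8, every 32nd entry added to the
 * io history arms a batch of 8 verifies, which are then handed out one
 * per get_io_u() call before normal IO resumes. If verify_batch is 0,
 * the whole backlog (32 here) is verified in one burst. check_get_trim()
 * above follows the same pattern for trim_backlog/trim_batch.
 */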
1378
1379/*
 * Fill the offset and start time into the buffer content, to keep the
 * data from being too easily compressible for simple de-dupe attempts.
 * Do this for every 512b block in the range, since that should be the
 * smallest block size we can expect from a device.
Jens Axboede789762011-09-16 22:11:23 +02001384 */
1385static void small_content_scramble(struct io_u *io_u)
1386{
Jens Axboe23f394d2011-09-16 22:45:27 +02001387 unsigned int i, nr_blocks = io_u->buflen / 512;
Jens Axboe1ae83d42013-01-12 01:44:15 -07001388 uint64_t boffset;
Jens Axboe23f394d2011-09-16 22:45:27 +02001389 unsigned int offset;
1390 void *p, *end;
Jens Axboede789762011-09-16 22:11:23 +02001391
Jens Axboe23f394d2011-09-16 22:45:27 +02001392 if (!nr_blocks)
1393 return;
1394
1395 p = io_u->xfer_buf;
Jens Axboefba76ee2011-09-27 14:27:48 -06001396 boffset = io_u->offset;
Jens Axboe81f03662012-02-02 09:20:09 +01001397 io_u->buf_filled_len = 0;
Jens Axboefad82f72011-09-19 11:33:30 +02001398
Jens Axboe23f394d2011-09-16 22:45:27 +02001399 for (i = 0; i < nr_blocks; i++) {
1400 /*
1401 * Fill the byte offset into a "random" start offset of
		 * the buffer, given by xor'ing the usec time with
		 * the actual block offset.
1404 */
Jens Axboefad82f72011-09-19 11:33:30 +02001405 offset = (io_u->start_time.tv_usec ^ boffset) & 511;
Jens Axboe1ae83d42013-01-12 01:44:15 -07001406 offset &= ~(sizeof(uint64_t) - 1);
1407 if (offset >= 512 - sizeof(uint64_t))
1408 offset -= sizeof(uint64_t);
Jens Axboefba76ee2011-09-27 14:27:48 -06001409 memcpy(p + offset, &boffset, sizeof(boffset));
Jens Axboe23f394d2011-09-16 22:45:27 +02001410
1411 end = p + 512 - sizeof(io_u->start_time);
1412 memcpy(end, &io_u->start_time, sizeof(io_u->start_time));
1413 p += 512;
Jens Axboefad82f72011-09-19 11:33:30 +02001414 boffset += 512;
Jens Axboe23f394d2011-09-16 22:45:27 +02001415 }
Jens Axboede789762011-09-16 22:11:23 +02001416}
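
/*
 * A worked example of the scrambling, with made-up inputs: for
 * tv_usec=100 and boffset=4096, offset = (100 ^ 4096) & 511 = 100,
 * aligned down to 96. The block offset is then copied to byte 96 of
 * that 512b block, and the start time overwrites its last
 * sizeof(struct timeval) bytes, so no 512b block is left fully
 * predictable.
 */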
1417
1418/*
Jens Axboe0d29de82010-09-01 13:54:15 +02001419 * Return an io_u to be processed. Gets a buflen and offset, sets direction,
1420 * etc. The returned io_u is fully ready to be prepped and submitted.
1421 */
1422struct io_u *get_io_u(struct thread_data *td)
1423{
1424 struct fio_file *f;
1425 struct io_u *io_u;
Jens Axboede789762011-09-16 22:11:23 +02001426 int do_scramble = 0;
Jens Axboe002fe732014-02-11 08:31:13 -07001427 long ret = 0;
Jens Axboe0d29de82010-09-01 13:54:15 +02001428
1429 io_u = __get_io_u(td);
1430 if (!io_u) {
1431 dprint(FD_IO, "__get_io_u failed\n");
1432 return NULL;
1433 }
1434
1435 if (check_get_verify(td, io_u))
1436 goto out;
1437 if (check_get_trim(td, io_u))
1438 goto out;
1439
Jens Axboe755200a2007-02-19 13:08:12 +01001440 /*
	 * If this io_u is from a requeue, it is already set up
1442 */
1443 if (io_u->file)
Jens Axboe77f392b2007-02-19 20:13:09 +01001444 goto out;
Jens Axboe755200a2007-02-19 13:08:12 +01001445
Jens Axboe429f6672007-07-23 10:38:43 +02001446 /*
1447 * If using an iolog, grab next piece if any available.
1448 */
Jens Axboed72be542012-11-30 19:37:46 +01001449 if (td->flags & TD_F_READ_IOLOG) {
Jens Axboe429f6672007-07-23 10:38:43 +02001450 if (read_iolog_get(td, io_u))
1451 goto err_put;
Jens Axboe2ba1c292008-02-01 13:16:38 +01001452 } else if (set_io_u_file(td, io_u)) {
Jens Axboe002fe732014-02-11 08:31:13 -07001453 ret = -EBUSY;
Jens Axboe2ba1c292008-02-01 13:16:38 +01001454 dprint(FD_IO, "io_u %p, setting file failed\n", io_u);
Jens Axboe429f6672007-07-23 10:38:43 +02001455 goto err_put;
Jens Axboe2ba1c292008-02-01 13:16:38 +01001456 }
Jens Axboe5ec10ea2008-03-06 15:42:00 +01001457
Jens Axboe429f6672007-07-23 10:38:43 +02001458 f = io_u->file;
Jens Axboe002fe732014-02-11 08:31:13 -07001459 if (!f) {
1460 dprint(FD_IO, "io_u %p, setting file failed\n", io_u);
1461 goto err_put;
1462 }
1463
Jens Axboed6aed792009-06-03 08:41:15 +02001464 assert(fio_file_open(f));
Jens Axboe97af62c2007-05-22 11:12:13 +02001465
Jens Axboeff58fce2010-08-25 12:02:08 +02001466 if (ddir_rw(io_u->ddir)) {
Jens Axboed0656a92008-02-01 18:33:23 +01001467 if (!io_u->buflen && !(td->io_ops->flags & FIO_NOIO)) {
Jens Axboe2ba1c292008-02-01 13:16:38 +01001468 dprint(FD_IO, "get_io_u: zero buflen on %p\n", io_u);
Jens Axboe429f6672007-07-23 10:38:43 +02001469 goto err_put;
Jens Axboe2ba1c292008-02-01 13:16:38 +01001470 }
Jens Axboe87dc1ab2006-10-24 14:41:26 +02001471
Jens Axboe38dad622010-07-20 14:46:00 -06001472 f->last_start = io_u->offset;
Jens Axboe36167d82007-02-18 05:41:31 +01001473 f->last_pos = io_u->offset + io_u->buflen;
Jens Axboe87dc1ab2006-10-24 14:41:26 +02001474
Jens Axboefd684182011-09-19 09:24:44 +02001475 if (io_u->ddir == DDIR_WRITE) {
Jens Axboed72be542012-11-30 19:37:46 +01001476 if (td->flags & TD_F_REFILL_BUFFERS) {
Jens Axboe9c426842012-03-02 21:02:12 +01001477 io_u_fill_buffer(td, io_u,
Jens Axboe8e0aa162014-09-26 15:04:58 -06001478 td->o.min_bs[DDIR_WRITE],
1479 io_u->xfer_buflen);
Jens Axboebedc9dc2014-03-17 12:51:09 -06001480 } else if ((td->flags & TD_F_SCRAMBLE_BUFFERS) &&
1481 !(td->flags & TD_F_COMPRESS))
Jens Axboefd684182011-09-19 09:24:44 +02001482 do_scramble = 1;
Jens Axboed72be542012-11-30 19:37:46 +01001483 if (td->flags & TD_F_VER_NONE) {
Jens Axboe629f1d72012-03-09 19:02:01 +01001484 populate_verify_io_u(td, io_u);
1485 do_scramble = 0;
1486 }
Jens Axboefd684182011-09-19 09:24:44 +02001487 } else if (io_u->ddir == DDIR_READ) {
Radha Ramachandrancbe8d752010-07-14 08:36:07 +02001488 /*
			 * Reset the buf_filled parameter so that, if this
			 * buffer is next used for a write, it gets refilled.
1491 */
Radha Ramachandrancbe8d752010-07-14 08:36:07 +02001492 io_u->buf_filled_len = 0;
1493 }
Jens Axboe10ba5352006-10-20 11:39:27 +02001494 }
1495
Jens Axboe165faf12007-02-07 11:30:37 +01001496 /*
1497 * Set io data pointers.
1498 */
Jens Axboecec6b552007-02-06 20:15:38 +01001499 io_u->xfer_buf = io_u->buf;
1500 io_u->xfer_buflen = io_u->buflen;
Jens Axboe5973caf2008-05-21 19:52:35 +02001501
Jens Axboe6ac7a332008-03-01 15:22:32 +01001502out:
Jens Axboe0d29de82010-09-01 13:54:15 +02001503 assert(io_u->file);
Jens Axboe429f6672007-07-23 10:38:43 +02001504 if (!td_io_prep(td, io_u)) {
Jens Axboe993bf482008-11-14 13:04:53 +01001505 if (!td->o.disable_slat)
1506 fio_gettime(&io_u->start_time, NULL);
Jens Axboede789762011-09-16 22:11:23 +02001507 if (do_scramble)
1508 small_content_scramble(io_u);
Jens Axboe429f6672007-07-23 10:38:43 +02001509 return io_u;
Jens Axboe36167d82007-02-18 05:41:31 +01001510 }
Jens Axboe429f6672007-07-23 10:38:43 +02001511err_put:
Jens Axboe2ba1c292008-02-01 13:16:38 +01001512 dprint(FD_IO, "get_io_u failed\n");
Jens Axboe429f6672007-07-23 10:38:43 +02001513 put_io_u(td, io_u);
Jens Axboe002fe732014-02-11 08:31:13 -07001514 return ERR_PTR(ret);
Jens Axboe10ba5352006-10-20 11:39:27 +02001515}
1516
Jens Axboe54517922007-03-05 10:06:06 +01001517void io_u_log_error(struct thread_data *td, struct io_u *io_u)
1518{
Dmitry Monakhov8b28bd42012-09-23 15:46:09 +04001519 enum error_type_bit eb = td_error_type(io_u->ddir, io_u->error);
Jens Axboe825f8182010-08-25 10:47:18 +02001520
Dmitry Monakhov8b28bd42012-09-23 15:46:09 +04001521 if (td_non_fatal_error(td, eb, io_u->error) && !td->o.error_dump)
1522 return;
Jens Axboe54517922007-03-05 10:06:06 +01001523
Robert Elliott2cbdcdb2014-09-16 17:09:48 -05001524 log_err("fio: io_u error%s%s: %s: %s offset=%llu, buflen=%lu\n",
1525 io_u->file ? " on file " : "",
1526 io_u->file ? io_u->file->file_name : "",
1527 strerror(io_u->error),
1528 io_ddir_name(io_u->ddir),
1529 io_u->offset, io_u->xfer_buflen);
Jens Axboe54517922007-03-05 10:06:06 +01001530
1531 if (!td->error)
1532 td_verror(td, io_u->error, "io_u error");
1533}
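
/*
 * The log line produced above looks something like (example values):
 *
 *	fio: io_u error on file /dev/sdb: Input/output error: read offset=1048576, buflen=4096
 *
 * and the first such error is also propagated into td->error via
 * td_verror().
 */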
1534
Jens Axboeaba6c952014-02-13 19:59:56 -07001535static inline int gtod_reduce(struct thread_data *td)
1536{
Jens Axboe729fe3a2014-02-14 08:46:35 -07001537 return td->o.disable_clat && td->o.disable_lat && td->o.disable_slat
Jens Axboeb74b8202014-02-13 20:04:02 -07001538 && td->o.disable_bw;
Jens Axboeaba6c952014-02-13 19:59:56 -07001539}
1540
Jens Axboec8eeb9d2011-10-05 14:02:22 +02001541static void account_io_completion(struct thread_data *td, struct io_u *io_u,
1542 struct io_completion_data *icd,
1543 const enum fio_ddir idx, unsigned int bytes)
1544{
Jens Axboe24d23ca2012-11-13 08:31:24 -07001545 unsigned long lusec = 0;
Jens Axboec8eeb9d2011-10-05 14:02:22 +02001546
Jens Axboeaba6c952014-02-13 19:59:56 -07001547 if (!gtod_reduce(td))
Jens Axboec8eeb9d2011-10-05 14:02:22 +02001548 lusec = utime_since(&io_u->issue_time, &icd->time);
1549
1550 if (!td->o.disable_lat) {
1551 unsigned long tusec;
1552
1553 tusec = utime_since(&io_u->start_time, &icd->time);
Jens Axboeccefd5f2014-06-30 20:59:03 -06001554 add_lat_sample(td, idx, tusec, bytes, io_u->offset);
Jens Axboe15501532012-10-24 16:37:45 +02001555
Jens Axboed4afedf2013-05-22 22:21:29 +02001556 if (td->flags & TD_F_PROFILE_OPS) {
1557 struct prof_io_ops *ops = &td->prof_io_ops;
1558
1559 if (ops->io_u_lat)
1560 icd->error = ops->io_u_lat(td, tusec);
1561 }
1562
Jens Axboe3e260a42013-12-09 12:38:53 -07001563 if (td->o.max_latency && tusec > td->o.max_latency)
1564 lat_fatal(td, icd, tusec, td->o.max_latency);
1565 if (td->o.latency_target && tusec > td->o.latency_target) {
1566 if (lat_target_failed(td))
1567 lat_fatal(td, icd, tusec, td->o.latency_target);
Jens Axboe15501532012-10-24 16:37:45 +02001568 }
Jens Axboec8eeb9d2011-10-05 14:02:22 +02001569 }
1570
1571 if (!td->o.disable_clat) {
Jens Axboeccefd5f2014-06-30 20:59:03 -06001572 add_clat_sample(td, idx, lusec, bytes, io_u->offset);
Jens Axboec8eeb9d2011-10-05 14:02:22 +02001573 io_u_mark_latency(td, lusec);
1574 }
1575
1576 if (!td->o.disable_bw)
1577 add_bw_sample(td, idx, bytes, &icd->time);
1578
Jens Axboeaba6c952014-02-13 19:59:56 -07001579 if (!gtod_reduce(td))
1580 add_iops_sample(td, idx, bytes, &icd->time);
Jens Axboec8eeb9d2011-10-05 14:02:22 +02001581}
1582
Steven Lang1b8dbf22011-11-09 13:48:01 +01001583static long long usec_for_io(struct thread_data *td, enum fio_ddir ddir)
1584{
Jens Axboe1ae83d42013-01-12 01:44:15 -07001585 uint64_t secs, remainder, bps, bytes;
1586
Steven Lang1b8dbf22011-11-09 13:48:01 +01001587 bytes = td->this_io_bytes[ddir];
1588 bps = td->rate_bps[ddir];
1589 secs = bytes / bps;
1590 remainder = bytes % bps;
1591 return remainder * 1000000 / bps + secs * 1000000;
1592}
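
/*
 * Worked example: with this_io_bytes=10485760 (10MB) and rate_bps=
 * 4194304 (4MB/s), secs = 2 and remainder = 2097152, so the result is
 * 2097152 * 1000000 / 4194304 + 2 * 1000000 = 2500000 usec. Splitting
 * out the whole seconds keeps the remaining multiply small, since the
 * remainder is always less than bps.
 */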
1593
Jens Axboee69fdf72014-07-23 16:11:43 +02001594static void io_completed(struct thread_data *td, struct io_u **io_u_ptr,
Jens Axboe97601022007-02-18 12:47:29 +01001595 struct io_completion_data *icd)
Jens Axboe10ba5352006-10-20 11:39:27 +02001596{
Jens Axboee69fdf72014-07-23 16:11:43 +02001597 struct io_u *io_u = *io_u_ptr;
1598 enum fio_ddir ddir = io_u->ddir;
1599 struct fio_file *f = io_u->file;
Jens Axboe10ba5352006-10-20 11:39:27 +02001600
Jens Axboe2ba1c292008-02-01 13:16:38 +01001601 dprint_io_u(io_u, "io complete");
1602
Jens Axboe2ecc1b52009-11-04 20:58:09 +01001603 td_io_u_lock(td);
Jens Axboe0c6e7512007-02-22 11:19:39 +01001604 assert(io_u->flags & IO_U_F_FLIGHT);
Jens Axboe38dad622010-07-20 14:46:00 -06001605 io_u->flags &= ~(IO_U_F_FLIGHT | IO_U_F_BUSY_OK);
Jens Axboef9401282014-02-06 12:17:37 -07001606
1607 /*
1608 * Mark IO ok to verify
1609 */
1610 if (io_u->ipo) {
Jens Axboe890b6652014-05-06 19:06:51 -06001611 /*
1612 * Remove errored entry from the verification list
1613 */
1614 if (io_u->error)
1615 unlog_io_piece(td, io_u);
1616 else {
1617 io_u->ipo->flags &= ~IP_F_IN_FLIGHT;
1618 write_barrier();
1619 }
Jens Axboef9401282014-02-06 12:17:37 -07001620 }
1621
Jens Axboe2ecc1b52009-11-04 20:58:09 +01001622 td_io_u_unlock(td);
Jens Axboe0c6e7512007-02-22 11:19:39 +01001623
Jens Axboee69fdf72014-07-23 16:11:43 +02001624 if (ddir_sync(ddir)) {
Jens Axboe87dc1ab2006-10-24 14:41:26 +02001625 td->last_was_sync = 1;
Jens Axboe44f29692010-03-09 20:09:44 +01001626 if (f) {
1627 f->first_write = -1ULL;
1628 f->last_write = -1ULL;
1629 }
Jens Axboe87dc1ab2006-10-24 14:41:26 +02001630 return;
1631 }
1632
1633 td->last_was_sync = 0;
Jens Axboee69fdf72014-07-23 16:11:43 +02001634 td->last_ddir = ddir;
Jens Axboe87dc1ab2006-10-24 14:41:26 +02001635
Jens Axboee69fdf72014-07-23 16:11:43 +02001636 if (!io_u->error && ddir_rw(ddir)) {
Jens Axboe10ba5352006-10-20 11:39:27 +02001637 unsigned int bytes = io_u->buflen - io_u->resid;
Jens Axboee69fdf72014-07-23 16:11:43 +02001638 const enum fio_ddir oddir = ddir ^ 1;
Jens Axboeb29ee5b2008-09-11 10:17:26 +02001639 int ret;
Jens Axboe10ba5352006-10-20 11:39:27 +02001640
Jens Axboee69fdf72014-07-23 16:11:43 +02001641 td->io_blocks[ddir]++;
1642 td->this_io_blocks[ddir]++;
1643 td->io_bytes[ddir] += bytes;
webeeae2fafc2012-03-23 13:41:41 +01001644
1645 if (!(io_u->flags & IO_U_F_VER_LIST))
Jens Axboee69fdf72014-07-23 16:11:43 +02001646 td->this_io_bytes[ddir] += bytes;
Jens Axboe10ba5352006-10-20 11:39:27 +02001647
Jens Axboede54cfd2014-11-10 20:34:00 -07001648 if (ddir == DDIR_WRITE) {
1649 if (f) {
1650 if (f->first_write == -1ULL ||
1651 io_u->offset < f->first_write)
1652 f->first_write = io_u->offset;
1653 if (f->last_write == -1ULL ||
1654 ((io_u->offset + bytes) > f->last_write))
1655 f->last_write = io_u->offset + bytes;
1656 }
1657 if (td->last_write_comp) {
1658 int idx = td->last_write_idx++;
1659
1660 td->last_write_comp[idx] = io_u->offset;
1661 if (td->last_write_idx == td->o.iodepth)
1662 td->last_write_idx = 0;
1663 }
Jens Axboe44f29692010-03-09 20:09:44 +01001664 }
1665
Steven Lang6b1190f2012-02-07 09:42:59 +01001666 if (ramp_time_over(td) && (td->runstate == TD_RUNNING ||
1667 td->runstate == TD_VERIFYING)) {
Jens Axboee69fdf72014-07-23 16:11:43 +02001668 account_io_completion(td, io_u, icd, ddir, bytes);
Jens Axboe40e1a6f2009-06-11 10:55:39 +02001669
Jens Axboee69fdf72014-07-23 16:11:43 +02001670 if (__should_check_rate(td, ddir)) {
1671 td->rate_pending_usleep[ddir] =
1672 (usec_for_io(td, ddir) -
Radha Ramachandranba3e4e02009-12-09 22:31:44 +01001673 utime_since_now(&td->start));
Jens Axboeb23b6a22009-06-11 22:06:23 +02001674 }
Jens Axboee69fdf72014-07-23 16:11:43 +02001675 if (ddir != DDIR_TRIM &&
1676 __should_check_rate(td, oddir)) {
1677 td->rate_pending_usleep[oddir] =
1678 (usec_for_io(td, oddir) -
Radha Ramachandranba3e4e02009-12-09 22:31:44 +01001679 utime_since_now(&td->start));
Jens Axboee69fdf72014-07-23 16:11:43 +02001680 }
Jens Axboe721938a2008-09-10 09:46:16 +02001681 }
Jens Axboe10ba5352006-10-20 11:39:27 +02001682
Jens Axboee69fdf72014-07-23 16:11:43 +02001683 icd->bytes_done[ddir] += bytes;
Jens Axboe3af6ef32007-02-18 06:57:43 +01001684
Jens Axboed7762cf2007-02-23 12:34:57 +01001685 if (io_u->end_io) {
Jens Axboee69fdf72014-07-23 16:11:43 +02001686 ret = io_u->end_io(td, io_u_ptr);
1687 io_u = *io_u_ptr;
Jens Axboe3af6ef32007-02-18 06:57:43 +01001688 if (ret && !icd->error)
1689 icd->error = ret;
1690 }
Jens Axboeff58fce2010-08-25 12:02:08 +02001691 } else if (io_u->error) {
Jens Axboe10ba5352006-10-20 11:39:27 +02001692 icd->error = io_u->error;
Jens Axboe54517922007-03-05 10:06:06 +01001693 io_u_log_error(td, io_u);
1694 }
Dmitry Monakhov8b28bd42012-09-23 15:46:09 +04001695 if (icd->error) {
Jens Axboee69fdf72014-07-23 16:11:43 +02001696 enum error_type_bit eb = td_error_type(ddir, icd->error);
1697
Dmitry Monakhov8b28bd42012-09-23 15:46:09 +04001698 if (!td_non_fatal_error(td, eb, icd->error))
1699 return;
Jens Axboee69fdf72014-07-23 16:11:43 +02001700
Radha Ramachandranf2bba182009-06-15 08:40:16 +02001701 /*
1702 * If there is a non_fatal error, then add to the error count
1703 * and clear all the errors.
1704 */
1705 update_error_count(td, icd->error);
1706 td_clear_error(td);
1707 icd->error = 0;
Jens Axboee69fdf72014-07-23 16:11:43 +02001708 if (io_u)
1709 io_u->error = 0;
Radha Ramachandranf2bba182009-06-15 08:40:16 +02001710 }
Jens Axboe10ba5352006-10-20 11:39:27 +02001711}
1712
Jens Axboe9520ebb2008-10-16 21:03:27 +02001713static void init_icd(struct thread_data *td, struct io_completion_data *icd,
1714 int nr)
Jens Axboe36167d82007-02-18 05:41:31 +01001715{
Shaohua Li6eaf09d2012-09-14 08:49:43 +02001716 int ddir;
Jens Axboeaba6c952014-02-13 19:59:56 -07001717
1718 if (!gtod_reduce(td))
Jens Axboe9520ebb2008-10-16 21:03:27 +02001719 fio_gettime(&icd->time, NULL);
Jens Axboe36167d82007-02-18 05:41:31 +01001720
Jens Axboe3af6ef32007-02-18 06:57:43 +01001721 icd->nr = nr;
1722
Jens Axboe36167d82007-02-18 05:41:31 +01001723 icd->error = 0;
Shaohua Li6eaf09d2012-09-14 08:49:43 +02001724 for (ddir = DDIR_READ; ddir < DDIR_RWDIR_CNT; ddir++)
1725 icd->bytes_done[ddir] = 0;
Jens Axboe36167d82007-02-18 05:41:31 +01001726}
1727
Jens Axboe97601022007-02-18 12:47:29 +01001728static void ios_completed(struct thread_data *td,
1729 struct io_completion_data *icd)
Jens Axboe10ba5352006-10-20 11:39:27 +02001730{
1731 struct io_u *io_u;
1732 int i;
1733
Jens Axboe10ba5352006-10-20 11:39:27 +02001734 for (i = 0; i < icd->nr; i++) {
1735 io_u = td->io_ops->event(td, i);
1736
Jens Axboee69fdf72014-07-23 16:11:43 +02001737 io_completed(td, &io_u, icd);
Jens Axboee8462bd2009-07-06 12:59:04 +02001738
Jens Axboee69fdf72014-07-23 16:11:43 +02001739 if (io_u)
Jens Axboee8462bd2009-07-06 12:59:04 +02001740 put_io_u(td, io_u);
Jens Axboe10ba5352006-10-20 11:39:27 +02001741 }
1742}
Jens Axboe97601022007-02-18 12:47:29 +01001743
Jens Axboee7e6cfb2007-02-20 10:58:34 +01001744/*
1745 * Complete a single io_u for the sync engines.
1746 */
Jens Axboe581e7142009-06-09 12:47:16 +02001747int io_u_sync_complete(struct thread_data *td, struct io_u *io_u,
Jens Axboe100f49f2013-01-23 10:15:57 -07001748 uint64_t *bytes)
Jens Axboe97601022007-02-18 12:47:29 +01001749{
1750 struct io_completion_data icd;
1751
Jens Axboe9520ebb2008-10-16 21:03:27 +02001752 init_icd(td, &icd, 1);
Jens Axboee69fdf72014-07-23 16:11:43 +02001753 io_completed(td, &io_u, &icd);
Jens Axboee8462bd2009-07-06 12:59:04 +02001754
Jens Axboee69fdf72014-07-23 16:11:43 +02001755 if (io_u)
Jens Axboee8462bd2009-07-06 12:59:04 +02001756 put_io_u(td, io_u);
Jens Axboe97601022007-02-18 12:47:29 +01001757
Jens Axboe581e7142009-06-09 12:47:16 +02001758 if (icd.error) {
1759 td_verror(td, icd.error, "io_u_sync_complete");
1760 return -1;
1761 }
Jens Axboe97601022007-02-18 12:47:29 +01001762
Jens Axboe581e7142009-06-09 12:47:16 +02001763 if (bytes) {
Shaohua Li6eaf09d2012-09-14 08:49:43 +02001764 int ddir;
1765
1766 for (ddir = DDIR_READ; ddir < DDIR_RWDIR_CNT; ddir++)
1767 bytes[ddir] += icd.bytes_done[ddir];
Jens Axboe581e7142009-06-09 12:47:16 +02001768 }
1769
1770 return 0;
Jens Axboe97601022007-02-18 12:47:29 +01001771}
1772
Jens Axboee7e6cfb2007-02-20 10:58:34 +01001773/*
 * Called to complete at least min_evts IOs for the async engines.
1775 */
Jens Axboe581e7142009-06-09 12:47:16 +02001776int io_u_queued_complete(struct thread_data *td, int min_evts,
Jens Axboe100f49f2013-01-23 10:15:57 -07001777 uint64_t *bytes)
Jens Axboe97601022007-02-18 12:47:29 +01001778{
Jens Axboe97601022007-02-18 12:47:29 +01001779 struct io_completion_data icd;
Jens Axboe00de55e2007-02-20 10:45:57 +01001780 struct timespec *tvp = NULL;
Jens Axboe97601022007-02-18 12:47:29 +01001781 int ret;
Davide Libenzi4d06a332007-03-22 07:43:50 +01001782 struct timespec ts = { .tv_sec = 0, .tv_nsec = 0, };
Jens Axboe97601022007-02-18 12:47:29 +01001783
Jens Axboe49504212008-06-05 09:03:30 +02001784 dprint(FD_IO, "io_u_queued_completed: min=%d\n", min_evts);
Jens Axboeb271fe62008-02-04 10:49:41 +01001785
Jens Axboe49504212008-06-05 09:03:30 +02001786 if (!min_evts)
Jens Axboe00de55e2007-02-20 10:45:57 +01001787 tvp = &ts;
Robert Elliott05074832014-09-04 13:51:05 -06001788 else if (min_evts > td->cur_depth)
1789 min_evts = td->cur_depth;
Jens Axboe97601022007-02-18 12:47:29 +01001790
Jens Axboe49504212008-06-05 09:03:30 +02001791 ret = td_io_getevents(td, min_evts, td->o.iodepth_batch_complete, tvp);
Jens Axboe97601022007-02-18 12:47:29 +01001792 if (ret < 0) {
Jens Axboee1161c32007-02-22 19:36:48 +01001793 td_verror(td, -ret, "td_io_getevents");
Jens Axboe97601022007-02-18 12:47:29 +01001794 return ret;
1795 } else if (!ret)
1796 return ret;
1797
Jens Axboe9520ebb2008-10-16 21:03:27 +02001798 init_icd(td, &icd, ret);
Jens Axboe97601022007-02-18 12:47:29 +01001799 ios_completed(td, &icd);
Jens Axboe581e7142009-06-09 12:47:16 +02001800 if (icd.error) {
1801 td_verror(td, icd.error, "io_u_queued_complete");
1802 return -1;
1803 }
Jens Axboe97601022007-02-18 12:47:29 +01001804
Jens Axboe581e7142009-06-09 12:47:16 +02001805 if (bytes) {
Shaohua Li6eaf09d2012-09-14 08:49:43 +02001806 int ddir;
1807
1808 for (ddir = DDIR_READ; ddir < DDIR_RWDIR_CNT; ddir++)
1809 bytes[ddir] += icd.bytes_done[ddir];
Jens Axboe581e7142009-06-09 12:47:16 +02001810 }
1811
1812 return 0;
Jens Axboe97601022007-02-18 12:47:29 +01001813}
Jens Axboe7e77dd02007-02-20 10:57:34 +01001814
1815/*
1816 * Call when io_u is really queued, to update the submission latency.
1817 */
1818void io_u_queued(struct thread_data *td, struct io_u *io_u)
1819{
Jens Axboe9520ebb2008-10-16 21:03:27 +02001820 if (!td->o.disable_slat) {
1821 unsigned long slat_time;
Jens Axboe7e77dd02007-02-20 10:57:34 +01001822
Jens Axboe9520ebb2008-10-16 21:03:27 +02001823 slat_time = utime_since(&io_u->start_time, &io_u->issue_time);
Jens Axboeccefd5f2014-06-30 20:59:03 -06001824 add_slat_sample(td, io_u->ddir, slat_time, io_u->xfer_buflen,
1825 io_u->offset);
Jens Axboe9520ebb2008-10-16 21:03:27 +02001826 }
Jens Axboe7e77dd02007-02-20 10:57:34 +01001827}
Jens Axboe433afcb2007-02-22 10:39:01 +01001828
Jens Axboee66dac22014-09-22 10:02:07 -06001829/*
1830 * See if we should reuse the last seed, if dedupe is enabled
1831 */
1832static struct frand_state *get_buf_state(struct thread_data *td)
1833{
1834 unsigned int v;
1835 unsigned long r;
1836
1837 if (!td->o.dedupe_percentage)
1838 return &td->buf_state;
Jens Axboe64d3bab2014-09-22 14:20:05 -06001839 else if (td->o.dedupe_percentage == 100)
1840 return &td->buf_state_prev;
Jens Axboee66dac22014-09-22 10:02:07 -06001841
1842 r = __rand(&td->dedupe_state);
1843 v = 1 + (int) (100.0 * (r / (FRAND_MAX + 1.0)));
1844
1845 if (v <= td->o.dedupe_percentage)
1846 return &td->buf_state_prev;
1847
1848 return &td->buf_state;
1849}
1850
1851static void save_buf_state(struct thread_data *td, struct frand_state *rs)
1852{
1853 if (rs == &td->buf_state)
1854 frand_copy(&td->buf_state_prev, rs);
1855}
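
/*
 * For example, with dedupe_percentage=40 roughly 40% of the calls to
 * get_buf_state() return buf_state_prev, so those buffers are generated
 * from the same seed as the last "new" buffer and (for equal lengths)
 * come out identical, i.e. dedupe-able. The other ~60% use buf_state,
 * and save_buf_state() then records that seed as the new reference.
 */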
1856
Jens Axboecc86c392013-05-03 15:12:33 +02001857void fill_io_buffer(struct thread_data *td, void *buf, unsigned int min_write,
1858 unsigned int max_bs)
Jens Axboe5973caf2008-05-21 19:52:35 +02001859{
Jens Axboece35b1e2014-01-14 15:35:58 -07001860 if (td->o.buffer_pattern_bytes)
1861 fill_buffer_pattern(td, buf, max_bs);
1862 else if (!td->o.zero_buffers) {
Jens Axboe9c426842012-03-02 21:02:12 +01001863 unsigned int perc = td->o.compress_percentage;
Jens Axboee66dac22014-09-22 10:02:07 -06001864 struct frand_state *rs;
Jens Axboe8e0aa162014-09-26 15:04:58 -06001865 unsigned int left = max_bs;
Jens Axboee66dac22014-09-22 10:02:07 -06001866
Jens Axboe8e0aa162014-09-26 15:04:58 -06001867 do {
1868 rs = get_buf_state(td);
Jens Axboe9c426842012-03-02 21:02:12 +01001869
Jens Axboe8e0aa162014-09-26 15:04:58 -06001870 min_write = min(min_write, left);
Jens Axboef97a43a2012-03-09 19:06:24 +01001871
Jens Axboe8e0aa162014-09-26 15:04:58 -06001872 if (perc) {
1873 unsigned int seg = min_write;
Jens Axboecc86c392013-05-03 15:12:33 +02001874
Jens Axboe8e0aa162014-09-26 15:04:58 -06001875 seg = min(min_write, td->o.compress_chunk);
1876 if (!seg)
1877 seg = min_write;
1878
1879 fill_random_buf_percentage(rs, buf, perc, seg,
1880 min_write);
1881 } else
1882 fill_random_buf(rs, buf, min_write);
1883
1884 buf += min_write;
1885 left -= min_write;
Jens Axboee66dac22014-09-22 10:02:07 -06001886 save_buf_state(td, rs);
Jens Axboe8e0aa162014-09-26 15:04:58 -06001887 } while (left);
Jens Axboe9c426842012-03-02 21:02:12 +01001888 } else
Jens Axboecc86c392013-05-03 15:12:33 +02001889 memset(buf, 0, max_bs);
1890}
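
/*
 * Example settings for the loop above: with min_write=4096, max_bs=65536
 * and compress_percentage=50, the 64k buffer is filled 4k at a time and
 * each slice is made roughly 50% compressible, in segments of at most
 * compress_chunk bytes. With zero_buffers set the buffer is simply
 * zeroed, and buffer_pattern_bytes takes precedence over both.
 */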
1891
1892/*
1893 * "randomly" fill the buffer contents
1894 */
1895void io_u_fill_buffer(struct thread_data *td, struct io_u *io_u,
1896 unsigned int min_write, unsigned int max_bs)
1897{
1898 io_u->buf_filled_len = 0;
1899 fill_io_buffer(td, io_u->buf, min_write, max_bs);
Jens Axboe5973caf2008-05-21 19:52:35 +02001900}