#include <unistd.h>
#include <fcntl.h>
#include <string.h>
#include <signal.h>
#include <time.h>
#include <assert.h>

#include "fio.h"
#include "hash.h"
#include "verify.h"
#include "trim.h"
#include "lib/rand.h"
#include "lib/axmap.h"
#include "err.h"

struct io_completion_data {
	int nr;				/* input */

	int error;			/* output */
	uint64_t bytes_done[DDIR_RWDIR_CNT];	/* output */
	struct timeval time;		/* output */
};

/*
 * The ->io_axmap contains a map of blocks we have or have not done io
 * to yet. Used to make sure we cover the entire range in a fair fashion.
 */
static int random_map_free(struct fio_file *f, const uint64_t block)
{
	return !axmap_isset(f->io_axmap, block);
}

/*
 * Mark a given offset as used in the map.
 */
static void mark_random_map(struct thread_data *td, struct io_u *io_u)
{
	unsigned int min_bs = td->o.rw_min_bs;
	struct fio_file *f = io_u->file;
	unsigned int nr_blocks;
	uint64_t block;

	block = (io_u->offset - f->file_offset) / (uint64_t) min_bs;
	nr_blocks = (io_u->buflen + min_bs - 1) / min_bs;

	if (!(io_u->flags & IO_U_F_BUSY_OK))
		nr_blocks = axmap_set_nr(f->io_axmap, block, nr_blocks);

	if ((nr_blocks * min_bs) < io_u->buflen)
		io_u->buflen = nr_blocks * min_bs;
}

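/*
 * Return the number of addressable blocks for this file and data
 * direction, based on the io/file size and the block alignment.
 */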
static uint64_t last_block(struct thread_data *td, struct fio_file *f,
			   enum fio_ddir ddir)
{
	uint64_t max_blocks;
	uint64_t max_size;

	assert(ddir_rw(ddir));

	/*
	 * Hmm, should we make sure that ->io_size <= ->real_file_size?
	 */
	max_size = f->io_size;
	if (max_size > f->real_file_size)
		max_size = f->real_file_size;

	if (td->o.zone_range)
		max_size = td->o.zone_range;

	max_blocks = max_size / (uint64_t) td->o.ba[ddir];
	if (!max_blocks)
		return 0;

	return max_blocks;
}

struct rand_off {
	struct flist_head list;
	uint64_t off;
};

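/*
 * Pick a uniformly random block in [0, last_block). If a random map is
 * being maintained, fall back to the nearest free block when the chosen
 * one is already in use.
 */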
static int __get_next_rand_offset(struct thread_data *td, struct fio_file *f,
				  enum fio_ddir ddir, uint64_t *b)
{
	uint64_t r, lastb;

	lastb = last_block(td, f, ddir);
	if (!lastb)
		return 1;

	if (td->o.random_generator == FIO_RAND_GEN_TAUSWORTHE) {
		uint64_t rmax;

		rmax = td->o.use_os_rand ? OS_RAND_MAX : FRAND_MAX;

		if (td->o.use_os_rand) {
			rmax = OS_RAND_MAX;
			r = os_random_long(&td->random_state);
		} else {
			rmax = FRAND_MAX;
			r = __rand(&td->__random_state);
		}

		dprint(FD_RANDOM, "off rand %llu\n", (unsigned long long) r);

		*b = lastb * (r / ((uint64_t) rmax + 1.0));
	} else {
		uint64_t off = 0;

		if (lfsr_next(&f->lfsr, &off, lastb))
			return 1;

		*b = off;
	}

	/*
	 * if we are not maintaining a random map, we are done.
	 */
	if (!file_randommap(td, f))
		goto ret;

	/*
	 * calculate map offset and check if it's free
	 */
	if (random_map_free(f, *b))
		goto ret;

	dprint(FD_RANDOM, "get_next_rand_offset: offset %llu busy\n",
			(unsigned long long) *b);

	*b = axmap_next_free(f->io_axmap, *b);
	if (*b == (uint64_t) -1ULL)
		return 1;
ret:
	return 0;
}

static int __get_next_rand_offset_zipf(struct thread_data *td,
				       struct fio_file *f, enum fio_ddir ddir,
				       uint64_t *b)
{
	*b = zipf_next(&f->zipf);
	return 0;
}

static int __get_next_rand_offset_pareto(struct thread_data *td,
					 struct fio_file *f, enum fio_ddir ddir,
					 uint64_t *b)
{
	*b = pareto_next(&f->zipf);
	return 0;
}

static int flist_cmp(void *data, struct flist_head *a, struct flist_head *b)
{
	struct rand_off *r1 = flist_entry(a, struct rand_off, list);
	struct rand_off *r2 = flist_entry(b, struct rand_off, list);

	return r1->off - r2->off;
}

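/*
 * Dispatch to the configured random distribution (uniform, zipf, or
 * pareto) to generate the next block offset.
 */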
static int get_off_from_method(struct thread_data *td, struct fio_file *f,
			       enum fio_ddir ddir, uint64_t *b)
{
	if (td->o.random_distribution == FIO_RAND_DIST_RANDOM)
		return __get_next_rand_offset(td, f, ddir, b);
	else if (td->o.random_distribution == FIO_RAND_DIST_ZIPF)
		return __get_next_rand_offset_zipf(td, f, ddir, b);
	else if (td->o.random_distribution == FIO_RAND_DIST_PARETO)
		return __get_next_rand_offset_pareto(td, f, ddir, b);

	log_err("fio: unknown random distribution: %d\n", td->o.random_distribution);
	return 1;
}

/*
 * Sort the reads for a verify phase in batches of verifysort_nr, if
 * specified.
 */
static inline int should_sort_io(struct thread_data *td)
{
	if (!td->o.verifysort_nr || !td->o.do_verify)
		return 0;
	if (!td_random(td))
		return 0;
	if (td->runstate != TD_VERIFYING)
		return 0;
	if (td->o.random_generator == FIO_RAND_GEN_TAUSWORTHE)
		return 0;

	return 1;
}

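/*
 * For a mixed random/sequential workload (percentage_random), decide
 * whether this particular IO should be random or sequential.
 */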
static int should_do_random(struct thread_data *td, enum fio_ddir ddir)
{
	unsigned int v;
	unsigned long r;

	if (td->o.perc_rand[ddir] == 100)
		return 1;

	if (td->o.use_os_rand) {
		r = os_random_long(&td->seq_rand_state[ddir]);
		v = 1 + (int) (100.0 * (r / (OS_RAND_MAX + 1.0)));
	} else {
		r = __rand(&td->__seq_rand_state[ddir]);
		v = 1 + (int) (100.0 * (r / (FRAND_MAX + 1.0)));
	}

	return v <= td->o.perc_rand[ddir];
}

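/*
 * Get the next random offset. If we are sorting for a verify phase,
 * buffer a batch of verifysort_nr offsets, sort them, and hand them out
 * in ascending order.
 */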
static int get_next_rand_offset(struct thread_data *td, struct fio_file *f,
				enum fio_ddir ddir, uint64_t *b)
{
	struct rand_off *r;
	int i, ret = 1;

	if (!should_sort_io(td))
		return get_off_from_method(td, f, ddir, b);

	if (!flist_empty(&td->next_rand_list)) {
		struct rand_off *r;
fetch:
		r = flist_entry(td->next_rand_list.next, struct rand_off, list);
		flist_del(&r->list);
		*b = r->off;
		free(r);
		return 0;
	}

	for (i = 0; i < td->o.verifysort_nr; i++) {
		r = malloc(sizeof(*r));

		ret = get_off_from_method(td, f, ddir, &r->off);
		if (ret) {
			free(r);
			break;
		}

		flist_add(&r->list, &td->next_rand_list);
	}

	if (ret && !i)
		return ret;

	assert(!flist_empty(&td->next_rand_list));
	flist_sort(NULL, &td->next_rand_list, flist_cmp);
	goto fetch;
}

static int get_next_rand_block(struct thread_data *td, struct fio_file *f,
			       enum fio_ddir ddir, uint64_t *b)
{
	if (!get_next_rand_offset(td, f, ddir, b))
		return 0;

	if (td->o.time_based) {
		fio_file_reset(td, f);
		if (!get_next_rand_offset(td, f, ddir, b))
			return 0;
	}

	dprint(FD_IO, "%s: rand offset failed, last=%llu, size=%llu\n",
			f->file_name, (unsigned long long) f->last_pos,
			(unsigned long long) f->real_file_size);
	return 1;
}

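/*
 * Compute the next sequential offset for this file, applying
 * ddir_seq_add and wrapping back to the start for time based jobs.
 */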
static int get_next_seq_offset(struct thread_data *td, struct fio_file *f,
			       enum fio_ddir ddir, uint64_t *offset)
{
	assert(ddir_rw(ddir));

	if (f->last_pos >= f->io_size + get_start_offset(td, f) && td->o.time_based)
		f->last_pos = f->last_pos - f->io_size;

	if (f->last_pos < f->real_file_size) {
		uint64_t pos;

		if (f->last_pos == f->file_offset && td->o.ddir_seq_add < 0)
			f->last_pos = f->real_file_size;

		pos = f->last_pos - f->file_offset;
		if (pos)
			pos += td->o.ddir_seq_add;

		*offset = pos;
		return 0;
	}

	return 1;
}

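/*
 * Get the next block for this io_u: random, sequential, or a mix of the
 * two depending on the job options and the rw_seq trigger.
 */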
static int get_next_block(struct thread_data *td, struct io_u *io_u,
			  enum fio_ddir ddir, int rw_seq,
			  unsigned int *is_random)
{
	struct fio_file *f = io_u->file;
	uint64_t b, offset;
	int ret;

	assert(ddir_rw(ddir));

	b = offset = -1ULL;

	if (rw_seq) {
		if (td_random(td)) {
			if (should_do_random(td, ddir)) {
				ret = get_next_rand_block(td, f, ddir, &b);
				*is_random = 1;
			} else {
				*is_random = 0;
				io_u->flags |= IO_U_F_BUSY_OK;
				ret = get_next_seq_offset(td, f, ddir, &offset);
				if (ret)
					ret = get_next_rand_block(td, f, ddir, &b);
			}
		} else {
			*is_random = 0;
			ret = get_next_seq_offset(td, f, ddir, &offset);
		}
	} else {
		io_u->flags |= IO_U_F_BUSY_OK;
		*is_random = 0;

		if (td->o.rw_seq == RW_SEQ_SEQ) {
			ret = get_next_seq_offset(td, f, ddir, &offset);
			if (ret) {
				ret = get_next_rand_block(td, f, ddir, &b);
				*is_random = 0;
			}
		} else if (td->o.rw_seq == RW_SEQ_IDENT) {
			if (f->last_start != -1ULL)
				offset = f->last_start - f->file_offset;
			else
				offset = 0;
			ret = 0;
		} else {
			log_err("fio: unknown rw_seq=%d\n", td->o.rw_seq);
			ret = 1;
		}
	}

	if (!ret) {
		if (offset != -1ULL)
			io_u->offset = offset;
		else if (b != -1ULL)
			io_u->offset = b * td->o.ba[ddir];
		else {
			log_err("fio: bug in offset generation: offset=%llu, b=%llu\n", (unsigned long long) offset, (unsigned long long) b);
			ret = 1;
		}
	}

	return ret;
}

/*
 * For random io, generate a random new block and see if it's used. Repeat
 * until we find a free one. For sequential io, just return the end of
 * the last io issued.
 */
static int __get_next_offset(struct thread_data *td, struct io_u *io_u,
			     unsigned int *is_random)
{
	struct fio_file *f = io_u->file;
	enum fio_ddir ddir = io_u->ddir;
	int rw_seq_hit = 0;

	assert(ddir_rw(ddir));

	if (td->o.ddir_seq_nr && !--td->ddir_seq_nr) {
		rw_seq_hit = 1;
		td->ddir_seq_nr = td->o.ddir_seq_nr;
	}

	if (get_next_block(td, io_u, ddir, rw_seq_hit, is_random))
		return 1;

	if (io_u->offset >= f->io_size) {
		dprint(FD_IO, "get_next_offset: offset %llu >= io_size %llu\n",
					(unsigned long long) io_u->offset,
					(unsigned long long) f->io_size);
		return 1;
	}

	io_u->offset += f->file_offset;
	if (io_u->offset >= f->real_file_size) {
		dprint(FD_IO, "get_next_offset: offset %llu >= size %llu\n",
					(unsigned long long) io_u->offset,
					(unsigned long long) f->real_file_size);
		return 1;
	}

	return 0;
}

static int get_next_offset(struct thread_data *td, struct io_u *io_u,
			   unsigned int *is_random)
{
	if (td->flags & TD_F_PROFILE_OPS) {
		struct prof_io_ops *ops = &td->prof_io_ops;

		if (ops->fill_io_u_off)
			return ops->fill_io_u_off(td, io_u, is_random);
	}

	return __get_next_offset(td, io_u, is_random);
}

static inline int io_u_fits(struct thread_data *td, struct io_u *io_u,
			    unsigned int buflen)
{
	struct fio_file *f = io_u->file;

	return io_u->offset + buflen <= f->io_size + get_start_offset(td, f);
}

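/*
 * Pick the next buffer length: either a random size between min_bs and
 * max_bs, or one of the configured bssplit sizes. The result is rounded
 * to the verify interval and block size, and must fit within the file.
 */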
static unsigned int __get_next_buflen(struct thread_data *td, struct io_u *io_u,
				      unsigned int is_random)
{
	int ddir = io_u->ddir;
	unsigned int buflen = 0;
	unsigned int minbs, maxbs;
	unsigned long r, rand_max;

	assert(ddir_rw(ddir));

	if (td->o.bs_is_seq_rand)
		ddir = is_random ? DDIR_WRITE: DDIR_READ;

	minbs = td->o.min_bs[ddir];
	maxbs = td->o.max_bs[ddir];

	if (minbs == maxbs)
		return minbs;

	/*
	 * If we can't satisfy the min block size from here, then fail
	 */
	if (!io_u_fits(td, io_u, minbs))
		return 0;

	if (td->o.use_os_rand)
		rand_max = OS_RAND_MAX;
	else
		rand_max = FRAND_MAX;

	do {
		if (td->o.use_os_rand)
			r = os_random_long(&td->bsrange_state);
		else
			r = __rand(&td->__bsrange_state);

		if (!td->o.bssplit_nr[ddir]) {
			buflen = 1 + (unsigned int) ((double) maxbs *
					(r / (rand_max + 1.0)));
			if (buflen < minbs)
				buflen = minbs;
		} else {
			long perc = 0;
			unsigned int i;

			for (i = 0; i < td->o.bssplit_nr[ddir]; i++) {
				struct bssplit *bsp = &td->o.bssplit[ddir][i];

				buflen = bsp->bs;
				perc += bsp->perc;
				if ((r <= ((rand_max / 100L) * perc)) &&
				    io_u_fits(td, io_u, buflen))
					break;
			}
		}

		if (td->o.do_verify && td->o.verify != VERIFY_NONE)
			buflen = (buflen + td->o.verify_interval - 1) &
				~(td->o.verify_interval - 1);

		if (!td->o.bs_unaligned && is_power_of_2(minbs))
			buflen = (buflen + minbs - 1) & ~(minbs - 1);

	} while (!io_u_fits(td, io_u, buflen));

	return buflen;
}

static unsigned int get_next_buflen(struct thread_data *td, struct io_u *io_u,
				    unsigned int is_random)
{
	if (td->flags & TD_F_PROFILE_OPS) {
		struct prof_io_ops *ops = &td->prof_io_ops;

		if (ops->fill_io_u_size)
			return ops->fill_io_u_size(td, io_u, is_random);
	}

	return __get_next_buflen(td, io_u, is_random);
}

static void set_rwmix_bytes(struct thread_data *td)
{
	unsigned int diff;

	/*
	 * we do time or byte based switch. this is needed because
	 * buffered writes may issue a lot quicker than they complete,
	 * whereas reads do not.
	 */
	diff = td->o.rwmix[td->rwmix_ddir ^ 1];
	td->rwmix_issues = (td->io_issues[td->rwmix_ddir] * diff) / 100;
}

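/*
 * Pick read or write for a mixed workload, weighted by the rwmix
 * percentages.
 */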
static inline enum fio_ddir get_rand_ddir(struct thread_data *td)
{
	unsigned int v;
	unsigned long r;

	if (td->o.use_os_rand) {
		r = os_random_long(&td->rwmix_state);
		v = 1 + (int) (100.0 * (r / (OS_RAND_MAX + 1.0)));
	} else {
		r = __rand(&td->__rwmix_state);
		v = 1 + (int) (100.0 * (r / (FRAND_MAX + 1.0)));
	}

	if (v <= td->o.rwmix[DDIR_READ])
		return DDIR_READ;

	return DDIR_WRITE;
}

void io_u_quiesce(struct thread_data *td)
{
	/*
	 * We are going to sleep, ensure that we flush anything pending as
	 * not to skew our latency numbers.
	 *
	 * Changed to only monitor 'in flight' requests here instead of the
	 * td->cur_depth, b/c td->cur_depth does not accurately represent
	 * io's that have been actually submitted to an async engine,
	 * and cur_depth is meaningless for sync engines.
	 */
	while (td->io_u_in_flight) {
		int fio_unused ret;

		ret = io_u_queued_complete(td, 1, NULL);
	}
}

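/*
 * If rate limiting has accumulated pending sleep time for this data
 * direction, either switch to the other direction or sleep off the debt
 * before returning the direction to use.
 */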
static enum fio_ddir rate_ddir(struct thread_data *td, enum fio_ddir ddir)
{
	enum fio_ddir odir = ddir ^ 1;
	struct timeval t;
	long usec;

	assert(ddir_rw(ddir));

	if (td->rate_pending_usleep[ddir] <= 0)
		return ddir;

	/*
	 * We have too much pending sleep in this direction. See if we
	 * should switch.
	 */
	if (td_rw(td) && td->o.rwmix[odir]) {
		/*
		 * Other direction does not have too much pending, switch
		 */
		if (td->rate_pending_usleep[odir] < 100000)
			return odir;

		/*
		 * Both directions have pending sleep. Sleep the minimum time
		 * and deduct from both.
		 */
		if (td->rate_pending_usleep[ddir] <=
			td->rate_pending_usleep[odir]) {
			usec = td->rate_pending_usleep[ddir];
		} else {
			usec = td->rate_pending_usleep[odir];
			ddir = odir;
		}
	} else
		usec = td->rate_pending_usleep[ddir];

	io_u_quiesce(td);

	fio_gettime(&t, NULL);
	usec_sleep(td, usec);
	usec = utime_since_now(&t);

	td->rate_pending_usleep[ddir] -= usec;

	odir = ddir ^ 1;
	if (td_rw(td) && __should_check_rate(td, odir))
		td->rate_pending_usleep[odir] -= usec;

	if (ddir_trim(ddir))
		return ddir;

	return ddir;
}

/*
 * Return the data direction for the next io_u. If the job is a
 * mixed read/write workload, check the rwmix cycle and switch if
 * necessary.
 */
static enum fio_ddir get_rw_ddir(struct thread_data *td)
{
	enum fio_ddir ddir;

	/*
	 * see if it's time to fsync
	 */
	if (td->o.fsync_blocks &&
	   !(td->io_issues[DDIR_WRITE] % td->o.fsync_blocks) &&
	     td->io_issues[DDIR_WRITE] && should_fsync(td))
		return DDIR_SYNC;

	/*
	 * see if it's time to fdatasync
	 */
	if (td->o.fdatasync_blocks &&
	   !(td->io_issues[DDIR_WRITE] % td->o.fdatasync_blocks) &&
	     td->io_issues[DDIR_WRITE] && should_fsync(td))
		return DDIR_DATASYNC;

	/*
	 * see if it's time to sync_file_range
	 */
	if (td->sync_file_range_nr &&
	   !(td->io_issues[DDIR_WRITE] % td->sync_file_range_nr) &&
	     td->io_issues[DDIR_WRITE] && should_fsync(td))
		return DDIR_SYNC_FILE_RANGE;

	if (td_rw(td)) {
		/*
		 * Check if it's time to seed a new data direction.
		 */
		if (td->io_issues[td->rwmix_ddir] >= td->rwmix_issues) {
			/*
			 * Put a top limit on how many bytes we do for
			 * one data direction, to avoid overflowing the
			 * ranges too much
			 */
			ddir = get_rand_ddir(td);

			if (ddir != td->rwmix_ddir)
				set_rwmix_bytes(td);

			td->rwmix_ddir = ddir;
		}
		ddir = td->rwmix_ddir;
	} else if (td_read(td))
		ddir = DDIR_READ;
	else if (td_write(td))
		ddir = DDIR_WRITE;
	else
		ddir = DDIR_TRIM;

	td->rwmix_ddir = rate_ddir(td, ddir);
	return td->rwmix_ddir;
}

static void set_rw_ddir(struct thread_data *td, struct io_u *io_u)
{
	io_u->ddir = io_u->acct_ddir = get_rw_ddir(td);

	if (io_u->ddir == DDIR_WRITE && (td->io_ops->flags & FIO_BARRIER) &&
	    td->o.barrier_blocks &&
	   !(td->io_issues[DDIR_WRITE] % td->o.barrier_blocks) &&
	     td->io_issues[DDIR_WRITE])
		io_u->flags |= IO_U_F_BARRIER;
}

void put_file_log(struct thread_data *td, struct fio_file *f)
{
	int ret = put_file(td, f);

	if (ret)
		td_verror(td, ret, "file close");
}

void put_io_u(struct thread_data *td, struct io_u *io_u)
{
	td_io_u_lock(td);

	if (io_u->file && !(io_u->flags & IO_U_F_FREE_DEF))
		put_file_log(td, io_u->file);
	io_u->file = NULL;
	io_u->flags &= ~IO_U_F_FREE_DEF;
	io_u->flags |= IO_U_F_FREE;

	if (io_u->flags & IO_U_F_IN_CUR_DEPTH)
		td->cur_depth--;
	io_u_qpush(&td->io_u_freelist, io_u);
	td_io_u_unlock(td);
	td_io_u_free_notify(td);
}

void clear_io_u(struct thread_data *td, struct io_u *io_u)
{
	io_u->flags &= ~IO_U_F_FLIGHT;
	put_io_u(td, io_u);
}

void requeue_io_u(struct thread_data *td, struct io_u **io_u)
{
	struct io_u *__io_u = *io_u;
	enum fio_ddir ddir = acct_ddir(__io_u);

	dprint(FD_IO, "requeue %p\n", __io_u);

	td_io_u_lock(td);

	__io_u->flags |= IO_U_F_FREE;
	if ((__io_u->flags & IO_U_F_FLIGHT) && ddir_rw(ddir))
		td->io_issues[ddir]--;

	__io_u->flags &= ~IO_U_F_FLIGHT;
	if (__io_u->flags & IO_U_F_IN_CUR_DEPTH)
		td->cur_depth--;

	io_u_rpush(&td->io_u_requeues, __io_u);
	td_io_u_unlock(td);
	*io_u = NULL;
}

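/*
 * Fill in the data direction, offset, and buffer length for an io_u,
 * handling zone switching and marking the random map along the way.
 */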
static int fill_io_u(struct thread_data *td, struct io_u *io_u)
{
	unsigned int is_random;

	if (td->io_ops->flags & FIO_NOIO)
		goto out;

	set_rw_ddir(td, io_u);

	/*
	 * fsync() or fdatasync() or trim etc, we are done
	 */
	if (!ddir_rw(io_u->ddir))
		goto out;

	/*
	 * See if it's time to switch to a new zone
	 */
	if (td->zone_bytes >= td->o.zone_size && td->o.zone_skip) {
		td->zone_bytes = 0;
		io_u->file->file_offset += td->o.zone_range + td->o.zone_skip;
		io_u->file->last_pos = io_u->file->file_offset;
		td->io_skip_bytes += td->o.zone_skip;
	}

	/*
	 * No log, let the seq/rand engine retrieve the next buflen and
	 * position.
	 */
	if (get_next_offset(td, io_u, &is_random)) {
		dprint(FD_IO, "io_u %p, failed getting offset\n", io_u);
		return 1;
	}

	io_u->buflen = get_next_buflen(td, io_u, is_random);
	if (!io_u->buflen) {
		dprint(FD_IO, "io_u %p, failed getting buflen\n", io_u);
		return 1;
	}

	if (io_u->offset + io_u->buflen > io_u->file->real_file_size) {
		dprint(FD_IO, "io_u %p, offset too large\n", io_u);
		dprint(FD_IO, " off=%llu/%lu > %llu\n",
			(unsigned long long) io_u->offset, io_u->buflen,
			(unsigned long long) io_u->file->real_file_size);
		return 1;
	}

	/*
	 * mark entry before potentially trimming io_u
	 */
	if (td_random(td) && file_randommap(td, io_u->file))
		mark_random_map(td, io_u);

out:
	dprint_io_u(io_u, "fill_io_u");
	td->zone_bytes += io_u->buflen;
	return 0;
}

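/*
 * Bucket a submit/complete batch count into the power-of-two histogram
 * kept in the thread stats.
 */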
static void __io_u_mark_map(unsigned int *map, unsigned int nr)
{
	int idx = 0;

	switch (nr) {
	default:
		idx = 6;
		break;
	case 33 ... 64:
		idx = 5;
		break;
	case 17 ... 32:
		idx = 4;
		break;
	case 9 ... 16:
		idx = 3;
		break;
	case 5 ... 8:
		idx = 2;
		break;
	case 1 ... 4:
		idx = 1;
	case 0:
		break;
	}

	map[idx]++;
}

void io_u_mark_submit(struct thread_data *td, unsigned int nr)
{
	__io_u_mark_map(td->ts.io_u_submit, nr);
	td->ts.total_submit++;
}

void io_u_mark_complete(struct thread_data *td, unsigned int nr)
{
	__io_u_mark_map(td->ts.io_u_complete, nr);
	td->ts.total_complete++;
}

void io_u_mark_depth(struct thread_data *td, unsigned int nr)
{
	int idx = 0;

	switch (td->cur_depth) {
	default:
		idx = 6;
		break;
	case 32 ... 63:
		idx = 5;
		break;
	case 16 ... 31:
		idx = 4;
		break;
	case 8 ... 15:
		idx = 3;
		break;
	case 4 ... 7:
		idx = 2;
		break;
	case 2 ... 3:
		idx = 1;
	case 1:
		break;
	}

	td->ts.io_u_map[idx] += nr;
}

static void io_u_mark_lat_usec(struct thread_data *td, unsigned long usec)
{
	int idx = 0;

	assert(usec < 1000);

	switch (usec) {
	case 750 ... 999:
		idx = 9;
		break;
	case 500 ... 749:
		idx = 8;
		break;
	case 250 ... 499:
		idx = 7;
		break;
	case 100 ... 249:
		idx = 6;
		break;
	case 50 ... 99:
		idx = 5;
		break;
	case 20 ... 49:
		idx = 4;
		break;
	case 10 ... 19:
		idx = 3;
		break;
	case 4 ... 9:
		idx = 2;
		break;
	case 2 ... 3:
		idx = 1;
	case 0 ... 1:
		break;
	}

	assert(idx < FIO_IO_U_LAT_U_NR);
	td->ts.io_u_lat_u[idx]++;
}

static void io_u_mark_lat_msec(struct thread_data *td, unsigned long msec)
{
	int idx = 0;

	switch (msec) {
	default:
		idx = 11;
		break;
	case 1000 ... 1999:
		idx = 10;
		break;
	case 750 ... 999:
		idx = 9;
		break;
	case 500 ... 749:
		idx = 8;
		break;
	case 250 ... 499:
		idx = 7;
		break;
	case 100 ... 249:
		idx = 6;
		break;
	case 50 ... 99:
		idx = 5;
		break;
	case 20 ... 49:
		idx = 4;
		break;
	case 10 ... 19:
		idx = 3;
		break;
	case 4 ... 9:
		idx = 2;
		break;
	case 2 ... 3:
		idx = 1;
	case 0 ... 1:
		break;
	}

	assert(idx < FIO_IO_U_LAT_M_NR);
	td->ts.io_u_lat_m[idx]++;
}

static void io_u_mark_latency(struct thread_data *td, unsigned long usec)
{
	if (usec < 1000)
		io_u_mark_lat_usec(td, usec);
	else
		io_u_mark_lat_msec(td, usec / 1000);
}

/*
 * Get next file to service by choosing one at random
 */
static struct fio_file *get_next_file_rand(struct thread_data *td,
					   enum fio_file_flags goodf,
					   enum fio_file_flags badf)
{
	struct fio_file *f;
	int fno;

	do {
		int opened = 0;
		unsigned long r;

		if (td->o.use_os_rand) {
			r = os_random_long(&td->next_file_state);
			fno = (unsigned int) ((double) td->o.nr_files
				* (r / (OS_RAND_MAX + 1.0)));
		} else {
			r = __rand(&td->__next_file_state);
			fno = (unsigned int) ((double) td->o.nr_files
				* (r / (FRAND_MAX + 1.0)));
		}

		f = td->files[fno];
		if (fio_file_done(f))
			continue;

		if (!fio_file_open(f)) {
			int err;

			if (td->nr_open_files >= td->o.open_files)
				return ERR_PTR(-EBUSY);

			err = td_io_open_file(td, f);
			if (err)
				continue;
			opened = 1;
		}

		if ((!goodf || (f->flags & goodf)) && !(f->flags & badf)) {
			dprint(FD_FILE, "get_next_file_rand: %p\n", f);
			return f;
		}
		if (opened)
			td_io_close_file(td, f);
	} while (1);
}

/*
 * Get next file to service by doing round robin between all available ones
 */
static struct fio_file *get_next_file_rr(struct thread_data *td, int goodf,
					 int badf)
{
	unsigned int old_next_file = td->next_file;
	struct fio_file *f;

	do {
		int opened = 0;

		f = td->files[td->next_file];

		td->next_file++;
		if (td->next_file >= td->o.nr_files)
			td->next_file = 0;

		dprint(FD_FILE, "trying file %s %x\n", f->file_name, f->flags);
		if (fio_file_done(f)) {
			f = NULL;
			continue;
		}

		if (!fio_file_open(f)) {
			int err;

			if (td->nr_open_files >= td->o.open_files)
				return ERR_PTR(-EBUSY);

			err = td_io_open_file(td, f);
			if (err) {
				dprint(FD_FILE, "error %d on open of %s\n",
					err, f->file_name);
				f = NULL;
				continue;
			}
			opened = 1;
		}

		dprint(FD_FILE, "goodf=%x, badf=%x, ff=%x\n", goodf, badf,
								f->flags);
		if ((!goodf || (f->flags & goodf)) && !(f->flags & badf))
			break;

		if (opened)
			td_io_close_file(td, f);

		f = NULL;
	} while (td->next_file != old_next_file);

	dprint(FD_FILE, "get_next_file_rr: %p\n", f);
	return f;
}

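/*
 * Pick the next file to do io against, honoring the configured file
 * service type (round robin, sequential, or random).
 */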
static struct fio_file *__get_next_file(struct thread_data *td)
{
	struct fio_file *f;

	assert(td->o.nr_files <= td->files_index);

	if (td->nr_done_files >= td->o.nr_files) {
		dprint(FD_FILE, "get_next_file: nr_open=%d, nr_done=%d,"
				" nr_files=%d\n", td->nr_open_files,
						  td->nr_done_files,
						  td->o.nr_files);
		return NULL;
	}

	f = td->file_service_file;
	if (f && fio_file_open(f) && !fio_file_closing(f)) {
		if (td->o.file_service_type == FIO_FSERVICE_SEQ)
			goto out;
		if (td->file_service_left--)
			goto out;
	}

	if (td->o.file_service_type == FIO_FSERVICE_RR ||
	    td->o.file_service_type == FIO_FSERVICE_SEQ)
		f = get_next_file_rr(td, FIO_FILE_open, FIO_FILE_closing);
	else
		f = get_next_file_rand(td, FIO_FILE_open, FIO_FILE_closing);

	if (IS_ERR(f))
		return f;

	td->file_service_file = f;
	td->file_service_left = td->file_service_nr - 1;
out:
	if (f)
		dprint(FD_FILE, "get_next_file: %p [%s]\n", f, f->file_name);
	else
		dprint(FD_FILE, "get_next_file: NULL\n");
	return f;
}

static struct fio_file *get_next_file(struct thread_data *td)
{
	if (!(td->flags & TD_F_PROFILE_OPS)) {
		struct prof_io_ops *ops = &td->prof_io_ops;

		if (ops->get_next_file)
			return ops->get_next_file(td);
	}

	return __get_next_file(td);
}

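/*
 * Assign a file to the io_u and fill it. Files that fail to yield an
 * io_u are marked done and we move on to the next one.
 */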
static long set_io_u_file(struct thread_data *td, struct io_u *io_u)
{
	struct fio_file *f;

	do {
		f = get_next_file(td);
		if (IS_ERR_OR_NULL(f))
			return PTR_ERR(f);

		io_u->file = f;
		get_file(f);

		if (!fill_io_u(td, io_u))
			break;

		put_file_log(td, f);
		td_io_close_file(td, f);
		io_u->file = NULL;
		fio_file_set_done(f);
		td->nr_done_files++;
		dprint(FD_FILE, "%s: is done (%d of %d)\n", f->file_name,
					td->nr_done_files, td->o.nr_files);
	} while (1);

	return 0;
}

static void lat_fatal(struct thread_data *td, struct io_completion_data *icd,
		      unsigned long tusec, unsigned long max_usec)
{
	if (!td->error)
		log_err("fio: latency of %lu usec exceeds specified max (%lu usec)\n", tusec, max_usec);
	td_verror(td, ETIMEDOUT, "max latency exceeded");
	icd->error = ETIMEDOUT;
}

static void lat_new_cycle(struct thread_data *td)
{
	fio_gettime(&td->latency_ts, NULL);
	td->latency_ios = ddir_rw_sum(td->io_blocks);
	td->latency_failed = 0;
}

/*
 * We had an IO outside the latency target. Reduce the queue depth. If we
 * are at QD=1, then it's time to give up.
 */
static int __lat_target_failed(struct thread_data *td)
{
	if (td->latency_qd == 1)
		return 1;

	td->latency_qd_high = td->latency_qd;

	if (td->latency_qd == td->latency_qd_low)
		td->latency_qd_low--;

	td->latency_qd = (td->latency_qd + td->latency_qd_low) / 2;

	dprint(FD_RATE, "Ramped down: %d %d %d\n", td->latency_qd_low, td->latency_qd, td->latency_qd_high);

	/*
	 * When we ramp QD down, quiesce existing IO to prevent
	 * a storm of ramp downs due to pending higher depth.
	 */
	io_u_quiesce(td);
	lat_new_cycle(td);
	return 0;
}

static int lat_target_failed(struct thread_data *td)
{
	if (td->o.latency_percentile.u.f == 100.0)
		return __lat_target_failed(td);

	td->latency_failed++;
	return 0;
}

void lat_target_init(struct thread_data *td)
{
	td->latency_end_run = 0;

	if (td->o.latency_target) {
		dprint(FD_RATE, "Latency target=%llu\n", td->o.latency_target);
		fio_gettime(&td->latency_ts, NULL);
		td->latency_qd = 1;
		td->latency_qd_high = td->o.iodepth;
		td->latency_qd_low = 1;
		td->latency_ios = ddir_rw_sum(td->io_blocks);
	} else
		td->latency_qd = td->o.iodepth;
}

void lat_target_reset(struct thread_data *td)
{
	if (!td->latency_end_run)
		lat_target_init(td);
}

static void lat_target_success(struct thread_data *td)
{
	const unsigned int qd = td->latency_qd;
	struct thread_options *o = &td->o;

	td->latency_qd_low = td->latency_qd;

	/*
	 * If we haven't failed yet, we double up to a failing value instead
	 * of bisecting from highest possible queue depth. If we have set
	 * a limit other than td->o.iodepth, bisect between that.
	 */
	if (td->latency_qd_high != o->iodepth)
		td->latency_qd = (td->latency_qd + td->latency_qd_high) / 2;
	else
		td->latency_qd *= 2;

	if (td->latency_qd > o->iodepth)
		td->latency_qd = o->iodepth;

	dprint(FD_RATE, "Ramped up: %d %d %d\n", td->latency_qd_low, td->latency_qd, td->latency_qd_high);

	/*
	 * Same as last one, we are done. Let it run a latency cycle, so
	 * we get only the results from the targeted depth.
	 */
	if (td->latency_qd == qd) {
		if (td->latency_end_run) {
			dprint(FD_RATE, "We are done\n");
			td->done = 1;
		} else {
			dprint(FD_RATE, "Quiesce and final run\n");
			io_u_quiesce(td);
			td->latency_end_run = 1;
			reset_all_stats(td);
			reset_io_stats(td);
		}
	}

	lat_new_cycle(td);
}

/*
 * Check if we can bump the queue depth
 */
void lat_target_check(struct thread_data *td)
{
	uint64_t usec_window;
	uint64_t ios;
	double success_ios;

	usec_window = utime_since_now(&td->latency_ts);
	if (usec_window < td->o.latency_window)
		return;

	ios = ddir_rw_sum(td->io_blocks) - td->latency_ios;
	success_ios = (double) (ios - td->latency_failed) / (double) ios;
	success_ios *= 100.0;

	dprint(FD_RATE, "Success rate: %.2f%% (target %.2f%%)\n", success_ios, td->o.latency_percentile.u.f);

	if (success_ios >= td->o.latency_percentile.u.f)
		lat_target_success(td);
	else
		__lat_target_failed(td);
}

/*
 * If latency target is enabled, we might be ramping up or down and not
 * using the full queue depth available.
 */
int queue_full(struct thread_data *td)
{
	const int qempty = io_u_qempty(&td->io_u_freelist);

	if (qempty)
		return 1;
	if (!td->o.latency_target)
		return 0;

	return td->cur_depth >= td->latency_qd;
}

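/*
 * Grab a free io_u: prefer requeued io_us, then fall back to the free
 * list if the current queue depth allows it.
 */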
Jens Axboe10ba5352006-10-20 11:39:27 +02001296struct io_u *__get_io_u(struct thread_data *td)
1297{
Jens Axboe0cae66f2014-03-03 13:55:32 -07001298 struct io_u *io_u = NULL;
Jens Axboe10ba5352006-10-20 11:39:27 +02001299
Jens Axboee8462bd2009-07-06 12:59:04 +02001300 td_io_u_lock(td);
1301
1302again:
Jens Axboe2ae0b202013-05-28 14:16:55 +02001303 if (!io_u_rempty(&td->io_u_requeues))
1304 io_u = io_u_rpop(&td->io_u_requeues);
Jens Axboe3e260a42013-12-09 12:38:53 -07001305 else if (!queue_full(td)) {
Jens Axboe2ae0b202013-05-28 14:16:55 +02001306 io_u = io_u_qpop(&td->io_u_freelist);
Jens Axboe10ba5352006-10-20 11:39:27 +02001307
Jens Axboe225ba9e2014-02-26 14:31:15 -08001308 io_u->file = NULL;
Jens Axboe6040dab2006-10-24 19:38:15 +02001309 io_u->buflen = 0;
Jens Axboe10ba5352006-10-20 11:39:27 +02001310 io_u->resid = 0;
Jens Axboed7762cf2007-02-23 12:34:57 +01001311 io_u->end_io = NULL;
Jens Axboe755200a2007-02-19 13:08:12 +01001312 }
1313
1314 if (io_u) {
Jens Axboe0c6e7512007-02-22 11:19:39 +01001315 assert(io_u->flags & IO_U_F_FREE);
Jens Axboe2ecc1b52009-11-04 20:58:09 +01001316 io_u->flags &= ~(IO_U_F_FREE | IO_U_F_FREE_DEF);
Jens Axboe1ef2b6b2010-10-08 15:07:01 +02001317 io_u->flags &= ~(IO_U_F_TRIMMED | IO_U_F_BARRIER);
Jens Axboe82af2a72012-03-13 13:45:58 +01001318 io_u->flags &= ~IO_U_F_VER_LIST;
Jens Axboe0c6e7512007-02-22 11:19:39 +01001319
Jens Axboe755200a2007-02-19 13:08:12 +01001320 io_u->error = 0;
Jens Axboebcd5abf2013-01-23 09:27:25 -07001321 io_u->acct_ddir = -1;
Jens Axboe10ba5352006-10-20 11:39:27 +02001322 td->cur_depth++;
Radha Ramachandran0c412142009-11-03 21:45:31 +01001323 io_u->flags |= IO_U_F_IN_CUR_DEPTH;
Jens Axboef9401282014-02-06 12:17:37 -07001324 io_u->ipo = NULL;
Jens Axboe1dec3e02010-03-19 10:33:39 +01001325 } else if (td->o.verify_async) {
1326 /*
1327 * We ran out, wait for async verify threads to finish and
1328 * return one
1329 */
1330 pthread_cond_wait(&td->free_cond, &td->io_u_lock);
1331 goto again;
Jens Axboe10ba5352006-10-20 11:39:27 +02001332 }
1333
Jens Axboee8462bd2009-07-06 12:59:04 +02001334 td_io_u_unlock(td);
Jens Axboe10ba5352006-10-20 11:39:27 +02001335 return io_u;
1336}
1337
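/*
 * With trim backlogging enabled, periodically turn an io_u into a trim
 * of a previously written range instead of new IO. Returns 1 if the
 * io_u was set up as a trim, 0 otherwise.
 */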
Jens Axboe0d29de82010-09-01 13:54:15 +02001338static int check_get_trim(struct thread_data *td, struct io_u *io_u)
Jens Axboe10ba5352006-10-20 11:39:27 +02001339{
Jens Axboed72be542012-11-30 19:37:46 +01001340 if (!(td->flags & TD_F_TRIM_BACKLOG))
1341 return 0;
1342
1343 if (td->trim_entries) {
Jens Axboe0d29de82010-09-01 13:54:15 +02001344 int get_trim = 0;
Jens Axboe10ba5352006-10-20 11:39:27 +02001345
Jens Axboe0d29de82010-09-01 13:54:15 +02001346 if (td->trim_batch) {
1347 td->trim_batch--;
1348 get_trim = 1;
1349 } else if (!(td->io_hist_len % td->o.trim_backlog) &&
1350 td->last_ddir != DDIR_READ) {
1351 td->trim_batch = td->o.trim_batch;
1352 if (!td->trim_batch)
1353 td->trim_batch = td->o.trim_backlog;
1354 get_trim = 1;
1355 }
1356
1357 if (get_trim && !get_next_trim(td, io_u))
1358 return 1;
Jens Axboe2ba1c292008-02-01 13:16:38 +01001359 }
Jens Axboe10ba5352006-10-20 11:39:27 +02001360
Jens Axboe0d29de82010-09-01 13:54:15 +02001361 return 0;
1362}
1363
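/*
 * With verify backlogging enabled, periodically turn an io_u into a
 * verify of previously written data instead of new IO. Returns 1 if the
 * io_u was set up for a verify, 0 otherwise.
 */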
1364static int check_get_verify(struct thread_data *td, struct io_u *io_u)
1365{
Jens Axboed72be542012-11-30 19:37:46 +01001366 if (!(td->flags & TD_F_VER_BACKLOG))
1367 return 0;
1368
1369 if (td->io_hist_len) {
Jens Axboe9e144182010-06-15 14:25:36 +02001370 int get_verify = 0;
1371
Jens Axboed1ece0c2012-03-07 09:32:58 +01001372 if (td->verify_batch)
Jens Axboe9e144182010-06-15 14:25:36 +02001373 get_verify = 1;
Jens Axboed1ece0c2012-03-07 09:32:58 +01001374 else if (!(td->io_hist_len % td->o.verify_backlog) &&
Jens Axboe9e144182010-06-15 14:25:36 +02001375 td->last_ddir != DDIR_READ) {
1376 td->verify_batch = td->o.verify_batch;
Jens Axboef8a75c92010-06-15 14:27:28 +02001377 if (!td->verify_batch)
1378 td->verify_batch = td->o.verify_backlog;
Jens Axboe9e144182010-06-15 14:25:36 +02001379 get_verify = 1;
1380 }
1381
Jens Axboed1ece0c2012-03-07 09:32:58 +01001382 if (get_verify && !get_next_verify(td, io_u)) {
1383 td->verify_batch--;
Jens Axboe0d29de82010-09-01 13:54:15 +02001384 return 1;
Jens Axboed1ece0c2012-03-07 09:32:58 +01001385 }
Jens Axboe9e144182010-06-15 14:25:36 +02001386 }
1387
Jens Axboe0d29de82010-09-01 13:54:15 +02001388 return 0;
1389}
1390
1391/*
Jens Axboede789762011-09-16 22:11:23 +02001392 * Fill the offset and start time into the buffer content, so the data
Jens Axboe23f394d2011-09-16 22:45:27 +02001393 * is not trivially compressible or de-dupable. Do this for every
1394 * 512b block in the range, since that should be the smallest block size
1395 * we can expect from a device.
Jens Axboede789762011-09-16 22:11:23 +02001396 */
1397static void small_content_scramble(struct io_u *io_u)
1398{
Jens Axboe23f394d2011-09-16 22:45:27 +02001399 unsigned int i, nr_blocks = io_u->buflen / 512;
Jens Axboe1ae83d42013-01-12 01:44:15 -07001400 uint64_t boffset;
Jens Axboe23f394d2011-09-16 22:45:27 +02001401 unsigned int offset;
1402 void *p, *end;
Jens Axboede789762011-09-16 22:11:23 +02001403
Jens Axboe23f394d2011-09-16 22:45:27 +02001404 if (!nr_blocks)
1405 return;
1406
1407 p = io_u->xfer_buf;
Jens Axboefba76ee2011-09-27 14:27:48 -06001408 boffset = io_u->offset;
Jens Axboe81f03662012-02-02 09:20:09 +01001409 io_u->buf_filled_len = 0;
Jens Axboefad82f72011-09-19 11:33:30 +02001410
Jens Axboe23f394d2011-09-16 22:45:27 +02001411 for (i = 0; i < nr_blocks; i++) {
1412 /*
 1413 * Write the block's byte offset into a "random" start offset
 1414 * within the buffer, derived by XORing the usec start time
 1415 * with the current block offset.
1416 */
Jens Axboefad82f72011-09-19 11:33:30 +02001417 offset = (io_u->start_time.tv_usec ^ boffset) & 511;
Jens Axboe1ae83d42013-01-12 01:44:15 -07001418 offset &= ~(sizeof(uint64_t) - 1);
1419 if (offset >= 512 - sizeof(uint64_t))
1420 offset -= sizeof(uint64_t);
Jens Axboefba76ee2011-09-27 14:27:48 -06001421 memcpy(p + offset, &boffset, sizeof(boffset));
Jens Axboe23f394d2011-09-16 22:45:27 +02001422
1423 end = p + 512 - sizeof(io_u->start_time);
1424 memcpy(end, &io_u->start_time, sizeof(io_u->start_time));
1425 p += 512;
Jens Axboefad82f72011-09-19 11:33:30 +02001426 boffset += 512;
Jens Axboe23f394d2011-09-16 22:45:27 +02001427 }
Jens Axboede789762011-09-16 22:11:23 +02001428}
1429
1430/*
Jens Axboe0d29de82010-09-01 13:54:15 +02001431 * Return an io_u to be processed. Gets a buflen and offset, sets direction,
1432 * etc. The returned io_u is fully ready to be prepped and submitted.
1433 */
1434struct io_u *get_io_u(struct thread_data *td)
1435{
1436 struct fio_file *f;
1437 struct io_u *io_u;
Jens Axboede789762011-09-16 22:11:23 +02001438 int do_scramble = 0;
Jens Axboe002fe732014-02-11 08:31:13 -07001439 long ret = 0;
Jens Axboe0d29de82010-09-01 13:54:15 +02001440
1441 io_u = __get_io_u(td);
1442 if (!io_u) {
1443 dprint(FD_IO, "__get_io_u failed\n");
1444 return NULL;
1445 }
1446
1447 if (check_get_verify(td, io_u))
1448 goto out;
1449 if (check_get_trim(td, io_u))
1450 goto out;
1451
Jens Axboe755200a2007-02-19 13:08:12 +01001452 /*
 1453 * If this came from a requeue, the io_u is already set up.
1454 */
1455 if (io_u->file)
Jens Axboe77f392b2007-02-19 20:13:09 +01001456 goto out;
Jens Axboe755200a2007-02-19 13:08:12 +01001457
Jens Axboe429f6672007-07-23 10:38:43 +02001458 /*
 1459 * If using an iolog, grab the next piece if one is available.
1460 */
Jens Axboed72be542012-11-30 19:37:46 +01001461 if (td->flags & TD_F_READ_IOLOG) {
Jens Axboe429f6672007-07-23 10:38:43 +02001462 if (read_iolog_get(td, io_u))
1463 goto err_put;
Jens Axboe2ba1c292008-02-01 13:16:38 +01001464 } else if (set_io_u_file(td, io_u)) {
Jens Axboe002fe732014-02-11 08:31:13 -07001465 ret = -EBUSY;
Jens Axboe2ba1c292008-02-01 13:16:38 +01001466 dprint(FD_IO, "io_u %p, setting file failed\n", io_u);
Jens Axboe429f6672007-07-23 10:38:43 +02001467 goto err_put;
Jens Axboe2ba1c292008-02-01 13:16:38 +01001468 }
Jens Axboe5ec10ea2008-03-06 15:42:00 +01001469
Jens Axboe429f6672007-07-23 10:38:43 +02001470 f = io_u->file;
Jens Axboe002fe732014-02-11 08:31:13 -07001471 if (!f) {
1472 dprint(FD_IO, "io_u %p, setting file failed\n", io_u);
1473 goto err_put;
1474 }
1475
Jens Axboed6aed792009-06-03 08:41:15 +02001476 assert(fio_file_open(f));
Jens Axboe97af62c2007-05-22 11:12:13 +02001477
Jens Axboeff58fce2010-08-25 12:02:08 +02001478 if (ddir_rw(io_u->ddir)) {
Jens Axboed0656a92008-02-01 18:33:23 +01001479 if (!io_u->buflen && !(td->io_ops->flags & FIO_NOIO)) {
Jens Axboe2ba1c292008-02-01 13:16:38 +01001480 dprint(FD_IO, "get_io_u: zero buflen on %p\n", io_u);
Jens Axboe429f6672007-07-23 10:38:43 +02001481 goto err_put;
Jens Axboe2ba1c292008-02-01 13:16:38 +01001482 }
Jens Axboe87dc1ab2006-10-24 14:41:26 +02001483
Jens Axboe38dad622010-07-20 14:46:00 -06001484 f->last_start = io_u->offset;
Jens Axboe36167d82007-02-18 05:41:31 +01001485 f->last_pos = io_u->offset + io_u->buflen;
Jens Axboe87dc1ab2006-10-24 14:41:26 +02001486
Jens Axboefd684182011-09-19 09:24:44 +02001487 if (io_u->ddir == DDIR_WRITE) {
Jens Axboed72be542012-11-30 19:37:46 +01001488 if (td->flags & TD_F_REFILL_BUFFERS) {
Jens Axboe9c426842012-03-02 21:02:12 +01001489 io_u_fill_buffer(td, io_u,
1490 io_u->xfer_buflen, io_u->xfer_buflen);
Jens Axboebedc9dc2014-03-17 12:51:09 -06001491 } else if ((td->flags & TD_F_SCRAMBLE_BUFFERS) &&
1492 !(td->flags & TD_F_COMPRESS))
Jens Axboefd684182011-09-19 09:24:44 +02001493 do_scramble = 1;
Jens Axboed72be542012-11-30 19:37:46 +01001494 if (td->flags & TD_F_VER_NONE) {
Jens Axboe629f1d72012-03-09 19:02:01 +01001495 populate_verify_io_u(td, io_u);
1496 do_scramble = 0;
1497 }
Jens Axboefd684182011-09-19 09:24:44 +02001498 } else if (io_u->ddir == DDIR_READ) {
Radha Ramachandrancbe8d752010-07-14 08:36:07 +02001499 /*
 1500 * Reset the buf_filled parameter so that the buffer is
 1501 * refilled the next time it is used for a write.
1502 */
Radha Ramachandrancbe8d752010-07-14 08:36:07 +02001503 io_u->buf_filled_len = 0;
1504 }
Jens Axboe10ba5352006-10-20 11:39:27 +02001505 }
1506
Jens Axboe165faf12007-02-07 11:30:37 +01001507 /*
1508 * Set io data pointers.
1509 */
Jens Axboecec6b552007-02-06 20:15:38 +01001510 io_u->xfer_buf = io_u->buf;
1511 io_u->xfer_buflen = io_u->buflen;
Jens Axboe5973caf2008-05-21 19:52:35 +02001512
Jens Axboe6ac7a332008-03-01 15:22:32 +01001513out:
Jens Axboe0d29de82010-09-01 13:54:15 +02001514 assert(io_u->file);
Jens Axboe429f6672007-07-23 10:38:43 +02001515 if (!td_io_prep(td, io_u)) {
Jens Axboe993bf482008-11-14 13:04:53 +01001516 if (!td->o.disable_slat)
1517 fio_gettime(&io_u->start_time, NULL);
Jens Axboede789762011-09-16 22:11:23 +02001518 if (do_scramble)
1519 small_content_scramble(io_u);
Jens Axboe429f6672007-07-23 10:38:43 +02001520 return io_u;
Jens Axboe36167d82007-02-18 05:41:31 +01001521 }
Jens Axboe429f6672007-07-23 10:38:43 +02001522err_put:
Jens Axboe2ba1c292008-02-01 13:16:38 +01001523 dprint(FD_IO, "get_io_u failed\n");
Jens Axboe429f6672007-07-23 10:38:43 +02001524 put_io_u(td, io_u);
Jens Axboe002fe732014-02-11 08:31:13 -07001525 return ERR_PTR(ret);
Jens Axboe10ba5352006-10-20 11:39:27 +02001526}
1527
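/*
 * Log the details of a failed io_u: direction, file, offset, length and
 * error string. Skipped if the error is non-fatal for this job and
 * error dumping is not enabled.
 */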
Jens Axboe54517922007-03-05 10:06:06 +01001528void io_u_log_error(struct thread_data *td, struct io_u *io_u)
1529{
Dmitry Monakhov8b28bd42012-09-23 15:46:09 +04001530 enum error_type_bit eb = td_error_type(io_u->ddir, io_u->error);
Jens Axboe825f8182010-08-25 10:47:18 +02001531 const char *msg[] = { "read", "write", "trim", "sync", "datasync",
 1532 "sync_file_range", "wait" };
1533
Dmitry Monakhov8b28bd42012-09-23 15:46:09 +04001534 if (td_non_fatal_error(td, eb, io_u->error) && !td->o.error_dump)
1535 return;
Jens Axboe54517922007-03-05 10:06:06 +01001536
1537 log_err("fio: io_u error");
1538
1539 if (io_u->file)
1540 log_err(" on file %s", io_u->file->file_name);
1541
1542 log_err(": %s\n", strerror(io_u->error));
1543
Jens Axboe5ec10ea2008-03-06 15:42:00 +01001544 log_err(" %s offset=%llu, buflen=%lu\n", msg[io_u->ddir],
1545 io_u->offset, io_u->xfer_buflen);
Jens Axboe54517922007-03-05 10:06:06 +01001546
1547 if (!td->error)
1548 td_verror(td, io_u->error, "io_u error");
1549}
1550
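/*
 * True if all latency and bandwidth stats are disabled, in which case
 * the time stamping of completions can be skipped.
 */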
Jens Axboeaba6c952014-02-13 19:59:56 -07001551static inline int gtod_reduce(struct thread_data *td)
1552{
Jens Axboe729fe3a2014-02-14 08:46:35 -07001553 return td->o.disable_clat && td->o.disable_lat && td->o.disable_slat
Jens Axboeb74b8202014-02-13 20:04:02 -07001554 && td->o.disable_bw;
Jens Axboeaba6c952014-02-13 19:59:56 -07001555}
1556
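/*
 * Per-IO accounting at completion time: record completion and total
 * latency, bandwidth and IOPS samples, run any profile latency hook,
 * and check the latency against max_latency and the latency target.
 */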
Jens Axboec8eeb9d2011-10-05 14:02:22 +02001557static void account_io_completion(struct thread_data *td, struct io_u *io_u,
1558 struct io_completion_data *icd,
1559 const enum fio_ddir idx, unsigned int bytes)
1560{
Jens Axboe24d23ca2012-11-13 08:31:24 -07001561 unsigned long lusec = 0;
Jens Axboec8eeb9d2011-10-05 14:02:22 +02001562
Jens Axboeaba6c952014-02-13 19:59:56 -07001563 if (!gtod_reduce(td))
Jens Axboec8eeb9d2011-10-05 14:02:22 +02001564 lusec = utime_since(&io_u->issue_time, &icd->time);
1565
1566 if (!td->o.disable_lat) {
1567 unsigned long tusec;
1568
1569 tusec = utime_since(&io_u->start_time, &icd->time);
1570 add_lat_sample(td, idx, tusec, bytes);
Jens Axboe15501532012-10-24 16:37:45 +02001571
Jens Axboed4afedf2013-05-22 22:21:29 +02001572 if (td->flags & TD_F_PROFILE_OPS) {
1573 struct prof_io_ops *ops = &td->prof_io_ops;
1574
1575 if (ops->io_u_lat)
1576 icd->error = ops->io_u_lat(td, tusec);
1577 }
1578
Jens Axboe3e260a42013-12-09 12:38:53 -07001579 if (td->o.max_latency && tusec > td->o.max_latency)
1580 lat_fatal(td, icd, tusec, td->o.max_latency);
1581 if (td->o.latency_target && tusec > td->o.latency_target) {
1582 if (lat_target_failed(td))
1583 lat_fatal(td, icd, tusec, td->o.latency_target);
Jens Axboe15501532012-10-24 16:37:45 +02001584 }
Jens Axboec8eeb9d2011-10-05 14:02:22 +02001585 }
1586
1587 if (!td->o.disable_clat) {
1588 add_clat_sample(td, idx, lusec, bytes);
1589 io_u_mark_latency(td, lusec);
1590 }
1591
1592 if (!td->o.disable_bw)
1593 add_bw_sample(td, idx, bytes, &icd->time);
1594
Jens Axboeaba6c952014-02-13 19:59:56 -07001595 if (!gtod_reduce(td))
1596 add_iops_sample(td, idx, bytes, &icd->time);
Jens Axboec8eeb9d2011-10-05 14:02:22 +02001597}
1598
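/*
 * How long, in usecs, the IO already done in this direction should have
 * taken at the configured rate. Hypothetical example: with bps at
 * 10,000,000 bytes/sec and 25,000,000 bytes done, secs = 2 and
 * remainder = 5,000,000, so the result is 500,000 + 2,000,000 =
 * 2,500,000 usecs (2.5 seconds for 25MB at 10MB/sec).
 */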
Steven Lang1b8dbf22011-11-09 13:48:01 +01001599static long long usec_for_io(struct thread_data *td, enum fio_ddir ddir)
1600{
Jens Axboe1ae83d42013-01-12 01:44:15 -07001601 uint64_t secs, remainder, bps, bytes;
1602
Steven Lang1b8dbf22011-11-09 13:48:01 +01001603 bytes = td->this_io_bytes[ddir];
1604 bps = td->rate_bps[ddir];
1605 secs = bytes / bps;
1606 remainder = bytes % bps;
1607 return remainder * 1000000 / bps + secs * 1000000;
1608}
1609
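/*
 * Handle one completed io_u: clear its in-flight state, account the
 * blocks and bytes transferred, update the rate throttling bookkeeping,
 * and run the end_io callback if one is set. Non-fatal errors are
 * counted and cleared, fatal ones are left in icd->error.
 */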
Jens Axboe97601022007-02-18 12:47:29 +01001610static void io_completed(struct thread_data *td, struct io_u *io_u,
1611 struct io_completion_data *icd)
Jens Axboe10ba5352006-10-20 11:39:27 +02001612{
Jens Axboe44f29692010-03-09 20:09:44 +01001613 struct fio_file *f;
Jens Axboe10ba5352006-10-20 11:39:27 +02001614
Jens Axboe2ba1c292008-02-01 13:16:38 +01001615 dprint_io_u(io_u, "io complete");
1616
Jens Axboe2ecc1b52009-11-04 20:58:09 +01001617 td_io_u_lock(td);
Jens Axboe0c6e7512007-02-22 11:19:39 +01001618 assert(io_u->flags & IO_U_F_FLIGHT);
Jens Axboe38dad622010-07-20 14:46:00 -06001619 io_u->flags &= ~(IO_U_F_FLIGHT | IO_U_F_BUSY_OK);
Jens Axboef9401282014-02-06 12:17:37 -07001620
1621 /*
1622 * Mark IO ok to verify
1623 */
1624 if (io_u->ipo) {
1625 io_u->ipo->flags &= ~IP_F_IN_FLIGHT;
1626 write_barrier();
1627 }
1628
Jens Axboe2ecc1b52009-11-04 20:58:09 +01001629 td_io_u_unlock(td);
Jens Axboe0c6e7512007-02-22 11:19:39 +01001630
Jens Axboe5f9099e2009-06-16 22:40:26 +02001631 if (ddir_sync(io_u->ddir)) {
Jens Axboe87dc1ab2006-10-24 14:41:26 +02001632 td->last_was_sync = 1;
Jens Axboe44f29692010-03-09 20:09:44 +01001633 f = io_u->file;
1634 if (f) {
1635 f->first_write = -1ULL;
1636 f->last_write = -1ULL;
1637 }
Jens Axboe87dc1ab2006-10-24 14:41:26 +02001638 return;
1639 }
1640
1641 td->last_was_sync = 0;
Jens Axboe9e144182010-06-15 14:25:36 +02001642 td->last_ddir = io_u->ddir;
Jens Axboe87dc1ab2006-10-24 14:41:26 +02001643
Jens Axboeff58fce2010-08-25 12:02:08 +02001644 if (!io_u->error && ddir_rw(io_u->ddir)) {
Jens Axboe10ba5352006-10-20 11:39:27 +02001645 unsigned int bytes = io_u->buflen - io_u->resid;
Jens Axboe1e97cce2006-12-05 11:44:16 +01001646 const enum fio_ddir idx = io_u->ddir;
Radha Ramachandranba3e4e02009-12-09 22:31:44 +01001647 const enum fio_ddir odx = io_u->ddir ^ 1;
Jens Axboeb29ee5b2008-09-11 10:17:26 +02001648 int ret;
Jens Axboe10ba5352006-10-20 11:39:27 +02001649
Jens Axboeb29ee5b2008-09-11 10:17:26 +02001650 td->io_blocks[idx]++;
Jens Axboec8eeb9d2011-10-05 14:02:22 +02001651 td->this_io_blocks[idx]++;
Jens Axboeb29ee5b2008-09-11 10:17:26 +02001652 td->io_bytes[idx] += bytes;
webeeae2fafc2012-03-23 13:41:41 +01001653
1654 if (!(io_u->flags & IO_U_F_VER_LIST))
1655 td->this_io_bytes[idx] += bytes;
Jens Axboe10ba5352006-10-20 11:39:27 +02001656
Jens Axboe44f29692010-03-09 20:09:44 +01001657 if (idx == DDIR_WRITE) {
1658 f = io_u->file;
1659 if (f) {
1660 if (f->first_write == -1ULL ||
1661 io_u->offset < f->first_write)
1662 f->first_write = io_u->offset;
1663 if (f->last_write == -1ULL ||
1664 ((io_u->offset + bytes) > f->last_write))
1665 f->last_write = io_u->offset + bytes;
1666 }
1667 }
1668
Steven Lang6b1190f2012-02-07 09:42:59 +01001669 if (ramp_time_over(td) && (td->runstate == TD_RUNNING ||
1670 td->runstate == TD_VERIFYING)) {
Jens Axboec8eeb9d2011-10-05 14:02:22 +02001671 account_io_completion(td, io_u, icd, idx, bytes);
Jens Axboe40e1a6f2009-06-11 10:55:39 +02001672
Jens Axboeb23b6a22009-06-11 22:06:23 +02001673 if (__should_check_rate(td, idx)) {
Radha Ramachandranba3e4e02009-12-09 22:31:44 +01001674 td->rate_pending_usleep[idx] =
Steven Lang1b8dbf22011-11-09 13:48:01 +01001675 (usec_for_io(td, idx) -
Radha Ramachandranba3e4e02009-12-09 22:31:44 +01001676 utime_since_now(&td->start));
Jens Axboeb23b6a22009-06-11 22:06:23 +02001677 }
Shaohua Li6eaf09d2012-09-14 08:49:43 +02001678 if (idx != DDIR_TRIM && __should_check_rate(td, odx))
Radha Ramachandranba3e4e02009-12-09 22:31:44 +01001679 td->rate_pending_usleep[odx] =
Steven Lang1b8dbf22011-11-09 13:48:01 +01001680 (usec_for_io(td, odx) -
Radha Ramachandranba3e4e02009-12-09 22:31:44 +01001681 utime_since_now(&td->start));
Jens Axboe721938a2008-09-10 09:46:16 +02001682 }
Jens Axboe10ba5352006-10-20 11:39:27 +02001683
Jens Axboeb29ee5b2008-09-11 10:17:26 +02001684 icd->bytes_done[idx] += bytes;
Jens Axboe3af6ef32007-02-18 06:57:43 +01001685
Jens Axboed7762cf2007-02-23 12:34:57 +01001686 if (io_u->end_io) {
Jens Axboe36690c92007-03-26 10:23:34 +02001687 ret = io_u->end_io(td, io_u);
Jens Axboe3af6ef32007-02-18 06:57:43 +01001688 if (ret && !icd->error)
1689 icd->error = ret;
1690 }
Jens Axboeff58fce2010-08-25 12:02:08 +02001691 } else if (io_u->error) {
Jens Axboe10ba5352006-10-20 11:39:27 +02001692 icd->error = io_u->error;
Jens Axboe54517922007-03-05 10:06:06 +01001693 io_u_log_error(td, io_u);
1694 }
Dmitry Monakhov8b28bd42012-09-23 15:46:09 +04001695 if (icd->error) {
1696 enum error_type_bit eb = td_error_type(io_u->ddir, icd->error);
1697 if (!td_non_fatal_error(td, eb, icd->error))
1698 return;
Radha Ramachandranf2bba182009-06-15 08:40:16 +02001699 /*
1700 * If there is a non_fatal error, then add to the error count
1701 * and clear all the errors.
1702 */
1703 update_error_count(td, icd->error);
1704 td_clear_error(td);
1705 icd->error = 0;
1706 io_u->error = 0;
1707 }
Jens Axboe10ba5352006-10-20 11:39:27 +02001708}
1709
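/*
 * Prepare the completion data for a batch of nr events, zeroing the
 * error and per-direction byte counters and time stamping the batch
 * unless reduced gettimeofday() is in effect.
 */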
Jens Axboe9520ebb2008-10-16 21:03:27 +02001710static void init_icd(struct thread_data *td, struct io_completion_data *icd,
1711 int nr)
Jens Axboe36167d82007-02-18 05:41:31 +01001712{
Shaohua Li6eaf09d2012-09-14 08:49:43 +02001713 int ddir;
Jens Axboeaba6c952014-02-13 19:59:56 -07001714
1715 if (!gtod_reduce(td))
Jens Axboe9520ebb2008-10-16 21:03:27 +02001716 fio_gettime(&icd->time, NULL);
Jens Axboe36167d82007-02-18 05:41:31 +01001717
Jens Axboe3af6ef32007-02-18 06:57:43 +01001718 icd->nr = nr;
1719
Jens Axboe36167d82007-02-18 05:41:31 +01001720 icd->error = 0;
Shaohua Li6eaf09d2012-09-14 08:49:43 +02001721 for (ddir = DDIR_READ; ddir < DDIR_RWDIR_CNT; ddir++)
1722 icd->bytes_done[ddir] = 0;
Jens Axboe36167d82007-02-18 05:41:31 +01001723}
1724
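/*
 * Walk the events reaped from the IO engine and complete each io_u,
 * returning it to the free list unless deferred freeing is set.
 */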
Jens Axboe97601022007-02-18 12:47:29 +01001725static void ios_completed(struct thread_data *td,
1726 struct io_completion_data *icd)
Jens Axboe10ba5352006-10-20 11:39:27 +02001727{
1728 struct io_u *io_u;
1729 int i;
1730
Jens Axboe10ba5352006-10-20 11:39:27 +02001731 for (i = 0; i < icd->nr; i++) {
1732 io_u = td->io_ops->event(td, i);
1733
1734 io_completed(td, io_u, icd);
Jens Axboee8462bd2009-07-06 12:59:04 +02001735
1736 if (!(io_u->flags & IO_U_F_FREE_DEF))
1737 put_io_u(td, io_u);
Jens Axboe10ba5352006-10-20 11:39:27 +02001738 }
1739}
Jens Axboe97601022007-02-18 12:47:29 +01001740
Jens Axboee7e6cfb2007-02-20 10:58:34 +01001741/*
1742 * Complete a single io_u for the sync engines.
1743 */
Jens Axboe581e7142009-06-09 12:47:16 +02001744int io_u_sync_complete(struct thread_data *td, struct io_u *io_u,
Jens Axboe100f49f2013-01-23 10:15:57 -07001745 uint64_t *bytes)
Jens Axboe97601022007-02-18 12:47:29 +01001746{
1747 struct io_completion_data icd;
1748
Jens Axboe9520ebb2008-10-16 21:03:27 +02001749 init_icd(td, &icd, 1);
Jens Axboe97601022007-02-18 12:47:29 +01001750 io_completed(td, io_u, &icd);
Jens Axboee8462bd2009-07-06 12:59:04 +02001751
1752 if (!(io_u->flags & IO_U_F_FREE_DEF))
1753 put_io_u(td, io_u);
Jens Axboe97601022007-02-18 12:47:29 +01001754
Jens Axboe581e7142009-06-09 12:47:16 +02001755 if (icd.error) {
1756 td_verror(td, icd.error, "io_u_sync_complete");
1757 return -1;
1758 }
Jens Axboe97601022007-02-18 12:47:29 +01001759
Jens Axboe581e7142009-06-09 12:47:16 +02001760 if (bytes) {
Shaohua Li6eaf09d2012-09-14 08:49:43 +02001761 int ddir;
1762
1763 for (ddir = DDIR_READ; ddir < DDIR_RWDIR_CNT; ddir++)
1764 bytes[ddir] += icd.bytes_done[ddir];
Jens Axboe581e7142009-06-09 12:47:16 +02001765 }
1766
1767 return 0;
Jens Axboe97601022007-02-18 12:47:29 +01001768}
1769
Jens Axboee7e6cfb2007-02-20 10:58:34 +01001770/*
 1771 * Called to complete at least min_evts IOs for the async engines.
1772 */
Jens Axboe581e7142009-06-09 12:47:16 +02001773int io_u_queued_complete(struct thread_data *td, int min_evts,
Jens Axboe100f49f2013-01-23 10:15:57 -07001774 uint64_t *bytes)
Jens Axboe97601022007-02-18 12:47:29 +01001775{
Jens Axboe97601022007-02-18 12:47:29 +01001776 struct io_completion_data icd;
Jens Axboe00de55e2007-02-20 10:45:57 +01001777 struct timespec *tvp = NULL;
Jens Axboe97601022007-02-18 12:47:29 +01001778 int ret;
Davide Libenzi4d06a332007-03-22 07:43:50 +01001779 struct timespec ts = { .tv_sec = 0, .tv_nsec = 0, };
Jens Axboe97601022007-02-18 12:47:29 +01001780
Jens Axboe49504212008-06-05 09:03:30 +02001781 dprint(FD_IO, "io_u_queued_completed: min=%d\n", min_evts);
Jens Axboeb271fe62008-02-04 10:49:41 +01001782
Jens Axboe49504212008-06-05 09:03:30 +02001783 if (!min_evts)
Jens Axboe00de55e2007-02-20 10:45:57 +01001784 tvp = &ts;
Jens Axboe97601022007-02-18 12:47:29 +01001785
Jens Axboe49504212008-06-05 09:03:30 +02001786 ret = td_io_getevents(td, min_evts, td->o.iodepth_batch_complete, tvp);
Jens Axboe97601022007-02-18 12:47:29 +01001787 if (ret < 0) {
Jens Axboee1161c32007-02-22 19:36:48 +01001788 td_verror(td, -ret, "td_io_getevents");
Jens Axboe97601022007-02-18 12:47:29 +01001789 return ret;
1790 } else if (!ret)
1791 return ret;
1792
Jens Axboe9520ebb2008-10-16 21:03:27 +02001793 init_icd(td, &icd, ret);
Jens Axboe97601022007-02-18 12:47:29 +01001794 ios_completed(td, &icd);
Jens Axboe581e7142009-06-09 12:47:16 +02001795 if (icd.error) {
1796 td_verror(td, icd.error, "io_u_queued_complete");
1797 return -1;
1798 }
Jens Axboe97601022007-02-18 12:47:29 +01001799
Jens Axboe581e7142009-06-09 12:47:16 +02001800 if (bytes) {
Shaohua Li6eaf09d2012-09-14 08:49:43 +02001801 int ddir;
1802
1803 for (ddir = DDIR_READ; ddir < DDIR_RWDIR_CNT; ddir++)
1804 bytes[ddir] += icd.bytes_done[ddir];
Jens Axboe581e7142009-06-09 12:47:16 +02001805 }
1806
1807 return 0;
Jens Axboe97601022007-02-18 12:47:29 +01001808}
Jens Axboe7e77dd02007-02-20 10:57:34 +01001809
1810/*
 1811 * Call this when the io_u is actually queued, to record the submission latency.
1812 */
1813void io_u_queued(struct thread_data *td, struct io_u *io_u)
1814{
Jens Axboe9520ebb2008-10-16 21:03:27 +02001815 if (!td->o.disable_slat) {
1816 unsigned long slat_time;
Jens Axboe7e77dd02007-02-20 10:57:34 +01001817
Jens Axboe9520ebb2008-10-16 21:03:27 +02001818 slat_time = utime_since(&io_u->start_time, &io_u->issue_time);
Jens Axboe29a90dd2009-06-10 06:57:47 +02001819 add_slat_sample(td, io_u->ddir, slat_time, io_u->xfer_buflen);
Jens Axboe9520ebb2008-10-16 21:03:27 +02001820 }
Jens Axboe7e77dd02007-02-20 10:57:34 +01001821}
Jens Axboe433afcb2007-02-22 10:39:01 +01001822
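/*
 * Fill a buffer according to the job options: a user supplied pattern,
 * random data (optionally only partially random to hit a requested
 * compression percentage), or zeroes.
 */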
Jens Axboecc86c392013-05-03 15:12:33 +02001823void fill_io_buffer(struct thread_data *td, void *buf, unsigned int min_write,
1824 unsigned int max_bs)
Jens Axboe5973caf2008-05-21 19:52:35 +02001825{
Jens Axboece35b1e2014-01-14 15:35:58 -07001826 if (td->o.buffer_pattern_bytes)
1827 fill_buffer_pattern(td, buf, max_bs);
1828 else if (!td->o.zero_buffers) {
Jens Axboe9c426842012-03-02 21:02:12 +01001829 unsigned int perc = td->o.compress_percentage;
1830
1831 if (perc) {
Jens Axboef97a43a2012-03-09 19:06:24 +01001832 unsigned int seg = min_write;
1833
1834 seg = min(min_write, td->o.compress_chunk);
Jens Axboecc86c392013-05-03 15:12:33 +02001835 if (!seg)
1836 seg = min_write;
1837
1838 fill_random_buf_percentage(&td->buf_state, buf,
Jens Axboef97a43a2012-03-09 19:06:24 +01001839 perc, seg, max_bs);
Jens Axboe9c426842012-03-02 21:02:12 +01001840 } else
Jens Axboecc86c392013-05-03 15:12:33 +02001841 fill_random_buf(&td->buf_state, buf, max_bs);
Jens Axboe9c426842012-03-02 21:02:12 +01001842 } else
Jens Axboecc86c392013-05-03 15:12:33 +02001843 memset(buf, 0, max_bs);
1844}
1845
1846/*
1847 * "randomly" fill the buffer contents
1848 */
1849void io_u_fill_buffer(struct thread_data *td, struct io_u *io_u,
1850 unsigned int min_write, unsigned int max_bs)
1851{
1852 io_u->buf_filled_len = 0;
1853 fill_io_buffer(td, io_u->buf, min_write, max_bs);
Jens Axboe5973caf2008-05-21 19:52:35 +02001854}