#include <unistd.h>
#include <fcntl.h>
#include <string.h>
#include <signal.h>
#include <time.h>
#include <assert.h>

#include "fio.h"
#include "hash.h"
#include "verify.h"
#include "trim.h"
#include "lib/rand.h"
#include "lib/axmap.h"

struct io_completion_data {
	int nr;				/* input */

	int error;			/* output */
	uint64_t bytes_done[DDIR_RWDIR_CNT];	/* output */
	struct timeval time;		/* output */
};

/*
 * The ->io_axmap contains a map of blocks we have or have not done io
 * to yet. Used to make sure we cover the entire range in a fair fashion.
 */
static int random_map_free(struct fio_file *f, const uint64_t block)
{
	return !axmap_isset(f->io_axmap, block);
}

/*
 * Mark a given offset as used in the map.
 */
static void mark_random_map(struct thread_data *td, struct io_u *io_u)
{
	unsigned int min_bs = td->o.rw_min_bs;
	struct fio_file *f = io_u->file;
	unsigned int nr_blocks;
	uint64_t block;

	block = (io_u->offset - f->file_offset) / (uint64_t) min_bs;
	nr_blocks = (io_u->buflen + min_bs - 1) / min_bs;

	if (!(io_u->flags & IO_U_F_BUSY_OK))
		nr_blocks = axmap_set_nr(f->io_axmap, block, nr_blocks);

	if ((nr_blocks * min_bs) < io_u->buflen)
		io_u->buflen = nr_blocks * min_bs;
}

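/*
 * Return the number of blocks this file spans, in units of the block
 * alignment for the given data direction, capped by the real file size
 * and (if set) the zone range.
 */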
static uint64_t last_block(struct thread_data *td, struct fio_file *f,
			   enum fio_ddir ddir)
{
	uint64_t max_blocks;
	uint64_t max_size;

	assert(ddir_rw(ddir));

	/*
	 * Hmm, should we make sure that ->io_size <= ->real_file_size?
	 */
	max_size = f->io_size;
	if (max_size > f->real_file_size)
		max_size = f->real_file_size;

	if (td->o.zone_range)
		max_size = td->o.zone_range;

	max_blocks = max_size / (uint64_t) td->o.ba[ddir];
	if (!max_blocks)
		return 0;

	return max_blocks;
}

struct rand_off {
	struct flist_head list;
	uint64_t off;
};

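/*
 * Pick a random block for the file, using either the tausworthe/OS PRNG
 * or the LFSR generator. If a random map is kept, steer the result to a
 * block that has not been done yet.
 */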
static int __get_next_rand_offset(struct thread_data *td, struct fio_file *f,
				  enum fio_ddir ddir, uint64_t *b)
{
	uint64_t r, lastb;

	lastb = last_block(td, f, ddir);
	if (!lastb)
		return 1;

	if (td->o.random_generator == FIO_RAND_GEN_TAUSWORTHE) {
		uint64_t rmax;

		rmax = td->o.use_os_rand ? OS_RAND_MAX : FRAND_MAX;

		if (td->o.use_os_rand) {
			rmax = OS_RAND_MAX;
			r = os_random_long(&td->random_state);
		} else {
			rmax = FRAND_MAX;
			r = __rand(&td->__random_state);
		}

		dprint(FD_RANDOM, "off rand %llu\n", (unsigned long long) r);

		*b = (lastb - 1) * (r / ((uint64_t) rmax + 1.0));
	} else {
		uint64_t off = 0;

		if (lfsr_next(&f->lfsr, &off, lastb))
			return 1;

		*b = off;
	}

	/*
	 * if we are not maintaining a random map, we are done.
	 */
	if (!file_randommap(td, f))
		goto ret;

	/*
	 * calculate map offset and check if it's free
	 */
	if (random_map_free(f, *b))
		goto ret;

	dprint(FD_RANDOM, "get_next_rand_offset: offset %llu busy\n",
		(unsigned long long) *b);

	*b = axmap_next_free(f->io_axmap, *b);
	if (*b == (uint64_t) -1ULL)
		return 1;
ret:
	return 0;
}

static int __get_next_rand_offset_zipf(struct thread_data *td,
				       struct fio_file *f, enum fio_ddir ddir,
				       uint64_t *b)
{
	*b = zipf_next(&f->zipf);
	return 0;
}

static int __get_next_rand_offset_pareto(struct thread_data *td,
					 struct fio_file *f, enum fio_ddir ddir,
					 uint64_t *b)
{
	*b = pareto_next(&f->zipf);
	return 0;
}

static int flist_cmp(void *data, struct flist_head *a, struct flist_head *b)
{
	struct rand_off *r1 = flist_entry(a, struct rand_off, list);
	struct rand_off *r2 = flist_entry(b, struct rand_off, list);

	return r1->off - r2->off;
}

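/*
 * Dispatch to the configured random distribution (uniform, zipf or pareto).
 */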
static int get_off_from_method(struct thread_data *td, struct fio_file *f,
			       enum fio_ddir ddir, uint64_t *b)
{
	if (td->o.random_distribution == FIO_RAND_DIST_RANDOM)
		return __get_next_rand_offset(td, f, ddir, b);
	else if (td->o.random_distribution == FIO_RAND_DIST_ZIPF)
		return __get_next_rand_offset_zipf(td, f, ddir, b);
	else if (td->o.random_distribution == FIO_RAND_DIST_PARETO)
		return __get_next_rand_offset_pareto(td, f, ddir, b);

	log_err("fio: unknown random distribution: %d\n", td->o.random_distribution);
	return 1;
}

/*
 * Sort the reads for a verify phase in batches of verifysort_nr, if
 * specified.
 */
static inline int should_sort_io(struct thread_data *td)
{
	if (!td->o.verifysort_nr || !td->o.do_verify)
		return 0;
	if (!td_random(td))
		return 0;
	if (td->runstate != TD_VERIFYING)
		return 0;
	if (td->o.random_generator == FIO_RAND_GEN_TAUSWORTHE)
		return 0;

	return 1;
}

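/*
 * For mixed random/sequential workloads, roll a 1-100 value against the
 * configured random percentage for this data direction.
 */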
static int should_do_random(struct thread_data *td, enum fio_ddir ddir)
{
	unsigned int v;
	unsigned long r;

	if (td->o.perc_rand[ddir] == 100)
		return 1;

	if (td->o.use_os_rand) {
		r = os_random_long(&td->seq_rand_state[ddir]);
		v = 1 + (int) (100.0 * (r / (OS_RAND_MAX + 1.0)));
	} else {
		r = __rand(&td->__seq_rand_state[ddir]);
		v = 1 + (int) (100.0 * (r / (FRAND_MAX + 1.0)));
	}

	return v <= td->o.perc_rand[ddir];
}

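/*
 * Get the next random offset. If verify sorting is active, generate a
 * batch of offsets, sort them, and hand them out in ascending order.
 */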
static int get_next_rand_offset(struct thread_data *td, struct fio_file *f,
				enum fio_ddir ddir, uint64_t *b)
{
	struct rand_off *r;
	int i, ret = 1;

	if (!should_sort_io(td))
		return get_off_from_method(td, f, ddir, b);

	if (!flist_empty(&td->next_rand_list)) {
		struct rand_off *r;
fetch:
		r = flist_entry(td->next_rand_list.next, struct rand_off, list);
		flist_del(&r->list);
		*b = r->off;
		free(r);
		return 0;
	}

	for (i = 0; i < td->o.verifysort_nr; i++) {
		r = malloc(sizeof(*r));

		ret = get_off_from_method(td, f, ddir, &r->off);
		if (ret) {
			free(r);
			break;
		}

		flist_add(&r->list, &td->next_rand_list);
	}

	if (ret && !i)
		return ret;

	assert(!flist_empty(&td->next_rand_list));
	flist_sort(NULL, &td->next_rand_list, flist_cmp);
	goto fetch;
}

static int get_next_rand_block(struct thread_data *td, struct fio_file *f,
			       enum fio_ddir ddir, uint64_t *b)
{
	if (!get_next_rand_offset(td, f, ddir, b))
		return 0;

	if (td->o.time_based) {
		fio_file_reset(td, f);
		if (!get_next_rand_offset(td, f, ddir, b))
			return 0;
	}

	dprint(FD_IO, "%s: rand offset failed, last=%llu, size=%llu\n",
			f->file_name, (unsigned long long) f->last_pos,
			(unsigned long long) f->real_file_size);
	return 1;
}

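/*
 * Compute the next sequential offset from the last position, wrapping
 * around for time based jobs and applying the ddir_seq_add increment.
 */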
static int get_next_seq_offset(struct thread_data *td, struct fio_file *f,
			       enum fio_ddir ddir, uint64_t *offset)
{
	assert(ddir_rw(ddir));

	if (f->last_pos >= f->io_size + get_start_offset(td) && td->o.time_based)
		f->last_pos = f->last_pos - f->io_size;

	if (f->last_pos < f->real_file_size) {
		uint64_t pos;

		if (f->last_pos == f->file_offset && td->o.ddir_seq_add < 0)
			f->last_pos = f->real_file_size;

		pos = f->last_pos - f->file_offset;
		if (pos)
			pos += td->o.ddir_seq_add;

		*offset = pos;
		return 0;
	}

	return 1;
}

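/*
 * Pick the next block or offset for an io_u, honoring the configured mix
 * of sequential and random io and falling back between the generators
 * when one of them runs out.
 */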
static int get_next_block(struct thread_data *td, struct io_u *io_u,
			  enum fio_ddir ddir, int rw_seq,
			  unsigned int *is_random)
{
	struct fio_file *f = io_u->file;
	uint64_t b, offset;
	int ret;

	assert(ddir_rw(ddir));

	b = offset = -1ULL;

	if (rw_seq) {
		if (td_random(td)) {
			if (should_do_random(td, ddir)) {
				ret = get_next_rand_block(td, f, ddir, &b);
				*is_random = 1;
			} else {
				*is_random = 0;
				io_u->flags |= IO_U_F_BUSY_OK;
				ret = get_next_seq_offset(td, f, ddir, &offset);
				if (ret)
					ret = get_next_rand_block(td, f, ddir, &b);
			}
		} else {
			*is_random = 0;
			ret = get_next_seq_offset(td, f, ddir, &offset);
		}
	} else {
		io_u->flags |= IO_U_F_BUSY_OK;
		*is_random = 0;

		if (td->o.rw_seq == RW_SEQ_SEQ) {
			ret = get_next_seq_offset(td, f, ddir, &offset);
			if (ret) {
				ret = get_next_rand_block(td, f, ddir, &b);
				*is_random = 0;
			}
		} else if (td->o.rw_seq == RW_SEQ_IDENT) {
			if (f->last_start != -1ULL)
				offset = f->last_start - f->file_offset;
			else
				offset = 0;
			ret = 0;
		} else {
			log_err("fio: unknown rw_seq=%d\n", td->o.rw_seq);
			ret = 1;
		}
	}

	if (!ret) {
		if (offset != -1ULL)
			io_u->offset = offset;
		else if (b != -1ULL)
			io_u->offset = b * td->o.ba[ddir];
		else {
			log_err("fio: bug in offset generation: offset=%llu, b=%llu\n", (unsigned long long) offset, (unsigned long long) b);
			ret = 1;
		}
	}

	return ret;
}

/*
 * For random io, generate a random new block and see if it's used. Repeat
 * until we find a free one. For sequential io, just return the end of
 * the last io issued.
 */
static int __get_next_offset(struct thread_data *td, struct io_u *io_u,
			     unsigned int *is_random)
{
	struct fio_file *f = io_u->file;
	enum fio_ddir ddir = io_u->ddir;
	int rw_seq_hit = 0;

	assert(ddir_rw(ddir));

	if (td->o.ddir_seq_nr && !--td->ddir_seq_nr) {
		rw_seq_hit = 1;
		td->ddir_seq_nr = td->o.ddir_seq_nr;
	}

	if (get_next_block(td, io_u, ddir, rw_seq_hit, is_random))
		return 1;

	if (io_u->offset >= f->io_size) {
		dprint(FD_IO, "get_next_offset: offset %llu >= io_size %llu\n",
					(unsigned long long) io_u->offset,
					(unsigned long long) f->io_size);
		return 1;
	}

	io_u->offset += f->file_offset;
	if (io_u->offset >= f->real_file_size) {
		dprint(FD_IO, "get_next_offset: offset %llu >= size %llu\n",
					(unsigned long long) io_u->offset,
					(unsigned long long) f->real_file_size);
		return 1;
	}

	return 0;
}

static int get_next_offset(struct thread_data *td, struct io_u *io_u,
			   unsigned int *is_random)
{
	if (td->flags & TD_F_PROFILE_OPS) {
		struct prof_io_ops *ops = &td->prof_io_ops;

		if (ops->fill_io_u_off)
			return ops->fill_io_u_off(td, io_u, is_random);
	}

	return __get_next_offset(td, io_u, is_random);
}

static inline int io_u_fits(struct thread_data *td, struct io_u *io_u,
			    unsigned int buflen)
{
	struct fio_file *f = io_u->file;

	return io_u->offset + buflen <= f->io_size + get_start_offset(td);
}

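/*
 * Pick a buffer length between min_bs and max_bs (or from the bssplit
 * table), aligned to the verify interval and block size as needed, and
 * small enough to fit within the file.
 */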
static unsigned int __get_next_buflen(struct thread_data *td, struct io_u *io_u,
				      unsigned int is_random)
{
	int ddir = io_u->ddir;
	unsigned int buflen = 0;
	unsigned int minbs, maxbs;
	unsigned long r, rand_max;

	assert(ddir_rw(io_u->ddir));

	if (td->o.bs_is_seq_rand)
		ddir = is_random ? DDIR_WRITE: DDIR_READ;
	else
		ddir = io_u->ddir;

	minbs = td->o.min_bs[ddir];
	maxbs = td->o.max_bs[ddir];

	if (minbs == maxbs)
		return minbs;

	/*
	 * If we can't satisfy the min block size from here, then fail
	 */
	if (!io_u_fits(td, io_u, minbs))
		return 0;

	if (td->o.use_os_rand)
		rand_max = OS_RAND_MAX;
	else
		rand_max = FRAND_MAX;

	do {
		if (td->o.use_os_rand)
			r = os_random_long(&td->bsrange_state);
		else
			r = __rand(&td->__bsrange_state);

		if (!td->o.bssplit_nr[ddir]) {
			buflen = 1 + (unsigned int) ((double) maxbs *
					(r / (rand_max + 1.0)));
			if (buflen < minbs)
				buflen = minbs;
		} else {
			long perc = 0;
			unsigned int i;

			for (i = 0; i < td->o.bssplit_nr[ddir]; i++) {
				struct bssplit *bsp = &td->o.bssplit[ddir][i];

				buflen = bsp->bs;
				perc += bsp->perc;
				if ((r <= ((rand_max / 100L) * perc)) &&
				    io_u_fits(td, io_u, buflen))
					break;
			}
		}

		if (td->o.do_verify && td->o.verify != VERIFY_NONE)
			buflen = (buflen + td->o.verify_interval - 1) &
				~(td->o.verify_interval - 1);

		if (!td->o.bs_unaligned && is_power_of_2(minbs))
			buflen = (buflen + minbs - 1) & ~(minbs - 1);

	} while (!io_u_fits(td, io_u, buflen));

	return buflen;
}

static unsigned int get_next_buflen(struct thread_data *td, struct io_u *io_u,
				    unsigned int is_random)
{
	if (td->flags & TD_F_PROFILE_OPS) {
		struct prof_io_ops *ops = &td->prof_io_ops;

		if (ops->fill_io_u_size)
			return ops->fill_io_u_size(td, io_u, is_random);
	}

	return __get_next_buflen(td, io_u, is_random);
}

static void set_rwmix_bytes(struct thread_data *td)
{
	unsigned int diff;

	/*
	 * we do time or byte based switch. this is needed because
	 * buffered writes may issue a lot quicker than they complete,
	 * whereas reads do not.
	 */
	diff = td->o.rwmix[td->rwmix_ddir ^ 1];
	td->rwmix_issues = (td->io_issues[td->rwmix_ddir] * diff) / 100;
}

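/*
 * Roll a 1-100 value against the read/write mix to pick a data direction.
 */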
static inline enum fio_ddir get_rand_ddir(struct thread_data *td)
{
	unsigned int v;
	unsigned long r;

	if (td->o.use_os_rand) {
		r = os_random_long(&td->rwmix_state);
		v = 1 + (int) (100.0 * (r / (OS_RAND_MAX + 1.0)));
	} else {
		r = __rand(&td->__rwmix_state);
		v = 1 + (int) (100.0 * (r / (FRAND_MAX + 1.0)));
	}

	if (v <= td->o.rwmix[DDIR_READ])
		return DDIR_READ;

	return DDIR_WRITE;
}

void io_u_quiesce(struct thread_data *td)
{
	/*
	 * We are going to sleep, ensure that we flush anything pending as
	 * not to skew our latency numbers.
	 *
	 * Changed to only monitor 'in flight' requests here instead of the
	 * td->cur_depth, b/c td->cur_depth does not accurately represent
	 * io's that have been actually submitted to an async engine,
	 * and cur_depth is meaningless for sync engines.
	 */
	while (td->io_u_in_flight) {
		int fio_unused ret;

		ret = io_u_queued_complete(td, 1, NULL);
	}
}

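/*
 * Rate limiting: if this direction owes sleep time, either switch to the
 * other direction or sleep off what is owed before returning the
 * direction to use.
 */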
static enum fio_ddir rate_ddir(struct thread_data *td, enum fio_ddir ddir)
{
	enum fio_ddir odir = ddir ^ 1;
	struct timeval t;
	long usec;

	assert(ddir_rw(ddir));

	if (td->rate_pending_usleep[ddir] <= 0)
		return ddir;

	/*
	 * We have too much pending sleep in this direction. See if we
	 * should switch.
	 */
	if (td_rw(td) && td->o.rwmix[odir]) {
		/*
		 * Other direction does not have too much pending, switch
		 */
		if (td->rate_pending_usleep[odir] < 100000)
			return odir;

		/*
		 * Both directions have pending sleep. Sleep the minimum time
		 * and deduct from both.
		 */
		if (td->rate_pending_usleep[ddir] <=
			td->rate_pending_usleep[odir]) {
			usec = td->rate_pending_usleep[ddir];
		} else {
			usec = td->rate_pending_usleep[odir];
			ddir = odir;
		}
	} else
		usec = td->rate_pending_usleep[ddir];

	io_u_quiesce(td);

	fio_gettime(&t, NULL);
	usec_sleep(td, usec);
	usec = utime_since_now(&t);

	td->rate_pending_usleep[ddir] -= usec;

	odir = ddir ^ 1;
	if (td_rw(td) && __should_check_rate(td, odir))
		td->rate_pending_usleep[odir] -= usec;

	if (ddir_trim(ddir))
		return ddir;

	return ddir;
}

/*
 * Return the data direction for the next io_u. If the job is a
 * mixed read/write workload, check the rwmix cycle and switch if
 * necessary.
 */
static enum fio_ddir get_rw_ddir(struct thread_data *td)
{
	enum fio_ddir ddir;

	/*
	 * see if it's time to fsync
	 */
	if (td->o.fsync_blocks &&
	    !(td->io_issues[DDIR_WRITE] % td->o.fsync_blocks) &&
	    td->io_issues[DDIR_WRITE] && should_fsync(td))
		return DDIR_SYNC;

	/*
	 * see if it's time to fdatasync
	 */
	if (td->o.fdatasync_blocks &&
	    !(td->io_issues[DDIR_WRITE] % td->o.fdatasync_blocks) &&
	    td->io_issues[DDIR_WRITE] && should_fsync(td))
		return DDIR_DATASYNC;

	/*
	 * see if it's time to sync_file_range
	 */
	if (td->sync_file_range_nr &&
	    !(td->io_issues[DDIR_WRITE] % td->sync_file_range_nr) &&
	    td->io_issues[DDIR_WRITE] && should_fsync(td))
		return DDIR_SYNC_FILE_RANGE;

	if (td_rw(td)) {
		/*
		 * Check if it's time to seed a new data direction.
		 */
		if (td->io_issues[td->rwmix_ddir] >= td->rwmix_issues) {
			/*
			 * Put a top limit on how many bytes we do for
			 * one data direction, to avoid overflowing the
			 * ranges too much
			 */
			ddir = get_rand_ddir(td);

			if (ddir != td->rwmix_ddir)
				set_rwmix_bytes(td);

			td->rwmix_ddir = ddir;
		}
		ddir = td->rwmix_ddir;
	} else if (td_read(td))
		ddir = DDIR_READ;
	else if (td_write(td))
		ddir = DDIR_WRITE;
	else
		ddir = DDIR_TRIM;

	td->rwmix_ddir = rate_ddir(td, ddir);
	return td->rwmix_ddir;
}

static void set_rw_ddir(struct thread_data *td, struct io_u *io_u)
{
	io_u->ddir = io_u->acct_ddir = get_rw_ddir(td);

	if (io_u->ddir == DDIR_WRITE && (td->io_ops->flags & FIO_BARRIER) &&
	    td->o.barrier_blocks &&
	    !(td->io_issues[DDIR_WRITE] % td->o.barrier_blocks) &&
	    td->io_issues[DDIR_WRITE])
		io_u->flags |= IO_U_F_BARRIER;
}

void put_file_log(struct thread_data *td, struct fio_file *f)
{
	int ret = put_file(td, f);

	if (ret)
		td_verror(td, ret, "file close");
}

void put_io_u(struct thread_data *td, struct io_u *io_u)
{
	td_io_u_lock(td);

	if (io_u->file && !(io_u->flags & IO_U_F_FREE_DEF))
		put_file_log(td, io_u->file);
	io_u->file = NULL;
	io_u->flags &= ~IO_U_F_FREE_DEF;
	io_u->flags |= IO_U_F_FREE;

	if (io_u->flags & IO_U_F_IN_CUR_DEPTH)
		td->cur_depth--;
	io_u_qpush(&td->io_u_freelist, io_u);
	td_io_u_unlock(td);
	td_io_u_free_notify(td);
}

void clear_io_u(struct thread_data *td, struct io_u *io_u)
{
	io_u->flags &= ~IO_U_F_FLIGHT;
	put_io_u(td, io_u);
}

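/*
 * Return an io_u to the requeue list, undoing the issue accounting if it
 * was already in flight.
 */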
void requeue_io_u(struct thread_data *td, struct io_u **io_u)
{
	struct io_u *__io_u = *io_u;
	enum fio_ddir ddir = acct_ddir(__io_u);

	dprint(FD_IO, "requeue %p\n", __io_u);

	td_io_u_lock(td);

	__io_u->flags |= IO_U_F_FREE;
	if ((__io_u->flags & IO_U_F_FLIGHT) && ddir_rw(ddir))
		td->io_issues[ddir]--;

	__io_u->flags &= ~IO_U_F_FLIGHT;
	if (__io_u->flags & IO_U_F_IN_CUR_DEPTH)
		td->cur_depth--;

	io_u_rpush(&td->io_u_requeues, __io_u);
	td_io_u_unlock(td);
	*io_u = NULL;
}

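/*
 * Fill in the offset, length and direction for an io_u. Returns 0 on
 * success, 1 if no further io can be generated for this file.
 */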
static int fill_io_u(struct thread_data *td, struct io_u *io_u)
{
	unsigned int is_random;

	if (td->io_ops->flags & FIO_NOIO)
		goto out;

	set_rw_ddir(td, io_u);

	/*
	 * fsync() or fdatasync() or trim etc, we are done
	 */
	if (!ddir_rw(io_u->ddir))
		goto out;

	/*
	 * See if it's time to switch to a new zone
	 */
	if (td->zone_bytes >= td->o.zone_size && td->o.zone_skip) {
		td->zone_bytes = 0;
		io_u->file->file_offset += td->o.zone_range + td->o.zone_skip;
		io_u->file->last_pos = io_u->file->file_offset;
		td->io_skip_bytes += td->o.zone_skip;
	}

	/*
	 * No log, let the seq/rand engine retrieve the next buflen and
	 * position.
	 */
	if (get_next_offset(td, io_u, &is_random)) {
		dprint(FD_IO, "io_u %p, failed getting offset\n", io_u);
		return 1;
	}

	io_u->buflen = get_next_buflen(td, io_u, is_random);
	if (!io_u->buflen) {
		dprint(FD_IO, "io_u %p, failed getting buflen\n", io_u);
		return 1;
	}

	if (io_u->offset + io_u->buflen > io_u->file->real_file_size) {
		dprint(FD_IO, "io_u %p, offset too large\n", io_u);
		dprint(FD_IO, "  off=%llu/%lu > %llu\n",
			(unsigned long long) io_u->offset, io_u->buflen,
			(unsigned long long) io_u->file->real_file_size);
		return 1;
	}

	/*
	 * mark entry before potentially trimming io_u
	 */
	if (td_random(td) && file_randommap(td, io_u->file))
		mark_random_map(td, io_u);

out:
	dprint_io_u(io_u, "fill_io_u");
	td->zone_bytes += io_u->buflen;
	return 0;
}

static void __io_u_mark_map(unsigned int *map, unsigned int nr)
{
	int idx = 0;

	switch (nr) {
	default:
		idx = 6;
		break;
	case 33 ... 64:
		idx = 5;
		break;
	case 17 ... 32:
		idx = 4;
		break;
	case 9 ... 16:
		idx = 3;
		break;
	case 5 ... 8:
		idx = 2;
		break;
	case 1 ... 4:
		idx = 1;
	case 0:
		break;
	}

	map[idx]++;
}

void io_u_mark_submit(struct thread_data *td, unsigned int nr)
{
	__io_u_mark_map(td->ts.io_u_submit, nr);
	td->ts.total_submit++;
}

void io_u_mark_complete(struct thread_data *td, unsigned int nr)
{
	__io_u_mark_map(td->ts.io_u_complete, nr);
	td->ts.total_complete++;
}

void io_u_mark_depth(struct thread_data *td, unsigned int nr)
{
	int idx = 0;

	switch (td->cur_depth) {
	default:
		idx = 6;
		break;
	case 32 ... 63:
		idx = 5;
		break;
	case 16 ... 31:
		idx = 4;
		break;
	case 8 ... 15:
		idx = 3;
		break;
	case 4 ... 7:
		idx = 2;
		break;
	case 2 ... 3:
		idx = 1;
	case 1:
		break;
	}

	td->ts.io_u_map[idx] += nr;
}

static void io_u_mark_lat_usec(struct thread_data *td, unsigned long usec)
{
	int idx = 0;

	assert(usec < 1000);

	switch (usec) {
	case 750 ... 999:
		idx = 9;
		break;
	case 500 ... 749:
		idx = 8;
		break;
	case 250 ... 499:
		idx = 7;
		break;
	case 100 ... 249:
		idx = 6;
		break;
	case 50 ... 99:
		idx = 5;
		break;
	case 20 ... 49:
		idx = 4;
		break;
	case 10 ... 19:
		idx = 3;
		break;
	case 4 ... 9:
		idx = 2;
		break;
	case 2 ... 3:
		idx = 1;
	case 0 ... 1:
		break;
	}

	assert(idx < FIO_IO_U_LAT_U_NR);
	td->ts.io_u_lat_u[idx]++;
}

static void io_u_mark_lat_msec(struct thread_data *td, unsigned long msec)
{
	int idx = 0;

	switch (msec) {
	default:
		idx = 11;
		break;
	case 1000 ... 1999:
		idx = 10;
		break;
	case 750 ... 999:
		idx = 9;
		break;
	case 500 ... 749:
		idx = 8;
		break;
	case 250 ... 499:
		idx = 7;
		break;
	case 100 ... 249:
		idx = 6;
		break;
	case 50 ... 99:
		idx = 5;
		break;
	case 20 ... 49:
		idx = 4;
		break;
	case 10 ... 19:
		idx = 3;
		break;
	case 4 ... 9:
		idx = 2;
		break;
	case 2 ... 3:
		idx = 1;
	case 0 ... 1:
		break;
	}

	assert(idx < FIO_IO_U_LAT_M_NR);
	td->ts.io_u_lat_m[idx]++;
}

static void io_u_mark_latency(struct thread_data *td, unsigned long usec)
{
	if (usec < 1000)
		io_u_mark_lat_usec(td, usec);
	else
		io_u_mark_lat_msec(td, usec / 1000);
}

/*
 * Get next file to service by choosing one at random
 */
static struct fio_file *get_next_file_rand(struct thread_data *td,
					   enum fio_file_flags goodf,
					   enum fio_file_flags badf)
{
	struct fio_file *f;
	int fno;

	do {
		int opened = 0;
		unsigned long r;

		if (td->o.use_os_rand) {
			r = os_random_long(&td->next_file_state);
			fno = (unsigned int) ((double) td->o.nr_files
				* (r / (OS_RAND_MAX + 1.0)));
		} else {
			r = __rand(&td->__next_file_state);
			fno = (unsigned int) ((double) td->o.nr_files
				* (r / (FRAND_MAX + 1.0)));
		}

		f = td->files[fno];
		if (fio_file_done(f))
			continue;

		if (!fio_file_open(f)) {
			int err;

			err = td_io_open_file(td, f);
			if (err)
				continue;
			opened = 1;
		}

		if ((!goodf || (f->flags & goodf)) && !(f->flags & badf)) {
			dprint(FD_FILE, "get_next_file_rand: %p\n", f);
			return f;
		}
		if (opened)
			td_io_close_file(td, f);
	} while (1);
}

/*
 * Get next file to service by doing round robin between all available ones
 */
static struct fio_file *get_next_file_rr(struct thread_data *td, int goodf,
					 int badf)
{
	unsigned int old_next_file = td->next_file;
	struct fio_file *f;

	do {
		int opened = 0;

		f = td->files[td->next_file];

		td->next_file++;
		if (td->next_file >= td->o.nr_files)
			td->next_file = 0;

		dprint(FD_FILE, "trying file %s %x\n", f->file_name, f->flags);
		if (fio_file_done(f)) {
			f = NULL;
			continue;
		}

		if (!fio_file_open(f)) {
			int err;

			err = td_io_open_file(td, f);
			if (err) {
				dprint(FD_FILE, "error %d on open of %s\n",
					err, f->file_name);
				f = NULL;
				continue;
			}
			opened = 1;
		}

		dprint(FD_FILE, "goodf=%x, badf=%x, ff=%x\n", goodf, badf,
							f->flags);
		if ((!goodf || (f->flags & goodf)) && !(f->flags & badf))
			break;

		if (opened)
			td_io_close_file(td, f);

		f = NULL;
	} while (td->next_file != old_next_file);

	dprint(FD_FILE, "get_next_file_rr: %p\n", f);
	return f;
}

static struct fio_file *__get_next_file(struct thread_data *td)
{
	struct fio_file *f;

	assert(td->o.nr_files <= td->files_index);

	if (td->nr_done_files >= td->o.nr_files) {
		dprint(FD_FILE, "get_next_file: nr_open=%d, nr_done=%d,"
				" nr_files=%d\n", td->nr_open_files,
						  td->nr_done_files,
						  td->o.nr_files);
		return NULL;
	}

	f = td->file_service_file;
	if (f && fio_file_open(f) && !fio_file_closing(f)) {
		if (td->o.file_service_type == FIO_FSERVICE_SEQ)
			goto out;
		if (td->file_service_left--)
			goto out;
	}

	if (td->o.file_service_type == FIO_FSERVICE_RR ||
	    td->o.file_service_type == FIO_FSERVICE_SEQ)
		f = get_next_file_rr(td, FIO_FILE_open, FIO_FILE_closing);
	else
		f = get_next_file_rand(td, FIO_FILE_open, FIO_FILE_closing);

	td->file_service_file = f;
	td->file_service_left = td->file_service_nr - 1;
out:
	dprint(FD_FILE, "get_next_file: %p [%s]\n", f, f->file_name);
	return f;
}

static struct fio_file *get_next_file(struct thread_data *td)
{
	if (!(td->flags & TD_F_PROFILE_OPS)) {
		struct prof_io_ops *ops = &td->prof_io_ops;

		if (ops->get_next_file)
			return ops->get_next_file(td);
	}

	return __get_next_file(td);
}

static int set_io_u_file(struct thread_data *td, struct io_u *io_u)
{
	struct fio_file *f;

	do {
		f = get_next_file(td);
		if (!f)
			return 1;

		io_u->file = f;
		get_file(f);

		if (!fill_io_u(td, io_u))
			break;

		put_file_log(td, f);
		td_io_close_file(td, f);
		io_u->file = NULL;
		fio_file_set_done(f);
		td->nr_done_files++;
		dprint(FD_FILE, "%s: is done (%d of %d)\n", f->file_name,
					td->nr_done_files, td->o.nr_files);
	} while (1);

	return 0;
}

static void lat_fatal(struct thread_data *td, struct io_completion_data *icd,
		      unsigned long tusec, unsigned long max_usec)
{
	if (!td->error)
		log_err("fio: latency of %lu usec exceeds specified max (%lu usec)\n", tusec, max_usec);
	td_verror(td, ETIMEDOUT, "max latency exceeded");
	icd->error = ETIMEDOUT;
}

static void lat_new_cycle(struct thread_data *td)
{
	fio_gettime(&td->latency_ts, NULL);
	td->latency_ios = ddir_rw_sum(td->io_blocks);
	td->latency_failed = 0;
}

/*
 * We had an IO outside the latency target. Reduce the queue depth. If we
 * are at QD=1, then it's time to give up.
 */
static int __lat_target_failed(struct thread_data *td)
{
	if (td->latency_qd == 1)
		return 1;

	td->latency_qd_high = td->latency_qd;
	td->latency_qd = (td->latency_qd + td->latency_qd_low) / 2;

	dprint(FD_RATE, "Ramped down: %d %d %d\n", td->latency_qd_low, td->latency_qd, td->latency_qd_high);

	/*
	 * When we ramp QD down, quiesce existing IO to prevent
	 * a storm of ramp downs due to pending higher depth.
	 */
	io_u_quiesce(td);
	lat_new_cycle(td);
	return 0;
}

static int lat_target_failed(struct thread_data *td)
{
	if (td->o.latency_percentile.u.f == 100.0)
		return __lat_target_failed(td);

	td->latency_failed++;
	return 0;
}

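/*
 * Set up latency target tracking. With a latency target configured we
 * start at queue depth 1 and ramp up from there.
 */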
void lat_target_init(struct thread_data *td)
{
	if (td->o.latency_target) {
		dprint(FD_RATE, "Latency target=%llu\n", td->o.latency_target);
		fio_gettime(&td->latency_ts, NULL);
		td->latency_qd = 1;
		td->latency_qd_high = td->o.iodepth;
		td->latency_qd_low = 1;
		td->latency_ios = ddir_rw_sum(td->io_blocks);
	} else
		td->latency_qd = td->o.iodepth;
}

static void lat_target_success(struct thread_data *td)
{
	const unsigned int qd = td->latency_qd;

	td->latency_qd_low = td->latency_qd;

	/*
	 * If we haven't failed yet, we double up to a failing value instead
	 * of bisecting from highest possible queue depth. If we have set
	 * a limit other than td->o.iodepth, bisect between that.
	 */
	if (td->latency_qd_high != td->o.iodepth)
		td->latency_qd = (td->latency_qd + td->latency_qd_high) / 2;
	else
		td->latency_qd *= 2;

	if (td->latency_qd > td->o.iodepth)
		td->latency_qd = td->o.iodepth;

	dprint(FD_RATE, "Ramped up: %d %d %d\n", td->latency_qd_low, td->latency_qd, td->latency_qd_high);
	/*
	 * Same as last one, we are done
	 */
	if (td->latency_qd == qd)
		td->done = 1;

	lat_new_cycle(td);
}

/*
 * Check if we can bump the queue depth
 */
void lat_target_check(struct thread_data *td)
{
	uint64_t usec_window;
	uint64_t ios;
	double success_ios;

	usec_window = utime_since_now(&td->latency_ts);
	if (usec_window < td->o.latency_window)
		return;

	ios = ddir_rw_sum(td->io_blocks) - td->latency_ios;
	success_ios = (double) (ios - td->latency_failed) / (double) ios;
	success_ios *= 100.0;

	dprint(FD_RATE, "Success rate: %.2f%% (target %.2f%%)\n", success_ios, td->o.latency_percentile.u.f);

	if (success_ios >= td->o.latency_percentile.u.f)
		lat_target_success(td);
	else
		__lat_target_failed(td);
}

/*
 * If latency target is enabled, we might be ramping up or down and not
 * using the full queue depth available.
 */
int queue_full(struct thread_data *td)
{
	const int qempty = io_u_qempty(&td->io_u_freelist);

	if (qempty)
		return 1;
	if (!td->o.latency_target)
		return 0;

	return td->cur_depth >= td->latency_qd;
}

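/*
 * Grab an io_u from the requeue or free list, reset its state and
 * account for it in the current depth.
 */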
struct io_u *__get_io_u(struct thread_data *td)
{
	struct io_u *io_u;

	td_io_u_lock(td);

again:
	if (!io_u_rempty(&td->io_u_requeues))
		io_u = io_u_rpop(&td->io_u_requeues);
	else if (!queue_full(td)) {
		io_u = io_u_qpop(&td->io_u_freelist);

		io_u->buflen = 0;
		io_u->resid = 0;
		io_u->file = NULL;
		io_u->end_io = NULL;
	}

	if (io_u) {
		assert(io_u->flags & IO_U_F_FREE);
		io_u->flags &= ~(IO_U_F_FREE | IO_U_F_FREE_DEF);
		io_u->flags &= ~(IO_U_F_TRIMMED | IO_U_F_BARRIER);
		io_u->flags &= ~IO_U_F_VER_LIST;

		io_u->error = 0;
		io_u->acct_ddir = -1;
		td->cur_depth++;
		io_u->flags |= IO_U_F_IN_CUR_DEPTH;
		io_u->ipo = NULL;
	} else if (td->o.verify_async) {
		/*
		 * We ran out, wait for async verify threads to finish and
		 * return one
		 */
		pthread_cond_wait(&td->free_cond, &td->io_u_lock);
		goto again;
	}

	td_io_u_unlock(td);
	return io_u;
}

static int check_get_trim(struct thread_data *td, struct io_u *io_u)
{
	if (!(td->flags & TD_F_TRIM_BACKLOG))
		return 0;

	if (td->trim_entries) {
		int get_trim = 0;

		if (td->trim_batch) {
			td->trim_batch--;
			get_trim = 1;
		} else if (!(td->io_hist_len % td->o.trim_backlog) &&
			 td->last_ddir != DDIR_READ) {
			td->trim_batch = td->o.trim_batch;
			if (!td->trim_batch)
				td->trim_batch = td->o.trim_backlog;
			get_trim = 1;
		}

		if (get_trim && !get_next_trim(td, io_u))
			return 1;
	}

	return 0;
}

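/*
 * If a verify backlog is configured, periodically hand out a pending
 * verify request instead of generating new io.
 */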
1328static int check_get_verify(struct thread_data *td, struct io_u *io_u)
1329{
Jens Axboed72be542012-11-30 19:37:46 +01001330 if (!(td->flags & TD_F_VER_BACKLOG))
1331 return 0;
1332
1333 if (td->io_hist_len) {
Jens Axboe9e144182010-06-15 14:25:36 +02001334 int get_verify = 0;
1335
Jens Axboed1ece0c2012-03-07 09:32:58 +01001336 if (td->verify_batch)
Jens Axboe9e144182010-06-15 14:25:36 +02001337 get_verify = 1;
Jens Axboed1ece0c2012-03-07 09:32:58 +01001338 else if (!(td->io_hist_len % td->o.verify_backlog) &&
Jens Axboe9e144182010-06-15 14:25:36 +02001339 td->last_ddir != DDIR_READ) {
1340 td->verify_batch = td->o.verify_batch;
Jens Axboef8a75c92010-06-15 14:27:28 +02001341 if (!td->verify_batch)
1342 td->verify_batch = td->o.verify_backlog;
Jens Axboe9e144182010-06-15 14:25:36 +02001343 get_verify = 1;
1344 }
1345
Jens Axboed1ece0c2012-03-07 09:32:58 +01001346 if (get_verify && !get_next_verify(td, io_u)) {
1347 td->verify_batch--;
Jens Axboe0d29de82010-09-01 13:54:15 +02001348 return 1;
Jens Axboed1ece0c2012-03-07 09:32:58 +01001349 }
Jens Axboe9e144182010-06-15 14:25:36 +02001350 }
1351
Jens Axboe0d29de82010-09-01 13:54:15 +02001352 return 0;
1353}
1354
1355/*
Jens Axboede789762011-09-16 22:11:23 +02001356 * Fill the offset and start time into the buffer content, so the data
Jens Axboe23f394d2011-09-16 22:45:27 +02001357 * is not trivially compressible for simple de-dupe attempts. Do this for every
1358 * 512b block in the range, since that should be the smallest block size
1359 * we can expect from a device.
Jens Axboede789762011-09-16 22:11:23 +02001360 */
1361static void small_content_scramble(struct io_u *io_u)
1362{
Jens Axboe23f394d2011-09-16 22:45:27 +02001363 unsigned int i, nr_blocks = io_u->buflen / 512;
Jens Axboe1ae83d42013-01-12 01:44:15 -07001364 uint64_t boffset;
Jens Axboe23f394d2011-09-16 22:45:27 +02001365 unsigned int offset;
1366 void *p, *end;
Jens Axboede789762011-09-16 22:11:23 +02001367
Jens Axboe23f394d2011-09-16 22:45:27 +02001368 if (!nr_blocks)
1369 return;
1370
1371 p = io_u->xfer_buf;
Jens Axboefba76ee2011-09-27 14:27:48 -06001372 boffset = io_u->offset;
Jens Axboe81f03662012-02-02 09:20:09 +01001373 io_u->buf_filled_len = 0;
Jens Axboefad82f72011-09-19 11:33:30 +02001374
Jens Axboe23f394d2011-09-16 22:45:27 +02001375 for (i = 0; i < nr_blocks; i++) {
1376 /*
1377 * Fill the byte offset into a "random" start offset of
 1378		 * the buffer, given by the usec time xor'ed with the
 1379		 * actual block offset.
1380 */
Jens Axboefad82f72011-09-19 11:33:30 +02001381 offset = (io_u->start_time.tv_usec ^ boffset) & 511;
Jens Axboe1ae83d42013-01-12 01:44:15 -07001382 offset &= ~(sizeof(uint64_t) - 1);
1383 if (offset >= 512 - sizeof(uint64_t))
1384 offset -= sizeof(uint64_t);
Jens Axboefba76ee2011-09-27 14:27:48 -06001385 memcpy(p + offset, &boffset, sizeof(boffset));
Jens Axboe23f394d2011-09-16 22:45:27 +02001386
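		/*
		 * Also stamp the io_u start time into the tail of this
		 * 512b block.
		 */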
1387 end = p + 512 - sizeof(io_u->start_time);
1388 memcpy(end, &io_u->start_time, sizeof(io_u->start_time));
1389 p += 512;
Jens Axboefad82f72011-09-19 11:33:30 +02001390 boffset += 512;
Jens Axboe23f394d2011-09-16 22:45:27 +02001391 }
Jens Axboede789762011-09-16 22:11:23 +02001392}
1393
1394/*
Jens Axboe0d29de82010-09-01 13:54:15 +02001395 * Return an io_u to be processed. Gets a buflen and offset, sets direction,
1396 * etc. The returned io_u is fully ready to be prepped and submitted.
1397 */
1398struct io_u *get_io_u(struct thread_data *td)
1399{
1400 struct fio_file *f;
1401 struct io_u *io_u;
Jens Axboede789762011-09-16 22:11:23 +02001402 int do_scramble = 0;
Jens Axboe0d29de82010-09-01 13:54:15 +02001403
1404 io_u = __get_io_u(td);
1405 if (!io_u) {
1406 dprint(FD_IO, "__get_io_u failed\n");
1407 return NULL;
1408 }
1409
1410 if (check_get_verify(td, io_u))
1411 goto out;
1412 if (check_get_trim(td, io_u))
1413 goto out;
1414
Jens Axboe755200a2007-02-19 13:08:12 +01001415 /*
 1416	 * If this came from a requeue, the io_u is already set up.
1417 */
1418 if (io_u->file)
Jens Axboe77f392b2007-02-19 20:13:09 +01001419 goto out;
Jens Axboe755200a2007-02-19 13:08:12 +01001420
Jens Axboe429f6672007-07-23 10:38:43 +02001421 /*
1422 * If using an iolog, grab next piece if any available.
1423 */
Jens Axboed72be542012-11-30 19:37:46 +01001424 if (td->flags & TD_F_READ_IOLOG) {
Jens Axboe429f6672007-07-23 10:38:43 +02001425 if (read_iolog_get(td, io_u))
1426 goto err_put;
Jens Axboe2ba1c292008-02-01 13:16:38 +01001427 } else if (set_io_u_file(td, io_u)) {
1428 dprint(FD_IO, "io_u %p, setting file failed\n", io_u);
Jens Axboe429f6672007-07-23 10:38:43 +02001429 goto err_put;
Jens Axboe2ba1c292008-02-01 13:16:38 +01001430 }
Jens Axboe5ec10ea2008-03-06 15:42:00 +01001431
Jens Axboe429f6672007-07-23 10:38:43 +02001432 f = io_u->file;
Jens Axboed6aed792009-06-03 08:41:15 +02001433 assert(fio_file_open(f));
Jens Axboe97af62c2007-05-22 11:12:13 +02001434
Jens Axboeff58fce2010-08-25 12:02:08 +02001435 if (ddir_rw(io_u->ddir)) {
Jens Axboed0656a92008-02-01 18:33:23 +01001436 if (!io_u->buflen && !(td->io_ops->flags & FIO_NOIO)) {
Jens Axboe2ba1c292008-02-01 13:16:38 +01001437 dprint(FD_IO, "get_io_u: zero buflen on %p\n", io_u);
Jens Axboe429f6672007-07-23 10:38:43 +02001438 goto err_put;
Jens Axboe2ba1c292008-02-01 13:16:38 +01001439 }
Jens Axboe87dc1ab2006-10-24 14:41:26 +02001440
Jens Axboe38dad622010-07-20 14:46:00 -06001441 f->last_start = io_u->offset;
Jens Axboe36167d82007-02-18 05:41:31 +01001442 f->last_pos = io_u->offset + io_u->buflen;
Jens Axboe87dc1ab2006-10-24 14:41:26 +02001443
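		/*
		 * Writes may need their buffer refilled or scrambled before
		 * being issued, depending on the buffer options in effect.
		 */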
Jens Axboefd684182011-09-19 09:24:44 +02001444 if (io_u->ddir == DDIR_WRITE) {
Jens Axboed72be542012-11-30 19:37:46 +01001445 if (td->flags & TD_F_REFILL_BUFFERS) {
Jens Axboe9c426842012-03-02 21:02:12 +01001446 io_u_fill_buffer(td, io_u,
1447 io_u->xfer_buflen, io_u->xfer_buflen);
Jens Axboed72be542012-11-30 19:37:46 +01001448 } else if (td->flags & TD_F_SCRAMBLE_BUFFERS)
Jens Axboefd684182011-09-19 09:24:44 +02001449 do_scramble = 1;
Jens Axboed72be542012-11-30 19:37:46 +01001450 if (td->flags & TD_F_VER_NONE) {
Jens Axboe629f1d72012-03-09 19:02:01 +01001451 populate_verify_io_u(td, io_u);
1452 do_scramble = 0;
1453 }
Jens Axboefd684182011-09-19 09:24:44 +02001454 } else if (io_u->ddir == DDIR_READ) {
Radha Ramachandrancbe8d752010-07-14 08:36:07 +02001455 /*
 1456			 * Reset the buf_filled parameters so that the next time
 1457			 * the buffer is used for a write, it is refilled.
1458 */
Radha Ramachandrancbe8d752010-07-14 08:36:07 +02001459 io_u->buf_filled_len = 0;
1460 }
Jens Axboe10ba5352006-10-20 11:39:27 +02001461 }
1462
Jens Axboe165faf12007-02-07 11:30:37 +01001463 /*
1464 * Set io data pointers.
1465 */
Jens Axboecec6b552007-02-06 20:15:38 +01001466 io_u->xfer_buf = io_u->buf;
1467 io_u->xfer_buflen = io_u->buflen;
Jens Axboe5973caf2008-05-21 19:52:35 +02001468
Jens Axboe6ac7a332008-03-01 15:22:32 +01001469out:
Jens Axboe0d29de82010-09-01 13:54:15 +02001470 assert(io_u->file);
Jens Axboe429f6672007-07-23 10:38:43 +02001471 if (!td_io_prep(td, io_u)) {
Jens Axboe993bf482008-11-14 13:04:53 +01001472 if (!td->o.disable_slat)
1473 fio_gettime(&io_u->start_time, NULL);
Jens Axboede789762011-09-16 22:11:23 +02001474 if (do_scramble)
1475 small_content_scramble(io_u);
Jens Axboe429f6672007-07-23 10:38:43 +02001476 return io_u;
Jens Axboe36167d82007-02-18 05:41:31 +01001477 }
Jens Axboe429f6672007-07-23 10:38:43 +02001478err_put:
Jens Axboe2ba1c292008-02-01 13:16:38 +01001479 dprint(FD_IO, "get_io_u failed\n");
Jens Axboe429f6672007-07-23 10:38:43 +02001480 put_io_u(td, io_u);
1481 return NULL;
Jens Axboe10ba5352006-10-20 11:39:27 +02001482}
1483
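/*
 * Log the details of a failed io_u, unless the error is non-fatal and
 * error dumping is disabled.
 */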
Jens Axboe54517922007-03-05 10:06:06 +01001484void io_u_log_error(struct thread_data *td, struct io_u *io_u)
1485{
Dmitry Monakhov8b28bd42012-09-23 15:46:09 +04001486 enum error_type_bit eb = td_error_type(io_u->ddir, io_u->error);
Jens Axboe825f8182010-08-25 10:47:18 +02001487	const char *msg[] = { "read", "write", "trim", "sync",
 1488			      "datasync", "sync_file_range", "wait" };
1489
Dmitry Monakhov8b28bd42012-09-23 15:46:09 +04001490 if (td_non_fatal_error(td, eb, io_u->error) && !td->o.error_dump)
1491 return;
Jens Axboe54517922007-03-05 10:06:06 +01001492
1493 log_err("fio: io_u error");
1494
1495 if (io_u->file)
1496 log_err(" on file %s", io_u->file->file_name);
1497
1498 log_err(": %s\n", strerror(io_u->error));
1499
Jens Axboe5ec10ea2008-03-06 15:42:00 +01001500 log_err(" %s offset=%llu, buflen=%lu\n", msg[io_u->ddir],
1501 io_u->offset, io_u->xfer_buflen);
Jens Axboe54517922007-03-05 10:06:06 +01001502
1503 if (!td->error)
1504 td_verror(td, io_u->error, "io_u error");
1505}
1506
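/*
 * Record completion latency, bandwidth and iops samples for a finished
 * io_u, and check the latencies against any configured limits.
 */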
Jens Axboec8eeb9d2011-10-05 14:02:22 +02001507static void account_io_completion(struct thread_data *td, struct io_u *io_u,
1508 struct io_completion_data *icd,
1509 const enum fio_ddir idx, unsigned int bytes)
1510{
Jens Axboe24d23ca2012-11-13 08:31:24 -07001511 unsigned long lusec = 0;
Jens Axboec8eeb9d2011-10-05 14:02:22 +02001512
Jens Axboec8eeb9d2011-10-05 14:02:22 +02001513 if (!td->o.disable_clat || !td->o.disable_bw)
1514 lusec = utime_since(&io_u->issue_time, &icd->time);
1515
1516 if (!td->o.disable_lat) {
1517 unsigned long tusec;
1518
1519 tusec = utime_since(&io_u->start_time, &icd->time);
1520 add_lat_sample(td, idx, tusec, bytes);
Jens Axboe15501532012-10-24 16:37:45 +02001521
Jens Axboed4afedf2013-05-22 22:21:29 +02001522 if (td->flags & TD_F_PROFILE_OPS) {
1523 struct prof_io_ops *ops = &td->prof_io_ops;
1524
1525 if (ops->io_u_lat)
1526 icd->error = ops->io_u_lat(td, tusec);
1527 }
1528
Jens Axboe3e260a42013-12-09 12:38:53 -07001529 if (td->o.max_latency && tusec > td->o.max_latency)
1530 lat_fatal(td, icd, tusec, td->o.max_latency);
1531 if (td->o.latency_target && tusec > td->o.latency_target) {
1532 if (lat_target_failed(td))
1533 lat_fatal(td, icd, tusec, td->o.latency_target);
Jens Axboe15501532012-10-24 16:37:45 +02001534 }
Jens Axboec8eeb9d2011-10-05 14:02:22 +02001535 }
1536
1537 if (!td->o.disable_clat) {
1538 add_clat_sample(td, idx, lusec, bytes);
1539 io_u_mark_latency(td, lusec);
1540 }
1541
1542 if (!td->o.disable_bw)
1543 add_bw_sample(td, idx, bytes, &icd->time);
1544
Erwan Velu9b7e6002013-08-02 16:39:40 +02001545 add_iops_sample(td, idx, bytes, &icd->time);
Jens Axboeddf24e42013-08-09 12:53:44 -06001546
1547 if (td->o.number_ios && !--td->o.number_ios)
1548 td->done = 1;
Jens Axboec8eeb9d2011-10-05 14:02:22 +02001549}
1550
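/*
 * How many usecs the io done so far in this direction should have taken
 * at the configured rate.
 */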
Steven Lang1b8dbf22011-11-09 13:48:01 +01001551static long long usec_for_io(struct thread_data *td, enum fio_ddir ddir)
1552{
Jens Axboe1ae83d42013-01-12 01:44:15 -07001553 uint64_t secs, remainder, bps, bytes;
1554
Steven Lang1b8dbf22011-11-09 13:48:01 +01001555 bytes = td->this_io_bytes[ddir];
1556 bps = td->rate_bps[ddir];
1557 secs = bytes / bps;
1558 remainder = bytes % bps;
1559 return remainder * 1000000 / bps + secs * 1000000;
1560}
1561
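/*
 * Handle completion of a single io_u: update the block and byte counters,
 * do rate and latency accounting, and run any registered end_io callback.
 */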
Jens Axboe97601022007-02-18 12:47:29 +01001562static void io_completed(struct thread_data *td, struct io_u *io_u,
1563 struct io_completion_data *icd)
Jens Axboe10ba5352006-10-20 11:39:27 +02001564{
Jens Axboe44f29692010-03-09 20:09:44 +01001565 struct fio_file *f;
Jens Axboe10ba5352006-10-20 11:39:27 +02001566
Jens Axboe2ba1c292008-02-01 13:16:38 +01001567 dprint_io_u(io_u, "io complete");
1568
Jens Axboe2ecc1b52009-11-04 20:58:09 +01001569 td_io_u_lock(td);
Jens Axboe0c6e7512007-02-22 11:19:39 +01001570 assert(io_u->flags & IO_U_F_FLIGHT);
Jens Axboe38dad622010-07-20 14:46:00 -06001571 io_u->flags &= ~(IO_U_F_FLIGHT | IO_U_F_BUSY_OK);
Jens Axboef9401282014-02-06 12:17:37 -07001572
1573 /*
1574 * Mark IO ok to verify
1575 */
1576 if (io_u->ipo) {
1577 io_u->ipo->flags &= ~IP_F_IN_FLIGHT;
1578 write_barrier();
1579 }
1580
Jens Axboe2ecc1b52009-11-04 20:58:09 +01001581 td_io_u_unlock(td);
Jens Axboe0c6e7512007-02-22 11:19:39 +01001582
Jens Axboe5f9099e2009-06-16 22:40:26 +02001583 if (ddir_sync(io_u->ddir)) {
Jens Axboe87dc1ab2006-10-24 14:41:26 +02001584 td->last_was_sync = 1;
Jens Axboe44f29692010-03-09 20:09:44 +01001585 f = io_u->file;
1586 if (f) {
1587 f->first_write = -1ULL;
1588 f->last_write = -1ULL;
1589 }
Jens Axboe87dc1ab2006-10-24 14:41:26 +02001590 return;
1591 }
1592
1593 td->last_was_sync = 0;
Jens Axboe9e144182010-06-15 14:25:36 +02001594 td->last_ddir = io_u->ddir;
Jens Axboe87dc1ab2006-10-24 14:41:26 +02001595
Jens Axboeff58fce2010-08-25 12:02:08 +02001596 if (!io_u->error && ddir_rw(io_u->ddir)) {
Jens Axboe10ba5352006-10-20 11:39:27 +02001597 unsigned int bytes = io_u->buflen - io_u->resid;
Jens Axboe1e97cce2006-12-05 11:44:16 +01001598 const enum fio_ddir idx = io_u->ddir;
Radha Ramachandranba3e4e02009-12-09 22:31:44 +01001599 const enum fio_ddir odx = io_u->ddir ^ 1;
Jens Axboeb29ee5b2008-09-11 10:17:26 +02001600 int ret;
Jens Axboe10ba5352006-10-20 11:39:27 +02001601
Jens Axboeb29ee5b2008-09-11 10:17:26 +02001602 td->io_blocks[idx]++;
Jens Axboec8eeb9d2011-10-05 14:02:22 +02001603 td->this_io_blocks[idx]++;
Jens Axboeb29ee5b2008-09-11 10:17:26 +02001604 td->io_bytes[idx] += bytes;
webeeae2fafc2012-03-23 13:41:41 +01001605
1606 if (!(io_u->flags & IO_U_F_VER_LIST))
1607 td->this_io_bytes[idx] += bytes;
Jens Axboe10ba5352006-10-20 11:39:27 +02001608
Jens Axboe44f29692010-03-09 20:09:44 +01001609 if (idx == DDIR_WRITE) {
1610 f = io_u->file;
1611 if (f) {
1612 if (f->first_write == -1ULL ||
1613 io_u->offset < f->first_write)
1614 f->first_write = io_u->offset;
1615 if (f->last_write == -1ULL ||
1616 ((io_u->offset + bytes) > f->last_write))
1617 f->last_write = io_u->offset + bytes;
1618 }
1619 }
1620
Steven Lang6b1190f2012-02-07 09:42:59 +01001621 if (ramp_time_over(td) && (td->runstate == TD_RUNNING ||
1622 td->runstate == TD_VERIFYING)) {
Jens Axboec8eeb9d2011-10-05 14:02:22 +02001623 account_io_completion(td, io_u, icd, idx, bytes);
Jens Axboe40e1a6f2009-06-11 10:55:39 +02001624
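			/*
			 * Track how far ahead of the target rate we are (in
			 * usecs), so the main loop knows how long to sleep.
			 */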
Jens Axboeb23b6a22009-06-11 22:06:23 +02001625 if (__should_check_rate(td, idx)) {
Radha Ramachandranba3e4e02009-12-09 22:31:44 +01001626 td->rate_pending_usleep[idx] =
Steven Lang1b8dbf22011-11-09 13:48:01 +01001627 (usec_for_io(td, idx) -
Radha Ramachandranba3e4e02009-12-09 22:31:44 +01001628 utime_since_now(&td->start));
Jens Axboeb23b6a22009-06-11 22:06:23 +02001629 }
Shaohua Li6eaf09d2012-09-14 08:49:43 +02001630 if (idx != DDIR_TRIM && __should_check_rate(td, odx))
Radha Ramachandranba3e4e02009-12-09 22:31:44 +01001631 td->rate_pending_usleep[odx] =
Steven Lang1b8dbf22011-11-09 13:48:01 +01001632 (usec_for_io(td, odx) -
Radha Ramachandranba3e4e02009-12-09 22:31:44 +01001633 utime_since_now(&td->start));
Jens Axboe721938a2008-09-10 09:46:16 +02001634 }
Jens Axboe10ba5352006-10-20 11:39:27 +02001635
Jens Axboeb29ee5b2008-09-11 10:17:26 +02001636 icd->bytes_done[idx] += bytes;
Jens Axboe3af6ef32007-02-18 06:57:43 +01001637
Jens Axboed7762cf2007-02-23 12:34:57 +01001638 if (io_u->end_io) {
Jens Axboe36690c92007-03-26 10:23:34 +02001639 ret = io_u->end_io(td, io_u);
Jens Axboe3af6ef32007-02-18 06:57:43 +01001640 if (ret && !icd->error)
1641 icd->error = ret;
1642 }
Jens Axboeff58fce2010-08-25 12:02:08 +02001643 } else if (io_u->error) {
Jens Axboe10ba5352006-10-20 11:39:27 +02001644 icd->error = io_u->error;
Jens Axboe54517922007-03-05 10:06:06 +01001645 io_u_log_error(td, io_u);
1646 }
Dmitry Monakhov8b28bd42012-09-23 15:46:09 +04001647 if (icd->error) {
1648 enum error_type_bit eb = td_error_type(io_u->ddir, icd->error);
1649 if (!td_non_fatal_error(td, eb, icd->error))
1650 return;
Radha Ramachandranf2bba182009-06-15 08:40:16 +02001651 /*
1652 * If there is a non_fatal error, then add to the error count
1653 * and clear all the errors.
1654 */
1655 update_error_count(td, icd->error);
1656 td_clear_error(td);
1657 icd->error = 0;
1658 io_u->error = 0;
1659 }
Jens Axboe10ba5352006-10-20 11:39:27 +02001660}
1661
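/*
 * Initialize the completion data for reaping 'nr' events.
 */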
Jens Axboe9520ebb2008-10-16 21:03:27 +02001662static void init_icd(struct thread_data *td, struct io_completion_data *icd,
1663 int nr)
Jens Axboe36167d82007-02-18 05:41:31 +01001664{
Shaohua Li6eaf09d2012-09-14 08:49:43 +02001665 int ddir;
Jens Axboe9520ebb2008-10-16 21:03:27 +02001666 if (!td->o.disable_clat || !td->o.disable_bw)
1667 fio_gettime(&icd->time, NULL);
Jens Axboe36167d82007-02-18 05:41:31 +01001668
Jens Axboe3af6ef32007-02-18 06:57:43 +01001669 icd->nr = nr;
1670
Jens Axboe36167d82007-02-18 05:41:31 +01001671 icd->error = 0;
Shaohua Li6eaf09d2012-09-14 08:49:43 +02001672 for (ddir = DDIR_READ; ddir < DDIR_RWDIR_CNT; ddir++)
1673 icd->bytes_done[ddir] = 0;
Jens Axboe36167d82007-02-18 05:41:31 +01001674}
1675
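/*
 * Pull icd->nr completed events from the io engine and complete each one.
 */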
Jens Axboe97601022007-02-18 12:47:29 +01001676static void ios_completed(struct thread_data *td,
1677 struct io_completion_data *icd)
Jens Axboe10ba5352006-10-20 11:39:27 +02001678{
1679 struct io_u *io_u;
1680 int i;
1681
Jens Axboe10ba5352006-10-20 11:39:27 +02001682 for (i = 0; i < icd->nr; i++) {
1683 io_u = td->io_ops->event(td, i);
1684
1685 io_completed(td, io_u, icd);
Jens Axboee8462bd2009-07-06 12:59:04 +02001686
1687 if (!(io_u->flags & IO_U_F_FREE_DEF))
1688 put_io_u(td, io_u);
Jens Axboe10ba5352006-10-20 11:39:27 +02001689 }
1690}
Jens Axboe97601022007-02-18 12:47:29 +01001691
Jens Axboee7e6cfb2007-02-20 10:58:34 +01001692/*
1693 * Complete a single io_u for the sync engines.
1694 */
Jens Axboe581e7142009-06-09 12:47:16 +02001695int io_u_sync_complete(struct thread_data *td, struct io_u *io_u,
Jens Axboe100f49f2013-01-23 10:15:57 -07001696 uint64_t *bytes)
Jens Axboe97601022007-02-18 12:47:29 +01001697{
1698 struct io_completion_data icd;
1699
Jens Axboe9520ebb2008-10-16 21:03:27 +02001700 init_icd(td, &icd, 1);
Jens Axboe97601022007-02-18 12:47:29 +01001701 io_completed(td, io_u, &icd);
Jens Axboee8462bd2009-07-06 12:59:04 +02001702
1703 if (!(io_u->flags & IO_U_F_FREE_DEF))
1704 put_io_u(td, io_u);
Jens Axboe97601022007-02-18 12:47:29 +01001705
Jens Axboe581e7142009-06-09 12:47:16 +02001706 if (icd.error) {
1707 td_verror(td, icd.error, "io_u_sync_complete");
1708 return -1;
1709 }
Jens Axboe97601022007-02-18 12:47:29 +01001710
Jens Axboe581e7142009-06-09 12:47:16 +02001711 if (bytes) {
Shaohua Li6eaf09d2012-09-14 08:49:43 +02001712 int ddir;
1713
1714 for (ddir = DDIR_READ; ddir < DDIR_RWDIR_CNT; ddir++)
1715 bytes[ddir] += icd.bytes_done[ddir];
Jens Axboe581e7142009-06-09 12:47:16 +02001716 }
1717
1718 return 0;
Jens Axboe97601022007-02-18 12:47:29 +01001719}
1720
Jens Axboee7e6cfb2007-02-20 10:58:34 +01001721/*
1722 * Called to complete min_events number of io for the async engines.
1723 */
Jens Axboe581e7142009-06-09 12:47:16 +02001724int io_u_queued_complete(struct thread_data *td, int min_evts,
Jens Axboe100f49f2013-01-23 10:15:57 -07001725 uint64_t *bytes)
Jens Axboe97601022007-02-18 12:47:29 +01001726{
Jens Axboe97601022007-02-18 12:47:29 +01001727 struct io_completion_data icd;
Jens Axboe00de55e2007-02-20 10:45:57 +01001728 struct timespec *tvp = NULL;
Jens Axboe97601022007-02-18 12:47:29 +01001729 int ret;
Davide Libenzi4d06a332007-03-22 07:43:50 +01001730 struct timespec ts = { .tv_sec = 0, .tv_nsec = 0, };
Jens Axboe97601022007-02-18 12:47:29 +01001731
Jens Axboe49504212008-06-05 09:03:30 +02001732 dprint(FD_IO, "io_u_queued_completed: min=%d\n", min_evts);
Jens Axboeb271fe62008-02-04 10:49:41 +01001733
Jens Axboe49504212008-06-05 09:03:30 +02001734 if (!min_evts)
Jens Axboe00de55e2007-02-20 10:45:57 +01001735 tvp = &ts;
Jens Axboe97601022007-02-18 12:47:29 +01001736
Jens Axboe49504212008-06-05 09:03:30 +02001737 ret = td_io_getevents(td, min_evts, td->o.iodepth_batch_complete, tvp);
Jens Axboe97601022007-02-18 12:47:29 +01001738 if (ret < 0) {
Jens Axboee1161c32007-02-22 19:36:48 +01001739 td_verror(td, -ret, "td_io_getevents");
Jens Axboe97601022007-02-18 12:47:29 +01001740 return ret;
1741 } else if (!ret)
1742 return ret;
1743
Jens Axboe9520ebb2008-10-16 21:03:27 +02001744 init_icd(td, &icd, ret);
Jens Axboe97601022007-02-18 12:47:29 +01001745 ios_completed(td, &icd);
Jens Axboe581e7142009-06-09 12:47:16 +02001746 if (icd.error) {
1747 td_verror(td, icd.error, "io_u_queued_complete");
1748 return -1;
1749 }
Jens Axboe97601022007-02-18 12:47:29 +01001750
Jens Axboe581e7142009-06-09 12:47:16 +02001751 if (bytes) {
Shaohua Li6eaf09d2012-09-14 08:49:43 +02001752 int ddir;
1753
1754 for (ddir = DDIR_READ; ddir < DDIR_RWDIR_CNT; ddir++)
1755 bytes[ddir] += icd.bytes_done[ddir];
Jens Axboe581e7142009-06-09 12:47:16 +02001756 }
1757
1758 return 0;
Jens Axboe97601022007-02-18 12:47:29 +01001759}
Jens Axboe7e77dd02007-02-20 10:57:34 +01001760
1761/*
1762 * Call when io_u is really queued, to update the submission latency.
1763 */
1764void io_u_queued(struct thread_data *td, struct io_u *io_u)
1765{
Jens Axboe9520ebb2008-10-16 21:03:27 +02001766 if (!td->o.disable_slat) {
1767 unsigned long slat_time;
Jens Axboe7e77dd02007-02-20 10:57:34 +01001768
Jens Axboe9520ebb2008-10-16 21:03:27 +02001769 slat_time = utime_since(&io_u->start_time, &io_u->issue_time);
Jens Axboe29a90dd2009-06-10 06:57:47 +02001770 add_slat_sample(td, io_u->ddir, slat_time, io_u->xfer_buflen);
Jens Axboe9520ebb2008-10-16 21:03:27 +02001771 }
Jens Axboe7e77dd02007-02-20 10:57:34 +01001772}
Jens Axboe433afcb2007-02-22 10:39:01 +01001773
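/*
 * Fill a buffer with a user supplied pattern, with (optionally partially
 * compressible) random data, or with zeroes, depending on the options set.
 */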
Jens Axboecc86c392013-05-03 15:12:33 +02001774void fill_io_buffer(struct thread_data *td, void *buf, unsigned int min_write,
1775 unsigned int max_bs)
Jens Axboe5973caf2008-05-21 19:52:35 +02001776{
Jens Axboece35b1e2014-01-14 15:35:58 -07001777 if (td->o.buffer_pattern_bytes)
1778 fill_buffer_pattern(td, buf, max_bs);
1779 else if (!td->o.zero_buffers) {
Jens Axboe9c426842012-03-02 21:02:12 +01001780 unsigned int perc = td->o.compress_percentage;
1781
1782 if (perc) {
Jens Axboef97a43a2012-03-09 19:06:24 +01001783 unsigned int seg = min_write;
1784
1785 seg = min(min_write, td->o.compress_chunk);
Jens Axboecc86c392013-05-03 15:12:33 +02001786 if (!seg)
1787 seg = min_write;
1788
1789 fill_random_buf_percentage(&td->buf_state, buf,
Jens Axboef97a43a2012-03-09 19:06:24 +01001790 perc, seg, max_bs);
Jens Axboe9c426842012-03-02 21:02:12 +01001791 } else
Jens Axboecc86c392013-05-03 15:12:33 +02001792 fill_random_buf(&td->buf_state, buf, max_bs);
Jens Axboe9c426842012-03-02 21:02:12 +01001793 } else
Jens Axboecc86c392013-05-03 15:12:33 +02001794 memset(buf, 0, max_bs);
1795}
1796
1797/*
1798 * "randomly" fill the buffer contents
1799 */
1800void io_u_fill_buffer(struct thread_data *td, struct io_u *io_u,
1801 unsigned int min_write, unsigned int max_bs)
1802{
1803 io_u->buf_filled_len = 0;
1804 fill_io_buffer(td, io_u->buf, min_write, max_bs);
Jens Axboe5973caf2008-05-21 19:52:35 +02001805}