#include <unistd.h>
#include <fcntl.h>
#include <string.h>
#include <signal.h>
#include <time.h>
#include <assert.h>

#include "fio.h"
#include "hash.h"
#include "verify.h"
#include "trim.h"
#include "lib/rand.h"
#include "lib/axmap.h"
#include "err.h"

struct io_completion_data {
	int nr;				/* input */

	int error;			/* output */
	uint64_t bytes_done[DDIR_RWDIR_CNT];	/* output */
	struct timeval time;		/* output */
};

/*
 * The ->io_axmap contains a map of blocks we have or have not done io
 * to yet. Used to make sure we cover the entire range in a fair fashion.
 */
static int random_map_free(struct fio_file *f, const uint64_t block)
{
	return !axmap_isset(f->io_axmap, block);
}

/*
 * Mark a given offset as used in the map.
 */
static void mark_random_map(struct thread_data *td, struct io_u *io_u)
{
	unsigned int min_bs = td->o.rw_min_bs;
	struct fio_file *f = io_u->file;
	unsigned int nr_blocks;
	uint64_t block;

	block = (io_u->offset - f->file_offset) / (uint64_t) min_bs;
	nr_blocks = (io_u->buflen + min_bs - 1) / min_bs;

	if (!(io_u->flags & IO_U_F_BUSY_OK))
		nr_blocks = axmap_set_nr(f->io_axmap, block, nr_blocks);

	if ((nr_blocks * min_bs) < io_u->buflen)
		io_u->buflen = nr_blocks * min_bs;
}
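
/*
 * Worked example for the block math above (hypothetical numbers): with
 * rw_min_bs=4096, an io_u at offset 20480 into the file maps to block 5,
 * and a 12288 byte buflen spans (12288 + 4095) / 4096 = 3 blocks. If
 * axmap_set_nr() can only claim the first 2 of those before hitting a
 * block that is already set, buflen is trimmed back to 8192 so that no
 * block is done twice.
 */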

static uint64_t last_block(struct thread_data *td, struct fio_file *f,
			   enum fio_ddir ddir)
{
	uint64_t max_blocks;
	uint64_t max_size;

	assert(ddir_rw(ddir));

	/*
	 * Hmm, should we make sure that ->io_size <= ->real_file_size?
	 */
	max_size = f->io_size;
	if (max_size > f->real_file_size)
		max_size = f->real_file_size;

	if (td->o.zone_range)
		max_size = td->o.zone_range;

	max_blocks = max_size / (uint64_t) td->o.ba[ddir];
	if (!max_blocks)
		return 0;

	return max_blocks;
}

struct rand_off {
	struct flist_head list;
	uint64_t off;
};

static int __get_next_rand_offset(struct thread_data *td, struct fio_file *f,
				  enum fio_ddir ddir, uint64_t *b)
{
	uint64_t r;

	if (td->o.random_generator == FIO_RAND_GEN_TAUSWORTHE) {
		uint64_t lastb;

		lastb = last_block(td, f, ddir);
		if (!lastb)
			return 1;

		r = __rand(&td->random_state);

		dprint(FD_RANDOM, "off rand %llu\n", (unsigned long long) r);

		*b = lastb * (r / ((uint64_t) FRAND_MAX + 1.0));
	} else {
		uint64_t off = 0;

		if (lfsr_next(&f->lfsr, &off))
			return 1;

		*b = off;
	}

	/*
	 * if we are not maintaining a random map, we are done.
	 */
	if (!file_randommap(td, f))
		goto ret;

	/*
	 * calculate map offset and check if it's free
	 */
	if (random_map_free(f, *b))
		goto ret;

	dprint(FD_RANDOM, "get_next_rand_offset: offset %llu busy\n",
		(unsigned long long) *b);

	*b = axmap_next_free(f->io_axmap, *b);
	if (*b == (uint64_t) -1ULL)
		return 1;
ret:
	return 0;
}
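
/*
 * A note on the scaling above, assuming __rand() returns a value in
 * [0, FRAND_MAX]: r / ((uint64_t) FRAND_MAX + 1.0) is a double in
 * [0, 1), so multiplying by lastb yields a block index in
 * [0, lastb - 1] without modulo bias. E.g. (hypothetical numbers)
 * lastb=1000 with r around FRAND_MAX / 2 picks a block near 500.
 */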

static int __get_next_rand_offset_zipf(struct thread_data *td,
				       struct fio_file *f, enum fio_ddir ddir,
				       uint64_t *b)
{
	*b = zipf_next(&f->zipf);
	return 0;
}

static int __get_next_rand_offset_pareto(struct thread_data *td,
					 struct fio_file *f, enum fio_ddir ddir,
					 uint64_t *b)
{
	*b = pareto_next(&f->zipf);
	return 0;
}

static int flist_cmp(void *data, struct flist_head *a, struct flist_head *b)
{
	struct rand_off *r1 = flist_entry(a, struct rand_off, list);
	struct rand_off *r2 = flist_entry(b, struct rand_off, list);

	return r1->off - r2->off;
}

static int get_off_from_method(struct thread_data *td, struct fio_file *f,
			       enum fio_ddir ddir, uint64_t *b)
{
	if (td->o.random_distribution == FIO_RAND_DIST_RANDOM)
		return __get_next_rand_offset(td, f, ddir, b);
	else if (td->o.random_distribution == FIO_RAND_DIST_ZIPF)
		return __get_next_rand_offset_zipf(td, f, ddir, b);
	else if (td->o.random_distribution == FIO_RAND_DIST_PARETO)
		return __get_next_rand_offset_pareto(td, f, ddir, b);

	log_err("fio: unknown random distribution: %d\n", td->o.random_distribution);
	return 1;
}

/*
 * Sort the reads for a verify phase in batches of verifysort_nr, if
 * specified.
 */
static inline int should_sort_io(struct thread_data *td)
{
	if (!td->o.verifysort_nr || !td->o.do_verify)
		return 0;
	if (!td_random(td))
		return 0;
	if (td->runstate != TD_VERIFYING)
		return 0;
	if (td->o.random_generator == FIO_RAND_GEN_TAUSWORTHE)
		return 0;

	return 1;
}

static int should_do_random(struct thread_data *td, enum fio_ddir ddir)
{
	unsigned int v;
	unsigned long r;

	if (td->o.perc_rand[ddir] == 100)
		return 1;

	r = __rand(&td->seq_rand_state[ddir]);
	v = 1 + (int) (100.0 * (r / (FRAND_MAX + 1.0)));

	return v <= td->o.perc_rand[ddir];
}
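
/*
 * The mapping above puts v in [1, 100], so the comparison gives an
 * (approximately) perc_rand[ddir] percent chance of taking the random
 * path. E.g. with percentage_random=70, roughly 70 of every 100 calls
 * return 1.
 */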

static int get_next_rand_offset(struct thread_data *td, struct fio_file *f,
				enum fio_ddir ddir, uint64_t *b)
{
	struct rand_off *r;
	int i, ret = 1;

	if (!should_sort_io(td))
		return get_off_from_method(td, f, ddir, b);

	if (!flist_empty(&td->next_rand_list)) {
fetch:
		r = flist_first_entry(&td->next_rand_list, struct rand_off, list);
		flist_del(&r->list);
		*b = r->off;
		free(r);
		return 0;
	}

	for (i = 0; i < td->o.verifysort_nr; i++) {
		r = malloc(sizeof(*r));

		ret = get_off_from_method(td, f, ddir, &r->off);
		if (ret) {
			free(r);
			break;
		}

		flist_add(&r->list, &td->next_rand_list);
	}

	if (ret && !i)
		return ret;

	assert(!flist_empty(&td->next_rand_list));
	flist_sort(NULL, &td->next_rand_list, flist_cmp);
	goto fetch;
}
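
/*
 * Flow sketch for the sorting path above: up to verifysort_nr offsets
 * are generated, sorted ascending via flist_sort(), then handed out one
 * at a time from the head of td->next_rand_list until the batch drains
 * and the next one is built. Verify reads thus arrive in offset order
 * within each batch.
 */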

static int get_next_rand_block(struct thread_data *td, struct fio_file *f,
			       enum fio_ddir ddir, uint64_t *b)
{
	if (!get_next_rand_offset(td, f, ddir, b))
		return 0;

	if (td->o.time_based) {
		fio_file_reset(td, f);
		if (!get_next_rand_offset(td, f, ddir, b))
			return 0;
	}

	dprint(FD_IO, "%s: rand offset failed, last=%llu, size=%llu\n",
			f->file_name, (unsigned long long) f->last_pos[ddir],
			(unsigned long long) f->real_file_size);
	return 1;
}

static int get_next_seq_offset(struct thread_data *td, struct fio_file *f,
			       enum fio_ddir ddir, uint64_t *offset)
{
	struct thread_options *o = &td->o;

	assert(ddir_rw(ddir));

	if (f->last_pos[ddir] >= f->io_size + get_start_offset(td, f) &&
	    o->time_based)
		f->last_pos[ddir] = f->last_pos[ddir] - f->io_size;

	if (f->last_pos[ddir] < f->real_file_size) {
		uint64_t pos;

		if (f->last_pos[ddir] == f->file_offset && o->ddir_seq_add < 0)
			f->last_pos[ddir] = f->real_file_size;

		pos = f->last_pos[ddir] - f->file_offset;
		if (pos && o->ddir_seq_add) {
			pos += o->ddir_seq_add;

			/*
			 * If we reach beyond the end of the file
			 * with holed IO, wrap around to the
			 * beginning again.
			 */
			if (pos >= f->real_file_size)
				pos = f->file_offset;
		}

		*offset = pos;
		return 0;
	}

	return 1;
}
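
/*
 * E.g. (hypothetical numbers): with ddir_seq_add=8192 and last_pos
 * sitting 4096 bytes past file_offset, pos becomes 4096 + 8192 = 12288,
 * spacing consecutive I/Os 8k apart (holed IO); once pos reaches
 * real_file_size it is reset per the wrap comment above.
 */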

static int get_next_block(struct thread_data *td, struct io_u *io_u,
			  enum fio_ddir ddir, int rw_seq,
			  unsigned int *is_random)
{
	struct fio_file *f = io_u->file;
	uint64_t b, offset;
	int ret;

	assert(ddir_rw(ddir));

	b = offset = -1ULL;

	if (rw_seq) {
		if (td_random(td)) {
			if (should_do_random(td, ddir)) {
				ret = get_next_rand_block(td, f, ddir, &b);
				*is_random = 1;
			} else {
				*is_random = 0;
				io_u->flags |= IO_U_F_BUSY_OK;
				ret = get_next_seq_offset(td, f, ddir, &offset);
				if (ret)
					ret = get_next_rand_block(td, f, ddir, &b);
			}
		} else {
			*is_random = 0;
			ret = get_next_seq_offset(td, f, ddir, &offset);
		}
	} else {
		io_u->flags |= IO_U_F_BUSY_OK;
		*is_random = 0;

		if (td->o.rw_seq == RW_SEQ_SEQ) {
			ret = get_next_seq_offset(td, f, ddir, &offset);
			if (ret) {
				ret = get_next_rand_block(td, f, ddir, &b);
				*is_random = 0;
			}
		} else if (td->o.rw_seq == RW_SEQ_IDENT) {
			if (f->last_start[ddir] != -1ULL)
				offset = f->last_start[ddir] - f->file_offset;
			else
				offset = 0;
			ret = 0;
		} else {
			log_err("fio: unknown rw_seq=%d\n", td->o.rw_seq);
			ret = 1;
		}
	}

	if (!ret) {
		if (offset != -1ULL)
			io_u->offset = offset;
		else if (b != -1ULL)
			io_u->offset = b * td->o.ba[ddir];
		else {
			log_err("fio: bug in offset generation: offset=%llu, b=%llu\n", (unsigned long long) offset, (unsigned long long) b);
			ret = 1;
		}
	}

	return ret;
}
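
/*
 * Decision sketch for get_next_block() above: on an rw_seq switch
 * point, a random job first rolls should_do_random() so that
 * percentage_random is honored; otherwise the sequential offset is
 * tried first and the random generator only serves as a fallback once
 * the sequential space is exhausted. Exactly one of offset/b ends up
 * set, and the !ret branch converts it into io_u->offset.
 */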

/*
 * For random io, generate a random new block and see if it's used. Repeat
 * until we find a free one. For sequential io, just return the end of
 * the last io issued.
 */
static int __get_next_offset(struct thread_data *td, struct io_u *io_u,
			     unsigned int *is_random)
{
	struct fio_file *f = io_u->file;
	enum fio_ddir ddir = io_u->ddir;
	int rw_seq_hit = 0;

	assert(ddir_rw(ddir));

	if (td->o.ddir_seq_nr && !--td->ddir_seq_nr) {
		rw_seq_hit = 1;
		td->ddir_seq_nr = td->o.ddir_seq_nr;
	}

	if (get_next_block(td, io_u, ddir, rw_seq_hit, is_random))
		return 1;

	if (io_u->offset >= f->io_size) {
		dprint(FD_IO, "get_next_offset: offset %llu >= io_size %llu\n",
					(unsigned long long) io_u->offset,
					(unsigned long long) f->io_size);
		return 1;
	}

	io_u->offset += f->file_offset;
	if (io_u->offset >= f->real_file_size) {
		dprint(FD_IO, "get_next_offset: offset %llu >= size %llu\n",
					(unsigned long long) io_u->offset,
					(unsigned long long) f->real_file_size);
		return 1;
	}

	return 0;
}

static int get_next_offset(struct thread_data *td, struct io_u *io_u,
			   unsigned int *is_random)
{
	if (td->flags & TD_F_PROFILE_OPS) {
		struct prof_io_ops *ops = &td->prof_io_ops;

		if (ops->fill_io_u_off)
			return ops->fill_io_u_off(td, io_u, is_random);
	}

	return __get_next_offset(td, io_u, is_random);
}

static inline int io_u_fits(struct thread_data *td, struct io_u *io_u,
			    unsigned int buflen)
{
	struct fio_file *f = io_u->file;

	return io_u->offset + buflen <= f->io_size + get_start_offset(td, f);
}

static unsigned int __get_next_buflen(struct thread_data *td, struct io_u *io_u,
				      unsigned int is_random)
{
	int ddir = io_u->ddir;
	unsigned int buflen = 0;
	unsigned int minbs, maxbs;
	unsigned long r;

	assert(ddir_rw(ddir));

	if (td->o.bs_is_seq_rand)
		ddir = is_random ? DDIR_WRITE: DDIR_READ;

	minbs = td->o.min_bs[ddir];
	maxbs = td->o.max_bs[ddir];

	if (minbs == maxbs)
		return minbs;

	/*
	 * If we can't satisfy the min block size from here, then fail
	 */
	if (!io_u_fits(td, io_u, minbs))
		return 0;

	do {
		r = __rand(&td->bsrange_state);

		if (!td->o.bssplit_nr[ddir]) {
			buflen = 1 + (unsigned int) ((double) maxbs *
					(r / (FRAND_MAX + 1.0)));
			if (buflen < minbs)
				buflen = minbs;
		} else {
			long perc = 0;
			unsigned int i;

			for (i = 0; i < td->o.bssplit_nr[ddir]; i++) {
				struct bssplit *bsp = &td->o.bssplit[ddir][i];

				buflen = bsp->bs;
				perc += bsp->perc;
				if ((r <= ((FRAND_MAX / 100L) * perc)) &&
				    io_u_fits(td, io_u, buflen))
					break;
			}
		}

		if (td->o.do_verify && td->o.verify != VERIFY_NONE)
			buflen = (buflen + td->o.verify_interval - 1) &
				~(td->o.verify_interval - 1);

		if (!td->o.bs_unaligned && is_power_of_2(minbs))
			buflen = (buflen + minbs - 1) & ~(minbs - 1);

	} while (!io_u_fits(td, io_u, buflen));

	return buflen;
}
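
/*
 * bssplit sketch (hypothetical job options): "bssplit=4k/60:16k/40"
 * yields entries {4096, 60} and {16384, 40}. perc accumulates to 60 and
 * then 100, so a draw of r within the first 60% of FRAND_MAX selects 4k
 * and anything above selects 16k, subject to io_u_fits().
 */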

static unsigned int get_next_buflen(struct thread_data *td, struct io_u *io_u,
				    unsigned int is_random)
{
	if (td->flags & TD_F_PROFILE_OPS) {
		struct prof_io_ops *ops = &td->prof_io_ops;

		if (ops->fill_io_u_size)
			return ops->fill_io_u_size(td, io_u, is_random);
	}

	return __get_next_buflen(td, io_u, is_random);
}

static void set_rwmix_bytes(struct thread_data *td)
{
	unsigned int diff;

	/*
	 * we do time or byte based switch. this is needed because
	 * buffered writes may issue a lot quicker than they complete,
	 * whereas reads do not.
	 */
	diff = td->o.rwmix[td->rwmix_ddir ^ 1];
	td->rwmix_issues = (td->io_issues[td->rwmix_ddir] * diff) / 100;
}

static inline enum fio_ddir get_rand_ddir(struct thread_data *td)
{
	unsigned int v;
	unsigned long r;

	r = __rand(&td->rwmix_state);
	v = 1 + (int) (100.0 * (r / (FRAND_MAX + 1.0)));

	if (v <= td->o.rwmix[DDIR_READ])
		return DDIR_READ;

	return DDIR_WRITE;
}
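
/*
 * E.g. with rwmix read/write at 70/30, v lands in [1, 100] and values
 * 1..70 select DDIR_READ, leaving the remaining 30% to DDIR_WRITE.
 */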

void io_u_quiesce(struct thread_data *td)
{
	/*
	 * We are going to sleep, ensure that we flush anything pending so
	 * as not to skew our latency numbers.
	 *
	 * Changed to only monitor 'in flight' requests here instead of
	 * td->cur_depth, b/c td->cur_depth does not accurately represent
	 * io's that have actually been submitted to an async engine,
	 * and cur_depth is meaningless for sync engines.
	 */
	while (td->io_u_in_flight) {
		int fio_unused ret;

		ret = io_u_queued_complete(td, 1, NULL);
	}
}

static enum fio_ddir rate_ddir(struct thread_data *td, enum fio_ddir ddir)
{
	enum fio_ddir odir = ddir ^ 1;
	struct timeval t;
	long usec;

	assert(ddir_rw(ddir));

	if (td->rate_pending_usleep[ddir] <= 0)
		return ddir;

	/*
	 * We have too much pending sleep in this direction. See if we
	 * should switch.
	 */
	if (td_rw(td) && td->o.rwmix[odir]) {
		/*
		 * Other direction does not have too much pending, switch
		 */
		if (td->rate_pending_usleep[odir] < 100000)
			return odir;

		/*
		 * Both directions have pending sleep. Sleep the minimum time
		 * and deduct from both.
		 */
		if (td->rate_pending_usleep[ddir] <=
			td->rate_pending_usleep[odir]) {
			usec = td->rate_pending_usleep[ddir];
		} else {
			usec = td->rate_pending_usleep[odir];
			ddir = odir;
		}
	} else
		usec = td->rate_pending_usleep[ddir];

	io_u_quiesce(td);

	fio_gettime(&t, NULL);
	usec_sleep(td, usec);
	usec = utime_since_now(&t);

	td->rate_pending_usleep[ddir] -= usec;

	odir = ddir ^ 1;
	if (td_rw(td) && __should_check_rate(td, odir))
		td->rate_pending_usleep[odir] -= usec;

	if (ddir == DDIR_TRIM)
		return DDIR_TRIM;

	return ddir;
}

/*
 * Return the data direction for the next io_u. If the job is a
 * mixed read/write workload, check the rwmix cycle and switch if
 * necessary.
 */
static enum fio_ddir get_rw_ddir(struct thread_data *td)
{
	enum fio_ddir ddir;

	/*
	 * see if it's time to fsync
	 */
	if (td->o.fsync_blocks &&
	    !(td->io_issues[DDIR_WRITE] % td->o.fsync_blocks) &&
	    td->io_issues[DDIR_WRITE] && should_fsync(td))
		return DDIR_SYNC;

	/*
	 * see if it's time to fdatasync
	 */
	if (td->o.fdatasync_blocks &&
	    !(td->io_issues[DDIR_WRITE] % td->o.fdatasync_blocks) &&
	    td->io_issues[DDIR_WRITE] && should_fsync(td))
		return DDIR_DATASYNC;

	/*
	 * see if it's time to sync_file_range
	 */
	if (td->sync_file_range_nr &&
	    !(td->io_issues[DDIR_WRITE] % td->sync_file_range_nr) &&
	    td->io_issues[DDIR_WRITE] && should_fsync(td))
		return DDIR_SYNC_FILE_RANGE;

	if (td_rw(td)) {
		/*
		 * Check if it's time to seed a new data direction.
		 */
		if (td->io_issues[td->rwmix_ddir] >= td->rwmix_issues) {
			/*
			 * Put a top limit on how many bytes we do for
			 * one data direction, to avoid overflowing the
			 * ranges too much
			 */
			ddir = get_rand_ddir(td);

			if (ddir != td->rwmix_ddir)
				set_rwmix_bytes(td);

			td->rwmix_ddir = ddir;
		}
		ddir = td->rwmix_ddir;
	} else if (td_read(td))
		ddir = DDIR_READ;
	else if (td_write(td))
		ddir = DDIR_WRITE;
	else
		ddir = DDIR_TRIM;

	td->rwmix_ddir = rate_ddir(td, ddir);
	return td->rwmix_ddir;
}

static void set_rw_ddir(struct thread_data *td, struct io_u *io_u)
{
	io_u->ddir = io_u->acct_ddir = get_rw_ddir(td);

	if (io_u->ddir == DDIR_WRITE && (td->io_ops->flags & FIO_BARRIER) &&
	    td->o.barrier_blocks &&
	    !(td->io_issues[DDIR_WRITE] % td->o.barrier_blocks) &&
	    td->io_issues[DDIR_WRITE])
		io_u->flags |= IO_U_F_BARRIER;
}

void put_file_log(struct thread_data *td, struct fio_file *f)
{
	unsigned int ret = put_file(td, f);

	if (ret)
		td_verror(td, ret, "file close");
}

void put_io_u(struct thread_data *td, struct io_u *io_u)
{
	td_io_u_lock(td);

	if (io_u->file && !(io_u->flags & IO_U_F_NO_FILE_PUT))
		put_file_log(td, io_u->file);

	io_u->file = NULL;
	io_u->flags |= IO_U_F_FREE;

	if (io_u->flags & IO_U_F_IN_CUR_DEPTH)
		td->cur_depth--;
	io_u_qpush(&td->io_u_freelist, io_u);
	td_io_u_unlock(td);
	td_io_u_free_notify(td);
}

void clear_io_u(struct thread_data *td, struct io_u *io_u)
{
	io_u->flags &= ~IO_U_F_FLIGHT;
	put_io_u(td, io_u);
}

void requeue_io_u(struct thread_data *td, struct io_u **io_u)
{
	struct io_u *__io_u = *io_u;
	enum fio_ddir ddir = acct_ddir(__io_u);

	dprint(FD_IO, "requeue %p\n", __io_u);

	td_io_u_lock(td);

	__io_u->flags |= IO_U_F_FREE;
	if ((__io_u->flags & IO_U_F_FLIGHT) && ddir_rw(ddir))
		td->io_issues[ddir]--;

	__io_u->flags &= ~IO_U_F_FLIGHT;
	if (__io_u->flags & IO_U_F_IN_CUR_DEPTH)
		td->cur_depth--;

	io_u_rpush(&td->io_u_requeues, __io_u);
	td_io_u_unlock(td);
	*io_u = NULL;
}

static int fill_io_u(struct thread_data *td, struct io_u *io_u)
{
	unsigned int is_random;

	if (td->io_ops->flags & FIO_NOIO)
		goto out;

	set_rw_ddir(td, io_u);

	/*
	 * fsync() or fdatasync() or trim etc, we are done
	 */
	if (!ddir_rw(io_u->ddir))
		goto out;

	/*
	 * See if it's time to switch to a new zone
	 */
	if (td->zone_bytes >= td->o.zone_size && td->o.zone_skip) {
		struct fio_file *f = io_u->file;

		td->zone_bytes = 0;
		f->file_offset += td->o.zone_range + td->o.zone_skip;

		/*
		 * Wrap from the beginning, if we exceed the file size
		 */
		if (f->file_offset >= f->real_file_size)
			f->file_offset = f->real_file_size - f->file_offset;
		f->last_pos[io_u->ddir] = f->file_offset;
		td->io_skip_bytes += td->o.zone_skip;
	}

	/*
	 * No log, let the seq/rand engine retrieve the next buflen and
	 * position.
	 */
	if (get_next_offset(td, io_u, &is_random)) {
		dprint(FD_IO, "io_u %p, failed getting offset\n", io_u);
		return 1;
	}

	io_u->buflen = get_next_buflen(td, io_u, is_random);
	if (!io_u->buflen) {
		dprint(FD_IO, "io_u %p, failed getting buflen\n", io_u);
		return 1;
	}

	if (io_u->offset + io_u->buflen > io_u->file->real_file_size) {
		dprint(FD_IO, "io_u %p, offset too large\n", io_u);
		dprint(FD_IO, "  off=%llu/%lu > %llu\n",
			(unsigned long long) io_u->offset, io_u->buflen,
			(unsigned long long) io_u->file->real_file_size);
		return 1;
	}

	/*
	 * mark entry before potentially trimming io_u
	 */
	if (td_random(td) && file_randommap(td, io_u->file))
		mark_random_map(td, io_u);

out:
	dprint_io_u(io_u, "fill_io_u");
	td->zone_bytes += io_u->buflen;
	return 0;
}

static void __io_u_mark_map(unsigned int *map, unsigned int nr)
{
	int idx = 0;

	switch (nr) {
	default:
		idx = 6;
		break;
	case 33 ... 64:
		idx = 5;
		break;
	case 17 ... 32:
		idx = 4;
		break;
	case 9 ... 16:
		idx = 3;
		break;
	case 5 ... 8:
		idx = 2;
		break;
	case 1 ... 4:
		idx = 1;
	case 0:
		break;
	}

	map[idx]++;
}
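
/*
 * The buckets above are power-of-two ranges: nr=0 stays in bucket 0,
 * 1-4 lands in bucket 1, 5-8 in 2, 9-16 in 3, up to bucket 6 for
 * anything past 64. The missing break after "case 1 ... 4" is a
 * deliberate fall-through into "case 0", which just breaks with idx
 * already set.
 */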

void io_u_mark_submit(struct thread_data *td, unsigned int nr)
{
	__io_u_mark_map(td->ts.io_u_submit, nr);
	td->ts.total_submit++;
}

void io_u_mark_complete(struct thread_data *td, unsigned int nr)
{
	__io_u_mark_map(td->ts.io_u_complete, nr);
	td->ts.total_complete++;
}

void io_u_mark_depth(struct thread_data *td, unsigned int nr)
{
	int idx = 0;

	switch (td->cur_depth) {
	default:
		idx = 6;
		break;
	case 32 ... 63:
		idx = 5;
		break;
	case 16 ... 31:
		idx = 4;
		break;
	case 8 ... 15:
		idx = 3;
		break;
	case 4 ... 7:
		idx = 2;
		break;
	case 2 ... 3:
		idx = 1;
	case 1:
		break;
	}

	td->ts.io_u_map[idx] += nr;
}

static void io_u_mark_lat_usec(struct thread_data *td, unsigned long usec)
{
	int idx = 0;

	assert(usec < 1000);

	switch (usec) {
	case 750 ... 999:
		idx = 9;
		break;
	case 500 ... 749:
		idx = 8;
		break;
	case 250 ... 499:
		idx = 7;
		break;
	case 100 ... 249:
		idx = 6;
		break;
	case 50 ... 99:
		idx = 5;
		break;
	case 20 ... 49:
		idx = 4;
		break;
	case 10 ... 19:
		idx = 3;
		break;
	case 4 ... 9:
		idx = 2;
		break;
	case 2 ... 3:
		idx = 1;
	case 0 ... 1:
		break;
	}

	assert(idx < FIO_IO_U_LAT_U_NR);
	td->ts.io_u_lat_u[idx]++;
}

static void io_u_mark_lat_msec(struct thread_data *td, unsigned long msec)
{
	int idx = 0;

	switch (msec) {
	default:
		idx = 11;
		break;
	case 1000 ... 1999:
		idx = 10;
		break;
	case 750 ... 999:
		idx = 9;
		break;
	case 500 ... 749:
		idx = 8;
		break;
	case 250 ... 499:
		idx = 7;
		break;
	case 100 ... 249:
		idx = 6;
		break;
	case 50 ... 99:
		idx = 5;
		break;
	case 20 ... 49:
		idx = 4;
		break;
	case 10 ... 19:
		idx = 3;
		break;
	case 4 ... 9:
		idx = 2;
		break;
	case 2 ... 3:
		idx = 1;
	case 0 ... 1:
		break;
	}

	assert(idx < FIO_IO_U_LAT_M_NR);
	td->ts.io_u_lat_m[idx]++;
}

static void io_u_mark_latency(struct thread_data *td, unsigned long usec)
{
	if (usec < 1000)
		io_u_mark_lat_usec(td, usec);
	else
		io_u_mark_lat_msec(td, usec / 1000);
}

/*
 * Get next file to service by choosing one at random
 */
static struct fio_file *get_next_file_rand(struct thread_data *td,
					   enum fio_file_flags goodf,
					   enum fio_file_flags badf)
{
	struct fio_file *f;
	int fno;

	do {
		int opened = 0;
		unsigned long r;

		r = __rand(&td->next_file_state);
		fno = (unsigned int) ((double) td->o.nr_files
				* (r / (FRAND_MAX + 1.0)));

		f = td->files[fno];
		if (fio_file_done(f))
			continue;

		if (!fio_file_open(f)) {
			int err;

			if (td->nr_open_files >= td->o.open_files)
				return ERR_PTR(-EBUSY);

			err = td_io_open_file(td, f);
			if (err)
				continue;
			opened = 1;
		}

		if ((!goodf || (f->flags & goodf)) && !(f->flags & badf)) {
			dprint(FD_FILE, "get_next_file_rand: %p\n", f);
			return f;
		}
		if (opened)
			td_io_close_file(td, f);
	} while (1);
}

/*
 * Get next file to service by doing round robin between all available ones
 */
static struct fio_file *get_next_file_rr(struct thread_data *td, int goodf,
					 int badf)
{
	unsigned int old_next_file = td->next_file;
	struct fio_file *f;

	do {
		int opened = 0;

		f = td->files[td->next_file];

		td->next_file++;
		if (td->next_file >= td->o.nr_files)
			td->next_file = 0;

		dprint(FD_FILE, "trying file %s %x\n", f->file_name, f->flags);
		if (fio_file_done(f)) {
			f = NULL;
			continue;
		}

		if (!fio_file_open(f)) {
			int err;

			if (td->nr_open_files >= td->o.open_files)
				return ERR_PTR(-EBUSY);

			err = td_io_open_file(td, f);
			if (err) {
				dprint(FD_FILE, "error %d on open of %s\n",
					err, f->file_name);
				f = NULL;
				continue;
			}
			opened = 1;
		}

		dprint(FD_FILE, "goodf=%x, badf=%x, ff=%x\n", goodf, badf,
				f->flags);
		if ((!goodf || (f->flags & goodf)) && !(f->flags & badf))
			break;

		if (opened)
			td_io_close_file(td, f);

		f = NULL;
	} while (td->next_file != old_next_file);

	dprint(FD_FILE, "get_next_file_rr: %p\n", f);
	return f;
}

static struct fio_file *__get_next_file(struct thread_data *td)
{
	struct fio_file *f;

	assert(td->o.nr_files <= td->files_index);

	if (td->nr_done_files >= td->o.nr_files) {
		dprint(FD_FILE, "get_next_file: nr_open=%d, nr_done=%d,"
				" nr_files=%d\n", td->nr_open_files,
						  td->nr_done_files,
						  td->o.nr_files);
		return NULL;
	}

	f = td->file_service_file;
	if (f && fio_file_open(f) && !fio_file_closing(f)) {
		if (td->o.file_service_type == FIO_FSERVICE_SEQ)
			goto out;
		if (td->file_service_left--)
			goto out;
	}

	if (td->o.file_service_type == FIO_FSERVICE_RR ||
	    td->o.file_service_type == FIO_FSERVICE_SEQ)
		f = get_next_file_rr(td, FIO_FILE_open, FIO_FILE_closing);
	else
		f = get_next_file_rand(td, FIO_FILE_open, FIO_FILE_closing);

	if (IS_ERR(f))
		return f;

	td->file_service_file = f;
	td->file_service_left = td->file_service_nr - 1;
out:
	if (f)
		dprint(FD_FILE, "get_next_file: %p [%s]\n", f, f->file_name);
	else
		dprint(FD_FILE, "get_next_file: NULL\n");
	return f;
}

static struct fio_file *get_next_file(struct thread_data *td)
{
	if (td->flags & TD_F_PROFILE_OPS) {
		struct prof_io_ops *ops = &td->prof_io_ops;

		if (ops->get_next_file)
			return ops->get_next_file(td);
	}

	return __get_next_file(td);
}

static long set_io_u_file(struct thread_data *td, struct io_u *io_u)
{
	struct fio_file *f;

	do {
		f = get_next_file(td);
		if (IS_ERR_OR_NULL(f))
			return PTR_ERR(f);

		io_u->file = f;
		get_file(f);

		if (!fill_io_u(td, io_u))
			break;

		put_file_log(td, f);
		td_io_close_file(td, f);
		io_u->file = NULL;
		fio_file_set_done(f);
		td->nr_done_files++;
		dprint(FD_FILE, "%s: is done (%d of %d)\n", f->file_name,
					td->nr_done_files, td->o.nr_files);
	} while (1);

	return 0;
}

static void lat_fatal(struct thread_data *td, struct io_completion_data *icd,
		      unsigned long tusec, unsigned long max_usec)
{
	if (!td->error)
		log_err("fio: latency of %lu usec exceeds specified max (%lu usec)\n", tusec, max_usec);
	td_verror(td, ETIMEDOUT, "max latency exceeded");
	icd->error = ETIMEDOUT;
}

static void lat_new_cycle(struct thread_data *td)
{
	fio_gettime(&td->latency_ts, NULL);
	td->latency_ios = ddir_rw_sum(td->io_blocks);
	td->latency_failed = 0;
}

/*
 * We had an IO outside the latency target. Reduce the queue depth. If we
 * are at QD=1, then it's time to give up.
 */
static int __lat_target_failed(struct thread_data *td)
{
	if (td->latency_qd == 1)
		return 1;

	td->latency_qd_high = td->latency_qd;

	if (td->latency_qd == td->latency_qd_low)
		td->latency_qd_low--;

	td->latency_qd = (td->latency_qd + td->latency_qd_low) / 2;

	dprint(FD_RATE, "Ramped down: %d %d %d\n", td->latency_qd_low, td->latency_qd, td->latency_qd_high);

	/*
	 * When we ramp QD down, quiesce existing IO to prevent
	 * a storm of ramp downs due to pending higher depth.
	 */
	io_u_quiesce(td);
	lat_new_cycle(td);
	return 0;
}
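
/*
 * Bisection sketch (hypothetical run): with latency_qd=32 and
 * latency_qd_low=8, a failed window pins latency_qd_high=32 and drops
 * latency_qd to (32 + 8) / 2 = 20 before a new measurement cycle
 * starts.
 */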

static int lat_target_failed(struct thread_data *td)
{
	if (td->o.latency_percentile.u.f == 100.0)
		return __lat_target_failed(td);

	td->latency_failed++;
	return 0;
}

void lat_target_init(struct thread_data *td)
{
	td->latency_end_run = 0;

	if (td->o.latency_target) {
		dprint(FD_RATE, "Latency target=%llu\n", td->o.latency_target);
		fio_gettime(&td->latency_ts, NULL);
		td->latency_qd = 1;
		td->latency_qd_high = td->o.iodepth;
		td->latency_qd_low = 1;
		td->latency_ios = ddir_rw_sum(td->io_blocks);
	} else
		td->latency_qd = td->o.iodepth;
}

void lat_target_reset(struct thread_data *td)
{
	if (!td->latency_end_run)
		lat_target_init(td);
}

static void lat_target_success(struct thread_data *td)
{
	const unsigned int qd = td->latency_qd;
	struct thread_options *o = &td->o;

	td->latency_qd_low = td->latency_qd;

	/*
	 * If we haven't failed yet, we double up to a failing value instead
	 * of bisecting from highest possible queue depth. If we have set
	 * a limit other than td->o.iodepth, bisect between that.
	 */
	if (td->latency_qd_high != o->iodepth)
		td->latency_qd = (td->latency_qd + td->latency_qd_high) / 2;
	else
		td->latency_qd *= 2;

	if (td->latency_qd > o->iodepth)
		td->latency_qd = o->iodepth;

	dprint(FD_RATE, "Ramped up: %d %d %d\n", td->latency_qd_low, td->latency_qd, td->latency_qd_high);

	/*
	 * Same as last one, we are done. Let it run a latency cycle, so
	 * we get only the results from the targeted depth.
	 */
	if (td->latency_qd == qd) {
		if (td->latency_end_run) {
			dprint(FD_RATE, "We are done\n");
			td->done = 1;
		} else {
			dprint(FD_RATE, "Quiesce and final run\n");
			io_u_quiesce(td);
			td->latency_end_run = 1;
			reset_all_stats(td);
			reset_io_stats(td);
		}
	}

	lat_new_cycle(td);
}
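
/*
 * Ramp-up sketch: starting from latency_qd=4 with latency_qd_high still
 * at o->iodepth, the depth doubles to 8. Once a failure has pinned
 * latency_qd_high below o->iodepth, later successes bisect toward it
 * instead, e.g. (4 + 20) / 2 = 12.
 */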
1241
1242/*
1243 * Check if we can bump the queue depth
1244 */
1245void lat_target_check(struct thread_data *td)
1246{
1247 uint64_t usec_window;
1248 uint64_t ios;
1249 double success_ios;
1250
1251 usec_window = utime_since_now(&td->latency_ts);
1252 if (usec_window < td->o.latency_window)
1253 return;
1254
1255 ios = ddir_rw_sum(td->io_blocks) - td->latency_ios;
1256 success_ios = (double) (ios - td->latency_failed) / (double) ios;
1257 success_ios *= 100.0;
1258
1259 dprint(FD_RATE, "Success rate: %.2f%% (target %.2f%%)\n", success_ios, td->o.latency_percentile.u.f);
1260
1261 if (success_ios >= td->o.latency_percentile.u.f)
1262 lat_target_success(td);
1263 else
1264 __lat_target_failed(td);
1265}
1266
1267/*
1268 * If latency target is enabled, we might be ramping up or down and not
1269 * using the full queue depth available.
1270 */
Jens Axboe5a48d302014-09-30 13:29:57 -06001271int queue_full(const struct thread_data *td)
Jens Axboe3e260a42013-12-09 12:38:53 -07001272{
1273 const int qempty = io_u_qempty(&td->io_u_freelist);
1274
1275 if (qempty)
1276 return 1;
1277 if (!td->o.latency_target)
1278 return 0;
1279
1280 return td->cur_depth >= td->latency_qd;
1281}
Jens Axboe429f6672007-07-23 10:38:43 +02001282
Jens Axboe10ba5352006-10-20 11:39:27 +02001283struct io_u *__get_io_u(struct thread_data *td)
1284{
Jens Axboe0cae66f2014-03-03 13:55:32 -07001285 struct io_u *io_u = NULL;
Jens Axboe10ba5352006-10-20 11:39:27 +02001286
Jens Axboede54cfd2014-11-10 20:34:00 -07001287 if (td->stop_io)
1288 return NULL;
1289
Jens Axboee8462bd2009-07-06 12:59:04 +02001290 td_io_u_lock(td);
1291
1292again:
Jens Axboe2ae0b202013-05-28 14:16:55 +02001293 if (!io_u_rempty(&td->io_u_requeues))
1294 io_u = io_u_rpop(&td->io_u_requeues);
Jens Axboe3e260a42013-12-09 12:38:53 -07001295 else if (!queue_full(td)) {
Jens Axboe2ae0b202013-05-28 14:16:55 +02001296 io_u = io_u_qpop(&td->io_u_freelist);
Jens Axboe10ba5352006-10-20 11:39:27 +02001297
Jens Axboe225ba9e2014-02-26 14:31:15 -08001298 io_u->file = NULL;
Jens Axboe6040dab2006-10-24 19:38:15 +02001299 io_u->buflen = 0;
Jens Axboe10ba5352006-10-20 11:39:27 +02001300 io_u->resid = 0;
Jens Axboed7762cf2007-02-23 12:34:57 +01001301 io_u->end_io = NULL;
Jens Axboe755200a2007-02-19 13:08:12 +01001302 }
1303
1304 if (io_u) {
Jens Axboe0c6e7512007-02-22 11:19:39 +01001305 assert(io_u->flags & IO_U_F_FREE);
Jens Axboee69fdf72014-07-23 16:11:43 +02001306 io_u->flags &= ~(IO_U_F_FREE | IO_U_F_NO_FILE_PUT |
1307 IO_U_F_TRIMMED | IO_U_F_BARRIER |
1308 IO_U_F_VER_LIST);
Jens Axboe0c6e7512007-02-22 11:19:39 +01001309
Jens Axboe755200a2007-02-19 13:08:12 +01001310 io_u->error = 0;
Jens Axboebcd5abf2013-01-23 09:27:25 -07001311 io_u->acct_ddir = -1;
Jens Axboe10ba5352006-10-20 11:39:27 +02001312 td->cur_depth++;
Radha Ramachandran0c412142009-11-03 21:45:31 +01001313 io_u->flags |= IO_U_F_IN_CUR_DEPTH;
Jens Axboef9401282014-02-06 12:17:37 -07001314 io_u->ipo = NULL;
Jens Axboe1dec3e02010-03-19 10:33:39 +01001315 } else if (td->o.verify_async) {
1316 /*
1317 * We ran out, wait for the async verify threads to finish and
1318 * return an io_u
1319 */
1320 pthread_cond_wait(&td->free_cond, &td->io_u_lock);
1321 goto again;
Jens Axboe10ba5352006-10-20 11:39:27 +02001322 }
1323
Jens Axboee8462bd2009-07-06 12:59:04 +02001324 td_io_u_unlock(td);
Jens Axboe10ba5352006-10-20 11:39:27 +02001325 return io_u;
1326}
1327
Jens Axboe0d29de82010-09-01 13:54:15 +02001328static int check_get_trim(struct thread_data *td, struct io_u *io_u)
Jens Axboe10ba5352006-10-20 11:39:27 +02001329{
Jens Axboed72be542012-11-30 19:37:46 +01001330 if (!(td->flags & TD_F_TRIM_BACKLOG))
1331 return 0;
1332
1333 if (td->trim_entries) {
Jens Axboe0d29de82010-09-01 13:54:15 +02001334 int get_trim = 0;
Jens Axboe10ba5352006-10-20 11:39:27 +02001335
Jens Axboe0d29de82010-09-01 13:54:15 +02001336 if (td->trim_batch) {
1337 td->trim_batch--;
1338 get_trim = 1;
1339 } else if (!(td->io_hist_len % td->o.trim_backlog) &&
1340 td->last_ddir != DDIR_READ) {
1341 td->trim_batch = td->o.trim_batch;
1342 if (!td->trim_batch)
1343 td->trim_batch = td->o.trim_backlog;
1344 get_trim = 1;
1345 }
1346
1347 if (get_trim && !get_next_trim(td, io_u))
1348 return 1;
Jens Axboe2ba1c292008-02-01 13:16:38 +01001349 }
Jens Axboe10ba5352006-10-20 11:39:27 +02001350
Jens Axboe0d29de82010-09-01 13:54:15 +02001351 return 0;
1352}
1353
1354static int check_get_verify(struct thread_data *td, struct io_u *io_u)
1355{
Jens Axboed72be542012-11-30 19:37:46 +01001356 if (!(td->flags & TD_F_VER_BACKLOG))
1357 return 0;
1358
1359 if (td->io_hist_len) {
Jens Axboe9e144182010-06-15 14:25:36 +02001360 int get_verify = 0;
1361
Jens Axboed1ece0c2012-03-07 09:32:58 +01001362 if (td->verify_batch)
Jens Axboe9e144182010-06-15 14:25:36 +02001363 get_verify = 1;
Jens Axboed1ece0c2012-03-07 09:32:58 +01001364 else if (!(td->io_hist_len % td->o.verify_backlog) &&
Jens Axboe9e144182010-06-15 14:25:36 +02001365 td->last_ddir != DDIR_READ) {
1366 td->verify_batch = td->o.verify_batch;
Jens Axboef8a75c92010-06-15 14:27:28 +02001367 if (!td->verify_batch)
1368 td->verify_batch = td->o.verify_backlog;
Jens Axboe9e144182010-06-15 14:25:36 +02001369 get_verify = 1;
1370 }
1371
Jens Axboed1ece0c2012-03-07 09:32:58 +01001372 if (get_verify && !get_next_verify(td, io_u)) {
1373 td->verify_batch--;
Jens Axboe0d29de82010-09-01 13:54:15 +02001374 return 1;
Jens Axboed1ece0c2012-03-07 09:32:58 +01001375 }
Jens Axboe9e144182010-06-15 14:25:36 +02001376 }
1377
Jens Axboe0d29de82010-09-01 13:54:15 +02001378 return 0;
1379}
1380
1381/*
Jens Axboede789762011-09-16 22:11:23 +02001382 * Fill the offset and start time into the buffer content, so the data
Jens Axboe23f394d2011-09-16 22:45:27 +02001383 * is not trivially compressible or de-dupable. Do this for every
1384 * 512b block in the range, since that should be the smallest block size
1385 * we can expect from a device.
Jens Axboede789762011-09-16 22:11:23 +02001386 */
1387static void small_content_scramble(struct io_u *io_u)
1388{
Jens Axboe23f394d2011-09-16 22:45:27 +02001389 unsigned int i, nr_blocks = io_u->buflen / 512;
Jens Axboe1ae83d42013-01-12 01:44:15 -07001390 uint64_t boffset;
Jens Axboe23f394d2011-09-16 22:45:27 +02001391 unsigned int offset;
1392 void *p, *end;
Jens Axboede789762011-09-16 22:11:23 +02001393
Jens Axboe23f394d2011-09-16 22:45:27 +02001394 if (!nr_blocks)
1395 return;
1396
1397 p = io_u->xfer_buf;
Jens Axboefba76ee2011-09-27 14:27:48 -06001398 boffset = io_u->offset;
Jens Axboe81f03662012-02-02 09:20:09 +01001399 io_u->buf_filled_len = 0;
Jens Axboefad82f72011-09-19 11:33:30 +02001400
Jens Axboe23f394d2011-09-16 22:45:27 +02001401 for (i = 0; i < nr_blocks; i++) {
1402 /*
1403 * Fill the byte offset into a "random" start offset of
1404 * the buffer, derived by XORing the usec time with the
1405 * actual block offset.
1406 */
Jens Axboefad82f72011-09-19 11:33:30 +02001407 offset = (io_u->start_time.tv_usec ^ boffset) & 511;
Jens Axboe1ae83d42013-01-12 01:44:15 -07001408 offset &= ~(sizeof(uint64_t) - 1);
1409 if (offset >= 512 - sizeof(uint64_t))
1410 offset -= sizeof(uint64_t);
Jens Axboefba76ee2011-09-27 14:27:48 -06001411 memcpy(p + offset, &boffset, sizeof(boffset));
Jens Axboe23f394d2011-09-16 22:45:27 +02001412
1413 end = p + 512 - sizeof(io_u->start_time);
1414 memcpy(end, &io_u->start_time, sizeof(io_u->start_time));
1415 p += 512;
Jens Axboefad82f72011-09-19 11:33:30 +02001416 boffset += 512;
Jens Axboe23f394d2011-09-16 22:45:27 +02001417 }
Jens Axboede789762011-09-16 22:11:23 +02001418}
1419
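/*
 * A minimal sketch (not fio code) of the per-sector destination choice
 * above: XOR the submit-time usecs with the block offset, reduce to the
 * 512b sector, align down to a u64 boundary, and keep the copy inside
 * the sector. The helper name is hypothetical.
 */
static unsigned int scramble_dest(unsigned long usec, uint64_t boffset)
{
	unsigned int offset = (usec ^ boffset) & 511;

	offset &= ~(sizeof(uint64_t) - 1);
	if (offset >= 512 - sizeof(uint64_t))
		offset -= sizeof(uint64_t);

	return offset;
}
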
1420/*
Jens Axboe0d29de82010-09-01 13:54:15 +02001421 * Return an io_u to be processed. Gets a buflen and offset, sets direction,
1422 * etc. The returned io_u is fully ready to be prepped and submitted.
1423 */
1424struct io_u *get_io_u(struct thread_data *td)
1425{
1426 struct fio_file *f;
1427 struct io_u *io_u;
Jens Axboede789762011-09-16 22:11:23 +02001428 int do_scramble = 0;
Jens Axboe002fe732014-02-11 08:31:13 -07001429 long ret = 0;
Jens Axboe0d29de82010-09-01 13:54:15 +02001430
1431 io_u = __get_io_u(td);
1432 if (!io_u) {
1433 dprint(FD_IO, "__get_io_u failed\n");
1434 return NULL;
1435 }
1436
1437 if (check_get_verify(td, io_u))
1438 goto out;
1439 if (check_get_trim(td, io_u))
1440 goto out;
1441
Jens Axboe755200a2007-02-19 13:08:12 +01001442 /*
1443 * If this came from a requeue, the io_u is already set up.
1444 */
1445 if (io_u->file)
Jens Axboe77f392b2007-02-19 20:13:09 +01001446 goto out;
Jens Axboe755200a2007-02-19 13:08:12 +01001447
Jens Axboe429f6672007-07-23 10:38:43 +02001448 /*
1449 * If using an iolog, grab next piece if any available.
1450 */
Jens Axboed72be542012-11-30 19:37:46 +01001451 if (td->flags & TD_F_READ_IOLOG) {
Jens Axboe429f6672007-07-23 10:38:43 +02001452 if (read_iolog_get(td, io_u))
1453 goto err_put;
Jens Axboe2ba1c292008-02-01 13:16:38 +01001454 } else if (set_io_u_file(td, io_u)) {
Jens Axboe002fe732014-02-11 08:31:13 -07001455 ret = -EBUSY;
Jens Axboe2ba1c292008-02-01 13:16:38 +01001456 dprint(FD_IO, "io_u %p, setting file failed\n", io_u);
Jens Axboe429f6672007-07-23 10:38:43 +02001457 goto err_put;
Jens Axboe2ba1c292008-02-01 13:16:38 +01001458 }
Jens Axboe5ec10ea2008-03-06 15:42:00 +01001459
Jens Axboe429f6672007-07-23 10:38:43 +02001460 f = io_u->file;
Jens Axboe002fe732014-02-11 08:31:13 -07001461 if (!f) {
1462 dprint(FD_IO, "io_u %p, setting file failed\n", io_u);
1463 goto err_put;
1464 }
1465
Jens Axboed6aed792009-06-03 08:41:15 +02001466 assert(fio_file_open(f));
Jens Axboe97af62c2007-05-22 11:12:13 +02001467
Jens Axboeff58fce2010-08-25 12:02:08 +02001468 if (ddir_rw(io_u->ddir)) {
Jens Axboed0656a92008-02-01 18:33:23 +01001469 if (!io_u->buflen && !(td->io_ops->flags & FIO_NOIO)) {
Jens Axboe2ba1c292008-02-01 13:16:38 +01001470 dprint(FD_IO, "get_io_u: zero buflen on %p\n", io_u);
Jens Axboe429f6672007-07-23 10:38:43 +02001471 goto err_put;
Jens Axboe2ba1c292008-02-01 13:16:38 +01001472 }
Jens Axboe87dc1ab2006-10-24 14:41:26 +02001473
Jens Axboe08a99be2014-12-14 19:01:24 -07001474 f->last_start[io_u->ddir] = io_u->offset;
1475 f->last_pos[io_u->ddir] = io_u->offset + io_u->buflen;
Jens Axboe87dc1ab2006-10-24 14:41:26 +02001476
Jens Axboefd684182011-09-19 09:24:44 +02001477 if (io_u->ddir == DDIR_WRITE) {
Jens Axboed72be542012-11-30 19:37:46 +01001478 if (td->flags & TD_F_REFILL_BUFFERS) {
Jens Axboe9c426842012-03-02 21:02:12 +01001479 io_u_fill_buffer(td, io_u,
Jens Axboe8e0aa162014-09-26 15:04:58 -06001480 td->o.min_bs[DDIR_WRITE],
1481 io_u->xfer_buflen);
Jens Axboebedc9dc2014-03-17 12:51:09 -06001482 } else if ((td->flags & TD_F_SCRAMBLE_BUFFERS) &&
1483 !(td->flags & TD_F_COMPRESS))
Jens Axboefd684182011-09-19 09:24:44 +02001484 do_scramble = 1;
Jens Axboed72be542012-11-30 19:37:46 +01001485 if (td->flags & TD_F_VER_NONE) {
Jens Axboe629f1d72012-03-09 19:02:01 +01001486 populate_verify_io_u(td, io_u);
1487 do_scramble = 0;
1488 }
Jens Axboefd684182011-09-19 09:24:44 +02001489 } else if (io_u->ddir == DDIR_READ) {
Radha Ramachandrancbe8d752010-07-14 08:36:07 +02001490 /*
1491 * Reset the buf_filled parameters so that the next time this
1492 * buffer is used for a write, it gets refilled.
1493 */
Radha Ramachandrancbe8d752010-07-14 08:36:07 +02001494 io_u->buf_filled_len = 0;
1495 }
Jens Axboe10ba5352006-10-20 11:39:27 +02001496 }
1497
Jens Axboe165faf12007-02-07 11:30:37 +01001498 /*
1499 * Set io data pointers.
1500 */
Jens Axboecec6b552007-02-06 20:15:38 +01001501 io_u->xfer_buf = io_u->buf;
1502 io_u->xfer_buflen = io_u->buflen;
Jens Axboe5973caf2008-05-21 19:52:35 +02001503
Jens Axboe6ac7a332008-03-01 15:22:32 +01001504out:
Jens Axboe0d29de82010-09-01 13:54:15 +02001505 assert(io_u->file);
Jens Axboe429f6672007-07-23 10:38:43 +02001506 if (!td_io_prep(td, io_u)) {
Jens Axboe993bf482008-11-14 13:04:53 +01001507 if (!td->o.disable_slat)
1508 fio_gettime(&io_u->start_time, NULL);
Jens Axboede789762011-09-16 22:11:23 +02001509 if (do_scramble)
1510 small_content_scramble(io_u);
Jens Axboe429f6672007-07-23 10:38:43 +02001511 return io_u;
Jens Axboe36167d82007-02-18 05:41:31 +01001512 }
Jens Axboe429f6672007-07-23 10:38:43 +02001513err_put:
Jens Axboe2ba1c292008-02-01 13:16:38 +01001514 dprint(FD_IO, "get_io_u failed\n");
Jens Axboe429f6672007-07-23 10:38:43 +02001515 put_io_u(td, io_u);
Jens Axboe002fe732014-02-11 08:31:13 -07001516 return ERR_PTR(ret);
Jens Axboe10ba5352006-10-20 11:39:27 +02001517}
1518
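/*
 * Selection order in get_io_u() above, for reference: verify backlog,
 * then trim backlog, then a requeued io_u (file already set), then
 * iolog replay, and finally a fresh offset/buflen via set_io_u_file().
 * Only freshly generated writes are refilled or scrambled.
 */
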
Jens Axboe54517922007-03-05 10:06:06 +01001519void io_u_log_error(struct thread_data *td, struct io_u *io_u)
1520{
Dmitry Monakhov8b28bd42012-09-23 15:46:09 +04001521 enum error_type_bit eb = td_error_type(io_u->ddir, io_u->error);
Jens Axboe825f8182010-08-25 10:47:18 +02001522
Dmitry Monakhov8b28bd42012-09-23 15:46:09 +04001523 if (td_non_fatal_error(td, eb, io_u->error) && !td->o.error_dump)
1524 return;
Jens Axboe54517922007-03-05 10:06:06 +01001525
Robert Elliott2cbdcdb2014-09-16 17:09:48 -05001526 log_err("fio: io_u error%s%s: %s: %s offset=%llu, buflen=%lu\n",
1527 io_u->file ? " on file " : "",
1528 io_u->file ? io_u->file->file_name : "",
1529 strerror(io_u->error),
1530 io_ddir_name(io_u->ddir),
1531 io_u->offset, io_u->xfer_buflen);
Jens Axboe54517922007-03-05 10:06:06 +01001532
1533 if (!td->error)
1534 td_verror(td, io_u->error, "io_u error");
1535}
1536
Jens Axboeaba6c952014-02-13 19:59:56 -07001537static inline int gtod_reduce(struct thread_data *td)
1538{
Jens Axboe729fe3a2014-02-14 08:46:35 -07001539 return td->o.disable_clat && td->o.disable_lat && td->o.disable_slat
Jens Axboeb74b8202014-02-13 20:04:02 -07001540 && td->o.disable_bw;
Jens Axboeaba6c952014-02-13 19:59:56 -07001541}
1542
Jens Axboec8eeb9d2011-10-05 14:02:22 +02001543static void account_io_completion(struct thread_data *td, struct io_u *io_u,
1544 struct io_completion_data *icd,
1545 const enum fio_ddir idx, unsigned int bytes)
1546{
Jens Axboe24d23ca2012-11-13 08:31:24 -07001547 unsigned long lusec = 0;
Jens Axboec8eeb9d2011-10-05 14:02:22 +02001548
Jens Axboeaba6c952014-02-13 19:59:56 -07001549 if (!gtod_reduce(td))
Jens Axboec8eeb9d2011-10-05 14:02:22 +02001550 lusec = utime_since(&io_u->issue_time, &icd->time);
1551
1552 if (!td->o.disable_lat) {
1553 unsigned long tusec;
1554
1555 tusec = utime_since(&io_u->start_time, &icd->time);
Jens Axboeccefd5f2014-06-30 20:59:03 -06001556 add_lat_sample(td, idx, tusec, bytes, io_u->offset);
Jens Axboe15501532012-10-24 16:37:45 +02001557
Jens Axboed4afedf2013-05-22 22:21:29 +02001558 if (td->flags & TD_F_PROFILE_OPS) {
1559 struct prof_io_ops *ops = &td->prof_io_ops;
1560
1561 if (ops->io_u_lat)
1562 icd->error = ops->io_u_lat(td, tusec);
1563 }
1564
Jens Axboe3e260a42013-12-09 12:38:53 -07001565 if (td->o.max_latency && tusec > td->o.max_latency)
1566 lat_fatal(td, icd, tusec, td->o.max_latency);
1567 if (td->o.latency_target && tusec > td->o.latency_target) {
1568 if (lat_target_failed(td))
1569 lat_fatal(td, icd, tusec, td->o.latency_target);
Jens Axboe15501532012-10-24 16:37:45 +02001570 }
Jens Axboec8eeb9d2011-10-05 14:02:22 +02001571 }
1572
1573 if (!td->o.disable_clat) {
Jens Axboeccefd5f2014-06-30 20:59:03 -06001574 add_clat_sample(td, idx, lusec, bytes, io_u->offset);
Jens Axboec8eeb9d2011-10-05 14:02:22 +02001575 io_u_mark_latency(td, lusec);
1576 }
1577
1578 if (!td->o.disable_bw)
1579 add_bw_sample(td, idx, bytes, &icd->time);
1580
Jens Axboeaba6c952014-02-13 19:59:56 -07001581 if (!gtod_reduce(td))
1582 add_iops_sample(td, idx, bytes, &icd->time);
Jens Axboec8eeb9d2011-10-05 14:02:22 +02001583}
1584
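/*
 * The two windows measured above, for reference:
 *
 *   start_time          issue_time             completion (icd->time)
 *       |------ slat -------|--------- clat --------|
 *       |-------------------- lat ------------------|
 *
 * tusec spans the whole lat window and is what latency_target and
 * max_latency are checked against; lusec covers only the clat window.
 */
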
Steven Lang1b8dbf22011-11-09 13:48:01 +01001585static long long usec_for_io(struct thread_data *td, enum fio_ddir ddir)
1586{
Jens Axboe1ae83d42013-01-12 01:44:15 -07001587 uint64_t secs, remainder, bps, bytes;
1588
Steven Lang1b8dbf22011-11-09 13:48:01 +01001589 bytes = td->this_io_bytes[ddir];
1590 bps = td->rate_bps[ddir];
1591 secs = bytes / bps;
1592 remainder = bytes % bps;
1593 return remainder * 1000000 / bps + secs * 1000000;
1594}
1595
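/*
 * Worked example of the split above (values assumed): bytes = 2500 and
 * bps = 1000 give secs = 2 and remainder = 500, so the result is
 * 500 * 1000000 / 1000 + 2 * 1000000 = 2500000 usec, i.e. 2.5 seconds.
 * Splitting out whole seconds avoids overflowing bytes * 1000000.
 */
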
Jens Axboee69fdf72014-07-23 16:11:43 +02001596static void io_completed(struct thread_data *td, struct io_u **io_u_ptr,
Jens Axboe97601022007-02-18 12:47:29 +01001597 struct io_completion_data *icd)
Jens Axboe10ba5352006-10-20 11:39:27 +02001598{
Jens Axboee69fdf72014-07-23 16:11:43 +02001599 struct io_u *io_u = *io_u_ptr;
1600 enum fio_ddir ddir = io_u->ddir;
1601 struct fio_file *f = io_u->file;
Jens Axboe10ba5352006-10-20 11:39:27 +02001602
Jens Axboe2ba1c292008-02-01 13:16:38 +01001603 dprint_io_u(io_u, "io complete");
1604
Jens Axboe2ecc1b52009-11-04 20:58:09 +01001605 td_io_u_lock(td);
Jens Axboe0c6e7512007-02-22 11:19:39 +01001606 assert(io_u->flags & IO_U_F_FLIGHT);
Jens Axboe38dad622010-07-20 14:46:00 -06001607 io_u->flags &= ~(IO_U_F_FLIGHT | IO_U_F_BUSY_OK);
Jens Axboef9401282014-02-06 12:17:37 -07001608
1609 /*
1610 * Mark IO ok to verify
1611 */
1612 if (io_u->ipo) {
Jens Axboe890b6652014-05-06 19:06:51 -06001613 /*
1614 * Remove errored entry from the verification list
1615 */
1616 if (io_u->error)
1617 unlog_io_piece(td, io_u);
1618 else {
1619 io_u->ipo->flags &= ~IP_F_IN_FLIGHT;
1620 write_barrier();
1621 }
Jens Axboef9401282014-02-06 12:17:37 -07001622 }
1623
Jens Axboe2ecc1b52009-11-04 20:58:09 +01001624 td_io_u_unlock(td);
Jens Axboe0c6e7512007-02-22 11:19:39 +01001625
Jens Axboee69fdf72014-07-23 16:11:43 +02001626 if (ddir_sync(ddir)) {
Jens Axboe87dc1ab2006-10-24 14:41:26 +02001627 td->last_was_sync = 1;
Jens Axboe44f29692010-03-09 20:09:44 +01001628 if (f) {
1629 f->first_write = -1ULL;
1630 f->last_write = -1ULL;
1631 }
Jens Axboe87dc1ab2006-10-24 14:41:26 +02001632 return;
1633 }
1634
1635 td->last_was_sync = 0;
Jens Axboee69fdf72014-07-23 16:11:43 +02001636 td->last_ddir = ddir;
Jens Axboe87dc1ab2006-10-24 14:41:26 +02001637
Jens Axboee69fdf72014-07-23 16:11:43 +02001638 if (!io_u->error && ddir_rw(ddir)) {
Jens Axboe10ba5352006-10-20 11:39:27 +02001639 unsigned int bytes = io_u->buflen - io_u->resid;
Jens Axboee69fdf72014-07-23 16:11:43 +02001640 const enum fio_ddir oddir = ddir ^ 1;
Jens Axboeb29ee5b2008-09-11 10:17:26 +02001641 int ret;
Jens Axboe10ba5352006-10-20 11:39:27 +02001642
Jens Axboee69fdf72014-07-23 16:11:43 +02001643 td->io_blocks[ddir]++;
1644 td->this_io_blocks[ddir]++;
1645 td->io_bytes[ddir] += bytes;
webeeae2fafc2012-03-23 13:41:41 +01001646
1647 if (!(io_u->flags & IO_U_F_VER_LIST))
Jens Axboee69fdf72014-07-23 16:11:43 +02001648 td->this_io_bytes[ddir] += bytes;
Jens Axboe10ba5352006-10-20 11:39:27 +02001649
Jens Axboede54cfd2014-11-10 20:34:00 -07001650 if (ddir == DDIR_WRITE) {
1651 if (f) {
1652 if (f->first_write == -1ULL ||
1653 io_u->offset < f->first_write)
1654 f->first_write = io_u->offset;
1655 if (f->last_write == -1ULL ||
1656 ((io_u->offset + bytes) > f->last_write))
1657 f->last_write = io_u->offset + bytes;
1658 }
1659 if (td->last_write_comp) {
1660 int idx = td->last_write_idx++;
1661
1662 td->last_write_comp[idx] = io_u->offset;
1663 if (td->last_write_idx == td->o.iodepth)
1664 td->last_write_idx = 0;
1665 }
Jens Axboe44f29692010-03-09 20:09:44 +01001666 }
1667
Steven Lang6b1190f2012-02-07 09:42:59 +01001668 if (ramp_time_over(td) && (td->runstate == TD_RUNNING ||
1669 td->runstate == TD_VERIFYING)) {
Jens Axboee69fdf72014-07-23 16:11:43 +02001670 account_io_completion(td, io_u, icd, ddir, bytes);
Jens Axboe40e1a6f2009-06-11 10:55:39 +02001671
Jens Axboee69fdf72014-07-23 16:11:43 +02001672 if (__should_check_rate(td, ddir)) {
1673 td->rate_pending_usleep[ddir] =
1674 (usec_for_io(td, ddir) -
Radha Ramachandranba3e4e02009-12-09 22:31:44 +01001675 utime_since_now(&td->start));
Jens Axboeb23b6a22009-06-11 22:06:23 +02001676 }
Jens Axboee69fdf72014-07-23 16:11:43 +02001677 if (ddir != DDIR_TRIM &&
1678 __should_check_rate(td, oddir)) {
1679 td->rate_pending_usleep[oddir] =
1680 (usec_for_io(td, oddir) -
Radha Ramachandranba3e4e02009-12-09 22:31:44 +01001681 utime_since_now(&td->start));
Jens Axboee69fdf72014-07-23 16:11:43 +02001682 }
Jens Axboe721938a2008-09-10 09:46:16 +02001683 }
Jens Axboe10ba5352006-10-20 11:39:27 +02001684
Jens Axboee69fdf72014-07-23 16:11:43 +02001685 icd->bytes_done[ddir] += bytes;
Jens Axboe3af6ef32007-02-18 06:57:43 +01001686
Jens Axboed7762cf2007-02-23 12:34:57 +01001687 if (io_u->end_io) {
Jens Axboee69fdf72014-07-23 16:11:43 +02001688 ret = io_u->end_io(td, io_u_ptr);
1689 io_u = *io_u_ptr;
Jens Axboe3af6ef32007-02-18 06:57:43 +01001690 if (ret && !icd->error)
1691 icd->error = ret;
1692 }
Jens Axboeff58fce2010-08-25 12:02:08 +02001693 } else if (io_u->error) {
Jens Axboe10ba5352006-10-20 11:39:27 +02001694 icd->error = io_u->error;
Jens Axboe54517922007-03-05 10:06:06 +01001695 io_u_log_error(td, io_u);
1696 }
Dmitry Monakhov8b28bd42012-09-23 15:46:09 +04001697 if (icd->error) {
Jens Axboee69fdf72014-07-23 16:11:43 +02001698 enum error_type_bit eb = td_error_type(ddir, icd->error);
1699
Dmitry Monakhov8b28bd42012-09-23 15:46:09 +04001700 if (!td_non_fatal_error(td, eb, icd->error))
1701 return;
Jens Axboee69fdf72014-07-23 16:11:43 +02001702
Radha Ramachandranf2bba182009-06-15 08:40:16 +02001703 /*
1704 * If there is a non_fatal error, then add to the error count
1705 * and clear all the errors.
1706 */
1707 update_error_count(td, icd->error);
1708 td_clear_error(td);
1709 icd->error = 0;
Jens Axboee69fdf72014-07-23 16:11:43 +02001710 if (io_u)
1711 io_u->error = 0;
Radha Ramachandranf2bba182009-06-15 08:40:16 +02001712 }
Jens Axboe10ba5352006-10-20 11:39:27 +02001713}
1714
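/*
 * Rate-limit example for the math above (values assumed): if
 * usec_for_io() says this_io_bytes should have taken 500000 usec at the
 * requested rate but only 400000 usec have elapsed since td->start,
 * rate_pending_usleep becomes 100000 and the thread sleeps 100ms to get
 * back on rate. A negative result means we are behind and need not sleep.
 */
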
Jens Axboe9520ebb2008-10-16 21:03:27 +02001715static void init_icd(struct thread_data *td, struct io_completion_data *icd,
1716 int nr)
Jens Axboe36167d82007-02-18 05:41:31 +01001717{
Shaohua Li6eaf09d2012-09-14 08:49:43 +02001718 int ddir;
Jens Axboeaba6c952014-02-13 19:59:56 -07001719
1720 if (!gtod_reduce(td))
Jens Axboe9520ebb2008-10-16 21:03:27 +02001721 fio_gettime(&icd->time, NULL);
Jens Axboe36167d82007-02-18 05:41:31 +01001722
Jens Axboe3af6ef32007-02-18 06:57:43 +01001723 icd->nr = nr;
1724
Jens Axboe36167d82007-02-18 05:41:31 +01001725 icd->error = 0;
Shaohua Li6eaf09d2012-09-14 08:49:43 +02001726 for (ddir = DDIR_READ; ddir < DDIR_RWDIR_CNT; ddir++)
1727 icd->bytes_done[ddir] = 0;
Jens Axboe36167d82007-02-18 05:41:31 +01001728}
1729
Jens Axboe97601022007-02-18 12:47:29 +01001730static void ios_completed(struct thread_data *td,
1731 struct io_completion_data *icd)
Jens Axboe10ba5352006-10-20 11:39:27 +02001732{
1733 struct io_u *io_u;
1734 int i;
1735
Jens Axboe10ba5352006-10-20 11:39:27 +02001736 for (i = 0; i < icd->nr; i++) {
1737 io_u = td->io_ops->event(td, i);
1738
Jens Axboee69fdf72014-07-23 16:11:43 +02001739 io_completed(td, &io_u, icd);
Jens Axboee8462bd2009-07-06 12:59:04 +02001740
Jens Axboee69fdf72014-07-23 16:11:43 +02001741 if (io_u)
Jens Axboee8462bd2009-07-06 12:59:04 +02001742 put_io_u(td, io_u);
Jens Axboe10ba5352006-10-20 11:39:27 +02001743 }
1744}
Jens Axboe97601022007-02-18 12:47:29 +01001745
Jens Axboee7e6cfb2007-02-20 10:58:34 +01001746/*
1747 * Complete a single io_u for the sync engines.
1748 */
Jens Axboe581e7142009-06-09 12:47:16 +02001749int io_u_sync_complete(struct thread_data *td, struct io_u *io_u,
Jens Axboe100f49f2013-01-23 10:15:57 -07001750 uint64_t *bytes)
Jens Axboe97601022007-02-18 12:47:29 +01001751{
1752 struct io_completion_data icd;
1753
Jens Axboe9520ebb2008-10-16 21:03:27 +02001754 init_icd(td, &icd, 1);
Jens Axboee69fdf72014-07-23 16:11:43 +02001755 io_completed(td, &io_u, &icd);
Jens Axboee8462bd2009-07-06 12:59:04 +02001756
Jens Axboee69fdf72014-07-23 16:11:43 +02001757 if (io_u)
Jens Axboee8462bd2009-07-06 12:59:04 +02001758 put_io_u(td, io_u);
Jens Axboe97601022007-02-18 12:47:29 +01001759
Jens Axboe581e7142009-06-09 12:47:16 +02001760 if (icd.error) {
1761 td_verror(td, icd.error, "io_u_sync_complete");
1762 return -1;
1763 }
Jens Axboe97601022007-02-18 12:47:29 +01001764
Jens Axboe581e7142009-06-09 12:47:16 +02001765 if (bytes) {
Shaohua Li6eaf09d2012-09-14 08:49:43 +02001766 int ddir;
1767
1768 for (ddir = DDIR_READ; ddir < DDIR_RWDIR_CNT; ddir++)
1769 bytes[ddir] += icd.bytes_done[ddir];
Jens Axboe581e7142009-06-09 12:47:16 +02001770 }
1771
1772 return 0;
Jens Axboe97601022007-02-18 12:47:29 +01001773}
1774
Jens Axboee7e6cfb2007-02-20 10:58:34 +01001775/*
1776 * Called to complete at least min_evts IOs for the async engines.
1777 */
Jens Axboe581e7142009-06-09 12:47:16 +02001778int io_u_queued_complete(struct thread_data *td, int min_evts,
Jens Axboe100f49f2013-01-23 10:15:57 -07001779 uint64_t *bytes)
Jens Axboe97601022007-02-18 12:47:29 +01001780{
Jens Axboe97601022007-02-18 12:47:29 +01001781 struct io_completion_data icd;
Jens Axboe00de55e2007-02-20 10:45:57 +01001782 struct timespec *tvp = NULL;
Jens Axboe97601022007-02-18 12:47:29 +01001783 int ret;
Davide Libenzi4d06a332007-03-22 07:43:50 +01001784 struct timespec ts = { .tv_sec = 0, .tv_nsec = 0, };
Jens Axboe97601022007-02-18 12:47:29 +01001785
Jens Axboe49504212008-06-05 09:03:30 +02001786 dprint(FD_IO, "io_u_queued_completed: min=%d\n", min_evts);
Jens Axboeb271fe62008-02-04 10:49:41 +01001787
Jens Axboe49504212008-06-05 09:03:30 +02001788 if (!min_evts)
Jens Axboe00de55e2007-02-20 10:45:57 +01001789 tvp = &ts;
Robert Elliott05074832014-09-04 13:51:05 -06001790 else if (min_evts > td->cur_depth)
1791 min_evts = td->cur_depth;
Jens Axboe97601022007-02-18 12:47:29 +01001792
Jens Axboe49504212008-06-05 09:03:30 +02001793 ret = td_io_getevents(td, min_evts, td->o.iodepth_batch_complete, tvp);
Jens Axboe97601022007-02-18 12:47:29 +01001794 if (ret < 0) {
Jens Axboee1161c32007-02-22 19:36:48 +01001795 td_verror(td, -ret, "td_io_getevents");
Jens Axboe97601022007-02-18 12:47:29 +01001796 return ret;
1797 } else if (!ret)
1798 return ret;
1799
Jens Axboe9520ebb2008-10-16 21:03:27 +02001800 init_icd(td, &icd, ret);
Jens Axboe97601022007-02-18 12:47:29 +01001801 ios_completed(td, &icd);
Jens Axboe581e7142009-06-09 12:47:16 +02001802 if (icd.error) {
1803 td_verror(td, icd.error, "io_u_queued_complete");
1804 return -1;
1805 }
Jens Axboe97601022007-02-18 12:47:29 +01001806
Jens Axboe581e7142009-06-09 12:47:16 +02001807 if (bytes) {
Shaohua Li6eaf09d2012-09-14 08:49:43 +02001808 int ddir;
1809
1810 for (ddir = DDIR_READ; ddir < DDIR_RWDIR_CNT; ddir++)
1811 bytes[ddir] += icd.bytes_done[ddir];
Jens Axboe581e7142009-06-09 12:47:16 +02001812 }
1813
1814 return 0;
Jens Axboe97601022007-02-18 12:47:29 +01001815}
Jens Axboe7e77dd02007-02-20 10:57:34 +01001816
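/*
 * Usage note for the above: a min_evts of 0 polls with a zeroed
 * timespec and returns immediately with whatever has completed, while
 * min_evts > 0 waits for at least that many events, clamped to
 * cur_depth so we never wait on more IOs than are in flight.
 */
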
1817/*
1818 * Called when the io_u is actually queued, to record the submission latency.
1819 */
1820void io_u_queued(struct thread_data *td, struct io_u *io_u)
1821{
Jens Axboe9520ebb2008-10-16 21:03:27 +02001822 if (!td->o.disable_slat) {
1823 unsigned long slat_time;
Jens Axboe7e77dd02007-02-20 10:57:34 +01001824
Jens Axboe9520ebb2008-10-16 21:03:27 +02001825 slat_time = utime_since(&io_u->start_time, &io_u->issue_time);
Jens Axboeccefd5f2014-06-30 20:59:03 -06001826 add_slat_sample(td, io_u->ddir, slat_time, io_u->xfer_buflen,
1827 io_u->offset);
Jens Axboe9520ebb2008-10-16 21:03:27 +02001828 }
Jens Axboe7e77dd02007-02-20 10:57:34 +01001829}
Jens Axboe433afcb2007-02-22 10:39:01 +01001830
Jens Axboee66dac22014-09-22 10:02:07 -06001831/*
1832 * If dedupe is enabled, see if we should reuse the previous random state.
1833 */
1834static struct frand_state *get_buf_state(struct thread_data *td)
1835{
1836 unsigned int v;
1837 unsigned long r;
1838
1839 if (!td->o.dedupe_percentage)
1840 return &td->buf_state;
Jens Axboe64d3bab2014-09-22 14:20:05 -06001841 else if (td->o.dedupe_percentage == 100)
1842 return &td->buf_state_prev;
Jens Axboee66dac22014-09-22 10:02:07 -06001843
1844 r = __rand(&td->dedupe_state);
1845 v = 1 + (int) (100.0 * (r / (FRAND_MAX + 1.0)));
1846
1847 if (v <= td->o.dedupe_percentage)
1848 return &td->buf_state_prev;
1849
1850 return &td->buf_state;
1851}
1852
1853static void save_buf_state(struct thread_data *td, struct frand_state *rs)
1854{
1855 if (rs == &td->buf_state)
1856 frand_copy(&td->buf_state_prev, rs);
1857}
1858
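/*
 * A minimal sketch (not fio code) of the dedupe draw above: map the raw
 * random value onto 1..100 and reuse the previous buffer state when the
 * draw falls within dedupe_percentage. The helper name is hypothetical.
 */
static int should_dedupe(unsigned long r, unsigned int dedupe_pct)
{
	unsigned int v = 1 + (unsigned int) (100.0 * (r / (FRAND_MAX + 1.0)));

	return v <= dedupe_pct;
}
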
Jens Axboecc86c392013-05-03 15:12:33 +02001859void fill_io_buffer(struct thread_data *td, void *buf, unsigned int min_write,
1860 unsigned int max_bs)
Jens Axboe5973caf2008-05-21 19:52:35 +02001861{
Jens Axboefd1583f2014-12-03 19:55:33 -07001862 struct thread_options *o = &td->o;
1863
1864 if (o->compress_percentage) {
Jens Axboe9c426842012-03-02 21:02:12 +01001865 unsigned int perc = td->o.compress_percentage;
Jens Axboee66dac22014-09-22 10:02:07 -06001866 struct frand_state *rs;
Jens Axboe8e0aa162014-09-26 15:04:58 -06001867 unsigned int left = max_bs;
Jens Axboee66dac22014-09-22 10:02:07 -06001868
Jens Axboe8e0aa162014-09-26 15:04:58 -06001869 do {
1870 rs = get_buf_state(td);
Jens Axboe9c426842012-03-02 21:02:12 +01001871
Jens Axboe8e0aa162014-09-26 15:04:58 -06001872 min_write = min(min_write, left);
Jens Axboef97a43a2012-03-09 19:06:24 +01001873
Jens Axboe8e0aa162014-09-26 15:04:58 -06001874 if (perc) {
1875 unsigned int seg = min_write;
Jens Axboecc86c392013-05-03 15:12:33 +02001876
Jens Axboe8e0aa162014-09-26 15:04:58 -06001877 seg = min(min_write, td->o.compress_chunk);
1878 if (!seg)
1879 seg = min_write;
1880
1881 fill_random_buf_percentage(rs, buf, perc, seg,
Jens Axboefd1583f2014-12-03 19:55:33 -07001882 min_write, o->buffer_pattern,
1883 o->buffer_pattern_bytes);
Jens Axboe8e0aa162014-09-26 15:04:58 -06001884 } else
1885 fill_random_buf(rs, buf, min_write);
1886
1887 buf += min_write;
1888 left -= min_write;
Jens Axboee66dac22014-09-22 10:02:07 -06001889 save_buf_state(td, rs);
Jens Axboe8e0aa162014-09-26 15:04:58 -06001890 } while (left);
Jens Axboefd1583f2014-12-03 19:55:33 -07001891 } else if (o->buffer_pattern_bytes)
1892 fill_buffer_pattern(td, buf, max_bs);
1893 else
Jens Axboecc86c392013-05-03 15:12:33 +02001894 memset(buf, 0, max_bs);
1895}
1896
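/*
 * Trace of the chunked fill above (values assumed): with max_bs = 8192,
 * min_write = 4096 and a compress_percentage set, the do/while runs
 * twice, each pass drawing its own buf_state (so a dedupe hit can
 * replay the previous seed) and filling one 4096 byte segment at the
 * requested compressibility.
 */
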
1897/*
1898 * "randomly" fill the buffer contents
1899 */
1900void io_u_fill_buffer(struct thread_data *td, struct io_u *io_u,
1901 unsigned int min_write, unsigned int max_bs)
1902{
1903 io_u->buf_filled_len = 0;
1904 fill_io_buffer(td, io_u->buf, min_write, max_bs);
Jens Axboe5973caf2008-05-21 19:52:35 +02001905}