#include <unistd.h>
#include <fcntl.h>
#include <string.h>
#include <signal.h>
#include <time.h>
#include <assert.h>

#include "fio.h"
#include "hash.h"
#include "verify.h"
#include "trim.h"
#include "lib/rand.h"
#include "lib/axmap.h"
#include "err.h"
#include "lib/pow2.h"
#include "minmax.h"

struct io_completion_data {
	int nr;				/* input */

	int error;			/* output */
	uint64_t bytes_done[DDIR_RWDIR_CNT];	/* output */
	struct timeval time;		/* output */
};

/*
 * The ->io_axmap contains a map of blocks we have or have not done io
 * to yet. Used to make sure we cover the entire range in a fair fashion.
 */
static bool random_map_free(struct fio_file *f, const uint64_t block)
{
	return !axmap_isset(f->io_axmap, block);
}

/*
 * Mark a given offset as used in the map.
 */
static void mark_random_map(struct thread_data *td, struct io_u *io_u)
{
	unsigned int min_bs = td->o.rw_min_bs;
	struct fio_file *f = io_u->file;
	unsigned int nr_blocks;
	uint64_t block;

	block = (io_u->offset - f->file_offset) / (uint64_t) min_bs;
	nr_blocks = (io_u->buflen + min_bs - 1) / min_bs;

	if (!(io_u->flags & IO_U_F_BUSY_OK))
		nr_blocks = axmap_set_nr(f->io_axmap, block, nr_blocks);

	if ((nr_blocks * min_bs) < io_u->buflen)
		io_u->buflen = nr_blocks * min_bs;
}

static uint64_t last_block(struct thread_data *td, struct fio_file *f,
			   enum fio_ddir ddir)
{
	uint64_t max_blocks;
	uint64_t max_size;

	assert(ddir_rw(ddir));

	/*
	 * Hmm, should we make sure that ->io_size <= ->real_file_size?
	 * -> not for now, since there is code assuming it could go either way.
	 */
	max_size = f->io_size;
	if (max_size > f->real_file_size)
		max_size = f->real_file_size;

	if (td->o.zone_range)
		max_size = td->o.zone_range;

	if (td->o.min_bs[ddir] > td->o.ba[ddir])
		max_size -= td->o.min_bs[ddir] - td->o.ba[ddir];

	max_blocks = max_size / (uint64_t) td->o.ba[ddir];
	if (!max_blocks)
		return 0;

	return max_blocks;
}

struct rand_off {
	struct flist_head list;
	uint64_t off;
};

static int __get_next_rand_offset(struct thread_data *td, struct fio_file *f,
				  enum fio_ddir ddir, uint64_t *b,
				  uint64_t lastb)
{
	uint64_t r;

	if (td->o.random_generator == FIO_RAND_GEN_TAUSWORTHE ||
	    td->o.random_generator == FIO_RAND_GEN_TAUSWORTHE64) {

		r = __rand(&td->random_state);

		dprint(FD_RANDOM, "off rand %llu\n", (unsigned long long) r);

		*b = lastb * (r / (rand_max(&td->random_state) + 1.0));
	} else {
		uint64_t off = 0;

		assert(fio_file_lfsr(f));

		if (lfsr_next(&f->lfsr, &off))
			return 1;

		*b = off;
	}

	/*
	 * if we are not maintaining a random map, we are done.
	 */
	if (!file_randommap(td, f))
		goto ret;

	/*
	 * calculate map offset and check if it's free
	 */
	if (random_map_free(f, *b))
		goto ret;

	dprint(FD_RANDOM, "get_next_rand_offset: offset %llu busy\n",
		(unsigned long long) *b);

	*b = axmap_next_free(f->io_axmap, *b);
	if (*b == (uint64_t) -1ULL)
		return 1;
ret:
	return 0;
}

static int __get_next_rand_offset_zipf(struct thread_data *td,
				       struct fio_file *f, enum fio_ddir ddir,
				       uint64_t *b)
{
	*b = zipf_next(&f->zipf);
	return 0;
}

static int __get_next_rand_offset_pareto(struct thread_data *td,
					 struct fio_file *f, enum fio_ddir ddir,
					 uint64_t *b)
{
	*b = pareto_next(&f->zipf);
	return 0;
}

static int __get_next_rand_offset_gauss(struct thread_data *td,
					struct fio_file *f, enum fio_ddir ddir,
					uint64_t *b)
{
	*b = gauss_next(&f->gauss);
	return 0;
}

static int __get_next_rand_offset_zoned(struct thread_data *td,
					struct fio_file *f, enum fio_ddir ddir,
					uint64_t *b)
{
	unsigned int v, send, stotal;
	uint64_t offset, lastb;
	static int warned;
	struct zone_split_index *zsi;

	lastb = last_block(td, f, ddir);
	if (!lastb)
		return 1;

	if (!td->o.zone_split_nr[ddir]) {
bail:
		return __get_next_rand_offset(td, f, ddir, b, lastb);
	}

	/*
	 * Generate a value, v, between 1 and 100, both inclusive
	 */
	v = rand32_between(&td->zone_state, 1, 100);

	zsi = &td->zone_state_index[ddir][v - 1];
	stotal = zsi->size_perc_prev;
	send = zsi->size_perc;

	/*
	 * Should never happen
	 */
	if (send == -1U) {
		if (!warned) {
			log_err("fio: bug in zoned generation\n");
			warned = 1;
		}
		goto bail;
	}

	/*
	 * 'send' is some percentage below or equal to 100 that
	 * marks the end of the current IO range. 'stotal' marks
	 * the start, in percent.
	 */
	if (stotal)
		offset = stotal * lastb / 100ULL;
	else
		offset = 0;

	lastb = lastb * (send - stotal) / 100ULL;

	/*
	 * Generate index from 0..send-of-lastb
	 */
	if (__get_next_rand_offset(td, f, ddir, b, lastb) == 1)
		return 1;

	/*
	 * Add our start offset, if any
	 */
	if (offset)
		*b += offset;

	return 0;
}

static int flist_cmp(void *data, struct flist_head *a, struct flist_head *b)
{
	struct rand_off *r1 = flist_entry(a, struct rand_off, list);
	struct rand_off *r2 = flist_entry(b, struct rand_off, list);

	return r1->off - r2->off;
}

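/*
 * Dispatch to the offset generator that matches the configured random
 * distribution (uniform, zipf, pareto, gauss or zoned).
 */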
static int get_off_from_method(struct thread_data *td, struct fio_file *f,
			       enum fio_ddir ddir, uint64_t *b)
{
	if (td->o.random_distribution == FIO_RAND_DIST_RANDOM) {
		uint64_t lastb;

		lastb = last_block(td, f, ddir);
		if (!lastb)
			return 1;

		return __get_next_rand_offset(td, f, ddir, b, lastb);
	} else if (td->o.random_distribution == FIO_RAND_DIST_ZIPF)
		return __get_next_rand_offset_zipf(td, f, ddir, b);
	else if (td->o.random_distribution == FIO_RAND_DIST_PARETO)
		return __get_next_rand_offset_pareto(td, f, ddir, b);
	else if (td->o.random_distribution == FIO_RAND_DIST_GAUSS)
		return __get_next_rand_offset_gauss(td, f, ddir, b);
	else if (td->o.random_distribution == FIO_RAND_DIST_ZONED)
		return __get_next_rand_offset_zoned(td, f, ddir, b);

	log_err("fio: unknown random distribution: %d\n", td->o.random_distribution);
	return 1;
}

/*
 * Sort the reads for a verify phase in batches of verifysort_nr, if
 * specified.
 */
static inline bool should_sort_io(struct thread_data *td)
{
	if (!td->o.verifysort_nr || !td->o.do_verify)
		return false;
	if (!td_random(td))
		return false;
	if (td->runstate != TD_VERIFYING)
		return false;
	if (td->o.random_generator == FIO_RAND_GEN_TAUSWORTHE ||
	    td->o.random_generator == FIO_RAND_GEN_TAUSWORTHE64)
		return false;

	return true;
}

static bool should_do_random(struct thread_data *td, enum fio_ddir ddir)
{
	unsigned int v;

	if (td->o.perc_rand[ddir] == 100)
		return true;

	v = rand32_between(&td->seq_rand_state[ddir], 1, 100);

	return v <= td->o.perc_rand[ddir];
}

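/*
 * Fetch the next random offset, optionally batching verifysort_nr offsets
 * and handing them out in sorted order during the verify phase.
 */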
static int get_next_rand_offset(struct thread_data *td, struct fio_file *f,
				enum fio_ddir ddir, uint64_t *b)
{
	struct rand_off *r;
	int i, ret = 1;

	if (!should_sort_io(td))
		return get_off_from_method(td, f, ddir, b);

	if (!flist_empty(&td->next_rand_list)) {
fetch:
		r = flist_first_entry(&td->next_rand_list, struct rand_off, list);
		flist_del(&r->list);
		*b = r->off;
		free(r);
		return 0;
	}

	for (i = 0; i < td->o.verifysort_nr; i++) {
		r = malloc(sizeof(*r));

		ret = get_off_from_method(td, f, ddir, &r->off);
		if (ret) {
			free(r);
			break;
		}

		flist_add(&r->list, &td->next_rand_list);
	}

	if (ret && !i)
		return ret;

	assert(!flist_empty(&td->next_rand_list));
	flist_sort(NULL, &td->next_rand_list, flist_cmp);
	goto fetch;
}

static int get_next_rand_block(struct thread_data *td, struct fio_file *f,
			       enum fio_ddir ddir, uint64_t *b)
{
	if (!get_next_rand_offset(td, f, ddir, b))
		return 0;

	if (td->o.time_based ||
	    (td->o.file_service_type & __FIO_FSERVICE_NONUNIFORM)) {
		fio_file_reset(td, f);
		if (!get_next_rand_offset(td, f, ddir, b))
			return 0;
	}

	dprint(FD_IO, "%s: rand offset failed, last=%llu, size=%llu\n",
			f->file_name, (unsigned long long) f->last_pos[ddir],
			(unsigned long long) f->real_file_size);
	return 1;
}

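/*
 * Compute the next sequential offset, wrapping around for time_based jobs
 * and honoring ddir_seq_add for holed or backwards IO.
 */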
static int get_next_seq_offset(struct thread_data *td, struct fio_file *f,
			       enum fio_ddir ddir, uint64_t *offset)
{
	struct thread_options *o = &td->o;

	assert(ddir_rw(ddir));

	if (f->last_pos[ddir] >= f->io_size + get_start_offset(td, f) &&
	    o->time_based) {
		struct thread_options *o = &td->o;
		uint64_t io_size = f->io_size + (f->io_size % o->min_bs[ddir]);

		if (io_size > f->last_pos[ddir])
			f->last_pos[ddir] = 0;
		else
			f->last_pos[ddir] = f->last_pos[ddir] - io_size;
	}

	if (f->last_pos[ddir] < f->real_file_size) {
		uint64_t pos;

		if (f->last_pos[ddir] == f->file_offset && o->ddir_seq_add < 0) {
			if (f->real_file_size > f->io_size)
				f->last_pos[ddir] = f->io_size;
			else
				f->last_pos[ddir] = f->real_file_size;
		}

		pos = f->last_pos[ddir] - f->file_offset;
		if (pos && o->ddir_seq_add) {
			pos += o->ddir_seq_add;

			/*
			 * If we reach beyond the end of the file
			 * with holed IO, wrap around to the
			 * beginning again. If we're doing backwards IO,
			 * wrap to the end.
			 */
			if (pos >= f->real_file_size) {
				if (o->ddir_seq_add > 0)
					pos = f->file_offset;
				else {
					if (f->real_file_size > f->io_size)
						pos = f->io_size;
					else
						pos = f->real_file_size;

					pos += o->ddir_seq_add;
				}
			}
		}

		*offset = pos;
		return 0;
	}

	return 1;
}

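/*
 * Pick the next block for this io_u, choosing between the random and
 * sequential generators based on rw_seq and the percentage_random settings.
 */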
static int get_next_block(struct thread_data *td, struct io_u *io_u,
			  enum fio_ddir ddir, int rw_seq,
			  unsigned int *is_random)
{
	struct fio_file *f = io_u->file;
	uint64_t b, offset;
	int ret;

	assert(ddir_rw(ddir));

	b = offset = -1ULL;

	if (rw_seq) {
		if (td_random(td)) {
			if (should_do_random(td, ddir)) {
				ret = get_next_rand_block(td, f, ddir, &b);
				*is_random = 1;
			} else {
				*is_random = 0;
				io_u_set(td, io_u, IO_U_F_BUSY_OK);
				ret = get_next_seq_offset(td, f, ddir, &offset);
				if (ret)
					ret = get_next_rand_block(td, f, ddir, &b);
			}
		} else {
			*is_random = 0;
			ret = get_next_seq_offset(td, f, ddir, &offset);
		}
	} else {
		io_u_set(td, io_u, IO_U_F_BUSY_OK);
		*is_random = 0;

		if (td->o.rw_seq == RW_SEQ_SEQ) {
			ret = get_next_seq_offset(td, f, ddir, &offset);
			if (ret) {
				ret = get_next_rand_block(td, f, ddir, &b);
				*is_random = 0;
			}
		} else if (td->o.rw_seq == RW_SEQ_IDENT) {
			if (f->last_start[ddir] != -1ULL)
				offset = f->last_start[ddir] - f->file_offset;
			else
				offset = 0;
			ret = 0;
		} else {
			log_err("fio: unknown rw_seq=%d\n", td->o.rw_seq);
			ret = 1;
		}
	}

	if (!ret) {
		if (offset != -1ULL)
			io_u->offset = offset;
		else if (b != -1ULL)
			io_u->offset = b * td->o.ba[ddir];
		else {
			log_err("fio: bug in offset generation: offset=%llu, b=%llu\n", (unsigned long long) offset, (unsigned long long) b);
			ret = 1;
		}
	}

	return ret;
}

/*
 * For random io, generate a random new block and see if it's used. Repeat
 * until we find a free one. For sequential io, just return the end of
 * the last io issued.
 */
static int __get_next_offset(struct thread_data *td, struct io_u *io_u,
			     unsigned int *is_random)
{
	struct fio_file *f = io_u->file;
	enum fio_ddir ddir = io_u->ddir;
	int rw_seq_hit = 0;

	assert(ddir_rw(ddir));

	if (td->o.ddir_seq_nr && !--td->ddir_seq_nr) {
		rw_seq_hit = 1;
		td->ddir_seq_nr = td->o.ddir_seq_nr;
	}

	if (get_next_block(td, io_u, ddir, rw_seq_hit, is_random))
		return 1;

	if (io_u->offset >= f->io_size) {
		dprint(FD_IO, "get_next_offset: offset %llu >= io_size %llu\n",
					(unsigned long long) io_u->offset,
					(unsigned long long) f->io_size);
		return 1;
	}

	io_u->offset += f->file_offset;
	if (io_u->offset >= f->real_file_size) {
		dprint(FD_IO, "get_next_offset: offset %llu >= size %llu\n",
					(unsigned long long) io_u->offset,
					(unsigned long long) f->real_file_size);
		return 1;
	}

	return 0;
}

static int get_next_offset(struct thread_data *td, struct io_u *io_u,
			   unsigned int *is_random)
{
	if (td->flags & TD_F_PROFILE_OPS) {
		struct prof_io_ops *ops = &td->prof_io_ops;

		if (ops->fill_io_u_off)
			return ops->fill_io_u_off(td, io_u, is_random);
	}

	return __get_next_offset(td, io_u, is_random);
}

static inline bool io_u_fits(struct thread_data *td, struct io_u *io_u,
			     unsigned int buflen)
{
	struct fio_file *f = io_u->file;

	return io_u->offset + buflen <= f->io_size + get_start_offset(td, f);
}

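/*
 * Pick the next buffer length between min_bs and max_bs, honoring any
 * bssplit distribution and block size alignment, and making sure the
 * result still fits within the file.
 */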
static unsigned int __get_next_buflen(struct thread_data *td, struct io_u *io_u,
				      unsigned int is_random)
{
	int ddir = io_u->ddir;
	unsigned int buflen = 0;
	unsigned int minbs, maxbs;
	uint64_t frand_max, r;
	bool power_2;

	assert(ddir_rw(ddir));

	if (td->o.bs_is_seq_rand)
		ddir = is_random ? DDIR_WRITE: DDIR_READ;

	minbs = td->o.min_bs[ddir];
	maxbs = td->o.max_bs[ddir];

	if (minbs == maxbs)
		return minbs;

	/*
	 * If we can't satisfy the min block size from here, then fail
	 */
	if (!io_u_fits(td, io_u, minbs))
		return 0;

	frand_max = rand_max(&td->bsrange_state);
	do {
		r = __rand(&td->bsrange_state);

		if (!td->o.bssplit_nr[ddir]) {
			buflen = 1 + (unsigned int) ((double) maxbs *
					(r / (frand_max + 1.0)));
			if (buflen < minbs)
				buflen = minbs;
		} else {
			long long perc = 0;
			unsigned int i;

			for (i = 0; i < td->o.bssplit_nr[ddir]; i++) {
				struct bssplit *bsp = &td->o.bssplit[ddir][i];

				buflen = bsp->bs;
				perc += bsp->perc;
				if (!perc)
					break;
				if ((r / perc <= frand_max / 100ULL) &&
				    io_u_fits(td, io_u, buflen))
					break;
			}
		}

		power_2 = is_power_of_2(minbs);
		if (!td->o.bs_unaligned && power_2)
			buflen &= ~(minbs - 1);
		else if (!td->o.bs_unaligned && !power_2)
			buflen -= buflen % minbs;
	} while (!io_u_fits(td, io_u, buflen));

	return buflen;
}

static unsigned int get_next_buflen(struct thread_data *td, struct io_u *io_u,
				    unsigned int is_random)
{
	if (td->flags & TD_F_PROFILE_OPS) {
		struct prof_io_ops *ops = &td->prof_io_ops;

		if (ops->fill_io_u_size)
			return ops->fill_io_u_size(td, io_u, is_random);
	}

	return __get_next_buflen(td, io_u, is_random);
}

static void set_rwmix_bytes(struct thread_data *td)
{
	unsigned int diff;

	/*
	 * we do time or byte based switch. this is needed because
	 * buffered writes may issue a lot quicker than they complete,
	 * whereas reads do not.
	 */
	diff = td->o.rwmix[td->rwmix_ddir ^ 1];
	td->rwmix_issues = (td->io_issues[td->rwmix_ddir] * diff) / 100;
}

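/*
 * Pick a data direction according to the configured read/write mix.
 */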
static inline enum fio_ddir get_rand_ddir(struct thread_data *td)
{
	unsigned int v;

	v = rand32_between(&td->rwmix_state, 1, 100);

	if (v <= td->o.rwmix[DDIR_READ])
		return DDIR_READ;

	return DDIR_WRITE;
}

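/*
 * Commit anything pending and reap all in-flight IO. Returns the number
 * of io_us completed while quiescing.
 */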
int io_u_quiesce(struct thread_data *td)
{
	int completed = 0;

	/*
	 * We are going to sleep, ensure that we flush anything pending so
	 * as not to skew our latency numbers.
	 *
	 * Changed to only monitor 'in flight' requests here instead of the
	 * td->cur_depth, b/c td->cur_depth does not accurately represent
	 * io's that have been actually submitted to an async engine,
	 * and cur_depth is meaningless for sync engines.
	 */
	if (td->io_u_queued || td->cur_depth) {
		int fio_unused ret;

		ret = td_io_commit(td);
	}

	while (td->io_u_in_flight) {
		int ret;

		ret = io_u_queued_complete(td, 1);
		if (ret > 0)
			completed += ret;
	}

	if (td->flags & TD_F_REGROW_LOGS)
		regrow_logs(td);

	return completed;
}

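/*
 * If rate limiting is in effect, decide which direction may issue next and
 * sleep until the rate allows it, possibly switching direction for mixed
 * workloads.
 */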
static enum fio_ddir rate_ddir(struct thread_data *td, enum fio_ddir ddir)
{
	enum fio_ddir odir = ddir ^ 1;
	long usec;
	uint64_t now;

	assert(ddir_rw(ddir));
	now = utime_since_now(&td->start);

	/*
	 * if rate_next_io_time is in the past, need to catch up to rate
	 */
	if (td->rate_next_io_time[ddir] <= now)
		return ddir;

	/*
	 * We are ahead of rate in this direction. See if we
	 * should switch.
	 */
	if (td_rw(td) && td->o.rwmix[odir]) {
		/*
		 * Other direction is behind rate, switch
		 */
		if (td->rate_next_io_time[odir] <= now)
			return odir;

		/*
		 * Both directions are ahead of rate. Sleep the min
		 * amount, and switch if necessary.
		 */
		if (td->rate_next_io_time[ddir] <=
		    td->rate_next_io_time[odir]) {
			usec = td->rate_next_io_time[ddir] - now;
		} else {
			usec = td->rate_next_io_time[odir] - now;
			ddir = odir;
		}
	} else
		usec = td->rate_next_io_time[ddir] - now;

	if (td->o.io_submit_mode == IO_MODE_INLINE)
		io_u_quiesce(td);

	usec = usec_sleep(td, usec);

	return ddir;
}

/*
 * Return the data direction for the next io_u. If the job is a
 * mixed read/write workload, check the rwmix cycle and switch if
 * necessary.
 */
static enum fio_ddir get_rw_ddir(struct thread_data *td)
{
	enum fio_ddir ddir;

	/*
	 * See if it's time to fsync/fdatasync/sync_file_range first,
	 * and if not then move on to check regular I/Os.
	 */
	if (should_fsync(td)) {
		if (td->o.fsync_blocks && td->io_issues[DDIR_WRITE] &&
		    !(td->io_issues[DDIR_WRITE] % td->o.fsync_blocks))
			return DDIR_SYNC;

		if (td->o.fdatasync_blocks && td->io_issues[DDIR_WRITE] &&
		    !(td->io_issues[DDIR_WRITE] % td->o.fdatasync_blocks))
			return DDIR_DATASYNC;

		if (td->sync_file_range_nr && td->io_issues[DDIR_WRITE] &&
		    !(td->io_issues[DDIR_WRITE] % td->sync_file_range_nr))
			return DDIR_SYNC_FILE_RANGE;
	}

	if (td_rw(td)) {
		/*
		 * Check if it's time to seed a new data direction.
		 */
		if (td->io_issues[td->rwmix_ddir] >= td->rwmix_issues) {
			/*
			 * Put a top limit on how many bytes we do for
			 * one data direction, to avoid overflowing the
			 * ranges too much
			 */
			ddir = get_rand_ddir(td);

			if (ddir != td->rwmix_ddir)
				set_rwmix_bytes(td);

			td->rwmix_ddir = ddir;
		}
		ddir = td->rwmix_ddir;
	} else if (td_read(td))
		ddir = DDIR_READ;
	else if (td_write(td))
		ddir = DDIR_WRITE;
	else if (td_trim(td))
		ddir = DDIR_TRIM;
	else
		ddir = DDIR_INVAL;

	td->rwmix_ddir = rate_ddir(td, ddir);
	return td->rwmix_ddir;
}

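/*
 * Set the data direction on the io_u, handling trim+write workloads and
 * tagging barrier writes when the engine supports them.
 */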
static void set_rw_ddir(struct thread_data *td, struct io_u *io_u)
{
	enum fio_ddir ddir = get_rw_ddir(td);

	if (td_trimwrite(td)) {
		struct fio_file *f = io_u->file;
		if (f->last_pos[DDIR_WRITE] == f->last_pos[DDIR_TRIM])
			ddir = DDIR_TRIM;
		else
			ddir = DDIR_WRITE;
	}

	io_u->ddir = io_u->acct_ddir = ddir;

	if (io_u->ddir == DDIR_WRITE && td_ioengine_flagged(td, FIO_BARRIER) &&
	    td->o.barrier_blocks &&
	    !(td->io_issues[DDIR_WRITE] % td->o.barrier_blocks) &&
	    td->io_issues[DDIR_WRITE])
		io_u_set(td, io_u, IO_U_F_BARRIER);
}

void put_file_log(struct thread_data *td, struct fio_file *f)
{
	unsigned int ret = put_file(td, f);

	if (ret)
		td_verror(td, ret, "file close");
}

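/*
 * Release an io_u back to the free list, dropping the file reference and
 * adjusting the current depth.
 */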
void put_io_u(struct thread_data *td, struct io_u *io_u)
{
	if (td->parent)
		td = td->parent;

	td_io_u_lock(td);

	if (io_u->file && !(io_u->flags & IO_U_F_NO_FILE_PUT))
		put_file_log(td, io_u->file);

	io_u->file = NULL;
	io_u_set(td, io_u, IO_U_F_FREE);

	if (io_u->flags & IO_U_F_IN_CUR_DEPTH) {
		td->cur_depth--;
		assert(!(td->flags & TD_F_CHILD));
	}
	io_u_qpush(&td->io_u_freelist, io_u);
	td_io_u_unlock(td);
	td_io_u_free_notify(td);
}

void clear_io_u(struct thread_data *td, struct io_u *io_u)
{
	io_u_clear(td, io_u, IO_U_F_FLIGHT);
	put_io_u(td, io_u);
}

void requeue_io_u(struct thread_data *td, struct io_u **io_u)
{
	struct io_u *__io_u = *io_u;
	enum fio_ddir ddir = acct_ddir(__io_u);

	dprint(FD_IO, "requeue %p\n", __io_u);

	if (td->parent)
		td = td->parent;

	td_io_u_lock(td);

	io_u_set(td, __io_u, IO_U_F_FREE);
	if ((__io_u->flags & IO_U_F_FLIGHT) && ddir_rw(ddir))
		td->io_issues[ddir]--;

	io_u_clear(td, __io_u, IO_U_F_FLIGHT);
	if (__io_u->flags & IO_U_F_IN_CUR_DEPTH) {
		td->cur_depth--;
		assert(!(td->flags & TD_F_CHILD));
	}

	io_u_rpush(&td->io_u_requeues, __io_u);
	td_io_u_unlock(td);
	td_io_u_free_notify(td);
	*io_u = NULL;
}

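/*
 * Fill in direction, offset and length for an io_u, handling zone skips
 * and marking the random map. Returns non-zero if no valid IO could be
 * generated.
 */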
static int fill_io_u(struct thread_data *td, struct io_u *io_u)
{
	unsigned int is_random;

	if (td_ioengine_flagged(td, FIO_NOIO))
		goto out;

	set_rw_ddir(td, io_u);

	/*
	 * fsync() or fdatasync() or trim etc, we are done
	 */
	if (!ddir_rw(io_u->ddir))
		goto out;

	/*
	 * See if it's time to switch to a new zone
	 */
	if (td->zone_bytes >= td->o.zone_size && td->o.zone_skip) {
		struct fio_file *f = io_u->file;

		td->zone_bytes = 0;
		f->file_offset += td->o.zone_range + td->o.zone_skip;

		/*
		 * Wrap from the beginning, if we exceed the file size
		 */
		if (f->file_offset >= f->real_file_size)
			f->file_offset = f->real_file_size - f->file_offset;
		f->last_pos[io_u->ddir] = f->file_offset;
		td->io_skip_bytes += td->o.zone_skip;
	}

	/*
	 * No log, let the seq/rand engine retrieve the next buflen and
	 * position.
	 */
	if (get_next_offset(td, io_u, &is_random)) {
		dprint(FD_IO, "io_u %p, failed getting offset\n", io_u);
		return 1;
	}

	io_u->buflen = get_next_buflen(td, io_u, is_random);
	if (!io_u->buflen) {
		dprint(FD_IO, "io_u %p, failed getting buflen\n", io_u);
		return 1;
	}

	if (io_u->offset + io_u->buflen > io_u->file->real_file_size) {
		dprint(FD_IO, "io_u %p, offset + buflen exceeds file size\n",
			io_u);
		dprint(FD_IO, "  offset=%llu/buflen=%lu > %llu\n",
			(unsigned long long) io_u->offset, io_u->buflen,
			(unsigned long long) io_u->file->real_file_size);
		return 1;
	}

	/*
	 * mark entry before potentially trimming io_u
	 */
	if (td_random(td) && file_randommap(td, io_u->file))
		mark_random_map(td, io_u);

out:
	dprint_io_u(io_u, "fill_io_u");
	td->zone_bytes += io_u->buflen;
	return 0;
}

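/*
 * Bucket a submit/complete batch size into its histogram slot
 * (power-of-two style ranges up to 64, with one bucket for anything larger).
 */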
static void __io_u_mark_map(unsigned int *map, unsigned int nr)
{
	int idx = 0;

	switch (nr) {
	default:
		idx = 6;
		break;
	case 33 ... 64:
		idx = 5;
		break;
	case 17 ... 32:
		idx = 4;
		break;
	case 9 ... 16:
		idx = 3;
		break;
	case 5 ... 8:
		idx = 2;
		break;
	case 1 ... 4:
		idx = 1;
	case 0:
		break;
	}

	map[idx]++;
}

void io_u_mark_submit(struct thread_data *td, unsigned int nr)
{
	__io_u_mark_map(td->ts.io_u_submit, nr);
	td->ts.total_submit++;
}

void io_u_mark_complete(struct thread_data *td, unsigned int nr)
{
	__io_u_mark_map(td->ts.io_u_complete, nr);
	td->ts.total_complete++;
}

void io_u_mark_depth(struct thread_data *td, unsigned int nr)
{
	int idx = 0;

	switch (td->cur_depth) {
	default:
		idx = 6;
		break;
	case 32 ... 63:
		idx = 5;
		break;
	case 16 ... 31:
		idx = 4;
		break;
	case 8 ... 15:
		idx = 3;
		break;
	case 4 ... 7:
		idx = 2;
		break;
	case 2 ... 3:
		idx = 1;
	case 1:
		break;
	}

	td->ts.io_u_map[idx] += nr;
}

static void io_u_mark_lat_usec(struct thread_data *td, unsigned long usec)
{
	int idx = 0;

	assert(usec < 1000);

	switch (usec) {
	case 750 ... 999:
		idx = 9;
		break;
	case 500 ... 749:
		idx = 8;
		break;
	case 250 ... 499:
		idx = 7;
		break;
	case 100 ... 249:
		idx = 6;
		break;
	case 50 ... 99:
		idx = 5;
		break;
	case 20 ... 49:
		idx = 4;
		break;
	case 10 ... 19:
		idx = 3;
		break;
	case 4 ... 9:
		idx = 2;
		break;
	case 2 ... 3:
		idx = 1;
	case 0 ... 1:
		break;
	}

	assert(idx < FIO_IO_U_LAT_U_NR);
	td->ts.io_u_lat_u[idx]++;
}

static void io_u_mark_lat_msec(struct thread_data *td, unsigned long msec)
{
	int idx = 0;

	switch (msec) {
	default:
		idx = 11;
		break;
	case 1000 ... 1999:
		idx = 10;
		break;
	case 750 ... 999:
		idx = 9;
		break;
	case 500 ... 749:
		idx = 8;
		break;
	case 250 ... 499:
		idx = 7;
		break;
	case 100 ... 249:
		idx = 6;
		break;
	case 50 ... 99:
		idx = 5;
		break;
	case 20 ... 49:
		idx = 4;
		break;
	case 10 ... 19:
		idx = 3;
		break;
	case 4 ... 9:
		idx = 2;
		break;
	case 2 ... 3:
		idx = 1;
	case 0 ... 1:
		break;
	}

	assert(idx < FIO_IO_U_LAT_M_NR);
	td->ts.io_u_lat_m[idx]++;
}

static void io_u_mark_latency(struct thread_data *td, unsigned long usec)
{
	if (usec < 1000)
		io_u_mark_lat_usec(td, usec);
	else
		io_u_mark_lat_msec(td, usec / 1000);
}

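/*
 * Pick a file index according to the configured file_service_type
 * distribution (random, zipf, pareto or gauss).
 */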
Elliott Hugheseda3a602017-05-19 18:53:02 -07001086static unsigned int __get_next_fileno_rand(struct thread_data *td)
1087{
1088 unsigned long fileno;
1089
1090 if (td->o.file_service_type == FIO_FSERVICE_RANDOM) {
1091 uint64_t frand_max = rand_max(&td->next_file_state);
1092 unsigned long r;
1093
1094 r = __rand(&td->next_file_state);
1095 return (unsigned int) ((double) td->o.nr_files
1096 * (r / (frand_max + 1.0)));
1097 }
1098
1099 if (td->o.file_service_type == FIO_FSERVICE_ZIPF)
1100 fileno = zipf_next(&td->next_file_zipf);
1101 else if (td->o.file_service_type == FIO_FSERVICE_PARETO)
1102 fileno = pareto_next(&td->next_file_zipf);
1103 else if (td->o.file_service_type == FIO_FSERVICE_GAUSS)
1104 fileno = gauss_next(&td->next_file_gauss);
1105 else {
1106 log_err("fio: bad file service type: %d\n", td->o.file_service_type);
1107 assert(0);
1108 return 0;
1109 }
1110
1111 return fileno >> FIO_FSERVICE_SHIFT;
1112}
1113
Jens Axboe0aabe162007-02-23 08:45:55 +01001114/*
1115 * Get next file to service by choosing one at random
1116 */
Jens Axboe2cc52932009-06-09 14:14:20 +02001117static struct fio_file *get_next_file_rand(struct thread_data *td,
1118 enum fio_file_flags goodf,
Jens Axboed6aed792009-06-03 08:41:15 +02001119 enum fio_file_flags badf)
Jens Axboe0aabe162007-02-23 08:45:55 +01001120{
Jens Axboe0aabe162007-02-23 08:45:55 +01001121 struct fio_file *f;
Jens Axboe1c178182007-03-13 13:25:18 +01001122 int fno;
Jens Axboe0aabe162007-02-23 08:45:55 +01001123
1124 do {
Jens Axboe87b10672009-03-04 09:39:47 +01001125 int opened = 0;
Jens Axboe7c83c082007-03-01 10:04:15 +01001126
Elliott Hugheseda3a602017-05-19 18:53:02 -07001127 fno = __get_next_fileno_rand(td);
Jens Axboe4c07ad82011-03-28 09:51:09 +02001128
Jens Axboe126d65c2008-03-01 18:04:31 +01001129 f = td->files[fno];
Jens Axboed6aed792009-06-03 08:41:15 +02001130 if (fio_file_done(f))
Jens Axboe059e63c2007-03-27 20:34:47 +02001131 continue;
Jens Axboe1c178182007-03-13 13:25:18 +01001132
Jens Axboed6aed792009-06-03 08:41:15 +02001133 if (!fio_file_open(f)) {
Jens Axboe87b10672009-03-04 09:39:47 +01001134 int err;
1135
Jens Axboe002fe732014-02-11 08:31:13 -07001136 if (td->nr_open_files >= td->o.open_files)
1137 return ERR_PTR(-EBUSY);
1138
Jens Axboe87b10672009-03-04 09:39:47 +01001139 err = td_io_open_file(td, f);
1140 if (err)
1141 continue;
1142 opened = 1;
1143 }
1144
Jens Axboe2ba1c292008-02-01 13:16:38 +01001145 if ((!goodf || (f->flags & goodf)) && !(f->flags & badf)) {
1146 dprint(FD_FILE, "get_next_file_rand: %p\n", f);
Jens Axboe0aabe162007-02-23 08:45:55 +01001147 return f;
Jens Axboe2ba1c292008-02-01 13:16:38 +01001148 }
Jens Axboe87b10672009-03-04 09:39:47 +01001149 if (opened)
1150 td_io_close_file(td, f);
Jens Axboe0aabe162007-02-23 08:45:55 +01001151 } while (1);
1152}
1153
1154/*
1155 * Get next file to service by doing round robin between all available ones
1156 */
Jens Axboe1c178182007-03-13 13:25:18 +01001157static struct fio_file *get_next_file_rr(struct thread_data *td, int goodf,
1158 int badf)
Jens Axboe3d7c3912007-02-19 13:16:12 +01001159{
1160 unsigned int old_next_file = td->next_file;
1161 struct fio_file *f;
1162
1163 do {
Jens Axboe87b10672009-03-04 09:39:47 +01001164 int opened = 0;
1165
Jens Axboe126d65c2008-03-01 18:04:31 +01001166 f = td->files[td->next_file];
Jens Axboe3d7c3912007-02-19 13:16:12 +01001167
1168 td->next_file++;
Jens Axboe2dc1bbe2007-03-15 15:01:33 +01001169 if (td->next_file >= td->o.nr_files)
Jens Axboe3d7c3912007-02-19 13:16:12 +01001170 td->next_file = 0;
1171
Jens Axboe87b10672009-03-04 09:39:47 +01001172 dprint(FD_FILE, "trying file %s %x\n", f->file_name, f->flags);
Jens Axboed6aed792009-06-03 08:41:15 +02001173 if (fio_file_done(f)) {
Jens Axboed5ed68e2007-03-28 09:33:43 +02001174 f = NULL;
Jens Axboe059e63c2007-03-27 20:34:47 +02001175 continue;
Jens Axboed5ed68e2007-03-28 09:33:43 +02001176 }
Jens Axboe059e63c2007-03-27 20:34:47 +02001177
Jens Axboed6aed792009-06-03 08:41:15 +02001178 if (!fio_file_open(f)) {
Jens Axboe87b10672009-03-04 09:39:47 +01001179 int err;
1180
Jens Axboe002fe732014-02-11 08:31:13 -07001181 if (td->nr_open_files >= td->o.open_files)
1182 return ERR_PTR(-EBUSY);
1183
Jens Axboe87b10672009-03-04 09:39:47 +01001184 err = td_io_open_file(td, f);
Jens Axboeb5696bf2009-03-04 16:03:49 +01001185 if (err) {
1186 dprint(FD_FILE, "error %d on open of %s\n",
1187 err, f->file_name);
Jens Axboe87c27b42009-05-20 10:45:12 +02001188 f = NULL;
Jens Axboe87b10672009-03-04 09:39:47 +01001189 continue;
Jens Axboeb5696bf2009-03-04 16:03:49 +01001190 }
Jens Axboe87b10672009-03-04 09:39:47 +01001191 opened = 1;
1192 }
1193
Jens Axboe0b9d69e2009-09-11 22:29:54 +02001194 dprint(FD_FILE, "goodf=%x, badf=%x, ff=%x\n", goodf, badf,
1195 f->flags);
Jens Axboe1c178182007-03-13 13:25:18 +01001196 if ((!goodf || (f->flags & goodf)) && !(f->flags & badf))
Jens Axboe3d7c3912007-02-19 13:16:12 +01001197 break;
1198
Jens Axboe87b10672009-03-04 09:39:47 +01001199 if (opened)
1200 td_io_close_file(td, f);
1201
Jens Axboe3d7c3912007-02-19 13:16:12 +01001202 f = NULL;
1203 } while (td->next_file != old_next_file);
1204
Jens Axboe2ba1c292008-02-01 13:16:38 +01001205 dprint(FD_FILE, "get_next_file_rr: %p\n", f);
Jens Axboe3d7c3912007-02-19 13:16:12 +01001206 return f;
1207}
1208
Jens Axboe7eb36572010-03-08 13:58:49 +01001209static struct fio_file *__get_next_file(struct thread_data *td)
Jens Axboebdb4e2e2007-03-01 09:54:57 +01001210{
Jens Axboe1907dbc2007-03-12 11:44:28 +01001211 struct fio_file *f;
1212
Jens Axboe2dc1bbe2007-03-15 15:01:33 +01001213 assert(td->o.nr_files <= td->files_index);
Jens Axboe1c178182007-03-13 13:25:18 +01001214
Jens Axboeb5696bf2009-03-04 16:03:49 +01001215 if (td->nr_done_files >= td->o.nr_files) {
Jens Axboe5ec10ea2008-03-06 15:42:00 +01001216 dprint(FD_FILE, "get_next_file: nr_open=%d, nr_done=%d,"
1217 " nr_files=%d\n", td->nr_open_files,
1218 td->nr_done_files,
1219 td->o.nr_files);
Jens Axboebdb4e2e2007-03-01 09:54:57 +01001220 return NULL;
Jens Axboe2ba1c292008-02-01 13:16:38 +01001221 }
Jens Axboebdb4e2e2007-03-01 09:54:57 +01001222
Jens Axboe1907dbc2007-03-12 11:44:28 +01001223 f = td->file_service_file;
Jens Axboed6aed792009-06-03 08:41:15 +02001224 if (f && fio_file_open(f) && !fio_file_closing(f)) {
Jens Axboea086c252009-03-04 08:27:37 +01001225 if (td->o.file_service_type == FIO_FSERVICE_SEQ)
1226 goto out;
1227 if (td->file_service_left--)
1228 goto out;
1229 }
Jens Axboe1907dbc2007-03-12 11:44:28 +01001230
Jens Axboea086c252009-03-04 08:27:37 +01001231 if (td->o.file_service_type == FIO_FSERVICE_RR ||
1232 td->o.file_service_type == FIO_FSERVICE_SEQ)
Jens Axboed6aed792009-06-03 08:41:15 +02001233 f = get_next_file_rr(td, FIO_FILE_open, FIO_FILE_closing);
Jens Axboebdb4e2e2007-03-01 09:54:57 +01001234 else
Jens Axboed6aed792009-06-03 08:41:15 +02001235 f = get_next_file_rand(td, FIO_FILE_open, FIO_FILE_closing);
Jens Axboe1907dbc2007-03-12 11:44:28 +01001236
Jens Axboe002fe732014-02-11 08:31:13 -07001237 if (IS_ERR(f))
1238 return f;
1239
Jens Axboe1907dbc2007-03-12 11:44:28 +01001240 td->file_service_file = f;
1241 td->file_service_left = td->file_service_nr - 1;
Jens Axboe2ba1c292008-02-01 13:16:38 +01001242out:
Jens Axboe0dac4212014-02-25 13:43:17 -08001243 if (f)
1244 dprint(FD_FILE, "get_next_file: %p [%s]\n", f, f->file_name);
1245 else
1246 dprint(FD_FILE, "get_next_file: NULL\n");
Jens Axboe1907dbc2007-03-12 11:44:28 +01001247 return f;
Jens Axboebdb4e2e2007-03-01 09:54:57 +01001248}
1249
Jens Axboe7eb36572010-03-08 13:58:49 +01001250static struct fio_file *get_next_file(struct thread_data *td)
1251{
Andrey Kuzmin705fa7e2014-06-27 20:21:22 -06001252 if (td->flags & TD_F_PROFILE_OPS) {
Jens Axboed72be542012-11-30 19:37:46 +01001253 struct prof_io_ops *ops = &td->prof_io_ops;
Jens Axboe7eb36572010-03-08 13:58:49 +01001254
Jens Axboed72be542012-11-30 19:37:46 +01001255 if (ops->get_next_file)
1256 return ops->get_next_file(td);
1257 }
Jens Axboe7eb36572010-03-08 13:58:49 +01001258
1259 return __get_next_file(td);
1260}
1261
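/*
 * Attach a file to the io_u and fill in its offset/buflen. Files that can
 * no longer serve IO are either reset (non-uniform service types) or marked
 * done, and we move on to the next one. Returns 0 on success or a negative
 * error.
 */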
Jens Axboe002fe732014-02-11 08:31:13 -07001262static long set_io_u_file(struct thread_data *td, struct io_u *io_u)
Jens Axboe429f6672007-07-23 10:38:43 +02001263{
1264 struct fio_file *f;
1265
1266 do {
1267 f = get_next_file(td);
Jens Axboe002fe732014-02-11 08:31:13 -07001268 if (IS_ERR_OR_NULL(f))
1269 return PTR_ERR(f);
Jens Axboe429f6672007-07-23 10:38:43 +02001270
Jens Axboe429f6672007-07-23 10:38:43 +02001271 io_u->file = f;
1272 get_file(f);
1273
1274 if (!fill_io_u(td, io_u))
1275 break;
1276
Jens Axboeb5696bf2009-03-04 16:03:49 +01001277 put_file_log(td, f);
Jens Axboe429f6672007-07-23 10:38:43 +02001278 td_io_close_file(td, f);
Jens Axboeb5696bf2009-03-04 16:03:49 +01001279 io_u->file = NULL;
Elliott Hugheseda3a602017-05-19 18:53:02 -07001280 if (td->o.file_service_type & __FIO_FSERVICE_NONUNIFORM)
1281 fio_file_reset(td, f);
1282 else {
1283 fio_file_set_done(f);
1284 td->nr_done_files++;
1285 dprint(FD_FILE, "%s: is done (%d of %d)\n", f->file_name,
Jens Axboe0b9d69e2009-09-11 22:29:54 +02001286 td->nr_done_files, td->o.nr_files);
Elliott Hugheseda3a602017-05-19 18:53:02 -07001287 }
Jens Axboe429f6672007-07-23 10:38:43 +02001288 } while (1);
1289
1290 return 0;
1291}
1292
Jens Axboe3e260a42013-12-09 12:38:53 -07001293static void lat_fatal(struct thread_data *td, struct io_completion_data *icd,
1294 unsigned long tusec, unsigned long max_usec)
1295{
1296 if (!td->error)
1297 log_err("fio: latency of %lu usec exceeds specified max (%lu usec)\n", tusec, max_usec);
1298 td_verror(td, ETIMEDOUT, "max latency exceeded");
1299 icd->error = ETIMEDOUT;
1300}
1301
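/*
 * Start a new latency measurement window: note the current time and IO
 * count and clear the failure counter.
 */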
1302static void lat_new_cycle(struct thread_data *td)
1303{
1304 fio_gettime(&td->latency_ts, NULL);
1305 td->latency_ios = ddir_rw_sum(td->io_blocks);
1306 td->latency_failed = 0;
1307}
1308
1309/*
1310 * We had an IO outside the latency target. Reduce the queue depth. If we
1311 * are at QD=1, then it's time to give up.
1312 */
Elliott Hugheseda3a602017-05-19 18:53:02 -07001313static bool __lat_target_failed(struct thread_data *td)
Jens Axboe3e260a42013-12-09 12:38:53 -07001314{
1315 if (td->latency_qd == 1)
Elliott Hugheseda3a602017-05-19 18:53:02 -07001316 return true;
Jens Axboe3e260a42013-12-09 12:38:53 -07001317
1318 td->latency_qd_high = td->latency_qd;
Jens Axboe6bb58212014-02-21 13:55:31 -08001319
1320 if (td->latency_qd == td->latency_qd_low)
1321 td->latency_qd_low--;
1322
Jens Axboe3e260a42013-12-09 12:38:53 -07001323 td->latency_qd = (td->latency_qd + td->latency_qd_low) / 2;
1324
1325 dprint(FD_RATE, "Ramped down: %d %d %d\n", td->latency_qd_low, td->latency_qd, td->latency_qd_high);
1326
1327 /*
1328 * When we ramp QD down, quiesce existing IO to prevent
1329 * a storm of ramp downs due to pending higher depth.
1330 */
1331 io_u_quiesce(td);
1332 lat_new_cycle(td);
Elliott Hugheseda3a602017-05-19 18:53:02 -07001333 return false;
Jens Axboe3e260a42013-12-09 12:38:53 -07001334}
1335
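/*
 * An IO exceeded the latency target. With a 100% percentile every miss
 * forces an immediate ramp down; otherwise just count the miss and let
 * lat_target_check() evaluate the whole window.
 */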
Elliott Hugheseda3a602017-05-19 18:53:02 -07001336static bool lat_target_failed(struct thread_data *td)
Jens Axboe3e260a42013-12-09 12:38:53 -07001337{
1338 if (td->o.latency_percentile.u.f == 100.0)
1339 return __lat_target_failed(td);
1340
1341 td->latency_failed++;
Elliott Hugheseda3a602017-05-19 18:53:02 -07001342 return false;
Jens Axboe3e260a42013-12-09 12:38:53 -07001343}
1344
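/*
 * Set up the latency target state. A rough sketch of the job options that
 * drive this logic (the values below are only illustrative):
 *
 *	latency_target=50	usec we try to stay under
 *	latency_window=1000000	usec per measurement window
 *	latency_percentile=99.0	required fraction of IOs under target
 *
 * With a target set we start at QD=1 and ramp up; without one we simply run
 * at the full configured iodepth.
 */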
1345void lat_target_init(struct thread_data *td)
1346{
Jens Axboe6bb58212014-02-21 13:55:31 -08001347 td->latency_end_run = 0;
1348
Jens Axboe3e260a42013-12-09 12:38:53 -07001349 if (td->o.latency_target) {
1350 dprint(FD_RATE, "Latency target=%llu\n", td->o.latency_target);
1351 fio_gettime(&td->latency_ts, NULL);
1352 td->latency_qd = 1;
1353 td->latency_qd_high = td->o.iodepth;
1354 td->latency_qd_low = 1;
1355 td->latency_ios = ddir_rw_sum(td->io_blocks);
1356 } else
1357 td->latency_qd = td->o.iodepth;
1358}
1359
Jens Axboe6bb58212014-02-21 13:55:31 -08001360void lat_target_reset(struct thread_data *td)
1361{
1362 if (!td->latency_end_run)
1363 lat_target_init(td);
1364}
1365
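/*
 * The previous window met the latency target at this queue depth, so try a
 * higher depth, or finish up if we have converged.
 */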
Jens Axboe3e260a42013-12-09 12:38:53 -07001366static void lat_target_success(struct thread_data *td)
1367{
1368 const unsigned int qd = td->latency_qd;
Jens Axboe6bb58212014-02-21 13:55:31 -08001369 struct thread_options *o = &td->o;
Jens Axboe3e260a42013-12-09 12:38:53 -07001370
1371 td->latency_qd_low = td->latency_qd;
1372
1373 /*
	 * If we haven't failed yet, we double up to a failing value instead
	 * of bisecting down from the highest possible queue depth. If a
	 * limit other than td->o.iodepth has been set, bisect between the
	 * current depth and that limit.
	 */
1377 */
Jens Axboe6bb58212014-02-21 13:55:31 -08001378 if (td->latency_qd_high != o->iodepth)
Jens Axboe3e260a42013-12-09 12:38:53 -07001379 td->latency_qd = (td->latency_qd + td->latency_qd_high) / 2;
1380 else
1381 td->latency_qd *= 2;
1382
Jens Axboe6bb58212014-02-21 13:55:31 -08001383 if (td->latency_qd > o->iodepth)
1384 td->latency_qd = o->iodepth;
Jens Axboe3e260a42013-12-09 12:38:53 -07001385
1386 dprint(FD_RATE, "Ramped up: %d %d %d\n", td->latency_qd_low, td->latency_qd, td->latency_qd_high);
Jens Axboe6bb58212014-02-21 13:55:31 -08001387
Jens Axboe3e260a42013-12-09 12:38:53 -07001388 /*
	 * If the new depth matches the previous one, we have converged. Let
	 * it run one more latency cycle, so we only get results from the
	 * targeted depth.
Jens Axboe3e260a42013-12-09 12:38:53 -07001391 */
Jens Axboe6bb58212014-02-21 13:55:31 -08001392 if (td->latency_qd == qd) {
1393 if (td->latency_end_run) {
1394 dprint(FD_RATE, "We are done\n");
1395 td->done = 1;
1396 } else {
1397 dprint(FD_RATE, "Quiesce and final run\n");
1398 io_u_quiesce(td);
1399 td->latency_end_run = 1;
1400 reset_all_stats(td);
1401 reset_io_stats(td);
1402 }
1403 }
Jens Axboe3e260a42013-12-09 12:38:53 -07001404
1405 lat_new_cycle(td);
1406}
1407
1408/*
1409 * Check if we can bump the queue depth
1410 */
1411void lat_target_check(struct thread_data *td)
1412{
1413 uint64_t usec_window;
1414 uint64_t ios;
1415 double success_ios;
1416
1417 usec_window = utime_since_now(&td->latency_ts);
1418 if (usec_window < td->o.latency_window)
1419 return;
1420
1421 ios = ddir_rw_sum(td->io_blocks) - td->latency_ios;
1422 success_ios = (double) (ios - td->latency_failed) / (double) ios;
1423 success_ios *= 100.0;
1424
1425 dprint(FD_RATE, "Success rate: %.2f%% (target %.2f%%)\n", success_ios, td->o.latency_percentile.u.f);
1426
1427 if (success_ios >= td->o.latency_percentile.u.f)
1428 lat_target_success(td);
1429 else
1430 __lat_target_failed(td);
1431}
1432
1433/*
1434 * If latency target is enabled, we might be ramping up or down and not
1435 * using the full queue depth available.
1436 */
Elliott Hugheseda3a602017-05-19 18:53:02 -07001437bool queue_full(const struct thread_data *td)
Jens Axboe3e260a42013-12-09 12:38:53 -07001438{
1439 const int qempty = io_u_qempty(&td->io_u_freelist);
1440
1441 if (qempty)
Elliott Hugheseda3a602017-05-19 18:53:02 -07001442 return true;
Jens Axboe3e260a42013-12-09 12:38:53 -07001443 if (!td->o.latency_target)
Elliott Hugheseda3a602017-05-19 18:53:02 -07001444 return false;
Jens Axboe3e260a42013-12-09 12:38:53 -07001445
1446 return td->cur_depth >= td->latency_qd;
1447}
Jens Axboe429f6672007-07-23 10:38:43 +02001448
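/*
 * Grab a free io_u, preferring requeued ones. Runs under the io_u lock; if
 * everything is in flight with async verify offload, wait for a verify
 * thread to hand one back.
 */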
Jens Axboe10ba5352006-10-20 11:39:27 +02001449struct io_u *__get_io_u(struct thread_data *td)
1450{
Jens Axboe0cae66f2014-03-03 13:55:32 -07001451 struct io_u *io_u = NULL;
Jens Axboe10ba5352006-10-20 11:39:27 +02001452
Jens Axboede54cfd2014-11-10 20:34:00 -07001453 if (td->stop_io)
1454 return NULL;
1455
Jens Axboee8462bd2009-07-06 12:59:04 +02001456 td_io_u_lock(td);
1457
1458again:
Jens Axboe2ae0b202013-05-28 14:16:55 +02001459 if (!io_u_rempty(&td->io_u_requeues))
1460 io_u = io_u_rpop(&td->io_u_requeues);
Jens Axboe3e260a42013-12-09 12:38:53 -07001461 else if (!queue_full(td)) {
Jens Axboe2ae0b202013-05-28 14:16:55 +02001462 io_u = io_u_qpop(&td->io_u_freelist);
Jens Axboe10ba5352006-10-20 11:39:27 +02001463
Jens Axboe225ba9e2014-02-26 14:31:15 -08001464 io_u->file = NULL;
Jens Axboe6040dab2006-10-24 19:38:15 +02001465 io_u->buflen = 0;
Jens Axboe10ba5352006-10-20 11:39:27 +02001466 io_u->resid = 0;
Jens Axboed7762cf2007-02-23 12:34:57 +01001467 io_u->end_io = NULL;
Jens Axboe755200a2007-02-19 13:08:12 +01001468 }
1469
1470 if (io_u) {
Jens Axboe0c6e7512007-02-22 11:19:39 +01001471 assert(io_u->flags & IO_U_F_FREE);
Elliott Hugheseda3a602017-05-19 18:53:02 -07001472 io_u_clear(td, io_u, IO_U_F_FREE | IO_U_F_NO_FILE_PUT |
Jens Axboee69fdf72014-07-23 16:11:43 +02001473 IO_U_F_TRIMMED | IO_U_F_BARRIER |
1474 IO_U_F_VER_LIST);
Jens Axboe0c6e7512007-02-22 11:19:39 +01001475
Jens Axboe755200a2007-02-19 13:08:12 +01001476 io_u->error = 0;
Jens Axboebcd5abf2013-01-23 09:27:25 -07001477 io_u->acct_ddir = -1;
Jens Axboe10ba5352006-10-20 11:39:27 +02001478 td->cur_depth++;
Elliott Hugheseda3a602017-05-19 18:53:02 -07001479 assert(!(td->flags & TD_F_CHILD));
1480 io_u_set(td, io_u, IO_U_F_IN_CUR_DEPTH);
Jens Axboef9401282014-02-06 12:17:37 -07001481 io_u->ipo = NULL;
Elliott Hugheseda3a602017-05-19 18:53:02 -07001482 } else if (td_async_processing(td)) {
Jens Axboe1dec3e02010-03-19 10:33:39 +01001483 /*
1484 * We ran out, wait for async verify threads to finish and
1485 * return one
1486 */
Elliott Hugheseda3a602017-05-19 18:53:02 -07001487 assert(!(td->flags & TD_F_CHILD));
1488 assert(!pthread_cond_wait(&td->free_cond, &td->io_u_lock));
Jens Axboe1dec3e02010-03-19 10:33:39 +01001489 goto again;
Jens Axboe10ba5352006-10-20 11:39:27 +02001490 }
1491
Jens Axboee8462bd2009-07-06 12:59:04 +02001492 td_io_u_unlock(td);
Jens Axboe10ba5352006-10-20 11:39:27 +02001493 return io_u;
1494}
1495
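/*
 * If trim backlog is enabled, see if it's time to turn this io_u into a
 * trim of a previously written block instead of doing new IO.
 */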
Elliott Hugheseda3a602017-05-19 18:53:02 -07001496static bool check_get_trim(struct thread_data *td, struct io_u *io_u)
Jens Axboe10ba5352006-10-20 11:39:27 +02001497{
Jens Axboed72be542012-11-30 19:37:46 +01001498 if (!(td->flags & TD_F_TRIM_BACKLOG))
Elliott Hugheseda3a602017-05-19 18:53:02 -07001499 return false;
Jens Axboed72be542012-11-30 19:37:46 +01001500
1501 if (td->trim_entries) {
Jens Axboe0d29de82010-09-01 13:54:15 +02001502 int get_trim = 0;
Jens Axboe10ba5352006-10-20 11:39:27 +02001503
Jens Axboe0d29de82010-09-01 13:54:15 +02001504 if (td->trim_batch) {
1505 td->trim_batch--;
1506 get_trim = 1;
1507 } else if (!(td->io_hist_len % td->o.trim_backlog) &&
1508 td->last_ddir != DDIR_READ) {
1509 td->trim_batch = td->o.trim_batch;
1510 if (!td->trim_batch)
1511 td->trim_batch = td->o.trim_backlog;
1512 get_trim = 1;
1513 }
1514
Elliott Hugheseda3a602017-05-19 18:53:02 -07001515 if (get_trim && get_next_trim(td, io_u))
1516 return true;
Jens Axboe2ba1c292008-02-01 13:16:38 +01001517 }
Jens Axboe10ba5352006-10-20 11:39:27 +02001518
Elliott Hugheseda3a602017-05-19 18:53:02 -07001519 return false;
Jens Axboe0d29de82010-09-01 13:54:15 +02001520}
1521
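/*
 * If verify backlog is enabled, see if this io_u should verify a previously
 * written block instead of doing new IO.
 */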
Elliott Hugheseda3a602017-05-19 18:53:02 -07001522static bool check_get_verify(struct thread_data *td, struct io_u *io_u)
Jens Axboe0d29de82010-09-01 13:54:15 +02001523{
Jens Axboed72be542012-11-30 19:37:46 +01001524 if (!(td->flags & TD_F_VER_BACKLOG))
Elliott Hugheseda3a602017-05-19 18:53:02 -07001525 return false;
Jens Axboed72be542012-11-30 19:37:46 +01001526
1527 if (td->io_hist_len) {
Jens Axboe9e144182010-06-15 14:25:36 +02001528 int get_verify = 0;
1529
Jens Axboed1ece0c2012-03-07 09:32:58 +01001530 if (td->verify_batch)
Jens Axboe9e144182010-06-15 14:25:36 +02001531 get_verify = 1;
Jens Axboed1ece0c2012-03-07 09:32:58 +01001532 else if (!(td->io_hist_len % td->o.verify_backlog) &&
Jens Axboe9e144182010-06-15 14:25:36 +02001533 td->last_ddir != DDIR_READ) {
1534 td->verify_batch = td->o.verify_batch;
Jens Axboef8a75c92010-06-15 14:27:28 +02001535 if (!td->verify_batch)
1536 td->verify_batch = td->o.verify_backlog;
Jens Axboe9e144182010-06-15 14:25:36 +02001537 get_verify = 1;
1538 }
1539
Jens Axboed1ece0c2012-03-07 09:32:58 +01001540 if (get_verify && !get_next_verify(td, io_u)) {
1541 td->verify_batch--;
Elliott Hugheseda3a602017-05-19 18:53:02 -07001542 return true;
Jens Axboed1ece0c2012-03-07 09:32:58 +01001543 }
Jens Axboe9e144182010-06-15 14:25:36 +02001544 }
1545
Elliott Hugheseda3a602017-05-19 18:53:02 -07001546 return false;
Jens Axboe0d29de82010-09-01 13:54:15 +02001547}
1548
1549/*
 * Fill the offset and start time into the buffer content, so the data is
 * not trivially compressible or de-dupable. Do this for every
1552 * 512b block in the range, since that should be the smallest block size
1553 * we can expect from a device.
Jens Axboede789762011-09-16 22:11:23 +02001554 */
1555static void small_content_scramble(struct io_u *io_u)
1556{
Jens Axboe23f394d2011-09-16 22:45:27 +02001557 unsigned int i, nr_blocks = io_u->buflen / 512;
Jens Axboe1ae83d42013-01-12 01:44:15 -07001558 uint64_t boffset;
Jens Axboe23f394d2011-09-16 22:45:27 +02001559 unsigned int offset;
1560 void *p, *end;
Jens Axboede789762011-09-16 22:11:23 +02001561
Jens Axboe23f394d2011-09-16 22:45:27 +02001562 if (!nr_blocks)
1563 return;
1564
1565 p = io_u->xfer_buf;
Jens Axboefba76ee2011-09-27 14:27:48 -06001566 boffset = io_u->offset;
Jens Axboe81f03662012-02-02 09:20:09 +01001567 io_u->buf_filled_len = 0;
Jens Axboefad82f72011-09-19 11:33:30 +02001568
Jens Axboe23f394d2011-09-16 22:45:27 +02001569 for (i = 0; i < nr_blocks; i++) {
1570 /*
		 * Fill the byte offset into a "random" start offset within
		 * the buffer, derived by XORing the usec time with the
		 * block offset.
1574 */
Jens Axboefad82f72011-09-19 11:33:30 +02001575 offset = (io_u->start_time.tv_usec ^ boffset) & 511;
Jens Axboe1ae83d42013-01-12 01:44:15 -07001576 offset &= ~(sizeof(uint64_t) - 1);
1577 if (offset >= 512 - sizeof(uint64_t))
1578 offset -= sizeof(uint64_t);
Jens Axboefba76ee2011-09-27 14:27:48 -06001579 memcpy(p + offset, &boffset, sizeof(boffset));
Jens Axboe23f394d2011-09-16 22:45:27 +02001580
1581 end = p + 512 - sizeof(io_u->start_time);
1582 memcpy(end, &io_u->start_time, sizeof(io_u->start_time));
1583 p += 512;
Jens Axboefad82f72011-09-19 11:33:30 +02001584 boffset += 512;
Jens Axboe23f394d2011-09-16 22:45:27 +02001585 }
Jens Axboede789762011-09-16 22:11:23 +02001586}
1587
1588/*
Jens Axboe0d29de82010-09-01 13:54:15 +02001589 * Return an io_u to be processed. Gets a buflen and offset, sets direction,
1590 * etc. The returned io_u is fully ready to be prepped and submitted.
1591 */
1592struct io_u *get_io_u(struct thread_data *td)
1593{
1594 struct fio_file *f;
1595 struct io_u *io_u;
Jens Axboede789762011-09-16 22:11:23 +02001596 int do_scramble = 0;
Jens Axboe002fe732014-02-11 08:31:13 -07001597 long ret = 0;
Jens Axboe0d29de82010-09-01 13:54:15 +02001598
1599 io_u = __get_io_u(td);
1600 if (!io_u) {
1601 dprint(FD_IO, "__get_io_u failed\n");
1602 return NULL;
1603 }
1604
1605 if (check_get_verify(td, io_u))
1606 goto out;
1607 if (check_get_trim(td, io_u))
1608 goto out;
1609
Jens Axboe755200a2007-02-19 13:08:12 +01001610 /*
	 * If this came from a requeue, the io_u is already set up.
1612 */
1613 if (io_u->file)
Jens Axboe77f392b2007-02-19 20:13:09 +01001614 goto out;
Jens Axboe755200a2007-02-19 13:08:12 +01001615
Jens Axboe429f6672007-07-23 10:38:43 +02001616 /*
1617 * If using an iolog, grab next piece if any available.
1618 */
Jens Axboed72be542012-11-30 19:37:46 +01001619 if (td->flags & TD_F_READ_IOLOG) {
Jens Axboe429f6672007-07-23 10:38:43 +02001620 if (read_iolog_get(td, io_u))
1621 goto err_put;
Jens Axboe2ba1c292008-02-01 13:16:38 +01001622 } else if (set_io_u_file(td, io_u)) {
Jens Axboe002fe732014-02-11 08:31:13 -07001623 ret = -EBUSY;
Jens Axboe2ba1c292008-02-01 13:16:38 +01001624 dprint(FD_IO, "io_u %p, setting file failed\n", io_u);
Jens Axboe429f6672007-07-23 10:38:43 +02001625 goto err_put;
Jens Axboe2ba1c292008-02-01 13:16:38 +01001626 }
Jens Axboe5ec10ea2008-03-06 15:42:00 +01001627
Jens Axboe429f6672007-07-23 10:38:43 +02001628 f = io_u->file;
Jens Axboe002fe732014-02-11 08:31:13 -07001629 if (!f) {
1630 dprint(FD_IO, "io_u %p, setting file failed\n", io_u);
1631 goto err_put;
1632 }
1633
Jens Axboed6aed792009-06-03 08:41:15 +02001634 assert(fio_file_open(f));
Jens Axboe97af62c2007-05-22 11:12:13 +02001635
Jens Axboeff58fce2010-08-25 12:02:08 +02001636 if (ddir_rw(io_u->ddir)) {
Elliott Hugheseda3a602017-05-19 18:53:02 -07001637 if (!io_u->buflen && !td_ioengine_flagged(td, FIO_NOIO)) {
Jens Axboe2ba1c292008-02-01 13:16:38 +01001638 dprint(FD_IO, "get_io_u: zero buflen on %p\n", io_u);
Jens Axboe429f6672007-07-23 10:38:43 +02001639 goto err_put;
Jens Axboe2ba1c292008-02-01 13:16:38 +01001640 }
Jens Axboe87dc1ab2006-10-24 14:41:26 +02001641
Jens Axboe08a99be2014-12-14 19:01:24 -07001642 f->last_start[io_u->ddir] = io_u->offset;
1643 f->last_pos[io_u->ddir] = io_u->offset + io_u->buflen;
Jens Axboe87dc1ab2006-10-24 14:41:26 +02001644
Jens Axboefd684182011-09-19 09:24:44 +02001645 if (io_u->ddir == DDIR_WRITE) {
Jens Axboed72be542012-11-30 19:37:46 +01001646 if (td->flags & TD_F_REFILL_BUFFERS) {
Jens Axboe9c426842012-03-02 21:02:12 +01001647 io_u_fill_buffer(td, io_u,
Jens Axboe8e0aa162014-09-26 15:04:58 -06001648 td->o.min_bs[DDIR_WRITE],
Elliott Hugheseda3a602017-05-19 18:53:02 -07001649 io_u->buflen);
Jens Axboebedc9dc2014-03-17 12:51:09 -06001650 } else if ((td->flags & TD_F_SCRAMBLE_BUFFERS) &&
1651 !(td->flags & TD_F_COMPRESS))
Jens Axboefd684182011-09-19 09:24:44 +02001652 do_scramble = 1;
Jens Axboed72be542012-11-30 19:37:46 +01001653 if (td->flags & TD_F_VER_NONE) {
Jens Axboe629f1d72012-03-09 19:02:01 +01001654 populate_verify_io_u(td, io_u);
1655 do_scramble = 0;
1656 }
Jens Axboefd684182011-09-19 09:24:44 +02001657 } else if (io_u->ddir == DDIR_READ) {
Radha Ramachandrancbe8d752010-07-14 08:36:07 +02001658 /*
			 * Reset the buf_filled parameters so that, if this
			 * buffer is later used for a write, it gets refilled.
1661 */
Radha Ramachandrancbe8d752010-07-14 08:36:07 +02001662 io_u->buf_filled_len = 0;
1663 }
Jens Axboe10ba5352006-10-20 11:39:27 +02001664 }
1665
Jens Axboe165faf12007-02-07 11:30:37 +01001666 /*
1667 * Set io data pointers.
1668 */
Jens Axboecec6b552007-02-06 20:15:38 +01001669 io_u->xfer_buf = io_u->buf;
1670 io_u->xfer_buflen = io_u->buflen;
Jens Axboe5973caf2008-05-21 19:52:35 +02001671
Jens Axboe6ac7a332008-03-01 15:22:32 +01001672out:
Jens Axboe0d29de82010-09-01 13:54:15 +02001673 assert(io_u->file);
Jens Axboe429f6672007-07-23 10:38:43 +02001674 if (!td_io_prep(td, io_u)) {
Elliott Hugheseda3a602017-05-19 18:53:02 -07001675 if (!td->o.disable_lat)
Jens Axboe993bf482008-11-14 13:04:53 +01001676 fio_gettime(&io_u->start_time, NULL);
Elliott Hugheseda3a602017-05-19 18:53:02 -07001677
Jens Axboede789762011-09-16 22:11:23 +02001678 if (do_scramble)
1679 small_content_scramble(io_u);
Elliott Hugheseda3a602017-05-19 18:53:02 -07001680
Jens Axboe429f6672007-07-23 10:38:43 +02001681 return io_u;
Jens Axboe36167d82007-02-18 05:41:31 +01001682 }
Jens Axboe429f6672007-07-23 10:38:43 +02001683err_put:
Jens Axboe2ba1c292008-02-01 13:16:38 +01001684 dprint(FD_IO, "get_io_u failed\n");
Jens Axboe429f6672007-07-23 10:38:43 +02001685 put_io_u(td, io_u);
Jens Axboe002fe732014-02-11 08:31:13 -07001686 return ERR_PTR(ret);
Jens Axboe10ba5352006-10-20 11:39:27 +02001687}
1688
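/*
 * Log the details of a failed io_u, including any extra error detail the
 * ioengine can provide, unless the error is non-fatal and error dumping is
 * disabled.
 */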
Elliott Hugheseda3a602017-05-19 18:53:02 -07001689static void __io_u_log_error(struct thread_data *td, struct io_u *io_u)
Jens Axboe54517922007-03-05 10:06:06 +01001690{
Dmitry Monakhov8b28bd42012-09-23 15:46:09 +04001691 enum error_type_bit eb = td_error_type(io_u->ddir, io_u->error);
Jens Axboe825f8182010-08-25 10:47:18 +02001692
Dmitry Monakhov8b28bd42012-09-23 15:46:09 +04001693 if (td_non_fatal_error(td, eb, io_u->error) && !td->o.error_dump)
1694 return;
Jens Axboe54517922007-03-05 10:06:06 +01001695
Robert Elliott2cbdcdb2014-09-16 17:09:48 -05001696 log_err("fio: io_u error%s%s: %s: %s offset=%llu, buflen=%lu\n",
1697 io_u->file ? " on file " : "",
1698 io_u->file ? io_u->file->file_name : "",
1699 strerror(io_u->error),
1700 io_ddir_name(io_u->ddir),
1701 io_u->offset, io_u->xfer_buflen);
Jens Axboe54517922007-03-05 10:06:06 +01001702
Elliott Hugheseda3a602017-05-19 18:53:02 -07001703 if (td->io_ops->errdetails) {
1704 char *err = td->io_ops->errdetails(io_u);
1705
1706 log_err("fio: %s\n", err);
1707 free(err);
1708 }
1709
Jens Axboe54517922007-03-05 10:06:06 +01001710 if (!td->error)
1711 td_verror(td, io_u->error, "io_u error");
1712}
1713
Elliott Hugheseda3a602017-05-19 18:53:02 -07001714void io_u_log_error(struct thread_data *td, struct io_u *io_u)
Jens Axboeaba6c952014-02-13 19:59:56 -07001715{
Elliott Hugheseda3a602017-05-19 18:53:02 -07001716 __io_u_log_error(td, io_u);
1717 if (td->parent)
1718 __io_u_log_error(td->parent, io_u);
1719}
1720
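/*
 * True if we can skip per-IO time stamping, either because all time-based
 * stats are disabled or because gtod_reduce is set.
 */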
1721static inline bool gtod_reduce(struct thread_data *td)
1722{
1723 return (td->o.disable_clat && td->o.disable_slat && td->o.disable_bw)
1724 || td->o.gtod_reduce;
Jens Axboeaba6c952014-02-13 19:59:56 -07001725}
1726
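/*
 * Record completion statistics for an io_u: completion and total latency,
 * bandwidth and IOPS samples, latency limit handling, and block info state
 * updates for trims.
 */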
Jens Axboec8eeb9d2011-10-05 14:02:22 +02001727static void account_io_completion(struct thread_data *td, struct io_u *io_u,
1728 struct io_completion_data *icd,
1729 const enum fio_ddir idx, unsigned int bytes)
1730{
Elliott Hugheseda3a602017-05-19 18:53:02 -07001731 const int no_reduce = !gtod_reduce(td);
Jens Axboe24d23ca2012-11-13 08:31:24 -07001732 unsigned long lusec = 0;
Jens Axboec8eeb9d2011-10-05 14:02:22 +02001733
Elliott Hugheseda3a602017-05-19 18:53:02 -07001734 if (td->parent)
1735 td = td->parent;
1736
1737 if (!td->o.stats)
1738 return;
1739
1740 if (no_reduce)
Jens Axboec8eeb9d2011-10-05 14:02:22 +02001741 lusec = utime_since(&io_u->issue_time, &icd->time);
1742
1743 if (!td->o.disable_lat) {
1744 unsigned long tusec;
1745
1746 tusec = utime_since(&io_u->start_time, &icd->time);
Jens Axboeccefd5f2014-06-30 20:59:03 -06001747 add_lat_sample(td, idx, tusec, bytes, io_u->offset);
Jens Axboe15501532012-10-24 16:37:45 +02001748
Jens Axboed4afedf2013-05-22 22:21:29 +02001749 if (td->flags & TD_F_PROFILE_OPS) {
1750 struct prof_io_ops *ops = &td->prof_io_ops;
1751
1752 if (ops->io_u_lat)
1753 icd->error = ops->io_u_lat(td, tusec);
1754 }
1755
Jens Axboe3e260a42013-12-09 12:38:53 -07001756 if (td->o.max_latency && tusec > td->o.max_latency)
1757 lat_fatal(td, icd, tusec, td->o.max_latency);
1758 if (td->o.latency_target && tusec > td->o.latency_target) {
1759 if (lat_target_failed(td))
1760 lat_fatal(td, icd, tusec, td->o.latency_target);
Jens Axboe15501532012-10-24 16:37:45 +02001761 }
Jens Axboec8eeb9d2011-10-05 14:02:22 +02001762 }
1763
Elliott Hugheseda3a602017-05-19 18:53:02 -07001764 if (ddir_rw(idx)) {
1765 if (!td->o.disable_clat) {
1766 add_clat_sample(td, idx, lusec, bytes, io_u->offset);
1767 io_u_mark_latency(td, lusec);
1768 }
1769
1770 if (!td->o.disable_bw && per_unit_log(td->bw_log))
1771 add_bw_sample(td, io_u, bytes, lusec);
1772
1773 if (no_reduce && per_unit_log(td->iops_log))
1774 add_iops_sample(td, io_u, bytes);
Jens Axboec8eeb9d2011-10-05 14:02:22 +02001775 }
1776
Elliott Hugheseda3a602017-05-19 18:53:02 -07001777 if (td->ts.nr_block_infos && io_u->ddir == DDIR_TRIM) {
1778 uint32_t *info = io_u_block_info(td, io_u);
1779 if (BLOCK_INFO_STATE(*info) < BLOCK_STATE_TRIM_FAILURE) {
1780 if (io_u->ddir == DDIR_TRIM) {
1781 *info = BLOCK_INFO(BLOCK_STATE_TRIMMED,
1782 BLOCK_INFO_TRIMS(*info) + 1);
1783 } else if (io_u->ddir == DDIR_WRITE) {
1784 *info = BLOCK_INFO_SET_STATE(BLOCK_STATE_WRITTEN,
1785 *info);
1786 }
1787 }
1788 }
Jens Axboec8eeb9d2011-10-05 14:02:22 +02001789}
1790
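/*
 * Track the written range of the file (first/last write offset) and, if
 * allocated, record the completed write offset in the last_write_comp ring
 * buffer (sized by the iodepth).
 */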
Elliott Hugheseda3a602017-05-19 18:53:02 -07001791static void file_log_write_comp(const struct thread_data *td, struct fio_file *f,
1792 uint64_t offset, unsigned int bytes)
Steven Lang1b8dbf22011-11-09 13:48:01 +01001793{
Elliott Hugheseda3a602017-05-19 18:53:02 -07001794 int idx;
Jens Axboe1ae83d42013-01-12 01:44:15 -07001795
Elliott Hugheseda3a602017-05-19 18:53:02 -07001796 if (!f)
1797 return;
1798
1799 if (f->first_write == -1ULL || offset < f->first_write)
1800 f->first_write = offset;
1801 if (f->last_write == -1ULL || ((offset + bytes) > f->last_write))
1802 f->last_write = offset + bytes;
1803
1804 if (!f->last_write_comp)
1805 return;
1806
1807 idx = f->last_write_idx++;
1808 f->last_write_comp[idx] = offset;
1809 if (f->last_write_idx == td->o.iodepth)
1810 f->last_write_idx = 0;
Steven Lang1b8dbf22011-11-09 13:48:01 +01001811}
1812
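/*
 * Per-IO completion handling: clear the flight flags, update verify log
 * state, account completed bytes and statistics, run any end_io callback
 * and fold errors into the completion data. Sync completions just reset
 * the file's recorded write range.
 */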
Jens Axboee69fdf72014-07-23 16:11:43 +02001813static void io_completed(struct thread_data *td, struct io_u **io_u_ptr,
Jens Axboe97601022007-02-18 12:47:29 +01001814 struct io_completion_data *icd)
Jens Axboe10ba5352006-10-20 11:39:27 +02001815{
Jens Axboee69fdf72014-07-23 16:11:43 +02001816 struct io_u *io_u = *io_u_ptr;
1817 enum fio_ddir ddir = io_u->ddir;
1818 struct fio_file *f = io_u->file;
Jens Axboe10ba5352006-10-20 11:39:27 +02001819
Jens Axboe2ba1c292008-02-01 13:16:38 +01001820 dprint_io_u(io_u, "io complete");
1821
Jens Axboe0c6e7512007-02-22 11:19:39 +01001822 assert(io_u->flags & IO_U_F_FLIGHT);
Elliott Hugheseda3a602017-05-19 18:53:02 -07001823 io_u_clear(td, io_u, IO_U_F_FLIGHT | IO_U_F_BUSY_OK);
Jens Axboef9401282014-02-06 12:17:37 -07001824
1825 /*
1826 * Mark IO ok to verify
1827 */
1828 if (io_u->ipo) {
Jens Axboe890b6652014-05-06 19:06:51 -06001829 /*
1830 * Remove errored entry from the verification list
1831 */
1832 if (io_u->error)
1833 unlog_io_piece(td, io_u);
1834 else {
1835 io_u->ipo->flags &= ~IP_F_IN_FLIGHT;
1836 write_barrier();
1837 }
Jens Axboef9401282014-02-06 12:17:37 -07001838 }
1839
Jens Axboee69fdf72014-07-23 16:11:43 +02001840 if (ddir_sync(ddir)) {
Jens Axboe87dc1ab2006-10-24 14:41:26 +02001841 td->last_was_sync = 1;
Jens Axboe44f29692010-03-09 20:09:44 +01001842 if (f) {
1843 f->first_write = -1ULL;
1844 f->last_write = -1ULL;
1845 }
Jens Axboe87dc1ab2006-10-24 14:41:26 +02001846 return;
1847 }
1848
1849 td->last_was_sync = 0;
Jens Axboee69fdf72014-07-23 16:11:43 +02001850 td->last_ddir = ddir;
Jens Axboe87dc1ab2006-10-24 14:41:26 +02001851
Jens Axboee69fdf72014-07-23 16:11:43 +02001852 if (!io_u->error && ddir_rw(ddir)) {
Jens Axboe10ba5352006-10-20 11:39:27 +02001853 unsigned int bytes = io_u->buflen - io_u->resid;
Jens Axboeb29ee5b2008-09-11 10:17:26 +02001854 int ret;
Jens Axboe10ba5352006-10-20 11:39:27 +02001855
Jens Axboee69fdf72014-07-23 16:11:43 +02001856 td->io_blocks[ddir]++;
1857 td->this_io_blocks[ddir]++;
1858 td->io_bytes[ddir] += bytes;
webeeae2fafc2012-03-23 13:41:41 +01001859
1860 if (!(io_u->flags & IO_U_F_VER_LIST))
Jens Axboee69fdf72014-07-23 16:11:43 +02001861 td->this_io_bytes[ddir] += bytes;
Jens Axboe10ba5352006-10-20 11:39:27 +02001862
Elliott Hugheseda3a602017-05-19 18:53:02 -07001863 if (ddir == DDIR_WRITE)
1864 file_log_write_comp(td, f, io_u->offset, bytes);
Jens Axboe44f29692010-03-09 20:09:44 +01001865
Steven Lang6b1190f2012-02-07 09:42:59 +01001866 if (ramp_time_over(td) && (td->runstate == TD_RUNNING ||
Elliott Hugheseda3a602017-05-19 18:53:02 -07001867 td->runstate == TD_VERIFYING))
Jens Axboee69fdf72014-07-23 16:11:43 +02001868 account_io_completion(td, io_u, icd, ddir, bytes);
Jens Axboe40e1a6f2009-06-11 10:55:39 +02001869
Jens Axboee69fdf72014-07-23 16:11:43 +02001870 icd->bytes_done[ddir] += bytes;
Jens Axboe3af6ef32007-02-18 06:57:43 +01001871
Jens Axboed7762cf2007-02-23 12:34:57 +01001872 if (io_u->end_io) {
Jens Axboee69fdf72014-07-23 16:11:43 +02001873 ret = io_u->end_io(td, io_u_ptr);
1874 io_u = *io_u_ptr;
Jens Axboe3af6ef32007-02-18 06:57:43 +01001875 if (ret && !icd->error)
1876 icd->error = ret;
1877 }
Jens Axboeff58fce2010-08-25 12:02:08 +02001878 } else if (io_u->error) {
Jens Axboe10ba5352006-10-20 11:39:27 +02001879 icd->error = io_u->error;
Jens Axboe54517922007-03-05 10:06:06 +01001880 io_u_log_error(td, io_u);
1881 }
Dmitry Monakhov8b28bd42012-09-23 15:46:09 +04001882 if (icd->error) {
Jens Axboee69fdf72014-07-23 16:11:43 +02001883 enum error_type_bit eb = td_error_type(ddir, icd->error);
1884
Dmitry Monakhov8b28bd42012-09-23 15:46:09 +04001885 if (!td_non_fatal_error(td, eb, icd->error))
1886 return;
Jens Axboee69fdf72014-07-23 16:11:43 +02001887
Radha Ramachandranf2bba182009-06-15 08:40:16 +02001888 /*
1889 * If there is a non_fatal error, then add to the error count
1890 * and clear all the errors.
1891 */
1892 update_error_count(td, icd->error);
1893 td_clear_error(td);
1894 icd->error = 0;
Jens Axboee69fdf72014-07-23 16:11:43 +02001895 if (io_u)
1896 io_u->error = 0;
Radha Ramachandranf2bba182009-06-15 08:40:16 +02001897 }
Jens Axboe10ba5352006-10-20 11:39:27 +02001898}
1899
Jens Axboe9520ebb2008-10-16 21:03:27 +02001900static void init_icd(struct thread_data *td, struct io_completion_data *icd,
1901 int nr)
Jens Axboe36167d82007-02-18 05:41:31 +01001902{
Shaohua Li6eaf09d2012-09-14 08:49:43 +02001903 int ddir;
Jens Axboeaba6c952014-02-13 19:59:56 -07001904
1905 if (!gtod_reduce(td))
Jens Axboe9520ebb2008-10-16 21:03:27 +02001906 fio_gettime(&icd->time, NULL);
Jens Axboe36167d82007-02-18 05:41:31 +01001907
Jens Axboe3af6ef32007-02-18 06:57:43 +01001908 icd->nr = nr;
1909
Jens Axboe36167d82007-02-18 05:41:31 +01001910 icd->error = 0;
Elliott Hugheseda3a602017-05-19 18:53:02 -07001911 for (ddir = 0; ddir < DDIR_RWDIR_CNT; ddir++)
Shaohua Li6eaf09d2012-09-14 08:49:43 +02001912 icd->bytes_done[ddir] = 0;
Jens Axboe36167d82007-02-18 05:41:31 +01001913}
1914
Jens Axboe97601022007-02-18 12:47:29 +01001915static void ios_completed(struct thread_data *td,
1916 struct io_completion_data *icd)
Jens Axboe10ba5352006-10-20 11:39:27 +02001917{
1918 struct io_u *io_u;
1919 int i;
1920
Jens Axboe10ba5352006-10-20 11:39:27 +02001921 for (i = 0; i < icd->nr; i++) {
1922 io_u = td->io_ops->event(td, i);
1923
Jens Axboee69fdf72014-07-23 16:11:43 +02001924 io_completed(td, &io_u, icd);
Jens Axboee8462bd2009-07-06 12:59:04 +02001925
Jens Axboee69fdf72014-07-23 16:11:43 +02001926 if (io_u)
Jens Axboee8462bd2009-07-06 12:59:04 +02001927 put_io_u(td, io_u);
Jens Axboe10ba5352006-10-20 11:39:27 +02001928 }
1929}
Jens Axboe97601022007-02-18 12:47:29 +01001930
Jens Axboee7e6cfb2007-02-20 10:58:34 +01001931/*
1932 * Complete a single io_u for the sync engines.
1933 */
Elliott Hugheseda3a602017-05-19 18:53:02 -07001934int io_u_sync_complete(struct thread_data *td, struct io_u *io_u)
Jens Axboe97601022007-02-18 12:47:29 +01001935{
1936 struct io_completion_data icd;
Elliott Hugheseda3a602017-05-19 18:53:02 -07001937 int ddir;
Jens Axboe97601022007-02-18 12:47:29 +01001938
Jens Axboe9520ebb2008-10-16 21:03:27 +02001939 init_icd(td, &icd, 1);
Jens Axboee69fdf72014-07-23 16:11:43 +02001940 io_completed(td, &io_u, &icd);
Jens Axboee8462bd2009-07-06 12:59:04 +02001941
Jens Axboee69fdf72014-07-23 16:11:43 +02001942 if (io_u)
Jens Axboee8462bd2009-07-06 12:59:04 +02001943 put_io_u(td, io_u);
Jens Axboe97601022007-02-18 12:47:29 +01001944
Jens Axboe581e7142009-06-09 12:47:16 +02001945 if (icd.error) {
1946 td_verror(td, icd.error, "io_u_sync_complete");
1947 return -1;
1948 }
Jens Axboe97601022007-02-18 12:47:29 +01001949
Elliott Hugheseda3a602017-05-19 18:53:02 -07001950 for (ddir = 0; ddir < DDIR_RWDIR_CNT; ddir++)
1951 td->bytes_done[ddir] += icd.bytes_done[ddir];
Jens Axboe581e7142009-06-09 12:47:16 +02001952
1953 return 0;
Jens Axboe97601022007-02-18 12:47:29 +01001954}
1955
Jens Axboee7e6cfb2007-02-20 10:58:34 +01001956/*
 * Called to complete at least min_evts IOs for the async engines.
1958 */
Elliott Hugheseda3a602017-05-19 18:53:02 -07001959int io_u_queued_complete(struct thread_data *td, int min_evts)
Jens Axboe97601022007-02-18 12:47:29 +01001960{
Jens Axboe97601022007-02-18 12:47:29 +01001961 struct io_completion_data icd;
Jens Axboe00de55e2007-02-20 10:45:57 +01001962 struct timespec *tvp = NULL;
Elliott Hugheseda3a602017-05-19 18:53:02 -07001963 int ret, ddir;
Davide Libenzi4d06a332007-03-22 07:43:50 +01001964 struct timespec ts = { .tv_sec = 0, .tv_nsec = 0, };
Jens Axboe97601022007-02-18 12:47:29 +01001965
Elliott Hugheseda3a602017-05-19 18:53:02 -07001966 dprint(FD_IO, "io_u_queued_complete: min=%d\n", min_evts);
Jens Axboeb271fe62008-02-04 10:49:41 +01001967
Jens Axboe49504212008-06-05 09:03:30 +02001968 if (!min_evts)
Jens Axboe00de55e2007-02-20 10:45:57 +01001969 tvp = &ts;
Robert Elliott05074832014-09-04 13:51:05 -06001970 else if (min_evts > td->cur_depth)
1971 min_evts = td->cur_depth;
Jens Axboe97601022007-02-18 12:47:29 +01001972
	/*
	 * No worries, td_io_getevents fixes min and max if they are
	 * set incorrectly.
	 */
1975 ret = td_io_getevents(td, min_evts, td->o.iodepth_batch_complete_max, tvp);
Jens Axboe97601022007-02-18 12:47:29 +01001976 if (ret < 0) {
Jens Axboee1161c32007-02-22 19:36:48 +01001977 td_verror(td, -ret, "td_io_getevents");
Jens Axboe97601022007-02-18 12:47:29 +01001978 return ret;
1979 } else if (!ret)
1980 return ret;
1981
Jens Axboe9520ebb2008-10-16 21:03:27 +02001982 init_icd(td, &icd, ret);
Jens Axboe97601022007-02-18 12:47:29 +01001983 ios_completed(td, &icd);
Jens Axboe581e7142009-06-09 12:47:16 +02001984 if (icd.error) {
1985 td_verror(td, icd.error, "io_u_queued_complete");
1986 return -1;
1987 }
Jens Axboe97601022007-02-18 12:47:29 +01001988
Elliott Hugheseda3a602017-05-19 18:53:02 -07001989 for (ddir = 0; ddir < DDIR_RWDIR_CNT; ddir++)
1990 td->bytes_done[ddir] += icd.bytes_done[ddir];
Shaohua Li6eaf09d2012-09-14 08:49:43 +02001991
Elliott Hugheseda3a602017-05-19 18:53:02 -07001992 return ret;
Jens Axboe97601022007-02-18 12:47:29 +01001993}
Jens Axboe7e77dd02007-02-20 10:57:34 +01001994
1995/*
1996 * Call when io_u is really queued, to update the submission latency.
1997 */
1998void io_u_queued(struct thread_data *td, struct io_u *io_u)
1999{
Elliott Hugheseda3a602017-05-19 18:53:02 -07002000 if (!td->o.disable_slat && ramp_time_over(td) && td->o.stats) {
Jens Axboe9520ebb2008-10-16 21:03:27 +02002001 unsigned long slat_time;
Jens Axboe7e77dd02007-02-20 10:57:34 +01002002
Jens Axboe9520ebb2008-10-16 21:03:27 +02002003 slat_time = utime_since(&io_u->start_time, &io_u->issue_time);
Elliott Hugheseda3a602017-05-19 18:53:02 -07002004
2005 if (td->parent)
2006 td = td->parent;
2007
Jens Axboeccefd5f2014-06-30 20:59:03 -06002008 add_slat_sample(td, io_u->ddir, slat_time, io_u->xfer_buflen,
2009 io_u->offset);
Jens Axboe9520ebb2008-10-16 21:03:27 +02002010 }
Jens Axboe7e77dd02007-02-20 10:57:34 +01002011}
Jens Axboe433afcb2007-02-22 10:39:01 +01002012
Jens Axboee66dac22014-09-22 10:02:07 -06002013/*
 * See if we should reuse the last buffer seed when dedupe is enabled.
2015 */
2016static struct frand_state *get_buf_state(struct thread_data *td)
2017{
2018 unsigned int v;
Jens Axboee66dac22014-09-22 10:02:07 -06002019
2020 if (!td->o.dedupe_percentage)
2021 return &td->buf_state;
Elliott Hugheseda3a602017-05-19 18:53:02 -07002022 else if (td->o.dedupe_percentage == 100) {
2023 frand_copy(&td->buf_state_prev, &td->buf_state);
2024 return &td->buf_state;
2025 }
Jens Axboee66dac22014-09-22 10:02:07 -06002026
Elliott Hugheseda3a602017-05-19 18:53:02 -07002027 v = rand32_between(&td->dedupe_state, 1, 100);
Jens Axboee66dac22014-09-22 10:02:07 -06002028
2029 if (v <= td->o.dedupe_percentage)
2030 return &td->buf_state_prev;
2031
2032 return &td->buf_state;
2033}
2034
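/*
 * Save the buffer RNG state that was just consumed, so a later fill can
 * produce identical (dedupable) data. With 100% dedupe the state is wound
 * back instead, so every buffer repeats the same data.
 */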
2035static void save_buf_state(struct thread_data *td, struct frand_state *rs)
2036{
Elliott Hugheseda3a602017-05-19 18:53:02 -07002037 if (td->o.dedupe_percentage == 100)
2038 frand_copy(rs, &td->buf_state_prev);
2039 else if (rs == &td->buf_state)
Jens Axboee66dac22014-09-22 10:02:07 -06002040 frand_copy(&td->buf_state_prev, rs);
2041}
2042
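/*
 * Fill a write buffer according to the buffer options: random data with an
 * optional compressible percentage, a user-supplied pattern, zeroes, or
 * plain random data. CUDA-allocated buffers are left untouched here.
 */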
Jens Axboecc86c392013-05-03 15:12:33 +02002043void fill_io_buffer(struct thread_data *td, void *buf, unsigned int min_write,
2044 unsigned int max_bs)
Jens Axboe5973caf2008-05-21 19:52:35 +02002045{
Jens Axboefd1583f2014-12-03 19:55:33 -07002046 struct thread_options *o = &td->o;
2047
Elliott Hugheseda3a602017-05-19 18:53:02 -07002048 if (o->mem_type == MEM_CUDA_MALLOC)
2049 return;
2050
Vasily Tarasovefd633f2015-01-28 09:10:30 -07002051 if (o->compress_percentage || o->dedupe_percentage) {
Jens Axboe9c426842012-03-02 21:02:12 +01002052 unsigned int perc = td->o.compress_percentage;
Jens Axboee66dac22014-09-22 10:02:07 -06002053 struct frand_state *rs;
Jens Axboe8e0aa162014-09-26 15:04:58 -06002054 unsigned int left = max_bs;
Elliott Hugheseda3a602017-05-19 18:53:02 -07002055 unsigned int this_write;
Jens Axboee66dac22014-09-22 10:02:07 -06002056
Jens Axboe8e0aa162014-09-26 15:04:58 -06002057 do {
2058 rs = get_buf_state(td);
Jens Axboe9c426842012-03-02 21:02:12 +01002059
Jens Axboe8e0aa162014-09-26 15:04:58 -06002060 min_write = min(min_write, left);
Jens Axboef97a43a2012-03-09 19:06:24 +01002061
Jens Axboe8e0aa162014-09-26 15:04:58 -06002062 if (perc) {
Elliott Hugheseda3a602017-05-19 18:53:02 -07002063 this_write = min_not_zero(min_write,
2064 td->o.compress_chunk);
Jens Axboecc86c392013-05-03 15:12:33 +02002065
Elliott Hugheseda3a602017-05-19 18:53:02 -07002066 fill_random_buf_percentage(rs, buf, perc,
2067 this_write, this_write,
2068 o->buffer_pattern,
2069 o->buffer_pattern_bytes);
2070 } else {
Jens Axboe8e0aa162014-09-26 15:04:58 -06002071 fill_random_buf(rs, buf, min_write);
Elliott Hugheseda3a602017-05-19 18:53:02 -07002072 this_write = min_write;
2073 }
Jens Axboe8e0aa162014-09-26 15:04:58 -06002074
Elliott Hugheseda3a602017-05-19 18:53:02 -07002075 buf += this_write;
2076 left -= this_write;
Jens Axboee66dac22014-09-22 10:02:07 -06002077 save_buf_state(td, rs);
Jens Axboe8e0aa162014-09-26 15:04:58 -06002078 } while (left);
Jens Axboefd1583f2014-12-03 19:55:33 -07002079 } else if (o->buffer_pattern_bytes)
2080 fill_buffer_pattern(td, buf, max_bs);
Elliott Hugheseda3a602017-05-19 18:53:02 -07002081 else if (o->zero_buffers)
Jens Axboecc86c392013-05-03 15:12:33 +02002082 memset(buf, 0, max_bs);
Elliott Hugheseda3a602017-05-19 18:53:02 -07002083 else
2084 fill_random_buf(get_buf_state(td), buf, max_bs);
Jens Axboecc86c392013-05-03 15:12:33 +02002085}
2086
2087/*
2088 * "randomly" fill the buffer contents
2089 */
2090void io_u_fill_buffer(struct thread_data *td, struct io_u *io_u,
2091 unsigned int min_write, unsigned int max_bs)
2092{
2093 io_u->buf_filled_len = 0;
2094 fill_io_buffer(td, io_u->buf, min_write, max_bs);
Jens Axboe5973caf2008-05-21 19:52:35 +02002095}
Elliott Hugheseda3a602017-05-19 18:53:02 -07002096
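/*
 * Issue sync_file_range() over the region written since the last sync,
 * using the flags given by the sync_file_range option.
 */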
2097static int do_sync_file_range(const struct thread_data *td,
2098 struct fio_file *f)
2099{
2100 off64_t offset, nbytes;
2101
2102 offset = f->first_write;
2103 nbytes = f->last_write - f->first_write;
2104
2105 if (!nbytes)
2106 return 0;
2107
2108 return sync_file_range(f->fd, offset, nbytes, td->o.sync_file_range);
2109}
2110
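/*
 * Dispatch a sync-type io_u to the matching syscall: fsync, fdatasync or
 * sync_file_range. On failure, errno is stored in io_u->error.
 */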
2111int do_io_u_sync(const struct thread_data *td, struct io_u *io_u)
2112{
2113 int ret;
2114
2115 if (io_u->ddir == DDIR_SYNC) {
2116 ret = fsync(io_u->file->fd);
2117 } else if (io_u->ddir == DDIR_DATASYNC) {
2118#ifdef CONFIG_FDATASYNC
2119 ret = fdatasync(io_u->file->fd);
2120#else
2121 ret = io_u->xfer_buflen;
2122 io_u->error = EINVAL;
2123#endif
2124 } else if (io_u->ddir == DDIR_SYNC_FILE_RANGE)
2125 ret = do_sync_file_range(td, io_u->file);
2126 else {
2127 ret = io_u->xfer_buflen;
2128 io_u->error = EINVAL;
2129 }
2130
2131 if (ret < 0)
2132 io_u->error = errno;
2133
2134 return ret;
2135}
2136
2137int do_io_u_trim(const struct thread_data *td, struct io_u *io_u)
2138{
2139#ifndef FIO_HAVE_TRIM
2140 io_u->error = EINVAL;
2141 return 0;
2142#else
2143 struct fio_file *f = io_u->file;
2144 int ret;
2145
2146 ret = os_trim(f->fd, io_u->offset, io_u->xfer_buflen);
2147 if (!ret)
2148 return io_u->xfer_buflen;
2149
2150 io_u->error = ret;
2151 return 0;
2152#endif
2153}