/*
 * fio - the flexible io tester
 *
 * Copyright (C) 2005 Jens Axboe <axboe@suse.de>
 * Copyright (C) 2006 Jens Axboe <axboe@kernel.dk>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 */
#include <unistd.h>
#include <fcntl.h>
#include <string.h>
#include <signal.h>
#include <time.h>
#include <assert.h>
#include <sys/stat.h>
#include <sys/wait.h>
#include <sys/ipc.h>
#include <sys/shm.h>
#include <sys/ioctl.h>
#include <sys/mman.h>

#include "fio.h"
#include "os.h"

#define MASK	(4095)

#define ALIGN(buf)	(char *) (((unsigned long) (buf) + MASK) & ~(MASK))

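/*
 * Worked example (illustrative, not in the original): with MASK 4095,
 * ALIGN rounds a pointer up to the next 4096-byte boundary. A buffer at
 * 0x12345 becomes (0x12345 + 0xfff) & ~0xfff = 0x13000, while one
 * already at 0x13000 is left unchanged.
 */
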
int groupid = 0;
int thread_number = 0;
int shm_id = 0;
int temp_stall_ts;
char *fio_inst_prefix = _INST_PREFIX;

#define should_fsync(td)	((td_write(td) || td_rw(td)) && (!(td)->odirect || (td)->override_sync))

static volatile int startup_sem;

#define TERMINATE_ALL		(-1)
#define JOB_START_TIMEOUT	(5 * 1000)

static void terminate_threads(int group_id)
{
	struct thread_data *td;
	int i;

	for_each_td(td, i) {
		if (group_id == TERMINATE_ALL || group_id == td->groupid) {
			td->terminate = 1;
			td->start_delay = 0;
		}
	}
}

static void sig_handler(int sig)
{
	switch (sig) {
	case SIGALRM:
		update_io_ticks();
		disk_util_timer_arm();
		print_thread_status();
		break;
	default:
		printf("\nfio: terminating on signal\n");
		fflush(stdout);
		terminate_threads(TERMINATE_ALL);
		break;
	}
}

/*
 * The ->file_map[] contains a map of blocks we have or have not done io
 * to yet. Used to make sure we cover the entire range in a fair fashion.
 */
static int random_map_free(struct thread_data *td, struct fio_file *f,
			   unsigned long long block)
{
	unsigned int idx = RAND_MAP_IDX(td, f, block);
	unsigned int bit = RAND_MAP_BIT(td, f, block);

	return (f->file_map[idx] & (1UL << bit)) == 0;
}

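/*
 * Indexing note (illustrative, not in the original): assuming
 * RAND_MAP_IDX/RAND_MAP_BIT split a block number into a word index and
 * a bit offset (roughly block / BLOCKS_PER_MAP and block % BLOCKS_PER_MAP),
 * block 70 on a 32-bit map word lands in word 2, bit 6, and the test
 * above reports it free as long as that bit is still zero.
 */
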
/*
 * Return the next free block in the map.
 */
static int get_next_free_block(struct thread_data *td, struct fio_file *f,
			       unsigned long long *b)
{
	int i;

	*b = 0;
	i = 0;
	while ((*b) * td->min_bs < f->file_size) {
		if (f->file_map[i] != -1UL) {
			*b += ffz(f->file_map[i]);
			return 0;
		}

		*b += BLOCKS_PER_MAP;
		i++;
	}

	return 1;
}

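/*
 * Worked example (illustrative, not in the original): if a map word is
 * 0x17 (binary 10111, bits 0, 1, 2 and 4 set), ffz() returns 3, the
 * index of the lowest zero bit, so *b advances straight to the first
 * block in that word which has not yet seen io.
 */
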
/*
 * Mark a given offset as used in the map.
 */
static void mark_random_map(struct thread_data *td, struct fio_file *f,
			    struct io_u *io_u)
{
	unsigned long long block = io_u->offset / (unsigned long long) td->min_bs;
	unsigned int blocks = 0;

	while (blocks < (io_u->buflen / td->min_bs)) {
		unsigned int idx, bit;

		if (!random_map_free(td, f, block))
			break;

		idx = RAND_MAP_IDX(td, f, block);
		bit = RAND_MAP_BIT(td, f, block);

		assert(idx < f->num_maps);

		f->file_map[idx] |= (1UL << bit);
		block++;
		blocks++;
	}

	if ((blocks * td->min_bs) < io_u->buflen)
		io_u->buflen = blocks * td->min_bs;
}

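/*
 * Illustrative note (not in the original): with min_bs 4096 and a 16k
 * io_u that runs into an already-used block after marking two fresh
 * ones, the loop stops early and buflen is trimmed to 2 * 4096 = 8192,
 * so the io never overlaps blocks that were already covered.
 */
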
/*
 * For random io, generate a random new block and see if it's used. Repeat
 * until we find a free one. For sequential io, just return the end of
 * the last io issued.
 */
static int get_next_offset(struct thread_data *td, struct fio_file *f,
			   unsigned long long *offset)
{
	unsigned long long b, rb;
	long r;

	if (!td->sequential) {
		unsigned long long max_blocks = td->io_size / td->min_bs;
		int loops = 50;

		do {
			r = os_random_long(&td->random_state);
			b = ((max_blocks - 1) * r / (unsigned long long) (RAND_MAX + 1.0));
			rb = b + (f->file_offset / td->min_bs);
			loops--;
		} while (!random_map_free(td, f, rb) && loops);

		if (!loops) {
			if (get_next_free_block(td, f, &b))
				return 1;
		}
	} else
		b = f->last_pos / td->min_bs;

	*offset = (b * td->min_bs) + f->file_offset;
	if (*offset > f->file_size)
		return 1;

	return 0;
}

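/*
 * Worked example (illustrative, not in the original): with io_size
 * 1 MiB and min_bs 4096, max_blocks is 256; r / (RAND_MAX + 1.0) maps
 * the raw random value into [0, 1), so b lands in [0, 255]. After 50
 * collisions with used blocks the code gives up on dice rolls and falls
 * back to a linear scan of the bitmap, rather than looping forever on a
 * nearly-full map.
 */
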
static unsigned int get_next_buflen(struct thread_data *td)
{
	unsigned int buflen;
	long r;

	if (td->min_bs == td->max_bs)
		buflen = td->min_bs;
	else {
		r = os_random_long(&td->bsrange_state);
		buflen = (1 + (double) (td->max_bs - 1) * r / (RAND_MAX + 1.0));
		buflen = (buflen + td->min_bs - 1) & ~(td->min_bs - 1);
	}

	if (buflen > td->io_size - td->this_io_bytes[td->ddir]) {
		/*
		 * if using direct/raw io, we may not be able to
		 * shrink the size. so just fail it.
		 */
		if (td->io_ops->flags & FIO_RAWIO)
			return 0;

		buflen = td->io_size - td->this_io_bytes[td->ddir];
	}

	return buflen;
}

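/*
 * Note (illustrative, not in the original): the rounding expression
 * (buflen + min_bs - 1) & ~(min_bs - 1) only works when min_bs is a
 * power of two, e.g. with min_bs 4096 a random 5000 rounds up to 8192.
 * A non-power-of-two min_bs would make the mask corrupt the size.
 */
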
/*
 * Check if we are above the minimum rate given.
 */
static int check_min_rate(struct thread_data *td, struct timeval *now)
{
	unsigned long spent;
	unsigned long rate;
	int ddir = td->ddir;

	/*
	 * allow a 2 second settle period in the beginning
	 */
	if (mtime_since(&td->start, now) < 2000)
		return 0;

	/*
	 * if rate blocks is set, sample is running
	 */
	if (td->rate_bytes) {
		spent = mtime_since(&td->lastrate, now);
		if (spent < td->ratecycle)
			return 0;

		rate = (td->this_io_bytes[ddir] - td->rate_bytes) / spent;
		if (rate < td->ratemin) {
			fprintf(f_out, "%s: min rate %d not met, got %ldKiB/sec\n", td->name, td->ratemin, rate);
			return 1;
		}
	}

	td->rate_bytes = td->this_io_bytes[ddir];
	memcpy(&td->lastrate, now, sizeof(*now));
	return 0;
}

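/*
 * Units note (illustrative, not in the original): spent is measured in
 * milliseconds, so bytes / spent yields bytes per millisecond, which is
 * roughly KB/sec (1 byte/msec = 1000 bytes/sec). A job that moved
 * 8 MiB in 2000 msec therefore reports a rate of about 4194.
 */
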
static inline int runtime_exceeded(struct thread_data *td, struct timeval *t)
{
	if (!td->timeout)
		return 0;
	if (mtime_since(&td->epoch, t) >= td->timeout * 1000)
		return 1;

	return 0;
}

/*
 * Return the data direction for the next io_u. If the job is a
 * mixed read/write workload, check the rwmix cycle and switch if
 * necessary.
 */
static int get_rw_ddir(struct thread_data *td)
{
	if (td_rw(td)) {
		struct timeval now;
		unsigned long elapsed;

		gettimeofday(&now, NULL);
		elapsed = mtime_since_now(&td->rwmix_switch);

		/*
		 * Check if it's time to seed a new data direction.
		 */
		if (elapsed >= td->rwmixcycle) {
			unsigned int v;
			long r;

			r = os_random_long(&td->rwmix_state);
			v = 1 + (int) (100.0 * (r / (RAND_MAX + 1.0)));
			if (v < td->rwmixread)
				td->rwmix_ddir = DDIR_READ;
			else
				td->rwmix_ddir = DDIR_WRITE;
			memcpy(&td->rwmix_switch, &now, sizeof(now));
		}
		return td->rwmix_ddir;
	} else if (td_read(td))
		return DDIR_READ;
	else
		return DDIR_WRITE;
}

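/*
 * Worked example (illustrative, not in the original): v is drawn
 * uniformly from 1..100, so with rwmixread=70 the v < 70 test picks
 * reads for v in 1..69, i.e. close to the requested 70% read mix, and
 * the chosen direction then sticks for the next rwmixcycle msecs.
 */
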
static int td_io_prep(struct thread_data *td, struct io_u *io_u)
{
	if (td->io_ops->prep && td->io_ops->prep(td, io_u))
		return 1;

	return 0;
}

void put_io_u(struct thread_data *td, struct io_u *io_u)
{
	io_u->file = NULL;
	list_del(&io_u->list);
	list_add(&io_u->list, &td->io_u_freelist);
	td->cur_depth--;
}

static int fill_io_u(struct thread_data *td, struct fio_file *f,
		     struct io_u *io_u)
{
	/*
	 * If using an iolog, grab next piece if any available.
	 */
	if (td->read_iolog)
		return read_iolog_get(td, io_u);

	/*
	 * No log, let the seq/rand engine retrieve the next position.
	 */
	if (!get_next_offset(td, f, &io_u->offset)) {
		io_u->buflen = get_next_buflen(td);

		if (io_u->buflen) {
			io_u->ddir = get_rw_ddir(td);

			/*
			 * If using a write iolog, store this entry.
			 */
			if (td->write_iolog)
				write_iolog_put(td, io_u);

			io_u->file = f;
			return 0;
		}
	}

	return 1;
}

#define queue_full(td)	list_empty(&(td)->io_u_freelist)

struct io_u *__get_io_u(struct thread_data *td)
{
	struct io_u *io_u = NULL;

	if (!queue_full(td)) {
		io_u = list_entry(td->io_u_freelist.next, struct io_u, list);

		io_u->error = 0;
		io_u->resid = 0;
		list_del(&io_u->list);
		list_add(&io_u->list, &td->io_u_busylist);
		td->cur_depth++;
	}

	return io_u;
}

/*
 * Return an io_u to be processed. Gets a buflen and offset, sets direction,
 * etc. The returned io_u is fully ready to be prepped and submitted.
 */
static struct io_u *get_io_u(struct thread_data *td, struct fio_file *f)
{
	struct io_u *io_u;

	io_u = __get_io_u(td);
	if (!io_u)
		return NULL;

	if (td->zone_bytes >= td->zone_size) {
		td->zone_bytes = 0;
		f->last_pos += td->zone_skip;
	}

	if (fill_io_u(td, f, io_u)) {
		put_io_u(td, io_u);
		return NULL;
	}

	if (io_u->buflen + io_u->offset > f->file_size) {
		if (td->io_ops->flags & FIO_RAWIO) {
			put_io_u(td, io_u);
			return NULL;
		}

		io_u->buflen = f->file_size - io_u->offset;
	}

	if (!io_u->buflen) {
		put_io_u(td, io_u);
		return NULL;
	}

	if (!td->read_iolog && !td->sequential)
		mark_random_map(td, f, io_u);

	f->last_pos += io_u->buflen;

	if (td->verify != VERIFY_NONE)
		populate_verify_io_u(td, io_u);

	if (td_io_prep(td, io_u)) {
		put_io_u(td, io_u);
		return NULL;
	}

	gettimeofday(&io_u->start_time, NULL);
	return io_u;
}

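/*
 * Design note (illustrative, not in the original): io_u structures are
 * recycled between a free list and a busy list rather than allocated
 * per io, so steady-state submission does no malloc/free and cur_depth
 * always mirrors the length of the busy list.
 */
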
static inline void td_set_runstate(struct thread_data *td, int runstate)
{
	td->runstate = runstate;
}

static struct fio_file *get_next_file(struct thread_data *td)
{
	unsigned int old_next_file = td->next_file;
	struct fio_file *f;

	do {
		f = &td->files[td->next_file];

		td->next_file++;
		if (td->next_file >= td->nr_files)
			td->next_file = 0;

		if (f->fd != -1)
			break;

		f = NULL;
	} while (td->next_file != old_next_file);

	return f;
}

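/*
 * Behavior note (illustrative, not in the original): this is a plain
 * round-robin over td->files that skips entries whose fd is -1
 * (already closed); if a full lap finds no open file it returns NULL,
 * which the io loops treat as "nothing left to do".
 */
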
static int td_io_sync(struct thread_data *td, struct fio_file *f)
{
	if (td->io_ops->sync)
		return td->io_ops->sync(td, f);

	return 0;
}

static int td_io_getevents(struct thread_data *td, int min, int max,
			   struct timespec *t)
{
	return td->io_ops->getevents(td, min, max, t);
}

static int td_io_queue(struct thread_data *td, struct io_u *io_u)
{
	gettimeofday(&io_u->issue_time, NULL);

	return td->io_ops->queue(td, io_u);
}

#define iocb_time(iocb)	((unsigned long) (iocb)->data)

static void io_completed(struct thread_data *td, struct io_u *io_u,
			 struct io_completion_data *icd)
{
	struct timeval e;
	unsigned long msec;

	gettimeofday(&e, NULL);

	if (!io_u->error) {
		unsigned int bytes = io_u->buflen - io_u->resid;
		const int idx = io_u->ddir;

		td->io_blocks[idx]++;
		td->io_bytes[idx] += bytes;
		td->zone_bytes += bytes;
		td->this_io_bytes[idx] += bytes;

		msec = mtime_since(&io_u->issue_time, &e);

		add_clat_sample(td, idx, msec);
		add_bw_sample(td, idx);

		if ((td_rw(td) || td_write(td)) && idx == DDIR_WRITE)
			log_io_piece(td, io_u);

		icd->bytes_done[idx] += bytes;
	} else
		icd->error = io_u->error;
}

static void ios_completed(struct thread_data *td, struct io_completion_data *icd)
{
	struct io_u *io_u;
	int i;

	icd->error = 0;
	icd->bytes_done[0] = icd->bytes_done[1] = 0;

	for (i = 0; i < icd->nr; i++) {
		io_u = td->io_ops->event(td, i);

		io_completed(td, io_u, icd);
		put_io_u(td, io_u);
	}
}

/*
 * When job exits, we can cancel the in-flight IO if we are using async
 * io. Attempt to do so.
 */
static void cleanup_pending_aio(struct thread_data *td)
{
	struct timespec ts = { .tv_sec = 0, .tv_nsec = 0};
	struct list_head *entry, *n;
	struct io_completion_data icd;
	struct io_u *io_u;
	int r;

	/*
	 * get immediately available events, if any
	 */
	r = td_io_getevents(td, 0, td->cur_depth, &ts);
	if (r > 0) {
		icd.nr = r;
		ios_completed(td, &icd);
	}

	/*
	 * now cancel remaining active events
	 */
	if (td->io_ops->cancel) {
		list_for_each_safe(entry, n, &td->io_u_busylist) {
			io_u = list_entry(entry, struct io_u, list);

			r = td->io_ops->cancel(td, io_u);
			if (!r)
				put_io_u(td, io_u);
		}
	}

	if (td->cur_depth) {
		r = td_io_getevents(td, td->cur_depth, td->cur_depth, NULL);
		if (r > 0) {
			icd.nr = r;
			ios_completed(td, &icd);
		}
	}
}

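/*
 * Design note (illustrative, not in the original): teardown runs in
 * three passes -- reap whatever already completed with a zero timeout,
 * try to cancel what is still in flight, then block for anything the
 * engine could not cancel -- so the busy list is drained before the
 * io_u memory is freed.
 */
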
/*
 * The main verify engine. Runs over the writes we previously submitted,
 * reads the blocks back in, and checks the crc/md5 of the data.
 */
void do_verify(struct thread_data *td)
{
	struct timeval t;
	struct io_u *io_u, *v_io_u = NULL;
	struct io_completion_data icd;
	struct fio_file *f;
	int ret, i;

	/*
	 * sync io first and invalidate cache, to make sure we really
	 * read from disk.
	 */
	for_each_file(td, f, i) {
		td_io_sync(td, f);
		file_invalidate_cache(td, f);
	}

	td_set_runstate(td, TD_VERIFYING);

	do {
		if (td->terminate)
			break;

		gettimeofday(&t, NULL);
		if (runtime_exceeded(td, &t))
			break;

		io_u = __get_io_u(td);
		if (!io_u)
			break;

		if (get_next_verify(td, io_u)) {
			put_io_u(td, io_u);
			break;
		}

		f = get_next_file(td);
		if (!f)
			break;

		io_u->file = f;

		if (td_io_prep(td, io_u)) {
			put_io_u(td, io_u);
			break;
		}

		ret = td_io_queue(td, io_u);
		if (ret) {
			put_io_u(td, io_u);
			td_verror(td, ret);
			break;
		}

		/*
		 * we have one pending to verify, do that while
		 * we are doing io on the next one
		 */
		if (do_io_u_verify(td, &v_io_u))
			break;

		ret = td_io_getevents(td, 1, 1, NULL);
		if (ret != 1) {
			if (ret < 0)
				td_verror(td, ret);
			break;
		}

		v_io_u = td->io_ops->event(td, 0);
		icd.nr = 1;
		icd.error = 0;
		io_completed(td, v_io_u, &icd);

		if (icd.error) {
			td_verror(td, icd.error);
			put_io_u(td, v_io_u);
			v_io_u = NULL;
			break;
		}

		/*
		 * if we can't submit more io, we need to verify now
		 */
		if (queue_full(td) && do_io_u_verify(td, &v_io_u))
			break;

	} while (1);

	do_io_u_verify(td, &v_io_u);

	if (td->cur_depth)
		cleanup_pending_aio(td);

	td_set_runstate(td, TD_RUNNING);
}

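/*
 * Pipelining note (illustrative, not in the original): the loop keeps
 * exactly one completed io_u (v_io_u) pending verification and checks
 * its checksum while the next read is already queued, overlapping the
 * crc/md5 work with disk latency instead of serializing the two.
 */
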
/*
 * Not really an io thread, all it does is burn CPU cycles in the specified
 * manner.
 */
static void do_cpuio(struct thread_data *td)
{
	struct timeval e;
	int split = 100 / td->cpuload;
	int i = 0;

	while (!td->terminate) {
		gettimeofday(&e, NULL);

		if (runtime_exceeded(td, &e))
			break;

		if (!(i % split))
			__usec_sleep(10000);
		else
			usec_sleep(td, 10000);

		i++;
	}
}

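/*
 * Worked example (illustrative, not in the original): cpuload=25 gives
 * split = 100 / 25 = 4, so one iteration in four takes the __usec_sleep
 * branch and the other three take usec_sleep, carving time into 10 msec
 * slices whose ratio tracks the requested load. Integer division makes
 * the duty cycle coarse: cpuload=33 yields split=3, i.e. ~33%, not exact.
 */
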
/*
 * Main IO worker function. It retrieves io_u's to process and queues
 * and reaps them, checking for rate and errors along the way.
 */
static void do_io(struct thread_data *td)
{
	struct io_completion_data icd;
	struct timeval s, e;
	unsigned long usec;
	struct fio_file *f;
	int i, ret = 0;

	td_set_runstate(td, TD_RUNNING);

	while (td->this_io_bytes[td->ddir] < td->io_size) {
		struct timespec ts = { .tv_sec = 0, .tv_nsec = 0};
		struct timespec *timeout;
		int min_evts = 0;
		struct io_u *io_u;

		if (td->terminate)
			break;

		f = get_next_file(td);
		if (!f)
			break;

		io_u = get_io_u(td, f);
		if (!io_u)
			break;

		memcpy(&s, &io_u->start_time, sizeof(s));

		ret = td_io_queue(td, io_u);
		if (ret) {
			put_io_u(td, io_u);
			td_verror(td, ret);
			break;
		}

		add_slat_sample(td, io_u->ddir, mtime_since(&io_u->start_time, &io_u->issue_time));

		if (td->cur_depth < td->iodepth) {
			timeout = &ts;
			min_evts = 0;
		} else {
			timeout = NULL;
			min_evts = 1;
		}

		ret = td_io_getevents(td, min_evts, td->cur_depth, timeout);
		if (ret < 0) {
			td_verror(td, -ret);
			break;
		} else if (!ret)
			continue;

		icd.nr = ret;
		ios_completed(td, &icd);
		if (icd.error) {
			td_verror(td, icd.error);
			break;
		}

		/*
		 * the rate is batched for now, it should work for batches
		 * of completions except the very first one which may look
		 * a little bursty
		 */
		gettimeofday(&e, NULL);
		usec = utime_since(&s, &e);

		rate_throttle(td, usec, icd.bytes_done[td->ddir]);

		if (check_min_rate(td, &e)) {
			if (rate_quit)
				terminate_threads(td->groupid);
			td_verror(td, ENOMEM);
			break;
		}

		if (runtime_exceeded(td, &e))
			break;

		if (td->thinktime)
			usec_sleep(td, td->thinktime);

		if (should_fsync(td) && td->fsync_blocks &&
		    (td->io_blocks[DDIR_WRITE] % td->fsync_blocks) == 0)
			td_io_sync(td, f);
	}

	if (!ret) {
		if (td->cur_depth)
			cleanup_pending_aio(td);

		if (should_fsync(td) && td->end_fsync) {
			td_set_runstate(td, TD_FSYNCING);
			for_each_file(td, f, i)
				td_io_sync(td, f);
		}
	}
}

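/*
 * Depth-handling note (illustrative, not in the original): while the
 * queue is below iodepth, getevents is called with min_evts=0 and a
 * zero timeout, so it only polls and submission keeps the pipe full;
 * once cur_depth reaches iodepth it blocks for at least one completion
 * before issuing more.
 */
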
static int td_io_init(struct thread_data *td)
{
	if (td->io_ops->init)
		return td->io_ops->init(td);

	return 0;
}

static void cleanup_io_u(struct thread_data *td)
{
	struct list_head *entry, *n;
	struct io_u *io_u;

	list_for_each_safe(entry, n, &td->io_u_freelist) {
		io_u = list_entry(entry, struct io_u, list);

		list_del(&io_u->list);
		free(io_u);
	}

	free_io_mem(td);
}

static int init_io_u(struct thread_data *td)
{
	struct io_u *io_u;
	int i, max_units;
	char *p;

	if (td->io_ops->flags & FIO_CPUIO)
		return 0;

	if (td->io_ops->flags & FIO_SYNCIO)
		max_units = 1;
	else
		max_units = td->iodepth;

	td->orig_buffer_size = td->max_bs * max_units + MASK;

	if (allocate_io_mem(td))
		return 1;

	p = ALIGN(td->orig_buffer);
	for (i = 0; i < max_units; i++) {
		io_u = malloc(sizeof(*io_u));
		memset(io_u, 0, sizeof(*io_u));
		INIT_LIST_HEAD(&io_u->list);

		io_u->buf = p + td->max_bs * i;
		io_u->index = i;
		list_add(&io_u->list, &td->io_u_freelist);
	}

	return 0;
}

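/*
 * Worked example (illustrative, not in the original): with max_bs 64k
 * and iodepth 4, one region of 4 * 65536 + 4095 bytes is allocated,
 * ALIGN snaps the base up to a 4096-byte boundary (the extra MASK bytes
 * pay for that slack), and each io_u gets a fixed 64k slice -- aligned
 * buffers being what O_DIRECT engines require.
 */
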
static int switch_ioscheduler(struct thread_data *td)
{
	char tmp[256], tmp2[128];
	FILE *f;
	int ret;

	sprintf(tmp, "%s/queue/scheduler", td->sysfs_root);

	f = fopen(tmp, "r+");
	if (!f) {
		td_verror(td, errno);
		return 1;
	}

	/*
	 * Set io scheduler.
	 */
	ret = fwrite(td->ioscheduler, strlen(td->ioscheduler), 1, f);
	if (ferror(f) || ret != 1) {
		td_verror(td, errno);
		fclose(f);
		return 1;
	}

	rewind(f);

	/*
	 * Read back and check that the selected scheduler is now the default.
	 */
	ret = fread(tmp, 1, sizeof(tmp), f);
	if (ferror(f) || ret < 0) {
		td_verror(td, errno);
		fclose(f);
		return 1;
	}

	sprintf(tmp2, "[%s]", td->ioscheduler);
	if (!strstr(tmp, tmp2)) {
		log_err("fio: io scheduler %s not found\n", td->ioscheduler);
		td_verror(td, EINVAL);
		fclose(f);
		return 1;
	}

	fclose(f);
	return 0;
}

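/*
 * Format note (illustrative, not in the original): the sysfs scheduler
 * file lists every scheduler with the active one bracketed, e.g.
 * "noop deadline [cfq]", which is why the read-back check searches for
 * the "[name]" substring rather than comparing the whole line.
 */
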
static void clear_io_state(struct thread_data *td)
{
	struct fio_file *f;
	int i;

	td->stat_io_bytes[0] = td->stat_io_bytes[1] = 0;
	td->this_io_bytes[0] = td->this_io_bytes[1] = 0;
	td->zone_bytes = 0;

	for_each_file(td, f, i) {
		f->last_pos = 0;
		if (td->io_ops->flags & FIO_SYNCIO)
			lseek(f->fd, 0, SEEK_SET);

		if (f->file_map)
			memset(f->file_map, 0, f->num_maps * sizeof(long));
	}
}

/*
 * Entry point for the thread based jobs. The process based jobs end up
 * here as well, after a little setup.
 */
static void *thread_main(void *data)
{
	struct thread_data *td = data;

	if (!td->use_thread)
		setsid();

	td->pid = getpid();

	INIT_LIST_HEAD(&td->io_u_freelist);
	INIT_LIST_HEAD(&td->io_u_busylist);
	INIT_LIST_HEAD(&td->io_hist_list);
	INIT_LIST_HEAD(&td->io_log_list);

	if (init_io_u(td))
		goto err;

	if (fio_setaffinity(td) == -1) {
		td_verror(td, errno);
		goto err;
	}

	if (td_io_init(td))
		goto err;

	if (init_iolog(td))
		goto err;

	if (td->ioprio) {
		if (ioprio_set(IOPRIO_WHO_PROCESS, 0, td->ioprio) == -1) {
			td_verror(td, errno);
			goto err;
		}
	}

	if (nice(td->nice) == -1) {
		td_verror(td, errno);
		goto err;
	}

	if (init_random_state(td))
		goto err;

	if (td->ioscheduler && switch_ioscheduler(td))
		goto err;

	td_set_runstate(td, TD_INITIALIZED);
	fio_sem_up(&startup_sem);
	fio_sem_down(&td->mutex);

	if (!td->create_serialize && setup_files(td))
		goto err;

	gettimeofday(&td->epoch, NULL);

	if (td->exec_prerun)
		system(td->exec_prerun);

	while (td->loops--) {
		getrusage(RUSAGE_SELF, &td->ru_start);
		gettimeofday(&td->start, NULL);
		memcpy(&td->stat_sample_time, &td->start, sizeof(td->start));

		if (td->ratemin)
			memcpy(&td->lastrate, &td->stat_sample_time, sizeof(td->lastrate));

		clear_io_state(td);
		prune_io_piece_log(td);

		if (td->io_ops->flags & FIO_CPUIO)
			do_cpuio(td);
		else
			do_io(td);

		td->runtime[td->ddir] += mtime_since_now(&td->start);
		if (td_rw(td) && td->io_bytes[td->ddir ^ 1])
			td->runtime[td->ddir ^ 1] = td->runtime[td->ddir];

		update_rusage_stat(td);

		if (td->error || td->terminate)
			break;

		if (td->verify == VERIFY_NONE)
			continue;

		clear_io_state(td);
		gettimeofday(&td->start, NULL);

		do_verify(td);

		td->runtime[DDIR_READ] += mtime_since_now(&td->start);

		if (td->error || td->terminate)
			break;
	}

	if (td->bw_log)
		finish_log(td, td->bw_log, "bw");
	if (td->slat_log)
		finish_log(td, td->slat_log, "slat");
	if (td->clat_log)
		finish_log(td, td->clat_log, "clat");
	if (td->write_iolog)
		write_iolog_close(td);
	if (td->exec_postrun)
		system(td->exec_postrun);

	if (exitall_on_terminate)
		terminate_threads(td->groupid);

err:
	close_files(td);
	close_ioengine(td);
	cleanup_io_u(td);
	td_set_runstate(td, TD_EXITED);
	return NULL;
}

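/*
 * Handshake note (illustrative, not in the original): the worker raises
 * startup_sem to tell the main loop it reached TD_INITIALIZED, then
 * parks on its own td->mutex until run_threads() releases it -- that is
 * how start_delay and stonewall ordering are enforced from outside.
 */
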
/*
 * We cannot pass the td data into a forked process, so attach the td and
 * pass it to the thread worker.
 */
static void *fork_main(int shmid, int offset)
{
	struct thread_data *td;
	void *data;

	data = shmat(shmid, NULL, 0);
	if (data == (void *) -1) {
		perror("shmat");
		return NULL;
	}

	td = data + offset * sizeof(struct thread_data);
	thread_main(td);
	shmdt(data);
	return NULL;
}

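/*
 * Layout note (illustrative, not in the original): the shared segment
 * holds an array of struct thread_data, so job i lives at
 * base + i * sizeof(struct thread_data); re-attaching by shmid in the
 * forked child gives both processes the same td even though the mapping
 * address may differ.
 */
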
/*
 * Run over the job map and reap the threads that have exited, if any.
 */
static void reap_threads(int *nr_running, int *t_rate, int *m_rate)
{
	struct thread_data *td;
	int i, cputhreads;

	/*
	 * reap exited threads (TD_EXITED -> TD_REAPED)
	 */
	cputhreads = 0;
	for_each_td(td, i) {
		/*
		 * ->io_ops is NULL for a thread that has closed its
		 * io engine
		 */
		if (td->io_ops && td->io_ops->flags & FIO_CPUIO)
			cputhreads++;

		if (td->runstate != TD_EXITED)
			continue;

		td_set_runstate(td, TD_REAPED);

		if (td->use_thread) {
			long ret;

			if (pthread_join(td->thread, (void *) &ret))
				perror("thread_join");
		} else
			waitpid(td->pid, NULL, 0);

		(*nr_running)--;
		(*m_rate) -= td->ratemin;
		(*t_rate) -= td->rate;
	}

	if (*nr_running == cputhreads)
		terminate_threads(TERMINATE_ALL);
}

/*
 * Main function for kicking off and reaping jobs, as needed.
 */
static void run_threads(void)
{
	struct thread_data *td;
	unsigned long spent;
	int i, todo, nr_running, m_rate, t_rate, nr_started;

	if (fio_pin_memory())
		return;

	if (!terse_output) {
		printf("Starting %d thread%s\n", thread_number, thread_number > 1 ? "s" : "");
		fflush(stdout);
	}

	signal(SIGINT, sig_handler);
	signal(SIGALRM, sig_handler);

	todo = thread_number;
	nr_running = 0;
	nr_started = 0;
	m_rate = t_rate = 0;

	for_each_td(td, i) {
		print_status_init(td->thread_number - 1);

		init_disk_util(td);

		if (!td->create_serialize)
			continue;

		/*
		 * do file setup here so it happens sequentially,
		 * we don't want X number of threads getting their
		 * client data interspersed on disk
		 */
		if (setup_files(td)) {
			td_set_runstate(td, TD_REAPED);
			todo--;
		}
	}

	time_init();

	while (todo) {
		struct thread_data *map[MAX_JOBS];
		struct timeval this_start;
		int this_jobs = 0, left;

		/*
		 * create threads (TD_NOT_CREATED -> TD_CREATED)
		 */
		for_each_td(td, i) {
			if (td->runstate != TD_NOT_CREATED)
				continue;

			/*
			 * never got a chance to start, killed by other
			 * thread for some reason
			 */
			if (td->terminate) {
				todo--;
				continue;
			}

			if (td->start_delay) {
				spent = mtime_since_genesis();

				if (td->start_delay * 1000 > spent)
					continue;
			}

			if (td->stonewall && (nr_started || nr_running))
				break;

			/*
			 * Set state to created. Thread will transition
			 * to TD_INITIALIZED when it's done setting up.
			 */
			td_set_runstate(td, TD_CREATED);
			map[this_jobs++] = td;
			fio_sem_init(&startup_sem, 1);
			nr_started++;

			if (td->use_thread) {
				if (pthread_create(&td->thread, NULL, thread_main, td)) {
					perror("thread_create");
					nr_started--;
				}
			} else {
				if (fork())
					fio_sem_down(&startup_sem);
				else {
					fork_main(shm_id, i);
					exit(0);
				}
			}
		}

		/*
		 * Wait for the started threads to transition to
		 * TD_INITIALIZED.
		 */
		gettimeofday(&this_start, NULL);
		left = this_jobs;
		while (left) {
			if (mtime_since_now(&this_start) > JOB_START_TIMEOUT)
				break;

			usleep(100000);

			for (i = 0; i < this_jobs; i++) {
				td = map[i];
				if (!td)
					continue;
				if (td->runstate == TD_INITIALIZED) {
					map[i] = NULL;
					left--;
				} else if (td->runstate >= TD_EXITED) {
					map[i] = NULL;
					left--;
					todo--;
					nr_running++; /* work-around... */
				}
			}
		}

		if (left) {
			log_err("fio: %d jobs failed to start\n", left);
			for (i = 0; i < this_jobs; i++) {
				td = map[i];
				if (!td)
					continue;
				kill(td->pid, SIGTERM);
			}
			break;
		}

		/*
		 * start created threads (TD_INITIALIZED -> TD_RUNNING).
		 */
		for_each_td(td, i) {
			if (td->runstate != TD_INITIALIZED)
				continue;

			td_set_runstate(td, TD_RUNNING);
			nr_running++;
			nr_started--;
			m_rate += td->ratemin;
			t_rate += td->rate;
			todo--;
			fio_sem_up(&td->mutex);
		}

		reap_threads(&nr_running, &t_rate, &m_rate);

		if (todo)
			usleep(100000);
	}

	while (nr_running) {
		reap_threads(&nr_running, &t_rate, &m_rate);
		usleep(10000);
	}

	update_io_ticks();
	fio_unpin_memory();
}

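/*
 * Startup note (illustrative, not in the original): jobs move through
 * TD_CREATED -> TD_INITIALIZED -> TD_RUNNING in two sweeps; the main
 * loop waits up to JOB_START_TIMEOUT msecs for a batch to report
 * TD_INITIALIZED before releasing their mutexes, so a hung job aborts
 * the run instead of wedging it.
 */
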
int main(int argc, char *argv[])
{
	if (parse_options(argc, argv))
		return 1;

	if (!thread_number) {
		log_err("Nothing to do\n");
		return 1;
	}

	disk_util_timer_arm();

	run_threads();
	show_run_stats();

	return 0;
}