/*
 * syslet engine
 *
 * IO engine that does regular pread(2)/pwrite(2) to transfer data, but
 * with syslets to make the execution async.
 *
 */
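/*
 * A minimal example job for this engine, assuming the standard fio
 * job-file syntax (values are purely illustrative):
 *
 *	[syslet-test]
 *	ioengine=syslet-rw
 *	rw=randread
 *	filename=/tmp/testfile
 *	size=256m
 *	iodepth=32
 */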
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <errno.h>
#include <assert.h>
#include <asm/unistd.h>

#include "../fio.h"
#include "../os.h"

#ifdef FIO_HAVE_SYSLET

#ifdef __NR_pread64
#define __NR_fio_pread	__NR_pread64
#define __NR_fio_pwrite	__NR_pwrite64
#else
#define __NR_fio_pread	__NR_pread
#define __NR_fio_pwrite	__NR_pwrite
#endif

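/*
 * Per-thread engine state: a staging array of completed io_u's handed
 * back through ->event(), the async_head_user shared with the kernel,
 * the user-visible completion ring, and the head/tail of the atom
 * chain built up between ->queue() and ->commit().
 */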
struct syslet_data {
	struct io_u **events;
	unsigned int nr_events;

	struct async_head_user ahu;
	struct syslet_uatom **ring;

	struct syslet_uatom *head, *tail;
};

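/*
 * An atom has completed: walk its chain from the head stored in
 * io_u->req.head up to and including this atom, record residual byte
 * counts and errors, and stage the io_u's in sd->events.
 */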
static void fio_syslet_complete_atom(struct thread_data *td,
				     struct syslet_uatom *atom)
{
	struct syslet_data *sd = td->io_ops->data;
	struct syslet_uatom *last;
	struct io_u *io_u;

	/*
	 * complete from the beginning of the sequence up to (and
	 * including) this atom
	 */
	last = atom;
	io_u = atom->private;
	atom = io_u->req.head;

	/*
	 * now complete in right order
	 */
	do {
		long ret;

		io_u = atom->private;
		ret = *atom->ret_ptr;
		if (ret >= 0)
			io_u->resid = io_u->xfer_buflen - ret;
		else if (ret < 0)
			io_u->error = ret;

		assert(sd->nr_events < td->o.iodepth);
		sd->events[sd->nr_events++] = io_u;

		if (atom == last)
			break;

		atom = atom->next;
	} while (1);

	assert(!last->next);
}

/*
 * Inspect the ring to see if we have completed events
 */
static void fio_syslet_complete(struct thread_data *td)
{
	struct syslet_data *sd = td->io_ops->data;

	do {
		struct syslet_uatom *atom;

		atom = sd->ring[sd->ahu.user_ring_idx];
		if (!atom)
			break;

		sd->ring[sd->ahu.user_ring_idx] = NULL;
		if (++sd->ahu.user_ring_idx == td->o.iodepth)
			sd->ahu.user_ring_idx = 0;

		fio_syslet_complete_atom(td, atom);
	} while (1);
}

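/*
 * Reap completed events: drain the completion ring first, and if we
 * still have fewer than 'min' completions, wait in async_wait() for
 * more to arrive.
 */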
static int fio_syslet_getevents(struct thread_data *td, int min,
				int fio_unused max,
				struct timespec fio_unused *t)
{
	struct syslet_data *sd = td->io_ops->data;
	long ret;

	do {
		fio_syslet_complete(td);

		/*
		 * do we have enough immediate completions?
		 */
		if (sd->nr_events >= (unsigned int) min)
			break;

		/*
		 * OK, we need to wait for some events...
		 */
		ret = async_wait(1, sd->ahu.user_ring_idx, &sd->ahu);
		if (ret < 0)
			return -errno;
	} while (1);

	ret = sd->nr_events;
	sd->nr_events = 0;
	return ret;
}

static struct io_u *fio_syslet_event(struct thread_data *td, int event)
{
	struct syslet_data *sd = td->io_ops->data;

	return sd->events[event];
}

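/*
 * Fill in a syslet_uatom: the syscall number, up to four argument
 * pointers (the remaining two slots are cleared), where to store the
 * syscall return value, and a private pointer back to the io_u.
 */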
static void init_atom(struct syslet_uatom *atom, int nr, void *arg0,
		      void *arg1, void *arg2, void *arg3, void *ret_ptr,
		      unsigned long flags, void *priv)
{
	atom->flags = flags;
	atom->nr = nr;
	atom->ret_ptr = ret_ptr;
	atom->next = NULL;
	atom->arg_ptr[0] = arg0;
	atom->arg_ptr[1] = arg1;
	atom->arg_ptr[2] = arg2;
	atom->arg_ptr[3] = arg3;
	atom->arg_ptr[4] = atom->arg_ptr[5] = NULL;
	atom->private = priv;
}

/*
 * Use an fsync atom for sync requests
 */
static void fio_syslet_prep_sync(struct io_u *io_u, struct fio_file *f)
{
	init_atom(&io_u->req.atom, __NR_fsync, &f->fd, NULL, NULL, NULL,
		  &io_u->req.ret, 0, io_u);
}

static void fio_syslet_prep_rw(struct io_u *io_u, struct fio_file *f)
{
	int nr;

	/*
	 * prepare the read/write atom: fd, buffer, length, and offset
	 */
	if (io_u->ddir == DDIR_READ)
		nr = __NR_fio_pread;
	else
		nr = __NR_fio_pwrite;

	init_atom(&io_u->req.atom, nr, &f->fd, &io_u->xfer_buf,
		  &io_u->xfer_buflen, &io_u->offset, &io_u->req.ret, 0, io_u);
}

static int fio_syslet_prep(struct thread_data fio_unused *td, struct io_u *io_u)
{
	struct fio_file *f = io_u->file;

	if (io_u->ddir == DDIR_SYNC)
		fio_syslet_prep_sync(io_u, f);
	else
		fio_syslet_prep_rw(io_u, f);

	return 0;
}

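/*
 * Entry point used for both the async head and new cachemiss threads
 * (see async_head_init() below): they simply loop in async_thread().
 * Thread stacks are plain malloc()ed blocks; thread_stack_alloc()
 * returns the end of the allocation, presumably because the stack
 * grows downwards on the architectures this engine targets.
 */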
static void cachemiss_thread_start(void)
{
	while (1)
		async_thread(NULL, NULL);
}

#define THREAD_STACK_SIZE (16384)

static unsigned long thread_stack_alloc(void)
{
	return (unsigned long) malloc(THREAD_STACK_SIZE) + THREAD_STACK_SIZE;
}

static void fio_syslet_queued(struct thread_data *td, struct syslet_data *sd)
{
	struct syslet_uatom *atom;
	struct timeval now;

	fio_gettime(&now, NULL);

	atom = sd->head;
	while (atom) {
		struct io_u *io_u = atom->private;

		memcpy(&io_u->issue_time, &now, sizeof(now));
		io_u_queued(td, io_u);
		atom = atom->next;
	}
}

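/*
 * Submit the chain of atoms built up by ->queue(). async_exec()
 * returns the atom on synchronous completion, NULL if it was queued
 * asynchronously, or (void *) -1 if syslets are not functional.
 */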
static int fio_syslet_commit(struct thread_data *td)
{
	struct syslet_data *sd = td->io_ops->data;
	struct syslet_uatom *done;

	if (!sd->head)
		return 0;

	assert(!sd->tail->next);

	if (!sd->ahu.new_thread_stack)
		sd->ahu.new_thread_stack = thread_stack_alloc();

	fio_syslet_queued(td, sd);

	/*
	 * On sync completion, the atom is returned. So on NULL return
	 * it's queued asynchronously.
	 */
	done = async_exec(sd->head, &sd->ahu);

	if (done == (void *) -1) {
		log_err("fio: syslets don't appear to work\n");
		return -1;
	}

	sd->head = sd->tail = NULL;

	if (done)
		fio_syslet_complete_atom(td, done);

	return 0;
}

static int fio_syslet_queue(struct thread_data *td, struct io_u *io_u)
{
	struct syslet_data *sd = td->io_ops->data;

	if (sd->tail) {
		sd->tail->next = &io_u->req.atom;
		sd->tail = &io_u->req.atom;
	} else
		sd->head = sd->tail = &io_u->req.atom;

	io_u->req.head = sd->head;
	return FIO_Q_QUEUED;
}

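/*
 * Set up the async_head_user that is shared with the kernel: the
 * user-side completion ring, its size, and the stacks/entry points for
 * the head and cachemiss threads.
 */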
static int async_head_init(struct syslet_data *sd, unsigned int depth)
{
	unsigned long ring_size;

	memset(&sd->ahu, 0, sizeof(struct async_head_user));

	ring_size = sizeof(struct syslet_uatom *) * depth;
	sd->ring = malloc(ring_size);
	memset(sd->ring, 0, ring_size);

	sd->ahu.user_ring_idx = 0;
	sd->ahu.completion_ring = sd->ring;
	sd->ahu.ring_size_bytes = ring_size;
	sd->ahu.head_stack = thread_stack_alloc();
	sd->ahu.head_eip = (unsigned long) cachemiss_thread_start;
	sd->ahu.new_thread_eip = (unsigned long) cachemiss_thread_start;

	return 0;
}

static void async_head_exit(struct syslet_data *sd)
{
	free(sd->ring);
}

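/*
 * Probe for working syslet support by executing a harmless getpid
 * atom: async_exec() returning (void *) -1 means the running kernel
 * does not support syslets.
 */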
static int check_syslet_support(struct syslet_data *sd)
{
	struct syslet_uatom atom;
	void *ret;

	init_atom(&atom, __NR_getpid, NULL, NULL, NULL, NULL, NULL, 0, NULL);
	ret = async_exec(&atom, &sd->ahu);
	if (ret == (void *) -1)
		return 1;

	return 0;
}

static void fio_syslet_cleanup(struct thread_data *td)
{
	struct syslet_data *sd = td->io_ops->data;

	if (sd) {
		async_head_exit(sd);
		free(sd->events);
		free(sd);
		td->io_ops->data = NULL;
	}
}

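/*
 * Allocate the per-thread engine data, size the event array to the
 * configured iodepth, set up the async head, and verify that the
 * running kernel actually supports syslets.
 */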
static int fio_syslet_init(struct thread_data *td)
{
	struct syslet_data *sd;

	sd = malloc(sizeof(*sd));
	memset(sd, 0, sizeof(*sd));
	sd->events = malloc(sizeof(struct io_u *) * td->o.iodepth);
	memset(sd->events, 0, sizeof(struct io_u *) * td->o.iodepth);

	/*
	 * This will handily fail for kernels where syslet isn't available
	 */
	if (async_head_init(sd, td->o.iodepth)) {
		free(sd->events);
		free(sd);
		return 1;
	}

	if (check_syslet_support(sd)) {
		log_err("fio: syslets do not appear to work\n");
		free(sd->events);
		free(sd);
		return 1;
	}

	td->io_ops->data = sd;
	return 0;
}

static struct ioengine_ops ioengine = {
	.name		= "syslet-rw",
	.version	= FIO_IOOPS_VERSION,
	.init		= fio_syslet_init,
	.prep		= fio_syslet_prep,
	.queue		= fio_syslet_queue,
	.commit		= fio_syslet_commit,
	.getevents	= fio_syslet_getevents,
	.event		= fio_syslet_event,
	.cleanup	= fio_syslet_cleanup,
	.open_file	= generic_open_file,
	.close_file	= generic_close_file,
};

#else /* FIO_HAVE_SYSLET */

/*
 * When we have a proper configure system in place, we simply won't build
 * and install this io engine. For now, install a crippled version that
 * just complains and fails to load.
 */
static int fio_syslet_init(struct thread_data fio_unused *td)
{
	fprintf(stderr, "fio: syslet not available\n");
	return 1;
}

static struct ioengine_ops ioengine = {
	.name		= "syslet-rw",
	.version	= FIO_IOOPS_VERSION,
	.init		= fio_syslet_init,
};

#endif /* FIO_HAVE_SYSLET */

static void fio_init fio_syslet_register(void)
{
	register_ioengine(&ioengine);
}

static void fio_exit fio_syslet_unregister(void)
{
	unregister_ioengine(&ioengine);
}