Jens Axboe | a4f4fdd | 2007-02-14 01:16:39 +0100 | [diff] [blame] | 1 | /* |
| 2 | * read/write() engine that uses syslet to be async |
| 3 | * |
| 4 | */ |
| 5 | #include <stdio.h> |
| 6 | #include <stdlib.h> |
| 7 | #include <unistd.h> |
| 8 | #include <errno.h> |
| 9 | #include <assert.h> |
| 10 | |
| 11 | #include "../fio.h" |
| 12 | #include "../os.h" |
| 13 | |
| 14 | #ifdef FIO_HAVE_SYSLET |
| 15 | |
/*
 * Per-thread private state for the syslet engine.
 */
struct syslet_data {
	struct io_u **events;		/* completed io_u's handed back via ->event() */
	unsigned int nr_events;		/* number of valid entries in ->events */

	struct async_head_user ahu;	/* user/kernel shared syslet head state */
	struct syslet_uatom **ring;	/* completion ring; sized to the iodepth */
};
| 23 | |
| 24 | /* |
| 25 | * Inspect the ring to see if we have completed events |
| 26 | */ |
| 27 | static void fio_syslet_complete(struct thread_data *td) |
| 28 | { |
| 29 | struct syslet_data *sd = td->io_ops->data; |
| 30 | |
| 31 | do { |
| 32 | struct syslet_uatom *atom; |
| 33 | struct io_u *io_u; |
| 34 | long ret; |
| 35 | |
Ingo Molnar | bf0dc8f | 2007-02-21 23:25:44 +0100 | [diff] [blame] | 36 | atom = sd->ring[sd->ahu.user_ring_idx]; |
Jens Axboe | a4f4fdd | 2007-02-14 01:16:39 +0100 | [diff] [blame] | 37 | if (!atom) |
| 38 | break; |
| 39 | |
Ingo Molnar | bf0dc8f | 2007-02-21 23:25:44 +0100 | [diff] [blame] | 40 | sd->ring[sd->ahu.user_ring_idx] = NULL; |
| 41 | if (++sd->ahu.user_ring_idx == td->iodepth) |
| 42 | sd->ahu.user_ring_idx = 0; |
Jens Axboe | a4f4fdd | 2007-02-14 01:16:39 +0100 | [diff] [blame] | 43 | |
| 44 | io_u = atom->private; |
| 45 | ret = *atom->ret_ptr; |
| 46 | if (ret > 0) |
| 47 | io_u->resid = io_u->xfer_buflen - ret; |
| 48 | else if (ret < 0) |
| 49 | io_u->error = ret; |
| 50 | |
| 51 | sd->events[sd->nr_events++] = io_u; |
| 52 | } while (1); |
| 53 | } |
| 54 | |
| 55 | static int fio_syslet_getevents(struct thread_data *td, int min, |
| 56 | int fio_unused max, |
| 57 | struct timespec fio_unused *t) |
| 58 | { |
| 59 | struct syslet_data *sd = td->io_ops->data; |
| 60 | int get_events; |
| 61 | long ret; |
| 62 | |
| 63 | do { |
| 64 | fio_syslet_complete(td); |
| 65 | |
| 66 | /* |
| 67 | * do we have enough immediate completions? |
| 68 | */ |
| 69 | if (sd->nr_events >= (unsigned int) min) |
| 70 | break; |
| 71 | |
| 72 | /* |
| 73 | * OK, we need to wait for some events... |
| 74 | */ |
| 75 | get_events = min - sd->nr_events; |
Ingo Molnar | bf0dc8f | 2007-02-21 23:25:44 +0100 | [diff] [blame] | 76 | ret = async_wait(get_events, sd->ahu.user_ring_idx, &sd->ahu); |
Jens Axboe | a4f4fdd | 2007-02-14 01:16:39 +0100 | [diff] [blame] | 77 | if (ret < 0) |
Jens Axboe | e49499f | 2007-02-22 11:08:52 +0100 | [diff] [blame] | 78 | return -errno; |
Jens Axboe | a4f4fdd | 2007-02-14 01:16:39 +0100 | [diff] [blame] | 79 | } while (1); |
| 80 | |
| 81 | ret = sd->nr_events; |
| 82 | sd->nr_events = 0; |
| 83 | return ret; |
| 84 | } |
| 85 | |
| 86 | static struct io_u *fio_syslet_event(struct thread_data *td, int event) |
| 87 | { |
| 88 | struct syslet_data *sd = td->io_ops->data; |
| 89 | |
| 90 | return sd->events[event]; |
| 91 | } |
| 92 | |
| 93 | static void init_atom(struct syslet_uatom *atom, int nr, void *arg0, |
Jens Axboe | a2e1b08 | 2007-02-14 08:06:19 +0100 | [diff] [blame] | 94 | void *arg1, void *arg2, void *arg3, void *ret_ptr, |
| 95 | unsigned long flags, void *priv) |
Jens Axboe | a4f4fdd | 2007-02-14 01:16:39 +0100 | [diff] [blame] | 96 | { |
| 97 | atom->flags = flags; |
| 98 | atom->nr = nr; |
| 99 | atom->ret_ptr = ret_ptr; |
Jens Axboe | a2e1b08 | 2007-02-14 08:06:19 +0100 | [diff] [blame] | 100 | atom->next = NULL; |
Jens Axboe | a4f4fdd | 2007-02-14 01:16:39 +0100 | [diff] [blame] | 101 | atom->arg_ptr[0] = arg0; |
| 102 | atom->arg_ptr[1] = arg1; |
| 103 | atom->arg_ptr[2] = arg2; |
Jens Axboe | a2e1b08 | 2007-02-14 08:06:19 +0100 | [diff] [blame] | 104 | atom->arg_ptr[3] = arg3; |
| 105 | atom->arg_ptr[4] = atom->arg_ptr[5] = NULL; |
Jens Axboe | a4f4fdd | 2007-02-14 01:16:39 +0100 | [diff] [blame] | 106 | atom->private = priv; |
| 107 | } |
| 108 | |
/*
 * Build an fsync atom for DDIR_SYNC requests. (The old comment said
 * "seek atom", but the syscall issued here is __NR_fsync.)
 */
static void fio_syslet_prep_sync(struct io_u *io_u, struct fio_file *f)
{
	init_atom(&io_u->req.atom, __NR_fsync, &f->fd, NULL, NULL, NULL,
		  &io_u->req.ret, 0, io_u);
}
| 117 | |
| 118 | static void fio_syslet_prep_rw(struct io_u *io_u, struct fio_file *f) |
| 119 | { |
| 120 | int nr; |
| 121 | |
| 122 | /* |
Jens Axboe | a4f4fdd | 2007-02-14 01:16:39 +0100 | [diff] [blame] | 123 | * prepare rw |
| 124 | */ |
| 125 | if (io_u->ddir == DDIR_READ) |
Jens Axboe | a2e1b08 | 2007-02-14 08:06:19 +0100 | [diff] [blame] | 126 | nr = __NR_pread64; |
Jens Axboe | a4f4fdd | 2007-02-14 01:16:39 +0100 | [diff] [blame] | 127 | else |
Jens Axboe | a2e1b08 | 2007-02-14 08:06:19 +0100 | [diff] [blame] | 128 | nr = __NR_pwrite64; |
Jens Axboe | a4f4fdd | 2007-02-14 01:16:39 +0100 | [diff] [blame] | 129 | |
Jens Axboe | a2e1b08 | 2007-02-14 08:06:19 +0100 | [diff] [blame] | 130 | init_atom(&io_u->req.atom, nr, &f->fd, &io_u->xfer_buf, |
Jens Axboe | 7d44a74 | 2007-02-14 17:32:08 +0100 | [diff] [blame] | 131 | &io_u->xfer_buflen, &io_u->offset, &io_u->req.ret, 0, io_u); |
Jens Axboe | a4f4fdd | 2007-02-14 01:16:39 +0100 | [diff] [blame] | 132 | } |
| 133 | |
| 134 | static int fio_syslet_prep(struct thread_data fio_unused *td, struct io_u *io_u) |
| 135 | { |
| 136 | struct fio_file *f = io_u->file; |
| 137 | |
| 138 | if (io_u->ddir == DDIR_SYNC) |
| 139 | fio_syslet_prep_sync(io_u, f); |
| 140 | else |
| 141 | fio_syslet_prep_rw(io_u, f); |
| 142 | |
| 143 | return 0; |
| 144 | } |
| 145 | |
/*
 * Entry point for syslet helper threads: service async work forever.
 * Never returns.
 */
static void cachemiss_thread_start(void)
{
	for (;;)
		async_thread();
}
| 151 | |
#define THREAD_STACK_SIZE (16384)

/*
 * Allocate a helper-thread stack and return its top (the stack grows
 * down). Returns 0 on allocation failure — the old version returned
 * malloc(NULL) + THREAD_STACK_SIZE, i.e. a bogus nonzero address, when
 * malloc failed. Also fixes the unprototyped empty parameter list.
 */
static unsigned long thread_stack_alloc(void)
{
	void *base = malloc(THREAD_STACK_SIZE);

	if (!base)
		return 0;

	return (unsigned long) base + THREAD_STACK_SIZE;
}
| 158 | |
Jens Axboe | a4f4fdd | 2007-02-14 01:16:39 +0100 | [diff] [blame] | 159 | static int fio_syslet_queue(struct thread_data *td, struct io_u *io_u) |
| 160 | { |
| 161 | struct syslet_data *sd = td->io_ops->data; |
Ingo Molnar | bf0dc8f | 2007-02-21 23:25:44 +0100 | [diff] [blame] | 162 | struct syslet_uatom *done; |
Jens Axboe | a4f4fdd | 2007-02-14 01:16:39 +0100 | [diff] [blame] | 163 | long ret; |
| 164 | |
Ingo Molnar | bf0dc8f | 2007-02-21 23:25:44 +0100 | [diff] [blame] | 165 | if (!sd->ahu.new_thread_stack) |
| 166 | sd->ahu.new_thread_stack = thread_stack_alloc(); |
| 167 | |
Jens Axboe | 7d44a74 | 2007-02-14 17:32:08 +0100 | [diff] [blame] | 168 | /* |
| 169 | * On sync completion, the atom is returned. So on NULL return |
| 170 | * it's queued asynchronously. |
| 171 | */ |
Ingo Molnar | bf0dc8f | 2007-02-21 23:25:44 +0100 | [diff] [blame] | 172 | done = async_exec(&io_u->req.atom, &sd->ahu); |
| 173 | |
| 174 | if (!done) |
Jens Axboe | 36167d8 | 2007-02-18 05:41:31 +0100 | [diff] [blame] | 175 | return FIO_Q_QUEUED; |
Jens Axboe | a4f4fdd | 2007-02-14 01:16:39 +0100 | [diff] [blame] | 176 | |
| 177 | /* |
| 178 | * completed sync |
| 179 | */ |
Jens Axboe | a2e1b08 | 2007-02-14 08:06:19 +0100 | [diff] [blame] | 180 | ret = io_u->req.ret; |
Jens Axboe | a4f4fdd | 2007-02-14 01:16:39 +0100 | [diff] [blame] | 181 | if (ret != (long) io_u->xfer_buflen) { |
| 182 | if (ret > 0) { |
| 183 | io_u->resid = io_u->xfer_buflen - ret; |
| 184 | io_u->error = 0; |
Jens Axboe | 36167d8 | 2007-02-18 05:41:31 +0100 | [diff] [blame] | 185 | return FIO_Q_COMPLETED; |
Jens Axboe | a4f4fdd | 2007-02-14 01:16:39 +0100 | [diff] [blame] | 186 | } else |
| 187 | io_u->error = errno; |
| 188 | } |
| 189 | |
Jens Axboe | e49499f | 2007-02-22 11:08:52 +0100 | [diff] [blame] | 190 | assert(sd->nr_events < td->iodepth); |
Ingo Molnar | bf0dc8f | 2007-02-21 23:25:44 +0100 | [diff] [blame] | 191 | |
Jens Axboe | e49499f | 2007-02-22 11:08:52 +0100 | [diff] [blame] | 192 | if (io_u->error) |
Jens Axboe | a4f4fdd | 2007-02-14 01:16:39 +0100 | [diff] [blame] | 193 | |
Jens Axboe | 36167d8 | 2007-02-18 05:41:31 +0100 | [diff] [blame] | 194 | return FIO_Q_COMPLETED; |
Jens Axboe | a4f4fdd | 2007-02-14 01:16:39 +0100 | [diff] [blame] | 195 | } |
| 196 | |
Jens Axboe | db64e9b | 2007-02-14 02:10:59 +0100 | [diff] [blame] | 197 | static int async_head_init(struct syslet_data *sd, unsigned int depth) |
Jens Axboe | a4f4fdd | 2007-02-14 01:16:39 +0100 | [diff] [blame] | 198 | { |
Jens Axboe | a4f4fdd | 2007-02-14 01:16:39 +0100 | [diff] [blame] | 199 | unsigned long ring_size; |
| 200 | |
Ingo Molnar | bf0dc8f | 2007-02-21 23:25:44 +0100 | [diff] [blame] | 201 | memset(&sd->ahu, 0, sizeof(struct async_head_user)); |
Jens Axboe | 2ca50be | 2007-02-14 08:31:15 +0100 | [diff] [blame] | 202 | |
Jens Axboe | a4f4fdd | 2007-02-14 01:16:39 +0100 | [diff] [blame] | 203 | ring_size = sizeof(struct syslet_uatom *) * depth; |
| 204 | sd->ring = malloc(ring_size); |
| 205 | memset(sd->ring, 0, ring_size); |
| 206 | |
Ingo Molnar | bf0dc8f | 2007-02-21 23:25:44 +0100 | [diff] [blame] | 207 | sd->ahu.user_ring_idx = 0; |
| 208 | sd->ahu.completion_ring = sd->ring; |
| 209 | sd->ahu.ring_size_bytes = ring_size; |
| 210 | sd->ahu.head_stack = thread_stack_alloc(); |
| 211 | sd->ahu.head_eip = (unsigned long)cachemiss_thread_start; |
| 212 | sd->ahu.new_thread_eip = (unsigned long)cachemiss_thread_start; |
Jens Axboe | db64e9b | 2007-02-14 02:10:59 +0100 | [diff] [blame] | 213 | |
| 214 | return 0; |
Jens Axboe | a4f4fdd | 2007-02-14 01:16:39 +0100 | [diff] [blame] | 215 | } |
| 216 | |
Jens Axboe | 2ca50be | 2007-02-14 08:31:15 +0100 | [diff] [blame] | 217 | static void async_head_exit(struct syslet_data *sd) |
Jens Axboe | a4f4fdd | 2007-02-14 01:16:39 +0100 | [diff] [blame] | 218 | { |
Jens Axboe | 7f059a7 | 2007-02-14 08:53:11 +0100 | [diff] [blame] | 219 | free(sd->ring); |
Jens Axboe | a4f4fdd | 2007-02-14 01:16:39 +0100 | [diff] [blame] | 220 | } |
| 221 | |
| 222 | static void fio_syslet_cleanup(struct thread_data *td) |
| 223 | { |
| 224 | struct syslet_data *sd = td->io_ops->data; |
| 225 | |
| 226 | if (sd) { |
Jens Axboe | 2ca50be | 2007-02-14 08:31:15 +0100 | [diff] [blame] | 227 | async_head_exit(sd); |
Jens Axboe | a4f4fdd | 2007-02-14 01:16:39 +0100 | [diff] [blame] | 228 | free(sd->events); |
| 229 | free(sd); |
| 230 | td->io_ops->data = NULL; |
| 231 | } |
| 232 | } |
| 233 | |
| 234 | static int fio_syslet_init(struct thread_data *td) |
| 235 | { |
| 236 | struct syslet_data *sd; |
| 237 | |
Jens Axboe | db64e9b | 2007-02-14 02:10:59 +0100 | [diff] [blame] | 238 | |
Jens Axboe | a4f4fdd | 2007-02-14 01:16:39 +0100 | [diff] [blame] | 239 | sd = malloc(sizeof(*sd)); |
| 240 | memset(sd, 0, sizeof(*sd)); |
| 241 | sd->events = malloc(sizeof(struct io_u *) * td->iodepth); |
| 242 | memset(sd->events, 0, sizeof(struct io_u *) * td->iodepth); |
Jens Axboe | db64e9b | 2007-02-14 02:10:59 +0100 | [diff] [blame] | 243 | |
| 244 | /* |
| 245 | * This will handily fail for kernels where syslet isn't available |
| 246 | */ |
| 247 | if (async_head_init(sd, td->iodepth)) { |
| 248 | free(sd->events); |
| 249 | free(sd); |
| 250 | return 1; |
| 251 | } |
| 252 | |
Jens Axboe | a4f4fdd | 2007-02-14 01:16:39 +0100 | [diff] [blame] | 253 | td->io_ops->data = sd; |
Jens Axboe | a4f4fdd | 2007-02-14 01:16:39 +0100 | [diff] [blame] | 254 | return 0; |
| 255 | } |
| 256 | |
/*
 * Hook table registered with fio's ioengine core.
 */
static struct ioengine_ops ioengine = {
	.name = "syslet-rw",
	.version = FIO_IOOPS_VERSION,
	.init = fio_syslet_init,
	.prep = fio_syslet_prep,
	.queue = fio_syslet_queue,
	.getevents = fio_syslet_getevents,
	.event = fio_syslet_event,
	.cleanup = fio_syslet_cleanup,
};
| 267 | |
| 268 | #else /* FIO_HAVE_SYSLET */ |
| 269 | |
| 270 | /* |
| 271 | * When we have a proper configure system in place, we simply wont build |
| 272 | * and install this io engine. For now install a crippled version that |
| 273 | * just complains and fails to load. |
| 274 | */ |
| 275 | static int fio_syslet_init(struct thread_data fio_unused *td) |
| 276 | { |
| 277 | fprintf(stderr, "fio: syslet not available\n"); |
| 278 | return 1; |
| 279 | } |
| 280 | |
/*
 * Crippled hook table for the no-syslet build: only init is set, and
 * it always fails, so the engine can never actually be used.
 */
static struct ioengine_ops ioengine = {
	.name = "syslet-rw",
	.version = FIO_IOOPS_VERSION,
	.init = fio_syslet_init,
};
| 286 | |
| 287 | #endif /* FIO_HAVE_SYSLET */ |
| 288 | |
/* fio_init presumably marks a load-time constructor — registers the
 * engine so jobs can select "syslet-rw". */
static void fio_init fio_syslet_register(void)
{
	register_ioengine(&ioengine);
}
| 293 | |
/* fio_exit presumably marks an unload-time destructor — removes the
 * engine from fio's list. */
static void fio_exit fio_syslet_unregister(void)
{
	unregister_ioengine(&ioengine);
}