blob: 812b3c4269b83efb96197fe7b7ea19603e1f00c8 [file] [log] [blame]
Jens Axboe79a43182010-09-07 13:28:58 +02001/*
2 * binject engine
3 *
4 * IO engine that uses the Linux binject interface to directly inject
5 * bio's to block devices.
6 *
7 */
8#include <stdio.h>
9#include <stdlib.h>
10#include <unistd.h>
11#include <errno.h>
12#include <assert.h>
13#include <string.h>
14#include <sys/poll.h>
Jens Axboe83c107b2010-10-11 19:26:33 +020015#include <sys/types.h>
16#include <sys/stat.h>
Jens Axboe79a43182010-09-07 13:28:58 +020017
18#include "../fio.h"
19
20#ifdef FIO_HAVE_BINJECT
21
/*
 * Per-thread engine state, allocated in fio_binject_init().
 */
struct binject_data {
	struct b_user_cmd *cmds;	/* completion read buffer, iodepth entries */
	struct io_u **events;		/* io_u's reaped by getevents, iodepth slots */
	struct pollfd *pfds;		/* one pollfd per file, used when min == 0 */
	int *fd_flags;			/* per-file fcntl flags restored after a non-blocking reap */
};
28
Jens Axboe0e238572010-10-08 11:26:43 +020029struct binject_file {
30 unsigned int bs;
31 int minor;
32 int fd;
33};
34
Jens Axboe79a43182010-09-07 13:28:58 +020035static void binject_buc_init(struct binject_data *bd, struct io_u *io_u)
36{
37 struct b_user_cmd *buc = &io_u->buc;
38
39 memset(buc, 0, sizeof(*buc));
40 binject_buc_set_magic(buc);
41
42 buc->buf = (unsigned long) io_u->xfer_buf;
43 buc->len = io_u->xfer_buflen;
44 buc->offset = io_u->offset;
45 buc->usr_ptr = (unsigned long) io_u;
46
47 buc->flags = B_FLAG_NOIDLE | B_FLAG_UNPLUG;
48 assert(buc->buf);
49}
50
/*
 * Return 1 if any of the 'fds' entries in 'pfds' reports readable
 * data (POLLIN set in revents), 0 otherwise.
 */
static int pollin_events(struct pollfd *pfds, int fds)
{
	struct pollfd *p = pfds;
	struct pollfd *end = pfds + fds;

	while (p < end) {
		if (p->revents & POLLIN)
			return 1;
		p++;
	}

	return 0;
}
61
Jens Axboed01c4042010-10-08 14:53:58 +020062static unsigned int binject_read_commands(struct thread_data *td, void *p,
63 int left, int *err)
64{
65 struct binject_file *bf;
66 struct fio_file *f;
67 int i, ret, events;
68
69one_more:
70 events = 0;
71 for_each_file(td, f, i) {
Jens Axboe47f07dd2013-02-11 14:35:43 +010072 bf = (struct binject_file *) (uintptr_t) f->engine_data;
Jens Axboed01c4042010-10-08 14:53:58 +020073 ret = read(bf->fd, p, left * sizeof(struct b_user_cmd));
74 if (ret < 0) {
75 if (errno == EAGAIN)
76 continue;
77 *err = -errno;
78 td_verror(td, errno, "read");
79 break;
80 } else if (ret) {
81 p += ret;
82 events += ret / sizeof(struct b_user_cmd);
83 }
84 }
85
86 if (*err || events)
87 return events;
88
89 usleep(1000);
90 goto one_more;
91}
92
Jens Axboe79a43182010-09-07 13:28:58 +020093static int fio_binject_getevents(struct thread_data *td, unsigned int min,
94 unsigned int max, struct timespec fio_unused *t)
95{
96 struct binject_data *bd = td->io_ops->data;
97 int left = max, ret, r = 0, ev_index = 0;
98 void *buf = bd->cmds;
99 unsigned int i, events;
100 struct fio_file *f;
Jens Axboe0e238572010-10-08 11:26:43 +0200101 struct binject_file *bf;
Jens Axboe79a43182010-09-07 13:28:58 +0200102
103 /*
104 * Fill in the file descriptors
105 */
106 for_each_file(td, f, i) {
Jens Axboe47f07dd2013-02-11 14:35:43 +0100107 bf = (struct binject_file *) (uintptr_t) f->engine_data;
Jens Axboe0e238572010-10-08 11:26:43 +0200108
Jens Axboe79a43182010-09-07 13:28:58 +0200109 /*
110 * don't block for min events == 0
111 */
Jens Axboe4a851612014-04-14 10:04:21 -0600112 if (!min)
113 fio_set_fd_nonblocking(bf->fd, "binject");
114
Jens Axboe0e238572010-10-08 11:26:43 +0200115 bd->pfds[i].fd = bf->fd;
Jens Axboe79a43182010-09-07 13:28:58 +0200116 bd->pfds[i].events = POLLIN;
117 }
118
119 while (left) {
Jens Axboed01c4042010-10-08 14:53:58 +0200120 while (!min) {
Jens Axboe79a43182010-09-07 13:28:58 +0200121 ret = poll(bd->pfds, td->o.nr_files, -1);
122 if (ret < 0) {
123 if (!r)
124 r = -errno;
125 td_verror(td, errno, "poll");
126 break;
127 } else if (!ret)
128 continue;
129
130 if (pollin_events(bd->pfds, td->o.nr_files))
131 break;
Jens Axboe79a43182010-09-07 13:28:58 +0200132 }
133
134 if (r < 0)
135 break;
Jens Axboed01c4042010-10-08 14:53:58 +0200136
137 events = binject_read_commands(td, buf, left, &r);
138
139 if (r < 0)
140 break;
Jens Axboe79a43182010-09-07 13:28:58 +0200141
142 left -= events;
143 r += events;
144
145 for (i = 0; i < events; i++) {
146 struct b_user_cmd *buc = (struct b_user_cmd *) buf + i;
147
Jens Axboe2f681242010-10-21 08:15:59 +0200148 bd->events[ev_index] = (struct io_u *) (unsigned long) buc->usr_ptr;
Jens Axboe79a43182010-09-07 13:28:58 +0200149 ev_index++;
150 }
151 }
152
153 if (!min) {
Jens Axboe0e238572010-10-08 11:26:43 +0200154 for_each_file(td, f, i) {
Jens Axboe47f07dd2013-02-11 14:35:43 +0100155 bf = (struct binject_file *) (uintptr_t) f->engine_data;
Jens Axboe45550d72014-04-15 08:28:51 -0600156
157 if (fcntl(bf->fd, F_SETFL, bd->fd_flags[i]) < 0)
158 log_err("fio: binject failed to restore fcntl flags: %s\n", strerror(errno));
Jens Axboe0e238572010-10-08 11:26:43 +0200159 }
Jens Axboe79a43182010-09-07 13:28:58 +0200160 }
161
162 if (r > 0)
163 assert(ev_index == r);
164
165 return r;
166}
167
168static int fio_binject_doio(struct thread_data *td, struct io_u *io_u)
169{
170 struct b_user_cmd *buc = &io_u->buc;
Jens Axboe47f07dd2013-02-11 14:35:43 +0100171 struct binject_file *bf = (struct binject_file *) (uintptr_t) io_u->file->engine_data;
Jens Axboe79a43182010-09-07 13:28:58 +0200172 int ret;
173
Jens Axboe0e238572010-10-08 11:26:43 +0200174 ret = write(bf->fd, buc, sizeof(*buc));
Jens Axboe79a43182010-09-07 13:28:58 +0200175 if (ret < 0)
176 return ret;
177
178 return FIO_Q_QUEUED;
179}
180
181static int fio_binject_prep(struct thread_data *td, struct io_u *io_u)
182{
183 struct binject_data *bd = td->io_ops->data;
184 struct b_user_cmd *buc = &io_u->buc;
Jens Axboe47f07dd2013-02-11 14:35:43 +0100185 struct binject_file *bf = (struct binject_file *) (uintptr_t) io_u->file->engine_data;
Jens Axboe79a43182010-09-07 13:28:58 +0200186
Jens Axboe0e238572010-10-08 11:26:43 +0200187 if (io_u->xfer_buflen & (bf->bs - 1)) {
Jens Axboe79a43182010-09-07 13:28:58 +0200188 log_err("read/write not sector aligned\n");
189 return EINVAL;
190 }
191
192 if (io_u->ddir == DDIR_READ) {
193 binject_buc_init(bd, io_u);
194 buc->type = B_TYPE_READ;
195 } else if (io_u->ddir == DDIR_WRITE) {
196 binject_buc_init(bd, io_u);
Jens Axboe1ef2b6b2010-10-08 15:07:01 +0200197 if (io_u->flags & IO_U_F_BARRIER)
198 buc->type = B_TYPE_WRITEBARRIER;
199 else
200 buc->type = B_TYPE_WRITE;
Jens Axboe79a43182010-09-07 13:28:58 +0200201 } else if (io_u->ddir == DDIR_TRIM) {
202 binject_buc_init(bd, io_u);
203 buc->type = B_TYPE_DISCARD;
204 } else {
205 assert(0);
206 }
207
208 return 0;
209}
210
211static int fio_binject_queue(struct thread_data *td, struct io_u *io_u)
212{
213 int ret;
214
215 fio_ro_check(td, io_u);
216
217 ret = fio_binject_doio(td, io_u);
218
219 if (ret < 0)
220 io_u->error = errno;
221
222 if (io_u->error) {
223 td_verror(td, io_u->error, "xfer");
224 return FIO_Q_COMPLETED;
225 }
226
227 return ret;
228}
229
230static struct io_u *fio_binject_event(struct thread_data *td, int event)
231{
232 struct binject_data *bd = td->io_ops->data;
233
234 return bd->events[event];
235}
236
Jens Axboece4b5052010-10-11 19:34:36 +0200237static int binject_open_ctl(struct thread_data *td)
238{
239 int fd;
240
241 fd = open("/dev/binject-ctl", O_RDWR);
242 if (fd < 0)
243 td_verror(td, errno, "open binject-ctl");
244
245 return fd;
246}
247
Jens Axboe0e238572010-10-08 11:26:43 +0200248static void binject_unmap_dev(struct thread_data *td, struct binject_file *bf)
249{
250 struct b_ioctl_cmd bic;
251 int fdb;
252
253 if (bf->fd >= 0) {
254 close(bf->fd);
255 bf->fd = -1;
256 }
257
Jens Axboece4b5052010-10-11 19:34:36 +0200258 fdb = binject_open_ctl(td);
259 if (fdb < 0)
Jens Axboe0e238572010-10-08 11:26:43 +0200260 return;
Jens Axboe0e238572010-10-08 11:26:43 +0200261
262 bic.minor = bf->minor;
263
Jens Axboef0f346d2010-10-27 09:24:54 -0600264 if (ioctl(fdb, B_IOCTL_DEL, &bic) < 0)
Jens Axboe0e238572010-10-08 11:26:43 +0200265 td_verror(td, errno, "binject dev unmap");
Jens Axboe0e238572010-10-08 11:26:43 +0200266
267 close(fdb);
268}
269
270static int binject_map_dev(struct thread_data *td, struct binject_file *bf,
271 int fd)
272{
273 struct b_ioctl_cmd bic;
274 char name[80];
275 struct stat sb;
276 int fdb, dev_there, loops;
277
Jens Axboece4b5052010-10-11 19:34:36 +0200278 fdb = binject_open_ctl(td);
279 if (fdb < 0)
Jens Axboe0e238572010-10-08 11:26:43 +0200280 return 1;
Jens Axboe0e238572010-10-08 11:26:43 +0200281
282 bic.fd = fd;
283
Jens Axboef0f346d2010-10-27 09:24:54 -0600284 if (ioctl(fdb, B_IOCTL_ADD, &bic) < 0) {
Jens Axboe0e238572010-10-08 11:26:43 +0200285 td_verror(td, errno, "binject dev map");
286 close(fdb);
287 return 1;
288 }
289
290 bf->minor = bic.minor;
291
292 sprintf(name, "/dev/binject%u", bf->minor);
293
294 /*
295 * Wait for udev to create the node...
296 */
297 dev_there = loops = 0;
298 do {
299 if (!stat(name, &sb)) {
300 dev_there = 1;
301 break;
302 }
303
304 usleep(10000);
305 } while (++loops < 100);
306
307 close(fdb);
308
309 if (!dev_there) {
310 log_err("fio: timed out waiting for binject dev\n");
311 goto err_unmap;
312 }
313
314 bf->fd = open(name, O_RDWR);
315 if (bf->fd < 0) {
316 td_verror(td, errno, "binject dev open");
317err_unmap:
318 binject_unmap_dev(td, bf);
319 return 1;
320 }
321
322 return 0;
323}
324
325static int fio_binject_close_file(struct thread_data *td, struct fio_file *f)
326{
Jens Axboe47f07dd2013-02-11 14:35:43 +0100327 struct binject_file *bf = (struct binject_file *) (uintptr_t) f->engine_data;
Jens Axboe0e238572010-10-08 11:26:43 +0200328
329 if (bf) {
330 binject_unmap_dev(td, bf);
331 free(bf);
Jens Axboe84b38422012-12-12 09:07:00 +0100332 f->engine_data = 0;
Jens Axboe0e238572010-10-08 11:26:43 +0200333 return generic_close_file(td, f);
334 }
335
336 return 0;
337}
338
Jens Axboe4a435da2010-09-26 03:46:55 +0200339static int fio_binject_open_file(struct thread_data *td, struct fio_file *f)
340{
Jens Axboe0e238572010-10-08 11:26:43 +0200341 struct binject_file *bf;
Jens Axboe4a435da2010-09-26 03:46:55 +0200342 unsigned int bs;
343 int ret;
344
345 ret = generic_open_file(td, f);
346 if (ret)
347 return 1;
348
349 if (f->filetype != FIO_TYPE_BD) {
350 log_err("fio: binject only works with block devices\n");
Jens Axboe0e238572010-10-08 11:26:43 +0200351 goto err_close;
Jens Axboe4a435da2010-09-26 03:46:55 +0200352 }
353 if (ioctl(f->fd, BLKSSZGET, &bs) < 0) {
354 td_verror(td, errno, "BLKSSZGET");
Jens Axboe0e238572010-10-08 11:26:43 +0200355 goto err_close;
356 }
357
358 bf = malloc(sizeof(*bf));
359 bf->bs = bs;
360 bf->minor = bf->fd = -1;
Jens Axboe9cbef502013-01-04 13:21:23 +0100361 f->engine_data = (uintptr_t) bf;
Jens Axboe0e238572010-10-08 11:26:43 +0200362
363 if (binject_map_dev(td, bf, f->fd)) {
364err_close:
365 ret = generic_close_file(td, f);
Jens Axboe4a435da2010-09-26 03:46:55 +0200366 return 1;
367 }
368
Jens Axboe4a435da2010-09-26 03:46:55 +0200369 return 0;
370}
371
Jens Axboe79a43182010-09-07 13:28:58 +0200372static void fio_binject_cleanup(struct thread_data *td)
373{
374 struct binject_data *bd = td->io_ops->data;
375
376 if (bd) {
377 free(bd->events);
378 free(bd->cmds);
379 free(bd->fd_flags);
380 free(bd->pfds);
381 free(bd);
382 }
383}
384
385static int fio_binject_init(struct thread_data *td)
386{
387 struct binject_data *bd;
388
389 bd = malloc(sizeof(*bd));
390 memset(bd, 0, sizeof(*bd));
391
392 bd->cmds = malloc(td->o.iodepth * sizeof(struct b_user_cmd));
393 memset(bd->cmds, 0, td->o.iodepth * sizeof(struct b_user_cmd));
394
395 bd->events = malloc(td->o.iodepth * sizeof(struct io_u *));
396 memset(bd->events, 0, td->o.iodepth * sizeof(struct io_u *));
397
398 bd->pfds = malloc(sizeof(struct pollfd) * td->o.nr_files);
399 memset(bd->pfds, 0, sizeof(struct pollfd) * td->o.nr_files);
400
401 bd->fd_flags = malloc(sizeof(int) * td->o.nr_files);
402 memset(bd->fd_flags, 0, sizeof(int) * td->o.nr_files);
403
404 td->io_ops->data = bd;
405 return 0;
406}
407
/*
 * Ops table wiring the binject callbacks into fio. Registered at load
 * time by fio_binject_register() below.
 */
static struct ioengine_ops ioengine = {
	.name		= "binject",
	.version	= FIO_IOOPS_VERSION,
	.init		= fio_binject_init,
	.prep		= fio_binject_prep,
	.queue		= fio_binject_queue,
	.getevents	= fio_binject_getevents,
	.event		= fio_binject_event,
	.cleanup	= fio_binject_cleanup,
	.open_file	= fio_binject_open_file,
	.close_file	= fio_binject_close_file,
	.get_file_size	= generic_get_file_size,
	.flags		= FIO_RAWIO | FIO_BARRIER | FIO_MEMALIGN,
};
422
423#else /* FIO_HAVE_BINJECT */
424
425/*
426 * When we have a proper configure system in place, we simply wont build
427 * and install this io engine. For now install a crippled version that
428 * just complains and fails to load.
429 */
/*
 * Stub used when binject support is not compiled in: initializing the
 * engine just logs an error and fails.
 */
static int fio_binject_init(struct thread_data fio_unused *td)
{
	log_err("fio: ioengine binject not available\n");
	return 1;
}
435
/*
 * Crippled ops table for builds without binject support: only init is
 * provided, and it always fails (see stub above's log_err).
 */
static struct ioengine_ops ioengine = {
	.name		= "binject",
	.version	= FIO_IOOPS_VERSION,
	.init		= fio_binject_init,
};
441
442#endif
443
/* Register the engine with fio when the binary loads (fio_init ctor). */
static void fio_init fio_binject_register(void)
{
	register_ioengine(&ioengine);
}
448
/* Unregister the engine on exit (fio_exit dtor). */
static void fio_exit fio_binject_unregister(void)
{
	unregister_ioengine(&ioengine);
}