/*
 * The io parts of the fio tool, including workers for sync and mmap'ed
 * io, as well as both posix and linux libaio support.
 *
 * sync io is implemented on top of aio.
 *
 * This is not really specific to fio; if the get_io_u/put_io_u helpers
 * and structures were pulled into this as well, it would be a perfectly
 * generic io engine that could be used for other projects.
 */
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <string.h>
#include <dlfcn.h>
#include <assert.h>

#include "fio.h"
#include "os.h"

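/*
 * All io engines that have made themselves available via register_ioengine().
 */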
static LIST_HEAD(engine_list);

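/*
 * Make sure an engine implements the hooks we need and was built against
 * the current io ops version. Sync engines only have to provide ->queue();
 * async engines must also provide ->event() and ->getevents().
 */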
static int check_engine_ops(struct ioengine_ops *ops)
{
	if (ops->version != FIO_IOOPS_VERSION) {
		log_err("bad ioops version %d (want %d)\n", ops->version, FIO_IOOPS_VERSION);
		return 1;
	}

	if (!ops->queue) {
		log_err("%s: no queue handler\n", ops->name);
		return 1;
	}

	/*
	 * sync engines only need a ->queue()
	 */
	if (ops->flags & FIO_SYNCIO)
		return 0;

	if (!ops->event) {
		log_err("%s: no event handler\n", ops->name);
		return 1;
	}
	if (!ops->getevents) {
		log_err("%s: no getevents handler\n", ops->name);
		return 1;
	}

	return 0;
}

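/*
 * Remove an engine from the list of available io engines.
 */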
void unregister_ioengine(struct ioengine_ops *ops)
{
	list_del(&ops->list);
	INIT_LIST_HEAD(&ops->list);
}

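/*
 * Add an engine to the list of available io engines, making it visible
 * to load_ioengine(). Compiled-in engines typically call this from an
 * init time constructor.
 */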
void register_ioengine(struct ioengine_ops *ops)
{
	INIT_LIST_HEAD(&ops->list);
	list_add_tail(&ops->list, &engine_list);
}

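/*
 * Look up a registered engine by name.
 */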
static struct ioengine_ops *find_ioengine(const char *name)
{
	struct ioengine_ops *ops;
	struct list_head *entry;

	list_for_each(entry, &engine_list) {
		ops = list_entry(entry, struct ioengine_ops, list);
		if (!strcmp(name, ops->name))
			return ops;
	}

	return NULL;
}

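/*
 * Load an external engine from a shared object and return the ioengine
 * structure it exports.
 */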
static struct ioengine_ops *dlopen_ioengine(struct thread_data *td,
					    const char *engine_lib)
{
	struct ioengine_ops *ops;
	void *dlhandle;

	dlerror();
	dlhandle = dlopen(engine_lib, RTLD_LAZY);
	if (!dlhandle) {
		td_vmsg(td, -1, dlerror(), "dlopen");
		return NULL;
	}

	/*
	 * Unlike the included modules, external engines should have a
	 * non-static ioengine structure that we can reference.
	 */
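	/*
	 * An external engine would therefore export something along these
	 * lines (engine name and handlers purely illustrative):
	 *
	 *	struct ioengine_ops ioengine = {
	 *		.name		= "external",
	 *		.version	= FIO_IOOPS_VERSION,
	 *		.flags		= FIO_SYNCIO,
	 *		.queue		= ext_queue,
	 *		.open_file	= ext_open_file,
	 *	};
	 */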
	ops = dlsym(dlhandle, "ioengine");
	if (!ops) {
		td_vmsg(td, -1, dlerror(), "dlsym");
		dlclose(dlhandle);
		return NULL;
	}

	ops->dlhandle = dlhandle;
	return ops;
}

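/*
 * Resolve the engine named by the job, falling back to dlopen() for
 * external engines, validate it, and hand back a private copy for this
 * thread.
 */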
struct ioengine_ops *load_ioengine(struct thread_data *td, const char *name)
{
	struct ioengine_ops *ops, *ret;
	char engine[16];

	/*
	 * strncpy() won't terminate an overlong name, so make sure the
	 * buffer always ends up nul terminated.
	 */
	memset(engine, 0, sizeof(engine));
	strncpy(engine, name, sizeof(engine) - 1);

	/*
	 * linux libaio has alias names, so convert to what we want
	 */
	if (!strncmp(engine, "linuxaio", 8) || !strncmp(engine, "aio", 3))
		strcpy(engine, "libaio");

	ops = find_ioengine(engine);
	if (!ops)
		ops = dlopen_ioengine(td, name);

	if (!ops) {
		log_err("fio: engine %s not loadable\n", name);
		return NULL;
	}

	/*
	 * Check that the required methods are there.
	 */
	if (check_engine_ops(ops)) {
		if (ops->dlhandle)
			dlclose(ops->dlhandle);
		return NULL;
	}

	ret = malloc(sizeof(*ret));
	if (!ret) {
		log_err("fio: out of memory loading io engine\n");
		return NULL;
	}

	memcpy(ret, ops, sizeof(*ret));
	ret->data = NULL;

	return ret;
}

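/*
 * Tear down the engine for this thread: run its cleanup hook, drop any
 * dlopen() handle and free the private ops copy.
 */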
void close_ioengine(struct thread_data *td)
{
	if (td->io_ops->cleanup)
		td->io_ops->cleanup(td);

	if (td->io_ops->dlhandle)
		dlclose(td->io_ops->dlhandle);

	free(td->io_ops);
	td->io_ops = NULL;
}

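/*
 * Per-io_u preparation hook, if the engine provides one.
 */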
int td_io_prep(struct thread_data *td, struct io_u *io_u)
{
	if (td->io_ops->prep)
		return td->io_ops->prep(td, io_u);

	return 0;
}

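/*
 * Reap completion events. If at least one event is required, anything
 * the engine has queued internally is committed first.
 */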
int td_io_getevents(struct thread_data *td, int min, int max,
		    struct timespec *t)
{
	if (min > 0 && td->io_ops->commit) {
		int r = td->io_ops->commit(td);

		if (r < 0)
			return r;
	}
	if (td->io_ops->getevents)
		return td->io_ops->getevents(td, min, max, t);

	return 0;
}

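/*
 * Hand an io_u to the engine: mark it in flight, timestamp it, do the
 * issue accounting and call ->queue(). Async io that ends up queued is
 * committed in batches once more than iodepth_batch requests are
 * pending.
 */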
int td_io_queue(struct thread_data *td, struct io_u *io_u)
{
	int ret;

	assert((io_u->flags & IO_U_F_FLIGHT) == 0);
	io_u->flags |= IO_U_F_FLIGHT;

	io_u->error = 0;
	io_u->resid = 0;

	if (td->io_ops->flags & FIO_SYNCIO) {
		fio_gettime(&io_u->issue_time, NULL);

		/*
		 * for a sync engine, set the timeout upfront
		 */
		if (mtime_since(&td->timeout_end, &io_u->issue_time) < IO_U_TIMEOUT)
			io_u_set_timeout(td);
	}

	if (io_u->ddir != DDIR_SYNC)
		td->io_issues[io_u->ddir]++;

	io_u_mark_depth(td, io_u);

	ret = td->io_ops->queue(td, io_u);

	if (ret == FIO_Q_QUEUED || ret == FIO_Q_COMPLETED)
		get_file(io_u->file);

	if (ret == FIO_Q_QUEUED) {
		int r;

		td->io_u_queued++;
		if (td->io_u_queued > td->iodepth_batch) {
			r = td_io_commit(td);
			if (r < 0)
				return r;
		}
	}

	if ((td->io_ops->flags & FIO_SYNCIO) == 0) {
		fio_gettime(&io_u->issue_time, NULL);

		/*
		 * async engine, set the timeout here
		 */
		if (ret == FIO_Q_QUEUED &&
		    mtime_since(&td->timeout_end, &io_u->issue_time) < IO_U_TIMEOUT)
			io_u_set_timeout(td);
	}

	return ret;
}

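/*
 * Per-thread engine init hook, if the engine provides one.
 */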
int td_io_init(struct thread_data *td)
{
	if (td->io_ops->init)
		return td->io_ops->init(td);

	return 0;
}

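/*
 * Tell the engine to submit anything it has queued internally.
 */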
int td_io_commit(struct thread_data *td)
{
	if (!td->cur_depth)
		return 0;

	td->io_u_queued = 0;
	if (td->io_ops->commit)
		return td->io_ops->commit(td);

	return 0;
}

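/*
 * Open a file through the engine, reset its per-file state and grab a
 * reference to it.
 */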
int td_io_open_file(struct thread_data *td, struct fio_file *f)
{
	if (td->io_ops->open_file(td, f))
		return 1;

	f->last_free_lookup = 0;
	f->last_completed_pos = 0;
	f->last_pos = 0;
	f->flags |= FIO_FILE_OPEN;
	f->flags &= ~FIO_FILE_CLOSING;

	if (f->file_map)
		memset(f->file_map, 0, f->num_maps * sizeof(long));

	td->nr_open_files++;
	get_file(f);
	return 0;
}

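/*
 * Drop this thread's reference to the file; the real close happens once
 * the last io against it has completed.
 */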
void td_io_close_file(struct thread_data *td, struct fio_file *f)
{
	/*
	 * mark as closing, do real close when last io on it has completed
	 */
	f->flags |= FIO_FILE_CLOSING;

	put_file(td, f);
}