/*
 * rbd engine
 *
 * IO engine using Ceph's librbd to test RADOS Block Devices.
 *
 */
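
/*
 * Example job, a minimal sketch only: the pool, image and client names
 * below are placeholders and must refer to objects that already exist in
 * the cluster. fio connects as client.<clientname> using the default
 * ceph.conf search path, so the usual Ceph configuration and keyring must
 * be in place.
 *
 *	[rbd-test]
 *	ioengine=rbd
 *	clientname=admin
 *	pool=rbd
 *	rbdname=fio_test
 *	rw=randwrite
 *	bs=4k
 *	iodepth=32
 */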

#include <rbd/librbd.h>

#include "../fio.h"

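/*
 * Per-io_u engine data: the librbd completion for the request, plus flags
 * noting whether the completion callback has fired (io_complete) and
 * whether getevents has already handed the io_u back to fio (io_seen).
 */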
struct fio_rbd_iou {
	struct io_u *io_u;
	rbd_completion_t completion;
	int io_seen;
	int io_complete;
};

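/*
 * Per-thread engine state: the RADOS cluster handle, pool I/O context and
 * open image, plus the arrays used to return and sort completed io_us.
 */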
struct rbd_data {
	rados_t cluster;
	rados_ioctx_t io_ctx;
	rbd_image_t image;
	struct io_u **aio_events;
	struct io_u **sort_events;
};

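/*
 * Per-job options. The leading pad keeps every real option at a non-zero
 * structure offset, which is the usual fio engine idiom for offsetof()
 * based option tables (an assumption; the original does not explain it).
 */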
struct rbd_options {
	void *pad;
	char *rbd_name;
	char *pool_name;
	char *client_name;
	int busy_poll;
};

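/*
 * Options exposed in the job file: which image, pool and client to use,
 * and whether to busy poll for completions instead of sleeping.
 */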
static struct fio_option options[] = {
	{
		.name		= "rbdname",
		.lname		= "rbd engine rbdname",
		.type		= FIO_OPT_STR_STORE,
		.help		= "RBD name for RBD engine",
		.off1		= offsetof(struct rbd_options, rbd_name),
		.category	= FIO_OPT_C_ENGINE,
		.group		= FIO_OPT_G_RBD,
	},
	{
		.name		= "pool",
		.lname		= "rbd engine pool",
		.type		= FIO_OPT_STR_STORE,
		.help		= "Name of the pool hosting the RBD for the RBD engine",
		.off1		= offsetof(struct rbd_options, pool_name),
		.category	= FIO_OPT_C_ENGINE,
		.group		= FIO_OPT_G_RBD,
	},
	{
		.name		= "clientname",
		.lname		= "rbd engine clientname",
		.type		= FIO_OPT_STR_STORE,
		.help		= "Name of the ceph client to access the RBD for the RBD engine",
		.off1		= offsetof(struct rbd_options, client_name),
		.category	= FIO_OPT_C_ENGINE,
		.group		= FIO_OPT_G_RBD,
	},
	{
		.name		= "busy_poll",
		.lname		= "Busy poll",
		.type		= FIO_OPT_BOOL,
		.help		= "Busy poll for completions instead of sleeping",
		.off1		= offsetof(struct rbd_options, busy_poll),
		.def		= "0",
		.category	= FIO_OPT_C_ENGINE,
		.group		= FIO_OPT_G_RBD,
	},
	{
		.name		= NULL,
	},
};

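/*
 * Allocate the per-thread rbd_data along with the completion and sort
 * arrays, both sized to the job's iodepth.
 */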
static int _fio_setup_rbd_data(struct thread_data *td,
			       struct rbd_data **rbd_data_ptr)
{
	struct rbd_data *rbd;

	if (td->io_ops->data)
		return 0;

	rbd = calloc(1, sizeof(struct rbd_data));
	if (!rbd)
		goto failed;

	rbd->aio_events = calloc(td->o.iodepth, sizeof(struct io_u *));
	if (!rbd->aio_events)
		goto failed;

	rbd->sort_events = calloc(td->o.iodepth, sizeof(struct io_u *));
	if (!rbd->sort_events)
		goto failed;

	*rbd_data_ptr = rbd;
	return 0;

failed:
	if (rbd) {
		free(rbd->aio_events);
		free(rbd->sort_events);
		free(rbd);
	}
	return 1;
}

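/*
 * Bring up the librados/librbd stack for this thread: create the cluster
 * handle for the configured client, read the default ceph.conf, connect,
 * create an I/O context on the pool and open the image.
 */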
static int _fio_rbd_connect(struct thread_data *td)
{
	struct rbd_data *rbd = td->io_ops->data;
	struct rbd_options *o = td->eo;
	int r;

	r = rados_create(&rbd->cluster, o->client_name);
	if (r < 0) {
		log_err("rados_create failed.\n");
		goto failed_early;
	}

	r = rados_conf_read_file(rbd->cluster, NULL);
	if (r < 0) {
		log_err("rados_conf_read_file failed.\n");
		goto failed_early;
	}

	r = rados_connect(rbd->cluster);
	if (r < 0) {
		log_err("rados_connect failed.\n");
		goto failed_shutdown;
	}

	r = rados_ioctx_create(rbd->cluster, o->pool_name, &rbd->io_ctx);
	if (r < 0) {
		log_err("rados_ioctx_create failed.\n");
		goto failed_shutdown;
	}

	r = rbd_open(rbd->io_ctx, o->rbd_name, &rbd->image, NULL /* snap */);
	if (r < 0) {
		log_err("rbd_open failed.\n");
		goto failed_open;
	}
	return 0;

failed_open:
	rados_ioctx_destroy(rbd->io_ctx);
	rbd->io_ctx = NULL;
failed_shutdown:
	rados_shutdown(rbd->cluster);
	rbd->cluster = NULL;
failed_early:
	return 1;
}

static void _fio_rbd_disconnect(struct rbd_data *rbd)
{
	if (!rbd)
		return;

	/* shutdown everything */
	if (rbd->image) {
		rbd_close(rbd->image);
		rbd->image = NULL;
	}

	if (rbd->io_ctx) {
		rados_ioctx_destroy(rbd->io_ctx);
		rbd->io_ctx = NULL;
	}

	if (rbd->cluster) {
		rados_shutdown(rbd->cluster);
		rbd->cluster = NULL;
	}
}

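/*
 * Completion callback, invoked by librbd from its own context. It only
 * records the return value and flags the io_u as complete; the event is
 * reaped later from ->getevents().
 */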
static void _fio_rbd_finish_aiocb(rbd_completion_t comp, void *data)
{
	struct fio_rbd_iou *fri = data;
	struct io_u *io_u = fri->io_u;
	ssize_t ret;

	/*
	 * Looks like return value is 0 for success, or < 0 for
	 * a specific error. So we have to assume that it can't do
	 * partial completions.
	 */
	fri->io_complete = 1;

	ret = rbd_aio_get_return_value(fri->completion);
	if (ret < 0) {
		io_u->error = ret;
		io_u->resid = io_u->xfer_buflen;
	} else
		io_u->error = 0;
}

static struct io_u *fio_rbd_event(struct thread_data *td, int event)
{
	struct rbd_data *rbd = td->io_ops->data;

	return rbd->aio_events[event];
}

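/*
 * If the callback has marked this io_u complete, record it in the event
 * array, flag it as seen and release the librbd completion.
 */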
static inline int fri_check_complete(struct rbd_data *rbd, struct io_u *io_u,
				     unsigned int *events)
{
	struct fio_rbd_iou *fri = io_u->engine_data;

	if (fri->io_complete) {
		fri->io_seen = 1;
		rbd->aio_events[*events] = io_u;
		(*events)++;

		rbd_aio_release(fri->completion);
		return 1;
	}

	return 0;
}

static inline int rbd_io_u_seen(struct io_u *io_u)
{
	struct fio_rbd_iou *fri = io_u->engine_data;

	return fri->io_seen;
}

static void rbd_io_u_wait_complete(struct io_u *io_u)
{
	struct fio_rbd_iou *fri = io_u->engine_data;

	rbd_aio_wait_for_complete(fri->completion);
}

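/* qsort comparator for pending io_us, keyed on time elapsed since issue. */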
static int rbd_io_u_cmp(const void *p1, const void *p2)
{
	const struct io_u **a = (const struct io_u **) p1;
	const struct io_u **b = (const struct io_u **) p2;
	uint64_t at, bt;

	at = utime_since_now(&(*a)->start_time);
	bt = utime_since_now(&(*b)->start_time);

	if (at < bt)
		return -1;
	else if (at == bt)
		return 0;
	else
		return 1;
}

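/*
 * Walk all in-flight io_us once, reaping whatever has already completed.
 * If waiting is allowed and more events are still needed, sort the
 * remaining io_us by issue time and wait on them in turn until min_evts
 * is reached.
 */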
static int rbd_iter_events(struct thread_data *td, unsigned int *events,
			   unsigned int min_evts, int wait)
{
	struct rbd_data *rbd = td->io_ops->data;
	unsigned int this_events = 0;
	struct io_u *io_u;
	int i, sidx;

	sidx = 0;
	io_u_qiter(&td->io_u_all, io_u, i) {
		if (!(io_u->flags & IO_U_F_FLIGHT))
			continue;
		if (rbd_io_u_seen(io_u))
			continue;

		if (fri_check_complete(rbd, io_u, events))
			this_events++;
		else if (wait)
			rbd->sort_events[sidx++] = io_u;
	}

	if (!wait || !sidx)
		return this_events;

	/*
	 * Sort events, oldest issue first, then wait on as many as we
	 * need in order of age. If we have enough events, stop waiting,
	 * and just check if any of the older ones are done.
	 */
	if (sidx > 1)
		qsort(rbd->sort_events, sidx, sizeof(struct io_u *), rbd_io_u_cmp);

	for (i = 0; i < sidx; i++) {
		io_u = rbd->sort_events[i];

		if (fri_check_complete(rbd, io_u, events)) {
			this_events++;
			continue;
		}

		/*
		 * Stop waiting when we have enough, but keep checking
		 * whether the remaining pending IOs have completed.
		 */
		if (*events >= min_evts)
			continue;

		rbd_io_u_wait_complete(io_u);

		if (fri_check_complete(rbd, io_u, events))
			this_events++;
	}

	return this_events;
}

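/*
 * Reap at least 'min' completions. With busy_poll set we keep spinning
 * over the in-flight list; otherwise, once a pass finds nothing new, we
 * set 'wait' so the next pass blocks on pending completions.
 */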
static int fio_rbd_getevents(struct thread_data *td, unsigned int min,
			     unsigned int max, const struct timespec *t)
{
	unsigned int this_events, events = 0;
	struct rbd_options *o = td->eo;
	int wait = 0;

	do {
		this_events = rbd_iter_events(td, &events, min, wait);

		if (events >= min)
			break;
		if (this_events)
			continue;

		if (!o->busy_poll)
			wait = 1;
		else
			nop;
	} while (1);

	return events;
}

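/*
 * Queue one io_u: allocate an rbd completion and map the fio data
 * direction to the matching librbd call (write, read, discard for trim,
 * flush for sync).
 */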
static int fio_rbd_queue(struct thread_data *td, struct io_u *io_u)
{
	struct rbd_data *rbd = td->io_ops->data;
	struct fio_rbd_iou *fri = io_u->engine_data;
	int r = -1;

	fio_ro_check(td, io_u);

	fri->io_seen = 0;
	fri->io_complete = 0;

	r = rbd_aio_create_completion(fri, _fio_rbd_finish_aiocb,
					&fri->completion);
	if (r < 0) {
		log_err("rbd_aio_create_completion failed.\n");
		goto failed;
	}

	if (io_u->ddir == DDIR_WRITE) {
		r = rbd_aio_write(rbd->image, io_u->offset, io_u->xfer_buflen,
				  io_u->xfer_buf, fri->completion);
		if (r < 0) {
			log_err("rbd_aio_write failed.\n");
			goto failed_comp;
		}

	} else if (io_u->ddir == DDIR_READ) {
		r = rbd_aio_read(rbd->image, io_u->offset, io_u->xfer_buflen,
				 io_u->xfer_buf, fri->completion);

		if (r < 0) {
			log_err("rbd_aio_read failed.\n");
			goto failed_comp;
		}
	} else if (io_u->ddir == DDIR_TRIM) {
		r = rbd_aio_discard(rbd->image, io_u->offset,
				    io_u->xfer_buflen, fri->completion);
		if (r < 0) {
			log_err("rbd_aio_discard failed.\n");
			goto failed_comp;
		}
	} else if (io_u->ddir == DDIR_SYNC) {
		r = rbd_aio_flush(rbd->image, fri->completion);
		if (r < 0) {
			log_err("rbd_aio_flush failed.\n");
			goto failed_comp;
		}
	} else {
		dprint(FD_IO, "%s: Warning: unhandled ddir: %d\n", __func__,
		       io_u->ddir);
		goto failed_comp;
	}

	return FIO_Q_QUEUED;
failed_comp:
	rbd_aio_release(fri->completion);
failed:
	io_u->error = r;
	td_verror(td, io_u->error, "xfer");
	return FIO_Q_COMPLETED;
}

static int fio_rbd_init(struct thread_data *td)
{
	int r;

	r = _fio_rbd_connect(td);
	if (r) {
		log_err("fio_rbd_connect failed, return code: %d.\n", r);
		goto failed;
	}

	return 0;

failed:
	return 1;
}

static void fio_rbd_cleanup(struct thread_data *td)
{
	struct rbd_data *rbd = td->io_ops->data;

	if (rbd) {
		_fio_rbd_disconnect(rbd);
		free(rbd->aio_events);
		free(rbd->sort_events);
		free(rbd);
	}
}

static int fio_rbd_setup(struct thread_data *td)
{
	rbd_image_info_t info;
	struct fio_file *f;
	struct rbd_data *rbd = NULL;
	int major, minor, extra;
	int r;

	/* log version of librbd. No cluster connection required. */
	rbd_version(&major, &minor, &extra);
	log_info("rbd engine: RBD version: %d.%d.%d\n", major, minor, extra);

	/* allocate engine specific structure to deal with librbd. */
	r = _fio_setup_rbd_data(td, &rbd);
	if (r) {
		log_err("fio_setup_rbd_data failed.\n");
		goto cleanup;
	}
	td->io_ops->data = rbd;

	/* librbd does not allow us to run first in the main thread and later
	 * in a fork child. It needs to be the same process context all the
	 * time.
	 */
	td->o.use_thread = 1;

	/* connect in the main thread to determine the size of the given
	 * RADOS block device. Disconnect again later on.
	 */
	r = _fio_rbd_connect(td);
	if (r) {
		log_err("fio_rbd_connect failed.\n");
		goto cleanup;
	}

	/* get size of the RADOS block device */
	r = rbd_stat(rbd->image, &info, sizeof(info));
	if (r < 0) {
		log_err("rbd_stat failed.\n");
		goto disconnect;
	}
	dprint(FD_IO, "rbd-engine: image size: %lu\n", info.size);

	/* taken from "net" engine. Pretend we deal with files,
	 * even though we do not actually have any. The size of
	 * the RBD is used instead of an artificial file size.
	 */
	if (!td->files_index) {
		add_file(td, td->o.filename ? : "rbd", 0, 0);
		td->o.nr_files = td->o.nr_files ? : 1;
		td->o.open_files++;
	}
	f = td->files[0];
	f->real_file_size = info.size;

	/* disconnect; we were only connected to determine
	 * the size of the RBD.
	 */
	_fio_rbd_disconnect(rbd);
	return 0;

disconnect:
	_fio_rbd_disconnect(rbd);
cleanup:
	fio_rbd_cleanup(td);
	return r;
}

static int fio_rbd_open(struct thread_data *td, struct fio_file *f)
{
	return 0;
}

static int fio_rbd_invalidate(struct thread_data *td, struct fio_file *f)
{
#if defined(CONFIG_RBD_INVAL)
	struct rbd_data *rbd = td->io_ops->data;

	return rbd_invalidate_cache(rbd->image);
#else
	return 0;
#endif
}

static void fio_rbd_io_u_free(struct thread_data *td, struct io_u *io_u)
{
	struct fio_rbd_iou *fri = io_u->engine_data;

	if (fri) {
		io_u->engine_data = NULL;
		free(fri);
	}
}

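/* Attach a fresh fio_rbd_iou to every io_u that fio sets up. */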
static int fio_rbd_io_u_init(struct thread_data *td, struct io_u *io_u)
{
	struct fio_rbd_iou *fri;

	fri = calloc(1, sizeof(*fri));
	fri->io_u = io_u;
	io_u->engine_data = fri;
	return 0;
}

static struct ioengine_ops ioengine = {
	.name			= "rbd",
	.version		= FIO_IOOPS_VERSION,
	.setup			= fio_rbd_setup,
	.init			= fio_rbd_init,
	.queue			= fio_rbd_queue,
	.getevents		= fio_rbd_getevents,
	.event			= fio_rbd_event,
	.cleanup		= fio_rbd_cleanup,
	.open_file		= fio_rbd_open,
	.invalidate		= fio_rbd_invalidate,
	.options		= options,
	.io_u_init		= fio_rbd_io_u_init,
	.io_u_free		= fio_rbd_io_u_free,
	.option_struct_size	= sizeof(struct rbd_options),
};

static void fio_init fio_rbd_register(void)
{
	register_ioengine(&ioengine);
}

static void fio_exit fio_rbd_unregister(void)
{
	unregister_ioengine(&ioengine);
}