/*
 * RDMA I/O engine
 *
 * RDMA I/O engine based on the IB verbs and RDMA/CM user space libraries.
 * Supports both RDMA memory semantics and channel semantics
 * for the InfiniBand, RoCE and iWARP protocols.
 *
 * This I/O engine is disabled by default. To enable it, execute:
 *
 * $ export EXTFLAGS="-DFIO_HAVE_RDMA"
 * $ export EXTLIBS="-libverbs -lrdmacm"
 *
 * before running make. You will need the Linux RDMA software as well, either
 * from your Linux distributor or directly from openfabrics.org:
 *
 * http://www.openfabrics.org/downloads/OFED/
 *
 */
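
/*
 * Example job files (a sketch only, not shipped with fio; host, port and
 * section names below are made up). They follow how fio_rdmaio_init()
 * parses the "filename" option as host/port[/protocol].
 *
 * Server side (receives, so rw=read); start it first:
 *
 *	[rdma-server]
 *	ioengine=rdma
 *	rw=read
 *	bs=64k
 *	size=100m
 *	filename=192.168.0.10/8999
 *
 * Client side (sends, so rw=write; the optional protocol suffix is one of
 * rdma_write, rdma_read or send):
 *
 *	[rdma-client]
 *	ioengine=rdma
 *	rw=write
 *	bs=64k
 *	size=100m
 *	filename=192.168.0.10/8999/rdma_write
 */
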
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <errno.h>
#include <assert.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <netdb.h>
#include <sys/poll.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/time.h>
#include <sys/resource.h>

#include <byteswap.h>
#include <pthread.h>
#include <inttypes.h>

#include "../fio.h"

#ifdef FIO_HAVE_RDMA

#include <rdma/rdma_cma.h>
#include <infiniband/arch.h>

#define FIO_RDMA_MAX_IO_DEPTH	128
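
/*
 * Data transfer work requests use wr_id values 0..iodepth-1 (assigned in
 * fio_rdmaio_init()), so the control-message send/recv work requests are
 * tagged with wr_id == FIO_RDMA_MAX_IO_DEPTH to tell their completions
 * apart in cq_event_handler() and server_recv().
 */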

enum rdma_io_mode {
	FIO_RDMA_UNKNOWN = 0,
	FIO_RDMA_MEM_WRITE,
	FIO_RDMA_MEM_READ,
	FIO_RDMA_CHA_SEND,
	FIO_RDMA_CHA_RECV
};
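
/*
 * How the modes map onto verbs (see fio_rdmaio_prep() and fio_rdmaio_send()):
 * FIO_RDMA_MEM_WRITE/FIO_RDMA_MEM_READ issue one-sided IBV_WR_RDMA_WRITE/
 * IBV_WR_RDMA_READ against buffers advertised by the peer, while
 * FIO_RDMA_CHA_SEND/FIO_RDMA_CHA_RECV use two-sided send/recv channel
 * semantics.
 */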

struct remote_u {
	uint64_t buf;
	uint32_t rkey;
	uint32_t size;
};

struct rdma_info_blk {
	uint32_t mode;		/* channel semantic or memory semantic */
	uint32_t nr;		/* client: io depth
				   server: number of records for memory semantic
				 */
	struct remote_u rmt_us[FIO_RDMA_MAX_IO_DEPTH];
};
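
/*
 * The rdma_info_blk control message is exchanged once at connection setup:
 * the client sends its mode and io depth, and for the memory-semantic modes
 * the server replies with the rkey/address/size of each registered buffer.
 * All fields travel in network byte order (htonl()/htonll()).
 */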

struct rdma_io_u_data {
	uint64_t wr_id;
	struct ibv_send_wr sq_wr;
	struct ibv_recv_wr rq_wr;
	struct ibv_sge rdma_sgl;
};

struct rdmaio_data {
	int is_client;
	enum rdma_io_mode rdma_protocol;
	char host[64];
	struct sockaddr_in addr;

	struct ibv_recv_wr rq_wr;
	struct ibv_sge recv_sgl;
	struct rdma_info_blk recv_buf;
	struct ibv_mr *recv_mr;

	struct ibv_send_wr sq_wr;
	struct ibv_sge send_sgl;
	struct rdma_info_blk send_buf;
	struct ibv_mr *send_mr;

	struct ibv_comp_channel *channel;
	struct ibv_cq *cq;
	struct ibv_pd *pd;
	struct ibv_qp *qp;

	pthread_t cmthread;
	struct rdma_event_channel *cm_channel;
	struct rdma_cm_id *cm_id;
	struct rdma_cm_id *child_cm_id;

	int cq_event_num;

	struct remote_u *rmt_us;
	int rmt_nr;
	struct io_u **io_us_queued;
	int io_u_queued_nr;
	struct io_u **io_us_flight;
	int io_u_flight_nr;
	struct io_u **io_us_completed;
	int io_u_completed_nr;
};
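
/*
 * io_u lifecycle inside this engine: queue() appends to io_us_queued,
 * commit() posts the work requests and moves entries to io_us_flight,
 * cq_event_handler() moves finished entries to io_us_completed, and
 * event() hands them back to fio in FIFO order.
 */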

static int client_recv(struct thread_data *td, struct ibv_wc *wc)
{
	struct rdmaio_data *rd = td->io_ops->data;

	if (wc->byte_len != sizeof(rd->recv_buf)) {
		log_err("Received bogus data, size %d\n", wc->byte_len);
		return 1;
	}

	/* store mr info for MEMORY semantic */
	if ((rd->rdma_protocol == FIO_RDMA_MEM_WRITE) ||
	    (rd->rdma_protocol == FIO_RDMA_MEM_READ)) {
		/* struct flist_head *entry; */
		int i = 0;

		rd->rmt_nr = ntohl(rd->recv_buf.nr);

		for (i = 0; i < rd->rmt_nr; i++) {
			rd->rmt_us[i].buf = ntohll(rd->recv_buf.rmt_us[i].buf);
			rd->rmt_us[i].rkey = ntohl(rd->recv_buf.rmt_us[i].rkey);
			rd->rmt_us[i].size = ntohl(rd->recv_buf.rmt_us[i].size);

			dprint(FD_IO,
			       "fio: Received rkey %x addr %" PRIx64
			       " len %d from peer\n", rd->rmt_us[i].rkey,
			       rd->rmt_us[i].buf, rd->rmt_us[i].size);
		}
	}

	return 0;
}

static int server_recv(struct thread_data *td, struct ibv_wc *wc)
{
	struct rdmaio_data *rd = td->io_ops->data;

	if (wc->wr_id == FIO_RDMA_MAX_IO_DEPTH) {
		rd->rdma_protocol = ntohl(rd->recv_buf.mode);

		/* CHANNEL semantic: the server side runs in recv mode */
		if (rd->rdma_protocol == FIO_RDMA_CHA_SEND)
			rd->rdma_protocol = FIO_RDMA_CHA_RECV;
	}

	return 0;
}

static int cq_event_handler(struct thread_data *td, enum ibv_wc_opcode opcode)
{
	struct rdmaio_data *rd = td->io_ops->data;
	struct ibv_wc wc;
	struct rdma_io_u_data *r_io_u_d;
	int ret;
	int compevnum = 0;
	int i;

	while ((ret = ibv_poll_cq(rd->cq, 1, &wc)) == 1) {
		ret = 0;
		compevnum++;

		if (wc.status) {
			log_err("fio: cq completion status %d(%s)\n",
				wc.status, ibv_wc_status_str(wc.status));
			return -1;
		}

		switch (wc.opcode) {

		case IBV_WC_RECV:
			if (rd->is_client == 1)
				client_recv(td, &wc);
			else
				server_recv(td, &wc);

			if (wc.wr_id == FIO_RDMA_MAX_IO_DEPTH)
				break;

			for (i = 0; i < rd->io_u_flight_nr; i++) {
				r_io_u_d = rd->io_us_flight[i]->engine_data;

				if (wc.wr_id == r_io_u_d->rq_wr.wr_id) {
					rd->io_us_flight[i]->resid =
					    rd->io_us_flight[i]->buflen
					    - wc.byte_len;

					rd->io_us_flight[i]->error = 0;

					rd->io_us_completed[rd->
							    io_u_completed_nr]
					    = rd->io_us_flight[i];
					rd->io_u_completed_nr++;
					break;
				}
			}
			if (i == rd->io_u_flight_nr)
				log_err("fio: recv wr %" PRId64 " not found\n",
					wc.wr_id);
			else {
				/* unlink it: move the last entry into its slot */
				rd->io_us_flight[i] =
				    rd->io_us_flight[rd->io_u_flight_nr - 1];
				rd->io_u_flight_nr--;
			}

			break;

		case IBV_WC_SEND:
		case IBV_WC_RDMA_WRITE:
		case IBV_WC_RDMA_READ:
			if (wc.wr_id == FIO_RDMA_MAX_IO_DEPTH)
				break;

			for (i = 0; i < rd->io_u_flight_nr; i++) {
				r_io_u_d = rd->io_us_flight[i]->engine_data;

				if (wc.wr_id == r_io_u_d->sq_wr.wr_id) {
					rd->io_us_completed[rd->
							    io_u_completed_nr]
					    = rd->io_us_flight[i];
					rd->io_u_completed_nr++;
					break;
				}
			}
			if (i == rd->io_u_flight_nr)
				log_err("fio: send wr %" PRId64 " not found\n",
					wc.wr_id);
			else {
				/* unlink it: move the last entry into its slot */
				rd->io_us_flight[i] =
				    rd->io_us_flight[rd->io_u_flight_nr - 1];
				rd->io_u_flight_nr--;
			}

			break;

		default:
			log_info("fio: unknown completion event %d\n",
				 wc.opcode);
			return -1;
		}
		rd->cq_event_num++;
	}
	if (ret) {
		log_err("fio: poll error %d\n", ret);
		return 1;
	}

	return compevnum;
}

/*
 * Return -1 on error, or the number of completion
 * events otherwise.
 */
static int rdma_poll_wait(struct thread_data *td, enum ibv_wc_opcode opcode)
{
	struct rdmaio_data *rd = td->io_ops->data;
	struct ibv_cq *ev_cq;
	void *ev_ctx;
	int ret;

	if (rd->cq_event_num > 0) {	/* previous left */
		rd->cq_event_num--;
		return 0;
	}

again:
	if (ibv_get_cq_event(rd->channel, &ev_cq, &ev_ctx) != 0) {
		log_err("fio: Failed to get cq event!\n");
		return -1;
	}
	if (ev_cq != rd->cq) {
		log_err("fio: Unknown CQ!\n");
		return -1;
	}
	if (ibv_req_notify_cq(rd->cq, 0) != 0) {
		log_err("fio: Failed to set notify!\n");
		return -1;
	}

	ret = cq_event_handler(td, opcode);
	if (ret < 1)
		goto again;

	ibv_ack_cq_events(rd->cq, ret);

	rd->cq_event_num--;

	return ret;
}

static int fio_rdmaio_setup_qp(struct thread_data *td)
{
	struct rdmaio_data *rd = td->io_ops->data;
	struct ibv_qp_init_attr init_attr;
	int qp_depth = td->o.iodepth * 2;	/* twice the io depth */

	if (rd->is_client == 0)
		rd->pd = ibv_alloc_pd(rd->child_cm_id->verbs);
	else
		rd->pd = ibv_alloc_pd(rd->cm_id->verbs);
	if (rd->pd == NULL) {
		log_err("fio: ibv_alloc_pd fail\n");
		return 1;
	}

	if (rd->is_client == 0)
		rd->channel = ibv_create_comp_channel(rd->child_cm_id->verbs);
	else
		rd->channel = ibv_create_comp_channel(rd->cm_id->verbs);
	if (rd->channel == NULL) {
		log_err("fio: ibv_create_comp_channel fail\n");
		goto err1;
	}

	if (qp_depth < 16)
		qp_depth = 16;

	if (rd->is_client == 0)
		rd->cq = ibv_create_cq(rd->child_cm_id->verbs,
				       qp_depth, rd, rd->channel, 0);
	else
		rd->cq = ibv_create_cq(rd->cm_id->verbs,
				       qp_depth, rd, rd->channel, 0);
	if (rd->cq == NULL) {
		log_err("fio: ibv_create_cq failed\n");
		goto err2;
	}

	if (ibv_req_notify_cq(rd->cq, 0) != 0) {
		log_err("fio: ibv_req_notify_cq failed\n");
		goto err3;
	}

	/* create queue pair */
	memset(&init_attr, 0, sizeof(init_attr));
	init_attr.cap.max_send_wr = qp_depth;
	init_attr.cap.max_recv_wr = qp_depth;
	init_attr.cap.max_recv_sge = 1;
	init_attr.cap.max_send_sge = 1;
	init_attr.qp_type = IBV_QPT_RC;
	init_attr.send_cq = rd->cq;
	init_attr.recv_cq = rd->cq;

	if (rd->is_client == 0) {
		if (rdma_create_qp(rd->child_cm_id, rd->pd, &init_attr) != 0) {
			log_err("fio: rdma_create_qp failed\n");
			goto err3;
		}
		rd->qp = rd->child_cm_id->qp;
	} else {
		if (rdma_create_qp(rd->cm_id, rd->pd, &init_attr) != 0) {
			log_err("fio: rdma_create_qp failed\n");
			goto err3;
		}
		rd->qp = rd->cm_id->qp;
	}

	return 0;

err3:
	ibv_destroy_cq(rd->cq);
err2:
	ibv_destroy_comp_channel(rd->channel);
err1:
	ibv_dealloc_pd(rd->pd);

	return 1;
}

static int fio_rdmaio_setup_control_msg_buffers(struct thread_data *td)
{
	struct rdmaio_data *rd = td->io_ops->data;

	rd->recv_mr = ibv_reg_mr(rd->pd, &rd->recv_buf, sizeof(rd->recv_buf),
				 IBV_ACCESS_LOCAL_WRITE);
	if (rd->recv_mr == NULL) {
		log_err("fio: recv_buf reg_mr failed\n");
		return 1;
	}

	rd->send_mr = ibv_reg_mr(rd->pd, &rd->send_buf, sizeof(rd->send_buf),
				 0);
	if (rd->send_mr == NULL) {
		log_err("fio: send_buf reg_mr failed\n");
		ibv_dereg_mr(rd->recv_mr);
		return 1;
	}

	/* setup work request */
	/* recv wq */
	rd->recv_sgl.addr = (uint64_t) (unsigned long)&rd->recv_buf;
	rd->recv_sgl.length = sizeof rd->recv_buf;
	rd->recv_sgl.lkey = rd->recv_mr->lkey;
	rd->rq_wr.sg_list = &rd->recv_sgl;
	rd->rq_wr.num_sge = 1;
	rd->rq_wr.wr_id = FIO_RDMA_MAX_IO_DEPTH;

	/* send wq */
	rd->send_sgl.addr = (uint64_t) (unsigned long)&rd->send_buf;
	rd->send_sgl.length = sizeof rd->send_buf;
	rd->send_sgl.lkey = rd->send_mr->lkey;

	rd->sq_wr.opcode = IBV_WR_SEND;
	rd->sq_wr.send_flags = IBV_SEND_SIGNALED;
	rd->sq_wr.sg_list = &rd->send_sgl;
	rd->sq_wr.num_sge = 1;
	rd->sq_wr.wr_id = FIO_RDMA_MAX_IO_DEPTH;

	return 0;
}

static int get_next_channel_event(struct thread_data *td,
				  struct rdma_event_channel *channel,
				  enum rdma_cm_event_type wait_event)
{
	struct rdmaio_data *rd = td->io_ops->data;

	int ret;
	struct rdma_cm_event *event;

	ret = rdma_get_cm_event(channel, &event);
	if (ret) {
		log_err("fio: rdma_get_cm_event\n");
		return 1;
	}

	if (event->event != wait_event) {
		log_err("fio: event is %s instead of %s\n",
			rdma_event_str(event->event),
			rdma_event_str(wait_event));
		return 1;
	}

	switch (event->event) {
	case RDMA_CM_EVENT_CONNECT_REQUEST:
		rd->child_cm_id = event->id;
		break;
	default:
		break;
	}

	rdma_ack_cm_event(event);

	return 0;
}

static int fio_rdmaio_prep(struct thread_data *td, struct io_u *io_u)
{
	struct rdmaio_data *rd = td->io_ops->data;
	struct rdma_io_u_data *r_io_u_d;

	r_io_u_d = io_u->engine_data;

	switch (rd->rdma_protocol) {
	case FIO_RDMA_MEM_WRITE:
	case FIO_RDMA_MEM_READ:
		r_io_u_d->rdma_sgl.addr = (uint64_t) (unsigned long)io_u->buf;
		r_io_u_d->rdma_sgl.lkey = io_u->mr->lkey;
		r_io_u_d->sq_wr.wr_id = r_io_u_d->wr_id;
		r_io_u_d->sq_wr.send_flags = IBV_SEND_SIGNALED;
		r_io_u_d->sq_wr.sg_list = &r_io_u_d->rdma_sgl;
		r_io_u_d->sq_wr.num_sge = 1;
		break;
	case FIO_RDMA_CHA_SEND:
		r_io_u_d->rdma_sgl.addr = (uint64_t) (unsigned long)io_u->buf;
		r_io_u_d->rdma_sgl.lkey = io_u->mr->lkey;
		r_io_u_d->rdma_sgl.length = io_u->buflen;
		r_io_u_d->sq_wr.wr_id = r_io_u_d->wr_id;
		r_io_u_d->sq_wr.opcode = IBV_WR_SEND;
		r_io_u_d->sq_wr.send_flags = IBV_SEND_SIGNALED;
		r_io_u_d->sq_wr.sg_list = &r_io_u_d->rdma_sgl;
		r_io_u_d->sq_wr.num_sge = 1;
		break;
	case FIO_RDMA_CHA_RECV:
		r_io_u_d->rdma_sgl.addr = (uint64_t) (unsigned long)io_u->buf;
		r_io_u_d->rdma_sgl.lkey = io_u->mr->lkey;
		r_io_u_d->rdma_sgl.length = io_u->buflen;
		r_io_u_d->rq_wr.wr_id = r_io_u_d->wr_id;
		r_io_u_d->rq_wr.sg_list = &r_io_u_d->rdma_sgl;
		r_io_u_d->rq_wr.num_sge = 1;
		break;
	default:
		log_err("fio: unknown rdma protocol - %d\n", rd->rdma_protocol);
		break;
	}

	return 0;
}

static struct io_u *fio_rdmaio_event(struct thread_data *td, int event)
{
	struct rdmaio_data *rd = td->io_ops->data;
	struct io_u *io_u;
	int i;

	io_u = rd->io_us_completed[0];
	for (i = 0; i < rd->io_u_completed_nr - 1; i++) {
		rd->io_us_completed[i] = rd->io_us_completed[i + 1];
	}
	rd->io_u_completed_nr--;

	dprint_io_u(io_u, "fio_rdmaio_event");

	return io_u;
}

static int fio_rdmaio_getevents(struct thread_data *td, unsigned int min,
				unsigned int max, struct timespec *t)
{
	struct rdmaio_data *rd = td->io_ops->data;
	enum ibv_wc_opcode comp_opcode = IBV_WC_RDMA_WRITE;
	struct ibv_cq *ev_cq;
	void *ev_ctx;
	int r, ret;

	r = 0;

	switch (rd->rdma_protocol) {
	case FIO_RDMA_MEM_WRITE:
		comp_opcode = IBV_WC_RDMA_WRITE;
		break;
	case FIO_RDMA_MEM_READ:
		comp_opcode = IBV_WC_RDMA_READ;
		break;
	case FIO_RDMA_CHA_SEND:
		comp_opcode = IBV_WC_SEND;
		break;
	case FIO_RDMA_CHA_RECV:
		comp_opcode = IBV_WC_RECV;
		break;
	default:
		log_err("fio: unknown rdma protocol - %d\n", rd->rdma_protocol);
		break;
	}

	if (rd->cq_event_num > 0) {	/* previous left */
		rd->cq_event_num--;
		return 0;
	}

again:
	if (ibv_get_cq_event(rd->channel, &ev_cq, &ev_ctx) != 0) {
		log_err("fio: Failed to get cq event!\n");
		return -1;
	}
	if (ev_cq != rd->cq) {
		log_err("fio: Unknown CQ!\n");
		return -1;
	}
	if (ibv_req_notify_cq(rd->cq, 0) != 0) {
		log_err("fio: Failed to set notify!\n");
		return -1;
	}

	ret = cq_event_handler(td, comp_opcode);
	if (ret < 1)
		goto again;

	ibv_ack_cq_events(rd->cq, ret);

	r += ret;
	if (r < min)
		goto again;

	rd->cq_event_num -= r;

	return r;
}

static int fio_rdmaio_send(struct thread_data *td, struct io_u **io_us,
			   unsigned int nr)
{
	struct rdmaio_data *rd = td->io_ops->data;
	struct ibv_send_wr *bad_wr;
#if 0
	enum ibv_wc_opcode comp_opcode;
	comp_opcode = IBV_WC_RDMA_WRITE;
#endif
	int i, index;
	struct rdma_io_u_data *r_io_u_d;

	r_io_u_d = NULL;

	for (i = 0; i < nr; i++) {
		/* RDMA_WRITE or RDMA_READ */
		switch (rd->rdma_protocol) {
		case FIO_RDMA_MEM_WRITE:
			/* compose work request */
			r_io_u_d = io_us[i]->engine_data;
			index = rand() % rd->rmt_nr;
			r_io_u_d->sq_wr.opcode = IBV_WR_RDMA_WRITE;
			r_io_u_d->sq_wr.wr.rdma.rkey = rd->rmt_us[index].rkey;
			r_io_u_d->sq_wr.wr.rdma.remote_addr =
			    rd->rmt_us[index].buf;
			r_io_u_d->sq_wr.sg_list->length = io_us[i]->buflen;
			break;
		case FIO_RDMA_MEM_READ:
			/* compose work request */
			r_io_u_d = io_us[i]->engine_data;
			index = rand() % rd->rmt_nr;
			r_io_u_d->sq_wr.opcode = IBV_WR_RDMA_READ;
			r_io_u_d->sq_wr.wr.rdma.rkey = rd->rmt_us[index].rkey;
			r_io_u_d->sq_wr.wr.rdma.remote_addr =
			    rd->rmt_us[index].buf;
			r_io_u_d->sq_wr.sg_list->length = io_us[i]->buflen;
			break;
		case FIO_RDMA_CHA_SEND:
			r_io_u_d = io_us[i]->engine_data;
			r_io_u_d->sq_wr.opcode = IBV_WR_SEND;
			r_io_u_d->sq_wr.send_flags = IBV_SEND_SIGNALED;
			break;
		default:
			log_err("fio: unknown rdma protocol - %d\n",
				rd->rdma_protocol);
			break;
		}

		if (ibv_post_send(rd->qp, &r_io_u_d->sq_wr, &bad_wr) != 0) {
			log_err("fio: ibv_post_send fail\n");
			return -1;
		}

		dprint_io_u(io_us[i], "fio_rdmaio_send");
	}

	/* wait for completion
	   rdma_poll_wait(td, comp_opcode); */

	return i;
}

static int fio_rdmaio_recv(struct thread_data *td, struct io_u **io_us,
			   unsigned int nr)
{
	struct rdmaio_data *rd = td->io_ops->data;
	struct ibv_recv_wr *bad_wr;
	struct rdma_io_u_data *r_io_u_d;
	int i;

	i = 0;
	if (rd->rdma_protocol == FIO_RDMA_CHA_RECV) {
		/* post io_u into recv queue */
		for (i = 0; i < nr; i++) {
			r_io_u_d = io_us[i]->engine_data;
			if (ibv_post_recv(rd->qp, &r_io_u_d->rq_wr, &bad_wr) !=
			    0) {
				log_err("fio: ibv_post_recv fail\n");
				return 1;
			}
		}
	} else if ((rd->rdma_protocol == FIO_RDMA_MEM_READ)
		   || (rd->rdma_protocol == FIO_RDMA_MEM_WRITE)) {
		/* re-post the rq_wr */
		if (ibv_post_recv(rd->qp, &rd->rq_wr, &bad_wr) != 0) {
			log_err("fio: ibv_post_recv fail\n");
			return 1;
		}

		rdma_poll_wait(td, IBV_WC_RECV);

		dprint(FD_IO, "fio: recv FINISH message\n");
		exit(0);
	}

	return i;
}

static int fio_rdmaio_queue(struct thread_data *td, struct io_u *io_u)
{
	struct rdmaio_data *rd = td->io_ops->data;

	fio_ro_check(td, io_u);

	if (rd->io_u_queued_nr == (int)td->o.iodepth)
		return FIO_Q_BUSY;

	rd->io_us_queued[rd->io_u_queued_nr] = io_u;
	rd->io_u_queued_nr++;

	dprint_io_u(io_u, "fio_rdmaio_queue");

	return FIO_Q_QUEUED;
}

static void fio_rdmaio_queued(struct thread_data *td, struct io_u **io_us,
			      unsigned int nr)
{
	struct rdmaio_data *rd = td->io_ops->data;
	struct timeval now;
	unsigned int i;

	if (!fio_fill_issue_time(td))
		return;

	fio_gettime(&now, NULL);

	for (i = 0; i < nr; i++) {
		struct io_u *io_u = io_us[i];

		/* queued -> flight */
		rd->io_us_flight[rd->io_u_flight_nr] = io_u;
		rd->io_u_flight_nr++;

		memcpy(&io_u->issue_time, &now, sizeof(now));
		io_u_queued(td, io_u);
	}
}

static int fio_rdmaio_commit(struct thread_data *td)
{
	struct rdmaio_data *rd = td->io_ops->data;
	struct io_u **io_us;
	int ret;

	if (!rd->io_us_queued)
		return 0;

	io_us = rd->io_us_queued;
	do {
		/* RDMA_WRITE or RDMA_READ */
		if (rd->is_client)
			ret = fio_rdmaio_send(td, io_us, rd->io_u_queued_nr);
		else
			ret = fio_rdmaio_recv(td, io_us, rd->io_u_queued_nr);

		if (ret > 0) {
			fio_rdmaio_queued(td, io_us, ret);
			io_u_mark_submit(td, ret);
			rd->io_u_queued_nr -= ret;
			io_us += ret;
			ret = 0;
		} else
			break;
	} while (rd->io_u_queued_nr);

	return ret;
}

static int fio_rdmaio_connect(struct thread_data *td, struct fio_file *f)
{
	struct rdmaio_data *rd = td->io_ops->data;
	struct rdma_conn_param conn_param;
	struct ibv_send_wr *bad_wr;

	memset(&conn_param, 0, sizeof conn_param);
	conn_param.responder_resources = 1;
	conn_param.initiator_depth = 1;
	conn_param.retry_count = 10;

	if (rdma_connect(rd->cm_id, &conn_param) != 0) {
		log_err("fio: rdma_connect fail\n");
		return 1;
	}

	if (get_next_channel_event
	    (td, rd->cm_channel, RDMA_CM_EVENT_ESTABLISHED) != 0) {
		log_err("fio: wait for RDMA_CM_EVENT_ESTABLISHED\n");
		return 1;
	}

	/* send task request */
	rd->send_buf.mode = htonl(rd->rdma_protocol);
	rd->send_buf.nr = htonl(td->o.iodepth);

	if (ibv_post_send(rd->qp, &rd->sq_wr, &bad_wr) != 0) {
		log_err("fio: ibv_post_send fail\n");
		return 1;
	}

	rdma_poll_wait(td, IBV_WC_SEND);

	/* wait for remote MR info from server side */
	rdma_poll_wait(td, IBV_WC_RECV);

	return 0;
}

static int fio_rdmaio_accept(struct thread_data *td, struct fio_file *f)
{
	struct rdmaio_data *rd = td->io_ops->data;
	struct rdma_conn_param conn_param;
	struct ibv_send_wr *bad_wr;

	/* rdma_accept() - then wait for accept success */
	memset(&conn_param, 0, sizeof conn_param);
	conn_param.responder_resources = 1;
	conn_param.initiator_depth = 1;

	if (rdma_accept(rd->child_cm_id, &conn_param) != 0) {
		log_err("fio: rdma_accept fail\n");
		return 1;
	}

	if (get_next_channel_event
	    (td, rd->cm_channel, RDMA_CM_EVENT_ESTABLISHED) != 0) {
		log_err("fio: wait for RDMA_CM_EVENT_ESTABLISHED\n");
		return 1;
	}

	/* wait for request */
	rdma_poll_wait(td, IBV_WC_RECV);

	if (ibv_post_send(rd->qp, &rd->sq_wr, &bad_wr) != 0) {
		log_err("fio: ibv_post_send fail\n");
		return 1;
	}

	rdma_poll_wait(td, IBV_WC_SEND);

	return 0;
}
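
/*
 * Data flows one way in this engine: the td_read() side plays the
 * passive/server role (rdma_listen() + rdma_accept()) and the td_write()
 * side plays the active/client role (rdma_connect()).
 */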
static int fio_rdmaio_open_file(struct thread_data *td, struct fio_file *f)
{
	if (td_read(td))
		return fio_rdmaio_accept(td, f);
	else
		return fio_rdmaio_connect(td, f);
}

static int fio_rdmaio_close_file(struct thread_data *td, struct fio_file *f)
{
	struct rdmaio_data *rd = td->io_ops->data;
	struct ibv_send_wr *bad_wr;

	/* unregister rdma buffer */

	/*
	 * Client sends notification to the server side
	 */
	/* refer to: http://linux.die.net/man/7/rdma_cm */
	if ((rd->is_client == 1) && ((rd->rdma_protocol == FIO_RDMA_MEM_WRITE)
				     || (rd->rdma_protocol ==
					 FIO_RDMA_MEM_READ))) {
		if (ibv_post_send(rd->qp, &rd->sq_wr, &bad_wr) != 0) {
			log_err("fio: ibv_post_send fail\n");
			return 1;
		}

		dprint(FD_IO, "fio: close information sent successfully\n");
		rdma_poll_wait(td, IBV_WC_SEND);
	}

	if (rd->is_client == 1)
		rdma_disconnect(rd->cm_id);
	else {
		rdma_disconnect(rd->child_cm_id);
/*		rdma_disconnect(rd->cm_id); */
	}

/*	if (get_next_channel_event(td, rd->cm_channel, RDMA_CM_EVENT_DISCONNECTED) != 0)
	{
		log_err("fio: wait for RDMA_CM_EVENT_DISCONNECTED\n");
		return 1;
	}*/

	ibv_destroy_qp(rd->qp);
	ibv_destroy_cq(rd->cq);

	if (rd->is_client == 1)
		rdma_destroy_id(rd->cm_id);
	else {
		rdma_destroy_id(rd->child_cm_id);
		rdma_destroy_id(rd->cm_id);
	}

	ibv_destroy_comp_channel(rd->channel);
	ibv_dealloc_pd(rd->pd);

	return 0;
}

static int fio_rdmaio_setup_connect(struct thread_data *td, const char *host,
				    unsigned short port)
{
	struct rdmaio_data *rd = td->io_ops->data;
	struct ibv_recv_wr *bad_wr;

	rd->addr.sin_family = AF_INET;
	rd->addr.sin_port = htons(port);

	if (inet_aton(host, &rd->addr.sin_addr) != 1) {
		struct hostent *hent;

		hent = gethostbyname(host);
		if (!hent) {
			td_verror(td, errno, "gethostbyname");
			return 1;
		}

		memcpy(&rd->addr.sin_addr, hent->h_addr, 4);
	}

	/* resolve addr */
	if (rdma_resolve_addr(rd->cm_id, NULL,
			      (struct sockaddr *)&rd->addr, 2000) != 0) {
		log_err("fio: rdma_resolve_addr\n");
		return 1;
	}

	if (get_next_channel_event
	    (td, rd->cm_channel, RDMA_CM_EVENT_ADDR_RESOLVED)
	    != 0) {
		log_err("fio: get_next_channel_event\n");
		return 1;
	}

	/* resolve route */
	if (rdma_resolve_route(rd->cm_id, 2000) != 0) {
		log_err("fio: rdma_resolve_route\n");
		return 1;
	}

	if (get_next_channel_event
	    (td, rd->cm_channel, RDMA_CM_EVENT_ROUTE_RESOLVED) != 0) {
		log_err("fio: get_next_channel_event\n");
		return 1;
	}

	/* create qp and buffer */
	if (fio_rdmaio_setup_qp(td) != 0)
		return 1;

	if (fio_rdmaio_setup_control_msg_buffers(td) != 0)
		return 1;

	/* post recv buf */
	if (ibv_post_recv(rd->qp, &rd->rq_wr, &bad_wr) != 0) {
		log_err("fio: ibv_post_recv fail\n");
		return 1;
	}

	return 0;
}

static int fio_rdmaio_setup_listen(struct thread_data *td, short port)
{
	struct rdmaio_data *rd = td->io_ops->data;
	struct ibv_recv_wr *bad_wr;

	rd->addr.sin_family = AF_INET;
	rd->addr.sin_addr.s_addr = htonl(INADDR_ANY);
	rd->addr.sin_port = htons(port);

	/* rdma_listen */
	if (rdma_bind_addr(rd->cm_id, (struct sockaddr *)&rd->addr) != 0) {
		log_err("fio: rdma_bind_addr fail\n");
		return 1;
	}

	if (rdma_listen(rd->cm_id, 3) != 0) {
		log_err("fio: rdma_listen fail\n");
		return 1;
	}

	/* wait for CONNECT_REQUEST */
	if (get_next_channel_event
	    (td, rd->cm_channel, RDMA_CM_EVENT_CONNECT_REQUEST) != 0) {
		log_err("fio: wait for RDMA_CM_EVENT_CONNECT_REQUEST\n");
		return 1;
	}

	if (fio_rdmaio_setup_qp(td) != 0)
		return 1;

	if (fio_rdmaio_setup_control_msg_buffers(td) != 0)
		return 1;

	/* post recv buf */
	if (ibv_post_recv(rd->qp, &rd->rq_wr, &bad_wr) != 0) {
		log_err("fio: ibv_post_recv fail\n");
		return 1;
	}

	return 0;
}
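
/*
 * Engine init: parse the "filename" option as host/port[/protocol]
 * (protocol is one of rdma_write, rdma_read or send, defaulting to
 * rdma_write), raise RLIMIT_MEMLOCK if the soft limit is too small for
 * the pinned buffers, set up the listening (read) or connecting (write)
 * side, and register every io_u buffer with ibv_reg_mr().
 */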
static int fio_rdmaio_init(struct thread_data *td)
{
	struct rdmaio_data *rd = td->io_ops->data;
	struct flist_head *entry;
	unsigned int max_bs;
	unsigned int port;
	char host[64], buf[128];
	char *sep, *portp, *modep;
	int ret, i;
	struct rlimit rl;

	if (td_rw(td)) {
		log_err("fio: rdma connections must be read OR write\n");
		return 1;
	}
	if (td_random(td)) {
		log_err("fio: RDMA network IO can't be random\n");
		return 1;
	}

	/* check RLIMIT_MEMLOCK */
	if (getrlimit(RLIMIT_MEMLOCK, &rl) != 0) {
		log_err("fio: getrlimit fail: %d(%s)\n",
			errno, strerror(errno));
		return 1;
	}

	/* soft limit */
	if ((rl.rlim_cur != RLIM_INFINITY)
	    && (rl.rlim_cur < td->orig_buffer_size)) {
		log_err("fio: soft RLIMIT_MEMLOCK is: %" PRId64 "\n",
			rl.rlim_cur);
		log_err("fio: total block size is:    %zd\n",
			td->orig_buffer_size);
		/* try to set larger RLIMIT_MEMLOCK */
		rl.rlim_cur = rl.rlim_max;
		if (setrlimit(RLIMIT_MEMLOCK, &rl) != 0) {
			log_err("fio: setrlimit fail: %d(%s)\n",
				errno, strerror(errno));
			log_err("fio: you may try enlarging MEMLOCK as root:\n");
			log_err("# ulimit -l unlimited\n");
			return 1;
		}
	}

	strcpy(buf, td->o.filename);

	sep = strchr(buf, '/');
	if (!sep)
		goto bad_host;

	*sep = '\0';
	sep++;
	strcpy(host, buf);
	if (!strlen(host))
		goto bad_host;

	modep = NULL;
	portp = sep;
	sep = strchr(portp, '/');
	if (sep) {
		*sep = '\0';
		modep = sep + 1;
	}

	port = strtol(portp, NULL, 10);
	if (!port || port > 65535)
		goto bad_host;

	if (modep) {
		if (!strncmp("rdma_write", modep, strlen(modep)) ||
		    !strncmp("RDMA_WRITE", modep, strlen(modep)))
			rd->rdma_protocol = FIO_RDMA_MEM_WRITE;
		else if (!strncmp("rdma_read", modep, strlen(modep)) ||
			 !strncmp("RDMA_READ", modep, strlen(modep)))
			rd->rdma_protocol = FIO_RDMA_MEM_READ;
		else if (!strncmp("send", modep, strlen(modep)) ||
			 !strncmp("SEND", modep, strlen(modep)))
			rd->rdma_protocol = FIO_RDMA_CHA_SEND;
		else
			goto bad_host;
	} else
		rd->rdma_protocol = FIO_RDMA_MEM_WRITE;

	rd->cq_event_num = 0;

	rd->cm_channel = rdma_create_event_channel();
	if (!rd->cm_channel) {
		log_err("fio: rdma_create_event_channel fail\n");
		return 1;
	}

	ret = rdma_create_id(rd->cm_channel, &rd->cm_id, rd, RDMA_PS_TCP);
	if (ret) {
		log_err("fio: rdma_create_id fail\n");
		return 1;
	}

	if ((rd->rdma_protocol == FIO_RDMA_MEM_WRITE) ||
	    (rd->rdma_protocol == FIO_RDMA_MEM_READ)) {
		rd->rmt_us =
			malloc(FIO_RDMA_MAX_IO_DEPTH * sizeof(struct remote_u));
		memset(rd->rmt_us, 0,
		       FIO_RDMA_MAX_IO_DEPTH * sizeof(struct remote_u));
		rd->rmt_nr = 0;
	}

	rd->io_us_queued = malloc(td->o.iodepth * sizeof(struct io_u *));
	memset(rd->io_us_queued, 0, td->o.iodepth * sizeof(struct io_u *));
	rd->io_u_queued_nr = 0;

	rd->io_us_flight = malloc(td->o.iodepth * sizeof(struct io_u *));
	memset(rd->io_us_flight, 0, td->o.iodepth * sizeof(struct io_u *));
	rd->io_u_flight_nr = 0;

	rd->io_us_completed = malloc(td->o.iodepth * sizeof(struct io_u *));
	memset(rd->io_us_completed, 0, td->o.iodepth * sizeof(struct io_u *));
	rd->io_u_completed_nr = 0;

	if (td_read(td)) {	/* READ as the server */
		rd->is_client = 0;
		/* the server finishes buffer setup after the request arrives */
		ret = fio_rdmaio_setup_listen(td, port);
	} else {		/* WRITE as the client */
		rd->is_client = 1;
		ret = fio_rdmaio_setup_connect(td, host, port);
	}

	max_bs = max(td->o.max_bs[DDIR_READ], td->o.max_bs[DDIR_WRITE]);
	/* register each io_u in the free list */
	i = 0;
	flist_for_each(entry, &td->io_u_freelist) {
		struct io_u *io_u = flist_entry(entry, struct io_u, list);

		io_u->engine_data = malloc(sizeof(struct rdma_io_u_data));
		memset(io_u->engine_data, 0, sizeof(struct rdma_io_u_data));
		((struct rdma_io_u_data *)io_u->engine_data)->wr_id = i;

		io_u->mr = ibv_reg_mr(rd->pd, io_u->buf, max_bs,
				      IBV_ACCESS_LOCAL_WRITE |
				      IBV_ACCESS_REMOTE_READ |
				      IBV_ACCESS_REMOTE_WRITE);
		if (io_u->mr == NULL) {
			log_err("fio: ibv_reg_mr io_u failed\n");
			return 1;
		}

		rd->send_buf.rmt_us[i].buf =
		    htonll((uint64_t) (unsigned long)io_u->buf);
		rd->send_buf.rmt_us[i].rkey = htonl(io_u->mr->rkey);
		rd->send_buf.rmt_us[i].size = htonl(max_bs);

/*		log_info("fio: Send rkey %x addr %" PRIx64 " len %d to client\n",
			 io_u->mr->rkey, io_u->buf, max_bs); */
		i++;
	}

	rd->send_buf.nr = htonl(i);

	return ret;
bad_host:
	log_err("fio: bad rdma host/port/protocol: %s\n", td->o.filename);
	return 1;
}

static void fio_rdmaio_cleanup(struct thread_data *td)
{
	struct rdmaio_data *rd = td->io_ops->data;

	if (rd) {
/*		if (nd->listenfd != -1)
			close(nd->listenfd);
		if (nd->pipes[0] != -1)
			close(nd->pipes[0]);
		if (nd->pipes[1] != -1)
			close(nd->pipes[1]);
*/
		free(rd);
	}
}

static int fio_rdmaio_setup(struct thread_data *td)
{
	struct rdmaio_data *rd;

	if (!td->io_ops->data) {
		rd = malloc(sizeof(*rd));

		memset(rd, 0, sizeof(*rd));
		td->io_ops->data = rd;
	}

	return 0;
}

static struct ioengine_ops ioengine_rw = {
	.name		= "rdma",
	.version	= FIO_IOOPS_VERSION,
	.setup		= fio_rdmaio_setup,
	.init		= fio_rdmaio_init,
	.prep		= fio_rdmaio_prep,
	.queue		= fio_rdmaio_queue,
	.commit		= fio_rdmaio_commit,
	.getevents	= fio_rdmaio_getevents,
	.event		= fio_rdmaio_event,
	.cleanup	= fio_rdmaio_cleanup,
	.open_file	= fio_rdmaio_open_file,
	.close_file	= fio_rdmaio_close_file,
	.flags		= FIO_DISKLESSIO | FIO_UNIDIR | FIO_PIPEIO,
};

#else /* FIO_HAVE_RDMA */

static int fio_rdmaio_open_file(struct thread_data *td, struct fio_file *f)
{
	return 0;
}

static int fio_rdmaio_close_file(struct thread_data *td, struct fio_file *f)
{
	return 0;
}

static int fio_rdmaio_queue(struct thread_data *td, struct io_u *io_u)
{
	return FIO_Q_COMPLETED;
}

static int fio_rdmaio_init(struct thread_data fio_unused * td)
{
	log_err("fio: rdma(librdmacm libibverbs) not available\n");
	log_err("     You haven't compiled the rdma ioengine into fio.\n");
	log_err("     If you want to try the rdma ioengine,\n");
	log_err("     make sure OFED is installed,\n");
	log_err("     $ ofed_info\n");
	log_err("     then try to make fio as follows:\n");
	log_err("     $ export EXTFLAGS=\"-DFIO_HAVE_RDMA\"\n");
	log_err("     $ export EXTLIBS=\"-libverbs -lrdmacm\"\n");
	log_err("     $ make clean && make\n");
	return 1;
}

static struct ioengine_ops ioengine_rw = {
	.name		= "rdma",
	.version	= FIO_IOOPS_VERSION,
	.init		= fio_rdmaio_init,
	.queue		= fio_rdmaio_queue,
	.open_file	= fio_rdmaio_open_file,
	.close_file	= fio_rdmaio_close_file,
	.flags		= FIO_SYNCIO | FIO_DISKLESSIO | FIO_UNIDIR | FIO_PIPEIO,
};

#endif

static void fio_init fio_rdmaio_register(void)
{
	register_ioengine(&ioengine_rw);
}

static void fio_exit fio_rdmaio_unregister(void)
{
	unregister_ioengine(&ioengine_rw);
}