/*
 * Copyright (c) 2016 HGST, a Western Digital Company.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 */
#include <linux/moduleparam.h>
#include <linux/slab.h>
#include <rdma/mr_pool.h>
#include <rdma/rw.h>

enum {
        RDMA_RW_SINGLE_WR,
        RDMA_RW_MULTI_WR,
        RDMA_RW_MR,
        RDMA_RW_SIG_MR,
};

static bool rdma_rw_force_mr;
module_param_named(force_mr, rdma_rw_force_mr, bool, 0);
MODULE_PARM_DESC(force_mr, "Force usage of MRs for RDMA READ/WRITE operations");

/*
 * Check if the device might use memory registration. This is currently only
 * true for iWarp devices. In the future we can hopefully fine tune this based
 * on HCA driver input.
 */
static inline bool rdma_rw_can_use_mr(struct ib_device *dev, u8 port_num)
{
        if (rdma_protocol_iwarp(dev, port_num))
                return true;
        if (unlikely(rdma_rw_force_mr))
                return true;
        return false;
}

/*
 * Check if the device will use memory registration for this RW operation.
 * We currently always use memory registrations for iWarp RDMA READs, and
 * have a debug option to force usage of MRs.
 *
 * XXX: In the future we can hopefully fine tune this based on HCA driver
 * input.
 */
static inline bool rdma_rw_io_needs_mr(struct ib_device *dev, u8 port_num,
                enum dma_data_direction dir, int dma_nents)
{
        if (rdma_protocol_iwarp(dev, port_num) && dir == DMA_FROM_DEVICE)
                return true;
        if (unlikely(rdma_rw_force_mr))
                return true;
        return false;
}

static inline u32 rdma_rw_fr_page_list_len(struct ib_device *dev)
{
        /* arbitrary limit to avoid allocating gigantic resources */
        return min_t(u32, dev->attrs.max_fast_reg_page_list_len, 256);
}

/* Caller must have zero-initialized *reg. */
static int rdma_rw_init_one_mr(struct ib_qp *qp, u8 port_num,
                struct rdma_rw_reg_ctx *reg, struct scatterlist *sg,
                u32 sg_cnt, u32 offset)
{
        u32 pages_per_mr = rdma_rw_fr_page_list_len(qp->pd->device);
        u32 nents = min(sg_cnt, pages_per_mr);
        int count = 0, ret;

        reg->mr = ib_mr_pool_get(qp, &qp->rdma_mrs);
        if (!reg->mr)
                return -EAGAIN;

        if (reg->mr->need_inval) {
                reg->inv_wr.opcode = IB_WR_LOCAL_INV;
                reg->inv_wr.ex.invalidate_rkey = reg->mr->lkey;
                reg->inv_wr.next = &reg->reg_wr.wr;
                count++;
        } else {
                reg->inv_wr.next = NULL;
        }

        ret = ib_map_mr_sg(reg->mr, sg, nents, &offset, PAGE_SIZE);
        if (ret < nents) {
                ib_mr_pool_put(qp, &qp->rdma_mrs, reg->mr);
                return -EINVAL;
        }

        reg->reg_wr.wr.opcode = IB_WR_REG_MR;
        reg->reg_wr.mr = reg->mr;
        reg->reg_wr.access = IB_ACCESS_LOCAL_WRITE;
        if (rdma_protocol_iwarp(qp->device, port_num))
                reg->reg_wr.access |= IB_ACCESS_REMOTE_WRITE;
        count++;

        reg->sge.addr = reg->mr->iova;
        reg->sge.length = reg->mr->length;
        return count;
}

static int rdma_rw_init_mr_wrs(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
                u8 port_num, struct scatterlist *sg, u32 sg_cnt, u32 offset,
                u64 remote_addr, u32 rkey, enum dma_data_direction dir)
{
        struct rdma_rw_reg_ctx *prev = NULL;
        u32 pages_per_mr = rdma_rw_fr_page_list_len(qp->pd->device);
        int i, j, ret = 0, count = 0;

        ctx->nr_ops = DIV_ROUND_UP(sg_cnt, pages_per_mr);
        ctx->reg = kcalloc(ctx->nr_ops, sizeof(*ctx->reg), GFP_KERNEL);
        if (!ctx->reg) {
                ret = -ENOMEM;
                goto out;
        }

        for (i = 0; i < ctx->nr_ops; i++) {
                struct rdma_rw_reg_ctx *reg = &ctx->reg[i];
                u32 nents = min(sg_cnt, pages_per_mr);

                ret = rdma_rw_init_one_mr(qp, port_num, reg, sg, sg_cnt,
                                offset);
                if (ret < 0)
                        goto out_free;
                count += ret;

                if (prev) {
                        if (reg->mr->need_inval)
                                prev->wr.wr.next = &reg->inv_wr;
                        else
                                prev->wr.wr.next = &reg->reg_wr.wr;
                }

                reg->reg_wr.wr.next = &reg->wr.wr;

                reg->wr.wr.sg_list = &reg->sge;
                reg->wr.wr.num_sge = 1;
                reg->wr.remote_addr = remote_addr;
                reg->wr.rkey = rkey;
                if (dir == DMA_TO_DEVICE) {
                        reg->wr.wr.opcode = IB_WR_RDMA_WRITE;
                } else if (!rdma_cap_read_inv(qp->device, port_num)) {
                        reg->wr.wr.opcode = IB_WR_RDMA_READ;
                } else {
                        reg->wr.wr.opcode = IB_WR_RDMA_READ_WITH_INV;
                        reg->wr.wr.ex.invalidate_rkey = reg->mr->lkey;
                }
                count++;

                remote_addr += reg->sge.length;
                sg_cnt -= nents;
                for (j = 0; j < nents; j++)
                        sg = sg_next(sg);
                prev = reg;
                offset = 0;
        }

        if (prev)
                prev->wr.wr.next = NULL;

        ctx->type = RDMA_RW_MR;
        return count;

out_free:
        while (--i >= 0)
                ib_mr_pool_put(qp, &qp->rdma_mrs, ctx->reg[i].mr);
        kfree(ctx->reg);
out:
        return ret;
}

static int rdma_rw_init_map_wrs(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
                struct scatterlist *sg, u32 sg_cnt, u32 offset,
                u64 remote_addr, u32 rkey, enum dma_data_direction dir)
{
        struct ib_device *dev = qp->pd->device;
        u32 max_sge = dir == DMA_TO_DEVICE ? qp->max_write_sge :
                      qp->max_read_sge;
        struct ib_sge *sge;
        u32 total_len = 0, i, j;

        ctx->nr_ops = DIV_ROUND_UP(sg_cnt, max_sge);

        ctx->map.sges = sge = kcalloc(sg_cnt, sizeof(*sge), GFP_KERNEL);
        if (!ctx->map.sges)
                goto out;

        ctx->map.wrs = kcalloc(ctx->nr_ops, sizeof(*ctx->map.wrs), GFP_KERNEL);
        if (!ctx->map.wrs)
                goto out_free_sges;

        for (i = 0; i < ctx->nr_ops; i++) {
                struct ib_rdma_wr *rdma_wr = &ctx->map.wrs[i];
                u32 nr_sge = min(sg_cnt, max_sge);

                if (dir == DMA_TO_DEVICE)
                        rdma_wr->wr.opcode = IB_WR_RDMA_WRITE;
                else
                        rdma_wr->wr.opcode = IB_WR_RDMA_READ;
                rdma_wr->remote_addr = remote_addr + total_len;
                rdma_wr->rkey = rkey;
                rdma_wr->wr.num_sge = nr_sge;
                rdma_wr->wr.sg_list = sge;

                for (j = 0; j < nr_sge; j++, sg = sg_next(sg)) {
                        sge->addr = ib_sg_dma_address(dev, sg) + offset;
                        sge->length = ib_sg_dma_len(dev, sg) - offset;
                        sge->lkey = qp->pd->local_dma_lkey;

                        total_len += sge->length;
                        sge++;
                        sg_cnt--;
                        offset = 0;
                }

                rdma_wr->wr.next = i + 1 < ctx->nr_ops ?
                        &ctx->map.wrs[i + 1].wr : NULL;
        }

        ctx->type = RDMA_RW_MULTI_WR;
        return ctx->nr_ops;

out_free_sges:
        kfree(ctx->map.sges);
out:
        return -ENOMEM;
}

static int rdma_rw_init_single_wr(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
                struct scatterlist *sg, u32 offset, u64 remote_addr, u32 rkey,
                enum dma_data_direction dir)
{
        struct ib_device *dev = qp->pd->device;
        struct ib_rdma_wr *rdma_wr = &ctx->single.wr;

        ctx->nr_ops = 1;

        ctx->single.sge.lkey = qp->pd->local_dma_lkey;
        ctx->single.sge.addr = ib_sg_dma_address(dev, sg) + offset;
        ctx->single.sge.length = ib_sg_dma_len(dev, sg) - offset;

        memset(rdma_wr, 0, sizeof(*rdma_wr));
        if (dir == DMA_TO_DEVICE)
                rdma_wr->wr.opcode = IB_WR_RDMA_WRITE;
        else
                rdma_wr->wr.opcode = IB_WR_RDMA_READ;
        rdma_wr->wr.sg_list = &ctx->single.sge;
        rdma_wr->wr.num_sge = 1;
        rdma_wr->remote_addr = remote_addr;
        rdma_wr->rkey = rkey;

        ctx->type = RDMA_RW_SINGLE_WR;
        return 1;
}

/**
 * rdma_rw_ctx_init - initialize an RDMA READ/WRITE context
 * @ctx: context to initialize
 * @qp: queue pair to operate on
 * @port_num: port num to which the connection is bound
 * @sg: scatterlist to READ/WRITE from/to
 * @sg_cnt: number of entries in @sg
 * @sg_offset: current byte offset into @sg
 * @remote_addr: remote address to read/write (relative to @rkey)
 * @rkey: remote key to operate on
 * @dir: %DMA_TO_DEVICE for RDMA WRITE, %DMA_FROM_DEVICE for RDMA READ
 *
 * Returns the number of WQEs that will be needed on the send queue if
 * successful, or a negative error code.
 */
int rdma_rw_ctx_init(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u8 port_num,
                struct scatterlist *sg, u32 sg_cnt, u32 sg_offset,
                u64 remote_addr, u32 rkey, enum dma_data_direction dir)
{
        struct ib_device *dev = qp->pd->device;
        int ret;

        ret = ib_dma_map_sg(dev, sg, sg_cnt, dir);
        if (!ret)
                return -ENOMEM;
        sg_cnt = ret;

        /*
         * Skip to the S/G entry that sg_offset falls into:
         */
        for (;;) {
                u32 len = ib_sg_dma_len(dev, sg);

                if (sg_offset < len)
                        break;

                sg = sg_next(sg);
                sg_offset -= len;
                sg_cnt--;
        }

        ret = -EIO;
        if (WARN_ON_ONCE(sg_cnt == 0))
                goto out_unmap_sg;

        if (rdma_rw_io_needs_mr(qp->device, port_num, dir, sg_cnt)) {
                ret = rdma_rw_init_mr_wrs(ctx, qp, port_num, sg, sg_cnt,
                                sg_offset, remote_addr, rkey, dir);
        } else if (sg_cnt > 1) {
                ret = rdma_rw_init_map_wrs(ctx, qp, sg, sg_cnt, sg_offset,
                                remote_addr, rkey, dir);
        } else {
                ret = rdma_rw_init_single_wr(ctx, qp, sg, sg_offset,
                                remote_addr, rkey, dir);
        }

        if (ret < 0)
                goto out_unmap_sg;
        return ret;

out_unmap_sg:
        ib_dma_unmap_sg(dev, sg, sg_cnt, dir);
        return ret;
}
EXPORT_SYMBOL(rdma_rw_ctx_init);
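
/*
 * Example: a minimal sketch of using rdma_rw_ctx_init() for an RDMA READ.
 * Illustrative only: qp, port_num, sgl, sg_cnt, remote_addr and rkey are
 * assumed caller state, and completion handling is elided.
 *
 *	struct rdma_rw_ctx ctx;
 *	int ret;
 *
 *	ret = rdma_rw_ctx_init(&ctx, qp, port_num, sgl, sg_cnt, 0,
 *			remote_addr, rkey, DMA_FROM_DEVICE);
 *	if (ret < 0)
 *		return ret;
 *	 ... ret send queue WQEs will be consumed once the context is posted ...
 */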

/**
 * rdma_rw_ctx_signature_init - initialize an RW context with signature offload
 * @ctx: context to initialize
 * @qp: queue pair to operate on
 * @port_num: port num to which the connection is bound
 * @sg: scatterlist to READ/WRITE from/to
 * @sg_cnt: number of entries in @sg
 * @prot_sg: scatterlist to READ/WRITE protection information from/to
 * @prot_sg_cnt: number of entries in @prot_sg
 * @sig_attrs: signature offloading algorithms
 * @remote_addr: remote address to read/write (relative to @rkey)
 * @rkey: remote key to operate on
 * @dir: %DMA_TO_DEVICE for RDMA WRITE, %DMA_FROM_DEVICE for RDMA READ
 *
 * Returns the number of WQEs that will be needed on the send queue if
 * successful, or a negative error code.
 */
int rdma_rw_ctx_signature_init(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
                u8 port_num, struct scatterlist *sg, u32 sg_cnt,
                struct scatterlist *prot_sg, u32 prot_sg_cnt,
                struct ib_sig_attrs *sig_attrs,
                u64 remote_addr, u32 rkey, enum dma_data_direction dir)
{
        struct ib_device *dev = qp->pd->device;
        u32 pages_per_mr = rdma_rw_fr_page_list_len(qp->pd->device);
        struct ib_rdma_wr *rdma_wr;
        struct ib_send_wr *prev_wr = NULL;
        int count = 0, ret;

        if (sg_cnt > pages_per_mr || prot_sg_cnt > pages_per_mr) {
                pr_err("SG count too large\n");
                return -EINVAL;
        }

        ret = ib_dma_map_sg(dev, sg, sg_cnt, dir);
        if (!ret)
                return -ENOMEM;
        sg_cnt = ret;

        ret = ib_dma_map_sg(dev, prot_sg, prot_sg_cnt, dir);
        if (!ret) {
                ret = -ENOMEM;
                goto out_unmap_sg;
        }
        prot_sg_cnt = ret;

        ctx->type = RDMA_RW_SIG_MR;
        ctx->nr_ops = 1;
        ctx->sig = kcalloc(1, sizeof(*ctx->sig), GFP_KERNEL);
        if (!ctx->sig) {
                ret = -ENOMEM;
                goto out_unmap_prot_sg;
        }

        ret = rdma_rw_init_one_mr(qp, port_num, &ctx->sig->data, sg, sg_cnt, 0);
        if (ret < 0)
                goto out_free_ctx;
        count += ret;
        prev_wr = &ctx->sig->data.reg_wr.wr;

        if (prot_sg_cnt) {
                ret = rdma_rw_init_one_mr(qp, port_num, &ctx->sig->prot,
                                prot_sg, prot_sg_cnt, 0);
                if (ret < 0)
                        goto out_destroy_data_mr;
                count += ret;

                if (ctx->sig->prot.inv_wr.next)
                        prev_wr->next = &ctx->sig->prot.inv_wr;
                else
                        prev_wr->next = &ctx->sig->prot.reg_wr.wr;
                prev_wr = &ctx->sig->prot.reg_wr.wr;
        } else {
                ctx->sig->prot.mr = NULL;
        }

        ctx->sig->sig_mr = ib_mr_pool_get(qp, &qp->sig_mrs);
        if (!ctx->sig->sig_mr) {
                ret = -EAGAIN;
                goto out_destroy_prot_mr;
        }

        if (ctx->sig->sig_mr->need_inval) {
                memset(&ctx->sig->sig_inv_wr, 0, sizeof(ctx->sig->sig_inv_wr));

                ctx->sig->sig_inv_wr.opcode = IB_WR_LOCAL_INV;
                ctx->sig->sig_inv_wr.ex.invalidate_rkey = ctx->sig->sig_mr->rkey;

                prev_wr->next = &ctx->sig->sig_inv_wr;
                prev_wr = &ctx->sig->sig_inv_wr;
        }

        ctx->sig->sig_wr.wr.opcode = IB_WR_REG_SIG_MR;
        ctx->sig->sig_wr.wr.wr_cqe = NULL;
        ctx->sig->sig_wr.wr.sg_list = &ctx->sig->data.sge;
        ctx->sig->sig_wr.wr.num_sge = 1;
        ctx->sig->sig_wr.access_flags = IB_ACCESS_LOCAL_WRITE;
        ctx->sig->sig_wr.sig_attrs = sig_attrs;
        ctx->sig->sig_wr.sig_mr = ctx->sig->sig_mr;
        if (prot_sg_cnt)
                ctx->sig->sig_wr.prot = &ctx->sig->prot.sge;
        prev_wr->next = &ctx->sig->sig_wr.wr;
        prev_wr = &ctx->sig->sig_wr.wr;
        count++;

        ctx->sig->sig_sge.addr = 0;
        ctx->sig->sig_sge.length = ctx->sig->data.sge.length;
        if (sig_attrs->wire.sig_type != IB_SIG_TYPE_NONE)
                ctx->sig->sig_sge.length += ctx->sig->prot.sge.length;

        rdma_wr = &ctx->sig->data.wr;
        rdma_wr->wr.sg_list = &ctx->sig->sig_sge;
        rdma_wr->wr.num_sge = 1;
        rdma_wr->remote_addr = remote_addr;
        rdma_wr->rkey = rkey;
        if (dir == DMA_TO_DEVICE)
                rdma_wr->wr.opcode = IB_WR_RDMA_WRITE;
        else
                rdma_wr->wr.opcode = IB_WR_RDMA_READ;
        prev_wr->next = &rdma_wr->wr;
        prev_wr = &rdma_wr->wr;
        count++;

        return count;

out_destroy_prot_mr:
        if (prot_sg_cnt)
                ib_mr_pool_put(qp, &qp->rdma_mrs, ctx->sig->prot.mr);
out_destroy_data_mr:
        ib_mr_pool_put(qp, &qp->rdma_mrs, ctx->sig->data.mr);
out_free_ctx:
        kfree(ctx->sig);
out_unmap_prot_sg:
        ib_dma_unmap_sg(dev, prot_sg, prot_sg_cnt, dir);
out_unmap_sg:
        ib_dma_unmap_sg(dev, sg, sg_cnt, dir);
        return ret;
}
EXPORT_SYMBOL(rdma_rw_ctx_signature_init);
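
/*
 * Example: a sketch of initializing a signature-offload context.  This is
 * illustrative only: qp, port_num, sgl/sg_cnt, prot_sgl/prot_sg_cnt,
 * remote_addr and rkey are assumed caller state, and sig_attrs is a
 * struct ib_sig_attrs the caller has already filled in with the memory and
 * wire domain settings for its protection scheme.
 *
 *	ret = rdma_rw_ctx_signature_init(&ctx, qp, port_num, sgl, sg_cnt,
 *			prot_sgl, prot_sg_cnt, sig_attrs, remote_addr, rkey,
 *			DMA_TO_DEVICE);
 *	if (ret < 0)
 *		return ret;
 */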

/*
 * Now that we are going to post the WRs we can update the lkey and need_inval
 * state on the MRs. If we were doing this at init time, we would get double
 * or missing invalidations if a context was initialized but not actually
 * posted.
 */
static void rdma_rw_update_lkey(struct rdma_rw_reg_ctx *reg, bool need_inval)
{
        reg->mr->need_inval = need_inval;
        ib_update_fast_reg_key(reg->mr, ib_inc_rkey(reg->mr->lkey));
        reg->reg_wr.key = reg->mr->lkey;
        reg->sge.lkey = reg->mr->lkey;
}

/**
 * rdma_rw_ctx_wrs - return chain of WRs for an RDMA READ or WRITE operation
 * @ctx: context to operate on
 * @qp: queue pair to operate on
 * @port_num: port num to which the connection is bound
 * @cqe: completion queue entry for the last WR
 * @chain_wr: WR to append to the posted chain
 *
 * Return the WR chain for the set of RDMA READ/WRITE operations described by
 * @ctx, as well as any memory registration operations needed. If @chain_wr
 * is non-NULL the WR it points to will be appended to the chain of WRs posted.
 * If @chain_wr is not set, @cqe must be set so that the caller gets a
 * completion notification.
 */
struct ib_send_wr *rdma_rw_ctx_wrs(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
                u8 port_num, struct ib_cqe *cqe, struct ib_send_wr *chain_wr)
{
        struct ib_send_wr *first_wr, *last_wr;
        int i;

        switch (ctx->type) {
        case RDMA_RW_SIG_MR:
                rdma_rw_update_lkey(&ctx->sig->data, true);
                if (ctx->sig->prot.mr)
                        rdma_rw_update_lkey(&ctx->sig->prot, true);

                ctx->sig->sig_mr->need_inval = true;
                ib_update_fast_reg_key(ctx->sig->sig_mr,
                                ib_inc_rkey(ctx->sig->sig_mr->lkey));
                ctx->sig->sig_sge.lkey = ctx->sig->sig_mr->lkey;

                if (ctx->sig->data.inv_wr.next)
                        first_wr = &ctx->sig->data.inv_wr;
                else
                        first_wr = &ctx->sig->data.reg_wr.wr;
                last_wr = &ctx->sig->data.wr.wr;
                break;
        case RDMA_RW_MR:
                for (i = 0; i < ctx->nr_ops; i++) {
                        rdma_rw_update_lkey(&ctx->reg[i],
                                ctx->reg[i].wr.wr.opcode !=
                                        IB_WR_RDMA_READ_WITH_INV);
                }

                if (ctx->reg[0].inv_wr.next)
                        first_wr = &ctx->reg[0].inv_wr;
                else
                        first_wr = &ctx->reg[0].reg_wr.wr;
                last_wr = &ctx->reg[ctx->nr_ops - 1].wr.wr;
                break;
        case RDMA_RW_MULTI_WR:
                first_wr = &ctx->map.wrs[0].wr;
                last_wr = &ctx->map.wrs[ctx->nr_ops - 1].wr;
                break;
        case RDMA_RW_SINGLE_WR:
                first_wr = &ctx->single.wr.wr;
                last_wr = &ctx->single.wr.wr;
                break;
        default:
                BUG();
        }

        if (chain_wr) {
                last_wr->next = chain_wr;
        } else {
                last_wr->wr_cqe = cqe;
                last_wr->send_flags |= IB_SEND_SIGNALED;
        }

        return first_wr;
}
EXPORT_SYMBOL(rdma_rw_ctx_wrs);
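
/*
 * Example: a sketch of chaining another WR behind the R/W operations, as a
 * target might do to send a response after an RDMA WRITE of the data.  This
 * is illustrative only: ctx, qp, port_num and rsp_wr are assumed caller
 * state.  Because a WR is chained, no cqe is passed; the caller gets its
 * completion from the chained WR instead.
 *
 *	struct ib_send_wr *first_wr, *bad_wr;
 *
 *	first_wr = rdma_rw_ctx_wrs(ctx, qp, port_num, NULL, &rsp_wr);
 *	ret = ib_post_send(qp, first_wr, &bad_wr);
 */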

/**
 * rdma_rw_ctx_post - post an RDMA READ or RDMA WRITE operation
 * @ctx: context to operate on
 * @qp: queue pair to operate on
 * @port_num: port num to which the connection is bound
 * @cqe: completion queue entry for the last WR
 * @chain_wr: WR to append to the posted chain
 *
 * Post the set of RDMA READ/WRITE operations described by @ctx, as well as
 * any memory registration operations needed. If @chain_wr is non-NULL the
 * WR it points to will be appended to the chain of WRs posted. If @chain_wr
 * is not set, @cqe must be set so that the caller gets a completion
 * notification.
 */
int rdma_rw_ctx_post(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u8 port_num,
                struct ib_cqe *cqe, struct ib_send_wr *chain_wr)
{
        struct ib_send_wr *first_wr, *bad_wr;

        first_wr = rdma_rw_ctx_wrs(ctx, qp, port_num, cqe, chain_wr);
        return ib_post_send(qp, first_wr, &bad_wr);
}
EXPORT_SYMBOL(rdma_rw_ctx_post);
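
/*
 * Example: the typical life cycle of a context, as a sketch.  Illustrative
 * only: the variables are assumed caller state, and cqe is an ib_cqe whose
 * ->done handler runs when the last WR completes.  rdma_rw_ctx_destroy()
 * must not be called until that completion has been observed.
 *
 *	ret = rdma_rw_ctx_init(&ctx, qp, port_num, sgl, sg_cnt, 0,
 *			remote_addr, rkey, dir);
 *	if (ret < 0)
 *		return ret;
 *	ret = rdma_rw_ctx_post(&ctx, qp, port_num, &cqe, NULL);
 *	if (ret) {
 *		rdma_rw_ctx_destroy(&ctx, qp, port_num, sgl, sg_cnt, dir);
 *		return ret;
 *	}
 *	 ... wait for the completion referenced by cqe, then ...
 *	rdma_rw_ctx_destroy(&ctx, qp, port_num, sgl, sg_cnt, dir);
 */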

/**
 * rdma_rw_ctx_destroy - release all resources allocated by rdma_rw_ctx_init
 * @ctx: context to release
 * @qp: queue pair to operate on
 * @port_num: port num to which the connection is bound
 * @sg: scatterlist that was used for the READ/WRITE
 * @sg_cnt: number of entries in @sg
 * @dir: %DMA_TO_DEVICE for RDMA WRITE, %DMA_FROM_DEVICE for RDMA READ
 */
void rdma_rw_ctx_destroy(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u8 port_num,
                struct scatterlist *sg, u32 sg_cnt, enum dma_data_direction dir)
{
        int i;

        switch (ctx->type) {
        case RDMA_RW_MR:
                for (i = 0; i < ctx->nr_ops; i++)
                        ib_mr_pool_put(qp, &qp->rdma_mrs, ctx->reg[i].mr);
                kfree(ctx->reg);
                break;
        case RDMA_RW_MULTI_WR:
                kfree(ctx->map.wrs);
                kfree(ctx->map.sges);
                break;
        case RDMA_RW_SINGLE_WR:
                break;
        default:
                BUG();
                break;
        }

        ib_dma_unmap_sg(qp->pd->device, sg, sg_cnt, dir);
}
EXPORT_SYMBOL(rdma_rw_ctx_destroy);

/**
 * rdma_rw_ctx_destroy_signature - release all resources allocated by
 *	rdma_rw_ctx_signature_init
 * @ctx: context to release
 * @qp: queue pair to operate on
 * @port_num: port num to which the connection is bound
 * @sg: scatterlist that was used for the READ/WRITE
 * @sg_cnt: number of entries in @sg
 * @prot_sg: scatterlist that was used for the READ/WRITE of the PI
 * @prot_sg_cnt: number of entries in @prot_sg
 * @dir: %DMA_TO_DEVICE for RDMA WRITE, %DMA_FROM_DEVICE for RDMA READ
 */
void rdma_rw_ctx_destroy_signature(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
                u8 port_num, struct scatterlist *sg, u32 sg_cnt,
                struct scatterlist *prot_sg, u32 prot_sg_cnt,
                enum dma_data_direction dir)
{
        if (WARN_ON_ONCE(ctx->type != RDMA_RW_SIG_MR))
                return;

        ib_mr_pool_put(qp, &qp->rdma_mrs, ctx->sig->data.mr);
        ib_dma_unmap_sg(qp->pd->device, sg, sg_cnt, dir);

        if (ctx->sig->prot.mr) {
                ib_mr_pool_put(qp, &qp->rdma_mrs, ctx->sig->prot.mr);
                ib_dma_unmap_sg(qp->pd->device, prot_sg, prot_sg_cnt, dir);
        }

        ib_mr_pool_put(qp, &qp->sig_mrs, ctx->sig->sig_mr);
        kfree(ctx->sig);
}
EXPORT_SYMBOL(rdma_rw_ctx_destroy_signature);

void rdma_rw_init_qp(struct ib_device *dev, struct ib_qp_init_attr *attr)
{
        u32 factor;

        WARN_ON_ONCE(attr->port_num == 0);

        /*
         * Each context needs at least one RDMA READ or WRITE WR.
         *
         * For some hardware we might need more; eventually we should ask the
         * HCA driver for a multiplier here.
         */
        factor = 1;

        /*
         * If the device needs MRs to perform RDMA READ or WRITE operations,
         * we'll need two additional WRs per context for the registration and
         * the invalidation.
         */
        if (attr->create_flags & IB_QP_CREATE_SIGNATURE_EN)
                factor += 6;    /* (inv + reg) * (data + prot + sig) */
        else if (rdma_rw_can_use_mr(dev, attr->port_num))
                factor += 2;    /* inv + reg */

        attr->cap.max_send_wr += factor * attr->cap.max_rdma_ctxs;

        /*
         * But maybe we were just too high in the sky and the device doesn't
         * even support all we need, and we'll have to live with what we get...
         */
        attr->cap.max_send_wr =
                min_t(u32, attr->cap.max_send_wr, dev->attrs.max_qp_wr);
}
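
/*
 * Example: how a ULP sizes a QP for this API, as a sketch.  Illustrative
 * only: pd, cq and port_num are assumed caller state, and the remaining
 * caps and attributes are elided.  The ULP just sets max_rdma_ctxs (and
 * port_num); the extra send WRs and the MR pools are then accounted for
 * by rdma_rw_init_qp() and rdma_rw_init_mrs() during QP creation:
 *
 *	struct ib_qp_init_attr init_attr = { };
 *
 *	init_attr.cap.max_rdma_ctxs = 128;
 *	init_attr.port_num = port_num;
 *	init_attr.send_cq = cq;
 *	init_attr.recv_cq = cq;
 *	init_attr.qp_type = IB_QPT_RC;
 *	 ... set the other caps ...
 *	qp = ib_create_qp(pd, &init_attr);
 */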

int rdma_rw_init_mrs(struct ib_qp *qp, struct ib_qp_init_attr *attr)
{
        struct ib_device *dev = qp->pd->device;
        u32 nr_mrs = 0, nr_sig_mrs = 0;
        int ret = 0;

        if (attr->create_flags & IB_QP_CREATE_SIGNATURE_EN) {
                nr_sig_mrs = attr->cap.max_rdma_ctxs;
                nr_mrs = attr->cap.max_rdma_ctxs * 2;
        } else if (rdma_rw_can_use_mr(dev, attr->port_num)) {
                nr_mrs = attr->cap.max_rdma_ctxs;
        }

        if (nr_mrs) {
                ret = ib_mr_pool_init(qp, &qp->rdma_mrs, nr_mrs,
                                IB_MR_TYPE_MEM_REG,
                                rdma_rw_fr_page_list_len(dev));
                if (ret) {
                        pr_err("%s: failed to allocate %u MRs\n",
                                __func__, nr_mrs);
                        return ret;
                }
        }

        if (nr_sig_mrs) {
                ret = ib_mr_pool_init(qp, &qp->sig_mrs, nr_sig_mrs,
                                IB_MR_TYPE_SIGNATURE, 2);
                if (ret) {
                        pr_err("%s: failed to allocate %u SIG MRs\n",
                                __func__, nr_sig_mrs);
                        goto out_free_rdma_mrs;
                }
        }

        return 0;

out_free_rdma_mrs:
        ib_mr_pool_destroy(qp, &qp->rdma_mrs);
        return ret;
}

void rdma_rw_cleanup_mrs(struct ib_qp *qp)
{
        ib_mr_pool_destroy(qp, &qp->sig_mrs);
        ib_mr_pool_destroy(qp, &qp->rdma_mrs);
}