/*
 * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <rdma/ib_umem.h>
#include <rdma/ib_smi.h>

#include "qib.h"

/* Fast memory region */
struct qib_fmr {
	struct ib_fmr ibfmr;
	struct qib_mregion mr;		/* must be last */
};

static inline struct qib_fmr *to_ifmr(struct ib_fmr *ibfmr)
{
	return container_of(ibfmr, struct qib_fmr, ibfmr);
}

static int init_qib_mregion(struct qib_mregion *mr, struct ib_pd *pd,
	int count)
{
	int m, i = 0;
	int rval = 0;

	m = (count + QIB_SEGSZ - 1) / QIB_SEGSZ;
	for (; i < m; i++) {
		mr->map[i] = kzalloc(sizeof(*mr->map[0]), GFP_KERNEL);
		if (!mr->map[i])
			goto bail;
	}
	mr->mapsz = m;
	init_completion(&mr->comp);
	/* count returning the ptr to user */
	atomic_set(&mr->refcount, 1);
	mr->pd = pd;
	mr->max_segs = count;
out:
	return rval;
bail:
	while (i)
		kfree(mr->map[--i]);
	rval = -ENOMEM;
	goto out;
}

static void deinit_qib_mregion(struct qib_mregion *mr)
{
	int i = mr->mapsz;

	mr->mapsz = 0;
	while (i)
		kfree(mr->map[--i]);
}
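
/*
 * Worked example (illustrative arithmetic only, not new driver logic):
 * for count = 2 * QIB_SEGSZ + 1, init_qib_mregion() computes
 * m = (2 * QIB_SEGSZ + 1 + QIB_SEGSZ - 1) / QIB_SEGSZ = 3 chunk
 * pointers, and deinit_qib_mregion() later frees exactly mr->mapsz of
 * them, so allocation and teardown always pair up.
 */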

/**
 * qib_get_dma_mr - get a DMA memory region
 * @pd: protection domain for this memory region
 * @acc: access flags
 *
 * Returns the memory region on success, otherwise returns an errno.
 * Note that all DMA addresses should be created via the
 * struct ib_dma_mapping_ops functions (see qib_dma.c).
 */
struct ib_mr *qib_get_dma_mr(struct ib_pd *pd, int acc)
{
	struct qib_mr *mr = NULL;
	struct ib_mr *ret;
	int rval;

	if (to_ipd(pd)->user) {
		ret = ERR_PTR(-EPERM);
		goto bail;
	}

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr) {
		ret = ERR_PTR(-ENOMEM);
		goto bail;
	}

	rval = init_qib_mregion(&mr->mr, pd, 0);
	if (rval) {
		ret = ERR_PTR(rval);
		goto bail;
	}

	rval = qib_alloc_lkey(&mr->mr, 1);
	if (rval) {
		ret = ERR_PTR(rval);
		goto bail_mregion;
	}

	mr->mr.access_flags = acc;
	ret = &mr->ibmr;
done:
	return ret;

bail_mregion:
	deinit_qib_mregion(&mr->mr);
bail:
	kfree(mr);
	goto done;
}
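
/*
 * Usage sketch (assumed caller-side flow, not part of this file): a
 * kernel ULP reaches the function above through the core verb, e.g.
 *
 *	mr = ib_get_dma_mr(pd, IB_ACCESS_LOCAL_WRITE);
 *	if (IS_ERR(mr))
 *		return PTR_ERR(mr);
 *
 * The resulting lkey covers kernel DMA addresses, which is why a
 * userspace PD is rejected with -EPERM above.
 */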

static struct qib_mr *alloc_mr(int count, struct ib_pd *pd)
{
	struct qib_mr *mr;
	int rval = -ENOMEM;
	int m;

	/* Allocate struct plus pointers to first level page tables. */
	m = (count + QIB_SEGSZ - 1) / QIB_SEGSZ;
	mr = kzalloc(sizeof(*mr) + m * sizeof(mr->mr.map[0]), GFP_KERNEL);
	if (!mr)
		goto bail;

	rval = init_qib_mregion(&mr->mr, pd, count);
	if (rval)
		goto bail;
	/*
	 * ib_reg_phys_mr() will initialize mr->ibmr except for
	 * lkey and rkey.
	 */
	rval = qib_alloc_lkey(&mr->mr, 0);
	if (rval)
		goto bail_mregion;
	mr->ibmr.lkey = mr->mr.lkey;
	mr->ibmr.rkey = mr->mr.lkey;
done:
	return mr;

bail_mregion:
	deinit_qib_mregion(&mr->mr);
bail:
	kfree(mr);
	mr = ERR_PTR(rval);
	goto done;
}

/**
 * qib_reg_phys_mr - register a physical memory region
 * @pd: protection domain for this memory region
 * @buffer_list: pointer to the list of physical buffers to register
 * @num_phys_buf: the number of physical buffers to register
 * @acc: access flags for this memory region
 * @iova_start: the starting address passed over IB which maps to this MR
 *
 * Returns the memory region on success, otherwise returns an errno.
 */
struct ib_mr *qib_reg_phys_mr(struct ib_pd *pd,
			      struct ib_phys_buf *buffer_list,
			      int num_phys_buf, int acc, u64 *iova_start)
{
	struct qib_mr *mr;
	int n, m, i;
	struct ib_mr *ret;

	mr = alloc_mr(num_phys_buf, pd);
	if (IS_ERR(mr)) {
		ret = (struct ib_mr *)mr;
		goto bail;
	}

	mr->mr.user_base = *iova_start;
	mr->mr.iova = *iova_start;
	mr->mr.access_flags = acc;

	m = 0;
	n = 0;
	for (i = 0; i < num_phys_buf; i++) {
		mr->mr.map[m]->segs[n].vaddr = (void *) buffer_list[i].addr;
		mr->mr.map[m]->segs[n].length = buffer_list[i].size;
		mr->mr.length += buffer_list[i].size;
		n++;
		if (n == QIB_SEGSZ) {
			m++;
			n = 0;
		}
	}

	ret = &mr->ibmr;

bail:
	return ret;
}
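
/*
 * The (m, n) counters above simply linearize buffer_list[] into the
 * chunked segment array: buffer i lands in
 * mr->mr.map[i / QIB_SEGSZ]->segs[i % QIB_SEGSZ].
 */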

/**
 * qib_reg_user_mr - register a userspace memory region
 * @pd: protection domain for this memory region
 * @start: starting userspace address
 * @length: length of region to register
 * @virt_addr: virtual address passed over IB which maps to this MR
 * @mr_access_flags: access flags for this memory region
 * @udata: unused by the QLogic_IB driver
 *
 * Returns the memory region on success, otherwise returns an errno.
 */
struct ib_mr *qib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
			      u64 virt_addr, int mr_access_flags,
			      struct ib_udata *udata)
{
	struct qib_mr *mr;
	struct ib_umem *umem;
	struct scatterlist *sg;
	int n, m, entry;
	struct ib_mr *ret;

	if (length == 0) {
		ret = ERR_PTR(-EINVAL);
		goto bail;
	}

	umem = ib_umem_get(pd->uobject->context, start, length,
			   mr_access_flags, 0);
	if (IS_ERR(umem))
		return (void *) umem;

	n = umem->nmap;

	mr = alloc_mr(n, pd);
	if (IS_ERR(mr)) {
		ret = (struct ib_mr *)mr;
		ib_umem_release(umem);
		goto bail;
	}

	mr->mr.user_base = start;
	mr->mr.iova = virt_addr;
	mr->mr.length = length;
	mr->mr.offset = ib_umem_offset(umem);
	mr->mr.access_flags = mr_access_flags;
	mr->umem = umem;

	if (is_power_of_2(umem->page_size))
		mr->mr.page_shift = ilog2(umem->page_size);
	m = 0;
	n = 0;
	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
		void *vaddr;

		vaddr = page_address(sg_page(sg));
		if (!vaddr) {
			ret = ERR_PTR(-EINVAL);
			goto bail;
		}
		mr->mr.map[m]->segs[n].vaddr = vaddr;
		mr->mr.map[m]->segs[n].length = umem->page_size;
		n++;
		if (n == QIB_SEGSZ) {
			m++;
			n = 0;
		}
	}
	ret = &mr->ibmr;

bail:
	return ret;
}
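
/*
 * Usage sketch (assumed typical flow, not part of this file): userspace
 * reaches the function above via libibverbs, e.g.
 *
 *	struct ibv_mr *mr = ibv_reg_mr(pd, buf, len,
 *				       IBV_ACCESS_LOCAL_WRITE |
 *				       IBV_ACCESS_REMOTE_WRITE);
 *
 * The request enters the kernel through ib_uverbs; ib_umem_get() above
 * then pins the pages before they are loaded into the segment array.
 */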

/**
 * qib_dereg_mr - unregister and free a memory region
 * @ibmr: the memory region to free
 *
 * Returns 0 on success.
 *
 * Note that this is called to free MRs created by qib_get_dma_mr(),
 * qib_reg_phys_mr(), qib_reg_user_mr(), or qib_alloc_mr().
 */
int qib_dereg_mr(struct ib_mr *ibmr)
{
	struct qib_mr *mr = to_imr(ibmr);
	int ret = 0;
	unsigned long timeout;

	kfree(mr->pages);
	qib_free_lkey(&mr->mr);

	qib_put_mr(&mr->mr); /* will set completion if last */
	timeout = wait_for_completion_timeout(&mr->mr.comp,
		5 * HZ);
	if (!timeout) {
		qib_get_mr(&mr->mr);
		ret = -EBUSY;
		goto out;
	}
	deinit_qib_mregion(&mr->mr);
	if (mr->umem)
		ib_umem_release(mr->umem);
	kfree(mr);
out:
	return ret;
}
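
/*
 * Teardown note: qib_put_mr() arranges for mr->mr.comp to complete only
 * once the last reference is gone, so the 5 * HZ wait above is a bounded
 * grace period for in-flight users; on timeout the reference is taken
 * back with qib_get_mr() and -EBUSY tells the caller the MR is still
 * live.
 */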

/*
 * Allocate a memory region usable with the
 * IB_WR_REG_MR send work request.
 *
 * Return the memory region on success, otherwise return an errno.
 */
struct ib_mr *qib_alloc_mr(struct ib_pd *pd,
			   enum ib_mr_type mr_type,
			   u32 max_num_sg)
{
	struct qib_mr *mr;

	if (mr_type != IB_MR_TYPE_MEM_REG)
		return ERR_PTR(-EINVAL);

	mr = alloc_mr(max_num_sg, pd);
	if (IS_ERR(mr))
		return (struct ib_mr *)mr;

	mr->pages = kcalloc(max_num_sg, sizeof(u64), GFP_KERNEL);
	if (!mr->pages)
		goto err;

	return &mr->ibmr;

err:
	qib_dereg_mr(&mr->ibmr);
	return ERR_PTR(-ENOMEM);
}

static int qib_set_page(struct ib_mr *ibmr, u64 addr)
{
	struct qib_mr *mr = to_imr(ibmr);

	if (unlikely(mr->npages == mr->mr.max_segs))
		return -ENOMEM;

	mr->pages[mr->npages++] = addr;

	return 0;
}

int qib_map_mr_sg(struct ib_mr *ibmr,
		  struct scatterlist *sg,
		  int sg_nents)
{
	struct qib_mr *mr = to_imr(ibmr);

	mr->npages = 0;

	return ib_sg_to_pages(ibmr, sg, sg_nents, qib_set_page);
}
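
/*
 * FRWR usage sketch (assumed consumer-side flow; the ib_map_mr_sg()
 * signature has varied across kernel versions):
 *
 *	n = ib_map_mr_sg(ibmr, sgl, sg_nents, PAGE_SIZE);
 *	if (n < sg_nents)
 *		return -EINVAL;		(or fall back to a smaller mapping)
 *	... post an IB_WR_REG_MR work request carrying ibmr ...
 *
 * ib_sg_to_pages() calls qib_set_page() once per page, filling
 * mr->pages for the registration work request.
 */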

/**
 * qib_alloc_fmr - allocate a fast memory region
 * @pd: the protection domain for this memory region
 * @mr_access_flags: access flags for this memory region
 * @fmr_attr: fast memory region attributes
 *
 * Returns the memory region on success, otherwise returns an errno.
 */
struct ib_fmr *qib_alloc_fmr(struct ib_pd *pd, int mr_access_flags,
			     struct ib_fmr_attr *fmr_attr)
{
	struct qib_fmr *fmr;
	int m;
	struct ib_fmr *ret;
	int rval = -ENOMEM;

	/* Allocate struct plus pointers to first level page tables. */
	m = (fmr_attr->max_pages + QIB_SEGSZ - 1) / QIB_SEGSZ;
	fmr = kzalloc(sizeof(*fmr) + m * sizeof(fmr->mr.map[0]), GFP_KERNEL);
	if (!fmr)
		goto bail;

	rval = init_qib_mregion(&fmr->mr, pd, fmr_attr->max_pages);
	if (rval)
		goto bail;

	/*
	 * ib_alloc_fmr() will initialize fmr->ibfmr except for lkey &
	 * rkey.
	 */
	rval = qib_alloc_lkey(&fmr->mr, 0);
	if (rval)
		goto bail_mregion;
	fmr->ibfmr.rkey = fmr->mr.lkey;
	fmr->ibfmr.lkey = fmr->mr.lkey;
	/*
	 * Resources are allocated but no valid mapping (RKEY can't be
	 * used).
	 */
	fmr->mr.access_flags = mr_access_flags;
	fmr->mr.max_segs = fmr_attr->max_pages;
	fmr->mr.page_shift = fmr_attr->page_shift;

	ret = &fmr->ibfmr;
done:
	return ret;

bail_mregion:
	deinit_qib_mregion(&fmr->mr);
bail:
	kfree(fmr);
	ret = ERR_PTR(rval);
	goto done;
}
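
/*
 * FMR lifetime sketch (assumed consumer-side flow): users typically
 * cycle ib_alloc_fmr() -> ib_map_phys_fmr() -> ib_unmap_fmr() ->
 * ib_dealloc_fmr(), often via the ib_fmr_pool helpers, remapping the
 * same region instead of re-registering it each time.
 */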

/**
 * qib_map_phys_fmr - set up a fast memory region
 * @ibfmr: the fast memory region to set up
 * @page_list: the list of pages to associate with the fast memory region
 * @list_len: the number of pages to associate with the fast memory region
 * @iova: the virtual address of the start of the fast memory region
 *
 * This may be called from interrupt context.
 */
int qib_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
		     int list_len, u64 iova)
{
	struct qib_fmr *fmr = to_ifmr(ibfmr);
	struct qib_lkey_table *rkt;
	unsigned long flags;
	int m, n, i;
	u32 ps;
	int ret;

	i = atomic_read(&fmr->mr.refcount);
	if (i > 2)
		return -EBUSY;

	if (list_len > fmr->mr.max_segs) {
		ret = -EINVAL;
		goto bail;
	}
	rkt = &to_idev(ibfmr->device)->lk_table;
	spin_lock_irqsave(&rkt->lock, flags);
	fmr->mr.user_base = iova;
	fmr->mr.iova = iova;
	ps = 1 << fmr->mr.page_shift;
	fmr->mr.length = list_len * ps;
	m = 0;
	n = 0;
	for (i = 0; i < list_len; i++) {
		fmr->mr.map[m]->segs[n].vaddr = (void *) page_list[i];
		fmr->mr.map[m]->segs[n].length = ps;
		if (++n == QIB_SEGSZ) {
			m++;
			n = 0;
		}
	}
	spin_unlock_irqrestore(&rkt->lock, flags);
	ret = 0;

bail:
	return ret;
}
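
/*
 * Note on the -EBUSY check above (assumed rationale): the mregion holds
 * one reference from init_qib_mregion() and one from the lkey table, so
 * a refcount above 2 suggests the FMR is still referenced by in-flight
 * work and must not be remapped.
 */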

/**
 * qib_unmap_fmr - unmap fast memory regions
 * @fmr_list: the list of fast memory regions to unmap
 *
 * Returns 0 on success.
 */
int qib_unmap_fmr(struct list_head *fmr_list)
{
	struct qib_fmr *fmr;
	struct qib_lkey_table *rkt;
	unsigned long flags;

	list_for_each_entry(fmr, fmr_list, ibfmr.list) {
		rkt = &to_idev(fmr->ibfmr.device)->lk_table;
		spin_lock_irqsave(&rkt->lock, flags);
		fmr->mr.user_base = 0;
		fmr->mr.iova = 0;
		fmr->mr.length = 0;
		spin_unlock_irqrestore(&rkt->lock, flags);
	}
	return 0;
}

/**
 * qib_dealloc_fmr - deallocate a fast memory region
 * @ibfmr: the fast memory region to deallocate
 *
 * Returns 0 on success.
 */
int qib_dealloc_fmr(struct ib_fmr *ibfmr)
{
	struct qib_fmr *fmr = to_ifmr(ibfmr);
	int ret = 0;
	unsigned long timeout;

	qib_free_lkey(&fmr->mr);
	qib_put_mr(&fmr->mr); /* will set completion if last */
	timeout = wait_for_completion_timeout(&fmr->mr.comp,
		5 * HZ);
	if (!timeout) {
		qib_get_mr(&fmr->mr);
		ret = -EBUSY;
		goto out;
	}
	deinit_qib_mregion(&fmr->mr);
	kfree(fmr);
out:
	return ret;
}
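
/*
 * Invoked via call_rcu() when the last reference to a qib_mregion is
 * dropped (see qib_put_mr()); completing mr->comp wakes the waiters in
 * qib_dereg_mr() and qib_dealloc_fmr() above.
 */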
void mr_rcu_callback(struct rcu_head *list)
{
	struct qib_mregion *mr = container_of(list, struct qib_mregion, list);

	complete(&mr->comp);
}