/*
 * Copyright (c) 2016 Hisilicon Limited.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/platform_device.h>
#include <linux/vmalloc.h>
#include <rdma/ib_umem.h>
#include "hns_roce_device.h"
#include "hns_roce_cmd.h"
#include "hns_roce_hem.h"

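/*
 * An MR key is the hardware MPT index rotated left by 8 bits within 32 bits,
 * which leaves the low byte free to act as a key tag; key_to_hw_index()
 * performs the inverse rotation.
 */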
static u32 hw_index_to_key(unsigned long ind)
{
        return (u32)(ind >> 24) | (ind << 8);
}

unsigned long key_to_hw_index(u32 key)
{
        return (key << 24) | (key >> 8);
}
EXPORT_SYMBOL_GPL(key_to_hw_index);

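/*
 * SW2HW_MPT hands a software-built MPT entry to the hardware; HW2SW_MPT
 * reclaims it. Passing a NULL mailbox to the HW2SW path (see
 * hns_roce_mr_free()) sets the command modifier so the context is dropped
 * rather than copied back; this reading is inferred from how the arguments
 * are wired up below, not from firmware documentation.
 */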
static int hns_roce_sw2hw_mpt(struct hns_roce_dev *hr_dev,
                              struct hns_roce_cmd_mailbox *mailbox,
                              unsigned long mpt_index)
{
        return hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, mpt_index, 0,
                                 HNS_ROCE_CMD_SW2HW_MPT,
                                 HNS_ROCE_CMD_TIMEOUT_MSECS);
}

int hns_roce_hw2sw_mpt(struct hns_roce_dev *hr_dev,
                       struct hns_roce_cmd_mailbox *mailbox,
                       unsigned long mpt_index)
{
        return hns_roce_cmd_mbox(hr_dev, 0, mailbox ? mailbox->dma : 0,
                                 mpt_index, !mailbox, HNS_ROCE_CMD_HW2SW_MPT,
                                 HNS_ROCE_CMD_TIMEOUT_MSECS);
}
EXPORT_SYMBOL_GPL(hns_roce_hw2sw_mpt);

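/*
 * MTT segments are managed with a classic binary buddy allocator: bits[o]
 * tracks which blocks of 2^o segments are free and num_free[o] counts them.
 * An allocation takes the smallest free block that fits and splits it down
 * to the requested order, returning the buddy of each split to the free set.
 */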
static int hns_roce_buddy_alloc(struct hns_roce_buddy *buddy, int order,
                                unsigned long *seg)
{
        int o;
        u32 m;

        spin_lock(&buddy->lock);

        for (o = order; o <= buddy->max_order; ++o) {
                if (buddy->num_free[o]) {
                        m = 1 << (buddy->max_order - o);
                        *seg = find_first_bit(buddy->bits[o], m);
                        if (*seg < m)
                                goto found;
                }
        }
        spin_unlock(&buddy->lock);
        return -1;

 found:
        clear_bit(*seg, buddy->bits[o]);
        --buddy->num_free[o];

        while (o > order) {
                --o;
                *seg <<= 1;
                set_bit(*seg ^ 1, buddy->bits[o]);
                ++buddy->num_free[o];
        }

        spin_unlock(&buddy->lock);

        *seg <<= order;
        return 0;
}

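/*
 * Freeing walks back up the orders: as long as the buddy block (seg ^ 1) is
 * also free, the two are merged and the combined block is freed one order
 * higher.
 */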
static void hns_roce_buddy_free(struct hns_roce_buddy *buddy, unsigned long seg,
                                int order)
{
        seg >>= order;

        spin_lock(&buddy->lock);

        while (test_bit(seg ^ 1, buddy->bits[order])) {
                clear_bit(seg ^ 1, buddy->bits[order]);
                --buddy->num_free[order];
                seg >>= 1;
                ++order;
        }

        set_bit(seg, buddy->bits[order]);
        ++buddy->num_free[order];

        spin_unlock(&buddy->lock);
}

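/*
 * At order i there are 2^(max_order - i) possible blocks, so each bitmap is
 * sized accordingly; the low-order bitmaps can get large, hence the fallback
 * from kcalloc() to vzalloc() below. Initially the single block at max_order
 * is the only free block.
 */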
static int hns_roce_buddy_init(struct hns_roce_buddy *buddy, int max_order)
{
        int i, s;

        buddy->max_order = max_order;
        spin_lock_init(&buddy->lock);
        buddy->bits = kcalloc(buddy->max_order + 1,
                              sizeof(*buddy->bits),
                              GFP_KERNEL);
        buddy->num_free = kcalloc(buddy->max_order + 1,
                                  sizeof(*buddy->num_free),
                                  GFP_KERNEL);
        if (!buddy->bits || !buddy->num_free)
                goto err_out;

        for (i = 0; i <= buddy->max_order; ++i) {
                s = BITS_TO_LONGS(1 << (buddy->max_order - i));
                buddy->bits[i] = kcalloc(s, sizeof(long), GFP_KERNEL |
                                         __GFP_NOWARN);
                if (!buddy->bits[i]) {
                        buddy->bits[i] = vzalloc(s * sizeof(long));
                        if (!buddy->bits[i])
                                goto err_out_free;
                }
        }

        set_bit(0, buddy->bits[buddy->max_order]);
        buddy->num_free[buddy->max_order] = 1;

        return 0;

err_out_free:
        for (i = 0; i <= buddy->max_order; ++i)
                kvfree(buddy->bits[i]);

err_out:
        kfree(buddy->bits);
        kfree(buddy->num_free);
        return -ENOMEM;
}

static void hns_roce_buddy_cleanup(struct hns_roce_buddy *buddy)
{
        int i;

        for (i = 0; i <= buddy->max_order; ++i)
                kvfree(buddy->bits[i]);

        kfree(buddy->bits);
        kfree(buddy->num_free);
}

static int hns_roce_alloc_mtt_range(struct hns_roce_dev *hr_dev, int order,
                                    unsigned long *seg)
{
        struct hns_roce_mr_table *mr_table = &hr_dev->mr_table;
        int ret = 0;

        ret = hns_roce_buddy_alloc(&mr_table->mtt_buddy, order, seg);
        if (ret == -1)
                return -1;

        if (hns_roce_table_get_range(hr_dev, &mr_table->mtt_table, *seg,
                                     *seg + (1 << order) - 1)) {
                hns_roce_buddy_free(&mr_table->mtt_buddy, *seg, order);
                return -1;
        }

        return 0;
}

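/*
 * mtt->order is the buddy order in units of MTT segments: npages is rounded
 * up to the next power-of-two multiple of HNS_ROCE_MTT_ENTRY_PER_SEG.
 * Assuming 8 entries per segment, for example, 100 pages yield order 4
 * (16 segments, 128 entries).
 */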
int hns_roce_mtt_init(struct hns_roce_dev *hr_dev, int npages, int page_shift,
                      struct hns_roce_mtt *mtt)
{
        int ret = 0;
        int i;

        /* A page count of zero corresponds to a DMA memory registration */
        if (!npages) {
                mtt->order = -1;
                mtt->page_shift = HNS_ROCE_HEM_PAGE_SHIFT;
                return 0;
        }

        /* Note: a page_shift of zero means a fast memory registration */
        mtt->page_shift = page_shift;

        /* Compute the buddy order needed to cover npages MTT entries */
        for (mtt->order = 0, i = HNS_ROCE_MTT_ENTRY_PER_SEG; i < npages;
             i <<= 1)
                ++mtt->order;

        /* Allocate the MTT segment range */
        ret = hns_roce_alloc_mtt_range(hr_dev, mtt->order, &mtt->first_seg);
        if (ret == -1)
                return -ENOMEM;

        return 0;
}

void hns_roce_mtt_cleanup(struct hns_roce_dev *hr_dev, struct hns_roce_mtt *mtt)
{
        struct hns_roce_mr_table *mr_table = &hr_dev->mr_table;

        if (mtt->order < 0)
                return;

        hns_roce_buddy_free(&mr_table->mtt_buddy, mtt->first_seg, mtt->order);
        hns_roce_table_put_range(hr_dev, &mr_table->mtt_table, mtt->first_seg,
                                 mtt->first_seg + (1 << mtt->order) - 1);
}
EXPORT_SYMBOL_GPL(hns_roce_mtt_cleanup);

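/*
 * A size of ~0ull denotes a DMA MR covering the whole address space, which
 * needs no PBL; otherwise a PBL holding one u64 page address per page is
 * allocated coherently so the hardware can read the addresses directly.
 */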
static int hns_roce_mr_alloc(struct hns_roce_dev *hr_dev, u32 pd, u64 iova,
                             u64 size, u32 access, int npages,
                             struct hns_roce_mr *mr)
{
        struct device *dev = hr_dev->dev;
        unsigned long index = 0;
        int ret = 0;

        /* Allocate a key for mr from mr_table */
        ret = hns_roce_bitmap_alloc(&hr_dev->mr_table.mtpt_bitmap, &index);
        if (ret == -1)
                return -ENOMEM;

        mr->iova = iova;                        /* MR va starting addr */
        mr->size = size;                        /* MR addr range */
        mr->pd = pd;                            /* PD num this MR belongs to */
        mr->access = access;                    /* MR access permissions */
        mr->enabled = 0;                        /* MR active status */
        mr->key = hw_index_to_key(index);       /* MR key */

        if (size == ~0ull) {
                mr->type = MR_TYPE_DMA;
                mr->pbl_buf = NULL;
                mr->pbl_dma_addr = 0;
        } else {
                mr->type = MR_TYPE_MR;
                mr->pbl_buf = dma_alloc_coherent(dev, npages * 8,
                                                 &(mr->pbl_dma_addr),
                                                 GFP_KERNEL);
                if (!mr->pbl_buf)
                        return -ENOMEM;
        }

        return 0;
}

static void hns_roce_mr_free(struct hns_roce_dev *hr_dev,
                             struct hns_roce_mr *mr)
{
        struct device *dev = hr_dev->dev;
        int npages = 0;
        int ret;

        if (mr->enabled) {
                ret = hns_roce_hw2sw_mpt(hr_dev, NULL, key_to_hw_index(mr->key)
                                         & (hr_dev->caps.num_mtpts - 1));
                if (ret)
                        dev_warn(dev, "HW2SW_MPT failed (%d)\n", ret);
        }

        if (mr->size != ~0ULL) {
                npages = ib_umem_page_count(mr->umem);
                dma_free_coherent(dev, (unsigned int)(npages * 8), mr->pbl_buf,
                                  mr->pbl_dma_addr);
        }

        hns_roce_bitmap_free(&hr_dev->mr_table.mtpt_bitmap,
                             key_to_hw_index(mr->key), BITMAP_NO_RR);
}

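/*
 * Enabling an MR: reserve the HEM backing for the MPT entry, fill a mailbox
 * buffer with the hardware-specific MPT layout (write_mtpt is a per-hw-version
 * hook), then hand the entry to the hardware with SW2HW_MPT.
 */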
static int hns_roce_mr_enable(struct hns_roce_dev *hr_dev,
                              struct hns_roce_mr *mr)
{
        int ret;
        unsigned long mtpt_idx = key_to_hw_index(mr->key);
        struct device *dev = hr_dev->dev;
        struct hns_roce_cmd_mailbox *mailbox;
        struct hns_roce_mr_table *mr_table = &hr_dev->mr_table;

        /* Prepare HEM entry memory */
        ret = hns_roce_table_get(hr_dev, &mr_table->mtpt_table, mtpt_idx);
        if (ret)
                return ret;

        /* Allocate mailbox memory */
        mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
        if (IS_ERR(mailbox)) {
                ret = PTR_ERR(mailbox);
                goto err_table;
        }

        ret = hr_dev->hw->write_mtpt(mailbox->buf, mr, mtpt_idx);
        if (ret) {
                dev_err(dev, "Write mtpt failed!\n");
                goto err_page;
        }

        ret = hns_roce_sw2hw_mpt(hr_dev, mailbox,
                                 mtpt_idx & (hr_dev->caps.num_mtpts - 1));
        if (ret) {
                dev_err(dev, "SW2HW_MPT failed (%d)\n", ret);
                goto err_page;
        }

        mr->enabled = 1;
        hns_roce_free_cmd_mailbox(hr_dev, mailbox);

        return 0;

err_page:
        hns_roce_free_cmd_mailbox(hr_dev, mailbox);

err_table:
        hns_roce_table_put(hr_dev, &mr_table->mtpt_table, mtpt_idx);
        return ret;
}

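/*
 * A chunk of MTT entries must land in a single page of the MTT table and
 * start on a segment boundary; hns_roce_table_find() returns the kernel
 * mapping of that page so the entries can be written directly.
 */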
static int hns_roce_write_mtt_chunk(struct hns_roce_dev *hr_dev,
                                    struct hns_roce_mtt *mtt, u32 start_index,
                                    u32 npages, u64 *page_list)
{
        u32 i = 0;
        __le64 *mtts = NULL;
        dma_addr_t dma_handle;
        u32 s = start_index * sizeof(u64);

        /* All MTTs must fit in the same page */
        if (start_index / (PAGE_SIZE / sizeof(u64)) !=
            (start_index + npages - 1) / (PAGE_SIZE / sizeof(u64)))
                return -EINVAL;

        if (start_index & (HNS_ROCE_MTT_ENTRY_PER_SEG - 1))
                return -EINVAL;

        mtts = hns_roce_table_find(&hr_dev->mr_table.mtt_table,
                                   mtt->first_seg + s / hr_dev->caps.mtt_entry_sz,
                                   &dma_handle);
        if (!mtts)
                return -ENOMEM;

        /*
         * Save the page address with the low 12 bits dropped. The shift must
         * happen before the endianness conversion, not after it.
         */
        for (i = 0; i < npages; ++i)
                mtts[i] = cpu_to_le64(page_list[i] >> PAGE_ADDR_SHIFT);

        return 0;
}

static int hns_roce_write_mtt(struct hns_roce_dev *hr_dev,
                              struct hns_roce_mtt *mtt, u32 start_index,
                              u32 npages, u64 *page_list)
{
        int chunk;
        int ret;

        if (mtt->order < 0)
                return -EINVAL;

        while (npages > 0) {
                chunk = min_t(int, PAGE_SIZE / sizeof(u64), npages);

                ret = hns_roce_write_mtt_chunk(hr_dev, mtt, start_index, chunk,
                                               page_list);
                if (ret)
                        return ret;

                npages -= chunk;
                start_index += chunk;
                page_list += chunk;
        }

        return 0;
}

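/*
 * Flatten a hns_roce_buf into a temporary array of page DMA addresses: a
 * single-buffer ("direct") allocation is contiguous, so addresses are
 * computed from the base mapping; otherwise each page has its own mapping.
 */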
int hns_roce_buf_write_mtt(struct hns_roce_dev *hr_dev,
                           struct hns_roce_mtt *mtt, struct hns_roce_buf *buf)
{
        u32 i = 0;
        int ret = 0;
        u64 *page_list = NULL;

        page_list = kmalloc_array(buf->npages, sizeof(*page_list), GFP_KERNEL);
        if (!page_list)
                return -ENOMEM;

        for (i = 0; i < buf->npages; ++i) {
                if (buf->nbufs == 1)
                        page_list[i] = buf->direct.map + (i << buf->page_shift);
                else
                        page_list[i] = buf->page_list[i].map;
        }

        ret = hns_roce_write_mtt(hr_dev, mtt, 0, buf->npages, page_list);

        kfree(page_list);

        return ret;
}

int hns_roce_init_mr_table(struct hns_roce_dev *hr_dev)
{
        struct hns_roce_mr_table *mr_table = &hr_dev->mr_table;
        int ret = 0;

        ret = hns_roce_bitmap_init(&mr_table->mtpt_bitmap,
                                   hr_dev->caps.num_mtpts,
                                   hr_dev->caps.num_mtpts - 1,
                                   hr_dev->caps.reserved_mrws, 0);
        if (ret)
                return ret;

        ret = hns_roce_buddy_init(&mr_table->mtt_buddy,
                                  ilog2(hr_dev->caps.num_mtt_segs));
        if (ret)
                goto err_buddy;

        return 0;

err_buddy:
        hns_roce_bitmap_cleanup(&mr_table->mtpt_bitmap);
        return ret;
}

void hns_roce_cleanup_mr_table(struct hns_roce_dev *hr_dev)
{
        struct hns_roce_mr_table *mr_table = &hr_dev->mr_table;

        hns_roce_buddy_cleanup(&mr_table->mtt_buddy);
        hns_roce_bitmap_cleanup(&mr_table->mtpt_bitmap);
}

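/*
 * A DMA MR maps the whole address space (size ~0ULL, iova 0), so no umem or
 * PBL is needed; lkey and rkey share the same hardware key.
 */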
struct ib_mr *hns_roce_get_dma_mr(struct ib_pd *pd, int acc)
{
        int ret = 0;
        struct hns_roce_mr *mr = NULL;

        mr = kmalloc(sizeof(*mr), GFP_KERNEL);
        if (!mr)
                return ERR_PTR(-ENOMEM);

        /* Allocate memory region key */
        ret = hns_roce_mr_alloc(to_hr_dev(pd->device), to_hr_pd(pd)->pdn, 0,
                                ~0ULL, acc, 0, mr);
        if (ret)
                goto err_free;

        ret = hns_roce_mr_enable(to_hr_dev(pd->device), mr);
        if (ret)
                goto err_mr;

        mr->ibmr.rkey = mr->ibmr.lkey = mr->key;
        mr->umem = NULL;

        return &mr->ibmr;

err_mr:
        hns_roce_mr_free(to_hr_dev(pd->device), mr);

err_free:
        kfree(mr);
        return ERR_PTR(ret);
}

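/*
 * Walk the umem scatterlist and write the page addresses into the MTT,
 * batching them through one temporary kernel page (PAGE_SIZE / 8 entries
 * at a time) to bound the allocation.
 */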
int hns_roce_ib_umem_write_mtt(struct hns_roce_dev *hr_dev,
                               struct hns_roce_mtt *mtt, struct ib_umem *umem)
{
        struct scatterlist *sg;
        int i, k, entry;
        int ret = 0;
        u64 *pages;
        u32 n;
        int len;

        pages = (u64 *) __get_free_page(GFP_KERNEL);
        if (!pages)
                return -ENOMEM;

        i = n = 0;

        for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
                len = sg_dma_len(sg) >> mtt->page_shift;
                for (k = 0; k < len; ++k) {
                        pages[i++] = sg_dma_address(sg) +
                                     (k << umem->page_shift);
                        if (i == PAGE_SIZE / sizeof(u64)) {
                                ret = hns_roce_write_mtt(hr_dev, mtt, n, i,
                                                         pages);
                                if (ret)
                                        goto out;
                                n += i;
                                i = 0;
                        }
                }
        }

        if (i)
                ret = hns_roce_write_mtt(hr_dev, mtt, n, i, pages);

out:
        free_page((unsigned long) pages);
        return ret;
}

static int hns_roce_ib_umem_write_mr(struct hns_roce_mr *mr,
                                     struct ib_umem *umem)
{
        int i = 0;
        int entry;
        struct scatterlist *sg;

        for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
                mr->pbl_buf[i] = ((u64)sg_dma_address(sg)) >> 12;
                i++;
        }

        /* Ensure the PBL writes are visible before the MR is enabled */
        mb();

        return 0;
}

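/*
 * Register a user MR: pin the user pages via ib_umem_get(), validate that
 * the umem uses 4K pages and fits in one PBL, then build the PBL and enable
 * the MPT entry.
 */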
struct ib_mr *hns_roce_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
                                   u64 virt_addr, int access_flags,
                                   struct ib_udata *udata)
{
        struct hns_roce_dev *hr_dev = to_hr_dev(pd->device);
        struct device *dev = hr_dev->dev;
        struct hns_roce_mr *mr = NULL;
        int ret = 0;
        int n = 0;

        mr = kmalloc(sizeof(*mr), GFP_KERNEL);
        if (!mr)
                return ERR_PTR(-ENOMEM);

        mr->umem = ib_umem_get(pd->uobject->context, start, length,
                               access_flags, 0);
        if (IS_ERR(mr->umem)) {
                ret = PTR_ERR(mr->umem);
                goto err_free;
        }

        n = ib_umem_page_count(mr->umem);
        if (mr->umem->page_shift != HNS_ROCE_HEM_PAGE_SHIFT) {
                dev_err(dev, "Only 4K page size is supported, but got 0x%lx!\n",
                        BIT(mr->umem->page_shift));
                ret = -EINVAL;
                goto err_umem;
        }

        if (n > HNS_ROCE_MAX_MTPT_PBL_NUM) {
                dev_err(dev, "MR len %lld error: MR is limited to 4G at most!\n",
                        length);
                ret = -EINVAL;
                goto err_umem;
        }

        ret = hns_roce_mr_alloc(hr_dev, to_hr_pd(pd)->pdn, virt_addr, length,
                                access_flags, n, mr);
        if (ret)
                goto err_umem;

        ret = hns_roce_ib_umem_write_mr(mr, mr->umem);
        if (ret)
                goto err_mr;

        ret = hns_roce_mr_enable(hr_dev, mr);
        if (ret)
                goto err_mr;

        mr->ibmr.rkey = mr->ibmr.lkey = mr->key;

        return &mr->ibmr;

err_mr:
        hns_roce_mr_free(hr_dev, mr);

err_umem:
        ib_umem_release(mr->umem);

err_free:
        kfree(mr);
        return ERR_PTR(ret);
}

int hns_roce_dereg_mr(struct ib_mr *ibmr)
{
        struct hns_roce_dev *hr_dev = to_hr_dev(ibmr->device);
        struct hns_roce_mr *mr = to_hr_mr(ibmr);
        int ret = 0;

        if (hr_dev->hw->dereg_mr) {
                ret = hr_dev->hw->dereg_mr(hr_dev, mr);
        } else {
                hns_roce_mr_free(hr_dev, mr);

                if (mr->umem)
                        ib_umem_release(mr->umem);

                kfree(mr);
        }

        return ret;
}