/*
 * Copyright (c) 2016 Hisilicon Limited.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/platform_device.h>
#include <rdma/ib_umem.h>
#include "hns_roce_device.h"
#include "hns_roce_cmd.h"
#include "hns_roce_hem.h"

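/*
 * MR keys are derived from the MTPT table index by a 32-bit rotation:
 * hw_index_to_key() rotates the index left by 8 bits, and
 * key_to_hw_index() rotates right by 8 bits to recover the index.
 */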
static u32 hw_index_to_key(unsigned long ind)
{
	return (u32)(ind >> 24) | (ind << 8);
}

unsigned long key_to_hw_index(u32 key)
{
	return (key << 24) | (key >> 8);
}

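/*
 * SW2HW_MPT hands a prepared MPT entry to hardware through a command
 * mailbox; HW2SW_MPT reclaims ownership of the entry, e.g. before an MR
 * is freed. HW2SW_MPT accepts a NULL mailbox, in which case the output
 * DMA address is 0 and !mailbox is passed as the op modifier.
 */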
static int hns_roce_sw2hw_mpt(struct hns_roce_dev *hr_dev,
			      struct hns_roce_cmd_mailbox *mailbox,
			      unsigned long mpt_index)
{
	return hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, mpt_index, 0,
				 HNS_ROCE_CMD_SW2HW_MPT,
				 HNS_ROCE_CMD_TIMEOUT_MSECS);
}

int hns_roce_hw2sw_mpt(struct hns_roce_dev *hr_dev,
		       struct hns_roce_cmd_mailbox *mailbox,
		       unsigned long mpt_index)
{
	return hns_roce_cmd_mbox(hr_dev, 0, mailbox ? mailbox->dma : 0,
				 mpt_index, !mailbox, HNS_ROCE_CMD_HW2SW_MPT,
				 HNS_ROCE_CMD_TIMEOUT_MSECS);
}

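/*
 * MTT segments are handed out by a binary buddy allocator: bits[o] is a
 * bitmap of free blocks of 2^o segments. An allocation takes the
 * smallest free block of at least the requested order and splits it
 * down, returning the unused buddy halves to the lower orders; *seg is
 * the index of the first segment of the block.
 */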
static int hns_roce_buddy_alloc(struct hns_roce_buddy *buddy, int order,
				unsigned long *seg)
{
	int o;
	u32 m;

	spin_lock(&buddy->lock);

	for (o = order; o <= buddy->max_order; ++o) {
		if (buddy->num_free[o]) {
			m = 1 << (buddy->max_order - o);
			*seg = find_first_bit(buddy->bits[o], m);
			if (*seg < m)
				goto found;
		}
	}
	spin_unlock(&buddy->lock);
	return -1;

found:
	clear_bit(*seg, buddy->bits[o]);
	--buddy->num_free[o];

	while (o > order) {
		--o;
		*seg <<= 1;
		set_bit(*seg ^ 1, buddy->bits[o]);
		++buddy->num_free[o];
	}

	spin_unlock(&buddy->lock);

	*seg <<= order;
	return 0;
}

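/*
 * Freeing is the reverse: while the block's buddy (seg ^ 1) is also
 * free at the current order, merge the two and retry one order higher.
 */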
static void hns_roce_buddy_free(struct hns_roce_buddy *buddy, unsigned long seg,
				int order)
{
	seg >>= order;

	spin_lock(&buddy->lock);

	while (test_bit(seg ^ 1, buddy->bits[order])) {
		clear_bit(seg ^ 1, buddy->bits[order]);
		--buddy->num_free[order];
		seg >>= 1;
		++order;
	}

	set_bit(seg, buddy->bits[order]);
	++buddy->num_free[order];

	spin_unlock(&buddy->lock);
}

static int hns_roce_buddy_init(struct hns_roce_buddy *buddy, int max_order)
{
	int i, s;

	buddy->max_order = max_order;
	spin_lock_init(&buddy->lock);
	buddy->bits = kcalloc(buddy->max_order + 1,
			      sizeof(*buddy->bits),
			      GFP_KERNEL);
	buddy->num_free = kcalloc(buddy->max_order + 1,
				  sizeof(*buddy->num_free),
				  GFP_KERNEL);
	if (!buddy->bits || !buddy->num_free)
		goto err_out;

	for (i = 0; i <= buddy->max_order; ++i) {
		s = BITS_TO_LONGS(1 << (buddy->max_order - i));
		buddy->bits[i] = kcalloc(s, sizeof(long), GFP_KERNEL |
					 __GFP_NOWARN);
		if (!buddy->bits[i]) {
			buddy->bits[i] = vzalloc(s * sizeof(long));
			if (!buddy->bits[i])
				goto err_out_free;
		}
	}

	set_bit(0, buddy->bits[buddy->max_order]);
	buddy->num_free[buddy->max_order] = 1;

	return 0;

err_out_free:
	for (i = 0; i <= buddy->max_order; ++i)
		kvfree(buddy->bits[i]);

err_out:
	kfree(buddy->bits);
	kfree(buddy->num_free);
	return -ENOMEM;
}

static void hns_roce_buddy_cleanup(struct hns_roce_buddy *buddy)
{
	int i;

	for (i = 0; i <= buddy->max_order; ++i)
		kvfree(buddy->bits[i]);

	kfree(buddy->bits);
	kfree(buddy->num_free);
}

static int hns_roce_alloc_mtt_range(struct hns_roce_dev *hr_dev, int order,
				    unsigned long *seg)
{
	struct hns_roce_mr_table *mr_table = &hr_dev->mr_table;
	int ret = 0;

	ret = hns_roce_buddy_alloc(&mr_table->mtt_buddy, order, seg);
	if (ret == -1)
		return -1;

	if (hns_roce_table_get_range(hr_dev, &mr_table->mtt_table, *seg,
				     *seg + (1 << order) - 1)) {
		hns_roce_buddy_free(&mr_table->mtt_buddy, *seg, order);
		return -1;
	}

	return 0;
}

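/*
 * An MTT describes the page list of a buffer. mtt->order is chosen so
 * that HNS_ROCE_MTT_ENTRY_PER_SEG << order entries cover npages; the
 * segments themselves come from the buddy allocator above.
 */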
int hns_roce_mtt_init(struct hns_roce_dev *hr_dev, int npages, int page_shift,
		      struct hns_roce_mtt *mtt)
{
	int ret = 0;
	int i;

	/* A page count of zero corresponds to a DMA memory registration */
	if (!npages) {
		mtt->order = -1;
		mtt->page_shift = HNS_ROCE_HEM_PAGE_SHIFT;
		return 0;
	}

	/* Note: if page_shift is zero, this is a fast memory registration */
	mtt->page_shift = page_shift;

	/* Compute how many MTT segments are necessary */
	for (mtt->order = 0, i = HNS_ROCE_MTT_ENTRY_PER_SEG; i < npages;
	     i <<= 1)
		++mtt->order;

	/* Allocate the MTT segments */
	ret = hns_roce_alloc_mtt_range(hr_dev, mtt->order, &mtt->first_seg);
	if (ret == -1)
		return -ENOMEM;

	return 0;
}

void hns_roce_mtt_cleanup(struct hns_roce_dev *hr_dev, struct hns_roce_mtt *mtt)
{
	struct hns_roce_mr_table *mr_table = &hr_dev->mr_table;

	if (mtt->order < 0)
		return;

	hns_roce_buddy_free(&mr_table->mtt_buddy, mtt->first_seg, mtt->order);
	hns_roce_table_put_range(hr_dev, &mr_table->mtt_table, mtt->first_seg,
				 mtt->first_seg + (1 << mtt->order) - 1);
}

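/*
 * Reserve a hardware key for the MR and, for a normal (non-DMA) MR,
 * allocate the page buffer list (PBL) that hardware will walk: one
 * 64-bit entry per page, hence npages * 8 bytes of coherent memory.
 * A size of ~0ull denotes a DMA MR covering the whole address space,
 * which needs no PBL.
 */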
static int hns_roce_mr_alloc(struct hns_roce_dev *hr_dev, u32 pd, u64 iova,
			     u64 size, u32 access, int npages,
			     struct hns_roce_mr *mr)
{
	unsigned long index = 0;
	int ret = 0;
	struct device *dev = &hr_dev->pdev->dev;

	/* Allocate a key for mr from mr_table */
	ret = hns_roce_bitmap_alloc(&hr_dev->mr_table.mtpt_bitmap, &index);
	if (ret == -1)
		return -ENOMEM;

	mr->iova = iova;			/* MR va starting addr */
	mr->size = size;			/* MR addr range */
	mr->pd = pd;				/* PD the MR belongs to */
	mr->access = access;			/* MR access permit */
	mr->enabled = 0;			/* MR active status */
	mr->key = hw_index_to_key(index);	/* MR key */

	if (size == ~0ull) {
		mr->type = MR_TYPE_DMA;
		mr->pbl_buf = NULL;
		mr->pbl_dma_addr = 0;
	} else {
		mr->type = MR_TYPE_MR;
		mr->pbl_buf = dma_alloc_coherent(dev, npages * 8,
						 &(mr->pbl_dma_addr),
						 GFP_KERNEL);
		if (!mr->pbl_buf)
			return -ENOMEM;
	}

	return 0;
}

static void hns_roce_mr_free(struct hns_roce_dev *hr_dev,
			     struct hns_roce_mr *mr)
{
	struct device *dev = &hr_dev->pdev->dev;
	int npages = 0;
	int ret;

	if (mr->enabled) {
		ret = hns_roce_hw2sw_mpt(hr_dev, NULL, key_to_hw_index(mr->key)
					 & (hr_dev->caps.num_mtpts - 1));
		if (ret)
			dev_warn(dev, "HW2SW_MPT failed (%d)\n", ret);
	}

	if (mr->size != ~0ULL) {
		npages = ib_umem_page_count(mr->umem);
		dma_free_coherent(dev, (unsigned int)(npages * 8), mr->pbl_buf,
				  mr->pbl_dma_addr);
	}

	hns_roce_bitmap_free(&hr_dev->mr_table.mtpt_bitmap,
			     key_to_hw_index(mr->key), BITMAP_NO_RR);
}

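/*
 * Make an MR visible to hardware: pin the HEM backing its MTPT entry,
 * build the MPT context in a mailbox via the hw-specific write_mtpt()
 * hook, then post SW2HW_MPT to hand the entry over.
 */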
static int hns_roce_mr_enable(struct hns_roce_dev *hr_dev,
			      struct hns_roce_mr *mr)
{
	int ret;
	unsigned long mtpt_idx = key_to_hw_index(mr->key);
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_cmd_mailbox *mailbox;
	struct hns_roce_mr_table *mr_table = &hr_dev->mr_table;

	/* Prepare HEM entry memory */
	ret = hns_roce_table_get(hr_dev, &mr_table->mtpt_table, mtpt_idx);
	if (ret)
		return ret;

	/* Allocate mailbox memory */
	mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
	if (IS_ERR(mailbox)) {
		ret = PTR_ERR(mailbox);
		goto err_table;
	}

	ret = hr_dev->hw->write_mtpt(mailbox->buf, mr, mtpt_idx);
	if (ret) {
		dev_err(dev, "Write mtpt failed!\n");
		goto err_page;
	}

	ret = hns_roce_sw2hw_mpt(hr_dev, mailbox,
				 mtpt_idx & (hr_dev->caps.num_mtpts - 1));
	if (ret) {
		dev_err(dev, "SW2HW_MPT failed (%d)\n", ret);
		goto err_page;
	}

	mr->enabled = 1;
	hns_roce_free_cmd_mailbox(hr_dev, mailbox);

	return 0;

err_page:
	hns_roce_free_cmd_mailbox(hr_dev, mailbox);

err_table:
	hns_roce_table_put(hr_dev, &mr_table->mtpt_table, mtpt_idx);
	return ret;
}

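/*
 * Write one chunk of MTT entries. A chunk must stay within a single
 * page of the MTT table and must start on a segment boundary; both
 * conditions are checked here and arranged for by the callers.
 */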
static int hns_roce_write_mtt_chunk(struct hns_roce_dev *hr_dev,
				    struct hns_roce_mtt *mtt, u32 start_index,
				    u32 npages, u64 *page_list)
{
	u32 i = 0;
	__le64 *mtts = NULL;
	dma_addr_t dma_handle;
	u32 s = start_index * sizeof(u64);

	/* All MTTs must fit in the same page */
	if (start_index / (PAGE_SIZE / sizeof(u64)) !=
	    (start_index + npages - 1) / (PAGE_SIZE / sizeof(u64)))
		return -EINVAL;

	if (start_index & (HNS_ROCE_MTT_ENTRY_PER_SEG - 1))
		return -EINVAL;

	mtts = hns_roce_table_find(&hr_dev->mr_table.mtt_table,
				   mtt->first_seg + s / hr_dev->caps.mtt_entry_sz,
				   &dma_handle);
	if (!mtts)
		return -ENOMEM;

	/* Save page addr, low 12 bits : 0; shift before the endian swap */
	for (i = 0; i < npages; ++i)
		mtts[i] = cpu_to_le64(page_list[i] >> PAGE_ADDR_SHIFT);

	return 0;
}

static int hns_roce_write_mtt(struct hns_roce_dev *hr_dev,
			      struct hns_roce_mtt *mtt, u32 start_index,
			      u32 npages, u64 *page_list)
{
	int chunk;
	int ret;

	if (mtt->order < 0)
		return -EINVAL;

	while (npages > 0) {
		chunk = min_t(int, PAGE_SIZE / sizeof(u64), npages);

		ret = hns_roce_write_mtt_chunk(hr_dev, mtt, start_index, chunk,
					       page_list);
		if (ret)
			return ret;

		npages -= chunk;
		start_index += chunk;
		page_list += chunk;
	}

	return 0;
}

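/*
 * Flatten a kernel-allocated hns_roce_buf (either one contiguous
 * mapping or a list of pages) into a page array and write it to the
 * buffer's MTT.
 */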
int hns_roce_buf_write_mtt(struct hns_roce_dev *hr_dev,
			   struct hns_roce_mtt *mtt, struct hns_roce_buf *buf)
{
	u32 i = 0;
	int ret = 0;
	u64 *page_list = NULL;

	page_list = kmalloc_array(buf->npages, sizeof(*page_list), GFP_KERNEL);
	if (!page_list)
		return -ENOMEM;

	for (i = 0; i < buf->npages; ++i) {
		if (buf->nbufs == 1)
			page_list[i] = buf->direct.map + (i << buf->page_shift);
		else
			page_list[i] = buf->page_list[i].map;
	}

	ret = hns_roce_write_mtt(hr_dev, mtt, 0, buf->npages, page_list);

	kfree(page_list);

	return ret;
}

int hns_roce_init_mr_table(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_mr_table *mr_table = &hr_dev->mr_table;
	int ret = 0;

	ret = hns_roce_bitmap_init(&mr_table->mtpt_bitmap,
				   hr_dev->caps.num_mtpts,
				   hr_dev->caps.num_mtpts - 1,
				   hr_dev->caps.reserved_mrws, 0);
	if (ret)
		return ret;

	ret = hns_roce_buddy_init(&mr_table->mtt_buddy,
				  ilog2(hr_dev->caps.num_mtt_segs));
	if (ret)
		goto err_buddy;

	return 0;

err_buddy:
	hns_roce_bitmap_cleanup(&mr_table->mtpt_bitmap);
	return ret;
}

void hns_roce_cleanup_mr_table(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_mr_table *mr_table = &hr_dev->mr_table;

	hns_roce_buddy_cleanup(&mr_table->mtt_buddy);
	hns_roce_bitmap_cleanup(&mr_table->mtpt_bitmap);
}

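/*
 * A DMA MR has no page list: hns_roce_mr_alloc() is called with a size
 * of ~0ULL, which marks the MR as MR_TYPE_DMA and skips PBL allocation.
 */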
struct ib_mr *hns_roce_get_dma_mr(struct ib_pd *pd, int acc)
{
	int ret = 0;
	struct hns_roce_mr *mr = NULL;

	mr = kmalloc(sizeof(*mr), GFP_KERNEL);
	if (mr == NULL)
		return ERR_PTR(-ENOMEM);

	/* Allocate memory region key */
	ret = hns_roce_mr_alloc(to_hr_dev(pd->device), to_hr_pd(pd)->pdn, 0,
				~0ULL, acc, 0, mr);
	if (ret)
		goto err_free;

	ret = hns_roce_mr_enable(to_hr_dev(pd->device), mr);
	if (ret)
		goto err_mr;

	mr->ibmr.rkey = mr->ibmr.lkey = mr->key;
	mr->umem = NULL;

	return &mr->ibmr;

err_mr:
	hns_roce_mr_free(to_hr_dev(pd->device), mr);

err_free:
	kfree(mr);
	return ERR_PTR(ret);
}

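/*
 * Write the pages of a userspace memory registration into an MTT. Page
 * addresses gathered from the umem scatterlist are staged in one
 * borrowed kernel page and flushed in PAGE_SIZE / sizeof(u64) batches.
 */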
int hns_roce_ib_umem_write_mtt(struct hns_roce_dev *hr_dev,
			       struct hns_roce_mtt *mtt, struct ib_umem *umem)
{
	struct scatterlist *sg;
	int i, k, entry;
	int ret = 0;
	u64 *pages;
	u32 n;
	int len;

	pages = (u64 *) __get_free_page(GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	i = n = 0;

	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
		len = sg_dma_len(sg) >> mtt->page_shift;
		for (k = 0; k < len; ++k) {
			pages[i++] = sg_dma_address(sg) +
				     (k << umem->page_shift);
			if (i == PAGE_SIZE / sizeof(u64)) {
				ret = hns_roce_write_mtt(hr_dev, mtt, n, i,
							 pages);
				if (ret)
					goto out;
				n += i;
				i = 0;
			}
		}
	}

	if (i)
		ret = hns_roce_write_mtt(hr_dev, mtt, n, i, pages);

out:
	free_page((unsigned long) pages);
	return ret;
}

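/*
 * Fill the MR's PBL with the page frame numbers of the umem. The
 * barrier is intended to make the entries visible before a later
 * SW2HW_MPT points hardware at the PBL.
 */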
static int hns_roce_ib_umem_write_mr(struct hns_roce_mr *mr,
				     struct ib_umem *umem)
{
	int i = 0;
	int entry;
	struct scatterlist *sg;

	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
		mr->pbl_buf[i] = ((u64)sg_dma_address(sg)) >> PAGE_ADDR_SHIFT;
		i++;
	}

	/* Memory barrier */
	mb();

	return 0;
}

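/*
 * Register a userspace MR. The umem must be mapped with 4K pages and,
 * per the check below, may span at most HNS_ROCE_MAX_MTPT_PBL_NUM pages
 * (4G of memory).
 */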
struct ib_mr *hns_roce_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				   u64 virt_addr, int access_flags,
				   struct ib_udata *udata)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(pd->device);
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_mr *mr = NULL;
	int ret = 0;
	int n = 0;

	mr = kmalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	mr->umem = ib_umem_get(pd->uobject->context, start, length,
			       access_flags, 0);
	if (IS_ERR(mr->umem)) {
		ret = PTR_ERR(mr->umem);
		goto err_free;
	}

	n = ib_umem_page_count(mr->umem);
	if (mr->umem->page_shift != HNS_ROCE_HEM_PAGE_SHIFT) {
		dev_err(dev, "Only 4K page size is supported, but it is 0x%lx now!\n",
			BIT(mr->umem->page_shift));
		ret = -EINVAL;
		goto err_umem;
	}

	if (n > HNS_ROCE_MAX_MTPT_PBL_NUM) {
		dev_err(dev, "MR len %lld err. MR is limited to 4G at most!\n",
			length);
		ret = -EINVAL;
		goto err_umem;
	}

	ret = hns_roce_mr_alloc(hr_dev, to_hr_pd(pd)->pdn, virt_addr, length,
				access_flags, n, mr);
	if (ret)
		goto err_umem;

	ret = hns_roce_ib_umem_write_mr(mr, mr->umem);
	if (ret)
		goto err_mr;

	ret = hns_roce_mr_enable(hr_dev, mr);
	if (ret)
		goto err_mr;

	mr->ibmr.rkey = mr->ibmr.lkey = mr->key;

	return &mr->ibmr;

err_mr:
	hns_roce_mr_free(hr_dev, mr);

err_umem:
	ib_umem_release(mr->umem);

err_free:
	kfree(mr);
	return ERR_PTR(ret);
}

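/*
 * A hardware backend may supply its own dereg_mr hook, in which case it
 * owns the whole teardown; otherwise the MR is freed generically here.
 */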
int hns_roce_dereg_mr(struct ib_mr *ibmr)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibmr->device);
	struct hns_roce_mr *mr = to_hr_mr(ibmr);
	int ret = 0;

	if (hr_dev->hw->dereg_mr) {
		ret = hr_dev->hw->dereg_mr(hr_dev, mr);
	} else {
		hns_roce_mr_free(hr_dev, mr);

		if (mr->umem)
			ib_umem_release(mr->umem);

		kfree(mr);
	}

	return ret;
}