/*
 * Copyright (c) 2016 Hisilicon Limited.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/platform_device.h>
#include <rdma/ib_umem.h>
#include "hns_roce_device.h"
#include "hns_roce_cmd.h"
#include "hns_roce_hem.h"

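/*
 * The user-visible key is the hardware MPT index rotated left by eight
 * bits, so consecutive indexes do not produce consecutive keys;
 * key_to_hw_index() applies the inverse rotation.
 */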
static u32 hw_index_to_key(unsigned long ind)
{
	return (u32)(ind >> 24) | (ind << 8);
}

static unsigned long key_to_hw_index(u32 key)
{
	return (key << 24) | (key >> 8);
}

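/*
 * SW2HW_MPT hands a software-initialized MPT entry to the hardware;
 * HW2SW_MPT reclaims it. A NULL mailbox on HW2SW_MPT supplies no output
 * buffer, so the entry contents are not fetched back.
 */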
static int hns_roce_sw2hw_mpt(struct hns_roce_dev *hr_dev,
			      struct hns_roce_cmd_mailbox *mailbox,
			      unsigned long mpt_index)
{
	return hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, mpt_index, 0,
				 HNS_ROCE_CMD_SW2HW_MPT,
				 HNS_ROCE_CMD_TIME_CLASS_B);
}

static int hns_roce_hw2sw_mpt(struct hns_roce_dev *hr_dev,
			      struct hns_roce_cmd_mailbox *mailbox,
			      unsigned long mpt_index)
{
	return hns_roce_cmd_mbox(hr_dev, 0, mailbox ? mailbox->dma : 0,
				 mpt_index, !mailbox, HNS_ROCE_CMD_HW2SW_MPT,
				 HNS_ROCE_CMD_TIME_CLASS_B);
}

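/*
 * MTT segments come from a binary buddy allocator: bits[o] marks which
 * order-o blocks (each covering 2^o segments) are free, and num_free[o]
 * counts them. An allocation takes the smallest free block that fits and
 * splits it down to the requested order.
 */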
static int hns_roce_buddy_alloc(struct hns_roce_buddy *buddy, int order,
				unsigned long *seg)
{
	int o;
	u32 m;

	spin_lock(&buddy->lock);

	for (o = order; o <= buddy->max_order; ++o) {
		if (buddy->num_free[o]) {
			m = 1 << (buddy->max_order - o);
			*seg = find_first_bit(buddy->bits[o], m);
			if (*seg < m)
				goto found;
		}
	}
	spin_unlock(&buddy->lock);
	return -1;

 found:
	clear_bit(*seg, buddy->bits[o]);
	--buddy->num_free[o];

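	/*
	 * Split the block down to the requested order, returning the
	 * buddy of each split half to the free set one order below.
	 */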
	while (o > order) {
		--o;
		*seg <<= 1;
		set_bit(*seg ^ 1, buddy->bits[o]);
		++buddy->num_free[o];
	}

	spin_unlock(&buddy->lock);

	*seg <<= order;
	return 0;
}

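/*
 * Free a block, then keep merging it with its buddy (moving up one order
 * each time) for as long as the buddy is free as well.
 */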
static void hns_roce_buddy_free(struct hns_roce_buddy *buddy, unsigned long seg,
				int order)
{
	seg >>= order;

	spin_lock(&buddy->lock);

	while (test_bit(seg ^ 1, buddy->bits[order])) {
		clear_bit(seg ^ 1, buddy->bits[order]);
		--buddy->num_free[order];
		seg >>= 1;
		++order;
	}

	set_bit(seg, buddy->bits[order]);
	++buddy->num_free[order];

	spin_unlock(&buddy->lock);
}

static int hns_roce_buddy_init(struct hns_roce_buddy *buddy, int max_order)
{
	int i, s;

	buddy->max_order = max_order;
	spin_lock_init(&buddy->lock);

	buddy->bits = kcalloc(buddy->max_order + 1, sizeof(*buddy->bits),
			      GFP_KERNEL);
	buddy->num_free = kcalloc(buddy->max_order + 1,
				  sizeof(*buddy->num_free), GFP_KERNEL);
	if (!buddy->bits || !buddy->num_free)
		goto err_out;

	for (i = 0; i <= buddy->max_order; ++i) {
		s = BITS_TO_LONGS(1 << (buddy->max_order - i));
		buddy->bits[i] = kmalloc_array(s, sizeof(long), GFP_KERNEL);
		if (!buddy->bits[i])
			goto err_out_free;

		bitmap_zero(buddy->bits[i], 1 << (buddy->max_order - i));
	}

	set_bit(0, buddy->bits[buddy->max_order]);
	buddy->num_free[buddy->max_order] = 1;

	return 0;

err_out_free:
	for (i = 0; i <= buddy->max_order; ++i)
		kfree(buddy->bits[i]);

err_out:
	kfree(buddy->bits);
	kfree(buddy->num_free);
	return -ENOMEM;
}

static void hns_roce_buddy_cleanup(struct hns_roce_buddy *buddy)
{
	int i;

	for (i = 0; i <= buddy->max_order; ++i)
		kfree(buddy->bits[i]);

	kfree(buddy->bits);
	kfree(buddy->num_free);
}

static int hns_roce_alloc_mtt_range(struct hns_roce_dev *hr_dev, int order,
				    unsigned long *seg)
{
	struct hns_roce_mr_table *mr_table = &hr_dev->mr_table;
	int ret = 0;

	ret = hns_roce_buddy_alloc(&mr_table->mtt_buddy, order, seg);
	if (ret == -1)
		return -1;

	if (hns_roce_table_get_range(hr_dev, &mr_table->mtt_table, *seg,
				     *seg + (1 << order) - 1)) {
		hns_roce_buddy_free(&mr_table->mtt_buddy, *seg, order);
		return -1;
	}

	return 0;
}

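/*
 * An MTT describes the pages backing a memory region as a run of buddy
 * segments of HNS_ROCE_MTT_ENTRY_PER_SEG translation entries each;
 * hns_roce_mtt_init() reserves enough segments to hold npages entries.
 */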
int hns_roce_mtt_init(struct hns_roce_dev *hr_dev, int npages, int page_shift,
		      struct hns_roce_mtt *mtt)
{
	int ret = 0;
	int i;

	/* A page count of zero means a DMA memory region */
	if (!npages) {
		mtt->order = -1;
		mtt->page_shift = HNS_ROCE_HEM_PAGE_SHIFT;
		return 0;
	}

	/* Note: a page_shift of zero means fast memory registration */
	mtt->page_shift = page_shift;

	/* Compute the buddy order needed to cover npages MTT entries */
	for (mtt->order = 0, i = HNS_ROCE_MTT_ENTRY_PER_SEG; i < npages;
	     i <<= 1)
		++mtt->order;

	/* Allocate the MTT entries */
	ret = hns_roce_alloc_mtt_range(hr_dev, mtt->order, &mtt->first_seg);
	if (ret == -1)
		return -ENOMEM;

	return 0;
}

void hns_roce_mtt_cleanup(struct hns_roce_dev *hr_dev, struct hns_roce_mtt *mtt)
{
	struct hns_roce_mr_table *mr_table = &hr_dev->mr_table;

	if (mtt->order < 0)
		return;

	hns_roce_buddy_free(&mr_table->mtt_buddy, mtt->first_seg, mtt->order);
	hns_roce_table_put_range(hr_dev, &mr_table->mtt_table, mtt->first_seg,
				 mtt->first_seg + (1 << mtt->order) - 1);
}

static int hns_roce_mr_alloc(struct hns_roce_dev *hr_dev, u32 pd, u64 iova,
			     u64 size, u32 access, int npages,
			     struct hns_roce_mr *mr)
{
	unsigned long index = 0;
	int ret = 0;
	struct device *dev = &hr_dev->pdev->dev;

	/* Allocate a key for mr from mr_table */
	ret = hns_roce_bitmap_alloc(&hr_dev->mr_table.mtpt_bitmap, &index);
	if (ret == -1)
		return -ENOMEM;

	mr->iova = iova;			/* MR va starting addr */
	mr->size = size;			/* MR addr range */
	mr->pd = pd;				/* PD the MR belongs to */
	mr->access = access;			/* MR access permissions */
	mr->enabled = 0;			/* MR active status */
	mr->key = hw_index_to_key(index);	/* MR key */

	if (size == ~0ull) {
		mr->type = MR_TYPE_DMA;
		mr->pbl_buf = NULL;
		mr->pbl_dma_addr = 0;
	} else {
		mr->type = MR_TYPE_MR;
		mr->pbl_buf = dma_alloc_coherent(dev, npages * 8,
						 &(mr->pbl_dma_addr),
						 GFP_KERNEL);
		if (!mr->pbl_buf) {
			/* Give back the MPT index taken above */
			hns_roce_bitmap_free(&hr_dev->mr_table.mtpt_bitmap,
					     index);
			return -ENOMEM;
		}
	}

	return 0;
}

static void hns_roce_mr_free(struct hns_roce_dev *hr_dev,
			     struct hns_roce_mr *mr)
{
	struct device *dev = &hr_dev->pdev->dev;
	int npages = 0;
	int ret;

	if (mr->enabled) {
		ret = hns_roce_hw2sw_mpt(hr_dev, NULL, key_to_hw_index(mr->key)
					 & (hr_dev->caps.num_mtpts - 1));
		if (ret)
			dev_warn(dev, "HW2SW_MPT failed (%d)\n", ret);
	}

	if (mr->size != ~0ULL) {
		npages = ib_umem_page_count(mr->umem);
		dma_free_coherent(dev, npages * 8, mr->pbl_buf,
				  mr->pbl_dma_addr);
	}

	hns_roce_bitmap_free(&hr_dev->mr_table.mtpt_bitmap,
			     key_to_hw_index(mr->key));
}

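/*
 * Activating an MR is a three-step handshake with the hardware: reserve
 * the HEM memory backing the MPT entry, build the entry in a mailbox
 * buffer, then issue SW2HW_MPT so the hardware takes ownership.
 */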
static int hns_roce_mr_enable(struct hns_roce_dev *hr_dev,
			      struct hns_roce_mr *mr)
{
	int ret;
	unsigned long mtpt_idx = key_to_hw_index(mr->key);
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_cmd_mailbox *mailbox;
	struct hns_roce_mr_table *mr_table = &hr_dev->mr_table;

	/* Prepare HEM entry memory */
	ret = hns_roce_table_get(hr_dev, &mr_table->mtpt_table, mtpt_idx);
	if (ret)
		return ret;

	/* Allocate mailbox memory */
	mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
	if (IS_ERR(mailbox)) {
		ret = PTR_ERR(mailbox);
		goto err_table;
	}

	ret = hr_dev->hw->write_mtpt(mailbox->buf, mr, mtpt_idx);
	if (ret) {
		dev_err(dev, "Write mtpt failed (%d)!\n", ret);
		goto err_page;
	}

	ret = hns_roce_sw2hw_mpt(hr_dev, mailbox,
				 mtpt_idx & (hr_dev->caps.num_mtpts - 1));
	if (ret) {
		dev_err(dev, "SW2HW_MPT failed (%d)\n", ret);
		goto err_page;
	}

	mr->enabled = 1;
	hns_roce_free_cmd_mailbox(hr_dev, mailbox);

	return 0;

err_page:
	hns_roce_free_cmd_mailbox(hr_dev, mailbox);

err_table:
	hns_roce_table_put(hr_dev, &mr_table->mtpt_table, mtpt_idx);
	return ret;
}

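/*
 * Write one run of page addresses into the MTT. A chunk must lie within
 * a single page of the MTT table and start on a segment boundary; both
 * conditions are checked below, and hns_roce_write_mtt() splits larger
 * requests accordingly.
 */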
static int hns_roce_write_mtt_chunk(struct hns_roce_dev *hr_dev,
				    struct hns_roce_mtt *mtt, u32 start_index,
				    u32 npages, u64 *page_list)
{
	u32 i = 0;
	__le64 *mtts = NULL;
	dma_addr_t dma_handle;
	u32 s = start_index * sizeof(u64);

	/* All MTTs must fit in the same page */
	if (start_index / (PAGE_SIZE / sizeof(u64)) !=
	    (start_index + npages - 1) / (PAGE_SIZE / sizeof(u64)))
		return -EINVAL;

	if (start_index & (HNS_ROCE_MTT_ENTRY_PER_SEG - 1))
		return -EINVAL;

	mtts = hns_roce_table_find(&hr_dev->mr_table.mtt_table,
				   mtt->first_seg + s / hr_dev->caps.mtt_entry_sz,
				   &dma_handle);
	if (!mtts)
		return -ENOMEM;

	/*
	 * Save the page addresses with the low 12 bits dropped. The shift
	 * must happen before the cpu_to_le64() conversion, or the result
	 * would be wrong on big-endian machines.
	 */
	for (i = 0; i < npages; ++i)
		mtts[i] = cpu_to_le64(page_list[i] >> PAGE_ADDR_SHIFT);

	return 0;
}

static int hns_roce_write_mtt(struct hns_roce_dev *hr_dev,
			      struct hns_roce_mtt *mtt, u32 start_index,
			      u32 npages, u64 *page_list)
{
	int chunk;
	int ret;

	if (mtt->order < 0)
		return -EINVAL;

	while (npages > 0) {
		chunk = min_t(int, PAGE_SIZE / sizeof(u64), npages);

		ret = hns_roce_write_mtt_chunk(hr_dev, mtt, start_index, chunk,
					       page_list);
		if (ret)
			return ret;

		npages -= chunk;
		start_index += chunk;
		page_list += chunk;
	}

	return 0;
}

int hns_roce_buf_write_mtt(struct hns_roce_dev *hr_dev,
			   struct hns_roce_mtt *mtt, struct hns_roce_buf *buf)
{
	u32 i = 0;
	int ret = 0;
	u64 *page_list = NULL;

	page_list = kmalloc_array(buf->npages, sizeof(*page_list), GFP_KERNEL);
	if (!page_list)
		return -ENOMEM;

	for (i = 0; i < buf->npages; ++i) {
		if (buf->nbufs == 1)
			page_list[i] = buf->direct.map + (i << buf->page_shift);
		else
			page_list[i] = buf->page_list[i].map;
	}

	ret = hns_roce_write_mtt(hr_dev, mtt, 0, buf->npages, page_list);

	kfree(page_list);

	return ret;
}

int hns_roce_init_mr_table(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_mr_table *mr_table = &hr_dev->mr_table;
	int ret = 0;

	ret = hns_roce_bitmap_init(&mr_table->mtpt_bitmap,
				   hr_dev->caps.num_mtpts,
				   hr_dev->caps.num_mtpts - 1,
				   hr_dev->caps.reserved_mrws, 0);
	if (ret)
		return ret;

	ret = hns_roce_buddy_init(&mr_table->mtt_buddy,
				  ilog2(hr_dev->caps.num_mtt_segs));
	if (ret)
		goto err_buddy;

	return 0;

err_buddy:
	hns_roce_bitmap_cleanup(&mr_table->mtpt_bitmap);
	return ret;
}

void hns_roce_cleanup_mr_table(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_mr_table *mr_table = &hr_dev->mr_table;

	hns_roce_buddy_cleanup(&mr_table->mtt_buddy);
	hns_roce_bitmap_cleanup(&mr_table->mtpt_bitmap);
}

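/*
 * A DMA MR covers the whole address space: passing size ~0ULL makes
 * hns_roce_mr_alloc() pick MR_TYPE_DMA and skip the PBL allocation.
 */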
struct ib_mr *hns_roce_get_dma_mr(struct ib_pd *pd, int acc)
{
	int ret = 0;
	struct hns_roce_mr *mr = NULL;

	mr = kmalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	/* Allocate memory region key */
	ret = hns_roce_mr_alloc(to_hr_dev(pd->device), to_hr_pd(pd)->pdn, 0,
				~0ULL, acc, 0, mr);
	if (ret)
		goto err_free;

	ret = hns_roce_mr_enable(to_hr_dev(pd->device), mr);
	if (ret)
		goto err_mr;

	mr->ibmr.rkey = mr->ibmr.lkey = mr->key;
	mr->umem = NULL;

	return &mr->ibmr;

err_mr:
	hns_roce_mr_free(to_hr_dev(pd->device), mr);

err_free:
	kfree(mr);
	return ERR_PTR(ret);
}

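/*
 * Walk the umem scatterlist and write the DMA address of every page into
 * the MTT, batching at most one page worth of entries (PAGE_SIZE /
 * sizeof(u64)) per hns_roce_write_mtt() call.
 */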
int hns_roce_ib_umem_write_mtt(struct hns_roce_dev *hr_dev,
			       struct hns_roce_mtt *mtt, struct ib_umem *umem)
{
	struct scatterlist *sg;
	int i, k, entry;
	int ret = 0;
	u64 *pages;
	u32 n;
	int len;

	pages = (u64 *) __get_free_page(GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	i = n = 0;

	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
		len = sg_dma_len(sg) >> mtt->page_shift;
		for (k = 0; k < len; ++k) {
			pages[i++] = sg_dma_address(sg) + umem->page_size * k;
			if (i == PAGE_SIZE / sizeof(u64)) {
				ret = hns_roce_write_mtt(hr_dev, mtt, n, i,
							 pages);
				if (ret)
					goto out;
				n += i;
				i = 0;
			}
		}
	}

	if (i)
		ret = hns_roce_write_mtt(hr_dev, mtt, n, i, pages);

out:
	free_page((unsigned long) pages);
	return ret;
}

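/*
 * Fill the MR's PBL (physical buffer list) with the page addresses of
 * the umem; the barrier orders these writes before the MPT that points
 * at them is enabled.
 */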
static int hns_roce_ib_umem_write_mr(struct hns_roce_mr *mr,
				     struct ib_umem *umem)
{
	int i = 0;
	int entry;
	struct scatterlist *sg;

	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
		mr->pbl_buf[i] = ((u64)sg_dma_address(sg)) >> PAGE_ADDR_SHIFT;
		i++;
	}

	/* Make sure the PBL is fully written before the MPT is enabled */
	mb();

	return 0;
}

struct ib_mr *hns_roce_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				   u64 virt_addr, int access_flags,
				   struct ib_udata *udata)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(pd->device);
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_mr *mr = NULL;
	int ret = 0;
	int n = 0;

	mr = kmalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	mr->umem = ib_umem_get(pd->uobject->context, start, length,
			       access_flags, 0);
	if (IS_ERR(mr->umem)) {
		ret = PTR_ERR(mr->umem);
		goto err_free;
	}

	n = ib_umem_page_count(mr->umem);
	if (mr->umem->page_size != HNS_ROCE_HEM_PAGE_SIZE) {
		dev_err(dev, "Only 4kB page size is supported, got 0x%x!\n",
			mr->umem->page_size);
	}

	if (n > HNS_ROCE_MAX_MTPT_PBL_NUM) {
		dev_err(dev, "MR len %lld error: MR is limited to 4G at most!\n",
			length);
		ret = -EINVAL;
		goto err_umem;
	}

	ret = hns_roce_mr_alloc(hr_dev, to_hr_pd(pd)->pdn, virt_addr, length,
				access_flags, n, mr);
	if (ret)
		goto err_umem;

	ret = hns_roce_ib_umem_write_mr(mr, mr->umem);
	if (ret)
		goto err_mr;

	ret = hns_roce_mr_enable(hr_dev, mr);
	if (ret)
		goto err_mr;

	mr->ibmr.rkey = mr->ibmr.lkey = mr->key;

	return &mr->ibmr;

err_mr:
	hns_roce_mr_free(hr_dev, mr);

err_umem:
	ib_umem_release(mr->umem);

err_free:
	kfree(mr);
	return ERR_PTR(ret);
}

int hns_roce_dereg_mr(struct ib_mr *ibmr)
{
	struct hns_roce_mr *mr = to_hr_mr(ibmr);

	hns_roce_mr_free(to_hr_dev(ibmr->device), mr);
	if (mr->umem)
		ib_umem_release(mr->umem);

	kfree(mr);

	return 0;
}