/*
 * Copyright (c) 2016 Hisilicon Limited.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/platform_device.h>
#include <rdma/ib_umem.h>
#include "hns_roce_common.h"
#include "hns_roce_device.h"
#include "hns_roce_hem.h"
#include "hns_roce_user.h"

#define SQP_NUM				12

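/*
 * Dispatch an asynchronous hardware event for a QP: look the QP up by its
 * number under the table lock, take a temporary reference, invoke the QP's
 * event callback, and drop the reference (completing ->free for the last
 * user).
 */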
void hns_roce_qp_event(struct hns_roce_dev *hr_dev, u32 qpn, int event_type)
{
        struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
        struct device *dev = &hr_dev->pdev->dev;
        struct hns_roce_qp *qp;

        spin_lock(&qp_table->lock);

        qp = __hns_roce_qp_lookup(hr_dev, qpn);
        if (qp)
                atomic_inc(&qp->refcount);

        spin_unlock(&qp_table->lock);

        if (!qp) {
                dev_warn(dev, "Async event for bogus QP %08x\n", qpn);
                return;
        }

        qp->event(qp, (enum hns_roce_event)event_type);

        if (atomic_dec_and_test(&qp->refcount))
                complete(&qp->free);
}

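/*
 * Translate a hardware asynchronous event type into the corresponding IB
 * event and forward it to the consumer's registered QP event handler, if any.
 */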
static void hns_roce_ib_qp_event(struct hns_roce_qp *hr_qp,
                                 enum hns_roce_event type)
{
        struct ib_event event;
        struct ib_qp *ibqp = &hr_qp->ibqp;

        if (ibqp->event_handler) {
                event.device = ibqp->device;
                event.element.qp = ibqp;
                switch (type) {
                case HNS_ROCE_EVENT_TYPE_PATH_MIG:
                        event.event = IB_EVENT_PATH_MIG;
                        break;
                case HNS_ROCE_EVENT_TYPE_COMM_EST:
                        event.event = IB_EVENT_COMM_EST;
                        break;
                case HNS_ROCE_EVENT_TYPE_SQ_DRAINED:
                        event.event = IB_EVENT_SQ_DRAINED;
                        break;
                case HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH:
                        event.event = IB_EVENT_QP_LAST_WQE_REACHED;
                        break;
                case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
                        event.event = IB_EVENT_QP_FATAL;
                        break;
                case HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED:
                        event.event = IB_EVENT_PATH_MIG_ERR;
                        break;
                case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
                        event.event = IB_EVENT_QP_REQ_ERR;
                        break;
                case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
                        event.event = IB_EVENT_QP_ACCESS_ERR;
                        break;
                default:
                        dev_dbg(ibqp->device->dma_device, "roce_ib: Unexpected event type %d on QP %06lx\n",
                                type, hr_qp->qpn);
                        return;
                }
                ibqp->event_handler(&event, ibqp->qp_context);
        }
}

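/* Reserve a contiguous, aligned range of QP numbers from the QPN bitmap. */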
static int hns_roce_reserve_range_qp(struct hns_roce_dev *hr_dev, int cnt,
                                     int align, unsigned long *base)
{
        struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;

        return hns_roce_bitmap_alloc_range(&qp_table->bitmap, cnt, align, base);
}

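/*
 * Map an IB QP state onto the hardware QP state encoding; unknown states map
 * to HNS_ROCE_QP_NUM_STATE.
 */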
enum hns_roce_qp_state to_hns_roce_state(enum ib_qp_state state)
{
        switch (state) {
        case IB_QPS_RESET:
                return HNS_ROCE_QP_STATE_RST;
        case IB_QPS_INIT:
                return HNS_ROCE_QP_STATE_INIT;
        case IB_QPS_RTR:
                return HNS_ROCE_QP_STATE_RTR;
        case IB_QPS_RTS:
                return HNS_ROCE_QP_STATE_RTS;
        case IB_QPS_SQD:
                return HNS_ROCE_QP_STATE_SQD;
        case IB_QPS_ERR:
                return HNS_ROCE_QP_STATE_ERR;
        default:
                return HNS_ROCE_QP_NUM_STATE;
        }
}

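/*
 * Register a GSI (special) QP in the QPN radix tree. Unlike ordinary QPs, no
 * QPC or IRRL table entries are allocated here.
 */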
static int hns_roce_gsi_qp_alloc(struct hns_roce_dev *hr_dev, unsigned long qpn,
                                 struct hns_roce_qp *hr_qp)
{
        struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
        int ret;

        if (!qpn)
                return -EINVAL;

        hr_qp->qpn = qpn;

        spin_lock_irq(&qp_table->lock);
        ret = radix_tree_insert(&hr_dev->qp_table_tree,
                                hr_qp->qpn & (hr_dev->caps.num_qps - 1), hr_qp);
        spin_unlock_irq(&qp_table->lock);
        if (ret) {
                dev_err(&hr_dev->pdev->dev, "QPC radix_tree_insert failed\n");
                goto err_put_irrl;
        }

        atomic_set(&hr_qp->refcount, 1);
        init_completion(&hr_qp->free);

        return 0;

err_put_irrl:

        return ret;
}

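/*
 * Allocate the per-QP context resources for an ordinary QP: QPC and IRRL
 * table entries, plus the QPN radix tree entry used for async event lookup.
 */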
static int hns_roce_qp_alloc(struct hns_roce_dev *hr_dev, unsigned long qpn,
                             struct hns_roce_qp *hr_qp)
{
        struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
        struct device *dev = &hr_dev->pdev->dev;
        int ret;

        if (!qpn)
                return -EINVAL;

        hr_qp->qpn = qpn;

        /* Alloc memory for QPC */
        ret = hns_roce_table_get(hr_dev, &qp_table->qp_table, hr_qp->qpn);
        if (ret) {
                dev_err(dev, "QPC table get failed\n");
                goto err_out;
        }

        /* Alloc memory for IRRL */
        ret = hns_roce_table_get(hr_dev, &qp_table->irrl_table, hr_qp->qpn);
        if (ret) {
                dev_err(dev, "IRRL table get failed\n");
                goto err_put_qp;
        }

        spin_lock_irq(&qp_table->lock);
        ret = radix_tree_insert(&hr_dev->qp_table_tree,
                                hr_qp->qpn & (hr_dev->caps.num_qps - 1), hr_qp);
        spin_unlock_irq(&qp_table->lock);
        if (ret) {
                dev_err(dev, "QPC radix_tree_insert failed\n");
                goto err_put_irrl;
        }

        atomic_set(&hr_qp->refcount, 1);
        init_completion(&hr_qp->free);

        return 0;

err_put_irrl:
        hns_roce_table_put(hr_dev, &qp_table->irrl_table, hr_qp->qpn);

err_put_qp:
        hns_roce_table_put(hr_dev, &qp_table->qp_table, hr_qp->qpn);

err_out:
        return ret;
}

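/* Remove the QP from the QPN radix tree so no new lookups can find it. */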
void hns_roce_qp_remove(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
{
        struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
        unsigned long flags;

        spin_lock_irqsave(&qp_table->lock, flags);
        radix_tree_delete(&hr_dev->qp_table_tree,
                          hr_qp->qpn & (hr_dev->caps.num_qps - 1));
        spin_unlock_irqrestore(&qp_table->lock, flags);
}

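/*
 * Drop the initial reference, wait for any outstanding event handlers to
 * finish, then release the QPC and IRRL entries (GSI QPs never took them).
 */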
void hns_roce_qp_free(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
{
        struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;

        if (atomic_dec_and_test(&hr_qp->refcount))
                complete(&hr_qp->free);
        wait_for_completion(&hr_qp->free);

        if ((hr_qp->ibqp.qp_type) != IB_QPT_GSI) {
                hns_roce_table_put(hr_dev, &qp_table->irrl_table, hr_qp->qpn);
                hns_roce_table_put(hr_dev, &qp_table->qp_table, hr_qp->qpn);
        }
}

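/*
 * Return a range of QP numbers to the bitmap; QPNs in the reserved special-QP
 * region (two per port) are never freed.
 */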
void hns_roce_release_range_qp(struct hns_roce_dev *hr_dev, int base_qpn,
                               int cnt)
{
        struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;

        if (base_qpn < (hr_dev->caps.sqp_start + 2 * hr_dev->caps.num_ports))
                return;

        hns_roce_bitmap_free_range(&qp_table->bitmap, base_qpn, cnt);
}

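/*
 * Validate the requested RQ depth and SGE count against the device caps and
 * derive the actual RQ geometry (WQE count, max SGEs, WQE shift), or zero it
 * when an SRQ is attached. The granted values are written back into @cap.
 */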
static int hns_roce_set_rq_size(struct hns_roce_dev *hr_dev,
                                struct ib_qp_cap *cap, int is_user, int has_srq,
                                struct hns_roce_qp *hr_qp)
{
        u32 max_cnt;
        struct device *dev = &hr_dev->pdev->dev;

        /* Check the validity of QP support capacity */
        if (cap->max_recv_wr > hr_dev->caps.max_wqes ||
            cap->max_recv_sge > hr_dev->caps.max_rq_sg) {
                dev_err(dev, "RQ WR or sge error!max_recv_wr=%d max_recv_sge=%d\n",
                        cap->max_recv_wr, cap->max_recv_sge);
                return -EINVAL;
        }

        /* If an SRQ is attached, the RQ is unused, so its sizes must be zero */
        if (has_srq) {
                if (cap->max_recv_wr) {
                        dev_dbg(dev, "srq no need config max_recv_wr\n");
                        return -EINVAL;
                }

                hr_qp->rq.wqe_cnt = hr_qp->rq.max_gs = 0;
        } else {
                if (is_user && (!cap->max_recv_wr || !cap->max_recv_sge)) {
                        dev_err(dev, "user space no need config max_recv_wr max_recv_sge\n");
                        return -EINVAL;
                }

                /* In the v1 engine, enforce the minimum WQE count, then round up */
                max_cnt = cap->max_recv_wr > HNS_ROCE_MIN_WQE_NUM ?
                          cap->max_recv_wr : HNS_ROCE_MIN_WQE_NUM;
                hr_qp->rq.wqe_cnt = roundup_pow_of_two(max_cnt);

                if ((u32)hr_qp->rq.wqe_cnt > hr_dev->caps.max_wqes) {
                        dev_err(dev, "hns_roce_set_rq_size rq.wqe_cnt too large\n");
                        return -EINVAL;
                }

                max_cnt = max(1U, cap->max_recv_sge);
                hr_qp->rq.max_gs = roundup_pow_of_two(max_cnt);
                /* The RQ WQE size is fixed (64 bytes) */
                hr_qp->rq.wqe_shift = ilog2(hr_dev->caps.max_rq_desc_sz);
        }

        cap->max_recv_wr = hr_qp->rq.max_post = hr_qp->rq.wqe_cnt;
        cap->max_recv_sge = hr_qp->rq.max_gs;

        return 0;
}

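/*
 * Take the SQ geometry chosen by userspace (log WQE count and stride from the
 * create-QP ucmd), sanity check it, and compute the total buffer size and the
 * page-aligned offsets of the SQ and RQ within that buffer.
 */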
static int hns_roce_set_user_sq_size(struct hns_roce_dev *hr_dev,
                                     struct hns_roce_qp *hr_qp,
                                     struct hns_roce_ib_create_qp *ucmd)
{
        u32 roundup_sq_stride = roundup_pow_of_two(hr_dev->caps.max_sq_desc_sz);
        u8 max_sq_stride = ilog2(roundup_sq_stride);

        /* Sanity check SQ size before proceeding */
        if ((u32)(1 << ucmd->log_sq_bb_count) > hr_dev->caps.max_wqes ||
            ucmd->log_sq_stride > max_sq_stride ||
            ucmd->log_sq_stride < HNS_ROCE_IB_MIN_SQ_STRIDE) {
                dev_err(&hr_dev->pdev->dev, "check SQ size error!\n");
                return -EINVAL;
        }

        hr_qp->sq.wqe_cnt = 1 << ucmd->log_sq_bb_count;
        hr_qp->sq.wqe_shift = ucmd->log_sq_stride;

        /* Get buf size; the SQ and RQ are each aligned to the page size */
        hr_qp->buff_size = HNS_ROCE_ALOGN_UP((hr_qp->rq.wqe_cnt <<
                                              hr_qp->rq.wqe_shift), PAGE_SIZE) +
                           HNS_ROCE_ALOGN_UP((hr_qp->sq.wqe_cnt <<
                                              hr_qp->sq.wqe_shift), PAGE_SIZE);

        hr_qp->sq.offset = 0;
        hr_qp->rq.offset = HNS_ROCE_ALOGN_UP((hr_qp->sq.wqe_cnt <<
                                              hr_qp->sq.wqe_shift), PAGE_SIZE);

        return 0;
}

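/*
 * Derive the SQ geometry for a kernel QP from the requested capabilities:
 * validate against the device caps, round the WQE count up to a power of two,
 * lay out the SQ followed by the RQ on page boundaries, and write the granted
 * limits back into @cap.
 */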
static int hns_roce_set_kernel_sq_size(struct hns_roce_dev *hr_dev,
                                       struct ib_qp_cap *cap,
                                       struct hns_roce_qp *hr_qp)
{
        struct device *dev = &hr_dev->pdev->dev;
        u32 max_cnt;

        if (cap->max_send_wr > hr_dev->caps.max_wqes ||
            cap->max_send_sge > hr_dev->caps.max_sq_sg ||
            cap->max_inline_data > hr_dev->caps.max_sq_inline) {
                dev_err(dev, "hns_roce_set_kernel_sq_size error1\n");
                return -EINVAL;
        }

        hr_qp->sq.wqe_shift = ilog2(hr_dev->caps.max_sq_desc_sz);
        hr_qp->sq_max_wqes_per_wr = 1;
        hr_qp->sq_spare_wqes = 0;

        /* In the v1 engine, enforce the minimum WQE count, then round up */
        max_cnt = cap->max_send_wr > HNS_ROCE_MIN_WQE_NUM ?
                  cap->max_send_wr : HNS_ROCE_MIN_WQE_NUM;
        hr_qp->sq.wqe_cnt = roundup_pow_of_two(max_cnt);
        if ((u32)hr_qp->sq.wqe_cnt > hr_dev->caps.max_wqes) {
                dev_err(dev, "hns_roce_set_kernel_sq_size sq.wqe_cnt too large\n");
                return -EINVAL;
        }

        /* Get the number of data segments per WQE */
        max_cnt = max(1U, cap->max_send_sge);
        hr_qp->sq.max_gs = roundup_pow_of_two(max_cnt);

        /* Get buf size; the SQ and RQ are each aligned to the page size */
        hr_qp->buff_size = HNS_ROCE_ALOGN_UP((hr_qp->rq.wqe_cnt <<
                                              hr_qp->rq.wqe_shift), PAGE_SIZE) +
                           HNS_ROCE_ALOGN_UP((hr_qp->sq.wqe_cnt <<
                                              hr_qp->sq.wqe_shift), PAGE_SIZE);
        hr_qp->sq.offset = 0;
        hr_qp->rq.offset = HNS_ROCE_ALOGN_UP((hr_qp->sq.wqe_cnt <<
                                              hr_qp->sq.wqe_shift), PAGE_SIZE);

        /* Report the number of WRs and SGEs actually granted for sending */
        cap->max_send_wr = hr_qp->sq.max_post = hr_qp->sq.wqe_cnt;
        cap->max_send_sge = hr_qp->sq.max_gs;

        /* We don't support inline sends for kernel QPs (yet) */
        cap->max_inline_data = 0;

        return 0;
}

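/*
 * Common QP creation path shared by RC and GSI QPs. It sizes the work queues,
 * pins or allocates the WQE buffer (user memory via ib_umem for userspace
 * QPs, a kernel buffer plus wrid arrays otherwise), writes the MTT, reserves
 * a QPN (unless a special QPN was supplied via @sqpn), and registers the QP
 * so that asynchronous events can be delivered.
 */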
static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
                                     struct ib_pd *ib_pd,
                                     struct ib_qp_init_attr *init_attr,
                                     struct ib_udata *udata, unsigned long sqpn,
                                     struct hns_roce_qp *hr_qp)
{
        struct device *dev = &hr_dev->pdev->dev;
        struct hns_roce_ib_create_qp ucmd;
        unsigned long qpn = 0;
        int ret = 0;

        mutex_init(&hr_qp->mutex);
        spin_lock_init(&hr_qp->sq.lock);
        spin_lock_init(&hr_qp->rq.lock);

        hr_qp->state = IB_QPS_RESET;

        if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR)
                hr_qp->sq_signal_bits = IB_SIGNAL_ALL_WR;
        else
                hr_qp->sq_signal_bits = IB_SIGNAL_REQ_WR;

        ret = hns_roce_set_rq_size(hr_dev, &init_attr->cap, !!ib_pd->uobject,
                                   !!init_attr->srq, hr_qp);
        if (ret) {
                dev_err(dev, "hns_roce_set_rq_size failed\n");
                goto err_out;
        }

        if (ib_pd->uobject) {
                if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {
                        dev_err(dev, "ib_copy_from_udata error for create qp\n");
                        ret = -EFAULT;
                        goto err_out;
                }

                ret = hns_roce_set_user_sq_size(hr_dev, hr_qp, &ucmd);
                if (ret) {
                        dev_err(dev, "hns_roce_set_user_sq_size error for create qp\n");
                        goto err_out;
                }

                hr_qp->umem = ib_umem_get(ib_pd->uobject->context,
                                          ucmd.buf_addr, hr_qp->buff_size, 0,
                                          0);
                if (IS_ERR(hr_qp->umem)) {
                        dev_err(dev, "ib_umem_get error for create qp\n");
                        ret = PTR_ERR(hr_qp->umem);
                        goto err_out;
                }

                ret = hns_roce_mtt_init(hr_dev, ib_umem_page_count(hr_qp->umem),
                                        ilog2((unsigned int)hr_qp->umem->page_size),
                                        &hr_qp->mtt);
                if (ret) {
                        dev_err(dev, "hns_roce_mtt_init error for create qp\n");
                        goto err_buf;
                }

                ret = hns_roce_ib_umem_write_mtt(hr_dev, &hr_qp->mtt,
                                                 hr_qp->umem);
                if (ret) {
                        dev_err(dev, "hns_roce_ib_umem_write_mtt error for create qp\n");
                        goto err_mtt;
                }
        } else {
                if (init_attr->create_flags &
                    IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK) {
                        dev_err(dev, "init_attr->create_flags error!\n");
                        ret = -EINVAL;
                        goto err_out;
                }

                if (init_attr->create_flags & IB_QP_CREATE_IPOIB_UD_LSO) {
                        dev_err(dev, "init_attr->create_flags error!\n");
                        ret = -EINVAL;
                        goto err_out;
                }

                /* Set SQ size */
                ret = hns_roce_set_kernel_sq_size(hr_dev, &init_attr->cap,
                                                  hr_qp);
                if (ret) {
                        dev_err(dev, "hns_roce_set_kernel_sq_size error!\n");
                        goto err_out;
                }

                /* QP doorbell register address */
                hr_qp->sq.db_reg_l = hr_dev->reg_base + ROCEE_DB_SQ_L_0_REG +
                                     DB_REG_OFFSET * hr_dev->priv_uar.index;
                hr_qp->rq.db_reg_l = hr_dev->reg_base +
                                     ROCEE_DB_OTHERS_L_0_REG +
                                     DB_REG_OFFSET * hr_dev->priv_uar.index;

                /* Allocate QP buf */
                if (hns_roce_buf_alloc(hr_dev, hr_qp->buff_size, PAGE_SIZE * 2,
                                       &hr_qp->hr_buf)) {
                        dev_err(dev, "hns_roce_buf_alloc error!\n");
                        ret = -ENOMEM;
                        goto err_out;
                }

                /* Write MTT */
                ret = hns_roce_mtt_init(hr_dev, hr_qp->hr_buf.npages,
                                        hr_qp->hr_buf.page_shift, &hr_qp->mtt);
                if (ret) {
                        dev_err(dev, "hns_roce_mtt_init error for kernel create qp\n");
                        goto err_buf;
                }

                ret = hns_roce_buf_write_mtt(hr_dev, &hr_qp->mtt,
                                             &hr_qp->hr_buf);
                if (ret) {
                        dev_err(dev, "hns_roce_buf_write_mtt error for kernel create qp\n");
                        goto err_mtt;
                }

                hr_qp->sq.wrid = kmalloc_array(hr_qp->sq.wqe_cnt, sizeof(u64),
                                               GFP_KERNEL);
                hr_qp->rq.wrid = kmalloc_array(hr_qp->rq.wqe_cnt, sizeof(u64),
                                               GFP_KERNEL);
                if (!hr_qp->sq.wrid || !hr_qp->rq.wrid) {
                        ret = -ENOMEM;
                        goto err_wrid;
                }
        }

        if (sqpn) {
                qpn = sqpn;
        } else {
                /* Get QPN */
                ret = hns_roce_reserve_range_qp(hr_dev, 1, 1, &qpn);
                if (ret) {
                        dev_err(dev, "hns_roce_reserve_range_qp alloc qpn error\n");
                        goto err_wrid;
                }
        }

        if ((init_attr->qp_type) == IB_QPT_GSI) {
                ret = hns_roce_gsi_qp_alloc(hr_dev, qpn, hr_qp);
                if (ret) {
                        dev_err(dev, "hns_roce_qp_alloc failed!\n");
                        goto err_qpn;
                }
        } else {
                ret = hns_roce_qp_alloc(hr_dev, qpn, hr_qp);
                if (ret) {
                        dev_err(dev, "hns_roce_qp_alloc failed!\n");
                        goto err_qpn;
                }
        }

        if (sqpn)
                hr_qp->doorbell_qpn = 1;
        else
                hr_qp->doorbell_qpn = cpu_to_le64(hr_qp->qpn);

        hr_qp->event = hns_roce_ib_qp_event;

        return 0;

err_qpn:
        if (!sqpn)
                hns_roce_release_range_qp(hr_dev, qpn, 1);

err_wrid:
        kfree(hr_qp->sq.wrid);
        kfree(hr_qp->rq.wrid);

err_mtt:
        hns_roce_mtt_cleanup(hr_dev, &hr_qp->mtt);

err_buf:
        if (ib_pd->uobject)
                ib_umem_release(hr_qp->umem);
        else
                hns_roce_buf_free(hr_dev, hr_qp->buff_size, &hr_qp->hr_buf);

err_out:
        return ret;
}

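/*
 * ib_create_qp entry point. RC QPs allocate a plain hns_roce_qp and take a
 * QPN from the bitmap; GSI QPs are kernel-only, embed the QP in a
 * hns_roce_sqp, and use a fixed QPN derived from the physical port.
 */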
struct ib_qp *hns_roce_create_qp(struct ib_pd *pd,
                                 struct ib_qp_init_attr *init_attr,
                                 struct ib_udata *udata)
{
        struct hns_roce_dev *hr_dev = to_hr_dev(pd->device);
        struct device *dev = &hr_dev->pdev->dev;
        struct hns_roce_sqp *hr_sqp;
        struct hns_roce_qp *hr_qp;
        int ret;

        switch (init_attr->qp_type) {
        case IB_QPT_RC: {
                hr_qp = kzalloc(sizeof(*hr_qp), GFP_KERNEL);
                if (!hr_qp)
                        return ERR_PTR(-ENOMEM);

                ret = hns_roce_create_qp_common(hr_dev, pd, init_attr, udata, 0,
                                                hr_qp);
                if (ret) {
                        dev_err(dev, "Create RC QP failed\n");
                        kfree(hr_qp);
                        return ERR_PTR(ret);
                }

                hr_qp->ibqp.qp_num = hr_qp->qpn;

                break;
        }
        case IB_QPT_GSI: {
                /* Userspace is not allowed to create special QPs: */
                if (pd->uobject) {
                        dev_err(dev, "not support usr space GSI\n");
                        return ERR_PTR(-EINVAL);
                }

                hr_sqp = kzalloc(sizeof(*hr_sqp), GFP_KERNEL);
                if (!hr_sqp)
                        return ERR_PTR(-ENOMEM);

                hr_qp = &hr_sqp->hr_qp;
                hr_qp->port = init_attr->port_num - 1;
                hr_qp->phy_port = hr_dev->iboe.phy_port[hr_qp->port];
                hr_qp->ibqp.qp_num = hr_dev->caps.sqp_start +
                                     HNS_ROCE_MAX_PORTS +
                                     hr_dev->iboe.phy_port[hr_qp->port];

                ret = hns_roce_create_qp_common(hr_dev, pd, init_attr, udata,
                                                hr_qp->ibqp.qp_num, hr_qp);
                if (ret) {
                        dev_err(dev, "Create GSI QP failed!\n");
                        kfree(hr_sqp);
                        return ERR_PTR(ret);
                }

                break;
        }
        default:{
                dev_err(dev, "not support QP type %d\n", init_attr->qp_type);
                return ERR_PTR(-EINVAL);
        }
        }

        return &hr_qp->ibqp;
}

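/*
 * Map an IB QP type onto the hardware service type; GSI QPs use the UD
 * service type, and unsupported types return -1.
 */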
int to_hr_qp_type(int qp_type)
{
        int transport_type;

        if (qp_type == IB_QPT_RC)
                transport_type = SERV_TYPE_RC;
        else if (qp_type == IB_QPT_UC)
                transport_type = SERV_TYPE_UC;
        else if (qp_type == IB_QPT_UD)
                transport_type = SERV_TYPE_UD;
        else if (qp_type == IB_QPT_GSI)
                transport_type = SERV_TYPE_UD;
        else
                transport_type = -1;

        return transport_type;
}

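/*
 * ib_modify_qp entry point. Validate the requested state transition and the
 * supplied attributes against the device capabilities under the QP mutex,
 * then hand the actual transition off to the hardware-specific modify_qp op.
 */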
int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
                       int attr_mask, struct ib_udata *udata)
{
        struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
        struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
        enum ib_qp_state cur_state, new_state;
        struct device *dev = &hr_dev->pdev->dev;
        int ret = -EINVAL;
        int p;

        mutex_lock(&hr_qp->mutex);

        cur_state = attr_mask & IB_QP_CUR_STATE ?
                    attr->cur_qp_state : (enum ib_qp_state)hr_qp->state;
        new_state = attr_mask & IB_QP_STATE ?
                    attr->qp_state : cur_state;

        if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, attr_mask,
                                IB_LINK_LAYER_ETHERNET)) {
                dev_err(dev, "ib_modify_qp_is_ok failed\n");
                goto out;
        }

        if ((attr_mask & IB_QP_PORT) &&
            (attr->port_num == 0 || attr->port_num > hr_dev->caps.num_ports)) {
                dev_err(dev, "attr port_num invalid.attr->port_num=%d\n",
                        attr->port_num);
                goto out;
        }

        if (attr_mask & IB_QP_PKEY_INDEX) {
                p = attr_mask & IB_QP_PORT ? (attr->port_num - 1) : hr_qp->port;
                if (attr->pkey_index >= hr_dev->caps.pkey_table_len[p]) {
                        dev_err(dev, "attr pkey_index invalid.attr->pkey_index=%d\n",
                                attr->pkey_index);
                        goto out;
                }
        }

        if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
            attr->max_rd_atomic > hr_dev->caps.max_qp_init_rdma) {
                dev_err(dev, "attr max_rd_atomic invalid.attr->max_rd_atomic=%d\n",
                        attr->max_rd_atomic);
                goto out;
        }

        if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC &&
            attr->max_dest_rd_atomic > hr_dev->caps.max_qp_dest_rdma) {
                dev_err(dev, "attr max_dest_rd_atomic invalid.attr->max_dest_rd_atomic=%d\n",
                        attr->max_dest_rd_atomic);
                goto out;
        }

        if (cur_state == new_state && cur_state == IB_QPS_RESET) {
                ret = -EPERM;
                dev_err(dev, "cur_state=%d new_state=%d\n", cur_state,
                        new_state);
                goto out;
        }

        ret = hr_dev->hw->modify_qp(ibqp, attr, attr_mask, cur_state,
                                    new_state);

out:
        mutex_unlock(&hr_qp->mutex);

        return ret;
}

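/*
 * Lock a QP's send and receive CQs together. To avoid ABBA deadlocks the
 * locks are always taken in ascending CQN order (with a single lock when both
 * work queues share one CQ); hns_roce_unlock_cqs() releases them in the
 * reverse order.
 */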
void hns_roce_lock_cqs(struct hns_roce_cq *send_cq, struct hns_roce_cq *recv_cq)
                       __acquires(&send_cq->lock) __acquires(&recv_cq->lock)
{
        if (send_cq == recv_cq) {
                spin_lock_irq(&send_cq->lock);
                __acquire(&recv_cq->lock);
        } else if (send_cq->cqn < recv_cq->cqn) {
                spin_lock_irq(&send_cq->lock);
                spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING);
        } else {
                spin_lock_irq(&recv_cq->lock);
                spin_lock_nested(&send_cq->lock, SINGLE_DEPTH_NESTING);
        }
}

void hns_roce_unlock_cqs(struct hns_roce_cq *send_cq,
                         struct hns_roce_cq *recv_cq) __releases(&send_cq->lock)
                         __releases(&recv_cq->lock)
{
        if (send_cq == recv_cq) {
                __release(&recv_cq->lock);
                spin_unlock_irq(&send_cq->lock);
        } else if (send_cq->cqn < recv_cq->cqn) {
                spin_unlock(&recv_cq->lock);
                spin_unlock_irq(&send_cq->lock);
        } else {
                spin_unlock(&send_cq->lock);
                spin_unlock_irq(&recv_cq->lock);
        }
}

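/*
 * Build the immediate/extended transport header word for a send WR: the
 * immediate data for *_WITH_IMM opcodes, the rkey to invalidate for
 * SEND_WITH_INV, and zero otherwise.
 */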
__be32 send_ieth(struct ib_send_wr *wr)
{
        switch (wr->opcode) {
        case IB_WR_SEND_WITH_IMM:
        case IB_WR_RDMA_WRITE_WITH_IMM:
                return cpu_to_le32(wr->ex.imm_data);
        case IB_WR_SEND_WITH_INV:
                return cpu_to_le32(wr->ex.invalidate_rkey);
        default:
                return 0;
        }
}

static void *get_wqe(struct hns_roce_qp *hr_qp, int offset)
{
        return hns_roce_buf_offset(&hr_qp->hr_buf, offset);
}

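/*
 * Return the kernel virtual address of RQ/SQ WQE @n inside the QP buffer, or
 * NULL if the index is out of range.
 */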
void *get_recv_wqe(struct hns_roce_qp *hr_qp, int n)
{
        struct ib_qp *ibqp = &hr_qp->ibqp;
        struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);

        if ((n < 0) || (n > hr_qp->rq.wqe_cnt)) {
                dev_err(&hr_dev->pdev->dev, "rq wqe index:%d,rq wqe cnt:%d\r\n",
                        n, hr_qp->rq.wqe_cnt);
                return NULL;
        }

        return get_wqe(hr_qp, hr_qp->rq.offset + (n << hr_qp->rq.wqe_shift));
}

void *get_send_wqe(struct hns_roce_qp *hr_qp, int n)
{
        struct ib_qp *ibqp = &hr_qp->ibqp;
        struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);

        if ((n < 0) || (n > hr_qp->sq.wqe_cnt)) {
                dev_err(&hr_dev->pdev->dev, "sq wqe index:%d,sq wqe cnt:%d\r\n",
                        n, hr_qp->sq.wqe_cnt);
                return NULL;
        }

        return get_wqe(hr_qp, hr_qp->sq.offset + (n << hr_qp->sq.wqe_shift));
}

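/*
 * Check whether posting @nreq more WQEs would overflow the work queue. If the
 * fast unlocked check fails, re-read head/tail under the CQ lock so that
 * completions processed concurrently are taken into account.
 */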
bool hns_roce_wq_overflow(struct hns_roce_wq *hr_wq, int nreq,
                          struct ib_cq *ib_cq)
{
        struct hns_roce_cq *hr_cq;
        u32 cur;

        cur = hr_wq->head - hr_wq->tail;
        if (likely(cur + nreq < hr_wq->max_post))
                return 0;

        hr_cq = to_hr_cq(ib_cq);
        spin_lock(&hr_cq->lock);
        cur = hr_wq->head - hr_wq->tail;
        spin_unlock(&hr_cq->lock);

        return cur + nreq >= hr_wq->max_post;
}

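/*
 * Initialize the QP table at device setup: the table lock, the QPN radix
 * tree, and the QPN bitmap with the special-QP range reserved at the bottom.
 */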
int hns_roce_init_qp_table(struct hns_roce_dev *hr_dev)
{
        struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
        int reserved_from_top = 0;
        int ret;

        spin_lock_init(&qp_table->lock);
        INIT_RADIX_TREE(&hr_dev->qp_table_tree, GFP_ATOMIC);

        /* Each port includes two SQPs, so six ports need 12 reserved QPNs */
        ret = hns_roce_bitmap_init(&qp_table->bitmap, hr_dev->caps.num_qps,
                                   hr_dev->caps.num_qps - 1,
                                   hr_dev->caps.sqp_start + SQP_NUM,
                                   reserved_from_top);
        if (ret) {
                dev_err(&hr_dev->pdev->dev, "qp bitmap init failed!error=%d\n",
                        ret);
                return ret;
        }

        return 0;
}

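/* Tear down the QP table by releasing the QPN bitmap. */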
void hns_roce_cleanup_qp_table(struct hns_roce_dev *hr_dev)
{
        hns_roce_bitmap_cleanup(&hr_dev->qp_table.bitmap);
}