/*
 * Copyright (c) 2016 Hisilicon Limited.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/platform_device.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_umem.h>
#include "hns_roce_common.h"
#include "hns_roce_device.h"
#include "hns_roce_hem.h"
#include <rdma/hns-abi.h>

#define SQP_NUM (2 * HNS_ROCE_MAX_PORTS)

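/*
 * Deliver an asynchronous event to the QP it belongs to. The QP is looked
 * up under qp_table->lock and pinned with a reference so that it cannot be
 * freed while its event handler runs; hns_roce_qp_free() waits on &qp->free
 * for the last reference to be dropped.
 */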
void hns_roce_qp_event(struct hns_roce_dev *hr_dev, u32 qpn, int event_type)
{
	struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
	struct device *dev = hr_dev->dev;
	struct hns_roce_qp *qp;

	spin_lock(&qp_table->lock);

	qp = __hns_roce_qp_lookup(hr_dev, qpn);
	if (qp)
		atomic_inc(&qp->refcount);

	spin_unlock(&qp_table->lock);

	if (!qp) {
		dev_warn(dev, "Async event for bogus QP %08x\n", qpn);
		return;
	}

	qp->event(qp, (enum hns_roce_event)event_type);

	if (atomic_dec_and_test(&qp->refcount))
		complete(&qp->free);
}

static void hns_roce_ib_qp_event(struct hns_roce_qp *hr_qp,
				 enum hns_roce_event type)
{
	struct ib_event event;
	struct ib_qp *ibqp = &hr_qp->ibqp;

	if (ibqp->event_handler) {
		event.device = ibqp->device;
		event.element.qp = ibqp;
		switch (type) {
		case HNS_ROCE_EVENT_TYPE_PATH_MIG:
			event.event = IB_EVENT_PATH_MIG;
			break;
		case HNS_ROCE_EVENT_TYPE_COMM_EST:
			event.event = IB_EVENT_COMM_EST;
			break;
		case HNS_ROCE_EVENT_TYPE_SQ_DRAINED:
			event.event = IB_EVENT_SQ_DRAINED;
			break;
		case HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH:
			event.event = IB_EVENT_QP_LAST_WQE_REACHED;
			break;
		case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
			event.event = IB_EVENT_QP_FATAL;
			break;
		case HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED:
			event.event = IB_EVENT_PATH_MIG_ERR;
			break;
		case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
			event.event = IB_EVENT_QP_REQ_ERR;
			break;
		case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
			event.event = IB_EVENT_QP_ACCESS_ERR;
			break;
		default:
			dev_dbg(ibqp->device->dev.parent, "roce_ib: Unexpected event type %d on QP %06lx\n",
				type, hr_qp->qpn);
			return;
		}
		ibqp->event_handler(&event, ibqp->qp_context);
	}
}

static int hns_roce_reserve_range_qp(struct hns_roce_dev *hr_dev, int cnt,
				     int align, unsigned long *base)
{
	struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;

	return hns_roce_bitmap_alloc_range(&qp_table->bitmap, cnt, align, base);
}

enum hns_roce_qp_state to_hns_roce_state(enum ib_qp_state state)
{
	switch (state) {
	case IB_QPS_RESET:
		return HNS_ROCE_QP_STATE_RST;
	case IB_QPS_INIT:
		return HNS_ROCE_QP_STATE_INIT;
	case IB_QPS_RTR:
		return HNS_ROCE_QP_STATE_RTR;
	case IB_QPS_RTS:
		return HNS_ROCE_QP_STATE_RTS;
	case IB_QPS_SQD:
		return HNS_ROCE_QP_STATE_SQD;
	case IB_QPS_ERR:
		return HNS_ROCE_QP_STATE_ERR;
	default:
		return HNS_ROCE_QP_NUM_STATE;
	}
}
EXPORT_SYMBOL_GPL(to_hns_roce_state);

static int hns_roce_gsi_qp_alloc(struct hns_roce_dev *hr_dev, unsigned long qpn,
				 struct hns_roce_qp *hr_qp)
{
	struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
	int ret;

	if (!qpn)
		return -EINVAL;

	hr_qp->qpn = qpn;

	spin_lock_irq(&qp_table->lock);
	ret = radix_tree_insert(&hr_dev->qp_table_tree,
				hr_qp->qpn & (hr_dev->caps.num_qps - 1), hr_qp);
	spin_unlock_irq(&qp_table->lock);
	if (ret) {
		dev_err(hr_dev->dev, "QPC radix_tree_insert failed\n");
		goto err_put_irrl;
	}

	atomic_set(&hr_qp->refcount, 1);
	init_completion(&hr_qp->free);

	return 0;

err_put_irrl:

	return ret;
}

static int hns_roce_qp_alloc(struct hns_roce_dev *hr_dev, unsigned long qpn,
			     struct hns_roce_qp *hr_qp)
{
	struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
	struct device *dev = hr_dev->dev;
	int ret;

	if (!qpn)
		return -EINVAL;

	hr_qp->qpn = qpn;

	/* Alloc memory for QPC */
	ret = hns_roce_table_get(hr_dev, &qp_table->qp_table, hr_qp->qpn);
	if (ret) {
		dev_err(dev, "QPC table get failed\n");
		goto err_out;
	}

	/* Alloc memory for IRRL */
	ret = hns_roce_table_get(hr_dev, &qp_table->irrl_table, hr_qp->qpn);
	if (ret) {
		dev_err(dev, "IRRL table get failed\n");
		goto err_put_qp;
	}

	spin_lock_irq(&qp_table->lock);
	ret = radix_tree_insert(&hr_dev->qp_table_tree,
				hr_qp->qpn & (hr_dev->caps.num_qps - 1), hr_qp);
	spin_unlock_irq(&qp_table->lock);
	if (ret) {
		dev_err(dev, "QPC radix_tree_insert failed\n");
		goto err_put_irrl;
	}

	atomic_set(&hr_qp->refcount, 1);
	init_completion(&hr_qp->free);

	return 0;

err_put_irrl:
	hns_roce_table_put(hr_dev, &qp_table->irrl_table, hr_qp->qpn);

err_put_qp:
	hns_roce_table_put(hr_dev, &qp_table->qp_table, hr_qp->qpn);

err_out:
	return ret;
}

void hns_roce_qp_remove(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
{
	struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
	unsigned long flags;

	spin_lock_irqsave(&qp_table->lock, flags);
	radix_tree_delete(&hr_dev->qp_table_tree,
			  hr_qp->qpn & (hr_dev->caps.num_qps - 1));
	spin_unlock_irqrestore(&qp_table->lock, flags);
}
EXPORT_SYMBOL_GPL(hns_roce_qp_remove);

void hns_roce_qp_free(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
{
	struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;

	if (atomic_dec_and_test(&hr_qp->refcount))
		complete(&hr_qp->free);
	wait_for_completion(&hr_qp->free);

	if (hr_qp->ibqp.qp_type != IB_QPT_GSI) {
		hns_roce_table_put(hr_dev, &qp_table->irrl_table, hr_qp->qpn);
		hns_roce_table_put(hr_dev, &qp_table->qp_table, hr_qp->qpn);
	}
}
EXPORT_SYMBOL_GPL(hns_roce_qp_free);

void hns_roce_release_range_qp(struct hns_roce_dev *hr_dev, int base_qpn,
			       int cnt)
{
	struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;

	if (base_qpn < SQP_NUM)
		return;

	hns_roce_bitmap_free_range(&qp_table->bitmap, base_qpn, cnt, BITMAP_RR);
}
EXPORT_SYMBOL_GPL(hns_roce_release_range_qp);

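/*
 * Validate and size the receive queue: the WQE count and max_gs are rounded
 * up to powers of two so that wqe_shift-based indexing works, and the
 * granted values are reported back through @cap. A QP that uses an SRQ gets
 * an RQ of size zero.
 */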
static int hns_roce_set_rq_size(struct hns_roce_dev *hr_dev,
				struct ib_qp_cap *cap, int is_user, int has_srq,
				struct hns_roce_qp *hr_qp)
{
	struct device *dev = hr_dev->dev;
	u32 max_cnt;

	/* Check the validity of the requested RQ capabilities */
	if (cap->max_recv_wr > hr_dev->caps.max_wqes ||
	    cap->max_recv_sge > hr_dev->caps.max_rq_sg) {
		dev_err(dev, "RQ WR or sge error!max_recv_wr=%d max_recv_sge=%d\n",
			cap->max_recv_wr, cap->max_recv_sge);
		return -EINVAL;
	}

	/* If an SRQ is used, the RQ parameters must be zero */
	if (has_srq) {
		if (cap->max_recv_wr) {
			dev_dbg(dev, "srq no need config max_recv_wr\n");
			return -EINVAL;
		}

		hr_qp->rq.wqe_cnt = hr_qp->rq.max_gs = 0;
	} else {
		if (is_user && (!cap->max_recv_wr || !cap->max_recv_sge)) {
			dev_err(dev, "user space no need config max_recv_wr max_recv_sge\n");
			return -EINVAL;
		}

		if (hr_dev->caps.min_wqes)
			max_cnt = max(cap->max_recv_wr, hr_dev->caps.min_wqes);
		else
			max_cnt = cap->max_recv_wr;

		hr_qp->rq.wqe_cnt = roundup_pow_of_two(max_cnt);

		if ((u32)hr_qp->rq.wqe_cnt > hr_dev->caps.max_wqes) {
			dev_err(dev, "while setting rq size, rq.wqe_cnt too large\n");
			return -EINVAL;
		}

		max_cnt = max(1U, cap->max_recv_sge);
		hr_qp->rq.max_gs = roundup_pow_of_two(max_cnt);
		if (hr_dev->caps.max_rq_sg <= 2)
			hr_qp->rq.wqe_shift =
				ilog2(hr_dev->caps.max_rq_desc_sz);
		else
			hr_qp->rq.wqe_shift =
				ilog2(hr_dev->caps.max_rq_desc_sz
				      * hr_qp->rq.max_gs);
	}

	cap->max_recv_wr = hr_qp->rq.max_post = hr_qp->rq.wqe_cnt;
	cap->max_recv_sge = hr_qp->rq.max_gs;

	return 0;
}

static int hns_roce_set_user_sq_size(struct hns_roce_dev *hr_dev,
				     struct ib_qp_cap *cap,
				     struct hns_roce_qp *hr_qp,
				     struct hns_roce_ib_create_qp *ucmd)
{
	u32 roundup_sq_stride = roundup_pow_of_two(hr_dev->caps.max_sq_desc_sz);
	u8 max_sq_stride = ilog2(roundup_sq_stride);
	u32 max_cnt;

	/* Sanity check SQ size before proceeding */
	if ((u32)(1 << ucmd->log_sq_bb_count) > hr_dev->caps.max_wqes ||
	    ucmd->log_sq_stride > max_sq_stride ||
	    ucmd->log_sq_stride < HNS_ROCE_IB_MIN_SQ_STRIDE) {
		dev_err(hr_dev->dev, "check SQ size error!\n");
		return -EINVAL;
	}

	if (cap->max_send_sge > hr_dev->caps.max_sq_sg) {
		dev_err(hr_dev->dev, "SQ sge error! max_send_sge=%d\n",
			cap->max_send_sge);
		return -EINVAL;
	}

	hr_qp->sq.wqe_cnt = 1 << ucmd->log_sq_bb_count;
	hr_qp->sq.wqe_shift = ucmd->log_sq_stride;

	max_cnt = max(1U, cap->max_send_sge);
	if (hr_dev->caps.max_sq_sg <= 2)
		hr_qp->sq.max_gs = roundup_pow_of_two(max_cnt);
	else
		hr_qp->sq.max_gs = max_cnt;

	if (hr_qp->sq.max_gs > 2)
		hr_qp->sge.sge_cnt = roundup_pow_of_two(hr_qp->sq.wqe_cnt *
							(hr_qp->sq.max_gs - 2));
	hr_qp->sge.sge_shift = 4;

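	/*
	 * Resulting QP buffer layout, with each region aligned to PAGE_SIZE:
	 *
	 *   max_sq_sg <= 2:  [ SQ WQEs ][ RQ WQEs ]
	 *   max_sq_sg >  2:  [ SQ WQEs ][ extended SGEs ][ RQ WQEs ]
	 */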
	/* Get buf size; SQ and RQ are aligned to page size */
	if (hr_dev->caps.max_sq_sg <= 2) {
		hr_qp->buff_size = HNS_ROCE_ALOGN_UP((hr_qp->rq.wqe_cnt <<
					hr_qp->rq.wqe_shift), PAGE_SIZE) +
				   HNS_ROCE_ALOGN_UP((hr_qp->sq.wqe_cnt <<
					hr_qp->sq.wqe_shift), PAGE_SIZE);

		hr_qp->sq.offset = 0;
		hr_qp->rq.offset = HNS_ROCE_ALOGN_UP((hr_qp->sq.wqe_cnt <<
					hr_qp->sq.wqe_shift), PAGE_SIZE);
	} else {
		hr_qp->buff_size = HNS_ROCE_ALOGN_UP((hr_qp->rq.wqe_cnt <<
					hr_qp->rq.wqe_shift), PAGE_SIZE) +
				   HNS_ROCE_ALOGN_UP((hr_qp->sge.sge_cnt <<
					hr_qp->sge.sge_shift), PAGE_SIZE) +
				   HNS_ROCE_ALOGN_UP((hr_qp->sq.wqe_cnt <<
					hr_qp->sq.wqe_shift), PAGE_SIZE);

		hr_qp->sq.offset = 0;
		if (hr_qp->sge.sge_cnt) {
			hr_qp->sge.offset = HNS_ROCE_ALOGN_UP(
						(hr_qp->sq.wqe_cnt <<
						 hr_qp->sq.wqe_shift),
						PAGE_SIZE);
			hr_qp->rq.offset = hr_qp->sge.offset +
					   HNS_ROCE_ALOGN_UP((hr_qp->sge.sge_cnt <<
						hr_qp->sge.sge_shift),
						PAGE_SIZE);
		} else {
			hr_qp->rq.offset = HNS_ROCE_ALOGN_UP(
						(hr_qp->sq.wqe_cnt <<
						 hr_qp->sq.wqe_shift),
						PAGE_SIZE);
		}
	}

	return 0;
}

static int hns_roce_set_kernel_sq_size(struct hns_roce_dev *hr_dev,
				       struct ib_qp_cap *cap,
				       struct hns_roce_qp *hr_qp)
{
	struct device *dev = hr_dev->dev;
	u32 max_cnt;
	int size;

	if (cap->max_send_wr > hr_dev->caps.max_wqes ||
	    cap->max_send_sge > hr_dev->caps.max_sq_sg ||
	    cap->max_inline_data > hr_dev->caps.max_sq_inline) {
		dev_err(dev, "SQ WR or sge or inline data error!\n");
		return -EINVAL;
	}

	hr_qp->sq.wqe_shift = ilog2(hr_dev->caps.max_sq_desc_sz);
	hr_qp->sq_max_wqes_per_wr = 1;
	hr_qp->sq_spare_wqes = 0;

	if (hr_dev->caps.min_wqes)
		max_cnt = max(cap->max_send_wr, hr_dev->caps.min_wqes);
	else
		max_cnt = cap->max_send_wr;

	hr_qp->sq.wqe_cnt = roundup_pow_of_two(max_cnt);
	if ((u32)hr_qp->sq.wqe_cnt > hr_dev->caps.max_wqes) {
		dev_err(dev, "while setting kernel sq size, sq.wqe_cnt too large\n");
		return -EINVAL;
	}

	/* Get the number of data segments per WQE */
	max_cnt = max(1U, cap->max_send_sge);
	if (hr_dev->caps.max_sq_sg <= 2)
		hr_qp->sq.max_gs = roundup_pow_of_two(max_cnt);
	else
		hr_qp->sq.max_gs = max_cnt;

	if (hr_qp->sq.max_gs > 2) {
		hr_qp->sge.sge_cnt = roundup_pow_of_two(hr_qp->sq.wqe_cnt *
							(hr_qp->sq.max_gs - 2));
		hr_qp->sge.sge_shift = 4;
	}

	/* Get buf size; SQ and RQ are aligned to PAGE_SIZE */
	hr_qp->sq.offset = 0;
	size = HNS_ROCE_ALOGN_UP(hr_qp->sq.wqe_cnt << hr_qp->sq.wqe_shift,
				 PAGE_SIZE);

	if (hr_dev->caps.max_sq_sg > 2 && hr_qp->sge.sge_cnt) {
		hr_qp->sge.offset = size;
		size += HNS_ROCE_ALOGN_UP(hr_qp->sge.sge_cnt <<
					  hr_qp->sge.sge_shift, PAGE_SIZE);
	}

	hr_qp->rq.offset = size;
	size += HNS_ROCE_ALOGN_UP((hr_qp->rq.wqe_cnt << hr_qp->rq.wqe_shift),
				  PAGE_SIZE);
	hr_qp->buff_size = size;

	/* Report back the WR and SGE numbers actually granted for sends */
	cap->max_send_wr = hr_qp->sq.max_post = hr_qp->sq.wqe_cnt;
	cap->max_send_sge = hr_qp->sq.max_gs;

	/* We don't support inline sends for kernel QPs (yet) */
	cap->max_inline_data = 0;

	return 0;
}

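/*
 * Common QP creation path: size the work queues, then either map the
 * userspace buffer into an MTT (user QPs) or allocate the buffer, doorbell
 * addresses and wrid arrays (kernel QPs), reserve a QPN, and finally
 * install the QP in the QPN radix tree so async events can find it.
 */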
static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
				     struct ib_pd *ib_pd,
				     struct ib_qp_init_attr *init_attr,
				     struct ib_udata *udata, unsigned long sqpn,
				     struct hns_roce_qp *hr_qp)
{
	struct device *dev = hr_dev->dev;
	struct hns_roce_ib_create_qp ucmd;
	unsigned long qpn = 0;
	int ret = 0;

	mutex_init(&hr_qp->mutex);
	spin_lock_init(&hr_qp->sq.lock);
	spin_lock_init(&hr_qp->rq.lock);

	hr_qp->state = IB_QPS_RESET;

	if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR)
		hr_qp->sq_signal_bits = IB_SIGNAL_ALL_WR;
	else
		hr_qp->sq_signal_bits = IB_SIGNAL_REQ_WR;

	ret = hns_roce_set_rq_size(hr_dev, &init_attr->cap, !!ib_pd->uobject,
				   !!init_attr->srq, hr_qp);
	if (ret) {
		dev_err(dev, "hns_roce_set_rq_size failed\n");
		goto err_out;
	}

	if (ib_pd->uobject) {
		if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {
			dev_err(dev, "ib_copy_from_udata error for create qp\n");
			ret = -EFAULT;
			goto err_out;
		}

		ret = hns_roce_set_user_sq_size(hr_dev, &init_attr->cap, hr_qp,
						&ucmd);
		if (ret) {
			dev_err(dev, "hns_roce_set_user_sq_size error for create qp\n");
			goto err_out;
		}

		hr_qp->umem = ib_umem_get(ib_pd->uobject->context,
					  ucmd.buf_addr, hr_qp->buff_size, 0,
					  0);
		if (IS_ERR(hr_qp->umem)) {
			dev_err(dev, "ib_umem_get error for create qp\n");
			ret = PTR_ERR(hr_qp->umem);
			goto err_out;
		}

		hr_qp->mtt.mtt_type = MTT_TYPE_WQE;
		ret = hns_roce_mtt_init(hr_dev, ib_umem_page_count(hr_qp->umem),
					hr_qp->umem->page_shift, &hr_qp->mtt);
		if (ret) {
			dev_err(dev, "hns_roce_mtt_init error for create qp\n");
			goto err_buf;
		}

		ret = hns_roce_ib_umem_write_mtt(hr_dev, &hr_qp->mtt,
						 hr_qp->umem);
		if (ret) {
			dev_err(dev, "hns_roce_ib_umem_write_mtt error for create qp\n");
			goto err_mtt;
		}
	} else {
		if (init_attr->create_flags &
		    IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK) {
			dev_err(dev, "init_attr->create_flags error!\n");
			ret = -EINVAL;
			goto err_out;
		}

		if (init_attr->create_flags & IB_QP_CREATE_IPOIB_UD_LSO) {
			dev_err(dev, "init_attr->create_flags error!\n");
			ret = -EINVAL;
			goto err_out;
		}

		/* Set SQ size */
		ret = hns_roce_set_kernel_sq_size(hr_dev, &init_attr->cap,
						  hr_qp);
		if (ret) {
			dev_err(dev, "hns_roce_set_kernel_sq_size error!\n");
			goto err_out;
		}

		/* QP doorbell register address */
		hr_qp->sq.db_reg_l = hr_dev->reg_base + ROCEE_DB_SQ_L_0_REG +
				     DB_REG_OFFSET * hr_dev->priv_uar.index;
		hr_qp->rq.db_reg_l = hr_dev->reg_base +
				     ROCEE_DB_OTHERS_L_0_REG +
				     DB_REG_OFFSET * hr_dev->priv_uar.index;

		/* Allocate QP buf */
		if (hns_roce_buf_alloc(hr_dev, hr_qp->buff_size, PAGE_SIZE * 2,
				       &hr_qp->hr_buf)) {
			dev_err(dev, "hns_roce_buf_alloc error!\n");
			ret = -ENOMEM;
			goto err_out;
		}

		hr_qp->mtt.mtt_type = MTT_TYPE_WQE;
		/* Write MTT */
		ret = hns_roce_mtt_init(hr_dev, hr_qp->hr_buf.npages,
					hr_qp->hr_buf.page_shift, &hr_qp->mtt);
		if (ret) {
			dev_err(dev, "hns_roce_mtt_init error for kernel create qp\n");
			goto err_buf;
		}

		ret = hns_roce_buf_write_mtt(hr_dev, &hr_qp->mtt,
					     &hr_qp->hr_buf);
		if (ret) {
			dev_err(dev, "hns_roce_buf_write_mtt error for kernel create qp\n");
			goto err_mtt;
		}

		hr_qp->sq.wrid = kmalloc_array(hr_qp->sq.wqe_cnt, sizeof(u64),
					       GFP_KERNEL);
		hr_qp->rq.wrid = kmalloc_array(hr_qp->rq.wqe_cnt, sizeof(u64),
					       GFP_KERNEL);
		if (!hr_qp->sq.wrid || !hr_qp->rq.wrid) {
			ret = -ENOMEM;
			goto err_wrid;
		}
	}

	if (sqpn) {
		qpn = sqpn;
	} else {
		/* Get QPN */
		ret = hns_roce_reserve_range_qp(hr_dev, 1, 1, &qpn);
		if (ret) {
			dev_err(dev, "hns_roce_reserve_range_qp alloc qpn error\n");
			goto err_wrid;
		}
	}

	if (init_attr->qp_type == IB_QPT_GSI &&
	    hr_dev->hw_rev == HNS_ROCE_HW_VER1) {
		/* In the hw v1 engine, the GSI QP context is held in the
		 * RoCE engine's registers, so no QPC/IRRL memory is needed.
		 */
		ret = hns_roce_gsi_qp_alloc(hr_dev, qpn, hr_qp);
		if (ret) {
			dev_err(dev, "hns_roce_qp_alloc failed!\n");
			goto err_qpn;
		}
	} else {
		ret = hns_roce_qp_alloc(hr_dev, qpn, hr_qp);
		if (ret) {
			dev_err(dev, "hns_roce_qp_alloc failed!\n");
			goto err_qpn;
		}
	}

	if (sqpn)
		hr_qp->doorbell_qpn = 1;
	else
		hr_qp->doorbell_qpn = cpu_to_le64(hr_qp->qpn);

	hr_qp->event = hns_roce_ib_qp_event;

	return 0;

err_qpn:
	if (!sqpn)
		hns_roce_release_range_qp(hr_dev, qpn, 1);

err_wrid:
	kfree(hr_qp->sq.wrid);
	kfree(hr_qp->rq.wrid);

err_mtt:
	hns_roce_mtt_cleanup(hr_dev, &hr_qp->mtt);

err_buf:
	if (ib_pd->uobject)
		ib_umem_release(hr_qp->umem);
	else
		hns_roce_buf_free(hr_dev, hr_qp->buff_size, &hr_qp->hr_buf);

err_out:
	return ret;
}

struct ib_qp *hns_roce_create_qp(struct ib_pd *pd,
				 struct ib_qp_init_attr *init_attr,
				 struct ib_udata *udata)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(pd->device);
	struct device *dev = hr_dev->dev;
	struct hns_roce_sqp *hr_sqp;
	struct hns_roce_qp *hr_qp;
	int ret;

	switch (init_attr->qp_type) {
	case IB_QPT_RC: {
		hr_qp = kzalloc(sizeof(*hr_qp), GFP_KERNEL);
		if (!hr_qp)
			return ERR_PTR(-ENOMEM);

		ret = hns_roce_create_qp_common(hr_dev, pd, init_attr, udata, 0,
						hr_qp);
		if (ret) {
			dev_err(dev, "Create RC QP failed\n");
			kfree(hr_qp);
			return ERR_PTR(ret);
		}

		hr_qp->ibqp.qp_num = hr_qp->qpn;

		break;
	}
	case IB_QPT_GSI: {
		/* Userspace is not allowed to create special QPs: */
		if (pd->uobject) {
			dev_err(dev, "not support usr space GSI\n");
			return ERR_PTR(-EINVAL);
		}

		hr_sqp = kzalloc(sizeof(*hr_sqp), GFP_KERNEL);
		if (!hr_sqp)
			return ERR_PTR(-ENOMEM);

		hr_qp = &hr_sqp->hr_qp;
		hr_qp->port = init_attr->port_num - 1;
		hr_qp->phy_port = hr_dev->iboe.phy_port[hr_qp->port];
		hr_qp->ibqp.qp_num = HNS_ROCE_MAX_PORTS +
				     hr_dev->iboe.phy_port[hr_qp->port];

		ret = hns_roce_create_qp_common(hr_dev, pd, init_attr, udata,
						hr_qp->ibqp.qp_num, hr_qp);
		if (ret) {
			dev_err(dev, "Create GSI QP failed!\n");
			kfree(hr_sqp);
			return ERR_PTR(ret);
		}

		break;
	}
	default: {
		dev_err(dev, "not support QP type %d\n", init_attr->qp_type);
		return ERR_PTR(-EINVAL);
	}
	}

	return &hr_qp->ibqp;
}
EXPORT_SYMBOL_GPL(hns_roce_create_qp);

int to_hr_qp_type(int qp_type)
{
	int transport_type;

	if (qp_type == IB_QPT_RC)
		transport_type = SERV_TYPE_RC;
	else if (qp_type == IB_QPT_UC)
		transport_type = SERV_TYPE_UC;
	else if (qp_type == IB_QPT_UD)
		transport_type = SERV_TYPE_UD;
	else if (qp_type == IB_QPT_GSI)
		transport_type = SERV_TYPE_UD;
	else
		transport_type = -1;

	return transport_type;
}
EXPORT_SYMBOL_GPL(to_hr_qp_type);

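/*
 * Validate the attributes common to all hw versions (state transition,
 * port, pkey index, path MTU, atomic limits), then hand the actual
 * transition over to the hw-specific modify_qp hook.
 */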
int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		       int attr_mask, struct ib_udata *udata)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
	enum ib_qp_state cur_state, new_state;
	struct device *dev = hr_dev->dev;
	int ret = -EINVAL;
	int p;
	enum ib_mtu active_mtu;

	mutex_lock(&hr_qp->mutex);

	cur_state = attr_mask & IB_QP_CUR_STATE ?
		    attr->cur_qp_state : (enum ib_qp_state)hr_qp->state;
	new_state = attr_mask & IB_QP_STATE ?
		    attr->qp_state : cur_state;

	if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, attr_mask,
				IB_LINK_LAYER_ETHERNET)) {
		dev_err(dev, "ib_modify_qp_is_ok failed\n");
		goto out;
	}

	if ((attr_mask & IB_QP_PORT) &&
	    (attr->port_num == 0 || attr->port_num > hr_dev->caps.num_ports)) {
		dev_err(dev, "attr port_num invalid.attr->port_num=%d\n",
			attr->port_num);
		goto out;
	}

	if (attr_mask & IB_QP_PKEY_INDEX) {
		p = attr_mask & IB_QP_PORT ? (attr->port_num - 1) : hr_qp->port;
		if (attr->pkey_index >= hr_dev->caps.pkey_table_len[p]) {
			dev_err(dev, "attr pkey_index invalid.attr->pkey_index=%d\n",
				attr->pkey_index);
			goto out;
		}
	}

	if (attr_mask & IB_QP_PATH_MTU) {
		p = attr_mask & IB_QP_PORT ? (attr->port_num - 1) : hr_qp->port;
		active_mtu = iboe_get_mtu(hr_dev->iboe.netdevs[p]->mtu);

		if ((hr_dev->caps.max_mtu == IB_MTU_4096 &&
		     attr->path_mtu > IB_MTU_4096) ||
		    (hr_dev->caps.max_mtu == IB_MTU_2048 &&
		     attr->path_mtu > IB_MTU_2048) ||
		    attr->path_mtu < IB_MTU_256 ||
		    attr->path_mtu > active_mtu) {
			dev_err(dev, "attr path_mtu(%d)invalid while modify qp",
				attr->path_mtu);
			goto out;
		}
	}

	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
	    attr->max_rd_atomic > hr_dev->caps.max_qp_init_rdma) {
		dev_err(dev, "attr max_rd_atomic invalid.attr->max_rd_atomic=%d\n",
			attr->max_rd_atomic);
		goto out;
	}

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC &&
	    attr->max_dest_rd_atomic > hr_dev->caps.max_qp_dest_rdma) {
		dev_err(dev, "attr max_dest_rd_atomic invalid.attr->max_dest_rd_atomic=%d\n",
			attr->max_dest_rd_atomic);
		goto out;
	}

	if (cur_state == new_state && cur_state == IB_QPS_RESET) {
		ret = 0;
		goto out;
	}

	ret = hr_dev->hw->modify_qp(ibqp, attr, attr_mask, cur_state,
				    new_state);

out:
	mutex_unlock(&hr_qp->mutex);

	return ret;
}

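/*
 * Lock the send and receive CQs in a fixed order (by ascending CQN) so
 * that concurrent callers locking the same pair can never deadlock. When
 * both work queues share one CQ it is locked only once; the second
 * acquire is just a sparse annotation.
 */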
void hns_roce_lock_cqs(struct hns_roce_cq *send_cq, struct hns_roce_cq *recv_cq)
		       __acquires(&send_cq->lock) __acquires(&recv_cq->lock)
{
	if (send_cq == recv_cq) {
		spin_lock_irq(&send_cq->lock);
		__acquire(&recv_cq->lock);
	} else if (send_cq->cqn < recv_cq->cqn) {
		spin_lock_irq(&send_cq->lock);
		spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING);
	} else {
		spin_lock_irq(&recv_cq->lock);
		spin_lock_nested(&send_cq->lock, SINGLE_DEPTH_NESTING);
	}
}
EXPORT_SYMBOL_GPL(hns_roce_lock_cqs);

void hns_roce_unlock_cqs(struct hns_roce_cq *send_cq,
			 struct hns_roce_cq *recv_cq) __releases(&send_cq->lock)
			 __releases(&recv_cq->lock)
{
	if (send_cq == recv_cq) {
		__release(&recv_cq->lock);
		spin_unlock_irq(&send_cq->lock);
	} else if (send_cq->cqn < recv_cq->cqn) {
		spin_unlock(&recv_cq->lock);
		spin_unlock_irq(&send_cq->lock);
	} else {
		spin_unlock(&send_cq->lock);
		spin_unlock_irq(&recv_cq->lock);
	}
}
EXPORT_SYMBOL_GPL(hns_roce_unlock_cqs);

__be32 send_ieth(struct ib_send_wr *wr)
{
	switch (wr->opcode) {
	case IB_WR_SEND_WITH_IMM:
	case IB_WR_RDMA_WRITE_WITH_IMM:
		return cpu_to_le32(wr->ex.imm_data);
	case IB_WR_SEND_WITH_INV:
		return cpu_to_le32(wr->ex.invalidate_rkey);
	default:
		return 0;
	}
}
EXPORT_SYMBOL_GPL(send_ieth);

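/*
 * WQE accessors: turn a WQE index into a pointer inside the QP buffer
 * using the queue's byte offset and its shift (log2 of the entry stride).
 */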
static void *get_wqe(struct hns_roce_qp *hr_qp, int offset)
{
	return hns_roce_buf_offset(&hr_qp->hr_buf, offset);
}

void *get_recv_wqe(struct hns_roce_qp *hr_qp, int n)
{
	return get_wqe(hr_qp, hr_qp->rq.offset + (n << hr_qp->rq.wqe_shift));
}
EXPORT_SYMBOL_GPL(get_recv_wqe);

void *get_send_wqe(struct hns_roce_qp *hr_qp, int n)
{
	return get_wqe(hr_qp, hr_qp->sq.offset + (n << hr_qp->sq.wqe_shift));
}
EXPORT_SYMBOL_GPL(get_send_wqe);

void *get_send_extend_sge(struct hns_roce_qp *hr_qp, int n)
{
	return hns_roce_buf_offset(&hr_qp->hr_buf, hr_qp->sge.offset +
				   (n << hr_qp->sge.sge_shift));
}
EXPORT_SYMBOL_GPL(get_send_extend_sge);

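/*
 * Check whether posting @nreq more WRs would overflow the work queue. The
 * first head/tail read is an unlocked fast path; on apparent overflow the
 * counters are re-read under the CQ lock, on the assumption that the
 * completion path advances the tail while holding that lock.
 */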
bool hns_roce_wq_overflow(struct hns_roce_wq *hr_wq, int nreq,
			  struct ib_cq *ib_cq)
{
	struct hns_roce_cq *hr_cq;
	u32 cur;

	cur = hr_wq->head - hr_wq->tail;
	if (likely(cur + nreq < hr_wq->max_post))
		return false;

	hr_cq = to_hr_cq(ib_cq);
	spin_lock(&hr_cq->lock);
	cur = hr_wq->head - hr_wq->tail;
	spin_unlock(&hr_cq->lock);

	return cur + nreq >= hr_wq->max_post;
}
EXPORT_SYMBOL_GPL(hns_roce_wq_overflow);

int hns_roce_init_qp_table(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
	int reserved_from_top = 0;
	int ret;

	spin_lock_init(&qp_table->lock);
	INIT_RADIX_TREE(&hr_dev->qp_table_tree, GFP_ATOMIC);

	/* Each port owns two SQPs, so six ports reserve 12 QPNs in total */
	ret = hns_roce_bitmap_init(&qp_table->bitmap, hr_dev->caps.num_qps,
				   hr_dev->caps.num_qps - 1, SQP_NUM,
				   reserved_from_top);
	if (ret) {
		dev_err(hr_dev->dev, "qp bitmap init failed!error=%d\n",
			ret);
		return ret;
	}

	return 0;
}

void hns_roce_cleanup_qp_table(struct hns_roce_dev *hr_dev)
{
	hns_roce_bitmap_cleanup(&hr_dev->qp_table.bitmap);
}
933}