/*
 * Copyright (c) 2016 Hisilicon Limited.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/platform_device.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_umem.h>
#include "hns_roce_common.h"
#include "hns_roce_device.h"
#include "hns_roce_hem.h"
#include <rdma/hns-abi.h>

#define SQP_NUM				(2 * HNS_ROCE_MAX_PORTS)

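/*
 * Dispatch an asynchronous event reported by hardware to the QP that
 * owns it.  The QP is looked up under the table lock and pinned with a
 * reference so that it cannot be freed while its event handler runs.
 */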
void hns_roce_qp_event(struct hns_roce_dev *hr_dev, u32 qpn, int event_type)
{
	struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
	struct device *dev = hr_dev->dev;
	struct hns_roce_qp *qp;

	spin_lock(&qp_table->lock);

	qp = __hns_roce_qp_lookup(hr_dev, qpn);
	if (qp)
		atomic_inc(&qp->refcount);

	spin_unlock(&qp_table->lock);

	if (!qp) {
		dev_warn(dev, "Async event for bogus QP %08x\n", qpn);
		return;
	}

	qp->event(qp, (enum hns_roce_event)event_type);

	if (atomic_dec_and_test(&qp->refcount))
		complete(&qp->free);
}
EXPORT_SYMBOL_GPL(hns_roce_qp_event);

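/*
 * Translate a hardware event code into the matching ib_event and pass
 * it to the consumer's registered event handler, if any.
 */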
static void hns_roce_ib_qp_event(struct hns_roce_qp *hr_qp,
				 enum hns_roce_event type)
{
	struct ib_event event;
	struct ib_qp *ibqp = &hr_qp->ibqp;

	if (ibqp->event_handler) {
		event.device = ibqp->device;
		event.element.qp = ibqp;
		switch (type) {
		case HNS_ROCE_EVENT_TYPE_PATH_MIG:
			event.event = IB_EVENT_PATH_MIG;
			break;
		case HNS_ROCE_EVENT_TYPE_COMM_EST:
			event.event = IB_EVENT_COMM_EST;
			break;
		case HNS_ROCE_EVENT_TYPE_SQ_DRAINED:
			event.event = IB_EVENT_SQ_DRAINED;
			break;
		case HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH:
			event.event = IB_EVENT_QP_LAST_WQE_REACHED;
			break;
		case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
			event.event = IB_EVENT_QP_FATAL;
			break;
		case HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED:
			event.event = IB_EVENT_PATH_MIG_ERR;
			break;
		case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
			event.event = IB_EVENT_QP_REQ_ERR;
			break;
		case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
			event.event = IB_EVENT_QP_ACCESS_ERR;
			break;
		default:
			dev_dbg(ibqp->device->dev.parent, "roce_ib: Unexpected event type %d on QP %06lx\n",
				type, hr_qp->qpn);
			return;
		}
		ibqp->event_handler(&event, ibqp->qp_context);
	}
}

static int hns_roce_reserve_range_qp(struct hns_roce_dev *hr_dev, int cnt,
				     int align, unsigned long *base)
{
	struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;

	return hns_roce_bitmap_alloc_range(&qp_table->bitmap, cnt, align, base);
}

enum hns_roce_qp_state to_hns_roce_state(enum ib_qp_state state)
{
	switch (state) {
	case IB_QPS_RESET:
		return HNS_ROCE_QP_STATE_RST;
	case IB_QPS_INIT:
		return HNS_ROCE_QP_STATE_INIT;
	case IB_QPS_RTR:
		return HNS_ROCE_QP_STATE_RTR;
	case IB_QPS_RTS:
		return HNS_ROCE_QP_STATE_RTS;
	case IB_QPS_SQD:
		return HNS_ROCE_QP_STATE_SQD;
	case IB_QPS_ERR:
		return HNS_ROCE_QP_STATE_ERR;
	default:
		return HNS_ROCE_QP_NUM_STATE;
	}
}
EXPORT_SYMBOL_GPL(to_hns_roce_state);

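/*
 * Register a GSI QP in the QP table.  On the v1 engine the GSI QP
 * context lives in the RoCE engine's registers, so no QPC/IRRL memory
 * is reserved here; only the radix-tree entry and the reference count
 * are set up.
 */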
static int hns_roce_gsi_qp_alloc(struct hns_roce_dev *hr_dev, unsigned long qpn,
				 struct hns_roce_qp *hr_qp)
{
	struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
	int ret;

	if (!qpn)
		return -EINVAL;

	hr_qp->qpn = qpn;

	spin_lock_irq(&qp_table->lock);
	ret = radix_tree_insert(&hr_dev->qp_table_tree,
				hr_qp->qpn & (hr_dev->caps.num_qps - 1), hr_qp);
	spin_unlock_irq(&qp_table->lock);
	if (ret) {
		dev_err(hr_dev->dev, "QPC radix_tree_insert failed\n");
		goto err_put_irrl;
	}

	atomic_set(&hr_qp->refcount, 1);
	init_completion(&hr_qp->free);

	return 0;

err_put_irrl:

	return ret;
}

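/*
 * Allocate the per-QP hardware contexts (QPC, IRRL and, when the
 * device supports it, TRRL) and publish the QP in the radix tree.
 */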
static int hns_roce_qp_alloc(struct hns_roce_dev *hr_dev, unsigned long qpn,
			     struct hns_roce_qp *hr_qp)
{
	struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
	struct device *dev = hr_dev->dev;
	int ret;

	if (!qpn)
		return -EINVAL;

	hr_qp->qpn = qpn;

	/* Alloc memory for QPC */
	ret = hns_roce_table_get(hr_dev, &qp_table->qp_table, hr_qp->qpn);
	if (ret) {
		dev_err(dev, "QPC table get failed\n");
		goto err_out;
	}

	/* Alloc memory for IRRL */
	ret = hns_roce_table_get(hr_dev, &qp_table->irrl_table, hr_qp->qpn);
	if (ret) {
		dev_err(dev, "IRRL table get failed\n");
		goto err_put_qp;
	}

	if (hr_dev->caps.trrl_entry_sz) {
		/* Alloc memory for TRRL */
		ret = hns_roce_table_get(hr_dev, &qp_table->trrl_table,
					 hr_qp->qpn);
		if (ret) {
			dev_err(dev, "TRRL table get failed\n");
			goto err_put_irrl;
		}
	}

	spin_lock_irq(&qp_table->lock);
	ret = radix_tree_insert(&hr_dev->qp_table_tree,
				hr_qp->qpn & (hr_dev->caps.num_qps - 1), hr_qp);
	spin_unlock_irq(&qp_table->lock);
	if (ret) {
		dev_err(dev, "QPC radix_tree_insert failed\n");
		goto err_put_trrl;
	}

	atomic_set(&hr_qp->refcount, 1);
	init_completion(&hr_qp->free);

	return 0;

err_put_trrl:
	if (hr_dev->caps.trrl_entry_sz)
		hns_roce_table_put(hr_dev, &qp_table->trrl_table, hr_qp->qpn);

err_put_irrl:
	hns_roce_table_put(hr_dev, &qp_table->irrl_table, hr_qp->qpn);

err_put_qp:
	hns_roce_table_put(hr_dev, &qp_table->qp_table, hr_qp->qpn);

err_out:
	return ret;
}

void hns_roce_qp_remove(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
{
	struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
	unsigned long flags;

	spin_lock_irqsave(&qp_table->lock, flags);
	radix_tree_delete(&hr_dev->qp_table_tree,
			  hr_qp->qpn & (hr_dev->caps.num_qps - 1));
	spin_unlock_irqrestore(&qp_table->lock, flags);
}
EXPORT_SYMBOL_GPL(hns_roce_qp_remove);

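/*
 * Drop the initial reference on a QP, wait until all other users (for
 * example, in-flight async events) have released theirs, then return
 * the hardware contexts taken in hns_roce_qp_alloc().
 */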
void hns_roce_qp_free(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
{
	struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;

	if (atomic_dec_and_test(&hr_qp->refcount))
		complete(&hr_qp->free);
	wait_for_completion(&hr_qp->free);

	if ((hr_qp->ibqp.qp_type) != IB_QPT_GSI) {
		if (hr_dev->caps.trrl_entry_sz)
			hns_roce_table_put(hr_dev, &qp_table->trrl_table,
					   hr_qp->qpn);
		hns_roce_table_put(hr_dev, &qp_table->irrl_table, hr_qp->qpn);
		hns_roce_table_put(hr_dev, &qp_table->qp_table, hr_qp->qpn);
	}
}
EXPORT_SYMBOL_GPL(hns_roce_qp_free);

void hns_roce_release_range_qp(struct hns_roce_dev *hr_dev, int base_qpn,
			       int cnt)
{
	struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;

	if (base_qpn < SQP_NUM)
		return;

	hns_roce_bitmap_free_range(&qp_table->bitmap, base_qpn, cnt, BITMAP_RR);
}
EXPORT_SYMBOL_GPL(hns_roce_release_range_qp);

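/*
 * Validate the requested receive-queue attributes against the device
 * limits and derive wqe_cnt, max_gs and wqe_shift for the RQ.  When an
 * SRQ is attached the RQ is unused, so its sizes are forced to zero.
 */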
static int hns_roce_set_rq_size(struct hns_roce_dev *hr_dev,
				struct ib_qp_cap *cap, int is_user, int has_srq,
				struct hns_roce_qp *hr_qp)
{
	struct device *dev = hr_dev->dev;
	u32 max_cnt;

	/* Check the validity of QP support capacity */
	if (cap->max_recv_wr > hr_dev->caps.max_wqes ||
	    cap->max_recv_sge > hr_dev->caps.max_rq_sg) {
		dev_err(dev, "RQ WR or sge error! max_recv_wr=%d max_recv_sge=%d\n",
			cap->max_recv_wr, cap->max_recv_sge);
		return -EINVAL;
	}

	/* If an SRQ exists, set zero for the RQ-related numbers */
	if (has_srq) {
		if (cap->max_recv_wr) {
			dev_dbg(dev, "max_recv_wr must be 0 when an srq exists\n");
			return -EINVAL;
		}

		hr_qp->rq.wqe_cnt = hr_qp->rq.max_gs = 0;
	} else {
		if (is_user && (!cap->max_recv_wr || !cap->max_recv_sge)) {
			dev_err(dev, "user space must set max_recv_wr and max_recv_sge\n");
			return -EINVAL;
		}

		if (hr_dev->caps.min_wqes)
			max_cnt = max(cap->max_recv_wr, hr_dev->caps.min_wqes);
		else
			max_cnt = cap->max_recv_wr;

		hr_qp->rq.wqe_cnt = roundup_pow_of_two(max_cnt);

		if ((u32)hr_qp->rq.wqe_cnt > hr_dev->caps.max_wqes) {
			dev_err(dev, "while setting rq size, rq.wqe_cnt too large\n");
			return -EINVAL;
		}

		max_cnt = max(1U, cap->max_recv_sge);
		hr_qp->rq.max_gs = roundup_pow_of_two(max_cnt);
		if (hr_dev->caps.max_rq_sg <= 2)
			hr_qp->rq.wqe_shift =
					ilog2(hr_dev->caps.max_rq_desc_sz);
		else
			hr_qp->rq.wqe_shift =
					ilog2(hr_dev->caps.max_rq_desc_sz
					      * hr_qp->rq.max_gs);
	}

	cap->max_recv_wr = hr_qp->rq.max_post = hr_qp->rq.wqe_cnt;
	cap->max_recv_sge = hr_qp->rq.max_gs;

	return 0;
}

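/*
 * Size the send queue of a userspace QP from the parameters in the
 * create-QP command and lay out the SQ, extended-SGE and RQ regions
 * inside the user-supplied buffer.  Devices with max_sq_sg > 2 keep
 * extra scatter/gather entries in a separate extended-SGE area.
 */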
static int hns_roce_set_user_sq_size(struct hns_roce_dev *hr_dev,
				     struct ib_qp_cap *cap,
				     struct hns_roce_qp *hr_qp,
				     struct hns_roce_ib_create_qp *ucmd)
{
	u32 roundup_sq_stride = roundup_pow_of_two(hr_dev->caps.max_sq_desc_sz);
	u8 max_sq_stride = ilog2(roundup_sq_stride);
	u32 page_size;
	u32 max_cnt;

	/* Sanity check SQ size before proceeding */
	if ((u32)(1 << ucmd->log_sq_bb_count) > hr_dev->caps.max_wqes ||
	    ucmd->log_sq_stride > max_sq_stride ||
	    ucmd->log_sq_stride < HNS_ROCE_IB_MIN_SQ_STRIDE) {
		dev_err(hr_dev->dev, "check SQ size error!\n");
		return -EINVAL;
	}

	if (cap->max_send_sge > hr_dev->caps.max_sq_sg) {
		dev_err(hr_dev->dev, "SQ sge error! max_send_sge=%d\n",
			cap->max_send_sge);
		return -EINVAL;
	}

	hr_qp->sq.wqe_cnt = 1 << ucmd->log_sq_bb_count;
	hr_qp->sq.wqe_shift = ucmd->log_sq_stride;

	max_cnt = max(1U, cap->max_send_sge);
	if (hr_dev->caps.max_sq_sg <= 2)
		hr_qp->sq.max_gs = roundup_pow_of_two(max_cnt);
	else
		hr_qp->sq.max_gs = max_cnt;

	if (hr_qp->sq.max_gs > 2)
		hr_qp->sge.sge_cnt = roundup_pow_of_two(hr_qp->sq.wqe_cnt *
							(hr_qp->sq.max_gs - 2));
	hr_qp->sge.sge_shift = 4;

	/* Get buf size, SQ and RQ are aligned to page_size */
	if (hr_dev->caps.max_sq_sg <= 2) {
		hr_qp->buff_size = HNS_ROCE_ALOGN_UP((hr_qp->rq.wqe_cnt <<
					     hr_qp->rq.wqe_shift), PAGE_SIZE) +
				   HNS_ROCE_ALOGN_UP((hr_qp->sq.wqe_cnt <<
					     hr_qp->sq.wqe_shift), PAGE_SIZE);

		hr_qp->sq.offset = 0;
		hr_qp->rq.offset = HNS_ROCE_ALOGN_UP((hr_qp->sq.wqe_cnt <<
					     hr_qp->sq.wqe_shift), PAGE_SIZE);
	} else {
		page_size = 1 << (hr_dev->caps.mtt_buf_pg_sz + PAGE_SHIFT);
		hr_qp->buff_size = HNS_ROCE_ALOGN_UP((hr_qp->rq.wqe_cnt <<
					     hr_qp->rq.wqe_shift), page_size) +
				   HNS_ROCE_ALOGN_UP((hr_qp->sge.sge_cnt <<
					     hr_qp->sge.sge_shift), page_size) +
				   HNS_ROCE_ALOGN_UP((hr_qp->sq.wqe_cnt <<
					     hr_qp->sq.wqe_shift), page_size);

		hr_qp->sq.offset = 0;
		if (hr_qp->sge.sge_cnt) {
			hr_qp->sge.offset = HNS_ROCE_ALOGN_UP(
							(hr_qp->sq.wqe_cnt <<
							hr_qp->sq.wqe_shift),
							page_size);
			hr_qp->rq.offset = hr_qp->sge.offset +
					HNS_ROCE_ALOGN_UP((hr_qp->sge.sge_cnt <<
							hr_qp->sge.sge_shift),
							page_size);
		} else {
			hr_qp->rq.offset = HNS_ROCE_ALOGN_UP(
							(hr_qp->sq.wqe_cnt <<
							hr_qp->sq.wqe_shift),
							page_size);
		}
	}

	return 0;
}

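/*
 * Size the send queue of a kernel QP from the requested capabilities,
 * lay out the SQ, extended-SGE and RQ regions in the kernel buffer and
 * report the resulting limits back through @cap.
 */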
static int hns_roce_set_kernel_sq_size(struct hns_roce_dev *hr_dev,
				       struct ib_qp_cap *cap,
				       struct hns_roce_qp *hr_qp)
{
	struct device *dev = hr_dev->dev;
	u32 page_size;
	u32 max_cnt;
	int size;

	if (cap->max_send_wr > hr_dev->caps.max_wqes ||
	    cap->max_send_sge > hr_dev->caps.max_sq_sg ||
	    cap->max_inline_data > hr_dev->caps.max_sq_inline) {
		dev_err(dev, "SQ WR or sge or inline data error!\n");
		return -EINVAL;
	}

	hr_qp->sq.wqe_shift = ilog2(hr_dev->caps.max_sq_desc_sz);
	hr_qp->sq_max_wqes_per_wr = 1;
	hr_qp->sq_spare_wqes = 0;

	if (hr_dev->caps.min_wqes)
		max_cnt = max(cap->max_send_wr, hr_dev->caps.min_wqes);
	else
		max_cnt = cap->max_send_wr;

	hr_qp->sq.wqe_cnt = roundup_pow_of_two(max_cnt);
	if ((u32)hr_qp->sq.wqe_cnt > hr_dev->caps.max_wqes) {
		dev_err(dev, "while setting kernel sq size, sq.wqe_cnt too large\n");
		return -EINVAL;
	}

	/* Get data_seg numbers */
	max_cnt = max(1U, cap->max_send_sge);
	if (hr_dev->caps.max_sq_sg <= 2)
		hr_qp->sq.max_gs = roundup_pow_of_two(max_cnt);
	else
		hr_qp->sq.max_gs = max_cnt;

	if (hr_qp->sq.max_gs > 2) {
		hr_qp->sge.sge_cnt = roundup_pow_of_two(hr_qp->sq.wqe_cnt *
							(hr_qp->sq.max_gs - 2));
		hr_qp->sge.sge_shift = 4;
	}

	/* UD SQ WQEs carry all of their SGEs in the extended SGE space */
	if (hr_dev->caps.max_sq_sg > 2 && hr_qp->ibqp.qp_type == IB_QPT_GSI) {
		hr_qp->sge.sge_cnt = roundup_pow_of_two(hr_qp->sq.wqe_cnt *
							hr_qp->sq.max_gs);
		hr_qp->sge.sge_shift = 4;
	}

	/* Get buf size, SQ and RQ are aligned to page_size */
	page_size = 1 << (hr_dev->caps.mtt_buf_pg_sz + PAGE_SHIFT);
	hr_qp->sq.offset = 0;
	size = HNS_ROCE_ALOGN_UP(hr_qp->sq.wqe_cnt << hr_qp->sq.wqe_shift,
				 page_size);

	if (hr_dev->caps.max_sq_sg > 2 && hr_qp->sge.sge_cnt) {
		hr_qp->sge.offset = size;
		size += HNS_ROCE_ALOGN_UP(hr_qp->sge.sge_cnt <<
					  hr_qp->sge.sge_shift, page_size);
	}

	hr_qp->rq.offset = size;
	size += HNS_ROCE_ALOGN_UP((hr_qp->rq.wqe_cnt << hr_qp->rq.wqe_shift),
				  page_size);
	hr_qp->buff_size = size;

	/* Get the number of send WRs and SGEs */
	cap->max_send_wr = hr_qp->sq.max_post = hr_qp->sq.wqe_cnt;
	cap->max_send_sge = hr_qp->sq.max_gs;

	/* We don't support inline sends for kernel QPs (yet) */
	cap->max_inline_data = 0;

	return 0;
}

static int hns_roce_qp_has_rq(struct ib_qp_init_attr *attr)
{
	if (attr->qp_type == IB_QPT_XRC_INI ||
	    attr->qp_type == IB_QPT_XRC_TGT || attr->srq)
		return 0;

	return 1;
}

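/*
 * Common creation path for userspace and kernel QPs: size the work
 * queues, allocate (or pin) the WQE buffer and write its MTT, set up
 * the record doorbell where supported, then reserve a QPN and install
 * the QP in the device tables.
 */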
static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
				     struct ib_pd *ib_pd,
				     struct ib_qp_init_attr *init_attr,
				     struct ib_udata *udata, unsigned long sqpn,
				     struct hns_roce_qp *hr_qp)
{
	struct device *dev = hr_dev->dev;
	struct hns_roce_ib_create_qp ucmd;
	struct hns_roce_ib_create_qp_resp resp = {};
	unsigned long qpn = 0;
	int ret = 0;
	u32 page_shift;
	u32 npages;
	int i;

	mutex_init(&hr_qp->mutex);
	spin_lock_init(&hr_qp->sq.lock);
	spin_lock_init(&hr_qp->rq.lock);

	hr_qp->state = IB_QPS_RESET;

	hr_qp->ibqp.qp_type = init_attr->qp_type;

	if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR)
		hr_qp->sq_signal_bits = cpu_to_le32(IB_SIGNAL_ALL_WR);
	else
		hr_qp->sq_signal_bits = cpu_to_le32(IB_SIGNAL_REQ_WR);

	ret = hns_roce_set_rq_size(hr_dev, &init_attr->cap, !!ib_pd->uobject,
				   !!init_attr->srq, hr_qp);
	if (ret) {
		dev_err(dev, "hns_roce_set_rq_size failed\n");
		goto err_out;
	}

	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) {
		/* allocate recv inline buf */
		hr_qp->rq_inl_buf.wqe_list = kcalloc(hr_qp->rq.wqe_cnt,
					      sizeof(struct hns_roce_rinl_wqe),
					      GFP_KERNEL);
		if (!hr_qp->rq_inl_buf.wqe_list) {
			ret = -ENOMEM;
			goto err_out;
		}

		hr_qp->rq_inl_buf.wqe_cnt = hr_qp->rq.wqe_cnt;

		/* First, allocate one chunk of sge space for all WQEs */
		hr_qp->rq_inl_buf.wqe_list[0].sg_list =
					kcalloc(hr_qp->rq_inl_buf.wqe_cnt,
					       init_attr->cap.max_recv_sge *
					       sizeof(struct hns_roce_rinl_sge),
					       GFP_KERNEL);
		if (!hr_qp->rq_inl_buf.wqe_list[0].sg_list) {
			ret = -ENOMEM;
			goto err_wqe_list;
		}

		for (i = 1; i < hr_qp->rq_inl_buf.wqe_cnt; i++)
			/* Then point each wqe's sg_list into that chunk */
			hr_qp->rq_inl_buf.wqe_list[i].sg_list =
				&hr_qp->rq_inl_buf.wqe_list[0].sg_list[i *
				init_attr->cap.max_recv_sge];
	}

	if (ib_pd->uobject) {
		if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {
			dev_err(dev, "ib_copy_from_udata error for create qp\n");
			ret = -EFAULT;
			goto err_rq_sge_list;
		}

		ret = hns_roce_set_user_sq_size(hr_dev, &init_attr->cap, hr_qp,
						&ucmd);
		if (ret) {
			dev_err(dev, "hns_roce_set_user_sq_size error for create qp\n");
			goto err_rq_sge_list;
		}

		hr_qp->umem = ib_umem_get(ib_pd->uobject->context,
					  ucmd.buf_addr, hr_qp->buff_size, 0,
					  0);
		if (IS_ERR(hr_qp->umem)) {
			dev_err(dev, "ib_umem_get error for create qp\n");
			ret = PTR_ERR(hr_qp->umem);
			goto err_rq_sge_list;
		}

		hr_qp->mtt.mtt_type = MTT_TYPE_WQE;
		if (hr_dev->caps.mtt_buf_pg_sz) {
			npages = (ib_umem_page_count(hr_qp->umem) +
				  (1 << hr_dev->caps.mtt_buf_pg_sz) - 1) /
				 (1 << hr_dev->caps.mtt_buf_pg_sz);
			page_shift = PAGE_SHIFT + hr_dev->caps.mtt_buf_pg_sz;
			ret = hns_roce_mtt_init(hr_dev, npages,
						page_shift,
						&hr_qp->mtt);
		} else {
			ret = hns_roce_mtt_init(hr_dev,
						ib_umem_page_count(hr_qp->umem),
						hr_qp->umem->page_shift,
						&hr_qp->mtt);
		}
		if (ret) {
			dev_err(dev, "hns_roce_mtt_init error for create qp\n");
			goto err_buf;
		}

		ret = hns_roce_ib_umem_write_mtt(hr_dev, &hr_qp->mtt,
						 hr_qp->umem);
		if (ret) {
			dev_err(dev, "hns_roce_ib_umem_write_mtt error for create qp\n");
			goto err_mtt;
		}

		if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) &&
		    (udata->outlen >= sizeof(resp)) &&
		    hns_roce_qp_has_rq(init_attr)) {
			ret = hns_roce_db_map_user(
					to_hr_ucontext(ib_pd->uobject->context),
					ucmd.db_addr, &hr_qp->rdb);
			if (ret) {
				dev_err(dev, "rq record doorbell map failed!\n");
				goto err_mtt;
			}
		}
	} else {
		if (init_attr->create_flags &
		    IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK) {
			dev_err(dev, "init_attr->create_flags error!\n");
			ret = -EINVAL;
			goto err_rq_sge_list;
		}

		if (init_attr->create_flags & IB_QP_CREATE_IPOIB_UD_LSO) {
			dev_err(dev, "init_attr->create_flags error!\n");
			ret = -EINVAL;
			goto err_rq_sge_list;
		}

		/* Set SQ size */
		ret = hns_roce_set_kernel_sq_size(hr_dev, &init_attr->cap,
						  hr_qp);
		if (ret) {
			dev_err(dev, "hns_roce_set_kernel_sq_size error!\n");
			goto err_rq_sge_list;
		}

		/* QP doorbell register address */
		hr_qp->sq.db_reg_l = hr_dev->reg_base + hr_dev->sdb_offset +
				     DB_REG_OFFSET * hr_dev->priv_uar.index;
		hr_qp->rq.db_reg_l = hr_dev->reg_base + hr_dev->odb_offset +
				     DB_REG_OFFSET * hr_dev->priv_uar.index;

		if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) &&
		    hns_roce_qp_has_rq(init_attr)) {
			ret = hns_roce_alloc_db(hr_dev, &hr_qp->rdb, 0);
			if (ret) {
				dev_err(dev, "rq record doorbell alloc failed!\n");
				goto err_rq_sge_list;
			}
			*hr_qp->rdb.db_record = 0;
		}

		/* Allocate QP buf */
		page_shift = PAGE_SHIFT + hr_dev->caps.mtt_buf_pg_sz;
		if (hns_roce_buf_alloc(hr_dev, hr_qp->buff_size,
				       (1 << page_shift) * 2,
				       &hr_qp->hr_buf, page_shift)) {
			dev_err(dev, "hns_roce_buf_alloc error!\n");
			ret = -ENOMEM;
			goto err_db;
		}

		hr_qp->mtt.mtt_type = MTT_TYPE_WQE;
		/* Write MTT */
		ret = hns_roce_mtt_init(hr_dev, hr_qp->hr_buf.npages,
					hr_qp->hr_buf.page_shift, &hr_qp->mtt);
		if (ret) {
			dev_err(dev, "hns_roce_mtt_init error for kernel create qp\n");
			goto err_buf;
		}

		ret = hns_roce_buf_write_mtt(hr_dev, &hr_qp->mtt,
					     &hr_qp->hr_buf);
		if (ret) {
			dev_err(dev, "hns_roce_buf_write_mtt error for kernel create qp\n");
			goto err_mtt;
		}

		hr_qp->sq.wrid = kmalloc_array(hr_qp->sq.wqe_cnt, sizeof(u64),
					       GFP_KERNEL);
		hr_qp->rq.wrid = kmalloc_array(hr_qp->rq.wqe_cnt, sizeof(u64),
					       GFP_KERNEL);
		if (!hr_qp->sq.wrid || !hr_qp->rq.wrid) {
			ret = -ENOMEM;
			goto err_wrid;
		}
	}

	if (sqpn) {
		qpn = sqpn;
	} else {
		/* Get QPN */
		ret = hns_roce_reserve_range_qp(hr_dev, 1, 1, &qpn);
		if (ret) {
			dev_err(dev, "hns_roce_reserve_range_qp alloc qpn error\n");
			goto err_wrid;
		}
	}

	if (init_attr->qp_type == IB_QPT_GSI &&
	    hr_dev->hw_rev == HNS_ROCE_HW_VER1) {
		/* In the v1 engine, the GSI QP context lives in the RoCE
		 * engine's registers
		 */
		ret = hns_roce_gsi_qp_alloc(hr_dev, qpn, hr_qp);
		if (ret) {
			dev_err(dev, "hns_roce_qp_alloc failed!\n");
			goto err_qpn;
		}
	} else {
		ret = hns_roce_qp_alloc(hr_dev, qpn, hr_qp);
		if (ret) {
			dev_err(dev, "hns_roce_qp_alloc failed!\n");
			goto err_qpn;
		}
	}

	if (sqpn)
		hr_qp->doorbell_qpn = 1;
	else
		hr_qp->doorbell_qpn = cpu_to_le64(hr_qp->qpn);

	if (ib_pd->uobject && (udata->outlen >= sizeof(resp)) &&
	    (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB)) {
		/* indicate kernel supports record db */
		resp.cap_flags |= HNS_ROCE_SUPPORT_RQ_RECORD_DB;
		ret = ib_copy_to_udata(udata, &resp, sizeof(resp));
		if (ret)
			goto err_qp;

		hr_qp->rdb_en = 1;
	}
	hr_qp->event = hns_roce_ib_qp_event;

	return 0;

err_qp:
	if (init_attr->qp_type == IB_QPT_GSI &&
	    hr_dev->hw_rev == HNS_ROCE_HW_VER1)
		hns_roce_qp_remove(hr_dev, hr_qp);
	else
		hns_roce_qp_free(hr_dev, hr_qp);

err_qpn:
	if (!sqpn)
		hns_roce_release_range_qp(hr_dev, qpn, 1);

err_wrid:
	if (ib_pd->uobject) {
		if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) &&
		    (udata->outlen >= sizeof(resp)) &&
		    hns_roce_qp_has_rq(init_attr))
			hns_roce_db_unmap_user(
					to_hr_ucontext(ib_pd->uobject->context),
					&hr_qp->rdb);
	} else {
		kfree(hr_qp->sq.wrid);
		kfree(hr_qp->rq.wrid);
	}

err_mtt:
	hns_roce_mtt_cleanup(hr_dev, &hr_qp->mtt);

err_buf:
	if (ib_pd->uobject)
		ib_umem_release(hr_qp->umem);
	else
		hns_roce_buf_free(hr_dev, hr_qp->buff_size, &hr_qp->hr_buf);

err_db:
	if (!ib_pd->uobject && hns_roce_qp_has_rq(init_attr) &&
	    (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB))
		hns_roce_free_db(hr_dev, &hr_qp->rdb);

err_rq_sge_list:
	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE)
		kfree(hr_qp->rq_inl_buf.wqe_list[0].sg_list);

err_wqe_list:
	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE)
		kfree(hr_qp->rq_inl_buf.wqe_list);

err_out:
	return ret;
}

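/*
 * Verbs entry point for QP creation.  RC QPs are allocated directly;
 * the GSI QP additionally carries per-port state and may only be
 * created from kernel space.
 */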
struct ib_qp *hns_roce_create_qp(struct ib_pd *pd,
				 struct ib_qp_init_attr *init_attr,
				 struct ib_udata *udata)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(pd->device);
	struct device *dev = hr_dev->dev;
	struct hns_roce_sqp *hr_sqp;
	struct hns_roce_qp *hr_qp;
	int ret;

	switch (init_attr->qp_type) {
	case IB_QPT_RC: {
		hr_qp = kzalloc(sizeof(*hr_qp), GFP_KERNEL);
		if (!hr_qp)
			return ERR_PTR(-ENOMEM);

		ret = hns_roce_create_qp_common(hr_dev, pd, init_attr, udata, 0,
						hr_qp);
		if (ret) {
			dev_err(dev, "Create RC QP failed\n");
			kfree(hr_qp);
			return ERR_PTR(ret);
		}

		hr_qp->ibqp.qp_num = hr_qp->qpn;

		break;
	}
	case IB_QPT_GSI: {
		/* Userspace is not allowed to create special QPs: */
		if (pd->uobject) {
			dev_err(dev, "user space GSI QP is not supported\n");
			return ERR_PTR(-EINVAL);
		}

		hr_sqp = kzalloc(sizeof(*hr_sqp), GFP_KERNEL);
		if (!hr_sqp)
			return ERR_PTR(-ENOMEM);

		hr_qp = &hr_sqp->hr_qp;
		hr_qp->port = init_attr->port_num - 1;
		hr_qp->phy_port = hr_dev->iboe.phy_port[hr_qp->port];

		/* when hw version is v1, a per-port sqpn is allocated */
		if (hr_dev->caps.max_sq_sg <= 2)
			hr_qp->ibqp.qp_num = HNS_ROCE_MAX_PORTS +
					     hr_dev->iboe.phy_port[hr_qp->port];
		else
			hr_qp->ibqp.qp_num = 1;

		ret = hns_roce_create_qp_common(hr_dev, pd, init_attr, udata,
						hr_qp->ibqp.qp_num, hr_qp);
		if (ret) {
			dev_err(dev, "Create GSI QP failed!\n");
			kfree(hr_sqp);
			return ERR_PTR(ret);
		}

		break;
	}
	default:{
		dev_err(dev, "QP type %d is not supported\n",
			init_attr->qp_type);
		return ERR_PTR(-EINVAL);
	}
	}

	return &hr_qp->ibqp;
}
EXPORT_SYMBOL_GPL(hns_roce_create_qp);

int to_hr_qp_type(int qp_type)
{
	int transport_type;

	if (qp_type == IB_QPT_RC)
		transport_type = SERV_TYPE_RC;
	else if (qp_type == IB_QPT_UC)
		transport_type = SERV_TYPE_UC;
	else if (qp_type == IB_QPT_UD)
		transport_type = SERV_TYPE_UD;
	else if (qp_type == IB_QPT_GSI)
		transport_type = SERV_TYPE_UD;
	else
		transport_type = -1;

	return transport_type;
}
EXPORT_SYMBOL_GPL(to_hr_qp_type);

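/*
 * Validate a modify-QP request (state transition, port number, pkey
 * index, path MTU and RDMA-atomic limits) before handing it to the
 * hardware-specific modify_qp hook.
 */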
int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		       int attr_mask, struct ib_udata *udata)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
	enum ib_qp_state cur_state, new_state;
	struct device *dev = hr_dev->dev;
	int ret = -EINVAL;
	int p;
	enum ib_mtu active_mtu;

	mutex_lock(&hr_qp->mutex);

	cur_state = attr_mask & IB_QP_CUR_STATE ?
		    attr->cur_qp_state : (enum ib_qp_state)hr_qp->state;
	new_state = attr_mask & IB_QP_STATE ?
		    attr->qp_state : cur_state;

	if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, attr_mask,
				IB_LINK_LAYER_ETHERNET)) {
		dev_err(dev, "ib_modify_qp_is_ok failed\n");
		goto out;
	}

	if ((attr_mask & IB_QP_PORT) &&
	    (attr->port_num == 0 || attr->port_num > hr_dev->caps.num_ports)) {
		dev_err(dev, "attr port_num invalid. attr->port_num=%d\n",
			attr->port_num);
		goto out;
	}

	if (attr_mask & IB_QP_PKEY_INDEX) {
		p = attr_mask & IB_QP_PORT ? (attr->port_num - 1) : hr_qp->port;
		if (attr->pkey_index >= hr_dev->caps.pkey_table_len[p]) {
			dev_err(dev, "attr pkey_index invalid. attr->pkey_index=%d\n",
				attr->pkey_index);
			goto out;
		}
	}

	if (attr_mask & IB_QP_PATH_MTU) {
		p = attr_mask & IB_QP_PORT ? (attr->port_num - 1) : hr_qp->port;
		active_mtu = iboe_get_mtu(hr_dev->iboe.netdevs[p]->mtu);

		if ((hr_dev->caps.max_mtu == IB_MTU_4096 &&
		    attr->path_mtu > IB_MTU_4096) ||
		    (hr_dev->caps.max_mtu == IB_MTU_2048 &&
		    attr->path_mtu > IB_MTU_2048) ||
		    attr->path_mtu < IB_MTU_256 ||
		    attr->path_mtu > active_mtu) {
			dev_err(dev, "attr path_mtu(%d) invalid while modify qp",
				attr->path_mtu);
			goto out;
		}
	}

	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
	    attr->max_rd_atomic > hr_dev->caps.max_qp_init_rdma) {
		dev_err(dev, "attr max_rd_atomic invalid. attr->max_rd_atomic=%d\n",
			attr->max_rd_atomic);
		goto out;
	}

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC &&
	    attr->max_dest_rd_atomic > hr_dev->caps.max_qp_dest_rdma) {
		dev_err(dev, "attr max_dest_rd_atomic invalid. attr->max_dest_rd_atomic=%d\n",
			attr->max_dest_rd_atomic);
		goto out;
	}

	if (cur_state == new_state && cur_state == IB_QPS_RESET) {
		ret = 0;
		goto out;
	}

	ret = hr_dev->hw->modify_qp(ibqp, attr, attr_mask, cur_state,
				    new_state);

out:
	mutex_unlock(&hr_qp->mutex);

	return ret;
}

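/*
 * Take the locks of the send and receive CQs backing one QP in a
 * single, consistent order: when the CQs differ, the lower CQN is
 * always locked first, so two concurrent callers cannot deadlock.
 * A typical caller pattern (illustrative only; the body between the
 * calls is hypothetical):
 *
 *	hns_roce_lock_cqs(send_cq, recv_cq);
 *	... detach the QP from both CQs ...
 *	hns_roce_unlock_cqs(send_cq, recv_cq);
 */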
void hns_roce_lock_cqs(struct hns_roce_cq *send_cq, struct hns_roce_cq *recv_cq)
		       __acquires(&send_cq->lock) __acquires(&recv_cq->lock)
{
	if (send_cq == recv_cq) {
		spin_lock_irq(&send_cq->lock);
		__acquire(&recv_cq->lock);
	} else if (send_cq->cqn < recv_cq->cqn) {
		spin_lock_irq(&send_cq->lock);
		spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING);
	} else {
		spin_lock_irq(&recv_cq->lock);
		spin_lock_nested(&send_cq->lock, SINGLE_DEPTH_NESTING);
	}
}
EXPORT_SYMBOL_GPL(hns_roce_lock_cqs);

void hns_roce_unlock_cqs(struct hns_roce_cq *send_cq,
			 struct hns_roce_cq *recv_cq) __releases(&send_cq->lock)
			 __releases(&recv_cq->lock)
{
	if (send_cq == recv_cq) {
		__release(&recv_cq->lock);
		spin_unlock_irq(&send_cq->lock);
	} else if (send_cq->cqn < recv_cq->cqn) {
		spin_unlock(&recv_cq->lock);
		spin_unlock_irq(&send_cq->lock);
	} else {
		spin_unlock(&send_cq->lock);
		spin_unlock_irq(&recv_cq->lock);
	}
}
EXPORT_SYMBOL_GPL(hns_roce_unlock_cqs);

static void *get_wqe(struct hns_roce_qp *hr_qp, int offset)
{
	return hns_roce_buf_offset(&hr_qp->hr_buf, offset);
}

void *get_recv_wqe(struct hns_roce_qp *hr_qp, int n)
{
	return get_wqe(hr_qp, hr_qp->rq.offset + (n << hr_qp->rq.wqe_shift));
}
EXPORT_SYMBOL_GPL(get_recv_wqe);

void *get_send_wqe(struct hns_roce_qp *hr_qp, int n)
{
	return get_wqe(hr_qp, hr_qp->sq.offset + (n << hr_qp->sq.wqe_shift));
}
EXPORT_SYMBOL_GPL(get_send_wqe);

void *get_send_extend_sge(struct hns_roce_qp *hr_qp, int n)
{
	return hns_roce_buf_offset(&hr_qp->hr_buf, hr_qp->sge.offset +
					(n << hr_qp->sge.sge_shift));
}
EXPORT_SYMBOL_GPL(get_send_extend_sge);

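/*
 * Check whether posting @nreq more WRs would overflow the work queue.
 * The unlocked read of head/tail is a fast path; on apparent overflow
 * the counters are re-read under the CQ lock so that slots freed by a
 * concurrent poll_cq() are observed.
 */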
bool hns_roce_wq_overflow(struct hns_roce_wq *hr_wq, int nreq,
			  struct ib_cq *ib_cq)
{
	struct hns_roce_cq *hr_cq;
	u32 cur;

	cur = hr_wq->head - hr_wq->tail;
	if (likely(cur + nreq < hr_wq->max_post))
		return false;

	hr_cq = to_hr_cq(ib_cq);
	spin_lock(&hr_cq->lock);
	cur = hr_wq->head - hr_wq->tail;
	spin_unlock(&hr_cq->lock);

	return cur + nreq >= hr_wq->max_post;
}
EXPORT_SYMBOL_GPL(hns_roce_wq_overflow);

int hns_roce_init_qp_table(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
	int reserved_from_top = 0;
	int ret;

	spin_lock_init(&qp_table->lock);
	INIT_RADIX_TREE(&hr_dev->qp_table_tree, GFP_ATOMIC);

	/* Each port includes two SQPs, so six ports reserve twelve in total */
	ret = hns_roce_bitmap_init(&qp_table->bitmap, hr_dev->caps.num_qps,
				   hr_dev->caps.num_qps - 1, SQP_NUM,
				   reserved_from_top);
	if (ret) {
		dev_err(hr_dev->dev, "qp bitmap init failed! error=%d\n",
			ret);
		return ret;
	}

	return 0;
}

void hns_roce_cleanup_qp_table(struct hns_roce_dev *hr_dev)
{
	hns_roce_bitmap_cleanup(&hr_dev->qp_table.bitmap);
}