/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <rdma/ib_umem.h>
#include "mlx5_ib.h"
#include "user.h"

/* not supported currently */
static int wq_signature;

enum {
	MLX5_IB_ACK_REQ_FREQ	= 8,
};

enum {
	MLX5_IB_DEFAULT_SCHED_QUEUE	= 0x83,
	MLX5_IB_DEFAULT_QP0_SCHED_QUEUE	= 0x3f,
	MLX5_IB_LINK_TYPE_IB		= 0,
	MLX5_IB_LINK_TYPE_ETH		= 1
};

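/*
 * Note: the SQ is addressed in 64-byte basic blocks; MLX5_IB_SQ_STRIDE is
 * the log2 of that block size (1 << 6 == MLX5_SEND_WQE_BB), which is why
 * mlx5_get_send_wqe() below shifts the WQE index by it.
 */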
enum {
	MLX5_IB_SQ_STRIDE	= 6,
	MLX5_IB_CACHE_LINE_SIZE	= 64,
};

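/* Translation of IB verbs work request opcodes to mlx5 hardware opcodes. */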
static const u32 mlx5_ib_opcode[] = {
	[IB_WR_SEND]				= MLX5_OPCODE_SEND,
	[IB_WR_SEND_WITH_IMM]			= MLX5_OPCODE_SEND_IMM,
	[IB_WR_RDMA_WRITE]			= MLX5_OPCODE_RDMA_WRITE,
	[IB_WR_RDMA_WRITE_WITH_IMM]		= MLX5_OPCODE_RDMA_WRITE_IMM,
	[IB_WR_RDMA_READ]			= MLX5_OPCODE_RDMA_READ,
	[IB_WR_ATOMIC_CMP_AND_SWP]		= MLX5_OPCODE_ATOMIC_CS,
	[IB_WR_ATOMIC_FETCH_AND_ADD]		= MLX5_OPCODE_ATOMIC_FA,
	[IB_WR_SEND_WITH_INV]			= MLX5_OPCODE_SEND_INVAL,
	[IB_WR_LOCAL_INV]			= MLX5_OPCODE_UMR,
	[IB_WR_FAST_REG_MR]			= MLX5_OPCODE_UMR,
	[IB_WR_MASKED_ATOMIC_CMP_AND_SWP]	= MLX5_OPCODE_ATOMIC_MASKED_CS,
	[IB_WR_MASKED_ATOMIC_FETCH_AND_ADD]	= MLX5_OPCODE_ATOMIC_MASKED_FA,
	[MLX5_IB_WR_UMR]			= MLX5_OPCODE_UMR,
};

static int is_qp0(enum ib_qp_type qp_type)
{
	return qp_type == IB_QPT_SMI;
}

static int is_sqp(enum ib_qp_type qp_type)
{
	return is_qp0(qp_type) || is_qp1(qp_type);
}

static void *get_wqe(struct mlx5_ib_qp *qp, int offset)
{
	return mlx5_buf_offset(&qp->buf, offset);
}

static void *get_recv_wqe(struct mlx5_ib_qp *qp, int n)
{
	return get_wqe(qp, qp->rq.offset + (n << qp->rq.wqe_shift));
}

void *mlx5_get_send_wqe(struct mlx5_ib_qp *qp, int n)
{
	return get_wqe(qp, qp->sq.offset + (n << MLX5_IB_SQ_STRIDE));
}

/**
 * mlx5_ib_read_user_wqe() - Copy a user-space WQE to kernel space.
 *
 * @qp: QP to copy from.
 * @send: copy from the send queue when non-zero, use the receive queue
 *	  otherwise.
 * @wqe_index: index to start copying from. For send work queues, the
 *	       wqe_index is in units of MLX5_SEND_WQE_BB. For receive work
 *	       queues, it is the index of the work queue element in the
 *	       queue.
 * @buffer: destination buffer.
 * @length: maximum number of bytes to copy.
 *
 * Copies at least a single WQE, but may copy more data.
 *
 * Return: the number of bytes copied, or an error code.
 */
int mlx5_ib_read_user_wqe(struct mlx5_ib_qp *qp, int send, int wqe_index,
			  void *buffer, u32 length)
{
	struct ib_device *ibdev = qp->ibqp.device;
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct mlx5_ib_wq *wq = send ? &qp->sq : &qp->rq;
	size_t offset;
	size_t wq_end;
	struct ib_umem *umem = qp->umem;
	u32 first_copy_length;
	int wqe_length;
	int ret;

	if (wq->wqe_cnt == 0) {
		mlx5_ib_dbg(dev, "mlx5_ib_read_user_wqe for a QP with wqe_cnt == 0. qp_type: 0x%x\n",
			    qp->ibqp.qp_type);
		return -EINVAL;
	}

	offset = wq->offset + ((wqe_index % wq->wqe_cnt) << wq->wqe_shift);
	wq_end = wq->offset + (wq->wqe_cnt << wq->wqe_shift);

	if (send && length < sizeof(struct mlx5_wqe_ctrl_seg))
		return -EINVAL;

	if (offset > umem->length ||
	    (send && offset + sizeof(struct mlx5_wqe_ctrl_seg) > umem->length))
		return -EINVAL;

	first_copy_length = min_t(u32, offset + length, wq_end) - offset;
	ret = ib_umem_copy_from(buffer, umem, offset, first_copy_length);
	if (ret)
		return ret;

	if (send) {
		struct mlx5_wqe_ctrl_seg *ctrl = buffer;
		int ds = be32_to_cpu(ctrl->qpn_ds) & MLX5_WQE_CTRL_DS_MASK;

		wqe_length = ds * MLX5_WQE_DS_UNITS;
	} else {
		wqe_length = 1 << wq->wqe_shift;
	}

	if (wqe_length <= first_copy_length)
		return first_copy_length;

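	/*
	 * The rest of the WQE wraps past the end of the work queue buffer;
	 * copy the remainder from the beginning of the queue.
	 */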
	ret = ib_umem_copy_from(buffer + first_copy_length, umem, wq->offset,
				wqe_length - first_copy_length);
	if (ret)
		return ret;

	return wqe_length;
}

static void mlx5_ib_qp_event(struct mlx5_core_qp *qp, int type)
{
	struct ib_qp *ibqp = &to_mibqp(qp)->ibqp;
	struct ib_event event;

	if (type == MLX5_EVENT_TYPE_PATH_MIG)
		to_mibqp(qp)->port = to_mibqp(qp)->alt_port;

	if (ibqp->event_handler) {
		event.device     = ibqp->device;
		event.element.qp = ibqp;
		switch (type) {
		case MLX5_EVENT_TYPE_PATH_MIG:
			event.event = IB_EVENT_PATH_MIG;
			break;
		case MLX5_EVENT_TYPE_COMM_EST:
			event.event = IB_EVENT_COMM_EST;
			break;
		case MLX5_EVENT_TYPE_SQ_DRAINED:
			event.event = IB_EVENT_SQ_DRAINED;
			break;
		case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
			event.event = IB_EVENT_QP_LAST_WQE_REACHED;
			break;
		case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
			event.event = IB_EVENT_QP_FATAL;
			break;
		case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
			event.event = IB_EVENT_PATH_MIG_ERR;
			break;
		case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
			event.event = IB_EVENT_QP_REQ_ERR;
			break;
		case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
			event.event = IB_EVENT_QP_ACCESS_ERR;
			break;
		default:
			pr_warn("mlx5_ib: Unexpected event type %d on QP %06x\n", type, qp->qpn);
			return;
		}

		ibqp->event_handler(&event, ibqp->qp_context);
	}
}

static int set_rq_size(struct mlx5_ib_dev *dev, struct ib_qp_cap *cap,
		       int has_rq, struct mlx5_ib_qp *qp, struct mlx5_ib_create_qp *ucmd)
{
	int wqe_size;
	int wq_size;

	/* Sanity check RQ size before proceeding */
	if (cap->max_recv_wr > (1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz)))
		return -EINVAL;

	if (!has_rq) {
		qp->rq.max_gs = 0;
		qp->rq.wqe_cnt = 0;
		qp->rq.wqe_shift = 0;
	} else {
		if (ucmd) {
			qp->rq.wqe_cnt = ucmd->rq_wqe_count;
			qp->rq.wqe_shift = ucmd->rq_wqe_shift;
			qp->rq.max_gs = (1 << qp->rq.wqe_shift) / sizeof(struct mlx5_wqe_data_seg) - qp->wq_sig;
			qp->rq.max_post = qp->rq.wqe_cnt;
		} else {
			wqe_size = qp->wq_sig ? sizeof(struct mlx5_wqe_signature_seg) : 0;
			wqe_size += cap->max_recv_sge * sizeof(struct mlx5_wqe_data_seg);
			wqe_size = roundup_pow_of_two(wqe_size);
			wq_size = roundup_pow_of_two(cap->max_recv_wr) * wqe_size;
			wq_size = max_t(int, wq_size, MLX5_SEND_WQE_BB);
			qp->rq.wqe_cnt = wq_size / wqe_size;
			if (wqe_size > MLX5_CAP_GEN(dev->mdev, max_wqe_sz_rq)) {
				mlx5_ib_dbg(dev, "wqe_size %d, max %d\n",
					    wqe_size,
					    MLX5_CAP_GEN(dev->mdev,
							 max_wqe_sz_rq));
				return -EINVAL;
			}
			qp->rq.wqe_shift = ilog2(wqe_size);
			qp->rq.max_gs = (1 << qp->rq.wqe_shift) / sizeof(struct mlx5_wqe_data_seg) - qp->wq_sig;
			qp->rq.max_post = qp->rq.wqe_cnt;
		}
	}

	return 0;
}

static int sq_overhead(enum ib_qp_type qp_type)
{
	int size = 0;

	switch (qp_type) {
	case IB_QPT_XRC_INI:
		size += sizeof(struct mlx5_wqe_xrc_seg);
		/* fall through */
	case IB_QPT_RC:
		size += sizeof(struct mlx5_wqe_ctrl_seg) +
			sizeof(struct mlx5_wqe_atomic_seg) +
			sizeof(struct mlx5_wqe_raddr_seg);
		break;

	case IB_QPT_XRC_TGT:
		return 0;

	case IB_QPT_UC:
		size += sizeof(struct mlx5_wqe_ctrl_seg) +
			sizeof(struct mlx5_wqe_raddr_seg) +
			sizeof(struct mlx5_wqe_umr_ctrl_seg) +
			sizeof(struct mlx5_mkey_seg);
		break;

	case IB_QPT_UD:
	case IB_QPT_SMI:
	case IB_QPT_GSI:
		size += sizeof(struct mlx5_wqe_ctrl_seg) +
			sizeof(struct mlx5_wqe_datagram_seg);
		break;

	case MLX5_IB_QPT_REG_UMR:
		size += sizeof(struct mlx5_wqe_ctrl_seg) +
			sizeof(struct mlx5_wqe_umr_ctrl_seg) +
			sizeof(struct mlx5_mkey_seg);
		break;

	default:
		return -EINVAL;
	}

	return size;
}

static int calc_send_wqe(struct ib_qp_init_attr *attr)
{
	int inl_size = 0;
	int size;

	size = sq_overhead(attr->qp_type);
	if (size < 0)
		return size;

	if (attr->cap.max_inline_data) {
		inl_size = size + sizeof(struct mlx5_wqe_inline_seg) +
			attr->cap.max_inline_data;
	}

	size += attr->cap.max_send_sge * sizeof(struct mlx5_wqe_data_seg);
	if (attr->create_flags & IB_QP_CREATE_SIGNATURE_EN &&
	    ALIGN(max_t(int, inl_size, size), MLX5_SEND_WQE_BB) < MLX5_SIG_WQE_SIZE)
		return MLX5_SIG_WQE_SIZE;
	else
		return ALIGN(max_t(int, inl_size, size), MLX5_SEND_WQE_BB);
}

static int calc_sq_size(struct mlx5_ib_dev *dev, struct ib_qp_init_attr *attr,
			struct mlx5_ib_qp *qp)
{
	int wqe_size;
	int wq_size;

	if (!attr->cap.max_send_wr)
		return 0;

	wqe_size = calc_send_wqe(attr);
	mlx5_ib_dbg(dev, "wqe_size %d\n", wqe_size);
	if (wqe_size < 0)
		return wqe_size;

	if (wqe_size > MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq)) {
		mlx5_ib_dbg(dev, "wqe_size(%d) > max_sq_desc_sz(%d)\n",
			    wqe_size, MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq));
		return -EINVAL;
	}

	qp->max_inline_data = wqe_size - sq_overhead(attr->qp_type) -
		sizeof(struct mlx5_wqe_inline_seg);
	attr->cap.max_inline_data = qp->max_inline_data;

	if (attr->create_flags & IB_QP_CREATE_SIGNATURE_EN)
		qp->signature_en = true;

	wq_size = roundup_pow_of_two(attr->cap.max_send_wr * wqe_size);
	qp->sq.wqe_cnt = wq_size / MLX5_SEND_WQE_BB;
	if (qp->sq.wqe_cnt > (1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz))) {
		mlx5_ib_dbg(dev, "wqe count(%d) exceeds limits(%d)\n",
			    qp->sq.wqe_cnt,
			    1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz));
		return -ENOMEM;
	}
	qp->sq.wqe_shift = ilog2(MLX5_SEND_WQE_BB);
	qp->sq.max_gs = attr->cap.max_send_sge;
	qp->sq.max_post = wq_size / wqe_size;
	attr->cap.max_send_wr = qp->sq.max_post;

	return wq_size;
}

static int set_user_buf_size(struct mlx5_ib_dev *dev,
			     struct mlx5_ib_qp *qp,
			     struct mlx5_ib_create_qp *ucmd)
{
	int desc_sz = 1 << qp->sq.wqe_shift;

	if (desc_sz > MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq)) {
		mlx5_ib_warn(dev, "desc_sz %d, max_sq_desc_sz %d\n",
			     desc_sz, MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq));
		return -EINVAL;
	}

	if (ucmd->sq_wqe_count && ((1 << ilog2(ucmd->sq_wqe_count)) != ucmd->sq_wqe_count)) {
		mlx5_ib_warn(dev, "sq_wqe_count %d is not a power of two\n",
			     ucmd->sq_wqe_count);
		return -EINVAL;
	}

	qp->sq.wqe_cnt = ucmd->sq_wqe_count;

	if (qp->sq.wqe_cnt > (1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz))) {
		mlx5_ib_warn(dev, "wqe_cnt %d, max_wqes %d\n",
			     qp->sq.wqe_cnt,
			     1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz));
		return -EINVAL;
	}

	qp->buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) +
		       (qp->sq.wqe_cnt << 6);

	return 0;
}

static int qp_has_rq(struct ib_qp_init_attr *attr)
{
	if (attr->qp_type == IB_QPT_XRC_INI ||
	    attr->qp_type == IB_QPT_XRC_TGT || attr->srq ||
	    attr->qp_type == MLX5_IB_QPT_REG_UMR ||
	    !attr->cap.max_recv_wr)
		return 0;

	return 1;
}

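/*
 * UUAR accounting, as implied by the helpers below: index 0 is the shared
 * low-latency-class register and index 2 serves the fast-path class, while
 * next_uuar() skips every index with ((n % 4) & 2) set so that only the
 * regular (non fast-path) blue-flame registers of each UAR page are handed
 * out; medium-class indices come first, followed by the high-class ones.
 */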
static int first_med_uuar(void)
{
	return 1;
}

static int next_uuar(int n)
{
	n++;

	while (((n % 4) & 2))
		n++;

	return n;
}

static int num_med_uuar(struct mlx5_uuar_info *uuari)
{
	int n;

	n = uuari->num_uars * MLX5_NON_FP_BF_REGS_PER_PAGE -
		uuari->num_low_latency_uuars - 1;

	return n >= 0 ? n : 0;
}

static int max_uuari(struct mlx5_uuar_info *uuari)
{
	return uuari->num_uars * 4;
}

static int first_hi_uuar(struct mlx5_uuar_info *uuari)
{
	int med;
	int i;
	int t;

	med = num_med_uuar(uuari);
	for (t = 0, i = first_med_uuar();; i = next_uuar(i)) {
		t++;
		if (t == med)
			return next_uuar(i);
	}

	return 0;
}

static int alloc_high_class_uuar(struct mlx5_uuar_info *uuari)
{
	int i;

	for (i = first_hi_uuar(uuari); i < max_uuari(uuari); i = next_uuar(i)) {
		if (!test_bit(i, uuari->bitmap)) {
			set_bit(i, uuari->bitmap);
			uuari->count[i]++;
			return i;
		}
	}

	return -ENOMEM;
}

static int alloc_med_class_uuar(struct mlx5_uuar_info *uuari)
{
	int minidx = first_med_uuar();
	int i;

	for (i = first_med_uuar(); i < first_hi_uuar(uuari); i = next_uuar(i)) {
		if (uuari->count[i] < uuari->count[minidx])
			minidx = i;
	}

	uuari->count[minidx]++;
	return minidx;
}

static int alloc_uuar(struct mlx5_uuar_info *uuari,
		      enum mlx5_ib_latency_class lat)
{
	int uuarn = -EINVAL;

	mutex_lock(&uuari->lock);
	switch (lat) {
	case MLX5_IB_LATENCY_CLASS_LOW:
		uuarn = 0;
		uuari->count[uuarn]++;
		break;

	case MLX5_IB_LATENCY_CLASS_MEDIUM:
		if (uuari->ver < 2)
			uuarn = -ENOMEM;
		else
			uuarn = alloc_med_class_uuar(uuari);
		break;

	case MLX5_IB_LATENCY_CLASS_HIGH:
		if (uuari->ver < 2)
			uuarn = -ENOMEM;
		else
			uuarn = alloc_high_class_uuar(uuari);
		break;

	case MLX5_IB_LATENCY_CLASS_FAST_PATH:
		uuarn = 2;
		break;
	}
	mutex_unlock(&uuari->lock);

	return uuarn;
}

static void free_med_class_uuar(struct mlx5_uuar_info *uuari, int uuarn)
{
	clear_bit(uuarn, uuari->bitmap);
	--uuari->count[uuarn];
}

static void free_high_class_uuar(struct mlx5_uuar_info *uuari, int uuarn)
{
	clear_bit(uuarn, uuari->bitmap);
	--uuari->count[uuarn];
}

static void free_uuar(struct mlx5_uuar_info *uuari, int uuarn)
{
	int nuuars = uuari->num_uars * MLX5_BF_REGS_PER_PAGE;
	int high_uuar = nuuars - uuari->num_low_latency_uuars;

	mutex_lock(&uuari->lock);
	if (uuarn == 0) {
		--uuari->count[uuarn];
		goto out;
	}

	if (uuarn < high_uuar) {
		free_med_class_uuar(uuari, uuarn);
		goto out;
	}

	free_high_class_uuar(uuari, uuarn);

out:
	mutex_unlock(&uuari->lock);
}

static enum mlx5_qp_state to_mlx5_state(enum ib_qp_state state)
{
	switch (state) {
	case IB_QPS_RESET:	return MLX5_QP_STATE_RST;
	case IB_QPS_INIT:	return MLX5_QP_STATE_INIT;
	case IB_QPS_RTR:	return MLX5_QP_STATE_RTR;
	case IB_QPS_RTS:	return MLX5_QP_STATE_RTS;
	case IB_QPS_SQD:	return MLX5_QP_STATE_SQD;
	case IB_QPS_SQE:	return MLX5_QP_STATE_SQER;
	case IB_QPS_ERR:	return MLX5_QP_STATE_ERR;
	default:		return -1;
	}
}

static int to_mlx5_st(enum ib_qp_type type)
{
	switch (type) {
	case IB_QPT_RC:			return MLX5_QP_ST_RC;
	case IB_QPT_UC:			return MLX5_QP_ST_UC;
	case IB_QPT_UD:			return MLX5_QP_ST_UD;
	case MLX5_IB_QPT_REG_UMR:	return MLX5_QP_ST_REG_UMR;
	case IB_QPT_XRC_INI:
	case IB_QPT_XRC_TGT:		return MLX5_QP_ST_XRC;
	case IB_QPT_SMI:		return MLX5_QP_ST_QP0;
	case IB_QPT_GSI:		return MLX5_QP_ST_QP1;
	case IB_QPT_RAW_IPV6:		return MLX5_QP_ST_RAW_IPV6;
	case IB_QPT_RAW_ETHERTYPE:	return MLX5_QP_ST_RAW_ETHERTYPE;
	case IB_QPT_RAW_PACKET:
	case IB_QPT_MAX:
	default:			return -EINVAL;
	}
}

static int uuarn_to_uar_index(struct mlx5_uuar_info *uuari, int uuarn)
{
	return uuari->uars[uuarn / MLX5_BF_REGS_PER_PAGE].index;
}

static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
			  struct mlx5_ib_qp *qp, struct ib_udata *udata,
			  struct mlx5_create_qp_mbox_in **in,
			  struct mlx5_ib_create_qp_resp *resp, int *inlen)
{
	struct mlx5_ib_ucontext *context;
	struct mlx5_ib_create_qp ucmd;
	int page_shift = 0;
	int uar_index;
	int npages;
	u32 offset = 0;
	int uuarn;
	int ncont = 0;
	int err;

	err = ib_copy_from_udata(&ucmd, udata, sizeof(ucmd));
	if (err) {
		mlx5_ib_dbg(dev, "copy failed\n");
		return err;
	}

	context = to_mucontext(pd->uobject->context);
	/*
	 * TBD: should come from the verbs when we have the API
	 */
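	/*
	 * Fall back through the latency classes: HIGH maps to a dedicated
	 * blue-flame register, MEDIUM to a shared one, and LOW (index 0) is
	 * the last resort. The debug messages below describe the resulting
	 * latency rather than the class name.
	 */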
	uuarn = alloc_uuar(&context->uuari, MLX5_IB_LATENCY_CLASS_HIGH);
	if (uuarn < 0) {
		mlx5_ib_dbg(dev, "failed to allocate low latency UUAR\n");
		mlx5_ib_dbg(dev, "reverting to medium latency\n");
		uuarn = alloc_uuar(&context->uuari, MLX5_IB_LATENCY_CLASS_MEDIUM);
		if (uuarn < 0) {
			mlx5_ib_dbg(dev, "failed to allocate medium latency UUAR\n");
			mlx5_ib_dbg(dev, "reverting to high latency\n");
			uuarn = alloc_uuar(&context->uuari, MLX5_IB_LATENCY_CLASS_LOW);
			if (uuarn < 0) {
				mlx5_ib_warn(dev, "uuar allocation failed\n");
				return uuarn;
			}
		}
	}

	uar_index = uuarn_to_uar_index(&context->uuari, uuarn);
	mlx5_ib_dbg(dev, "uuarn 0x%x, uar_index 0x%x\n", uuarn, uar_index);

	qp->rq.offset = 0;
	qp->sq.wqe_shift = ilog2(MLX5_SEND_WQE_BB);
	qp->sq.offset = qp->rq.wqe_cnt << qp->rq.wqe_shift;

	err = set_user_buf_size(dev, qp, &ucmd);
	if (err)
		goto err_uuar;

	if (ucmd.buf_addr && qp->buf_size) {
		qp->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr,
				       qp->buf_size, 0, 0);
		if (IS_ERR(qp->umem)) {
			mlx5_ib_dbg(dev, "umem_get failed\n");
			err = PTR_ERR(qp->umem);
			goto err_uuar;
		}
	} else {
		qp->umem = NULL;
	}

	if (qp->umem) {
		mlx5_ib_cont_pages(qp->umem, ucmd.buf_addr, &npages, &page_shift,
				   &ncont, NULL);
		err = mlx5_ib_get_buf_offset(ucmd.buf_addr, page_shift, &offset);
		if (err) {
			mlx5_ib_warn(dev, "bad offset\n");
			goto err_umem;
		}
		mlx5_ib_dbg(dev, "addr 0x%llx, size %d, npages %d, page_shift %d, ncont %d, offset %d\n",
			    ucmd.buf_addr, qp->buf_size, npages, page_shift, ncont, offset);
	}

	*inlen = sizeof(**in) + sizeof(*(*in)->pas) * ncont;
	*in = mlx5_vzalloc(*inlen);
	if (!*in) {
		err = -ENOMEM;
		goto err_umem;
	}
	if (qp->umem)
		mlx5_ib_populate_pas(dev, qp->umem, page_shift, (*in)->pas, 0);
	(*in)->ctx.log_pg_sz_remote_qpn =
		cpu_to_be32((page_shift - MLX5_ADAPTER_PAGE_SHIFT) << 24);
	(*in)->ctx.params2 = cpu_to_be32(offset << 6);

	(*in)->ctx.qp_counter_set_usr_page = cpu_to_be32(uar_index);
	resp->uuar_index = uuarn;
	qp->uuarn = uuarn;

	err = mlx5_ib_db_map_user(context, ucmd.db_addr, &qp->db);
	if (err) {
		mlx5_ib_dbg(dev, "map failed\n");
		goto err_free;
	}

	err = ib_copy_to_udata(udata, resp, sizeof(*resp));
	if (err) {
		mlx5_ib_dbg(dev, "copy failed\n");
		goto err_unmap;
	}
	qp->create_type = MLX5_QP_USER;

	return 0;

err_unmap:
	mlx5_ib_db_unmap_user(context, &qp->db);

err_free:
	kvfree(*in);

err_umem:
	if (qp->umem)
		ib_umem_release(qp->umem);

err_uuar:
	free_uuar(&context->uuari, uuarn);
	return err;
}

static void destroy_qp_user(struct ib_pd *pd, struct mlx5_ib_qp *qp)
{
	struct mlx5_ib_ucontext *context;

	context = to_mucontext(pd->uobject->context);
	mlx5_ib_db_unmap_user(context, &qp->db);
	if (qp->umem)
		ib_umem_release(qp->umem);
	free_uuar(&context->uuari, qp->uuarn);
}

static int create_kernel_qp(struct mlx5_ib_dev *dev,
			    struct ib_qp_init_attr *init_attr,
			    struct mlx5_ib_qp *qp,
			    struct mlx5_create_qp_mbox_in **in, int *inlen)
{
	enum mlx5_ib_latency_class lc = MLX5_IB_LATENCY_CLASS_LOW;
	struct mlx5_uuar_info *uuari;
	int uar_index;
	int uuarn;
	int err;

	uuari = &dev->mdev->priv.uuari;
	if (init_attr->create_flags & ~(IB_QP_CREATE_SIGNATURE_EN | IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK))
		return -EINVAL;

	if (init_attr->qp_type == MLX5_IB_QPT_REG_UMR)
		lc = MLX5_IB_LATENCY_CLASS_FAST_PATH;

	uuarn = alloc_uuar(uuari, lc);
	if (uuarn < 0) {
		mlx5_ib_dbg(dev, "\n");
		return -ENOMEM;
	}

	qp->bf = &uuari->bfs[uuarn];
	uar_index = qp->bf->uar->index;

	err = calc_sq_size(dev, init_attr, qp);
	if (err < 0) {
		mlx5_ib_dbg(dev, "err %d\n", err);
		goto err_uuar;
	}

	qp->rq.offset = 0;
	qp->sq.offset = qp->rq.wqe_cnt << qp->rq.wqe_shift;
	qp->buf_size = err + (qp->rq.wqe_cnt << qp->rq.wqe_shift);

	err = mlx5_buf_alloc(dev->mdev, qp->buf_size, &qp->buf);
	if (err) {
		mlx5_ib_dbg(dev, "err %d\n", err);
		goto err_uuar;
	}

	qp->sq.qend = mlx5_get_send_wqe(qp, qp->sq.wqe_cnt);
	*inlen = sizeof(**in) + sizeof(*(*in)->pas) * qp->buf.npages;
	*in = mlx5_vzalloc(*inlen);
	if (!*in) {
		err = -ENOMEM;
		goto err_buf;
	}
	(*in)->ctx.qp_counter_set_usr_page = cpu_to_be32(uar_index);
	(*in)->ctx.log_pg_sz_remote_qpn =
		cpu_to_be32((qp->buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT) << 24);
	/* Set "fast registration enabled" for all kernel QPs */
	(*in)->ctx.params1 |= cpu_to_be32(1 << 11);
	(*in)->ctx.sq_crq_size |= cpu_to_be16(1 << 4);

	mlx5_fill_page_array(&qp->buf, (*in)->pas);

	err = mlx5_db_alloc(dev->mdev, &qp->db);
	if (err) {
		mlx5_ib_dbg(dev, "err %d\n", err);
		goto err_free;
	}

	qp->sq.wrid = kmalloc(qp->sq.wqe_cnt * sizeof(*qp->sq.wrid), GFP_KERNEL);
	qp->sq.wr_data = kmalloc(qp->sq.wqe_cnt * sizeof(*qp->sq.wr_data), GFP_KERNEL);
	qp->rq.wrid = kmalloc(qp->rq.wqe_cnt * sizeof(*qp->rq.wrid), GFP_KERNEL);
	qp->sq.w_list = kmalloc(qp->sq.wqe_cnt * sizeof(*qp->sq.w_list), GFP_KERNEL);
	qp->sq.wqe_head = kmalloc(qp->sq.wqe_cnt * sizeof(*qp->sq.wqe_head), GFP_KERNEL);

	if (!qp->sq.wrid || !qp->sq.wr_data || !qp->rq.wrid ||
	    !qp->sq.w_list || !qp->sq.wqe_head) {
		err = -ENOMEM;
		goto err_wrid;
	}
	qp->create_type = MLX5_QP_KERNEL;

	return 0;

err_wrid:
	mlx5_db_free(dev->mdev, &qp->db);
	kfree(qp->sq.wqe_head);
	kfree(qp->sq.w_list);
	kfree(qp->sq.wrid);
	kfree(qp->sq.wr_data);
	kfree(qp->rq.wrid);

err_free:
	kvfree(*in);

err_buf:
	mlx5_buf_free(dev->mdev, &qp->buf);

err_uuar:
	free_uuar(&dev->mdev->priv.uuari, uuarn);
	return err;
}

static void destroy_qp_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp)
{
	mlx5_db_free(dev->mdev, &qp->db);
	kfree(qp->sq.wqe_head);
	kfree(qp->sq.w_list);
	kfree(qp->sq.wrid);
	kfree(qp->sq.wr_data);
	kfree(qp->rq.wrid);
	mlx5_buf_free(dev->mdev, &qp->buf);
	free_uuar(&dev->mdev->priv.uuari, qp->bf->uuarn);
}

static __be32 get_rx_type(struct mlx5_ib_qp *qp, struct ib_qp_init_attr *attr)
{
	if (attr->srq || (attr->qp_type == IB_QPT_XRC_TGT) ||
	    (attr->qp_type == IB_QPT_XRC_INI))
		return cpu_to_be32(MLX5_SRQ_RQ);
	else if (!qp->has_rq)
		return cpu_to_be32(MLX5_ZERO_LEN_RQ);
	else
		return cpu_to_be32(MLX5_NON_ZERO_RQ);
}

static int is_connected(enum ib_qp_type qp_type)
{
	if (qp_type == IB_QPT_RC || qp_type == IB_QPT_UC)
		return 1;

	return 0;
}

static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
			    struct ib_qp_init_attr *init_attr,
			    struct ib_udata *udata, struct mlx5_ib_qp *qp)
{
	struct mlx5_ib_resources *devr = &dev->devr;
	struct mlx5_core_dev *mdev = dev->mdev;
	struct mlx5_ib_create_qp_resp resp;
	struct mlx5_create_qp_mbox_in *in;
	struct mlx5_ib_create_qp ucmd;
	int inlen = sizeof(*in);
	int err;

	mlx5_ib_odp_create_qp(qp);

	mutex_init(&qp->mutex);
	spin_lock_init(&qp->sq.lock);
	spin_lock_init(&qp->rq.lock);

	if (init_attr->create_flags & IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK) {
		if (!MLX5_CAP_GEN(mdev, block_lb_mc)) {
			mlx5_ib_dbg(dev, "block multicast loopback isn't supported\n");
			return -EINVAL;
		} else {
			qp->flags |= MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK;
		}
	}

	if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR)
		qp->sq_signal_bits = MLX5_WQE_CTRL_CQ_UPDATE;

	if (pd && pd->uobject) {
		if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {
			mlx5_ib_dbg(dev, "copy failed\n");
			return -EFAULT;
		}

		qp->wq_sig = !!(ucmd.flags & MLX5_QP_FLAG_SIGNATURE);
		qp->scat_cqe = !!(ucmd.flags & MLX5_QP_FLAG_SCATTER_CQE);
	} else {
		qp->wq_sig = !!wq_signature;
	}

	qp->has_rq = qp_has_rq(init_attr);
	err = set_rq_size(dev, &init_attr->cap, qp->has_rq,
			  qp, (pd && pd->uobject) ? &ucmd : NULL);
	if (err) {
		mlx5_ib_dbg(dev, "err %d\n", err);
		return err;
	}

	if (pd) {
		if (pd->uobject) {
			__u32 max_wqes =
				1 << MLX5_CAP_GEN(mdev, log_max_qp_sz);
			mlx5_ib_dbg(dev, "requested sq_wqe_count (%d)\n", ucmd.sq_wqe_count);
			if (ucmd.rq_wqe_shift != qp->rq.wqe_shift ||
			    ucmd.rq_wqe_count != qp->rq.wqe_cnt) {
				mlx5_ib_dbg(dev, "invalid rq params\n");
				return -EINVAL;
			}
			if (ucmd.sq_wqe_count > max_wqes) {
				mlx5_ib_dbg(dev, "requested sq_wqe_count (%d) > max allowed (%d)\n",
					    ucmd.sq_wqe_count, max_wqes);
				return -EINVAL;
			}
			err = create_user_qp(dev, pd, qp, udata, &in, &resp, &inlen);
			if (err)
				mlx5_ib_dbg(dev, "err %d\n", err);
		} else {
			err = create_kernel_qp(dev, init_attr, qp, &in, &inlen);
			if (err)
				mlx5_ib_dbg(dev, "err %d\n", err);
			else
				qp->pa_lkey = to_mpd(pd)->pa_lkey;
		}

		if (err)
			return err;
	} else {
		in = mlx5_vzalloc(sizeof(*in));
		if (!in)
			return -ENOMEM;

		qp->create_type = MLX5_QP_EMPTY;
	}

	if (is_sqp(init_attr->qp_type))
		qp->port = init_attr->port_num;

	in->ctx.flags = cpu_to_be32(to_mlx5_st(init_attr->qp_type) << 16 |
				    MLX5_QP_PM_MIGRATED << 11);

	if (init_attr->qp_type != MLX5_IB_QPT_REG_UMR)
		in->ctx.flags_pd = cpu_to_be32(to_mpd(pd ? pd : devr->p0)->pdn);
	else
		in->ctx.flags_pd = cpu_to_be32(MLX5_QP_LAT_SENSITIVE);

	if (qp->wq_sig)
		in->ctx.flags_pd |= cpu_to_be32(MLX5_QP_ENABLE_SIG);

	if (qp->flags & MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK)
		in->ctx.flags_pd |= cpu_to_be32(MLX5_QP_BLOCK_MCAST);

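	/*
	 * Scatter-to-CQE: select how much data the device may scatter
	 * directly into the CQE; judging by the constant names, a 128-byte
	 * CQE leaves room for 64 bytes of data and a 64-byte CQE for 32.
	 */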
	if (qp->scat_cqe && is_connected(init_attr->qp_type)) {
		int rcqe_sz;
		int scqe_sz;

		rcqe_sz = mlx5_ib_get_cqe_size(dev, init_attr->recv_cq);
		scqe_sz = mlx5_ib_get_cqe_size(dev, init_attr->send_cq);

		if (rcqe_sz == 128)
			in->ctx.cs_res = MLX5_RES_SCAT_DATA64_CQE;
		else
			in->ctx.cs_res = MLX5_RES_SCAT_DATA32_CQE;

		if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) {
			if (scqe_sz == 128)
				in->ctx.cs_req = MLX5_REQ_SCAT_DATA64_CQE;
			else
				in->ctx.cs_req = MLX5_REQ_SCAT_DATA32_CQE;
		}
	}

	if (qp->rq.wqe_cnt) {
		in->ctx.rq_size_stride = (qp->rq.wqe_shift - 4);
		in->ctx.rq_size_stride |= ilog2(qp->rq.wqe_cnt) << 3;
	}

	in->ctx.rq_type_srqn = get_rx_type(qp, init_attr);

	if (qp->sq.wqe_cnt)
		in->ctx.sq_crq_size |= cpu_to_be16(ilog2(qp->sq.wqe_cnt) << 11);
	else
		in->ctx.sq_crq_size |= cpu_to_be16(0x8000);

	/* Set default resources */
	switch (init_attr->qp_type) {
	case IB_QPT_XRC_TGT:
		in->ctx.cqn_recv = cpu_to_be32(to_mcq(devr->c0)->mcq.cqn);
		in->ctx.cqn_send = cpu_to_be32(to_mcq(devr->c0)->mcq.cqn);
		in->ctx.rq_type_srqn |= cpu_to_be32(to_msrq(devr->s0)->msrq.srqn);
		in->ctx.xrcd = cpu_to_be32(to_mxrcd(init_attr->xrcd)->xrcdn);
		break;
	case IB_QPT_XRC_INI:
		in->ctx.cqn_recv = cpu_to_be32(to_mcq(devr->c0)->mcq.cqn);
		in->ctx.xrcd = cpu_to_be32(to_mxrcd(devr->x1)->xrcdn);
		in->ctx.rq_type_srqn |= cpu_to_be32(to_msrq(devr->s0)->msrq.srqn);
		break;
	default:
		if (init_attr->srq) {
			in->ctx.xrcd = cpu_to_be32(to_mxrcd(devr->x0)->xrcdn);
			in->ctx.rq_type_srqn |= cpu_to_be32(to_msrq(init_attr->srq)->msrq.srqn);
		} else {
			in->ctx.xrcd = cpu_to_be32(to_mxrcd(devr->x1)->xrcdn);
			in->ctx.rq_type_srqn |=
				cpu_to_be32(to_msrq(devr->s1)->msrq.srqn);
		}
	}

	if (init_attr->send_cq)
		in->ctx.cqn_send = cpu_to_be32(to_mcq(init_attr->send_cq)->mcq.cqn);

	if (init_attr->recv_cq)
		in->ctx.cqn_recv = cpu_to_be32(to_mcq(init_attr->recv_cq)->mcq.cqn);

	in->ctx.db_rec_addr = cpu_to_be64(qp->db.dma);

	err = mlx5_core_create_qp(dev->mdev, &qp->mqp, in, inlen);
	if (err) {
		mlx5_ib_dbg(dev, "create qp failed\n");
		goto err_create;
	}

	kvfree(in);
	/* Hardware wants QPN written in big-endian order (after
	 * shifting) for send doorbell.  Precompute this value to save
	 * a little bit when posting sends.
	 */
	qp->doorbell_qpn = swab32(qp->mqp.qpn << 8);

	qp->mqp.event = mlx5_ib_qp_event;

	return 0;

err_create:
	if (qp->create_type == MLX5_QP_USER)
		destroy_qp_user(pd, qp);
	else if (qp->create_type == MLX5_QP_KERNEL)
		destroy_qp_kernel(dev, qp);

	kvfree(in);
	return err;
}

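/*
 * Lock the send and receive CQs in a globally consistent order (by
 * ascending CQN) so that two threads locking the same pair from opposite
 * directions cannot deadlock. The __acquire()/__release() annotations keep
 * sparse's context tracking balanced on the paths where one CQ is missing
 * or both pointers name the same CQ.
 */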
static void mlx5_ib_lock_cqs(struct mlx5_ib_cq *send_cq, struct mlx5_ib_cq *recv_cq)
	__acquires(&send_cq->lock) __acquires(&recv_cq->lock)
{
	if (send_cq) {
		if (recv_cq) {
			if (send_cq->mcq.cqn < recv_cq->mcq.cqn) {
				spin_lock_irq(&send_cq->lock);
				spin_lock_nested(&recv_cq->lock,
						 SINGLE_DEPTH_NESTING);
			} else if (send_cq->mcq.cqn == recv_cq->mcq.cqn) {
				spin_lock_irq(&send_cq->lock);
				__acquire(&recv_cq->lock);
			} else {
				spin_lock_irq(&recv_cq->lock);
				spin_lock_nested(&send_cq->lock,
						 SINGLE_DEPTH_NESTING);
			}
		} else {
			spin_lock_irq(&send_cq->lock);
			__acquire(&recv_cq->lock);
		}
	} else if (recv_cq) {
		spin_lock_irq(&recv_cq->lock);
		__acquire(&send_cq->lock);
	} else {
		__acquire(&send_cq->lock);
		__acquire(&recv_cq->lock);
	}
}

static void mlx5_ib_unlock_cqs(struct mlx5_ib_cq *send_cq, struct mlx5_ib_cq *recv_cq)
	__releases(&send_cq->lock) __releases(&recv_cq->lock)
{
	if (send_cq) {
		if (recv_cq) {
			if (send_cq->mcq.cqn < recv_cq->mcq.cqn) {
				spin_unlock(&recv_cq->lock);
				spin_unlock_irq(&send_cq->lock);
			} else if (send_cq->mcq.cqn == recv_cq->mcq.cqn) {
				__release(&recv_cq->lock);
				spin_unlock_irq(&send_cq->lock);
			} else {
				spin_unlock(&send_cq->lock);
				spin_unlock_irq(&recv_cq->lock);
			}
		} else {
			__release(&recv_cq->lock);
			spin_unlock_irq(&send_cq->lock);
		}
	} else if (recv_cq) {
		__release(&send_cq->lock);
		spin_unlock_irq(&recv_cq->lock);
	} else {
		__release(&recv_cq->lock);
		__release(&send_cq->lock);
	}
}

static struct mlx5_ib_pd *get_pd(struct mlx5_ib_qp *qp)
{
	return to_mpd(qp->ibqp.pd);
}

static void get_cqs(struct mlx5_ib_qp *qp,
		    struct mlx5_ib_cq **send_cq, struct mlx5_ib_cq **recv_cq)
{
	switch (qp->ibqp.qp_type) {
	case IB_QPT_XRC_TGT:
		*send_cq = NULL;
		*recv_cq = NULL;
		break;
	case MLX5_IB_QPT_REG_UMR:
	case IB_QPT_XRC_INI:
		*send_cq = to_mcq(qp->ibqp.send_cq);
		*recv_cq = NULL;
		break;

	case IB_QPT_SMI:
	case IB_QPT_GSI:
	case IB_QPT_RC:
	case IB_QPT_UC:
	case IB_QPT_UD:
	case IB_QPT_RAW_IPV6:
	case IB_QPT_RAW_ETHERTYPE:
		*send_cq = to_mcq(qp->ibqp.send_cq);
		*recv_cq = to_mcq(qp->ibqp.recv_cq);
		break;

	case IB_QPT_RAW_PACKET:
	case IB_QPT_MAX:
	default:
		*send_cq = NULL;
		*recv_cq = NULL;
		break;
	}
}

static void destroy_qp_common(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp)
{
	struct mlx5_ib_cq *send_cq, *recv_cq;
	struct mlx5_modify_qp_mbox_in *in;
	int err;

	in = kzalloc(sizeof(*in), GFP_KERNEL);
	if (!in)
		return;

	if (qp->state != IB_QPS_RESET) {
		mlx5_ib_qp_disable_pagefaults(qp);
		if (mlx5_core_qp_modify(dev->mdev, to_mlx5_state(qp->state),
					MLX5_QP_STATE_RST, in, 0, &qp->mqp))
			mlx5_ib_warn(dev, "mlx5_ib: modify QP %06x to RESET failed\n",
				     qp->mqp.qpn);
	}

	get_cqs(qp, &send_cq, &recv_cq);

	if (qp->create_type == MLX5_QP_KERNEL) {
		mlx5_ib_lock_cqs(send_cq, recv_cq);
		__mlx5_ib_cq_clean(recv_cq, qp->mqp.qpn,
				   qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
		if (send_cq != recv_cq)
			__mlx5_ib_cq_clean(send_cq, qp->mqp.qpn, NULL);
		mlx5_ib_unlock_cqs(send_cq, recv_cq);
	}

	err = mlx5_core_destroy_qp(dev->mdev, &qp->mqp);
	if (err)
		mlx5_ib_warn(dev, "failed to destroy QP 0x%x\n", qp->mqp.qpn);
	kfree(in);

	if (qp->create_type == MLX5_QP_KERNEL)
		destroy_qp_kernel(dev, qp);
	else if (qp->create_type == MLX5_QP_USER)
		destroy_qp_user(&get_pd(qp)->ibpd, qp);
}

static const char *ib_qp_type_str(enum ib_qp_type type)
{
	switch (type) {
	case IB_QPT_SMI:
		return "IB_QPT_SMI";
	case IB_QPT_GSI:
		return "IB_QPT_GSI";
	case IB_QPT_RC:
		return "IB_QPT_RC";
	case IB_QPT_UC:
		return "IB_QPT_UC";
	case IB_QPT_UD:
		return "IB_QPT_UD";
	case IB_QPT_RAW_IPV6:
		return "IB_QPT_RAW_IPV6";
	case IB_QPT_RAW_ETHERTYPE:
		return "IB_QPT_RAW_ETHERTYPE";
	case IB_QPT_XRC_INI:
		return "IB_QPT_XRC_INI";
	case IB_QPT_XRC_TGT:
		return "IB_QPT_XRC_TGT";
	case IB_QPT_RAW_PACKET:
		return "IB_QPT_RAW_PACKET";
	case MLX5_IB_QPT_REG_UMR:
		return "MLX5_IB_QPT_REG_UMR";
	case IB_QPT_MAX:
	default:
		return "Invalid QP type";
	}
}

struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd,
				struct ib_qp_init_attr *init_attr,
				struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev;
	struct mlx5_ib_qp *qp;
	u16 xrcdn = 0;
	int err;

	if (pd) {
		dev = to_mdev(pd->device);
	} else {
		/* being cautious here */
		if (init_attr->qp_type != IB_QPT_XRC_TGT &&
		    init_attr->qp_type != MLX5_IB_QPT_REG_UMR) {
			pr_warn("%s: no PD for transport %s\n", __func__,
				ib_qp_type_str(init_attr->qp_type));
			return ERR_PTR(-EINVAL);
		}
		dev = to_mdev(to_mxrcd(init_attr->xrcd)->ibxrcd.device);
	}

	switch (init_attr->qp_type) {
	case IB_QPT_XRC_TGT:
	case IB_QPT_XRC_INI:
		if (!MLX5_CAP_GEN(dev->mdev, xrc)) {
			mlx5_ib_dbg(dev, "XRC not supported\n");
			return ERR_PTR(-ENOSYS);
		}
		init_attr->recv_cq = NULL;
		if (init_attr->qp_type == IB_QPT_XRC_TGT) {
			xrcdn = to_mxrcd(init_attr->xrcd)->xrcdn;
			init_attr->send_cq = NULL;
		}

		/* fall through */
	case IB_QPT_RC:
	case IB_QPT_UC:
	case IB_QPT_UD:
	case IB_QPT_SMI:
	case IB_QPT_GSI:
	case MLX5_IB_QPT_REG_UMR:
		qp = kzalloc(sizeof(*qp), GFP_KERNEL);
		if (!qp)
			return ERR_PTR(-ENOMEM);

		err = create_qp_common(dev, pd, init_attr, udata, qp);
		if (err) {
			mlx5_ib_dbg(dev, "create_qp_common failed\n");
			kfree(qp);
			return ERR_PTR(err);
		}

		if (is_qp0(init_attr->qp_type))
			qp->ibqp.qp_num = 0;
		else if (is_qp1(init_attr->qp_type))
			qp->ibqp.qp_num = 1;
		else
			qp->ibqp.qp_num = qp->mqp.qpn;

		mlx5_ib_dbg(dev, "ib qpnum 0x%x, mlx qpn 0x%x, rcqn 0x%x, scqn 0x%x\n",
			    qp->ibqp.qp_num, qp->mqp.qpn, to_mcq(init_attr->recv_cq)->mcq.cqn,
			    to_mcq(init_attr->send_cq)->mcq.cqn);

		qp->xrcdn = xrcdn;

		break;

	case IB_QPT_RAW_IPV6:
	case IB_QPT_RAW_ETHERTYPE:
	case IB_QPT_RAW_PACKET:
	case IB_QPT_MAX:
	default:
		mlx5_ib_dbg(dev, "unsupported qp type %d\n",
			    init_attr->qp_type);
		/* Don't support raw QPs */
		return ERR_PTR(-EINVAL);
	}

	return &qp->ibqp;
}

int mlx5_ib_destroy_qp(struct ib_qp *qp)
{
	struct mlx5_ib_dev *dev = to_mdev(qp->device);
	struct mlx5_ib_qp *mqp = to_mqp(qp);

	destroy_qp_common(dev, mqp);

	kfree(mqp);

	return 0;
}

static __be32 to_mlx5_access_flags(struct mlx5_ib_qp *qp, const struct ib_qp_attr *attr,
				   int attr_mask)
{
	u32 hw_access_flags = 0;
	u8 dest_rd_atomic;
	u32 access_flags;

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		dest_rd_atomic = attr->max_dest_rd_atomic;
	else
		dest_rd_atomic = qp->resp_depth;

	if (attr_mask & IB_QP_ACCESS_FLAGS)
		access_flags = attr->qp_access_flags;
	else
		access_flags = qp->atomic_rd_en;

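	/*
	 * With no responder resources the QP cannot serve remote reads or
	 * atomics, so only the remote-write bit may remain set.
	 */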
	if (!dest_rd_atomic)
		access_flags &= IB_ACCESS_REMOTE_WRITE;

	if (access_flags & IB_ACCESS_REMOTE_READ)
		hw_access_flags |= MLX5_QP_BIT_RRE;
	if (access_flags & IB_ACCESS_REMOTE_ATOMIC)
		hw_access_flags |= (MLX5_QP_BIT_RAE | MLX5_ATOMIC_MODE_CX);
	if (access_flags & IB_ACCESS_REMOTE_WRITE)
		hw_access_flags |= MLX5_QP_BIT_RWE;

	return cpu_to_be32(hw_access_flags);
}

enum {
	MLX5_PATH_FLAG_FL	= 1 << 0,
	MLX5_PATH_FLAG_FREE_AR	= 1 << 1,
	MLX5_PATH_FLAG_COUNTER	= 1 << 2,
};

static int ib_rate_to_mlx5(struct mlx5_ib_dev *dev, u8 rate)
{
	if (rate == IB_RATE_PORT_CURRENT) {
		return 0;
	} else if (rate < IB_RATE_2_5_GBPS || rate > IB_RATE_300_GBPS) {
		return -EINVAL;
	} else {
		while (rate != IB_RATE_2_5_GBPS &&
		       !(1 << (rate + MLX5_STAT_RATE_OFFSET) &
			 MLX5_CAP_GEN(dev->mdev, stat_rate_support)))
			--rate;
	}

	return rate + MLX5_STAT_RATE_OFFSET;
}

static int mlx5_set_path(struct mlx5_ib_dev *dev, const struct ib_ah_attr *ah,
			 struct mlx5_qp_path *path, u8 port, int attr_mask,
			 u32 path_flags, const struct ib_qp_attr *attr)
{
	int err;

	path->fl = (path_flags & MLX5_PATH_FLAG_FL) ? 0x80 : 0;
	path->free_ar = (path_flags & MLX5_PATH_FLAG_FREE_AR) ? 0x80 : 0;

	if (attr_mask & IB_QP_PKEY_INDEX)
		path->pkey_index = attr->pkey_index;

	path->grh_mlid	= ah->src_path_bits & 0x7f;
	path->rlid	= cpu_to_be16(ah->dlid);

	if (ah->ah_flags & IB_AH_GRH) {
		if (ah->grh.sgid_index >=
		    dev->mdev->port_caps[port - 1].gid_table_len) {
			pr_err("sgid_index (%u) too large. max is %d\n",
			       ah->grh.sgid_index,
			       dev->mdev->port_caps[port - 1].gid_table_len);
			return -EINVAL;
		}
		path->grh_mlid |= 1 << 7;
		path->mgid_index = ah->grh.sgid_index;
		path->hop_limit  = ah->grh.hop_limit;
		path->tclass_flowlabel =
			cpu_to_be32((ah->grh.traffic_class << 20) |
				    (ah->grh.flow_label));
		memcpy(path->rgid, ah->grh.dgid.raw, 16);
	}

	err = ib_rate_to_mlx5(dev, ah->static_rate);
	if (err < 0)
		return err;
	path->static_rate = err;
	path->port = port;

	if (attr_mask & IB_QP_TIMEOUT)
		path->ackto_lt = attr->timeout << 3;

	path->sl = ah->sl & 0xf;

	return 0;
}

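/*
 * opt_mask[cur_state][new_state][service_type] lists the optional
 * parameters the device accepts for a given state transition;
 * __mlx5_ib_modify_qp() converts the IB attribute mask with
 * ib_mask_to_mlx5_opt() and then ANDs it with the matching entry.
 */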
static enum mlx5_qp_optpar opt_mask[MLX5_QP_NUM_STATE][MLX5_QP_NUM_STATE][MLX5_QP_ST_MAX] = {
	[MLX5_QP_STATE_INIT] = {
		[MLX5_QP_STATE_INIT] = {
			[MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_RRE		|
					  MLX5_QP_OPTPAR_RAE		|
					  MLX5_QP_OPTPAR_RWE		|
					  MLX5_QP_OPTPAR_PKEY_INDEX	|
					  MLX5_QP_OPTPAR_PRI_PORT,
			[MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_RWE		|
					  MLX5_QP_OPTPAR_PKEY_INDEX	|
					  MLX5_QP_OPTPAR_PRI_PORT,
			[MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_PKEY_INDEX	|
					  MLX5_QP_OPTPAR_Q_KEY		|
					  MLX5_QP_OPTPAR_PRI_PORT,
		},
		[MLX5_QP_STATE_RTR] = {
			[MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH	|
					  MLX5_QP_OPTPAR_RRE		|
					  MLX5_QP_OPTPAR_RAE		|
					  MLX5_QP_OPTPAR_RWE		|
					  MLX5_QP_OPTPAR_PKEY_INDEX,
			[MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH	|
					  MLX5_QP_OPTPAR_RWE		|
					  MLX5_QP_OPTPAR_PKEY_INDEX,
			[MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_PKEY_INDEX	|
					  MLX5_QP_OPTPAR_Q_KEY,
			[MLX5_QP_ST_MLX] = MLX5_QP_OPTPAR_PKEY_INDEX	|
					   MLX5_QP_OPTPAR_Q_KEY,
			[MLX5_QP_ST_XRC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH |
					   MLX5_QP_OPTPAR_RRE		|
					   MLX5_QP_OPTPAR_RAE		|
					   MLX5_QP_OPTPAR_RWE		|
					   MLX5_QP_OPTPAR_PKEY_INDEX,
		},
	},
	[MLX5_QP_STATE_RTR] = {
		[MLX5_QP_STATE_RTS] = {
			[MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH	|
					  MLX5_QP_OPTPAR_RRE		|
					  MLX5_QP_OPTPAR_RAE		|
					  MLX5_QP_OPTPAR_RWE		|
					  MLX5_QP_OPTPAR_PM_STATE	|
					  MLX5_QP_OPTPAR_RNR_TIMEOUT,
			[MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH	|
					  MLX5_QP_OPTPAR_RWE		|
					  MLX5_QP_OPTPAR_PM_STATE,
			[MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_Q_KEY,
		},
	},
	[MLX5_QP_STATE_RTS] = {
		[MLX5_QP_STATE_RTS] = {
			[MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_RRE		|
					  MLX5_QP_OPTPAR_RAE		|
					  MLX5_QP_OPTPAR_RWE		|
					  MLX5_QP_OPTPAR_RNR_TIMEOUT	|
					  MLX5_QP_OPTPAR_PM_STATE	|
					  MLX5_QP_OPTPAR_ALT_ADDR_PATH,
			[MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_RWE		|
					  MLX5_QP_OPTPAR_PM_STATE	|
					  MLX5_QP_OPTPAR_ALT_ADDR_PATH,
			[MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_Q_KEY		|
					  MLX5_QP_OPTPAR_SRQN		|
					  MLX5_QP_OPTPAR_CQN_RCV,
		},
	},
	[MLX5_QP_STATE_SQER] = {
		[MLX5_QP_STATE_RTS] = {
			[MLX5_QP_ST_UD]	 = MLX5_QP_OPTPAR_Q_KEY,
			[MLX5_QP_ST_MLX] = MLX5_QP_OPTPAR_Q_KEY,
			[MLX5_QP_ST_UC]	 = MLX5_QP_OPTPAR_RWE,
			[MLX5_QP_ST_RC]	 = MLX5_QP_OPTPAR_RNR_TIMEOUT	|
					   MLX5_QP_OPTPAR_RWE		|
					   MLX5_QP_OPTPAR_RAE		|
					   MLX5_QP_OPTPAR_RRE,
		},
	},
};

static int ib_nr_to_mlx5_nr(int ib_mask)
{
	switch (ib_mask) {
	case IB_QP_STATE:
		return 0;
	case IB_QP_CUR_STATE:
		return 0;
	case IB_QP_EN_SQD_ASYNC_NOTIFY:
		return 0;
	case IB_QP_ACCESS_FLAGS:
		return MLX5_QP_OPTPAR_RWE | MLX5_QP_OPTPAR_RRE |
			MLX5_QP_OPTPAR_RAE;
	case IB_QP_PKEY_INDEX:
		return MLX5_QP_OPTPAR_PKEY_INDEX;
	case IB_QP_PORT:
		return MLX5_QP_OPTPAR_PRI_PORT;
	case IB_QP_QKEY:
		return MLX5_QP_OPTPAR_Q_KEY;
	case IB_QP_AV:
		return MLX5_QP_OPTPAR_PRIMARY_ADDR_PATH |
			MLX5_QP_OPTPAR_PRI_PORT;
	case IB_QP_PATH_MTU:
		return 0;
	case IB_QP_TIMEOUT:
		return MLX5_QP_OPTPAR_ACK_TIMEOUT;
	case IB_QP_RETRY_CNT:
		return MLX5_QP_OPTPAR_RETRY_COUNT;
	case IB_QP_RNR_RETRY:
		return MLX5_QP_OPTPAR_RNR_RETRY;
	case IB_QP_RQ_PSN:
		return 0;
	case IB_QP_MAX_QP_RD_ATOMIC:
		return MLX5_QP_OPTPAR_SRA_MAX;
	case IB_QP_ALT_PATH:
		return MLX5_QP_OPTPAR_ALT_ADDR_PATH;
	case IB_QP_MIN_RNR_TIMER:
		return MLX5_QP_OPTPAR_RNR_TIMEOUT;
	case IB_QP_SQ_PSN:
		return 0;
	case IB_QP_MAX_DEST_RD_ATOMIC:
		return MLX5_QP_OPTPAR_RRA_MAX | MLX5_QP_OPTPAR_RWE |
			MLX5_QP_OPTPAR_RRE | MLX5_QP_OPTPAR_RAE;
	case IB_QP_PATH_MIG_STATE:
		return MLX5_QP_OPTPAR_PM_STATE;
	case IB_QP_CAP:
		return 0;
	case IB_QP_DEST_QPN:
		return 0;
	}
	return 0;
}

static int ib_mask_to_mlx5_opt(int ib_mask)
{
	int result = 0;
	int i;

	for (i = 0; i < 8 * sizeof(int); i++) {
		if ((1 << i) & ib_mask)
			result |= ib_nr_to_mlx5_nr(1 << i);
	}

	return result;
}

static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
			       const struct ib_qp_attr *attr, int attr_mask,
			       enum ib_qp_state cur_state, enum ib_qp_state new_state)
{
	struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
	struct mlx5_ib_qp *qp = to_mqp(ibqp);
	struct mlx5_ib_cq *send_cq, *recv_cq;
	struct mlx5_qp_context *context;
	struct mlx5_modify_qp_mbox_in *in;
	struct mlx5_ib_pd *pd;
	enum mlx5_qp_state mlx5_cur, mlx5_new;
	enum mlx5_qp_optpar optpar;
	int sqd_event;
	int mlx5_st;
	int err;

	in = kzalloc(sizeof(*in), GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	context = &in->ctx;
	err = to_mlx5_st(ibqp->qp_type);
	if (err < 0)
		goto out;

	context->flags = cpu_to_be32(err << 16);

	if (!(attr_mask & IB_QP_PATH_MIG_STATE)) {
		context->flags |= cpu_to_be32(MLX5_QP_PM_MIGRATED << 11);
	} else {
		switch (attr->path_mig_state) {
		case IB_MIG_MIGRATED:
			context->flags |= cpu_to_be32(MLX5_QP_PM_MIGRATED << 11);
			break;
		case IB_MIG_REARM:
			context->flags |= cpu_to_be32(MLX5_QP_PM_REARM << 11);
			break;
		case IB_MIG_ARMED:
			context->flags |= cpu_to_be32(MLX5_QP_PM_ARMED << 11);
			break;
		}
	}

	if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_SMI) {
		context->mtu_msgmax = (IB_MTU_256 << 5) | 8;
	} else if (ibqp->qp_type == IB_QPT_UD ||
		   ibqp->qp_type == MLX5_IB_QPT_REG_UMR) {
		context->mtu_msgmax = (IB_MTU_4096 << 5) | 12;
	} else if (attr_mask & IB_QP_PATH_MTU) {
		if (attr->path_mtu < IB_MTU_256 ||
		    attr->path_mtu > IB_MTU_4096) {
			mlx5_ib_warn(dev, "invalid mtu %d\n", attr->path_mtu);
			err = -EINVAL;
			goto out;
		}
		context->mtu_msgmax = (attr->path_mtu << 5) |
				      (u8)MLX5_CAP_GEN(dev->mdev, log_max_msg);
	}

	if (attr_mask & IB_QP_DEST_QPN)
		context->log_pg_sz_remote_qpn = cpu_to_be32(attr->dest_qp_num);

	if (attr_mask & IB_QP_PKEY_INDEX)
		context->pri_path.pkey_index = attr->pkey_index;

	/* todo implement counter_index functionality */

	if (is_sqp(ibqp->qp_type))
		context->pri_path.port = qp->port;

	if (attr_mask & IB_QP_PORT)
		context->pri_path.port = attr->port_num;

	if (attr_mask & IB_QP_AV) {
		err = mlx5_set_path(dev, &attr->ah_attr, &context->pri_path,
				    attr_mask & IB_QP_PORT ? attr->port_num : qp->port,
				    attr_mask, 0, attr);
		if (err)
			goto out;
	}

	if (attr_mask & IB_QP_TIMEOUT)
		context->pri_path.ackto_lt |= attr->timeout << 3;

	if (attr_mask & IB_QP_ALT_PATH) {
		err = mlx5_set_path(dev, &attr->alt_ah_attr, &context->alt_path,
				    attr->alt_port_num, attr_mask, 0, attr);
		if (err)
			goto out;
	}

	pd = get_pd(qp);
	get_cqs(qp, &send_cq, &recv_cq);

	context->flags_pd = cpu_to_be32(pd ? pd->pdn : to_mpd(dev->devr.p0)->pdn);
	context->cqn_send = send_cq ? cpu_to_be32(send_cq->mcq.cqn) : 0;
	context->cqn_recv = recv_cq ? cpu_to_be32(recv_cq->mcq.cqn) : 0;
	context->params1  = cpu_to_be32(MLX5_IB_ACK_REQ_FREQ << 28);

	if (attr_mask & IB_QP_RNR_RETRY)
		context->params1 |= cpu_to_be32(attr->rnr_retry << 13);

	if (attr_mask & IB_QP_RETRY_CNT)
		context->params1 |= cpu_to_be32(attr->retry_cnt << 16);

	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
		if (attr->max_rd_atomic)
			context->params1 |=
				cpu_to_be32(fls(attr->max_rd_atomic - 1) << 21);
	}

	if (attr_mask & IB_QP_SQ_PSN)
		context->next_send_psn = cpu_to_be32(attr->sq_psn);

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
		if (attr->max_dest_rd_atomic)
			context->params2 |=
				cpu_to_be32(fls(attr->max_dest_rd_atomic - 1) << 21);
	}

	if (attr_mask & (IB_QP_ACCESS_FLAGS | IB_QP_MAX_DEST_RD_ATOMIC))
		context->params2 |= to_mlx5_access_flags(qp, attr, attr_mask);

	if (attr_mask & IB_QP_MIN_RNR_TIMER)
		context->rnr_nextrecvpsn |= cpu_to_be32(attr->min_rnr_timer << 24);

	if (attr_mask & IB_QP_RQ_PSN)
		context->rnr_nextrecvpsn |= cpu_to_be32(attr->rq_psn);

	if (attr_mask & IB_QP_QKEY)
		context->qkey = cpu_to_be32(attr->qkey);

	if (qp->rq.wqe_cnt && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
		context->db_rec_addr = cpu_to_be64(qp->db.dma);

	if (cur_state == IB_QPS_RTS && new_state == IB_QPS_SQD &&
	    attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY && attr->en_sqd_async_notify)
		sqd_event = 1;
	else
		sqd_event = 0;

	if (!ibqp->uobject && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
		context->sq_crq_size |= cpu_to_be16(1 << 4);

1699 mlx5_cur = to_mlx5_state(cur_state);
1700 mlx5_new = to_mlx5_state(new_state);
1701 mlx5_st = to_mlx5_st(ibqp->qp_type);
Eli Cohen07c91132013-10-24 12:01:01 +03001702 if (mlx5_st < 0)
Eli Cohene126ba92013-07-07 17:25:49 +03001703 goto out;
1704
Haggai Eran6aec21f2014-12-11 17:04:23 +02001705 /* If moving to a reset or error state, we must disable page faults on
1706 * this QP and flush all current page faults. Otherwise a stale page
1707 * fault may attempt to work on this QP after it is reset and moved
1708 * again to RTS, and may cause the driver and the device to get out of
1709 * sync. */
1710 if (cur_state != IB_QPS_RESET && cur_state != IB_QPS_ERR &&
1711 (new_state == IB_QPS_RESET || new_state == IB_QPS_ERR))
1712 mlx5_ib_qp_disable_pagefaults(qp);
1713
1714 optpar = ib_mask_to_mlx5_opt(attr_mask);
1715 optpar &= opt_mask[mlx5_cur][mlx5_new][mlx5_st];
1716 in->optparam = cpu_to_be32(optpar);
1717 err = mlx5_core_qp_modify(dev->mdev, to_mlx5_state(cur_state),
1718     to_mlx5_state(new_state), in, sqd_event,
1719 &qp->mqp);
1720 if (err)
1721 goto out;
1722
1723 if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
1724 mlx5_ib_qp_enable_pagefaults(qp);
1725
1726 qp->state = new_state;
1727
1728 if (attr_mask & IB_QP_ACCESS_FLAGS)
1729 qp->atomic_rd_en = attr->qp_access_flags;
1730 if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
1731 qp->resp_depth = attr->max_dest_rd_atomic;
1732 if (attr_mask & IB_QP_PORT)
1733 qp->port = attr->port_num;
1734 if (attr_mask & IB_QP_ALT_PATH)
1735 qp->alt_port = attr->alt_port_num;
1736
1737 /*
1738 * If we moved a kernel QP to RESET, clean up all old CQ
1739 * entries and reinitialize the QP.
1740 */
1741 if (new_state == IB_QPS_RESET && !ibqp->uobject) {
1742 mlx5_ib_cq_clean(recv_cq, qp->mqp.qpn,
1743 ibqp->srq ? to_msrq(ibqp->srq) : NULL);
1744 if (send_cq != recv_cq)
1745 mlx5_ib_cq_clean(send_cq, qp->mqp.qpn, NULL);
1746
1747 qp->rq.head = 0;
1748 qp->rq.tail = 0;
1749 qp->sq.head = 0;
1750 qp->sq.tail = 0;
1751 qp->sq.cur_post = 0;
1752 qp->sq.last_poll = 0;
1753 qp->db.db[MLX5_RCV_DBR] = 0;
1754 qp->db.db[MLX5_SND_DBR] = 0;
1755 }
1756
1757out:
1758 kfree(in);
1759 return err;
1760}
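/*
 * Illustrative example (not used by the driver): params1/params2 above
 * encode the maximum number of outstanding RDMA reads/atomics as a log2
 * value rounded up to the next power of two, via fls().  Assuming that
 * encoding, the round trip looks like this; mlx5_ib_query_qp() below
 * decodes it with 1 << ((params1 >> 21) & 0x7).
 */
static u8 __maybe_unused example_encode_max_rd_atomic(u32 max_rd_atomic)
{
	/* 1 -> 0, 2 -> 1, 3..4 -> 2, 5..8 -> 3, 9..16 -> 4, ... */
	return max_rd_atomic ? fls(max_rd_atomic - 1) : 0;
}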
1761
1762int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
1763 int attr_mask, struct ib_udata *udata)
1764{
1765 struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
1766 struct mlx5_ib_qp *qp = to_mqp(ibqp);
1767 enum ib_qp_state cur_state, new_state;
1768 int err = -EINVAL;
1769 int port;
1770
1771 mutex_lock(&qp->mutex);
1772
1773 cur_state = attr_mask & IB_QP_CUR_STATE ? attr->cur_qp_state : qp->state;
1774 new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;
1775
1776 if (ibqp->qp_type != MLX5_IB_QPT_REG_UMR &&
1777     !ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, attr_mask,
1778     IB_LINK_LAYER_UNSPECIFIED))
1779  goto out;
1780
1781 if ((attr_mask & IB_QP_PORT) &&
1782     (attr->port_num == 0 ||
1783      attr->port_num > MLX5_CAP_GEN(dev->mdev, num_ports)))
1784  goto out;
1785
1786 if (attr_mask & IB_QP_PKEY_INDEX) {
1787 port = attr_mask & IB_QP_PORT ? attr->port_num : qp->port;
1788  if (attr->pkey_index >=
1789      dev->mdev->port_caps[port - 1].pkey_table_len)
1790   goto out;
1791 }
1792
1793 if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
1794     attr->max_rd_atomic >
1795     (1 << MLX5_CAP_GEN(dev->mdev, log_max_ra_res_qp)))
1796  goto out;
1797
1798 if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC &&
1799     attr->max_dest_rd_atomic >
1800     (1 << MLX5_CAP_GEN(dev->mdev, log_max_ra_req_qp)))
1801  goto out;
1802
1803 if (cur_state == new_state && cur_state == IB_QPS_RESET) {
1804 err = 0;
1805 goto out;
1806 }
1807
1808 err = __mlx5_ib_modify_qp(ibqp, attr, attr_mask, cur_state, new_state);
1809
1810out:
1811 mutex_unlock(&qp->mutex);
1812 return err;
1813}
1814
1815static int mlx5_wq_overflow(struct mlx5_ib_wq *wq, int nreq, struct ib_cq *ib_cq)
1816{
1817 struct mlx5_ib_cq *cq;
1818 unsigned cur;
1819
1820 cur = wq->head - wq->tail;
1821 if (likely(cur + nreq < wq->max_post))
1822 return 0;
1823
1824 cq = to_mcq(ib_cq);
1825 spin_lock(&cq->lock);
1826 cur = wq->head - wq->tail;
1827 spin_unlock(&cq->lock);
1828
1829 return cur + nreq >= wq->max_post;
1830}
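/*
 * Illustrative example (not used by the driver): wq->head and wq->tail
 * above are free-running unsigned counters that are never masked, so
 * "head - tail" yields the number of outstanding WQEs even after the
 * counters wrap, thanks to modulo-2^32 unsigned arithmetic.
 */
static unsigned int __maybe_unused example_wq_occupancy(void)
{
	unsigned int head = 3;		/* wrapped past 2^32 */
	unsigned int tail = 0xfffffffe;	/* not wrapped yet */

	return head - tail;		/* == 5 outstanding WQEs */
}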
1831
1832static __always_inline void set_raddr_seg(struct mlx5_wqe_raddr_seg *rseg,
1833 u64 remote_addr, u32 rkey)
1834{
1835 rseg->raddr = cpu_to_be64(remote_addr);
1836 rseg->rkey = cpu_to_be32(rkey);
1837 rseg->reserved = 0;
1838}
1839
1840static void set_datagram_seg(struct mlx5_wqe_datagram_seg *dseg,
1841 struct ib_send_wr *wr)
1842{
1843 memcpy(&dseg->av, &to_mah(wr->wr.ud.ah)->av, sizeof(struct mlx5_av));
1844 dseg->av.dqp_dct = cpu_to_be32(wr->wr.ud.remote_qpn | MLX5_EXTENDED_UD_AV);
1845 dseg->av.key.qkey.qkey = cpu_to_be32(wr->wr.ud.remote_qkey);
1846}
1847
1848static void set_data_ptr_seg(struct mlx5_wqe_data_seg *dseg, struct ib_sge *sg)
1849{
1850 dseg->byte_count = cpu_to_be32(sg->length);
1851 dseg->lkey = cpu_to_be32(sg->lkey);
1852 dseg->addr = cpu_to_be64(sg->addr);
1853}
1854
1855static __be16 get_klm_octo(int npages)
1856{
1857 return cpu_to_be16(ALIGN(npages, 8) / 2);
1858}
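/*
 * Illustrative example (not used by the driver): assuming 8-byte
 * translation entries, two per 16-byte octoword, padded out to 64-byte
 * (8-entry) chunks, get_klm_octo() above reduces to the arithmetic below:
 * npages 1..8 -> 4 octowords, 9..16 -> 8, 17..24 -> 12, and so on.
 */
static __be16 __maybe_unused example_klm_octo(int npages)
{
	int padded = ALIGN(npages, 8);		/* pad to an 8-entry chunk */

	return cpu_to_be16(padded * 8 / 16);	/* 8B/entry, 16B/octoword */
}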
1859
1860static __be64 frwr_mkey_mask(void)
1861{
1862 u64 result;
1863
1864 result = MLX5_MKEY_MASK_LEN |
1865 MLX5_MKEY_MASK_PAGE_SIZE |
1866 MLX5_MKEY_MASK_START_ADDR |
1867 MLX5_MKEY_MASK_EN_RINVAL |
1868 MLX5_MKEY_MASK_KEY |
1869 MLX5_MKEY_MASK_LR |
1870 MLX5_MKEY_MASK_LW |
1871 MLX5_MKEY_MASK_RR |
1872 MLX5_MKEY_MASK_RW |
1873 MLX5_MKEY_MASK_A |
1874 MLX5_MKEY_MASK_SMALL_FENCE |
1875 MLX5_MKEY_MASK_FREE;
1876
1877 return cpu_to_be64(result);
1878}
1879
1880static __be64 sig_mkey_mask(void)
1881{
1882 u64 result;
1883
1884 result = MLX5_MKEY_MASK_LEN |
1885 MLX5_MKEY_MASK_PAGE_SIZE |
1886 MLX5_MKEY_MASK_START_ADDR |
1887  MLX5_MKEY_MASK_EN_SIGERR |
1888  MLX5_MKEY_MASK_EN_RINVAL |
1889 MLX5_MKEY_MASK_KEY |
1890 MLX5_MKEY_MASK_LR |
1891 MLX5_MKEY_MASK_LW |
1892 MLX5_MKEY_MASK_RR |
1893 MLX5_MKEY_MASK_RW |
1894 MLX5_MKEY_MASK_SMALL_FENCE |
1895 MLX5_MKEY_MASK_FREE |
1896 MLX5_MKEY_MASK_BSF_EN;
1897
1898 return cpu_to_be64(result);
1899}
1900
1901static void set_frwr_umr_segment(struct mlx5_wqe_umr_ctrl_seg *umr,
1902 struct ib_send_wr *wr, int li)
1903{
1904 memset(umr, 0, sizeof(*umr));
1905
1906 if (li) {
1907 umr->mkey_mask = cpu_to_be64(MLX5_MKEY_MASK_FREE);
1908 umr->flags = 1 << 7;
1909 return;
1910 }
1911
1912 umr->flags = (1 << 5); /* fail if not free */
1913 umr->klm_octowords = get_klm_octo(wr->wr.fast_reg.page_list_len);
1914 umr->mkey_mask = frwr_mkey_mask();
1915}
1916
1917static __be64 get_umr_reg_mr_mask(void)
1918{
1919 u64 result;
1920
1921 result = MLX5_MKEY_MASK_LEN |
1922 MLX5_MKEY_MASK_PAGE_SIZE |
1923 MLX5_MKEY_MASK_START_ADDR |
1924 MLX5_MKEY_MASK_PD |
1925 MLX5_MKEY_MASK_LR |
1926 MLX5_MKEY_MASK_LW |
1927 MLX5_MKEY_MASK_KEY |
1928 MLX5_MKEY_MASK_RR |
1929 MLX5_MKEY_MASK_RW |
1930 MLX5_MKEY_MASK_A |
1931 MLX5_MKEY_MASK_FREE;
1932
1933 return cpu_to_be64(result);
1934}
1935
1936static __be64 get_umr_unreg_mr_mask(void)
1937{
1938 u64 result;
1939
1940 result = MLX5_MKEY_MASK_FREE;
1941
1942 return cpu_to_be64(result);
1943}
1944
1945static __be64 get_umr_update_mtt_mask(void)
1946{
1947 u64 result;
1948
1949 result = MLX5_MKEY_MASK_FREE;
1950
1951 return cpu_to_be64(result);
1952}
1953
1954static void set_reg_umr_segment(struct mlx5_wqe_umr_ctrl_seg *umr,
1955     struct ib_send_wr *wr)
1956{
1957 struct mlx5_umr_wr *umrwr = (struct mlx5_umr_wr *)&wr->wr.fast_reg;
1958
1959 memset(umr, 0, sizeof(*umr));
1960
1961 if (wr->send_flags & MLX5_IB_SEND_UMR_FAIL_IF_FREE)
1962  umr->flags = MLX5_UMR_CHECK_FREE; /* fail if free */
1963 else
1964  umr->flags = MLX5_UMR_CHECK_NOT_FREE; /* fail if not free */
1965
1966 if (!(wr->send_flags & MLX5_IB_SEND_UMR_UNREG)) {
1967  umr->klm_octowords = get_klm_octo(umrwr->npages);
1968  if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_MTT) {
1969   umr->mkey_mask = get_umr_update_mtt_mask();
1970   umr->bsf_octowords = get_klm_octo(umrwr->target.offset);
1971   umr->flags |= MLX5_UMR_TRANSLATION_OFFSET_EN;
1972  } else {
1973   umr->mkey_mask = get_umr_reg_mr_mask();
1974  }
1975 } else {
1976  umr->mkey_mask = get_umr_unreg_mr_mask();
1977 }
1978
1979 if (!wr->num_sge)
1980  umr->flags |= MLX5_UMR_INLINE;
1981}
1982
1983static u8 get_umr_flags(int acc)
1984{
1985 return (acc & IB_ACCESS_REMOTE_ATOMIC ? MLX5_PERM_ATOMIC : 0) |
1986 (acc & IB_ACCESS_REMOTE_WRITE ? MLX5_PERM_REMOTE_WRITE : 0) |
1987 (acc & IB_ACCESS_REMOTE_READ ? MLX5_PERM_REMOTE_READ : 0) |
1988 (acc & IB_ACCESS_LOCAL_WRITE ? MLX5_PERM_LOCAL_WRITE : 0) |
1989  MLX5_PERM_LOCAL_READ | MLX5_PERM_UMR_EN;
1990}
1991
1992static void set_mkey_segment(struct mlx5_mkey_seg *seg, struct ib_send_wr *wr,
1993 int li, int *writ)
1994{
1995 memset(seg, 0, sizeof(*seg));
1996 if (li) {
1997  seg->status = MLX5_MKEY_STATUS_FREE;
1998  return;
1999 }
2000
2001 seg->flags = get_umr_flags(wr->wr.fast_reg.access_flags) |
2002       MLX5_ACCESS_MODE_MTT;
2003 *writ = seg->flags & (MLX5_PERM_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE);
2004 seg->qpn_mkey7_0 = cpu_to_be32((wr->wr.fast_reg.rkey & 0xff) | 0xffffff00);
2005 seg->flags_pd = cpu_to_be32(MLX5_MKEY_REMOTE_INVAL);
2006 seg->start_addr = cpu_to_be64(wr->wr.fast_reg.iova_start);
2007 seg->len = cpu_to_be64(wr->wr.fast_reg.length);
2008 seg->xlt_oct_size = cpu_to_be32((wr->wr.fast_reg.page_list_len + 1) / 2);
2009 seg->log2_page_size = wr->wr.fast_reg.page_shift;
2010}
2011
2012static void set_reg_mkey_segment(struct mlx5_mkey_seg *seg, struct ib_send_wr *wr)
2013{
2014 struct mlx5_umr_wr *umrwr = (struct mlx5_umr_wr *)&wr->wr.fast_reg;
2015
2016 memset(seg, 0, sizeof(*seg));
2017 if (wr->send_flags & MLX5_IB_SEND_UMR_UNREG) {
2018  seg->status = MLX5_MKEY_STATUS_FREE;
2019  return;
2020 }
2021
2022 seg->flags = convert_access(umrwr->access_flags);
2023 if (!(wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_MTT)) {
2024 seg->flags_pd = cpu_to_be32(to_mpd(umrwr->pd)->pdn);
2025 seg->start_addr = cpu_to_be64(umrwr->target.virt_addr);
2026 }
2027 seg->len = cpu_to_be64(umrwr->length);
2028 seg->log2_page_size = umrwr->page_shift;
2029 seg->qpn_mkey7_0 = cpu_to_be32(0xffffff00 |
2030          mlx5_mkey_variant(umrwr->mkey));
2031}
2032
2033static void set_frwr_pages(struct mlx5_wqe_data_seg *dseg,
2034 struct ib_send_wr *wr,
2035 struct mlx5_core_dev *mdev,
2036 struct mlx5_ib_pd *pd,
2037 int writ)
2038{
2039 struct mlx5_ib_fast_reg_page_list *mfrpl = to_mfrpl(wr->wr.fast_reg.page_list);
2040 u64 *page_list = wr->wr.fast_reg.page_list->page_list;
2041 u64 perm = MLX5_EN_RD | (writ ? MLX5_EN_WR : 0);
2042 int i;
2043
2044 for (i = 0; i < wr->wr.fast_reg.page_list_len; i++)
2045 mfrpl->mapped_page_list[i] = cpu_to_be64(page_list[i] | perm);
2046 dseg->addr = cpu_to_be64(mfrpl->map);
2047 dseg->byte_count = cpu_to_be32(ALIGN(sizeof(u64) * wr->wr.fast_reg.page_list_len, 64));
2048 dseg->lkey = cpu_to_be32(pd->pa_lkey);
2049}
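/*
 * Illustrative example (not used by the driver): DMA page addresses are at
 * least 4K aligned, so their low bits are free to carry the per-page
 * MLX5_EN_RD/MLX5_EN_WR permission bits that set_frwr_pages() ORs in above.
 * A minimal pack/unpack sketch, assuming 4K alignment:
 */
static void __maybe_unused example_mtt_perm_bits(void)
{
	u64 page  = 0x12345000ULL;	/* 4K-aligned DMA address */
	u64 entry = page | MLX5_EN_RD | MLX5_EN_WR;
	u64 addr  = entry & ~0xfffULL;	/* recover the address */
	u64 perm  = entry & 0xfffULL;	/* recover the permission bits */

	(void)addr;
	(void)perm;
}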
2050
2051static __be32 send_ieth(struct ib_send_wr *wr)
2052{
2053 switch (wr->opcode) {
2054 case IB_WR_SEND_WITH_IMM:
2055 case IB_WR_RDMA_WRITE_WITH_IMM:
2056 return wr->ex.imm_data;
2057
2058 case IB_WR_SEND_WITH_INV:
2059 return cpu_to_be32(wr->ex.invalidate_rkey);
2060
2061 default:
2062 return 0;
2063 }
2064}
2065
2066static u8 calc_sig(void *wqe, int size)
2067{
2068 u8 *p = wqe;
2069 u8 res = 0;
2070 int i;
2071
2072 for (i = 0; i < size; i++)
2073 res ^= p[i];
2074
2075 return ~res;
2076}
2077
2078static u8 wq_sig(void *wqe)
2079{
2080 return calc_sig(wqe, (*((u8 *)wqe + 8) & 0x3f) << 4);
2081}
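/*
 * Illustrative example (not used by the driver): calc_sig() above is a
 * one-byte XOR fold, inverted so an all-zero buffer does not sign to zero.
 * Verification is simply recompute-and-compare over the same bytes:
 */
static bool __maybe_unused example_sig_matches(void *wqe, int size, u8 expected)
{
	return calc_sig(wqe, size) == expected;
}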
2082
2083static int set_data_inl_seg(struct mlx5_ib_qp *qp, struct ib_send_wr *wr,
2084 void *wqe, int *sz)
2085{
2086 struct mlx5_wqe_inline_seg *seg;
2087 void *qend = qp->sq.qend;
2088 void *addr;
2089 int inl = 0;
2090 int copy;
2091 int len;
2092 int i;
2093
2094 seg = wqe;
2095 wqe += sizeof(*seg);
2096 for (i = 0; i < wr->num_sge; i++) {
2097 addr = (void *)(unsigned long)(wr->sg_list[i].addr);
2098 len = wr->sg_list[i].length;
2099 inl += len;
2100
2101 if (unlikely(inl > qp->max_inline_data))
2102 return -ENOMEM;
2103
2104 if (unlikely(wqe + len > qend)) {
2105 copy = qend - wqe;
2106 memcpy(wqe, addr, copy);
2107 addr += copy;
2108 len -= copy;
2109 wqe = mlx5_get_send_wqe(qp, 0);
2110 }
2111 memcpy(wqe, addr, len);
2112 wqe += len;
2113 }
2114
2115 seg->byte_count = cpu_to_be32(inl | MLX5_INLINE_SEG);
2116
2117 *sz = ALIGN(inl + sizeof(seg->byte_count), 16) / 16;
2118
2119 return 0;
2120}
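/*
 * Illustrative example (not used by the driver): the inline-copy loop above
 * splits a payload that crosses the end of the SQ buffer (qend) and
 * continues from WQE 0.  The same split in isolation, assuming a contiguous
 * ring of ring_size bytes at ring_base:
 */
static void __maybe_unused example_ring_copy(void *ring_base, int ring_size,
					     int offset, void *src, int len)
{
	void *dst = ring_base + offset;
	int tail_room = ring_size - offset;

	if (len > tail_room) {		/* payload crosses the ring end */
		memcpy(dst, src, tail_room);
		src += tail_room;
		len -= tail_room;
		dst = ring_base;	/* wrap to the start */
	}
	memcpy(dst, src, len);
}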
2121
2122static u16 prot_field_size(enum ib_signature_type type)
2123{
2124 switch (type) {
2125 case IB_SIG_TYPE_T10_DIF:
2126 return MLX5_DIF_SIZE;
2127 default:
2128 return 0;
2129 }
2130}
2131
2132static u8 bs_selector(int block_size)
2133{
2134 switch (block_size) {
2135 case 512: return 0x1;
2136 case 520: return 0x2;
2137 case 4096: return 0x3;
2138 case 4160: return 0x4;
2139 case 1073741824: return 0x5;
2140 default: return 0;
2141 }
2142}
2143
2144static void mlx5_fill_inl_bsf(struct ib_sig_domain *domain,
2145          struct mlx5_bsf_inl *inl)
2146{
2147 /* Valid inline section and allow BSF refresh */
2148 inl->vld_refresh = cpu_to_be16(MLX5_BSF_INL_VALID |
2149           MLX5_BSF_REFRESH_DIF);
2150 inl->dif_apptag = cpu_to_be16(domain->sig.dif.app_tag);
2151 inl->dif_reftag = cpu_to_be32(domain->sig.dif.ref_tag);
2152 /* repeating block */
2153 inl->rp_inv_seed = MLX5_BSF_REPEAT_BLOCK;
2154 inl->sig_type = domain->sig.dif.bg_type == IB_T10DIF_CRC ?
2155   MLX5_DIF_CRC : MLX5_DIF_IPCS;
2156
2157 if (domain->sig.dif.ref_remap)
2158  inl->dif_inc_ref_guard_check |= MLX5_BSF_INC_REFTAG;
2159
2160 if (domain->sig.dif.app_escape) {
2161  if (domain->sig.dif.ref_escape)
2162   inl->dif_inc_ref_guard_check |= MLX5_BSF_APPREF_ESCAPE;
2163  else
2164   inl->dif_inc_ref_guard_check |= MLX5_BSF_APPTAG_ESCAPE;
2165 }
2166
2167 inl->dif_app_bitmask_check =
2168  cpu_to_be16(domain->sig.dif.apptag_check_mask);
2169}
2170
2171static int mlx5_set_bsf(struct ib_mr *sig_mr,
2172 struct ib_sig_attrs *sig_attrs,
2173 struct mlx5_bsf *bsf, u32 data_size)
2174{
2175 struct mlx5_core_sig_ctx *msig = to_mmr(sig_mr)->sig;
2176 struct mlx5_bsf_basic *basic = &bsf->basic;
2177 struct ib_sig_domain *mem = &sig_attrs->mem;
2178 struct ib_sig_domain *wire = &sig_attrs->wire;
2179
2180 memset(bsf, 0, sizeof(*bsf));
2181
2182 /* Basic + Extended + Inline */
2183 basic->bsf_size_sbs = 1 << 7;
2184 /* Input domain check byte mask */
2185 basic->check_byte_mask = sig_attrs->check_mask;
2186 basic->raw_data_size = cpu_to_be32(data_size);
2187
2188 /* Memory domain */
2189 switch (sig_attrs->mem.sig_type) {
2190 case IB_SIG_TYPE_NONE:
2191 break;
2192 case IB_SIG_TYPE_T10_DIF:
2193 basic->mem.bs_selector = bs_selector(mem->sig.dif.pi_interval);
2194 basic->m_bfs_psv = cpu_to_be32(msig->psv_memory.psv_idx);
2195 mlx5_fill_inl_bsf(mem, &bsf->m_inl);
2196 break;
2197 default:
2198 return -EINVAL;
2199 }
2200
2201 /* Wire domain */
2202 switch (sig_attrs->wire.sig_type) {
2203 case IB_SIG_TYPE_NONE:
2204 break;
2205 case IB_SIG_TYPE_T10_DIF:
2206  if (mem->sig.dif.pi_interval == wire->sig.dif.pi_interval &&
2207      mem->sig_type == wire->sig_type) {
2208   /* Same block structure */
2209   basic->bsf_size_sbs |= 1 << 4;
2210   if (mem->sig.dif.bg_type == wire->sig.dif.bg_type)
2211    basic->wire.copy_byte_mask |= MLX5_CPY_GRD_MASK;
2212   if (mem->sig.dif.app_tag == wire->sig.dif.app_tag)
2213    basic->wire.copy_byte_mask |= MLX5_CPY_APP_MASK;
2214   if (mem->sig.dif.ref_tag == wire->sig.dif.ref_tag)
2215    basic->wire.copy_byte_mask |= MLX5_CPY_REF_MASK;
2216  } else
2217   basic->wire.bs_selector = bs_selector(wire->sig.dif.pi_interval);
2218
2219  basic->w_bfs_psv = cpu_to_be32(msig->psv_wire.psv_idx);
2220  mlx5_fill_inl_bsf(wire, &bsf->w_inl);
2221  break;
2222 default:
2223 return -EINVAL;
2224 }
2225
2226 return 0;
2227}
2228
2229static int set_sig_data_segment(struct ib_send_wr *wr, struct mlx5_ib_qp *qp,
2230 void **seg, int *size)
2231{
2232 struct ib_sig_attrs *sig_attrs = wr->wr.sig_handover.sig_attrs;
2233 struct ib_mr *sig_mr = wr->wr.sig_handover.sig_mr;
2234 struct mlx5_bsf *bsf;
2235 u32 data_len = wr->sg_list->length;
2236 u32 data_key = wr->sg_list->lkey;
2237 u64 data_va = wr->sg_list->addr;
2238 int ret;
2239 int wqe_size;
2240
2241 if (!wr->wr.sig_handover.prot ||
2242     (data_key == wr->wr.sig_handover.prot->lkey &&
2243      data_va == wr->wr.sig_handover.prot->addr &&
2244      data_len == wr->wr.sig_handover.prot->length)) {
2245  /**
2246   * The source domain doesn't contain signature information,
2247   * or data and protection are interleaved in memory, so we
2248   * only need to construct:
2249 * ------------------
2250 * | data_klm |
2251 * ------------------
2252 * | BSF |
2253 * ------------------
2254 **/
2255 struct mlx5_klm *data_klm = *seg;
2256
2257 data_klm->bcount = cpu_to_be32(data_len);
2258 data_klm->key = cpu_to_be32(data_key);
2259 data_klm->va = cpu_to_be64(data_va);
2260 wqe_size = ALIGN(sizeof(*data_klm), 64);
2261 } else {
2262 /**
2263   * The source domain contains signature information, so we
2264   * need to construct a strided block format:
2265 * ---------------------------
2266 * | stride_block_ctrl |
2267 * ---------------------------
2268 * | data_klm |
2269 * ---------------------------
2270 * | prot_klm |
2271 * ---------------------------
2272 * | BSF |
2273 * ---------------------------
2274 **/
2275 struct mlx5_stride_block_ctrl_seg *sblock_ctrl;
2276 struct mlx5_stride_block_entry *data_sentry;
2277 struct mlx5_stride_block_entry *prot_sentry;
2278 u32 prot_key = wr->wr.sig_handover.prot->lkey;
2279 u64 prot_va = wr->wr.sig_handover.prot->addr;
2280 u16 block_size = sig_attrs->mem.sig.dif.pi_interval;
2281 int prot_size;
2282
2283 sblock_ctrl = *seg;
2284 data_sentry = (void *)sblock_ctrl + sizeof(*sblock_ctrl);
2285 prot_sentry = (void *)data_sentry + sizeof(*data_sentry);
2286
2287 prot_size = prot_field_size(sig_attrs->mem.sig_type);
2288 if (!prot_size) {
2289 pr_err("Bad block size given: %u\n", block_size);
2290 return -EINVAL;
2291 }
2292 sblock_ctrl->bcount_per_cycle = cpu_to_be32(block_size +
2293 prot_size);
2294 sblock_ctrl->op = cpu_to_be32(MLX5_STRIDE_BLOCK_OP);
2295 sblock_ctrl->repeat_count = cpu_to_be32(data_len / block_size);
2296 sblock_ctrl->num_entries = cpu_to_be16(2);
2297
2298 data_sentry->bcount = cpu_to_be16(block_size);
2299 data_sentry->key = cpu_to_be32(data_key);
2300 data_sentry->va = cpu_to_be64(data_va);
2301  data_sentry->stride = cpu_to_be16(block_size);
2302
2303  prot_sentry->bcount = cpu_to_be16(prot_size);
2304  prot_sentry->key = cpu_to_be32(prot_key);
2305  prot_sentry->va = cpu_to_be64(prot_va);
2306  prot_sentry->stride = cpu_to_be16(prot_size);
2307
2308  wqe_size = ALIGN(sizeof(*sblock_ctrl) + sizeof(*data_sentry) +
2
2309 sizeof(*prot_sentry), 64);
2310 }
2311
2312 *seg += wqe_size;
2313 *size += wqe_size / 16;
2314 if (unlikely((*seg == qp->sq.qend)))
2315 *seg = mlx5_get_send_wqe(qp, 0);
2316
2317 bsf = *seg;
2318 ret = mlx5_set_bsf(sig_mr, sig_attrs, bsf, data_len);
2319 if (ret)
2320 return -EINVAL;
2321
2322 *seg += sizeof(*bsf);
2323 *size += sizeof(*bsf) / 16;
2324 if (unlikely((*seg == qp->sq.qend)))
2325 *seg = mlx5_get_send_wqe(qp, 0);
2326
2327 return 0;
2328}
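/*
 * Worked example for the strided-block case above, assuming 512-byte data
 * blocks protected by 8-byte T10-DIF fields (MLX5_DIF_SIZE):
 *
 *	block_size       = 512, prot_size = 8
 *	bcount_per_cycle = 512 + 8 = 520 bytes gathered per cycle
 *	data_len = 4096  -> repeat_count = 4096 / 512 = 8 cycles
 *
 * i.e. the device alternates between the data and protection KLMs eight
 * times, consuming one block plus one DIF field per cycle.
 */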
2329
2330static void set_sig_mkey_segment(struct mlx5_mkey_seg *seg,
2331 struct ib_send_wr *wr, u32 nelements,
2332 u32 length, u32 pdn)
2333{
2334 struct ib_mr *sig_mr = wr->wr.sig_handover.sig_mr;
2335 u32 sig_key = sig_mr->rkey;
2336 u8 sigerr = to_mmr(sig_mr)->sig->sigerr_count & 1;
2337
2338 memset(seg, 0, sizeof(*seg));
2339
2340 seg->flags = get_umr_flags(wr->wr.sig_handover.access_flags) |
2341 MLX5_ACCESS_MODE_KLM;
2342 seg->qpn_mkey7_0 = cpu_to_be32((sig_key & 0xff) | 0xffffff00);
2343 seg->flags_pd = cpu_to_be32(MLX5_MKEY_REMOTE_INVAL | sigerr << 26 |
2344        MLX5_MKEY_BSF_EN | pdn);
2345 seg->len = cpu_to_be64(length);
2346 seg->xlt_oct_size = cpu_to_be32(be16_to_cpu(get_klm_octo(nelements)));
2347 seg->bsfs_octo_size = cpu_to_be32(MLX5_MKEY_BSF_OCTO_SIZE);
2348}
2349
2350static void set_sig_umr_segment(struct mlx5_wqe_umr_ctrl_seg *umr,
2351 struct ib_send_wr *wr, u32 nelements)
2352{
2353 memset(umr, 0, sizeof(*umr));
2354
2355 umr->flags = MLX5_FLAGS_INLINE | MLX5_FLAGS_CHECK_FREE;
2356 umr->klm_octowords = get_klm_octo(nelements);
2357 umr->bsf_octowords = cpu_to_be16(MLX5_MKEY_BSF_OCTO_SIZE);
2358 umr->mkey_mask = sig_mkey_mask();
2359}
2360
2361
2362static int set_sig_umr_wr(struct ib_send_wr *wr, struct mlx5_ib_qp *qp,
2363 void **seg, int *size)
2364{
2365 struct mlx5_ib_mr *sig_mr = to_mmr(wr->wr.sig_handover.sig_mr);
2366 u32 pdn = get_pd(qp)->pdn;
2367 u32 klm_oct_size;
2368 int region_len, ret;
2369
2370 if (unlikely(wr->num_sge != 1) ||
2371 unlikely(wr->wr.sig_handover.access_flags &
2372 IB_ACCESS_REMOTE_ATOMIC) ||
2373     unlikely(!sig_mr->sig) || unlikely(!qp->signature_en) ||
2374     unlikely(!sig_mr->sig->sig_status_checked))
2375  return -EINVAL;
2376
2377 /* length of the protected region, data + protection */
2378 region_len = wr->sg_list->length;
2379 if (wr->wr.sig_handover.prot &&
2380     (wr->wr.sig_handover.prot->lkey != wr->sg_list->lkey ||
2381      wr->wr.sig_handover.prot->addr != wr->sg_list->addr ||
2382      wr->wr.sig_handover.prot->length != wr->sg_list->length))
2383  region_len += wr->wr.sig_handover.prot->length;
2384
2385 /**
2386  * KLM octoword size: if protection was provided
2387  * we use the strided block format (3 octowords),
2388  * otherwise a single KLM (1 octoword)
2389 **/
2390 klm_oct_size = wr->wr.sig_handover.prot ? 3 : 1;
2391
2392 set_sig_umr_segment(*seg, wr, klm_oct_size);
2393 *seg += sizeof(struct mlx5_wqe_umr_ctrl_seg);
2394 *size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16;
2395 if (unlikely((*seg == qp->sq.qend)))
2396 *seg = mlx5_get_send_wqe(qp, 0);
2397
2398 set_sig_mkey_segment(*seg, wr, klm_oct_size, region_len, pdn);
2399 *seg += sizeof(struct mlx5_mkey_seg);
2400 *size += sizeof(struct mlx5_mkey_seg) / 16;
2401 if (unlikely((*seg == qp->sq.qend)))
2402 *seg = mlx5_get_send_wqe(qp, 0);
2403
2404 ret = set_sig_data_segment(wr, qp, seg, size);
2405 if (ret)
2406 return ret;
2407
2408 sig_mr->sig->sig_status_checked = false;
2409 return 0;
2410}
2411
2412static int set_psv_wr(struct ib_sig_domain *domain,
2413 u32 psv_idx, void **seg, int *size)
2414{
2415 struct mlx5_seg_set_psv *psv_seg = *seg;
2416
2417 memset(psv_seg, 0, sizeof(*psv_seg));
2418 psv_seg->psv_num = cpu_to_be32(psv_idx);
2419 switch (domain->sig_type) {
2420 case IB_SIG_TYPE_NONE:
2421  break;
2422 case IB_SIG_TYPE_T10_DIF:
2423 psv_seg->transient_sig = cpu_to_be32(domain->sig.dif.bg << 16 |
2424 domain->sig.dif.app_tag);
2425 psv_seg->ref_tag = cpu_to_be32(domain->sig.dif.ref_tag);
2426  break;
2427 default:
2428 pr_err("Bad signature type given.\n");
2429 return 1;
2430 }
2431
2432 *seg += sizeof(*psv_seg);
2433 *size += sizeof(*psv_seg) / 16;
2434
2435 return 0;
2436}
2437
2438static int set_frwr_li_wr(void **seg, struct ib_send_wr *wr, int *size,
2439 struct mlx5_core_dev *mdev, struct mlx5_ib_pd *pd, struct mlx5_ib_qp *qp)
2440{
2441 int writ = 0;
2442 int li;
2443
2444 li = wr->opcode == IB_WR_LOCAL_INV ? 1 : 0;
2445 if (unlikely(wr->send_flags & IB_SEND_INLINE))
2446 return -EINVAL;
2447
2448 set_frwr_umr_segment(*seg, wr, li);
2449 *seg += sizeof(struct mlx5_wqe_umr_ctrl_seg);
2450 *size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16;
2451 if (unlikely((*seg == qp->sq.qend)))
2452 *seg = mlx5_get_send_wqe(qp, 0);
2453 set_mkey_segment(*seg, wr, li, &writ);
2454 *seg += sizeof(struct mlx5_mkey_seg);
2455 *size += sizeof(struct mlx5_mkey_seg) / 16;
2456 if (unlikely((*seg == qp->sq.qend)))
2457 *seg = mlx5_get_send_wqe(qp, 0);
2458 if (!li) {
2459  if (unlikely(wr->wr.fast_reg.page_list_len >
2460 wr->wr.fast_reg.page_list->max_page_list_len))
2461 return -ENOMEM;
2462
2463  set_frwr_pages(*seg, wr, mdev, pd, writ);
2464 *seg += sizeof(struct mlx5_wqe_data_seg);
2465 *size += (sizeof(struct mlx5_wqe_data_seg) / 16);
2466 }
2467 return 0;
2468}
2469
2470static void dump_wqe(struct mlx5_ib_qp *qp, int idx, int size_16)
2471{
2472 __be32 *p = NULL;
2473 int tidx = idx;
2474 int i, j;
2475
2476 pr_debug("dump wqe at %p\n", mlx5_get_send_wqe(qp, tidx));
2477 for (i = 0, j = 0; i < size_16 * 4; i += 4, j += 4) {
2478 if ((i & 0xf) == 0) {
2479 void *buf = mlx5_get_send_wqe(qp, tidx);
2480 tidx = (tidx + 1) & (qp->sq.wqe_cnt - 1);
2481 p = buf;
2482 j = 0;
2483 }
2484 pr_debug("%08x %08x %08x %08x\n", be32_to_cpu(p[j]),
2485 be32_to_cpu(p[j + 1]), be32_to_cpu(p[j + 2]),
2486 be32_to_cpu(p[j + 3]));
2487 }
2488}
2489
2490static void mlx5_bf_copy(u64 __iomem *dst, u64 *src,
2491 unsigned bytecnt, struct mlx5_ib_qp *qp)
2492{
2493 while (bytecnt > 0) {
2494 __iowrite64_copy(dst++, src++, 8);
2495 __iowrite64_copy(dst++, src++, 8);
2496 __iowrite64_copy(dst++, src++, 8);
2497 __iowrite64_copy(dst++, src++, 8);
2498 __iowrite64_copy(dst++, src++, 8);
2499 __iowrite64_copy(dst++, src++, 8);
2500 __iowrite64_copy(dst++, src++, 8);
2501 __iowrite64_copy(dst++, src++, 8);
2502 bytecnt -= 64;
2503 if (unlikely(src == qp->sq.qend))
2504 src = mlx5_get_send_wqe(qp, 0);
2505 }
2506}
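/*
 * Worked example: mlx5_bf_copy() above moves exactly 64 bytes per loop
 * iteration (eight 8-byte MMIO copies), so its caller must round the length
 * up, as mlx5_ib_post_send() does with ALIGN(size * 16, 64):
 *
 *	size = 3 octowords (48 bytes) -> ALIGN(48, 64) = 64  -> 1 iteration
 *	size = 5 octowords (80 bytes) -> ALIGN(80, 64) = 128 -> 2 iterations
 */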
2507
2508static u8 get_fence(u8 fence, struct ib_send_wr *wr)
2509{
2510 if (unlikely(wr->opcode == IB_WR_LOCAL_INV &&
2511 wr->send_flags & IB_SEND_FENCE))
2512 return MLX5_FENCE_MODE_STRONG_ORDERING;
2513
2514 if (unlikely(fence)) {
2515 if (wr->send_flags & IB_SEND_FENCE)
2516 return MLX5_FENCE_MODE_SMALL_AND_FENCE;
2517 else
2518 return fence;
2519
2520 } else {
2521 return 0;
2522 }
2523}
2524
2525static int begin_wqe(struct mlx5_ib_qp *qp, void **seg,
2526       struct mlx5_wqe_ctrl_seg **ctrl,
2527       struct ib_send_wr *wr, unsigned *idx,
2528       int *size, int nreq)
2529{
2530 int err = 0;
2531
2532 if (unlikely(mlx5_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq))) {
2533 err = -ENOMEM;
2534 return err;
2535 }
2536
2537 *idx = qp->sq.cur_post & (qp->sq.wqe_cnt - 1);
2538 *seg = mlx5_get_send_wqe(qp, *idx);
2539 *ctrl = *seg;
2540 *(uint32_t *)(*seg + 8) = 0;
2541 (*ctrl)->imm = send_ieth(wr);
2542 (*ctrl)->fm_ce_se = qp->sq_signal_bits |
2543 (wr->send_flags & IB_SEND_SIGNALED ?
2544 MLX5_WQE_CTRL_CQ_UPDATE : 0) |
2545 (wr->send_flags & IB_SEND_SOLICITED ?
2546 MLX5_WQE_CTRL_SOLICITED : 0);
2547
2548 *seg += sizeof(**ctrl);
2549 *size = sizeof(**ctrl) / 16;
2550
2551 return err;
2552}
2553
2554static void finish_wqe(struct mlx5_ib_qp *qp,
2555 struct mlx5_wqe_ctrl_seg *ctrl,
2556 u8 size, unsigned idx, u64 wr_id,
2557 int nreq, u8 fence, u8 next_fence,
2558 u32 mlx5_opcode)
2559{
2560 u8 opmod = 0;
2561
2562 ctrl->opmod_idx_opcode = cpu_to_be32(((u32)(qp->sq.cur_post) << 8) |
2563 mlx5_opcode | ((u32)opmod << 24));
2564 ctrl->qpn_ds = cpu_to_be32(size | (qp->mqp.qpn << 8));
2565 ctrl->fm_ce_se |= fence;
2566 qp->fm_cache = next_fence;
2567 if (unlikely(qp->wq_sig))
2568 ctrl->signature = wq_sig(ctrl);
2569
2570 qp->sq.wrid[idx] = wr_id;
2571 qp->sq.w_list[idx].opcode = mlx5_opcode;
2572 qp->sq.wqe_head[idx] = qp->sq.head + nreq;
2573 qp->sq.cur_post += DIV_ROUND_UP(size * 16, MLX5_SEND_WQE_BB);
2574 qp->sq.w_list[idx].next = qp->sq.cur_post;
2575}
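/*
 * Illustrative example (not used by the driver): sq.cur_post above counts
 * 64-byte basic blocks (MLX5_SEND_WQE_BB) while "size" counts 16-byte
 * octowords, hence the DIV_ROUND_UP.  Assuming those units:
 */
static u16 __maybe_unused example_advance_cur_post(u16 cur_post, u8 size)
{
	/* size 4 (64 bytes) -> +1 BB, size 5 (80 bytes) -> +2 BBs */
	return cur_post + DIV_ROUND_UP(size * 16, MLX5_SEND_WQE_BB);
}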
2576
2577
2578int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
2579 struct ib_send_wr **bad_wr)
2580{
2581 struct mlx5_wqe_ctrl_seg *ctrl = NULL; /* silence a compiler warning */
2582 struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
2583 struct mlx5_core_dev *mdev = dev->mdev;
2584 struct mlx5_ib_qp *qp = to_mqp(ibqp);
2585 struct mlx5_ib_mr *mr;
2586 struct mlx5_wqe_data_seg *dpseg;
2587 struct mlx5_wqe_xrc_seg *xrc;
2588 struct mlx5_bf *bf = qp->bf;
2589 int uninitialized_var(size);
2590 void *qend = qp->sq.qend;
2591 unsigned long flags;
2592 unsigned idx;
2593 int err = 0;
2594 int inl = 0;
2595 int num_sge;
2596 void *seg;
2597 int nreq;
2598 int i;
2599 u8 next_fence = 0;
2600 u8 fence;
2601
2602 spin_lock_irqsave(&qp->sq.lock, flags);
2603
2604 for (nreq = 0; wr; nreq++, wr = wr->next) {
2605  if (unlikely(wr->opcode >= ARRAY_SIZE(mlx5_ib_opcode))) {
2606   mlx5_ib_warn(dev, "\n");
2607 err = -EINVAL;
2608 *bad_wr = wr;
2609 goto out;
2610 }
2611
2612  fence = qp->fm_cache;
2613 num_sge = wr->num_sge;
2614 if (unlikely(num_sge > qp->sq.max_gs)) {
2615 mlx5_ib_warn(dev, "\n");
2616 err = -ENOMEM;
2617 *bad_wr = wr;
2618 goto out;
2619 }
2620
2621  err = begin_wqe(qp, &seg, &ctrl, wr, &idx, &size, nreq);
2622 if (err) {
2623 mlx5_ib_warn(dev, "\n");
2624 err = -ENOMEM;
2625 *bad_wr = wr;
2626 goto out;
2627 }
2628
2629 switch (ibqp->qp_type) {
2630 case IB_QPT_XRC_INI:
2631 xrc = seg;
2632 xrc->xrc_srqn = htonl(wr->xrc_remote_srq_num);
2633 seg += sizeof(*xrc);
2634 size += sizeof(*xrc) / 16;
2635 /* fall through */
2636 case IB_QPT_RC:
2637 switch (wr->opcode) {
2638 case IB_WR_RDMA_READ:
2639 case IB_WR_RDMA_WRITE:
2640 case IB_WR_RDMA_WRITE_WITH_IMM:
2641 set_raddr_seg(seg, wr->wr.rdma.remote_addr,
2642 wr->wr.rdma.rkey);
2643    seg += sizeof(struct mlx5_wqe_raddr_seg);
2644    size += sizeof(struct mlx5_wqe_raddr_seg) / 16;
2645 break;
2646
2647 case IB_WR_ATOMIC_CMP_AND_SWP:
2648 case IB_WR_ATOMIC_FETCH_AND_ADD:
2649   case IB_WR_MASKED_ATOMIC_CMP_AND_SWP:
2650    mlx5_ib_warn(dev, "Atomic operations are not supported yet\n");
2651 err = -ENOSYS;
2652 *bad_wr = wr;
2653 goto out;
2654
2655 case IB_WR_LOCAL_INV:
2656 next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
2657 qp->sq.wr_data[idx] = IB_WR_LOCAL_INV;
2658 ctrl->imm = cpu_to_be32(wr->ex.invalidate_rkey);
2659 err = set_frwr_li_wr(&seg, wr, &size, mdev, to_mpd(ibqp->pd), qp);
2660 if (err) {
2661 mlx5_ib_warn(dev, "\n");
2662 *bad_wr = wr;
2663 goto out;
2664 }
2665 num_sge = 0;
2666 break;
2667
2668 case IB_WR_FAST_REG_MR:
2669 next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
2670 qp->sq.wr_data[idx] = IB_WR_FAST_REG_MR;
2671 ctrl->imm = cpu_to_be32(wr->wr.fast_reg.rkey);
2672 err = set_frwr_li_wr(&seg, wr, &size, mdev, to_mpd(ibqp->pd), qp);
2673 if (err) {
2674 mlx5_ib_warn(dev, "\n");
2675 *bad_wr = wr;
2676 goto out;
2677 }
2678 num_sge = 0;
2679 break;
2680
2681   case IB_WR_REG_SIG_MR:
2682 qp->sq.wr_data[idx] = IB_WR_REG_SIG_MR;
2683 mr = to_mmr(wr->wr.sig_handover.sig_mr);
2684
2685 ctrl->imm = cpu_to_be32(mr->ibmr.rkey);
2686 err = set_sig_umr_wr(wr, qp, &seg, &size);
2687 if (err) {
2688 mlx5_ib_warn(dev, "\n");
2689 *bad_wr = wr;
2690 goto out;
2691 }
2692
2693 finish_wqe(qp, ctrl, size, idx, wr->wr_id,
2694 nreq, get_fence(fence, wr),
2695 next_fence, MLX5_OPCODE_UMR);
2696 /*
2697     * SET_PSV WQEs are not signaled and are
2698     * solicited on error
2699 */
2700 wr->send_flags &= ~IB_SEND_SIGNALED;
2701 wr->send_flags |= IB_SEND_SOLICITED;
2702 err = begin_wqe(qp, &seg, &ctrl, wr,
2703 &idx, &size, nreq);
2704 if (err) {
2705 mlx5_ib_warn(dev, "\n");
2706 err = -ENOMEM;
2707 *bad_wr = wr;
2708 goto out;
2709 }
2710
2711 err = set_psv_wr(&wr->wr.sig_handover.sig_attrs->mem,
2712 mr->sig->psv_memory.psv_idx, &seg,
2713 &size);
2714 if (err) {
2715 mlx5_ib_warn(dev, "\n");
2716 *bad_wr = wr;
2717 goto out;
2718 }
2719
2720 finish_wqe(qp, ctrl, size, idx, wr->wr_id,
2721 nreq, get_fence(fence, wr),
2722 next_fence, MLX5_OPCODE_SET_PSV);
2723 err = begin_wqe(qp, &seg, &ctrl, wr,
2724 &idx, &size, nreq);
2725 if (err) {
2726 mlx5_ib_warn(dev, "\n");
2727 err = -ENOMEM;
2728 *bad_wr = wr;
2729 goto out;
2730 }
2731
2732 next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
2733 err = set_psv_wr(&wr->wr.sig_handover.sig_attrs->wire,
2734 mr->sig->psv_wire.psv_idx, &seg,
2735 &size);
2736 if (err) {
2737 mlx5_ib_warn(dev, "\n");
2738 *bad_wr = wr;
2739 goto out;
2740 }
2741
2742 finish_wqe(qp, ctrl, size, idx, wr->wr_id,
2743 nreq, get_fence(fence, wr),
2744 next_fence, MLX5_OPCODE_SET_PSV);
2745 num_sge = 0;
2746 goto skip_psv;
2747
2748   default:
2749 break;
2750 }
2751 break;
2752
2753 case IB_QPT_UC:
2754 switch (wr->opcode) {
2755 case IB_WR_RDMA_WRITE:
2756 case IB_WR_RDMA_WRITE_WITH_IMM:
2757 set_raddr_seg(seg, wr->wr.rdma.remote_addr,
2758 wr->wr.rdma.rkey);
2759 seg += sizeof(struct mlx5_wqe_raddr_seg);
2760 size += sizeof(struct mlx5_wqe_raddr_seg) / 16;
2761 break;
2762
2763 default:
2764 break;
2765 }
2766 break;
2767
2768 case IB_QPT_UD:
2769 case IB_QPT_SMI:
2770 case IB_QPT_GSI:
2771 set_datagram_seg(seg, wr);
2772   seg += sizeof(struct mlx5_wqe_datagram_seg);
2773   size += sizeof(struct mlx5_wqe_datagram_seg) / 16;
2774 if (unlikely((seg == qend)))
2775 seg = mlx5_get_send_wqe(qp, 0);
2776 break;
2777
2778 case MLX5_IB_QPT_REG_UMR:
2779 if (wr->opcode != MLX5_IB_WR_UMR) {
2780 err = -EINVAL;
2781 mlx5_ib_warn(dev, "bad opcode\n");
2782 goto out;
2783 }
2784 qp->sq.wr_data[idx] = MLX5_IB_WR_UMR;
2785 ctrl->imm = cpu_to_be32(wr->wr.fast_reg.rkey);
2786 set_reg_umr_segment(seg, wr);
2787 seg += sizeof(struct mlx5_wqe_umr_ctrl_seg);
2788 size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16;
2789 if (unlikely((seg == qend)))
2790 seg = mlx5_get_send_wqe(qp, 0);
2791 set_reg_mkey_segment(seg, wr);
2792 seg += sizeof(struct mlx5_mkey_seg);
2793 size += sizeof(struct mlx5_mkey_seg) / 16;
2794 if (unlikely((seg == qend)))
2795 seg = mlx5_get_send_wqe(qp, 0);
2796 break;
2797
2798 default:
2799 break;
2800 }
2801
2802 if (wr->send_flags & IB_SEND_INLINE && num_sge) {
2803 int uninitialized_var(sz);
2804
2805 err = set_data_inl_seg(qp, wr, seg, &sz);
2806 if (unlikely(err)) {
2807 mlx5_ib_warn(dev, "\n");
2808 *bad_wr = wr;
2809 goto out;
2810 }
2811 inl = 1;
2812 size += sz;
2813 } else {
2814 dpseg = seg;
2815 for (i = 0; i < num_sge; i++) {
2816 if (unlikely(dpseg == qend)) {
2817 seg = mlx5_get_send_wqe(qp, 0);
2818 dpseg = seg;
2819 }
2820 if (likely(wr->sg_list[i].length)) {
2821 set_data_ptr_seg(dpseg, wr->sg_list + i);
2822 size += sizeof(struct mlx5_wqe_data_seg) / 16;
2823 dpseg++;
2824 }
2825 }
2826 }
2827
2828  finish_wqe(qp, ctrl, size, idx, wr->wr_id, nreq,
2829 get_fence(fence, wr), next_fence,
2830 mlx5_ib_opcode[wr->opcode]);
2831skip_psv:
2832  if (0)
2833 dump_wqe(qp, idx, size);
2834 }
2835
2836out:
2837 if (likely(nreq)) {
2838 qp->sq.head += nreq;
2839
2840 /* Make sure that descriptors are written before
2841 * updating doorbell record and ringing the doorbell
2842 */
2843 wmb();
2844
2845 qp->db.db[MLX5_SND_DBR] = cpu_to_be32(qp->sq.cur_post);
2846
2847  /* Make sure the doorbell record is visible to the HCA before
2848   * we ring the doorbell */
2849 wmb();
2850
2851  if (bf->need_lock)
2852   spin_lock(&bf->lock);
2853  else
2854   __acquire(&bf->lock);
2855
2856 /* TBD enable WC */
2857 if (0 && nreq == 1 && bf->uuarn && inl && size > 1 && size <= bf->buf_size / 16) {
2858 mlx5_bf_copy(bf->reg + bf->offset, (u64 *)ctrl, ALIGN(size * 16, 64), qp);
2859 /* wc_wmb(); */
2860 } else {
2861 mlx5_write64((__be32 *)ctrl, bf->regreg + bf->offset,
2862 MLX5_GET_DOORBELL_LOCK(&bf->lock32));
2863 /* Make sure doorbells don't leak out of SQ spinlock
2864 * and reach the HCA out of order.
2865 */
2866 mmiowb();
2867 }
2868 bf->offset ^= bf->buf_size;
2869 if (bf->need_lock)
2870 spin_unlock(&bf->lock);
2871  else
2872   __release(&bf->lock);
2873 }
2874
2875 spin_unlock_irqrestore(&qp->sq.lock, flags);
2876
2877 return err;
2878}
2879
2880static void set_sig_seg(struct mlx5_rwqe_sig *sig, int size)
2881{
2882 sig->signature = calc_sig(sig, size);
2883}
2884
2885int mlx5_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
2886 struct ib_recv_wr **bad_wr)
2887{
2888 struct mlx5_ib_qp *qp = to_mqp(ibqp);
2889 struct mlx5_wqe_data_seg *scat;
2890 struct mlx5_rwqe_sig *sig;
2891 unsigned long flags;
2892 int err = 0;
2893 int nreq;
2894 int ind;
2895 int i;
2896
2897 spin_lock_irqsave(&qp->rq.lock, flags);
2898
2899 ind = qp->rq.head & (qp->rq.wqe_cnt - 1);
2900
2901 for (nreq = 0; wr; nreq++, wr = wr->next) {
2902 if (mlx5_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) {
2903 err = -ENOMEM;
2904 *bad_wr = wr;
2905 goto out;
2906 }
2907
2908 if (unlikely(wr->num_sge > qp->rq.max_gs)) {
2909 err = -EINVAL;
2910 *bad_wr = wr;
2911 goto out;
2912 }
2913
2914 scat = get_recv_wqe(qp, ind);
2915 if (qp->wq_sig)
2916 scat++;
2917
2918 for (i = 0; i < wr->num_sge; i++)
2919 set_data_ptr_seg(scat + i, wr->sg_list + i);
2920
2921 if (i < qp->rq.max_gs) {
2922 scat[i].byte_count = 0;
2923 scat[i].lkey = cpu_to_be32(MLX5_INVALID_LKEY);
2924 scat[i].addr = 0;
2925 }
2926
2927 if (qp->wq_sig) {
2928 sig = (struct mlx5_rwqe_sig *)scat;
2929 set_sig_seg(sig, (qp->rq.max_gs + 1) << 2);
2930 }
2931
2932 qp->rq.wrid[ind] = wr->wr_id;
2933
2934 ind = (ind + 1) & (qp->rq.wqe_cnt - 1);
2935 }
2936
2937out:
2938 if (likely(nreq)) {
2939 qp->rq.head += nreq;
2940
2941 /* Make sure that descriptors are written before
2942 * doorbell record.
2943 */
2944 wmb();
2945
2946 *qp->db.db = cpu_to_be32(qp->rq.head & 0xffff);
2947 }
2948
2949 spin_unlock_irqrestore(&qp->rq.lock, flags);
2950
2951 return err;
2952}
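/*
 * Illustrative example (not used by the driver): the receive ring index is
 * advanced with "(ind + 1) & (wqe_cnt - 1)" above, which is only a valid
 * modulo when rq.wqe_cnt is a power of two:
 */
static int __maybe_unused example_ring_advance(int ind, int wqe_cnt)
{
	/* wqe_cnt = 8: 6 -> 7 -> 0 -> 1 -> ... */
	return (ind + 1) & (wqe_cnt - 1);
}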
2953
2954static inline enum ib_qp_state to_ib_qp_state(enum mlx5_qp_state mlx5_state)
2955{
2956 switch (mlx5_state) {
2957 case MLX5_QP_STATE_RST: return IB_QPS_RESET;
2958 case MLX5_QP_STATE_INIT: return IB_QPS_INIT;
2959 case MLX5_QP_STATE_RTR: return IB_QPS_RTR;
2960 case MLX5_QP_STATE_RTS: return IB_QPS_RTS;
2961 case MLX5_QP_STATE_SQ_DRAINING:
2962 case MLX5_QP_STATE_SQD: return IB_QPS_SQD;
2963 case MLX5_QP_STATE_SQER: return IB_QPS_SQE;
2964 case MLX5_QP_STATE_ERR: return IB_QPS_ERR;
2965 default: return -1;
2966 }
2967}
2968
2969static inline enum ib_mig_state to_ib_mig_state(int mlx5_mig_state)
2970{
2971 switch (mlx5_mig_state) {
2972 case MLX5_QP_PM_ARMED: return IB_MIG_ARMED;
2973 case MLX5_QP_PM_REARM: return IB_MIG_REARM;
2974 case MLX5_QP_PM_MIGRATED: return IB_MIG_MIGRATED;
2975 default: return -1;
2976 }
2977}
2978
2979static int to_ib_qp_access_flags(int mlx5_flags)
2980{
2981 int ib_flags = 0;
2982
2983 if (mlx5_flags & MLX5_QP_BIT_RRE)
2984 ib_flags |= IB_ACCESS_REMOTE_READ;
2985 if (mlx5_flags & MLX5_QP_BIT_RWE)
2986 ib_flags |= IB_ACCESS_REMOTE_WRITE;
2987 if (mlx5_flags & MLX5_QP_BIT_RAE)
2988 ib_flags |= IB_ACCESS_REMOTE_ATOMIC;
2989
2990 return ib_flags;
2991}
2992
2993static void to_ib_ah_attr(struct mlx5_ib_dev *ibdev, struct ib_ah_attr *ib_ah_attr,
2994 struct mlx5_qp_path *path)
2995{
2996 struct mlx5_core_dev *dev = ibdev->mdev;
2997
2998 memset(ib_ah_attr, 0, sizeof(*ib_ah_attr));
2999 ib_ah_attr->port_num = path->port;
3000
3001 if (ib_ah_attr->port_num == 0 ||
3002     ib_ah_attr->port_num > MLX5_CAP_GEN(dev, num_ports))
3003  return;
3004
3005 ib_ah_attr->sl = path->sl & 0xf;
3006
3007 ib_ah_attr->dlid = be16_to_cpu(path->rlid);
3008 ib_ah_attr->src_path_bits = path->grh_mlid & 0x7f;
3009 ib_ah_attr->static_rate = path->static_rate ? path->static_rate - 5 : 0;
3010 ib_ah_attr->ah_flags = (path->grh_mlid & (1 << 7)) ? IB_AH_GRH : 0;
3011 if (ib_ah_attr->ah_flags) {
3012 ib_ah_attr->grh.sgid_index = path->mgid_index;
3013 ib_ah_attr->grh.hop_limit = path->hop_limit;
3014 ib_ah_attr->grh.traffic_class =
3015 (be32_to_cpu(path->tclass_flowlabel) >> 20) & 0xff;
3016 ib_ah_attr->grh.flow_label =
3017 be32_to_cpu(path->tclass_flowlabel) & 0xfffff;
3018 memcpy(ib_ah_attr->grh.dgid.raw,
3019 path->rgid, sizeof(ib_ah_attr->grh.dgid.raw));
3020 }
3021}
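/*
 * Illustrative example (not used by the driver): tclass_flowlabel packs the
 * 8-bit traffic class above the 20-bit flow label in one big-endian word,
 * which to_ib_ah_attr() unpacks above.  The inverse, under that layout:
 */
static __be32 __maybe_unused example_pack_tclass_flowlabel(u8 tclass, u32 flow_label)
{
	return cpu_to_be32(((u32)tclass << 20) | (flow_label & 0xfffff));
}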
3022
3023int mlx5_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask,
3024 struct ib_qp_init_attr *qp_init_attr)
3025{
3026 struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
3027 struct mlx5_ib_qp *qp = to_mqp(ibqp);
3028 struct mlx5_query_qp_mbox_out *outb;
3029 struct mlx5_qp_context *context;
3030 int mlx5_state;
3031 int err = 0;
3032
3033#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
3034 /*
3035 * Wait for any outstanding page faults, in case the user frees memory
3036 * based upon this query's result.
3037 */
3038 flush_workqueue(mlx5_ib_page_fault_wq);
3039#endif
3040
3041 mutex_lock(&qp->mutex);
3042 outb = kzalloc(sizeof(*outb), GFP_KERNEL);
3043 if (!outb) {
3044 err = -ENOMEM;
3045 goto out;
3046 }
3047 context = &outb->ctx;
3048 err = mlx5_core_qp_query(dev->mdev, &qp->mqp, outb, sizeof(*outb));
3049 if (err)
3050 goto out_free;
3051
3052 mlx5_state = be32_to_cpu(context->flags) >> 28;
3053
3054 qp->state = to_ib_qp_state(mlx5_state);
3055 qp_attr->qp_state = qp->state;
3056 qp_attr->path_mtu = context->mtu_msgmax >> 5;
3057 qp_attr->path_mig_state =
3058 to_ib_mig_state((be32_to_cpu(context->flags) >> 11) & 0x3);
3059 qp_attr->qkey = be32_to_cpu(context->qkey);
3060 qp_attr->rq_psn = be32_to_cpu(context->rnr_nextrecvpsn) & 0xffffff;
3061 qp_attr->sq_psn = be32_to_cpu(context->next_send_psn) & 0xffffff;
3062 qp_attr->dest_qp_num = be32_to_cpu(context->log_pg_sz_remote_qpn) & 0xffffff;
3063 qp_attr->qp_access_flags =
3064 to_ib_qp_access_flags(be32_to_cpu(context->params2));
3065
3066 if (qp->ibqp.qp_type == IB_QPT_RC || qp->ibqp.qp_type == IB_QPT_UC) {
3067 to_ib_ah_attr(dev, &qp_attr->ah_attr, &context->pri_path);
3068 to_ib_ah_attr(dev, &qp_attr->alt_ah_attr, &context->alt_path);
3069 qp_attr->alt_pkey_index = context->alt_path.pkey_index & 0x7f;
3070 qp_attr->alt_port_num = qp_attr->alt_ah_attr.port_num;
3071 }
3072
3073 qp_attr->pkey_index = context->pri_path.pkey_index & 0x7f;
3074 qp_attr->port_num = context->pri_path.port;
3075
3076 /* qp_attr->en_sqd_async_notify is only applicable in modify qp */
3077 qp_attr->sq_draining = mlx5_state == MLX5_QP_STATE_SQ_DRAINING;
3078
3079 qp_attr->max_rd_atomic = 1 << ((be32_to_cpu(context->params1) >> 21) & 0x7);
3080
3081 qp_attr->max_dest_rd_atomic =
3082 1 << ((be32_to_cpu(context->params2) >> 21) & 0x7);
3083 qp_attr->min_rnr_timer =
3084 (be32_to_cpu(context->rnr_nextrecvpsn) >> 24) & 0x1f;
3085 qp_attr->timeout = context->pri_path.ackto_lt >> 3;
3086 qp_attr->retry_cnt = (be32_to_cpu(context->params1) >> 16) & 0x7;
3087 qp_attr->rnr_retry = (be32_to_cpu(context->params1) >> 13) & 0x7;
3088 qp_attr->alt_timeout = context->alt_path.ackto_lt >> 3;
3089 qp_attr->cur_qp_state = qp_attr->qp_state;
3090 qp_attr->cap.max_recv_wr = qp->rq.wqe_cnt;
3091 qp_attr->cap.max_recv_sge = qp->rq.max_gs;
3092
3093 if (!ibqp->uobject) {
3094 qp_attr->cap.max_send_wr = qp->sq.wqe_cnt;
3095 qp_attr->cap.max_send_sge = qp->sq.max_gs;
3096 } else {
3097 qp_attr->cap.max_send_wr = 0;
3098 qp_attr->cap.max_send_sge = 0;
3099 }
3100
3101 /* We don't support inline sends for kernel QPs (yet), and we
3102 * don't know what userspace's value should be.
3103 */
3104 qp_attr->cap.max_inline_data = 0;
3105
3106 qp_init_attr->cap = qp_attr->cap;
3107
3108 qp_init_attr->create_flags = 0;
3109 if (qp->flags & MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK)
3110 qp_init_attr->create_flags |= IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK;
3111
3112 qp_init_attr->sq_sig_type = qp->sq_signal_bits & MLX5_WQE_CTRL_CQ_UPDATE ?
3113 IB_SIGNAL_ALL_WR : IB_SIGNAL_REQ_WR;
3114
3115out_free:
3116 kfree(outb);
3117
3118out:
3119 mutex_unlock(&qp->mutex);
3120 return err;
3121}
3122
3123struct ib_xrcd *mlx5_ib_alloc_xrcd(struct ib_device *ibdev,
3124 struct ib_ucontext *context,
3125 struct ib_udata *udata)
3126{
3127 struct mlx5_ib_dev *dev = to_mdev(ibdev);
3128 struct mlx5_ib_xrcd *xrcd;
3129 int err;
3130
3131 if (!MLX5_CAP_GEN(dev->mdev, xrc))
3132  return ERR_PTR(-ENOSYS);
3133
3134 xrcd = kmalloc(sizeof(*xrcd), GFP_KERNEL);
3135 if (!xrcd)
3136 return ERR_PTR(-ENOMEM);
3137
3138 err = mlx5_core_xrcd_alloc(dev->mdev, &xrcd->xrcdn);
3139 if (err) {
3140 kfree(xrcd);
3141 return ERR_PTR(-ENOMEM);
3142 }
3143
3144 return &xrcd->ibxrcd;
3145}
3146
3147int mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd)
3148{
3149 struct mlx5_ib_dev *dev = to_mdev(xrcd->device);
3150 u32 xrcdn = to_mxrcd(xrcd)->xrcdn;
3151 int err;
3152
3153 err = mlx5_core_xrcd_dealloc(dev->mdev, xrcdn);
3154 if (err) {
3155 mlx5_ib_warn(dev, "failed to dealloc xrcdn 0x%x\n", xrcdn);
3156 return err;
3157 }
3158
3159 kfree(xrcd);
3160
3161 return 0;
3162}