blob: 1ea049ed87da7418bfe9bbe43eeea69ddab1f60f [file] [log] [blame]
Eli Cohene126ba92013-07-07 17:25:49 +03001/*
Saeed Mahameed6cf0a152015-04-02 17:07:30 +03002 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
Eli Cohene126ba92013-07-07 17:25:49 +03003 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#include <linux/module.h>
34#include <rdma/ib_umem.h>
Achiad Shochat2811ba52015-12-23 18:47:24 +020035#include <rdma/ib_cache.h>
Eli Cohene126ba92013-07-07 17:25:49 +030036#include "mlx5_ib.h"
37#include "user.h"
38
/* WQE signature generation is not supported currently (always 0) */
static int wq_signature;
41
enum {
	/* Request an ACK every 2^8 = 256 send WQEs (log scale) */
	MLX5_IB_ACK_REQ_FREQ	= 8,
};
45
enum {
	MLX5_IB_DEFAULT_SCHED_QUEUE	= 0x83,
	MLX5_IB_DEFAULT_QP0_SCHED_QUEUE	= 0x3f,
	/* link-layer type encodings used in the QP context */
	MLX5_IB_LINK_TYPE_IB		= 0,
	MLX5_IB_LINK_TYPE_ETH		= 1
};
52
enum {
	/* log2 of the send WQ basic block (1 << 6 = 64 bytes) */
	MLX5_IB_SQ_STRIDE	= 6,
	MLX5_IB_CACHE_LINE_SIZE	= 64,
};
57
/* Map IB work-request opcodes to mlx5 hardware send opcodes */
static const u32 mlx5_ib_opcode[] = {
	[IB_WR_SEND]				= MLX5_OPCODE_SEND,
	[IB_WR_SEND_WITH_IMM]			= MLX5_OPCODE_SEND_IMM,
	[IB_WR_RDMA_WRITE]			= MLX5_OPCODE_RDMA_WRITE,
	[IB_WR_RDMA_WRITE_WITH_IMM]		= MLX5_OPCODE_RDMA_WRITE_IMM,
	[IB_WR_RDMA_READ]			= MLX5_OPCODE_RDMA_READ,
	[IB_WR_ATOMIC_CMP_AND_SWP]		= MLX5_OPCODE_ATOMIC_CS,
	[IB_WR_ATOMIC_FETCH_AND_ADD]		= MLX5_OPCODE_ATOMIC_FA,
	[IB_WR_SEND_WITH_INV]			= MLX5_OPCODE_SEND_INVAL,
	[IB_WR_LOCAL_INV]			= MLX5_OPCODE_UMR,
	[IB_WR_REG_MR]				= MLX5_OPCODE_UMR,
	[IB_WR_MASKED_ATOMIC_CMP_AND_SWP]	= MLX5_OPCODE_ATOMIC_MASKED_CS,
	[IB_WR_MASKED_ATOMIC_FETCH_AND_ADD]	= MLX5_OPCODE_ATOMIC_MASKED_FA,
	[MLX5_IB_WR_UMR]			= MLX5_OPCODE_UMR,
};
73
Eli Cohene126ba92013-07-07 17:25:49 +030074
75static int is_qp0(enum ib_qp_type qp_type)
76{
77 return qp_type == IB_QPT_SMI;
78}
79
Eli Cohene126ba92013-07-07 17:25:49 +030080static int is_sqp(enum ib_qp_type qp_type)
81{
82 return is_qp0(qp_type) || is_qp1(qp_type);
83}
84
85static void *get_wqe(struct mlx5_ib_qp *qp, int offset)
86{
87 return mlx5_buf_offset(&qp->buf, offset);
88}
89
90static void *get_recv_wqe(struct mlx5_ib_qp *qp, int n)
91{
92 return get_wqe(qp, qp->rq.offset + (n << qp->rq.wqe_shift));
93}
94
95void *mlx5_get_send_wqe(struct mlx5_ib_qp *qp, int n)
96{
97 return get_wqe(qp, qp->sq.offset + (n << MLX5_IB_SQ_STRIDE));
98}
99
/**
 * mlx5_ib_read_user_wqe() - Copy a user-space WQE to kernel space.
 *
 * @qp: QP to copy from.
 * @send: copy from the send queue when non-zero, use the receive queue
 *	  otherwise.
 * @wqe_index: index to start copying from. For send work queues, the
 *	       wqe_index is in units of MLX5_SEND_WQE_BB.
 *	       For receive work queue, it is the number of work queue
 *	       element in the queue.
 * @buffer: destination buffer.
 * @length: maximum number of bytes to copy.
 *
 * Copies at least a single WQE, but may copy more data.
 *
 * NOTE(review): for send queues the total copied can be ds * MLX5_WQE_DS_UNITS,
 * which may exceed @length — callers presumably size @buffer for a full WQE;
 * confirm against all callers.
 *
 * Return: the number of bytes copied, or an error code.
 */
int mlx5_ib_read_user_wqe(struct mlx5_ib_qp *qp, int send, int wqe_index,
			  void *buffer, u32 length)
{
	struct ib_device *ibdev = qp->ibqp.device;
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct mlx5_ib_wq *wq = send ? &qp->sq : &qp->rq;
	size_t offset;
	size_t wq_end;
	struct ib_umem *umem = qp->umem;
	u32 first_copy_length;
	int wqe_length;
	int ret;

	if (wq->wqe_cnt == 0) {
		mlx5_ib_dbg(dev, "mlx5_ib_read_user_wqe for a QP with wqe_cnt == 0. qp_type: 0x%x\n",
			    qp->ibqp.qp_type);
		return -EINVAL;
	}

	/* wqe_index wraps modulo the queue depth */
	offset = wq->offset + ((wqe_index % wq->wqe_cnt) << wq->wqe_shift);
	wq_end = wq->offset + (wq->wqe_cnt << wq->wqe_shift);

	/* a send WQE must start with a control segment we can parse */
	if (send && length < sizeof(struct mlx5_wqe_ctrl_seg))
		return -EINVAL;

	if (offset > umem->length ||
	    (send && offset + sizeof(struct mlx5_wqe_ctrl_seg) > umem->length))
		return -EINVAL;

	/* first chunk: from the WQE start up to the end of the ring */
	first_copy_length = min_t(u32, offset + length, wq_end) - offset;
	ret = ib_umem_copy_from(buffer, umem, offset, first_copy_length);
	if (ret)
		return ret;

	if (send) {
		/* actual WQE size comes from the DS field of the ctrl seg */
		struct mlx5_wqe_ctrl_seg *ctrl = buffer;
		int ds = be32_to_cpu(ctrl->qpn_ds) & MLX5_WQE_CTRL_DS_MASK;

		wqe_length = ds * MLX5_WQE_DS_UNITS;
	} else {
		/* receive WQEs are fixed size */
		wqe_length = 1 << wq->wqe_shift;
	}

	if (wqe_length <= first_copy_length)
		return first_copy_length;

	/* WQE wrapped around the ring: copy the remainder from the start */
	ret = ib_umem_copy_from(buffer + first_copy_length, umem, wq->offset,
				wqe_length - first_copy_length);
	if (ret)
		return ret;

	return wqe_length;
}
170
/*
 * Firmware event callback for a QP: translate the mlx5 event code to the
 * corresponding IB event and deliver it to the consumer's event handler,
 * if one is registered.
 */
static void mlx5_ib_qp_event(struct mlx5_core_qp *qp, int type)
{
	struct ib_qp *ibqp = &to_mibqp(qp)->ibqp;
	struct ib_event event;

	/* path migration completed: the alternate path is now active */
	if (type == MLX5_EVENT_TYPE_PATH_MIG)
		to_mibqp(qp)->port = to_mibqp(qp)->alt_port;

	if (ibqp->event_handler) {
		event.device     = ibqp->device;
		event.element.qp = ibqp;
		switch (type) {
		case MLX5_EVENT_TYPE_PATH_MIG:
			event.event = IB_EVENT_PATH_MIG;
			break;
		case MLX5_EVENT_TYPE_COMM_EST:
			event.event = IB_EVENT_COMM_EST;
			break;
		case MLX5_EVENT_TYPE_SQ_DRAINED:
			event.event = IB_EVENT_SQ_DRAINED;
			break;
		case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
			event.event = IB_EVENT_QP_LAST_WQE_REACHED;
			break;
		case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
			event.event = IB_EVENT_QP_FATAL;
			break;
		case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
			event.event = IB_EVENT_PATH_MIG_ERR;
			break;
		case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
			event.event = IB_EVENT_QP_REQ_ERR;
			break;
		case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
			event.event = IB_EVENT_QP_ACCESS_ERR;
			break;
		default:
			/* unknown event: log and drop rather than forward garbage */
			pr_warn("mlx5_ib: Unexpected event type %d on QP %06x\n", type, qp->qpn);
			return;
		}

		ibqp->event_handler(&event, ibqp->qp_context);
	}
}
215
/*
 * Compute and store the receive queue geometry (wqe_cnt, wqe_shift, max_gs,
 * max_post) on @qp. For user QPs (@ucmd non-NULL) the layout is taken from
 * the user's request; for kernel QPs it is derived from @cap.
 *
 * Returns 0 on success or -EINVAL if the request exceeds device limits.
 */
static int set_rq_size(struct mlx5_ib_dev *dev, struct ib_qp_cap *cap,
		       int has_rq, struct mlx5_ib_qp *qp, struct mlx5_ib_create_qp *ucmd)
{
	int wqe_size;
	int wq_size;

	/* Sanity check RQ size before proceeding */
	if (cap->max_recv_wr > (1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz)))
		return -EINVAL;

	if (!has_rq) {
		/* no RQ (SRQ-attached, XRC, or zero max_recv_wr) */
		qp->rq.max_gs = 0;
		qp->rq.wqe_cnt = 0;
		qp->rq.wqe_shift = 0;
	} else {
		if (ucmd) {
			/* user QP: trust the (pre-validated) user layout */
			qp->rq.wqe_cnt = ucmd->rq_wqe_count;
			qp->rq.wqe_shift = ucmd->rq_wqe_shift;
			/* one data seg per gather entry; signature seg eats one slot */
			qp->rq.max_gs = (1 << qp->rq.wqe_shift) / sizeof(struct mlx5_wqe_data_seg) - qp->wq_sig;
			qp->rq.max_post = qp->rq.wqe_cnt;
		} else {
			/* kernel QP: size the WQE for max_recv_sge (+ optional signature) */
			wqe_size = qp->wq_sig ? sizeof(struct mlx5_wqe_signature_seg) : 0;
			wqe_size += cap->max_recv_sge * sizeof(struct mlx5_wqe_data_seg);
			wqe_size = roundup_pow_of_two(wqe_size);
			wq_size = roundup_pow_of_two(cap->max_recv_wr) * wqe_size;
			wq_size = max_t(int, wq_size, MLX5_SEND_WQE_BB);
			qp->rq.wqe_cnt = wq_size / wqe_size;
			if (wqe_size > MLX5_CAP_GEN(dev->mdev, max_wqe_sz_rq)) {
				mlx5_ib_dbg(dev, "wqe_size %d, max %d\n",
					    wqe_size,
					    MLX5_CAP_GEN(dev->mdev,
							 max_wqe_sz_rq));
				return -EINVAL;
			}
			qp->rq.wqe_shift = ilog2(wqe_size);
			qp->rq.max_gs = (1 << qp->rq.wqe_shift) / sizeof(struct mlx5_wqe_data_seg) - qp->wq_sig;
			qp->rq.max_post = qp->rq.wqe_cnt;
		}
	}

	return 0;
}
258
/*
 * Return the fixed per-WQE overhead (in bytes) of the segments a send WQE
 * of the given QP type carries before any data/inline segments, or -EINVAL
 * for unsupported QP types.
 */
static int sq_overhead(enum ib_qp_type qp_type)
{
	int size = 0;

	switch (qp_type) {
	case IB_QPT_XRC_INI:
		size += sizeof(struct mlx5_wqe_xrc_seg);
		/* fall through -- XRC INI carries everything RC does, plus an XRC seg */
	case IB_QPT_RC:
		size += sizeof(struct mlx5_wqe_ctrl_seg) +
			sizeof(struct mlx5_wqe_atomic_seg) +
			sizeof(struct mlx5_wqe_raddr_seg);
		break;

	case IB_QPT_XRC_TGT:
		/* XRC target has no send queue */
		return 0;

	case IB_QPT_UC:
		size += sizeof(struct mlx5_wqe_ctrl_seg) +
			sizeof(struct mlx5_wqe_raddr_seg) +
			sizeof(struct mlx5_wqe_umr_ctrl_seg) +
			sizeof(struct mlx5_mkey_seg);
		break;

	case IB_QPT_UD:
	case IB_QPT_SMI:
	case IB_QPT_GSI:
		size += sizeof(struct mlx5_wqe_ctrl_seg) +
			sizeof(struct mlx5_wqe_datagram_seg);
		break;

	case MLX5_IB_QPT_REG_UMR:
		size += sizeof(struct mlx5_wqe_ctrl_seg) +
			sizeof(struct mlx5_wqe_umr_ctrl_seg) +
			sizeof(struct mlx5_mkey_seg);
		break;

	default:
		return -EINVAL;
	}

	return size;
}
302
303static int calc_send_wqe(struct ib_qp_init_attr *attr)
304{
305 int inl_size = 0;
306 int size;
307
308 size = sq_overhead(attr->qp_type);
309 if (size < 0)
310 return size;
311
312 if (attr->cap.max_inline_data) {
313 inl_size = size + sizeof(struct mlx5_wqe_inline_seg) +
314 attr->cap.max_inline_data;
315 }
316
317 size += attr->cap.max_send_sge * sizeof(struct mlx5_wqe_data_seg);
Sagi Grimberge1e66cc2014-02-23 14:19:07 +0200318 if (attr->create_flags & IB_QP_CREATE_SIGNATURE_EN &&
319 ALIGN(max_t(int, inl_size, size), MLX5_SEND_WQE_BB) < MLX5_SIG_WQE_SIZE)
320 return MLX5_SIG_WQE_SIZE;
321 else
322 return ALIGN(max_t(int, inl_size, size), MLX5_SEND_WQE_BB);
Eli Cohene126ba92013-07-07 17:25:49 +0300323}
324
/*
 * Size the send queue for a kernel QP: derive the WQE size from @attr,
 * validate it against device caps, and fill in qp->sq geometry and
 * qp->max_inline_data. Also reports the achievable values back through
 * @attr->cap.
 *
 * Returns the total SQ byte size on success (0 if no send WQEs were
 * requested), or a negative errno.
 */
static int calc_sq_size(struct mlx5_ib_dev *dev, struct ib_qp_init_attr *attr,
			struct mlx5_ib_qp *qp)
{
	int wqe_size;
	int wq_size;

	if (!attr->cap.max_send_wr)
		return 0;

	wqe_size = calc_send_wqe(attr);
	mlx5_ib_dbg(dev, "wqe_size %d\n", wqe_size);
	if (wqe_size < 0)
		return wqe_size;

	if (wqe_size > MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq)) {
		mlx5_ib_dbg(dev, "wqe_size(%d) > max_sq_desc_sz(%d)\n",
			    wqe_size, MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq));
		return -EINVAL;
	}

	/* whatever space the WQE has beyond overhead can hold inline data */
	qp->max_inline_data = wqe_size - sq_overhead(attr->qp_type) -
		sizeof(struct mlx5_wqe_inline_seg);
	attr->cap.max_inline_data = qp->max_inline_data;

	if (attr->create_flags & IB_QP_CREATE_SIGNATURE_EN)
		qp->signature_en = true;

	wq_size = roundup_pow_of_two(attr->cap.max_send_wr * wqe_size);
	/* wqe_cnt is in 64-byte basic blocks, not in WQEs */
	qp->sq.wqe_cnt = wq_size / MLX5_SEND_WQE_BB;
	if (qp->sq.wqe_cnt > (1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz))) {
		mlx5_ib_dbg(dev, "wqe count(%d) exceeds limits(%d)\n",
			    qp->sq.wqe_cnt,
			    1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz));
		return -ENOMEM;
	}
	qp->sq.wqe_shift = ilog2(MLX5_SEND_WQE_BB);
	qp->sq.max_gs = attr->cap.max_send_sge;
	qp->sq.max_post = wq_size / wqe_size;
	attr->cap.max_send_wr = qp->sq.max_post;

	return wq_size;
}
367
368static int set_user_buf_size(struct mlx5_ib_dev *dev,
369 struct mlx5_ib_qp *qp,
370 struct mlx5_ib_create_qp *ucmd)
371{
372 int desc_sz = 1 << qp->sq.wqe_shift;
373
Saeed Mahameed938fe832015-05-28 22:28:41 +0300374 if (desc_sz > MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq)) {
Eli Cohene126ba92013-07-07 17:25:49 +0300375 mlx5_ib_warn(dev, "desc_sz %d, max_sq_desc_sz %d\n",
Saeed Mahameed938fe832015-05-28 22:28:41 +0300376 desc_sz, MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq));
Eli Cohene126ba92013-07-07 17:25:49 +0300377 return -EINVAL;
378 }
379
380 if (ucmd->sq_wqe_count && ((1 << ilog2(ucmd->sq_wqe_count)) != ucmd->sq_wqe_count)) {
381 mlx5_ib_warn(dev, "sq_wqe_count %d, sq_wqe_count %d\n",
382 ucmd->sq_wqe_count, ucmd->sq_wqe_count);
383 return -EINVAL;
384 }
385
386 qp->sq.wqe_cnt = ucmd->sq_wqe_count;
387
Saeed Mahameed938fe832015-05-28 22:28:41 +0300388 if (qp->sq.wqe_cnt > (1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz))) {
Eli Cohene126ba92013-07-07 17:25:49 +0300389 mlx5_ib_warn(dev, "wqe_cnt %d, max_wqes %d\n",
Saeed Mahameed938fe832015-05-28 22:28:41 +0300390 qp->sq.wqe_cnt,
391 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz));
Eli Cohene126ba92013-07-07 17:25:49 +0300392 return -EINVAL;
393 }
394
395 qp->buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) +
396 (qp->sq.wqe_cnt << 6);
397
398 return 0;
399}
400
401static int qp_has_rq(struct ib_qp_init_attr *attr)
402{
403 if (attr->qp_type == IB_QPT_XRC_INI ||
404 attr->qp_type == IB_QPT_XRC_TGT || attr->srq ||
405 attr->qp_type == MLX5_IB_QPT_REG_UMR ||
406 !attr->cap.max_recv_wr)
407 return 0;
408
409 return 1;
410}
411
/* Index of the first medium-latency-class uuar (index 0 is reserved) */
static int first_med_uuar(void)
{
	return 1;
}
416
/*
 * Advance to the next allocatable uuar index, skipping positions 2 and 3
 * of every group of four (those are reserved slots).
 */
static int next_uuar(int n)
{
	do {
		n++;
	} while ((n % 4) & 2);

	return n;
}
426
427static int num_med_uuar(struct mlx5_uuar_info *uuari)
428{
429 int n;
430
431 n = uuari->num_uars * MLX5_NON_FP_BF_REGS_PER_PAGE -
432 uuari->num_low_latency_uuars - 1;
433
434 return n >= 0 ? n : 0;
435}
436
437static int max_uuari(struct mlx5_uuar_info *uuari)
438{
439 return uuari->num_uars * 4;
440}
441
/*
 * Return the first uuar index belonging to the high latency class, i.e. the
 * slot right after the medium-class region.
 *
 * Fix: when num_med_uuar() is 0 the original for(;;) loop could never hit
 * "t == med" (t starts counting at 1) and spun forever; with no medium
 * uuars the high class simply starts where the medium class would have.
 */
static int first_hi_uuar(struct mlx5_uuar_info *uuari)
{
	int med;
	int i;
	int t;

	med = num_med_uuar(uuari);
	if (!med)
		return first_med_uuar();

	for (t = 0, i = first_med_uuar();; i = next_uuar(i)) {
		t++;
		if (t == med)
			return next_uuar(i);
	}
}
457
Eli Cohene126ba92013-07-07 17:25:49 +0300458static int alloc_high_class_uuar(struct mlx5_uuar_info *uuari)
459{
Eli Cohene126ba92013-07-07 17:25:49 +0300460 int i;
461
Eli Cohenc1be5232014-01-14 17:45:12 +0200462 for (i = first_hi_uuar(uuari); i < max_uuari(uuari); i = next_uuar(i)) {
Eli Cohene126ba92013-07-07 17:25:49 +0300463 if (!test_bit(i, uuari->bitmap)) {
464 set_bit(i, uuari->bitmap);
465 uuari->count[i]++;
466 return i;
467 }
468 }
469
470 return -ENOMEM;
471}
472
473static int alloc_med_class_uuar(struct mlx5_uuar_info *uuari)
474{
Eli Cohenc1be5232014-01-14 17:45:12 +0200475 int minidx = first_med_uuar();
Eli Cohene126ba92013-07-07 17:25:49 +0300476 int i;
477
Eli Cohenc1be5232014-01-14 17:45:12 +0200478 for (i = first_med_uuar(); i < first_hi_uuar(uuari); i = next_uuar(i)) {
Eli Cohene126ba92013-07-07 17:25:49 +0300479 if (uuari->count[i] < uuari->count[minidx])
480 minidx = i;
481 }
482
483 uuari->count[minidx]++;
484 return minidx;
485}
486
/*
 * Allocate a uuar (blue-flame register slot) of the requested latency class.
 *
 * LOW always succeeds by sharing slot 0; FAST_PATH always returns the
 * dedicated slot 2. MEDIUM and HIGH require uuari->ver >= 2 (older uuar
 * interface versions have no per-class slots) and may fail with -ENOMEM.
 * Serialised by uuari->lock.
 */
static int alloc_uuar(struct mlx5_uuar_info *uuari,
		      enum mlx5_ib_latency_class lat)
{
	int uuarn = -EINVAL;

	mutex_lock(&uuari->lock);
	switch (lat) {
	case MLX5_IB_LATENCY_CLASS_LOW:
		/* slot 0 is shared; just bump its refcount */
		uuarn = 0;
		uuari->count[uuarn]++;
		break;

	case MLX5_IB_LATENCY_CLASS_MEDIUM:
		if (uuari->ver < 2)
			uuarn = -ENOMEM;
		else
			uuarn = alloc_med_class_uuar(uuari);
		break;

	case MLX5_IB_LATENCY_CLASS_HIGH:
		if (uuari->ver < 2)
			uuarn = -ENOMEM;
		else
			uuarn = alloc_high_class_uuar(uuari);
		break;

	case MLX5_IB_LATENCY_CLASS_FAST_PATH:
		uuarn = 2;
		break;
	}
	mutex_unlock(&uuari->lock);

	return uuarn;
}
521
522static void free_med_class_uuar(struct mlx5_uuar_info *uuari, int uuarn)
523{
524 clear_bit(uuarn, uuari->bitmap);
525 --uuari->count[uuarn];
526}
527
528static void free_high_class_uuar(struct mlx5_uuar_info *uuari, int uuarn)
529{
530 clear_bit(uuarn, uuari->bitmap);
531 --uuari->count[uuarn];
532}
533
534static void free_uuar(struct mlx5_uuar_info *uuari, int uuarn)
535{
536 int nuuars = uuari->num_uars * MLX5_BF_REGS_PER_PAGE;
537 int high_uuar = nuuars - uuari->num_low_latency_uuars;
538
539 mutex_lock(&uuari->lock);
540 if (uuarn == 0) {
541 --uuari->count[uuarn];
542 goto out;
543 }
544
545 if (uuarn < high_uuar) {
546 free_med_class_uuar(uuari, uuarn);
547 goto out;
548 }
549
550 free_high_class_uuar(uuari, uuarn);
551
552out:
553 mutex_unlock(&uuari->lock);
554}
555
556static enum mlx5_qp_state to_mlx5_state(enum ib_qp_state state)
557{
558 switch (state) {
559 case IB_QPS_RESET: return MLX5_QP_STATE_RST;
560 case IB_QPS_INIT: return MLX5_QP_STATE_INIT;
561 case IB_QPS_RTR: return MLX5_QP_STATE_RTR;
562 case IB_QPS_RTS: return MLX5_QP_STATE_RTS;
563 case IB_QPS_SQD: return MLX5_QP_STATE_SQD;
564 case IB_QPS_SQE: return MLX5_QP_STATE_SQER;
565 case IB_QPS_ERR: return MLX5_QP_STATE_ERR;
566 default: return -1;
567 }
568}
569
/*
 * Translate an IB QP type to the mlx5 service type (MLX5_QP_ST_*).
 * Raw packet and unknown types are not supported and yield -EINVAL.
 */
static int to_mlx5_st(enum ib_qp_type type)
{
	switch (type) {
	case IB_QPT_RC:			return MLX5_QP_ST_RC;
	case IB_QPT_UC:			return MLX5_QP_ST_UC;
	case IB_QPT_UD:			return MLX5_QP_ST_UD;
	case MLX5_IB_QPT_REG_UMR:	return MLX5_QP_ST_REG_UMR;
	case IB_QPT_XRC_INI:
	case IB_QPT_XRC_TGT:		return MLX5_QP_ST_XRC;
	case IB_QPT_SMI:		return MLX5_QP_ST_QP0;
	case IB_QPT_GSI:		return MLX5_QP_ST_QP1;
	case IB_QPT_RAW_IPV6:		return MLX5_QP_ST_RAW_IPV6;
	case IB_QPT_RAW_ETHERTYPE:	return MLX5_QP_ST_RAW_ETHERTYPE;
	case IB_QPT_RAW_PACKET:
	case IB_QPT_MAX:
	default:		return -EINVAL;
	}
}
588
589static int uuarn_to_uar_index(struct mlx5_uuar_info *uuari, int uuarn)
590{
591 return uuari->uars[uuarn / MLX5_BF_REGS_PER_PAGE].index;
592}
593
/*
 * Build a user-space QP: copy the user's creation request, pick a uuar
 * (falling back through latency classes), pin and map the user's WQ buffer,
 * allocate and partially fill the firmware create-QP mailbox (*in/*inlen),
 * map the user doorbell page, and report the chosen uuar back to the user.
 *
 * On success *in must later be freed by the caller with kvfree(). On error
 * everything acquired here is unwound via the goto ladder below.
 */
static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
			  struct mlx5_ib_qp *qp, struct ib_udata *udata,
			  struct mlx5_create_qp_mbox_in **in,
			  struct mlx5_ib_create_qp_resp *resp, int *inlen)
{
	struct mlx5_ib_ucontext *context;
	struct mlx5_ib_create_qp ucmd;
	int page_shift = 0;
	int uar_index;
	int npages;
	u32 offset = 0;
	int uuarn;
	int ncont = 0;
	int err;

	err = ib_copy_from_udata(&ucmd, udata, sizeof(ucmd));
	if (err) {
		mlx5_ib_dbg(dev, "copy failed\n");
		return err;
	}

	context = to_mucontext(pd->uobject->context);
	/*
	 * TBD: should come from the verbs when we have the API
	 */
	if (qp->flags & MLX5_IB_QP_CROSS_CHANNEL)
		/* In CROSS_CHANNEL CQ and QP must use the same UAR */
		uuarn = MLX5_CROSS_CHANNEL_UUAR;
	else {
		/* prefer the high latency class (a dedicated, low-latency uuar),
		 * then degrade to medium and finally the shared low class
		 */
		uuarn = alloc_uuar(&context->uuari, MLX5_IB_LATENCY_CLASS_HIGH);
		if (uuarn < 0) {
			mlx5_ib_dbg(dev, "failed to allocate low latency UUAR\n");
			mlx5_ib_dbg(dev, "reverting to medium latency\n");
			uuarn = alloc_uuar(&context->uuari, MLX5_IB_LATENCY_CLASS_MEDIUM);
			if (uuarn < 0) {
				mlx5_ib_dbg(dev, "failed to allocate medium latency UUAR\n");
				mlx5_ib_dbg(dev, "reverting to high latency\n");
				uuarn = alloc_uuar(&context->uuari, MLX5_IB_LATENCY_CLASS_LOW);
				if (uuarn < 0) {
					mlx5_ib_warn(dev, "uuar allocation failed\n");
					return uuarn;
				}
			}
		}
	}

	uar_index = uuarn_to_uar_index(&context->uuari, uuarn);
	mlx5_ib_dbg(dev, "uuarn 0x%x, uar_index 0x%x\n", uuarn, uar_index);

	qp->rq.offset = 0;
	qp->sq.wqe_shift = ilog2(MLX5_SEND_WQE_BB);
	qp->sq.offset = qp->rq.wqe_cnt << qp->rq.wqe_shift;

	err = set_user_buf_size(dev, qp, &ucmd);
	if (err)
		goto err_uuar;

	/* pin the user's WQ buffer, if one was supplied (XRC TGT has none) */
	if (ucmd.buf_addr && qp->buf_size) {
		qp->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr,
				       qp->buf_size, 0, 0);
		if (IS_ERR(qp->umem)) {
			mlx5_ib_dbg(dev, "umem_get failed\n");
			err = PTR_ERR(qp->umem);
			goto err_uuar;
		}
	} else {
		qp->umem = NULL;
	}

	if (qp->umem) {
		mlx5_ib_cont_pages(qp->umem, ucmd.buf_addr, &npages, &page_shift,
				   &ncont, NULL);
		err = mlx5_ib_get_buf_offset(ucmd.buf_addr, page_shift, &offset);
		if (err) {
			mlx5_ib_warn(dev, "bad offset\n");
			goto err_umem;
		}
		mlx5_ib_dbg(dev, "addr 0x%llx, size %d, npages %d, page_shift %d, ncont %d, offset %d\n",
			    ucmd.buf_addr, qp->buf_size, npages, page_shift, ncont, offset);
	}

	/* mailbox carries one PAS entry per compound page */
	*inlen = sizeof(**in) + sizeof(*(*in)->pas) * ncont;
	*in = mlx5_vzalloc(*inlen);
	if (!*in) {
		err = -ENOMEM;
		goto err_umem;
	}
	if (qp->umem)
		mlx5_ib_populate_pas(dev, qp->umem, page_shift, (*in)->pas, 0);
	(*in)->ctx.log_pg_sz_remote_qpn =
		cpu_to_be32((page_shift - MLX5_ADAPTER_PAGE_SHIFT) << 24);
	(*in)->ctx.params2 = cpu_to_be32(offset << 6);

	(*in)->ctx.qp_counter_set_usr_page = cpu_to_be32(uar_index);
	resp->uuar_index = uuarn;
	qp->uuarn = uuarn;

	err = mlx5_ib_db_map_user(context, ucmd.db_addr, &qp->db);
	if (err) {
		mlx5_ib_dbg(dev, "map failed\n");
		goto err_free;
	}

	err = ib_copy_to_udata(udata, resp, sizeof(*resp));
	if (err) {
		mlx5_ib_dbg(dev, "copy failed\n");
		goto err_unmap;
	}
	qp->create_type = MLX5_QP_USER;

	return 0;

err_unmap:
	mlx5_ib_db_unmap_user(context, &qp->db);

err_free:
	kvfree(*in);

err_umem:
	if (qp->umem)
		ib_umem_release(qp->umem);

err_uuar:
	free_uuar(&context->uuari, uuarn);
	return err;
}
720
721static void destroy_qp_user(struct ib_pd *pd, struct mlx5_ib_qp *qp)
722{
723 struct mlx5_ib_ucontext *context;
724
725 context = to_mucontext(pd->uobject->context);
726 mlx5_ib_db_unmap_user(context, &qp->db);
Eli Cohen9e9c47d2014-01-14 17:45:21 +0200727 if (qp->umem)
728 ib_umem_release(qp->umem);
Eli Cohene126ba92013-07-07 17:25:49 +0300729 free_uuar(&context->uuari, qp->uuarn);
730}
731
/*
 * Build a kernel QP: allocate a uuar (fast-path class for the UMR QP),
 * size and allocate the contiguous WQ buffer and doorbell record, allocate
 * and partially fill the firmware create-QP mailbox (*in/*inlen), and
 * allocate the per-WQE bookkeeping arrays.
 *
 * On success *in must later be freed by the caller with kvfree(). On error
 * everything acquired here is unwound via the goto ladder below.
 */
static int create_kernel_qp(struct mlx5_ib_dev *dev,
			    struct ib_qp_init_attr *init_attr,
			    struct mlx5_ib_qp *qp,
			    struct mlx5_create_qp_mbox_in **in, int *inlen)
{
	enum mlx5_ib_latency_class lc = MLX5_IB_LATENCY_CLASS_LOW;
	struct mlx5_uuar_info *uuari;
	int uar_index;
	int uuarn;
	int err;

	uuari = &dev->mdev->priv.uuari;
	/* only these create flags are supported for kernel QPs */
	if (init_attr->create_flags & ~(IB_QP_CREATE_SIGNATURE_EN | IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK))
		return -EINVAL;

	if (init_attr->qp_type == MLX5_IB_QPT_REG_UMR)
		lc = MLX5_IB_LATENCY_CLASS_FAST_PATH;

	uuarn = alloc_uuar(uuari, lc);
	if (uuarn < 0) {
		mlx5_ib_dbg(dev, "\n");
		return -ENOMEM;
	}

	qp->bf = &uuari->bfs[uuarn];
	uar_index = qp->bf->uar->index;

	err = calc_sq_size(dev, init_attr, qp);
	if (err < 0) {
		mlx5_ib_dbg(dev, "err %d\n", err);
		goto err_uuar;
	}

	/* buffer layout: RQ first, SQ after it; err holds the SQ byte size */
	qp->rq.offset = 0;
	qp->sq.offset = qp->rq.wqe_cnt << qp->rq.wqe_shift;
	qp->buf_size = err + (qp->rq.wqe_cnt << qp->rq.wqe_shift);

	err = mlx5_buf_alloc(dev->mdev, qp->buf_size, &qp->buf);
	if (err) {
		mlx5_ib_dbg(dev, "err %d\n", err);
		goto err_uuar;
	}

	qp->sq.qend = mlx5_get_send_wqe(qp, qp->sq.wqe_cnt);
	*inlen = sizeof(**in) + sizeof(*(*in)->pas) * qp->buf.npages;
	*in = mlx5_vzalloc(*inlen);
	if (!*in) {
		err = -ENOMEM;
		goto err_buf;
	}
	(*in)->ctx.qp_counter_set_usr_page = cpu_to_be32(uar_index);
	(*in)->ctx.log_pg_sz_remote_qpn =
		cpu_to_be32((qp->buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT) << 24);
	/* Set "fast registration enabled" for all kernel QPs */
	(*in)->ctx.params1 |= cpu_to_be32(1 << 11);
	(*in)->ctx.sq_crq_size |= cpu_to_be16(1 << 4);

	mlx5_fill_page_array(&qp->buf, (*in)->pas);

	err = mlx5_db_alloc(dev->mdev, &qp->db);
	if (err) {
		mlx5_ib_dbg(dev, "err %d\n", err);
		goto err_free;
	}

	/* per-WQE bookkeeping; checked as a group below, freed as a group on error */
	qp->sq.wrid = kmalloc(qp->sq.wqe_cnt * sizeof(*qp->sq.wrid), GFP_KERNEL);
	qp->sq.wr_data = kmalloc(qp->sq.wqe_cnt * sizeof(*qp->sq.wr_data), GFP_KERNEL);
	qp->rq.wrid = kmalloc(qp->rq.wqe_cnt * sizeof(*qp->rq.wrid), GFP_KERNEL);
	qp->sq.w_list = kmalloc(qp->sq.wqe_cnt * sizeof(*qp->sq.w_list), GFP_KERNEL);
	qp->sq.wqe_head = kmalloc(qp->sq.wqe_cnt * sizeof(*qp->sq.wqe_head), GFP_KERNEL);

	if (!qp->sq.wrid || !qp->sq.wr_data || !qp->rq.wrid ||
	    !qp->sq.w_list || !qp->sq.wqe_head) {
		err = -ENOMEM;
		goto err_wrid;
	}
	qp->create_type = MLX5_QP_KERNEL;

	return 0;

err_wrid:
	mlx5_db_free(dev->mdev, &qp->db);
	kfree(qp->sq.wqe_head);
	kfree(qp->sq.w_list);
	kfree(qp->sq.wrid);
	kfree(qp->sq.wr_data);
	kfree(qp->rq.wrid);

err_free:
	kvfree(*in);

err_buf:
	mlx5_buf_free(dev->mdev, &qp->buf);

err_uuar:
	free_uuar(&dev->mdev->priv.uuari, uuarn);
	return err;
}
830
/* Undo create_kernel_qp(): free the doorbell, bookkeeping arrays, the WQ
 * buffer, and return the uuar to its class pool (mirrors the error ladder
 * in create_kernel_qp()).
 */
static void destroy_qp_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp)
{
	mlx5_db_free(dev->mdev, &qp->db);
	kfree(qp->sq.wqe_head);
	kfree(qp->sq.w_list);
	kfree(qp->sq.wrid);
	kfree(qp->sq.wr_data);
	kfree(qp->rq.wrid);
	mlx5_buf_free(dev->mdev, &qp->buf);
	free_uuar(&dev->mdev->priv.uuari, qp->bf->uuarn);
}
842
843static __be32 get_rx_type(struct mlx5_ib_qp *qp, struct ib_qp_init_attr *attr)
844{
845 if (attr->srq || (attr->qp_type == IB_QPT_XRC_TGT) ||
846 (attr->qp_type == IB_QPT_XRC_INI))
847 return cpu_to_be32(MLX5_SRQ_RQ);
848 else if (!qp->has_rq)
849 return cpu_to_be32(MLX5_ZERO_LEN_RQ);
850 else
851 return cpu_to_be32(MLX5_NON_ZERO_RQ);
852}
853
854static int is_connected(enum ib_qp_type qp_type)
855{
856 if (qp_type == IB_QPT_RC || qp_type == IB_QPT_UC)
857 return 1;
858
859 return 0;
860}
861
862static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
863 struct ib_qp_init_attr *init_attr,
864 struct ib_udata *udata, struct mlx5_ib_qp *qp)
865{
866 struct mlx5_ib_resources *devr = &dev->devr;
Saeed Mahameed938fe832015-05-28 22:28:41 +0300867 struct mlx5_core_dev *mdev = dev->mdev;
Eli Cohene126ba92013-07-07 17:25:49 +0300868 struct mlx5_ib_create_qp_resp resp;
869 struct mlx5_create_qp_mbox_in *in;
870 struct mlx5_ib_create_qp ucmd;
871 int inlen = sizeof(*in);
872 int err;
873
Haggai Eran6aec21f2014-12-11 17:04:23 +0200874 mlx5_ib_odp_create_qp(qp);
875
Eli Cohene126ba92013-07-07 17:25:49 +0300876 mutex_init(&qp->mutex);
877 spin_lock_init(&qp->sq.lock);
878 spin_lock_init(&qp->rq.lock);
879
Eli Cohenf360d882014-04-02 00:10:16 +0300880 if (init_attr->create_flags & IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK) {
Saeed Mahameed938fe832015-05-28 22:28:41 +0300881 if (!MLX5_CAP_GEN(mdev, block_lb_mc)) {
Eli Cohenf360d882014-04-02 00:10:16 +0300882 mlx5_ib_dbg(dev, "block multicast loopback isn't supported\n");
883 return -EINVAL;
884 } else {
885 qp->flags |= MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK;
886 }
887 }
888
Leon Romanovsky051f2632015-12-20 12:16:11 +0200889 if (init_attr->create_flags &
890 (IB_QP_CREATE_CROSS_CHANNEL |
891 IB_QP_CREATE_MANAGED_SEND |
892 IB_QP_CREATE_MANAGED_RECV)) {
893 if (!MLX5_CAP_GEN(mdev, cd)) {
894 mlx5_ib_dbg(dev, "cross-channel isn't supported\n");
895 return -EINVAL;
896 }
897 if (init_attr->create_flags & IB_QP_CREATE_CROSS_CHANNEL)
898 qp->flags |= MLX5_IB_QP_CROSS_CHANNEL;
899 if (init_attr->create_flags & IB_QP_CREATE_MANAGED_SEND)
900 qp->flags |= MLX5_IB_QP_MANAGED_SEND;
901 if (init_attr->create_flags & IB_QP_CREATE_MANAGED_RECV)
902 qp->flags |= MLX5_IB_QP_MANAGED_RECV;
903 }
Eli Cohene126ba92013-07-07 17:25:49 +0300904 if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR)
905 qp->sq_signal_bits = MLX5_WQE_CTRL_CQ_UPDATE;
906
907 if (pd && pd->uobject) {
908 if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {
909 mlx5_ib_dbg(dev, "copy failed\n");
910 return -EFAULT;
911 }
912
913 qp->wq_sig = !!(ucmd.flags & MLX5_QP_FLAG_SIGNATURE);
914 qp->scat_cqe = !!(ucmd.flags & MLX5_QP_FLAG_SCATTER_CQE);
915 } else {
916 qp->wq_sig = !!wq_signature;
917 }
918
919 qp->has_rq = qp_has_rq(init_attr);
920 err = set_rq_size(dev, &init_attr->cap, qp->has_rq,
921 qp, (pd && pd->uobject) ? &ucmd : NULL);
922 if (err) {
923 mlx5_ib_dbg(dev, "err %d\n", err);
924 return err;
925 }
926
927 if (pd) {
928 if (pd->uobject) {
Saeed Mahameed938fe832015-05-28 22:28:41 +0300929 __u32 max_wqes =
930 1 << MLX5_CAP_GEN(mdev, log_max_qp_sz);
Eli Cohene126ba92013-07-07 17:25:49 +0300931 mlx5_ib_dbg(dev, "requested sq_wqe_count (%d)\n", ucmd.sq_wqe_count);
932 if (ucmd.rq_wqe_shift != qp->rq.wqe_shift ||
933 ucmd.rq_wqe_count != qp->rq.wqe_cnt) {
934 mlx5_ib_dbg(dev, "invalid rq params\n");
935 return -EINVAL;
936 }
Saeed Mahameed938fe832015-05-28 22:28:41 +0300937 if (ucmd.sq_wqe_count > max_wqes) {
Eli Cohene126ba92013-07-07 17:25:49 +0300938 mlx5_ib_dbg(dev, "requested sq_wqe_count (%d) > max allowed (%d)\n",
Saeed Mahameed938fe832015-05-28 22:28:41 +0300939 ucmd.sq_wqe_count, max_wqes);
Eli Cohene126ba92013-07-07 17:25:49 +0300940 return -EINVAL;
941 }
942 err = create_user_qp(dev, pd, qp, udata, &in, &resp, &inlen);
943 if (err)
944 mlx5_ib_dbg(dev, "err %d\n", err);
945 } else {
946 err = create_kernel_qp(dev, init_attr, qp, &in, &inlen);
947 if (err)
948 mlx5_ib_dbg(dev, "err %d\n", err);
Eli Cohene126ba92013-07-07 17:25:49 +0300949 }
950
951 if (err)
952 return err;
953 } else {
954 in = mlx5_vzalloc(sizeof(*in));
955 if (!in)
956 return -ENOMEM;
957
958 qp->create_type = MLX5_QP_EMPTY;
959 }
960
961 if (is_sqp(init_attr->qp_type))
962 qp->port = init_attr->port_num;
963
964 in->ctx.flags = cpu_to_be32(to_mlx5_st(init_attr->qp_type) << 16 |
965 MLX5_QP_PM_MIGRATED << 11);
966
967 if (init_attr->qp_type != MLX5_IB_QPT_REG_UMR)
968 in->ctx.flags_pd = cpu_to_be32(to_mpd(pd ? pd : devr->p0)->pdn);
969 else
970 in->ctx.flags_pd = cpu_to_be32(MLX5_QP_LAT_SENSITIVE);
971
972 if (qp->wq_sig)
973 in->ctx.flags_pd |= cpu_to_be32(MLX5_QP_ENABLE_SIG);
974
Eli Cohenf360d882014-04-02 00:10:16 +0300975 if (qp->flags & MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK)
976 in->ctx.flags_pd |= cpu_to_be32(MLX5_QP_BLOCK_MCAST);
977
Leon Romanovsky051f2632015-12-20 12:16:11 +0200978 if (qp->flags & MLX5_IB_QP_CROSS_CHANNEL)
979 in->ctx.params2 |= cpu_to_be32(MLX5_QP_BIT_CC_MASTER);
980 if (qp->flags & MLX5_IB_QP_MANAGED_SEND)
981 in->ctx.params2 |= cpu_to_be32(MLX5_QP_BIT_CC_SLAVE_SEND);
982 if (qp->flags & MLX5_IB_QP_MANAGED_RECV)
983 in->ctx.params2 |= cpu_to_be32(MLX5_QP_BIT_CC_SLAVE_RECV);
984
Eli Cohene126ba92013-07-07 17:25:49 +0300985 if (qp->scat_cqe && is_connected(init_attr->qp_type)) {
986 int rcqe_sz;
987 int scqe_sz;
988
989 rcqe_sz = mlx5_ib_get_cqe_size(dev, init_attr->recv_cq);
990 scqe_sz = mlx5_ib_get_cqe_size(dev, init_attr->send_cq);
991
992 if (rcqe_sz == 128)
993 in->ctx.cs_res = MLX5_RES_SCAT_DATA64_CQE;
994 else
995 in->ctx.cs_res = MLX5_RES_SCAT_DATA32_CQE;
996
997 if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) {
998 if (scqe_sz == 128)
999 in->ctx.cs_req = MLX5_REQ_SCAT_DATA64_CQE;
1000 else
1001 in->ctx.cs_req = MLX5_REQ_SCAT_DATA32_CQE;
1002 }
1003 }
1004
1005 if (qp->rq.wqe_cnt) {
1006 in->ctx.rq_size_stride = (qp->rq.wqe_shift - 4);
1007 in->ctx.rq_size_stride |= ilog2(qp->rq.wqe_cnt) << 3;
1008 }
1009
1010 in->ctx.rq_type_srqn = get_rx_type(qp, init_attr);
1011
1012 if (qp->sq.wqe_cnt)
1013 in->ctx.sq_crq_size |= cpu_to_be16(ilog2(qp->sq.wqe_cnt) << 11);
1014 else
1015 in->ctx.sq_crq_size |= cpu_to_be16(0x8000);
1016
1017 /* Set default resources */
1018 switch (init_attr->qp_type) {
1019 case IB_QPT_XRC_TGT:
1020 in->ctx.cqn_recv = cpu_to_be32(to_mcq(devr->c0)->mcq.cqn);
1021 in->ctx.cqn_send = cpu_to_be32(to_mcq(devr->c0)->mcq.cqn);
1022 in->ctx.rq_type_srqn |= cpu_to_be32(to_msrq(devr->s0)->msrq.srqn);
1023 in->ctx.xrcd = cpu_to_be32(to_mxrcd(init_attr->xrcd)->xrcdn);
1024 break;
1025 case IB_QPT_XRC_INI:
1026 in->ctx.cqn_recv = cpu_to_be32(to_mcq(devr->c0)->mcq.cqn);
1027 in->ctx.xrcd = cpu_to_be32(to_mxrcd(devr->x1)->xrcdn);
1028 in->ctx.rq_type_srqn |= cpu_to_be32(to_msrq(devr->s0)->msrq.srqn);
1029 break;
1030 default:
1031 if (init_attr->srq) {
1032 in->ctx.xrcd = cpu_to_be32(to_mxrcd(devr->x0)->xrcdn);
1033 in->ctx.rq_type_srqn |= cpu_to_be32(to_msrq(init_attr->srq)->msrq.srqn);
1034 } else {
1035 in->ctx.xrcd = cpu_to_be32(to_mxrcd(devr->x1)->xrcdn);
Haggai Abramonvsky4aa17b22015-06-04 19:30:48 +03001036 in->ctx.rq_type_srqn |=
1037 cpu_to_be32(to_msrq(devr->s1)->msrq.srqn);
Eli Cohene126ba92013-07-07 17:25:49 +03001038 }
1039 }
1040
1041 if (init_attr->send_cq)
1042 in->ctx.cqn_send = cpu_to_be32(to_mcq(init_attr->send_cq)->mcq.cqn);
1043
1044 if (init_attr->recv_cq)
1045 in->ctx.cqn_recv = cpu_to_be32(to_mcq(init_attr->recv_cq)->mcq.cqn);
1046
1047 in->ctx.db_rec_addr = cpu_to_be64(qp->db.dma);
1048
Jack Morgenstein9603b612014-07-28 23:30:22 +03001049 err = mlx5_core_create_qp(dev->mdev, &qp->mqp, in, inlen);
Eli Cohene126ba92013-07-07 17:25:49 +03001050 if (err) {
1051 mlx5_ib_dbg(dev, "create qp failed\n");
1052 goto err_create;
1053 }
1054
Al Viro479163f2014-11-20 08:13:57 +00001055 kvfree(in);
Eli Cohene126ba92013-07-07 17:25:49 +03001056 /* Hardware wants QPN written in big-endian order (after
1057 * shifting) for send doorbell. Precompute this value to save
1058 * a little bit when posting sends.
1059 */
1060 qp->doorbell_qpn = swab32(qp->mqp.qpn << 8);
1061
1062 qp->mqp.event = mlx5_ib_qp_event;
1063
1064 return 0;
1065
1066err_create:
1067 if (qp->create_type == MLX5_QP_USER)
1068 destroy_qp_user(pd, qp);
1069 else if (qp->create_type == MLX5_QP_KERNEL)
1070 destroy_qp_kernel(dev, qp);
1071
Al Viro479163f2014-11-20 08:13:57 +00001072 kvfree(in);
Eli Cohene126ba92013-07-07 17:25:49 +03001073 return err;
1074}
1075
/*
 * Lock the send and receive CQ spinlocks of a QP in a globally consistent
 * order (ascending CQN) so two contexts locking the same pair of CQs can
 * never deadlock.  Either pointer may be NULL (e.g. XRC targets have no
 * CQs, XRC initiators have no recv CQ); the __acquire() calls in those
 * branches keep sparse's lock-balance checking consistent with
 * mlx5_ib_unlock_cqs() without taking a real lock.
 */
static void mlx5_ib_lock_cqs(struct mlx5_ib_cq *send_cq, struct mlx5_ib_cq *recv_cq)
	__acquires(&send_cq->lock) __acquires(&recv_cq->lock)
{
	if (send_cq) {
		if (recv_cq) {
			if (send_cq->mcq.cqn < recv_cq->mcq.cqn) {
				spin_lock_irq(&send_cq->lock);
				spin_lock_nested(&recv_cq->lock,
						 SINGLE_DEPTH_NESTING);
			} else if (send_cq->mcq.cqn == recv_cq->mcq.cqn) {
				/* Same CQ for send and recv: take the lock once */
				spin_lock_irq(&send_cq->lock);
				__acquire(&recv_cq->lock);
			} else {
				spin_lock_irq(&recv_cq->lock);
				spin_lock_nested(&send_cq->lock,
						 SINGLE_DEPTH_NESTING);
			}
		} else {
			spin_lock_irq(&send_cq->lock);
			__acquire(&recv_cq->lock);
		}
	} else if (recv_cq) {
		spin_lock_irq(&recv_cq->lock);
		__acquire(&send_cq->lock);
	} else {
		__acquire(&send_cq->lock);
		__acquire(&recv_cq->lock);
	}
}
1105
/*
 * Release the CQ locks taken by mlx5_ib_lock_cqs(), in exact reverse
 * order of acquisition (inner lock first, then the irq-disabling outer
 * lock).  The branch structure must mirror mlx5_ib_lock_cqs(); the
 * __release() calls balance the __acquire() annotations for sparse.
 */
static void mlx5_ib_unlock_cqs(struct mlx5_ib_cq *send_cq, struct mlx5_ib_cq *recv_cq)
	__releases(&send_cq->lock) __releases(&recv_cq->lock)
{
	if (send_cq) {
		if (recv_cq) {
			if (send_cq->mcq.cqn < recv_cq->mcq.cqn) {
				spin_unlock(&recv_cq->lock);
				spin_unlock_irq(&send_cq->lock);
			} else if (send_cq->mcq.cqn == recv_cq->mcq.cqn) {
				/* Single shared lock: drop it once */
				__release(&recv_cq->lock);
				spin_unlock_irq(&send_cq->lock);
			} else {
				spin_unlock(&send_cq->lock);
				spin_unlock_irq(&recv_cq->lock);
			}
		} else {
			__release(&recv_cq->lock);
			spin_unlock_irq(&send_cq->lock);
		}
	} else if (recv_cq) {
		__release(&send_cq->lock);
		spin_unlock_irq(&recv_cq->lock);
	} else {
		__release(&recv_cq->lock);
		__release(&send_cq->lock);
	}
}
1133
1134static struct mlx5_ib_pd *get_pd(struct mlx5_ib_qp *qp)
1135{
1136 return to_mpd(qp->ibqp.pd);
1137}
1138
/*
 * Resolve the send/recv completion queues attached to a QP by service
 * type.  XRC targets use no CQs at all; XRC initiators and the internal
 * REG_UMR QP have only a send CQ.  Raw packet QPs are not supported by
 * this driver version, so they (and unknown types) yield NULL for both.
 * Callers (e.g. destroy_qp_common) must tolerate NULL results.
 */
static void get_cqs(struct mlx5_ib_qp *qp,
		    struct mlx5_ib_cq **send_cq, struct mlx5_ib_cq **recv_cq)
{
	switch (qp->ibqp.qp_type) {
	case IB_QPT_XRC_TGT:
		*send_cq = NULL;
		*recv_cq = NULL;
		break;
	case MLX5_IB_QPT_REG_UMR:
	case IB_QPT_XRC_INI:
		*send_cq = to_mcq(qp->ibqp.send_cq);
		*recv_cq = NULL;
		break;

	case IB_QPT_SMI:
	case IB_QPT_GSI:
	case IB_QPT_RC:
	case IB_QPT_UC:
	case IB_QPT_UD:
	case IB_QPT_RAW_IPV6:
	case IB_QPT_RAW_ETHERTYPE:
		*send_cq = to_mcq(qp->ibqp.send_cq);
		*recv_cq = to_mcq(qp->ibqp.recv_cq);
		break;

	case IB_QPT_RAW_PACKET:
	case IB_QPT_MAX:
	default:
		*send_cq = NULL;
		*recv_cq = NULL;
		break;
	}
}
1172
/*
 * Common teardown for user and kernel QPs.  Order matters:
 *  1. if the QP is not already in RESET, disable/flush ODP page faults
 *     first so no stale fault works on the QP after reset, then move the
 *     QP to RST in firmware;
 *  2. for kernel QPs, purge the QP's CQEs from its CQs under both CQ
 *     locks;
 *  3. destroy the firmware QP object;
 *  4. free the user or kernel resources.
 */
static void destroy_qp_common(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp)
{
	struct mlx5_ib_cq *send_cq, *recv_cq;
	struct mlx5_modify_qp_mbox_in *in;
	int err;

	in = kzalloc(sizeof(*in), GFP_KERNEL);
	if (!in)
		/* NOTE(review): silently bailing out here leaks the HW QP
		 * and its buffers; destroy paths have no way to report
		 * failure to the caller.  Kept as-is. */
		return;

	if (qp->state != IB_QPS_RESET) {
		mlx5_ib_qp_disable_pagefaults(qp);
		if (mlx5_core_qp_modify(dev->mdev, to_mlx5_state(qp->state),
					MLX5_QP_STATE_RST, in, 0, &qp->mqp))
			mlx5_ib_warn(dev, "mlx5_ib: modify QP %06x to RESET failed\n",
				     qp->mqp.qpn);
	}

	get_cqs(qp, &send_cq, &recv_cq);

	if (qp->create_type == MLX5_QP_KERNEL) {
		mlx5_ib_lock_cqs(send_cq, recv_cq);
		__mlx5_ib_cq_clean(recv_cq, qp->mqp.qpn,
				   qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
		/* send and recv may share one CQ; clean it only once */
		if (send_cq != recv_cq)
			__mlx5_ib_cq_clean(send_cq, qp->mqp.qpn, NULL);
		mlx5_ib_unlock_cqs(send_cq, recv_cq);
	}

	err = mlx5_core_destroy_qp(dev->mdev, &qp->mqp);
	if (err)
		mlx5_ib_warn(dev, "failed to destroy QP 0x%x\n", qp->mqp.qpn);
	kfree(in);


	if (qp->create_type == MLX5_QP_KERNEL)
		destroy_qp_kernel(dev, qp);
	else if (qp->create_type == MLX5_QP_USER)
		destroy_qp_user(&get_pd(qp)->ibpd, qp);
}
1213
1214static const char *ib_qp_type_str(enum ib_qp_type type)
1215{
1216 switch (type) {
1217 case IB_QPT_SMI:
1218 return "IB_QPT_SMI";
1219 case IB_QPT_GSI:
1220 return "IB_QPT_GSI";
1221 case IB_QPT_RC:
1222 return "IB_QPT_RC";
1223 case IB_QPT_UC:
1224 return "IB_QPT_UC";
1225 case IB_QPT_UD:
1226 return "IB_QPT_UD";
1227 case IB_QPT_RAW_IPV6:
1228 return "IB_QPT_RAW_IPV6";
1229 case IB_QPT_RAW_ETHERTYPE:
1230 return "IB_QPT_RAW_ETHERTYPE";
1231 case IB_QPT_XRC_INI:
1232 return "IB_QPT_XRC_INI";
1233 case IB_QPT_XRC_TGT:
1234 return "IB_QPT_XRC_TGT";
1235 case IB_QPT_RAW_PACKET:
1236 return "IB_QPT_RAW_PACKET";
1237 case MLX5_IB_QPT_REG_UMR:
1238 return "MLX5_IB_QPT_REG_UMR";
1239 case IB_QPT_MAX:
1240 default:
1241 return "Invalid QP type";
1242 }
1243}
1244
1245struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd,
1246 struct ib_qp_init_attr *init_attr,
1247 struct ib_udata *udata)
1248{
1249 struct mlx5_ib_dev *dev;
1250 struct mlx5_ib_qp *qp;
1251 u16 xrcdn = 0;
1252 int err;
1253
1254 if (pd) {
1255 dev = to_mdev(pd->device);
1256 } else {
1257 /* being cautious here */
1258 if (init_attr->qp_type != IB_QPT_XRC_TGT &&
1259 init_attr->qp_type != MLX5_IB_QPT_REG_UMR) {
1260 pr_warn("%s: no PD for transport %s\n", __func__,
1261 ib_qp_type_str(init_attr->qp_type));
1262 return ERR_PTR(-EINVAL);
1263 }
1264 dev = to_mdev(to_mxrcd(init_attr->xrcd)->ibxrcd.device);
1265 }
1266
1267 switch (init_attr->qp_type) {
1268 case IB_QPT_XRC_TGT:
1269 case IB_QPT_XRC_INI:
Saeed Mahameed938fe832015-05-28 22:28:41 +03001270 if (!MLX5_CAP_GEN(dev->mdev, xrc)) {
Eli Cohene126ba92013-07-07 17:25:49 +03001271 mlx5_ib_dbg(dev, "XRC not supported\n");
1272 return ERR_PTR(-ENOSYS);
1273 }
1274 init_attr->recv_cq = NULL;
1275 if (init_attr->qp_type == IB_QPT_XRC_TGT) {
1276 xrcdn = to_mxrcd(init_attr->xrcd)->xrcdn;
1277 init_attr->send_cq = NULL;
1278 }
1279
1280 /* fall through */
1281 case IB_QPT_RC:
1282 case IB_QPT_UC:
1283 case IB_QPT_UD:
1284 case IB_QPT_SMI:
1285 case IB_QPT_GSI:
1286 case MLX5_IB_QPT_REG_UMR:
1287 qp = kzalloc(sizeof(*qp), GFP_KERNEL);
1288 if (!qp)
1289 return ERR_PTR(-ENOMEM);
1290
1291 err = create_qp_common(dev, pd, init_attr, udata, qp);
1292 if (err) {
1293 mlx5_ib_dbg(dev, "create_qp_common failed\n");
1294 kfree(qp);
1295 return ERR_PTR(err);
1296 }
1297
1298 if (is_qp0(init_attr->qp_type))
1299 qp->ibqp.qp_num = 0;
1300 else if (is_qp1(init_attr->qp_type))
1301 qp->ibqp.qp_num = 1;
1302 else
1303 qp->ibqp.qp_num = qp->mqp.qpn;
1304
1305 mlx5_ib_dbg(dev, "ib qpnum 0x%x, mlx qpn 0x%x, rcqn 0x%x, scqn 0x%x\n",
1306 qp->ibqp.qp_num, qp->mqp.qpn, to_mcq(init_attr->recv_cq)->mcq.cqn,
1307 to_mcq(init_attr->send_cq)->mcq.cqn);
1308
1309 qp->xrcdn = xrcdn;
1310
1311 break;
1312
1313 case IB_QPT_RAW_IPV6:
1314 case IB_QPT_RAW_ETHERTYPE:
1315 case IB_QPT_RAW_PACKET:
1316 case IB_QPT_MAX:
1317 default:
1318 mlx5_ib_dbg(dev, "unsupported qp type %d\n",
1319 init_attr->qp_type);
1320 /* Don't support raw QPs */
1321 return ERR_PTR(-EINVAL);
1322 }
1323
1324 return &qp->ibqp;
1325}
1326
1327int mlx5_ib_destroy_qp(struct ib_qp *qp)
1328{
1329 struct mlx5_ib_dev *dev = to_mdev(qp->device);
1330 struct mlx5_ib_qp *mqp = to_mqp(qp);
1331
1332 destroy_qp_common(dev, mqp);
1333
1334 kfree(mqp);
1335
1336 return 0;
1337}
1338
1339static __be32 to_mlx5_access_flags(struct mlx5_ib_qp *qp, const struct ib_qp_attr *attr,
1340 int attr_mask)
1341{
1342 u32 hw_access_flags = 0;
1343 u8 dest_rd_atomic;
1344 u32 access_flags;
1345
1346 if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
1347 dest_rd_atomic = attr->max_dest_rd_atomic;
1348 else
1349 dest_rd_atomic = qp->resp_depth;
1350
1351 if (attr_mask & IB_QP_ACCESS_FLAGS)
1352 access_flags = attr->qp_access_flags;
1353 else
1354 access_flags = qp->atomic_rd_en;
1355
1356 if (!dest_rd_atomic)
1357 access_flags &= IB_ACCESS_REMOTE_WRITE;
1358
1359 if (access_flags & IB_ACCESS_REMOTE_READ)
1360 hw_access_flags |= MLX5_QP_BIT_RRE;
1361 if (access_flags & IB_ACCESS_REMOTE_ATOMIC)
1362 hw_access_flags |= (MLX5_QP_BIT_RAE | MLX5_ATOMIC_MODE_CX);
1363 if (access_flags & IB_ACCESS_REMOTE_WRITE)
1364 hw_access_flags |= MLX5_QP_BIT_RWE;
1365
1366 return cpu_to_be32(hw_access_flags);
1367}
1368
/* Flag bits for the path_flags parameter of mlx5_set_path() */
enum {
	MLX5_PATH_FLAG_FL	= 1 << 0,
	MLX5_PATH_FLAG_FREE_AR	= 1 << 1,
	MLX5_PATH_FLAG_COUNTER	= 1 << 2,
};
1374
1375static int ib_rate_to_mlx5(struct mlx5_ib_dev *dev, u8 rate)
1376{
1377 if (rate == IB_RATE_PORT_CURRENT) {
1378 return 0;
1379 } else if (rate < IB_RATE_2_5_GBPS || rate > IB_RATE_300_GBPS) {
1380 return -EINVAL;
1381 } else {
1382 while (rate != IB_RATE_2_5_GBPS &&
1383 !(1 << (rate + MLX5_STAT_RATE_OFFSET) &
Saeed Mahameed938fe832015-05-28 22:28:41 +03001384 MLX5_CAP_GEN(dev->mdev, stat_rate_support)))
Eli Cohene126ba92013-07-07 17:25:49 +03001385 --rate;
1386 }
1387
1388 return rate + MLX5_STAT_RATE_OFFSET;
1389}
1390
/*
 * Fill a hardware address path (primary or alternate) from an ib_ah_attr.
 *
 * For Ethernet (RoCE) ports a GRH is mandatory and the path carries the
 * destination MAC, UDP source port and SL; for IB ports it carries LID,
 * source-path bits and the FL/free-AR path_flags.  The GRH fields
 * (sgid index, hop limit, tclass/flow label, DGID) are programmed for
 * both link layers when IB_AH_GRH is set.
 *
 * Returns 0 on success, -EINVAL for a bad sgid index or a missing GRH on
 * RoCE, or the negative error from ib_rate_to_mlx5().
 */
static int mlx5_set_path(struct mlx5_ib_dev *dev, const struct ib_ah_attr *ah,
			 struct mlx5_qp_path *path, u8 port, int attr_mask,
			 u32 path_flags, const struct ib_qp_attr *attr)
{
	enum rdma_link_layer ll = rdma_port_get_link_layer(&dev->ib_dev, port);
	int err;

	if (attr_mask & IB_QP_PKEY_INDEX)
		path->pkey_index = attr->pkey_index;

	if (ah->ah_flags & IB_AH_GRH) {
		/* Validate the sgid index against this port's GID table */
		if (ah->grh.sgid_index >=
		    dev->mdev->port_caps[port - 1].gid_table_len) {
			pr_err("sgid_index (%u) too large. max is %d\n",
			       ah->grh.sgid_index,
			       dev->mdev->port_caps[port - 1].gid_table_len);
			return -EINVAL;
		}
	}

	if (ll == IB_LINK_LAYER_ETHERNET) {
		/* RoCE addressing requires a GRH */
		if (!(ah->ah_flags & IB_AH_GRH))
			return -EINVAL;
		memcpy(path->rmac, ah->dmac, sizeof(ah->dmac));
		path->udp_sport = mlx5_get_roce_udp_sport(dev, port,
							  ah->grh.sgid_index);
		path->dci_cfi_prio_sl = (ah->sl & 0x7) << 4;
	} else {
		path->fl = (path_flags & MLX5_PATH_FLAG_FL) ? 0x80 : 0;
		path->free_ar = (path_flags & MLX5_PATH_FLAG_FREE_AR) ? 0x80 :
									0;
		path->rlid = cpu_to_be16(ah->dlid);
		path->grh_mlid = ah->src_path_bits & 0x7f;
		/* Top bit of grh_mlid flags GRH presence on IB links */
		if (ah->ah_flags & IB_AH_GRH)
			path->grh_mlid |= 1 << 7;
		path->dci_cfi_prio_sl = ah->sl & 0xf;
	}

	if (ah->ah_flags & IB_AH_GRH) {
		path->mgid_index = ah->grh.sgid_index;
		path->hop_limit = ah->grh.hop_limit;
		path->tclass_flowlabel =
			cpu_to_be32((ah->grh.traffic_class << 20) |
				    (ah->grh.flow_label));
		memcpy(path->rgid, ah->grh.dgid.raw, 16);
	}

	err = ib_rate_to_mlx5(dev, ah->static_rate);
	if (err < 0)
		return err;
	path->static_rate = err;
	path->port = port;

	if (attr_mask & IB_QP_TIMEOUT)
		path->ackto_lt = attr->timeout << 3;

	return 0;
}
1449
/*
 * Per-transition table of the optional-parameter (optpar) bits that are
 * legal to modify, indexed as [current state][next state][service type].
 * __mlx5_ib_modify_qp() ANDs the mask derived from the caller's
 * attr_mask against this table so the MODIFY_QP command only carries
 * parameters the firmware accepts for that transition.  Entries not
 * listed are implicitly 0 (no optional parameters allowed).
 */
static enum mlx5_qp_optpar opt_mask[MLX5_QP_NUM_STATE][MLX5_QP_NUM_STATE][MLX5_QP_ST_MAX] = {
	[MLX5_QP_STATE_INIT] = {
		[MLX5_QP_STATE_INIT] = {
			[MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_RRE |
					  MLX5_QP_OPTPAR_RAE |
					  MLX5_QP_OPTPAR_RWE |
					  MLX5_QP_OPTPAR_PKEY_INDEX |
					  MLX5_QP_OPTPAR_PRI_PORT,
			[MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_RWE |
					  MLX5_QP_OPTPAR_PKEY_INDEX |
					  MLX5_QP_OPTPAR_PRI_PORT,
			[MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_PKEY_INDEX |
					  MLX5_QP_OPTPAR_Q_KEY |
					  MLX5_QP_OPTPAR_PRI_PORT,
		},
		[MLX5_QP_STATE_RTR] = {
			[MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH |
					  MLX5_QP_OPTPAR_RRE |
					  MLX5_QP_OPTPAR_RAE |
					  MLX5_QP_OPTPAR_RWE |
					  MLX5_QP_OPTPAR_PKEY_INDEX,
			[MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH |
					  MLX5_QP_OPTPAR_RWE |
					  MLX5_QP_OPTPAR_PKEY_INDEX,
			[MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_PKEY_INDEX |
					  MLX5_QP_OPTPAR_Q_KEY,
			[MLX5_QP_ST_MLX] = MLX5_QP_OPTPAR_PKEY_INDEX |
					   MLX5_QP_OPTPAR_Q_KEY,
			[MLX5_QP_ST_XRC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH |
					   MLX5_QP_OPTPAR_RRE |
					   MLX5_QP_OPTPAR_RAE |
					   MLX5_QP_OPTPAR_RWE |
					   MLX5_QP_OPTPAR_PKEY_INDEX,
		},
	},
	[MLX5_QP_STATE_RTR] = {
		[MLX5_QP_STATE_RTS] = {
			[MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH |
					  MLX5_QP_OPTPAR_RRE |
					  MLX5_QP_OPTPAR_RAE |
					  MLX5_QP_OPTPAR_RWE |
					  MLX5_QP_OPTPAR_PM_STATE |
					  MLX5_QP_OPTPAR_RNR_TIMEOUT,
			[MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH |
					  MLX5_QP_OPTPAR_RWE |
					  MLX5_QP_OPTPAR_PM_STATE,
			[MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_Q_KEY,
		},
	},
	[MLX5_QP_STATE_RTS] = {
		[MLX5_QP_STATE_RTS] = {
			[MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_RRE |
					  MLX5_QP_OPTPAR_RAE |
					  MLX5_QP_OPTPAR_RWE |
					  MLX5_QP_OPTPAR_RNR_TIMEOUT |
					  MLX5_QP_OPTPAR_PM_STATE |
					  MLX5_QP_OPTPAR_ALT_ADDR_PATH,
			[MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_RWE |
					  MLX5_QP_OPTPAR_PM_STATE |
					  MLX5_QP_OPTPAR_ALT_ADDR_PATH,
			[MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_Q_KEY |
					  MLX5_QP_OPTPAR_SRQN |
					  MLX5_QP_OPTPAR_CQN_RCV,
		},
	},
	[MLX5_QP_STATE_SQER] = {
		[MLX5_QP_STATE_RTS] = {
			[MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_Q_KEY,
			[MLX5_QP_ST_MLX] = MLX5_QP_OPTPAR_Q_KEY,
			[MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_RWE,
			[MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_RNR_TIMEOUT |
					  MLX5_QP_OPTPAR_RWE |
					  MLX5_QP_OPTPAR_RAE |
					  MLX5_QP_OPTPAR_RRE,
		},
	},
};
1527
/*
 * Translate a single IB_QP_* attribute-mask bit into the mlx5 optpar
 * bit(s) the MODIFY_QP command needs for that attribute.  Attributes the
 * hardware handles implicitly (state, PSNs, caps, MTU, ...) map to 0.
 */
static int ib_nr_to_mlx5_nr(int ib_mask)
{
	switch (ib_mask) {
	case IB_QP_STATE:
		return 0;
	case IB_QP_CUR_STATE:
		return 0;
	case IB_QP_EN_SQD_ASYNC_NOTIFY:
		return 0;
	case IB_QP_ACCESS_FLAGS:
		return MLX5_QP_OPTPAR_RWE | MLX5_QP_OPTPAR_RRE |
			MLX5_QP_OPTPAR_RAE;
	case IB_QP_PKEY_INDEX:
		return MLX5_QP_OPTPAR_PKEY_INDEX;
	case IB_QP_PORT:
		return MLX5_QP_OPTPAR_PRI_PORT;
	case IB_QP_QKEY:
		return MLX5_QP_OPTPAR_Q_KEY;
	case IB_QP_AV:
		return MLX5_QP_OPTPAR_PRIMARY_ADDR_PATH |
			MLX5_QP_OPTPAR_PRI_PORT;
	case IB_QP_PATH_MTU:
		return 0;
	case IB_QP_TIMEOUT:
		return MLX5_QP_OPTPAR_ACK_TIMEOUT;
	case IB_QP_RETRY_CNT:
		return MLX5_QP_OPTPAR_RETRY_COUNT;
	case IB_QP_RNR_RETRY:
		return MLX5_QP_OPTPAR_RNR_RETRY;
	case IB_QP_RQ_PSN:
		return 0;
	case IB_QP_MAX_QP_RD_ATOMIC:
		return MLX5_QP_OPTPAR_SRA_MAX;
	case IB_QP_ALT_PATH:
		return MLX5_QP_OPTPAR_ALT_ADDR_PATH;
	case IB_QP_MIN_RNR_TIMER:
		return MLX5_QP_OPTPAR_RNR_TIMEOUT;
	case IB_QP_SQ_PSN:
		return 0;
	case IB_QP_MAX_DEST_RD_ATOMIC:
		return MLX5_QP_OPTPAR_RRA_MAX | MLX5_QP_OPTPAR_RWE |
			MLX5_QP_OPTPAR_RRE | MLX5_QP_OPTPAR_RAE;
	case IB_QP_PATH_MIG_STATE:
		return MLX5_QP_OPTPAR_PM_STATE;
	case IB_QP_CAP:
		return 0;
	case IB_QP_DEST_QPN:
		return 0;
	}
	return 0;
}
1579
/* Fold every set bit of an IB attribute mask through ib_nr_to_mlx5_nr()
 * and OR the results into one mlx5 optpar mask.
 */
static int ib_mask_to_mlx5_opt(int ib_mask)
{
	unsigned int bit;
	int result = 0;

	for (bit = 0; bit < 8 * sizeof(int); bit++)
		if (ib_mask & (1 << bit))
			result |= ib_nr_to_mlx5_nr(1 << bit);

	return result;
}
1592
1593static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
1594 const struct ib_qp_attr *attr, int attr_mask,
1595 enum ib_qp_state cur_state, enum ib_qp_state new_state)
1596{
1597 struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
1598 struct mlx5_ib_qp *qp = to_mqp(ibqp);
1599 struct mlx5_ib_cq *send_cq, *recv_cq;
1600 struct mlx5_qp_context *context;
1601 struct mlx5_modify_qp_mbox_in *in;
1602 struct mlx5_ib_pd *pd;
1603 enum mlx5_qp_state mlx5_cur, mlx5_new;
1604 enum mlx5_qp_optpar optpar;
1605 int sqd_event;
1606 int mlx5_st;
1607 int err;
1608
1609 in = kzalloc(sizeof(*in), GFP_KERNEL);
1610 if (!in)
1611 return -ENOMEM;
1612
1613 context = &in->ctx;
1614 err = to_mlx5_st(ibqp->qp_type);
1615 if (err < 0)
1616 goto out;
1617
1618 context->flags = cpu_to_be32(err << 16);
1619
1620 if (!(attr_mask & IB_QP_PATH_MIG_STATE)) {
1621 context->flags |= cpu_to_be32(MLX5_QP_PM_MIGRATED << 11);
1622 } else {
1623 switch (attr->path_mig_state) {
1624 case IB_MIG_MIGRATED:
1625 context->flags |= cpu_to_be32(MLX5_QP_PM_MIGRATED << 11);
1626 break;
1627 case IB_MIG_REARM:
1628 context->flags |= cpu_to_be32(MLX5_QP_PM_REARM << 11);
1629 break;
1630 case IB_MIG_ARMED:
1631 context->flags |= cpu_to_be32(MLX5_QP_PM_ARMED << 11);
1632 break;
1633 }
1634 }
1635
1636 if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_SMI) {
1637 context->mtu_msgmax = (IB_MTU_256 << 5) | 8;
1638 } else if (ibqp->qp_type == IB_QPT_UD ||
1639 ibqp->qp_type == MLX5_IB_QPT_REG_UMR) {
1640 context->mtu_msgmax = (IB_MTU_4096 << 5) | 12;
1641 } else if (attr_mask & IB_QP_PATH_MTU) {
1642 if (attr->path_mtu < IB_MTU_256 ||
1643 attr->path_mtu > IB_MTU_4096) {
1644 mlx5_ib_warn(dev, "invalid mtu %d\n", attr->path_mtu);
1645 err = -EINVAL;
1646 goto out;
1647 }
Saeed Mahameed938fe832015-05-28 22:28:41 +03001648 context->mtu_msgmax = (attr->path_mtu << 5) |
1649 (u8)MLX5_CAP_GEN(dev->mdev, log_max_msg);
Eli Cohene126ba92013-07-07 17:25:49 +03001650 }
1651
1652 if (attr_mask & IB_QP_DEST_QPN)
1653 context->log_pg_sz_remote_qpn = cpu_to_be32(attr->dest_qp_num);
1654
1655 if (attr_mask & IB_QP_PKEY_INDEX)
1656 context->pri_path.pkey_index = attr->pkey_index;
1657
1658 /* todo implement counter_index functionality */
1659
1660 if (is_sqp(ibqp->qp_type))
1661 context->pri_path.port = qp->port;
1662
1663 if (attr_mask & IB_QP_PORT)
1664 context->pri_path.port = attr->port_num;
1665
1666 if (attr_mask & IB_QP_AV) {
1667 err = mlx5_set_path(dev, &attr->ah_attr, &context->pri_path,
1668 attr_mask & IB_QP_PORT ? attr->port_num : qp->port,
1669 attr_mask, 0, attr);
1670 if (err)
1671 goto out;
1672 }
1673
1674 if (attr_mask & IB_QP_TIMEOUT)
1675 context->pri_path.ackto_lt |= attr->timeout << 3;
1676
1677 if (attr_mask & IB_QP_ALT_PATH) {
1678 err = mlx5_set_path(dev, &attr->alt_ah_attr, &context->alt_path,
1679 attr->alt_port_num, attr_mask, 0, attr);
1680 if (err)
1681 goto out;
1682 }
1683
1684 pd = get_pd(qp);
1685 get_cqs(qp, &send_cq, &recv_cq);
1686
1687 context->flags_pd = cpu_to_be32(pd ? pd->pdn : to_mpd(dev->devr.p0)->pdn);
1688 context->cqn_send = send_cq ? cpu_to_be32(send_cq->mcq.cqn) : 0;
1689 context->cqn_recv = recv_cq ? cpu_to_be32(recv_cq->mcq.cqn) : 0;
1690 context->params1 = cpu_to_be32(MLX5_IB_ACK_REQ_FREQ << 28);
1691
1692 if (attr_mask & IB_QP_RNR_RETRY)
1693 context->params1 |= cpu_to_be32(attr->rnr_retry << 13);
1694
1695 if (attr_mask & IB_QP_RETRY_CNT)
1696 context->params1 |= cpu_to_be32(attr->retry_cnt << 16);
1697
1698 if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
1699 if (attr->max_rd_atomic)
1700 context->params1 |=
1701 cpu_to_be32(fls(attr->max_rd_atomic - 1) << 21);
1702 }
1703
1704 if (attr_mask & IB_QP_SQ_PSN)
1705 context->next_send_psn = cpu_to_be32(attr->sq_psn);
1706
1707 if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
1708 if (attr->max_dest_rd_atomic)
1709 context->params2 |=
1710 cpu_to_be32(fls(attr->max_dest_rd_atomic - 1) << 21);
1711 }
1712
1713 if (attr_mask & (IB_QP_ACCESS_FLAGS | IB_QP_MAX_DEST_RD_ATOMIC))
1714 context->params2 |= to_mlx5_access_flags(qp, attr, attr_mask);
1715
1716 if (attr_mask & IB_QP_MIN_RNR_TIMER)
1717 context->rnr_nextrecvpsn |= cpu_to_be32(attr->min_rnr_timer << 24);
1718
1719 if (attr_mask & IB_QP_RQ_PSN)
1720 context->rnr_nextrecvpsn |= cpu_to_be32(attr->rq_psn);
1721
1722 if (attr_mask & IB_QP_QKEY)
1723 context->qkey = cpu_to_be32(attr->qkey);
1724
1725 if (qp->rq.wqe_cnt && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
1726 context->db_rec_addr = cpu_to_be64(qp->db.dma);
1727
1728 if (cur_state == IB_QPS_RTS && new_state == IB_QPS_SQD &&
1729 attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY && attr->en_sqd_async_notify)
1730 sqd_event = 1;
1731 else
1732 sqd_event = 0;
1733
1734 if (!ibqp->uobject && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
1735 context->sq_crq_size |= cpu_to_be16(1 << 4);
1736
1737
1738 mlx5_cur = to_mlx5_state(cur_state);
1739 mlx5_new = to_mlx5_state(new_state);
1740 mlx5_st = to_mlx5_st(ibqp->qp_type);
Eli Cohen07c91132013-10-24 12:01:01 +03001741 if (mlx5_st < 0)
Eli Cohene126ba92013-07-07 17:25:49 +03001742 goto out;
1743
Haggai Eran6aec21f2014-12-11 17:04:23 +02001744 /* If moving to a reset or error state, we must disable page faults on
1745 * this QP and flush all current page faults. Otherwise a stale page
1746 * fault may attempt to work on this QP after it is reset and moved
1747 * again to RTS, and may cause the driver and the device to get out of
1748 * sync. */
1749 if (cur_state != IB_QPS_RESET && cur_state != IB_QPS_ERR &&
1750 (new_state == IB_QPS_RESET || new_state == IB_QPS_ERR))
1751 mlx5_ib_qp_disable_pagefaults(qp);
1752
Eli Cohene126ba92013-07-07 17:25:49 +03001753 optpar = ib_mask_to_mlx5_opt(attr_mask);
1754 optpar &= opt_mask[mlx5_cur][mlx5_new][mlx5_st];
1755 in->optparam = cpu_to_be32(optpar);
Jack Morgenstein9603b612014-07-28 23:30:22 +03001756 err = mlx5_core_qp_modify(dev->mdev, to_mlx5_state(cur_state),
Eli Cohene126ba92013-07-07 17:25:49 +03001757 to_mlx5_state(new_state), in, sqd_event,
1758 &qp->mqp);
1759 if (err)
1760 goto out;
1761
Haggai Eran6aec21f2014-12-11 17:04:23 +02001762 if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
1763 mlx5_ib_qp_enable_pagefaults(qp);
1764
Eli Cohene126ba92013-07-07 17:25:49 +03001765 qp->state = new_state;
1766
1767 if (attr_mask & IB_QP_ACCESS_FLAGS)
1768 qp->atomic_rd_en = attr->qp_access_flags;
1769 if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
1770 qp->resp_depth = attr->max_dest_rd_atomic;
1771 if (attr_mask & IB_QP_PORT)
1772 qp->port = attr->port_num;
1773 if (attr_mask & IB_QP_ALT_PATH)
1774 qp->alt_port = attr->alt_port_num;
1775
1776 /*
1777 * If we moved a kernel QP to RESET, clean up all old CQ
1778 * entries and reinitialize the QP.
1779 */
1780 if (new_state == IB_QPS_RESET && !ibqp->uobject) {
1781 mlx5_ib_cq_clean(recv_cq, qp->mqp.qpn,
1782 ibqp->srq ? to_msrq(ibqp->srq) : NULL);
1783 if (send_cq != recv_cq)
1784 mlx5_ib_cq_clean(send_cq, qp->mqp.qpn, NULL);
1785
1786 qp->rq.head = 0;
1787 qp->rq.tail = 0;
1788 qp->sq.head = 0;
1789 qp->sq.tail = 0;
1790 qp->sq.cur_post = 0;
1791 qp->sq.last_poll = 0;
1792 qp->db.db[MLX5_RCV_DBR] = 0;
1793 qp->db.db[MLX5_SND_DBR] = 0;
1794 }
1795
1796out:
1797 kfree(in);
1798 return err;
1799}
1800
/*
 * Verbs modify_qp entry point.  Validates the requested transition and
 * attributes against the generic ib_modify_qp_is_ok() state machine and
 * the device's capabilities (port count, pkey table length, RD-atomic
 * limits), then delegates the actual context update to
 * __mlx5_ib_modify_qp().  All work is serialized under qp->mutex.
 * Returns 0 on success, -EINVAL on any validation failure.
 */
int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		      int attr_mask, struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
	struct mlx5_ib_qp *qp = to_mqp(ibqp);
	enum ib_qp_state cur_state, new_state;
	int err = -EINVAL;
	int port;
	enum rdma_link_layer ll = IB_LINK_LAYER_UNSPECIFIED;

	mutex_lock(&qp->mutex);

	cur_state = attr_mask & IB_QP_CUR_STATE ? attr->cur_qp_state : qp->state;
	new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;

	/* The link layer matters for every transition except RESET->RESET */
	if (!(cur_state == new_state && cur_state == IB_QPS_RESET)) {
		port = attr_mask & IB_QP_PORT ? attr->port_num : qp->port;
		ll = dev->ib_dev.get_link_layer(&dev->ib_dev, port);
	}

	/* The internal UMR QP bypasses the generic state-machine check */
	if (ibqp->qp_type != MLX5_IB_QPT_REG_UMR &&
	    !ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, attr_mask,
				ll))
		goto out;

	if ((attr_mask & IB_QP_PORT) &&
	    (attr->port_num == 0 ||
	     attr->port_num > MLX5_CAP_GEN(dev->mdev, num_ports)))
		goto out;

	if (attr_mask & IB_QP_PKEY_INDEX) {
		port = attr_mask & IB_QP_PORT ? attr->port_num : qp->port;
		if (attr->pkey_index >=
		    dev->mdev->port_caps[port - 1].pkey_table_len)
			goto out;
	}

	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
	    attr->max_rd_atomic >
	    (1 << MLX5_CAP_GEN(dev->mdev, log_max_ra_res_qp)))
		goto out;

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC &&
	    attr->max_dest_rd_atomic >
	    (1 << MLX5_CAP_GEN(dev->mdev, log_max_ra_req_qp)))
		goto out;

	/* RESET->RESET is a successful no-op */
	if (cur_state == new_state && cur_state == IB_QPS_RESET) {
		err = 0;
		goto out;
	}

	err = __mlx5_ib_modify_qp(ibqp, attr, attr_mask, cur_state, new_state);

out:
	mutex_unlock(&qp->mutex);
	return err;
}
1859
/*
 * Check whether posting @nreq more WRs would overflow work queue @wq.
 *
 * head and tail are free-running counters, so the unsigned difference
 * "head - tail" is the number of outstanding WQEs even across wraparound.
 * The first, unlocked comparison is an optimistic fast path; if it
 * indicates overflow we re-read the indices under the CQ lock to pick up
 * completions being polled concurrently on @ib_cq before deciding.
 *
 * Returns nonzero if the post would overflow, 0 if there is room.
 */
static int mlx5_wq_overflow(struct mlx5_ib_wq *wq, int nreq, struct ib_cq *ib_cq)
{
	struct mlx5_ib_cq *cq;
	unsigned cur;

	cur = wq->head - wq->tail;
	if (likely(cur + nreq < wq->max_post))
		return 0;

	cq = to_mcq(ib_cq);
	spin_lock(&cq->lock);
	cur = wq->head - wq->tail;
	spin_unlock(&cq->lock);

	return cur + nreq >= wq->max_post;
}
1876
1877static __always_inline void set_raddr_seg(struct mlx5_wqe_raddr_seg *rseg,
1878 u64 remote_addr, u32 rkey)
1879{
1880 rseg->raddr = cpu_to_be64(remote_addr);
1881 rseg->rkey = cpu_to_be32(rkey);
1882 rseg->reserved = 0;
1883}
1884
/*
 * Fill a UD datagram segment from the work request: copy the address
 * vector out of the AH, then set the destination QPN (flagged as an
 * extended UD AV) and the remote qkey.
 */
static void set_datagram_seg(struct mlx5_wqe_datagram_seg *dseg,
			     struct ib_send_wr *wr)
{
	memcpy(&dseg->av, &to_mah(ud_wr(wr)->ah)->av, sizeof(struct mlx5_av));
	dseg->av.dqp_dct = cpu_to_be32(ud_wr(wr)->remote_qpn | MLX5_EXTENDED_UD_AV);
	dseg->av.key.qkey.qkey = cpu_to_be32(ud_wr(wr)->remote_qkey);
}
1892
1893static void set_data_ptr_seg(struct mlx5_wqe_data_seg *dseg, struct ib_sge *sg)
1894{
1895 dseg->byte_count = cpu_to_be32(sg->length);
1896 dseg->lkey = cpu_to_be32(sg->lkey);
1897 dseg->addr = cpu_to_be64(sg->addr);
1898}
1899
/*
 * Convert an entry count to KLM octowords: entries are packed two per
 * octoword and the hardware requires rounding up to a multiple of
 * 8 entries (4 octowords).
 */
static __be16 get_klm_octo(int npages)
{
	return cpu_to_be16(ALIGN(npages, 8) / 2);
}
1904
1905static __be64 frwr_mkey_mask(void)
1906{
1907 u64 result;
1908
1909 result = MLX5_MKEY_MASK_LEN |
1910 MLX5_MKEY_MASK_PAGE_SIZE |
1911 MLX5_MKEY_MASK_START_ADDR |
1912 MLX5_MKEY_MASK_EN_RINVAL |
1913 MLX5_MKEY_MASK_KEY |
1914 MLX5_MKEY_MASK_LR |
1915 MLX5_MKEY_MASK_LW |
1916 MLX5_MKEY_MASK_RR |
1917 MLX5_MKEY_MASK_RW |
1918 MLX5_MKEY_MASK_A |
1919 MLX5_MKEY_MASK_SMALL_FENCE |
1920 MLX5_MKEY_MASK_FREE;
1921
1922 return cpu_to_be64(result);
1923}
1924
Sagi Grimberge6631812014-02-23 14:19:11 +02001925static __be64 sig_mkey_mask(void)
1926{
1927 u64 result;
1928
1929 result = MLX5_MKEY_MASK_LEN |
1930 MLX5_MKEY_MASK_PAGE_SIZE |
1931 MLX5_MKEY_MASK_START_ADDR |
Sagi Grimbergd5436ba2014-02-23 14:19:12 +02001932 MLX5_MKEY_MASK_EN_SIGERR |
Sagi Grimberge6631812014-02-23 14:19:11 +02001933 MLX5_MKEY_MASK_EN_RINVAL |
1934 MLX5_MKEY_MASK_KEY |
1935 MLX5_MKEY_MASK_LR |
1936 MLX5_MKEY_MASK_LW |
1937 MLX5_MKEY_MASK_RR |
1938 MLX5_MKEY_MASK_RW |
1939 MLX5_MKEY_MASK_SMALL_FENCE |
1940 MLX5_MKEY_MASK_FREE |
1941 MLX5_MKEY_MASK_BSF_EN;
1942
1943 return cpu_to_be64(result);
1944}
1945
/*
 * Build the UMR control segment for an IB_WR_REG_MR fast registration:
 * the mkey must currently be in use (CHECK_NOT_FREE) and the translation
 * table holds mr->ndescs entries.
 */
static void set_reg_umr_seg(struct mlx5_wqe_umr_ctrl_seg *umr,
			    struct mlx5_ib_mr *mr)
{
	int ndescs = mr->ndescs;

	memset(umr, 0, sizeof(*umr));
	umr->flags = MLX5_UMR_CHECK_NOT_FREE;
	umr->klm_octowords = get_klm_octo(ndescs);
	umr->mkey_mask = frwr_mkey_mask();
}
1956
Sagi Grimbergdd01e662015-10-13 19:11:42 +03001957static void set_linv_umr_seg(struct mlx5_wqe_umr_ctrl_seg *umr)
Eli Cohene126ba92013-07-07 17:25:49 +03001958{
1959 memset(umr, 0, sizeof(*umr));
Sagi Grimbergdd01e662015-10-13 19:11:42 +03001960 umr->mkey_mask = cpu_to_be64(MLX5_MKEY_MASK_FREE);
1961 umr->flags = 1 << 7;
Eli Cohene126ba92013-07-07 17:25:49 +03001962}
1963
Haggai Eran968e78d2014-12-11 17:04:11 +02001964static __be64 get_umr_reg_mr_mask(void)
1965{
1966 u64 result;
1967
1968 result = MLX5_MKEY_MASK_LEN |
1969 MLX5_MKEY_MASK_PAGE_SIZE |
1970 MLX5_MKEY_MASK_START_ADDR |
1971 MLX5_MKEY_MASK_PD |
1972 MLX5_MKEY_MASK_LR |
1973 MLX5_MKEY_MASK_LW |
1974 MLX5_MKEY_MASK_KEY |
1975 MLX5_MKEY_MASK_RR |
1976 MLX5_MKEY_MASK_RW |
1977 MLX5_MKEY_MASK_A |
1978 MLX5_MKEY_MASK_FREE;
1979
1980 return cpu_to_be64(result);
1981}
1982
1983static __be64 get_umr_unreg_mr_mask(void)
1984{
1985 u64 result;
1986
1987 result = MLX5_MKEY_MASK_FREE;
1988
1989 return cpu_to_be64(result);
1990}
1991
1992static __be64 get_umr_update_mtt_mask(void)
1993{
1994 u64 result;
1995
1996 result = MLX5_MKEY_MASK_FREE;
1997
1998 return cpu_to_be64(result);
1999}
2000
/*
 * Build the UMR control segment for the driver-internal UMR QP work
 * requests (register / unregister / MTT update), selecting the free-state
 * check and the mkey mask from the MLX5_IB_SEND_UMR_* send flags.
 */
static void set_reg_umr_segment(struct mlx5_wqe_umr_ctrl_seg *umr,
				struct ib_send_wr *wr)
{
	struct mlx5_umr_wr *umrwr = umr_wr(wr);

	memset(umr, 0, sizeof(*umr));

	if (wr->send_flags & MLX5_IB_SEND_UMR_FAIL_IF_FREE)
		umr->flags = MLX5_UMR_CHECK_FREE; /* fail if free */
	else
		umr->flags = MLX5_UMR_CHECK_NOT_FREE; /* fail if not free */

	if (!(wr->send_flags & MLX5_IB_SEND_UMR_UNREG)) {
		/* Registration or MTT update: size the translation table. */
		umr->klm_octowords = get_klm_octo(umrwr->npages);
		if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_MTT) {
			/* Partial MTT update at an offset into the table. */
			umr->mkey_mask = get_umr_update_mtt_mask();
			umr->bsf_octowords = get_klm_octo(umrwr->target.offset);
			umr->flags |= MLX5_UMR_TRANSLATION_OFFSET_EN;
		} else {
			umr->mkey_mask = get_umr_reg_mr_mask();
		}
	} else {
		umr->mkey_mask = get_umr_unreg_mr_mask();
	}

	/* No SGEs means the translation entries are carried inline. */
	if (!wr->num_sge)
		umr->flags |= MLX5_UMR_INLINE;
}
2029
2030static u8 get_umr_flags(int acc)
2031{
2032 return (acc & IB_ACCESS_REMOTE_ATOMIC ? MLX5_PERM_ATOMIC : 0) |
2033 (acc & IB_ACCESS_REMOTE_WRITE ? MLX5_PERM_REMOTE_WRITE : 0) |
2034 (acc & IB_ACCESS_REMOTE_READ ? MLX5_PERM_REMOTE_READ : 0) |
2035 (acc & IB_ACCESS_LOCAL_WRITE ? MLX5_PERM_LOCAL_WRITE : 0) |
Sagi Grimberg2ac45932014-02-23 14:19:09 +02002036 MLX5_PERM_LOCAL_READ | MLX5_PERM_UMR_EN;
Eli Cohene126ba92013-07-07 17:25:49 +03002037}
2038
/*
 * Fill the mkey context segment for an IB_WR_REG_MR registration:
 * access permissions, key variant, IOVA/length, translation-table size
 * (in octowords) and page size of the MR.
 */
static void set_reg_mkey_seg(struct mlx5_mkey_seg *seg,
			     struct mlx5_ib_mr *mr,
			     u32 key, int access)
{
	/* Entries rounded up to 8, two per octoword. */
	int ndescs = ALIGN(mr->ndescs, 8) >> 1;

	memset(seg, 0, sizeof(*seg));
	seg->flags = get_umr_flags(access) | MLX5_ACCESS_MODE_MTT;
	/* Low byte of the key (variant part); upper bits set per HW spec. */
	seg->qpn_mkey7_0 = cpu_to_be32((key & 0xff) | 0xffffff00);
	seg->flags_pd = cpu_to_be32(MLX5_MKEY_REMOTE_INVAL);
	seg->start_addr = cpu_to_be64(mr->ibmr.iova);
	seg->len = cpu_to_be64(mr->ibmr.length);
	seg->xlt_oct_size = cpu_to_be32(ndescs);
	seg->log2_page_size = ilog2(mr->ibmr.page_size);
}
2054
/* Mkey context for a local-invalidate WQE: mark the mkey as free. */
static void set_linv_mkey_seg(struct mlx5_mkey_seg *seg)
{
	memset(seg, 0, sizeof(*seg));
	seg->status = MLX5_MKEY_STATUS_FREE;
}
2060
/*
 * Fill the mkey context for a driver-internal UMR work request.
 * For unregister, only the free status is set; otherwise access flags,
 * PD/start address (unless this is an MTT-only update), length, page
 * shift and the mkey variant byte are programmed.
 */
static void set_reg_mkey_segment(struct mlx5_mkey_seg *seg, struct ib_send_wr *wr)
{
	struct mlx5_umr_wr *umrwr = umr_wr(wr);

	memset(seg, 0, sizeof(*seg));
	if (wr->send_flags & MLX5_IB_SEND_UMR_UNREG) {
		seg->status = MLX5_MKEY_STATUS_FREE;
		return;
	}

	seg->flags = convert_access(umrwr->access_flags);
	if (!(wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_MTT)) {
		/* Full registration: program PD and virtual start address. */
		seg->flags_pd = cpu_to_be32(to_mpd(umrwr->pd)->pdn);
		seg->start_addr = cpu_to_be64(umrwr->target.virt_addr);
	}
	seg->len = cpu_to_be64(umrwr->length);
	seg->log2_page_size = umrwr->page_shift;
	seg->qpn_mkey7_0 = cpu_to_be32(0xffffff00 |
				       mlx5_mkey_variant(umrwr->mkey));
}
2081
/*
 * Data segment for an IB_WR_REG_MR WQE: points the device at the MR's
 * descriptor array (DMA address), with the byte count padded to 64.
 */
static void set_reg_data_seg(struct mlx5_wqe_data_seg *dseg,
			     struct mlx5_ib_mr *mr,
			     struct mlx5_ib_pd *pd)
{
	int bcount = mr->desc_size * mr->ndescs;

	dseg->addr = cpu_to_be64(mr->desc_map);
	dseg->byte_count = cpu_to_be32(ALIGN(bcount, 64));
	dseg->lkey = cpu_to_be32(pd->ibpd.local_dma_lkey);
}
2092
Eli Cohene126ba92013-07-07 17:25:49 +03002093static __be32 send_ieth(struct ib_send_wr *wr)
2094{
2095 switch (wr->opcode) {
2096 case IB_WR_SEND_WITH_IMM:
2097 case IB_WR_RDMA_WRITE_WITH_IMM:
2098 return wr->ex.imm_data;
2099
2100 case IB_WR_SEND_WITH_INV:
2101 return cpu_to_be32(wr->ex.invalidate_rkey);
2102
2103 default:
2104 return 0;
2105 }
2106}
2107
2108static u8 calc_sig(void *wqe, int size)
2109{
2110 u8 *p = wqe;
2111 u8 res = 0;
2112 int i;
2113
2114 for (i = 0; i < size; i++)
2115 res ^= p[i];
2116
2117 return ~res;
2118}
2119
/*
 * Checksum a whole WQE. The low 6 bits of byte 8 (the ds count in the
 * ctrl segment) give the WQE size in 16-byte units, hence the << 4 to
 * get the length in bytes.
 */
static u8 wq_sig(void *wqe)
{
	return calc_sig(wqe, (*((u8 *)wqe + 8) & 0x3f) << 4);
}
2124
/*
 * Copy the send WR's scatter list inline into the WQE at @wqe.
 * Handles wrap-around of the send queue buffer mid-copy. On success,
 * *sz is set to the segment size in 16-byte units.
 * Returns -ENOMEM if the data exceeds the QP's max inline capacity.
 */
static int set_data_inl_seg(struct mlx5_ib_qp *qp, struct ib_send_wr *wr,
			    void *wqe, int *sz)
{
	struct mlx5_wqe_inline_seg *seg;
	void *qend = qp->sq.qend;
	void *addr;
	int inl = 0;
	int copy;
	int len;
	int i;

	seg = wqe;
	wqe += sizeof(*seg);
	for (i = 0; i < wr->num_sge; i++) {
		addr = (void *)(unsigned long)(wr->sg_list[i].addr);
		len = wr->sg_list[i].length;
		inl += len;

		if (unlikely(inl > qp->max_inline_data))
			return -ENOMEM;

		if (unlikely(wqe + len > qend)) {
			/* Queue buffer wraps: copy up to the end, then
			 * continue from the start of the send queue.
			 */
			copy = qend - wqe;
			memcpy(wqe, addr, copy);
			addr += copy;
			len -= copy;
			wqe = mlx5_get_send_wqe(qp, 0);
		}
		memcpy(wqe, addr, len);
		wqe += len;
	}

	seg->byte_count = cpu_to_be32(inl | MLX5_INLINE_SEG);

	/* Segment size in 16-byte units, including the byte_count header. */
	*sz = ALIGN(inl + sizeof(seg->byte_count), 16) / 16;

	return 0;
}
2163
Sagi Grimberge6631812014-02-23 14:19:11 +02002164static u16 prot_field_size(enum ib_signature_type type)
2165{
2166 switch (type) {
2167 case IB_SIG_TYPE_T10_DIF:
2168 return MLX5_DIF_SIZE;
2169 default:
2170 return 0;
2171 }
2172}
2173
2174static u8 bs_selector(int block_size)
2175{
2176 switch (block_size) {
2177 case 512: return 0x1;
2178 case 520: return 0x2;
2179 case 4096: return 0x3;
2180 case 4160: return 0x4;
2181 case 1073741824: return 0x5;
2182 default: return 0;
2183 }
2184}
2185
/*
 * Fill the inline section of a BSF (byte-stream format) descriptor from
 * one T10-DIF signature domain: app/ref tags, guard type (CRC vs IP
 * checksum), and the ref/app tag handling flags.
 */
static void mlx5_fill_inl_bsf(struct ib_sig_domain *domain,
			      struct mlx5_bsf_inl *inl)
{
	/* Valid inline section and allow BSF refresh */
	inl->vld_refresh = cpu_to_be16(MLX5_BSF_INL_VALID |
				       MLX5_BSF_REFRESH_DIF);
	inl->dif_apptag = cpu_to_be16(domain->sig.dif.app_tag);
	inl->dif_reftag = cpu_to_be32(domain->sig.dif.ref_tag);
	/* repeating block */
	inl->rp_inv_seed = MLX5_BSF_REPEAT_BLOCK;
	inl->sig_type = domain->sig.dif.bg_type == IB_T10DIF_CRC ?
		MLX5_DIF_CRC : MLX5_DIF_IPCS;

	/* Increment the reference tag per block when remapping is on. */
	if (domain->sig.dif.ref_remap)
		inl->dif_inc_ref_guard_check |= MLX5_BSF_INC_REFTAG;

	if (domain->sig.dif.app_escape) {
		if (domain->sig.dif.ref_escape)
			inl->dif_inc_ref_guard_check |= MLX5_BSF_APPREF_ESCAPE;
		else
			inl->dif_inc_ref_guard_check |= MLX5_BSF_APPTAG_ESCAPE;
	}

	inl->dif_app_bitmask_check =
		cpu_to_be16(domain->sig.dif.apptag_check_mask);
}
2212
/*
 * Build the BSF (byte-stream format) descriptor for a signature MR from
 * the verbs signature attributes: basic section plus the memory- and
 * wire-domain configuration. Returns -EINVAL on an unknown signature type.
 */
static int mlx5_set_bsf(struct ib_mr *sig_mr,
			struct ib_sig_attrs *sig_attrs,
			struct mlx5_bsf *bsf, u32 data_size)
{
	struct mlx5_core_sig_ctx *msig = to_mmr(sig_mr)->sig;
	struct mlx5_bsf_basic *basic = &bsf->basic;
	struct ib_sig_domain *mem = &sig_attrs->mem;
	struct ib_sig_domain *wire = &sig_attrs->wire;

	memset(bsf, 0, sizeof(*bsf));

	/* Basic + Extended + Inline */
	basic->bsf_size_sbs = 1 << 7;
	/* Input domain check byte mask */
	basic->check_byte_mask = sig_attrs->check_mask;
	basic->raw_data_size = cpu_to_be32(data_size);

	/* Memory domain */
	switch (sig_attrs->mem.sig_type) {
	case IB_SIG_TYPE_NONE:
		break;
	case IB_SIG_TYPE_T10_DIF:
		basic->mem.bs_selector = bs_selector(mem->sig.dif.pi_interval);
		basic->m_bfs_psv = cpu_to_be32(msig->psv_memory.psv_idx);
		mlx5_fill_inl_bsf(mem, &bsf->m_inl);
		break;
	default:
		return -EINVAL;
	}

	/* Wire domain */
	switch (sig_attrs->wire.sig_type) {
	case IB_SIG_TYPE_NONE:
		break;
	case IB_SIG_TYPE_T10_DIF:
		if (mem->sig.dif.pi_interval == wire->sig.dif.pi_interval &&
		    mem->sig_type == wire->sig_type) {
			/* Same block structure */
			basic->bsf_size_sbs |= 1 << 4;
			/* Fields equal in both domains are copied, not
			 * recomputed, per the copy byte mask.
			 */
			if (mem->sig.dif.bg_type == wire->sig.dif.bg_type)
				basic->wire.copy_byte_mask |= MLX5_CPY_GRD_MASK;
			if (mem->sig.dif.app_tag == wire->sig.dif.app_tag)
				basic->wire.copy_byte_mask |= MLX5_CPY_APP_MASK;
			if (mem->sig.dif.ref_tag == wire->sig.dif.ref_tag)
				basic->wire.copy_byte_mask |= MLX5_CPY_REF_MASK;
		} else
			basic->wire.bs_selector = bs_selector(wire->sig.dif.pi_interval);

		basic->w_bfs_psv = cpu_to_be32(msig->psv_wire.psv_idx);
		mlx5_fill_inl_bsf(wire, &bsf->w_inl);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
2270
/*
 * Build the data portion of a signature handover WQE at *seg, advancing
 * *seg and *size (in 16-byte units) past what was written and handling
 * send-queue wrap-around. Uses a single KLM when data and protection are
 * interleaved (or no protection is given), otherwise a strided-block
 * layout that interleaves data and protection blocks; a BSF descriptor
 * always follows.
 */
static int set_sig_data_segment(struct ib_sig_handover_wr *wr,
				struct mlx5_ib_qp *qp, void **seg, int *size)
{
	struct ib_sig_attrs *sig_attrs = wr->sig_attrs;
	struct ib_mr *sig_mr = wr->sig_mr;
	struct mlx5_bsf *bsf;
	u32 data_len = wr->wr.sg_list->length;
	u32 data_key = wr->wr.sg_list->lkey;
	u64 data_va = wr->wr.sg_list->addr;
	int ret;
	int wqe_size;

	if (!wr->prot ||
	    (data_key == wr->prot->lkey &&
	     data_va == wr->prot->addr &&
	     data_len == wr->prot->length)) {
		/**
		 * Source domain doesn't contain signature information
		 * or data and protection are interleaved in memory.
		 * So need construct:
		 *     ------------------
		 *    |     data_klm     |
		 *     ------------------
		 *    |       BSF        |
		 *     ------------------
		 **/
		struct mlx5_klm *data_klm = *seg;

		data_klm->bcount = cpu_to_be32(data_len);
		data_klm->key = cpu_to_be32(data_key);
		data_klm->va = cpu_to_be64(data_va);
		wqe_size = ALIGN(sizeof(*data_klm), 64);
	} else {
		/**
		 * Source domain contains signature information
		 * So need construct a strided block format:
		 *     ---------------------------
		 *    |     stride_block_ctrl     |
		 *     ---------------------------
		 *    |          data_klm         |
		 *     ---------------------------
		 *    |          prot_klm         |
		 *     ---------------------------
		 *    |            BSF            |
		 *     ---------------------------
		 **/
		struct mlx5_stride_block_ctrl_seg *sblock_ctrl;
		struct mlx5_stride_block_entry *data_sentry;
		struct mlx5_stride_block_entry *prot_sentry;
		u32 prot_key = wr->prot->lkey;
		u64 prot_va = wr->prot->addr;
		u16 block_size = sig_attrs->mem.sig.dif.pi_interval;
		int prot_size;

		sblock_ctrl = *seg;
		data_sentry = (void *)sblock_ctrl + sizeof(*sblock_ctrl);
		prot_sentry = (void *)data_sentry + sizeof(*data_sentry);

		prot_size = prot_field_size(sig_attrs->mem.sig_type);
		if (!prot_size) {
			pr_err("Bad block size given: %u\n", block_size);
			return -EINVAL;
		}
		/* Each cycle covers one data block plus its protection. */
		sblock_ctrl->bcount_per_cycle = cpu_to_be32(block_size +
							    prot_size);
		sblock_ctrl->op = cpu_to_be32(MLX5_STRIDE_BLOCK_OP);
		sblock_ctrl->repeat_count = cpu_to_be32(data_len / block_size);
		sblock_ctrl->num_entries = cpu_to_be16(2);

		data_sentry->bcount = cpu_to_be16(block_size);
		data_sentry->key = cpu_to_be32(data_key);
		data_sentry->va = cpu_to_be64(data_va);
		data_sentry->stride = cpu_to_be16(block_size);

		prot_sentry->bcount = cpu_to_be16(prot_size);
		prot_sentry->key = cpu_to_be32(prot_key);
		prot_sentry->va = cpu_to_be64(prot_va);
		prot_sentry->stride = cpu_to_be16(prot_size);

		wqe_size = ALIGN(sizeof(*sblock_ctrl) + sizeof(*data_sentry) +
				 sizeof(*prot_sentry), 64);
	}

	*seg += wqe_size;
	*size += wqe_size / 16;
	if (unlikely((*seg == qp->sq.qend)))
		*seg = mlx5_get_send_wqe(qp, 0);

	bsf = *seg;
	ret = mlx5_set_bsf(sig_mr, sig_attrs, bsf, data_len);
	if (ret)
		return -EINVAL;

	*seg += sizeof(*bsf);
	*size += sizeof(*bsf) / 16;
	if (unlikely((*seg == qp->sq.qend)))
		*seg = mlx5_get_send_wqe(qp, 0);

	return 0;
}
2371
/*
 * Fill the mkey context for a signature handover WQE: KLM access mode,
 * BSF enabled, and the signature-error indication bit taken from the
 * low bit of the MR's sigerr counter.
 */
static void set_sig_mkey_segment(struct mlx5_mkey_seg *seg,
				 struct ib_sig_handover_wr *wr, u32 nelements,
				 u32 length, u32 pdn)
{
	struct ib_mr *sig_mr = wr->sig_mr;
	u32 sig_key = sig_mr->rkey;
	u8 sigerr = to_mmr(sig_mr)->sig->sigerr_count & 1;

	memset(seg, 0, sizeof(*seg));

	seg->flags = get_umr_flags(wr->access_flags) |
		     MLX5_ACCESS_MODE_KLM;
	seg->qpn_mkey7_0 = cpu_to_be32((sig_key & 0xff) | 0xffffff00);
	seg->flags_pd = cpu_to_be32(MLX5_MKEY_REMOTE_INVAL | sigerr << 26 |
				    MLX5_MKEY_BSF_EN | pdn);
	seg->len = cpu_to_be64(length);
	seg->xlt_oct_size = cpu_to_be32(be16_to_cpu(get_klm_octo(nelements)));
	seg->bsfs_octo_size = cpu_to_be32(MLX5_MKEY_BSF_OCTO_SIZE);
}
2391
/*
 * UMR control segment for a signature handover WQE: inline translation
 * entries, mkey must currently be free, and BSF octowords reserved.
 */
static void set_sig_umr_segment(struct mlx5_wqe_umr_ctrl_seg *umr,
				u32 nelements)
{
	memset(umr, 0, sizeof(*umr));

	umr->flags = MLX5_FLAGS_INLINE | MLX5_FLAGS_CHECK_FREE;
	umr->klm_octowords = get_klm_octo(nelements);
	umr->bsf_octowords = cpu_to_be16(MLX5_MKEY_BSF_OCTO_SIZE);
	umr->mkey_mask = sig_mkey_mask();
}
2402
2403
/*
 * Emit the full IB_WR_REG_SIG_MR sequence into the send queue at *seg:
 * UMR control segment, signature mkey segment, then the data/BSF layout.
 * Advances *seg/*size and handles queue wrap-around between segments.
 * Validates the WR (single SGE, no remote atomic, signature enabled QP,
 * previous signature status consumed) and marks the MR's signature
 * status as unchecked on success.
 */
static int set_sig_umr_wr(struct ib_send_wr *send_wr, struct mlx5_ib_qp *qp,
			  void **seg, int *size)
{
	struct ib_sig_handover_wr *wr = sig_handover_wr(send_wr);
	struct mlx5_ib_mr *sig_mr = to_mmr(wr->sig_mr);
	u32 pdn = get_pd(qp)->pdn;
	u32 klm_oct_size;
	int region_len, ret;

	if (unlikely(wr->wr.num_sge != 1) ||
	    unlikely(wr->access_flags & IB_ACCESS_REMOTE_ATOMIC) ||
	    unlikely(!sig_mr->sig) || unlikely(!qp->signature_en) ||
	    unlikely(!sig_mr->sig->sig_status_checked))
		return -EINVAL;

	/* length of the protected region, data + protection */
	region_len = wr->wr.sg_list->length;
	if (wr->prot &&
	    (wr->prot->lkey != wr->wr.sg_list->lkey ||
	     wr->prot->addr != wr->wr.sg_list->addr ||
	     wr->prot->length != wr->wr.sg_list->length))
		region_len += wr->prot->length;

	/**
	 * KLM octoword size - if protection was provided
	 * then we use strided block format (3 octowords),
	 * else we use single KLM (1 octoword)
	 **/
	klm_oct_size = wr->prot ? 3 : 1;

	set_sig_umr_segment(*seg, klm_oct_size);
	*seg += sizeof(struct mlx5_wqe_umr_ctrl_seg);
	*size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16;
	if (unlikely((*seg == qp->sq.qend)))
		*seg = mlx5_get_send_wqe(qp, 0);

	set_sig_mkey_segment(*seg, wr, klm_oct_size, region_len, pdn);
	*seg += sizeof(struct mlx5_mkey_seg);
	*size += sizeof(struct mlx5_mkey_seg) / 16;
	if (unlikely((*seg == qp->sq.qend)))
		*seg = mlx5_get_send_wqe(qp, 0);

	ret = set_sig_data_segment(wr, qp, seg, size);
	if (ret)
		return ret;

	sig_mr->sig->sig_status_checked = false;
	return 0;
}
2453
2454static int set_psv_wr(struct ib_sig_domain *domain,
2455 u32 psv_idx, void **seg, int *size)
2456{
2457 struct mlx5_seg_set_psv *psv_seg = *seg;
2458
2459 memset(psv_seg, 0, sizeof(*psv_seg));
2460 psv_seg->psv_num = cpu_to_be32(psv_idx);
2461 switch (domain->sig_type) {
Sagi Grimberg78eda2b2014-08-13 19:54:35 +03002462 case IB_SIG_TYPE_NONE:
2463 break;
Sagi Grimberge6631812014-02-23 14:19:11 +02002464 case IB_SIG_TYPE_T10_DIF:
2465 psv_seg->transient_sig = cpu_to_be32(domain->sig.dif.bg << 16 |
2466 domain->sig.dif.app_tag);
2467 psv_seg->ref_tag = cpu_to_be32(domain->sig.dif.ref_tag);
Sagi Grimberge6631812014-02-23 14:19:11 +02002468 break;
Sagi Grimberge6631812014-02-23 14:19:11 +02002469 default:
2470 pr_err("Bad signature type given.\n");
2471 return 1;
2472 }
2473
Sagi Grimberg78eda2b2014-08-13 19:54:35 +03002474 *seg += sizeof(*psv_seg);
2475 *size += sizeof(*psv_seg) / 16;
2476
Sagi Grimberge6631812014-02-23 14:19:11 +02002477 return 0;
2478}
2479
/*
 * Emit an IB_WR_REG_MR fast-registration sequence at *seg: UMR control
 * segment, mkey segment, then the data segment pointing at the MR's
 * descriptor list. Advances *seg/*size, handling queue wrap-around.
 * Rejects IB_SEND_INLINE, which is not valid for registration WRs.
 */
static int set_reg_wr(struct mlx5_ib_qp *qp,
		      struct ib_reg_wr *wr,
		      void **seg, int *size)
{
	struct mlx5_ib_mr *mr = to_mmr(wr->mr);
	struct mlx5_ib_pd *pd = to_mpd(qp->ibqp.pd);

	if (unlikely(wr->wr.send_flags & IB_SEND_INLINE)) {
		mlx5_ib_warn(to_mdev(qp->ibqp.device),
			     "Invalid IB_SEND_INLINE send flag\n");
		return -EINVAL;
	}

	set_reg_umr_seg(*seg, mr);
	*seg += sizeof(struct mlx5_wqe_umr_ctrl_seg);
	*size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16;
	if (unlikely((*seg == qp->sq.qend)))
		*seg = mlx5_get_send_wqe(qp, 0);

	set_reg_mkey_seg(*seg, mr, wr->key, wr->access);
	*seg += sizeof(struct mlx5_mkey_seg);
	*size += sizeof(struct mlx5_mkey_seg) / 16;
	if (unlikely((*seg == qp->sq.qend)))
		*seg = mlx5_get_send_wqe(qp, 0);

	set_reg_data_seg(*seg, mr, pd);
	*seg += sizeof(struct mlx5_wqe_data_seg);
	*size += (sizeof(struct mlx5_wqe_data_seg) / 16);

	return 0;
}
2511
/*
 * Emit a local-invalidate sequence at *seg (UMR control segment plus
 * mkey segment marking the key free), advancing *seg/*size and handling
 * queue wrap-around after each segment.
 */
static void set_linv_wr(struct mlx5_ib_qp *qp, void **seg, int *size)
{
	set_linv_umr_seg(*seg);
	*seg += sizeof(struct mlx5_wqe_umr_ctrl_seg);
	*size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16;
	if (unlikely((*seg == qp->sq.qend)))
		*seg = mlx5_get_send_wqe(qp, 0);
	set_linv_mkey_seg(*seg);
	*seg += sizeof(struct mlx5_mkey_seg);
	*size += sizeof(struct mlx5_mkey_seg) / 16;
	if (unlikely((*seg == qp->sq.qend)))
		*seg = mlx5_get_send_wqe(qp, 0);
}
2525
/*
 * Debug helper: hex-dump a WQE of @size_16 16-byte units starting at
 * send-queue index @idx, following the queue's power-of-two index wrap
 * every 16 bytes dumped.
 */
static void dump_wqe(struct mlx5_ib_qp *qp, int idx, int size_16)
{
	__be32 *p = NULL;
	int tidx = idx;
	int i, j;

	pr_debug("dump wqe at %p\n", mlx5_get_send_wqe(qp, tidx));
	for (i = 0, j = 0; i < size_16 * 4; i += 4, j += 4) {
		if ((i & 0xf) == 0) {
			/* Crossed a 16-byte boundary: fetch the next WQE
			 * basic block and restart the intra-block index.
			 */
			void *buf = mlx5_get_send_wqe(qp, tidx);
			tidx = (tidx + 1) & (qp->sq.wqe_cnt - 1);
			p = buf;
			j = 0;
		}
		pr_debug("%08x %08x %08x %08x\n", be32_to_cpu(p[j]),
			 be32_to_cpu(p[j + 1]), be32_to_cpu(p[j + 2]),
			 be32_to_cpu(p[j + 3]));
	}
}
2545
/*
 * Copy @bytecnt bytes of WQE data to the BlueFlame register in 64-byte
 * bursts, wrapping the source back to the start of the send queue when
 * it reaches the end. @bytecnt is expected to be a multiple of 64.
 */
static void mlx5_bf_copy(u64 __iomem *dst, u64 *src,
			 unsigned bytecnt, struct mlx5_ib_qp *qp)
{
	while (bytecnt > 0) {
		__iowrite64_copy(dst++, src++, 8);
		__iowrite64_copy(dst++, src++, 8);
		__iowrite64_copy(dst++, src++, 8);
		__iowrite64_copy(dst++, src++, 8);
		__iowrite64_copy(dst++, src++, 8);
		__iowrite64_copy(dst++, src++, 8);
		__iowrite64_copy(dst++, src++, 8);
		__iowrite64_copy(dst++, src++, 8);
		bytecnt -= 64;
		if (unlikely(src == qp->sq.qend))
			src = mlx5_get_send_wqe(qp, 0);
	}
}
2563
/*
 * Compute the fence mode for a WQE. @fence is the QP's cached fence
 * state from earlier WQEs. A fenced local-invalidate needs strong
 * ordering; otherwise, when a fence is already pending, an explicitly
 * fenced WR upgrades it to small-and-fence, else the cached state is
 * carried forward.
 */
static u8 get_fence(u8 fence, struct ib_send_wr *wr)
{
	if (unlikely(wr->opcode == IB_WR_LOCAL_INV &&
		     wr->send_flags & IB_SEND_FENCE))
		return MLX5_FENCE_MODE_STRONG_ORDERING;

	if (unlikely(fence)) {
		if (wr->send_flags & IB_SEND_FENCE)
			return MLX5_FENCE_MODE_SMALL_AND_FENCE;
		else
			return fence;

	} else {
		return 0;
	}
}
2580
/*
 * Start a new WQE: check for send-queue overflow, locate the WQE slot
 * (*idx, *seg) and initialize the control segment (*ctrl) with the
 * immediate value and completion/solicited flags. *size is set to the
 * control segment size in 16-byte units and *seg advanced past it.
 * Returns -ENOMEM if the queue is full.
 */
static int begin_wqe(struct mlx5_ib_qp *qp, void **seg,
		     struct mlx5_wqe_ctrl_seg **ctrl,
		     struct ib_send_wr *wr, unsigned *idx,
		     int *size, int nreq)
{
	int err = 0;

	if (unlikely(mlx5_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq))) {
		err = -ENOMEM;
		return err;
	}

	*idx = qp->sq.cur_post & (qp->sq.wqe_cnt - 1);
	*seg = mlx5_get_send_wqe(qp, *idx);
	*ctrl = *seg;
	/* Clear the flags/signature dword of the ctrl segment. */
	*(uint32_t *)(*seg + 8) = 0;
	(*ctrl)->imm = send_ieth(wr);
	(*ctrl)->fm_ce_se = qp->sq_signal_bits |
		(wr->send_flags & IB_SEND_SIGNALED ?
		 MLX5_WQE_CTRL_CQ_UPDATE : 0) |
		(wr->send_flags & IB_SEND_SOLICITED ?
		 MLX5_WQE_CTRL_SOLICITED : 0);

	*seg += sizeof(**ctrl);
	*size = sizeof(**ctrl) / 16;

	return err;
}
2609
/*
 * Finalize a WQE built by begin_wqe(): write the opcode/index word and
 * ds count into the control segment, apply the fence, cache the fence
 * state for the next WQE, optionally sign the WQE, and record the
 * bookkeeping (wrid, opcode, head) before advancing the post counter by
 * the number of basic blocks consumed.
 */
static void finish_wqe(struct mlx5_ib_qp *qp,
		       struct mlx5_wqe_ctrl_seg *ctrl,
		       u8 size, unsigned idx, u64 wr_id,
		       int nreq, u8 fence, u8 next_fence,
		       u32 mlx5_opcode)
{
	u8 opmod = 0;

	ctrl->opmod_idx_opcode = cpu_to_be32(((u32)(qp->sq.cur_post) << 8) |
					     mlx5_opcode | ((u32)opmod << 24));
	ctrl->qpn_ds = cpu_to_be32(size | (qp->mqp.qpn << 8));
	ctrl->fm_ce_se |= fence;
	qp->fm_cache = next_fence;
	if (unlikely(qp->wq_sig))
		ctrl->signature = wq_sig(ctrl);

	qp->sq.wrid[idx] = wr_id;
	qp->sq.w_list[idx].opcode = mlx5_opcode;
	qp->sq.wqe_head[idx] = qp->sq.head + nreq;
	/* size is in 16-byte units; convert to 64-byte basic blocks. */
	qp->sq.cur_post += DIV_ROUND_UP(size * 16, MLX5_SEND_WQE_BB);
	qp->sq.w_list[idx].next = qp->sq.cur_post;
}
2632
2633
Eli Cohene126ba92013-07-07 17:25:49 +03002634int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
2635 struct ib_send_wr **bad_wr)
2636{
2637 struct mlx5_wqe_ctrl_seg *ctrl = NULL; /* compiler warning */
2638 struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
Eli Cohene126ba92013-07-07 17:25:49 +03002639 struct mlx5_ib_qp *qp = to_mqp(ibqp);
Sagi Grimberge6631812014-02-23 14:19:11 +02002640 struct mlx5_ib_mr *mr;
Eli Cohene126ba92013-07-07 17:25:49 +03002641 struct mlx5_wqe_data_seg *dpseg;
2642 struct mlx5_wqe_xrc_seg *xrc;
2643 struct mlx5_bf *bf = qp->bf;
2644 int uninitialized_var(size);
2645 void *qend = qp->sq.qend;
2646 unsigned long flags;
Eli Cohene126ba92013-07-07 17:25:49 +03002647 unsigned idx;
2648 int err = 0;
2649 int inl = 0;
2650 int num_sge;
2651 void *seg;
2652 int nreq;
2653 int i;
2654 u8 next_fence = 0;
Eli Cohene126ba92013-07-07 17:25:49 +03002655 u8 fence;
2656
2657 spin_lock_irqsave(&qp->sq.lock, flags);
2658
2659 for (nreq = 0; wr; nreq++, wr = wr->next) {
Fabian Fredericka8f731e2014-08-12 19:20:08 -04002660 if (unlikely(wr->opcode >= ARRAY_SIZE(mlx5_ib_opcode))) {
Eli Cohene126ba92013-07-07 17:25:49 +03002661 mlx5_ib_warn(dev, "\n");
2662 err = -EINVAL;
2663 *bad_wr = wr;
2664 goto out;
2665 }
2666
Eli Cohene126ba92013-07-07 17:25:49 +03002667 fence = qp->fm_cache;
2668 num_sge = wr->num_sge;
2669 if (unlikely(num_sge > qp->sq.max_gs)) {
2670 mlx5_ib_warn(dev, "\n");
2671 err = -ENOMEM;
2672 *bad_wr = wr;
2673 goto out;
2674 }
2675
Sagi Grimberg6e5eadace2014-02-23 14:19:08 +02002676 err = begin_wqe(qp, &seg, &ctrl, wr, &idx, &size, nreq);
2677 if (err) {
2678 mlx5_ib_warn(dev, "\n");
2679 err = -ENOMEM;
2680 *bad_wr = wr;
2681 goto out;
2682 }
Eli Cohene126ba92013-07-07 17:25:49 +03002683
2684 switch (ibqp->qp_type) {
2685 case IB_QPT_XRC_INI:
2686 xrc = seg;
Eli Cohene126ba92013-07-07 17:25:49 +03002687 seg += sizeof(*xrc);
2688 size += sizeof(*xrc) / 16;
2689 /* fall through */
2690 case IB_QPT_RC:
2691 switch (wr->opcode) {
2692 case IB_WR_RDMA_READ:
2693 case IB_WR_RDMA_WRITE:
2694 case IB_WR_RDMA_WRITE_WITH_IMM:
Christoph Hellwige622f2f2015-10-08 09:16:33 +01002695 set_raddr_seg(seg, rdma_wr(wr)->remote_addr,
2696 rdma_wr(wr)->rkey);
Jack Morgensteinf241e742014-07-28 23:30:23 +03002697 seg += sizeof(struct mlx5_wqe_raddr_seg);
Eli Cohene126ba92013-07-07 17:25:49 +03002698 size += sizeof(struct mlx5_wqe_raddr_seg) / 16;
2699 break;
2700
2701 case IB_WR_ATOMIC_CMP_AND_SWP:
2702 case IB_WR_ATOMIC_FETCH_AND_ADD:
Eli Cohene126ba92013-07-07 17:25:49 +03002703 case IB_WR_MASKED_ATOMIC_CMP_AND_SWP:
Eli Cohen81bea282013-09-11 16:35:30 +03002704 mlx5_ib_warn(dev, "Atomic operations are not supported yet\n");
2705 err = -ENOSYS;
2706 *bad_wr = wr;
2707 goto out;
Eli Cohene126ba92013-07-07 17:25:49 +03002708
2709 case IB_WR_LOCAL_INV:
2710 next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
2711 qp->sq.wr_data[idx] = IB_WR_LOCAL_INV;
2712 ctrl->imm = cpu_to_be32(wr->ex.invalidate_rkey);
Sagi Grimbergdd01e662015-10-13 19:11:42 +03002713 set_linv_wr(qp, &seg, &size);
Eli Cohene126ba92013-07-07 17:25:49 +03002714 num_sge = 0;
2715 break;
2716
Sagi Grimberg8a187ee2015-10-13 19:11:26 +03002717 case IB_WR_REG_MR:
2718 next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
2719 qp->sq.wr_data[idx] = IB_WR_REG_MR;
2720 ctrl->imm = cpu_to_be32(reg_wr(wr)->key);
2721 err = set_reg_wr(qp, reg_wr(wr), &seg, &size);
2722 if (err) {
2723 *bad_wr = wr;
2724 goto out;
2725 }
2726 num_sge = 0;
2727 break;
2728
Sagi Grimberge6631812014-02-23 14:19:11 +02002729 case IB_WR_REG_SIG_MR:
2730 qp->sq.wr_data[idx] = IB_WR_REG_SIG_MR;
Christoph Hellwige622f2f2015-10-08 09:16:33 +01002731 mr = to_mmr(sig_handover_wr(wr)->sig_mr);
Sagi Grimberge6631812014-02-23 14:19:11 +02002732
2733 ctrl->imm = cpu_to_be32(mr->ibmr.rkey);
2734 err = set_sig_umr_wr(wr, qp, &seg, &size);
2735 if (err) {
2736 mlx5_ib_warn(dev, "\n");
2737 *bad_wr = wr;
2738 goto out;
2739 }
2740
2741 finish_wqe(qp, ctrl, size, idx, wr->wr_id,
2742 nreq, get_fence(fence, wr),
2743 next_fence, MLX5_OPCODE_UMR);
2744 /*
2745 * SET_PSV WQEs are not signaled and solicited
2746 * on error
2747 */
2748 wr->send_flags &= ~IB_SEND_SIGNALED;
2749 wr->send_flags |= IB_SEND_SOLICITED;
2750 err = begin_wqe(qp, &seg, &ctrl, wr,
2751 &idx, &size, nreq);
2752 if (err) {
2753 mlx5_ib_warn(dev, "\n");
2754 err = -ENOMEM;
2755 *bad_wr = wr;
2756 goto out;
2757 }
2758
Christoph Hellwige622f2f2015-10-08 09:16:33 +01002759 err = set_psv_wr(&sig_handover_wr(wr)->sig_attrs->mem,
Sagi Grimberge6631812014-02-23 14:19:11 +02002760 mr->sig->psv_memory.psv_idx, &seg,
2761 &size);
2762 if (err) {
2763 mlx5_ib_warn(dev, "\n");
2764 *bad_wr = wr;
2765 goto out;
2766 }
2767
2768 finish_wqe(qp, ctrl, size, idx, wr->wr_id,
2769 nreq, get_fence(fence, wr),
2770 next_fence, MLX5_OPCODE_SET_PSV);
2771 err = begin_wqe(qp, &seg, &ctrl, wr,
2772 &idx, &size, nreq);
2773 if (err) {
2774 mlx5_ib_warn(dev, "\n");
2775 err = -ENOMEM;
2776 *bad_wr = wr;
2777 goto out;
2778 }
2779
2780 next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
Christoph Hellwige622f2f2015-10-08 09:16:33 +01002781 err = set_psv_wr(&sig_handover_wr(wr)->sig_attrs->wire,
Sagi Grimberge6631812014-02-23 14:19:11 +02002782 mr->sig->psv_wire.psv_idx, &seg,
2783 &size);
2784 if (err) {
2785 mlx5_ib_warn(dev, "\n");
2786 *bad_wr = wr;
2787 goto out;
2788 }
2789
2790 finish_wqe(qp, ctrl, size, idx, wr->wr_id,
2791 nreq, get_fence(fence, wr),
2792 next_fence, MLX5_OPCODE_SET_PSV);
2793 num_sge = 0;
2794 goto skip_psv;
2795
Eli Cohene126ba92013-07-07 17:25:49 +03002796 default:
2797 break;
2798 }
2799 break;
2800
2801 case IB_QPT_UC:
2802 switch (wr->opcode) {
2803 case IB_WR_RDMA_WRITE:
2804 case IB_WR_RDMA_WRITE_WITH_IMM:
Christoph Hellwige622f2f2015-10-08 09:16:33 +01002805 set_raddr_seg(seg, rdma_wr(wr)->remote_addr,
2806 rdma_wr(wr)->rkey);
Eli Cohene126ba92013-07-07 17:25:49 +03002807 seg += sizeof(struct mlx5_wqe_raddr_seg);
2808 size += sizeof(struct mlx5_wqe_raddr_seg) / 16;
2809 break;
2810
2811 default:
2812 break;
2813 }
2814 break;
2815
2816 case IB_QPT_UD:
2817 case IB_QPT_SMI:
2818 case IB_QPT_GSI:
2819 set_datagram_seg(seg, wr);
Jack Morgensteinf241e742014-07-28 23:30:23 +03002820 seg += sizeof(struct mlx5_wqe_datagram_seg);
Eli Cohene126ba92013-07-07 17:25:49 +03002821 size += sizeof(struct mlx5_wqe_datagram_seg) / 16;
2822 if (unlikely((seg == qend)))
2823 seg = mlx5_get_send_wqe(qp, 0);
2824 break;
2825
2826 case MLX5_IB_QPT_REG_UMR:
2827 if (wr->opcode != MLX5_IB_WR_UMR) {
2828 err = -EINVAL;
2829 mlx5_ib_warn(dev, "bad opcode\n");
2830 goto out;
2831 }
2832 qp->sq.wr_data[idx] = MLX5_IB_WR_UMR;
Christoph Hellwige622f2f2015-10-08 09:16:33 +01002833 ctrl->imm = cpu_to_be32(umr_wr(wr)->mkey);
Eli Cohene126ba92013-07-07 17:25:49 +03002834 set_reg_umr_segment(seg, wr);
2835 seg += sizeof(struct mlx5_wqe_umr_ctrl_seg);
2836 size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16;
2837 if (unlikely((seg == qend)))
2838 seg = mlx5_get_send_wqe(qp, 0);
2839 set_reg_mkey_segment(seg, wr);
2840 seg += sizeof(struct mlx5_mkey_seg);
2841 size += sizeof(struct mlx5_mkey_seg) / 16;
2842 if (unlikely((seg == qend)))
2843 seg = mlx5_get_send_wqe(qp, 0);
2844 break;
2845
2846 default:
2847 break;
2848 }
2849
2850 if (wr->send_flags & IB_SEND_INLINE && num_sge) {
2851 int uninitialized_var(sz);
2852
2853 err = set_data_inl_seg(qp, wr, seg, &sz);
2854 if (unlikely(err)) {
2855 mlx5_ib_warn(dev, "\n");
2856 *bad_wr = wr;
2857 goto out;
2858 }
2859 inl = 1;
2860 size += sz;
2861 } else {
2862 dpseg = seg;
2863 for (i = 0; i < num_sge; i++) {
2864 if (unlikely(dpseg == qend)) {
2865 seg = mlx5_get_send_wqe(qp, 0);
2866 dpseg = seg;
2867 }
2868 if (likely(wr->sg_list[i].length)) {
2869 set_data_ptr_seg(dpseg, wr->sg_list + i);
2870 size += sizeof(struct mlx5_wqe_data_seg) / 16;
2871 dpseg++;
2872 }
2873 }
2874 }
2875
Sagi Grimberg6e5eadace2014-02-23 14:19:08 +02002876 finish_wqe(qp, ctrl, size, idx, wr->wr_id, nreq,
2877 get_fence(fence, wr), next_fence,
2878 mlx5_ib_opcode[wr->opcode]);
Sagi Grimberge6631812014-02-23 14:19:11 +02002879skip_psv:
Eli Cohene126ba92013-07-07 17:25:49 +03002880 if (0)
2881 dump_wqe(qp, idx, size);
2882 }
2883
2884out:
2885 if (likely(nreq)) {
2886 qp->sq.head += nreq;
2887
2888 /* Make sure that descriptors are written before
2889 * updating doorbell record and ringing the doorbell
2890 */
2891 wmb();
2892
2893 qp->db.db[MLX5_SND_DBR] = cpu_to_be32(qp->sq.cur_post);
2894
Eli Cohenada388f2014-01-14 17:45:16 +02002895 /* Make sure doorbell record is visible to the HCA before
2896 * we hit doorbell */
2897 wmb();
2898
Eli Cohene126ba92013-07-07 17:25:49 +03002899 if (bf->need_lock)
2900 spin_lock(&bf->lock);
Eli Cohen6a4f1392014-12-02 12:26:18 +02002901 else
2902 __acquire(&bf->lock);
Eli Cohene126ba92013-07-07 17:25:49 +03002903
2904 /* TBD enable WC */
2905 if (0 && nreq == 1 && bf->uuarn && inl && size > 1 && size <= bf->buf_size / 16) {
2906 mlx5_bf_copy(bf->reg + bf->offset, (u64 *)ctrl, ALIGN(size * 16, 64), qp);
2907 /* wc_wmb(); */
2908 } else {
2909 mlx5_write64((__be32 *)ctrl, bf->regreg + bf->offset,
2910 MLX5_GET_DOORBELL_LOCK(&bf->lock32));
2911 /* Make sure doorbells don't leak out of SQ spinlock
2912 * and reach the HCA out of order.
2913 */
2914 mmiowb();
2915 }
2916 bf->offset ^= bf->buf_size;
2917 if (bf->need_lock)
2918 spin_unlock(&bf->lock);
Eli Cohen6a4f1392014-12-02 12:26:18 +02002919 else
2920 __release(&bf->lock);
Eli Cohene126ba92013-07-07 17:25:49 +03002921 }
2922
2923 spin_unlock_irqrestore(&qp->sq.lock, flags);
2924
2925 return err;
2926}
2927
/*
 * set_sig_seg() - stamp the signature byte of a receive WQE.
 * @sig:  signature segment embedded in the receive WQE
 * @size: number of bytes the signature covers
 *
 * calc_sig() (defined earlier in this file) folds @size bytes starting at
 * @sig into a single byte, which is written back into the segment itself so
 * the HCA can validate the descriptor. Only used when qp->wq_sig is set.
 */
static void set_sig_seg(struct mlx5_rwqe_sig *sig, int size)
{
	sig->signature = calc_sig(sig, size);
}
2932
/*
 * mlx5_ib_post_recv() - post a chain of receive work requests to a QP.
 * @ibqp:   the queue pair to post to
 * @wr:     head of a singly linked list of receive WRs
 * @bad_wr: on error, set to the first WR that could not be posted
 *
 * Builds one receive WQE per WR directly in the RQ buffer, then rings the
 * doorbell record once for the whole batch. Returns 0 on success or a
 * negative errno; WRs before *@bad_wr were posted successfully.
 *
 * Statement order matters here: all descriptor writes must be globally
 * visible before the doorbell record update (enforced by wmb()).
 */
int mlx5_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
		      struct ib_recv_wr **bad_wr)
{
	struct mlx5_ib_qp *qp = to_mqp(ibqp);
	struct mlx5_wqe_data_seg *scat;
	struct mlx5_rwqe_sig *sig;
	unsigned long flags;
	int err = 0;
	int nreq;	/* WQEs successfully built in this call */
	int ind;	/* producer index into the RQ ring */
	int i;

	spin_lock_irqsave(&qp->rq.lock, flags);

	/* wqe_cnt is a power of two, so masking wraps the ring index */
	ind = qp->rq.head & (qp->rq.wqe_cnt - 1);

	for (nreq = 0; wr; nreq++, wr = wr->next) {
		if (mlx5_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) {
			err = -ENOMEM;
			*bad_wr = wr;
			goto out;
		}

		if (unlikely(wr->num_sge > qp->rq.max_gs)) {
			err = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		scat = get_recv_wqe(qp, ind);
		/* first slot of a signature-enabled WQE is reserved for it */
		if (qp->wq_sig)
			scat++;

		for (i = 0; i < wr->num_sge; i++)
			set_data_ptr_seg(scat + i, wr->sg_list + i);

		/* terminate a short s/g list with an invalid-lkey sentinel */
		if (i < qp->rq.max_gs) {
			scat[i].byte_count = 0;
			scat[i].lkey = cpu_to_be32(MLX5_INVALID_LKEY);
			scat[i].addr = 0;
		}

		if (qp->wq_sig) {
			/* NOTE(review): scat was advanced past the signature
			 * slot above, so the signature is computed starting at
			 * the first data segment — confirm against the PRM.
			 */
			sig = (struct mlx5_rwqe_sig *)scat;
			set_sig_seg(sig, (qp->rq.max_gs + 1) << 2);
		}

		/* remember the caller's cookie for completion reporting */
		qp->rq.wrid[ind] = wr->wr_id;

		ind = (ind + 1) & (qp->rq.wqe_cnt - 1);
	}

out:
	if (likely(nreq)) {
		qp->rq.head += nreq;

		/* Make sure that descriptors are written before
		 * doorbell record.
		 */
		wmb();

		*qp->db.db = cpu_to_be32(qp->rq.head & 0xffff);
	}

	spin_unlock_irqrestore(&qp->rq.lock, flags);

	return err;
}
3001
3002static inline enum ib_qp_state to_ib_qp_state(enum mlx5_qp_state mlx5_state)
3003{
3004 switch (mlx5_state) {
3005 case MLX5_QP_STATE_RST: return IB_QPS_RESET;
3006 case MLX5_QP_STATE_INIT: return IB_QPS_INIT;
3007 case MLX5_QP_STATE_RTR: return IB_QPS_RTR;
3008 case MLX5_QP_STATE_RTS: return IB_QPS_RTS;
3009 case MLX5_QP_STATE_SQ_DRAINING:
3010 case MLX5_QP_STATE_SQD: return IB_QPS_SQD;
3011 case MLX5_QP_STATE_SQER: return IB_QPS_SQE;
3012 case MLX5_QP_STATE_ERR: return IB_QPS_ERR;
3013 default: return -1;
3014 }
3015}
3016
3017static inline enum ib_mig_state to_ib_mig_state(int mlx5_mig_state)
3018{
3019 switch (mlx5_mig_state) {
3020 case MLX5_QP_PM_ARMED: return IB_MIG_ARMED;
3021 case MLX5_QP_PM_REARM: return IB_MIG_REARM;
3022 case MLX5_QP_PM_MIGRATED: return IB_MIG_MIGRATED;
3023 default: return -1;
3024 }
3025}
3026
3027static int to_ib_qp_access_flags(int mlx5_flags)
3028{
3029 int ib_flags = 0;
3030
3031 if (mlx5_flags & MLX5_QP_BIT_RRE)
3032 ib_flags |= IB_ACCESS_REMOTE_READ;
3033 if (mlx5_flags & MLX5_QP_BIT_RWE)
3034 ib_flags |= IB_ACCESS_REMOTE_WRITE;
3035 if (mlx5_flags & MLX5_QP_BIT_RAE)
3036 ib_flags |= IB_ACCESS_REMOTE_ATOMIC;
3037
3038 return ib_flags;
3039}
3040
3041static void to_ib_ah_attr(struct mlx5_ib_dev *ibdev, struct ib_ah_attr *ib_ah_attr,
3042 struct mlx5_qp_path *path)
3043{
Jack Morgenstein9603b612014-07-28 23:30:22 +03003044 struct mlx5_core_dev *dev = ibdev->mdev;
Eli Cohene126ba92013-07-07 17:25:49 +03003045
3046 memset(ib_ah_attr, 0, sizeof(*ib_ah_attr));
3047 ib_ah_attr->port_num = path->port;
3048
Eli Cohenc7a08ac2014-10-02 12:19:42 +03003049 if (ib_ah_attr->port_num == 0 ||
Saeed Mahameed938fe832015-05-28 22:28:41 +03003050 ib_ah_attr->port_num > MLX5_CAP_GEN(dev, num_ports))
Eli Cohene126ba92013-07-07 17:25:49 +03003051 return;
3052
Achiad Shochat2811ba52015-12-23 18:47:24 +02003053 ib_ah_attr->sl = path->dci_cfi_prio_sl & 0xf;
Eli Cohene126ba92013-07-07 17:25:49 +03003054
3055 ib_ah_attr->dlid = be16_to_cpu(path->rlid);
3056 ib_ah_attr->src_path_bits = path->grh_mlid & 0x7f;
3057 ib_ah_attr->static_rate = path->static_rate ? path->static_rate - 5 : 0;
3058 ib_ah_attr->ah_flags = (path->grh_mlid & (1 << 7)) ? IB_AH_GRH : 0;
3059 if (ib_ah_attr->ah_flags) {
3060 ib_ah_attr->grh.sgid_index = path->mgid_index;
3061 ib_ah_attr->grh.hop_limit = path->hop_limit;
3062 ib_ah_attr->grh.traffic_class =
3063 (be32_to_cpu(path->tclass_flowlabel) >> 20) & 0xff;
3064 ib_ah_attr->grh.flow_label =
3065 be32_to_cpu(path->tclass_flowlabel) & 0xfffff;
3066 memcpy(ib_ah_attr->grh.dgid.raw,
3067 path->rgid, sizeof(ib_ah_attr->grh.dgid.raw));
3068 }
3069}
3070
/*
 * mlx5_ib_query_qp() - query current QP attributes from firmware.
 * @ibqp:         the QP to query
 * @qp_attr:      filled with the QP's current attributes
 * @qp_attr_mask: ignored; the full attribute set is always returned
 * @qp_init_attr: filled with the creation-time attributes (caps, flags)
 *
 * Issues a QUERY_QP firmware command and decodes the returned QP context
 * into verbs-level attributes. Returns 0 on success or a negative errno.
 * Field masks/shifts below follow the mlx5 QP context layout — assumed
 * from the firmware interface headers; TODO confirm against the PRM.
 */
int mlx5_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask,
		     struct ib_qp_init_attr *qp_init_attr)
{
	struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
	struct mlx5_ib_qp *qp = to_mqp(ibqp);
	struct mlx5_query_qp_mbox_out *outb;
	struct mlx5_qp_context *context;
	int mlx5_state;
	int err = 0;

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	/*
	 * Wait for any outstanding page faults, in case the user frees memory
	 * based upon this query's result.
	 */
	flush_workqueue(mlx5_ib_page_fault_wq);
#endif

	/* serialize against modify_qp etc. on the same QP */
	mutex_lock(&qp->mutex);
	outb = kzalloc(sizeof(*outb), GFP_KERNEL);
	if (!outb) {
		err = -ENOMEM;
		goto out;
	}
	context = &outb->ctx;
	err = mlx5_core_qp_query(dev->mdev, &qp->mqp, outb, sizeof(*outb));
	if (err)
		goto out_free;

	/* QP state lives in the top nibble of the flags word */
	mlx5_state = be32_to_cpu(context->flags) >> 28;

	qp->state		     = to_ib_qp_state(mlx5_state);
	qp_attr->qp_state	     = qp->state;
	qp_attr->path_mtu	     = context->mtu_msgmax >> 5;
	qp_attr->path_mig_state	     =
		to_ib_mig_state((be32_to_cpu(context->flags) >> 11) & 0x3);
	qp_attr->qkey		     = be32_to_cpu(context->qkey);
	qp_attr->rq_psn		     = be32_to_cpu(context->rnr_nextrecvpsn) & 0xffffff;
	qp_attr->sq_psn		     = be32_to_cpu(context->next_send_psn) & 0xffffff;
	qp_attr->dest_qp_num	     = be32_to_cpu(context->log_pg_sz_remote_qpn) & 0xffffff;
	qp_attr->qp_access_flags     =
		to_ib_qp_access_flags(be32_to_cpu(context->params2));

	/* address vectors only exist for connected transports */
	if (qp->ibqp.qp_type == IB_QPT_RC || qp->ibqp.qp_type == IB_QPT_UC) {
		to_ib_ah_attr(dev, &qp_attr->ah_attr, &context->pri_path);
		to_ib_ah_attr(dev, &qp_attr->alt_ah_attr, &context->alt_path);
		qp_attr->alt_pkey_index = context->alt_path.pkey_index & 0x7f;
		qp_attr->alt_port_num	= qp_attr->alt_ah_attr.port_num;
	}

	qp_attr->pkey_index = context->pri_path.pkey_index & 0x7f;
	qp_attr->port_num = context->pri_path.port;

	/* qp_attr->en_sqd_async_notify is only applicable in modify qp */
	qp_attr->sq_draining = mlx5_state == MLX5_QP_STATE_SQ_DRAINING;

	qp_attr->max_rd_atomic = 1 << ((be32_to_cpu(context->params1) >> 21) & 0x7);

	qp_attr->max_dest_rd_atomic =
		1 << ((be32_to_cpu(context->params2) >> 21) & 0x7);
	qp_attr->min_rnr_timer	    =
		(be32_to_cpu(context->rnr_nextrecvpsn) >> 24) & 0x1f;
	qp_attr->timeout	    = context->pri_path.ackto_lt >> 3;
	qp_attr->retry_cnt	    = (be32_to_cpu(context->params1) >> 16) & 0x7;
	qp_attr->rnr_retry	    = (be32_to_cpu(context->params1) >> 13) & 0x7;
	qp_attr->alt_timeout	    = context->alt_path.ackto_lt >> 3;
	qp_attr->cur_qp_state	     = qp_attr->qp_state;
	qp_attr->cap.max_recv_wr     = qp->rq.wqe_cnt;
	qp_attr->cap.max_recv_sge    = qp->rq.max_gs;

	if (!ibqp->uobject) {
		qp_attr->cap.max_send_wr  = qp->sq.wqe_cnt;
		qp_attr->cap.max_send_sge = qp->sq.max_gs;
	} else {
		/* userspace owns the SQ layout; we can't report its caps */
		qp_attr->cap.max_send_wr  = 0;
		qp_attr->cap.max_send_sge = 0;
	}

	/* We don't support inline sends for kernel QPs (yet), and we
	 * don't know what userspace's value should be.
	 */
	qp_attr->cap.max_inline_data = 0;

	qp_init_attr->cap	     = qp_attr->cap;

	/* reconstruct the creation flags recorded at create_qp time */
	qp_init_attr->create_flags = 0;
	if (qp->flags & MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK)
		qp_init_attr->create_flags |= IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK;

	if (qp->flags & MLX5_IB_QP_CROSS_CHANNEL)
		qp_init_attr->create_flags |= IB_QP_CREATE_CROSS_CHANNEL;
	if (qp->flags & MLX5_IB_QP_MANAGED_SEND)
		qp_init_attr->create_flags |= IB_QP_CREATE_MANAGED_SEND;
	if (qp->flags & MLX5_IB_QP_MANAGED_RECV)
		qp_init_attr->create_flags |= IB_QP_CREATE_MANAGED_RECV;

	qp_init_attr->sq_sig_type = qp->sq_signal_bits & MLX5_WQE_CTRL_CQ_UPDATE ?
		IB_SIGNAL_ALL_WR : IB_SIGNAL_REQ_WR;

out_free:
	kfree(outb);

out:
	mutex_unlock(&qp->mutex);
	return err;
}
3177
3178struct ib_xrcd *mlx5_ib_alloc_xrcd(struct ib_device *ibdev,
3179 struct ib_ucontext *context,
3180 struct ib_udata *udata)
3181{
3182 struct mlx5_ib_dev *dev = to_mdev(ibdev);
3183 struct mlx5_ib_xrcd *xrcd;
3184 int err;
3185
Saeed Mahameed938fe832015-05-28 22:28:41 +03003186 if (!MLX5_CAP_GEN(dev->mdev, xrc))
Eli Cohene126ba92013-07-07 17:25:49 +03003187 return ERR_PTR(-ENOSYS);
3188
3189 xrcd = kmalloc(sizeof(*xrcd), GFP_KERNEL);
3190 if (!xrcd)
3191 return ERR_PTR(-ENOMEM);
3192
Jack Morgenstein9603b612014-07-28 23:30:22 +03003193 err = mlx5_core_xrcd_alloc(dev->mdev, &xrcd->xrcdn);
Eli Cohene126ba92013-07-07 17:25:49 +03003194 if (err) {
3195 kfree(xrcd);
3196 return ERR_PTR(-ENOMEM);
3197 }
3198
3199 return &xrcd->ibxrcd;
3200}
3201
3202int mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd)
3203{
3204 struct mlx5_ib_dev *dev = to_mdev(xrcd->device);
3205 u32 xrcdn = to_mxrcd(xrcd)->xrcdn;
3206 int err;
3207
Jack Morgenstein9603b612014-07-28 23:30:22 +03003208 err = mlx5_core_xrcd_dealloc(dev->mdev, xrcdn);
Eli Cohene126ba92013-07-07 17:25:49 +03003209 if (err) {
3210 mlx5_ib_warn(dev, "failed to dealloc xrcdn 0x%x\n", xrcdn);
3211 return err;
3212 }
3213
3214 kfree(xrcd);
3215
3216 return 0;
3217}