blob: 5c92d087b9f0084d58ad5fc6c62e9945f6c15fd1 [file] [log] [blame]
Eli Cohene126ba92013-07-07 17:25:49 +03001/*
Saeed Mahameed6cf0a152015-04-02 17:07:30 +03002 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
Eli Cohene126ba92013-07-07 17:25:49 +03003 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#include <linux/module.h>
34#include <rdma/ib_umem.h>
Achiad Shochat2811ba52015-12-23 18:47:24 +020035#include <rdma/ib_cache.h>
Haggai Abramovskycfb5e082016-01-14 19:12:57 +020036#include <rdma/ib_user_verbs.h>
Eli Cohene126ba92013-07-07 17:25:49 +030037#include "mlx5_ib.h"
38#include "user.h"
39
40/* not supported currently */
41static int wq_signature;
42
43enum {
44 MLX5_IB_ACK_REQ_FREQ = 8,
45};
46
47enum {
48 MLX5_IB_DEFAULT_SCHED_QUEUE = 0x83,
49 MLX5_IB_DEFAULT_QP0_SCHED_QUEUE = 0x3f,
50 MLX5_IB_LINK_TYPE_IB = 0,
51 MLX5_IB_LINK_TYPE_ETH = 1
52};
53
54enum {
55 MLX5_IB_SQ_STRIDE = 6,
56 MLX5_IB_CACHE_LINE_SIZE = 64,
57};
58
59static const u32 mlx5_ib_opcode[] = {
60 [IB_WR_SEND] = MLX5_OPCODE_SEND,
61 [IB_WR_SEND_WITH_IMM] = MLX5_OPCODE_SEND_IMM,
62 [IB_WR_RDMA_WRITE] = MLX5_OPCODE_RDMA_WRITE,
63 [IB_WR_RDMA_WRITE_WITH_IMM] = MLX5_OPCODE_RDMA_WRITE_IMM,
64 [IB_WR_RDMA_READ] = MLX5_OPCODE_RDMA_READ,
65 [IB_WR_ATOMIC_CMP_AND_SWP] = MLX5_OPCODE_ATOMIC_CS,
66 [IB_WR_ATOMIC_FETCH_AND_ADD] = MLX5_OPCODE_ATOMIC_FA,
67 [IB_WR_SEND_WITH_INV] = MLX5_OPCODE_SEND_INVAL,
68 [IB_WR_LOCAL_INV] = MLX5_OPCODE_UMR,
Sagi Grimberg8a187ee2015-10-13 19:11:26 +030069 [IB_WR_REG_MR] = MLX5_OPCODE_UMR,
Eli Cohene126ba92013-07-07 17:25:49 +030070 [IB_WR_MASKED_ATOMIC_CMP_AND_SWP] = MLX5_OPCODE_ATOMIC_MASKED_CS,
71 [IB_WR_MASKED_ATOMIC_FETCH_AND_ADD] = MLX5_OPCODE_ATOMIC_MASKED_FA,
72 [MLX5_IB_WR_UMR] = MLX5_OPCODE_UMR,
73};
74
Eli Cohene126ba92013-07-07 17:25:49 +030075
76static int is_qp0(enum ib_qp_type qp_type)
77{
78 return qp_type == IB_QPT_SMI;
79}
80
Eli Cohene126ba92013-07-07 17:25:49 +030081static int is_sqp(enum ib_qp_type qp_type)
82{
83 return is_qp0(qp_type) || is_qp1(qp_type);
84}
85
86static void *get_wqe(struct mlx5_ib_qp *qp, int offset)
87{
88 return mlx5_buf_offset(&qp->buf, offset);
89}
90
91static void *get_recv_wqe(struct mlx5_ib_qp *qp, int n)
92{
93 return get_wqe(qp, qp->rq.offset + (n << qp->rq.wqe_shift));
94}
95
96void *mlx5_get_send_wqe(struct mlx5_ib_qp *qp, int n)
97{
98 return get_wqe(qp, qp->sq.offset + (n << MLX5_IB_SQ_STRIDE));
99}
100
Haggai Eranc1395a22014-12-11 17:04:14 +0200101/**
102 * mlx5_ib_read_user_wqe() - Copy a user-space WQE to kernel space.
103 *
104 * @qp: QP to copy from.
105 * @send: copy from the send queue when non-zero, use the receive queue
106 * otherwise.
107 * @wqe_index: index to start copying from. For send work queues, the
108 * wqe_index is in units of MLX5_SEND_WQE_BB.
109 * For receive work queue, it is the number of work queue
110 * element in the queue.
111 * @buffer: destination buffer.
112 * @length: maximum number of bytes to copy.
113 *
114 * Copies at least a single WQE, but may copy more data.
115 *
116 * Return: the number of bytes copied, or an error code.
117 */
118int mlx5_ib_read_user_wqe(struct mlx5_ib_qp *qp, int send, int wqe_index,
119 void *buffer, u32 length)
120{
121 struct ib_device *ibdev = qp->ibqp.device;
122 struct mlx5_ib_dev *dev = to_mdev(ibdev);
123 struct mlx5_ib_wq *wq = send ? &qp->sq : &qp->rq;
124 size_t offset;
125 size_t wq_end;
126 struct ib_umem *umem = qp->umem;
127 u32 first_copy_length;
128 int wqe_length;
129 int ret;
130
131 if (wq->wqe_cnt == 0) {
132 mlx5_ib_dbg(dev, "mlx5_ib_read_user_wqe for a QP with wqe_cnt == 0. qp_type: 0x%x\n",
133 qp->ibqp.qp_type);
134 return -EINVAL;
135 }
136
137 offset = wq->offset + ((wqe_index % wq->wqe_cnt) << wq->wqe_shift);
138 wq_end = wq->offset + (wq->wqe_cnt << wq->wqe_shift);
139
140 if (send && length < sizeof(struct mlx5_wqe_ctrl_seg))
141 return -EINVAL;
142
143 if (offset > umem->length ||
144 (send && offset + sizeof(struct mlx5_wqe_ctrl_seg) > umem->length))
145 return -EINVAL;
146
147 first_copy_length = min_t(u32, offset + length, wq_end) - offset;
148 ret = ib_umem_copy_from(buffer, umem, offset, first_copy_length);
149 if (ret)
150 return ret;
151
152 if (send) {
153 struct mlx5_wqe_ctrl_seg *ctrl = buffer;
154 int ds = be32_to_cpu(ctrl->qpn_ds) & MLX5_WQE_CTRL_DS_MASK;
155
156 wqe_length = ds * MLX5_WQE_DS_UNITS;
157 } else {
158 wqe_length = 1 << wq->wqe_shift;
159 }
160
161 if (wqe_length <= first_copy_length)
162 return first_copy_length;
163
164 ret = ib_umem_copy_from(buffer + first_copy_length, umem, wq->offset,
165 wqe_length - first_copy_length);
166 if (ret)
167 return ret;
168
169 return wqe_length;
170}
171
Eli Cohene126ba92013-07-07 17:25:49 +0300172static void mlx5_ib_qp_event(struct mlx5_core_qp *qp, int type)
173{
174 struct ib_qp *ibqp = &to_mibqp(qp)->ibqp;
175 struct ib_event event;
176
177 if (type == MLX5_EVENT_TYPE_PATH_MIG)
178 to_mibqp(qp)->port = to_mibqp(qp)->alt_port;
179
180 if (ibqp->event_handler) {
181 event.device = ibqp->device;
182 event.element.qp = ibqp;
183 switch (type) {
184 case MLX5_EVENT_TYPE_PATH_MIG:
185 event.event = IB_EVENT_PATH_MIG;
186 break;
187 case MLX5_EVENT_TYPE_COMM_EST:
188 event.event = IB_EVENT_COMM_EST;
189 break;
190 case MLX5_EVENT_TYPE_SQ_DRAINED:
191 event.event = IB_EVENT_SQ_DRAINED;
192 break;
193 case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
194 event.event = IB_EVENT_QP_LAST_WQE_REACHED;
195 break;
196 case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
197 event.event = IB_EVENT_QP_FATAL;
198 break;
199 case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
200 event.event = IB_EVENT_PATH_MIG_ERR;
201 break;
202 case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
203 event.event = IB_EVENT_QP_REQ_ERR;
204 break;
205 case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
206 event.event = IB_EVENT_QP_ACCESS_ERR;
207 break;
208 default:
209 pr_warn("mlx5_ib: Unexpected event type %d on QP %06x\n", type, qp->qpn);
210 return;
211 }
212
213 ibqp->event_handler(&event, ibqp->qp_context);
214 }
215}
216
217static int set_rq_size(struct mlx5_ib_dev *dev, struct ib_qp_cap *cap,
218 int has_rq, struct mlx5_ib_qp *qp, struct mlx5_ib_create_qp *ucmd)
219{
220 int wqe_size;
221 int wq_size;
222
223 /* Sanity check RQ size before proceeding */
Saeed Mahameed938fe832015-05-28 22:28:41 +0300224 if (cap->max_recv_wr > (1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz)))
Eli Cohene126ba92013-07-07 17:25:49 +0300225 return -EINVAL;
226
227 if (!has_rq) {
228 qp->rq.max_gs = 0;
229 qp->rq.wqe_cnt = 0;
230 qp->rq.wqe_shift = 0;
231 } else {
232 if (ucmd) {
233 qp->rq.wqe_cnt = ucmd->rq_wqe_count;
234 qp->rq.wqe_shift = ucmd->rq_wqe_shift;
235 qp->rq.max_gs = (1 << qp->rq.wqe_shift) / sizeof(struct mlx5_wqe_data_seg) - qp->wq_sig;
236 qp->rq.max_post = qp->rq.wqe_cnt;
237 } else {
238 wqe_size = qp->wq_sig ? sizeof(struct mlx5_wqe_signature_seg) : 0;
239 wqe_size += cap->max_recv_sge * sizeof(struct mlx5_wqe_data_seg);
240 wqe_size = roundup_pow_of_two(wqe_size);
241 wq_size = roundup_pow_of_two(cap->max_recv_wr) * wqe_size;
242 wq_size = max_t(int, wq_size, MLX5_SEND_WQE_BB);
243 qp->rq.wqe_cnt = wq_size / wqe_size;
Saeed Mahameed938fe832015-05-28 22:28:41 +0300244 if (wqe_size > MLX5_CAP_GEN(dev->mdev, max_wqe_sz_rq)) {
Eli Cohene126ba92013-07-07 17:25:49 +0300245 mlx5_ib_dbg(dev, "wqe_size %d, max %d\n",
246 wqe_size,
Saeed Mahameed938fe832015-05-28 22:28:41 +0300247 MLX5_CAP_GEN(dev->mdev,
248 max_wqe_sz_rq));
Eli Cohene126ba92013-07-07 17:25:49 +0300249 return -EINVAL;
250 }
251 qp->rq.wqe_shift = ilog2(wqe_size);
252 qp->rq.max_gs = (1 << qp->rq.wqe_shift) / sizeof(struct mlx5_wqe_data_seg) - qp->wq_sig;
253 qp->rq.max_post = qp->rq.wqe_cnt;
254 }
255 }
256
257 return 0;
258}
259
260static int sq_overhead(enum ib_qp_type qp_type)
261{
Andi Shyti618af382013-07-16 15:35:01 +0200262 int size = 0;
Eli Cohene126ba92013-07-07 17:25:49 +0300263
264 switch (qp_type) {
265 case IB_QPT_XRC_INI:
Eli Cohenb125a542013-09-11 16:35:22 +0300266 size += sizeof(struct mlx5_wqe_xrc_seg);
Eli Cohene126ba92013-07-07 17:25:49 +0300267 /* fall through */
268 case IB_QPT_RC:
269 size += sizeof(struct mlx5_wqe_ctrl_seg) +
270 sizeof(struct mlx5_wqe_atomic_seg) +
271 sizeof(struct mlx5_wqe_raddr_seg);
272 break;
273
Eli Cohenb125a542013-09-11 16:35:22 +0300274 case IB_QPT_XRC_TGT:
275 return 0;
276
Eli Cohene126ba92013-07-07 17:25:49 +0300277 case IB_QPT_UC:
Eli Cohenb125a542013-09-11 16:35:22 +0300278 size += sizeof(struct mlx5_wqe_ctrl_seg) +
Eli Cohen9e65dc32014-01-28 14:52:47 +0200279 sizeof(struct mlx5_wqe_raddr_seg) +
280 sizeof(struct mlx5_wqe_umr_ctrl_seg) +
281 sizeof(struct mlx5_mkey_seg);
Eli Cohene126ba92013-07-07 17:25:49 +0300282 break;
283
284 case IB_QPT_UD:
285 case IB_QPT_SMI:
286 case IB_QPT_GSI:
Eli Cohenb125a542013-09-11 16:35:22 +0300287 size += sizeof(struct mlx5_wqe_ctrl_seg) +
Eli Cohene126ba92013-07-07 17:25:49 +0300288 sizeof(struct mlx5_wqe_datagram_seg);
289 break;
290
291 case MLX5_IB_QPT_REG_UMR:
Eli Cohenb125a542013-09-11 16:35:22 +0300292 size += sizeof(struct mlx5_wqe_ctrl_seg) +
Eli Cohene126ba92013-07-07 17:25:49 +0300293 sizeof(struct mlx5_wqe_umr_ctrl_seg) +
294 sizeof(struct mlx5_mkey_seg);
295 break;
296
297 default:
298 return -EINVAL;
299 }
300
301 return size;
302}
303
304static int calc_send_wqe(struct ib_qp_init_attr *attr)
305{
306 int inl_size = 0;
307 int size;
308
309 size = sq_overhead(attr->qp_type);
310 if (size < 0)
311 return size;
312
313 if (attr->cap.max_inline_data) {
314 inl_size = size + sizeof(struct mlx5_wqe_inline_seg) +
315 attr->cap.max_inline_data;
316 }
317
318 size += attr->cap.max_send_sge * sizeof(struct mlx5_wqe_data_seg);
Sagi Grimberge1e66cc2014-02-23 14:19:07 +0200319 if (attr->create_flags & IB_QP_CREATE_SIGNATURE_EN &&
320 ALIGN(max_t(int, inl_size, size), MLX5_SEND_WQE_BB) < MLX5_SIG_WQE_SIZE)
321 return MLX5_SIG_WQE_SIZE;
322 else
323 return ALIGN(max_t(int, inl_size, size), MLX5_SEND_WQE_BB);
Eli Cohene126ba92013-07-07 17:25:49 +0300324}
325
326static int calc_sq_size(struct mlx5_ib_dev *dev, struct ib_qp_init_attr *attr,
327 struct mlx5_ib_qp *qp)
328{
329 int wqe_size;
330 int wq_size;
331
332 if (!attr->cap.max_send_wr)
333 return 0;
334
335 wqe_size = calc_send_wqe(attr);
336 mlx5_ib_dbg(dev, "wqe_size %d\n", wqe_size);
337 if (wqe_size < 0)
338 return wqe_size;
339
Saeed Mahameed938fe832015-05-28 22:28:41 +0300340 if (wqe_size > MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq)) {
Eli Cohenb125a542013-09-11 16:35:22 +0300341 mlx5_ib_dbg(dev, "wqe_size(%d) > max_sq_desc_sz(%d)\n",
Saeed Mahameed938fe832015-05-28 22:28:41 +0300342 wqe_size, MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq));
Eli Cohene126ba92013-07-07 17:25:49 +0300343 return -EINVAL;
344 }
345
346 qp->max_inline_data = wqe_size - sq_overhead(attr->qp_type) -
347 sizeof(struct mlx5_wqe_inline_seg);
348 attr->cap.max_inline_data = qp->max_inline_data;
349
Sagi Grimberge1e66cc2014-02-23 14:19:07 +0200350 if (attr->create_flags & IB_QP_CREATE_SIGNATURE_EN)
351 qp->signature_en = true;
352
Eli Cohene126ba92013-07-07 17:25:49 +0300353 wq_size = roundup_pow_of_two(attr->cap.max_send_wr * wqe_size);
354 qp->sq.wqe_cnt = wq_size / MLX5_SEND_WQE_BB;
Saeed Mahameed938fe832015-05-28 22:28:41 +0300355 if (qp->sq.wqe_cnt > (1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz))) {
Eli Cohenb125a542013-09-11 16:35:22 +0300356 mlx5_ib_dbg(dev, "wqe count(%d) exceeds limits(%d)\n",
Saeed Mahameed938fe832015-05-28 22:28:41 +0300357 qp->sq.wqe_cnt,
358 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz));
Eli Cohenb125a542013-09-11 16:35:22 +0300359 return -ENOMEM;
360 }
Eli Cohene126ba92013-07-07 17:25:49 +0300361 qp->sq.wqe_shift = ilog2(MLX5_SEND_WQE_BB);
362 qp->sq.max_gs = attr->cap.max_send_sge;
Eli Cohenb125a542013-09-11 16:35:22 +0300363 qp->sq.max_post = wq_size / wqe_size;
364 attr->cap.max_send_wr = qp->sq.max_post;
Eli Cohene126ba92013-07-07 17:25:49 +0300365
366 return wq_size;
367}
368
369static int set_user_buf_size(struct mlx5_ib_dev *dev,
370 struct mlx5_ib_qp *qp,
371 struct mlx5_ib_create_qp *ucmd)
372{
373 int desc_sz = 1 << qp->sq.wqe_shift;
374
Saeed Mahameed938fe832015-05-28 22:28:41 +0300375 if (desc_sz > MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq)) {
Eli Cohene126ba92013-07-07 17:25:49 +0300376 mlx5_ib_warn(dev, "desc_sz %d, max_sq_desc_sz %d\n",
Saeed Mahameed938fe832015-05-28 22:28:41 +0300377 desc_sz, MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq));
Eli Cohene126ba92013-07-07 17:25:49 +0300378 return -EINVAL;
379 }
380
381 if (ucmd->sq_wqe_count && ((1 << ilog2(ucmd->sq_wqe_count)) != ucmd->sq_wqe_count)) {
382 mlx5_ib_warn(dev, "sq_wqe_count %d, sq_wqe_count %d\n",
383 ucmd->sq_wqe_count, ucmd->sq_wqe_count);
384 return -EINVAL;
385 }
386
387 qp->sq.wqe_cnt = ucmd->sq_wqe_count;
388
Saeed Mahameed938fe832015-05-28 22:28:41 +0300389 if (qp->sq.wqe_cnt > (1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz))) {
Eli Cohene126ba92013-07-07 17:25:49 +0300390 mlx5_ib_warn(dev, "wqe_cnt %d, max_wqes %d\n",
Saeed Mahameed938fe832015-05-28 22:28:41 +0300391 qp->sq.wqe_cnt,
392 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz));
Eli Cohene126ba92013-07-07 17:25:49 +0300393 return -EINVAL;
394 }
395
396 qp->buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) +
397 (qp->sq.wqe_cnt << 6);
398
399 return 0;
400}
401
402static int qp_has_rq(struct ib_qp_init_attr *attr)
403{
404 if (attr->qp_type == IB_QPT_XRC_INI ||
405 attr->qp_type == IB_QPT_XRC_TGT || attr->srq ||
406 attr->qp_type == MLX5_IB_QPT_REG_UMR ||
407 !attr->cap.max_recv_wr)
408 return 0;
409
410 return 1;
411}
412
Eli Cohenc1be5232014-01-14 17:45:12 +0200413static int first_med_uuar(void)
414{
415 return 1;
416}
417
418static int next_uuar(int n)
419{
420 n++;
421
422 while (((n % 4) & 2))
423 n++;
424
425 return n;
426}
427
428static int num_med_uuar(struct mlx5_uuar_info *uuari)
429{
430 int n;
431
432 n = uuari->num_uars * MLX5_NON_FP_BF_REGS_PER_PAGE -
433 uuari->num_low_latency_uuars - 1;
434
435 return n >= 0 ? n : 0;
436}
437
438static int max_uuari(struct mlx5_uuar_info *uuari)
439{
440 return uuari->num_uars * 4;
441}
442
443static int first_hi_uuar(struct mlx5_uuar_info *uuari)
444{
445 int med;
446 int i;
447 int t;
448
449 med = num_med_uuar(uuari);
450 for (t = 0, i = first_med_uuar();; i = next_uuar(i)) {
451 t++;
452 if (t == med)
453 return next_uuar(i);
454 }
455
456 return 0;
457}
458
Eli Cohene126ba92013-07-07 17:25:49 +0300459static int alloc_high_class_uuar(struct mlx5_uuar_info *uuari)
460{
Eli Cohene126ba92013-07-07 17:25:49 +0300461 int i;
462
Eli Cohenc1be5232014-01-14 17:45:12 +0200463 for (i = first_hi_uuar(uuari); i < max_uuari(uuari); i = next_uuar(i)) {
Eli Cohene126ba92013-07-07 17:25:49 +0300464 if (!test_bit(i, uuari->bitmap)) {
465 set_bit(i, uuari->bitmap);
466 uuari->count[i]++;
467 return i;
468 }
469 }
470
471 return -ENOMEM;
472}
473
474static int alloc_med_class_uuar(struct mlx5_uuar_info *uuari)
475{
Eli Cohenc1be5232014-01-14 17:45:12 +0200476 int minidx = first_med_uuar();
Eli Cohene126ba92013-07-07 17:25:49 +0300477 int i;
478
Eli Cohenc1be5232014-01-14 17:45:12 +0200479 for (i = first_med_uuar(); i < first_hi_uuar(uuari); i = next_uuar(i)) {
Eli Cohene126ba92013-07-07 17:25:49 +0300480 if (uuari->count[i] < uuari->count[minidx])
481 minidx = i;
482 }
483
484 uuari->count[minidx]++;
485 return minidx;
486}
487
488static int alloc_uuar(struct mlx5_uuar_info *uuari,
489 enum mlx5_ib_latency_class lat)
490{
491 int uuarn = -EINVAL;
492
493 mutex_lock(&uuari->lock);
494 switch (lat) {
495 case MLX5_IB_LATENCY_CLASS_LOW:
496 uuarn = 0;
497 uuari->count[uuarn]++;
498 break;
499
500 case MLX5_IB_LATENCY_CLASS_MEDIUM:
Eli Cohen78c0f982014-01-30 13:49:48 +0200501 if (uuari->ver < 2)
502 uuarn = -ENOMEM;
503 else
504 uuarn = alloc_med_class_uuar(uuari);
Eli Cohene126ba92013-07-07 17:25:49 +0300505 break;
506
507 case MLX5_IB_LATENCY_CLASS_HIGH:
Eli Cohen78c0f982014-01-30 13:49:48 +0200508 if (uuari->ver < 2)
509 uuarn = -ENOMEM;
510 else
511 uuarn = alloc_high_class_uuar(uuari);
Eli Cohene126ba92013-07-07 17:25:49 +0300512 break;
513
514 case MLX5_IB_LATENCY_CLASS_FAST_PATH:
515 uuarn = 2;
516 break;
517 }
518 mutex_unlock(&uuari->lock);
519
520 return uuarn;
521}
522
523static void free_med_class_uuar(struct mlx5_uuar_info *uuari, int uuarn)
524{
525 clear_bit(uuarn, uuari->bitmap);
526 --uuari->count[uuarn];
527}
528
529static void free_high_class_uuar(struct mlx5_uuar_info *uuari, int uuarn)
530{
531 clear_bit(uuarn, uuari->bitmap);
532 --uuari->count[uuarn];
533}
534
535static void free_uuar(struct mlx5_uuar_info *uuari, int uuarn)
536{
537 int nuuars = uuari->num_uars * MLX5_BF_REGS_PER_PAGE;
538 int high_uuar = nuuars - uuari->num_low_latency_uuars;
539
540 mutex_lock(&uuari->lock);
541 if (uuarn == 0) {
542 --uuari->count[uuarn];
543 goto out;
544 }
545
546 if (uuarn < high_uuar) {
547 free_med_class_uuar(uuari, uuarn);
548 goto out;
549 }
550
551 free_high_class_uuar(uuari, uuarn);
552
553out:
554 mutex_unlock(&uuari->lock);
555}
556
557static enum mlx5_qp_state to_mlx5_state(enum ib_qp_state state)
558{
559 switch (state) {
560 case IB_QPS_RESET: return MLX5_QP_STATE_RST;
561 case IB_QPS_INIT: return MLX5_QP_STATE_INIT;
562 case IB_QPS_RTR: return MLX5_QP_STATE_RTR;
563 case IB_QPS_RTS: return MLX5_QP_STATE_RTS;
564 case IB_QPS_SQD: return MLX5_QP_STATE_SQD;
565 case IB_QPS_SQE: return MLX5_QP_STATE_SQER;
566 case IB_QPS_ERR: return MLX5_QP_STATE_ERR;
567 default: return -1;
568 }
569}
570
571static int to_mlx5_st(enum ib_qp_type type)
572{
573 switch (type) {
574 case IB_QPT_RC: return MLX5_QP_ST_RC;
575 case IB_QPT_UC: return MLX5_QP_ST_UC;
576 case IB_QPT_UD: return MLX5_QP_ST_UD;
577 case MLX5_IB_QPT_REG_UMR: return MLX5_QP_ST_REG_UMR;
578 case IB_QPT_XRC_INI:
579 case IB_QPT_XRC_TGT: return MLX5_QP_ST_XRC;
580 case IB_QPT_SMI: return MLX5_QP_ST_QP0;
581 case IB_QPT_GSI: return MLX5_QP_ST_QP1;
582 case IB_QPT_RAW_IPV6: return MLX5_QP_ST_RAW_IPV6;
583 case IB_QPT_RAW_ETHERTYPE: return MLX5_QP_ST_RAW_ETHERTYPE;
584 case IB_QPT_RAW_PACKET:
585 case IB_QPT_MAX:
586 default: return -EINVAL;
587 }
588}
589
590static int uuarn_to_uar_index(struct mlx5_uuar_info *uuari, int uuarn)
591{
592 return uuari->uars[uuarn / MLX5_BF_REGS_PER_PAGE].index;
593}
594
595static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
596 struct mlx5_ib_qp *qp, struct ib_udata *udata,
597 struct mlx5_create_qp_mbox_in **in,
598 struct mlx5_ib_create_qp_resp *resp, int *inlen)
599{
600 struct mlx5_ib_ucontext *context;
601 struct mlx5_ib_create_qp ucmd;
Eli Cohen9e9c47d2014-01-14 17:45:21 +0200602 int page_shift = 0;
Eli Cohene126ba92013-07-07 17:25:49 +0300603 int uar_index;
604 int npages;
Eli Cohen9e9c47d2014-01-14 17:45:21 +0200605 u32 offset = 0;
Eli Cohene126ba92013-07-07 17:25:49 +0300606 int uuarn;
Eli Cohen9e9c47d2014-01-14 17:45:21 +0200607 int ncont = 0;
Eli Cohene126ba92013-07-07 17:25:49 +0300608 int err;
609
610 err = ib_copy_from_udata(&ucmd, udata, sizeof(ucmd));
611 if (err) {
612 mlx5_ib_dbg(dev, "copy failed\n");
613 return err;
614 }
615
616 context = to_mucontext(pd->uobject->context);
617 /*
618 * TBD: should come from the verbs when we have the API
619 */
Leon Romanovsky051f2632015-12-20 12:16:11 +0200620 if (qp->flags & MLX5_IB_QP_CROSS_CHANNEL)
621 /* In CROSS_CHANNEL CQ and QP must use the same UAR */
622 uuarn = MLX5_CROSS_CHANNEL_UUAR;
623 else {
624 uuarn = alloc_uuar(&context->uuari, MLX5_IB_LATENCY_CLASS_HIGH);
Eli Cohene126ba92013-07-07 17:25:49 +0300625 if (uuarn < 0) {
Leon Romanovsky051f2632015-12-20 12:16:11 +0200626 mlx5_ib_dbg(dev, "failed to allocate low latency UUAR\n");
627 mlx5_ib_dbg(dev, "reverting to medium latency\n");
628 uuarn = alloc_uuar(&context->uuari, MLX5_IB_LATENCY_CLASS_MEDIUM);
Eli Cohenc1be5232014-01-14 17:45:12 +0200629 if (uuarn < 0) {
Leon Romanovsky051f2632015-12-20 12:16:11 +0200630 mlx5_ib_dbg(dev, "failed to allocate medium latency UUAR\n");
631 mlx5_ib_dbg(dev, "reverting to high latency\n");
632 uuarn = alloc_uuar(&context->uuari, MLX5_IB_LATENCY_CLASS_LOW);
633 if (uuarn < 0) {
634 mlx5_ib_warn(dev, "uuar allocation failed\n");
635 return uuarn;
636 }
Eli Cohenc1be5232014-01-14 17:45:12 +0200637 }
Eli Cohene126ba92013-07-07 17:25:49 +0300638 }
639 }
640
641 uar_index = uuarn_to_uar_index(&context->uuari, uuarn);
642 mlx5_ib_dbg(dev, "uuarn 0x%x, uar_index 0x%x\n", uuarn, uar_index);
643
Haggai Eran48fea832014-05-22 14:50:11 +0300644 qp->rq.offset = 0;
645 qp->sq.wqe_shift = ilog2(MLX5_SEND_WQE_BB);
646 qp->sq.offset = qp->rq.wqe_cnt << qp->rq.wqe_shift;
647
Eli Cohene126ba92013-07-07 17:25:49 +0300648 err = set_user_buf_size(dev, qp, &ucmd);
649 if (err)
650 goto err_uuar;
651
Eli Cohen9e9c47d2014-01-14 17:45:21 +0200652 if (ucmd.buf_addr && qp->buf_size) {
653 qp->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr,
654 qp->buf_size, 0, 0);
655 if (IS_ERR(qp->umem)) {
656 mlx5_ib_dbg(dev, "umem_get failed\n");
657 err = PTR_ERR(qp->umem);
658 goto err_uuar;
659 }
660 } else {
661 qp->umem = NULL;
Eli Cohene126ba92013-07-07 17:25:49 +0300662 }
663
Eli Cohen9e9c47d2014-01-14 17:45:21 +0200664 if (qp->umem) {
665 mlx5_ib_cont_pages(qp->umem, ucmd.buf_addr, &npages, &page_shift,
666 &ncont, NULL);
667 err = mlx5_ib_get_buf_offset(ucmd.buf_addr, page_shift, &offset);
668 if (err) {
669 mlx5_ib_warn(dev, "bad offset\n");
670 goto err_umem;
671 }
672 mlx5_ib_dbg(dev, "addr 0x%llx, size %d, npages %d, page_shift %d, ncont %d, offset %d\n",
673 ucmd.buf_addr, qp->buf_size, npages, page_shift, ncont, offset);
Eli Cohene126ba92013-07-07 17:25:49 +0300674 }
Eli Cohene126ba92013-07-07 17:25:49 +0300675
676 *inlen = sizeof(**in) + sizeof(*(*in)->pas) * ncont;
677 *in = mlx5_vzalloc(*inlen);
678 if (!*in) {
679 err = -ENOMEM;
680 goto err_umem;
681 }
Eli Cohen9e9c47d2014-01-14 17:45:21 +0200682 if (qp->umem)
683 mlx5_ib_populate_pas(dev, qp->umem, page_shift, (*in)->pas, 0);
Eli Cohene126ba92013-07-07 17:25:49 +0300684 (*in)->ctx.log_pg_sz_remote_qpn =
Eli Cohen1b77d2b2013-10-24 12:01:03 +0300685 cpu_to_be32((page_shift - MLX5_ADAPTER_PAGE_SHIFT) << 24);
Eli Cohene126ba92013-07-07 17:25:49 +0300686 (*in)->ctx.params2 = cpu_to_be32(offset << 6);
687
688 (*in)->ctx.qp_counter_set_usr_page = cpu_to_be32(uar_index);
689 resp->uuar_index = uuarn;
690 qp->uuarn = uuarn;
691
692 err = mlx5_ib_db_map_user(context, ucmd.db_addr, &qp->db);
693 if (err) {
694 mlx5_ib_dbg(dev, "map failed\n");
695 goto err_free;
696 }
697
698 err = ib_copy_to_udata(udata, resp, sizeof(*resp));
699 if (err) {
700 mlx5_ib_dbg(dev, "copy failed\n");
701 goto err_unmap;
702 }
703 qp->create_type = MLX5_QP_USER;
704
705 return 0;
706
707err_unmap:
708 mlx5_ib_db_unmap_user(context, &qp->db);
709
710err_free:
Al Viro479163f2014-11-20 08:13:57 +0000711 kvfree(*in);
Eli Cohene126ba92013-07-07 17:25:49 +0300712
713err_umem:
Eli Cohen9e9c47d2014-01-14 17:45:21 +0200714 if (qp->umem)
715 ib_umem_release(qp->umem);
Eli Cohene126ba92013-07-07 17:25:49 +0300716
717err_uuar:
718 free_uuar(&context->uuari, uuarn);
719 return err;
720}
721
722static void destroy_qp_user(struct ib_pd *pd, struct mlx5_ib_qp *qp)
723{
724 struct mlx5_ib_ucontext *context;
725
726 context = to_mucontext(pd->uobject->context);
727 mlx5_ib_db_unmap_user(context, &qp->db);
Eli Cohen9e9c47d2014-01-14 17:45:21 +0200728 if (qp->umem)
729 ib_umem_release(qp->umem);
Eli Cohene126ba92013-07-07 17:25:49 +0300730 free_uuar(&context->uuari, qp->uuarn);
731}
732
733static int create_kernel_qp(struct mlx5_ib_dev *dev,
734 struct ib_qp_init_attr *init_attr,
735 struct mlx5_ib_qp *qp,
736 struct mlx5_create_qp_mbox_in **in, int *inlen)
737{
738 enum mlx5_ib_latency_class lc = MLX5_IB_LATENCY_CLASS_LOW;
739 struct mlx5_uuar_info *uuari;
740 int uar_index;
741 int uuarn;
742 int err;
743
Jack Morgenstein9603b612014-07-28 23:30:22 +0300744 uuari = &dev->mdev->priv.uuari;
Or Gerlitz652c1a02014-06-25 16:44:14 +0300745 if (init_attr->create_flags & ~(IB_QP_CREATE_SIGNATURE_EN | IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK))
Eli Cohen1a4c3a32014-02-06 17:41:25 +0200746 return -EINVAL;
Eli Cohene126ba92013-07-07 17:25:49 +0300747
748 if (init_attr->qp_type == MLX5_IB_QPT_REG_UMR)
749 lc = MLX5_IB_LATENCY_CLASS_FAST_PATH;
750
751 uuarn = alloc_uuar(uuari, lc);
752 if (uuarn < 0) {
753 mlx5_ib_dbg(dev, "\n");
754 return -ENOMEM;
755 }
756
757 qp->bf = &uuari->bfs[uuarn];
758 uar_index = qp->bf->uar->index;
759
760 err = calc_sq_size(dev, init_attr, qp);
761 if (err < 0) {
762 mlx5_ib_dbg(dev, "err %d\n", err);
763 goto err_uuar;
764 }
765
766 qp->rq.offset = 0;
767 qp->sq.offset = qp->rq.wqe_cnt << qp->rq.wqe_shift;
768 qp->buf_size = err + (qp->rq.wqe_cnt << qp->rq.wqe_shift);
769
Amir Vadai64ffaa22015-05-28 22:28:38 +0300770 err = mlx5_buf_alloc(dev->mdev, qp->buf_size, &qp->buf);
Eli Cohene126ba92013-07-07 17:25:49 +0300771 if (err) {
772 mlx5_ib_dbg(dev, "err %d\n", err);
773 goto err_uuar;
774 }
775
776 qp->sq.qend = mlx5_get_send_wqe(qp, qp->sq.wqe_cnt);
777 *inlen = sizeof(**in) + sizeof(*(*in)->pas) * qp->buf.npages;
778 *in = mlx5_vzalloc(*inlen);
779 if (!*in) {
780 err = -ENOMEM;
781 goto err_buf;
782 }
783 (*in)->ctx.qp_counter_set_usr_page = cpu_to_be32(uar_index);
Eli Cohen1b77d2b2013-10-24 12:01:03 +0300784 (*in)->ctx.log_pg_sz_remote_qpn =
785 cpu_to_be32((qp->buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT) << 24);
Eli Cohene126ba92013-07-07 17:25:49 +0300786 /* Set "fast registration enabled" for all kernel QPs */
787 (*in)->ctx.params1 |= cpu_to_be32(1 << 11);
788 (*in)->ctx.sq_crq_size |= cpu_to_be16(1 << 4);
789
790 mlx5_fill_page_array(&qp->buf, (*in)->pas);
791
Jack Morgenstein9603b612014-07-28 23:30:22 +0300792 err = mlx5_db_alloc(dev->mdev, &qp->db);
Eli Cohene126ba92013-07-07 17:25:49 +0300793 if (err) {
794 mlx5_ib_dbg(dev, "err %d\n", err);
795 goto err_free;
796 }
797
Eli Cohene126ba92013-07-07 17:25:49 +0300798 qp->sq.wrid = kmalloc(qp->sq.wqe_cnt * sizeof(*qp->sq.wrid), GFP_KERNEL);
799 qp->sq.wr_data = kmalloc(qp->sq.wqe_cnt * sizeof(*qp->sq.wr_data), GFP_KERNEL);
800 qp->rq.wrid = kmalloc(qp->rq.wqe_cnt * sizeof(*qp->rq.wrid), GFP_KERNEL);
801 qp->sq.w_list = kmalloc(qp->sq.wqe_cnt * sizeof(*qp->sq.w_list), GFP_KERNEL);
802 qp->sq.wqe_head = kmalloc(qp->sq.wqe_cnt * sizeof(*qp->sq.wqe_head), GFP_KERNEL);
803
804 if (!qp->sq.wrid || !qp->sq.wr_data || !qp->rq.wrid ||
805 !qp->sq.w_list || !qp->sq.wqe_head) {
806 err = -ENOMEM;
807 goto err_wrid;
808 }
809 qp->create_type = MLX5_QP_KERNEL;
810
811 return 0;
812
813err_wrid:
Jack Morgenstein9603b612014-07-28 23:30:22 +0300814 mlx5_db_free(dev->mdev, &qp->db);
Eli Cohene126ba92013-07-07 17:25:49 +0300815 kfree(qp->sq.wqe_head);
816 kfree(qp->sq.w_list);
817 kfree(qp->sq.wrid);
818 kfree(qp->sq.wr_data);
819 kfree(qp->rq.wrid);
820
821err_free:
Al Viro479163f2014-11-20 08:13:57 +0000822 kvfree(*in);
Eli Cohene126ba92013-07-07 17:25:49 +0300823
824err_buf:
Jack Morgenstein9603b612014-07-28 23:30:22 +0300825 mlx5_buf_free(dev->mdev, &qp->buf);
Eli Cohene126ba92013-07-07 17:25:49 +0300826
827err_uuar:
Jack Morgenstein9603b612014-07-28 23:30:22 +0300828 free_uuar(&dev->mdev->priv.uuari, uuarn);
Eli Cohene126ba92013-07-07 17:25:49 +0300829 return err;
830}
831
832static void destroy_qp_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp)
833{
Jack Morgenstein9603b612014-07-28 23:30:22 +0300834 mlx5_db_free(dev->mdev, &qp->db);
Eli Cohene126ba92013-07-07 17:25:49 +0300835 kfree(qp->sq.wqe_head);
836 kfree(qp->sq.w_list);
837 kfree(qp->sq.wrid);
838 kfree(qp->sq.wr_data);
839 kfree(qp->rq.wrid);
Jack Morgenstein9603b612014-07-28 23:30:22 +0300840 mlx5_buf_free(dev->mdev, &qp->buf);
841 free_uuar(&dev->mdev->priv.uuari, qp->bf->uuarn);
Eli Cohene126ba92013-07-07 17:25:49 +0300842}
843
844static __be32 get_rx_type(struct mlx5_ib_qp *qp, struct ib_qp_init_attr *attr)
845{
846 if (attr->srq || (attr->qp_type == IB_QPT_XRC_TGT) ||
847 (attr->qp_type == IB_QPT_XRC_INI))
848 return cpu_to_be32(MLX5_SRQ_RQ);
849 else if (!qp->has_rq)
850 return cpu_to_be32(MLX5_ZERO_LEN_RQ);
851 else
852 return cpu_to_be32(MLX5_NON_ZERO_RQ);
853}
854
855static int is_connected(enum ib_qp_type qp_type)
856{
857 if (qp_type == IB_QPT_RC || qp_type == IB_QPT_UC)
858 return 1;
859
860 return 0;
861}
862
863static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
864 struct ib_qp_init_attr *init_attr,
865 struct ib_udata *udata, struct mlx5_ib_qp *qp)
866{
867 struct mlx5_ib_resources *devr = &dev->devr;
Saeed Mahameed938fe832015-05-28 22:28:41 +0300868 struct mlx5_core_dev *mdev = dev->mdev;
Eli Cohene126ba92013-07-07 17:25:49 +0300869 struct mlx5_ib_create_qp_resp resp;
870 struct mlx5_create_qp_mbox_in *in;
871 struct mlx5_ib_create_qp ucmd;
872 int inlen = sizeof(*in);
873 int err;
Haggai Abramovskycfb5e082016-01-14 19:12:57 +0200874 u32 uidx = MLX5_IB_DEFAULT_UIDX;
875 void *qpc;
Eli Cohene126ba92013-07-07 17:25:49 +0300876
Haggai Eran6aec21f2014-12-11 17:04:23 +0200877 mlx5_ib_odp_create_qp(qp);
878
Eli Cohene126ba92013-07-07 17:25:49 +0300879 mutex_init(&qp->mutex);
880 spin_lock_init(&qp->sq.lock);
881 spin_lock_init(&qp->rq.lock);
882
Eli Cohenf360d882014-04-02 00:10:16 +0300883 if (init_attr->create_flags & IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK) {
Saeed Mahameed938fe832015-05-28 22:28:41 +0300884 if (!MLX5_CAP_GEN(mdev, block_lb_mc)) {
Eli Cohenf360d882014-04-02 00:10:16 +0300885 mlx5_ib_dbg(dev, "block multicast loopback isn't supported\n");
886 return -EINVAL;
887 } else {
888 qp->flags |= MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK;
889 }
890 }
891
Leon Romanovsky051f2632015-12-20 12:16:11 +0200892 if (init_attr->create_flags &
893 (IB_QP_CREATE_CROSS_CHANNEL |
894 IB_QP_CREATE_MANAGED_SEND |
895 IB_QP_CREATE_MANAGED_RECV)) {
896 if (!MLX5_CAP_GEN(mdev, cd)) {
897 mlx5_ib_dbg(dev, "cross-channel isn't supported\n");
898 return -EINVAL;
899 }
900 if (init_attr->create_flags & IB_QP_CREATE_CROSS_CHANNEL)
901 qp->flags |= MLX5_IB_QP_CROSS_CHANNEL;
902 if (init_attr->create_flags & IB_QP_CREATE_MANAGED_SEND)
903 qp->flags |= MLX5_IB_QP_MANAGED_SEND;
904 if (init_attr->create_flags & IB_QP_CREATE_MANAGED_RECV)
905 qp->flags |= MLX5_IB_QP_MANAGED_RECV;
906 }
Eli Cohene126ba92013-07-07 17:25:49 +0300907 if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR)
908 qp->sq_signal_bits = MLX5_WQE_CTRL_CQ_UPDATE;
909
910 if (pd && pd->uobject) {
911 if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {
912 mlx5_ib_dbg(dev, "copy failed\n");
913 return -EFAULT;
914 }
915
Haggai Abramovskycfb5e082016-01-14 19:12:57 +0200916 err = get_qp_user_index(to_mucontext(pd->uobject->context),
917 &ucmd, udata->inlen, &uidx);
918 if (err)
919 return err;
920
Eli Cohene126ba92013-07-07 17:25:49 +0300921 qp->wq_sig = !!(ucmd.flags & MLX5_QP_FLAG_SIGNATURE);
922 qp->scat_cqe = !!(ucmd.flags & MLX5_QP_FLAG_SCATTER_CQE);
923 } else {
924 qp->wq_sig = !!wq_signature;
925 }
926
927 qp->has_rq = qp_has_rq(init_attr);
928 err = set_rq_size(dev, &init_attr->cap, qp->has_rq,
929 qp, (pd && pd->uobject) ? &ucmd : NULL);
930 if (err) {
931 mlx5_ib_dbg(dev, "err %d\n", err);
932 return err;
933 }
934
935 if (pd) {
936 if (pd->uobject) {
Saeed Mahameed938fe832015-05-28 22:28:41 +0300937 __u32 max_wqes =
938 1 << MLX5_CAP_GEN(mdev, log_max_qp_sz);
Eli Cohene126ba92013-07-07 17:25:49 +0300939 mlx5_ib_dbg(dev, "requested sq_wqe_count (%d)\n", ucmd.sq_wqe_count);
940 if (ucmd.rq_wqe_shift != qp->rq.wqe_shift ||
941 ucmd.rq_wqe_count != qp->rq.wqe_cnt) {
942 mlx5_ib_dbg(dev, "invalid rq params\n");
943 return -EINVAL;
944 }
Saeed Mahameed938fe832015-05-28 22:28:41 +0300945 if (ucmd.sq_wqe_count > max_wqes) {
Eli Cohene126ba92013-07-07 17:25:49 +0300946 mlx5_ib_dbg(dev, "requested sq_wqe_count (%d) > max allowed (%d)\n",
Saeed Mahameed938fe832015-05-28 22:28:41 +0300947 ucmd.sq_wqe_count, max_wqes);
Eli Cohene126ba92013-07-07 17:25:49 +0300948 return -EINVAL;
949 }
950 err = create_user_qp(dev, pd, qp, udata, &in, &resp, &inlen);
951 if (err)
952 mlx5_ib_dbg(dev, "err %d\n", err);
953 } else {
954 err = create_kernel_qp(dev, init_attr, qp, &in, &inlen);
955 if (err)
956 mlx5_ib_dbg(dev, "err %d\n", err);
Eli Cohene126ba92013-07-07 17:25:49 +0300957 }
958
959 if (err)
960 return err;
961 } else {
962 in = mlx5_vzalloc(sizeof(*in));
963 if (!in)
964 return -ENOMEM;
965
966 qp->create_type = MLX5_QP_EMPTY;
967 }
968
969 if (is_sqp(init_attr->qp_type))
970 qp->port = init_attr->port_num;
971
972 in->ctx.flags = cpu_to_be32(to_mlx5_st(init_attr->qp_type) << 16 |
973 MLX5_QP_PM_MIGRATED << 11);
974
975 if (init_attr->qp_type != MLX5_IB_QPT_REG_UMR)
976 in->ctx.flags_pd = cpu_to_be32(to_mpd(pd ? pd : devr->p0)->pdn);
977 else
978 in->ctx.flags_pd = cpu_to_be32(MLX5_QP_LAT_SENSITIVE);
979
980 if (qp->wq_sig)
981 in->ctx.flags_pd |= cpu_to_be32(MLX5_QP_ENABLE_SIG);
982
Eli Cohenf360d882014-04-02 00:10:16 +0300983 if (qp->flags & MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK)
984 in->ctx.flags_pd |= cpu_to_be32(MLX5_QP_BLOCK_MCAST);
985
Leon Romanovsky051f2632015-12-20 12:16:11 +0200986 if (qp->flags & MLX5_IB_QP_CROSS_CHANNEL)
987 in->ctx.params2 |= cpu_to_be32(MLX5_QP_BIT_CC_MASTER);
988 if (qp->flags & MLX5_IB_QP_MANAGED_SEND)
989 in->ctx.params2 |= cpu_to_be32(MLX5_QP_BIT_CC_SLAVE_SEND);
990 if (qp->flags & MLX5_IB_QP_MANAGED_RECV)
991 in->ctx.params2 |= cpu_to_be32(MLX5_QP_BIT_CC_SLAVE_RECV);
992
Eli Cohene126ba92013-07-07 17:25:49 +0300993 if (qp->scat_cqe && is_connected(init_attr->qp_type)) {
994 int rcqe_sz;
995 int scqe_sz;
996
997 rcqe_sz = mlx5_ib_get_cqe_size(dev, init_attr->recv_cq);
998 scqe_sz = mlx5_ib_get_cqe_size(dev, init_attr->send_cq);
999
1000 if (rcqe_sz == 128)
1001 in->ctx.cs_res = MLX5_RES_SCAT_DATA64_CQE;
1002 else
1003 in->ctx.cs_res = MLX5_RES_SCAT_DATA32_CQE;
1004
1005 if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) {
1006 if (scqe_sz == 128)
1007 in->ctx.cs_req = MLX5_REQ_SCAT_DATA64_CQE;
1008 else
1009 in->ctx.cs_req = MLX5_REQ_SCAT_DATA32_CQE;
1010 }
1011 }
1012
1013 if (qp->rq.wqe_cnt) {
1014 in->ctx.rq_size_stride = (qp->rq.wqe_shift - 4);
1015 in->ctx.rq_size_stride |= ilog2(qp->rq.wqe_cnt) << 3;
1016 }
1017
1018 in->ctx.rq_type_srqn = get_rx_type(qp, init_attr);
1019
1020 if (qp->sq.wqe_cnt)
1021 in->ctx.sq_crq_size |= cpu_to_be16(ilog2(qp->sq.wqe_cnt) << 11);
1022 else
1023 in->ctx.sq_crq_size |= cpu_to_be16(0x8000);
1024
1025 /* Set default resources */
1026 switch (init_attr->qp_type) {
1027 case IB_QPT_XRC_TGT:
1028 in->ctx.cqn_recv = cpu_to_be32(to_mcq(devr->c0)->mcq.cqn);
1029 in->ctx.cqn_send = cpu_to_be32(to_mcq(devr->c0)->mcq.cqn);
1030 in->ctx.rq_type_srqn |= cpu_to_be32(to_msrq(devr->s0)->msrq.srqn);
1031 in->ctx.xrcd = cpu_to_be32(to_mxrcd(init_attr->xrcd)->xrcdn);
1032 break;
1033 case IB_QPT_XRC_INI:
1034 in->ctx.cqn_recv = cpu_to_be32(to_mcq(devr->c0)->mcq.cqn);
1035 in->ctx.xrcd = cpu_to_be32(to_mxrcd(devr->x1)->xrcdn);
1036 in->ctx.rq_type_srqn |= cpu_to_be32(to_msrq(devr->s0)->msrq.srqn);
1037 break;
1038 default:
1039 if (init_attr->srq) {
1040 in->ctx.xrcd = cpu_to_be32(to_mxrcd(devr->x0)->xrcdn);
1041 in->ctx.rq_type_srqn |= cpu_to_be32(to_msrq(init_attr->srq)->msrq.srqn);
1042 } else {
1043 in->ctx.xrcd = cpu_to_be32(to_mxrcd(devr->x1)->xrcdn);
Haggai Abramonvsky4aa17b22015-06-04 19:30:48 +03001044 in->ctx.rq_type_srqn |=
1045 cpu_to_be32(to_msrq(devr->s1)->msrq.srqn);
Eli Cohene126ba92013-07-07 17:25:49 +03001046 }
1047 }
1048
1049 if (init_attr->send_cq)
1050 in->ctx.cqn_send = cpu_to_be32(to_mcq(init_attr->send_cq)->mcq.cqn);
1051
1052 if (init_attr->recv_cq)
1053 in->ctx.cqn_recv = cpu_to_be32(to_mcq(init_attr->recv_cq)->mcq.cqn);
1054
1055 in->ctx.db_rec_addr = cpu_to_be64(qp->db.dma);
1056
Haggai Abramovskycfb5e082016-01-14 19:12:57 +02001057 if (MLX5_CAP_GEN(mdev, cqe_version) == MLX5_CQE_VERSION_V1) {
1058 qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
1059 /* 0xffffff means we ask to work with cqe version 0 */
1060 MLX5_SET(qpc, qpc, user_index, uidx);
1061 }
1062
Jack Morgenstein9603b612014-07-28 23:30:22 +03001063 err = mlx5_core_create_qp(dev->mdev, &qp->mqp, in, inlen);
Eli Cohene126ba92013-07-07 17:25:49 +03001064 if (err) {
1065 mlx5_ib_dbg(dev, "create qp failed\n");
1066 goto err_create;
1067 }
1068
Al Viro479163f2014-11-20 08:13:57 +00001069 kvfree(in);
Eli Cohene126ba92013-07-07 17:25:49 +03001070 /* Hardware wants QPN written in big-endian order (after
1071 * shifting) for send doorbell. Precompute this value to save
1072 * a little bit when posting sends.
1073 */
1074 qp->doorbell_qpn = swab32(qp->mqp.qpn << 8);
1075
1076 qp->mqp.event = mlx5_ib_qp_event;
1077
1078 return 0;
1079
1080err_create:
1081 if (qp->create_type == MLX5_QP_USER)
1082 destroy_qp_user(pd, qp);
1083 else if (qp->create_type == MLX5_QP_KERNEL)
1084 destroy_qp_kernel(dev, qp);
1085
Al Viro479163f2014-11-20 08:13:57 +00001086 kvfree(in);
Eli Cohene126ba92013-07-07 17:25:49 +03001087 return err;
1088}
1089
1090static void mlx5_ib_lock_cqs(struct mlx5_ib_cq *send_cq, struct mlx5_ib_cq *recv_cq)
1091 __acquires(&send_cq->lock) __acquires(&recv_cq->lock)
1092{
1093 if (send_cq) {
1094 if (recv_cq) {
1095 if (send_cq->mcq.cqn < recv_cq->mcq.cqn) {
1096 spin_lock_irq(&send_cq->lock);
1097 spin_lock_nested(&recv_cq->lock,
1098 SINGLE_DEPTH_NESTING);
1099 } else if (send_cq->mcq.cqn == recv_cq->mcq.cqn) {
1100 spin_lock_irq(&send_cq->lock);
1101 __acquire(&recv_cq->lock);
1102 } else {
1103 spin_lock_irq(&recv_cq->lock);
1104 spin_lock_nested(&send_cq->lock,
1105 SINGLE_DEPTH_NESTING);
1106 }
1107 } else {
1108 spin_lock_irq(&send_cq->lock);
Eli Cohen6a4f1392014-12-02 12:26:18 +02001109 __acquire(&recv_cq->lock);
Eli Cohene126ba92013-07-07 17:25:49 +03001110 }
1111 } else if (recv_cq) {
1112 spin_lock_irq(&recv_cq->lock);
Eli Cohen6a4f1392014-12-02 12:26:18 +02001113 __acquire(&send_cq->lock);
1114 } else {
1115 __acquire(&send_cq->lock);
1116 __acquire(&recv_cq->lock);
Eli Cohene126ba92013-07-07 17:25:49 +03001117 }
1118}
1119
1120static void mlx5_ib_unlock_cqs(struct mlx5_ib_cq *send_cq, struct mlx5_ib_cq *recv_cq)
1121 __releases(&send_cq->lock) __releases(&recv_cq->lock)
1122{
1123 if (send_cq) {
1124 if (recv_cq) {
1125 if (send_cq->mcq.cqn < recv_cq->mcq.cqn) {
1126 spin_unlock(&recv_cq->lock);
1127 spin_unlock_irq(&send_cq->lock);
1128 } else if (send_cq->mcq.cqn == recv_cq->mcq.cqn) {
1129 __release(&recv_cq->lock);
1130 spin_unlock_irq(&send_cq->lock);
1131 } else {
1132 spin_unlock(&send_cq->lock);
1133 spin_unlock_irq(&recv_cq->lock);
1134 }
1135 } else {
Eli Cohen6a4f1392014-12-02 12:26:18 +02001136 __release(&recv_cq->lock);
Eli Cohene126ba92013-07-07 17:25:49 +03001137 spin_unlock_irq(&send_cq->lock);
1138 }
1139 } else if (recv_cq) {
Eli Cohen6a4f1392014-12-02 12:26:18 +02001140 __release(&send_cq->lock);
Eli Cohene126ba92013-07-07 17:25:49 +03001141 spin_unlock_irq(&recv_cq->lock);
Eli Cohen6a4f1392014-12-02 12:26:18 +02001142 } else {
1143 __release(&recv_cq->lock);
1144 __release(&send_cq->lock);
Eli Cohene126ba92013-07-07 17:25:49 +03001145 }
1146}
1147
1148static struct mlx5_ib_pd *get_pd(struct mlx5_ib_qp *qp)
1149{
1150 return to_mpd(qp->ibqp.pd);
1151}
1152
1153static void get_cqs(struct mlx5_ib_qp *qp,
1154 struct mlx5_ib_cq **send_cq, struct mlx5_ib_cq **recv_cq)
1155{
1156 switch (qp->ibqp.qp_type) {
1157 case IB_QPT_XRC_TGT:
1158 *send_cq = NULL;
1159 *recv_cq = NULL;
1160 break;
1161 case MLX5_IB_QPT_REG_UMR:
1162 case IB_QPT_XRC_INI:
1163 *send_cq = to_mcq(qp->ibqp.send_cq);
1164 *recv_cq = NULL;
1165 break;
1166
1167 case IB_QPT_SMI:
1168 case IB_QPT_GSI:
1169 case IB_QPT_RC:
1170 case IB_QPT_UC:
1171 case IB_QPT_UD:
1172 case IB_QPT_RAW_IPV6:
1173 case IB_QPT_RAW_ETHERTYPE:
1174 *send_cq = to_mcq(qp->ibqp.send_cq);
1175 *recv_cq = to_mcq(qp->ibqp.recv_cq);
1176 break;
1177
1178 case IB_QPT_RAW_PACKET:
1179 case IB_QPT_MAX:
1180 default:
1181 *send_cq = NULL;
1182 *recv_cq = NULL;
1183 break;
1184 }
1185}
1186
1187static void destroy_qp_common(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp)
1188{
1189 struct mlx5_ib_cq *send_cq, *recv_cq;
1190 struct mlx5_modify_qp_mbox_in *in;
1191 int err;
1192
1193 in = kzalloc(sizeof(*in), GFP_KERNEL);
1194 if (!in)
1195 return;
Eli Cohen7bef7ad2015-04-02 17:07:21 +03001196
Haggai Eran6aec21f2014-12-11 17:04:23 +02001197 if (qp->state != IB_QPS_RESET) {
1198 mlx5_ib_qp_disable_pagefaults(qp);
Jack Morgenstein9603b612014-07-28 23:30:22 +03001199 if (mlx5_core_qp_modify(dev->mdev, to_mlx5_state(qp->state),
Haggai Abramonvskyc3c6c9c2015-04-02 17:07:20 +03001200 MLX5_QP_STATE_RST, in, 0, &qp->mqp))
Eli Cohene126ba92013-07-07 17:25:49 +03001201 mlx5_ib_warn(dev, "mlx5_ib: modify QP %06x to RESET failed\n",
1202 qp->mqp.qpn);
Haggai Eran6aec21f2014-12-11 17:04:23 +02001203 }
Eli Cohene126ba92013-07-07 17:25:49 +03001204
1205 get_cqs(qp, &send_cq, &recv_cq);
1206
1207 if (qp->create_type == MLX5_QP_KERNEL) {
1208 mlx5_ib_lock_cqs(send_cq, recv_cq);
1209 __mlx5_ib_cq_clean(recv_cq, qp->mqp.qpn,
1210 qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
1211 if (send_cq != recv_cq)
1212 __mlx5_ib_cq_clean(send_cq, qp->mqp.qpn, NULL);
1213 mlx5_ib_unlock_cqs(send_cq, recv_cq);
1214 }
1215
Jack Morgenstein9603b612014-07-28 23:30:22 +03001216 err = mlx5_core_destroy_qp(dev->mdev, &qp->mqp);
Eli Cohene126ba92013-07-07 17:25:49 +03001217 if (err)
1218 mlx5_ib_warn(dev, "failed to destroy QP 0x%x\n", qp->mqp.qpn);
1219 kfree(in);
1220
1221
1222 if (qp->create_type == MLX5_QP_KERNEL)
1223 destroy_qp_kernel(dev, qp);
1224 else if (qp->create_type == MLX5_QP_USER)
1225 destroy_qp_user(&get_pd(qp)->ibpd, qp);
1226}
1227
1228static const char *ib_qp_type_str(enum ib_qp_type type)
1229{
1230 switch (type) {
1231 case IB_QPT_SMI:
1232 return "IB_QPT_SMI";
1233 case IB_QPT_GSI:
1234 return "IB_QPT_GSI";
1235 case IB_QPT_RC:
1236 return "IB_QPT_RC";
1237 case IB_QPT_UC:
1238 return "IB_QPT_UC";
1239 case IB_QPT_UD:
1240 return "IB_QPT_UD";
1241 case IB_QPT_RAW_IPV6:
1242 return "IB_QPT_RAW_IPV6";
1243 case IB_QPT_RAW_ETHERTYPE:
1244 return "IB_QPT_RAW_ETHERTYPE";
1245 case IB_QPT_XRC_INI:
1246 return "IB_QPT_XRC_INI";
1247 case IB_QPT_XRC_TGT:
1248 return "IB_QPT_XRC_TGT";
1249 case IB_QPT_RAW_PACKET:
1250 return "IB_QPT_RAW_PACKET";
1251 case MLX5_IB_QPT_REG_UMR:
1252 return "MLX5_IB_QPT_REG_UMR";
1253 case IB_QPT_MAX:
1254 default:
1255 return "Invalid QP type";
1256 }
1257}
1258
1259struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd,
1260 struct ib_qp_init_attr *init_attr,
1261 struct ib_udata *udata)
1262{
1263 struct mlx5_ib_dev *dev;
1264 struct mlx5_ib_qp *qp;
1265 u16 xrcdn = 0;
1266 int err;
1267
1268 if (pd) {
1269 dev = to_mdev(pd->device);
1270 } else {
1271 /* being cautious here */
1272 if (init_attr->qp_type != IB_QPT_XRC_TGT &&
1273 init_attr->qp_type != MLX5_IB_QPT_REG_UMR) {
1274 pr_warn("%s: no PD for transport %s\n", __func__,
1275 ib_qp_type_str(init_attr->qp_type));
1276 return ERR_PTR(-EINVAL);
1277 }
1278 dev = to_mdev(to_mxrcd(init_attr->xrcd)->ibxrcd.device);
1279 }
1280
1281 switch (init_attr->qp_type) {
1282 case IB_QPT_XRC_TGT:
1283 case IB_QPT_XRC_INI:
Saeed Mahameed938fe832015-05-28 22:28:41 +03001284 if (!MLX5_CAP_GEN(dev->mdev, xrc)) {
Eli Cohene126ba92013-07-07 17:25:49 +03001285 mlx5_ib_dbg(dev, "XRC not supported\n");
1286 return ERR_PTR(-ENOSYS);
1287 }
1288 init_attr->recv_cq = NULL;
1289 if (init_attr->qp_type == IB_QPT_XRC_TGT) {
1290 xrcdn = to_mxrcd(init_attr->xrcd)->xrcdn;
1291 init_attr->send_cq = NULL;
1292 }
1293
1294 /* fall through */
1295 case IB_QPT_RC:
1296 case IB_QPT_UC:
1297 case IB_QPT_UD:
1298 case IB_QPT_SMI:
1299 case IB_QPT_GSI:
1300 case MLX5_IB_QPT_REG_UMR:
1301 qp = kzalloc(sizeof(*qp), GFP_KERNEL);
1302 if (!qp)
1303 return ERR_PTR(-ENOMEM);
1304
1305 err = create_qp_common(dev, pd, init_attr, udata, qp);
1306 if (err) {
1307 mlx5_ib_dbg(dev, "create_qp_common failed\n");
1308 kfree(qp);
1309 return ERR_PTR(err);
1310 }
1311
1312 if (is_qp0(init_attr->qp_type))
1313 qp->ibqp.qp_num = 0;
1314 else if (is_qp1(init_attr->qp_type))
1315 qp->ibqp.qp_num = 1;
1316 else
1317 qp->ibqp.qp_num = qp->mqp.qpn;
1318
1319 mlx5_ib_dbg(dev, "ib qpnum 0x%x, mlx qpn 0x%x, rcqn 0x%x, scqn 0x%x\n",
1320 qp->ibqp.qp_num, qp->mqp.qpn, to_mcq(init_attr->recv_cq)->mcq.cqn,
1321 to_mcq(init_attr->send_cq)->mcq.cqn);
1322
1323 qp->xrcdn = xrcdn;
1324
1325 break;
1326
1327 case IB_QPT_RAW_IPV6:
1328 case IB_QPT_RAW_ETHERTYPE:
1329 case IB_QPT_RAW_PACKET:
1330 case IB_QPT_MAX:
1331 default:
1332 mlx5_ib_dbg(dev, "unsupported qp type %d\n",
1333 init_attr->qp_type);
1334 /* Don't support raw QPs */
1335 return ERR_PTR(-EINVAL);
1336 }
1337
1338 return &qp->ibqp;
1339}
1340
1341int mlx5_ib_destroy_qp(struct ib_qp *qp)
1342{
1343 struct mlx5_ib_dev *dev = to_mdev(qp->device);
1344 struct mlx5_ib_qp *mqp = to_mqp(qp);
1345
1346 destroy_qp_common(dev, mqp);
1347
1348 kfree(mqp);
1349
1350 return 0;
1351}
1352
1353static __be32 to_mlx5_access_flags(struct mlx5_ib_qp *qp, const struct ib_qp_attr *attr,
1354 int attr_mask)
1355{
1356 u32 hw_access_flags = 0;
1357 u8 dest_rd_atomic;
1358 u32 access_flags;
1359
1360 if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
1361 dest_rd_atomic = attr->max_dest_rd_atomic;
1362 else
1363 dest_rd_atomic = qp->resp_depth;
1364
1365 if (attr_mask & IB_QP_ACCESS_FLAGS)
1366 access_flags = attr->qp_access_flags;
1367 else
1368 access_flags = qp->atomic_rd_en;
1369
1370 if (!dest_rd_atomic)
1371 access_flags &= IB_ACCESS_REMOTE_WRITE;
1372
1373 if (access_flags & IB_ACCESS_REMOTE_READ)
1374 hw_access_flags |= MLX5_QP_BIT_RRE;
1375 if (access_flags & IB_ACCESS_REMOTE_ATOMIC)
1376 hw_access_flags |= (MLX5_QP_BIT_RAE | MLX5_ATOMIC_MODE_CX);
1377 if (access_flags & IB_ACCESS_REMOTE_WRITE)
1378 hw_access_flags |= MLX5_QP_BIT_RWE;
1379
1380 return cpu_to_be32(hw_access_flags);
1381}
1382
1383enum {
1384 MLX5_PATH_FLAG_FL = 1 << 0,
1385 MLX5_PATH_FLAG_FREE_AR = 1 << 1,
1386 MLX5_PATH_FLAG_COUNTER = 1 << 2,
1387};
1388
1389static int ib_rate_to_mlx5(struct mlx5_ib_dev *dev, u8 rate)
1390{
1391 if (rate == IB_RATE_PORT_CURRENT) {
1392 return 0;
1393 } else if (rate < IB_RATE_2_5_GBPS || rate > IB_RATE_300_GBPS) {
1394 return -EINVAL;
1395 } else {
1396 while (rate != IB_RATE_2_5_GBPS &&
1397 !(1 << (rate + MLX5_STAT_RATE_OFFSET) &
Saeed Mahameed938fe832015-05-28 22:28:41 +03001398 MLX5_CAP_GEN(dev->mdev, stat_rate_support)))
Eli Cohene126ba92013-07-07 17:25:49 +03001399 --rate;
1400 }
1401
1402 return rate + MLX5_STAT_RATE_OFFSET;
1403}
1404
1405static int mlx5_set_path(struct mlx5_ib_dev *dev, const struct ib_ah_attr *ah,
1406 struct mlx5_qp_path *path, u8 port, int attr_mask,
1407 u32 path_flags, const struct ib_qp_attr *attr)
1408{
Achiad Shochat2811ba52015-12-23 18:47:24 +02001409 enum rdma_link_layer ll = rdma_port_get_link_layer(&dev->ib_dev, port);
Eli Cohene126ba92013-07-07 17:25:49 +03001410 int err;
1411
Eli Cohene126ba92013-07-07 17:25:49 +03001412 if (attr_mask & IB_QP_PKEY_INDEX)
1413 path->pkey_index = attr->pkey_index;
1414
Eli Cohene126ba92013-07-07 17:25:49 +03001415 if (ah->ah_flags & IB_AH_GRH) {
Saeed Mahameed938fe832015-05-28 22:28:41 +03001416 if (ah->grh.sgid_index >=
1417 dev->mdev->port_caps[port - 1].gid_table_len) {
Joe Perchesf4f01b52015-05-08 15:58:07 -07001418 pr_err("sgid_index (%u) too large. max is %d\n",
Saeed Mahameed938fe832015-05-28 22:28:41 +03001419 ah->grh.sgid_index,
1420 dev->mdev->port_caps[port - 1].gid_table_len);
Eli Cohenf83b4262014-09-14 16:47:54 +03001421 return -EINVAL;
1422 }
Achiad Shochat2811ba52015-12-23 18:47:24 +02001423 }
1424
1425 if (ll == IB_LINK_LAYER_ETHERNET) {
1426 if (!(ah->ah_flags & IB_AH_GRH))
1427 return -EINVAL;
1428 memcpy(path->rmac, ah->dmac, sizeof(ah->dmac));
1429 path->udp_sport = mlx5_get_roce_udp_sport(dev, port,
1430 ah->grh.sgid_index);
1431 path->dci_cfi_prio_sl = (ah->sl & 0x7) << 4;
1432 } else {
1433 path->fl = (path_flags & MLX5_PATH_FLAG_FL) ? 0x80 : 0;
1434 path->free_ar = (path_flags & MLX5_PATH_FLAG_FREE_AR) ? 0x80 :
1435 0;
1436 path->rlid = cpu_to_be16(ah->dlid);
1437 path->grh_mlid = ah->src_path_bits & 0x7f;
1438 if (ah->ah_flags & IB_AH_GRH)
1439 path->grh_mlid |= 1 << 7;
1440 path->dci_cfi_prio_sl = ah->sl & 0xf;
1441 }
1442
1443 if (ah->ah_flags & IB_AH_GRH) {
Eli Cohene126ba92013-07-07 17:25:49 +03001444 path->mgid_index = ah->grh.sgid_index;
1445 path->hop_limit = ah->grh.hop_limit;
1446 path->tclass_flowlabel =
1447 cpu_to_be32((ah->grh.traffic_class << 20) |
1448 (ah->grh.flow_label));
1449 memcpy(path->rgid, ah->grh.dgid.raw, 16);
1450 }
1451
1452 err = ib_rate_to_mlx5(dev, ah->static_rate);
1453 if (err < 0)
1454 return err;
1455 path->static_rate = err;
1456 path->port = port;
1457
Eli Cohene126ba92013-07-07 17:25:49 +03001458 if (attr_mask & IB_QP_TIMEOUT)
1459 path->ackto_lt = attr->timeout << 3;
1460
Eli Cohene126ba92013-07-07 17:25:49 +03001461 return 0;
1462}
1463
1464static enum mlx5_qp_optpar opt_mask[MLX5_QP_NUM_STATE][MLX5_QP_NUM_STATE][MLX5_QP_ST_MAX] = {
1465 [MLX5_QP_STATE_INIT] = {
1466 [MLX5_QP_STATE_INIT] = {
1467 [MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_RRE |
1468 MLX5_QP_OPTPAR_RAE |
1469 MLX5_QP_OPTPAR_RWE |
1470 MLX5_QP_OPTPAR_PKEY_INDEX |
1471 MLX5_QP_OPTPAR_PRI_PORT,
1472 [MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_RWE |
1473 MLX5_QP_OPTPAR_PKEY_INDEX |
1474 MLX5_QP_OPTPAR_PRI_PORT,
1475 [MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_PKEY_INDEX |
1476 MLX5_QP_OPTPAR_Q_KEY |
1477 MLX5_QP_OPTPAR_PRI_PORT,
1478 },
1479 [MLX5_QP_STATE_RTR] = {
1480 [MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH |
1481 MLX5_QP_OPTPAR_RRE |
1482 MLX5_QP_OPTPAR_RAE |
1483 MLX5_QP_OPTPAR_RWE |
1484 MLX5_QP_OPTPAR_PKEY_INDEX,
1485 [MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH |
1486 MLX5_QP_OPTPAR_RWE |
1487 MLX5_QP_OPTPAR_PKEY_INDEX,
1488 [MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_PKEY_INDEX |
1489 MLX5_QP_OPTPAR_Q_KEY,
1490 [MLX5_QP_ST_MLX] = MLX5_QP_OPTPAR_PKEY_INDEX |
1491 MLX5_QP_OPTPAR_Q_KEY,
Eli Cohena4774e92013-09-11 16:35:32 +03001492 [MLX5_QP_ST_XRC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH |
1493 MLX5_QP_OPTPAR_RRE |
1494 MLX5_QP_OPTPAR_RAE |
1495 MLX5_QP_OPTPAR_RWE |
1496 MLX5_QP_OPTPAR_PKEY_INDEX,
Eli Cohene126ba92013-07-07 17:25:49 +03001497 },
1498 },
1499 [MLX5_QP_STATE_RTR] = {
1500 [MLX5_QP_STATE_RTS] = {
1501 [MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH |
1502 MLX5_QP_OPTPAR_RRE |
1503 MLX5_QP_OPTPAR_RAE |
1504 MLX5_QP_OPTPAR_RWE |
1505 MLX5_QP_OPTPAR_PM_STATE |
1506 MLX5_QP_OPTPAR_RNR_TIMEOUT,
1507 [MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH |
1508 MLX5_QP_OPTPAR_RWE |
1509 MLX5_QP_OPTPAR_PM_STATE,
1510 [MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_Q_KEY,
1511 },
1512 },
1513 [MLX5_QP_STATE_RTS] = {
1514 [MLX5_QP_STATE_RTS] = {
1515 [MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_RRE |
1516 MLX5_QP_OPTPAR_RAE |
1517 MLX5_QP_OPTPAR_RWE |
1518 MLX5_QP_OPTPAR_RNR_TIMEOUT |
Eli Cohenc2a34312013-10-24 12:01:02 +03001519 MLX5_QP_OPTPAR_PM_STATE |
1520 MLX5_QP_OPTPAR_ALT_ADDR_PATH,
Eli Cohene126ba92013-07-07 17:25:49 +03001521 [MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_RWE |
Eli Cohenc2a34312013-10-24 12:01:02 +03001522 MLX5_QP_OPTPAR_PM_STATE |
1523 MLX5_QP_OPTPAR_ALT_ADDR_PATH,
Eli Cohene126ba92013-07-07 17:25:49 +03001524 [MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_Q_KEY |
1525 MLX5_QP_OPTPAR_SRQN |
1526 MLX5_QP_OPTPAR_CQN_RCV,
1527 },
1528 },
1529 [MLX5_QP_STATE_SQER] = {
1530 [MLX5_QP_STATE_RTS] = {
1531 [MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_Q_KEY,
1532 [MLX5_QP_ST_MLX] = MLX5_QP_OPTPAR_Q_KEY,
Eli Cohen75959f52013-09-11 16:35:31 +03001533 [MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_RWE,
Eli Cohena4774e92013-09-11 16:35:32 +03001534 [MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_RNR_TIMEOUT |
1535 MLX5_QP_OPTPAR_RWE |
1536 MLX5_QP_OPTPAR_RAE |
1537 MLX5_QP_OPTPAR_RRE,
Eli Cohene126ba92013-07-07 17:25:49 +03001538 },
1539 },
1540};
1541
1542static int ib_nr_to_mlx5_nr(int ib_mask)
1543{
1544 switch (ib_mask) {
1545 case IB_QP_STATE:
1546 return 0;
1547 case IB_QP_CUR_STATE:
1548 return 0;
1549 case IB_QP_EN_SQD_ASYNC_NOTIFY:
1550 return 0;
1551 case IB_QP_ACCESS_FLAGS:
1552 return MLX5_QP_OPTPAR_RWE | MLX5_QP_OPTPAR_RRE |
1553 MLX5_QP_OPTPAR_RAE;
1554 case IB_QP_PKEY_INDEX:
1555 return MLX5_QP_OPTPAR_PKEY_INDEX;
1556 case IB_QP_PORT:
1557 return MLX5_QP_OPTPAR_PRI_PORT;
1558 case IB_QP_QKEY:
1559 return MLX5_QP_OPTPAR_Q_KEY;
1560 case IB_QP_AV:
1561 return MLX5_QP_OPTPAR_PRIMARY_ADDR_PATH |
1562 MLX5_QP_OPTPAR_PRI_PORT;
1563 case IB_QP_PATH_MTU:
1564 return 0;
1565 case IB_QP_TIMEOUT:
1566 return MLX5_QP_OPTPAR_ACK_TIMEOUT;
1567 case IB_QP_RETRY_CNT:
1568 return MLX5_QP_OPTPAR_RETRY_COUNT;
1569 case IB_QP_RNR_RETRY:
1570 return MLX5_QP_OPTPAR_RNR_RETRY;
1571 case IB_QP_RQ_PSN:
1572 return 0;
1573 case IB_QP_MAX_QP_RD_ATOMIC:
1574 return MLX5_QP_OPTPAR_SRA_MAX;
1575 case IB_QP_ALT_PATH:
1576 return MLX5_QP_OPTPAR_ALT_ADDR_PATH;
1577 case IB_QP_MIN_RNR_TIMER:
1578 return MLX5_QP_OPTPAR_RNR_TIMEOUT;
1579 case IB_QP_SQ_PSN:
1580 return 0;
1581 case IB_QP_MAX_DEST_RD_ATOMIC:
1582 return MLX5_QP_OPTPAR_RRA_MAX | MLX5_QP_OPTPAR_RWE |
1583 MLX5_QP_OPTPAR_RRE | MLX5_QP_OPTPAR_RAE;
1584 case IB_QP_PATH_MIG_STATE:
1585 return MLX5_QP_OPTPAR_PM_STATE;
1586 case IB_QP_CAP:
1587 return 0;
1588 case IB_QP_DEST_QPN:
1589 return 0;
1590 }
1591 return 0;
1592}
1593
1594static int ib_mask_to_mlx5_opt(int ib_mask)
1595{
1596 int result = 0;
1597 int i;
1598
1599 for (i = 0; i < 8 * sizeof(int); i++) {
1600 if ((1 << i) & ib_mask)
1601 result |= ib_nr_to_mlx5_nr(1 << i);
1602 }
1603
1604 return result;
1605}
1606
1607static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
1608 const struct ib_qp_attr *attr, int attr_mask,
1609 enum ib_qp_state cur_state, enum ib_qp_state new_state)
1610{
1611 struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
1612 struct mlx5_ib_qp *qp = to_mqp(ibqp);
1613 struct mlx5_ib_cq *send_cq, *recv_cq;
1614 struct mlx5_qp_context *context;
1615 struct mlx5_modify_qp_mbox_in *in;
1616 struct mlx5_ib_pd *pd;
1617 enum mlx5_qp_state mlx5_cur, mlx5_new;
1618 enum mlx5_qp_optpar optpar;
1619 int sqd_event;
1620 int mlx5_st;
1621 int err;
1622
1623 in = kzalloc(sizeof(*in), GFP_KERNEL);
1624 if (!in)
1625 return -ENOMEM;
1626
1627 context = &in->ctx;
1628 err = to_mlx5_st(ibqp->qp_type);
1629 if (err < 0)
1630 goto out;
1631
1632 context->flags = cpu_to_be32(err << 16);
1633
1634 if (!(attr_mask & IB_QP_PATH_MIG_STATE)) {
1635 context->flags |= cpu_to_be32(MLX5_QP_PM_MIGRATED << 11);
1636 } else {
1637 switch (attr->path_mig_state) {
1638 case IB_MIG_MIGRATED:
1639 context->flags |= cpu_to_be32(MLX5_QP_PM_MIGRATED << 11);
1640 break;
1641 case IB_MIG_REARM:
1642 context->flags |= cpu_to_be32(MLX5_QP_PM_REARM << 11);
1643 break;
1644 case IB_MIG_ARMED:
1645 context->flags |= cpu_to_be32(MLX5_QP_PM_ARMED << 11);
1646 break;
1647 }
1648 }
1649
1650 if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_SMI) {
1651 context->mtu_msgmax = (IB_MTU_256 << 5) | 8;
1652 } else if (ibqp->qp_type == IB_QPT_UD ||
1653 ibqp->qp_type == MLX5_IB_QPT_REG_UMR) {
1654 context->mtu_msgmax = (IB_MTU_4096 << 5) | 12;
1655 } else if (attr_mask & IB_QP_PATH_MTU) {
1656 if (attr->path_mtu < IB_MTU_256 ||
1657 attr->path_mtu > IB_MTU_4096) {
1658 mlx5_ib_warn(dev, "invalid mtu %d\n", attr->path_mtu);
1659 err = -EINVAL;
1660 goto out;
1661 }
Saeed Mahameed938fe832015-05-28 22:28:41 +03001662 context->mtu_msgmax = (attr->path_mtu << 5) |
1663 (u8)MLX5_CAP_GEN(dev->mdev, log_max_msg);
Eli Cohene126ba92013-07-07 17:25:49 +03001664 }
1665
1666 if (attr_mask & IB_QP_DEST_QPN)
1667 context->log_pg_sz_remote_qpn = cpu_to_be32(attr->dest_qp_num);
1668
1669 if (attr_mask & IB_QP_PKEY_INDEX)
1670 context->pri_path.pkey_index = attr->pkey_index;
1671
1672 /* todo implement counter_index functionality */
1673
1674 if (is_sqp(ibqp->qp_type))
1675 context->pri_path.port = qp->port;
1676
1677 if (attr_mask & IB_QP_PORT)
1678 context->pri_path.port = attr->port_num;
1679
1680 if (attr_mask & IB_QP_AV) {
1681 err = mlx5_set_path(dev, &attr->ah_attr, &context->pri_path,
1682 attr_mask & IB_QP_PORT ? attr->port_num : qp->port,
1683 attr_mask, 0, attr);
1684 if (err)
1685 goto out;
1686 }
1687
1688 if (attr_mask & IB_QP_TIMEOUT)
1689 context->pri_path.ackto_lt |= attr->timeout << 3;
1690
1691 if (attr_mask & IB_QP_ALT_PATH) {
1692 err = mlx5_set_path(dev, &attr->alt_ah_attr, &context->alt_path,
1693 attr->alt_port_num, attr_mask, 0, attr);
1694 if (err)
1695 goto out;
1696 }
1697
1698 pd = get_pd(qp);
1699 get_cqs(qp, &send_cq, &recv_cq);
1700
1701 context->flags_pd = cpu_to_be32(pd ? pd->pdn : to_mpd(dev->devr.p0)->pdn);
1702 context->cqn_send = send_cq ? cpu_to_be32(send_cq->mcq.cqn) : 0;
1703 context->cqn_recv = recv_cq ? cpu_to_be32(recv_cq->mcq.cqn) : 0;
1704 context->params1 = cpu_to_be32(MLX5_IB_ACK_REQ_FREQ << 28);
1705
1706 if (attr_mask & IB_QP_RNR_RETRY)
1707 context->params1 |= cpu_to_be32(attr->rnr_retry << 13);
1708
1709 if (attr_mask & IB_QP_RETRY_CNT)
1710 context->params1 |= cpu_to_be32(attr->retry_cnt << 16);
1711
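/*
 * The read/atomic resource depths are carried as a log2 value:
 * fls(x - 1) is ceil(log2(x)), so e.g. max_rd_atomic = 4 is encoded
 * as 2 in bits 21-23 of params1 (and likewise for max_dest_rd_atomic
 * in params2 below).
 */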
1712 if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
1713 if (attr->max_rd_atomic)
1714 context->params1 |=
1715 cpu_to_be32(fls(attr->max_rd_atomic - 1) << 21);
1716 }
1717
1718 if (attr_mask & IB_QP_SQ_PSN)
1719 context->next_send_psn = cpu_to_be32(attr->sq_psn);
1720
1721 if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
1722 if (attr->max_dest_rd_atomic)
1723 context->params2 |=
1724 cpu_to_be32(fls(attr->max_dest_rd_atomic - 1) << 21);
1725 }
1726
1727 if (attr_mask & (IB_QP_ACCESS_FLAGS | IB_QP_MAX_DEST_RD_ATOMIC))
1728 context->params2 |= to_mlx5_access_flags(qp, attr, attr_mask);
1729
1730 if (attr_mask & IB_QP_MIN_RNR_TIMER)
1731 context->rnr_nextrecvpsn |= cpu_to_be32(attr->min_rnr_timer << 24);
1732
1733 if (attr_mask & IB_QP_RQ_PSN)
1734 context->rnr_nextrecvpsn |= cpu_to_be32(attr->rq_psn);
1735
1736 if (attr_mask & IB_QP_QKEY)
1737 context->qkey = cpu_to_be32(attr->qkey);
1738
1739 if (qp->rq.wqe_cnt && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
1740 context->db_rec_addr = cpu_to_be64(qp->db.dma);
1741
1742 if (cur_state == IB_QPS_RTS && new_state == IB_QPS_SQD &&
1743 attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY && attr->en_sqd_async_notify)
1744 sqd_event = 1;
1745 else
1746 sqd_event = 0;
1747
1748 if (!ibqp->uobject && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
1749 context->sq_crq_size |= cpu_to_be16(1 << 4);
1750
1751
1752 mlx5_cur = to_mlx5_state(cur_state);
1753 mlx5_new = to_mlx5_state(new_state);
1754 mlx5_st = to_mlx5_st(ibqp->qp_type);
Eli Cohen07c91132013-10-24 12:01:01 +03001755 if (mlx5_st < 0)
Eli Cohene126ba92013-07-07 17:25:49 +03001756 goto out;
1757
Haggai Eran6aec21f2014-12-11 17:04:23 +02001758 /* If moving to a reset or error state, we must disable page faults on
1759 * this QP and flush all current page faults. Otherwise a stale page
1760 * fault may attempt to work on this QP after it is reset and moved
1761 * again to RTS, and may cause the driver and the device to get out of
1762 * sync. */
1763 if (cur_state != IB_QPS_RESET && cur_state != IB_QPS_ERR &&
1764 (new_state == IB_QPS_RESET || new_state == IB_QPS_ERR))
1765 mlx5_ib_qp_disable_pagefaults(qp);
1766
Eli Cohene126ba92013-07-07 17:25:49 +03001767 optpar = ib_mask_to_mlx5_opt(attr_mask);
1768 optpar &= opt_mask[mlx5_cur][mlx5_new][mlx5_st];
1769 in->optparam = cpu_to_be32(optpar);
Jack Morgenstein9603b612014-07-28 23:30:22 +03001770 err = mlx5_core_qp_modify(dev->mdev, to_mlx5_state(cur_state),
Eli Cohene126ba92013-07-07 17:25:49 +03001771 to_mlx5_state(new_state), in, sqd_event,
1772 &qp->mqp);
1773 if (err)
1774 goto out;
1775
Haggai Eran6aec21f2014-12-11 17:04:23 +02001776 if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
1777 mlx5_ib_qp_enable_pagefaults(qp);
1778
Eli Cohene126ba92013-07-07 17:25:49 +03001779 qp->state = new_state;
1780
1781 if (attr_mask & IB_QP_ACCESS_FLAGS)
1782 qp->atomic_rd_en = attr->qp_access_flags;
1783 if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
1784 qp->resp_depth = attr->max_dest_rd_atomic;
1785 if (attr_mask & IB_QP_PORT)
1786 qp->port = attr->port_num;
1787 if (attr_mask & IB_QP_ALT_PATH)
1788 qp->alt_port = attr->alt_port_num;
1789
1790 /*
1791 * If we moved a kernel QP to RESET, clean up all old CQ
1792 * entries and reinitialize the QP.
1793 */
1794 if (new_state == IB_QPS_RESET && !ibqp->uobject) {
1795 mlx5_ib_cq_clean(recv_cq, qp->mqp.qpn,
1796 ibqp->srq ? to_msrq(ibqp->srq) : NULL);
1797 if (send_cq != recv_cq)
1798 mlx5_ib_cq_clean(send_cq, qp->mqp.qpn, NULL);
1799
1800 qp->rq.head = 0;
1801 qp->rq.tail = 0;
1802 qp->sq.head = 0;
1803 qp->sq.tail = 0;
1804 qp->sq.cur_post = 0;
1805 qp->sq.last_poll = 0;
1806 qp->db.db[MLX5_RCV_DBR] = 0;
1807 qp->db.db[MLX5_SND_DBR] = 0;
1808 }
1809
1810out:
1811 kfree(in);
1812 return err;
1813}
1814
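/*
 * Verbs entry point: validate the requested transition and the
 * attribute ranges against device caps (port count, pkey table length,
 * log_max_ra_res_qp/log_max_ra_req_qp) under qp->mutex, then defer the
 * actual context update to __mlx5_ib_modify_qp().  A RESET->RESET
 * "transition" is accepted as a no-op.
 */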
1815int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
1816 int attr_mask, struct ib_udata *udata)
1817{
1818 struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
1819 struct mlx5_ib_qp *qp = to_mqp(ibqp);
1820 enum ib_qp_state cur_state, new_state;
1821 int err = -EINVAL;
1822 int port;
Achiad Shochat2811ba52015-12-23 18:47:24 +02001823 enum rdma_link_layer ll = IB_LINK_LAYER_UNSPECIFIED;
Eli Cohene126ba92013-07-07 17:25:49 +03001824
1825 mutex_lock(&qp->mutex);
1826
1827 cur_state = attr_mask & IB_QP_CUR_STATE ? attr->cur_qp_state : qp->state;
1828 new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;
1829
Achiad Shochat2811ba52015-12-23 18:47:24 +02001830 if (!(cur_state == new_state && cur_state == IB_QPS_RESET)) {
1831 port = attr_mask & IB_QP_PORT ? attr->port_num : qp->port;
1832 ll = dev->ib_dev.get_link_layer(&dev->ib_dev, port);
1833 }
1834
Eli Cohene126ba92013-07-07 17:25:49 +03001835 if (ibqp->qp_type != MLX5_IB_QPT_REG_UMR &&
Matan Barakdd5f03b2013-12-12 18:03:11 +02001836 !ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, attr_mask,
Achiad Shochat2811ba52015-12-23 18:47:24 +02001837 ll))
Eli Cohene126ba92013-07-07 17:25:49 +03001838 goto out;
1839
1840 if ((attr_mask & IB_QP_PORT) &&
Saeed Mahameed938fe832015-05-28 22:28:41 +03001841 (attr->port_num == 0 ||
1842 attr->port_num > MLX5_CAP_GEN(dev->mdev, num_ports)))
Eli Cohene126ba92013-07-07 17:25:49 +03001843 goto out;
1844
1845 if (attr_mask & IB_QP_PKEY_INDEX) {
1846 port = attr_mask & IB_QP_PORT ? attr->port_num : qp->port;
Saeed Mahameed938fe832015-05-28 22:28:41 +03001847 if (attr->pkey_index >=
1848 dev->mdev->port_caps[port - 1].pkey_table_len)
Eli Cohene126ba92013-07-07 17:25:49 +03001849 goto out;
1850 }
1851
1852 if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
Saeed Mahameed938fe832015-05-28 22:28:41 +03001853 attr->max_rd_atomic >
1854 (1 << MLX5_CAP_GEN(dev->mdev, log_max_ra_res_qp)))
Eli Cohene126ba92013-07-07 17:25:49 +03001855 goto out;
1856
1857 if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC &&
Saeed Mahameed938fe832015-05-28 22:28:41 +03001858 attr->max_dest_rd_atomic >
1859 (1 << MLX5_CAP_GEN(dev->mdev, log_max_ra_req_qp)))
Eli Cohene126ba92013-07-07 17:25:49 +03001860 goto out;
1861
1862 if (cur_state == new_state && cur_state == IB_QPS_RESET) {
1863 err = 0;
1864 goto out;
1865 }
1866
1867 err = __mlx5_ib_modify_qp(ibqp, attr, attr_mask, cur_state, new_state);
1868
1869out:
1870 mutex_unlock(&qp->mutex);
1871 return err;
1872}
1873
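/*
 * Ring-overflow check for post_send/post_recv.  The first head/tail
 * comparison is a lockless fast path; only when the queue looks full
 * is wq->head - wq->tail re-read under the CQ lock, so that a
 * concurrent poll_cq (which advances wq->tail) is observed
 * consistently.
 */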
1874static int mlx5_wq_overflow(struct mlx5_ib_wq *wq, int nreq, struct ib_cq *ib_cq)
1875{
1876 struct mlx5_ib_cq *cq;
1877 unsigned cur;
1878
1879 cur = wq->head - wq->tail;
1880 if (likely(cur + nreq < wq->max_post))
1881 return 0;
1882
1883 cq = to_mcq(ib_cq);
1884 spin_lock(&cq->lock);
1885 cur = wq->head - wq->tail;
1886 spin_unlock(&cq->lock);
1887
1888 return cur + nreq >= wq->max_post;
1889}
1890
1891static __always_inline void set_raddr_seg(struct mlx5_wqe_raddr_seg *rseg,
1892 u64 remote_addr, u32 rkey)
1893{
1894 rseg->raddr = cpu_to_be64(remote_addr);
1895 rseg->rkey = cpu_to_be32(rkey);
1896 rseg->reserved = 0;
1897}
1898
Eli Cohene126ba92013-07-07 17:25:49 +03001899static void set_datagram_seg(struct mlx5_wqe_datagram_seg *dseg,
1900 struct ib_send_wr *wr)
1901{
Christoph Hellwige622f2f2015-10-08 09:16:33 +01001902 memcpy(&dseg->av, &to_mah(ud_wr(wr)->ah)->av, sizeof(struct mlx5_av));
1903 dseg->av.dqp_dct = cpu_to_be32(ud_wr(wr)->remote_qpn | MLX5_EXTENDED_UD_AV);
1904 dseg->av.key.qkey.qkey = cpu_to_be32(ud_wr(wr)->remote_qkey);
Eli Cohene126ba92013-07-07 17:25:49 +03001905}
1906
1907static void set_data_ptr_seg(struct mlx5_wqe_data_seg *dseg, struct ib_sge *sg)
1908{
1909 dseg->byte_count = cpu_to_be32(sg->length);
1910 dseg->lkey = cpu_to_be32(sg->lkey);
1911 dseg->addr = cpu_to_be64(sg->addr);
1912}
1913
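/*
 * UMR translation sizes are expressed in 16-byte octowords.
 * ALIGN(npages, 8) / 2 therefore appears to assume 8-byte translation
 * entries padded out to a 64-byte (4-octoword) boundary.
 */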
1914static __be16 get_klm_octo(int npages)
1915{
1916 return cpu_to_be16(ALIGN(npages, 8) / 2);
1917}
1918
1919static __be64 frwr_mkey_mask(void)
1920{
1921 u64 result;
1922
1923 result = MLX5_MKEY_MASK_LEN |
1924 MLX5_MKEY_MASK_PAGE_SIZE |
1925 MLX5_MKEY_MASK_START_ADDR |
1926 MLX5_MKEY_MASK_EN_RINVAL |
1927 MLX5_MKEY_MASK_KEY |
1928 MLX5_MKEY_MASK_LR |
1929 MLX5_MKEY_MASK_LW |
1930 MLX5_MKEY_MASK_RR |
1931 MLX5_MKEY_MASK_RW |
1932 MLX5_MKEY_MASK_A |
1933 MLX5_MKEY_MASK_SMALL_FENCE |
1934 MLX5_MKEY_MASK_FREE;
1935
1936 return cpu_to_be64(result);
1937}
1938
Sagi Grimberge6631812014-02-23 14:19:11 +02001939static __be64 sig_mkey_mask(void)
1940{
1941 u64 result;
1942
1943 result = MLX5_MKEY_MASK_LEN |
1944 MLX5_MKEY_MASK_PAGE_SIZE |
1945 MLX5_MKEY_MASK_START_ADDR |
Sagi Grimbergd5436ba2014-02-23 14:19:12 +02001946 MLX5_MKEY_MASK_EN_SIGERR |
Sagi Grimberge6631812014-02-23 14:19:11 +02001947 MLX5_MKEY_MASK_EN_RINVAL |
1948 MLX5_MKEY_MASK_KEY |
1949 MLX5_MKEY_MASK_LR |
1950 MLX5_MKEY_MASK_LW |
1951 MLX5_MKEY_MASK_RR |
1952 MLX5_MKEY_MASK_RW |
1953 MLX5_MKEY_MASK_SMALL_FENCE |
1954 MLX5_MKEY_MASK_FREE |
1955 MLX5_MKEY_MASK_BSF_EN;
1956
1957 return cpu_to_be64(result);
1958}
1959
Sagi Grimberg8a187ee2015-10-13 19:11:26 +03001960static void set_reg_umr_seg(struct mlx5_wqe_umr_ctrl_seg *umr,
1961 struct mlx5_ib_mr *mr)
1962{
1963 int ndescs = mr->ndescs;
1964
1965 memset(umr, 0, sizeof(*umr));
1966 umr->flags = MLX5_UMR_CHECK_NOT_FREE;
1967 umr->klm_octowords = get_klm_octo(ndescs);
1968 umr->mkey_mask = frwr_mkey_mask();
1969}
1970
Sagi Grimbergdd01e662015-10-13 19:11:42 +03001971static void set_linv_umr_seg(struct mlx5_wqe_umr_ctrl_seg *umr)
Eli Cohene126ba92013-07-07 17:25:49 +03001972{
1973 memset(umr, 0, sizeof(*umr));
Sagi Grimbergdd01e662015-10-13 19:11:42 +03001974 umr->mkey_mask = cpu_to_be64(MLX5_MKEY_MASK_FREE);
1975 umr->flags = 1 << 7;
Eli Cohene126ba92013-07-07 17:25:49 +03001976}
1977
Haggai Eran968e78d2014-12-11 17:04:11 +02001978static __be64 get_umr_reg_mr_mask(void)
1979{
1980 u64 result;
1981
1982 result = MLX5_MKEY_MASK_LEN |
1983 MLX5_MKEY_MASK_PAGE_SIZE |
1984 MLX5_MKEY_MASK_START_ADDR |
1985 MLX5_MKEY_MASK_PD |
1986 MLX5_MKEY_MASK_LR |
1987 MLX5_MKEY_MASK_LW |
1988 MLX5_MKEY_MASK_KEY |
1989 MLX5_MKEY_MASK_RR |
1990 MLX5_MKEY_MASK_RW |
1991 MLX5_MKEY_MASK_A |
1992 MLX5_MKEY_MASK_FREE;
1993
1994 return cpu_to_be64(result);
1995}
1996
1997static __be64 get_umr_unreg_mr_mask(void)
1998{
1999 u64 result;
2000
2001 result = MLX5_MKEY_MASK_FREE;
2002
2003 return cpu_to_be64(result);
2004}
2005
2006static __be64 get_umr_update_mtt_mask(void)
2007{
2008 u64 result;
2009
2010 result = MLX5_MKEY_MASK_FREE;
2011
2012 return cpu_to_be64(result);
2013}
2014
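/*
 * UMR control segment for MLX5_IB_WR_UMR work requests: the send_flags
 * select between unregister (free the mkey), an MTT-only update (with
 * a translation offset) and a full re-registration, and determine
 * which mkey fields the mask below allows the UMR to touch.
 */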
Eli Cohene126ba92013-07-07 17:25:49 +03002015static void set_reg_umr_segment(struct mlx5_wqe_umr_ctrl_seg *umr,
2016 struct ib_send_wr *wr)
2017{
Christoph Hellwige622f2f2015-10-08 09:16:33 +01002018 struct mlx5_umr_wr *umrwr = umr_wr(wr);
Eli Cohene126ba92013-07-07 17:25:49 +03002019
2020 memset(umr, 0, sizeof(*umr));
2021
Haggai Eran968e78d2014-12-11 17:04:11 +02002022 if (wr->send_flags & MLX5_IB_SEND_UMR_FAIL_IF_FREE)
2023 umr->flags = MLX5_UMR_CHECK_FREE; /* fail if free */
2024 else
2025 umr->flags = MLX5_UMR_CHECK_NOT_FREE; /* fail if not free */
2026
Eli Cohene126ba92013-07-07 17:25:49 +03002027 if (!(wr->send_flags & MLX5_IB_SEND_UMR_UNREG)) {
Eli Cohene126ba92013-07-07 17:25:49 +03002028 umr->klm_octowords = get_klm_octo(umrwr->npages);
Haggai Eran968e78d2014-12-11 17:04:11 +02002029 if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_MTT) {
2030 umr->mkey_mask = get_umr_update_mtt_mask();
2031 umr->bsf_octowords = get_klm_octo(umrwr->target.offset);
2032 umr->flags |= MLX5_UMR_TRANSLATION_OFFSET_EN;
2033 } else {
2034 umr->mkey_mask = get_umr_reg_mr_mask();
2035 }
Eli Cohene126ba92013-07-07 17:25:49 +03002036 } else {
Haggai Eran968e78d2014-12-11 17:04:11 +02002037 umr->mkey_mask = get_umr_unreg_mr_mask();
Eli Cohene126ba92013-07-07 17:25:49 +03002038 }
2039
2040 if (!wr->num_sge)
Haggai Eran968e78d2014-12-11 17:04:11 +02002041 umr->flags |= MLX5_UMR_INLINE;
Eli Cohene126ba92013-07-07 17:25:49 +03002042}
2043
2044static u8 get_umr_flags(int acc)
2045{
2046 return (acc & IB_ACCESS_REMOTE_ATOMIC ? MLX5_PERM_ATOMIC : 0) |
2047 (acc & IB_ACCESS_REMOTE_WRITE ? MLX5_PERM_REMOTE_WRITE : 0) |
2048 (acc & IB_ACCESS_REMOTE_READ ? MLX5_PERM_REMOTE_READ : 0) |
2049 (acc & IB_ACCESS_LOCAL_WRITE ? MLX5_PERM_LOCAL_WRITE : 0) |
Sagi Grimberg2ac45932014-02-23 14:19:09 +02002050 MLX5_PERM_LOCAL_READ | MLX5_PERM_UMR_EN;
Eli Cohene126ba92013-07-07 17:25:49 +03002051}
2052
Sagi Grimberg8a187ee2015-10-13 19:11:26 +03002053static void set_reg_mkey_seg(struct mlx5_mkey_seg *seg,
2054 struct mlx5_ib_mr *mr,
2055 u32 key, int access)
2056{
2057 int ndescs = ALIGN(mr->ndescs, 8) >> 1;
2058
2059 memset(seg, 0, sizeof(*seg));
2060 seg->flags = get_umr_flags(access) | MLX5_ACCESS_MODE_MTT;
2061 seg->qpn_mkey7_0 = cpu_to_be32((key & 0xff) | 0xffffff00);
2062 seg->flags_pd = cpu_to_be32(MLX5_MKEY_REMOTE_INVAL);
2063 seg->start_addr = cpu_to_be64(mr->ibmr.iova);
2064 seg->len = cpu_to_be64(mr->ibmr.length);
2065 seg->xlt_oct_size = cpu_to_be32(ndescs);
2066 seg->log2_page_size = ilog2(mr->ibmr.page_size);
2067}
2068
Sagi Grimbergdd01e662015-10-13 19:11:42 +03002069static void set_linv_mkey_seg(struct mlx5_mkey_seg *seg)
Eli Cohene126ba92013-07-07 17:25:49 +03002070{
2071 memset(seg, 0, sizeof(*seg));
Sagi Grimbergdd01e662015-10-13 19:11:42 +03002072 seg->status = MLX5_MKEY_STATUS_FREE;
Eli Cohene126ba92013-07-07 17:25:49 +03002073}
2074
2075static void set_reg_mkey_segment(struct mlx5_mkey_seg *seg, struct ib_send_wr *wr)
2076{
Christoph Hellwige622f2f2015-10-08 09:16:33 +01002077 struct mlx5_umr_wr *umrwr = umr_wr(wr);
Haggai Eran968e78d2014-12-11 17:04:11 +02002078
Eli Cohene126ba92013-07-07 17:25:49 +03002079 memset(seg, 0, sizeof(*seg));
2080 if (wr->send_flags & MLX5_IB_SEND_UMR_UNREG) {
Haggai Eran968e78d2014-12-11 17:04:11 +02002081 seg->status = MLX5_MKEY_STATUS_FREE;
Eli Cohene126ba92013-07-07 17:25:49 +03002082 return;
2083 }
2084
Haggai Eran968e78d2014-12-11 17:04:11 +02002085 seg->flags = convert_access(umrwr->access_flags);
2086 if (!(wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_MTT)) {
2087 seg->flags_pd = cpu_to_be32(to_mpd(umrwr->pd)->pdn);
2088 seg->start_addr = cpu_to_be64(umrwr->target.virt_addr);
2089 }
2090 seg->len = cpu_to_be64(umrwr->length);
2091 seg->log2_page_size = umrwr->page_shift;
Eli Cohen746b5582013-10-23 09:53:14 +03002092 seg->qpn_mkey7_0 = cpu_to_be32(0xffffff00 |
Haggai Eran968e78d2014-12-11 17:04:11 +02002093 mlx5_mkey_variant(umrwr->mkey));
Eli Cohene126ba92013-07-07 17:25:49 +03002094}
2095
Sagi Grimberg8a187ee2015-10-13 19:11:26 +03002096static void set_reg_data_seg(struct mlx5_wqe_data_seg *dseg,
2097 struct mlx5_ib_mr *mr,
2098 struct mlx5_ib_pd *pd)
2099{
2100 int bcount = mr->desc_size * mr->ndescs;
2101
2102 dseg->addr = cpu_to_be64(mr->desc_map);
2103 dseg->byte_count = cpu_to_be32(ALIGN(bcount, 64));
2104 dseg->lkey = cpu_to_be32(pd->ibpd.local_dma_lkey);
2105}
2106
Eli Cohene126ba92013-07-07 17:25:49 +03002107static __be32 send_ieth(struct ib_send_wr *wr)
2108{
2109 switch (wr->opcode) {
2110 case IB_WR_SEND_WITH_IMM:
2111 case IB_WR_RDMA_WRITE_WITH_IMM:
2112 return wr->ex.imm_data;
2113
2114 case IB_WR_SEND_WITH_INV:
2115 return cpu_to_be32(wr->ex.invalidate_rkey);
2116
2117 default:
2118 return 0;
2119 }
2120}
2121
2122static u8 calc_sig(void *wqe, int size)
2123{
2124 u8 *p = wqe;
2125 u8 res = 0;
2126 int i;
2127
2128 for (i = 0; i < size; i++)
2129 res ^= p[i];
2130
2131 return ~res;
2132}
2133
2134static u8 wq_sig(void *wqe)
2135{
2136 return calc_sig(wqe, (*((u8 *)wqe + 8) & 0x3f) << 4);
2137}
2138
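/*
 * Copy the scatter list of an IB_SEND_INLINE work request directly
 * into the WQE as a single inline segment.  The copy wraps back to the
 * start of the send queue buffer at qp->sq.qend, and *sz returns the
 * segment size in 16-byte units for the caller's ds count.
 */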
2139static int set_data_inl_seg(struct mlx5_ib_qp *qp, struct ib_send_wr *wr,
2140 void *wqe, int *sz)
2141{
2142 struct mlx5_wqe_inline_seg *seg;
2143 void *qend = qp->sq.qend;
2144 void *addr;
2145 int inl = 0;
2146 int copy;
2147 int len;
2148 int i;
2149
2150 seg = wqe;
2151 wqe += sizeof(*seg);
2152 for (i = 0; i < wr->num_sge; i++) {
2153 addr = (void *)(unsigned long)(wr->sg_list[i].addr);
2154 len = wr->sg_list[i].length;
2155 inl += len;
2156
2157 if (unlikely(inl > qp->max_inline_data))
2158 return -ENOMEM;
2159
2160 if (unlikely(wqe + len > qend)) {
2161 copy = qend - wqe;
2162 memcpy(wqe, addr, copy);
2163 addr += copy;
2164 len -= copy;
2165 wqe = mlx5_get_send_wqe(qp, 0);
2166 }
2167 memcpy(wqe, addr, len);
2168 wqe += len;
2169 }
2170
2171 seg->byte_count = cpu_to_be32(inl | MLX5_INLINE_SEG);
2172
2173 *sz = ALIGN(inl + sizeof(seg->byte_count), 16) / 16;
2174
2175 return 0;
2176}
2177
Sagi Grimberge6631812014-02-23 14:19:11 +02002178static u16 prot_field_size(enum ib_signature_type type)
2179{
2180 switch (type) {
2181 case IB_SIG_TYPE_T10_DIF:
2182 return MLX5_DIF_SIZE;
2183 default:
2184 return 0;
2185 }
2186}
2187
2188static u8 bs_selector(int block_size)
2189{
2190 switch (block_size) {
2191 case 512: return 0x1;
2192 case 520: return 0x2;
2193 case 4096: return 0x3;
2194 case 4160: return 0x4;
2195 case 1073741824: return 0x5;
2196 default: return 0;
2197 }
2198}
2199
Sagi Grimberg78eda2b2014-08-13 19:54:35 +03002200static void mlx5_fill_inl_bsf(struct ib_sig_domain *domain,
2201 struct mlx5_bsf_inl *inl)
Sagi Grimberge6631812014-02-23 14:19:11 +02002202{
Sagi Grimberg142537f2014-08-13 19:54:32 +03002203 /* Valid inline section and allow BSF refresh */
2204 inl->vld_refresh = cpu_to_be16(MLX5_BSF_INL_VALID |
2205 MLX5_BSF_REFRESH_DIF);
2206 inl->dif_apptag = cpu_to_be16(domain->sig.dif.app_tag);
2207 inl->dif_reftag = cpu_to_be32(domain->sig.dif.ref_tag);
Sagi Grimberg78eda2b2014-08-13 19:54:35 +03002208 /* repeating block */
2209 inl->rp_inv_seed = MLX5_BSF_REPEAT_BLOCK;
2210 inl->sig_type = domain->sig.dif.bg_type == IB_T10DIF_CRC ?
2211 MLX5_DIF_CRC : MLX5_DIF_IPCS;
Sagi Grimberge6631812014-02-23 14:19:11 +02002212
Sagi Grimberg78eda2b2014-08-13 19:54:35 +03002213 if (domain->sig.dif.ref_remap)
2214 inl->dif_inc_ref_guard_check |= MLX5_BSF_INC_REFTAG;
Sagi Grimberge6631812014-02-23 14:19:11 +02002215
Sagi Grimberg78eda2b2014-08-13 19:54:35 +03002216 if (domain->sig.dif.app_escape) {
2217 if (domain->sig.dif.ref_escape)
2218 inl->dif_inc_ref_guard_check |= MLX5_BSF_APPREF_ESCAPE;
2219 else
2220 inl->dif_inc_ref_guard_check |= MLX5_BSF_APPTAG_ESCAPE;
Sagi Grimberge6631812014-02-23 14:19:11 +02002221 }
2222
Sagi Grimberg78eda2b2014-08-13 19:54:35 +03002223 inl->dif_app_bitmask_check =
2224 cpu_to_be16(domain->sig.dif.apptag_check_mask);
Sagi Grimberge6631812014-02-23 14:19:11 +02002225}
2226
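/*
 * Build the BSF for a signature MR: the basic section carries the
 * block-size selectors, the check byte mask and the PSV indices for
 * the memory and wire domains, and the inline sections carry the
 * T10-DIF app/ref tags.  When both domains share the same block
 * structure, the copy byte mask lets guard/app/ref tags pass through
 * unchanged.
 */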
2227static int mlx5_set_bsf(struct ib_mr *sig_mr,
2228 struct ib_sig_attrs *sig_attrs,
2229 struct mlx5_bsf *bsf, u32 data_size)
2230{
2231 struct mlx5_core_sig_ctx *msig = to_mmr(sig_mr)->sig;
2232 struct mlx5_bsf_basic *basic = &bsf->basic;
2233 struct ib_sig_domain *mem = &sig_attrs->mem;
2234 struct ib_sig_domain *wire = &sig_attrs->wire;
Sagi Grimberge6631812014-02-23 14:19:11 +02002235
Sagi Grimbergc7f44fb2014-05-18 18:32:40 +03002236 memset(bsf, 0, sizeof(*bsf));
Sagi Grimberge6631812014-02-23 14:19:11 +02002237
Sagi Grimberg78eda2b2014-08-13 19:54:35 +03002238 /* Basic + Extended + Inline */
2239 basic->bsf_size_sbs = 1 << 7;
2240 /* Input domain check byte mask */
2241 basic->check_byte_mask = sig_attrs->check_mask;
2242 basic->raw_data_size = cpu_to_be32(data_size);
2243
2244 /* Memory domain */
2245 switch (sig_attrs->mem.sig_type) {
2246 case IB_SIG_TYPE_NONE:
2247 break;
2248 case IB_SIG_TYPE_T10_DIF:
2249 basic->mem.bs_selector = bs_selector(mem->sig.dif.pi_interval);
2250 basic->m_bfs_psv = cpu_to_be32(msig->psv_memory.psv_idx);
2251 mlx5_fill_inl_bsf(mem, &bsf->m_inl);
2252 break;
2253 default:
2254 return -EINVAL;
2255 }
2256
2257 /* Wire domain */
2258 switch (sig_attrs->wire.sig_type) {
2259 case IB_SIG_TYPE_NONE:
2260 break;
2261 case IB_SIG_TYPE_T10_DIF:
Sagi Grimberge6631812014-02-23 14:19:11 +02002262 if (mem->sig.dif.pi_interval == wire->sig.dif.pi_interval &&
Sagi Grimberg78eda2b2014-08-13 19:54:35 +03002263 mem->sig_type == wire->sig_type) {
Sagi Grimberge6631812014-02-23 14:19:11 +02002264 /* Same block structure */
Sagi Grimberg142537f2014-08-13 19:54:32 +03002265 basic->bsf_size_sbs |= 1 << 4;
Sagi Grimberge6631812014-02-23 14:19:11 +02002266 if (mem->sig.dif.bg_type == wire->sig.dif.bg_type)
Sagi Grimbergfd22f782014-08-13 19:54:29 +03002267 basic->wire.copy_byte_mask |= MLX5_CPY_GRD_MASK;
Sagi Grimbergc7f44fb2014-05-18 18:32:40 +03002268 if (mem->sig.dif.app_tag == wire->sig.dif.app_tag)
Sagi Grimbergfd22f782014-08-13 19:54:29 +03002269 basic->wire.copy_byte_mask |= MLX5_CPY_APP_MASK;
Sagi Grimbergc7f44fb2014-05-18 18:32:40 +03002270 if (mem->sig.dif.ref_tag == wire->sig.dif.ref_tag)
Sagi Grimbergfd22f782014-08-13 19:54:29 +03002271 basic->wire.copy_byte_mask |= MLX5_CPY_REF_MASK;
Sagi Grimberge6631812014-02-23 14:19:11 +02002272 } else
2273 basic->wire.bs_selector = bs_selector(wire->sig.dif.pi_interval);
2274
Sagi Grimberg142537f2014-08-13 19:54:32 +03002275 basic->w_bfs_psv = cpu_to_be32(msig->psv_wire.psv_idx);
Sagi Grimberg78eda2b2014-08-13 19:54:35 +03002276 mlx5_fill_inl_bsf(wire, &bsf->w_inl);
Sagi Grimberge6631812014-02-23 14:19:11 +02002277 break;
Sagi Grimberge6631812014-02-23 14:19:11 +02002278 default:
2279 return -EINVAL;
2280 }
2281
2282 return 0;
2283}
2284
Christoph Hellwige622f2f2015-10-08 09:16:33 +01002285static int set_sig_data_segment(struct ib_sig_handover_wr *wr,
2286 struct mlx5_ib_qp *qp, void **seg, int *size)
Sagi Grimberge6631812014-02-23 14:19:11 +02002287{
Christoph Hellwige622f2f2015-10-08 09:16:33 +01002288 struct ib_sig_attrs *sig_attrs = wr->sig_attrs;
2289 struct ib_mr *sig_mr = wr->sig_mr;
Sagi Grimberge6631812014-02-23 14:19:11 +02002290 struct mlx5_bsf *bsf;
Christoph Hellwige622f2f2015-10-08 09:16:33 +01002291 u32 data_len = wr->wr.sg_list->length;
2292 u32 data_key = wr->wr.sg_list->lkey;
2293 u64 data_va = wr->wr.sg_list->addr;
Sagi Grimberge6631812014-02-23 14:19:11 +02002294 int ret;
2295 int wqe_size;
2296
Christoph Hellwige622f2f2015-10-08 09:16:33 +01002297 if (!wr->prot ||
2298 (data_key == wr->prot->lkey &&
2299 data_va == wr->prot->addr &&
2300 data_len == wr->prot->length)) {
Sagi Grimberge6631812014-02-23 14:19:11 +02002301 /**
 2302 * Either the source domain doesn't contain signature
Sagi Grimberg5c273b12014-05-18 18:32:39 +03002303 * information, or data and protection are interleaved in memory,
Sagi Grimberge6631812014-02-23 14:19:11 +02002304 * so we only need to construct:
2305 * ------------------
2306 * | data_klm |
2307 * ------------------
2308 * | BSF |
2309 * ------------------
2310 **/
2311 struct mlx5_klm *data_klm = *seg;
2312
2313 data_klm->bcount = cpu_to_be32(data_len);
2314 data_klm->key = cpu_to_be32(data_key);
2315 data_klm->va = cpu_to_be64(data_va);
2316 wqe_size = ALIGN(sizeof(*data_klm), 64);
2317 } else {
2318 /**
 2319 * The source domain contains signature information,
 2320 * so we need to construct a strided block format:
2321 * ---------------------------
2322 * | stride_block_ctrl |
2323 * ---------------------------
2324 * | data_klm |
2325 * ---------------------------
2326 * | prot_klm |
2327 * ---------------------------
2328 * | BSF |
2329 * ---------------------------
2330 **/
2331 struct mlx5_stride_block_ctrl_seg *sblock_ctrl;
2332 struct mlx5_stride_block_entry *data_sentry;
2333 struct mlx5_stride_block_entry *prot_sentry;
Christoph Hellwige622f2f2015-10-08 09:16:33 +01002334 u32 prot_key = wr->prot->lkey;
2335 u64 prot_va = wr->prot->addr;
Sagi Grimberge6631812014-02-23 14:19:11 +02002336 u16 block_size = sig_attrs->mem.sig.dif.pi_interval;
2337 int prot_size;
2338
2339 sblock_ctrl = *seg;
2340 data_sentry = (void *)sblock_ctrl + sizeof(*sblock_ctrl);
2341 prot_sentry = (void *)data_sentry + sizeof(*data_sentry);
2342
2343 prot_size = prot_field_size(sig_attrs->mem.sig_type);
2344 if (!prot_size) {
2345 pr_err("Bad block size given: %u\n", block_size);
2346 return -EINVAL;
2347 }
2348 sblock_ctrl->bcount_per_cycle = cpu_to_be32(block_size +
2349 prot_size);
2350 sblock_ctrl->op = cpu_to_be32(MLX5_STRIDE_BLOCK_OP);
2351 sblock_ctrl->repeat_count = cpu_to_be32(data_len / block_size);
2352 sblock_ctrl->num_entries = cpu_to_be16(2);
2353
2354 data_sentry->bcount = cpu_to_be16(block_size);
2355 data_sentry->key = cpu_to_be32(data_key);
2356 data_sentry->va = cpu_to_be64(data_va);
Sagi Grimberg5c273b12014-05-18 18:32:39 +03002357 data_sentry->stride = cpu_to_be16(block_size);
2358
Sagi Grimberge6631812014-02-23 14:19:11 +02002359 prot_sentry->bcount = cpu_to_be16(prot_size);
2360 prot_sentry->key = cpu_to_be32(prot_key);
Sagi Grimberg5c273b12014-05-18 18:32:39 +03002361 prot_sentry->va = cpu_to_be64(prot_va);
2362 prot_sentry->stride = cpu_to_be16(prot_size);
Sagi Grimberge6631812014-02-23 14:19:11 +02002363
Sagi Grimberge6631812014-02-23 14:19:11 +02002364 wqe_size = ALIGN(sizeof(*sblock_ctrl) + sizeof(*data_sentry) +
2365 sizeof(*prot_sentry), 64);
2366 }
2367
2368 *seg += wqe_size;
2369 *size += wqe_size / 16;
2370 if (unlikely((*seg == qp->sq.qend)))
2371 *seg = mlx5_get_send_wqe(qp, 0);
2372
2373 bsf = *seg;
2374 ret = mlx5_set_bsf(sig_mr, sig_attrs, bsf, data_len);
2375 if (ret)
2376 return -EINVAL;
2377
2378 *seg += sizeof(*bsf);
2379 *size += sizeof(*bsf) / 16;
2380 if (unlikely((*seg == qp->sq.qend)))
2381 *seg = mlx5_get_send_wqe(qp, 0);
2382
2383 return 0;
2384}
2385
2386static void set_sig_mkey_segment(struct mlx5_mkey_seg *seg,
Christoph Hellwige622f2f2015-10-08 09:16:33 +01002387 struct ib_sig_handover_wr *wr, u32 nelements,
Sagi Grimberge6631812014-02-23 14:19:11 +02002388 u32 length, u32 pdn)
2389{
Christoph Hellwige622f2f2015-10-08 09:16:33 +01002390 struct ib_mr *sig_mr = wr->sig_mr;
Sagi Grimberge6631812014-02-23 14:19:11 +02002391 u32 sig_key = sig_mr->rkey;
Sagi Grimbergd5436ba2014-02-23 14:19:12 +02002392 u8 sigerr = to_mmr(sig_mr)->sig->sigerr_count & 1;
Sagi Grimberge6631812014-02-23 14:19:11 +02002393
2394 memset(seg, 0, sizeof(*seg));
2395
Christoph Hellwige622f2f2015-10-08 09:16:33 +01002396 seg->flags = get_umr_flags(wr->access_flags) |
Sagi Grimberge6631812014-02-23 14:19:11 +02002397 MLX5_ACCESS_MODE_KLM;
2398 seg->qpn_mkey7_0 = cpu_to_be32((sig_key & 0xff) | 0xffffff00);
Sagi Grimbergd5436ba2014-02-23 14:19:12 +02002399 seg->flags_pd = cpu_to_be32(MLX5_MKEY_REMOTE_INVAL | sigerr << 26 |
Sagi Grimberge6631812014-02-23 14:19:11 +02002400 MLX5_MKEY_BSF_EN | pdn);
2401 seg->len = cpu_to_be64(length);
2402 seg->xlt_oct_size = cpu_to_be32(be16_to_cpu(get_klm_octo(nelements)));
2403 seg->bsfs_octo_size = cpu_to_be32(MLX5_MKEY_BSF_OCTO_SIZE);
2404}
2405
2406static void set_sig_umr_segment(struct mlx5_wqe_umr_ctrl_seg *umr,
Christoph Hellwige622f2f2015-10-08 09:16:33 +01002407 u32 nelements)
Sagi Grimberge6631812014-02-23 14:19:11 +02002408{
2409 memset(umr, 0, sizeof(*umr));
2410
2411 umr->flags = MLX5_FLAGS_INLINE | MLX5_FLAGS_CHECK_FREE;
2412 umr->klm_octowords = get_klm_octo(nelements);
2413 umr->bsf_octowords = cpu_to_be16(MLX5_MKEY_BSF_OCTO_SIZE);
2414 umr->mkey_mask = sig_mkey_mask();
2415}
2416
2417
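/*
 * IB_WR_REG_SIG_MR: validate the work request and signature MR state,
 * then emit a UMR control segment, a signature mkey segment and the
 * data/BSF layout built by set_sig_data_segment(); region_len covers
 * data plus protection when they live in separate buffers.
 */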
Christoph Hellwige622f2f2015-10-08 09:16:33 +01002418static int set_sig_umr_wr(struct ib_send_wr *send_wr, struct mlx5_ib_qp *qp,
Sagi Grimberge6631812014-02-23 14:19:11 +02002419 void **seg, int *size)
2420{
Christoph Hellwige622f2f2015-10-08 09:16:33 +01002421 struct ib_sig_handover_wr *wr = sig_handover_wr(send_wr);
2422 struct mlx5_ib_mr *sig_mr = to_mmr(wr->sig_mr);
Sagi Grimberge6631812014-02-23 14:19:11 +02002423 u32 pdn = get_pd(qp)->pdn;
2424 u32 klm_oct_size;
2425 int region_len, ret;
2426
Christoph Hellwige622f2f2015-10-08 09:16:33 +01002427 if (unlikely(wr->wr.num_sge != 1) ||
2428 unlikely(wr->access_flags & IB_ACCESS_REMOTE_ATOMIC) ||
Sagi Grimbergd5436ba2014-02-23 14:19:12 +02002429 unlikely(!sig_mr->sig) || unlikely(!qp->signature_en) ||
2430 unlikely(!sig_mr->sig->sig_status_checked))
Sagi Grimberge6631812014-02-23 14:19:11 +02002431 return -EINVAL;
2432
2433 /* length of the protected region, data + protection */
Christoph Hellwige622f2f2015-10-08 09:16:33 +01002434 region_len = wr->wr.sg_list->length;
2435 if (wr->prot &&
2436 (wr->prot->lkey != wr->wr.sg_list->lkey ||
2437 wr->prot->addr != wr->wr.sg_list->addr ||
2438 wr->prot->length != wr->wr.sg_list->length))
2439 region_len += wr->prot->length;
Sagi Grimberge6631812014-02-23 14:19:11 +02002440
2441 /**
2442 * KLM octoword size - if protection was provided
2443 * then we use strided block format (3 octowords),
 2444 * else we use a single KLM (1 octoword)
2445 **/
Christoph Hellwige622f2f2015-10-08 09:16:33 +01002446 klm_oct_size = wr->prot ? 3 : 1;
Sagi Grimberge6631812014-02-23 14:19:11 +02002447
Christoph Hellwige622f2f2015-10-08 09:16:33 +01002448 set_sig_umr_segment(*seg, klm_oct_size);
Sagi Grimberge6631812014-02-23 14:19:11 +02002449 *seg += sizeof(struct mlx5_wqe_umr_ctrl_seg);
2450 *size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16;
2451 if (unlikely((*seg == qp->sq.qend)))
2452 *seg = mlx5_get_send_wqe(qp, 0);
2453
2454 set_sig_mkey_segment(*seg, wr, klm_oct_size, region_len, pdn);
2455 *seg += sizeof(struct mlx5_mkey_seg);
2456 *size += sizeof(struct mlx5_mkey_seg) / 16;
2457 if (unlikely((*seg == qp->sq.qend)))
2458 *seg = mlx5_get_send_wqe(qp, 0);
2459
2460 ret = set_sig_data_segment(wr, qp, seg, size);
2461 if (ret)
2462 return ret;
2463
Sagi Grimbergd5436ba2014-02-23 14:19:12 +02002464 sig_mr->sig->sig_status_checked = false;
Sagi Grimberge6631812014-02-23 14:19:11 +02002465 return 0;
2466}
2467
2468static int set_psv_wr(struct ib_sig_domain *domain,
2469 u32 psv_idx, void **seg, int *size)
2470{
2471 struct mlx5_seg_set_psv *psv_seg = *seg;
2472
2473 memset(psv_seg, 0, sizeof(*psv_seg));
2474 psv_seg->psv_num = cpu_to_be32(psv_idx);
2475 switch (domain->sig_type) {
Sagi Grimberg78eda2b2014-08-13 19:54:35 +03002476 case IB_SIG_TYPE_NONE:
2477 break;
Sagi Grimberge6631812014-02-23 14:19:11 +02002478 case IB_SIG_TYPE_T10_DIF:
2479 psv_seg->transient_sig = cpu_to_be32(domain->sig.dif.bg << 16 |
2480 domain->sig.dif.app_tag);
2481 psv_seg->ref_tag = cpu_to_be32(domain->sig.dif.ref_tag);
Sagi Grimberge6631812014-02-23 14:19:11 +02002482 break;
Sagi Grimberge6631812014-02-23 14:19:11 +02002483 default:
2484 pr_err("Bad signature type given.\n");
2485 return 1;
2486 }
2487
Sagi Grimberg78eda2b2014-08-13 19:54:35 +03002488 *seg += sizeof(*psv_seg);
2489 *size += sizeof(*psv_seg) / 16;
2490
Sagi Grimberge6631812014-02-23 14:19:11 +02002491 return 0;
2492}
2493
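/*
 * IB_WR_REG_MR is implemented as a UMR: a UMR control segment, an mkey
 * context segment and one data pointer covering the MR's descriptor
 * list, each wrapping at the end of the send queue if needed.
 */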
Sagi Grimberg8a187ee2015-10-13 19:11:26 +03002494static int set_reg_wr(struct mlx5_ib_qp *qp,
2495 struct ib_reg_wr *wr,
2496 void **seg, int *size)
2497{
2498 struct mlx5_ib_mr *mr = to_mmr(wr->mr);
2499 struct mlx5_ib_pd *pd = to_mpd(qp->ibqp.pd);
2500
2501 if (unlikely(wr->wr.send_flags & IB_SEND_INLINE)) {
2502 mlx5_ib_warn(to_mdev(qp->ibqp.device),
2503 "Invalid IB_SEND_INLINE send flag\n");
2504 return -EINVAL;
2505 }
2506
2507 set_reg_umr_seg(*seg, mr);
2508 *seg += sizeof(struct mlx5_wqe_umr_ctrl_seg);
2509 *size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16;
2510 if (unlikely((*seg == qp->sq.qend)))
2511 *seg = mlx5_get_send_wqe(qp, 0);
2512
2513 set_reg_mkey_seg(*seg, mr, wr->key, wr->access);
2514 *seg += sizeof(struct mlx5_mkey_seg);
2515 *size += sizeof(struct mlx5_mkey_seg) / 16;
2516 if (unlikely((*seg == qp->sq.qend)))
2517 *seg = mlx5_get_send_wqe(qp, 0);
2518
2519 set_reg_data_seg(*seg, mr, pd);
2520 *seg += sizeof(struct mlx5_wqe_data_seg);
2521 *size += (sizeof(struct mlx5_wqe_data_seg) / 16);
2522
2523 return 0;
2524}
2525
Sagi Grimbergdd01e662015-10-13 19:11:42 +03002526static void set_linv_wr(struct mlx5_ib_qp *qp, void **seg, int *size)
Eli Cohene126ba92013-07-07 17:25:49 +03002527{
Sagi Grimbergdd01e662015-10-13 19:11:42 +03002528 set_linv_umr_seg(*seg);
Eli Cohene126ba92013-07-07 17:25:49 +03002529 *seg += sizeof(struct mlx5_wqe_umr_ctrl_seg);
2530 *size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16;
2531 if (unlikely((*seg == qp->sq.qend)))
2532 *seg = mlx5_get_send_wqe(qp, 0);
Sagi Grimbergdd01e662015-10-13 19:11:42 +03002533 set_linv_mkey_seg(*seg);
Eli Cohene126ba92013-07-07 17:25:49 +03002534 *seg += sizeof(struct mlx5_mkey_seg);
2535 *size += sizeof(struct mlx5_mkey_seg) / 16;
2536 if (unlikely((*seg == qp->sq.qend)))
2537 *seg = mlx5_get_send_wqe(qp, 0);
Eli Cohene126ba92013-07-07 17:25:49 +03002538}
2539
2540static void dump_wqe(struct mlx5_ib_qp *qp, int idx, int size_16)
2541{
2542 __be32 *p = NULL;
2543 int tidx = idx;
2544 int i, j;
2545
2546 pr_debug("dump wqe at %p\n", mlx5_get_send_wqe(qp, tidx));
2547 for (i = 0, j = 0; i < size_16 * 4; i += 4, j += 4) {
2548 if ((i & 0xf) == 0) {
2549 void *buf = mlx5_get_send_wqe(qp, tidx);
2550 tidx = (tidx + 1) & (qp->sq.wqe_cnt - 1);
2551 p = buf;
2552 j = 0;
2553 }
2554 pr_debug("%08x %08x %08x %08x\n", be32_to_cpu(p[j]),
2555 be32_to_cpu(p[j + 1]), be32_to_cpu(p[j + 2]),
2556 be32_to_cpu(p[j + 3]));
2557 }
2558}
2559
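/*
 * Copy a WQE into the blue flame register 64 bytes at a time using
 * 64-bit MMIO writes, wrapping the source pointer when it reaches the
 * end of the send queue buffer.  (The caller in mlx5_ib_post_send
 * currently keeps this path disabled behind an "if (0 ...)".)
 */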
2560static void mlx5_bf_copy(u64 __iomem *dst, u64 *src,
2561 unsigned bytecnt, struct mlx5_ib_qp *qp)
2562{
2563 while (bytecnt > 0) {
2564 __iowrite64_copy(dst++, src++, 8);
2565 __iowrite64_copy(dst++, src++, 8);
2566 __iowrite64_copy(dst++, src++, 8);
2567 __iowrite64_copy(dst++, src++, 8);
2568 __iowrite64_copy(dst++, src++, 8);
2569 __iowrite64_copy(dst++, src++, 8);
2570 __iowrite64_copy(dst++, src++, 8);
2571 __iowrite64_copy(dst++, src++, 8);
2572 bytecnt -= 64;
2573 if (unlikely(src == qp->sq.qend))
2574 src = mlx5_get_send_wqe(qp, 0);
2575 }
2576}
2577
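/*
 * Fence selection for the next WQE: a fenced IB_WR_LOCAL_INV requests
 * strong ordering; otherwise, when a fence is already pending in
 * qp->fm_cache, IB_SEND_FENCE maps to "small and fence" and an
 * unfenced WR simply inherits the cached fence value.
 */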
2578static u8 get_fence(u8 fence, struct ib_send_wr *wr)
2579{
2580 if (unlikely(wr->opcode == IB_WR_LOCAL_INV &&
2581 wr->send_flags & IB_SEND_FENCE))
2582 return MLX5_FENCE_MODE_STRONG_ORDERING;
2583
2584 if (unlikely(fence)) {
2585 if (wr->send_flags & IB_SEND_FENCE)
2586 return MLX5_FENCE_MODE_SMALL_AND_FENCE;
2587 else
2588 return fence;
2589
2590 } else {
2591 return 0;
2592 }
2593}
2594
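/*
 * begin_wqe()/finish_wqe() bracket the construction of each send WQE:
 * begin_wqe checks for SQ overflow, locates the next WQE slot and
 * fills the control segment flags; finish_wqe stamps the
 * opcode/index/ds fields, records the wr_id bookkeeping and advances
 * sq.cur_post by the number of 64-byte basic blocks consumed.
 */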
Sagi Grimberg6e5eada2014-02-23 14:19:08 +02002595static int begin_wqe(struct mlx5_ib_qp *qp, void **seg,
2596 struct mlx5_wqe_ctrl_seg **ctrl,
Eli Cohen6a4f1392014-12-02 12:26:18 +02002597 struct ib_send_wr *wr, unsigned *idx,
Sagi Grimberg6e5eada2014-02-23 14:19:08 +02002598 int *size, int nreq)
2599{
2600 int err = 0;
2601
2602 if (unlikely(mlx5_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq))) {
2603 err = -ENOMEM;
2604 return err;
2605 }
2606
2607 *idx = qp->sq.cur_post & (qp->sq.wqe_cnt - 1);
2608 *seg = mlx5_get_send_wqe(qp, *idx);
2609 *ctrl = *seg;
2610 *(uint32_t *)(*seg + 8) = 0;
2611 (*ctrl)->imm = send_ieth(wr);
2612 (*ctrl)->fm_ce_se = qp->sq_signal_bits |
2613 (wr->send_flags & IB_SEND_SIGNALED ?
2614 MLX5_WQE_CTRL_CQ_UPDATE : 0) |
2615 (wr->send_flags & IB_SEND_SOLICITED ?
2616 MLX5_WQE_CTRL_SOLICITED : 0);
2617
2618 *seg += sizeof(**ctrl);
2619 *size = sizeof(**ctrl) / 16;
2620
2621 return err;
2622}
2623
2624static void finish_wqe(struct mlx5_ib_qp *qp,
2625 struct mlx5_wqe_ctrl_seg *ctrl,
2626 u8 size, unsigned idx, u64 wr_id,
2627 int nreq, u8 fence, u8 next_fence,
2628 u32 mlx5_opcode)
2629{
2630 u8 opmod = 0;
2631
2632 ctrl->opmod_idx_opcode = cpu_to_be32(((u32)(qp->sq.cur_post) << 8) |
2633 mlx5_opcode | ((u32)opmod << 24));
2634 ctrl->qpn_ds = cpu_to_be32(size | (qp->mqp.qpn << 8));
2635 ctrl->fm_ce_se |= fence;
2636 qp->fm_cache = next_fence;
2637 if (unlikely(qp->wq_sig))
2638 ctrl->signature = wq_sig(ctrl);
2639
2640 qp->sq.wrid[idx] = wr_id;
2641 qp->sq.w_list[idx].opcode = mlx5_opcode;
2642 qp->sq.wqe_head[idx] = qp->sq.head + nreq;
2643 qp->sq.cur_post += DIV_ROUND_UP(size * 16, MLX5_SEND_WQE_BB);
2644 qp->sq.w_list[idx].next = qp->sq.cur_post;
2645}
2646
2647
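/*
 * post_send: for each WR, reserve a WQE slot, emit the transport- and
 * opcode-specific segments (raddr, datagram, UMR, signature, ...),
 * then the inline or scatter data segments, and finish the WQE.  Once
 * all WRs are built, a doorbell record update plus a doorbell write
 * (blue flame or mapped register) hands the work to the HCA; the
 * write barriers below order the descriptor writes against the
 * doorbell.
 */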
Eli Cohene126ba92013-07-07 17:25:49 +03002648int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
2649 struct ib_send_wr **bad_wr)
2650{
2651 struct mlx5_wqe_ctrl_seg *ctrl = NULL; /* compiler warning */
2652 struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
Eli Cohene126ba92013-07-07 17:25:49 +03002653 struct mlx5_ib_qp *qp = to_mqp(ibqp);
Sagi Grimberge6631812014-02-23 14:19:11 +02002654 struct mlx5_ib_mr *mr;
Eli Cohene126ba92013-07-07 17:25:49 +03002655 struct mlx5_wqe_data_seg *dpseg;
2656 struct mlx5_wqe_xrc_seg *xrc;
2657 struct mlx5_bf *bf = qp->bf;
2658 int uninitialized_var(size);
2659 void *qend = qp->sq.qend;
2660 unsigned long flags;
Eli Cohene126ba92013-07-07 17:25:49 +03002661 unsigned idx;
2662 int err = 0;
2663 int inl = 0;
2664 int num_sge;
2665 void *seg;
2666 int nreq;
2667 int i;
2668 u8 next_fence = 0;
Eli Cohene126ba92013-07-07 17:25:49 +03002669 u8 fence;
2670
2671 spin_lock_irqsave(&qp->sq.lock, flags);
2672
2673 for (nreq = 0; wr; nreq++, wr = wr->next) {
Fabian Fredericka8f731e2014-08-12 19:20:08 -04002674 if (unlikely(wr->opcode >= ARRAY_SIZE(mlx5_ib_opcode))) {
Eli Cohene126ba92013-07-07 17:25:49 +03002675 mlx5_ib_warn(dev, "\n");
2676 err = -EINVAL;
2677 *bad_wr = wr;
2678 goto out;
2679 }
2680
Eli Cohene126ba92013-07-07 17:25:49 +03002681 fence = qp->fm_cache;
2682 num_sge = wr->num_sge;
2683 if (unlikely(num_sge > qp->sq.max_gs)) {
2684 mlx5_ib_warn(dev, "\n");
2685 err = -ENOMEM;
2686 *bad_wr = wr;
2687 goto out;
2688 }
2689
Sagi Grimberg6e5eada2014-02-23 14:19:08 +02002690 err = begin_wqe(qp, &seg, &ctrl, wr, &idx, &size, nreq);
2691 if (err) {
2692 mlx5_ib_warn(dev, "\n");
2693 err = -ENOMEM;
2694 *bad_wr = wr;
2695 goto out;
2696 }
Eli Cohene126ba92013-07-07 17:25:49 +03002697
2698 switch (ibqp->qp_type) {
2699 case IB_QPT_XRC_INI:
2700 xrc = seg;
Eli Cohene126ba92013-07-07 17:25:49 +03002701 seg += sizeof(*xrc);
2702 size += sizeof(*xrc) / 16;
2703 /* fall through */
2704 case IB_QPT_RC:
2705 switch (wr->opcode) {
2706 case IB_WR_RDMA_READ:
2707 case IB_WR_RDMA_WRITE:
2708 case IB_WR_RDMA_WRITE_WITH_IMM:
Christoph Hellwige622f2f2015-10-08 09:16:33 +01002709 set_raddr_seg(seg, rdma_wr(wr)->remote_addr,
2710 rdma_wr(wr)->rkey);
Jack Morgensteinf241e742014-07-28 23:30:23 +03002711 seg += sizeof(struct mlx5_wqe_raddr_seg);
Eli Cohene126ba92013-07-07 17:25:49 +03002712 size += sizeof(struct mlx5_wqe_raddr_seg) / 16;
2713 break;
2714
2715 case IB_WR_ATOMIC_CMP_AND_SWP:
2716 case IB_WR_ATOMIC_FETCH_AND_ADD:
Eli Cohene126ba92013-07-07 17:25:49 +03002717 case IB_WR_MASKED_ATOMIC_CMP_AND_SWP:
Eli Cohen81bea282013-09-11 16:35:30 +03002718 mlx5_ib_warn(dev, "Atomic operations are not supported yet\n");
2719 err = -ENOSYS;
2720 *bad_wr = wr;
2721 goto out;
Eli Cohene126ba92013-07-07 17:25:49 +03002722
2723 case IB_WR_LOCAL_INV:
2724 next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
2725 qp->sq.wr_data[idx] = IB_WR_LOCAL_INV;
2726 ctrl->imm = cpu_to_be32(wr->ex.invalidate_rkey);
Sagi Grimbergdd01e662015-10-13 19:11:42 +03002727 set_linv_wr(qp, &seg, &size);
Eli Cohene126ba92013-07-07 17:25:49 +03002728 num_sge = 0;
2729 break;
2730
Sagi Grimberg8a187ee2015-10-13 19:11:26 +03002731 case IB_WR_REG_MR:
2732 next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
2733 qp->sq.wr_data[idx] = IB_WR_REG_MR;
2734 ctrl->imm = cpu_to_be32(reg_wr(wr)->key);
2735 err = set_reg_wr(qp, reg_wr(wr), &seg, &size);
2736 if (err) {
2737 *bad_wr = wr;
2738 goto out;
2739 }
2740 num_sge = 0;
2741 break;
2742
Sagi Grimberge6631812014-02-23 14:19:11 +02002743 case IB_WR_REG_SIG_MR:
2744 qp->sq.wr_data[idx] = IB_WR_REG_SIG_MR;
Christoph Hellwige622f2f2015-10-08 09:16:33 +01002745 mr = to_mmr(sig_handover_wr(wr)->sig_mr);
Sagi Grimberge6631812014-02-23 14:19:11 +02002746
2747 ctrl->imm = cpu_to_be32(mr->ibmr.rkey);
2748 err = set_sig_umr_wr(wr, qp, &seg, &size);
2749 if (err) {
2750 mlx5_ib_warn(dev, "\n");
2751 *bad_wr = wr;
2752 goto out;
2753 }
2754
2755 finish_wqe(qp, ctrl, size, idx, wr->wr_id,
2756 nreq, get_fence(fence, wr),
2757 next_fence, MLX5_OPCODE_UMR);
2758 /*
 2759 * SET_PSV WQEs are not signaled and are solicited
 2760 * on error
2761 */
2762 wr->send_flags &= ~IB_SEND_SIGNALED;
2763 wr->send_flags |= IB_SEND_SOLICITED;
2764 err = begin_wqe(qp, &seg, &ctrl, wr,
2765 &idx, &size, nreq);
2766 if (err) {
2767 mlx5_ib_warn(dev, "\n");
2768 err = -ENOMEM;
2769 *bad_wr = wr;
2770 goto out;
2771 }
2772
Christoph Hellwige622f2f2015-10-08 09:16:33 +01002773 err = set_psv_wr(&sig_handover_wr(wr)->sig_attrs->mem,
Sagi Grimberge6631812014-02-23 14:19:11 +02002774 mr->sig->psv_memory.psv_idx, &seg,
2775 &size);
2776 if (err) {
2777 mlx5_ib_warn(dev, "\n");
2778 *bad_wr = wr;
2779 goto out;
2780 }
2781
2782 finish_wqe(qp, ctrl, size, idx, wr->wr_id,
2783 nreq, get_fence(fence, wr),
2784 next_fence, MLX5_OPCODE_SET_PSV);
2785 err = begin_wqe(qp, &seg, &ctrl, wr,
2786 &idx, &size, nreq);
2787 if (err) {
2788 mlx5_ib_warn(dev, "\n");
2789 err = -ENOMEM;
2790 *bad_wr = wr;
2791 goto out;
2792 }
2793
2794 next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
Christoph Hellwige622f2f2015-10-08 09:16:33 +01002795 err = set_psv_wr(&sig_handover_wr(wr)->sig_attrs->wire,
Sagi Grimberge6631812014-02-23 14:19:11 +02002796 mr->sig->psv_wire.psv_idx, &seg,
2797 &size);
2798 if (err) {
2799 mlx5_ib_warn(dev, "\n");
2800 *bad_wr = wr;
2801 goto out;
2802 }
2803
2804 finish_wqe(qp, ctrl, size, idx, wr->wr_id,
2805 nreq, get_fence(fence, wr),
2806 next_fence, MLX5_OPCODE_SET_PSV);
2807 num_sge = 0;
2808 goto skip_psv;
2809
Eli Cohene126ba92013-07-07 17:25:49 +03002810 default:
2811 break;
2812 }
2813 break;
2814
2815 case IB_QPT_UC:
2816 switch (wr->opcode) {
2817 case IB_WR_RDMA_WRITE:
2818 case IB_WR_RDMA_WRITE_WITH_IMM:
Christoph Hellwige622f2f2015-10-08 09:16:33 +01002819 set_raddr_seg(seg, rdma_wr(wr)->remote_addr,
2820 rdma_wr(wr)->rkey);
Eli Cohene126ba92013-07-07 17:25:49 +03002821 seg += sizeof(struct mlx5_wqe_raddr_seg);
2822 size += sizeof(struct mlx5_wqe_raddr_seg) / 16;
2823 break;
2824
2825 default:
2826 break;
2827 }
2828 break;
2829
2830 case IB_QPT_UD:
2831 case IB_QPT_SMI:
2832 case IB_QPT_GSI:
2833 set_datagram_seg(seg, wr);
Jack Morgensteinf241e742014-07-28 23:30:23 +03002834 seg += sizeof(struct mlx5_wqe_datagram_seg);
Eli Cohene126ba92013-07-07 17:25:49 +03002835 size += sizeof(struct mlx5_wqe_datagram_seg) / 16;
2836 if (unlikely((seg == qend)))
2837 seg = mlx5_get_send_wqe(qp, 0);
2838 break;
2839
2840 case MLX5_IB_QPT_REG_UMR:
2841 if (wr->opcode != MLX5_IB_WR_UMR) {
2842 err = -EINVAL;
2843 mlx5_ib_warn(dev, "bad opcode\n");
2844 goto out;
2845 }
2846 qp->sq.wr_data[idx] = MLX5_IB_WR_UMR;
Christoph Hellwige622f2f2015-10-08 09:16:33 +01002847 ctrl->imm = cpu_to_be32(umr_wr(wr)->mkey);
Eli Cohene126ba92013-07-07 17:25:49 +03002848 set_reg_umr_segment(seg, wr);
2849 seg += sizeof(struct mlx5_wqe_umr_ctrl_seg);
2850 size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16;
2851 if (unlikely((seg == qend)))
2852 seg = mlx5_get_send_wqe(qp, 0);
2853 set_reg_mkey_segment(seg, wr);
2854 seg += sizeof(struct mlx5_mkey_seg);
2855 size += sizeof(struct mlx5_mkey_seg) / 16;
2856 if (unlikely((seg == qend)))
2857 seg = mlx5_get_send_wqe(qp, 0);
2858 break;
2859
2860 default:
2861 break;
2862 }
2863
2864 if (wr->send_flags & IB_SEND_INLINE && num_sge) {
2865 int uninitialized_var(sz);
2866
2867 err = set_data_inl_seg(qp, wr, seg, &sz);
2868 if (unlikely(err)) {
2869 mlx5_ib_warn(dev, "\n");
2870 *bad_wr = wr;
2871 goto out;
2872 }
2873 inl = 1;
2874 size += sz;
2875 } else {
2876 dpseg = seg;
2877 for (i = 0; i < num_sge; i++) {
2878 if (unlikely(dpseg == qend)) {
2879 seg = mlx5_get_send_wqe(qp, 0);
2880 dpseg = seg;
2881 }
2882 if (likely(wr->sg_list[i].length)) {
2883 set_data_ptr_seg(dpseg, wr->sg_list + i);
2884 size += sizeof(struct mlx5_wqe_data_seg) / 16;
2885 dpseg++;
2886 }
2887 }
2888 }
2889
Sagi Grimberg6e5eada2014-02-23 14:19:08 +02002890 finish_wqe(qp, ctrl, size, idx, wr->wr_id, nreq,
2891 get_fence(fence, wr), next_fence,
2892 mlx5_ib_opcode[wr->opcode]);
Sagi Grimberge6631812014-02-23 14:19:11 +02002893skip_psv:
Eli Cohene126ba92013-07-07 17:25:49 +03002894 if (0)
2895 dump_wqe(qp, idx, size);
2896 }
2897
2898out:
2899 if (likely(nreq)) {
2900 qp->sq.head += nreq;
2901
2902 /* Make sure that descriptors are written before
2903 * updating doorbell record and ringing the doorbell
2904 */
2905 wmb();
2906
2907 qp->db.db[MLX5_SND_DBR] = cpu_to_be32(qp->sq.cur_post);
2908
Eli Cohenada388f2014-01-14 17:45:16 +02002909 /* Make sure doorbell record is visible to the HCA before
 2910 * we ring the doorbell */
2911 wmb();
2912
Eli Cohene126ba92013-07-07 17:25:49 +03002913 if (bf->need_lock)
2914 spin_lock(&bf->lock);
Eli Cohen6a4f1392014-12-02 12:26:18 +02002915 else
2916 __acquire(&bf->lock);
Eli Cohene126ba92013-07-07 17:25:49 +03002917
2918 /* TBD enable WC */
2919 if (0 && nreq == 1 && bf->uuarn && inl && size > 1 && size <= bf->buf_size / 16) {
2920 mlx5_bf_copy(bf->reg + bf->offset, (u64 *)ctrl, ALIGN(size * 16, 64), qp);
2921 /* wc_wmb(); */
2922 } else {
2923 mlx5_write64((__be32 *)ctrl, bf->regreg + bf->offset,
2924 MLX5_GET_DOORBELL_LOCK(&bf->lock32));
2925 /* Make sure doorbells don't leak out of SQ spinlock
2926 * and reach the HCA out of order.
2927 */
2928 mmiowb();
2929 }
2930 bf->offset ^= bf->buf_size;
2931 if (bf->need_lock)
2932 spin_unlock(&bf->lock);
Eli Cohen6a4f1392014-12-02 12:26:18 +02002933 else
2934 __release(&bf->lock);
Eli Cohene126ba92013-07-07 17:25:49 +03002935 }
2936
2937 spin_unlock_irqrestore(&qp->sq.lock, flags);
2938
2939 return err;
2940}
2941
2942static void set_sig_seg(struct mlx5_rwqe_sig *sig, int size)
2943{
2944 sig->signature = calc_sig(sig, size);
2945}
2946
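/*
 * post_recv: fill one receive WQE per WR with its scatter entries,
 * terminate a short scatter list with a zero-length entry carrying
 * MLX5_INVALID_LKEY, optionally prepend the receive signature, and
 * publish the new head through the doorbell record.
 */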
2947int mlx5_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
2948 struct ib_recv_wr **bad_wr)
2949{
2950 struct mlx5_ib_qp *qp = to_mqp(ibqp);
2951 struct mlx5_wqe_data_seg *scat;
2952 struct mlx5_rwqe_sig *sig;
2953 unsigned long flags;
2954 int err = 0;
2955 int nreq;
2956 int ind;
2957 int i;
2958
2959 spin_lock_irqsave(&qp->rq.lock, flags);
2960
2961 ind = qp->rq.head & (qp->rq.wqe_cnt - 1);
2962
2963 for (nreq = 0; wr; nreq++, wr = wr->next) {
2964 if (mlx5_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) {
2965 err = -ENOMEM;
2966 *bad_wr = wr;
2967 goto out;
2968 }
2969
2970 if (unlikely(wr->num_sge > qp->rq.max_gs)) {
2971 err = -EINVAL;
2972 *bad_wr = wr;
2973 goto out;
2974 }
2975
2976 scat = get_recv_wqe(qp, ind);
2977 if (qp->wq_sig)
2978 scat++;
2979
2980 for (i = 0; i < wr->num_sge; i++)
2981 set_data_ptr_seg(scat + i, wr->sg_list + i);
2982
2983 if (i < qp->rq.max_gs) {
2984 scat[i].byte_count = 0;
2985 scat[i].lkey = cpu_to_be32(MLX5_INVALID_LKEY);
2986 scat[i].addr = 0;
2987 }
2988
2989 if (qp->wq_sig) {
2990 sig = (struct mlx5_rwqe_sig *)scat;
2991 set_sig_seg(sig, (qp->rq.max_gs + 1) << 2);
2992 }
2993
2994 qp->rq.wrid[ind] = wr->wr_id;
2995
2996 ind = (ind + 1) & (qp->rq.wqe_cnt - 1);
2997 }
2998
2999out:
3000 if (likely(nreq)) {
3001 qp->rq.head += nreq;
3002
3003 /* Make sure that descriptors are written before
3004 * doorbell record.
3005 */
3006 wmb();
3007
3008 *qp->db.db = cpu_to_be32(qp->rq.head & 0xffff);
3009 }
3010
3011 spin_unlock_irqrestore(&qp->rq.lock, flags);
3012
3013 return err;
3014}
3015
3016static inline enum ib_qp_state to_ib_qp_state(enum mlx5_qp_state mlx5_state)
3017{
3018 switch (mlx5_state) {
3019 case MLX5_QP_STATE_RST: return IB_QPS_RESET;
3020 case MLX5_QP_STATE_INIT: return IB_QPS_INIT;
3021 case MLX5_QP_STATE_RTR: return IB_QPS_RTR;
3022 case MLX5_QP_STATE_RTS: return IB_QPS_RTS;
3023 case MLX5_QP_STATE_SQ_DRAINING:
3024 case MLX5_QP_STATE_SQD: return IB_QPS_SQD;
3025 case MLX5_QP_STATE_SQER: return IB_QPS_SQE;
3026 case MLX5_QP_STATE_ERR: return IB_QPS_ERR;
3027 default: return -1;
3028 }
3029}
3030
3031static inline enum ib_mig_state to_ib_mig_state(int mlx5_mig_state)
3032{
3033 switch (mlx5_mig_state) {
3034 case MLX5_QP_PM_ARMED: return IB_MIG_ARMED;
3035 case MLX5_QP_PM_REARM: return IB_MIG_REARM;
3036 case MLX5_QP_PM_MIGRATED: return IB_MIG_MIGRATED;
3037 default: return -1;
3038 }
3039}
3040
3041static int to_ib_qp_access_flags(int mlx5_flags)
3042{
3043 int ib_flags = 0;
3044
3045 if (mlx5_flags & MLX5_QP_BIT_RRE)
3046 ib_flags |= IB_ACCESS_REMOTE_READ;
3047 if (mlx5_flags & MLX5_QP_BIT_RWE)
3048 ib_flags |= IB_ACCESS_REMOTE_WRITE;
3049 if (mlx5_flags & MLX5_QP_BIT_RAE)
3050 ib_flags |= IB_ACCESS_REMOTE_ATOMIC;
3051
3052 return ib_flags;
3053}
3054
3055static void to_ib_ah_attr(struct mlx5_ib_dev *ibdev, struct ib_ah_attr *ib_ah_attr,
3056 struct mlx5_qp_path *path)
3057{
Jack Morgenstein9603b612014-07-28 23:30:22 +03003058 struct mlx5_core_dev *dev = ibdev->mdev;
Eli Cohene126ba92013-07-07 17:25:49 +03003059
3060 memset(ib_ah_attr, 0, sizeof(*ib_ah_attr));
3061 ib_ah_attr->port_num = path->port;
3062
Eli Cohenc7a08ac2014-10-02 12:19:42 +03003063 if (ib_ah_attr->port_num == 0 ||
Saeed Mahameed938fe832015-05-28 22:28:41 +03003064 ib_ah_attr->port_num > MLX5_CAP_GEN(dev, num_ports))
Eli Cohene126ba92013-07-07 17:25:49 +03003065 return;
3066
Achiad Shochat2811ba52015-12-23 18:47:24 +02003067 ib_ah_attr->sl = path->dci_cfi_prio_sl & 0xf;
Eli Cohene126ba92013-07-07 17:25:49 +03003068
3069 ib_ah_attr->dlid = be16_to_cpu(path->rlid);
3070 ib_ah_attr->src_path_bits = path->grh_mlid & 0x7f;
3071 ib_ah_attr->static_rate = path->static_rate ? path->static_rate - 5 : 0;
3072 ib_ah_attr->ah_flags = (path->grh_mlid & (1 << 7)) ? IB_AH_GRH : 0;
3073 if (ib_ah_attr->ah_flags) {
3074 ib_ah_attr->grh.sgid_index = path->mgid_index;
3075 ib_ah_attr->grh.hop_limit = path->hop_limit;
3076 ib_ah_attr->grh.traffic_class =
3077 (be32_to_cpu(path->tclass_flowlabel) >> 20) & 0xff;
3078 ib_ah_attr->grh.flow_label =
3079 be32_to_cpu(path->tclass_flowlabel) & 0xfffff;
3080 memcpy(ib_ah_attr->grh.dgid.raw,
3081 path->rgid, sizeof(ib_ah_attr->grh.dgid.raw));
3082 }
3083}
3084
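/*
 * query_qp: read the QP context back from firmware and translate it
 * into ib_qp_attr / ib_qp_init_attr, reversing the encodings used in
 * __mlx5_ib_modify_qp (state in flags bits 31:28, log2 resource depths
 * in params1/params2 bits 21-23, timers and retry counts in their
 * respective fields).
 */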
3085int mlx5_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask,
3086 struct ib_qp_init_attr *qp_init_attr)
3087{
3088 struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
3089 struct mlx5_ib_qp *qp = to_mqp(ibqp);
3090 struct mlx5_query_qp_mbox_out *outb;
3091 struct mlx5_qp_context *context;
3092 int mlx5_state;
3093 int err = 0;
3094
Haggai Eran6aec21f2014-12-11 17:04:23 +02003095#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
3096 /*
3097 * Wait for any outstanding page faults, in case the user frees memory
3098 * based upon this query's result.
3099 */
3100 flush_workqueue(mlx5_ib_page_fault_wq);
3101#endif
3102
Eli Cohene126ba92013-07-07 17:25:49 +03003103 mutex_lock(&qp->mutex);
3104 outb = kzalloc(sizeof(*outb), GFP_KERNEL);
3105 if (!outb) {
3106 err = -ENOMEM;
3107 goto out;
3108 }
3109 context = &outb->ctx;
Jack Morgenstein9603b612014-07-28 23:30:22 +03003110 err = mlx5_core_qp_query(dev->mdev, &qp->mqp, outb, sizeof(*outb));
Eli Cohene126ba92013-07-07 17:25:49 +03003111 if (err)
3112 goto out_free;
3113
3114 mlx5_state = be32_to_cpu(context->flags) >> 28;
3115
3116 qp->state = to_ib_qp_state(mlx5_state);
3117 qp_attr->qp_state = qp->state;
3118 qp_attr->path_mtu = context->mtu_msgmax >> 5;
3119 qp_attr->path_mig_state =
3120 to_ib_mig_state((be32_to_cpu(context->flags) >> 11) & 0x3);
3121 qp_attr->qkey = be32_to_cpu(context->qkey);
3122 qp_attr->rq_psn = be32_to_cpu(context->rnr_nextrecvpsn) & 0xffffff;
3123 qp_attr->sq_psn = be32_to_cpu(context->next_send_psn) & 0xffffff;
3124 qp_attr->dest_qp_num = be32_to_cpu(context->log_pg_sz_remote_qpn) & 0xffffff;
3125 qp_attr->qp_access_flags =
3126 to_ib_qp_access_flags(be32_to_cpu(context->params2));
3127
3128 if (qp->ibqp.qp_type == IB_QPT_RC || qp->ibqp.qp_type == IB_QPT_UC) {
3129 to_ib_ah_attr(dev, &qp_attr->ah_attr, &context->pri_path);
3130 to_ib_ah_attr(dev, &qp_attr->alt_ah_attr, &context->alt_path);
3131 qp_attr->alt_pkey_index = context->alt_path.pkey_index & 0x7f;
3132 qp_attr->alt_port_num = qp_attr->alt_ah_attr.port_num;
3133 }
3134
3135 qp_attr->pkey_index = context->pri_path.pkey_index & 0x7f;
3136 qp_attr->port_num = context->pri_path.port;
3137
3138 /* qp_attr->en_sqd_async_notify is only applicable in modify qp */
3139 qp_attr->sq_draining = mlx5_state == MLX5_QP_STATE_SQ_DRAINING;
3140
3141 qp_attr->max_rd_atomic = 1 << ((be32_to_cpu(context->params1) >> 21) & 0x7);
3142
3143 qp_attr->max_dest_rd_atomic =
3144 1 << ((be32_to_cpu(context->params2) >> 21) & 0x7);
3145 qp_attr->min_rnr_timer =
3146 (be32_to_cpu(context->rnr_nextrecvpsn) >> 24) & 0x1f;
3147 qp_attr->timeout = context->pri_path.ackto_lt >> 3;
3148 qp_attr->retry_cnt = (be32_to_cpu(context->params1) >> 16) & 0x7;
3149 qp_attr->rnr_retry = (be32_to_cpu(context->params1) >> 13) & 0x7;
3150 qp_attr->alt_timeout = context->alt_path.ackto_lt >> 3;
3151 qp_attr->cur_qp_state = qp_attr->qp_state;
3152 qp_attr->cap.max_recv_wr = qp->rq.wqe_cnt;
3153 qp_attr->cap.max_recv_sge = qp->rq.max_gs;
3154
3155 if (!ibqp->uobject) {
3156 qp_attr->cap.max_send_wr = qp->sq.wqe_cnt;
3157 qp_attr->cap.max_send_sge = qp->sq.max_gs;
3158 } else {
3159 qp_attr->cap.max_send_wr = 0;
3160 qp_attr->cap.max_send_sge = 0;
3161 }
3162
3163 /* We don't support inline sends for kernel QPs (yet), and we
3164 * don't know what userspace's value should be.
3165 */
3166 qp_attr->cap.max_inline_data = 0;
3167
3168 qp_init_attr->cap = qp_attr->cap;
3169
3170 qp_init_attr->create_flags = 0;
3171 if (qp->flags & MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK)
3172 qp_init_attr->create_flags |= IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK;
3173
Leon Romanovsky051f2632015-12-20 12:16:11 +02003174 if (qp->flags & MLX5_IB_QP_CROSS_CHANNEL)
3175 qp_init_attr->create_flags |= IB_QP_CREATE_CROSS_CHANNEL;
3176 if (qp->flags & MLX5_IB_QP_MANAGED_SEND)
3177 qp_init_attr->create_flags |= IB_QP_CREATE_MANAGED_SEND;
3178 if (qp->flags & MLX5_IB_QP_MANAGED_RECV)
3179 qp_init_attr->create_flags |= IB_QP_CREATE_MANAGED_RECV;
3180
Eli Cohene126ba92013-07-07 17:25:49 +03003181 qp_init_attr->sq_sig_type = qp->sq_signal_bits & MLX5_WQE_CTRL_CQ_UPDATE ?
3182 IB_SIGNAL_ALL_WR : IB_SIGNAL_REQ_WR;
3183
3184out_free:
3185 kfree(outb);
3186
3187out:
3188 mutex_unlock(&qp->mutex);
3189 return err;
3190}
3191
3192struct ib_xrcd *mlx5_ib_alloc_xrcd(struct ib_device *ibdev,
3193 struct ib_ucontext *context,
3194 struct ib_udata *udata)
3195{
3196 struct mlx5_ib_dev *dev = to_mdev(ibdev);
3197 struct mlx5_ib_xrcd *xrcd;
3198 int err;
3199
Saeed Mahameed938fe832015-05-28 22:28:41 +03003200 if (!MLX5_CAP_GEN(dev->mdev, xrc))
Eli Cohene126ba92013-07-07 17:25:49 +03003201 return ERR_PTR(-ENOSYS);
3202
3203 xrcd = kmalloc(sizeof(*xrcd), GFP_KERNEL);
3204 if (!xrcd)
3205 return ERR_PTR(-ENOMEM);
3206
Jack Morgenstein9603b612014-07-28 23:30:22 +03003207 err = mlx5_core_xrcd_alloc(dev->mdev, &xrcd->xrcdn);
Eli Cohene126ba92013-07-07 17:25:49 +03003208 if (err) {
3209 kfree(xrcd);
3210 return ERR_PTR(-ENOMEM);
3211 }
3212
3213 return &xrcd->ibxrcd;
3214}
3215
3216int mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd)
3217{
3218 struct mlx5_ib_dev *dev = to_mdev(xrcd->device);
3219 u32 xrcdn = to_mxrcd(xrcd)->xrcdn;
3220 int err;
3221
Jack Morgenstein9603b612014-07-28 23:30:22 +03003222 err = mlx5_core_xrcd_dealloc(dev->mdev, xrcdn);
Eli Cohene126ba92013-07-07 17:25:49 +03003223 if (err) {
3224 mlx5_ib_warn(dev, "failed to dealloc xrcdn 0x%x\n", xrcdn);
3225 return err;
3226 }
3227
3228 kfree(xrcd);
3229
3230 return 0;
3231}