/*
 * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/log2.h>
#include <linux/slab.h>
#include <linux/netdevice.h>

#include <rdma/ib_cache.h>
#include <rdma/ib_pack.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_mad.h>

#include <linux/mlx4/qp.h>

#include "mlx4_ib.h"
#include "user.h"

enum {
	MLX4_IB_ACK_REQ_FREQ	= 8,
};

enum {
	MLX4_IB_DEFAULT_SCHED_QUEUE	= 0x83,
	MLX4_IB_DEFAULT_QP0_SCHED_QUEUE	= 0x3f,
	MLX4_IB_LINK_TYPE_IB		= 0,
	MLX4_IB_LINK_TYPE_ETH		= 1
};

enum {
	/*
	 * Largest possible UD header: send with GRH and immediate
	 * data plus 18 bytes for an Ethernet header with VLAN/802.1Q
	 * tag.  (LRH would only use 8 bytes, so Ethernet is the
	 * biggest case)
	 */
	MLX4_IB_UD_HEADER_SIZE		= 82,
	MLX4_IB_LSO_HEADER_SPARE	= 128,
};

enum {
	MLX4_IB_IBOE_ETHERTYPE		= 0x8915
};

struct mlx4_ib_sqp {
	struct mlx4_ib_qp	qp;
	int			pkey_index;
	u32			qkey;
	u32			send_psn;
	struct ib_ud_header	ud_header;
	u8			header_buf[MLX4_IB_UD_HEADER_SIZE];
};

enum {
	MLX4_IB_MIN_SQ_STRIDE	= 6,
	MLX4_IB_CACHE_LINE_SIZE	= 64,
};

enum {
	MLX4_RAW_QP_MTU		= 7,
	MLX4_RAW_QP_MSGMAX	= 31,
};

#ifndef ETH_ALEN
#define ETH_ALEN        6
#endif
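/* Pack a 6-byte Ethernet MAC, most significant byte first, into the
 * low 48 bits of a u64, the layout used by mlx4_find_cached_mac().
 */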
static inline u64 mlx4_mac_to_u64(u8 *addr)
{
	u64 mac = 0;
	int i;

	for (i = 0; i < ETH_ALEN; i++) {
		mac <<= 8;
		mac |= addr[i];
	}
	return mac;
}

static const __be32 mlx4_ib_opcode[] = {
	[IB_WR_SEND]				= cpu_to_be32(MLX4_OPCODE_SEND),
	[IB_WR_LSO]				= cpu_to_be32(MLX4_OPCODE_LSO),
	[IB_WR_SEND_WITH_IMM]			= cpu_to_be32(MLX4_OPCODE_SEND_IMM),
	[IB_WR_RDMA_WRITE]			= cpu_to_be32(MLX4_OPCODE_RDMA_WRITE),
	[IB_WR_RDMA_WRITE_WITH_IMM]		= cpu_to_be32(MLX4_OPCODE_RDMA_WRITE_IMM),
	[IB_WR_RDMA_READ]			= cpu_to_be32(MLX4_OPCODE_RDMA_READ),
	[IB_WR_ATOMIC_CMP_AND_SWP]		= cpu_to_be32(MLX4_OPCODE_ATOMIC_CS),
	[IB_WR_ATOMIC_FETCH_AND_ADD]		= cpu_to_be32(MLX4_OPCODE_ATOMIC_FA),
	[IB_WR_SEND_WITH_INV]			= cpu_to_be32(MLX4_OPCODE_SEND_INVAL),
	[IB_WR_LOCAL_INV]			= cpu_to_be32(MLX4_OPCODE_LOCAL_INVAL),
	[IB_WR_FAST_REG_MR]			= cpu_to_be32(MLX4_OPCODE_FMR),
	[IB_WR_MASKED_ATOMIC_CMP_AND_SWP]	= cpu_to_be32(MLX4_OPCODE_MASKED_ATOMIC_CS),
	[IB_WR_MASKED_ATOMIC_FETCH_AND_ADD]	= cpu_to_be32(MLX4_OPCODE_MASKED_ATOMIC_FA),
	[IB_WR_BIND_MW]				= cpu_to_be32(MLX4_OPCODE_BIND_MW),
};

static struct mlx4_ib_sqp *to_msqp(struct mlx4_ib_qp *mqp)
{
	return container_of(mqp, struct mlx4_ib_sqp, qp);
}

static int is_tunnel_qp(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp)
{
	if (!mlx4_is_master(dev->dev))
		return 0;

	return qp->mqp.qpn >= dev->dev->phys_caps.base_tunnel_sqpn &&
	       qp->mqp.qpn < dev->dev->phys_caps.base_tunnel_sqpn +
	       8 * MLX4_MFUNC_MAX;
}

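/*
 * A QP is a special QP (SQP) if it is a real QP0/QP1 on the PPF or on
 * a native (non-SR-IOV) device, or one of the per-port proxy QP0/QP1
 * QPs on a multi-function device.
 */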
static int is_sqp(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp)
{
	int proxy_sqp = 0;
	int real_sqp = 0;
	int i;
	/* PPF or Native -- real SQP */
	real_sqp = ((mlx4_is_master(dev->dev) || !mlx4_is_mfunc(dev->dev)) &&
		    qp->mqp.qpn >= dev->dev->phys_caps.base_sqpn &&
		    qp->mqp.qpn <= dev->dev->phys_caps.base_sqpn + 3);
	if (real_sqp)
		return 1;
	/* VF or PF -- proxy SQP */
	if (mlx4_is_mfunc(dev->dev)) {
		for (i = 0; i < dev->dev->caps.num_ports; i++) {
			if (qp->mqp.qpn == dev->dev->caps.qp0_proxy[i] ||
			    qp->mqp.qpn == dev->dev->caps.qp1_proxy[i]) {
				proxy_sqp = 1;
				break;
			}
		}
	}
	return proxy_sqp;
}

/* used for INIT/CLOSE port logic */
static int is_qp0(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp)
{
	int proxy_qp0 = 0;
	int real_qp0 = 0;
	int i;
	/* PPF or Native -- real QP0 */
	real_qp0 = ((mlx4_is_master(dev->dev) || !mlx4_is_mfunc(dev->dev)) &&
		    qp->mqp.qpn >= dev->dev->phys_caps.base_sqpn &&
		    qp->mqp.qpn <= dev->dev->phys_caps.base_sqpn + 1);
	if (real_qp0)
		return 1;
	/* VF or PF -- proxy QP0 */
	if (mlx4_is_mfunc(dev->dev)) {
		for (i = 0; i < dev->dev->caps.num_ports; i++) {
			if (qp->mqp.qpn == dev->dev->caps.qp0_proxy[i]) {
				proxy_qp0 = 1;
				break;
			}
		}
	}
	return proxy_qp0;
}

static void *get_wqe(struct mlx4_ib_qp *qp, int offset)
{
	return mlx4_buf_offset(&qp->buf, offset);
}

static void *get_recv_wqe(struct mlx4_ib_qp *qp, int n)
{
	return get_wqe(qp, qp->rq.offset + (n << qp->rq.wqe_shift));
}

static void *get_send_wqe(struct mlx4_ib_qp *qp, int n)
{
	return get_wqe(qp, qp->sq.offset + (n << qp->sq.wqe_shift));
}

/*
 * Stamp a SQ WQE so that it is invalid if prefetched by marking the
 * first four bytes of every 64 byte chunk with
 * 0x7FFFFFFF | (invalid_ownership_value << 31).
 *
 * When the max work request size is less than or equal to the WQE
 * basic block size, as an optimization, we can stamp all WQEs with
 * 0xffffffff, and skip the very first chunk of each WQE.
 */
static void stamp_send_wqe(struct mlx4_ib_qp *qp, int n, int size)
{
	__be32 *wqe;
	int i;
	int s;
	int ind;
	void *buf;
	__be32 stamp;
	struct mlx4_wqe_ctrl_seg *ctrl;

	if (qp->sq_max_wqes_per_wr > 1) {
		s = roundup(size, 1U << qp->sq.wqe_shift);
		for (i = 0; i < s; i += 64) {
			ind = (i >> qp->sq.wqe_shift) + n;
			stamp = ind & qp->sq.wqe_cnt ? cpu_to_be32(0x7fffffff) :
						       cpu_to_be32(0xffffffff);
			buf = get_send_wqe(qp, ind & (qp->sq.wqe_cnt - 1));
			wqe = buf + (i & ((1 << qp->sq.wqe_shift) - 1));
			*wqe = stamp;
		}
	} else {
		ctrl = buf = get_send_wqe(qp, n & (qp->sq.wqe_cnt - 1));
		s = (ctrl->fence_size & 0x3f) << 4;
		for (i = 64; i < s; i += 64) {
			wqe = buf + i;
			*wqe = cpu_to_be32(0xffffffff);
		}
	}
}

static void post_nop_wqe(struct mlx4_ib_qp *qp, int n, int size)
{
	struct mlx4_wqe_ctrl_seg *ctrl;
	struct mlx4_wqe_inline_seg *inl;
	void *wqe;
	int s;

	ctrl = wqe = get_send_wqe(qp, n & (qp->sq.wqe_cnt - 1));
	s = sizeof(struct mlx4_wqe_ctrl_seg);

	if (qp->ibqp.qp_type == IB_QPT_UD) {
		struct mlx4_wqe_datagram_seg *dgram = wqe + sizeof *ctrl;
		struct mlx4_av *av = (struct mlx4_av *)dgram->av;
		memset(dgram, 0, sizeof *dgram);
		av->port_pd = cpu_to_be32((qp->port << 24) | to_mpd(qp->ibqp.pd)->pdn);
		s += sizeof(struct mlx4_wqe_datagram_seg);
	}

	/* Pad the remainder of the WQE with an inline data segment. */
	if (size > s) {
		inl = wqe + s;
		inl->byte_count = cpu_to_be32(1 << 31 | (size - s - sizeof *inl));
	}
	ctrl->srcrb_flags = 0;
	ctrl->fence_size = size / 16;
	/*
	 * Make sure descriptor is fully written before setting ownership bit
	 * (because HW can start executing as soon as we do).
	 */
	wmb();

	ctrl->owner_opcode = cpu_to_be32(MLX4_OPCODE_NOP | MLX4_WQE_CTRL_NEC) |
		(n & qp->sq.wqe_cnt ? cpu_to_be32(1 << 31) : 0);

	stamp_send_wqe(qp, n + qp->sq_spare_wqes, size);
}

/* Post NOP WQE to prevent wrap-around in the middle of WR */
static inline unsigned pad_wraparound(struct mlx4_ib_qp *qp, int ind)
{
	unsigned s = qp->sq.wqe_cnt - (ind & (qp->sq.wqe_cnt - 1));
	if (unlikely(s < qp->sq_max_wqes_per_wr)) {
		post_nop_wqe(qp, ind, s << qp->sq.wqe_shift);
		ind += s;
	}
	return ind;
}

static void mlx4_ib_qp_event(struct mlx4_qp *qp, enum mlx4_event type)
{
	struct ib_event event;
	struct ib_qp *ibqp = &to_mibqp(qp)->ibqp;

	if (type == MLX4_EVENT_TYPE_PATH_MIG)
		to_mibqp(qp)->port = to_mibqp(qp)->alt_port;

	if (ibqp->event_handler) {
		event.device     = ibqp->device;
		event.element.qp = ibqp;
		switch (type) {
		case MLX4_EVENT_TYPE_PATH_MIG:
			event.event = IB_EVENT_PATH_MIG;
			break;
		case MLX4_EVENT_TYPE_COMM_EST:
			event.event = IB_EVENT_COMM_EST;
			break;
		case MLX4_EVENT_TYPE_SQ_DRAINED:
			event.event = IB_EVENT_SQ_DRAINED;
			break;
		case MLX4_EVENT_TYPE_SRQ_QP_LAST_WQE:
			event.event = IB_EVENT_QP_LAST_WQE_REACHED;
			break;
		case MLX4_EVENT_TYPE_WQ_CATAS_ERROR:
			event.event = IB_EVENT_QP_FATAL;
			break;
		case MLX4_EVENT_TYPE_PATH_MIG_FAILED:
			event.event = IB_EVENT_PATH_MIG_ERR;
			break;
		case MLX4_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
			event.event = IB_EVENT_QP_REQ_ERR;
			break;
		case MLX4_EVENT_TYPE_WQ_ACCESS_ERROR:
			event.event = IB_EVENT_QP_ACCESS_ERR;
			break;
		default:
			pr_warn("Unexpected event type %d "
				"on QP %06x\n", type, qp->qpn);
			return;
		}

		ibqp->event_handler(&event, ibqp->qp_context);
	}
}

static int send_wqe_overhead(enum mlx4_ib_qp_type type, u32 flags)
{
	/*
	 * UD WQEs must have a datagram segment.
	 * RC and UC WQEs might have a remote address segment.
	 * MLX WQEs need two extra inline data segments (for the UD
	 * header and space for the ICRC).
	 */
	switch (type) {
	case MLX4_IB_QPT_UD:
		return sizeof (struct mlx4_wqe_ctrl_seg) +
			sizeof (struct mlx4_wqe_datagram_seg) +
			((flags & MLX4_IB_QP_LSO) ? MLX4_IB_LSO_HEADER_SPARE : 0);
	case MLX4_IB_QPT_PROXY_SMI_OWNER:
	case MLX4_IB_QPT_PROXY_SMI:
	case MLX4_IB_QPT_PROXY_GSI:
		return sizeof (struct mlx4_wqe_ctrl_seg) +
			sizeof (struct mlx4_wqe_datagram_seg) + 64;
	case MLX4_IB_QPT_TUN_SMI_OWNER:
	case MLX4_IB_QPT_TUN_GSI:
		return sizeof (struct mlx4_wqe_ctrl_seg) +
			sizeof (struct mlx4_wqe_datagram_seg);

	case MLX4_IB_QPT_UC:
		return sizeof (struct mlx4_wqe_ctrl_seg) +
			sizeof (struct mlx4_wqe_raddr_seg);
	case MLX4_IB_QPT_RC:
		return sizeof (struct mlx4_wqe_ctrl_seg) +
			sizeof (struct mlx4_wqe_atomic_seg) +
			sizeof (struct mlx4_wqe_raddr_seg);
	case MLX4_IB_QPT_SMI:
	case MLX4_IB_QPT_GSI:
		return sizeof (struct mlx4_wqe_ctrl_seg) +
			ALIGN(MLX4_IB_UD_HEADER_SIZE +
			      DIV_ROUND_UP(MLX4_IB_UD_HEADER_SIZE,
					   MLX4_INLINE_ALIGN) *
			      sizeof (struct mlx4_wqe_inline_seg),
			      sizeof (struct mlx4_wqe_data_seg)) +
			ALIGN(4 +
			      sizeof (struct mlx4_wqe_inline_seg),
			      sizeof (struct mlx4_wqe_data_seg));
	default:
		return sizeof (struct mlx4_wqe_ctrl_seg);
	}
}

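/*
 * Size the receive queue: WQE count and gather entries are rounded up
 * to powers of two within device limits; QPs without an RQ (e.g. with
 * an SRQ attached) must request zero receive WRs.
 */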
static int set_rq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap,
		       int is_user, int has_rq, struct mlx4_ib_qp *qp)
{
	/* Sanity check RQ size before proceeding */
	if (cap->max_recv_wr > dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE ||
	    cap->max_recv_sge > min(dev->dev->caps.max_sq_sg, dev->dev->caps.max_rq_sg))
		return -EINVAL;

	if (!has_rq) {
		if (cap->max_recv_wr)
			return -EINVAL;

		qp->rq.wqe_cnt = qp->rq.max_gs = 0;
	} else {
		/* HW requires >= 1 RQ entry with >= 1 gather entry */
		if (is_user && (!cap->max_recv_wr || !cap->max_recv_sge))
			return -EINVAL;

		qp->rq.wqe_cnt	 = roundup_pow_of_two(max(1U, cap->max_recv_wr));
		qp->rq.max_gs	 = roundup_pow_of_two(max(1U, cap->max_recv_sge));
		qp->rq.wqe_shift = ilog2(qp->rq.max_gs * sizeof (struct mlx4_wqe_data_seg));
	}

	/* leave userspace return values as they were, so as not to break ABI */
	if (is_user) {
		cap->max_recv_wr  = qp->rq.max_post = qp->rq.wqe_cnt;
		cap->max_recv_sge = qp->rq.max_gs;
	} else {
		cap->max_recv_wr  = qp->rq.max_post =
			min(dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE, qp->rq.wqe_cnt);
		cap->max_recv_sge = min(qp->rq.max_gs,
					min(dev->dev->caps.max_sq_sg,
					    dev->dev->caps.max_rq_sg));
	}

	return 0;
}

static int set_kernel_sq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap,
			      enum mlx4_ib_qp_type type, struct mlx4_ib_qp *qp)
{
	int s;

	/* Sanity check SQ size before proceeding */
	if (cap->max_send_wr > (dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE) ||
	    cap->max_send_sge > min(dev->dev->caps.max_sq_sg, dev->dev->caps.max_rq_sg) ||
	    cap->max_inline_data + send_wqe_overhead(type, qp->flags) +
	    sizeof (struct mlx4_wqe_inline_seg) > dev->dev->caps.max_sq_desc_sz)
		return -EINVAL;

	/*
	 * For MLX transport we need 2 extra S/G entries:
	 * one for the header and one for the checksum at the end
	 */
	if ((type == MLX4_IB_QPT_SMI || type == MLX4_IB_QPT_GSI ||
	     type & (MLX4_IB_QPT_PROXY_SMI_OWNER | MLX4_IB_QPT_TUN_SMI_OWNER)) &&
	    cap->max_send_sge + 2 > dev->dev->caps.max_sq_sg)
		return -EINVAL;

	s = max(cap->max_send_sge * sizeof (struct mlx4_wqe_data_seg),
		cap->max_inline_data + sizeof (struct mlx4_wqe_inline_seg)) +
		send_wqe_overhead(type, qp->flags);

	if (s > dev->dev->caps.max_sq_desc_sz)
		return -EINVAL;

	/*
	 * Hermon supports shrinking WQEs, such that a single work
	 * request can include multiple units of 1 << wqe_shift.  This
	 * way, work requests can differ in size, and do not have to
	 * be a power of 2 in size, saving memory and speeding up send
	 * WR posting.  Unfortunately, if we do this then the
	 * wqe_index field in CQEs can't be used to look up the WR ID
	 * anymore, so we do this only if selective signaling is off.
	 *
	 * Further, on 32-bit platforms, we can't use vmap() to make
	 * the QP buffer virtually contiguous.  Thus we have to use
	 * constant-sized WRs to make sure a WR is always fully within
	 * a single page-sized chunk.
	 *
	 * Finally, we use NOP work requests to pad the end of the
	 * work queue, to avoid wrap-around in the middle of WR.  We
	 * set NEC bit to avoid getting completions with error for
	 * these NOP WRs, but since NEC is only supported starting
	 * with firmware 2.2.232, we use constant-sized WRs for older
	 * firmware.
	 *
	 * And, since MLX QPs only support SEND, we use constant-sized
	 * WRs in this case.
	 *
	 * We look for the smallest value of wqe_shift such that the
	 * resulting number of wqes does not exceed device
	 * capabilities.
	 *
	 * We set WQE size to at least 64 bytes, this way stamping
	 * invalidates each WQE.
	 */
	if (dev->dev->caps.fw_ver >= MLX4_FW_VER_WQE_CTRL_NEC &&
	    qp->sq_signal_bits && BITS_PER_LONG == 64 &&
	    type != MLX4_IB_QPT_SMI && type != MLX4_IB_QPT_GSI &&
	    !(type & (MLX4_IB_QPT_PROXY_SMI_OWNER | MLX4_IB_QPT_PROXY_SMI |
		      MLX4_IB_QPT_PROXY_GSI | MLX4_IB_QPT_TUN_SMI_OWNER)))
		qp->sq.wqe_shift = ilog2(64);
	else
		qp->sq.wqe_shift = ilog2(roundup_pow_of_two(s));

	for (;;) {
		qp->sq_max_wqes_per_wr = DIV_ROUND_UP(s, 1U << qp->sq.wqe_shift);

		/*
		 * We need to leave 2 KB + 1 WR of headroom in the SQ to
		 * allow HW to prefetch.
		 */
		qp->sq_spare_wqes = (2048 >> qp->sq.wqe_shift) + qp->sq_max_wqes_per_wr;
		qp->sq.wqe_cnt = roundup_pow_of_two(cap->max_send_wr *
						    qp->sq_max_wqes_per_wr +
						    qp->sq_spare_wqes);

		if (qp->sq.wqe_cnt <= dev->dev->caps.max_wqes)
			break;

		if (qp->sq_max_wqes_per_wr <= 1)
			return -EINVAL;

		++qp->sq.wqe_shift;
	}

	qp->sq.max_gs = (min(dev->dev->caps.max_sq_desc_sz,
			     (qp->sq_max_wqes_per_wr << qp->sq.wqe_shift)) -
			 send_wqe_overhead(type, qp->flags)) /
		sizeof (struct mlx4_wqe_data_seg);

	qp->buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) +
		(qp->sq.wqe_cnt << qp->sq.wqe_shift);
	if (qp->rq.wqe_shift > qp->sq.wqe_shift) {
		qp->rq.offset = 0;
		qp->sq.offset = qp->rq.wqe_cnt << qp->rq.wqe_shift;
	} else {
		qp->rq.offset = qp->sq.wqe_cnt << qp->sq.wqe_shift;
		qp->sq.offset = 0;
	}

	cap->max_send_wr  = qp->sq.max_post =
		(qp->sq.wqe_cnt - qp->sq_spare_wqes) / qp->sq_max_wqes_per_wr;
	cap->max_send_sge = min(qp->sq.max_gs,
				min(dev->dev->caps.max_sq_sg,
				    dev->dev->caps.max_rq_sg));
	/* We don't support inline sends for kernel QPs (yet) */
	cap->max_inline_data = 0;

	return 0;
}

static int set_user_sq_size(struct mlx4_ib_dev *dev,
			    struct mlx4_ib_qp *qp,
			    struct mlx4_ib_create_qp *ucmd)
{
	/* Sanity check SQ size before proceeding */
	if ((1 << ucmd->log_sq_bb_count) > dev->dev->caps.max_wqes	 ||
	    ucmd->log_sq_stride >
		ilog2(roundup_pow_of_two(dev->dev->caps.max_sq_desc_sz)) ||
	    ucmd->log_sq_stride < MLX4_IB_MIN_SQ_STRIDE)
		return -EINVAL;

	qp->sq.wqe_cnt   = 1 << ucmd->log_sq_bb_count;
	qp->sq.wqe_shift = ucmd->log_sq_stride;

	qp->buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) +
		(qp->sq.wqe_cnt << qp->sq.wqe_shift);

	return 0;
}

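/*
 * Proxy special QPs receive each packet with a mlx4_ib_proxy_sqp_hdr
 * tunnel header scattered in front of it, so allocate one DMA-mapped
 * header buffer per receive WQE.
 */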
static int alloc_proxy_bufs(struct ib_device *dev, struct mlx4_ib_qp *qp)
{
	int i;

	qp->sqp_proxy_rcv =
		kmalloc(sizeof (struct mlx4_ib_buf) * qp->rq.wqe_cnt,
			GFP_KERNEL);
	if (!qp->sqp_proxy_rcv)
		return -ENOMEM;
	for (i = 0; i < qp->rq.wqe_cnt; i++) {
		qp->sqp_proxy_rcv[i].addr =
			kmalloc(sizeof (struct mlx4_ib_proxy_sqp_hdr),
				GFP_KERNEL);
		if (!qp->sqp_proxy_rcv[i].addr)
			goto err;
		qp->sqp_proxy_rcv[i].map =
			ib_dma_map_single(dev, qp->sqp_proxy_rcv[i].addr,
					  sizeof (struct mlx4_ib_proxy_sqp_hdr),
					  DMA_FROM_DEVICE);
	}
	return 0;

err:
	while (i > 0) {
		--i;
		ib_dma_unmap_single(dev, qp->sqp_proxy_rcv[i].map,
				    sizeof (struct mlx4_ib_proxy_sqp_hdr),
				    DMA_FROM_DEVICE);
		kfree(qp->sqp_proxy_rcv[i].addr);
	}
	kfree(qp->sqp_proxy_rcv);
	qp->sqp_proxy_rcv = NULL;
	return -ENOMEM;
}

static void free_proxy_bufs(struct ib_device *dev, struct mlx4_ib_qp *qp)
{
	int i;

	for (i = 0; i < qp->rq.wqe_cnt; i++) {
		ib_dma_unmap_single(dev, qp->sqp_proxy_rcv[i].map,
				    sizeof (struct mlx4_ib_proxy_sqp_hdr),
				    DMA_FROM_DEVICE);
		kfree(qp->sqp_proxy_rcv[i].addr);
	}
	kfree(qp->sqp_proxy_rcv);
}

static int qp_has_rq(struct ib_qp_init_attr *attr)
{
	if (attr->qp_type == IB_QPT_XRC_INI || attr->qp_type == IB_QPT_XRC_TGT)
		return 0;

	return !attr->srq;
}

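/*
 * Common creation path for user and kernel QPs, including the proxy
 * and tunnel QPs used for SR-IOV special QP handling.  If *caller_qp
 * is NULL, the QP (or mlx4_ib_sqp wrapper, for special QP types) is
 * allocated here and returned through *caller_qp.
 */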
static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
			    struct ib_qp_init_attr *init_attr,
			    struct ib_udata *udata, int sqpn, struct mlx4_ib_qp **caller_qp)
{
	int qpn;
	int err;
	struct mlx4_ib_sqp *sqp;
	struct mlx4_ib_qp *qp;
	enum mlx4_ib_qp_type qp_type = (enum mlx4_ib_qp_type) init_attr->qp_type;

	/* When tunneling special qps, we use a plain UD qp */
	if (sqpn) {
		if (mlx4_is_mfunc(dev->dev) &&
		    (!mlx4_is_master(dev->dev) ||
		     !(init_attr->create_flags & MLX4_IB_SRIOV_SQP))) {
			if (init_attr->qp_type == IB_QPT_GSI)
				qp_type = MLX4_IB_QPT_PROXY_GSI;
			else if (mlx4_is_master(dev->dev))
				qp_type = MLX4_IB_QPT_PROXY_SMI_OWNER;
			else
				qp_type = MLX4_IB_QPT_PROXY_SMI;
		}
		qpn = sqpn;
		/* add extra sg entry for tunneling */
		init_attr->cap.max_recv_sge++;
	} else if (init_attr->create_flags & MLX4_IB_SRIOV_TUNNEL_QP) {
		struct mlx4_ib_qp_tunnel_init_attr *tnl_init =
			container_of(init_attr,
				     struct mlx4_ib_qp_tunnel_init_attr, init_attr);
		if ((tnl_init->proxy_qp_type != IB_QPT_SMI &&
		     tnl_init->proxy_qp_type != IB_QPT_GSI)   ||
		    !mlx4_is_master(dev->dev))
			return -EINVAL;
		if (tnl_init->proxy_qp_type == IB_QPT_GSI)
			qp_type = MLX4_IB_QPT_TUN_GSI;
		else if (tnl_init->slave == mlx4_master_func_num(dev->dev))
			qp_type = MLX4_IB_QPT_TUN_SMI_OWNER;
		else
			qp_type = MLX4_IB_QPT_TUN_SMI;
		/* we are definitely in the PPF here, since we are creating
		 * tunnel QPs. base_tunnel_sqpn is therefore valid. */
		qpn = dev->dev->phys_caps.base_tunnel_sqpn + 8 * tnl_init->slave
			+ tnl_init->proxy_qp_type * 2 + tnl_init->port - 1;
		sqpn = qpn;
	}

	if (!*caller_qp) {
		if (qp_type == MLX4_IB_QPT_SMI || qp_type == MLX4_IB_QPT_GSI ||
		    (qp_type & (MLX4_IB_QPT_PROXY_SMI | MLX4_IB_QPT_PROXY_SMI_OWNER |
				MLX4_IB_QPT_PROXY_GSI | MLX4_IB_QPT_TUN_SMI_OWNER))) {
			sqp = kzalloc(sizeof (struct mlx4_ib_sqp), GFP_KERNEL);
			if (!sqp)
				return -ENOMEM;
			qp = &sqp->qp;
		} else {
			qp = kzalloc(sizeof (struct mlx4_ib_qp), GFP_KERNEL);
			if (!qp)
				return -ENOMEM;
		}
	} else
		qp = *caller_qp;

	qp->mlx4_ib_qp_type = qp_type;

	mutex_init(&qp->mutex);
	spin_lock_init(&qp->sq.lock);
	spin_lock_init(&qp->rq.lock);
	INIT_LIST_HEAD(&qp->gid_list);
	INIT_LIST_HEAD(&qp->steering_rules);

	qp->state	 = IB_QPS_RESET;
	if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR)
		qp->sq_signal_bits = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE);

	err = set_rq_size(dev, &init_attr->cap, !!pd->uobject, qp_has_rq(init_attr), qp);
	if (err)
		goto err;

	if (pd->uobject) {
		struct mlx4_ib_create_qp ucmd;

		if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) {
			err = -EFAULT;
			goto err;
		}

		qp->sq_no_prefetch = ucmd.sq_no_prefetch;

		err = set_user_sq_size(dev, qp, &ucmd);
		if (err)
			goto err;

		qp->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr,
				       qp->buf_size, 0, 0);
		if (IS_ERR(qp->umem)) {
			err = PTR_ERR(qp->umem);
			goto err;
		}

		err = mlx4_mtt_init(dev->dev, ib_umem_page_count(qp->umem),
				    ilog2(qp->umem->page_size), &qp->mtt);
		if (err)
			goto err_buf;

		err = mlx4_ib_umem_write_mtt(dev, &qp->mtt, qp->umem);
		if (err)
			goto err_mtt;

		if (qp_has_rq(init_attr)) {
			err = mlx4_ib_db_map_user(to_mucontext(pd->uobject->context),
						  ucmd.db_addr, &qp->db);
			if (err)
				goto err_mtt;
		}
	} else {
		qp->sq_no_prefetch = 0;

		if (init_attr->create_flags & IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK)
			qp->flags |= MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK;

		if (init_attr->create_flags & IB_QP_CREATE_IPOIB_UD_LSO)
			qp->flags |= MLX4_IB_QP_LSO;

		err = set_kernel_sq_size(dev, &init_attr->cap, qp_type, qp);
		if (err)
			goto err;

		if (qp_has_rq(init_attr)) {
			err = mlx4_db_alloc(dev->dev, &qp->db, 0);
			if (err)
				goto err;

			*qp->db.db = 0;
		}

		if (mlx4_buf_alloc(dev->dev, qp->buf_size, PAGE_SIZE * 2, &qp->buf)) {
			err = -ENOMEM;
			goto err_db;
		}

		err = mlx4_mtt_init(dev->dev, qp->buf.npages, qp->buf.page_shift,
				    &qp->mtt);
		if (err)
			goto err_buf;

		err = mlx4_buf_write_mtt(dev->dev, &qp->mtt, &qp->buf);
		if (err)
			goto err_mtt;

		qp->sq.wrid  = kmalloc(qp->sq.wqe_cnt * sizeof (u64), GFP_KERNEL);
		qp->rq.wrid  = kmalloc(qp->rq.wqe_cnt * sizeof (u64), GFP_KERNEL);

		if (!qp->sq.wrid || !qp->rq.wrid) {
			err = -ENOMEM;
			goto err_wrid;
		}
	}

	if (sqpn) {
		if (qp->mlx4_ib_qp_type & (MLX4_IB_QPT_PROXY_SMI_OWNER |
		    MLX4_IB_QPT_PROXY_SMI | MLX4_IB_QPT_PROXY_GSI)) {
			if (alloc_proxy_bufs(pd->device, qp)) {
				err = -ENOMEM;
				goto err_wrid;
			}
		}
	} else {
		/* Raw packet QPNs must be aligned to 8 bits. If not, the WQE
		 * BlueFlame setup flow wrongly causes VLAN insertion. */
		if (init_attr->qp_type == IB_QPT_RAW_PACKET)
			err = mlx4_qp_reserve_range(dev->dev, 1, 1 << 8, &qpn);
		else
			err = mlx4_qp_reserve_range(dev->dev, 1, 1, &qpn);
		if (err)
			goto err_proxy;
	}

	err = mlx4_qp_alloc(dev->dev, qpn, &qp->mqp);
	if (err)
		goto err_qpn;

	if (init_attr->qp_type == IB_QPT_XRC_TGT)
		qp->mqp.qpn |= (1 << 23);

	/*
	 * Hardware wants QPN written in big-endian order (after
	 * shifting) for send doorbell.  Precompute this value to save
	 * a little bit when posting sends.
	 */
	qp->doorbell_qpn = swab32(qp->mqp.qpn << 8);

	qp->mqp.event = mlx4_ib_qp_event;
	if (!*caller_qp)
		*caller_qp = qp;
	return 0;

err_qpn:
	if (!sqpn)
		mlx4_qp_release_range(dev->dev, qpn, 1);
err_proxy:
	if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_GSI)
		free_proxy_bufs(pd->device, qp);
err_wrid:
	if (pd->uobject) {
		if (qp_has_rq(init_attr))
			mlx4_ib_db_unmap_user(to_mucontext(pd->uobject->context), &qp->db);
	} else {
		kfree(qp->sq.wrid);
		kfree(qp->rq.wrid);
	}

err_mtt:
	mlx4_mtt_cleanup(dev->dev, &qp->mtt);

err_buf:
	if (pd->uobject)
		ib_umem_release(qp->umem);
	else
		mlx4_buf_free(dev->dev, qp->buf_size, &qp->buf);

err_db:
	if (!pd->uobject && qp_has_rq(init_attr))
		mlx4_db_free(dev->dev, &qp->db);

err:
	if (!*caller_qp)
		kfree(qp);
	return err;
}

static enum mlx4_qp_state to_mlx4_state(enum ib_qp_state state)
{
	switch (state) {
	case IB_QPS_RESET:	return MLX4_QP_STATE_RST;
	case IB_QPS_INIT:	return MLX4_QP_STATE_INIT;
	case IB_QPS_RTR:	return MLX4_QP_STATE_RTR;
	case IB_QPS_RTS:	return MLX4_QP_STATE_RTS;
	case IB_QPS_SQD:	return MLX4_QP_STATE_SQD;
	case IB_QPS_SQE:	return MLX4_QP_STATE_SQER;
	case IB_QPS_ERR:	return MLX4_QP_STATE_ERR;
	default:		return -1;
	}
}

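/*
 * Take both CQ locks in a fixed order (identical CQs first, then by
 * ascending CQN) so that two paths locking the same pair of CQs can
 * never deadlock against each other.
 */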
static void mlx4_ib_lock_cqs(struct mlx4_ib_cq *send_cq, struct mlx4_ib_cq *recv_cq)
	__acquires(&send_cq->lock) __acquires(&recv_cq->lock)
{
	if (send_cq == recv_cq) {
		spin_lock_irq(&send_cq->lock);
		__acquire(&recv_cq->lock);
	} else if (send_cq->mcq.cqn < recv_cq->mcq.cqn) {
		spin_lock_irq(&send_cq->lock);
		spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING);
	} else {
		spin_lock_irq(&recv_cq->lock);
		spin_lock_nested(&send_cq->lock, SINGLE_DEPTH_NESTING);
	}
}

static void mlx4_ib_unlock_cqs(struct mlx4_ib_cq *send_cq, struct mlx4_ib_cq *recv_cq)
	__releases(&send_cq->lock) __releases(&recv_cq->lock)
{
	if (send_cq == recv_cq) {
		__release(&recv_cq->lock);
		spin_unlock_irq(&send_cq->lock);
	} else if (send_cq->mcq.cqn < recv_cq->mcq.cqn) {
		spin_unlock(&recv_cq->lock);
		spin_unlock_irq(&send_cq->lock);
	} else {
		spin_unlock(&send_cq->lock);
		spin_unlock_irq(&recv_cq->lock);
	}
}

static void del_gid_entries(struct mlx4_ib_qp *qp)
{
	struct mlx4_ib_gid_entry *ge, *tmp;

	list_for_each_entry_safe(ge, tmp, &qp->gid_list, list) {
		list_del(&ge->list);
		kfree(ge);
	}
}

static struct mlx4_ib_pd *get_pd(struct mlx4_ib_qp *qp)
{
	if (qp->ibqp.qp_type == IB_QPT_XRC_TGT)
		return to_mpd(to_mxrcd(qp->ibqp.xrcd)->pd);
	else
		return to_mpd(qp->ibqp.pd);
}

static void get_cqs(struct mlx4_ib_qp *qp,
		    struct mlx4_ib_cq **send_cq, struct mlx4_ib_cq **recv_cq)
{
	switch (qp->ibqp.qp_type) {
	case IB_QPT_XRC_TGT:
		*send_cq = to_mcq(to_mxrcd(qp->ibqp.xrcd)->cq);
		*recv_cq = *send_cq;
		break;
	case IB_QPT_XRC_INI:
		*send_cq = to_mcq(qp->ibqp.send_cq);
		*recv_cq = *send_cq;
		break;
	default:
		*send_cq = to_mcq(qp->ibqp.send_cq);
		*recv_cq = to_mcq(qp->ibqp.recv_cq);
		break;
	}
}

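/*
 * Common teardown path: move the QP back to RESET if needed, purge its
 * CQEs from kernel CQs, unregister and free the hardware QP, and
 * release buffers and doorbell records according to user or kernel
 * ownership.
 */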
static void destroy_qp_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp,
			      int is_user)
{
	struct mlx4_ib_cq *send_cq, *recv_cq;

	if (qp->state != IB_QPS_RESET)
		if (mlx4_qp_modify(dev->dev, NULL, to_mlx4_state(qp->state),
				   MLX4_QP_STATE_RST, NULL, 0, 0, &qp->mqp))
			pr_warn("modify QP %06x to RESET failed.\n",
				qp->mqp.qpn);

	get_cqs(qp, &send_cq, &recv_cq);

	mlx4_ib_lock_cqs(send_cq, recv_cq);

	if (!is_user) {
		__mlx4_ib_cq_clean(recv_cq, qp->mqp.qpn,
				   qp->ibqp.srq ? to_msrq(qp->ibqp.srq): NULL);
		if (send_cq != recv_cq)
			__mlx4_ib_cq_clean(send_cq, qp->mqp.qpn, NULL);
	}

	mlx4_qp_remove(dev->dev, &qp->mqp);

	mlx4_ib_unlock_cqs(send_cq, recv_cq);

	mlx4_qp_free(dev->dev, &qp->mqp);

	if (!is_sqp(dev, qp) && !is_tunnel_qp(dev, qp))
		mlx4_qp_release_range(dev->dev, qp->mqp.qpn, 1);

	mlx4_mtt_cleanup(dev->dev, &qp->mtt);

	if (is_user) {
		if (qp->rq.wqe_cnt)
			mlx4_ib_db_unmap_user(to_mucontext(qp->ibqp.uobject->context),
					      &qp->db);
		ib_umem_release(qp->umem);
	} else {
		kfree(qp->sq.wrid);
		kfree(qp->rq.wrid);
		if (qp->mlx4_ib_qp_type & (MLX4_IB_QPT_PROXY_SMI_OWNER |
		    MLX4_IB_QPT_PROXY_SMI | MLX4_IB_QPT_PROXY_GSI))
			free_proxy_bufs(&dev->ib_dev, qp);
		mlx4_buf_free(dev->dev, qp->buf_size, &qp->buf);
		if (qp->rq.wqe_cnt)
			mlx4_db_free(dev->dev, &qp->db);
	}

	del_gid_entries(qp);
}

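/*
 * Return the QP number to use for an SMI/GSI QP: the real special QPN
 * on a native device or on the PPF owning the SQPs, otherwise the
 * per-port proxy QPN.
 */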
static u32 get_sqp_num(struct mlx4_ib_dev *dev, struct ib_qp_init_attr *attr)
{
	/* Native or PPF */
	if (!mlx4_is_mfunc(dev->dev) ||
	    (mlx4_is_master(dev->dev) &&
	     attr->create_flags & MLX4_IB_SRIOV_SQP)) {
		return  dev->dev->phys_caps.base_sqpn +
			(attr->qp_type == IB_QPT_SMI ? 0 : 2) +
			attr->port_num - 1;
	}
	/* PF or VF -- creating proxies */
	if (attr->qp_type == IB_QPT_SMI)
		return dev->dev->caps.qp0_proxy[attr->port_num - 1];
	else
		return dev->dev->caps.qp1_proxy[attr->port_num - 1];
}

struct ib_qp *mlx4_ib_create_qp(struct ib_pd *pd,
				struct ib_qp_init_attr *init_attr,
				struct ib_udata *udata)
{
	struct mlx4_ib_qp *qp = NULL;
	int err;
	u16 xrcdn = 0;

	/*
	 * We only support LSO, vendor flag1, and multicast loopback blocking,
	 * and only for kernel UD QPs.
	 */
	if (init_attr->create_flags & ~(MLX4_IB_QP_LSO |
					MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK |
					MLX4_IB_SRIOV_TUNNEL_QP | MLX4_IB_SRIOV_SQP))
		return ERR_PTR(-EINVAL);

	if (init_attr->create_flags &&
	    (udata ||
	     ((init_attr->create_flags & ~MLX4_IB_SRIOV_SQP) &&
	      init_attr->qp_type != IB_QPT_UD) ||
	     ((init_attr->create_flags & MLX4_IB_SRIOV_SQP) &&
	      init_attr->qp_type > IB_QPT_GSI)))
		return ERR_PTR(-EINVAL);

	switch (init_attr->qp_type) {
	case IB_QPT_XRC_TGT:
		pd = to_mxrcd(init_attr->xrcd)->pd;
		xrcdn = to_mxrcd(init_attr->xrcd)->xrcdn;
		init_attr->send_cq = to_mxrcd(init_attr->xrcd)->cq;
		/* fall through */
	case IB_QPT_XRC_INI:
		if (!(to_mdev(pd->device)->dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC))
			return ERR_PTR(-ENOSYS);
		init_attr->recv_cq = init_attr->send_cq;
		/* fall through */
	case IB_QPT_RC:
	case IB_QPT_UC:
	case IB_QPT_RAW_PACKET:
		qp = kzalloc(sizeof *qp, GFP_KERNEL);
		if (!qp)
			return ERR_PTR(-ENOMEM);
		/* fall through */
	case IB_QPT_UD:
	{
		err = create_qp_common(to_mdev(pd->device), pd, init_attr,
				       udata, 0, &qp);
		if (err)
			return ERR_PTR(err);

		qp->ibqp.qp_num = qp->mqp.qpn;
		qp->xrcdn = xrcdn;

		break;
	}
	case IB_QPT_SMI:
	case IB_QPT_GSI:
	{
		/* Userspace is not allowed to create special QPs: */
		if (udata)
			return ERR_PTR(-EINVAL);

		err = create_qp_common(to_mdev(pd->device), pd, init_attr, udata,
				       get_sqp_num(to_mdev(pd->device), init_attr),
				       &qp);
		if (err)
			return ERR_PTR(err);

		qp->port	= init_attr->port_num;
		qp->ibqp.qp_num = init_attr->qp_type == IB_QPT_SMI ? 0 : 1;

		break;
	}
	default:
		/* Don't support raw QPs */
		return ERR_PTR(-EINVAL);
	}

	return &qp->ibqp;
}

int mlx4_ib_destroy_qp(struct ib_qp *qp)
{
	struct mlx4_ib_dev *dev = to_mdev(qp->device);
	struct mlx4_ib_qp *mqp = to_mqp(qp);
	struct mlx4_ib_pd *pd;

	if (is_qp0(dev, mqp))
		mlx4_CLOSE_PORT(dev->dev, mqp->port);

	pd = get_pd(mqp);
	destroy_qp_common(dev, mqp, !!pd->ibpd.uobject);

	if (is_sqp(dev, mqp))
		kfree(to_msqp(mqp));
	else
		kfree(mqp);

	return 0;
}

static int to_mlx4_st(struct mlx4_ib_dev *dev, enum mlx4_ib_qp_type type)
{
	switch (type) {
	case MLX4_IB_QPT_RC:		return MLX4_QP_ST_RC;
	case MLX4_IB_QPT_UC:		return MLX4_QP_ST_UC;
	case MLX4_IB_QPT_UD:		return MLX4_QP_ST_UD;
	case MLX4_IB_QPT_XRC_INI:
	case MLX4_IB_QPT_XRC_TGT:	return MLX4_QP_ST_XRC;
	case MLX4_IB_QPT_SMI:
	case MLX4_IB_QPT_GSI:
	case MLX4_IB_QPT_RAW_PACKET:	return MLX4_QP_ST_MLX;

	case MLX4_IB_QPT_PROXY_SMI_OWNER:
	case MLX4_IB_QPT_TUN_SMI_OWNER:	return (mlx4_is_mfunc(dev->dev) ?
						MLX4_QP_ST_MLX : -1);
	case MLX4_IB_QPT_PROXY_SMI:
	case MLX4_IB_QPT_TUN_SMI:
	case MLX4_IB_QPT_PROXY_GSI:
	case MLX4_IB_QPT_TUN_GSI:	return (mlx4_is_mfunc(dev->dev) ?
						MLX4_QP_ST_UD : -1);
	default:			return -1;
	}
}

static __be32 to_mlx4_access_flags(struct mlx4_ib_qp *qp, const struct ib_qp_attr *attr,
				   int attr_mask)
{
	u8 dest_rd_atomic;
	u32 access_flags;
	u32 hw_access_flags = 0;

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		dest_rd_atomic = attr->max_dest_rd_atomic;
	else
		dest_rd_atomic = qp->resp_depth;

	if (attr_mask & IB_QP_ACCESS_FLAGS)
		access_flags = attr->qp_access_flags;
	else
		access_flags = qp->atomic_rd_en;

	if (!dest_rd_atomic)
		access_flags &= IB_ACCESS_REMOTE_WRITE;

	if (access_flags & IB_ACCESS_REMOTE_READ)
		hw_access_flags |= MLX4_QP_BIT_RRE;
	if (access_flags & IB_ACCESS_REMOTE_ATOMIC)
		hw_access_flags |= MLX4_QP_BIT_RAE;
	if (access_flags & IB_ACCESS_REMOTE_WRITE)
		hw_access_flags |= MLX4_QP_BIT_RWE;

	return cpu_to_be32(hw_access_flags);
}

static void store_sqp_attrs(struct mlx4_ib_sqp *sqp, const struct ib_qp_attr *attr,
			    int attr_mask)
{
	if (attr_mask & IB_QP_PKEY_INDEX)
		sqp->pkey_index = attr->pkey_index;
	if (attr_mask & IB_QP_QKEY)
		sqp->qkey = attr->qkey;
	if (attr_mask & IB_QP_SQ_PSN)
		sqp->send_psn = attr->sq_psn;
}

static void mlx4_set_sched(struct mlx4_qp_path *path, u8 port)
{
	path->sched_queue = (path->sched_queue & 0xbf) | ((port - 1) << 6);
}

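/*
 * Fill in a hardware address path from an ib_ah_attr.  On Ethernet
 * (IBoE) ports this also resolves the source MAC to a MAC-table index
 * and, for VLAN tags below 0x1000, a VLAN-table index.
 */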
static int _mlx4_set_path(struct mlx4_ib_dev *dev, const struct ib_ah_attr *ah,
			  u64 smac, u16 vlan_tag, struct mlx4_qp_path *path,
			  u8 port)
{
	int is_eth = rdma_port_get_link_layer(&dev->ib_dev, port) ==
		IB_LINK_LAYER_ETHERNET;
	int vidx;
	int smac_index;

	path->grh_mylmc     = ah->src_path_bits & 0x7f;
	path->rlid	    = cpu_to_be16(ah->dlid);
	if (ah->static_rate) {
		path->static_rate = ah->static_rate + MLX4_STAT_RATE_OFFSET;
		while (path->static_rate > IB_RATE_2_5_GBPS + MLX4_STAT_RATE_OFFSET &&
		       !(1 << path->static_rate & dev->dev->caps.stat_rate_support))
			--path->static_rate;
	} else
		path->static_rate = 0;

	if (ah->ah_flags & IB_AH_GRH) {
		if (ah->grh.sgid_index >= dev->dev->caps.gid_table_len[port]) {
			pr_err("sgid_index (%u) too large. max is %d\n",
			       ah->grh.sgid_index, dev->dev->caps.gid_table_len[port] - 1);
			return -1;
		}

		path->grh_mylmc |= 1 << 7;
		path->mgid_index = ah->grh.sgid_index;
		path->hop_limit  = ah->grh.hop_limit;
		path->tclass_flowlabel =
			cpu_to_be32((ah->grh.traffic_class << 20) |
				    (ah->grh.flow_label));
		memcpy(path->rgid, ah->grh.dgid.raw, 16);
	}

	if (is_eth) {
		path->sched_queue = MLX4_IB_DEFAULT_SCHED_QUEUE |
			((port - 1) << 6) | ((ah->sl & 7) << 3);

		if (!(ah->ah_flags & IB_AH_GRH))
			return -1;

		memcpy(path->dmac, ah->dmac, ETH_ALEN);
		path->ackto = MLX4_IB_LINK_TYPE_ETH;
		/* find the index into MAC table for IBoE */
		if (!is_zero_ether_addr((const u8 *)&smac)) {
			if (mlx4_find_cached_mac(dev->dev, port, smac,
						 &smac_index))
				return -ENOENT;
		} else {
			smac_index = 0;
		}

		path->grh_mylmc &= 0x80 | smac_index;

		path->feup |= MLX4_FEUP_FORCE_ETH_UP;
		if (vlan_tag < 0x1000) {
			if (mlx4_find_cached_vlan(dev->dev, port, vlan_tag, &vidx))
				return -ENOENT;

			path->vlan_index = vidx;
			path->fl = 1 << 6;
			path->feup |= MLX4_FVL_FORCE_ETH_VLAN;
		}
	} else
		path->sched_queue = MLX4_IB_DEFAULT_SCHED_QUEUE |
			((port - 1) << 6) | ((ah->sl & 0xf) << 2);

	return 0;
}

static int mlx4_set_path(struct mlx4_ib_dev *dev, const struct ib_qp_attr *qp,
			 enum ib_qp_attr_mask qp_attr_mask,
			 struct mlx4_qp_path *path, u8 port)
{
	return _mlx4_set_path(dev, &qp->ah_attr,
			      mlx4_mac_to_u64((u8 *)qp->smac),
			      (qp_attr_mask & IB_QP_VID) ? qp->vlan_id : 0xffff,
			      path, port);
}

static int mlx4_set_alt_path(struct mlx4_ib_dev *dev,
			     const struct ib_qp_attr *qp,
			     enum ib_qp_attr_mask qp_attr_mask,
			     struct mlx4_qp_path *path, u8 port)
{
	return _mlx4_set_path(dev, &qp->alt_ah_attr,
			      mlx4_mac_to_u64((u8 *)qp->alt_smac),
			      (qp_attr_mask & IB_QP_ALT_VID) ?
			      qp->alt_vlan_id : 0xffff,
			      path, port);
}

static void update_mcg_macs(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp)
{
	struct mlx4_ib_gid_entry *ge, *tmp;

	list_for_each_entry_safe(ge, tmp, &qp->gid_list, list) {
		if (!ge->added && mlx4_ib_add_mc(dev, qp, &ge->gid)) {
			ge->added = 1;
			ge->port = qp->port;
		}
	}
}

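/*
 * Build the mlx4_qp_context and optional-parameter mask that describe
 * the cur_state -> new_state transition for the given verbs QP
 * attributes.
 */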
Michael S. Tsirkin65adfa92007-05-14 07:26:51 +03001268static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
1269 const struct ib_qp_attr *attr, int attr_mask,
1270 enum ib_qp_state cur_state, enum ib_qp_state new_state)
Roland Dreier225c7b12007-05-08 18:00:38 -07001271{
1272 struct mlx4_ib_dev *dev = to_mdev(ibqp->device);
1273 struct mlx4_ib_qp *qp = to_mqp(ibqp);
Sean Hefty0a1405d2011-06-02 11:32:15 -07001274 struct mlx4_ib_pd *pd;
1275 struct mlx4_ib_cq *send_cq, *recv_cq;
Roland Dreier225c7b12007-05-08 18:00:38 -07001276 struct mlx4_qp_context *context;
1277 enum mlx4_qp_optpar optpar = 0;
Roland Dreier225c7b12007-05-08 18:00:38 -07001278 int sqd_event;
1279 int err = -EINVAL;
1280
1281 context = kzalloc(sizeof *context, GFP_KERNEL);
1282 if (!context)
1283 return -ENOMEM;
1284
Roland Dreier225c7b12007-05-08 18:00:38 -07001285 context->flags = cpu_to_be32((to_mlx4_state(new_state) << 28) |
Jack Morgenstein1ffeb2e2012-08-03 08:40:40 +00001286 (to_mlx4_st(dev, qp->mlx4_ib_qp_type) << 16));
Roland Dreier225c7b12007-05-08 18:00:38 -07001287
1288 if (!(attr_mask & IB_QP_PATH_MIG_STATE))
1289 context->flags |= cpu_to_be32(MLX4_QP_PM_MIGRATED << 11);
1290 else {
1291 optpar |= MLX4_QP_OPTPAR_PM_STATE;
1292 switch (attr->path_mig_state) {
1293 case IB_MIG_MIGRATED:
1294 context->flags |= cpu_to_be32(MLX4_QP_PM_MIGRATED << 11);
1295 break;
1296 case IB_MIG_REARM:
1297 context->flags |= cpu_to_be32(MLX4_QP_PM_REARM << 11);
1298 break;
1299 case IB_MIG_ARMED:
1300 context->flags |= cpu_to_be32(MLX4_QP_PM_ARMED << 11);
1301 break;
1302 }
1303 }
1304
Eli Cohenb832be12008-04-16 21:09:27 -07001305 if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_SMI)
Roland Dreier225c7b12007-05-08 18:00:38 -07001306 context->mtu_msgmax = (IB_MTU_4096 << 5) | 11;
Or Gerlitz3987a2d2012-01-17 13:39:07 +02001307 else if (ibqp->qp_type == IB_QPT_RAW_PACKET)
1308 context->mtu_msgmax = (MLX4_RAW_QP_MTU << 5) | MLX4_RAW_QP_MSGMAX;
Eli Cohenb832be12008-04-16 21:09:27 -07001309 else if (ibqp->qp_type == IB_QPT_UD) {
1310 if (qp->flags & MLX4_IB_QP_LSO)
1311 context->mtu_msgmax = (IB_MTU_4096 << 5) |
1312 ilog2(dev->dev->caps.max_gso_sz);
1313 else
Alex Naslednikov6e0d7332008-08-07 14:06:50 -07001314 context->mtu_msgmax = (IB_MTU_4096 << 5) | 12;
Eli Cohenb832be12008-04-16 21:09:27 -07001315 } else if (attr_mask & IB_QP_PATH_MTU) {
Roland Dreier225c7b12007-05-08 18:00:38 -07001316 if (attr->path_mtu < IB_MTU_256 || attr->path_mtu > IB_MTU_4096) {
Shlomo Pongratz987c8f82012-04-29 17:04:26 +03001317 pr_err("path MTU (%u) is invalid\n",
Roland Dreier225c7b12007-05-08 18:00:38 -07001318 attr->path_mtu);
Florin Malitaf5b40432007-07-19 15:58:09 -04001319 goto out;
Roland Dreier225c7b12007-05-08 18:00:38 -07001320 }
Eli Cohend1f2cd82008-07-14 23:48:45 -07001321 context->mtu_msgmax = (attr->path_mtu << 5) |
1322 ilog2(dev->dev->caps.max_msg_sz);
Roland Dreier225c7b12007-05-08 18:00:38 -07001323 }
1324
Roland Dreier0e6e7412007-06-18 08:13:48 -07001325 if (qp->rq.wqe_cnt)
1326 context->rq_size_stride = ilog2(qp->rq.wqe_cnt) << 3;
Roland Dreier225c7b12007-05-08 18:00:38 -07001327 context->rq_size_stride |= qp->rq.wqe_shift - 4;
1328
Roland Dreier0e6e7412007-06-18 08:13:48 -07001329 if (qp->sq.wqe_cnt)
1330 context->sq_size_stride = ilog2(qp->sq.wqe_cnt) << 3;
Roland Dreier225c7b12007-05-08 18:00:38 -07001331 context->sq_size_stride |= qp->sq.wqe_shift - 4;
1332
Sean Hefty0a1405d2011-06-02 11:32:15 -07001333 if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
Roland Dreier0e6e7412007-06-18 08:13:48 -07001334 context->sq_size_stride |= !!qp->sq_no_prefetch << 7;
Sean Hefty0a1405d2011-06-02 11:32:15 -07001335 context->xrcd = cpu_to_be32((u32) qp->xrcdn);
Dotan Barak02d7ef62013-04-21 15:10:00 +00001336 if (ibqp->qp_type == IB_QPT_RAW_PACKET)
1337 context->param3 |= cpu_to_be32(1 << 30);
Sean Hefty0a1405d2011-06-02 11:32:15 -07001338 }
Roland Dreier0e6e7412007-06-18 08:13:48 -07001339
Roland Dreier225c7b12007-05-08 18:00:38 -07001340 if (qp->ibqp.uobject)
1341 context->usr_page = cpu_to_be32(to_mucontext(ibqp->uobject->context)->uar.index);
1342 else
1343 context->usr_page = cpu_to_be32(dev->priv_uar.index);
1344
1345 if (attr_mask & IB_QP_DEST_QPN)
1346 context->remote_qpn = cpu_to_be32(attr->dest_qp_num);
1347
1348 if (attr_mask & IB_QP_PORT) {
1349 if (cur_state == IB_QPS_SQD && new_state == IB_QPS_SQD &&
1350 !(attr_mask & IB_QP_AV)) {
1351 mlx4_set_sched(&context->pri_path, attr->port_num);
1352 optpar |= MLX4_QP_OPTPAR_SCHED_QUEUE;
1353 }
1354 }
1355
	if (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) {
		if (dev->counters[qp->port - 1] != -1) {
			context->pri_path.counter_index =
						dev->counters[qp->port - 1];
			optpar |= MLX4_QP_OPTPAR_COUNTER_INDEX;
		} else
			context->pri_path.counter_index = 0xff;
	}

	if (attr_mask & IB_QP_PKEY_INDEX) {
		if (qp->mlx4_ib_qp_type & MLX4_IB_QPT_ANY_SRIOV)
			context->pri_path.disable_pkey_check = 0x40;
		context->pri_path.pkey_index = attr->pkey_index;
		optpar |= MLX4_QP_OPTPAR_PKEY_INDEX;
	}

	if (attr_mask & IB_QP_AV) {
		if (mlx4_set_path(dev, attr, attr_mask, &context->pri_path,
				  attr_mask & IB_QP_PORT ?
				  attr->port_num : qp->port))
			goto out;

		optpar |= (MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH |
			   MLX4_QP_OPTPAR_SCHED_QUEUE);
	}

	if (attr_mask & IB_QP_TIMEOUT) {
		context->pri_path.ackto |= attr->timeout << 3;
		optpar |= MLX4_QP_OPTPAR_ACK_TIMEOUT;
	}

	if (attr_mask & IB_QP_ALT_PATH) {
		if (attr->alt_port_num == 0 ||
		    attr->alt_port_num > dev->dev->caps.num_ports)
			goto out;

		if (attr->alt_pkey_index >=
		    dev->dev->caps.pkey_table_len[attr->alt_port_num])
			goto out;

		if (mlx4_set_alt_path(dev, attr, attr_mask, &context->alt_path,
				      attr->alt_port_num))
			goto out;

		context->alt_path.pkey_index = attr->alt_pkey_index;
		context->alt_path.ackto = attr->alt_timeout << 3;
		optpar |= MLX4_QP_OPTPAR_ALT_ADDR_PATH;
	}

	pd = get_pd(qp);
	get_cqs(qp, &send_cq, &recv_cq);
	context->pd = cpu_to_be32(pd->pdn);
	context->cqn_send = cpu_to_be32(send_cq->mcq.cqn);
	context->cqn_recv = cpu_to_be32(recv_cq->mcq.cqn);
	context->params1 = cpu_to_be32(MLX4_IB_ACK_REQ_FREQ << 28);

	/* Set "fast registration enabled" for all kernel QPs */
	if (!qp->ibqp.uobject)
		context->params1 |= cpu_to_be32(1 << 11);

	if (attr_mask & IB_QP_RNR_RETRY) {
		context->params1 |= cpu_to_be32(attr->rnr_retry << 13);
		optpar |= MLX4_QP_OPTPAR_RNR_RETRY;
	}

	if (attr_mask & IB_QP_RETRY_CNT) {
		context->params1 |= cpu_to_be32(attr->retry_cnt << 16);
		optpar |= MLX4_QP_OPTPAR_RETRY_COUNT;
	}

	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
		if (attr->max_rd_atomic)
			context->params1 |=
				cpu_to_be32(fls(attr->max_rd_atomic - 1) << 21);
		optpar |= MLX4_QP_OPTPAR_SRA_MAX;
	}

	if (attr_mask & IB_QP_SQ_PSN)
		context->next_send_psn = cpu_to_be32(attr->sq_psn);

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
		if (attr->max_dest_rd_atomic)
			context->params2 |=
				cpu_to_be32(fls(attr->max_dest_rd_atomic - 1) << 21);
		optpar |= MLX4_QP_OPTPAR_RRA_MAX;
	}

	if (attr_mask & (IB_QP_ACCESS_FLAGS | IB_QP_MAX_DEST_RD_ATOMIC)) {
		context->params2 |= to_mlx4_access_flags(qp, attr, attr_mask);
		optpar |= MLX4_QP_OPTPAR_RWE | MLX4_QP_OPTPAR_RRE | MLX4_QP_OPTPAR_RAE;
	}

	if (ibqp->srq)
		context->params2 |= cpu_to_be32(MLX4_QP_BIT_RIC);

	if (attr_mask & IB_QP_MIN_RNR_TIMER) {
		context->rnr_nextrecvpsn |= cpu_to_be32(attr->min_rnr_timer << 24);
		optpar |= MLX4_QP_OPTPAR_RNR_TIMEOUT;
	}
	if (attr_mask & IB_QP_RQ_PSN)
		context->rnr_nextrecvpsn |= cpu_to_be32(attr->rq_psn);

	/* proxy and tunnel qp qkeys will be changed in modify-qp wrappers */
	if (attr_mask & IB_QP_QKEY) {
		if (qp->mlx4_ib_qp_type &
		    (MLX4_IB_QPT_PROXY_SMI_OWNER | MLX4_IB_QPT_TUN_SMI_OWNER))
			context->qkey = cpu_to_be32(IB_QP_SET_QKEY);
		else {
			if (mlx4_is_mfunc(dev->dev) &&
			    !(qp->mlx4_ib_qp_type & MLX4_IB_QPT_ANY_SRIOV) &&
			    (attr->qkey & MLX4_RESERVED_QKEY_MASK) ==
			    MLX4_RESERVED_QKEY_BASE) {
				pr_err("Cannot use reserved QKEY 0x%x (range 0xffff0000..0xffffffff is reserved)\n",
				       attr->qkey);
				err = -EINVAL;
				goto out;
			}
			context->qkey = cpu_to_be32(attr->qkey);
		}
		optpar |= MLX4_QP_OPTPAR_Q_KEY;
	}

	if (ibqp->srq)
		context->srqn = cpu_to_be32(1 << 24 | to_msrq(ibqp->srq)->msrq.srqn);

	if (qp->rq.wqe_cnt && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
		context->db_rec_addr = cpu_to_be64(qp->db.dma);

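	/*
	 * sched_queue carries the port number in bit 6 on top of the
	 * default scheduling queue.  For SR-IOV proxy/tunnel QPs the
	 * fl bit (0x80) appears to force loopback, keeping QP0/QP1
	 * traffic between functions on the same host.
	 */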
	if (cur_state == IB_QPS_INIT &&
	    new_state == IB_QPS_RTR &&
	    (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_SMI ||
	     ibqp->qp_type == IB_QPT_UD ||
	     ibqp->qp_type == IB_QPT_RAW_PACKET)) {
		context->pri_path.sched_queue = (qp->port - 1) << 6;
		if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_SMI ||
		    qp->mlx4_ib_qp_type &
		    (MLX4_IB_QPT_PROXY_SMI_OWNER | MLX4_IB_QPT_TUN_SMI_OWNER)) {
			context->pri_path.sched_queue |= MLX4_IB_DEFAULT_QP0_SCHED_QUEUE;
			if (qp->mlx4_ib_qp_type != MLX4_IB_QPT_SMI)
				context->pri_path.fl = 0x80;
		} else {
			if (qp->mlx4_ib_qp_type & MLX4_IB_QPT_ANY_SRIOV)
				context->pri_path.fl = 0x80;
			context->pri_path.sched_queue |= MLX4_IB_DEFAULT_SCHED_QUEUE;
		}
	}

	if (qp->ibqp.qp_type == IB_QPT_RAW_PACKET)
		context->pri_path.ackto = (context->pri_path.ackto & 0xf8) |
					  MLX4_IB_LINK_TYPE_ETH;

	if (ibqp->qp_type == IB_QPT_UD && (new_state == IB_QPS_RTR)) {
		int is_eth = rdma_port_get_link_layer(&dev->ib_dev, qp->port) ==
			     IB_LINK_LAYER_ETHERNET;
		if (is_eth) {
			context->pri_path.ackto = MLX4_IB_LINK_TYPE_ETH;
			optpar |= MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH;
		}
	}

	if (cur_state == IB_QPS_RTS && new_state == IB_QPS_SQD &&
	    attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY && attr->en_sqd_async_notify)
		sqd_event = 1;
	else
		sqd_event = 0;

	if (!ibqp->uobject && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
		context->rlkey |= (1 << 4);

	/*
	 * Before passing a kernel QP to the HW, make sure that the
	 * ownership bits of the send queue are set and the SQ
	 * headroom is stamped so that the hardware doesn't start
	 * processing stale work requests.
	 */
	if (!ibqp->uobject && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
		struct mlx4_wqe_ctrl_seg *ctrl;
		int i;

		for (i = 0; i < qp->sq.wqe_cnt; ++i) {
			ctrl = get_send_wqe(qp, i);
			ctrl->owner_opcode = cpu_to_be32(1 << 31);
			if (qp->sq_max_wqes_per_wr == 1)
				ctrl->fence_size = 1 << (qp->sq.wqe_shift - 4);

			stamp_send_wqe(qp, i, 1 << qp->sq.wqe_shift);
		}
	}

	err = mlx4_qp_modify(dev->dev, &qp->mtt, to_mlx4_state(cur_state),
			     to_mlx4_state(new_state), context, optpar,
			     sqd_event, &qp->mqp);
	if (err)
		goto out;

	qp->state = new_state;

	if (attr_mask & IB_QP_ACCESS_FLAGS)
		qp->atomic_rd_en = attr->qp_access_flags;
	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		qp->resp_depth = attr->max_dest_rd_atomic;
	if (attr_mask & IB_QP_PORT) {
		qp->port = attr->port_num;
		update_mcg_macs(dev, qp);
	}
	if (attr_mask & IB_QP_ALT_PATH)
		qp->alt_port = attr->alt_port_num;

	if (is_sqp(dev, qp))
		store_sqp_attrs(to_msqp(qp), attr, attr_mask);

	/*
	 * If we moved QP0 to RTR, bring the IB link up; if we moved
	 * QP0 to RESET or ERROR, bring the link back down.
	 */
	if (is_qp0(dev, qp)) {
		if (cur_state != IB_QPS_RTR && new_state == IB_QPS_RTR)
			if (mlx4_INIT_PORT(dev->dev, qp->port))
				pr_warn("INIT_PORT failed for port %d\n",
					qp->port);

		if (cur_state != IB_QPS_RESET && cur_state != IB_QPS_ERR &&
		    (new_state == IB_QPS_RESET || new_state == IB_QPS_ERR))
			mlx4_CLOSE_PORT(dev->dev, qp->port);
	}

	/*
	 * If we moved a kernel QP to RESET, clean up all old CQ
	 * entries and reinitialize the QP.
	 */
	if (new_state == IB_QPS_RESET && !ibqp->uobject) {
		mlx4_ib_cq_clean(recv_cq, qp->mqp.qpn,
				 ibqp->srq ? to_msrq(ibqp->srq) : NULL);
		if (send_cq != recv_cq)
			mlx4_ib_cq_clean(send_cq, qp->mqp.qpn, NULL);

		qp->rq.head = 0;
		qp->rq.tail = 0;
		qp->sq.head = 0;
		qp->sq.tail = 0;
		qp->sq_next_wqe = 0;
		if (qp->rq.wqe_cnt)
			*qp->db.db = 0;
	}

out:
	kfree(context);
	return err;
}

int mlx4_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		      int attr_mask, struct ib_udata *udata)
{
	struct mlx4_ib_dev *dev = to_mdev(ibqp->device);
	struct mlx4_ib_qp *qp = to_mqp(ibqp);
	enum ib_qp_state cur_state, new_state;
	int err = -EINVAL;
	int ll;

	mutex_lock(&qp->mutex);

	cur_state = attr_mask & IB_QP_CUR_STATE ? attr->cur_qp_state : qp->state;
	new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;

	if (cur_state == new_state && cur_state == IB_QPS_RESET) {
		ll = IB_LINK_LAYER_UNSPECIFIED;
	} else {
		int port = attr_mask & IB_QP_PORT ? attr->port_num : qp->port;
		ll = rdma_port_get_link_layer(&dev->ib_dev, port);
	}

	if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
				attr_mask, ll)) {
		pr_debug("qpn 0x%x: invalid attribute mask specified for transition %d to %d. qp_type %d, attr_mask 0x%x\n",
			 ibqp->qp_num, cur_state, new_state,
			 ibqp->qp_type, attr_mask);
		goto out;
	}

	if ((attr_mask & IB_QP_PORT) &&
	    (attr->port_num == 0 || attr->port_num > dev->num_ports)) {
		pr_debug("qpn 0x%x: invalid port number (%d) specified for transition %d to %d. qp_type %d\n",
			 ibqp->qp_num, attr->port_num, cur_state,
			 new_state, ibqp->qp_type);
		goto out;
	}

	if ((attr_mask & IB_QP_PORT) && (ibqp->qp_type == IB_QPT_RAW_PACKET) &&
	    (rdma_port_get_link_layer(&dev->ib_dev, attr->port_num) !=
	     IB_LINK_LAYER_ETHERNET))
		goto out;

	if (attr_mask & IB_QP_PKEY_INDEX) {
		int p = attr_mask & IB_QP_PORT ? attr->port_num : qp->port;
		if (attr->pkey_index >= dev->dev->caps.pkey_table_len[p]) {
			pr_debug("qpn 0x%x: invalid pkey index (%d) specified for transition %d to %d. qp_type %d\n",
				 ibqp->qp_num, attr->pkey_index, cur_state,
				 new_state, ibqp->qp_type);
			goto out;
		}
	}

	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
	    attr->max_rd_atomic > dev->dev->caps.max_qp_init_rdma) {
		pr_debug("qpn 0x%x: max_rd_atomic (%d) too large. Transition %d to %d. qp_type %d\n",
			 ibqp->qp_num, attr->max_rd_atomic, cur_state,
			 new_state, ibqp->qp_type);
		goto out;
	}

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC &&
	    attr->max_dest_rd_atomic > dev->dev->caps.max_qp_dest_rdma) {
		pr_debug("qpn 0x%x: max_dest_rd_atomic (%d) too large. Transition %d to %d. qp_type %d\n",
			 ibqp->qp_num, attr->max_dest_rd_atomic, cur_state,
			 new_state, ibqp->qp_type);
		goto out;
	}

	if (cur_state == new_state && cur_state == IB_QPS_RESET) {
		err = 0;
		goto out;
	}

	err = __mlx4_ib_modify_qp(ibqp, attr, attr_mask, cur_state, new_state);

out:
	mutex_unlock(&qp->mutex);
	return err;
}

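/*
 * Build the raw MLX header for QP0 packets sent on behalf of slaves.
 * Proxy-QP0 sends are forced loopback toward the tunnel QP0 on the
 * wire port; tunnel-QP0 sends already carry the tunnel header in the
 * s/g list, so only the proxy case adds it to the send size below.
 */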
static int build_sriov_qp0_header(struct mlx4_ib_sqp *sqp,
				  struct ib_send_wr *wr,
				  void *wqe, unsigned *mlx_seg_len)
{
	struct mlx4_ib_dev *mdev = to_mdev(sqp->qp.ibqp.device);
	struct ib_device *ib_dev = &mdev->ib_dev;
	struct mlx4_wqe_mlx_seg *mlx = wqe;
	struct mlx4_wqe_inline_seg *inl = wqe + sizeof *mlx;
	struct mlx4_ib_ah *ah = to_mah(wr->wr.ud.ah);
	u16 pkey;
	u32 qkey;
	int send_size;
	int header_size;
	int spc;
	int i;

	if (wr->opcode != IB_WR_SEND)
		return -EINVAL;

	send_size = 0;

	for (i = 0; i < wr->num_sge; ++i)
		send_size += wr->sg_list[i].length;

	/* for proxy-qp0 sends, need to add in size of tunnel header */
	/* for tunnel-qp0 sends, tunnel header is already in s/g list */
	if (sqp->qp.mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_SMI_OWNER)
		send_size += sizeof (struct mlx4_ib_tunnel_header);

	ib_ud_header_init(send_size, 1, 0, 0, 0, 0, &sqp->ud_header);

	if (sqp->qp.mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_SMI_OWNER) {
		sqp->ud_header.lrh.service_level =
			be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 28;
		sqp->ud_header.lrh.destination_lid =
			cpu_to_be16(ah->av.ib.g_slid & 0x7f);
		sqp->ud_header.lrh.source_lid =
			cpu_to_be16(ah->av.ib.g_slid & 0x7f);
	}

	mlx->flags &= cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE);

	/* force loopback */
	mlx->flags |= cpu_to_be32(MLX4_WQE_MLX_VL15 | 0x1 | MLX4_WQE_MLX_SLR);
	mlx->rlid = sqp->ud_header.lrh.destination_lid;

	sqp->ud_header.lrh.virtual_lane = 0;
	sqp->ud_header.bth.solicited_event = !!(wr->send_flags & IB_SEND_SOLICITED);
	ib_get_cached_pkey(ib_dev, sqp->qp.port, 0, &pkey);
	sqp->ud_header.bth.pkey = cpu_to_be16(pkey);
	if (sqp->qp.mlx4_ib_qp_type == MLX4_IB_QPT_TUN_SMI_OWNER)
		sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->wr.ud.remote_qpn);
	else
		sqp->ud_header.bth.destination_qpn =
			cpu_to_be32(mdev->dev->caps.qp0_tunnel[sqp->qp.port - 1]);

	sqp->ud_header.bth.psn = cpu_to_be32((sqp->send_psn++) & ((1 << 24) - 1));
	if (mlx4_get_parav_qkey(mdev->dev, sqp->qp.mqp.qpn, &qkey))
		return -EINVAL;
	sqp->ud_header.deth.qkey = cpu_to_be32(qkey);
	sqp->ud_header.deth.source_qpn = cpu_to_be32(sqp->qp.mqp.qpn);

	sqp->ud_header.bth.opcode = IB_OPCODE_UD_SEND_ONLY;
	sqp->ud_header.immediate_present = 0;

	header_size = ib_ud_header_pack(&sqp->ud_header, sqp->header_buf);

	/*
	 * Inline data segments may not cross a 64 byte boundary.  If
	 * our UD header is bigger than the space available up to the
	 * next 64 byte boundary in the WQE, use two inline data
	 * segments to hold the UD header.
	 */
	spc = MLX4_INLINE_ALIGN -
	      ((unsigned long) (inl + 1) & (MLX4_INLINE_ALIGN - 1));
	if (header_size <= spc) {
		inl->byte_count = cpu_to_be32(1 << 31 | header_size);
		memcpy(inl + 1, sqp->header_buf, header_size);
		i = 1;
	} else {
		inl->byte_count = cpu_to_be32(1 << 31 | spc);
		memcpy(inl + 1, sqp->header_buf, spc);

		inl = (void *) (inl + 1) + spc;
		memcpy(inl + 1, sqp->header_buf + spc, header_size - spc);
		/*
		 * Need a barrier here to make sure all the data is
		 * visible before the byte_count field is set.
		 * Otherwise the HCA prefetcher could grab the 64-byte
		 * chunk with this inline segment and get a valid (!=
		 * 0xffffffff) byte count but stale data, and end up
		 * generating a packet with bad headers.
		 *
		 * The first inline segment's byte_count field doesn't
		 * need a barrier, because it comes after a
		 * control/MLX segment and therefore is at an offset
		 * of 16 mod 64.
		 */
		wmb();
		inl->byte_count = cpu_to_be32(1 << 31 | (header_size - spc));
		i = 2;
	}

	*mlx_seg_len =
		ALIGN(i * sizeof (struct mlx4_wqe_inline_seg) + header_size, 16);
	return 0;
}

static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr,
			    void *wqe, unsigned *mlx_seg_len)
{
	struct ib_device *ib_dev = sqp->qp.ibqp.device;
	struct mlx4_wqe_mlx_seg *mlx = wqe;
	struct mlx4_wqe_inline_seg *inl = wqe + sizeof *mlx;
	struct mlx4_ib_ah *ah = to_mah(wr->wr.ud.ah);
	struct net_device *ndev;
	union ib_gid sgid;
	u16 pkey;
	int send_size;
	int header_size;
	int spc;
	int i;
	int err = 0;
	u16 vlan = 0xffff;
	bool is_eth;
	bool is_vlan = false;
	bool is_grh;

	send_size = 0;
	for (i = 0; i < wr->num_sge; ++i)
		send_size += wr->sg_list[i].length;

	is_eth = rdma_port_get_link_layer(sqp->qp.ibqp.device, sqp->qp.port) == IB_LINK_LAYER_ETHERNET;
	is_grh = mlx4_ib_ah_grh_present(ah);
	if (is_eth) {
		if (mlx4_is_mfunc(to_mdev(ib_dev)->dev)) {
			/* When multi-function is enabled, the ib_core gid
			 * indexes don't necessarily match the hw ones, so
			 * we must use our own cache */
			sgid.global.subnet_prefix =
				to_mdev(ib_dev)->sriov.demux[sqp->qp.port - 1].
				subnet_prefix;
			sgid.global.interface_id =
				to_mdev(ib_dev)->sriov.demux[sqp->qp.port - 1].
				guid_cache[ah->av.ib.gid_index];
		} else {
			err = ib_get_cached_gid(ib_dev,
						be32_to_cpu(ah->av.ib.port_pd) >> 24,
						ah->av.ib.gid_index, &sgid);
			if (err)
				return err;
		}

		if (ah->av.eth.vlan != 0xffff) {
			vlan = be16_to_cpu(ah->av.eth.vlan) & 0x0fff;
			is_vlan = true;
		}
	}
	ib_ud_header_init(send_size, !is_eth, is_eth, is_vlan, is_grh, 0, &sqp->ud_header);

	if (!is_eth) {
		sqp->ud_header.lrh.service_level =
			be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 28;
		sqp->ud_header.lrh.destination_lid = ah->av.ib.dlid;
		sqp->ud_header.lrh.source_lid = cpu_to_be16(ah->av.ib.g_slid & 0x7f);
	}

	if (is_grh) {
		sqp->ud_header.grh.traffic_class =
			(be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 20) & 0xff;
		sqp->ud_header.grh.flow_label =
			ah->av.ib.sl_tclass_flowlabel & cpu_to_be32(0xfffff);
		sqp->ud_header.grh.hop_limit = ah->av.ib.hop_limit;
		if (mlx4_is_mfunc(to_mdev(ib_dev)->dev)) {
			/* When multi-function is enabled, the ib_core gid
			 * indexes don't necessarily match the hw ones, so
			 * we must use our own cache */
			sqp->ud_header.grh.source_gid.global.subnet_prefix =
				to_mdev(ib_dev)->sriov.demux[sqp->qp.port - 1].
				subnet_prefix;
			sqp->ud_header.grh.source_gid.global.interface_id =
				to_mdev(ib_dev)->sriov.demux[sqp->qp.port - 1].
				guid_cache[ah->av.ib.gid_index];
		} else
			ib_get_cached_gid(ib_dev,
					  be32_to_cpu(ah->av.ib.port_pd) >> 24,
					  ah->av.ib.gid_index,
					  &sqp->ud_header.grh.source_gid);
		memcpy(sqp->ud_header.grh.destination_gid.raw,
		       ah->av.ib.dgid, 16);
	}

	mlx->flags &= cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE);

	if (!is_eth) {
		mlx->flags |= cpu_to_be32((!sqp->qp.ibqp.qp_num ? MLX4_WQE_MLX_VL15 : 0) |
					  (sqp->ud_header.lrh.destination_lid ==
					   IB_LID_PERMISSIVE ? MLX4_WQE_MLX_SLR : 0) |
					  (sqp->ud_header.lrh.service_level << 8));
		if (ah->av.ib.port_pd & cpu_to_be32(0x80000000))
			mlx->flags |= cpu_to_be32(0x1); /* force loopback */
		mlx->rlid = sqp->ud_header.lrh.destination_lid;
	}

	switch (wr->opcode) {
	case IB_WR_SEND:
		sqp->ud_header.bth.opcode	 = IB_OPCODE_UD_SEND_ONLY;
		sqp->ud_header.immediate_present = 0;
		break;
	case IB_WR_SEND_WITH_IMM:
		sqp->ud_header.bth.opcode	 = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE;
		sqp->ud_header.immediate_present = 1;
		sqp->ud_header.immediate_data    = wr->ex.imm_data;
		break;
	default:
		return -EINVAL;
	}

	if (is_eth) {
		u8 *smac;
		u16 pcp = (be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 29) << 13;

		mlx->sched_prio = cpu_to_be16(pcp);

		memcpy(sqp->ud_header.eth.dmac_h, ah->av.eth.mac, 6);
		/* FIXME: cache smac value? */
		ndev = to_mdev(sqp->qp.ibqp.device)->iboe.netdevs[sqp->qp.port - 1];
		if (!ndev)
			return -ENODEV;
		smac = ndev->dev_addr;
		memcpy(sqp->ud_header.eth.smac_h, smac, 6);
		if (!memcmp(sqp->ud_header.eth.smac_h, sqp->ud_header.eth.dmac_h, 6))
			mlx->flags |= cpu_to_be32(MLX4_WQE_CTRL_FORCE_LOOPBACK);
		if (!is_vlan) {
			sqp->ud_header.eth.type = cpu_to_be16(MLX4_IB_IBOE_ETHERTYPE);
		} else {
			sqp->ud_header.vlan.type = cpu_to_be16(MLX4_IB_IBOE_ETHERTYPE);
			sqp->ud_header.vlan.tag = cpu_to_be16(vlan | pcp);
		}
	} else {
		sqp->ud_header.lrh.virtual_lane = !sqp->qp.ibqp.qp_num ? 15 : 0;
		if (sqp->ud_header.lrh.destination_lid == IB_LID_PERMISSIVE)
			sqp->ud_header.lrh.source_lid = IB_LID_PERMISSIVE;
	}
	sqp->ud_header.bth.solicited_event = !!(wr->send_flags & IB_SEND_SOLICITED);
	if (!sqp->qp.ibqp.qp_num)
		ib_get_cached_pkey(ib_dev, sqp->qp.port, sqp->pkey_index, &pkey);
	else
		ib_get_cached_pkey(ib_dev, sqp->qp.port, wr->wr.ud.pkey_index, &pkey);
	sqp->ud_header.bth.pkey = cpu_to_be16(pkey);
	sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->wr.ud.remote_qpn);
	sqp->ud_header.bth.psn = cpu_to_be32((sqp->send_psn++) & ((1 << 24) - 1));
	sqp->ud_header.deth.qkey = cpu_to_be32(wr->wr.ud.remote_qkey & 0x80000000 ?
					       sqp->qkey : wr->wr.ud.remote_qkey);
	sqp->ud_header.deth.source_qpn = cpu_to_be32(sqp->qp.ibqp.qp_num);

	header_size = ib_ud_header_pack(&sqp->ud_header, sqp->header_buf);

	if (0) {
		pr_err("built UD header of size %d:\n", header_size);
		for (i = 0; i < header_size / 4; ++i) {
			if (i % 8 == 0)
				pr_err("  [%02x] ", i * 4);
			pr_cont(" %08x",
				be32_to_cpu(((__be32 *) sqp->header_buf)[i]));
			if ((i + 1) % 8 == 0)
				pr_cont("\n");
		}
		pr_err("\n");
	}

	/*
	 * Inline data segments may not cross a 64 byte boundary.  If
	 * our UD header is bigger than the space available up to the
	 * next 64 byte boundary in the WQE, use two inline data
	 * segments to hold the UD header.
	 */
	spc = MLX4_INLINE_ALIGN -
	      ((unsigned long) (inl + 1) & (MLX4_INLINE_ALIGN - 1));
	if (header_size <= spc) {
		inl->byte_count = cpu_to_be32(1 << 31 | header_size);
		memcpy(inl + 1, sqp->header_buf, header_size);
		i = 1;
	} else {
		inl->byte_count = cpu_to_be32(1 << 31 | spc);
		memcpy(inl + 1, sqp->header_buf, spc);

		inl = (void *) (inl + 1) + spc;
		memcpy(inl + 1, sqp->header_buf + spc, header_size - spc);
		/*
		 * Need a barrier here to make sure all the data is
		 * visible before the byte_count field is set.
		 * Otherwise the HCA prefetcher could grab the 64-byte
		 * chunk with this inline segment and get a valid (!=
		 * 0xffffffff) byte count but stale data, and end up
		 * generating a packet with bad headers.
		 *
		 * The first inline segment's byte_count field doesn't
		 * need a barrier, because it comes after a
		 * control/MLX segment and therefore is at an offset
		 * of 16 mod 64.
		 */
		wmb();
		inl->byte_count = cpu_to_be32(1 << 31 | (header_size - spc));
		i = 2;
	}

	*mlx_seg_len =
		ALIGN(i * sizeof (struct mlx4_wqe_inline_seg) + header_size, 16);
	return 0;
}

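/*
 * The first head/tail check runs without the CQ lock and may see a
 * stale tail; only if the queue looks full is the count recomputed
 * under the CQ lock, which is where completions advance wq->tail.
 */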
static int mlx4_wq_overflow(struct mlx4_ib_wq *wq, int nreq, struct ib_cq *ib_cq)
{
	unsigned cur;
	struct mlx4_ib_cq *cq;

	cur = wq->head - wq->tail;
	if (likely(cur + nreq < wq->max_post))
		return 0;

	cq = to_mcq(ib_cq);
	spin_lock(&cq->lock);
	cur = wq->head - wq->tail;
	spin_unlock(&cq->lock);

	return cur + nreq >= wq->max_post;
}

static __be32 convert_access(int acc)
{
	return (acc & IB_ACCESS_REMOTE_ATOMIC ?
		cpu_to_be32(MLX4_WQE_FMR_AND_BIND_PERM_ATOMIC) : 0) |
	       (acc & IB_ACCESS_REMOTE_WRITE ?
		cpu_to_be32(MLX4_WQE_FMR_AND_BIND_PERM_REMOTE_WRITE) : 0) |
	       (acc & IB_ACCESS_REMOTE_READ ?
		cpu_to_be32(MLX4_WQE_FMR_AND_BIND_PERM_REMOTE_READ) : 0) |
	       (acc & IB_ACCESS_LOCAL_WRITE ? cpu_to_be32(MLX4_WQE_FMR_PERM_LOCAL_WRITE) : 0) |
	       cpu_to_be32(MLX4_WQE_FMR_PERM_LOCAL_READ);
}

static void set_fmr_seg(struct mlx4_wqe_fmr_seg *fseg, struct ib_send_wr *wr)
{
	struct mlx4_ib_fast_reg_page_list *mfrpl = to_mfrpl(wr->wr.fast_reg.page_list);
	int i;

	for (i = 0; i < wr->wr.fast_reg.page_list_len; ++i)
		mfrpl->mapped_page_list[i] =
			cpu_to_be64(wr->wr.fast_reg.page_list->page_list[i] |
				    MLX4_MTT_FLAG_PRESENT);

	fseg->flags = convert_access(wr->wr.fast_reg.access_flags);
	fseg->mem_key = cpu_to_be32(wr->wr.fast_reg.rkey);
	fseg->buf_list = cpu_to_be64(mfrpl->map);
	fseg->start_addr = cpu_to_be64(wr->wr.fast_reg.iova_start);
	fseg->reg_len = cpu_to_be64(wr->wr.fast_reg.length);
	fseg->offset = 0; /* XXX -- is this just for ZBVA? */
	fseg->page_size = cpu_to_be32(wr->wr.fast_reg.page_shift);
	fseg->reserved[0] = 0;
	fseg->reserved[1] = 0;
}

static void set_bind_seg(struct mlx4_wqe_bind_seg *bseg, struct ib_send_wr *wr)
{
	bseg->flags1 =
		convert_access(wr->wr.bind_mw.bind_info.mw_access_flags) &
		cpu_to_be32(MLX4_WQE_FMR_AND_BIND_PERM_REMOTE_READ |
			    MLX4_WQE_FMR_AND_BIND_PERM_REMOTE_WRITE |
			    MLX4_WQE_FMR_AND_BIND_PERM_ATOMIC);
	bseg->flags2 = 0;
	if (wr->wr.bind_mw.mw->type == IB_MW_TYPE_2)
		bseg->flags2 |= cpu_to_be32(MLX4_WQE_BIND_TYPE_2);
	if (wr->wr.bind_mw.bind_info.mw_access_flags & IB_ZERO_BASED)
		bseg->flags2 |= cpu_to_be32(MLX4_WQE_BIND_ZERO_BASED);
	bseg->new_rkey = cpu_to_be32(wr->wr.bind_mw.rkey);
	bseg->lkey = cpu_to_be32(wr->wr.bind_mw.bind_info.mr->lkey);
	bseg->addr = cpu_to_be64(wr->wr.bind_mw.bind_info.addr);
	bseg->length = cpu_to_be64(wr->wr.bind_mw.bind_info.length);
}

static void set_local_inv_seg(struct mlx4_wqe_local_inval_seg *iseg, u32 rkey)
{
	memset(iseg, 0, sizeof(*iseg));
	iseg->mem_key = cpu_to_be32(rkey);
}

static __always_inline void set_raddr_seg(struct mlx4_wqe_raddr_seg *rseg,
					  u64 remote_addr, u32 rkey)
{
	rseg->raddr = cpu_to_be64(remote_addr);
	rseg->rkey = cpu_to_be32(rkey);
	rseg->reserved = 0;
}

static void set_atomic_seg(struct mlx4_wqe_atomic_seg *aseg, struct ib_send_wr *wr)
{
	if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
		aseg->swap_add = cpu_to_be64(wr->wr.atomic.swap);
		aseg->compare = cpu_to_be64(wr->wr.atomic.compare_add);
	} else if (wr->opcode == IB_WR_MASKED_ATOMIC_FETCH_AND_ADD) {
		aseg->swap_add = cpu_to_be64(wr->wr.atomic.compare_add);
		aseg->compare = cpu_to_be64(wr->wr.atomic.compare_add_mask);
	} else {
		aseg->swap_add = cpu_to_be64(wr->wr.atomic.compare_add);
		aseg->compare = 0;
	}
}

static void set_masked_atomic_seg(struct mlx4_wqe_masked_atomic_seg *aseg,
				  struct ib_send_wr *wr)
{
	aseg->swap_add = cpu_to_be64(wr->wr.atomic.swap);
	aseg->swap_add_mask = cpu_to_be64(wr->wr.atomic.swap_mask);
	aseg->compare = cpu_to_be64(wr->wr.atomic.compare_add);
	aseg->compare_mask = cpu_to_be64(wr->wr.atomic.compare_add_mask);
}

static void set_datagram_seg(struct mlx4_wqe_datagram_seg *dseg,
			     struct ib_send_wr *wr)
{
	memcpy(dseg->av, &to_mah(wr->wr.ud.ah)->av, sizeof (struct mlx4_av));
	dseg->dqpn = cpu_to_be32(wr->wr.ud.remote_qpn);
	dseg->qkey = cpu_to_be32(wr->wr.ud.remote_qkey);
	dseg->vlan = to_mah(wr->wr.ud.ah)->av.eth.vlan;
	memcpy(dseg->mac, to_mah(wr->wr.ud.ah)->av.eth.mac, 6);
}

static void set_tunnel_datagram_seg(struct mlx4_ib_dev *dev,
				    struct mlx4_wqe_datagram_seg *dseg,
				    struct ib_send_wr *wr, enum ib_qp_type qpt)
{
	union mlx4_ext_av *av = &to_mah(wr->wr.ud.ah)->av;
	struct mlx4_av sqp_av = {0};
	int port = *((u8 *) &av->ib.port_pd) & 0x3;

	/* force loopback */
	sqp_av.port_pd = av->ib.port_pd | cpu_to_be32(0x80000000);
	sqp_av.g_slid = av->ib.g_slid & 0x7f; /* no GRH */
	sqp_av.sl_tclass_flowlabel = av->ib.sl_tclass_flowlabel &
			cpu_to_be32(0xf0000000);

	memcpy(dseg->av, &sqp_av, sizeof (struct mlx4_av));
	/* This function is used only for sending on QP1 proxies */
	dseg->dqpn = cpu_to_be32(dev->dev->caps.qp1_tunnel[port - 1]);
	/* Use QKEY from the QP context, which is set by master */
	dseg->qkey = cpu_to_be32(IB_QP_SET_QKEY);
}

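/*
 * Place the tunnel header (address vector plus remote QPN, pkey index
 * and qkey) inline in the WQE, split into two inline segments when it
 * would otherwise cross a 64-byte boundary, mirroring the UD header
 * handling above.
 */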
static void build_tunnel_header(struct ib_send_wr *wr, void *wqe, unsigned *mlx_seg_len)
{
	struct mlx4_wqe_inline_seg *inl = wqe;
	struct mlx4_ib_tunnel_header hdr;
	struct mlx4_ib_ah *ah = to_mah(wr->wr.ud.ah);
	int spc;
	int i;

	memcpy(&hdr.av, &ah->av, sizeof hdr.av);
	hdr.remote_qpn = cpu_to_be32(wr->wr.ud.remote_qpn);
	hdr.pkey_index = cpu_to_be16(wr->wr.ud.pkey_index);
	hdr.qkey = cpu_to_be32(wr->wr.ud.remote_qkey);

	spc = MLX4_INLINE_ALIGN -
		((unsigned long) (inl + 1) & (MLX4_INLINE_ALIGN - 1));
	if (sizeof (hdr) <= spc) {
		memcpy(inl + 1, &hdr, sizeof (hdr));
		wmb();
		inl->byte_count = cpu_to_be32(1 << 31 | sizeof (hdr));
		i = 1;
	} else {
		memcpy(inl + 1, &hdr, spc);
		wmb();
		inl->byte_count = cpu_to_be32(1 << 31 | spc);

		inl = (void *) (inl + 1) + spc;
		memcpy(inl + 1, (void *) &hdr + spc, sizeof (hdr) - spc);
		wmb();
		inl->byte_count = cpu_to_be32(1 << 31 | (sizeof (hdr) - spc));
		i = 2;
	}

	*mlx_seg_len =
		ALIGN(i * sizeof (struct mlx4_wqe_inline_seg) + sizeof (hdr), 16);
}

static void set_mlx_icrc_seg(void *dseg)
{
	u32 *t = dseg;
	struct mlx4_wqe_inline_seg *iseg = dseg;

	t[1] = 0;

	/*
	 * Need a barrier here before writing the byte_count field to
	 * make sure that all the data is visible before the
	 * byte_count field is set.  Otherwise, if the segment begins
	 * a new cacheline, the HCA prefetcher could grab the 64-byte
	 * chunk and get a valid (!= 0xffffffff) byte count but
	 * stale data, and end up sending the wrong data.
	 */
	wmb();

	iseg->byte_count = cpu_to_be32((1 << 31) | 4);
}

static void set_data_seg(struct mlx4_wqe_data_seg *dseg, struct ib_sge *sg)
{
	dseg->lkey = cpu_to_be32(sg->lkey);
	dseg->addr = cpu_to_be64(sg->addr);

	/*
	 * Need a barrier here before writing the byte_count field to
	 * make sure that all the data is visible before the
	 * byte_count field is set.  Otherwise, if the segment begins
	 * a new cacheline, the HCA prefetcher could grab the 64-byte
	 * chunk and get a valid (!= 0xffffffff) byte count but
	 * stale data, and end up sending the wrong data.
	 */
	wmb();

	dseg->byte_count = cpu_to_be32(sg->length);
}

static void __set_data_seg(struct mlx4_wqe_data_seg *dseg, struct ib_sge *sg)
{
	dseg->byte_count = cpu_to_be32(sg->length);
	dseg->lkey = cpu_to_be32(sg->lkey);
	dseg->addr = cpu_to_be64(sg->addr);
}

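/*
 * Copy the LSO header inline into the WQE, padded to a multiple of 16
 * bytes.  A header larger than a cache line sets the blh flag (bit 6),
 * which the caller ORs into owner_opcode.
 */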
static int build_lso_seg(struct mlx4_wqe_lso_seg *wqe, struct ib_send_wr *wr,
			 struct mlx4_ib_qp *qp, unsigned *lso_seg_len,
			 __be32 *lso_hdr_sz, __be32 *blh)
{
	unsigned halign = ALIGN(sizeof *wqe + wr->wr.ud.hlen, 16);

	if (unlikely(halign > MLX4_IB_CACHE_LINE_SIZE))
		*blh = cpu_to_be32(1 << 6);

	if (unlikely(!(qp->flags & MLX4_IB_QP_LSO) &&
		     wr->num_sge > qp->sq.max_gs - (halign >> 4)))
		return -EINVAL;

	memcpy(wqe->header, wr->wr.ud.header, wr->wr.ud.hlen);

	*lso_hdr_sz = cpu_to_be32((wr->wr.ud.mss - wr->wr.ud.hlen) << 16 |
				  wr->wr.ud.hlen);
	*lso_seg_len = halign;
	return 0;
}

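/*
 * Build the "immediate" dword of the control segment: immediate data
 * for the *_WITH_IMM opcodes, the rkey to invalidate for
 * IB_WR_SEND_WITH_INV, and 0 otherwise.
 */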
static __be32 send_ieth(struct ib_send_wr *wr)
{
	switch (wr->opcode) {
	case IB_WR_SEND_WITH_IMM:
	case IB_WR_RDMA_WRITE_WITH_IMM:
		return wr->ex.imm_data;

	case IB_WR_SEND_WITH_INV:
		return cpu_to_be32(wr->ex.invalidate_rkey);

	default:
		return 0;
	}
}

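/*
 * A zero-length inline segment: the inline flag (bit 31) with a byte
 * count of 0, used as 16 bytes of padding so that a following tunnel
 * header starts on a cache-line boundary.
 */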
static void add_zero_len_inline(void *wqe)
{
	struct mlx4_wqe_inline_seg *inl = wqe;

	memset(wqe, 0, 16);
	inl->byte_count = cpu_to_be32(1 << 31);
}

int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
		      struct ib_send_wr **bad_wr)
{
	struct mlx4_ib_qp *qp = to_mqp(ibqp);
	void *wqe;
	struct mlx4_wqe_ctrl_seg *ctrl;
	struct mlx4_wqe_data_seg *dseg;
	unsigned long flags;
	int nreq;
	int err = 0;
	unsigned ind;
	int uninitialized_var(stamp);
	int uninitialized_var(size);
	unsigned uninitialized_var(seglen);
	__be32 dummy;
	__be32 *lso_wqe;
	__be32 uninitialized_var(lso_hdr_sz);
	__be32 blh;
	int i;

	spin_lock_irqsave(&qp->sq.lock, flags);

	ind = qp->sq_next_wqe;

	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		lso_wqe = &dummy;
		blh = 0;

		if (mlx4_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {
			err = -ENOMEM;
			*bad_wr = wr;
			goto out;
		}

		if (unlikely(wr->num_sge > qp->sq.max_gs)) {
			err = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		ctrl = wqe = get_send_wqe(qp, ind & (qp->sq.wqe_cnt - 1));
		qp->sq.wrid[(qp->sq.head + nreq) & (qp->sq.wqe_cnt - 1)] = wr->wr_id;

		ctrl->srcrb_flags =
			(wr->send_flags & IB_SEND_SIGNALED ?
			 cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE) : 0) |
			(wr->send_flags & IB_SEND_SOLICITED ?
			 cpu_to_be32(MLX4_WQE_CTRL_SOLICITED) : 0) |
			((wr->send_flags & IB_SEND_IP_CSUM) ?
			 cpu_to_be32(MLX4_WQE_CTRL_IP_CSUM |
				     MLX4_WQE_CTRL_TCP_UDP_CSUM) : 0) |
			qp->sq_signal_bits;

		ctrl->imm = send_ieth(wr);

		wqe += sizeof *ctrl;
		size = sizeof *ctrl / 16;

		switch (qp->mlx4_ib_qp_type) {
		case MLX4_IB_QPT_RC:
		case MLX4_IB_QPT_UC:
			switch (wr->opcode) {
			case IB_WR_ATOMIC_CMP_AND_SWP:
			case IB_WR_ATOMIC_FETCH_AND_ADD:
			case IB_WR_MASKED_ATOMIC_FETCH_AND_ADD:
				set_raddr_seg(wqe, wr->wr.atomic.remote_addr,
					      wr->wr.atomic.rkey);
				wqe += sizeof (struct mlx4_wqe_raddr_seg);

				set_atomic_seg(wqe, wr);
				wqe += sizeof (struct mlx4_wqe_atomic_seg);

				size += (sizeof (struct mlx4_wqe_raddr_seg) +
					 sizeof (struct mlx4_wqe_atomic_seg)) / 16;

				break;

			case IB_WR_MASKED_ATOMIC_CMP_AND_SWP:
				set_raddr_seg(wqe, wr->wr.atomic.remote_addr,
					      wr->wr.atomic.rkey);
				wqe += sizeof (struct mlx4_wqe_raddr_seg);

				set_masked_atomic_seg(wqe, wr);
				wqe += sizeof (struct mlx4_wqe_masked_atomic_seg);

				size += (sizeof (struct mlx4_wqe_raddr_seg) +
					 sizeof (struct mlx4_wqe_masked_atomic_seg)) / 16;

				break;

			case IB_WR_RDMA_READ:
			case IB_WR_RDMA_WRITE:
			case IB_WR_RDMA_WRITE_WITH_IMM:
				set_raddr_seg(wqe, wr->wr.rdma.remote_addr,
					      wr->wr.rdma.rkey);
				wqe += sizeof (struct mlx4_wqe_raddr_seg);
				size += sizeof (struct mlx4_wqe_raddr_seg) / 16;
				break;

			case IB_WR_LOCAL_INV:
				ctrl->srcrb_flags |=
					cpu_to_be32(MLX4_WQE_CTRL_STRONG_ORDER);
				set_local_inv_seg(wqe, wr->ex.invalidate_rkey);
				wqe += sizeof (struct mlx4_wqe_local_inval_seg);
				size += sizeof (struct mlx4_wqe_local_inval_seg) / 16;
				break;

			case IB_WR_FAST_REG_MR:
				ctrl->srcrb_flags |=
					cpu_to_be32(MLX4_WQE_CTRL_STRONG_ORDER);
				set_fmr_seg(wqe, wr);
				wqe += sizeof (struct mlx4_wqe_fmr_seg);
				size += sizeof (struct mlx4_wqe_fmr_seg) / 16;
				break;

			case IB_WR_BIND_MW:
				ctrl->srcrb_flags |=
					cpu_to_be32(MLX4_WQE_CTRL_STRONG_ORDER);
				set_bind_seg(wqe, wr);
				wqe += sizeof(struct mlx4_wqe_bind_seg);
				size += sizeof(struct mlx4_wqe_bind_seg) / 16;
				break;

			default:
				/* No extra segments required for sends */
				break;
			}
			break;

		case MLX4_IB_QPT_TUN_SMI_OWNER:
			err = build_sriov_qp0_header(to_msqp(qp), wr, ctrl, &seglen);
			if (unlikely(err)) {
				*bad_wr = wr;
				goto out;
			}
			wqe += seglen;
			size += seglen / 16;
			break;
		case MLX4_IB_QPT_TUN_SMI:
		case MLX4_IB_QPT_TUN_GSI:
			/* this is a UD qp used in MAD responses to slaves. */
			set_datagram_seg(wqe, wr);
			/* set the forced-loopback bit in the data seg av */
			*(__be32 *) wqe |= cpu_to_be32(0x80000000);
			wqe += sizeof (struct mlx4_wqe_datagram_seg);
			size += sizeof (struct mlx4_wqe_datagram_seg) / 16;
			break;
		case MLX4_IB_QPT_UD:
			set_datagram_seg(wqe, wr);
			wqe += sizeof (struct mlx4_wqe_datagram_seg);
			size += sizeof (struct mlx4_wqe_datagram_seg) / 16;

			if (wr->opcode == IB_WR_LSO) {
				err = build_lso_seg(wqe, wr, qp, &seglen, &lso_hdr_sz, &blh);
				if (unlikely(err)) {
					*bad_wr = wr;
					goto out;
				}
				lso_wqe = (__be32 *) wqe;
				wqe += seglen;
				size += seglen / 16;
			}
			break;

		case MLX4_IB_QPT_PROXY_SMI_OWNER:
			if (unlikely(!mlx4_is_master(to_mdev(ibqp->device)->dev))) {
				err = -ENOSYS;
				*bad_wr = wr;
				goto out;
			}
			err = build_sriov_qp0_header(to_msqp(qp), wr, ctrl, &seglen);
			if (unlikely(err)) {
				*bad_wr = wr;
				goto out;
			}
			wqe += seglen;
			size += seglen / 16;
			/* to start tunnel header on a cache-line boundary */
			add_zero_len_inline(wqe);
			wqe += 16;
			size++;
			build_tunnel_header(wr, wqe, &seglen);
			wqe += seglen;
			size += seglen / 16;
			break;
		case MLX4_IB_QPT_PROXY_SMI:
			/* don't allow QP0 sends on guests */
			err = -ENOSYS;
			*bad_wr = wr;
			goto out;
		case MLX4_IB_QPT_PROXY_GSI:
			/*
			 * If we are tunneling special qps, this is a UD qp.
			 * In this case we first add a UD segment targeting
			 * the tunnel qp, and then add a header with address
			 * information.
			 */
			set_tunnel_datagram_seg(to_mdev(ibqp->device), wqe, wr, ibqp->qp_type);
			wqe += sizeof (struct mlx4_wqe_datagram_seg);
			size += sizeof (struct mlx4_wqe_datagram_seg) / 16;
			build_tunnel_header(wr, wqe, &seglen);
			wqe += seglen;
			size += seglen / 16;
			break;

		case MLX4_IB_QPT_SMI:
		case MLX4_IB_QPT_GSI:
			err = build_mlx_header(to_msqp(qp), wr, ctrl, &seglen);
			if (unlikely(err)) {
				*bad_wr = wr;
				goto out;
			}
			wqe += seglen;
			size += seglen / 16;
			break;

		default:
			break;
		}

		/*
		 * Write data segments in reverse order, so as to
		 * overwrite cacheline stamp last within each
		 * cacheline.  This avoids issues with WQE
		 * prefetching.
		 */

		dseg = wqe;
		dseg += wr->num_sge - 1;
		size += wr->num_sge * (sizeof (struct mlx4_wqe_data_seg) / 16);

		/* Add one more inline data segment for ICRC for MLX sends */
		if (unlikely(qp->mlx4_ib_qp_type == MLX4_IB_QPT_SMI ||
			     qp->mlx4_ib_qp_type == MLX4_IB_QPT_GSI ||
			     qp->mlx4_ib_qp_type &
			     (MLX4_IB_QPT_PROXY_SMI_OWNER | MLX4_IB_QPT_TUN_SMI_OWNER))) {
			set_mlx_icrc_seg(dseg + 1);
			size += sizeof (struct mlx4_wqe_data_seg) / 16;
		}

		for (i = wr->num_sge - 1; i >= 0; --i, --dseg)
			set_data_seg(dseg, wr->sg_list + i);

		/*
		 * Possibly overwrite stamping in cacheline with LSO
		 * segment only after making sure all data segments
		 * are written.
		 */
		wmb();
		*lso_wqe = lso_hdr_sz;

		ctrl->fence_size = (wr->send_flags & IB_SEND_FENCE ?
				    MLX4_WQE_CTRL_FENCE : 0) | size;

		/*
		 * Make sure descriptor is fully written before
		 * setting ownership bit (because HW can start
		 * executing as soon as we do).
		 */
		wmb();

		if (wr->opcode < 0 || wr->opcode >= ARRAY_SIZE(mlx4_ib_opcode)) {
			*bad_wr = wr;
			err = -EINVAL;
			goto out;
		}

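		/*
		 * The ownership bit (bit 31) of owner_opcode flips each
		 * time the producer index wraps past the queue size,
		 * letting the HCA distinguish freshly posted WQEs from
		 * stale ones left over from the previous pass.
		 */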
		ctrl->owner_opcode = mlx4_ib_opcode[wr->opcode] |
			(ind & qp->sq.wqe_cnt ? cpu_to_be32(1 << 31) : 0) | blh;

		stamp = ind + qp->sq_spare_wqes;
		ind += DIV_ROUND_UP(size * 16, 1U << qp->sq.wqe_shift);

		/*
		 * We can improve latency by not stamping the last
		 * send queue WQE until after ringing the doorbell, so
		 * only stamp here if there are still more WQEs to post.
		 *
		 * Same optimization applies to padding with NOP wqe
		 * in case of WQE shrinking (used to prevent wrap-around
		 * in the middle of WR).
		 */
		if (wr->next) {
			stamp_send_wqe(qp, stamp, size * 16);
			ind = pad_wraparound(qp, ind);
		}
	}

out:
	if (likely(nreq)) {
		qp->sq.head += nreq;

		/*
		 * Make sure that descriptors are written before
		 * doorbell record.
		 */
		wmb();

		writel(qp->doorbell_qpn,
		       to_mdev(ibqp->device)->uar_map + MLX4_SEND_DOORBELL);

		/*
		 * Make sure doorbells don't leak out of SQ spinlock
		 * and reach the HCA out of order.
		 */
		mmiowb();

		stamp_send_wqe(qp, stamp, size * 16);

		ind = pad_wraparound(qp, ind);
		qp->sq_next_wqe = ind;
	}

	spin_unlock_irqrestore(&qp->sq.lock, flags);

	return err;
}

int mlx4_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
		      struct ib_recv_wr **bad_wr)
{
	struct mlx4_ib_qp *qp = to_mqp(ibqp);
	struct mlx4_wqe_data_seg *scat;
	unsigned long flags;
	int err = 0;
	int nreq;
	int ind;
	int max_gs;
	int i;

	max_gs = qp->rq.max_gs;
	spin_lock_irqsave(&qp->rq.lock, flags);

	ind = qp->rq.head & (qp->rq.wqe_cnt - 1);

	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		if (mlx4_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) {
			err = -ENOMEM;
			*bad_wr = wr;
			goto out;
		}

		if (unlikely(wr->num_sge > qp->rq.max_gs)) {
			err = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		scat = get_recv_wqe(qp, ind);

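		/*
		 * Proxy QPs reserve the first scatter entry for the tunnel
		 * header written by the master: it points at a driver-owned
		 * buffer and borrows the DMA lkey from the caller's first
		 * SGE, leaving one less entry (max_gs) for payload.
		 */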
Jack Morgenstein1ffeb2e2012-08-03 08:40:40 +00002614 if (qp->mlx4_ib_qp_type & (MLX4_IB_QPT_PROXY_SMI_OWNER |
2615 MLX4_IB_QPT_PROXY_SMI | MLX4_IB_QPT_PROXY_GSI)) {
2616 ib_dma_sync_single_for_device(ibqp->device,
2617 qp->sqp_proxy_rcv[ind].map,
2618 sizeof (struct mlx4_ib_proxy_sqp_hdr),
2619 DMA_FROM_DEVICE);
2620 scat->byte_count =
2621 cpu_to_be32(sizeof (struct mlx4_ib_proxy_sqp_hdr));
2622 /* use dma lkey from upper layer entry */
2623 scat->lkey = cpu_to_be32(wr->sg_list->lkey);
2624 scat->addr = cpu_to_be64(qp->sqp_proxy_rcv[ind].map);
2625 scat++;
2626 max_gs--;
2627 }
2628
Roland Dreier2242fa42007-10-09 19:59:05 -07002629 for (i = 0; i < wr->num_sge; ++i)
2630 __set_data_seg(scat + i, wr->sg_list + i);
Roland Dreier225c7b12007-05-08 18:00:38 -07002631
Jack Morgenstein1ffeb2e2012-08-03 08:40:40 +00002632 if (i < max_gs) {
Roland Dreier225c7b12007-05-08 18:00:38 -07002633 scat[i].byte_count = 0;
2634 scat[i].lkey = cpu_to_be32(MLX4_INVALID_LKEY);
2635 scat[i].addr = 0;
2636 }
2637
2638 qp->rq.wrid[ind] = wr->wr_id;
2639
Roland Dreier0e6e7412007-06-18 08:13:48 -07002640 ind = (ind + 1) & (qp->rq.wqe_cnt - 1);
Roland Dreier225c7b12007-05-08 18:00:38 -07002641 }
2642
2643out:
2644 if (likely(nreq)) {
2645 qp->rq.head += nreq;
2646
2647 /*
2648 * Make sure that descriptors are written before
2649 * doorbell record.
2650 */
2651 wmb();
2652
2653 *qp->db.db = cpu_to_be32(qp->rq.head & 0xffff);
2654 }
2655
2656 spin_unlock_irqrestore(&qp->rq.lock, flags);
2657
2658 return err;
2659}
Jack Morgenstein6a775e22007-06-21 12:27:47 +03002660
static inline enum ib_qp_state to_ib_qp_state(enum mlx4_qp_state mlx4_state)
{
	switch (mlx4_state) {
	case MLX4_QP_STATE_RST:      return IB_QPS_RESET;
	case MLX4_QP_STATE_INIT:     return IB_QPS_INIT;
	case MLX4_QP_STATE_RTR:      return IB_QPS_RTR;
	case MLX4_QP_STATE_RTS:      return IB_QPS_RTS;
	case MLX4_QP_STATE_SQ_DRAINING:
	case MLX4_QP_STATE_SQD:      return IB_QPS_SQD;
	case MLX4_QP_STATE_SQER:     return IB_QPS_SQE;
	case MLX4_QP_STATE_ERR:      return IB_QPS_ERR;
	default:		     return -1;
	}
}

static inline enum ib_mig_state to_ib_mig_state(int mlx4_mig_state)
{
	switch (mlx4_mig_state) {
	case MLX4_QP_PM_ARMED:		return IB_MIG_ARMED;
	case MLX4_QP_PM_REARM:		return IB_MIG_REARM;
	case MLX4_QP_PM_MIGRATED:	return IB_MIG_MIGRATED;
	default:			return -1;
	}
}

static int to_ib_qp_access_flags(int mlx4_flags)
{
	int ib_flags = 0;

	if (mlx4_flags & MLX4_QP_BIT_RRE)
		ib_flags |= IB_ACCESS_REMOTE_READ;
	if (mlx4_flags & MLX4_QP_BIT_RWE)
		ib_flags |= IB_ACCESS_REMOTE_WRITE;
	if (mlx4_flags & MLX4_QP_BIT_RAE)
		ib_flags |= IB_ACCESS_REMOTE_ATOMIC;

	return ib_flags;
}

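/*
 * Rebuild an ib_ah_attr from a hardware address path entry.  Bit 6 of
 * sched_queue selects the port.  The SL encoding depends on the port's
 * link layer: for Ethernet (IBoE) it is split across sched_queue bits,
 * while for IB it sits in bits 2..5.  GRH fields are decoded only when
 * the GRH bit (bit 7 of grh_mylmc) is set.
 */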
static void to_ib_ah_attr(struct mlx4_ib_dev *ibdev, struct ib_ah_attr *ib_ah_attr,
			  struct mlx4_qp_path *path)
{
	struct mlx4_dev *dev = ibdev->dev;
	int is_eth;

	memset(ib_ah_attr, 0, sizeof *ib_ah_attr);
	ib_ah_attr->port_num = path->sched_queue & 0x40 ? 2 : 1;

	if (ib_ah_attr->port_num == 0 || ib_ah_attr->port_num > dev->caps.num_ports)
		return;

	is_eth = rdma_port_get_link_layer(&ibdev->ib_dev, ib_ah_attr->port_num) ==
		IB_LINK_LAYER_ETHERNET;
	if (is_eth)
		ib_ah_attr->sl = ((path->sched_queue >> 3) & 0x7) |
			((path->sched_queue & 4) << 1);
	else
		ib_ah_attr->sl = (path->sched_queue >> 2) & 0xf;

	ib_ah_attr->dlid	  = be16_to_cpu(path->rlid);
	ib_ah_attr->src_path_bits = path->grh_mylmc & 0x7f;
	ib_ah_attr->static_rate	  = path->static_rate ? path->static_rate - 5 : 0;
	ib_ah_attr->ah_flags	  = (path->grh_mylmc & (1 << 7)) ? IB_AH_GRH : 0;
	if (ib_ah_attr->ah_flags) {
		ib_ah_attr->grh.sgid_index = path->mgid_index;
		ib_ah_attr->grh.hop_limit  = path->hop_limit;
		ib_ah_attr->grh.traffic_class =
			(be32_to_cpu(path->tclass_flowlabel) >> 20) & 0xff;
		ib_ah_attr->grh.flow_label =
			be32_to_cpu(path->tclass_flowlabel) & 0xfffff;
		memcpy(ib_ah_attr->grh.dgid.raw,
			path->rgid, sizeof ib_ah_attr->grh.dgid.raw);
	}
}

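/*
 * mlx4_ib_query_qp - read back the current attributes of a QP.
 *
 * A QP in RESET is reported without touching the device; otherwise the
 * hardware context is fetched with mlx4_qp_query() and decoded with the
 * helpers above.  Note that qp_attr_mask is not consulted: every
 * attribute is filled in on each call.
 *
 * A minimal usage sketch, as seen from an upper-layer consumer going
 * through the verbs layer (the error handling shown is illustrative,
 * not part of this driver):
 *
 *	struct ib_qp_attr attr;
 *	struct ib_qp_init_attr init_attr;
 *
 *	if (!ib_query_qp(qp, &attr, IB_QP_STATE, &init_attr) &&
 *	    attr.qp_state == IB_QPS_ERR)
 *		... tear down or re-create the QP ...
 */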
int mlx4_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask,
		     struct ib_qp_init_attr *qp_init_attr)
{
	struct mlx4_ib_dev *dev = to_mdev(ibqp->device);
	struct mlx4_ib_qp *qp = to_mqp(ibqp);
	struct mlx4_qp_context context;
	int mlx4_state;
	int err = 0;

	mutex_lock(&qp->mutex);

	if (qp->state == IB_QPS_RESET) {
		qp_attr->qp_state = IB_QPS_RESET;
		goto done;
	}

	err = mlx4_qp_query(dev->dev, &qp->mqp, &context);
	if (err) {
		err = -EINVAL;
		goto out;
	}

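	/* The QP state is kept in the top four bits of the context flags word. */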
	mlx4_state = be32_to_cpu(context.flags) >> 28;

	qp->state		 = to_ib_qp_state(mlx4_state);
	qp_attr->qp_state	 = qp->state;
	qp_attr->path_mtu	 = context.mtu_msgmax >> 5;
	qp_attr->path_mig_state	 =
		to_ib_mig_state((be32_to_cpu(context.flags) >> 11) & 0x3);
	qp_attr->qkey		 = be32_to_cpu(context.qkey);
	qp_attr->rq_psn		 = be32_to_cpu(context.rnr_nextrecvpsn) & 0xffffff;
	qp_attr->sq_psn		 = be32_to_cpu(context.next_send_psn) & 0xffffff;
	qp_attr->dest_qp_num	 = be32_to_cpu(context.remote_qpn) & 0xffffff;
	qp_attr->qp_access_flags =
		to_ib_qp_access_flags(be32_to_cpu(context.params2));

	if (qp->ibqp.qp_type == IB_QPT_RC || qp->ibqp.qp_type == IB_QPT_UC) {
		to_ib_ah_attr(dev, &qp_attr->ah_attr, &context.pri_path);
		to_ib_ah_attr(dev, &qp_attr->alt_ah_attr, &context.alt_path);
		qp_attr->alt_pkey_index = context.alt_path.pkey_index & 0x7f;
		qp_attr->alt_port_num	= qp_attr->alt_ah_attr.port_num;
	}

	qp_attr->pkey_index = context.pri_path.pkey_index & 0x7f;
	if (qp_attr->qp_state == IB_QPS_INIT)
		qp_attr->port_num = qp->port;
	else
		qp_attr->port_num = context.pri_path.sched_queue & 0x40 ? 2 : 1;

	/* qp_attr->en_sqd_async_notify is only applicable in modify qp */
	qp_attr->sq_draining = mlx4_state == MLX4_QP_STATE_SQ_DRAINING;

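	/*
	 * The initiator and responder resource depths are stored as log2
	 * values in params1/params2; convert them back to the absolute
	 * counts the verbs layer reports.
	 */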
	qp_attr->max_rd_atomic = 1 << ((be32_to_cpu(context.params1) >> 21) & 0x7);

	qp_attr->max_dest_rd_atomic =
		1 << ((be32_to_cpu(context.params2) >> 21) & 0x7);
	qp_attr->min_rnr_timer	    =
		(be32_to_cpu(context.rnr_nextrecvpsn) >> 24) & 0x1f;
	qp_attr->timeout	    = context.pri_path.ackto >> 3;
	qp_attr->retry_cnt	    = (be32_to_cpu(context.params1) >> 16) & 0x7;
	qp_attr->rnr_retry	    = (be32_to_cpu(context.params1) >> 13) & 0x7;
	qp_attr->alt_timeout	    = context.alt_path.ackto >> 3;

done:
	qp_attr->cur_qp_state	  = qp_attr->qp_state;
	qp_attr->cap.max_recv_wr  = qp->rq.wqe_cnt;
	qp_attr->cap.max_recv_sge = qp->rq.max_gs;

	if (!ibqp->uobject) {
		qp_attr->cap.max_send_wr  = qp->sq.wqe_cnt;
		qp_attr->cap.max_send_sge = qp->sq.max_gs;
	} else {
		qp_attr->cap.max_send_wr  = 0;
		qp_attr->cap.max_send_sge = 0;
	}

	/*
	 * We don't support inline sends for kernel QPs (yet), and we
	 * don't know what userspace's value should be.
	 */
	qp_attr->cap.max_inline_data = 0;

	qp_init_attr->cap = qp_attr->cap;

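	/*
	 * create_flags are not read back from the hardware context;
	 * reconstruct them from the flags latched in mlx4_ib_qp at
	 * creation time.
	 */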
	qp_init_attr->create_flags = 0;
	if (qp->flags & MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK)
		qp_init_attr->create_flags |= IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK;

	if (qp->flags & MLX4_IB_QP_LSO)
		qp_init_attr->create_flags |= IB_QP_CREATE_IPOIB_UD_LSO;

	qp_init_attr->sq_sig_type =
		qp->sq_signal_bits == cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE) ?
		IB_SIGNAL_ALL_WR : IB_SIGNAL_REQ_WR;

out:
	mutex_unlock(&qp->mutex);
	return err;
}
