/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Cisco Systems. All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: mthca_qp.c 1355 2004-12-17 15:23:43Z roland $
 */

#include <linux/init.h>
#include <linux/string.h>
#include <linux/slab.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_cache.h>
#include <rdma/ib_pack.h>

#include "mthca_dev.h"
#include "mthca_cmd.h"
#include "mthca_memfree.h"
#include "mthca_wqe.h"

enum {
	MTHCA_MAX_DIRECT_QP_SIZE = 4 * PAGE_SIZE,
	MTHCA_ACK_REQ_FREQ       = 10,
	MTHCA_FLIGHT_LIMIT       = 9,
	MTHCA_UD_HEADER_SIZE     = 72, /* largest UD header possible */
	MTHCA_INLINE_HEADER_SIZE = 4,  /* data segment overhead for inline */
	MTHCA_INLINE_CHUNK_SIZE  = 16  /* inline data segment chunk */
};

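/*
 * QP state values as encoded in the hardware QP context.  Note that
 * this numbering differs from enum ib_qp_state (and includes the
 * extra DRAINING state), hence the to_mthca_state() and
 * to_ib_qp_state() translation helpers below.
 */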
enum {
	MTHCA_QP_STATE_RST      = 0,
	MTHCA_QP_STATE_INIT     = 1,
	MTHCA_QP_STATE_RTR      = 2,
	MTHCA_QP_STATE_RTS      = 3,
	MTHCA_QP_STATE_SQE      = 4,
	MTHCA_QP_STATE_SQD      = 5,
	MTHCA_QP_STATE_ERR      = 6,
	MTHCA_QP_STATE_DRAINING = 7
};

enum {
	MTHCA_QP_ST_RC  = 0x0,
	MTHCA_QP_ST_UC  = 0x1,
	MTHCA_QP_ST_RD  = 0x2,
	MTHCA_QP_ST_UD  = 0x3,
	MTHCA_QP_ST_MLX = 0x7
};

enum {
	MTHCA_QP_PM_MIGRATED = 0x3,
	MTHCA_QP_PM_ARMED    = 0x0,
	MTHCA_QP_PM_REARM    = 0x1
};

enum {
	/* qp_context flags */
	MTHCA_QP_BIT_DE  = 1 <<  8,
	/* params1 */
	MTHCA_QP_BIT_SRE = 1 << 15,
	MTHCA_QP_BIT_SWE = 1 << 14,
	MTHCA_QP_BIT_SAE = 1 << 13,
	MTHCA_QP_BIT_SIC = 1 <<  4,
	MTHCA_QP_BIT_SSC = 1 <<  3,
	/* params2 */
	MTHCA_QP_BIT_RRE = 1 << 15,
	MTHCA_QP_BIT_RWE = 1 << 14,
	MTHCA_QP_BIT_RAE = 1 << 13,
	MTHCA_QP_BIT_RIC = 1 <<  4,
	MTHCA_QP_BIT_RSC = 1 <<  3
};

struct mthca_qp_path {
	__be32 port_pkey;
	u8     rnr_retry;
	u8     g_mylmc;
	__be16 rlid;
	u8     ackto;
	u8     mgid_index;
	u8     static_rate;
	u8     hop_limit;
	__be32 sl_tclass_flowlabel;
	u8     rgid[16];
} __attribute__((packed));

struct mthca_qp_context {
	__be32 flags;
	__be32 tavor_sched_queue;	/* Reserved on Arbel */
	u8     mtu_msgmax;
	u8     rq_size_stride;		/* Reserved on Tavor */
	u8     sq_size_stride;		/* Reserved on Tavor */
	u8     rlkey_arbel_sched_queue;	/* Reserved on Tavor */
	__be32 usr_page;
	__be32 local_qpn;
	__be32 remote_qpn;
	u32    reserved1[2];
	struct mthca_qp_path pri_path;
	struct mthca_qp_path alt_path;
	__be32 rdd;
	__be32 pd;
	__be32 wqe_base;
	__be32 wqe_lkey;
	__be32 params1;
	__be32 reserved2;
	__be32 next_send_psn;
	__be32 cqn_snd;
	__be32 snd_wqe_base_l;	/* Next send WQE on Tavor */
	__be32 snd_db_index;	/* (debugging only entries) */
	__be32 last_acked_psn;
	__be32 ssn;
	__be32 params2;
	__be32 rnr_nextrecvpsn;
	__be32 ra_buff_indx;
	__be32 cqn_rcv;
	__be32 rcv_wqe_base_l;	/* Next recv WQE on Tavor */
	__be32 rcv_db_index;	/* (debugging only entries) */
	__be32 qkey;
	__be32 srqn;
	__be32 rmsn;
	__be16 rq_wqe_counter;	/* reserved on Tavor */
	__be16 sq_wqe_counter;	/* reserved on Tavor */
	u32    reserved3[18];
} __attribute__((packed));

struct mthca_qp_param {
	__be32 opt_param_mask;
	u32    reserved1;
	struct mthca_qp_context context;
	u32    reserved2[62];
} __attribute__((packed));

enum {
	MTHCA_QP_OPTPAR_ALT_ADDR_PATH     = 1 << 0,
	MTHCA_QP_OPTPAR_RRE               = 1 << 1,
	MTHCA_QP_OPTPAR_RAE               = 1 << 2,
	MTHCA_QP_OPTPAR_RWE               = 1 << 3,
	MTHCA_QP_OPTPAR_PKEY_INDEX        = 1 << 4,
	MTHCA_QP_OPTPAR_Q_KEY             = 1 << 5,
	MTHCA_QP_OPTPAR_RNR_TIMEOUT       = 1 << 6,
	MTHCA_QP_OPTPAR_PRIMARY_ADDR_PATH = 1 << 7,
	MTHCA_QP_OPTPAR_SRA_MAX           = 1 << 8,
	MTHCA_QP_OPTPAR_RRA_MAX           = 1 << 9,
	MTHCA_QP_OPTPAR_PM_STATE          = 1 << 10,
	MTHCA_QP_OPTPAR_PORT_NUM          = 1 << 11,
	MTHCA_QP_OPTPAR_RETRY_COUNT       = 1 << 12,
	MTHCA_QP_OPTPAR_ALT_RNR_RETRY     = 1 << 13,
	MTHCA_QP_OPTPAR_ACK_TIMEOUT       = 1 << 14,
	MTHCA_QP_OPTPAR_RNR_RETRY         = 1 << 15,
	MTHCA_QP_OPTPAR_SCHED_QUEUE       = 1 << 16
};

static const u8 mthca_opcode[] = {
	[IB_WR_SEND]                 = MTHCA_OPCODE_SEND,
	[IB_WR_SEND_WITH_IMM]        = MTHCA_OPCODE_SEND_IMM,
	[IB_WR_RDMA_WRITE]           = MTHCA_OPCODE_RDMA_WRITE,
	[IB_WR_RDMA_WRITE_WITH_IMM]  = MTHCA_OPCODE_RDMA_WRITE_IMM,
	[IB_WR_RDMA_READ]            = MTHCA_OPCODE_RDMA_READ,
	[IB_WR_ATOMIC_CMP_AND_SWP]   = MTHCA_OPCODE_ATOMIC_CS,
	[IB_WR_ATOMIC_FETCH_AND_ADD] = MTHCA_OPCODE_ATOMIC_FA,
};

static int is_sqp(struct mthca_dev *dev, struct mthca_qp *qp)
{
	return qp->qpn >= dev->qp_table.sqp_start &&
		qp->qpn <= dev->qp_table.sqp_start + 3;
}

static int is_qp0(struct mthca_dev *dev, struct mthca_qp *qp)
{
	return qp->qpn >= dev->qp_table.sqp_start &&
		qp->qpn <= dev->qp_table.sqp_start + 1;
}

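/*
 * A QP's work queue buffer is either one contiguous ("direct")
 * allocation or a list of pages; the helpers below turn a WQE index
 * into a kernel virtual address for either layout.  Send WQEs live
 * after the receive queue, at send_wqe_offset into the buffer.
 */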
static void *get_recv_wqe(struct mthca_qp *qp, int n)
{
	if (qp->is_direct)
		return qp->queue.direct.buf + (n << qp->rq.wqe_shift);
	else
		return qp->queue.page_list[(n << qp->rq.wqe_shift) >> PAGE_SHIFT].buf +
			((n << qp->rq.wqe_shift) & (PAGE_SIZE - 1));
}

static void *get_send_wqe(struct mthca_qp *qp, int n)
{
	if (qp->is_direct)
		return qp->queue.direct.buf + qp->send_wqe_offset +
			(n << qp->sq.wqe_shift);
	else
		return qp->queue.page_list[(qp->send_wqe_offset +
					    (n << qp->sq.wqe_shift)) >>
					   PAGE_SHIFT].buf +
			((qp->send_wqe_offset + (n << qp->sq.wqe_shift)) &
			 (PAGE_SIZE - 1));
}

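/*
 * Reset a work queue's ring indices to the empty state: head == tail
 * means no outstanding work requests, and last_comp is set one slot
 * behind index 0 so the first completion lands on entry 0.
 */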
static void mthca_wq_reset(struct mthca_wq *wq)
{
	wq->next_ind  = 0;
	wq->last_comp = wq->max - 1;
	wq->head      = 0;
	wq->tail      = 0;
}

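/*
 * Dispatch an asynchronous event on a QP to the consumer's event
 * handler.  The refcount bump keeps the QP from being freed while the
 * handler runs: mthca_free_qp() waits until the refcount drops to 0.
 */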
void mthca_qp_event(struct mthca_dev *dev, u32 qpn,
		    enum ib_event_type event_type)
{
	struct mthca_qp *qp;
	struct ib_event event;

	spin_lock(&dev->qp_table.lock);
	qp = mthca_array_get(&dev->qp_table.qp, qpn & (dev->limits.num_qps - 1));
	if (qp)
		++qp->refcount;
	spin_unlock(&dev->qp_table.lock);

	if (!qp) {
		mthca_warn(dev, "Async event for bogus QP %08x\n", qpn);
		return;
	}

	if (event_type == IB_EVENT_PATH_MIG)
		qp->port = qp->alt_port;

	event.device     = &dev->ib_dev;
	event.event      = event_type;
	event.element.qp = &qp->ibqp;
	if (qp->ibqp.event_handler)
		qp->ibqp.event_handler(&event, qp->ibqp.qp_context);

	spin_lock(&dev->qp_table.lock);
	if (!--qp->refcount)
		wake_up(&qp->wait);
	spin_unlock(&dev->qp_table.lock);
}

static int to_mthca_state(enum ib_qp_state ib_state)
{
	switch (ib_state) {
	case IB_QPS_RESET: return MTHCA_QP_STATE_RST;
	case IB_QPS_INIT:  return MTHCA_QP_STATE_INIT;
	case IB_QPS_RTR:   return MTHCA_QP_STATE_RTR;
	case IB_QPS_RTS:   return MTHCA_QP_STATE_RTS;
	case IB_QPS_SQD:   return MTHCA_QP_STATE_SQD;
	case IB_QPS_SQE:   return MTHCA_QP_STATE_SQE;
	case IB_QPS_ERR:   return MTHCA_QP_STATE_ERR;
	default:           return -1;
	}
}

enum { RC, UC, UD, RD, RDEE, MLX, NUM_TRANS };

static int to_mthca_st(int transport)
{
	switch (transport) {
	case RC:  return MTHCA_QP_ST_RC;
	case UC:  return MTHCA_QP_ST_UC;
	case UD:  return MTHCA_QP_ST_UD;
	case RD:  return MTHCA_QP_ST_RD;
	case MLX: return MTHCA_QP_ST_MLX;
	default:  return -1;
	}
}

static void store_attrs(struct mthca_sqp *sqp, struct ib_qp_attr *attr,
			int attr_mask)
{
	if (attr_mask & IB_QP_PKEY_INDEX)
		sqp->pkey_index = attr->pkey_index;
	if (attr_mask & IB_QP_QKEY)
		sqp->qkey = attr->qkey;
	if (attr_mask & IB_QP_SQ_PSN)
		sqp->send_psn = attr->sq_psn;
}

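/*
 * Bring the IB link on a port up by issuing INIT_IB with the
 * device's capability limits.  Called when QP0 transitions to RTR
 * (see mthca_modify_qp() below).
 */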
static void init_port(struct mthca_dev *dev, int port)
{
	int err;
	u8 status;
	struct mthca_init_ib_param param;

	memset(&param, 0, sizeof param);

	param.port_width = dev->limits.port_width_cap;
	param.vl_cap     = dev->limits.vl_cap;
	param.mtu_cap    = dev->limits.mtu_cap;
	param.gid_cap    = dev->limits.gid_table_len;
	param.pkey_cap   = dev->limits.pkey_table_len;

	err = mthca_INIT_IB(dev, &param, port, &status);
	if (err)
		mthca_warn(dev, "INIT_IB failed, return code %d.\n", err);
	if (status)
		mthca_warn(dev, "INIT_IB returned status %02x.\n", status);
}

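/*
 * Compute the RRE/RAE/RWE bits for the QP context from the remote
 * access flags and responder depth being set (or, when a field is not
 * in attr_mask, from the values already stored in the QP).  A
 * responder depth of zero disables remote reads and atomics, since
 * there are no resources to serve them; only remote writes survive.
 */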
static __be32 get_hw_access_flags(struct mthca_qp *qp, struct ib_qp_attr *attr,
				  int attr_mask)
{
	u8 dest_rd_atomic;
	u32 access_flags;
	u32 hw_access_flags = 0;

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		dest_rd_atomic = attr->max_dest_rd_atomic;
	else
		dest_rd_atomic = qp->resp_depth;

	if (attr_mask & IB_QP_ACCESS_FLAGS)
		access_flags = attr->qp_access_flags;
	else
		access_flags = qp->atomic_rd_en;

	if (!dest_rd_atomic)
		access_flags &= IB_ACCESS_REMOTE_WRITE;

	if (access_flags & IB_ACCESS_REMOTE_READ)
		hw_access_flags |= MTHCA_QP_BIT_RRE;
	if (access_flags & IB_ACCESS_REMOTE_ATOMIC)
		hw_access_flags |= MTHCA_QP_BIT_RAE;
	if (access_flags & IB_ACCESS_REMOTE_WRITE)
		hw_access_flags |= MTHCA_QP_BIT_RWE;

	return cpu_to_be32(hw_access_flags);
}

static inline enum ib_qp_state to_ib_qp_state(int mthca_state)
{
	switch (mthca_state) {
	case MTHCA_QP_STATE_RST:      return IB_QPS_RESET;
	case MTHCA_QP_STATE_INIT:     return IB_QPS_INIT;
	case MTHCA_QP_STATE_RTR:      return IB_QPS_RTR;
	case MTHCA_QP_STATE_RTS:      return IB_QPS_RTS;
	case MTHCA_QP_STATE_DRAINING:
	case MTHCA_QP_STATE_SQD:      return IB_QPS_SQD;
	case MTHCA_QP_STATE_SQE:      return IB_QPS_SQE;
	case MTHCA_QP_STATE_ERR:      return IB_QPS_ERR;
	default:                      return -1;
	}
}

static inline enum ib_mig_state to_ib_mig_state(int mthca_mig_state)
{
	switch (mthca_mig_state) {
	case 0:  return IB_MIG_ARMED;
	case 1:  return IB_MIG_REARM;
	case 3:  return IB_MIG_MIGRATED;
	default: return -1;
	}
}

static int to_ib_qp_access_flags(int mthca_flags)
{
	int ib_flags = 0;

	if (mthca_flags & MTHCA_QP_BIT_RRE)
		ib_flags |= IB_ACCESS_REMOTE_READ;
	if (mthca_flags & MTHCA_QP_BIT_RWE)
		ib_flags |= IB_ACCESS_REMOTE_WRITE;
	if (mthca_flags & MTHCA_QP_BIT_RAE)
		ib_flags |= IB_ACCESS_REMOTE_ATOMIC;

	return ib_flags;
}

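/*
 * Decode an address path from the hardware QP context back into an
 * ib_ah_attr; this is the inverse of mthca_path_set() below.
 */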
static void to_ib_ah_attr(struct mthca_dev *dev, struct ib_ah_attr *ib_ah_attr,
			  struct mthca_qp_path *path)
{
	memset(ib_ah_attr, 0, sizeof *ib_ah_attr);
	ib_ah_attr->port_num = (be32_to_cpu(path->port_pkey) >> 24) & 0x3;

	if (ib_ah_attr->port_num == 0 || ib_ah_attr->port_num > dev->limits.num_ports)
		return;

	ib_ah_attr->dlid          = be16_to_cpu(path->rlid);
	ib_ah_attr->sl            = be32_to_cpu(path->sl_tclass_flowlabel) >> 28;
	ib_ah_attr->src_path_bits = path->g_mylmc & 0x7f;
	ib_ah_attr->static_rate   = mthca_rate_to_ib(dev,
						     path->static_rate & 0x7,
						     ib_ah_attr->port_num);
	ib_ah_attr->ah_flags      = (path->g_mylmc & (1 << 7)) ? IB_AH_GRH : 0;
	if (ib_ah_attr->ah_flags) {
		ib_ah_attr->grh.sgid_index = path->mgid_index & (dev->limits.gid_table_len - 1);
		ib_ah_attr->grh.hop_limit  = path->hop_limit;
		ib_ah_attr->grh.traffic_class =
			(be32_to_cpu(path->sl_tclass_flowlabel) >> 20) & 0xff;
		ib_ah_attr->grh.flow_label =
			be32_to_cpu(path->sl_tclass_flowlabel) & 0xfffff;
		memcpy(ib_ah_attr->grh.dgid.raw,
		       path->rgid, sizeof ib_ah_attr->grh.dgid.raw);
	}
}

int mthca_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask,
		   struct ib_qp_init_attr *qp_init_attr)
{
	struct mthca_dev *dev = to_mdev(ibqp->device);
	struct mthca_qp *qp = to_mqp(ibqp);
	int err;
	struct mthca_mailbox *mailbox;
	struct mthca_qp_param *qp_param;
	struct mthca_qp_context *context;
	int mthca_state;
	u8 status;

	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	err = mthca_QUERY_QP(dev, qp->qpn, 0, mailbox, &status);
	if (err)
		goto out;
	if (status) {
		mthca_warn(dev, "QUERY_QP returned status %02x\n", status);
		err = -EINVAL;
		goto out;
	}

	qp_param    = mailbox->buf;
	context     = &qp_param->context;
	mthca_state = be32_to_cpu(context->flags) >> 28;

	qp_attr->qp_state        = to_ib_qp_state(mthca_state);
	qp_attr->cur_qp_state    = qp_attr->qp_state;
	qp_attr->path_mtu        = context->mtu_msgmax >> 5;
	qp_attr->path_mig_state  =
		to_ib_mig_state((be32_to_cpu(context->flags) >> 11) & 0x3);
	qp_attr->qkey            = be32_to_cpu(context->qkey);
	qp_attr->rq_psn          = be32_to_cpu(context->rnr_nextrecvpsn) & 0xffffff;
	qp_attr->sq_psn          = be32_to_cpu(context->next_send_psn) & 0xffffff;
	qp_attr->dest_qp_num     = be32_to_cpu(context->remote_qpn) & 0xffffff;
	qp_attr->qp_access_flags =
		to_ib_qp_access_flags(be32_to_cpu(context->params2));
	qp_attr->cap.max_send_wr     = qp->sq.max;
	qp_attr->cap.max_recv_wr     = qp->rq.max;
	qp_attr->cap.max_send_sge    = qp->sq.max_gs;
	qp_attr->cap.max_recv_sge    = qp->rq.max_gs;
	qp_attr->cap.max_inline_data = qp->max_inline_data;

	if (qp->transport == RC || qp->transport == UC) {
		to_ib_ah_attr(dev, &qp_attr->ah_attr, &context->pri_path);
		to_ib_ah_attr(dev, &qp_attr->alt_ah_attr, &context->alt_path);
	}

	qp_attr->pkey_index     = be32_to_cpu(context->pri_path.port_pkey) & 0x7f;
	qp_attr->alt_pkey_index = be32_to_cpu(context->alt_path.port_pkey) & 0x7f;

	/* qp_attr->en_sqd_async_notify is only applicable in modify qp */
	qp_attr->sq_draining = mthca_state == MTHCA_QP_STATE_DRAINING;

	qp_attr->max_rd_atomic = 1 << ((be32_to_cpu(context->params1) >> 21) & 0x7);

	qp_attr->max_dest_rd_atomic =
		1 << ((be32_to_cpu(context->params2) >> 21) & 0x7);
	qp_attr->min_rnr_timer =
		(be32_to_cpu(context->rnr_nextrecvpsn) >> 24) & 0x1f;
	qp_attr->port_num      = qp_attr->ah_attr.port_num;
	qp_attr->timeout       = context->pri_path.ackto >> 3;
	qp_attr->retry_cnt     = (be32_to_cpu(context->params1) >> 16) & 0x7;
	qp_attr->rnr_retry     = context->pri_path.rnr_retry >> 5;
	qp_attr->alt_port_num  = qp_attr->alt_ah_attr.port_num;
	qp_attr->alt_timeout   = context->alt_path.ackto >> 3;
	qp_init_attr->cap      = qp_attr->cap;

out:
	mthca_free_mailbox(dev, mailbox);
	return err;
}

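/*
 * Encode an ib_ah_attr into a hardware address path.  Returns -1
 * (callers only test for nonzero) if the GRH's SGID index is out of
 * range for this device.
 */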
static int mthca_path_set(struct mthca_dev *dev, struct ib_ah_attr *ah,
			  struct mthca_qp_path *path, u8 port)
{
	path->g_mylmc     = ah->src_path_bits & 0x7f;
	path->rlid        = cpu_to_be16(ah->dlid);
	path->static_rate = mthca_get_rate(dev, ah->static_rate, port);

	if (ah->ah_flags & IB_AH_GRH) {
		if (ah->grh.sgid_index >= dev->limits.gid_table_len) {
			mthca_dbg(dev, "sgid_index (%u) too large. max is %d\n",
				  ah->grh.sgid_index, dev->limits.gid_table_len - 1);
			return -1;
		}

		path->g_mylmc   |= 1 << 7;
		path->mgid_index = ah->grh.sgid_index;
		path->hop_limit  = ah->grh.hop_limit;
		path->sl_tclass_flowlabel =
			cpu_to_be32((ah->sl << 28)                |
				    (ah->grh.traffic_class << 20) |
				    (ah->grh.flow_label));
		memcpy(path->rgid, ah->grh.dgid.raw, 16);
	} else
		path->sl_tclass_flowlabel = cpu_to_be32(ah->sl << 28);

	return 0;
}

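/*
 * Modify a QP: validate the state transition and attributes against
 * the device limits, build the new QP context in a mailbox, and hand
 * it to the firmware with MODIFY_QP.  On a kernel QP's transition to
 * RESET, also clean up stale CQ entries and reinitialize the work
 * queues.
 */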
int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
{
	struct mthca_dev *dev = to_mdev(ibqp->device);
	struct mthca_qp *qp = to_mqp(ibqp);
	enum ib_qp_state cur_state, new_state;
	struct mthca_mailbox *mailbox;
	struct mthca_qp_param *qp_param;
	struct mthca_qp_context *qp_context;
	u32 sqd_event = 0;
	u8 status;
	int err = -EINVAL;

	mutex_lock(&qp->mutex);

	if (attr_mask & IB_QP_CUR_STATE) {
		cur_state = attr->cur_qp_state;
	} else {
		spin_lock_irq(&qp->sq.lock);
		spin_lock(&qp->rq.lock);
		cur_state = qp->state;
		spin_unlock(&qp->rq.lock);
		spin_unlock_irq(&qp->sq.lock);
	}

	new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;

	if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, attr_mask)) {
		mthca_dbg(dev, "Bad QP transition (transport %d) "
			  "%d->%d with attr 0x%08x\n",
			  qp->transport, cur_state, new_state,
			  attr_mask);
		goto out;
	}

	if ((attr_mask & IB_QP_PKEY_INDEX) &&
	    attr->pkey_index >= dev->limits.pkey_table_len) {
		mthca_dbg(dev, "P_Key index (%u) too large. max is %d\n",
			  attr->pkey_index, dev->limits.pkey_table_len - 1);
		goto out;
	}

	if ((attr_mask & IB_QP_PORT) &&
	    (attr->port_num == 0 || attr->port_num > dev->limits.num_ports)) {
		mthca_dbg(dev, "Port number (%u) is invalid\n", attr->port_num);
		goto out;
	}

	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
	    attr->max_rd_atomic > dev->limits.max_qp_init_rdma) {
		mthca_dbg(dev, "Max rdma_atomic as initiator %u too large (max is %d)\n",
			  attr->max_rd_atomic, dev->limits.max_qp_init_rdma);
		goto out;
	}

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC &&
	    attr->max_dest_rd_atomic > 1 << dev->qp_table.rdb_shift) {
		mthca_dbg(dev, "Max rdma_atomic as responder %u too large (max %d)\n",
			  attr->max_dest_rd_atomic, 1 << dev->qp_table.rdb_shift);
		goto out;
	}

	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
	if (IS_ERR(mailbox)) {
		err = PTR_ERR(mailbox);
		goto out;
	}
	qp_param = mailbox->buf;
	qp_context = &qp_param->context;
	memset(qp_param, 0, sizeof *qp_param);

	qp_context->flags = cpu_to_be32((to_mthca_state(new_state) << 28) |
					(to_mthca_st(qp->transport) << 16));
	qp_context->flags |= cpu_to_be32(MTHCA_QP_BIT_DE);
	if (!(attr_mask & IB_QP_PATH_MIG_STATE))
		qp_context->flags |= cpu_to_be32(MTHCA_QP_PM_MIGRATED << 11);
	else {
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_PM_STATE);
		switch (attr->path_mig_state) {
		case IB_MIG_MIGRATED:
			qp_context->flags |= cpu_to_be32(MTHCA_QP_PM_MIGRATED << 11);
			break;
		case IB_MIG_REARM:
			qp_context->flags |= cpu_to_be32(MTHCA_QP_PM_REARM << 11);
			break;
		case IB_MIG_ARMED:
			qp_context->flags |= cpu_to_be32(MTHCA_QP_PM_ARMED << 11);
			break;
		}
	}

	/* leave tavor_sched_queue as 0 */

	if (qp->transport == MLX || qp->transport == UD)
		qp_context->mtu_msgmax = (IB_MTU_2048 << 5) | 11;
	else if (attr_mask & IB_QP_PATH_MTU) {
		if (attr->path_mtu < IB_MTU_256 || attr->path_mtu > IB_MTU_2048) {
			mthca_dbg(dev, "path MTU (%u) is invalid\n",
				  attr->path_mtu);
			goto out_mailbox;
		}
		qp_context->mtu_msgmax = (attr->path_mtu << 5) | 31;
	}

	if (mthca_is_memfree(dev)) {
		if (qp->rq.max)
			qp_context->rq_size_stride = long_log2(qp->rq.max) << 3;
		qp_context->rq_size_stride |= qp->rq.wqe_shift - 4;

		if (qp->sq.max)
			qp_context->sq_size_stride = long_log2(qp->sq.max) << 3;
		qp_context->sq_size_stride |= qp->sq.wqe_shift - 4;
	}

	/* leave arbel_sched_queue as 0 */

	if (qp->ibqp.uobject)
		qp_context->usr_page =
			cpu_to_be32(to_mucontext(qp->ibqp.uobject->context)->uar.index);
	else
		qp_context->usr_page = cpu_to_be32(dev->driver_uar.index);
	qp_context->local_qpn = cpu_to_be32(qp->qpn);
	if (attr_mask & IB_QP_DEST_QPN) {
		qp_context->remote_qpn = cpu_to_be32(attr->dest_qp_num);
	}

	if (qp->transport == MLX)
		qp_context->pri_path.port_pkey |=
			cpu_to_be32(qp->port << 24);
	else {
		if (attr_mask & IB_QP_PORT) {
			qp_context->pri_path.port_pkey |=
				cpu_to_be32(attr->port_num << 24);
			qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_PORT_NUM);
		}
	}

	if (attr_mask & IB_QP_PKEY_INDEX) {
		qp_context->pri_path.port_pkey |=
			cpu_to_be32(attr->pkey_index);
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_PKEY_INDEX);
	}

	if (attr_mask & IB_QP_RNR_RETRY) {
		qp_context->alt_path.rnr_retry = qp_context->pri_path.rnr_retry =
			attr->rnr_retry << 5;
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RNR_RETRY |
							MTHCA_QP_OPTPAR_ALT_RNR_RETRY);
	}

	if (attr_mask & IB_QP_AV) {
		if (mthca_path_set(dev, &attr->ah_attr, &qp_context->pri_path,
				   attr_mask & IB_QP_PORT ? attr->port_num : qp->port))
			goto out_mailbox;

		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_PRIMARY_ADDR_PATH);
	}

	if (attr_mask & IB_QP_TIMEOUT) {
		qp_context->pri_path.ackto = attr->timeout << 3;
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_ACK_TIMEOUT);
	}

	if (attr_mask & IB_QP_ALT_PATH) {
		if (attr->alt_pkey_index >= dev->limits.pkey_table_len) {
			mthca_dbg(dev, "Alternate P_Key index (%u) too large. max is %d\n",
				  attr->alt_pkey_index, dev->limits.pkey_table_len - 1);
			goto out_mailbox;
		}

		if (attr->alt_port_num == 0 || attr->alt_port_num > dev->limits.num_ports) {
			mthca_dbg(dev, "Alternate port number (%u) is invalid\n",
				  attr->alt_port_num);
			goto out_mailbox;
		}

		if (mthca_path_set(dev, &attr->alt_ah_attr, &qp_context->alt_path,
				   attr->alt_ah_attr.port_num))
			goto out_mailbox;

		qp_context->alt_path.port_pkey |= cpu_to_be32(attr->alt_pkey_index |
							      attr->alt_port_num << 24);
		qp_context->alt_path.ackto = attr->alt_timeout << 3;
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_ALT_ADDR_PATH);
	}

	/* leave rdd as 0 */
	qp_context->pd       = cpu_to_be32(to_mpd(ibqp->pd)->pd_num);
	/* leave wqe_base as 0 (we always create an MR based at 0 for WQs) */
	qp_context->wqe_lkey = cpu_to_be32(qp->mr.ibmr.lkey);
	qp_context->params1  = cpu_to_be32((MTHCA_ACK_REQ_FREQ << 28) |
					   (MTHCA_FLIGHT_LIMIT << 24) |
					   MTHCA_QP_BIT_SWE);
	if (qp->sq_policy == IB_SIGNAL_ALL_WR)
		qp_context->params1 |= cpu_to_be32(MTHCA_QP_BIT_SSC);
	if (attr_mask & IB_QP_RETRY_CNT) {
		qp_context->params1 |= cpu_to_be32(attr->retry_cnt << 16);
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RETRY_COUNT);
	}

	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
		if (attr->max_rd_atomic) {
			qp_context->params1 |=
				cpu_to_be32(MTHCA_QP_BIT_SRE |
					    MTHCA_QP_BIT_SAE);
			qp_context->params1 |=
				cpu_to_be32(fls(attr->max_rd_atomic - 1) << 21);
		}
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_SRA_MAX);
	}

	if (attr_mask & IB_QP_SQ_PSN)
		qp_context->next_send_psn = cpu_to_be32(attr->sq_psn);
	qp_context->cqn_snd = cpu_to_be32(to_mcq(ibqp->send_cq)->cqn);

	if (mthca_is_memfree(dev)) {
		qp_context->snd_wqe_base_l = cpu_to_be32(qp->send_wqe_offset);
		qp_context->snd_db_index   = cpu_to_be32(qp->sq.db_index);
	}

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
		if (attr->max_dest_rd_atomic)
			qp_context->params2 |=
				cpu_to_be32(fls(attr->max_dest_rd_atomic - 1) << 21);

		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RRA_MAX);
	}

	if (attr_mask & (IB_QP_ACCESS_FLAGS | IB_QP_MAX_DEST_RD_ATOMIC)) {
		qp_context->params2 |= get_hw_access_flags(qp, attr, attr_mask);
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RWE |
							MTHCA_QP_OPTPAR_RRE |
							MTHCA_QP_OPTPAR_RAE);
	}

	qp_context->params2 |= cpu_to_be32(MTHCA_QP_BIT_RSC);

	if (ibqp->srq)
		qp_context->params2 |= cpu_to_be32(MTHCA_QP_BIT_RIC);

	if (attr_mask & IB_QP_MIN_RNR_TIMER) {
		qp_context->rnr_nextrecvpsn |= cpu_to_be32(attr->min_rnr_timer << 24);
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RNR_TIMEOUT);
	}
	if (attr_mask & IB_QP_RQ_PSN)
		qp_context->rnr_nextrecvpsn |= cpu_to_be32(attr->rq_psn);

	qp_context->ra_buff_indx =
		cpu_to_be32(dev->qp_table.rdb_base +
			    ((qp->qpn & (dev->limits.num_qps - 1)) * MTHCA_RDB_ENTRY_SIZE <<
			     dev->qp_table.rdb_shift));

	qp_context->cqn_rcv = cpu_to_be32(to_mcq(ibqp->recv_cq)->cqn);

	if (mthca_is_memfree(dev))
		qp_context->rcv_db_index = cpu_to_be32(qp->rq.db_index);

	if (attr_mask & IB_QP_QKEY) {
		qp_context->qkey = cpu_to_be32(attr->qkey);
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_Q_KEY);
	}

	if (ibqp->srq)
		qp_context->srqn = cpu_to_be32(1 << 24 |
					       to_msrq(ibqp->srq)->srqn);

	if (cur_state == IB_QPS_RTS && new_state == IB_QPS_SQD &&
	    attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY &&
	    attr->en_sqd_async_notify)
		sqd_event = 1 << 31;

	err = mthca_MODIFY_QP(dev, cur_state, new_state, qp->qpn, 0,
			      mailbox, sqd_event, &status);
	if (err)
		goto out_mailbox;
	if (status) {
		mthca_warn(dev, "modify QP %d->%d returned status %02x.\n",
			   cur_state, new_state, status);
		err = -EINVAL;
		goto out_mailbox;
	}

	qp->state = new_state;
	if (attr_mask & IB_QP_ACCESS_FLAGS)
		qp->atomic_rd_en = attr->qp_access_flags;
	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		qp->resp_depth = attr->max_dest_rd_atomic;
	if (attr_mask & IB_QP_PORT)
		qp->port = attr->port_num;
	if (attr_mask & IB_QP_ALT_PATH)
		qp->alt_port = attr->alt_port_num;

	if (is_sqp(dev, qp))
		store_attrs(to_msqp(qp), attr, attr_mask);

	/*
	 * If we moved QP0 to RTR, bring the IB link up; if we moved
	 * QP0 to RESET or ERROR, bring the link back down.
	 */
	if (is_qp0(dev, qp)) {
		if (cur_state != IB_QPS_RTR &&
		    new_state == IB_QPS_RTR)
			init_port(dev, qp->port);

		if (cur_state != IB_QPS_RESET &&
		    cur_state != IB_QPS_ERR &&
		    (new_state == IB_QPS_RESET ||
		     new_state == IB_QPS_ERR))
			mthca_CLOSE_IB(dev, qp->port, &status);
	}

	/*
	 * If we moved a kernel QP to RESET, clean up all old CQ
	 * entries and reinitialize the QP.
	 */
	if (new_state == IB_QPS_RESET && !qp->ibqp.uobject) {
		mthca_cq_clean(dev, to_mcq(qp->ibqp.send_cq), qp->qpn,
			       qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
		if (qp->ibqp.send_cq != qp->ibqp.recv_cq)
			mthca_cq_clean(dev, to_mcq(qp->ibqp.recv_cq), qp->qpn,
				       qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);

		mthca_wq_reset(&qp->sq);
		qp->sq.last = get_send_wqe(qp, qp->sq.max - 1);

		mthca_wq_reset(&qp->rq);
		qp->rq.last = get_recv_wqe(qp, qp->rq.max - 1);

		if (mthca_is_memfree(dev)) {
			*qp->sq.db = 0;
			*qp->rq.db = 0;
		}
	}

out_mailbox:
	mthca_free_mailbox(dev, mailbox);

out:
	mutex_unlock(&qp->mutex);
	return err;
}

static int mthca_max_data_size(struct mthca_dev *dev, struct mthca_qp *qp, int desc_sz)
{
	/*
	 * Calculate the maximum size of WQE s/g segments, excluding
	 * the next segment and other non-data segments.
	 */
	int max_data_size = desc_sz - sizeof (struct mthca_next_seg);

	switch (qp->transport) {
	case MLX:
		max_data_size -= 2 * sizeof (struct mthca_data_seg);
		break;

	case UD:
		if (mthca_is_memfree(dev))
			max_data_size -= sizeof (struct mthca_arbel_ud_seg);
		else
			max_data_size -= sizeof (struct mthca_tavor_ud_seg);
		break;

	default:
		max_data_size -= sizeof (struct mthca_raddr_seg);
		break;
	}

	return max_data_size;
}

static inline int mthca_max_inline_data(struct mthca_pd *pd, int max_data_size)
{
	/* We don't support inline data for kernel QPs (yet). */
	return pd->ibpd.uobject ? max_data_size - MTHCA_INLINE_HEADER_SIZE : 0;
}

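/*
 * Once the WQE shifts are known, recompute the per-queue limits that
 * are actually achievable: max s/g entries per WQE and, for userspace
 * QPs, the maximum inline data size.
 */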
static void mthca_adjust_qp_caps(struct mthca_dev *dev,
				 struct mthca_pd *pd,
				 struct mthca_qp *qp)
{
	int max_data_size = mthca_max_data_size(dev, qp,
						min(dev->limits.max_desc_sz,
						    1 << qp->sq.wqe_shift));

	qp->max_inline_data = mthca_max_inline_data(pd, max_data_size);

	qp->sq.max_gs = min_t(int, dev->limits.max_sg,
			      max_data_size / sizeof (struct mthca_data_seg));
	qp->rq.max_gs = min_t(int, dev->limits.max_sg,
			      (min(dev->limits.max_desc_sz, 1 << qp->rq.wqe_shift) -
			       sizeof (struct mthca_next_seg)) /
			      sizeof (struct mthca_data_seg));
}

/*
 * Allocate and register buffer for WQEs.  qp->rq.max, sq.max,
 * rq.max_gs and sq.max_gs must all be assigned.
 * mthca_alloc_wqe_buf will calculate rq.wqe_shift and
 * sq.wqe_shift (as well as send_wqe_offset, is_direct, and
 * queue)
 */
static int mthca_alloc_wqe_buf(struct mthca_dev *dev,
			       struct mthca_pd *pd,
			       struct mthca_qp *qp)
{
	int size;
	int err = -ENOMEM;

	size = sizeof (struct mthca_next_seg) +
		qp->rq.max_gs * sizeof (struct mthca_data_seg);

	if (size > dev->limits.max_desc_sz)
		return -EINVAL;

	for (qp->rq.wqe_shift = 6; 1 << qp->rq.wqe_shift < size;
	     qp->rq.wqe_shift++)
		; /* nothing */

	size = qp->sq.max_gs * sizeof (struct mthca_data_seg);
	switch (qp->transport) {
	case MLX:
		size += 2 * sizeof (struct mthca_data_seg);
		break;

	case UD:
		size += mthca_is_memfree(dev) ?
			sizeof (struct mthca_arbel_ud_seg) :
			sizeof (struct mthca_tavor_ud_seg);
		break;

	case UC:
		size += sizeof (struct mthca_raddr_seg);
		break;

	case RC:
		size += sizeof (struct mthca_raddr_seg);
		/*
		 * An atomic op will require an atomic segment, a
		 * remote address segment and one scatter entry.
		 */
		size = max_t(int, size,
			     sizeof (struct mthca_atomic_seg) +
			     sizeof (struct mthca_raddr_seg) +
			     sizeof (struct mthca_data_seg));
		break;

	default:
		break;
	}

	/* Make sure that we have enough space for a bind request */
	size = max_t(int, size, sizeof (struct mthca_bind_seg));

	size += sizeof (struct mthca_next_seg);

	if (size > dev->limits.max_desc_sz)
		return -EINVAL;

	for (qp->sq.wqe_shift = 6; 1 << qp->sq.wqe_shift < size;
	     qp->sq.wqe_shift++)
		; /* nothing */

	qp->send_wqe_offset = ALIGN(qp->rq.max << qp->rq.wqe_shift,
				    1 << qp->sq.wqe_shift);

	/*
	 * If this is a userspace QP, we don't actually have to
	 * allocate anything.  All we need is to calculate the WQE
	 * sizes and the send_wqe_offset, so we're done now.
	 */
	if (pd->ibpd.uobject)
		return 0;

	size = PAGE_ALIGN(qp->send_wqe_offset +
			  (qp->sq.max << qp->sq.wqe_shift));

	qp->wrid = kmalloc((qp->rq.max + qp->sq.max) * sizeof (u64),
			   GFP_KERNEL);
	if (!qp->wrid)
		goto err_out;

	err = mthca_buf_alloc(dev, size, MTHCA_MAX_DIRECT_QP_SIZE,
			      &qp->queue, &qp->is_direct, pd, 0, &qp->mr);
	if (err)
		goto err_out;

	return 0;

err_out:
	kfree(qp->wrid);
	return err;
}

static void mthca_free_wqe_buf(struct mthca_dev *dev,
			       struct mthca_qp *qp)
{
	mthca_buf_free(dev, PAGE_ALIGN(qp->send_wqe_offset +
				       (qp->sq.max << qp->sq.wqe_shift)),
		       &qp->queue, qp->is_direct, &qp->mr);
	kfree(qp->wrid);
}

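/*
 * On mem-free (Arbel-mode) HCAs the QP, extended QP and RDB context
 * tables live in host memory, so entries must be mapped for this QPN
 * before it can be used; on Tavor-mode devices mthca_is_memfree()
 * is false and all of this short-circuits.
 */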
static int mthca_map_memfree(struct mthca_dev *dev,
			     struct mthca_qp *qp)
{
	int ret;

	if (mthca_is_memfree(dev)) {
		ret = mthca_table_get(dev, dev->qp_table.qp_table, qp->qpn);
		if (ret)
			return ret;

		ret = mthca_table_get(dev, dev->qp_table.eqp_table, qp->qpn);
		if (ret)
			goto err_qpc;

		ret = mthca_table_get(dev, dev->qp_table.rdb_table,
				      qp->qpn << dev->qp_table.rdb_shift);
		if (ret)
			goto err_eqpc;
	}

	return 0;

err_eqpc:
	mthca_table_put(dev, dev->qp_table.eqp_table, qp->qpn);

err_qpc:
	mthca_table_put(dev, dev->qp_table.qp_table, qp->qpn);

	return ret;
}

static void mthca_unmap_memfree(struct mthca_dev *dev,
				struct mthca_qp *qp)
{
	mthca_table_put(dev, dev->qp_table.rdb_table,
			qp->qpn << dev->qp_table.rdb_shift);
	mthca_table_put(dev, dev->qp_table.eqp_table, qp->qpn);
	mthca_table_put(dev, dev->qp_table.qp_table, qp->qpn);
}

static int mthca_alloc_memfree(struct mthca_dev *dev,
			       struct mthca_qp *qp)
{
	if (mthca_is_memfree(dev)) {
		qp->rq.db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_RQ,
						 qp->qpn, &qp->rq.db);
		if (qp->rq.db_index < 0)
			return qp->rq.db_index;

		qp->sq.db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_SQ,
						 qp->qpn, &qp->sq.db);
		if (qp->sq.db_index < 0) {
			mthca_free_db(dev, MTHCA_DB_TYPE_RQ, qp->rq.db_index);
			return qp->sq.db_index;
		}
	}

	return 0;
}

static void mthca_free_memfree(struct mthca_dev *dev,
			       struct mthca_qp *qp)
{
	if (mthca_is_memfree(dev)) {
		mthca_free_db(dev, MTHCA_DB_TYPE_SQ, qp->sq.db_index);
		mthca_free_db(dev, MTHCA_DB_TYPE_RQ, qp->rq.db_index);
	}
}

static int mthca_alloc_qp_common(struct mthca_dev *dev,
				 struct mthca_pd *pd,
				 struct mthca_cq *send_cq,
				 struct mthca_cq *recv_cq,
				 enum ib_sig_type send_policy,
				 struct mthca_qp *qp)
{
	int ret;
	int i;

	qp->refcount = 1;
	init_waitqueue_head(&qp->wait);
	mutex_init(&qp->mutex);
	qp->state        = IB_QPS_RESET;
	qp->atomic_rd_en = 0;
	qp->resp_depth   = 0;
	qp->sq_policy    = send_policy;
	mthca_wq_reset(&qp->sq);
	mthca_wq_reset(&qp->rq);

	spin_lock_init(&qp->sq.lock);
	spin_lock_init(&qp->rq.lock);

	ret = mthca_map_memfree(dev, qp);
	if (ret)
		return ret;

	ret = mthca_alloc_wqe_buf(dev, pd, qp);
	if (ret) {
		mthca_unmap_memfree(dev, qp);
		return ret;
	}

	mthca_adjust_qp_caps(dev, pd, qp);

	/*
	 * If this is a userspace QP, we're done now.  The doorbells
	 * will be allocated and buffers will be initialized in
	 * userspace.
	 */
	if (pd->ibpd.uobject)
		return 0;

	ret = mthca_alloc_memfree(dev, qp);
	if (ret) {
		mthca_free_wqe_buf(dev, qp);
		mthca_unmap_memfree(dev, qp);
		return ret;
	}

	if (mthca_is_memfree(dev)) {
		struct mthca_next_seg *next;
		struct mthca_data_seg *scatter;
		int size = (sizeof (struct mthca_next_seg) +
			    qp->rq.max_gs * sizeof (struct mthca_data_seg)) / 16;

		for (i = 0; i < qp->rq.max; ++i) {
			next = get_recv_wqe(qp, i);
			next->nda_op = cpu_to_be32(((i + 1) & (qp->rq.max - 1)) <<
						   qp->rq.wqe_shift);
			next->ee_nds = cpu_to_be32(size);

			for (scatter = (void *) (next + 1);
			     (void *) scatter < (void *) next + (1 << qp->rq.wqe_shift);
			     ++scatter)
				scatter->lkey = cpu_to_be32(MTHCA_INVAL_LKEY);
		}

		for (i = 0; i < qp->sq.max; ++i) {
			next = get_send_wqe(qp, i);
			next->nda_op = cpu_to_be32((((i + 1) & (qp->sq.max - 1)) <<
						    qp->sq.wqe_shift) +
						   qp->send_wqe_offset);
		}
	}

	qp->sq.last = get_send_wqe(qp, qp->sq.max - 1);
	qp->rq.last = get_recv_wqe(qp, qp->rq.max - 1);

	return 0;
}

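/*
 * Validate the requested QP capabilities against device limits and
 * store the resulting queue sizes.  Mem-free HCAs require work queue
 * sizes to be a power of two, so the requested WR counts are rounded
 * up there.
 */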
static int mthca_set_qp_size(struct mthca_dev *dev, struct ib_qp_cap *cap,
			     struct mthca_pd *pd, struct mthca_qp *qp)
{
	int max_data_size = mthca_max_data_size(dev, qp, dev->limits.max_desc_sz);

	/* Sanity check QP size before proceeding */
	if (cap->max_send_wr     > dev->limits.max_wqes ||
	    cap->max_recv_wr     > dev->limits.max_wqes ||
	    cap->max_send_sge    > dev->limits.max_sg   ||
	    cap->max_recv_sge    > dev->limits.max_sg   ||
	    cap->max_inline_data > mthca_max_inline_data(pd, max_data_size))
		return -EINVAL;

	/*
	 * For MLX transport we need 2 extra S/G entries:
	 * one for the header and one for the checksum at the end
	 */
	if (qp->transport == MLX && cap->max_recv_sge + 2 > dev->limits.max_sg)
		return -EINVAL;

	if (mthca_is_memfree(dev)) {
		qp->rq.max = cap->max_recv_wr ?
			roundup_pow_of_two(cap->max_recv_wr) : 0;
		qp->sq.max = cap->max_send_wr ?
			roundup_pow_of_two(cap->max_send_wr) : 0;
	} else {
		qp->rq.max = cap->max_recv_wr;
		qp->sq.max = cap->max_send_wr;
	}

	qp->rq.max_gs = cap->max_recv_sge;
	qp->sq.max_gs = max_t(int, cap->max_send_sge,
			      ALIGN(cap->max_inline_data + MTHCA_INLINE_HEADER_SIZE,
				    MTHCA_INLINE_CHUNK_SIZE) /
			      sizeof (struct mthca_data_seg));

	return 0;
}

int mthca_alloc_qp(struct mthca_dev *dev,
		   struct mthca_pd *pd,
		   struct mthca_cq *send_cq,
		   struct mthca_cq *recv_cq,
		   enum ib_qp_type type,
		   enum ib_sig_type send_policy,
		   struct ib_qp_cap *cap,
		   struct mthca_qp *qp)
{
	int err;

	switch (type) {
	case IB_QPT_RC: qp->transport = RC; break;
	case IB_QPT_UC: qp->transport = UC; break;
	case IB_QPT_UD: qp->transport = UD; break;
	default: return -EINVAL;
	}

	err = mthca_set_qp_size(dev, cap, pd, qp);
	if (err)
		return err;

	qp->qpn = mthca_alloc(&dev->qp_table.alloc);
	if (qp->qpn == -1)
		return -ENOMEM;

	/* initialize port to zero for error-catching. */
	qp->port = 0;

	err = mthca_alloc_qp_common(dev, pd, send_cq, recv_cq,
				    send_policy, qp);
	if (err) {
		mthca_free(&dev->qp_table.alloc, qp->qpn);
		return err;
	}

	spin_lock_irq(&dev->qp_table.lock);
	mthca_array_set(&dev->qp_table.qp,
			qp->qpn & (dev->limits.num_qps - 1), qp);
	spin_unlock_irq(&dev->qp_table.lock);

	return 0;
}

int mthca_alloc_sqp(struct mthca_dev *dev,
		    struct mthca_pd *pd,
		    struct mthca_cq *send_cq,
		    struct mthca_cq *recv_cq,
		    enum ib_sig_type send_policy,
		    struct ib_qp_cap *cap,
		    int qpn,
		    int port,
		    struct mthca_sqp *sqp)
{
	u32 mqpn = qpn * 2 + dev->qp_table.sqp_start + port - 1;
	int err;

	sqp->qp.transport = MLX;
	err = mthca_set_qp_size(dev, cap, pd, &sqp->qp);
	if (err)
		return err;

	sqp->header_buf_size = sqp->qp.sq.max * MTHCA_UD_HEADER_SIZE;
	sqp->header_buf = dma_alloc_coherent(&dev->pdev->dev, sqp->header_buf_size,
					     &sqp->header_dma, GFP_KERNEL);
	if (!sqp->header_buf)
		return -ENOMEM;

	spin_lock_irq(&dev->qp_table.lock);
	if (mthca_array_get(&dev->qp_table.qp, mqpn))
		err = -EBUSY;
	else
		mthca_array_set(&dev->qp_table.qp, mqpn, sqp);
	spin_unlock_irq(&dev->qp_table.lock);

	if (err)
		goto err_out;

	sqp->qp.port      = port;
	sqp->qp.qpn       = mqpn;
	sqp->qp.transport = MLX;

	err = mthca_alloc_qp_common(dev, pd, send_cq, recv_cq,
				    send_policy, &sqp->qp);
	if (err)
		goto err_out_free;

	atomic_inc(&pd->sqp_count);

	return 0;

 err_out_free:
	/*
	 * Lock CQs here, so that CQ polling code can do QP lookup
	 * without taking a lock.
	 */
	spin_lock_irq(&send_cq->lock);
	if (send_cq != recv_cq)
		spin_lock(&recv_cq->lock);

	spin_lock(&dev->qp_table.lock);
	mthca_array_clear(&dev->qp_table.qp, mqpn);
	spin_unlock(&dev->qp_table.lock);

	if (send_cq != recv_cq)
		spin_unlock(&recv_cq->lock);
	spin_unlock_irq(&send_cq->lock);

 err_out:
	dma_free_coherent(&dev->pdev->dev, sqp->header_buf_size,
			  sqp->header_buf, sqp->header_dma);

	return err;
}

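/*
 * qp->refcount is protected by the QP table lock rather than being an
 * atomic type; this helper takes a consistent snapshot of it for the
 * wait_event() in mthca_free_qp().
 */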
Roland Dreiera3285aa2006-05-09 10:50:29 -07001333static inline int get_qp_refcount(struct mthca_dev *dev, struct mthca_qp *qp)
1334{
1335 int c;
1336
1337 spin_lock_irq(&dev->qp_table.lock);
1338 c = qp->refcount;
1339 spin_unlock_irq(&dev->qp_table.lock);
1340
1341 return c;
1342}
1343
Linus Torvalds1da177e2005-04-16 15:20:36 -07001344void mthca_free_qp(struct mthca_dev *dev,
1345 struct mthca_qp *qp)
1346{
1347 u8 status;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001348 struct mthca_cq *send_cq;
1349 struct mthca_cq *recv_cq;
1350
1351 send_cq = to_mcq(qp->ibqp.send_cq);
1352 recv_cq = to_mcq(qp->ibqp.recv_cq);
1353
1354 /*
1355 * Lock CQs here, so that CQ polling code can do QP lookup
1356 * without taking a lock.
1357 */
1358 spin_lock_irq(&send_cq->lock);
1359 if (send_cq != recv_cq)
1360 spin_lock(&recv_cq->lock);
1361
1362 spin_lock(&dev->qp_table.lock);
1363 mthca_array_clear(&dev->qp_table.qp,
1364 qp->qpn & (dev->limits.num_qps - 1));
Roland Dreiera3285aa2006-05-09 10:50:29 -07001365 --qp->refcount;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001366 spin_unlock(&dev->qp_table.lock);
1367
1368 if (send_cq != recv_cq)
1369 spin_unlock(&recv_cq->lock);
1370 spin_unlock_irq(&send_cq->lock);
1371
	wait_event(qp->wait, !get_qp_refcount(dev, qp));

	if (qp->state != IB_QPS_RESET)
		mthca_MODIFY_QP(dev, qp->state, IB_QPS_RESET, qp->qpn, 0,
				NULL, 0, &status);

	/*
	 * If this is a userspace QP, the buffers, MR, CQs and so on
	 * will be cleaned up in userspace, so all we have to do is
	 * unref the mem-free tables and free the QPN in our table.
	 */
	if (!qp->ibqp.uobject) {
		mthca_cq_clean(dev, to_mcq(qp->ibqp.send_cq), qp->qpn,
			       qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
		if (qp->ibqp.send_cq != qp->ibqp.recv_cq)
			mthca_cq_clean(dev, to_mcq(qp->ibqp.recv_cq), qp->qpn,
				       qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);

		mthca_free_memfree(dev, qp);
		mthca_free_wqe_buf(dev, qp);
	}

	mthca_unmap_memfree(dev, qp);

	if (is_sqp(dev, qp)) {
		atomic_dec(&(to_mpd(qp->ibqp.pd)->sqp_count));
		dma_free_coherent(&dev->pdev->dev,
				  to_msqp(qp)->header_buf_size,
				  to_msqp(qp)->header_buf,
				  to_msqp(qp)->header_dma);
	} else
		mthca_free(&dev->qp_table.alloc, qp->qpn);
}

/* Create UD header for an MLX send and build a data segment for it */
static int build_mlx_header(struct mthca_dev *dev, struct mthca_sqp *sqp,
			    int ind, struct ib_send_wr *wr,
			    struct mthca_mlx_seg *mlx,
			    struct mthca_data_seg *data)
{
	int header_size;
	int err;
	u16 pkey;

	ib_ud_header_init(256, /* assume a MAD */
			  mthca_ah_grh_present(to_mah(wr->wr.ud.ah)),
			  &sqp->ud_header);

	err = mthca_read_ah(dev, to_mah(wr->wr.ud.ah), &sqp->ud_header);
	if (err)
		return err;
	mlx->flags &= ~cpu_to_be32(MTHCA_NEXT_SOLICIT | 1);
	mlx->flags |= cpu_to_be32((!sqp->qp.ibqp.qp_num ? MTHCA_MLX_VL15 : 0) |
				  (sqp->ud_header.lrh.destination_lid ==
				   IB_LID_PERMISSIVE ? MTHCA_MLX_SLR : 0) |
				  (sqp->ud_header.lrh.service_level << 8));
	mlx->rlid = sqp->ud_header.lrh.destination_lid;
	mlx->vcrc = 0;

	switch (wr->opcode) {
	case IB_WR_SEND:
		sqp->ud_header.bth.opcode = IB_OPCODE_UD_SEND_ONLY;
		sqp->ud_header.immediate_present = 0;
		break;
	case IB_WR_SEND_WITH_IMM:
		sqp->ud_header.bth.opcode = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE;
		sqp->ud_header.immediate_present = 1;
		sqp->ud_header.immediate_data = wr->imm_data;
		break;
	default:
		return -EINVAL;
	}

	sqp->ud_header.lrh.virtual_lane = !sqp->qp.ibqp.qp_num ? 15 : 0;
	if (sqp->ud_header.lrh.destination_lid == IB_LID_PERMISSIVE)
		sqp->ud_header.lrh.source_lid = IB_LID_PERMISSIVE;
	sqp->ud_header.bth.solicited_event = !!(wr->send_flags & IB_SEND_SOLICITED);
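	/*
	 * QP0 (the SMI QP) always uses the P_Key at its own cached
	 * index, while QP1 (the GSI QP) uses the P_Key index supplied
	 * in the work request.
	 */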
	if (!sqp->qp.ibqp.qp_num)
		ib_get_cached_pkey(&dev->ib_dev, sqp->qp.port,
				   sqp->pkey_index, &pkey);
	else
		ib_get_cached_pkey(&dev->ib_dev, sqp->qp.port,
				   wr->wr.ud.pkey_index, &pkey);
	sqp->ud_header.bth.pkey = cpu_to_be16(pkey);
	sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->wr.ud.remote_qpn);
	sqp->ud_header.bth.psn = cpu_to_be32((sqp->send_psn++) & ((1 << 24) - 1));
	sqp->ud_header.deth.qkey = cpu_to_be32(wr->wr.ud.remote_qkey & 0x80000000 ?
					       sqp->qkey : wr->wr.ud.remote_qkey);
	sqp->ud_header.deth.source_qpn = cpu_to_be32(sqp->qp.ibqp.qp_num);

	header_size = ib_ud_header_pack(&sqp->ud_header,
					sqp->header_buf +
					ind * MTHCA_UD_HEADER_SIZE);

	data->byte_count = cpu_to_be32(header_size);
	data->lkey = cpu_to_be32(to_mpd(sqp->qp.ibqp.pd)->ntmr.ibmr.lkey);
	data->addr = cpu_to_be64(sqp->header_dma +
				 ind * MTHCA_UD_HEADER_SIZE);

	return 0;
}

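/*
 * Check whether posting nreq more WQEs would overflow the work queue.
 * The unlocked head/tail read is a cheap fast path; on apparent
 * overflow the count is re-read under the CQ lock, which serializes
 * against the completion path that frees up queue entries.
 */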
static inline int mthca_wq_overflow(struct mthca_wq *wq, int nreq,
				    struct ib_cq *ib_cq)
{
	unsigned cur;
	struct mthca_cq *cq;

	cur = wq->head - wq->tail;
	if (likely(cur + nreq < wq->max))
		return 0;

	cq = to_mcq(ib_cq);
	spin_lock(&cq->lock);
	cur = wq->head - wq->tail;
	spin_unlock(&cq->lock);

	return cur + nreq >= wq->max;
}

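/*
 * Post a list of send work requests on a Tavor-mode QP.  Each WQE is
 * built in place and linked from the previous WQE (nda_op/ee_nds) so
 * the hardware can follow the chain; a single MMIO doorbell at the end
 * then points at the first new WQE and gives its size and opcode.
 */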
int mthca_tavor_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
			  struct ib_send_wr **bad_wr)
{
	struct mthca_dev *dev = to_mdev(ibqp->device);
	struct mthca_qp *qp = to_mqp(ibqp);
	void *wqe;
	void *prev_wqe;
	unsigned long flags;
	int err = 0;
	int nreq;
	int i;
	int size;
	int size0 = 0;
	u32 f0 = 0;
	int ind;
	u8 op0 = 0;

	spin_lock_irqsave(&qp->sq.lock, flags);

	/* XXX check that state is OK to post send */

	ind = qp->sq.next_ind;

	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		if (mthca_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {
			mthca_err(dev, "SQ %06x full (%u head, %u tail,"
					" %d max, %d nreq)\n", qp->qpn,
					qp->sq.head, qp->sq.tail,
					qp->sq.max, nreq);
			err = -ENOMEM;
			*bad_wr = wr;
			goto out;
		}

		wqe = get_send_wqe(qp, ind);
		prev_wqe = qp->sq.last;
		qp->sq.last = wqe;

		((struct mthca_next_seg *) wqe)->nda_op = 0;
		((struct mthca_next_seg *) wqe)->ee_nds = 0;
		((struct mthca_next_seg *) wqe)->flags =
			((wr->send_flags & IB_SEND_SIGNALED) ?
			 cpu_to_be32(MTHCA_NEXT_CQ_UPDATE) : 0) |
			((wr->send_flags & IB_SEND_SOLICITED) ?
			 cpu_to_be32(MTHCA_NEXT_SOLICIT) : 0) |
			cpu_to_be32(1);
		if (wr->opcode == IB_WR_SEND_WITH_IMM ||
		    wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM)
			((struct mthca_next_seg *) wqe)->imm = wr->imm_data;

		wqe += sizeof (struct mthca_next_seg);
		size = sizeof (struct mthca_next_seg) / 16;

		switch (qp->transport) {
		case RC:
			switch (wr->opcode) {
			case IB_WR_ATOMIC_CMP_AND_SWP:
			case IB_WR_ATOMIC_FETCH_AND_ADD:
				((struct mthca_raddr_seg *) wqe)->raddr =
					cpu_to_be64(wr->wr.atomic.remote_addr);
				((struct mthca_raddr_seg *) wqe)->rkey =
					cpu_to_be32(wr->wr.atomic.rkey);
				((struct mthca_raddr_seg *) wqe)->reserved = 0;

				wqe += sizeof (struct mthca_raddr_seg);

				if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
					((struct mthca_atomic_seg *) wqe)->swap_add =
						cpu_to_be64(wr->wr.atomic.swap);
					((struct mthca_atomic_seg *) wqe)->compare =
						cpu_to_be64(wr->wr.atomic.compare_add);
				} else {
					((struct mthca_atomic_seg *) wqe)->swap_add =
						cpu_to_be64(wr->wr.atomic.compare_add);
					((struct mthca_atomic_seg *) wqe)->compare = 0;
				}

				wqe += sizeof (struct mthca_atomic_seg);
				size += (sizeof (struct mthca_raddr_seg) +
					 sizeof (struct mthca_atomic_seg)) / 16;
				break;

			case IB_WR_RDMA_WRITE:
			case IB_WR_RDMA_WRITE_WITH_IMM:
			case IB_WR_RDMA_READ:
				((struct mthca_raddr_seg *) wqe)->raddr =
					cpu_to_be64(wr->wr.rdma.remote_addr);
				((struct mthca_raddr_seg *) wqe)->rkey =
					cpu_to_be32(wr->wr.rdma.rkey);
				((struct mthca_raddr_seg *) wqe)->reserved = 0;
				wqe += sizeof (struct mthca_raddr_seg);
				size += sizeof (struct mthca_raddr_seg) / 16;
				break;

			default:
				/* No extra segments required for sends */
				break;
			}

			break;

		case UC:
			switch (wr->opcode) {
			case IB_WR_RDMA_WRITE:
			case IB_WR_RDMA_WRITE_WITH_IMM:
				((struct mthca_raddr_seg *) wqe)->raddr =
					cpu_to_be64(wr->wr.rdma.remote_addr);
				((struct mthca_raddr_seg *) wqe)->rkey =
					cpu_to_be32(wr->wr.rdma.rkey);
				((struct mthca_raddr_seg *) wqe)->reserved = 0;
				wqe += sizeof (struct mthca_raddr_seg);
				size += sizeof (struct mthca_raddr_seg) / 16;
				break;

			default:
				/* No extra segments required for sends */
				break;
			}

			break;

		case UD:
			((struct mthca_tavor_ud_seg *) wqe)->lkey =
				cpu_to_be32(to_mah(wr->wr.ud.ah)->key);
			((struct mthca_tavor_ud_seg *) wqe)->av_addr =
				cpu_to_be64(to_mah(wr->wr.ud.ah)->avdma);
			((struct mthca_tavor_ud_seg *) wqe)->dqpn =
				cpu_to_be32(wr->wr.ud.remote_qpn);
			((struct mthca_tavor_ud_seg *) wqe)->qkey =
				cpu_to_be32(wr->wr.ud.remote_qkey);

			wqe += sizeof (struct mthca_tavor_ud_seg);
			size += sizeof (struct mthca_tavor_ud_seg) / 16;
			break;

		case MLX:
			err = build_mlx_header(dev, to_msqp(qp), ind, wr,
					       wqe - sizeof (struct mthca_next_seg),
					       wqe);
			if (err) {
				*bad_wr = wr;
				goto out;
			}
			wqe += sizeof (struct mthca_data_seg);
			size += sizeof (struct mthca_data_seg) / 16;
			break;
		}

		if (wr->num_sge > qp->sq.max_gs) {
			mthca_err(dev, "too many gathers\n");
			err = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		for (i = 0; i < wr->num_sge; ++i) {
			((struct mthca_data_seg *) wqe)->byte_count =
				cpu_to_be32(wr->sg_list[i].length);
			((struct mthca_data_seg *) wqe)->lkey =
				cpu_to_be32(wr->sg_list[i].lkey);
			((struct mthca_data_seg *) wqe)->addr =
				cpu_to_be64(wr->sg_list[i].addr);
			wqe += sizeof (struct mthca_data_seg);
			size += sizeof (struct mthca_data_seg) / 16;
		}

		/* Add one more inline data segment for ICRC */
		if (qp->transport == MLX) {
			((struct mthca_data_seg *) wqe)->byte_count =
				cpu_to_be32((1 << 31) | 4);
			((u32 *) wqe)[1] = 0;
			wqe += sizeof (struct mthca_data_seg);
			size += sizeof (struct mthca_data_seg) / 16;
		}

		qp->wrid[ind + qp->rq.max] = wr->wr_id;

		if (wr->opcode >= ARRAY_SIZE(mthca_opcode)) {
			mthca_err(dev, "opcode invalid\n");
			err = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		((struct mthca_next_seg *) prev_wqe)->nda_op =
			cpu_to_be32(((ind << qp->sq.wqe_shift) +
				     qp->send_wqe_offset) |
				    mthca_opcode[wr->opcode]);
		wmb();
		((struct mthca_next_seg *) prev_wqe)->ee_nds =
			cpu_to_be32((size0 ? 0 : MTHCA_NEXT_DBD) | size |
				    ((wr->send_flags & IB_SEND_FENCE) ?
				     MTHCA_NEXT_FENCE : 0));

		if (!size0) {
			size0 = size;
			op0 = mthca_opcode[wr->opcode];
		}

		++ind;
		if (unlikely(ind >= qp->sq.max))
			ind -= qp->sq.max;
	}

out:
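	/*
	 * One doorbell covers the whole chain posted above: word 0 holds
	 * the offset and opcode of the first new WQE, word 1 the QP
	 * number and the first WQE's size in 16-byte units.
	 */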
	if (likely(nreq)) {
		__be32 doorbell[2];

		doorbell[0] = cpu_to_be32(((qp->sq.next_ind << qp->sq.wqe_shift) +
					   qp->send_wqe_offset) | f0 | op0);
		doorbell[1] = cpu_to_be32((qp->qpn << 8) | size0);

		wmb();

		mthca_write64(doorbell,
			      dev->kar + MTHCA_SEND_DOORBELL,
			      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
	}

	qp->sq.next_ind = ind;
	qp->sq.head += nreq;

	spin_unlock_irqrestore(&qp->sq.lock, flags);
	return err;
}

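/*
 * Post a list of receive work requests on a Tavor-mode QP.  The flow
 * mirrors the send side, except that doorbells are also rung inside
 * the loop in fixed-size batches, since one receive doorbell can
 * apparently credit only a limited number of WQEs at once.
 */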
int mthca_tavor_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
			     struct ib_recv_wr **bad_wr)
{
	struct mthca_dev *dev = to_mdev(ibqp->device);
	struct mthca_qp *qp = to_mqp(ibqp);
	__be32 doorbell[2];
	unsigned long flags;
	int err = 0;
	int nreq;
	int i;
	int size;
	int size0 = 0;
	int ind;
	void *wqe;
	void *prev_wqe;

	spin_lock_irqsave(&qp->rq.lock, flags);

	/* XXX check that state is OK to post receive */

	ind = qp->rq.next_ind;

	for (nreq = 0; wr; wr = wr->next) {
		if (mthca_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) {
			mthca_err(dev, "RQ %06x full (%u head, %u tail,"
					" %d max, %d nreq)\n", qp->qpn,
					qp->rq.head, qp->rq.tail,
					qp->rq.max, nreq);
			err = -ENOMEM;
			*bad_wr = wr;
			goto out;
		}

		wqe = get_recv_wqe(qp, ind);
		prev_wqe = qp->rq.last;
		qp->rq.last = wqe;

		((struct mthca_next_seg *) wqe)->nda_op = 0;
		((struct mthca_next_seg *) wqe)->ee_nds =
			cpu_to_be32(MTHCA_NEXT_DBD);
		((struct mthca_next_seg *) wqe)->flags = 0;

		wqe += sizeof (struct mthca_next_seg);
		size = sizeof (struct mthca_next_seg) / 16;

		if (unlikely(wr->num_sge > qp->rq.max_gs)) {
			err = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		for (i = 0; i < wr->num_sge; ++i) {
			((struct mthca_data_seg *) wqe)->byte_count =
				cpu_to_be32(wr->sg_list[i].length);
			((struct mthca_data_seg *) wqe)->lkey =
				cpu_to_be32(wr->sg_list[i].lkey);
			((struct mthca_data_seg *) wqe)->addr =
				cpu_to_be64(wr->sg_list[i].addr);
			wqe += sizeof (struct mthca_data_seg);
			size += sizeof (struct mthca_data_seg) / 16;
		}

		qp->wrid[ind] = wr->wr_id;

		((struct mthca_next_seg *) prev_wqe)->nda_op =
			cpu_to_be32((ind << qp->rq.wqe_shift) | 1);
		wmb();
		((struct mthca_next_seg *) prev_wqe)->ee_nds =
			cpu_to_be32(MTHCA_NEXT_DBD | size);

		if (!size0)
			size0 = size;

		++ind;
		if (unlikely(ind >= qp->rq.max))
			ind -= qp->rq.max;

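		/*
		 * Ring the doorbell as soon as a full batch of
		 * MTHCA_TAVOR_MAX_WQES_PER_RECV_DB WQEs has been
		 * chained, then start a new batch.
		 */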
		++nreq;
		if (unlikely(nreq == MTHCA_TAVOR_MAX_WQES_PER_RECV_DB)) {
			nreq = 0;

			doorbell[0] = cpu_to_be32((qp->rq.next_ind << qp->rq.wqe_shift) | size0);
			doorbell[1] = cpu_to_be32(qp->qpn << 8);

			wmb();

			mthca_write64(doorbell,
				      dev->kar + MTHCA_RECEIVE_DOORBELL,
				      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));

			/*
			 * Advance the doorbell base past the batch just
			 * rung; otherwise the next doorbell would point
			 * back at already-posted WQEs.
			 */
			qp->rq.next_ind = ind;
			qp->rq.head += MTHCA_TAVOR_MAX_WQES_PER_RECV_DB;
			size0 = 0;
		}
	}

out:
	if (likely(nreq)) {
		doorbell[0] = cpu_to_be32((qp->rq.next_ind << qp->rq.wqe_shift) | size0);
		doorbell[1] = cpu_to_be32((qp->qpn << 8) | nreq);

		wmb();

		mthca_write64(doorbell,
			      dev->kar + MTHCA_RECEIVE_DOORBELL,
			      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
	}

	qp->rq.next_ind = ind;
	qp->rq.head += nreq;

	spin_unlock_irqrestore(&qp->rq.lock, flags);
	return err;
}

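/*
 * Post a list of send work requests on an Arbel (mem-free) QP.  Unlike
 * Tavor, this path also maintains a doorbell record in memory
 * (*qp->sq.db), which must be updated before the MMIO doorbell; the
 * paired wmb() calls below enforce that ordering.
 */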
int mthca_arbel_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
			  struct ib_send_wr **bad_wr)
{
	struct mthca_dev *dev = to_mdev(ibqp->device);
	struct mthca_qp *qp = to_mqp(ibqp);
	__be32 doorbell[2];
	void *wqe;
	void *prev_wqe;
	unsigned long flags;
	int err = 0;
	int nreq;
	int i;
	int size;
	int size0 = 0;
	u32 f0 = 0;
	int ind;
	u8 op0 = 0;

	spin_lock_irqsave(&qp->sq.lock, flags);

	/* XXX check that state is OK to post send */

	ind = qp->sq.head & (qp->sq.max - 1);

	for (nreq = 0; wr; ++nreq, wr = wr->next) {
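		/*
		 * Flush a full batch before building the next WQE:
		 * advance the doorbell record, then hit the MMIO send
		 * doorbell, exactly as in the post-loop flush below.
		 */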
		if (unlikely(nreq == MTHCA_ARBEL_MAX_WQES_PER_SEND_DB)) {
			nreq = 0;

			doorbell[0] = cpu_to_be32((MTHCA_ARBEL_MAX_WQES_PER_SEND_DB << 24) |
						  ((qp->sq.head & 0xffff) << 8) |
						  f0 | op0);
			doorbell[1] = cpu_to_be32((qp->qpn << 8) | size0);

			qp->sq.head += MTHCA_ARBEL_MAX_WQES_PER_SEND_DB;
			size0 = 0;

			/*
			 * Make sure that descriptors are written before
			 * doorbell record.
			 */
			wmb();
			*qp->sq.db = cpu_to_be32(qp->sq.head & 0xffff);

			/*
			 * Make sure doorbell record is written before we
			 * write MMIO send doorbell.
			 */
			wmb();
			mthca_write64(doorbell,
				      dev->kar + MTHCA_SEND_DOORBELL,
				      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
		}

		if (mthca_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {
			mthca_err(dev, "SQ %06x full (%u head, %u tail,"
					" %d max, %d nreq)\n", qp->qpn,
					qp->sq.head, qp->sq.tail,
					qp->sq.max, nreq);
			err = -ENOMEM;
			*bad_wr = wr;
			goto out;
		}

		wqe = get_send_wqe(qp, ind);
		prev_wqe = qp->sq.last;
		qp->sq.last = wqe;

		((struct mthca_next_seg *) wqe)->flags =
			((wr->send_flags & IB_SEND_SIGNALED) ?
			 cpu_to_be32(MTHCA_NEXT_CQ_UPDATE) : 0) |
			((wr->send_flags & IB_SEND_SOLICITED) ?
			 cpu_to_be32(MTHCA_NEXT_SOLICIT) : 0) |
			cpu_to_be32(1);
		if (wr->opcode == IB_WR_SEND_WITH_IMM ||
		    wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM)
			((struct mthca_next_seg *) wqe)->imm = wr->imm_data;

		wqe += sizeof (struct mthca_next_seg);
		size = sizeof (struct mthca_next_seg) / 16;

		switch (qp->transport) {
		case RC:
			switch (wr->opcode) {
			case IB_WR_ATOMIC_CMP_AND_SWP:
			case IB_WR_ATOMIC_FETCH_AND_ADD:
				((struct mthca_raddr_seg *) wqe)->raddr =
					cpu_to_be64(wr->wr.atomic.remote_addr);
				((struct mthca_raddr_seg *) wqe)->rkey =
					cpu_to_be32(wr->wr.atomic.rkey);
				((struct mthca_raddr_seg *) wqe)->reserved = 0;

				wqe += sizeof (struct mthca_raddr_seg);

				if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
					((struct mthca_atomic_seg *) wqe)->swap_add =
						cpu_to_be64(wr->wr.atomic.swap);
					((struct mthca_atomic_seg *) wqe)->compare =
						cpu_to_be64(wr->wr.atomic.compare_add);
				} else {
					((struct mthca_atomic_seg *) wqe)->swap_add =
						cpu_to_be64(wr->wr.atomic.compare_add);
					((struct mthca_atomic_seg *) wqe)->compare = 0;
				}

				wqe += sizeof (struct mthca_atomic_seg);
				size += (sizeof (struct mthca_raddr_seg) +
					 sizeof (struct mthca_atomic_seg)) / 16;
				break;

			case IB_WR_RDMA_READ:
			case IB_WR_RDMA_WRITE:
			case IB_WR_RDMA_WRITE_WITH_IMM:
				((struct mthca_raddr_seg *) wqe)->raddr =
					cpu_to_be64(wr->wr.rdma.remote_addr);
				((struct mthca_raddr_seg *) wqe)->rkey =
					cpu_to_be32(wr->wr.rdma.rkey);
				((struct mthca_raddr_seg *) wqe)->reserved = 0;
				wqe += sizeof (struct mthca_raddr_seg);
				size += sizeof (struct mthca_raddr_seg) / 16;
				break;

			default:
				/* No extra segments required for sends */
				break;
			}

			break;

		case UC:
			switch (wr->opcode) {
			case IB_WR_RDMA_WRITE:
			case IB_WR_RDMA_WRITE_WITH_IMM:
				((struct mthca_raddr_seg *) wqe)->raddr =
					cpu_to_be64(wr->wr.rdma.remote_addr);
				((struct mthca_raddr_seg *) wqe)->rkey =
					cpu_to_be32(wr->wr.rdma.rkey);
				((struct mthca_raddr_seg *) wqe)->reserved = 0;
				wqe += sizeof (struct mthca_raddr_seg);
				size += sizeof (struct mthca_raddr_seg) / 16;
				break;

			default:
				/* No extra segments required for sends */
				break;
			}

			break;

		case UD:
			memcpy(((struct mthca_arbel_ud_seg *) wqe)->av,
			       to_mah(wr->wr.ud.ah)->av, MTHCA_AV_SIZE);
			((struct mthca_arbel_ud_seg *) wqe)->dqpn =
				cpu_to_be32(wr->wr.ud.remote_qpn);
			((struct mthca_arbel_ud_seg *) wqe)->qkey =
				cpu_to_be32(wr->wr.ud.remote_qkey);

			wqe += sizeof (struct mthca_arbel_ud_seg);
			size += sizeof (struct mthca_arbel_ud_seg) / 16;
			break;

		case MLX:
			err = build_mlx_header(dev, to_msqp(qp), ind, wr,
					       wqe - sizeof (struct mthca_next_seg),
					       wqe);
			if (err) {
				*bad_wr = wr;
				goto out;
			}
			wqe += sizeof (struct mthca_data_seg);
			size += sizeof (struct mthca_data_seg) / 16;
			break;
		}

		if (wr->num_sge > qp->sq.max_gs) {
			mthca_err(dev, "too many gathers\n");
			err = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		for (i = 0; i < wr->num_sge; ++i) {
			((struct mthca_data_seg *) wqe)->byte_count =
				cpu_to_be32(wr->sg_list[i].length);
			((struct mthca_data_seg *) wqe)->lkey =
				cpu_to_be32(wr->sg_list[i].lkey);
			((struct mthca_data_seg *) wqe)->addr =
				cpu_to_be64(wr->sg_list[i].addr);
			wqe += sizeof (struct mthca_data_seg);
			size += sizeof (struct mthca_data_seg) / 16;
		}

		/* Add one more inline data segment for ICRC */
		if (qp->transport == MLX) {
			((struct mthca_data_seg *) wqe)->byte_count =
				cpu_to_be32((1 << 31) | 4);
			((u32 *) wqe)[1] = 0;
			wqe += sizeof (struct mthca_data_seg);
			size += sizeof (struct mthca_data_seg) / 16;
		}

		qp->wrid[ind + qp->rq.max] = wr->wr_id;

		if (wr->opcode >= ARRAY_SIZE(mthca_opcode)) {
			mthca_err(dev, "opcode invalid\n");
			err = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		((struct mthca_next_seg *) prev_wqe)->nda_op =
			cpu_to_be32(((ind << qp->sq.wqe_shift) +
				     qp->send_wqe_offset) |
				    mthca_opcode[wr->opcode]);
		wmb();
		((struct mthca_next_seg *) prev_wqe)->ee_nds =
			cpu_to_be32(MTHCA_NEXT_DBD | size |
				    ((wr->send_flags & IB_SEND_FENCE) ?
				     MTHCA_NEXT_FENCE : 0));

		if (!size0) {
			size0 = size;
			op0 = mthca_opcode[wr->opcode];
		}

		++ind;
		if (unlikely(ind >= qp->sq.max))
			ind -= qp->sq.max;
	}

out:
	if (likely(nreq)) {
		doorbell[0] = cpu_to_be32((nreq << 24) |
					  ((qp->sq.head & 0xffff) << 8) |
					  f0 | op0);
		doorbell[1] = cpu_to_be32((qp->qpn << 8) | size0);

		qp->sq.head += nreq;

		/*
		 * Make sure that descriptors are written before
		 * doorbell record.
		 */
		wmb();
		*qp->sq.db = cpu_to_be32(qp->sq.head & 0xffff);

		/*
		 * Make sure doorbell record is written before we
		 * write MMIO send doorbell.
		 */
		wmb();
		mthca_write64(doorbell,
			      dev->kar + MTHCA_SEND_DOORBELL,
			      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
	}

	spin_unlock_irqrestore(&qp->sq.lock, flags);
	return err;
}

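/*
 * Post a list of receive work requests on an Arbel (mem-free) QP.  No
 * MMIO doorbell is needed here: only the doorbell record in memory is
 * advanced, presumably because mem-free hardware fetches receive state
 * from that record directly.
 */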
int mthca_arbel_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
			     struct ib_recv_wr **bad_wr)
{
	struct mthca_dev *dev = to_mdev(ibqp->device);
	struct mthca_qp *qp = to_mqp(ibqp);
	unsigned long flags;
	int err = 0;
	int nreq;
	int ind;
	int i;
	void *wqe;

	spin_lock_irqsave(&qp->rq.lock, flags);

	/* XXX check that state is OK to post receive */

	ind = qp->rq.head & (qp->rq.max - 1);

	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		if (mthca_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) {
			mthca_err(dev, "RQ %06x full (%u head, %u tail,"
					" %d max, %d nreq)\n", qp->qpn,
					qp->rq.head, qp->rq.tail,
					qp->rq.max, nreq);
			err = -ENOMEM;
			*bad_wr = wr;
			goto out;
		}

		wqe = get_recv_wqe(qp, ind);

		((struct mthca_next_seg *) wqe)->flags = 0;

		wqe += sizeof (struct mthca_next_seg);

		if (unlikely(wr->num_sge > qp->rq.max_gs)) {
			err = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		for (i = 0; i < wr->num_sge; ++i) {
			((struct mthca_data_seg *) wqe)->byte_count =
				cpu_to_be32(wr->sg_list[i].length);
			((struct mthca_data_seg *) wqe)->lkey =
				cpu_to_be32(wr->sg_list[i].lkey);
			((struct mthca_data_seg *) wqe)->addr =
				cpu_to_be64(wr->sg_list[i].addr);
			wqe += sizeof (struct mthca_data_seg);
		}

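		/*
		 * If the scatter list is shorter than max_gs, terminate
		 * it with a zero-length sentinel segment carrying
		 * MTHCA_INVAL_LKEY so the end of the list is unambiguous.
		 */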
		if (i < qp->rq.max_gs) {
			((struct mthca_data_seg *) wqe)->byte_count = 0;
			((struct mthca_data_seg *) wqe)->lkey = cpu_to_be32(MTHCA_INVAL_LKEY);
			((struct mthca_data_seg *) wqe)->addr = 0;
		}

		qp->wrid[ind] = wr->wr_id;

		++ind;
		if (unlikely(ind >= qp->rq.max))
			ind -= qp->rq.max;
	}
out:
	if (likely(nreq)) {
		qp->rq.head += nreq;

		/*
		 * Make sure that descriptors are written before
		 * doorbell record.
		 */
		wmb();
		*qp->rq.db = cpu_to_be32(qp->rq.head & 0xffff);
	}

	spin_unlock_irqrestore(&qp->rq.lock, flags);
	return err;
}

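/*
 * Called for a WQE that completed in error: report whether the WQE had
 * the doorbell-chaining (DBD) bit set, and compute the nda_op/ee_nds
 * value the caller can use to link past it.
 */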
void mthca_free_err_wqe(struct mthca_dev *dev, struct mthca_qp *qp, int is_send,
			int index, int *dbd, __be32 *new_wqe)
{
	struct mthca_next_seg *next;

	/*
	 * For SRQs, all WQEs generate a CQE, so we're always at the
	 * end of the doorbell chain.
	 */
	if (qp->ibqp.srq) {
		*new_wqe = 0;
		return;
	}

	if (is_send)
		next = get_send_wqe(qp, index);
	else
		next = get_recv_wqe(qp, index);

	*dbd = !!(next->ee_nds & cpu_to_be32(MTHCA_NEXT_DBD));
	if (next->ee_nds & cpu_to_be32(0x3f))
		*new_wqe = (next->nda_op & cpu_to_be32(~0x3f)) |
			(next->ee_nds & cpu_to_be32(0x3f));
	else
		*new_wqe = 0;
}

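/*
 * Driver-init setup of the QP table: QP numbers are 24 bits, the low
 * QPN range is reserved (including two special QPs per port), and the
 * special SMI/GSI QPs are configured with the firmware.
 */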
int __devinit mthca_init_qp_table(struct mthca_dev *dev)
{
	int err;
	u8 status;
	int i;

	spin_lock_init(&dev->qp_table.lock);

	/*
	 * We reserve 2 extra QPs per port for the special QPs.  The
	 * special QP for port 1 has to be even, so round up.
	 */
	dev->qp_table.sqp_start = (dev->limits.reserved_qps + 1) & ~1UL;
	err = mthca_alloc_init(&dev->qp_table.alloc,
			       dev->limits.num_qps,
			       (1 << 24) - 1,
			       dev->qp_table.sqp_start +
			       MTHCA_MAX_PORTS * 2);
	if (err)
		return err;

	err = mthca_array_init(&dev->qp_table.qp,
			       dev->limits.num_qps);
	if (err) {
		mthca_alloc_cleanup(&dev->qp_table.alloc);
		return err;
	}

	for (i = 0; i < 2; ++i) {
		err = mthca_CONF_SPECIAL_QP(dev, i ? IB_QPT_GSI : IB_QPT_SMI,
					    dev->qp_table.sqp_start + i * 2,
					    &status);
		if (err)
			goto err_out;
		if (status) {
			mthca_warn(dev, "CONF_SPECIAL_QP returned "
				   "status %02x, aborting.\n",
				   status);
			err = -EINVAL;
			goto err_out;
		}
	}
	return 0;

 err_out:
	for (i = 0; i < 2; ++i)
		mthca_CONF_SPECIAL_QP(dev, i, 0, &status);

	mthca_array_cleanup(&dev->qp_table.qp, dev->limits.num_qps);
	mthca_alloc_cleanup(&dev->qp_table.alloc);

	return err;
}

void mthca_cleanup_qp_table(struct mthca_dev *dev)
{
	int i;
	u8 status;

	for (i = 0; i < 2; ++i)
		mthca_CONF_SPECIAL_QP(dev, i, 0, &status);

	mthca_array_cleanup(&dev->qp_table.qp, dev->limits.num_qps);
	mthca_alloc_cleanup(&dev->qp_table.alloc);
}