/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Cisco Systems. All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: mthca_qp.c 1355 2004-12-17 15:23:43Z roland $
 */

#include <linux/init.h>
#include <linux/string.h>
#include <linux/slab.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_cache.h>
#include <rdma/ib_pack.h>

#include "mthca_dev.h"
#include "mthca_cmd.h"
#include "mthca_memfree.h"
#include "mthca_wqe.h"

enum {
	MTHCA_MAX_DIRECT_QP_SIZE = 4 * PAGE_SIZE,
	MTHCA_ACK_REQ_FREQ       = 10,
	MTHCA_FLIGHT_LIMIT       = 9,
	MTHCA_UD_HEADER_SIZE     = 72, /* largest UD header possible */
	MTHCA_INLINE_HEADER_SIZE = 4,  /* data segment overhead for inline */
	MTHCA_INLINE_CHUNK_SIZE  = 16  /* inline data segment chunk */
};

enum {
	MTHCA_QP_STATE_RST      = 0,
	MTHCA_QP_STATE_INIT     = 1,
	MTHCA_QP_STATE_RTR      = 2,
	MTHCA_QP_STATE_RTS      = 3,
	MTHCA_QP_STATE_SQE      = 4,
	MTHCA_QP_STATE_SQD      = 5,
	MTHCA_QP_STATE_ERR      = 6,
	MTHCA_QP_STATE_DRAINING = 7
};

enum {
	MTHCA_QP_ST_RC  = 0x0,
	MTHCA_QP_ST_UC  = 0x1,
	MTHCA_QP_ST_RD  = 0x2,
	MTHCA_QP_ST_UD  = 0x3,
	MTHCA_QP_ST_MLX = 0x7
};

enum {
	MTHCA_QP_PM_MIGRATED = 0x3,
	MTHCA_QP_PM_ARMED    = 0x0,
	MTHCA_QP_PM_REARM    = 0x1
};

enum {
	/* qp_context flags */
	MTHCA_QP_BIT_DE  = 1 <<  8,
	/* params1 */
	MTHCA_QP_BIT_SRE = 1 << 15,
	MTHCA_QP_BIT_SWE = 1 << 14,
	MTHCA_QP_BIT_SAE = 1 << 13,
	MTHCA_QP_BIT_SIC = 1 <<  4,
	MTHCA_QP_BIT_SSC = 1 <<  3,
	/* params2 */
	MTHCA_QP_BIT_RRE = 1 << 15,
	MTHCA_QP_BIT_RWE = 1 << 14,
	MTHCA_QP_BIT_RAE = 1 << 13,
	MTHCA_QP_BIT_RIC = 1 <<  4,
	MTHCA_QP_BIT_RSC = 1 <<  3
};

struct mthca_qp_path {
	__be32 port_pkey;
	u8     rnr_retry;
	u8     g_mylmc;
	__be16 rlid;
	u8     ackto;
	u8     mgid_index;
	u8     static_rate;
	u8     hop_limit;
	__be32 sl_tclass_flowlabel;
	u8     rgid[16];
} __attribute__((packed));

struct mthca_qp_context {
	__be32 flags;
	__be32 tavor_sched_queue;	/* Reserved on Arbel */
	u8     mtu_msgmax;
	u8     rq_size_stride;		/* Reserved on Tavor */
	u8     sq_size_stride;		/* Reserved on Tavor */
	u8     rlkey_arbel_sched_queue;	/* Reserved on Tavor */
	__be32 usr_page;
	__be32 local_qpn;
	__be32 remote_qpn;
	u32    reserved1[2];
	struct mthca_qp_path pri_path;
	struct mthca_qp_path alt_path;
	__be32 rdd;
	__be32 pd;
	__be32 wqe_base;
	__be32 wqe_lkey;
	__be32 params1;
	__be32 reserved2;
	__be32 next_send_psn;
	__be32 cqn_snd;
	__be32 snd_wqe_base_l;	/* Next send WQE on Tavor */
	__be32 snd_db_index;	/* (debugging only entries) */
	__be32 last_acked_psn;
	__be32 ssn;
	__be32 params2;
	__be32 rnr_nextrecvpsn;
	__be32 ra_buff_indx;
	__be32 cqn_rcv;
	__be32 rcv_wqe_base_l;	/* Next recv WQE on Tavor */
	__be32 rcv_db_index;	/* (debugging only entries) */
	__be32 qkey;
	__be32 srqn;
	__be32 rmsn;
	__be16 rq_wqe_counter;	/* reserved on Tavor */
	__be16 sq_wqe_counter;	/* reserved on Tavor */
	u32    reserved3[18];
} __attribute__((packed));

struct mthca_qp_param {
	__be32 opt_param_mask;
	u32    reserved1;
	struct mthca_qp_context context;
	u32    reserved2[62];
} __attribute__((packed));

enum {
	MTHCA_QP_OPTPAR_ALT_ADDR_PATH     = 1 << 0,
	MTHCA_QP_OPTPAR_RRE               = 1 << 1,
	MTHCA_QP_OPTPAR_RAE               = 1 << 2,
	MTHCA_QP_OPTPAR_RWE               = 1 << 3,
	MTHCA_QP_OPTPAR_PKEY_INDEX        = 1 << 4,
	MTHCA_QP_OPTPAR_Q_KEY             = 1 << 5,
	MTHCA_QP_OPTPAR_RNR_TIMEOUT       = 1 << 6,
	MTHCA_QP_OPTPAR_PRIMARY_ADDR_PATH = 1 << 7,
	MTHCA_QP_OPTPAR_SRA_MAX           = 1 << 8,
	MTHCA_QP_OPTPAR_RRA_MAX           = 1 << 9,
	MTHCA_QP_OPTPAR_PM_STATE          = 1 << 10,
	MTHCA_QP_OPTPAR_PORT_NUM          = 1 << 11,
	MTHCA_QP_OPTPAR_RETRY_COUNT       = 1 << 12,
	MTHCA_QP_OPTPAR_ALT_RNR_RETRY     = 1 << 13,
	MTHCA_QP_OPTPAR_ACK_TIMEOUT       = 1 << 14,
	MTHCA_QP_OPTPAR_RNR_RETRY         = 1 << 15,
	MTHCA_QP_OPTPAR_SCHED_QUEUE       = 1 << 16
};

static const u8 mthca_opcode[] = {
	[IB_WR_SEND]                 = MTHCA_OPCODE_SEND,
	[IB_WR_SEND_WITH_IMM]        = MTHCA_OPCODE_SEND_IMM,
	[IB_WR_RDMA_WRITE]           = MTHCA_OPCODE_RDMA_WRITE,
	[IB_WR_RDMA_WRITE_WITH_IMM]  = MTHCA_OPCODE_RDMA_WRITE_IMM,
	[IB_WR_RDMA_READ]            = MTHCA_OPCODE_RDMA_READ,
	[IB_WR_ATOMIC_CMP_AND_SWP]   = MTHCA_OPCODE_ATOMIC_CS,
	[IB_WR_ATOMIC_FETCH_AND_ADD] = MTHCA_OPCODE_ATOMIC_FA,
};

static int is_sqp(struct mthca_dev *dev, struct mthca_qp *qp)
{
	return qp->qpn >= dev->qp_table.sqp_start &&
		qp->qpn <= dev->qp_table.sqp_start + 3;
}

static int is_qp0(struct mthca_dev *dev, struct mthca_qp *qp)
{
	return qp->qpn >= dev->qp_table.sqp_start &&
		qp->qpn <= dev->qp_table.sqp_start + 1;
}

static void *get_recv_wqe(struct mthca_qp *qp, int n)
{
	if (qp->is_direct)
		return qp->queue.direct.buf + (n << qp->rq.wqe_shift);
	else
		return qp->queue.page_list[(n << qp->rq.wqe_shift) >> PAGE_SHIFT].buf +
			((n << qp->rq.wqe_shift) & (PAGE_SIZE - 1));
}

static void *get_send_wqe(struct mthca_qp *qp, int n)
{
	if (qp->is_direct)
		return qp->queue.direct.buf + qp->send_wqe_offset +
			(n << qp->sq.wqe_shift);
	else
		return qp->queue.page_list[(qp->send_wqe_offset +
					    (n << qp->sq.wqe_shift)) >>
					   PAGE_SHIFT].buf +
			((qp->send_wqe_offset + (n << qp->sq.wqe_shift)) &
			 (PAGE_SIZE - 1));
}

static void mthca_wq_init(struct mthca_wq *wq)
{
	/* mthca_alloc_qp_common() initializes the locks */
	wq->next_ind  = 0;
	wq->last_comp = wq->max - 1;
	wq->head      = 0;
	wq->tail      = 0;
}

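/*
 * Dispatch an async event to the QP's consumer.  The qp_table lock
 * protects both the QP array and each QP's refcount: we take a
 * reference before calling the event handler so the QP can't be freed
 * under us, and wake anyone waiting in mthca_free_qp() when the last
 * reference is dropped.
 */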
void mthca_qp_event(struct mthca_dev *dev, u32 qpn,
		    enum ib_event_type event_type)
{
	struct mthca_qp *qp;
	struct ib_event event;

	spin_lock(&dev->qp_table.lock);
	qp = mthca_array_get(&dev->qp_table.qp, qpn & (dev->limits.num_qps - 1));
	if (qp)
		++qp->refcount;
	spin_unlock(&dev->qp_table.lock);

	if (!qp) {
		mthca_warn(dev, "Async event for bogus QP %08x\n", qpn);
		return;
	}

	if (event_type == IB_EVENT_PATH_MIG)
		qp->port = qp->alt_port;

	event.device     = &dev->ib_dev;
	event.event      = event_type;
	event.element.qp = &qp->ibqp;
	if (qp->ibqp.event_handler)
		qp->ibqp.event_handler(&event, qp->ibqp.qp_context);

	spin_lock(&dev->qp_table.lock);
	if (!--qp->refcount)
		wake_up(&qp->wait);
	spin_unlock(&dev->qp_table.lock);
}

static int to_mthca_state(enum ib_qp_state ib_state)
{
	switch (ib_state) {
	case IB_QPS_RESET: return MTHCA_QP_STATE_RST;
	case IB_QPS_INIT:  return MTHCA_QP_STATE_INIT;
	case IB_QPS_RTR:   return MTHCA_QP_STATE_RTR;
	case IB_QPS_RTS:   return MTHCA_QP_STATE_RTS;
	case IB_QPS_SQD:   return MTHCA_QP_STATE_SQD;
	case IB_QPS_SQE:   return MTHCA_QP_STATE_SQE;
	case IB_QPS_ERR:   return MTHCA_QP_STATE_ERR;
	default:           return -1;
	}
}

enum { RC, UC, UD, RD, RDEE, MLX, NUM_TRANS };

static int to_mthca_st(int transport)
{
	switch (transport) {
	case RC:  return MTHCA_QP_ST_RC;
	case UC:  return MTHCA_QP_ST_UC;
	case UD:  return MTHCA_QP_ST_UD;
	case RD:  return MTHCA_QP_ST_RD;
	case MLX: return MTHCA_QP_ST_MLX;
	default:  return -1;
	}
}

static void store_attrs(struct mthca_sqp *sqp, struct ib_qp_attr *attr,
			int attr_mask)
{
	if (attr_mask & IB_QP_PKEY_INDEX)
		sqp->pkey_index = attr->pkey_index;
	if (attr_mask & IB_QP_QKEY)
		sqp->qkey = attr->qkey;
	if (attr_mask & IB_QP_SQ_PSN)
		sqp->send_psn = attr->sq_psn;
}

static void init_port(struct mthca_dev *dev, int port)
{
	int err;
	u8 status;
	struct mthca_init_ib_param param;

	memset(&param, 0, sizeof param);

	param.port_width = dev->limits.port_width_cap;
	param.vl_cap     = dev->limits.vl_cap;
	param.mtu_cap    = dev->limits.mtu_cap;
	param.gid_cap    = dev->limits.gid_table_len;
	param.pkey_cap   = dev->limits.pkey_table_len;

	err = mthca_INIT_IB(dev, &param, port, &status);
	if (err)
		mthca_warn(dev, "INIT_IB failed, return code %d.\n", err);
	if (status)
		mthca_warn(dev, "INIT_IB returned status %02x.\n", status);
}

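/*
 * Translate IB access flags into the hardware RRE/RAE/RWE bits,
 * falling back to the QP's cached values for anything the caller
 * isn't modifying.  If the responder resource count is (or becomes)
 * zero, incoming RDMA reads and atomics can't be handled, so at most
 * remote write may stay enabled.
 */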
static __be32 get_hw_access_flags(struct mthca_qp *qp, struct ib_qp_attr *attr,
				  int attr_mask)
{
	u8 dest_rd_atomic;
	u32 access_flags;
	u32 hw_access_flags = 0;

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		dest_rd_atomic = attr->max_dest_rd_atomic;
	else
		dest_rd_atomic = qp->resp_depth;

	if (attr_mask & IB_QP_ACCESS_FLAGS)
		access_flags = attr->qp_access_flags;
	else
		access_flags = qp->atomic_rd_en;

	if (!dest_rd_atomic)
		access_flags &= IB_ACCESS_REMOTE_WRITE;

	if (access_flags & IB_ACCESS_REMOTE_READ)
		hw_access_flags |= MTHCA_QP_BIT_RRE;
	if (access_flags & IB_ACCESS_REMOTE_ATOMIC)
		hw_access_flags |= MTHCA_QP_BIT_RAE;
	if (access_flags & IB_ACCESS_REMOTE_WRITE)
		hw_access_flags |= MTHCA_QP_BIT_RWE;

	return cpu_to_be32(hw_access_flags);
}

static inline enum ib_qp_state to_ib_qp_state(int mthca_state)
{
	switch (mthca_state) {
	case MTHCA_QP_STATE_RST:      return IB_QPS_RESET;
	case MTHCA_QP_STATE_INIT:     return IB_QPS_INIT;
	case MTHCA_QP_STATE_RTR:      return IB_QPS_RTR;
	case MTHCA_QP_STATE_RTS:      return IB_QPS_RTS;
	case MTHCA_QP_STATE_DRAINING:
	case MTHCA_QP_STATE_SQD:      return IB_QPS_SQD;
	case MTHCA_QP_STATE_SQE:      return IB_QPS_SQE;
	case MTHCA_QP_STATE_ERR:      return IB_QPS_ERR;
	default:                      return -1;
	}
}

static inline enum ib_mig_state to_ib_mig_state(int mthca_mig_state)
{
	switch (mthca_mig_state) {
	case 0:  return IB_MIG_ARMED;
	case 1:  return IB_MIG_REARM;
	case 3:  return IB_MIG_MIGRATED;
	default: return -1;
	}
}

static int to_ib_qp_access_flags(int mthca_flags)
{
	int ib_flags = 0;

	if (mthca_flags & MTHCA_QP_BIT_RRE)
		ib_flags |= IB_ACCESS_REMOTE_READ;
	if (mthca_flags & MTHCA_QP_BIT_RWE)
		ib_flags |= IB_ACCESS_REMOTE_WRITE;
	if (mthca_flags & MTHCA_QP_BIT_RAE)
		ib_flags |= IB_ACCESS_REMOTE_ATOMIC;

	return ib_flags;
}

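/*
 * Decode a hardware address path back into an ib_ah_attr.  A port
 * number of zero (or out of range) means the path was never set, so
 * the remaining fields are left zeroed.
 */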
static void to_ib_ah_attr(struct mthca_dev *dev, struct ib_ah_attr *ib_ah_attr,
			  struct mthca_qp_path *path)
{
	memset(ib_ah_attr, 0, sizeof *ib_ah_attr);
	ib_ah_attr->port_num      = (be32_to_cpu(path->port_pkey) >> 24) & 0x3;

	if (ib_ah_attr->port_num == 0 || ib_ah_attr->port_num > dev->limits.num_ports)
		return;

	ib_ah_attr->dlid          = be16_to_cpu(path->rlid);
	ib_ah_attr->sl            = be32_to_cpu(path->sl_tclass_flowlabel) >> 28;
	ib_ah_attr->src_path_bits = path->g_mylmc & 0x7f;
	ib_ah_attr->static_rate   = mthca_rate_to_ib(dev,
						     path->static_rate & 0x7,
						     ib_ah_attr->port_num);
	ib_ah_attr->ah_flags      = (path->g_mylmc & (1 << 7)) ? IB_AH_GRH : 0;
	if (ib_ah_attr->ah_flags) {
		ib_ah_attr->grh.sgid_index = path->mgid_index & (dev->limits.gid_table_len - 1);
		ib_ah_attr->grh.hop_limit  = path->hop_limit;
		ib_ah_attr->grh.traffic_class =
			(be32_to_cpu(path->sl_tclass_flowlabel) >> 20) & 0xff;
		ib_ah_attr->grh.flow_label =
			be32_to_cpu(path->sl_tclass_flowlabel) & 0xfffff;
		memcpy(ib_ah_attr->grh.dgid.raw,
		       path->rgid, sizeof ib_ah_attr->grh.dgid.raw);
	}
}

int mthca_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask,
		   struct ib_qp_init_attr *qp_init_attr)
{
	struct mthca_dev *dev = to_mdev(ibqp->device);
	struct mthca_qp *qp = to_mqp(ibqp);
	int err;
	struct mthca_mailbox *mailbox;
	struct mthca_qp_param *qp_param;
	struct mthca_qp_context *context;
	int mthca_state;
	u8 status;

	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	err = mthca_QUERY_QP(dev, qp->qpn, 0, mailbox, &status);
	if (err)
		goto out;
	if (status) {
		mthca_warn(dev, "QUERY_QP returned status %02x\n", status);
		err = -EINVAL;
		goto out;
	}

	qp_param    = mailbox->buf;
	context     = &qp_param->context;
	mthca_state = be32_to_cpu(context->flags) >> 28;

	qp_attr->qp_state            = to_ib_qp_state(mthca_state);
	qp_attr->cur_qp_state        = qp_attr->qp_state;
	qp_attr->path_mtu            = context->mtu_msgmax >> 5;
	qp_attr->path_mig_state      =
		to_ib_mig_state((be32_to_cpu(context->flags) >> 11) & 0x3);
	qp_attr->qkey                = be32_to_cpu(context->qkey);
	qp_attr->rq_psn              = be32_to_cpu(context->rnr_nextrecvpsn) & 0xffffff;
	qp_attr->sq_psn              = be32_to_cpu(context->next_send_psn) & 0xffffff;
	qp_attr->dest_qp_num         = be32_to_cpu(context->remote_qpn) & 0xffffff;
	qp_attr->qp_access_flags     =
		to_ib_qp_access_flags(be32_to_cpu(context->params2));
	qp_attr->cap.max_send_wr     = qp->sq.max;
	qp_attr->cap.max_recv_wr     = qp->rq.max;
	qp_attr->cap.max_send_sge    = qp->sq.max_gs;
	qp_attr->cap.max_recv_sge    = qp->rq.max_gs;
	qp_attr->cap.max_inline_data = qp->max_inline_data;

	if (qp->transport == RC || qp->transport == UC) {
		to_ib_ah_attr(dev, &qp_attr->ah_attr, &context->pri_path);
		to_ib_ah_attr(dev, &qp_attr->alt_ah_attr, &context->alt_path);
	}

	qp_attr->pkey_index     = be32_to_cpu(context->pri_path.port_pkey) & 0x7f;
	qp_attr->alt_pkey_index = be32_to_cpu(context->alt_path.port_pkey) & 0x7f;

	/* qp_attr->en_sqd_async_notify is only applicable in modify qp */
	qp_attr->sq_draining = mthca_state == MTHCA_QP_STATE_DRAINING;

	qp_attr->max_rd_atomic = 1 << ((be32_to_cpu(context->params1) >> 21) & 0x7);

	qp_attr->max_dest_rd_atomic =
		1 << ((be32_to_cpu(context->params2) >> 21) & 0x7);
	qp_attr->min_rnr_timer =
		(be32_to_cpu(context->rnr_nextrecvpsn) >> 24) & 0x1f;
	qp_attr->port_num      = qp_attr->ah_attr.port_num;
	qp_attr->timeout       = context->pri_path.ackto >> 3;
	qp_attr->retry_cnt     = (be32_to_cpu(context->params1) >> 16) & 0x7;
	qp_attr->rnr_retry     = context->pri_path.rnr_retry >> 5;
	qp_attr->alt_port_num  = qp_attr->alt_ah_attr.port_num;
	qp_attr->alt_timeout   = context->alt_path.ackto >> 3;
	qp_init_attr->cap      = qp_attr->cap;

out:
	mthca_free_mailbox(dev, mailbox);
	return err;
}

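/*
 * Fill in a hardware address path from an IB address handle.  Returns
 * nonzero (callers only test for failure) if the requested SGID index
 * doesn't fit in the port's GID table.
 */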
static int mthca_path_set(struct mthca_dev *dev, struct ib_ah_attr *ah,
			  struct mthca_qp_path *path, u8 port)
{
	path->g_mylmc     = ah->src_path_bits & 0x7f;
	path->rlid        = cpu_to_be16(ah->dlid);
	path->static_rate = mthca_get_rate(dev, ah->static_rate, port);

	if (ah->ah_flags & IB_AH_GRH) {
		if (ah->grh.sgid_index >= dev->limits.gid_table_len) {
			mthca_dbg(dev, "sgid_index (%u) too large. max is %d\n",
				  ah->grh.sgid_index, dev->limits.gid_table_len - 1);
			return -1;
		}

		path->g_mylmc   |= 1 << 7;
		path->mgid_index = ah->grh.sgid_index;
		path->hop_limit  = ah->grh.hop_limit;
		path->sl_tclass_flowlabel =
			cpu_to_be32((ah->sl << 28)                |
				    (ah->grh.traffic_class << 20) |
				    (ah->grh.flow_label));
		memcpy(path->rgid, ah->grh.dgid.raw, 16);
	} else
		path->sl_tclass_flowlabel = cpu_to_be32(ah->sl << 28);

	return 0;
}

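/*
 * Apply an ib_qp_attr/attr_mask pair to the hardware QP context and
 * execute the firmware command for the requested state transition.
 * For illustration only (a hypothetical consumer, not part of this
 * driver), a caller moving a fresh RC QP to INIT would reach this
 * function through the core verbs layer roughly like this:
 *
 *	struct ib_qp_attr attr = {
 *		.qp_state        = IB_QPS_INIT,
 *		.pkey_index      = 0,
 *		.port_num        = 1,
 *		.qp_access_flags = IB_ACCESS_REMOTE_WRITE,
 *	};
 *	ret = ib_modify_qp(qp, &attr, IB_QP_STATE | IB_QP_PKEY_INDEX |
 *			   IB_QP_PORT | IB_QP_ACCESS_FLAGS);
 */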
int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
{
	struct mthca_dev *dev = to_mdev(ibqp->device);
	struct mthca_qp *qp = to_mqp(ibqp);
	enum ib_qp_state cur_state, new_state;
	struct mthca_mailbox *mailbox;
	struct mthca_qp_param *qp_param;
	struct mthca_qp_context *qp_context;
	u32 sqd_event = 0;
	u8 status;
	int err = -EINVAL;

	mutex_lock(&qp->mutex);

	if (attr_mask & IB_QP_CUR_STATE) {
		cur_state = attr->cur_qp_state;
	} else {
		spin_lock_irq(&qp->sq.lock);
		spin_lock(&qp->rq.lock);
		cur_state = qp->state;
		spin_unlock(&qp->rq.lock);
		spin_unlock_irq(&qp->sq.lock);
	}

	new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;

	if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, attr_mask)) {
		mthca_dbg(dev, "Bad QP transition (transport %d) "
			  "%d->%d with attr 0x%08x\n",
			  qp->transport, cur_state, new_state,
			  attr_mask);
		goto out;
	}

	if ((attr_mask & IB_QP_PKEY_INDEX) &&
	    attr->pkey_index >= dev->limits.pkey_table_len) {
		mthca_dbg(dev, "P_Key index (%u) too large. max is %d\n",
			  attr->pkey_index, dev->limits.pkey_table_len - 1);
		goto out;
	}

	if ((attr_mask & IB_QP_PORT) &&
	    (attr->port_num == 0 || attr->port_num > dev->limits.num_ports)) {
		mthca_dbg(dev, "Port number (%u) is invalid\n", attr->port_num);
		goto out;
	}

	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
	    attr->max_rd_atomic > dev->limits.max_qp_init_rdma) {
		mthca_dbg(dev, "Max rdma_atomic as initiator %u too large (max is %d)\n",
			  attr->max_rd_atomic, dev->limits.max_qp_init_rdma);
		goto out;
	}

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC &&
	    attr->max_dest_rd_atomic > 1 << dev->qp_table.rdb_shift) {
		mthca_dbg(dev, "Max rdma_atomic as responder %u too large (max %d)\n",
			  attr->max_dest_rd_atomic, 1 << dev->qp_table.rdb_shift);
		goto out;
	}

	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
	if (IS_ERR(mailbox)) {
		err = PTR_ERR(mailbox);
		goto out;
	}
	qp_param = mailbox->buf;
	qp_context = &qp_param->context;
	memset(qp_param, 0, sizeof *qp_param);

	qp_context->flags      = cpu_to_be32((to_mthca_state(new_state) << 28) |
					     (to_mthca_st(qp->transport) << 16));
	qp_context->flags     |= cpu_to_be32(MTHCA_QP_BIT_DE);
	if (!(attr_mask & IB_QP_PATH_MIG_STATE))
		qp_context->flags |= cpu_to_be32(MTHCA_QP_PM_MIGRATED << 11);
	else {
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_PM_STATE);
		switch (attr->path_mig_state) {
		case IB_MIG_MIGRATED:
			qp_context->flags |= cpu_to_be32(MTHCA_QP_PM_MIGRATED << 11);
			break;
		case IB_MIG_REARM:
			qp_context->flags |= cpu_to_be32(MTHCA_QP_PM_REARM << 11);
			break;
		case IB_MIG_ARMED:
			qp_context->flags |= cpu_to_be32(MTHCA_QP_PM_ARMED << 11);
			break;
		}
	}

	/* leave tavor_sched_queue as 0 */

	if (qp->transport == MLX || qp->transport == UD)
		qp_context->mtu_msgmax = (IB_MTU_2048 << 5) | 11;
	else if (attr_mask & IB_QP_PATH_MTU) {
		if (attr->path_mtu < IB_MTU_256 || attr->path_mtu > IB_MTU_2048) {
			mthca_dbg(dev, "path MTU (%u) is invalid\n",
				  attr->path_mtu);
			goto out_mailbox;
		}
		qp_context->mtu_msgmax = (attr->path_mtu << 5) | 31;
	}

	if (mthca_is_memfree(dev)) {
		if (qp->rq.max)
			qp_context->rq_size_stride = long_log2(qp->rq.max) << 3;
		qp_context->rq_size_stride |= qp->rq.wqe_shift - 4;

		if (qp->sq.max)
			qp_context->sq_size_stride = long_log2(qp->sq.max) << 3;
		qp_context->sq_size_stride |= qp->sq.wqe_shift - 4;
	}

	/* leave arbel_sched_queue as 0 */

	if (qp->ibqp.uobject)
		qp_context->usr_page =
			cpu_to_be32(to_mucontext(qp->ibqp.uobject->context)->uar.index);
	else
		qp_context->usr_page = cpu_to_be32(dev->driver_uar.index);
	qp_context->local_qpn = cpu_to_be32(qp->qpn);
	if (attr_mask & IB_QP_DEST_QPN)
		qp_context->remote_qpn = cpu_to_be32(attr->dest_qp_num);

	if (qp->transport == MLX)
		qp_context->pri_path.port_pkey |=
			cpu_to_be32(qp->port << 24);
	else {
		if (attr_mask & IB_QP_PORT) {
			qp_context->pri_path.port_pkey |=
				cpu_to_be32(attr->port_num << 24);
			qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_PORT_NUM);
		}
	}

	if (attr_mask & IB_QP_PKEY_INDEX) {
		qp_context->pri_path.port_pkey |=
			cpu_to_be32(attr->pkey_index);
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_PKEY_INDEX);
	}

	if (attr_mask & IB_QP_RNR_RETRY) {
		qp_context->alt_path.rnr_retry = qp_context->pri_path.rnr_retry =
			attr->rnr_retry << 5;
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RNR_RETRY |
							MTHCA_QP_OPTPAR_ALT_RNR_RETRY);
	}

	if (attr_mask & IB_QP_AV) {
		if (mthca_path_set(dev, &attr->ah_attr, &qp_context->pri_path,
				   attr_mask & IB_QP_PORT ? attr->port_num : qp->port))
			goto out_mailbox;

		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_PRIMARY_ADDR_PATH);
	}

	if (attr_mask & IB_QP_TIMEOUT) {
		qp_context->pri_path.ackto = attr->timeout << 3;
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_ACK_TIMEOUT);
	}

	if (attr_mask & IB_QP_ALT_PATH) {
		if (attr->alt_pkey_index >= dev->limits.pkey_table_len) {
			mthca_dbg(dev, "Alternate P_Key index (%u) too large. max is %d\n",
				  attr->alt_pkey_index, dev->limits.pkey_table_len - 1);
			goto out_mailbox;
		}

		if (attr->alt_port_num == 0 || attr->alt_port_num > dev->limits.num_ports) {
			mthca_dbg(dev, "Alternate port number (%u) is invalid\n",
				  attr->alt_port_num);
			goto out_mailbox;
		}

		if (mthca_path_set(dev, &attr->alt_ah_attr, &qp_context->alt_path,
				   attr->alt_ah_attr.port_num))
			goto out_mailbox;

		qp_context->alt_path.port_pkey |= cpu_to_be32(attr->alt_pkey_index |
							      attr->alt_port_num << 24);
		qp_context->alt_path.ackto = attr->alt_timeout << 3;
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_ALT_ADDR_PATH);
	}

	/* leave rdd as 0 */
	qp_context->pd       = cpu_to_be32(to_mpd(ibqp->pd)->pd_num);
	/* leave wqe_base as 0 (we always create an MR based at 0 for WQs) */
	qp_context->wqe_lkey = cpu_to_be32(qp->mr.ibmr.lkey);
	qp_context->params1  = cpu_to_be32((MTHCA_ACK_REQ_FREQ << 28) |
					   (MTHCA_FLIGHT_LIMIT << 24) |
					   MTHCA_QP_BIT_SWE);
	if (qp->sq_policy == IB_SIGNAL_ALL_WR)
		qp_context->params1 |= cpu_to_be32(MTHCA_QP_BIT_SSC);
	if (attr_mask & IB_QP_RETRY_CNT) {
		qp_context->params1 |= cpu_to_be32(attr->retry_cnt << 16);
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RETRY_COUNT);
	}

	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
		if (attr->max_rd_atomic) {
			qp_context->params1 |=
				cpu_to_be32(MTHCA_QP_BIT_SRE |
					    MTHCA_QP_BIT_SAE);
			qp_context->params1 |=
				cpu_to_be32(fls(attr->max_rd_atomic - 1) << 21);
		}
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_SRA_MAX);
	}

	if (attr_mask & IB_QP_SQ_PSN)
		qp_context->next_send_psn = cpu_to_be32(attr->sq_psn);
	qp_context->cqn_snd = cpu_to_be32(to_mcq(ibqp->send_cq)->cqn);

	if (mthca_is_memfree(dev)) {
		qp_context->snd_wqe_base_l = cpu_to_be32(qp->send_wqe_offset);
		qp_context->snd_db_index   = cpu_to_be32(qp->sq.db_index);
	}

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
		if (attr->max_dest_rd_atomic)
			qp_context->params2 |=
				cpu_to_be32(fls(attr->max_dest_rd_atomic - 1) << 21);

		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RRA_MAX);
	}

	if (attr_mask & (IB_QP_ACCESS_FLAGS | IB_QP_MAX_DEST_RD_ATOMIC)) {
		qp_context->params2      |= get_hw_access_flags(qp, attr, attr_mask);
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RWE |
							MTHCA_QP_OPTPAR_RRE |
							MTHCA_QP_OPTPAR_RAE);
	}

	qp_context->params2 |= cpu_to_be32(MTHCA_QP_BIT_RSC);

	if (ibqp->srq)
		qp_context->params2 |= cpu_to_be32(MTHCA_QP_BIT_RIC);

	if (attr_mask & IB_QP_MIN_RNR_TIMER) {
		qp_context->rnr_nextrecvpsn |= cpu_to_be32(attr->min_rnr_timer << 24);
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RNR_TIMEOUT);
	}
	if (attr_mask & IB_QP_RQ_PSN)
		qp_context->rnr_nextrecvpsn |= cpu_to_be32(attr->rq_psn);

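	/*
	 * Point the QP at its slice of the RDB (RDMA responder
	 * resource) table: rdb_base plus this QP's block of
	 * (1 << rdb_shift) entries.
	 */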
	qp_context->ra_buff_indx =
		cpu_to_be32(dev->qp_table.rdb_base +
			    ((qp->qpn & (dev->limits.num_qps - 1)) * MTHCA_RDB_ENTRY_SIZE <<
			     dev->qp_table.rdb_shift));

	qp_context->cqn_rcv = cpu_to_be32(to_mcq(ibqp->recv_cq)->cqn);

	if (mthca_is_memfree(dev))
		qp_context->rcv_db_index = cpu_to_be32(qp->rq.db_index);

	if (attr_mask & IB_QP_QKEY) {
		qp_context->qkey = cpu_to_be32(attr->qkey);
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_Q_KEY);
	}

	if (ibqp->srq)
		qp_context->srqn = cpu_to_be32(1 << 24 |
					       to_msrq(ibqp->srq)->srqn);

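	/*
	 * Bit 31 of the MODIFY_QP input modifier appears to ask the
	 * firmware to raise an SQ-drained event once the send queue
	 * actually drains (only meaningful for the RTS->SQD
	 * transition).
	 */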
	if (cur_state == IB_QPS_RTS && new_state == IB_QPS_SQD &&
	    attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY &&
	    attr->en_sqd_async_notify)
		sqd_event = 1 << 31;

	err = mthca_MODIFY_QP(dev, cur_state, new_state, qp->qpn, 0,
			      mailbox, sqd_event, &status);
	if (err)
		goto out_mailbox;
	if (status) {
		mthca_warn(dev, "modify QP %d->%d returned status %02x.\n",
			   cur_state, new_state, status);
		err = -EINVAL;
		goto out_mailbox;
	}

	qp->state = new_state;
	if (attr_mask & IB_QP_ACCESS_FLAGS)
		qp->atomic_rd_en = attr->qp_access_flags;
	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		qp->resp_depth = attr->max_dest_rd_atomic;
	if (attr_mask & IB_QP_PORT)
		qp->port = attr->port_num;
	if (attr_mask & IB_QP_ALT_PATH)
		qp->alt_port = attr->alt_port_num;

	if (is_sqp(dev, qp))
		store_attrs(to_msqp(qp), attr, attr_mask);

	/*
	 * If we moved QP0 to RTR, bring the IB link up; if we moved
	 * QP0 to RESET or ERROR, bring the link back down.
	 */
	if (is_qp0(dev, qp)) {
		if (cur_state != IB_QPS_RTR &&
		    new_state == IB_QPS_RTR)
			init_port(dev, qp->port);

		if (cur_state != IB_QPS_RESET &&
		    cur_state != IB_QPS_ERR &&
		    (new_state == IB_QPS_RESET ||
		     new_state == IB_QPS_ERR))
			mthca_CLOSE_IB(dev, qp->port, &status);
	}

	/*
	 * If we moved a kernel QP to RESET, clean up all old CQ
	 * entries and reinitialize the QP.
	 */
	if (new_state == IB_QPS_RESET && !qp->ibqp.uobject) {
		mthca_cq_clean(dev, to_mcq(qp->ibqp.send_cq), qp->qpn,
			       qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
		if (qp->ibqp.send_cq != qp->ibqp.recv_cq)
			mthca_cq_clean(dev, to_mcq(qp->ibqp.recv_cq), qp->qpn,
				       qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);

		mthca_wq_init(&qp->sq);
		qp->sq.last = get_send_wqe(qp, qp->sq.max - 1);

		mthca_wq_init(&qp->rq);
		qp->rq.last = get_recv_wqe(qp, qp->rq.max - 1);

		if (mthca_is_memfree(dev)) {
			*qp->sq.db = 0;
			*qp->rq.db = 0;
		}
	}

out_mailbox:
	mthca_free_mailbox(dev, mailbox);

out:
	mutex_unlock(&qp->mutex);
	return err;
}

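/*
 * Maximum payload (s/g) bytes that fit in a descriptor of desc_sz
 * bytes for this QP's transport.  For example, a 128-byte UD
 * descriptor on a mem-free (Arbel) HCA leaves
 * 128 - sizeof (struct mthca_next_seg) - sizeof (struct mthca_arbel_ud_seg)
 * bytes for data segments.
 */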
static int mthca_max_data_size(struct mthca_dev *dev, struct mthca_qp *qp, int desc_sz)
{
	/*
	 * Calculate the maximum size of WQE s/g segments, excluding
	 * the next segment and other non-data segments.
	 */
	int max_data_size = desc_sz - sizeof (struct mthca_next_seg);

	switch (qp->transport) {
	case MLX:
		max_data_size -= 2 * sizeof (struct mthca_data_seg);
		break;

	case UD:
		if (mthca_is_memfree(dev))
			max_data_size -= sizeof (struct mthca_arbel_ud_seg);
		else
			max_data_size -= sizeof (struct mthca_tavor_ud_seg);
		break;

	default:
		max_data_size -= sizeof (struct mthca_raddr_seg);
		break;
	}

	return max_data_size;
}

static inline int mthca_max_inline_data(struct mthca_pd *pd, int max_data_size)
{
	/* We don't support inline data for kernel QPs (yet). */
	return pd->ibpd.uobject ? max_data_size - MTHCA_INLINE_HEADER_SIZE : 0;
}

static void mthca_adjust_qp_caps(struct mthca_dev *dev,
				 struct mthca_pd *pd,
				 struct mthca_qp *qp)
{
	int max_data_size = mthca_max_data_size(dev, qp,
						min(dev->limits.max_desc_sz,
						    1 << qp->sq.wqe_shift));

	qp->max_inline_data = mthca_max_inline_data(pd, max_data_size);

	qp->sq.max_gs = min_t(int, dev->limits.max_sg,
			      max_data_size / sizeof (struct mthca_data_seg));
	qp->rq.max_gs = min_t(int, dev->limits.max_sg,
			      (min(dev->limits.max_desc_sz, 1 << qp->rq.wqe_shift) -
			       sizeof (struct mthca_next_seg)) /
			      sizeof (struct mthca_data_seg));
}

/*
 * Allocate and register buffer for WQEs.  qp->rq.max, sq.max,
 * rq.max_gs and sq.max_gs must all be assigned.
 * mthca_alloc_wqe_buf will calculate rq.wqe_shift and
 * sq.wqe_shift (as well as send_wqe_offset, is_direct, and
 * queue)
 */
static int mthca_alloc_wqe_buf(struct mthca_dev *dev,
			       struct mthca_pd *pd,
			       struct mthca_qp *qp)
{
	int size;
	int err = -ENOMEM;

	size = sizeof (struct mthca_next_seg) +
		qp->rq.max_gs * sizeof (struct mthca_data_seg);

	if (size > dev->limits.max_desc_sz)
		return -EINVAL;

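	/* WQE stride is at least 64 bytes; pick the smallest power of two that fits. */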
	for (qp->rq.wqe_shift = 6; 1 << qp->rq.wqe_shift < size;
	     qp->rq.wqe_shift++)
		; /* nothing */

	size = qp->sq.max_gs * sizeof (struct mthca_data_seg);
	switch (qp->transport) {
	case MLX:
		size += 2 * sizeof (struct mthca_data_seg);
		break;

	case UD:
		size += mthca_is_memfree(dev) ?
			sizeof (struct mthca_arbel_ud_seg) :
			sizeof (struct mthca_tavor_ud_seg);
		break;

	case UC:
		size += sizeof (struct mthca_raddr_seg);
		break;

	case RC:
		size += sizeof (struct mthca_raddr_seg);
		/*
		 * An atomic op will require an atomic segment, a
		 * remote address segment and one scatter entry.
		 */
		size = max_t(int, size,
			     sizeof (struct mthca_atomic_seg) +
			     sizeof (struct mthca_raddr_seg) +
			     sizeof (struct mthca_data_seg));
		break;

	default:
		break;
	}

	/* Make sure that we have enough space for a bind request */
	size = max_t(int, size, sizeof (struct mthca_bind_seg));

	size += sizeof (struct mthca_next_seg);

	if (size > dev->limits.max_desc_sz)
		return -EINVAL;

	for (qp->sq.wqe_shift = 6; 1 << qp->sq.wqe_shift < size;
	     qp->sq.wqe_shift++)
		; /* nothing */

	qp->send_wqe_offset = ALIGN(qp->rq.max << qp->rq.wqe_shift,
				    1 << qp->sq.wqe_shift);

	/*
	 * If this is a userspace QP, we don't actually have to
	 * allocate anything.  All we need is to calculate the WQE
	 * sizes and the send_wqe_offset, so we're done now.
	 */
	if (pd->ibpd.uobject)
		return 0;

	size = PAGE_ALIGN(qp->send_wqe_offset +
			  (qp->sq.max << qp->sq.wqe_shift));

	qp->wrid = kmalloc((qp->rq.max + qp->sq.max) * sizeof (u64),
			   GFP_KERNEL);
	if (!qp->wrid)
		goto err_out;

	err = mthca_buf_alloc(dev, size, MTHCA_MAX_DIRECT_QP_SIZE,
			      &qp->queue, &qp->is_direct, pd, 0, &qp->mr);
	if (err)
		goto err_out;

	return 0;

err_out:
	kfree(qp->wrid);
	return err;
}

static void mthca_free_wqe_buf(struct mthca_dev *dev,
			       struct mthca_qp *qp)
{
	mthca_buf_free(dev, PAGE_ALIGN(qp->send_wqe_offset +
				       (qp->sq.max << qp->sq.wqe_shift)),
		       &qp->queue, qp->is_direct, &qp->mr);
	kfree(qp->wrid);
}

static int mthca_map_memfree(struct mthca_dev *dev,
			     struct mthca_qp *qp)
{
	int ret;

	if (mthca_is_memfree(dev)) {
		ret = mthca_table_get(dev, dev->qp_table.qp_table, qp->qpn);
		if (ret)
			return ret;

		ret = mthca_table_get(dev, dev->qp_table.eqp_table, qp->qpn);
		if (ret)
			goto err_qpc;

		ret = mthca_table_get(dev, dev->qp_table.rdb_table,
				      qp->qpn << dev->qp_table.rdb_shift);
		if (ret)
			goto err_eqpc;
	}

	return 0;

err_eqpc:
	mthca_table_put(dev, dev->qp_table.eqp_table, qp->qpn);

err_qpc:
	mthca_table_put(dev, dev->qp_table.qp_table, qp->qpn);

	return ret;
}

static void mthca_unmap_memfree(struct mthca_dev *dev,
				struct mthca_qp *qp)
{
	mthca_table_put(dev, dev->qp_table.rdb_table,
			qp->qpn << dev->qp_table.rdb_shift);
	mthca_table_put(dev, dev->qp_table.eqp_table, qp->qpn);
	mthca_table_put(dev, dev->qp_table.qp_table, qp->qpn);
}

static int mthca_alloc_memfree(struct mthca_dev *dev,
			       struct mthca_qp *qp)
{
	if (mthca_is_memfree(dev)) {
		qp->rq.db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_RQ,
						 qp->qpn, &qp->rq.db);
		if (qp->rq.db_index < 0)
			return -ENOMEM;

		qp->sq.db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_SQ,
						 qp->qpn, &qp->sq.db);
		if (qp->sq.db_index < 0) {
			mthca_free_db(dev, MTHCA_DB_TYPE_RQ, qp->rq.db_index);
			return -ENOMEM;
		}
	}

	return 0;
}

static void mthca_free_memfree(struct mthca_dev *dev,
			       struct mthca_qp *qp)
{
	if (mthca_is_memfree(dev)) {
		mthca_free_db(dev, MTHCA_DB_TYPE_SQ, qp->sq.db_index);
		mthca_free_db(dev, MTHCA_DB_TYPE_RQ, qp->rq.db_index);
	}
}

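/*
 * Common initialization for kernel- and user-created QPs.  For
 * mem-free HCAs the receive WQE ring is pre-linked here: each WQE's
 * next-descriptor field points at the following slot (wrapping at
 * rq.max) and its scatter entries are marked invalid, so the ring is
 * consistent before the first post.
 */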
static int mthca_alloc_qp_common(struct mthca_dev *dev,
				 struct mthca_pd *pd,
				 struct mthca_cq *send_cq,
				 struct mthca_cq *recv_cq,
				 enum ib_sig_type send_policy,
				 struct mthca_qp *qp)
{
	int ret;
	int i;

	qp->refcount = 1;
	init_waitqueue_head(&qp->wait);
	mutex_init(&qp->mutex);
	qp->state        = IB_QPS_RESET;
	qp->atomic_rd_en = 0;
	qp->resp_depth   = 0;
	qp->sq_policy    = send_policy;
	mthca_wq_init(&qp->sq);
	mthca_wq_init(&qp->rq);
	/* these are initialized separately so lockdep can tell them apart */
	spin_lock_init(&qp->sq.lock);
	spin_lock_init(&qp->rq.lock);

	ret = mthca_map_memfree(dev, qp);
	if (ret)
		return ret;

	ret = mthca_alloc_wqe_buf(dev, pd, qp);
	if (ret) {
		mthca_unmap_memfree(dev, qp);
		return ret;
	}

	mthca_adjust_qp_caps(dev, pd, qp);

	/*
	 * If this is a userspace QP, we're done now.  The doorbells
	 * will be allocated and buffers will be initialized in
	 * userspace.
	 */
	if (pd->ibpd.uobject)
		return 0;

	ret = mthca_alloc_memfree(dev, qp);
	if (ret) {
		mthca_free_wqe_buf(dev, qp);
		mthca_unmap_memfree(dev, qp);
		return ret;
	}

	if (mthca_is_memfree(dev)) {
		struct mthca_next_seg *next;
		struct mthca_data_seg *scatter;
		int size = (sizeof (struct mthca_next_seg) +
			    qp->rq.max_gs * sizeof (struct mthca_data_seg)) / 16;

		for (i = 0; i < qp->rq.max; ++i) {
			next = get_recv_wqe(qp, i);
			next->nda_op = cpu_to_be32(((i + 1) & (qp->rq.max - 1)) <<
						   qp->rq.wqe_shift);
			next->ee_nds = cpu_to_be32(size);

			for (scatter = (void *) (next + 1);
			     (void *) scatter < (void *) next + (1 << qp->rq.wqe_shift);
			     ++scatter)
				scatter->lkey = cpu_to_be32(MTHCA_INVAL_LKEY);
		}

		for (i = 0; i < qp->sq.max; ++i) {
			next = get_send_wqe(qp, i);
			next->nda_op = cpu_to_be32((((i + 1) & (qp->sq.max - 1)) <<
						    qp->sq.wqe_shift) +
						   qp->send_wqe_offset);
		}
	}

	qp->sq.last = get_send_wqe(qp, qp->sq.max - 1);
	qp->rq.last = get_recv_wqe(qp, qp->rq.max - 1);

	return 0;
}

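/*
 * Validate the requested capabilities against device limits and pick
 * the actual queue sizes: mem-free (Arbel) hardware requires
 * power-of-two queue depths, while Tavor can use the sizes as given.
 */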
static int mthca_set_qp_size(struct mthca_dev *dev, struct ib_qp_cap *cap,
			     struct mthca_pd *pd, struct mthca_qp *qp)
{
	int max_data_size = mthca_max_data_size(dev, qp, dev->limits.max_desc_sz);

	/* Sanity check QP size before proceeding */
	if (cap->max_send_wr     > dev->limits.max_wqes ||
	    cap->max_recv_wr     > dev->limits.max_wqes ||
	    cap->max_send_sge    > dev->limits.max_sg   ||
	    cap->max_recv_sge    > dev->limits.max_sg   ||
	    cap->max_inline_data > mthca_max_inline_data(pd, max_data_size))
		return -EINVAL;

	/*
	 * For MLX transport we need 2 extra S/G entries:
	 * one for the header and one for the checksum at the end
	 */
	if (qp->transport == MLX && cap->max_recv_sge + 2 > dev->limits.max_sg)
		return -EINVAL;

	if (mthca_is_memfree(dev)) {
		qp->rq.max = cap->max_recv_wr ?
			roundup_pow_of_two(cap->max_recv_wr) : 0;
		qp->sq.max = cap->max_send_wr ?
			roundup_pow_of_two(cap->max_send_wr) : 0;
	} else {
		qp->rq.max = cap->max_recv_wr;
		qp->sq.max = cap->max_send_wr;
	}

	qp->rq.max_gs = cap->max_recv_sge;
	qp->sq.max_gs = max_t(int, cap->max_send_sge,
			      ALIGN(cap->max_inline_data + MTHCA_INLINE_HEADER_SIZE,
				    MTHCA_INLINE_CHUNK_SIZE) /
			      sizeof (struct mthca_data_seg));

	return 0;
}

int mthca_alloc_qp(struct mthca_dev *dev,
		   struct mthca_pd *pd,
		   struct mthca_cq *send_cq,
		   struct mthca_cq *recv_cq,
		   enum ib_qp_type type,
		   enum ib_sig_type send_policy,
		   struct ib_qp_cap *cap,
		   struct mthca_qp *qp)
{
	int err;

	switch (type) {
	case IB_QPT_RC: qp->transport = RC; break;
	case IB_QPT_UC: qp->transport = UC; break;
	case IB_QPT_UD: qp->transport = UD; break;
	default: return -EINVAL;
	}

	err = mthca_set_qp_size(dev, cap, pd, qp);
	if (err)
		return err;

	qp->qpn = mthca_alloc(&dev->qp_table.alloc);
	if (qp->qpn == -1)
		return -ENOMEM;

	/* initialize port to zero for error-catching. */
	qp->port = 0;

	err = mthca_alloc_qp_common(dev, pd, send_cq, recv_cq,
				    send_policy, qp);
	if (err) {
		mthca_free(&dev->qp_table.alloc, qp->qpn);
		return err;
	}

	spin_lock_irq(&dev->qp_table.lock);
	mthca_array_set(&dev->qp_table.qp,
			qp->qpn & (dev->limits.num_qps - 1), qp);
	spin_unlock_irq(&dev->qp_table.lock);

	return 0;
}

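/*
 * Special QPs live at fixed QP numbers: sqp_start + port - 1 holds
 * the two QP0s and sqp_start + 2 + port - 1 the two QP1s, so mqpn
 * below is qpn * 2 + sqp_start + port - 1 with qpn being 0 (SMI) or
 * 1 (GSI).
 */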
int mthca_alloc_sqp(struct mthca_dev *dev,
		    struct mthca_pd *pd,
		    struct mthca_cq *send_cq,
		    struct mthca_cq *recv_cq,
		    enum ib_sig_type send_policy,
		    struct ib_qp_cap *cap,
		    int qpn,
		    int port,
		    struct mthca_sqp *sqp)
{
	u32 mqpn = qpn * 2 + dev->qp_table.sqp_start + port - 1;
	int err;

	sqp->qp.transport = MLX;
	err = mthca_set_qp_size(dev, cap, pd, &sqp->qp);
	if (err)
		return err;

	sqp->header_buf_size = sqp->qp.sq.max * MTHCA_UD_HEADER_SIZE;
	sqp->header_buf = dma_alloc_coherent(&dev->pdev->dev, sqp->header_buf_size,
					     &sqp->header_dma, GFP_KERNEL);
	if (!sqp->header_buf)
		return -ENOMEM;

	spin_lock_irq(&dev->qp_table.lock);
	if (mthca_array_get(&dev->qp_table.qp, mqpn))
		err = -EBUSY;
	else
		mthca_array_set(&dev->qp_table.qp, mqpn, sqp);
	spin_unlock_irq(&dev->qp_table.lock);

	if (err)
		goto err_out;

	sqp->qp.port = port;
	sqp->qp.qpn  = mqpn;

	err = mthca_alloc_qp_common(dev, pd, send_cq, recv_cq,
				    send_policy, &sqp->qp);
	if (err)
		goto err_out_free;

	atomic_inc(&pd->sqp_count);

	return 0;

 err_out_free:
	/*
	 * Lock CQs here, so that CQ polling code can do QP lookup
	 * without taking a lock.
	 */
	spin_lock_irq(&send_cq->lock);
	if (send_cq != recv_cq)
		spin_lock(&recv_cq->lock);

	spin_lock(&dev->qp_table.lock);
	mthca_array_clear(&dev->qp_table.qp, mqpn);
	spin_unlock(&dev->qp_table.lock);

	if (send_cq != recv_cq)
		spin_unlock(&recv_cq->lock);
	spin_unlock_irq(&send_cq->lock);

 err_out:
	dma_free_coherent(&dev->pdev->dev, sqp->header_buf_size,
			  sqp->header_buf, sqp->header_dma);

	return err;
}

static inline int get_qp_refcount(struct mthca_dev *dev, struct mthca_qp *qp)
{
	int c;

	spin_lock_irq(&dev->qp_table.lock);
	c = qp->refcount;
	spin_unlock_irq(&dev->qp_table.lock);

	return c;
}

void mthca_free_qp(struct mthca_dev *dev,
		   struct mthca_qp *qp)
{
	u8 status;
	struct mthca_cq *send_cq;
	struct mthca_cq *recv_cq;

	send_cq = to_mcq(qp->ibqp.send_cq);
	recv_cq = to_mcq(qp->ibqp.recv_cq);

	/*
	 * Lock CQs here, so that CQ polling code can do QP lookup
	 * without taking a lock.
	 */
	spin_lock_irq(&send_cq->lock);
	if (send_cq != recv_cq)
		spin_lock(&recv_cq->lock);

	spin_lock(&dev->qp_table.lock);
	mthca_array_clear(&dev->qp_table.qp,
			  qp->qpn & (dev->limits.num_qps - 1));
	--qp->refcount;
	spin_unlock(&dev->qp_table.lock);

	if (send_cq != recv_cq)
		spin_unlock(&recv_cq->lock);
	spin_unlock_irq(&send_cq->lock);

	wait_event(qp->wait, !get_qp_refcount(dev, qp));

	if (qp->state != IB_QPS_RESET)
		mthca_MODIFY_QP(dev, qp->state, IB_QPS_RESET, qp->qpn, 0,
				NULL, 0, &status);

	/*
	 * If this is a userspace QP, the buffers, MR, CQs and so on
	 * will be cleaned up in userspace, so all we have to do is
	 * unref the mem-free tables and free the QPN in our table.
	 */
	if (!qp->ibqp.uobject) {
		mthca_cq_clean(dev, to_mcq(qp->ibqp.send_cq), qp->qpn,
			       qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
		if (qp->ibqp.send_cq != qp->ibqp.recv_cq)
			mthca_cq_clean(dev, to_mcq(qp->ibqp.recv_cq), qp->qpn,
				       qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);

		mthca_free_memfree(dev, qp);
		mthca_free_wqe_buf(dev, qp);
	}

	mthca_unmap_memfree(dev, qp);

	if (is_sqp(dev, qp)) {
		atomic_dec(&(to_mpd(qp->ibqp.pd)->sqp_count));
		dma_free_coherent(&dev->pdev->dev,
				  to_msqp(qp)->header_buf_size,
				  to_msqp(qp)->header_buf,
				  to_msqp(qp)->header_dma);
	} else
		mthca_free(&dev->qp_table.alloc, qp->qpn);
}

/* Create UD header for an MLX send and build a data segment for it */
static int build_mlx_header(struct mthca_dev *dev, struct mthca_sqp *sqp,
			    int ind, struct ib_send_wr *wr,
			    struct mthca_mlx_seg *mlx,
			    struct mthca_data_seg *data)
{
	int header_size;
	int err;
	u16 pkey;

	ib_ud_header_init(256, /* assume a MAD */
			  mthca_ah_grh_present(to_mah(wr->wr.ud.ah)),
			  &sqp->ud_header);

	err = mthca_read_ah(dev, to_mah(wr->wr.ud.ah), &sqp->ud_header);
	if (err)
		return err;
	mlx->flags &= ~cpu_to_be32(MTHCA_NEXT_SOLICIT | 1);
	mlx->flags |= cpu_to_be32((!sqp->qp.ibqp.qp_num ? MTHCA_MLX_VL15 : 0) |
				  (sqp->ud_header.lrh.destination_lid ==
				   IB_LID_PERMISSIVE ? MTHCA_MLX_SLR : 0) |
				  (sqp->ud_header.lrh.service_level << 8));
	mlx->rlid = sqp->ud_header.lrh.destination_lid;
	mlx->vcrc = 0;

	switch (wr->opcode) {
	case IB_WR_SEND:
		sqp->ud_header.bth.opcode = IB_OPCODE_UD_SEND_ONLY;
		sqp->ud_header.immediate_present = 0;
		break;
	case IB_WR_SEND_WITH_IMM:
		sqp->ud_header.bth.opcode = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE;
		sqp->ud_header.immediate_present = 1;
		sqp->ud_header.immediate_data = wr->imm_data;
		break;
	default:
		return -EINVAL;
	}

	sqp->ud_header.lrh.virtual_lane = !sqp->qp.ibqp.qp_num ? 15 : 0;
	if (sqp->ud_header.lrh.destination_lid == IB_LID_PERMISSIVE)
		sqp->ud_header.lrh.source_lid = IB_LID_PERMISSIVE;
	sqp->ud_header.bth.solicited_event = !!(wr->send_flags & IB_SEND_SOLICITED);
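	/*
	 * QP0 (the SMI QP) always sends with its own P_Key index,
	 * while QP1 (the GSI QP) uses the P_Key index supplied in the
	 * work request.
	 */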
	if (!sqp->qp.ibqp.qp_num)
		ib_get_cached_pkey(&dev->ib_dev, sqp->qp.port,
				   sqp->pkey_index, &pkey);
	else
		ib_get_cached_pkey(&dev->ib_dev, sqp->qp.port,
				   wr->wr.ud.pkey_index, &pkey);
	sqp->ud_header.bth.pkey = cpu_to_be16(pkey);
	sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->wr.ud.remote_qpn);
	sqp->ud_header.bth.psn = cpu_to_be32((sqp->send_psn++) & ((1 << 24) - 1));
	sqp->ud_header.deth.qkey = cpu_to_be32(wr->wr.ud.remote_qkey & 0x80000000 ?
					       sqp->qkey : wr->wr.ud.remote_qkey);
	sqp->ud_header.deth.source_qpn = cpu_to_be32(sqp->qp.ibqp.qp_num);

	header_size = ib_ud_header_pack(&sqp->ud_header,
					sqp->header_buf +
					ind * MTHCA_UD_HEADER_SIZE);

	data->byte_count = cpu_to_be32(header_size);
	data->lkey = cpu_to_be32(to_mpd(sqp->qp.ibqp.pd)->ntmr.ibmr.lkey);
	data->addr = cpu_to_be64(sqp->header_dma +
				 ind * MTHCA_UD_HEADER_SIZE);

	return 0;
}

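/*
 * Check whether posting nreq more WQEs would overflow the work queue.
 * The unlocked fast path may read a stale tail, so on apparent
 * overflow we re-read once under the CQ lock, which serializes
 * against the completion path that advances the tail.
 */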
static inline int mthca_wq_overflow(struct mthca_wq *wq, int nreq,
				    struct ib_cq *ib_cq)
{
	unsigned cur;
	struct mthca_cq *cq;

	cur = wq->head - wq->tail;
	if (likely(cur + nreq < wq->max))
		return 0;

	cq = to_mcq(ib_cq);
	spin_lock(&cq->lock);
	cur = wq->head - wq->tail;
	spin_unlock(&cq->lock);

	return cur + nreq >= wq->max;
}

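/*
 * Post a chain of send work requests on a Tavor-mode QP.  Each WQE is
 * linked to the previous one through its next segment (nda_op/ee_nds),
 * and a single MMIO doorbell describing the first new WQE is rung
 * once the whole chain has been written.
 *
 * Sketch of a hypothetical consumer (names such as "dma_addr", "len"
 * and "mr" are assumptions, not part of this driver); such a caller
 * reaches this function through ib_post_send():
 *
 *	struct ib_sge sge = {
 *		.addr   = dma_addr,	/+ DMA-mapped buffer (assumed) +/
 *		.length = len,
 *		.lkey   = mr->lkey,
 *	};
 *	struct ib_send_wr wr = {
 *		.wr_id      = 1,
 *		.sg_list    = &sge,
 *		.num_sge    = 1,
 *		.opcode     = IB_WR_SEND,
 *		.send_flags = IB_SEND_SIGNALED,
 *	};
 *	struct ib_send_wr *bad_wr;
 *	int err = ib_post_send(qp, &wr, &bad_wr);
 */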
int mthca_tavor_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
			  struct ib_send_wr **bad_wr)
{
	struct mthca_dev *dev = to_mdev(ibqp->device);
	struct mthca_qp *qp = to_mqp(ibqp);
	void *wqe;
	void *prev_wqe;
	unsigned long flags;
	int err = 0;
	int nreq;
	int i;
	int size;
	int size0 = 0;
	u32 f0 = 0;
	int ind;
	u8 op0 = 0;

	spin_lock_irqsave(&qp->sq.lock, flags);

	/* XXX check that state is OK to post send */

	ind = qp->sq.next_ind;

	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		if (mthca_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {
			mthca_err(dev, "SQ %06x full (%u head, %u tail,"
				  " %d max, %d nreq)\n", qp->qpn,
				  qp->sq.head, qp->sq.tail,
				  qp->sq.max, nreq);
			err = -ENOMEM;
			*bad_wr = wr;
			goto out;
		}

		wqe = get_send_wqe(qp, ind);
		prev_wqe = qp->sq.last;
		qp->sq.last = wqe;

		((struct mthca_next_seg *) wqe)->nda_op = 0;
		((struct mthca_next_seg *) wqe)->ee_nds = 0;
		((struct mthca_next_seg *) wqe)->flags =
			((wr->send_flags & IB_SEND_SIGNALED) ?
			 cpu_to_be32(MTHCA_NEXT_CQ_UPDATE) : 0) |
			((wr->send_flags & IB_SEND_SOLICITED) ?
			 cpu_to_be32(MTHCA_NEXT_SOLICIT) : 0) |
			cpu_to_be32(1);
		if (wr->opcode == IB_WR_SEND_WITH_IMM ||
		    wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM)
			((struct mthca_next_seg *) wqe)->imm = wr->imm_data;

		wqe += sizeof (struct mthca_next_seg);
		size = sizeof (struct mthca_next_seg) / 16;

		switch (qp->transport) {
		case RC:
			switch (wr->opcode) {
			case IB_WR_ATOMIC_CMP_AND_SWP:
			case IB_WR_ATOMIC_FETCH_AND_ADD:
				((struct mthca_raddr_seg *) wqe)->raddr =
					cpu_to_be64(wr->wr.atomic.remote_addr);
				((struct mthca_raddr_seg *) wqe)->rkey =
					cpu_to_be32(wr->wr.atomic.rkey);
				((struct mthca_raddr_seg *) wqe)->reserved = 0;

				wqe += sizeof (struct mthca_raddr_seg);

				if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
					((struct mthca_atomic_seg *) wqe)->swap_add =
						cpu_to_be64(wr->wr.atomic.swap);
					((struct mthca_atomic_seg *) wqe)->compare =
						cpu_to_be64(wr->wr.atomic.compare_add);
				} else {
					((struct mthca_atomic_seg *) wqe)->swap_add =
						cpu_to_be64(wr->wr.atomic.compare_add);
					((struct mthca_atomic_seg *) wqe)->compare = 0;
				}

				wqe += sizeof (struct mthca_atomic_seg);
				size += (sizeof (struct mthca_raddr_seg) +
					 sizeof (struct mthca_atomic_seg)) / 16;
				break;

			case IB_WR_RDMA_WRITE:
			case IB_WR_RDMA_WRITE_WITH_IMM:
			case IB_WR_RDMA_READ:
				((struct mthca_raddr_seg *) wqe)->raddr =
					cpu_to_be64(wr->wr.rdma.remote_addr);
				((struct mthca_raddr_seg *) wqe)->rkey =
					cpu_to_be32(wr->wr.rdma.rkey);
				((struct mthca_raddr_seg *) wqe)->reserved = 0;
				wqe += sizeof (struct mthca_raddr_seg);
				size += sizeof (struct mthca_raddr_seg) / 16;
				break;

			default:
				/* No extra segments required for sends */
				break;
			}

			break;

		case UC:
			switch (wr->opcode) {
			case IB_WR_RDMA_WRITE:
			case IB_WR_RDMA_WRITE_WITH_IMM:
				((struct mthca_raddr_seg *) wqe)->raddr =
					cpu_to_be64(wr->wr.rdma.remote_addr);
				((struct mthca_raddr_seg *) wqe)->rkey =
					cpu_to_be32(wr->wr.rdma.rkey);
				((struct mthca_raddr_seg *) wqe)->reserved = 0;
				wqe += sizeof (struct mthca_raddr_seg);
				size += sizeof (struct mthca_raddr_seg) / 16;
				break;

			default:
				/* No extra segments required for sends */
				break;
			}

			break;

		case UD:
			((struct mthca_tavor_ud_seg *) wqe)->lkey =
				cpu_to_be32(to_mah(wr->wr.ud.ah)->key);
			((struct mthca_tavor_ud_seg *) wqe)->av_addr =
				cpu_to_be64(to_mah(wr->wr.ud.ah)->avdma);
			((struct mthca_tavor_ud_seg *) wqe)->dqpn =
				cpu_to_be32(wr->wr.ud.remote_qpn);
			((struct mthca_tavor_ud_seg *) wqe)->qkey =
				cpu_to_be32(wr->wr.ud.remote_qkey);

			wqe += sizeof (struct mthca_tavor_ud_seg);
			size += sizeof (struct mthca_tavor_ud_seg) / 16;
			break;

		case MLX:
			err = build_mlx_header(dev, to_msqp(qp), ind, wr,
					       wqe - sizeof (struct mthca_next_seg),
					       wqe);
			if (err) {
				*bad_wr = wr;
				goto out;
			}
			wqe += sizeof (struct mthca_data_seg);
			size += sizeof (struct mthca_data_seg) / 16;
			break;
		}

		if (wr->num_sge > qp->sq.max_gs) {
			mthca_err(dev, "too many gathers\n");
			err = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		for (i = 0; i < wr->num_sge; ++i) {
			((struct mthca_data_seg *) wqe)->byte_count =
				cpu_to_be32(wr->sg_list[i].length);
			((struct mthca_data_seg *) wqe)->lkey =
				cpu_to_be32(wr->sg_list[i].lkey);
			((struct mthca_data_seg *) wqe)->addr =
				cpu_to_be64(wr->sg_list[i].addr);
			wqe += sizeof (struct mthca_data_seg);
			size += sizeof (struct mthca_data_seg) / 16;
		}

		/* Add one more inline data segment for ICRC */
		if (qp->transport == MLX) {
			((struct mthca_data_seg *) wqe)->byte_count =
				cpu_to_be32((1 << 31) | 4);
			((u32 *) wqe)[1] = 0;
			wqe += sizeof (struct mthca_data_seg);
			size += sizeof (struct mthca_data_seg) / 16;
		}

		qp->wrid[ind + qp->rq.max] = wr->wr_id;

		if (wr->opcode >= ARRAY_SIZE(mthca_opcode)) {
			mthca_err(dev, "opcode invalid\n");
			err = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		((struct mthca_next_seg *) prev_wqe)->nda_op =
			cpu_to_be32(((ind << qp->sq.wqe_shift) +
				     qp->send_wqe_offset) |
				    mthca_opcode[wr->opcode]);
		wmb();
		((struct mthca_next_seg *) prev_wqe)->ee_nds =
			cpu_to_be32((size0 ? 0 : MTHCA_NEXT_DBD) | size |
				    ((wr->send_flags & IB_SEND_FENCE) ?
				     MTHCA_NEXT_FENCE : 0));

		if (!size0) {
			size0 = size;
			op0 = mthca_opcode[wr->opcode];
		}

		++ind;
		if (unlikely(ind >= qp->sq.max))
			ind -= qp->sq.max;
	}

out:
	if (likely(nreq)) {
		__be32 doorbell[2];

		doorbell[0] = cpu_to_be32(((qp->sq.next_ind << qp->sq.wqe_shift) +
					   qp->send_wqe_offset) | f0 | op0);
		doorbell[1] = cpu_to_be32((qp->qpn << 8) | size0);

		wmb();

		mthca_write64(doorbell,
			      dev->kar + MTHCA_SEND_DOORBELL,
			      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
	}

	qp->sq.next_ind = ind;
	qp->sq.head += nreq;

	spin_unlock_irqrestore(&qp->sq.lock, flags);
	return err;
}

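/*
 * Post receive work requests on a Tavor-mode QP.  The nreq field of a
 * receive doorbell can only describe a limited number of WQEs, so the
 * doorbell is rung every MTHCA_TAVOR_MAX_WQES_PER_RECV_DB requests
 * rather than once at the end of the chain.
 *
 * A hypothetical caller (sketch only; "dma_addr", "len" and "mr" are
 * assumptions) reaches this function through ib_post_recv():
 *
 *	struct ib_sge sge = {
 *		.addr   = dma_addr,
 *		.length = len,
 *		.lkey   = mr->lkey,
 *	};
 *	struct ib_recv_wr wr = {
 *		.wr_id   = 2,
 *		.sg_list = &sge,
 *		.num_sge = 1,
 *	};
 *	struct ib_recv_wr *bad_wr;
 *	int err = ib_post_recv(qp, &wr, &bad_wr);
 */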
int mthca_tavor_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
			     struct ib_recv_wr **bad_wr)
{
	struct mthca_dev *dev = to_mdev(ibqp->device);
	struct mthca_qp *qp = to_mqp(ibqp);
	__be32 doorbell[2];
	unsigned long flags;
	int err = 0;
	int nreq;
	int i;
	int size;
	int size0 = 0;
	int ind;
	void *wqe;
	void *prev_wqe;

	spin_lock_irqsave(&qp->rq.lock, flags);

	/* XXX check that state is OK to post receive */

	ind = qp->rq.next_ind;

	for (nreq = 0; wr; wr = wr->next) {
		if (mthca_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) {
			mthca_err(dev, "RQ %06x full (%u head, %u tail,"
				  " %d max, %d nreq)\n", qp->qpn,
				  qp->rq.head, qp->rq.tail,
				  qp->rq.max, nreq);
			err = -ENOMEM;
			*bad_wr = wr;
			goto out;
		}

		wqe = get_recv_wqe(qp, ind);
		prev_wqe = qp->rq.last;
		qp->rq.last = wqe;

		((struct mthca_next_seg *) wqe)->nda_op = 0;
		((struct mthca_next_seg *) wqe)->ee_nds =
			cpu_to_be32(MTHCA_NEXT_DBD);
		((struct mthca_next_seg *) wqe)->flags = 0;

		wqe += sizeof (struct mthca_next_seg);
		size = sizeof (struct mthca_next_seg) / 16;

		if (unlikely(wr->num_sge > qp->rq.max_gs)) {
			err = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		for (i = 0; i < wr->num_sge; ++i) {
			((struct mthca_data_seg *) wqe)->byte_count =
				cpu_to_be32(wr->sg_list[i].length);
			((struct mthca_data_seg *) wqe)->lkey =
				cpu_to_be32(wr->sg_list[i].lkey);
			((struct mthca_data_seg *) wqe)->addr =
				cpu_to_be64(wr->sg_list[i].addr);
			wqe += sizeof (struct mthca_data_seg);
			size += sizeof (struct mthca_data_seg) / 16;
		}

		qp->wrid[ind] = wr->wr_id;

		((struct mthca_next_seg *) prev_wqe)->nda_op =
			cpu_to_be32((ind << qp->rq.wqe_shift) | 1);
		wmb();
		((struct mthca_next_seg *) prev_wqe)->ee_nds =
			cpu_to_be32(MTHCA_NEXT_DBD | size);

		if (!size0)
			size0 = size;

		++ind;
		if (unlikely(ind >= qp->rq.max))
			ind -= qp->rq.max;

		++nreq;
		if (unlikely(nreq == MTHCA_TAVOR_MAX_WQES_PER_RECV_DB)) {
			nreq = 0;

			doorbell[0] = cpu_to_be32((qp->rq.next_ind << qp->rq.wqe_shift) | size0);
			doorbell[1] = cpu_to_be32(qp->qpn << 8);

			wmb();

			mthca_write64(doorbell,
				      dev->kar + MTHCA_RECEIVE_DOORBELL,
				      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));

			/*
			 * Advance next_ind past the batch we just rang
			 * the doorbell for; otherwise the next doorbell
			 * would point at a stale first WQE.
			 */
			qp->rq.next_ind = ind;
			qp->rq.head += MTHCA_TAVOR_MAX_WQES_PER_RECV_DB;
			size0 = 0;
		}
	}

out:
	if (likely(nreq)) {
		doorbell[0] = cpu_to_be32((qp->rq.next_ind << qp->rq.wqe_shift) | size0);
		doorbell[1] = cpu_to_be32((qp->qpn << 8) | nreq);

		wmb();

		mthca_write64(doorbell,
			      dev->kar + MTHCA_RECEIVE_DOORBELL,
			      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
	}

	qp->rq.next_ind = ind;
	qp->rq.head += nreq;

	spin_unlock_irqrestore(&qp->rq.lock, flags);
	return err;
}

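/*
 * Post send work requests on an Arbel (mem-free) mode QP.  In
 * addition to the MMIO doorbell, mem-free mode keeps a doorbell
 * record in memory (*qp->sq.db) that must be updated first.  One
 * doorbell can only describe MTHCA_ARBEL_MAX_WQES_PER_SEND_DB WQEs,
 * so longer chains are flushed in batches inside the loop.
 */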
int mthca_arbel_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
			  struct ib_send_wr **bad_wr)
{
	struct mthca_dev *dev = to_mdev(ibqp->device);
	struct mthca_qp *qp = to_mqp(ibqp);
	__be32 doorbell[2];
	void *wqe;
	void *prev_wqe;
	unsigned long flags;
	int err = 0;
	int nreq;
	int i;
	int size;
	int size0 = 0;
	u32 f0 = 0;
	int ind;
	u8 op0 = 0;

	spin_lock_irqsave(&qp->sq.lock, flags);

	/* XXX check that state is OK to post send */

	ind = qp->sq.head & (qp->sq.max - 1);

	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		if (unlikely(nreq == MTHCA_ARBEL_MAX_WQES_PER_SEND_DB)) {
			nreq = 0;

			doorbell[0] = cpu_to_be32((MTHCA_ARBEL_MAX_WQES_PER_SEND_DB << 24) |
						  ((qp->sq.head & 0xffff) << 8) |
						  f0 | op0);
			doorbell[1] = cpu_to_be32((qp->qpn << 8) | size0);

			qp->sq.head += MTHCA_ARBEL_MAX_WQES_PER_SEND_DB;
			size0 = 0;

			/*
			 * Make sure that descriptors are written before
			 * doorbell record.
			 */
			wmb();
			*qp->sq.db = cpu_to_be32(qp->sq.head & 0xffff);

			/*
			 * Make sure doorbell record is written before we
			 * write MMIO send doorbell.
			 */
			wmb();
			mthca_write64(doorbell,
				      dev->kar + MTHCA_SEND_DOORBELL,
				      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
		}

		if (mthca_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {
			mthca_err(dev, "SQ %06x full (%u head, %u tail,"
				  " %d max, %d nreq)\n", qp->qpn,
				  qp->sq.head, qp->sq.tail,
				  qp->sq.max, nreq);
			err = -ENOMEM;
			*bad_wr = wr;
			goto out;
		}

		wqe = get_send_wqe(qp, ind);
		prev_wqe = qp->sq.last;
		qp->sq.last = wqe;

		((struct mthca_next_seg *) wqe)->flags =
			((wr->send_flags & IB_SEND_SIGNALED) ?
			 cpu_to_be32(MTHCA_NEXT_CQ_UPDATE) : 0) |
			((wr->send_flags & IB_SEND_SOLICITED) ?
			 cpu_to_be32(MTHCA_NEXT_SOLICIT) : 0) |
			cpu_to_be32(1);
		if (wr->opcode == IB_WR_SEND_WITH_IMM ||
		    wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM)
			((struct mthca_next_seg *) wqe)->imm = wr->imm_data;

		wqe += sizeof (struct mthca_next_seg);
		size = sizeof (struct mthca_next_seg) / 16;

		switch (qp->transport) {
		case RC:
			switch (wr->opcode) {
			case IB_WR_ATOMIC_CMP_AND_SWP:
			case IB_WR_ATOMIC_FETCH_AND_ADD:
				((struct mthca_raddr_seg *) wqe)->raddr =
					cpu_to_be64(wr->wr.atomic.remote_addr);
				((struct mthca_raddr_seg *) wqe)->rkey =
					cpu_to_be32(wr->wr.atomic.rkey);
				((struct mthca_raddr_seg *) wqe)->reserved = 0;

				wqe += sizeof (struct mthca_raddr_seg);

				if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
					((struct mthca_atomic_seg *) wqe)->swap_add =
						cpu_to_be64(wr->wr.atomic.swap);
					((struct mthca_atomic_seg *) wqe)->compare =
						cpu_to_be64(wr->wr.atomic.compare_add);
				} else {
					((struct mthca_atomic_seg *) wqe)->swap_add =
						cpu_to_be64(wr->wr.atomic.compare_add);
					((struct mthca_atomic_seg *) wqe)->compare = 0;
				}

				wqe += sizeof (struct mthca_atomic_seg);
				size += (sizeof (struct mthca_raddr_seg) +
					 sizeof (struct mthca_atomic_seg)) / 16;
				break;

			case IB_WR_RDMA_READ:
			case IB_WR_RDMA_WRITE:
			case IB_WR_RDMA_WRITE_WITH_IMM:
				((struct mthca_raddr_seg *) wqe)->raddr =
					cpu_to_be64(wr->wr.rdma.remote_addr);
				((struct mthca_raddr_seg *) wqe)->rkey =
					cpu_to_be32(wr->wr.rdma.rkey);
				((struct mthca_raddr_seg *) wqe)->reserved = 0;
				wqe += sizeof (struct mthca_raddr_seg);
				size += sizeof (struct mthca_raddr_seg) / 16;
				break;

			default:
				/* No extra segments required for sends */
				break;
			}

			break;

		case UC:
			switch (wr->opcode) {
			case IB_WR_RDMA_WRITE:
			case IB_WR_RDMA_WRITE_WITH_IMM:
				((struct mthca_raddr_seg *) wqe)->raddr =
					cpu_to_be64(wr->wr.rdma.remote_addr);
				((struct mthca_raddr_seg *) wqe)->rkey =
					cpu_to_be32(wr->wr.rdma.rkey);
				((struct mthca_raddr_seg *) wqe)->reserved = 0;
				wqe += sizeof (struct mthca_raddr_seg);
				size += sizeof (struct mthca_raddr_seg) / 16;
				break;

			default:
				/* No extra segments required for sends */
				break;
			}

			break;

		case UD:
			memcpy(((struct mthca_arbel_ud_seg *) wqe)->av,
			       to_mah(wr->wr.ud.ah)->av, MTHCA_AV_SIZE);
			((struct mthca_arbel_ud_seg *) wqe)->dqpn =
				cpu_to_be32(wr->wr.ud.remote_qpn);
			((struct mthca_arbel_ud_seg *) wqe)->qkey =
				cpu_to_be32(wr->wr.ud.remote_qkey);

			wqe += sizeof (struct mthca_arbel_ud_seg);
			size += sizeof (struct mthca_arbel_ud_seg) / 16;
			break;

		case MLX:
			err = build_mlx_header(dev, to_msqp(qp), ind, wr,
					       wqe - sizeof (struct mthca_next_seg),
					       wqe);
			if (err) {
				*bad_wr = wr;
				goto out;
			}
			wqe += sizeof (struct mthca_data_seg);
			size += sizeof (struct mthca_data_seg) / 16;
			break;
		}

		if (wr->num_sge > qp->sq.max_gs) {
			mthca_err(dev, "too many gathers\n");
			err = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		for (i = 0; i < wr->num_sge; ++i) {
			((struct mthca_data_seg *) wqe)->byte_count =
				cpu_to_be32(wr->sg_list[i].length);
			((struct mthca_data_seg *) wqe)->lkey =
				cpu_to_be32(wr->sg_list[i].lkey);
			((struct mthca_data_seg *) wqe)->addr =
				cpu_to_be64(wr->sg_list[i].addr);
			wqe += sizeof (struct mthca_data_seg);
			size += sizeof (struct mthca_data_seg) / 16;
		}

		/* Add one more inline data segment for ICRC */
		if (qp->transport == MLX) {
			((struct mthca_data_seg *) wqe)->byte_count =
				cpu_to_be32((1 << 31) | 4);
			((u32 *) wqe)[1] = 0;
			wqe += sizeof (struct mthca_data_seg);
			size += sizeof (struct mthca_data_seg) / 16;
		}

		qp->wrid[ind + qp->rq.max] = wr->wr_id;

		if (wr->opcode >= ARRAY_SIZE(mthca_opcode)) {
			mthca_err(dev, "opcode invalid\n");
			err = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		((struct mthca_next_seg *) prev_wqe)->nda_op =
			cpu_to_be32(((ind << qp->sq.wqe_shift) +
				     qp->send_wqe_offset) |
				    mthca_opcode[wr->opcode]);
		wmb();
		((struct mthca_next_seg *) prev_wqe)->ee_nds =
			cpu_to_be32(MTHCA_NEXT_DBD | size |
				    ((wr->send_flags & IB_SEND_FENCE) ?
				     MTHCA_NEXT_FENCE : 0));

		if (!size0) {
			size0 = size;
			op0 = mthca_opcode[wr->opcode];
		}

		++ind;
		if (unlikely(ind >= qp->sq.max))
			ind -= qp->sq.max;
	}

out:
	if (likely(nreq)) {
		doorbell[0] = cpu_to_be32((nreq << 24) |
					  ((qp->sq.head & 0xffff) << 8) |
					  f0 | op0);
		doorbell[1] = cpu_to_be32((qp->qpn << 8) | size0);

		qp->sq.head += nreq;

		/*
		 * Make sure that descriptors are written before
		 * doorbell record.
		 */
		wmb();
		*qp->sq.db = cpu_to_be32(qp->sq.head & 0xffff);

		/*
		 * Make sure doorbell record is written before we
		 * write MMIO send doorbell.
		 */
		wmb();
		mthca_write64(doorbell,
			      dev->kar + MTHCA_SEND_DOORBELL,
			      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
	}

	spin_unlock_irqrestore(&qp->sq.lock, flags);
	return err;
}

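/*
 * Post receive work requests on an Arbel (mem-free) mode QP.  No MMIO
 * doorbell is needed here: the hardware picks up new receive WQEs
 * through the doorbell record alone.  A scatter list shorter than
 * rq.max_gs is terminated with a sentinel entry (zero byte count and
 * the MTHCA_INVAL_LKEY sentinel).
 */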
int mthca_arbel_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
			     struct ib_recv_wr **bad_wr)
{
	struct mthca_dev *dev = to_mdev(ibqp->device);
	struct mthca_qp *qp = to_mqp(ibqp);
	unsigned long flags;
	int err = 0;
	int nreq;
	int ind;
	int i;
	void *wqe;

	spin_lock_irqsave(&qp->rq.lock, flags);

	/* XXX check that state is OK to post receive */

	ind = qp->rq.head & (qp->rq.max - 1);

	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		if (mthca_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) {
			mthca_err(dev, "RQ %06x full (%u head, %u tail,"
				  " %d max, %d nreq)\n", qp->qpn,
				  qp->rq.head, qp->rq.tail,
				  qp->rq.max, nreq);
			err = -ENOMEM;
			*bad_wr = wr;
			goto out;
		}

		wqe = get_recv_wqe(qp, ind);

		((struct mthca_next_seg *) wqe)->flags = 0;

		wqe += sizeof (struct mthca_next_seg);

		if (unlikely(wr->num_sge > qp->rq.max_gs)) {
			err = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		for (i = 0; i < wr->num_sge; ++i) {
			((struct mthca_data_seg *) wqe)->byte_count =
				cpu_to_be32(wr->sg_list[i].length);
			((struct mthca_data_seg *) wqe)->lkey =
				cpu_to_be32(wr->sg_list[i].lkey);
			((struct mthca_data_seg *) wqe)->addr =
				cpu_to_be64(wr->sg_list[i].addr);
			wqe += sizeof (struct mthca_data_seg);
		}

		if (i < qp->rq.max_gs) {
			((struct mthca_data_seg *) wqe)->byte_count = 0;
			((struct mthca_data_seg *) wqe)->lkey = cpu_to_be32(MTHCA_INVAL_LKEY);
			((struct mthca_data_seg *) wqe)->addr = 0;
		}

		qp->wrid[ind] = wr->wr_id;

		++ind;
		if (unlikely(ind >= qp->rq.max))
			ind -= qp->rq.max;
	}
out:
	if (likely(nreq)) {
		qp->rq.head += nreq;

		/*
		 * Make sure that descriptors are written before
		 * doorbell record.
		 */
		wmb();
		*qp->rq.db = cpu_to_be32(qp->rq.head & 0xffff);
	}

	spin_unlock_irqrestore(&qp->rq.lock, flags);
	return err;
}

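/*
 * For a WQE completed in error, report whether it carried the
 * doorbell-chain DBD bit (*dbd) and the nda_op value (*new_wqe)
 * needed to link the chain past the failed WQE.
 */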
void mthca_free_err_wqe(struct mthca_dev *dev, struct mthca_qp *qp, int is_send,
			int index, int *dbd, __be32 *new_wqe)
{
	struct mthca_next_seg *next;

	/*
	 * For SRQs, all WQEs generate a CQE, so we're always at the
	 * end of the doorbell chain.
	 */
	if (qp->ibqp.srq) {
		*new_wqe = 0;
		return;
	}

	if (is_send)
		next = get_send_wqe(qp, index);
	else
		next = get_recv_wqe(qp, index);

	*dbd = !!(next->ee_nds & cpu_to_be32(MTHCA_NEXT_DBD));
	if (next->ee_nds & cpu_to_be32(0x3f))
		*new_wqe = (next->nda_op & cpu_to_be32(~0x3f)) |
			(next->ee_nds & cpu_to_be32(0x3f));
	else
		*new_wqe = 0;
}

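/*
 * Set up the QP table: reserve QPNs for the firmware and the special
 * QPs, initialize the QPN allocator and lookup array, and tell the
 * HCA where the special QPs live via CONF_SPECIAL_QP.
 */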
int __devinit mthca_init_qp_table(struct mthca_dev *dev)
{
	int err;
	u8 status;
	int i;

	spin_lock_init(&dev->qp_table.lock);

	/*
	 * We reserve 2 extra QPs per port for the special QPs.  The
	 * special QP for port 1 has to be even, so round up.
	 */
	dev->qp_table.sqp_start = (dev->limits.reserved_qps + 1) & ~1UL;
	err = mthca_alloc_init(&dev->qp_table.alloc,
			       dev->limits.num_qps,
			       (1 << 24) - 1,
			       dev->qp_table.sqp_start +
			       MTHCA_MAX_PORTS * 2);
	if (err)
		return err;

	err = mthca_array_init(&dev->qp_table.qp,
			       dev->limits.num_qps);
	if (err) {
		mthca_alloc_cleanup(&dev->qp_table.alloc);
		return err;
	}

	for (i = 0; i < 2; ++i) {
		err = mthca_CONF_SPECIAL_QP(dev, i ? IB_QPT_GSI : IB_QPT_SMI,
					    dev->qp_table.sqp_start + i * 2,
					    &status);
		if (err)
			goto err_out;
		if (status) {
			mthca_warn(dev, "CONF_SPECIAL_QP returned "
				   "status %02x, aborting.\n",
				   status);
			err = -EINVAL;
			goto err_out;
		}
	}
	return 0;

 err_out:
	for (i = 0; i < 2; ++i)
		mthca_CONF_SPECIAL_QP(dev, i, 0, &status);

	mthca_array_cleanup(&dev->qp_table.qp, dev->limits.num_qps);
	mthca_alloc_cleanup(&dev->qp_table.alloc);

	return err;
}

void mthca_cleanup_qp_table(struct mthca_dev *dev)
{
	int i;
	u8 status;

	for (i = 0; i < 2; ++i)
		mthca_CONF_SPECIAL_QP(dev, i, 0, &status);

	mthca_array_cleanup(&dev->qp_table.qp, dev->limits.num_qps);
	mthca_alloc_cleanup(&dev->qp_table.alloc);
}