/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Cisco Systems. All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: mthca_qp.c 1355 2004-12-17 15:23:43Z roland $
 */

#include <linux/init.h>
#include <linux/string.h>
#include <linux/slab.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_cache.h>
#include <rdma/ib_pack.h>

#include "mthca_dev.h"
#include "mthca_cmd.h"
#include "mthca_memfree.h"
#include "mthca_wqe.h"

enum {
	MTHCA_MAX_DIRECT_QP_SIZE = 4 * PAGE_SIZE,
	MTHCA_ACK_REQ_FREQ       = 10,
	MTHCA_FLIGHT_LIMIT       = 9,
	MTHCA_UD_HEADER_SIZE     = 72, /* largest UD header possible */
	MTHCA_INLINE_HEADER_SIZE = 4,  /* data segment overhead for inline */
	MTHCA_INLINE_CHUNK_SIZE  = 16  /* inline data segment chunk */
};

enum {
	MTHCA_QP_STATE_RST      = 0,
	MTHCA_QP_STATE_INIT     = 1,
	MTHCA_QP_STATE_RTR      = 2,
	MTHCA_QP_STATE_RTS      = 3,
	MTHCA_QP_STATE_SQE      = 4,
	MTHCA_QP_STATE_SQD      = 5,
	MTHCA_QP_STATE_ERR      = 6,
	MTHCA_QP_STATE_DRAINING = 7
};

enum {
	MTHCA_QP_ST_RC  = 0x0,
	MTHCA_QP_ST_UC  = 0x1,
	MTHCA_QP_ST_RD  = 0x2,
	MTHCA_QP_ST_UD  = 0x3,
	MTHCA_QP_ST_MLX = 0x7
};

enum {
	MTHCA_QP_PM_MIGRATED = 0x3,
	MTHCA_QP_PM_ARMED    = 0x0,
	MTHCA_QP_PM_REARM    = 0x1
};

enum {
	/* qp_context flags */
	MTHCA_QP_BIT_DE  = 1 <<  8,
	/* params1 */
	MTHCA_QP_BIT_SRE = 1 << 15,
	MTHCA_QP_BIT_SWE = 1 << 14,
	MTHCA_QP_BIT_SAE = 1 << 13,
	MTHCA_QP_BIT_SIC = 1 <<  4,
	MTHCA_QP_BIT_SSC = 1 <<  3,
	/* params2 */
	MTHCA_QP_BIT_RRE = 1 << 15,
	MTHCA_QP_BIT_RWE = 1 << 14,
	MTHCA_QP_BIT_RAE = 1 << 13,
	MTHCA_QP_BIT_RIC = 1 <<  4,
	MTHCA_QP_BIT_RSC = 1 <<  3
};

struct mthca_qp_path {
	__be32 port_pkey;
	u8     rnr_retry;
	u8     g_mylmc;
	__be16 rlid;
	u8     ackto;
	u8     mgid_index;
	u8     static_rate;
	u8     hop_limit;
	__be32 sl_tclass_flowlabel;
	u8     rgid[16];
} __attribute__((packed));

struct mthca_qp_context {
	__be32 flags;
	__be32 tavor_sched_queue;	/* Reserved on Arbel */
	u8     mtu_msgmax;
	u8     rq_size_stride;		/* Reserved on Tavor */
	u8     sq_size_stride;		/* Reserved on Tavor */
	u8     rlkey_arbel_sched_queue;	/* Reserved on Tavor */
	__be32 usr_page;
	__be32 local_qpn;
	__be32 remote_qpn;
	u32    reserved1[2];
	struct mthca_qp_path pri_path;
	struct mthca_qp_path alt_path;
	__be32 rdd;
	__be32 pd;
	__be32 wqe_base;
	__be32 wqe_lkey;
	__be32 params1;
	__be32 reserved2;
	__be32 next_send_psn;
	__be32 cqn_snd;
	__be32 snd_wqe_base_l;	/* Next send WQE on Tavor */
	__be32 snd_db_index;	/* (debugging only entries) */
	__be32 last_acked_psn;
	__be32 ssn;
	__be32 params2;
	__be32 rnr_nextrecvpsn;
	__be32 ra_buff_indx;
	__be32 cqn_rcv;
	__be32 rcv_wqe_base_l;	/* Next recv WQE on Tavor */
	__be32 rcv_db_index;	/* (debugging only entries) */
	__be32 qkey;
	__be32 srqn;
	__be32 rmsn;
	__be16 rq_wqe_counter;	/* reserved on Tavor */
	__be16 sq_wqe_counter;	/* reserved on Tavor */
	u32    reserved3[18];
} __attribute__((packed));

struct mthca_qp_param {
	__be32 opt_param_mask;
	u32    reserved1;
	struct mthca_qp_context context;
	u32    reserved2[62];
} __attribute__((packed));
enum {
	MTHCA_QP_OPTPAR_ALT_ADDR_PATH     = 1 << 0,
	MTHCA_QP_OPTPAR_RRE               = 1 << 1,
	MTHCA_QP_OPTPAR_RAE               = 1 << 2,
	MTHCA_QP_OPTPAR_RWE               = 1 << 3,
	MTHCA_QP_OPTPAR_PKEY_INDEX        = 1 << 4,
	MTHCA_QP_OPTPAR_Q_KEY             = 1 << 5,
	MTHCA_QP_OPTPAR_RNR_TIMEOUT       = 1 << 6,
	MTHCA_QP_OPTPAR_PRIMARY_ADDR_PATH = 1 << 7,
	MTHCA_QP_OPTPAR_SRA_MAX           = 1 << 8,
	MTHCA_QP_OPTPAR_RRA_MAX           = 1 << 9,
	MTHCA_QP_OPTPAR_PM_STATE          = 1 << 10,
	MTHCA_QP_OPTPAR_PORT_NUM          = 1 << 11,
	MTHCA_QP_OPTPAR_RETRY_COUNT       = 1 << 12,
	MTHCA_QP_OPTPAR_ALT_RNR_RETRY     = 1 << 13,
	MTHCA_QP_OPTPAR_ACK_TIMEOUT       = 1 << 14,
	MTHCA_QP_OPTPAR_RNR_RETRY         = 1 << 15,
	MTHCA_QP_OPTPAR_SCHED_QUEUE       = 1 << 16
};

static const u8 mthca_opcode[] = {
	[IB_WR_SEND]                 = MTHCA_OPCODE_SEND,
	[IB_WR_SEND_WITH_IMM]        = MTHCA_OPCODE_SEND_IMM,
	[IB_WR_RDMA_WRITE]           = MTHCA_OPCODE_RDMA_WRITE,
	[IB_WR_RDMA_WRITE_WITH_IMM]  = MTHCA_OPCODE_RDMA_WRITE_IMM,
	[IB_WR_RDMA_READ]            = MTHCA_OPCODE_RDMA_READ,
	[IB_WR_ATOMIC_CMP_AND_SWP]   = MTHCA_OPCODE_ATOMIC_CS,
	[IB_WR_ATOMIC_FETCH_AND_ADD] = MTHCA_OPCODE_ATOMIC_FA,
};

static int is_sqp(struct mthca_dev *dev, struct mthca_qp *qp)
{
	return qp->qpn >= dev->qp_table.sqp_start &&
		qp->qpn <= dev->qp_table.sqp_start + 3;
}

static int is_qp0(struct mthca_dev *dev, struct mthca_qp *qp)
{
	return qp->qpn >= dev->qp_table.sqp_start &&
		qp->qpn <= dev->qp_table.sqp_start + 1;
}

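/*
 * The two helpers below locate a WQE inside the QP's work queue
 * buffer.  Receive WQEs start at offset 0 and send WQEs at
 * send_wqe_offset; a small queue lives in one contiguous ("direct")
 * buffer, while a larger one is spread over a page list, so the byte
 * offset (index << wqe_shift) must be split into a page index and an
 * offset within that page.
 */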
static void *get_recv_wqe(struct mthca_qp *qp, int n)
{
	if (qp->is_direct)
		return qp->queue.direct.buf + (n << qp->rq.wqe_shift);
	else
		return qp->queue.page_list[(n << qp->rq.wqe_shift) >> PAGE_SHIFT].buf +
			((n << qp->rq.wqe_shift) & (PAGE_SIZE - 1));
}

static void *get_send_wqe(struct mthca_qp *qp, int n)
{
	if (qp->is_direct)
		return qp->queue.direct.buf + qp->send_wqe_offset +
			(n << qp->sq.wqe_shift);
	else
		return qp->queue.page_list[(qp->send_wqe_offset +
					    (n << qp->sq.wqe_shift)) >>
					   PAGE_SHIFT].buf +
			((qp->send_wqe_offset + (n << qp->sq.wqe_shift)) &
			 (PAGE_SIZE - 1));
}

static void mthca_wq_init(struct mthca_wq *wq)
{
	spin_lock_init(&wq->lock);
	wq->next_ind  = 0;
	wq->last_comp = wq->max - 1;
	wq->head      = 0;
	wq->tail      = 0;
}

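/*
 * Async event dispatch: take a reference on the QP under the QP table
 * lock so it cannot be freed while its event handler runs, then drop
 * the reference and wake up anyone waiting in mthca_free_qp().
 */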
void mthca_qp_event(struct mthca_dev *dev, u32 qpn,
		    enum ib_event_type event_type)
{
	struct mthca_qp *qp;
	struct ib_event event;

	spin_lock(&dev->qp_table.lock);
	qp = mthca_array_get(&dev->qp_table.qp, qpn & (dev->limits.num_qps - 1));
	if (qp)
		atomic_inc(&qp->refcount);
	spin_unlock(&dev->qp_table.lock);

	if (!qp) {
		mthca_warn(dev, "Async event for bogus QP %08x\n", qpn);
		return;
	}

	event.device     = &dev->ib_dev;
	event.event      = event_type;
	event.element.qp = &qp->ibqp;
	if (qp->ibqp.event_handler)
		qp->ibqp.event_handler(&event, qp->ibqp.qp_context);

	if (atomic_dec_and_test(&qp->refcount))
		wake_up(&qp->wait);
}

static int to_mthca_state(enum ib_qp_state ib_state)
{
	switch (ib_state) {
	case IB_QPS_RESET: return MTHCA_QP_STATE_RST;
	case IB_QPS_INIT:  return MTHCA_QP_STATE_INIT;
	case IB_QPS_RTR:   return MTHCA_QP_STATE_RTR;
	case IB_QPS_RTS:   return MTHCA_QP_STATE_RTS;
	case IB_QPS_SQD:   return MTHCA_QP_STATE_SQD;
	case IB_QPS_SQE:   return MTHCA_QP_STATE_SQE;
	case IB_QPS_ERR:   return MTHCA_QP_STATE_ERR;
	default:           return -1;
	}
}

enum { RC, UC, UD, RD, RDEE, MLX, NUM_TRANS };

static int to_mthca_st(int transport)
{
	switch (transport) {
	case RC:  return MTHCA_QP_ST_RC;
	case UC:  return MTHCA_QP_ST_UC;
	case UD:  return MTHCA_QP_ST_UD;
	case RD:  return MTHCA_QP_ST_RD;
	case MLX: return MTHCA_QP_ST_MLX;
	default:  return -1;
	}
}

static void store_attrs(struct mthca_sqp *sqp, struct ib_qp_attr *attr,
			int attr_mask)
{
	if (attr_mask & IB_QP_PKEY_INDEX)
		sqp->pkey_index = attr->pkey_index;
	if (attr_mask & IB_QP_QKEY)
		sqp->qkey = attr->qkey;
	if (attr_mask & IB_QP_SQ_PSN)
		sqp->send_psn = attr->sq_psn;
}

static void init_port(struct mthca_dev *dev, int port)
{
	int err;
	u8 status;
	struct mthca_init_ib_param param;

	memset(&param, 0, sizeof param);

	param.port_width = dev->limits.port_width_cap;
	param.vl_cap     = dev->limits.vl_cap;
	param.mtu_cap    = dev->limits.mtu_cap;
	param.gid_cap    = dev->limits.gid_table_len;
	param.pkey_cap   = dev->limits.pkey_table_len;

	err = mthca_INIT_IB(dev, &param, port, &status);
	if (err)
		mthca_warn(dev, "INIT_IB failed, return code %d.\n", err);
	if (status)
		mthca_warn(dev, "INIT_IB returned status %02x.\n", status);
}

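/*
 * Compute the RRE/RAE/RWE bits for the QP context.  Attributes not
 * being modified in this transition fall back to the values cached in
 * the mthca_qp, and if the responder depth is zero, remote reads and
 * atomics are masked off since there are no resources to execute them.
 */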
static __be32 get_hw_access_flags(struct mthca_qp *qp, struct ib_qp_attr *attr,
				  int attr_mask)
{
	u8 dest_rd_atomic;
	u32 access_flags;
	u32 hw_access_flags = 0;

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		dest_rd_atomic = attr->max_dest_rd_atomic;
	else
		dest_rd_atomic = qp->resp_depth;

	if (attr_mask & IB_QP_ACCESS_FLAGS)
		access_flags = attr->qp_access_flags;
	else
		access_flags = qp->atomic_rd_en;

	if (!dest_rd_atomic)
		access_flags &= IB_ACCESS_REMOTE_WRITE;

	if (access_flags & IB_ACCESS_REMOTE_READ)
		hw_access_flags |= MTHCA_QP_BIT_RRE;
	if (access_flags & IB_ACCESS_REMOTE_ATOMIC)
		hw_access_flags |= MTHCA_QP_BIT_RAE;
	if (access_flags & IB_ACCESS_REMOTE_WRITE)
		hw_access_flags |= MTHCA_QP_BIT_RWE;

	return cpu_to_be32(hw_access_flags);
}

static void mthca_path_set(struct ib_ah_attr *ah, struct mthca_qp_path *path)
{
	path->g_mylmc     = ah->src_path_bits & 0x7f;
	path->rlid        = cpu_to_be16(ah->dlid);
	path->static_rate = !!ah->static_rate;

	if (ah->ah_flags & IB_AH_GRH) {
		path->g_mylmc   |= 1 << 7;
		path->mgid_index = ah->grh.sgid_index;
		path->hop_limit  = ah->grh.hop_limit;
		path->sl_tclass_flowlabel =
			cpu_to_be32((ah->sl << 28)                |
				    (ah->grh.traffic_class << 20) |
				    (ah->grh.flow_label));
		memcpy(path->rgid, ah->grh.dgid.raw, 16);
	} else
		path->sl_tclass_flowlabel = cpu_to_be32(ah->sl << 28);
}

int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
{
	struct mthca_dev *dev = to_mdev(ibqp->device);
	struct mthca_qp *qp = to_mqp(ibqp);
	enum ib_qp_state cur_state, new_state;
	struct mthca_mailbox *mailbox;
	struct mthca_qp_param *qp_param;
	struct mthca_qp_context *qp_context;
	u32 sqd_event = 0;
	u8 status;
	int err;

	if (attr_mask & IB_QP_CUR_STATE) {
		cur_state = attr->cur_qp_state;
	} else {
		spin_lock_irq(&qp->sq.lock);
		spin_lock(&qp->rq.lock);
		cur_state = qp->state;
		spin_unlock(&qp->rq.lock);
		spin_unlock_irq(&qp->sq.lock);
	}

	new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;

	if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, attr_mask)) {
		mthca_dbg(dev, "Bad QP transition (transport %d) "
			  "%d->%d with attr 0x%08x\n",
			  qp->transport, cur_state, new_state,
			  attr_mask);
		return -EINVAL;
	}

	if ((attr_mask & IB_QP_PKEY_INDEX) &&
	    attr->pkey_index >= dev->limits.pkey_table_len) {
		mthca_dbg(dev, "PKey index (%u) too large. max is %d\n",
			  attr->pkey_index, dev->limits.pkey_table_len - 1);
		return -EINVAL;
	}

	if ((attr_mask & IB_QP_PORT) &&
	    (attr->port_num == 0 || attr->port_num > dev->limits.num_ports)) {
		mthca_dbg(dev, "Port number (%u) is invalid\n", attr->port_num);
		return -EINVAL;
	}

	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
	    attr->max_rd_atomic > dev->limits.max_qp_init_rdma) {
		mthca_dbg(dev, "Max rdma_atomic as initiator %u too large (max is %d)\n",
			  attr->max_rd_atomic, dev->limits.max_qp_init_rdma);
		return -EINVAL;
	}

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC &&
	    attr->max_dest_rd_atomic > 1 << dev->qp_table.rdb_shift) {
		mthca_dbg(dev, "Max rdma_atomic as responder %u too large (max %d)\n",
			  attr->max_dest_rd_atomic, 1 << dev->qp_table.rdb_shift);
		return -EINVAL;
	}

	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	qp_param = mailbox->buf;
	qp_context = &qp_param->context;
	memset(qp_param, 0, sizeof *qp_param);

	qp_context->flags = cpu_to_be32((to_mthca_state(new_state) << 28) |
					(to_mthca_st(qp->transport) << 16));
	qp_context->flags |= cpu_to_be32(MTHCA_QP_BIT_DE);
	if (!(attr_mask & IB_QP_PATH_MIG_STATE))
		qp_context->flags |= cpu_to_be32(MTHCA_QP_PM_MIGRATED << 11);
	else {
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_PM_STATE);
		switch (attr->path_mig_state) {
		case IB_MIG_MIGRATED:
			qp_context->flags |= cpu_to_be32(MTHCA_QP_PM_MIGRATED << 11);
			break;
		case IB_MIG_REARM:
			qp_context->flags |= cpu_to_be32(MTHCA_QP_PM_REARM << 11);
			break;
		case IB_MIG_ARMED:
			qp_context->flags |= cpu_to_be32(MTHCA_QP_PM_ARMED << 11);
			break;
		}
	}

	/* leave tavor_sched_queue as 0 */

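	/*
	 * mtu_msgmax packs the path MTU enum into the top three bits
	 * and log2 of the maximum message size into the low five:
	 * MLX and UD QPs are pinned to a 2048-byte MTU and 2KB
	 * messages, while connected QPs allow up to 2^31 bytes.
	 */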
	if (qp->transport == MLX || qp->transport == UD)
		qp_context->mtu_msgmax = (IB_MTU_2048 << 5) | 11;
	else if (attr_mask & IB_QP_PATH_MTU)
		qp_context->mtu_msgmax = (attr->path_mtu << 5) | 31;

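	/*
	 * On mem-free HCAs the queue sizes live in the QP context:
	 * log2 of the number of WQEs goes in bits 3-7, and the WQE
	 * stride, encoded as log2 of the WQE size minus 4, in the
	 * low three bits.
	 */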
	if (mthca_is_memfree(dev)) {
		if (qp->rq.max)
			qp_context->rq_size_stride = long_log2(qp->rq.max) << 3;
		qp_context->rq_size_stride |= qp->rq.wqe_shift - 4;

		if (qp->sq.max)
			qp_context->sq_size_stride = long_log2(qp->sq.max) << 3;
		qp_context->sq_size_stride |= qp->sq.wqe_shift - 4;
	}

	/* leave arbel_sched_queue as 0 */

	if (qp->ibqp.uobject)
		qp_context->usr_page =
			cpu_to_be32(to_mucontext(qp->ibqp.uobject->context)->uar.index);
	else
		qp_context->usr_page = cpu_to_be32(dev->driver_uar.index);
	qp_context->local_qpn = cpu_to_be32(qp->qpn);
	if (attr_mask & IB_QP_DEST_QPN) {
		qp_context->remote_qpn = cpu_to_be32(attr->dest_qp_num);
	}

	if (qp->transport == MLX)
		qp_context->pri_path.port_pkey |=
			cpu_to_be32(to_msqp(qp)->port << 24);
	else {
		if (attr_mask & IB_QP_PORT) {
			qp_context->pri_path.port_pkey |=
				cpu_to_be32(attr->port_num << 24);
			qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_PORT_NUM);
		}
	}

	if (attr_mask & IB_QP_PKEY_INDEX) {
		qp_context->pri_path.port_pkey |=
			cpu_to_be32(attr->pkey_index);
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_PKEY_INDEX);
	}

	if (attr_mask & IB_QP_RNR_RETRY) {
		qp_context->alt_path.rnr_retry = qp_context->pri_path.rnr_retry =
			attr->rnr_retry << 5;
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RNR_RETRY |
							MTHCA_QP_OPTPAR_ALT_RNR_RETRY);
	}

	if (attr_mask & IB_QP_AV) {
		mthca_path_set(&attr->ah_attr, &qp_context->pri_path);
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_PRIMARY_ADDR_PATH);
	}

	if (attr_mask & IB_QP_TIMEOUT) {
		qp_context->pri_path.ackto = attr->timeout << 3;
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_ACK_TIMEOUT);
	}

	if (attr_mask & IB_QP_ALT_PATH) {
		if (attr->alt_port_num == 0 || attr->alt_port_num > dev->limits.num_ports) {
			mthca_dbg(dev, "Alternate port number (%u) is invalid\n",
				  attr->alt_port_num);
			return -EINVAL;
		}

		mthca_path_set(&attr->alt_ah_attr, &qp_context->alt_path);
		qp_context->alt_path.port_pkey |= cpu_to_be32(attr->alt_pkey_index |
							      attr->alt_port_num << 24);
		qp_context->alt_path.ackto = attr->alt_timeout << 3;
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_ALT_ADDR_PATH);
	}

	/* leave rdd as 0 */
	qp_context->pd       = cpu_to_be32(to_mpd(ibqp->pd)->pd_num);
	/* leave wqe_base as 0 (we always create an MR based at 0 for WQs) */
	qp_context->wqe_lkey = cpu_to_be32(qp->mr.ibmr.lkey);
	qp_context->params1  = cpu_to_be32((MTHCA_ACK_REQ_FREQ << 28) |
					   (MTHCA_FLIGHT_LIMIT << 24) |
					   MTHCA_QP_BIT_SWE);
	if (qp->sq_policy == IB_SIGNAL_ALL_WR)
		qp_context->params1 |= cpu_to_be32(MTHCA_QP_BIT_SSC);
	if (attr_mask & IB_QP_RETRY_CNT) {
		qp_context->params1 |= cpu_to_be32(attr->retry_cnt << 16);
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RETRY_COUNT);
	}

	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
		if (attr->max_rd_atomic) {
			qp_context->params1 |=
				cpu_to_be32(MTHCA_QP_BIT_SRE |
					    MTHCA_QP_BIT_SAE);
			qp_context->params1 |=
				cpu_to_be32(fls(attr->max_rd_atomic - 1) << 21);
		}
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_SRA_MAX);
	}

	if (attr_mask & IB_QP_SQ_PSN)
		qp_context->next_send_psn = cpu_to_be32(attr->sq_psn);
	qp_context->cqn_snd = cpu_to_be32(to_mcq(ibqp->send_cq)->cqn);

	if (mthca_is_memfree(dev)) {
		qp_context->snd_wqe_base_l = cpu_to_be32(qp->send_wqe_offset);
		qp_context->snd_db_index   = cpu_to_be32(qp->sq.db_index);
	}

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
		if (attr->max_dest_rd_atomic)
			qp_context->params2 |=
				cpu_to_be32(fls(attr->max_dest_rd_atomic - 1) << 21);

		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RRA_MAX);
	}

	if (attr_mask & (IB_QP_ACCESS_FLAGS | IB_QP_MAX_DEST_RD_ATOMIC)) {
		qp_context->params2      |= get_hw_access_flags(qp, attr, attr_mask);
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RWE |
							MTHCA_QP_OPTPAR_RRE |
							MTHCA_QP_OPTPAR_RAE);
	}

	qp_context->params2 |= cpu_to_be32(MTHCA_QP_BIT_RSC);

	if (ibqp->srq)
		qp_context->params2 |= cpu_to_be32(MTHCA_QP_BIT_RIC);

	if (attr_mask & IB_QP_MIN_RNR_TIMER) {
		qp_context->rnr_nextrecvpsn |= cpu_to_be32(attr->min_rnr_timer << 24);
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RNR_TIMEOUT);
	}
	if (attr_mask & IB_QP_RQ_PSN)
		qp_context->rnr_nextrecvpsn |= cpu_to_be32(attr->rq_psn);

	qp_context->ra_buff_indx =
		cpu_to_be32(dev->qp_table.rdb_base +
			    ((qp->qpn & (dev->limits.num_qps - 1)) * MTHCA_RDB_ENTRY_SIZE <<
			     dev->qp_table.rdb_shift));

	qp_context->cqn_rcv = cpu_to_be32(to_mcq(ibqp->recv_cq)->cqn);

	if (mthca_is_memfree(dev))
		qp_context->rcv_db_index = cpu_to_be32(qp->rq.db_index);

	if (attr_mask & IB_QP_QKEY) {
		qp_context->qkey = cpu_to_be32(attr->qkey);
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_Q_KEY);
	}

	if (ibqp->srq)
		qp_context->srqn = cpu_to_be32(1 << 24 |
					       to_msrq(ibqp->srq)->srqn);

	if (cur_state == IB_QPS_RTS && new_state == IB_QPS_SQD &&
	    attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY              &&
	    attr->en_sqd_async_notify)
		sqd_event = 1 << 31;

	err = mthca_MODIFY_QP(dev, cur_state, new_state, qp->qpn, 0,
			      mailbox, sqd_event, &status);
	if (status) {
		mthca_warn(dev, "modify QP %d->%d returned status %02x.\n",
			   cur_state, new_state, status);
		err = -EINVAL;
	}

	if (!err) {
		qp->state = new_state;
		if (attr_mask & IB_QP_ACCESS_FLAGS)
			qp->atomic_rd_en = attr->qp_access_flags;
		if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
			qp->resp_depth = attr->max_dest_rd_atomic;
	}

	mthca_free_mailbox(dev, mailbox);

	if (is_sqp(dev, qp))
		store_attrs(to_msqp(qp), attr, attr_mask);

	/*
	 * If we moved QP0 to RTR, bring the IB link up; if we moved
	 * QP0 to RESET or ERROR, bring the link back down.
	 */
	if (is_qp0(dev, qp)) {
		if (cur_state != IB_QPS_RTR &&
		    new_state == IB_QPS_RTR)
			init_port(dev, to_msqp(qp)->port);

		if (cur_state != IB_QPS_RESET &&
		    cur_state != IB_QPS_ERR &&
		    (new_state == IB_QPS_RESET ||
		     new_state == IB_QPS_ERR))
			mthca_CLOSE_IB(dev, to_msqp(qp)->port, &status);
	}

	/*
	 * If we moved a kernel QP to RESET, clean up all old CQ
	 * entries and reinitialize the QP.
	 */
	if (!err && new_state == IB_QPS_RESET && !qp->ibqp.uobject) {
		mthca_cq_clean(dev, to_mcq(qp->ibqp.send_cq)->cqn, qp->qpn,
			       qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
		if (qp->ibqp.send_cq != qp->ibqp.recv_cq)
			mthca_cq_clean(dev, to_mcq(qp->ibqp.recv_cq)->cqn, qp->qpn,
				       qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);

		mthca_wq_init(&qp->sq);
		qp->sq.last = get_send_wqe(qp, qp->sq.max - 1);

		mthca_wq_init(&qp->rq);
		qp->rq.last = get_recv_wqe(qp, qp->rq.max - 1);

		if (mthca_is_memfree(dev)) {
			*qp->sq.db = 0;
			*qp->rq.db = 0;
		}
	}

	return err;
}

static int mthca_max_data_size(struct mthca_dev *dev, struct mthca_qp *qp, int desc_sz)
{
	/*
	 * Calculate the maximum size of WQE s/g segments, excluding
	 * the next segment and other non-data segments.
	 */
	int max_data_size = desc_sz - sizeof (struct mthca_next_seg);

	switch (qp->transport) {
	case MLX:
		max_data_size -= 2 * sizeof (struct mthca_data_seg);
		break;

	case UD:
		if (mthca_is_memfree(dev))
			max_data_size -= sizeof (struct mthca_arbel_ud_seg);
		else
			max_data_size -= sizeof (struct mthca_tavor_ud_seg);
		break;

	default:
		max_data_size -= sizeof (struct mthca_raddr_seg);
		break;
	}

	return max_data_size;
}

static inline int mthca_max_inline_data(struct mthca_pd *pd, int max_data_size)
{
	/* We don't support inline data for kernel QPs (yet). */
	return pd->ibpd.uobject ? max_data_size - MTHCA_INLINE_HEADER_SIZE : 0;
}

static void mthca_adjust_qp_caps(struct mthca_dev *dev,
				 struct mthca_pd *pd,
				 struct mthca_qp *qp)
{
	int max_data_size = mthca_max_data_size(dev, qp,
						min(dev->limits.max_desc_sz,
						    1 << qp->sq.wqe_shift));

	qp->max_inline_data = mthca_max_inline_data(pd, max_data_size);

	qp->sq.max_gs = min_t(int, dev->limits.max_sg,
			      max_data_size / sizeof (struct mthca_data_seg));
	qp->rq.max_gs = min_t(int, dev->limits.max_sg,
			      (min(dev->limits.max_desc_sz, 1 << qp->rq.wqe_shift) -
			       sizeof (struct mthca_next_seg)) /
			      sizeof (struct mthca_data_seg));
}

/*
 * Allocate and register buffer for WQEs.  qp->rq.max, sq.max,
 * rq.max_gs and sq.max_gs must all be assigned.
 * mthca_alloc_wqe_buf will calculate rq.wqe_shift and
 * sq.wqe_shift (as well as send_wqe_offset, is_direct, and
 * queue)
 */
static int mthca_alloc_wqe_buf(struct mthca_dev *dev,
			       struct mthca_pd *pd,
			       struct mthca_qp *qp)
{
	int size;
	int err = -ENOMEM;

	size = sizeof (struct mthca_next_seg) +
		qp->rq.max_gs * sizeof (struct mthca_data_seg);

	if (size > dev->limits.max_desc_sz)
		return -EINVAL;

	for (qp->rq.wqe_shift = 6; 1 << qp->rq.wqe_shift < size;
	     qp->rq.wqe_shift++)
		; /* nothing */

	size = qp->sq.max_gs * sizeof (struct mthca_data_seg);
	switch (qp->transport) {
	case MLX:
		size += 2 * sizeof (struct mthca_data_seg);
		break;

	case UD:
		size += mthca_is_memfree(dev) ?
			sizeof (struct mthca_arbel_ud_seg) :
			sizeof (struct mthca_tavor_ud_seg);
		break;

	case UC:
		size += sizeof (struct mthca_raddr_seg);
		break;

	case RC:
		size += sizeof (struct mthca_raddr_seg);
		/*
		 * An atomic op will require an atomic segment, a
		 * remote address segment and one scatter entry.
		 */
		size = max_t(int, size,
			     sizeof (struct mthca_atomic_seg) +
			     sizeof (struct mthca_raddr_seg) +
			     sizeof (struct mthca_data_seg));
		break;

	default:
		break;
	}

	/* Make sure that we have enough space for a bind request */
	size = max_t(int, size, sizeof (struct mthca_bind_seg));

	size += sizeof (struct mthca_next_seg);

	if (size > dev->limits.max_desc_sz)
		return -EINVAL;

	for (qp->sq.wqe_shift = 6; 1 << qp->sq.wqe_shift < size;
	     qp->sq.wqe_shift++)
		; /* nothing */

	qp->send_wqe_offset = ALIGN(qp->rq.max << qp->rq.wqe_shift,
				    1 << qp->sq.wqe_shift);

	/*
	 * If this is a userspace QP, we don't actually have to
	 * allocate anything.  All we need is to calculate the WQE
	 * sizes and the send_wqe_offset, so we're done now.
	 */
	if (pd->ibpd.uobject)
		return 0;

	size = PAGE_ALIGN(qp->send_wqe_offset +
			  (qp->sq.max << qp->sq.wqe_shift));

	qp->wrid = kmalloc((qp->rq.max + qp->sq.max) * sizeof (u64),
			   GFP_KERNEL);
	if (!qp->wrid)
		goto err_out;

	err = mthca_buf_alloc(dev, size, MTHCA_MAX_DIRECT_QP_SIZE,
			      &qp->queue, &qp->is_direct, pd, 0, &qp->mr);
	if (err)
		goto err_out;

	return 0;

err_out:
	kfree(qp->wrid);
	return err;
}

static void mthca_free_wqe_buf(struct mthca_dev *dev,
			       struct mthca_qp *qp)
{
	mthca_buf_free(dev, PAGE_ALIGN(qp->send_wqe_offset +
				       (qp->sq.max << qp->sq.wqe_shift)),
		       &qp->queue, qp->is_direct, &qp->mr);
	kfree(qp->wrid);
}

static int mthca_map_memfree(struct mthca_dev *dev,
			     struct mthca_qp *qp)
{
	int ret;

	if (mthca_is_memfree(dev)) {
		ret = mthca_table_get(dev, dev->qp_table.qp_table, qp->qpn);
		if (ret)
			return ret;

		ret = mthca_table_get(dev, dev->qp_table.eqp_table, qp->qpn);
		if (ret)
			goto err_qpc;

		ret = mthca_table_get(dev, dev->qp_table.rdb_table,
				      qp->qpn << dev->qp_table.rdb_shift);
		if (ret)
			goto err_eqpc;
	}

	return 0;

err_eqpc:
	mthca_table_put(dev, dev->qp_table.eqp_table, qp->qpn);

err_qpc:
	mthca_table_put(dev, dev->qp_table.qp_table, qp->qpn);

	return ret;
}

static void mthca_unmap_memfree(struct mthca_dev *dev,
				struct mthca_qp *qp)
{
	mthca_table_put(dev, dev->qp_table.rdb_table,
			qp->qpn << dev->qp_table.rdb_shift);
	mthca_table_put(dev, dev->qp_table.eqp_table, qp->qpn);
	mthca_table_put(dev, dev->qp_table.qp_table, qp->qpn);
}

static int mthca_alloc_memfree(struct mthca_dev *dev,
			       struct mthca_qp *qp)
{
	if (mthca_is_memfree(dev)) {
		qp->rq.db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_RQ,
						 qp->qpn, &qp->rq.db);
		if (qp->rq.db_index < 0)
			return -ENOMEM;

		qp->sq.db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_SQ,
						 qp->qpn, &qp->sq.db);
		if (qp->sq.db_index < 0) {
			mthca_free_db(dev, MTHCA_DB_TYPE_RQ, qp->rq.db_index);
			return -ENOMEM;
		}
	}

	return 0;
}

static void mthca_free_memfree(struct mthca_dev *dev,
			       struct mthca_qp *qp)
{
	if (mthca_is_memfree(dev)) {
		mthca_free_db(dev, MTHCA_DB_TYPE_SQ, qp->sq.db_index);
		mthca_free_db(dev, MTHCA_DB_TYPE_RQ, qp->rq.db_index);
	}
}

static int mthca_alloc_qp_common(struct mthca_dev *dev,
				 struct mthca_pd *pd,
				 struct mthca_cq *send_cq,
				 struct mthca_cq *recv_cq,
				 enum ib_sig_type send_policy,
				 struct mthca_qp *qp)
{
	int ret;
	int i;

	atomic_set(&qp->refcount, 1);
	init_waitqueue_head(&qp->wait);
	qp->state        = IB_QPS_RESET;
	qp->atomic_rd_en = 0;
	qp->resp_depth   = 0;
	qp->sq_policy    = send_policy;
	mthca_wq_init(&qp->sq);
	mthca_wq_init(&qp->rq);

	ret = mthca_map_memfree(dev, qp);
	if (ret)
		return ret;

	ret = mthca_alloc_wqe_buf(dev, pd, qp);
	if (ret) {
		mthca_unmap_memfree(dev, qp);
		return ret;
	}

	mthca_adjust_qp_caps(dev, pd, qp);

	/*
	 * If this is a userspace QP, we're done now.  The doorbells
	 * will be allocated and buffers will be initialized in
	 * userspace.
	 */
	if (pd->ibpd.uobject)
		return 0;

	ret = mthca_alloc_memfree(dev, qp);
	if (ret) {
		mthca_free_wqe_buf(dev, qp);
		mthca_unmap_memfree(dev, qp);
		return ret;
	}

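	/*
	 * On mem-free HCAs the hardware expects the receive ring to
	 * be pre-linked: each WQE's next segment points at the
	 * following entry, and every scatter entry carries the
	 * invalid lkey so unused entries are ignored.  Send WQEs
	 * only need their next pointers seeded; opcodes are filled
	 * in at post time.
	 */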
	if (mthca_is_memfree(dev)) {
		struct mthca_next_seg *next;
		struct mthca_data_seg *scatter;
		int size = (sizeof (struct mthca_next_seg) +
			    qp->rq.max_gs * sizeof (struct mthca_data_seg)) / 16;

		for (i = 0; i < qp->rq.max; ++i) {
			next = get_recv_wqe(qp, i);
			next->nda_op = cpu_to_be32(((i + 1) & (qp->rq.max - 1)) <<
						   qp->rq.wqe_shift);
			next->ee_nds = cpu_to_be32(size);

			for (scatter = (void *) (next + 1);
			     (void *) scatter < (void *) next + (1 << qp->rq.wqe_shift);
			     ++scatter)
				scatter->lkey = cpu_to_be32(MTHCA_INVAL_LKEY);
		}

		for (i = 0; i < qp->sq.max; ++i) {
			next = get_send_wqe(qp, i);
			next->nda_op = cpu_to_be32((((i + 1) & (qp->sq.max - 1)) <<
						    qp->sq.wqe_shift) +
						   qp->send_wqe_offset);
		}
	}

	qp->sq.last = get_send_wqe(qp, qp->sq.max - 1);
	qp->rq.last = get_recv_wqe(qp, qp->rq.max - 1);

	return 0;
}

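/*
 * Validate the requested queue sizes against device limits and store
 * them in the QP.  Mem-free HCAs need power-of-two queue sizes, so
 * the requested work request counts are rounded up there.
 */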
static int mthca_set_qp_size(struct mthca_dev *dev, struct ib_qp_cap *cap,
			     struct mthca_pd *pd, struct mthca_qp *qp)
{
	int max_data_size = mthca_max_data_size(dev, qp, dev->limits.max_desc_sz);

	/* Sanity check QP size before proceeding */
	if (cap->max_send_wr     > dev->limits.max_wqes ||
	    cap->max_recv_wr     > dev->limits.max_wqes ||
	    cap->max_send_sge    > dev->limits.max_sg   ||
	    cap->max_recv_sge    > dev->limits.max_sg   ||
	    cap->max_inline_data > mthca_max_inline_data(pd, max_data_size))
		return -EINVAL;

	/*
	 * For MLX transport we need 2 extra S/G entries:
	 * one for the header and one for the checksum at the end
	 */
	if (qp->transport == MLX && cap->max_recv_sge + 2 > dev->limits.max_sg)
		return -EINVAL;

	if (mthca_is_memfree(dev)) {
		qp->rq.max = cap->max_recv_wr ?
			roundup_pow_of_two(cap->max_recv_wr) : 0;
		qp->sq.max = cap->max_send_wr ?
			roundup_pow_of_two(cap->max_send_wr) : 0;
	} else {
		qp->rq.max = cap->max_recv_wr;
		qp->sq.max = cap->max_send_wr;
	}

	qp->rq.max_gs = cap->max_recv_sge;
	qp->sq.max_gs = max_t(int, cap->max_send_sge,
			      ALIGN(cap->max_inline_data + MTHCA_INLINE_HEADER_SIZE,
				    MTHCA_INLINE_CHUNK_SIZE) /
			      sizeof (struct mthca_data_seg));

	return 0;
}

int mthca_alloc_qp(struct mthca_dev *dev,
		   struct mthca_pd *pd,
		   struct mthca_cq *send_cq,
		   struct mthca_cq *recv_cq,
		   enum ib_qp_type type,
		   enum ib_sig_type send_policy,
		   struct ib_qp_cap *cap,
		   struct mthca_qp *qp)
{
	int err;

	err = mthca_set_qp_size(dev, cap, pd, qp);
	if (err)
		return err;

	switch (type) {
	case IB_QPT_RC: qp->transport = RC; break;
	case IB_QPT_UC: qp->transport = UC; break;
	case IB_QPT_UD: qp->transport = UD; break;
	default: return -EINVAL;
	}

	qp->qpn = mthca_alloc(&dev->qp_table.alloc);
	if (qp->qpn == -1)
		return -ENOMEM;

	err = mthca_alloc_qp_common(dev, pd, send_cq, recv_cq,
				    send_policy, qp);
	if (err) {
		mthca_free(&dev->qp_table.alloc, qp->qpn);
		return err;
	}

	spin_lock_irq(&dev->qp_table.lock);
	mthca_array_set(&dev->qp_table.qp,
			qp->qpn & (dev->limits.num_qps - 1), qp);
	spin_unlock_irq(&dev->qp_table.lock);

	return 0;
}

int mthca_alloc_sqp(struct mthca_dev *dev,
		    struct mthca_pd *pd,
		    struct mthca_cq *send_cq,
		    struct mthca_cq *recv_cq,
		    enum ib_sig_type send_policy,
		    struct ib_qp_cap *cap,
		    int qpn,
		    int port,
		    struct mthca_sqp *sqp)
{
	u32 mqpn = qpn * 2 + dev->qp_table.sqp_start + port - 1;
	int err;

	err = mthca_set_qp_size(dev, cap, pd, &sqp->qp);
	if (err)
		return err;

	sqp->header_buf_size = sqp->qp.sq.max * MTHCA_UD_HEADER_SIZE;
	sqp->header_buf = dma_alloc_coherent(&dev->pdev->dev, sqp->header_buf_size,
					     &sqp->header_dma, GFP_KERNEL);
	if (!sqp->header_buf)
		return -ENOMEM;

	spin_lock_irq(&dev->qp_table.lock);
	if (mthca_array_get(&dev->qp_table.qp, mqpn))
		err = -EBUSY;
	else
		mthca_array_set(&dev->qp_table.qp, mqpn, sqp);
	spin_unlock_irq(&dev->qp_table.lock);

	if (err)
		goto err_out;

	sqp->port         = port;
	sqp->qp.qpn       = mqpn;
	sqp->qp.transport = MLX;

	err = mthca_alloc_qp_common(dev, pd, send_cq, recv_cq,
				    send_policy, &sqp->qp);
	if (err)
		goto err_out_free;

	atomic_inc(&pd->sqp_count);

	return 0;

 err_out_free:
	/*
	 * Lock CQs here, so that CQ polling code can do QP lookup
	 * without taking a lock.
	 */
	spin_lock_irq(&send_cq->lock);
	if (send_cq != recv_cq)
		spin_lock(&recv_cq->lock);

	spin_lock(&dev->qp_table.lock);
	mthca_array_clear(&dev->qp_table.qp, mqpn);
	spin_unlock(&dev->qp_table.lock);

	if (send_cq != recv_cq)
		spin_unlock(&recv_cq->lock);
	spin_unlock_irq(&send_cq->lock);

 err_out:
	dma_free_coherent(&dev->pdev->dev, sqp->header_buf_size,
			  sqp->header_buf, sqp->header_dma);

	return err;
}

void mthca_free_qp(struct mthca_dev *dev,
		   struct mthca_qp *qp)
{
	u8 status;
	struct mthca_cq *send_cq;
	struct mthca_cq *recv_cq;

	send_cq = to_mcq(qp->ibqp.send_cq);
	recv_cq = to_mcq(qp->ibqp.recv_cq);

	/*
	 * Lock CQs here, so that CQ polling code can do QP lookup
	 * without taking a lock.
	 */
	spin_lock_irq(&send_cq->lock);
	if (send_cq != recv_cq)
		spin_lock(&recv_cq->lock);

	spin_lock(&dev->qp_table.lock);
	mthca_array_clear(&dev->qp_table.qp,
			  qp->qpn & (dev->limits.num_qps - 1));
	spin_unlock(&dev->qp_table.lock);

	if (send_cq != recv_cq)
		spin_unlock(&recv_cq->lock);
	spin_unlock_irq(&send_cq->lock);

	atomic_dec(&qp->refcount);
	wait_event(qp->wait, !atomic_read(&qp->refcount));

	if (qp->state != IB_QPS_RESET)
		mthca_MODIFY_QP(dev, qp->state, IB_QPS_RESET, qp->qpn, 0,
				NULL, 0, &status);

	/*
	 * If this is a userspace QP, the buffers, MR, CQs and so on
	 * will be cleaned up in userspace, so all we have to do is
	 * unref the mem-free tables and free the QPN in our table.
	 */
	if (!qp->ibqp.uobject) {
		mthca_cq_clean(dev, to_mcq(qp->ibqp.send_cq)->cqn, qp->qpn,
			       qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
		if (qp->ibqp.send_cq != qp->ibqp.recv_cq)
			mthca_cq_clean(dev, to_mcq(qp->ibqp.recv_cq)->cqn, qp->qpn,
				       qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);

		mthca_free_memfree(dev, qp);
		mthca_free_wqe_buf(dev, qp);
	}

	mthca_unmap_memfree(dev, qp);

	if (is_sqp(dev, qp)) {
		atomic_dec(&(to_mpd(qp->ibqp.pd)->sqp_count));
		dma_free_coherent(&dev->pdev->dev,
				  to_msqp(qp)->header_buf_size,
				  to_msqp(qp)->header_buf,
				  to_msqp(qp)->header_dma);
	} else
		mthca_free(&dev->qp_table.alloc, qp->qpn);
}

/* Create UD header for an MLX send and build a data segment for it */
static int build_mlx_header(struct mthca_dev *dev, struct mthca_sqp *sqp,
			    int ind, struct ib_send_wr *wr,
			    struct mthca_mlx_seg *mlx,
			    struct mthca_data_seg *data)
{
	int header_size;
	int err;
	u16 pkey;

	ib_ud_header_init(256, /* assume a MAD */
			  mthca_ah_grh_present(to_mah(wr->wr.ud.ah)),
			  &sqp->ud_header);

	err = mthca_read_ah(dev, to_mah(wr->wr.ud.ah), &sqp->ud_header);
	if (err)
		return err;
	mlx->flags &= ~cpu_to_be32(MTHCA_NEXT_SOLICIT | 1);
	mlx->flags |= cpu_to_be32((!sqp->qp.ibqp.qp_num ? MTHCA_MLX_VL15 : 0) |
				  (sqp->ud_header.lrh.destination_lid ==
				   IB_LID_PERMISSIVE ? MTHCA_MLX_SLR : 0) |
				  (sqp->ud_header.lrh.service_level << 8));
	mlx->rlid = sqp->ud_header.lrh.destination_lid;
	mlx->vcrc = 0;

	switch (wr->opcode) {
	case IB_WR_SEND:
		sqp->ud_header.bth.opcode = IB_OPCODE_UD_SEND_ONLY;
		sqp->ud_header.immediate_present = 0;
		break;
	case IB_WR_SEND_WITH_IMM:
		sqp->ud_header.bth.opcode = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE;
		sqp->ud_header.immediate_present = 1;
		sqp->ud_header.immediate_data = wr->imm_data;
		break;
	default:
		return -EINVAL;
	}

	sqp->ud_header.lrh.virtual_lane = !sqp->qp.ibqp.qp_num ? 15 : 0;
	if (sqp->ud_header.lrh.destination_lid == IB_LID_PERMISSIVE)
		sqp->ud_header.lrh.source_lid = IB_LID_PERMISSIVE;
	sqp->ud_header.bth.solicited_event = !!(wr->send_flags & IB_SEND_SOLICITED);
	if (!sqp->qp.ibqp.qp_num)
		ib_get_cached_pkey(&dev->ib_dev, sqp->port,
				   sqp->pkey_index, &pkey);
	else
		ib_get_cached_pkey(&dev->ib_dev, sqp->port,
				   wr->wr.ud.pkey_index, &pkey);
	sqp->ud_header.bth.pkey = cpu_to_be16(pkey);
	sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->wr.ud.remote_qpn);
	sqp->ud_header.bth.psn = cpu_to_be32((sqp->send_psn++) & ((1 << 24) - 1));
	sqp->ud_header.deth.qkey = cpu_to_be32(wr->wr.ud.remote_qkey & 0x80000000 ?
					       sqp->qkey : wr->wr.ud.remote_qkey);
	sqp->ud_header.deth.source_qpn = cpu_to_be32(sqp->qp.ibqp.qp_num);

	header_size = ib_ud_header_pack(&sqp->ud_header,
					sqp->header_buf +
					ind * MTHCA_UD_HEADER_SIZE);

	data->byte_count = cpu_to_be32(header_size);
	data->lkey       = cpu_to_be32(to_mpd(sqp->qp.ibqp.pd)->ntmr.ibmr.lkey);
	data->addr       = cpu_to_be64(sqp->header_dma +
				       ind * MTHCA_UD_HEADER_SIZE);

	return 0;
}

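/*
 * Check whether posting nreq more work requests would overflow the
 * work queue.  The first comparison runs without the CQ lock and may
 * see a stale tail; only if the queue looks full do we take the CQ
 * lock, which serializes against completion processing, and recheck.
 */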
static inline int mthca_wq_overflow(struct mthca_wq *wq, int nreq,
				    struct ib_cq *ib_cq)
{
	unsigned cur;
	struct mthca_cq *cq;

	cur = wq->head - wq->tail;
	if (likely(cur + nreq < wq->max))
		return 0;

	cq = to_mcq(ib_cq);
	spin_lock(&cq->lock);
	cur = wq->head - wq->tail;
	spin_unlock(&cq->lock);

	return cur + nreq >= wq->max;
}

int mthca_tavor_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
			  struct ib_send_wr **bad_wr)
{
	struct mthca_dev *dev = to_mdev(ibqp->device);
	struct mthca_qp *qp = to_mqp(ibqp);
	void *wqe;
	void *prev_wqe;
	unsigned long flags;
	int err = 0;
	int nreq;
	int i;
	int size;
	int size0 = 0;
	u32 f0 = 0;
	int ind;
	u8 op0 = 0;

	spin_lock_irqsave(&qp->sq.lock, flags);

	/* XXX check that state is OK to post send */

	ind = qp->sq.next_ind;

	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		if (mthca_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {
			mthca_err(dev, "SQ %06x full (%u head, %u tail,"
				  " %d max, %d nreq)\n", qp->qpn,
				  qp->sq.head, qp->sq.tail,
				  qp->sq.max, nreq);
			err = -ENOMEM;
			*bad_wr = wr;
			goto out;
		}

		wqe = get_send_wqe(qp, ind);
		prev_wqe = qp->sq.last;
		qp->sq.last = wqe;

		((struct mthca_next_seg *) wqe)->nda_op = 0;
		((struct mthca_next_seg *) wqe)->ee_nds = 0;
		((struct mthca_next_seg *) wqe)->flags =
			((wr->send_flags & IB_SEND_SIGNALED) ?
			 cpu_to_be32(MTHCA_NEXT_CQ_UPDATE) : 0) |
			((wr->send_flags & IB_SEND_SOLICITED) ?
			 cpu_to_be32(MTHCA_NEXT_SOLICIT) : 0)   |
			cpu_to_be32(1);
		if (wr->opcode == IB_WR_SEND_WITH_IMM ||
		    wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM)
			((struct mthca_next_seg *) wqe)->imm = wr->imm_data;

		wqe += sizeof (struct mthca_next_seg);
		size = sizeof (struct mthca_next_seg) / 16;

		switch (qp->transport) {
		case RC:
			switch (wr->opcode) {
			case IB_WR_ATOMIC_CMP_AND_SWP:
			case IB_WR_ATOMIC_FETCH_AND_ADD:
				((struct mthca_raddr_seg *) wqe)->raddr =
					cpu_to_be64(wr->wr.atomic.remote_addr);
				((struct mthca_raddr_seg *) wqe)->rkey =
					cpu_to_be32(wr->wr.atomic.rkey);
				((struct mthca_raddr_seg *) wqe)->reserved = 0;

				wqe += sizeof (struct mthca_raddr_seg);

				if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
					((struct mthca_atomic_seg *) wqe)->swap_add =
						cpu_to_be64(wr->wr.atomic.swap);
					((struct mthca_atomic_seg *) wqe)->compare =
						cpu_to_be64(wr->wr.atomic.compare_add);
				} else {
					((struct mthca_atomic_seg *) wqe)->swap_add =
						cpu_to_be64(wr->wr.atomic.compare_add);
					((struct mthca_atomic_seg *) wqe)->compare = 0;
				}

				wqe += sizeof (struct mthca_atomic_seg);
				size += (sizeof (struct mthca_raddr_seg) +
					 sizeof (struct mthca_atomic_seg)) / 16;
				break;

			case IB_WR_RDMA_WRITE:
			case IB_WR_RDMA_WRITE_WITH_IMM:
			case IB_WR_RDMA_READ:
				((struct mthca_raddr_seg *) wqe)->raddr =
					cpu_to_be64(wr->wr.rdma.remote_addr);
				((struct mthca_raddr_seg *) wqe)->rkey =
					cpu_to_be32(wr->wr.rdma.rkey);
				((struct mthca_raddr_seg *) wqe)->reserved = 0;
				wqe += sizeof (struct mthca_raddr_seg);
				size += sizeof (struct mthca_raddr_seg) / 16;
				break;

			default:
				/* No extra segments required for sends */
				break;
			}

			break;

		case UC:
			switch (wr->opcode) {
			case IB_WR_RDMA_WRITE:
			case IB_WR_RDMA_WRITE_WITH_IMM:
				((struct mthca_raddr_seg *) wqe)->raddr =
					cpu_to_be64(wr->wr.rdma.remote_addr);
				((struct mthca_raddr_seg *) wqe)->rkey =
					cpu_to_be32(wr->wr.rdma.rkey);
				((struct mthca_raddr_seg *) wqe)->reserved = 0;
				wqe += sizeof (struct mthca_raddr_seg);
				size += sizeof (struct mthca_raddr_seg) / 16;
				break;

			default:
				/* No extra segments required for sends */
				break;
			}

			break;

		case UD:
			((struct mthca_tavor_ud_seg *) wqe)->lkey =
				cpu_to_be32(to_mah(wr->wr.ud.ah)->key);
			((struct mthca_tavor_ud_seg *) wqe)->av_addr =
				cpu_to_be64(to_mah(wr->wr.ud.ah)->avdma);
			((struct mthca_tavor_ud_seg *) wqe)->dqpn =
				cpu_to_be32(wr->wr.ud.remote_qpn);
			((struct mthca_tavor_ud_seg *) wqe)->qkey =
				cpu_to_be32(wr->wr.ud.remote_qkey);

			wqe += sizeof (struct mthca_tavor_ud_seg);
			size += sizeof (struct mthca_tavor_ud_seg) / 16;
			break;

		case MLX:
			err = build_mlx_header(dev, to_msqp(qp), ind, wr,
					       wqe - sizeof (struct mthca_next_seg),
					       wqe);
			if (err) {
				*bad_wr = wr;
				goto out;
			}
			wqe += sizeof (struct mthca_data_seg);
			size += sizeof (struct mthca_data_seg) / 16;
			break;
		}

		if (wr->num_sge > qp->sq.max_gs) {
			mthca_err(dev, "too many gathers\n");
			err = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		for (i = 0; i < wr->num_sge; ++i) {
			((struct mthca_data_seg *) wqe)->byte_count =
				cpu_to_be32(wr->sg_list[i].length);
			((struct mthca_data_seg *) wqe)->lkey =
				cpu_to_be32(wr->sg_list[i].lkey);
			((struct mthca_data_seg *) wqe)->addr =
				cpu_to_be64(wr->sg_list[i].addr);
			wqe += sizeof (struct mthca_data_seg);
			size += sizeof (struct mthca_data_seg) / 16;
		}

		/* Add one more inline data segment for ICRC */
		if (qp->transport == MLX) {
			((struct mthca_data_seg *) wqe)->byte_count =
				cpu_to_be32((1 << 31) | 4);
			((u32 *) wqe)[1] = 0;
			wqe += sizeof (struct mthca_data_seg);
			size += sizeof (struct mthca_data_seg) / 16;
		}

		qp->wrid[ind + qp->rq.max] = wr->wr_id;

		if (wr->opcode >= ARRAY_SIZE(mthca_opcode)) {
			mthca_err(dev, "opcode invalid\n");
			err = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		((struct mthca_next_seg *) prev_wqe)->nda_op =
			cpu_to_be32(((ind << qp->sq.wqe_shift) +
				     qp->send_wqe_offset) |
				    mthca_opcode[wr->opcode]);
		wmb();
		((struct mthca_next_seg *) prev_wqe)->ee_nds =
			cpu_to_be32((size0 ? 0 : MTHCA_NEXT_DBD) | size);

		if (!size0) {
			size0 = size;
			op0   = mthca_opcode[wr->opcode];
		}

		++ind;
		if (unlikely(ind >= qp->sq.max))
			ind -= qp->sq.max;
	}

out:
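	/*
	 * Ring the send doorbell once for the whole chain: the first
	 * word gives the offset and opcode of the first new WQE, the
	 * second the QP number and that WQE's size in 16-byte chunks.
	 * The wmb() ensures the WQEs are visible in memory before the
	 * doorbell write reaches the HCA.
	 */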
1490 if (likely(nreq)) {
Sean Hefty97f52eb2005-08-13 21:05:57 -07001491 __be32 doorbell[2];
Linus Torvalds1da177e2005-04-16 15:20:36 -07001492
		doorbell[0] = cpu_to_be32(((qp->sq.next_ind << qp->sq.wqe_shift) +
					   qp->send_wqe_offset) | f0 | op0);
		doorbell[1] = cpu_to_be32((qp->qpn << 8) | size0);

		wmb();

		mthca_write64(doorbell,
			      dev->kar + MTHCA_SEND_DOORBELL,
			      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
	}

	qp->sq.next_ind = ind;
	qp->sq.head += nreq;

	spin_unlock_irqrestore(&qp->sq.lock, flags);
	return err;
}

int mthca_tavor_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
			     struct ib_recv_wr **bad_wr)
{
	struct mthca_dev *dev = to_mdev(ibqp->device);
	struct mthca_qp *qp = to_mqp(ibqp);
	__be32 doorbell[2];
	unsigned long flags;
	int err = 0;
	int nreq;
	int i;
	int size;
	int size0 = 0;
	int ind;
	void *wqe;
	void *prev_wqe;

	spin_lock_irqsave(&qp->rq.lock, flags);

	/* XXX check that state is OK to post receive */

	ind = qp->rq.next_ind;

	for (nreq = 0; wr; ++nreq, wr = wr->next) {
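		/*
		 * The receive doorbell counts WQEs in an 8-bit field, so
		 * flush a full doorbell's worth every
		 * MTHCA_TAVOR_MAX_WQES_PER_RECV_DB requests (a count of 0
		 * in word 1 presumably means a full batch).
		 */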
		if (unlikely(nreq == MTHCA_TAVOR_MAX_WQES_PER_RECV_DB)) {
			nreq = 0;

			doorbell[0] = cpu_to_be32((qp->rq.next_ind << qp->rq.wqe_shift) | size0);
			doorbell[1] = cpu_to_be32(qp->qpn << 8);

			wmb();

			mthca_write64(doorbell,
				      dev->kar + MTHCA_RECEIVE_DOORBELL,
				      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));

			qp->rq.head += MTHCA_TAVOR_MAX_WQES_PER_RECV_DB;
			size0 = 0;
		}

		if (mthca_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) {
			mthca_err(dev, "RQ %06x full (%u head, %u tail,"
				  " %d max, %d nreq)\n", qp->qpn,
				  qp->rq.head, qp->rq.tail,
				  qp->rq.max, nreq);
			err = -ENOMEM;
			*bad_wr = wr;
			goto out;
		}

		wqe = get_recv_wqe(qp, ind);
		prev_wqe = qp->rq.last;
		qp->rq.last = wqe;

		((struct mthca_next_seg *) wqe)->nda_op = 0;
		((struct mthca_next_seg *) wqe)->ee_nds =
			cpu_to_be32(MTHCA_NEXT_DBD);
		((struct mthca_next_seg *) wqe)->flags = 0;

		wqe += sizeof (struct mthca_next_seg);
		size = sizeof (struct mthca_next_seg) / 16;

		if (unlikely(wr->num_sge > qp->rq.max_gs)) {
			err = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		for (i = 0; i < wr->num_sge; ++i) {
			((struct mthca_data_seg *) wqe)->byte_count =
				cpu_to_be32(wr->sg_list[i].length);
			((struct mthca_data_seg *) wqe)->lkey =
				cpu_to_be32(wr->sg_list[i].lkey);
			((struct mthca_data_seg *) wqe)->addr =
				cpu_to_be64(wr->sg_list[i].addr);
			wqe += sizeof (struct mthca_data_seg);
			size += sizeof (struct mthca_data_seg) / 16;
		}

		qp->wrid[ind] = wr->wr_id;

		((struct mthca_next_seg *) prev_wqe)->nda_op =
			cpu_to_be32((ind << qp->rq.wqe_shift) | 1);
		wmb();
		((struct mthca_next_seg *) prev_wqe)->ee_nds =
			cpu_to_be32(MTHCA_NEXT_DBD | size);

		if (!size0)
			size0 = size;

		++ind;
		if (unlikely(ind >= qp->rq.max))
			ind -= qp->rq.max;
	}

out:
	if (likely(nreq)) {
		doorbell[0] = cpu_to_be32((qp->rq.next_ind << qp->rq.wqe_shift) | size0);
		doorbell[1] = cpu_to_be32((qp->qpn << 8) | nreq);

		wmb();

		mthca_write64(doorbell,
			      dev->kar + MTHCA_RECEIVE_DOORBELL,
			      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
	}

	qp->rq.next_ind = ind;
	qp->rq.head += nreq;

	spin_unlock_irqrestore(&qp->rq.lock, flags);
	return err;
}

int mthca_arbel_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
			  struct ib_send_wr **bad_wr)
{
	struct mthca_dev *dev = to_mdev(ibqp->device);
	struct mthca_qp *qp = to_mqp(ibqp);
	__be32 doorbell[2];
	void *wqe;
	void *prev_wqe;
	unsigned long flags;
	int err = 0;
	int nreq;
	int i;
	int size;
	int size0 = 0;
	u32 f0 = 0;
	int ind;
	u8 op0 = 0;

	spin_lock_irqsave(&qp->sq.lock, flags);

	/* XXX check that state is OK to post send */

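	/*
	 * Mem-free (Arbel) work queues are sized to a power of two, so
	 * masking the free-running head gives the WQE index directly.
	 */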
	ind = qp->sq.head & (qp->sq.max - 1);

	for (nreq = 0; wr; ++nreq, wr = wr->next) {
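		/*
		 * Word 0 of the Arbel send doorbell holds an 8-bit WQE
		 * count in its top byte, so post at most
		 * MTHCA_ARBEL_MAX_WQES_PER_SEND_DB WQEs per doorbell and
		 * flush early when a batch fills up.
		 */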
		if (unlikely(nreq == MTHCA_ARBEL_MAX_WQES_PER_SEND_DB)) {
			nreq = 0;

			doorbell[0] = cpu_to_be32((MTHCA_ARBEL_MAX_WQES_PER_SEND_DB << 24) |
						  ((qp->sq.head & 0xffff) << 8) |
						  f0 | op0);
			doorbell[1] = cpu_to_be32((qp->qpn << 8) | size0);

			qp->sq.head += MTHCA_ARBEL_MAX_WQES_PER_SEND_DB;
			size0 = 0;

			/*
			 * Make sure that descriptors are written before
			 * doorbell record.
			 */
			wmb();
			*qp->sq.db = cpu_to_be32(qp->sq.head & 0xffff);

			/*
			 * Make sure doorbell record is written before we
			 * write MMIO send doorbell.
			 */
			wmb();
			mthca_write64(doorbell,
				      dev->kar + MTHCA_SEND_DOORBELL,
				      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
		}

		if (mthca_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {
			mthca_err(dev, "SQ %06x full (%u head, %u tail,"
				  " %d max, %d nreq)\n", qp->qpn,
				  qp->sq.head, qp->sq.tail,
				  qp->sq.max, nreq);
			err = -ENOMEM;
			*bad_wr = wr;
			goto out;
		}

		wqe = get_send_wqe(qp, ind);
		prev_wqe = qp->sq.last;
		qp->sq.last = wqe;

		((struct mthca_next_seg *) wqe)->flags =
			((wr->send_flags & IB_SEND_SIGNALED) ?
			 cpu_to_be32(MTHCA_NEXT_CQ_UPDATE) : 0) |
			((wr->send_flags & IB_SEND_SOLICITED) ?
			 cpu_to_be32(MTHCA_NEXT_SOLICIT) : 0) |
			cpu_to_be32(1);
		if (wr->opcode == IB_WR_SEND_WITH_IMM ||
		    wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM)
			((struct mthca_next_seg *) wqe)->imm = wr->imm_data;

		wqe += sizeof (struct mthca_next_seg);
		size = sizeof (struct mthca_next_seg) / 16;

		switch (qp->transport) {
		case RC:
			switch (wr->opcode) {
			case IB_WR_ATOMIC_CMP_AND_SWP:
			case IB_WR_ATOMIC_FETCH_AND_ADD:
				((struct mthca_raddr_seg *) wqe)->raddr =
					cpu_to_be64(wr->wr.atomic.remote_addr);
				((struct mthca_raddr_seg *) wqe)->rkey =
					cpu_to_be32(wr->wr.atomic.rkey);
				((struct mthca_raddr_seg *) wqe)->reserved = 0;

				wqe += sizeof (struct mthca_raddr_seg);

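				/*
				 * For fetch & add, the addend travels in
				 * the swap_add field and compare is unused.
				 */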
				if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
					((struct mthca_atomic_seg *) wqe)->swap_add =
						cpu_to_be64(wr->wr.atomic.swap);
					((struct mthca_atomic_seg *) wqe)->compare =
						cpu_to_be64(wr->wr.atomic.compare_add);
				} else {
					((struct mthca_atomic_seg *) wqe)->swap_add =
						cpu_to_be64(wr->wr.atomic.compare_add);
					((struct mthca_atomic_seg *) wqe)->compare = 0;
				}

				wqe += sizeof (struct mthca_atomic_seg);
				size += (sizeof (struct mthca_raddr_seg) +
					 sizeof (struct mthca_atomic_seg)) / 16;
				break;

			case IB_WR_RDMA_READ:
			case IB_WR_RDMA_WRITE:
			case IB_WR_RDMA_WRITE_WITH_IMM:
				((struct mthca_raddr_seg *) wqe)->raddr =
					cpu_to_be64(wr->wr.rdma.remote_addr);
				((struct mthca_raddr_seg *) wqe)->rkey =
					cpu_to_be32(wr->wr.rdma.rkey);
				((struct mthca_raddr_seg *) wqe)->reserved = 0;
				wqe += sizeof (struct mthca_raddr_seg);
				size += sizeof (struct mthca_raddr_seg) / 16;
				break;

			default:
				/* No extra segments required for sends */
				break;
			}

			break;

		case UC:
			switch (wr->opcode) {
			case IB_WR_RDMA_WRITE:
			case IB_WR_RDMA_WRITE_WITH_IMM:
				((struct mthca_raddr_seg *) wqe)->raddr =
					cpu_to_be64(wr->wr.rdma.remote_addr);
				((struct mthca_raddr_seg *) wqe)->rkey =
					cpu_to_be32(wr->wr.rdma.rkey);
				((struct mthca_raddr_seg *) wqe)->reserved = 0;
				wqe += sizeof (struct mthca_raddr_seg);
				size += sizeof (struct mthca_raddr_seg) / 16;
				break;

			default:
				/* No extra segments required for sends */
				break;
			}

			break;

		case UD:
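			/*
			 * Unlike Tavor, the Arbel UD segment embeds the
			 * whole address vector rather than a pointer to it.
			 */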
			memcpy(((struct mthca_arbel_ud_seg *) wqe)->av,
			       to_mah(wr->wr.ud.ah)->av, MTHCA_AV_SIZE);
			((struct mthca_arbel_ud_seg *) wqe)->dqpn =
				cpu_to_be32(wr->wr.ud.remote_qpn);
			((struct mthca_arbel_ud_seg *) wqe)->qkey =
				cpu_to_be32(wr->wr.ud.remote_qkey);

			wqe += sizeof (struct mthca_arbel_ud_seg);
			size += sizeof (struct mthca_arbel_ud_seg) / 16;
			break;

		case MLX:
			err = build_mlx_header(dev, to_msqp(qp), ind, wr,
					       wqe - sizeof (struct mthca_next_seg),
					       wqe);
			if (err) {
				*bad_wr = wr;
				goto out;
			}
			wqe += sizeof (struct mthca_data_seg);
			size += sizeof (struct mthca_data_seg) / 16;
			break;
		}

		if (wr->num_sge > qp->sq.max_gs) {
			mthca_err(dev, "too many gathers\n");
			err = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		for (i = 0; i < wr->num_sge; ++i) {
			((struct mthca_data_seg *) wqe)->byte_count =
				cpu_to_be32(wr->sg_list[i].length);
			((struct mthca_data_seg *) wqe)->lkey =
				cpu_to_be32(wr->sg_list[i].lkey);
			((struct mthca_data_seg *) wqe)->addr =
				cpu_to_be64(wr->sg_list[i].addr);
			wqe += sizeof (struct mthca_data_seg);
			size += sizeof (struct mthca_data_seg) / 16;
		}

		/* Add one more inline data segment for ICRC */
		if (qp->transport == MLX) {
			((struct mthca_data_seg *) wqe)->byte_count =
				cpu_to_be32((1 << 31) | 4);
			((u32 *) wqe)[1] = 0;
			wqe += sizeof (struct mthca_data_seg);
			size += sizeof (struct mthca_data_seg) / 16;
		}

		qp->wrid[ind + qp->rq.max] = wr->wr_id;

		if (wr->opcode >= ARRAY_SIZE(mthca_opcode)) {
			mthca_err(dev, "opcode invalid\n");
			err = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		((struct mthca_next_seg *) prev_wqe)->nda_op =
			cpu_to_be32(((ind << qp->sq.wqe_shift) +
				     qp->send_wqe_offset) |
				    mthca_opcode[wr->opcode]);
		wmb();
		((struct mthca_next_seg *) prev_wqe)->ee_nds =
			cpu_to_be32(MTHCA_NEXT_DBD | size);

		if (!size0) {
			size0 = size;
			op0 = mthca_opcode[wr->opcode];
		}

		++ind;
		if (unlikely(ind >= qp->sq.max))
			ind -= qp->sq.max;
	}

out:
	if (likely(nreq)) {
		doorbell[0] = cpu_to_be32((nreq << 24) |
					  ((qp->sq.head & 0xffff) << 8) |
					  f0 | op0);
		doorbell[1] = cpu_to_be32((qp->qpn << 8) | size0);

		qp->sq.head += nreq;

		/*
		 * Make sure that descriptors are written before
		 * doorbell record.
		 */
		wmb();
		*qp->sq.db = cpu_to_be32(qp->sq.head & 0xffff);

		/*
		 * Make sure doorbell record is written before we
		 * write MMIO send doorbell.
		 */
		wmb();
		mthca_write64(doorbell,
			      dev->kar + MTHCA_SEND_DOORBELL,
			      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
	}

	spin_unlock_irqrestore(&qp->sq.lock, flags);
	return err;
}

int mthca_arbel_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
			     struct ib_recv_wr **bad_wr)
{
	struct mthca_dev *dev = to_mdev(ibqp->device);
	struct mthca_qp *qp = to_mqp(ibqp);
	unsigned long flags;
	int err = 0;
	int nreq;
	int ind;
	int i;
	void *wqe;

	spin_lock_irqsave(&qp->rq.lock, flags);

	/* XXX check that state is OK to post receive */

	ind = qp->rq.head & (qp->rq.max - 1);

	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		if (mthca_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) {
			mthca_err(dev, "RQ %06x full (%u head, %u tail,"
				  " %d max, %d nreq)\n", qp->qpn,
				  qp->rq.head, qp->rq.tail,
				  qp->rq.max, nreq);
			err = -ENOMEM;
			*bad_wr = wr;
			goto out;
		}

		wqe = get_recv_wqe(qp, ind);

		((struct mthca_next_seg *) wqe)->flags = 0;

		wqe += sizeof (struct mthca_next_seg);

		if (unlikely(wr->num_sge > qp->rq.max_gs)) {
			err = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		for (i = 0; i < wr->num_sge; ++i) {
			((struct mthca_data_seg *) wqe)->byte_count =
				cpu_to_be32(wr->sg_list[i].length);
			((struct mthca_data_seg *) wqe)->lkey =
				cpu_to_be32(wr->sg_list[i].lkey);
			((struct mthca_data_seg *) wqe)->addr =
				cpu_to_be64(wr->sg_list[i].addr);
			wqe += sizeof (struct mthca_data_seg);
		}

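		/*
		 * Mem-free receive WQEs have no next/size chaining, so an
		 * unused scatter entry carrying the invalid lkey serves as
		 * the end-of-list marker for the hardware.
		 */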
		if (i < qp->rq.max_gs) {
			((struct mthca_data_seg *) wqe)->byte_count = 0;
			((struct mthca_data_seg *) wqe)->lkey = cpu_to_be32(MTHCA_INVAL_LKEY);
			((struct mthca_data_seg *) wqe)->addr = 0;
		}

		qp->wrid[ind] = wr->wr_id;

		++ind;
		if (unlikely(ind >= qp->rq.max))
			ind -= qp->rq.max;
	}
out:
	if (likely(nreq)) {
		qp->rq.head += nreq;

		/*
		 * Make sure that descriptors are written before
		 * doorbell record.
		 */
		wmb();
		*qp->rq.db = cpu_to_be32(qp->rq.head & 0xffff);
	}

	spin_unlock_irqrestore(&qp->rq.lock, flags);
	return err;
}

void mthca_free_err_wqe(struct mthca_dev *dev, struct mthca_qp *qp, int is_send,
			int index, int *dbd, __be32 *new_wqe)
{
	struct mthca_next_seg *next;

	/*
	 * For SRQs, all WQEs generate a CQE, so we're always at the
	 * end of the doorbell chain.
	 */
	if (qp->ibqp.srq) {
		*new_wqe = 0;
		return;
	}

	if (is_send)
		next = get_send_wqe(qp, index);
	else
		next = get_recv_wqe(qp, index);

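	/*
	 * The low six bits of ee_nds hold the size of the next WQE in
	 * 16-byte chunks; a nonzero size means another WQE is chained
	 * after this one, so hand its address and size back to the
	 * caller.
	 */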
	*dbd = !!(next->ee_nds & cpu_to_be32(MTHCA_NEXT_DBD));
	if (next->ee_nds & cpu_to_be32(0x3f))
		*new_wqe = (next->nda_op & cpu_to_be32(~0x3f)) |
			(next->ee_nds & cpu_to_be32(0x3f));
	else
		*new_wqe = 0;
}

int __devinit mthca_init_qp_table(struct mthca_dev *dev)
{
	int err;
	u8 status;
	int i;

	spin_lock_init(&dev->qp_table.lock);

	/*
	 * We reserve 2 extra QPs per port for the special QPs.  The
	 * special QP for port 1 has to be even, so round up.
	 */
	dev->qp_table.sqp_start = (dev->limits.reserved_qps + 1) & ~1UL;
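	/* QP numbers are 24 bits wide, hence the (1 << 24) - 1 mask. */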
	err = mthca_alloc_init(&dev->qp_table.alloc,
			       dev->limits.num_qps,
			       (1 << 24) - 1,
			       dev->qp_table.sqp_start +
			       MTHCA_MAX_PORTS * 2);
	if (err)
		return err;

	err = mthca_array_init(&dev->qp_table.qp,
			       dev->limits.num_qps);
	if (err) {
		mthca_alloc_cleanup(&dev->qp_table.alloc);
		return err;
	}

	for (i = 0; i < 2; ++i) {
		err = mthca_CONF_SPECIAL_QP(dev, i ? IB_QPT_GSI : IB_QPT_SMI,
					    dev->qp_table.sqp_start + i * 2,
					    &status);
		if (err)
			goto err_out;
		if (status) {
			mthca_warn(dev, "CONF_SPECIAL_QP returned "
				   "status %02x, aborting.\n",
				   status);
			err = -EINVAL;
			goto err_out;
		}
	}
	return 0;

 err_out:
	for (i = 0; i < 2; ++i)
		mthca_CONF_SPECIAL_QP(dev, i, 0, &status);

	mthca_array_cleanup(&dev->qp_table.qp, dev->limits.num_qps);
	mthca_alloc_cleanup(&dev->qp_table.alloc);

	return err;
}

void __devexit mthca_cleanup_qp_table(struct mthca_dev *dev)
{
	int i;
	u8 status;

	for (i = 0; i < 2; ++i)
		mthca_CONF_SPECIAL_QP(dev, i, 0, &status);

	mthca_array_cleanup(&dev->qp_table.qp, dev->limits.num_qps);
	mthca_alloc_cleanup(&dev->qp_table.alloc);
}