/*
 * Copyright (c) 2012, 2013 Intel Corporation.  All rights reserved.
 * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/err.h>
#include <linux/vmalloc.h>
#include <linux/jhash.h>

#include "qib.h"

#define BITS_PER_PAGE           (PAGE_SIZE*BITS_PER_BYTE)
#define BITS_PER_PAGE_MASK      (BITS_PER_PAGE-1)

static inline unsigned mk_qpn(struct qib_qpn_table *qpt,
			      struct qpn_map *map, unsigned off)
{
	return (map - qpt->map) * BITS_PER_PAGE + off;
}

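/*
 * Find the next candidate QPN bit offset within a map page.  When the
 * chip steers QPNs to receive contexts (qpt->mask is non-zero), only
 * offsets whose masked context bits decode to a valid kernel context
 * are usable, so the search jumps directly to the next such QPN
 * instead of scanning the bitmap bit by bit.
 */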
static inline unsigned find_next_offset(struct qib_qpn_table *qpt,
					struct qpn_map *map, unsigned off,
					unsigned n)
{
	if (qpt->mask) {
		off++;
		if (((off & qpt->mask) >> 1) >= n)
			off = (off | qpt->mask) + 2;
	} else
		off = find_next_zero_bit(map->page, BITS_PER_PAGE, off);
	return off;
}

/*
 * Convert the AETH credit code into the number of credits.
 */
static u32 credit_table[31] = {
	0,                      /* 0 */
	1,                      /* 1 */
	2,                      /* 2 */
	3,                      /* 3 */
	4,                      /* 4 */
	6,                      /* 5 */
	8,                      /* 6 */
	12,                     /* 7 */
	16,                     /* 8 */
	24,                     /* 9 */
	32,                     /* A */
	48,                     /* B */
	64,                     /* C */
	96,                     /* D */
	128,                    /* E */
	192,                    /* F */
	256,                    /* 10 */
	384,                    /* 11 */
	512,                    /* 12 */
	768,                    /* 13 */
	1024,                   /* 14 */
	1536,                   /* 15 */
	2048,                   /* 16 */
	3072,                   /* 17 */
	4096,                   /* 18 */
	6144,                   /* 19 */
	8192,                   /* 1A */
	12288,                  /* 1B */
	16384,                  /* 1C */
	24576,                  /* 1D */
	32768                   /* 1E */
};
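
/*
 * The table is a rough geometric progression (each entry is about
 * 1.33x-1.5x the previous), so the 5-bit credit code can advertise
 * anywhere from 0 to 32768 RWQEs.  qib_compute_aeth() performs the
 * inverse mapping by binary-searching for the largest code whose
 * value does not exceed the actual credit count.
 */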

static void get_map_page(struct qib_qpn_table *qpt, struct qpn_map *map)
{
	unsigned long page = get_zeroed_page(GFP_KERNEL);

	/*
	 * Free the page if someone raced with us installing it.
	 */

	spin_lock(&qpt->lock);
	if (map->page)
		free_page(page);
	else
		map->page = (void *)page;
	spin_unlock(&qpt->lock);
}

/*
 * Allocate the next available QPN or
 * zero/one for QP type IB_QPT_SMI/IB_QPT_GSI.
 */
static int alloc_qpn(struct qib_devdata *dd, struct qib_qpn_table *qpt,
		     enum ib_qp_type type, u8 port)
{
	u32 i, offset, max_scan, qpn;
	struct qpn_map *map;
	int ret;

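	/*
	 * QP0 (SMI) and QP1 (GSI) have fixed QPNs; qpt->flags tracks
	 * which of them are in use with two bits per port: bit
	 * 2 * (port - 1) for the SMI QP and the bit above it for the
	 * GSI QP.
	 */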
	if (type == IB_QPT_SMI || type == IB_QPT_GSI) {
		unsigned n;

		ret = type == IB_QPT_GSI;
		n = 1 << (ret + 2 * (port - 1));
		spin_lock(&qpt->lock);
		if (qpt->flags & n)
			ret = -EINVAL;
		else
			qpt->flags |= n;
		spin_unlock(&qpt->lock);
		goto bail;
	}

	qpn = qpt->last + 2;
	if (qpn >= QPN_MAX)
		qpn = 2;
	if (qpt->mask && ((qpn & qpt->mask) >> 1) >= dd->n_krcv_queues)
		qpn = (qpn | qpt->mask) + 2;
	offset = qpn & BITS_PER_PAGE_MASK;
	map = &qpt->map[qpn / BITS_PER_PAGE];
	max_scan = qpt->nmaps - !offset;
	for (i = 0;;) {
		if (unlikely(!map->page)) {
			get_map_page(qpt, map);
			if (unlikely(!map->page))
				break;
		}
		do {
			if (!test_and_set_bit(offset, map->page)) {
				qpt->last = qpn;
				ret = qpn;
				goto bail;
			}
			offset = find_next_offset(qpt, map, offset,
						  dd->n_krcv_queues);
			qpn = mk_qpn(qpt, map, offset);
			/*
			 * This test differs from alloc_pidmap().
			 * If find_next_offset() does find a zero
			 * bit, we don't need to check for QPN
			 * wrapping around past our starting QPN.
			 * We just need to be sure we don't loop
			 * forever.
			 */
		} while (offset < BITS_PER_PAGE && qpn < QPN_MAX);
		/*
		 * In order to keep the number of pages allocated to a
		 * minimum, we scan all the existing pages before increasing
		 * the size of the bitmap table.
		 */
		if (++i > max_scan) {
			if (qpt->nmaps == QPNMAP_ENTRIES)
				break;
			map = &qpt->map[qpt->nmaps++];
			offset = 0;
		} else if (map < &qpt->map[qpt->nmaps]) {
			++map;
			offset = 0;
		} else {
			map = &qpt->map[0];
			offset = 2;
		}
		qpn = mk_qpn(qpt, map, offset);
	}

	ret = -ENOMEM;

bail:
	return ret;
}

static void free_qpn(struct qib_qpn_table *qpt, u32 qpn)
{
	struct qpn_map *map;

	map = qpt->map + qpn / BITS_PER_PAGE;
	if (map->page)
		clear_bit(qpn & BITS_PER_PAGE_MASK, map->page);
}

static inline unsigned qpn_hash(struct qib_ibdev *dev, u32 qpn)
{
	return jhash_1word(qpn, dev->qp_rnd) &
		(dev->qp_table_size - 1);
}


/*
 * Put the QP into the hash table.
 * The hash table holds a reference to the QP.
 */
static void insert_qp(struct qib_ibdev *dev, struct qib_qp *qp)
{
	struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
	unsigned long flags;
	unsigned n = qpn_hash(dev, qp->ibqp.qp_num);

	atomic_inc(&qp->refcount);
	spin_lock_irqsave(&dev->qpt_lock, flags);

	if (qp->ibqp.qp_num == 0)
		rcu_assign_pointer(ibp->qp0, qp);
	else if (qp->ibqp.qp_num == 1)
		rcu_assign_pointer(ibp->qp1, qp);
	else {
		qp->next = dev->qp_table[n];
		rcu_assign_pointer(dev->qp_table[n], qp);
	}

	spin_unlock_irqrestore(&dev->qpt_lock, flags);
}

/*
 * Remove the QP from the table so it can't be found asynchronously by
 * the receive interrupt routine.
 */
static void remove_qp(struct qib_ibdev *dev, struct qib_qp *qp)
{
	struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
	unsigned n = qpn_hash(dev, qp->ibqp.qp_num);
	unsigned long flags;
	int removed = 1;

	spin_lock_irqsave(&dev->qpt_lock, flags);

	if (rcu_dereference_protected(ibp->qp0,
			lockdep_is_held(&dev->qpt_lock)) == qp) {
		rcu_assign_pointer(ibp->qp0, NULL);
	} else if (rcu_dereference_protected(ibp->qp1,
			lockdep_is_held(&dev->qpt_lock)) == qp) {
		rcu_assign_pointer(ibp->qp1, NULL);
	} else {
		struct qib_qp *q;
		struct qib_qp __rcu **qpp;

		removed = 0;
		qpp = &dev->qp_table[n];
		for (; (q = rcu_dereference_protected(*qpp,
				lockdep_is_held(&dev->qpt_lock))) != NULL;
				qpp = &q->next)
			if (q == qp) {
				rcu_assign_pointer(*qpp,
					rcu_dereference_protected(qp->next,
					 lockdep_is_held(&dev->qpt_lock)));
				removed = 1;
				break;
			}
	}

	spin_unlock_irqrestore(&dev->qpt_lock, flags);
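	/*
	 * Wait for any RCU readers still traversing the old chain to
	 * finish before dropping the reference the table held.
	 */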
	if (removed) {
		synchronize_rcu();
		atomic_dec(&qp->refcount);
	}
}

/**
 * qib_free_all_qps - check for QPs still in use
 * @dd: the device data structure
 *
 * There should not be any QPs still in use.
 * Free memory for table.
 */
unsigned qib_free_all_qps(struct qib_devdata *dd)
{
	struct qib_ibdev *dev = &dd->verbs_dev;
	unsigned long flags;
	struct qib_qp *qp;
	unsigned n, qp_inuse = 0;

	for (n = 0; n < dd->num_pports; n++) {
		struct qib_ibport *ibp = &dd->pport[n].ibport_data;

		if (!qib_mcast_tree_empty(ibp))
			qp_inuse++;
		rcu_read_lock();
		if (rcu_dereference(ibp->qp0))
			qp_inuse++;
		if (rcu_dereference(ibp->qp1))
			qp_inuse++;
		rcu_read_unlock();
	}

	spin_lock_irqsave(&dev->qpt_lock, flags);
	for (n = 0; n < dev->qp_table_size; n++) {
		qp = rcu_dereference_protected(dev->qp_table[n],
			lockdep_is_held(&dev->qpt_lock));
		rcu_assign_pointer(dev->qp_table[n], NULL);

		for (; qp; qp = rcu_dereference_protected(qp->next,
					lockdep_is_held(&dev->qpt_lock)))
			qp_inuse++;
	}
	spin_unlock_irqrestore(&dev->qpt_lock, flags);
	synchronize_rcu();

	return qp_inuse;
}

/**
 * qib_lookup_qpn - return the QP with the given QPN
 * @ibp: the IB port
 * @qpn: the QP number to look up
 *
 * The caller is responsible for decrementing the QP reference count
 * when done.
 */
struct qib_qp *qib_lookup_qpn(struct qib_ibport *ibp, u32 qpn)
{
	struct qib_qp *qp = NULL;

	rcu_read_lock();
	if (unlikely(qpn <= 1)) {
		if (qpn == 0)
			qp = rcu_dereference(ibp->qp0);
		else
			qp = rcu_dereference(ibp->qp1);
		if (qp)
			atomic_inc(&qp->refcount);
	} else {
		struct qib_ibdev *dev = &ppd_from_ibp(ibp)->dd->verbs_dev;
		unsigned n = qpn_hash(dev, qpn);

		for (qp = rcu_dereference(dev->qp_table[n]); qp;
			qp = rcu_dereference(qp->next))
			if (qp->ibqp.qp_num == qpn) {
				atomic_inc(&qp->refcount);
				break;
			}
	}
	rcu_read_unlock();
	return qp;
}

/**
 * qib_reset_qp - initialize the QP state to the reset state
 * @qp: the QP to reset
 * @type: the QP type
 */
static void qib_reset_qp(struct qib_qp *qp, enum ib_qp_type type)
{
	qp->remote_qpn = 0;
	qp->qkey = 0;
	qp->qp_access_flags = 0;
	atomic_set(&qp->s_dma_busy, 0);
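	/* Preserve only the completion signaling policy chosen at create. */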
	qp->s_flags &= QIB_S_SIGNAL_REQ_WR;
	qp->s_hdrwords = 0;
	qp->s_wqe = NULL;
	qp->s_draining = 0;
	qp->s_next_psn = 0;
	qp->s_last_psn = 0;
	qp->s_sending_psn = 0;
	qp->s_sending_hpsn = 0;
	qp->s_psn = 0;
	qp->r_psn = 0;
	qp->r_msn = 0;
	if (type == IB_QPT_RC) {
		qp->s_state = IB_OPCODE_RC_SEND_LAST;
		qp->r_state = IB_OPCODE_RC_SEND_LAST;
	} else {
		qp->s_state = IB_OPCODE_UC_SEND_LAST;
		qp->r_state = IB_OPCODE_UC_SEND_LAST;
	}
	qp->s_ack_state = IB_OPCODE_RC_ACKNOWLEDGE;
	qp->r_nak_state = 0;
	qp->r_aflags = 0;
	qp->r_flags = 0;
	qp->s_head = 0;
	qp->s_tail = 0;
	qp->s_cur = 0;
	qp->s_acked = 0;
	qp->s_last = 0;
	qp->s_ssn = 1;
	qp->s_lsn = 0;
	qp->s_mig_state = IB_MIG_MIGRATED;
	memset(qp->s_ack_queue, 0, sizeof(qp->s_ack_queue));
	qp->r_head_ack_queue = 0;
	qp->s_tail_ack_queue = 0;
	qp->s_num_rd_atomic = 0;
	if (qp->r_rq.wq) {
		qp->r_rq.wq->head = 0;
		qp->r_rq.wq->tail = 0;
	}
	qp->r_sge.num_sge = 0;
}

static void clear_mr_refs(struct qib_qp *qp, int clr_sends)
{
	unsigned n;

	if (test_and_clear_bit(QIB_R_REWIND_SGE, &qp->r_aflags))
		qib_put_ss(&qp->s_rdma_read_sge);

	qib_put_ss(&qp->r_sge);

	if (clr_sends) {
		while (qp->s_last != qp->s_head) {
			struct qib_swqe *wqe = get_swqe_ptr(qp, qp->s_last);
			unsigned i;

			for (i = 0; i < wqe->wr.num_sge; i++) {
				struct qib_sge *sge = &wqe->sg_list[i];

				qib_put_mr(sge->mr);
			}
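			/* UD-family WQEs also hold a reference on the AH. */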
			if (qp->ibqp.qp_type == IB_QPT_UD ||
			    qp->ibqp.qp_type == IB_QPT_SMI ||
			    qp->ibqp.qp_type == IB_QPT_GSI)
				atomic_dec(&to_iah(wqe->wr.wr.ud.ah)->refcount);
			if (++qp->s_last >= qp->s_size)
				qp->s_last = 0;
		}
		if (qp->s_rdma_mr) {
			qib_put_mr(qp->s_rdma_mr);
			qp->s_rdma_mr = NULL;
		}
	}

	if (qp->ibqp.qp_type != IB_QPT_RC)
		return;

	for (n = 0; n < ARRAY_SIZE(qp->s_ack_queue); n++) {
		struct qib_ack_entry *e = &qp->s_ack_queue[n];

		if (e->opcode == IB_OPCODE_RC_RDMA_READ_REQUEST &&
		    e->rdma_sge.mr) {
			qib_put_mr(e->rdma_sge.mr);
			e->rdma_sge.mr = NULL;
		}
	}
}

/**
 * qib_error_qp - put a QP into the error state
 * @qp: the QP to put into the error state
 * @err: the receive completion error to signal if an RWQE is active
 *
 * Flushes both send and receive work queues.
 * Returns true if last WQE event should be generated.
 * The QP r_lock and s_lock should be held and interrupts disabled.
 * If we are already in error state, just return.
 */
int qib_error_qp(struct qib_qp *qp, enum ib_wc_status err)
{
	struct qib_ibdev *dev = to_idev(qp->ibqp.device);
	struct ib_wc wc;
	int ret = 0;

	if (qp->state == IB_QPS_ERR || qp->state == IB_QPS_RESET)
		goto bail;

	qp->state = IB_QPS_ERR;

	if (qp->s_flags & (QIB_S_TIMER | QIB_S_WAIT_RNR)) {
		qp->s_flags &= ~(QIB_S_TIMER | QIB_S_WAIT_RNR);
		del_timer(&qp->s_timer);
	}

	if (qp->s_flags & QIB_S_ANY_WAIT_SEND)
		qp->s_flags &= ~QIB_S_ANY_WAIT_SEND;

	spin_lock(&dev->pending_lock);
	if (!list_empty(&qp->iowait) && !(qp->s_flags & QIB_S_BUSY)) {
		qp->s_flags &= ~QIB_S_ANY_WAIT_IO;
		list_del_init(&qp->iowait);
	}
	spin_unlock(&dev->pending_lock);

	if (!(qp->s_flags & QIB_S_BUSY)) {
		qp->s_hdrwords = 0;
		if (qp->s_rdma_mr) {
			qib_put_mr(qp->s_rdma_mr);
			qp->s_rdma_mr = NULL;
		}
		if (qp->s_tx) {
			qib_put_txreq(qp->s_tx);
			qp->s_tx = NULL;
		}
	}

	/* Schedule the sending tasklet to drain the send work queue. */
	if (qp->s_last != qp->s_head)
		qib_schedule_send(qp);

	clear_mr_refs(qp, 0);

	memset(&wc, 0, sizeof(wc));
	wc.qp = &qp->ibqp;
	wc.opcode = IB_WC_RECV;

	if (test_and_clear_bit(QIB_R_WRID_VALID, &qp->r_aflags)) {
		wc.wr_id = qp->r_wr_id;
		wc.status = err;
		qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
	}
	wc.status = IB_WC_WR_FLUSH_ERR;

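	/*
	 * Flush every receive work request still queued, generating a
	 * "flushed" completion for each one.
	 */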
	if (qp->r_rq.wq) {
		struct qib_rwq *wq;
		u32 head;
		u32 tail;

		spin_lock(&qp->r_rq.lock);

		/* sanity check pointers before trusting them */
		wq = qp->r_rq.wq;
		head = wq->head;
		if (head >= qp->r_rq.size)
			head = 0;
		tail = wq->tail;
		if (tail >= qp->r_rq.size)
			tail = 0;
		while (tail != head) {
			wc.wr_id = get_rwqe_ptr(&qp->r_rq, tail)->wr_id;
			if (++tail >= qp->r_rq.size)
				tail = 0;
			qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
		}
		wq->tail = tail;

		spin_unlock(&qp->r_rq.lock);
	} else if (qp->ibqp.event_handler)
		ret = 1;

bail:
	return ret;
}

/**
 * qib_modify_qp - modify the attributes of a queue pair
 * @ibqp: the queue pair whose attributes we're modifying
 * @attr: the new attributes
 * @attr_mask: the mask of attributes to modify
 * @udata: user data for libibverbs.so
 *
 * Returns 0 on success, otherwise returns an errno.
 */
int qib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		  int attr_mask, struct ib_udata *udata)
{
	struct qib_ibdev *dev = to_idev(ibqp->device);
	struct qib_qp *qp = to_iqp(ibqp);
	enum ib_qp_state cur_state, new_state;
	struct ib_event ev;
	int lastwqe = 0;
	int mig = 0;
	int ret;
	u32 pmtu = 0; /* for gcc warning only */
	spin_lock_irq(&qp->r_lock);
	spin_lock(&qp->s_lock);

	cur_state = attr_mask & IB_QP_CUR_STATE ?
		attr->cur_qp_state : qp->state;
	new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;

	if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
				attr_mask))
		goto inval;

	if (attr_mask & IB_QP_AV) {
		if (attr->ah_attr.dlid >= QIB_MULTICAST_LID_BASE)
			goto inval;
		if (qib_check_ah(qp->ibqp.device, &attr->ah_attr))
			goto inval;
	}

	if (attr_mask & IB_QP_ALT_PATH) {
		if (attr->alt_ah_attr.dlid >= QIB_MULTICAST_LID_BASE)
			goto inval;
		if (qib_check_ah(qp->ibqp.device, &attr->alt_ah_attr))
			goto inval;
		if (attr->alt_pkey_index >= qib_get_npkeys(dd_from_dev(dev)))
			goto inval;
	}

	if (attr_mask & IB_QP_PKEY_INDEX)
		if (attr->pkey_index >= qib_get_npkeys(dd_from_dev(dev)))
			goto inval;

	if (attr_mask & IB_QP_MIN_RNR_TIMER)
		if (attr->min_rnr_timer > 31)
			goto inval;

	if (attr_mask & IB_QP_PORT)
		if (qp->ibqp.qp_type == IB_QPT_SMI ||
		    qp->ibqp.qp_type == IB_QPT_GSI ||
		    attr->port_num == 0 ||
		    attr->port_num > ibqp->device->phys_port_cnt)
			goto inval;

	if (attr_mask & IB_QP_DEST_QPN)
		if (attr->dest_qp_num > QIB_QPN_MASK)
			goto inval;

	if (attr_mask & IB_QP_RETRY_CNT)
		if (attr->retry_cnt > 7)
			goto inval;

	if (attr_mask & IB_QP_RNR_RETRY)
		if (attr->rnr_retry > 7)
			goto inval;

	/*
	 * Don't allow invalid path_mtu values.  It is OK to set the
	 * path_mtu greater than the active MTU (or even the max_cap,
	 * if we have tuned that down to a small MTU).  We'll set
	 * qp->path_mtu to the lesser of the requested MTU and the
	 * active MTU, for packetizing messages.
	 * Note that the QP port has to be set in INIT and the MTU in RTR.
	 */
	if (attr_mask & IB_QP_PATH_MTU) {
		struct qib_devdata *dd = dd_from_dev(dev);
		int mtu, pidx = qp->port_num - 1;

		mtu = ib_mtu_enum_to_int(attr->path_mtu);
		if (mtu == -1)
			goto inval;
		if (mtu > dd->pport[pidx].ibmtu) {
			switch (dd->pport[pidx].ibmtu) {
			case 4096:
				pmtu = IB_MTU_4096;
				break;
			case 2048:
				pmtu = IB_MTU_2048;
				break;
			case 1024:
				pmtu = IB_MTU_1024;
				break;
			case 512:
				pmtu = IB_MTU_512;
				break;
			case 256:
				pmtu = IB_MTU_256;
				break;
			default:
				pmtu = IB_MTU_2048;
			}
		} else
			pmtu = attr->path_mtu;
	}

	if (attr_mask & IB_QP_PATH_MIG_STATE) {
		if (attr->path_mig_state == IB_MIG_REARM) {
			if (qp->s_mig_state == IB_MIG_ARMED)
				goto inval;
			if (new_state != IB_QPS_RTS)
				goto inval;
		} else if (attr->path_mig_state == IB_MIG_MIGRATED) {
			if (qp->s_mig_state == IB_MIG_REARM)
				goto inval;
			if (new_state != IB_QPS_RTS && new_state != IB_QPS_SQD)
				goto inval;
			if (qp->s_mig_state == IB_MIG_ARMED)
				mig = 1;
		} else
			goto inval;
	}

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		if (attr->max_dest_rd_atomic > QIB_MAX_RDMA_ATOMIC)
			goto inval;

	switch (new_state) {
	case IB_QPS_RESET:
		if (qp->state != IB_QPS_RESET) {
			qp->state = IB_QPS_RESET;
			spin_lock(&dev->pending_lock);
			if (!list_empty(&qp->iowait))
				list_del_init(&qp->iowait);
			spin_unlock(&dev->pending_lock);
			qp->s_flags &= ~(QIB_S_TIMER | QIB_S_ANY_WAIT);
			spin_unlock(&qp->s_lock);
			spin_unlock_irq(&qp->r_lock);
			/* Stop the sending work queue and retry timer */
			cancel_work_sync(&qp->s_work);
			del_timer_sync(&qp->s_timer);
			wait_event(qp->wait_dma, !atomic_read(&qp->s_dma_busy));
			if (qp->s_tx) {
				qib_put_txreq(qp->s_tx);
				qp->s_tx = NULL;
			}
			remove_qp(dev, qp);
			wait_event(qp->wait, !atomic_read(&qp->refcount));
			spin_lock_irq(&qp->r_lock);
			spin_lock(&qp->s_lock);
			clear_mr_refs(qp, 1);
			qib_reset_qp(qp, ibqp->qp_type);
		}
		break;

	case IB_QPS_RTR:
		/* Allow event to retrigger if QP set to RTR more than once */
		qp->r_flags &= ~QIB_R_COMM_EST;
		qp->state = new_state;
		break;

	case IB_QPS_SQD:
		qp->s_draining = qp->s_last != qp->s_cur;
		qp->state = new_state;
		break;

	case IB_QPS_SQE:
		if (qp->ibqp.qp_type == IB_QPT_RC)
			goto inval;
		qp->state = new_state;
		break;

	case IB_QPS_ERR:
		lastwqe = qib_error_qp(qp, IB_WC_WR_FLUSH_ERR);
		break;

	default:
		qp->state = new_state;
		break;
	}

	if (attr_mask & IB_QP_PKEY_INDEX)
		qp->s_pkey_index = attr->pkey_index;

	if (attr_mask & IB_QP_PORT)
		qp->port_num = attr->port_num;

	if (attr_mask & IB_QP_DEST_QPN)
		qp->remote_qpn = attr->dest_qp_num;

	if (attr_mask & IB_QP_SQ_PSN) {
		qp->s_next_psn = attr->sq_psn & QIB_PSN_MASK;
		qp->s_psn = qp->s_next_psn;
		qp->s_sending_psn = qp->s_next_psn;
		qp->s_last_psn = qp->s_next_psn - 1;
		qp->s_sending_hpsn = qp->s_last_psn;
	}

	if (attr_mask & IB_QP_RQ_PSN)
		qp->r_psn = attr->rq_psn & QIB_PSN_MASK;

	if (attr_mask & IB_QP_ACCESS_FLAGS)
		qp->qp_access_flags = attr->qp_access_flags;

	if (attr_mask & IB_QP_AV) {
		qp->remote_ah_attr = attr->ah_attr;
		qp->s_srate = attr->ah_attr.static_rate;
	}

	if (attr_mask & IB_QP_ALT_PATH) {
		qp->alt_ah_attr = attr->alt_ah_attr;
		qp->s_alt_pkey_index = attr->alt_pkey_index;
	}

	if (attr_mask & IB_QP_PATH_MIG_STATE) {
		qp->s_mig_state = attr->path_mig_state;
		if (mig) {
			qp->remote_ah_attr = qp->alt_ah_attr;
			qp->port_num = qp->alt_ah_attr.port_num;
			qp->s_pkey_index = qp->s_alt_pkey_index;
		}
	}

	if (attr_mask & IB_QP_PATH_MTU) {
		qp->path_mtu = pmtu;
		qp->pmtu = ib_mtu_enum_to_int(pmtu);
	}

	if (attr_mask & IB_QP_RETRY_CNT) {
		qp->s_retry_cnt = attr->retry_cnt;
		qp->s_retry = attr->retry_cnt;
	}

	if (attr_mask & IB_QP_RNR_RETRY) {
		qp->s_rnr_retry_cnt = attr->rnr_retry;
		qp->s_rnr_retry = attr->rnr_retry;
	}

	if (attr_mask & IB_QP_MIN_RNR_TIMER)
		qp->r_min_rnr_timer = attr->min_rnr_timer;

	if (attr_mask & IB_QP_TIMEOUT) {
		qp->timeout = attr->timeout;
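		/* IBTA local ACK timeout is 4.096 usec * 2^timeout. */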
		qp->timeout_jiffies =
			usecs_to_jiffies((4096UL * (1UL << qp->timeout)) /
				1000UL);
	}

	if (attr_mask & IB_QP_QKEY)
		qp->qkey = attr->qkey;

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		qp->r_max_rd_atomic = attr->max_dest_rd_atomic;

	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC)
		qp->s_max_rd_atomic = attr->max_rd_atomic;

	spin_unlock(&qp->s_lock);
	spin_unlock_irq(&qp->r_lock);

	if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
		insert_qp(dev, qp);

	if (lastwqe) {
		ev.device = qp->ibqp.device;
		ev.element.qp = &qp->ibqp;
		ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
		qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
	}
	if (mig) {
		ev.device = qp->ibqp.device;
		ev.element.qp = &qp->ibqp;
		ev.event = IB_EVENT_PATH_MIG;
		qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
	}
	ret = 0;
	goto bail;

inval:
	spin_unlock(&qp->s_lock);
	spin_unlock_irq(&qp->r_lock);
	ret = -EINVAL;

bail:
	return ret;
}

int qib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		 int attr_mask, struct ib_qp_init_attr *init_attr)
{
	struct qib_qp *qp = to_iqp(ibqp);

	attr->qp_state = qp->state;
	attr->cur_qp_state = attr->qp_state;
	attr->path_mtu = qp->path_mtu;
	attr->path_mig_state = qp->s_mig_state;
	attr->qkey = qp->qkey;
	attr->rq_psn = qp->r_psn & QIB_PSN_MASK;
	attr->sq_psn = qp->s_next_psn & QIB_PSN_MASK;
	attr->dest_qp_num = qp->remote_qpn;
	attr->qp_access_flags = qp->qp_access_flags;
	attr->cap.max_send_wr = qp->s_size - 1;
	attr->cap.max_recv_wr = qp->ibqp.srq ? 0 : qp->r_rq.size - 1;
	attr->cap.max_send_sge = qp->s_max_sge;
	attr->cap.max_recv_sge = qp->r_rq.max_sge;
	attr->cap.max_inline_data = 0;
	attr->ah_attr = qp->remote_ah_attr;
	attr->alt_ah_attr = qp->alt_ah_attr;
	attr->pkey_index = qp->s_pkey_index;
	attr->alt_pkey_index = qp->s_alt_pkey_index;
	attr->en_sqd_async_notify = 0;
	attr->sq_draining = qp->s_draining;
	attr->max_rd_atomic = qp->s_max_rd_atomic;
	attr->max_dest_rd_atomic = qp->r_max_rd_atomic;
	attr->min_rnr_timer = qp->r_min_rnr_timer;
	attr->port_num = qp->port_num;
	attr->timeout = qp->timeout;
	attr->retry_cnt = qp->s_retry_cnt;
	attr->rnr_retry = qp->s_rnr_retry_cnt;
	attr->alt_port_num = qp->alt_ah_attr.port_num;
	attr->alt_timeout = qp->alt_timeout;

	init_attr->event_handler = qp->ibqp.event_handler;
	init_attr->qp_context = qp->ibqp.qp_context;
	init_attr->send_cq = qp->ibqp.send_cq;
	init_attr->recv_cq = qp->ibqp.recv_cq;
	init_attr->srq = qp->ibqp.srq;
	init_attr->cap = attr->cap;
	if (qp->s_flags & QIB_S_SIGNAL_REQ_WR)
		init_attr->sq_sig_type = IB_SIGNAL_REQ_WR;
	else
		init_attr->sq_sig_type = IB_SIGNAL_ALL_WR;
	init_attr->qp_type = qp->ibqp.qp_type;
	init_attr->port_num = qp->port_num;
	return 0;
}

/**
 * qib_compute_aeth - compute the AETH (syndrome + MSN)
 * @qp: the queue pair to compute the AETH for
 *
 * Returns the AETH.
 */
__be32 qib_compute_aeth(struct qib_qp *qp)
{
	u32 aeth = qp->r_msn & QIB_MSN_MASK;

	if (qp->ibqp.srq) {
		/*
		 * Shared receive queues don't generate credits.
		 * Set the credit field to the invalid value.
		 */
		aeth |= QIB_AETH_CREDIT_INVAL << QIB_AETH_CREDIT_SHIFT;
	} else {
		u32 min, max, x;
		u32 credits;
		struct qib_rwq *wq = qp->r_rq.wq;
		u32 head;
		u32 tail;

		/* sanity check pointers before trusting them */
		head = wq->head;
		if (head >= qp->r_rq.size)
			head = 0;
		tail = wq->tail;
		if (tail >= qp->r_rq.size)
			tail = 0;
		/*
		 * Compute the number of credits available (RWQEs).
		 * XXX We're not holding the r_rq.lock here, so there is
		 * a small chance that the pair of reads is not atomic.
		 */
		credits = head - tail;
		if ((int)credits < 0)
			credits += qp->r_rq.size;
		/*
		 * Binary search the credit table to find the code to
		 * use.
		 */
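		/*
		 * E.g. credits == 100 falls between 96 (code 0xD) and
		 * 128 (code 0xE); the loop settles on 0xD, so we never
		 * advertise more credits than are actually available.
		 */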
		min = 0;
		max = 31;
		for (;;) {
			x = (min + max) / 2;
			if (credit_table[x] == credits)
				break;
			if (credit_table[x] > credits)
				max = x;
			else if (min == x)
				break;
			else
				min = x;
		}
		aeth |= x << QIB_AETH_CREDIT_SHIFT;
	}
	return cpu_to_be32(aeth);
}

/**
 * qib_create_qp - create a queue pair for a device
 * @ibpd: the protection domain whose device we create the queue pair for
 * @init_attr: the attributes of the queue pair
 * @udata: user data for libibverbs.so
 *
 * Returns the queue pair on success, otherwise returns an errno.
 *
 * Called by the ib_create_qp() core verbs function.
 */
struct ib_qp *qib_create_qp(struct ib_pd *ibpd,
			    struct ib_qp_init_attr *init_attr,
			    struct ib_udata *udata)
{
	struct qib_qp *qp;
	int err;
	struct qib_swqe *swq = NULL;
	struct qib_ibdev *dev;
	struct qib_devdata *dd;
	size_t sz;
	size_t sg_list_sz;
	struct ib_qp *ret;

	if (init_attr->cap.max_send_sge > ib_qib_max_sges ||
	    init_attr->cap.max_send_wr > ib_qib_max_qp_wrs) {
		ret = ERR_PTR(-EINVAL);
		goto bail;
	}

	/* Check receive queue parameters if no SRQ is specified. */
	if (!init_attr->srq) {
		if (init_attr->cap.max_recv_sge > ib_qib_max_sges ||
		    init_attr->cap.max_recv_wr > ib_qib_max_qp_wrs) {
			ret = ERR_PTR(-EINVAL);
			goto bail;
		}
		if (init_attr->cap.max_send_sge +
		    init_attr->cap.max_send_wr +
		    init_attr->cap.max_recv_sge +
		    init_attr->cap.max_recv_wr == 0) {
			ret = ERR_PTR(-EINVAL);
			goto bail;
		}
	}

	switch (init_attr->qp_type) {
	case IB_QPT_SMI:
	case IB_QPT_GSI:
		if (init_attr->port_num == 0 ||
		    init_attr->port_num > ibpd->device->phys_port_cnt) {
			ret = ERR_PTR(-EINVAL);
			goto bail;
		}
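		/* FALLTHROUGH: SMI/GSI share the common setup below. */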
	case IB_QPT_UC:
	case IB_QPT_RC:
	case IB_QPT_UD:
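		/*
		 * Each send WQE is a struct qib_swqe followed by an
		 * inline array of max_send_sge struct qib_sge entries.
		 */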
		sz = sizeof(struct qib_sge) *
			init_attr->cap.max_send_sge +
			sizeof(struct qib_swqe);
		swq = vmalloc((init_attr->cap.max_send_wr + 1) * sz);
		if (swq == NULL) {
			ret = ERR_PTR(-ENOMEM);
			goto bail;
		}
		sz = sizeof(*qp);
		sg_list_sz = 0;
		if (init_attr->srq) {
			struct qib_srq *srq = to_isrq(init_attr->srq);

			if (srq->rq.max_sge > 1)
				sg_list_sz = sizeof(*qp->r_sg_list) *
					(srq->rq.max_sge - 1);
		} else if (init_attr->cap.max_recv_sge > 1)
			sg_list_sz = sizeof(*qp->r_sg_list) *
				(init_attr->cap.max_recv_sge - 1);
		qp = kzalloc(sz + sg_list_sz, GFP_KERNEL);
		if (!qp) {
			ret = ERR_PTR(-ENOMEM);
			goto bail_swq;
		}
		RCU_INIT_POINTER(qp->next, NULL);
		qp->s_hdr = kzalloc(sizeof(*qp->s_hdr), GFP_KERNEL);
		if (!qp->s_hdr) {
			ret = ERR_PTR(-ENOMEM);
			goto bail_qp;
		}
		qp->timeout_jiffies =
			usecs_to_jiffies((4096UL * (1UL << qp->timeout)) /
				1000UL);
		if (init_attr->srq)
			sz = 0;
		else {
			qp->r_rq.size = init_attr->cap.max_recv_wr + 1;
			qp->r_rq.max_sge = init_attr->cap.max_recv_sge;
			sz = (sizeof(struct ib_sge) * qp->r_rq.max_sge) +
				sizeof(struct qib_rwqe);
			qp->r_rq.wq = vmalloc_user(sizeof(struct qib_rwq) +
						   qp->r_rq.size * sz);
			if (!qp->r_rq.wq) {
				ret = ERR_PTR(-ENOMEM);
				goto bail_qp;
			}
		}

		/*
		 * ib_create_qp() will initialize qp->ibqp
		 * except for qp->ibqp.qp_num.
		 */
		spin_lock_init(&qp->r_lock);
		spin_lock_init(&qp->s_lock);
		spin_lock_init(&qp->r_rq.lock);
		atomic_set(&qp->refcount, 0);
		init_waitqueue_head(&qp->wait);
		init_waitqueue_head(&qp->wait_dma);
		init_timer(&qp->s_timer);
		qp->s_timer.data = (unsigned long)qp;
		INIT_WORK(&qp->s_work, qib_do_send);
		INIT_LIST_HEAD(&qp->iowait);
		INIT_LIST_HEAD(&qp->rspwait);
		qp->state = IB_QPS_RESET;
		qp->s_wq = swq;
		qp->s_size = init_attr->cap.max_send_wr + 1;
		qp->s_max_sge = init_attr->cap.max_send_sge;
		if (init_attr->sq_sig_type == IB_SIGNAL_REQ_WR)
			qp->s_flags = QIB_S_SIGNAL_REQ_WR;
		dev = to_idev(ibpd->device);
		dd = dd_from_dev(dev);
		err = alloc_qpn(dd, &dev->qpn_table, init_attr->qp_type,
				init_attr->port_num);
		if (err < 0) {
			ret = ERR_PTR(err);
			vfree(qp->r_rq.wq);
			goto bail_qp;
		}
		qp->ibqp.qp_num = err;
		qp->port_num = init_attr->port_num;
		qib_reset_qp(qp, init_attr->qp_type);
		break;

	default:
		/* Don't support raw QPs */
		ret = ERR_PTR(-ENOSYS);
		goto bail;
	}

	init_attr->cap.max_inline_data = 0;

	/*
	 * Return the address of the RWQ as the offset to mmap.
	 * See qib_mmap() for details.
	 */
	if (udata && udata->outlen >= sizeof(__u64)) {
		if (!qp->r_rq.wq) {
			__u64 offset = 0;

			err = ib_copy_to_udata(udata, &offset,
					       sizeof(offset));
			if (err) {
				ret = ERR_PTR(err);
				goto bail_ip;
			}
		} else {
			u32 s = sizeof(struct qib_rwq) + qp->r_rq.size * sz;

			qp->ip = qib_create_mmap_info(dev, s,
						      ibpd->uobject->context,
						      qp->r_rq.wq);
			if (!qp->ip) {
				ret = ERR_PTR(-ENOMEM);
				goto bail_ip;
			}

			err = ib_copy_to_udata(udata, &(qp->ip->offset),
					       sizeof(qp->ip->offset));
			if (err) {
				ret = ERR_PTR(err);
				goto bail_ip;
			}
		}
	}

	spin_lock(&dev->n_qps_lock);
	if (dev->n_qps_allocated == ib_qib_max_qps) {
		spin_unlock(&dev->n_qps_lock);
		ret = ERR_PTR(-ENOMEM);
		goto bail_ip;
	}

	dev->n_qps_allocated++;
	spin_unlock(&dev->n_qps_lock);

	if (qp->ip) {
		spin_lock_irq(&dev->pending_lock);
		list_add(&qp->ip->pending_mmaps, &dev->pending_mmaps);
		spin_unlock_irq(&dev->pending_lock);
	}

	ret = &qp->ibqp;
	goto bail;

bail_ip:
	if (qp->ip)
		kref_put(&qp->ip->ref, qib_release_mmap_info);
	else
		vfree(qp->r_rq.wq);
	free_qpn(&dev->qpn_table, qp->ibqp.qp_num);
bail_qp:
	kfree(qp->s_hdr);
	kfree(qp);
bail_swq:
	vfree(swq);
bail:
	return ret;
}

/**
 * qib_destroy_qp - destroy a queue pair
 * @ibqp: the queue pair to destroy
 *
 * Returns 0 on success.
 *
 * Note that this can be called while the QP is actively sending or
 * receiving!
 */
int qib_destroy_qp(struct ib_qp *ibqp)
{
	struct qib_qp *qp = to_iqp(ibqp);
	struct qib_ibdev *dev = to_idev(ibqp->device);

	/* Make sure HW and driver activity is stopped. */
	spin_lock_irq(&qp->s_lock);
	if (qp->state != IB_QPS_RESET) {
		qp->state = IB_QPS_RESET;
		spin_lock(&dev->pending_lock);
		if (!list_empty(&qp->iowait))
			list_del_init(&qp->iowait);
		spin_unlock(&dev->pending_lock);
		qp->s_flags &= ~(QIB_S_TIMER | QIB_S_ANY_WAIT);
		spin_unlock_irq(&qp->s_lock);
		cancel_work_sync(&qp->s_work);
		del_timer_sync(&qp->s_timer);
		wait_event(qp->wait_dma, !atomic_read(&qp->s_dma_busy));
		if (qp->s_tx) {
			qib_put_txreq(qp->s_tx);
			qp->s_tx = NULL;
		}
		remove_qp(dev, qp);
		wait_event(qp->wait, !atomic_read(&qp->refcount));
		clear_mr_refs(qp, 1);
	} else
		spin_unlock_irq(&qp->s_lock);

	/* all users' references are cleaned up; mark the QPN available */
	free_qpn(&dev->qpn_table, qp->ibqp.qp_num);
	spin_lock(&dev->n_qps_lock);
	dev->n_qps_allocated--;
	spin_unlock(&dev->n_qps_lock);

	if (qp->ip)
		kref_put(&qp->ip->ref, qib_release_mmap_info);
	else
		vfree(qp->r_rq.wq);
	vfree(qp->s_wq);
	kfree(qp->s_hdr);
	kfree(qp);
	return 0;
}

/**
 * qib_init_qpn_table - initialize the QP number table for a device
 * @dd: the device data structure
 * @qpt: the QPN table
 */
void qib_init_qpn_table(struct qib_devdata *dd, struct qib_qpn_table *qpt)
{
	spin_lock_init(&qpt->lock);
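	/* QPNs 0 and 1 are reserved for the SMI and GSI QPs. */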
	qpt->last = 1;          /* start with QPN 2 */
	qpt->nmaps = 1;
	qpt->mask = dd->qpn_mask;
}

/**
 * qib_free_qpn_table - free the QP number table for a device
 * @qpt: the QPN table
 */
void qib_free_qpn_table(struct qib_qpn_table *qpt)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(qpt->map); i++)
		if (qpt->map[i].page)
			free_page((unsigned long) qpt->map[i].page);
}

/**
 * qib_get_credit - update the send credit state of a QP
 * @qp: the QP whose send credits to update
 * @aeth: the Acknowledge Extended Transport Header
 *
 * The QP s_lock should be held.
 */
void qib_get_credit(struct qib_qp *qp, u32 aeth)
{
	u32 credit = (aeth >> QIB_AETH_CREDIT_SHIFT) & QIB_AETH_CREDIT_MASK;

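	/*
	 * The AETH carries a 5-bit credit code; credit_table[] maps it
	 * back to an RWQE count (the inverse of qib_compute_aeth()).
	 */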
	/*
	 * If the credit is invalid, we can send
	 * as many packets as we like.  Otherwise, we have to
	 * honor the credit field.
	 */
	if (credit == QIB_AETH_CREDIT_INVAL) {
		if (!(qp->s_flags & QIB_S_UNLIMITED_CREDIT)) {
			qp->s_flags |= QIB_S_UNLIMITED_CREDIT;
			if (qp->s_flags & QIB_S_WAIT_SSN_CREDIT) {
				qp->s_flags &= ~QIB_S_WAIT_SSN_CREDIT;
				qib_schedule_send(qp);
			}
		}
	} else if (!(qp->s_flags & QIB_S_UNLIMITED_CREDIT)) {
		/* Compute new LSN (i.e., MSN + credit) */
		credit = (aeth + credit_table[credit]) & QIB_MSN_MASK;
		if (qib_cmp24(credit, qp->s_lsn) > 0) {
			qp->s_lsn = credit;
			if (qp->s_flags & QIB_S_WAIT_SSN_CREDIT) {
				qp->s_flags &= ~QIB_S_WAIT_SSN_CREDIT;
				qib_schedule_send(qp);
			}
		}
	}
}