/*
 * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation.
 * All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/err.h>
#include <linux/vmalloc.h>
#include <linux/jhash.h>

#include "qib.h"

#define BITS_PER_PAGE		(PAGE_SIZE*BITS_PER_BYTE)
#define BITS_PER_PAGE_MASK	(BITS_PER_PAGE-1)

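/*
 * QP numbers are allocated from a bitmap spread across one or more
 * zeroed pages: qpn_map entry i tracks QPNs
 * [i * BITS_PER_PAGE, (i + 1) * BITS_PER_PAGE), and mk_qpn() below
 * recombines a map pointer and a bit offset into the QPN value.
 */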
static inline unsigned mk_qpn(struct qib_qpn_table *qpt,
			      struct qpn_map *map, unsigned off)
{
	return (map - qpt->map) * BITS_PER_PAGE + off;
}

static inline unsigned find_next_offset(struct qib_qpn_table *qpt,
					struct qpn_map *map, unsigned off,
					unsigned n)
{
	if (qpt->mask) {
		off++;
		if (((off & qpt->mask) >> 1) >= n)
			off = (off | qpt->mask) + 2;
	} else
		off = find_next_zero_bit(map->page, BITS_PER_PAGE, off);
	return off;
}
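/*
 * When qpt->mask is non-zero, QPN bits 1..k select the kernel receive
 * context that services the QP, so ((off & mask) >> 1) must stay below
 * the usable context count n.  With illustrative values mask == 0x6
 * and n == 2, offset 5 (context 2) is skipped ahead to
 * (5 | 0x6) + 2 == 9, the next offset that maps back to context 0.
 */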

/*
 * Convert the AETH credit code into the number of credits.
 */
static u32 credit_table[31] = {
	0,			/* 0 */
	1,			/* 1 */
	2,			/* 2 */
	3,			/* 3 */
	4,			/* 4 */
	6,			/* 5 */
	8,			/* 6 */
	12,			/* 7 */
	16,			/* 8 */
	24,			/* 9 */
	32,			/* A */
	48,			/* B */
	64,			/* C */
	96,			/* D */
	128,			/* E */
	192,			/* F */
	256,			/* 10 */
	384,			/* 11 */
	512,			/* 12 */
	768,			/* 13 */
	1024,			/* 14 */
	1536,			/* 15 */
	2048,			/* 16 */
	3072,			/* 17 */
	4096,			/* 18 */
	6144,			/* 19 */
	8192,			/* 1A */
	12288,			/* 1B */
	16384,			/* 1C */
	24576,			/* 1D */
	32768			/* 1E */
};
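/*
 * From code 4 upward the table interleaves powers of two with
 * 3 * 2^k: even code 2k decodes to 2^k credits and odd code 2k + 1 to
 * 3 * 2^(k - 1), so adjacent codes never differ by more than 50%.
 * The one 5-bit code with no entry, 0x1F, is left for
 * QIB_AETH_CREDIT_INVAL, the "not credit limited" encoding used by
 * qib_compute_aeth() below.
 */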

static void get_map_page(struct qib_qpn_table *qpt, struct qpn_map *map)
{
	unsigned long page = get_zeroed_page(GFP_KERNEL);

	/*
	 * Free the page if someone raced with us installing it.
	 */

	spin_lock(&qpt->lock);
	if (map->page)
		free_page(page);
	else
		map->page = (void *)page;
	spin_unlock(&qpt->lock);
}

/*
 * Allocate the next available QPN or
 * zero/one for QP type IB_QPT_SMI/IB_QPT_GSI.
 */
static int alloc_qpn(struct qib_devdata *dd, struct qib_qpn_table *qpt,
		     enum ib_qp_type type, u8 port)
{
	u32 i, offset, max_scan, qpn;
	struct qpn_map *map;
	u32 ret;

	if (type == IB_QPT_SMI || type == IB_QPT_GSI) {
		unsigned n;

		ret = type == IB_QPT_GSI;
		n = 1 << (ret + 2 * (port - 1));
		spin_lock(&qpt->lock);
		if (qpt->flags & n)
			ret = -EINVAL;
		else
			qpt->flags |= n;
		spin_unlock(&qpt->lock);
		goto bail;
	}

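	/*
	 * The block above reserves QP0/QP1 per port: each port owns two
	 * bits in qpt->flags (bit 2 * (port - 1) for the SMI QP, the
	 * next bit for the GSI QP), so each special QPN is handed out
	 * at most once per port.  Ordinary QPNs start at 2 and resume
	 * scanning just past the last value handed out, wrapping back
	 * to 2 at QPN_MAX.
	 */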
	qpn = qpt->last + 2;
	if (qpn >= QPN_MAX)
		qpn = 2;
	if (qpt->mask && ((qpn & qpt->mask) >> 1) >= dd->n_krcv_queues)
		qpn = (qpn | qpt->mask) + 2;
	offset = qpn & BITS_PER_PAGE_MASK;
	map = &qpt->map[qpn / BITS_PER_PAGE];
	max_scan = qpt->nmaps - !offset;
	for (i = 0;;) {
		if (unlikely(!map->page)) {
			get_map_page(qpt, map);
			if (unlikely(!map->page))
				break;
		}
		do {
			if (!test_and_set_bit(offset, map->page)) {
				qpt->last = qpn;
				ret = qpn;
				goto bail;
			}
			offset = find_next_offset(qpt, map, offset,
						  dd->n_krcv_queues);
			qpn = mk_qpn(qpt, map, offset);
			/*
			 * This test differs from alloc_pidmap().
			 * If find_next_offset() does find a zero
			 * bit, we don't need to check for QPN
			 * wrapping around past our starting QPN.
			 * We just need to be sure we don't loop
			 * forever.
			 */
		} while (offset < BITS_PER_PAGE && qpn < QPN_MAX);
		/*
		 * In order to keep the number of pages allocated to a
		 * minimum, we scan all the existing pages before increasing
		 * the size of the bitmap table.
		 */
		if (++i > max_scan) {
			if (qpt->nmaps == QPNMAP_ENTRIES)
				break;
			map = &qpt->map[qpt->nmaps++];
			offset = 0;
		} else if (map < &qpt->map[qpt->nmaps]) {
			++map;
			offset = 0;
		} else {
			map = &qpt->map[0];
			offset = 2;
		}
		qpn = mk_qpn(qpt, map, offset);
	}

	ret = -ENOMEM;

bail:
	return ret;
}

static void free_qpn(struct qib_qpn_table *qpt, u32 qpn)
{
	struct qpn_map *map;

	map = qpt->map + qpn / BITS_PER_PAGE;
	if (map->page)
		clear_bit(qpn & BITS_PER_PAGE_MASK, map->page);
}

static inline unsigned qpn_hash(struct qib_ibdev *dev, u32 qpn)
{
	return jhash_1word(qpn, dev->qp_rnd) &
		(dev->qp_table_size - 1);
}
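/*
 * jhash_1word() mixes the QPN with the per-device seed qp_rnd before
 * the hash bucket is selected; masking with qp_table_size - 1 works
 * as a modulo only because the table size is a power of two.
 */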

/*
 * Put the QP into the hash table.
 * The hash table holds a reference to the QP.
 */
static void insert_qp(struct qib_ibdev *dev, struct qib_qp *qp)
{
	struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
	unsigned long flags;
	unsigned n = qpn_hash(dev, qp->ibqp.qp_num);

	spin_lock_irqsave(&dev->qpt_lock, flags);
	atomic_inc(&qp->refcount);

	if (qp->ibqp.qp_num == 0)
		rcu_assign_pointer(ibp->qp0, qp);
	else if (qp->ibqp.qp_num == 1)
		rcu_assign_pointer(ibp->qp1, qp);
	else {
		qp->next = dev->qp_table[n];
		rcu_assign_pointer(dev->qp_table[n], qp);
	}

	spin_unlock_irqrestore(&dev->qpt_lock, flags);
	synchronize_rcu();
}
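/*
 * insert_qp() and remove_qp() serialize writers on qpt_lock while
 * qib_lookup_qpn() walks the same chains under rcu_read_lock(), so
 * rcu_assign_pointer() is what publishes (or unlinks) a QP to
 * concurrent readers.
 */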

/*
 * Remove the QP from the table so it can't be found asynchronously by
 * the receive interrupt routine.
 */
static void remove_qp(struct qib_ibdev *dev, struct qib_qp *qp)
{
	struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
	unsigned n = qpn_hash(dev, qp->ibqp.qp_num);
	unsigned long flags;

	spin_lock_irqsave(&dev->qpt_lock, flags);

	if (ibp->qp0 == qp) {
		atomic_dec(&qp->refcount);
		rcu_assign_pointer(ibp->qp0, NULL);
	} else if (ibp->qp1 == qp) {
		atomic_dec(&qp->refcount);
		rcu_assign_pointer(ibp->qp1, NULL);
	} else {
		struct qib_qp *q, **qpp;

		qpp = &dev->qp_table[n];
		for (; (q = *qpp) != NULL; qpp = &q->next)
			if (q == qp) {
				atomic_dec(&qp->refcount);
				rcu_assign_pointer(*qpp, qp->next);
				qp->next = NULL;
				break;
			}
	}

	spin_unlock_irqrestore(&dev->qpt_lock, flags);
	synchronize_rcu();
}

/**
 * qib_free_all_qps - check for QPs still in use
 * @dd: the device data structure
 *
 * There should not be any QPs still in use.
 * Clears the QP table and returns the number of QPs found in use.
 */
unsigned qib_free_all_qps(struct qib_devdata *dd)
{
	struct qib_ibdev *dev = &dd->verbs_dev;
	unsigned long flags;
	struct qib_qp *qp;
	unsigned n, qp_inuse = 0;

	for (n = 0; n < dd->num_pports; n++) {
		struct qib_ibport *ibp = &dd->pport[n].ibport_data;

		if (!qib_mcast_tree_empty(ibp))
			qp_inuse++;
		rcu_read_lock();
		if (rcu_dereference(ibp->qp0))
			qp_inuse++;
		if (rcu_dereference(ibp->qp1))
			qp_inuse++;
		rcu_read_unlock();
	}

	spin_lock_irqsave(&dev->qpt_lock, flags);
	for (n = 0; n < dev->qp_table_size; n++) {
		qp = dev->qp_table[n];
		rcu_assign_pointer(dev->qp_table[n], NULL);

		for (; qp; qp = qp->next)
			qp_inuse++;
	}
	spin_unlock_irqrestore(&dev->qpt_lock, flags);
	synchronize_rcu();

	return qp_inuse;
}

/**
 * qib_lookup_qpn - return the QP with the given QPN
 * @ibp: the IB port the QP is attached to
 * @qpn: the QP number to look up
 *
 * The caller is responsible for decrementing the QP reference count
 * when done.
 */
struct qib_qp *qib_lookup_qpn(struct qib_ibport *ibp, u32 qpn)
{
	struct qib_qp *qp = NULL;

	if (unlikely(qpn <= 1)) {
		rcu_read_lock();
		if (qpn == 0)
			qp = rcu_dereference(ibp->qp0);
		else
			qp = rcu_dereference(ibp->qp1);
	} else {
		struct qib_ibdev *dev = &ppd_from_ibp(ibp)->dd->verbs_dev;
		unsigned n = qpn_hash(dev, qpn);

		rcu_read_lock();
		for (qp = dev->qp_table[n]; rcu_dereference(qp); qp = qp->next)
			if (qp->ibqp.qp_num == qpn)
				break;
	}
	if (qp)
		if (unlikely(!atomic_inc_not_zero(&qp->refcount)))
			qp = NULL;

	rcu_read_unlock();
	return qp;
}
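/*
 * A minimal caller sketch: atomic_inc_not_zero() above pins the QP,
 * so the caller must drop the reference once the packet has been
 * handed off, e.g. with the dec-and-wake idiom this driver uses
 * elsewhere:
 *
 *	qp = qib_lookup_qpn(ibp, qpn);
 *	if (qp) {
 *		... process the packet ...
 *		if (atomic_dec_and_test(&qp->refcount))
 *			wake_up(&qp->wait);
 *	}
 */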

/**
 * qib_reset_qp - initialize the QP state to the reset state
 * @qp: the QP to reset
 * @type: the QP type
 */
static void qib_reset_qp(struct qib_qp *qp, enum ib_qp_type type)
{
	qp->remote_qpn = 0;
	qp->qkey = 0;
	qp->qp_access_flags = 0;
	atomic_set(&qp->s_dma_busy, 0);
	qp->s_flags &= QIB_S_SIGNAL_REQ_WR;
	qp->s_hdrwords = 0;
	qp->s_wqe = NULL;
	qp->s_draining = 0;
	qp->s_next_psn = 0;
	qp->s_last_psn = 0;
	qp->s_sending_psn = 0;
	qp->s_sending_hpsn = 0;
	qp->s_psn = 0;
	qp->r_psn = 0;
	qp->r_msn = 0;
	if (type == IB_QPT_RC) {
		qp->s_state = IB_OPCODE_RC_SEND_LAST;
		qp->r_state = IB_OPCODE_RC_SEND_LAST;
	} else {
		qp->s_state = IB_OPCODE_UC_SEND_LAST;
		qp->r_state = IB_OPCODE_UC_SEND_LAST;
	}
	qp->s_ack_state = IB_OPCODE_RC_ACKNOWLEDGE;
	qp->r_nak_state = 0;
	qp->r_aflags = 0;
	qp->r_flags = 0;
	qp->s_head = 0;
	qp->s_tail = 0;
	qp->s_cur = 0;
	qp->s_acked = 0;
	qp->s_last = 0;
	qp->s_ssn = 1;
	qp->s_lsn = 0;
	qp->s_mig_state = IB_MIG_MIGRATED;
	memset(qp->s_ack_queue, 0, sizeof(qp->s_ack_queue));
	qp->r_head_ack_queue = 0;
	qp->s_tail_ack_queue = 0;
	qp->s_num_rd_atomic = 0;
	if (qp->r_rq.wq) {
		qp->r_rq.wq->head = 0;
		qp->r_rq.wq->tail = 0;
	}
	qp->r_sge.num_sge = 0;
}
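/*
 * Note that the s_flags update in qib_reset_qp() keeps only
 * QIB_S_SIGNAL_REQ_WR: the completion-signaling policy chosen at
 * create time is the one piece of send-side state that survives a
 * reset.
 */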

static void clear_mr_refs(struct qib_qp *qp, int clr_sends)
{
	unsigned n;

	if (test_and_clear_bit(QIB_R_REWIND_SGE, &qp->r_aflags))
		qib_put_ss(&qp->s_rdma_read_sge);

	qib_put_ss(&qp->r_sge);

	if (clr_sends) {
		while (qp->s_last != qp->s_head) {
			struct qib_swqe *wqe = get_swqe_ptr(qp, qp->s_last);
			unsigned i;

			for (i = 0; i < wqe->wr.num_sge; i++) {
				struct qib_sge *sge = &wqe->sg_list[i];

				qib_put_mr(sge->mr);
			}
			if (qp->ibqp.qp_type == IB_QPT_UD ||
			    qp->ibqp.qp_type == IB_QPT_SMI ||
			    qp->ibqp.qp_type == IB_QPT_GSI)
				atomic_dec(&to_iah(wqe->wr.wr.ud.ah)->refcount);
			if (++qp->s_last >= qp->s_size)
				qp->s_last = 0;
		}
		if (qp->s_rdma_mr) {
			qib_put_mr(qp->s_rdma_mr);
			qp->s_rdma_mr = NULL;
		}
	}

	if (qp->ibqp.qp_type != IB_QPT_RC)
		return;

	for (n = 0; n < ARRAY_SIZE(qp->s_ack_queue); n++) {
		struct qib_ack_entry *e = &qp->s_ack_queue[n];

		if (e->opcode == IB_OPCODE_RC_RDMA_READ_REQUEST &&
		    e->rdma_sge.mr) {
			qib_put_mr(e->rdma_sge.mr);
			e->rdma_sge.mr = NULL;
		}
	}
}

/**
 * qib_error_qp - put a QP into the error state
 * @qp: the QP to put into the error state
 * @err: the receive completion error to signal if a RWQE is active
 *
 * Flushes both send and receive work queues.
 * Returns true if last WQE event should be generated.
 * The QP r_lock and s_lock should be held and interrupts disabled.
 * If we are already in error state, just return.
 */
int qib_error_qp(struct qib_qp *qp, enum ib_wc_status err)
{
	struct qib_ibdev *dev = to_idev(qp->ibqp.device);
	struct ib_wc wc;
	int ret = 0;

	if (qp->state == IB_QPS_ERR || qp->state == IB_QPS_RESET)
		goto bail;

	qp->state = IB_QPS_ERR;

	if (qp->s_flags & (QIB_S_TIMER | QIB_S_WAIT_RNR)) {
		qp->s_flags &= ~(QIB_S_TIMER | QIB_S_WAIT_RNR);
		del_timer(&qp->s_timer);
	}

	if (qp->s_flags & QIB_S_ANY_WAIT_SEND)
		qp->s_flags &= ~QIB_S_ANY_WAIT_SEND;

	spin_lock(&dev->pending_lock);
	if (!list_empty(&qp->iowait) && !(qp->s_flags & QIB_S_BUSY)) {
		qp->s_flags &= ~QIB_S_ANY_WAIT_IO;
		list_del_init(&qp->iowait);
	}
	spin_unlock(&dev->pending_lock);

	if (!(qp->s_flags & QIB_S_BUSY)) {
		qp->s_hdrwords = 0;
		if (qp->s_rdma_mr) {
			qib_put_mr(qp->s_rdma_mr);
			qp->s_rdma_mr = NULL;
		}
		if (qp->s_tx) {
			qib_put_txreq(qp->s_tx);
			qp->s_tx = NULL;
		}
	}

	/* Schedule the sending tasklet to drain the send work queue. */
	if (qp->s_last != qp->s_head)
		qib_schedule_send(qp);

	clear_mr_refs(qp, 0);

	memset(&wc, 0, sizeof(wc));
	wc.qp = &qp->ibqp;
	wc.opcode = IB_WC_RECV;

	if (test_and_clear_bit(QIB_R_WRID_VALID, &qp->r_aflags)) {
		wc.wr_id = qp->r_wr_id;
		wc.status = err;
		qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
	}
	wc.status = IB_WC_WR_FLUSH_ERR;

	if (qp->r_rq.wq) {
		struct qib_rwq *wq;
		u32 head;
		u32 tail;

		spin_lock(&qp->r_rq.lock);

		/* sanity check pointers before trusting them */
		wq = qp->r_rq.wq;
		head = wq->head;
		if (head >= qp->r_rq.size)
			head = 0;
		tail = wq->tail;
		if (tail >= qp->r_rq.size)
			tail = 0;
		while (tail != head) {
			wc.wr_id = get_rwqe_ptr(&qp->r_rq, tail)->wr_id;
			if (++tail >= qp->r_rq.size)
				tail = 0;
			qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
		}
		wq->tail = tail;

		spin_unlock(&qp->r_rq.lock);
	} else if (qp->ibqp.event_handler)
		ret = 1;

bail:
	return ret;
}

/**
 * qib_modify_qp - modify the attributes of a queue pair
 * @ibqp: the queue pair whose attributes we're modifying
 * @attr: the new attributes
 * @attr_mask: the mask of attributes to modify
 * @udata: user data for libibverbs.so
 *
 * Returns 0 on success, otherwise returns an errno.
 */
int qib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		  int attr_mask, struct ib_udata *udata)
{
	struct qib_ibdev *dev = to_idev(ibqp->device);
	struct qib_qp *qp = to_iqp(ibqp);
	enum ib_qp_state cur_state, new_state;
	struct ib_event ev;
	int lastwqe = 0;
	int mig = 0;
	int ret;
	u32 pmtu = 0; /* for gcc warning only */

	spin_lock_irq(&qp->r_lock);
	spin_lock(&qp->s_lock);

	cur_state = attr_mask & IB_QP_CUR_STATE ?
		attr->cur_qp_state : qp->state;
	new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;

	if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
				attr_mask))
		goto inval;

	if (attr_mask & IB_QP_AV) {
		if (attr->ah_attr.dlid >= QIB_MULTICAST_LID_BASE)
			goto inval;
		if (qib_check_ah(qp->ibqp.device, &attr->ah_attr))
			goto inval;
	}

	if (attr_mask & IB_QP_ALT_PATH) {
		if (attr->alt_ah_attr.dlid >= QIB_MULTICAST_LID_BASE)
			goto inval;
		if (qib_check_ah(qp->ibqp.device, &attr->alt_ah_attr))
			goto inval;
		if (attr->alt_pkey_index >= qib_get_npkeys(dd_from_dev(dev)))
			goto inval;
	}

	if (attr_mask & IB_QP_PKEY_INDEX)
		if (attr->pkey_index >= qib_get_npkeys(dd_from_dev(dev)))
			goto inval;

	if (attr_mask & IB_QP_MIN_RNR_TIMER)
		if (attr->min_rnr_timer > 31)
			goto inval;

	if (attr_mask & IB_QP_PORT)
		if (qp->ibqp.qp_type == IB_QPT_SMI ||
		    qp->ibqp.qp_type == IB_QPT_GSI ||
		    attr->port_num == 0 ||
		    attr->port_num > ibqp->device->phys_port_cnt)
			goto inval;

	if (attr_mask & IB_QP_DEST_QPN)
		if (attr->dest_qp_num > QIB_QPN_MASK)
			goto inval;

	if (attr_mask & IB_QP_RETRY_CNT)
		if (attr->retry_cnt > 7)
			goto inval;

	if (attr_mask & IB_QP_RNR_RETRY)
		if (attr->rnr_retry > 7)
			goto inval;

	/*
	 * Don't allow invalid path_mtu values.  It is OK to request an
	 * MTU greater than the active MTU (or even the max_cap, if we
	 * have tuned that to a small MTU); we'll set qp->path_mtu to
	 * the lesser of the requested attribute MTU and the active MTU
	 * for packetizing messages.
	 * Note that the QP port has to be set in INIT and MTU in RTR.
	 */
	if (attr_mask & IB_QP_PATH_MTU) {
		struct qib_devdata *dd = dd_from_dev(dev);
		int mtu, pidx = qp->port_num - 1;

		mtu = ib_mtu_enum_to_int(attr->path_mtu);
		if (mtu == -1)
			goto inval;
		if (mtu > dd->pport[pidx].ibmtu) {
			switch (dd->pport[pidx].ibmtu) {
			case 4096:
				pmtu = IB_MTU_4096;
				break;
			case 2048:
				pmtu = IB_MTU_2048;
				break;
			case 1024:
				pmtu = IB_MTU_1024;
				break;
			case 512:
				pmtu = IB_MTU_512;
				break;
			case 256:
				pmtu = IB_MTU_256;
				break;
			default:
				pmtu = IB_MTU_2048;
			}
		} else
			pmtu = attr->path_mtu;
	}

	if (attr_mask & IB_QP_PATH_MIG_STATE) {
		if (attr->path_mig_state == IB_MIG_REARM) {
			if (qp->s_mig_state == IB_MIG_ARMED)
				goto inval;
			if (new_state != IB_QPS_RTS)
				goto inval;
		} else if (attr->path_mig_state == IB_MIG_MIGRATED) {
			if (qp->s_mig_state == IB_MIG_REARM)
				goto inval;
			if (new_state != IB_QPS_RTS && new_state != IB_QPS_SQD)
				goto inval;
			if (qp->s_mig_state == IB_MIG_ARMED)
				mig = 1;
		} else
			goto inval;
	}

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		if (attr->max_dest_rd_atomic > QIB_MAX_RDMA_ATOMIC)
			goto inval;

	switch (new_state) {
	case IB_QPS_RESET:
		if (qp->state != IB_QPS_RESET) {
			qp->state = IB_QPS_RESET;
			spin_lock(&dev->pending_lock);
			if (!list_empty(&qp->iowait))
				list_del_init(&qp->iowait);
			spin_unlock(&dev->pending_lock);
			qp->s_flags &= ~(QIB_S_TIMER | QIB_S_ANY_WAIT);
			spin_unlock(&qp->s_lock);
			spin_unlock_irq(&qp->r_lock);
			/* Stop the sending work queue and retry timer */
			cancel_work_sync(&qp->s_work);
			del_timer_sync(&qp->s_timer);
			wait_event(qp->wait_dma, !atomic_read(&qp->s_dma_busy));
			if (qp->s_tx) {
				qib_put_txreq(qp->s_tx);
				qp->s_tx = NULL;
			}
			remove_qp(dev, qp);
			wait_event(qp->wait, !atomic_read(&qp->refcount));
			spin_lock_irq(&qp->r_lock);
			spin_lock(&qp->s_lock);
			clear_mr_refs(qp, 1);
			qib_reset_qp(qp, ibqp->qp_type);
		}
		break;

	case IB_QPS_RTR:
		/* Allow event to retrigger if QP set to RTR more than once */
		qp->r_flags &= ~QIB_R_COMM_EST;
		qp->state = new_state;
		break;

	case IB_QPS_SQD:
		qp->s_draining = qp->s_last != qp->s_cur;
		qp->state = new_state;
		break;

	case IB_QPS_SQE:
		if (qp->ibqp.qp_type == IB_QPT_RC)
			goto inval;
		qp->state = new_state;
		break;

	case IB_QPS_ERR:
		lastwqe = qib_error_qp(qp, IB_WC_WR_FLUSH_ERR);
		break;

	default:
		qp->state = new_state;
		break;
	}

	if (attr_mask & IB_QP_PKEY_INDEX)
		qp->s_pkey_index = attr->pkey_index;

	if (attr_mask & IB_QP_PORT)
		qp->port_num = attr->port_num;

	if (attr_mask & IB_QP_DEST_QPN)
		qp->remote_qpn = attr->dest_qp_num;

	if (attr_mask & IB_QP_SQ_PSN) {
		qp->s_next_psn = attr->sq_psn & QIB_PSN_MASK;
		qp->s_psn = qp->s_next_psn;
		qp->s_sending_psn = qp->s_next_psn;
		qp->s_last_psn = qp->s_next_psn - 1;
		qp->s_sending_hpsn = qp->s_last_psn;
	}

	if (attr_mask & IB_QP_RQ_PSN)
		qp->r_psn = attr->rq_psn & QIB_PSN_MASK;

	if (attr_mask & IB_QP_ACCESS_FLAGS)
		qp->qp_access_flags = attr->qp_access_flags;

	if (attr_mask & IB_QP_AV) {
		qp->remote_ah_attr = attr->ah_attr;
		qp->s_srate = attr->ah_attr.static_rate;
	}

	if (attr_mask & IB_QP_ALT_PATH) {
		qp->alt_ah_attr = attr->alt_ah_attr;
		qp->s_alt_pkey_index = attr->alt_pkey_index;
	}

	if (attr_mask & IB_QP_PATH_MIG_STATE) {
		qp->s_mig_state = attr->path_mig_state;
		if (mig) {
			qp->remote_ah_attr = qp->alt_ah_attr;
			qp->port_num = qp->alt_ah_attr.port_num;
			qp->s_pkey_index = qp->s_alt_pkey_index;
		}
	}

	if (attr_mask & IB_QP_PATH_MTU) {
		qp->path_mtu = pmtu;
		qp->pmtu = ib_mtu_enum_to_int(pmtu);
	}

	if (attr_mask & IB_QP_RETRY_CNT) {
		qp->s_retry_cnt = attr->retry_cnt;
		qp->s_retry = attr->retry_cnt;
	}

	if (attr_mask & IB_QP_RNR_RETRY) {
		qp->s_rnr_retry_cnt = attr->rnr_retry;
		qp->s_rnr_retry = attr->rnr_retry;
	}

	if (attr_mask & IB_QP_MIN_RNR_TIMER)
		qp->r_min_rnr_timer = attr->min_rnr_timer;

	if (attr_mask & IB_QP_TIMEOUT) {
		qp->timeout = attr->timeout;
		qp->timeout_jiffies =
			usecs_to_jiffies((4096UL * (1UL << qp->timeout)) /
				1000UL);
	}
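	/*
	 * Per the IB spec the local ACK timeout is 4.096 us * 2^timeout,
	 * so e.g. timeout == 14 gives 4096 * 16384 / 1000 = 67108 us,
	 * roughly 67 ms of jiffies.
	 */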

	if (attr_mask & IB_QP_QKEY)
		qp->qkey = attr->qkey;

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		qp->r_max_rd_atomic = attr->max_dest_rd_atomic;

	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC)
		qp->s_max_rd_atomic = attr->max_rd_atomic;

	spin_unlock(&qp->s_lock);
	spin_unlock_irq(&qp->r_lock);

	if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
		insert_qp(dev, qp);

	if (lastwqe) {
		ev.device = qp->ibqp.device;
		ev.element.qp = &qp->ibqp;
		ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
		qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
	}
	if (mig) {
		ev.device = qp->ibqp.device;
		ev.element.qp = &qp->ibqp;
		ev.event = IB_EVENT_PATH_MIG;
		qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
	}
	ret = 0;
	goto bail;

inval:
	spin_unlock(&qp->s_lock);
	spin_unlock_irq(&qp->r_lock);
	ret = -EINVAL;

bail:
	return ret;
}

int qib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		 int attr_mask, struct ib_qp_init_attr *init_attr)
{
	struct qib_qp *qp = to_iqp(ibqp);

	attr->qp_state = qp->state;
	attr->cur_qp_state = attr->qp_state;
	attr->path_mtu = qp->path_mtu;
	attr->path_mig_state = qp->s_mig_state;
	attr->qkey = qp->qkey;
	attr->rq_psn = qp->r_psn & QIB_PSN_MASK;
	attr->sq_psn = qp->s_next_psn & QIB_PSN_MASK;
	attr->dest_qp_num = qp->remote_qpn;
	attr->qp_access_flags = qp->qp_access_flags;
	attr->cap.max_send_wr = qp->s_size - 1;
	attr->cap.max_recv_wr = qp->ibqp.srq ? 0 : qp->r_rq.size - 1;
	attr->cap.max_send_sge = qp->s_max_sge;
	attr->cap.max_recv_sge = qp->r_rq.max_sge;
	attr->cap.max_inline_data = 0;
	attr->ah_attr = qp->remote_ah_attr;
	attr->alt_ah_attr = qp->alt_ah_attr;
	attr->pkey_index = qp->s_pkey_index;
	attr->alt_pkey_index = qp->s_alt_pkey_index;
	attr->en_sqd_async_notify = 0;
	attr->sq_draining = qp->s_draining;
	attr->max_rd_atomic = qp->s_max_rd_atomic;
	attr->max_dest_rd_atomic = qp->r_max_rd_atomic;
	attr->min_rnr_timer = qp->r_min_rnr_timer;
	attr->port_num = qp->port_num;
	attr->timeout = qp->timeout;
	attr->retry_cnt = qp->s_retry_cnt;
	attr->rnr_retry = qp->s_rnr_retry_cnt;
	attr->alt_port_num = qp->alt_ah_attr.port_num;
	attr->alt_timeout = qp->alt_timeout;

	init_attr->event_handler = qp->ibqp.event_handler;
	init_attr->qp_context = qp->ibqp.qp_context;
	init_attr->send_cq = qp->ibqp.send_cq;
	init_attr->recv_cq = qp->ibqp.recv_cq;
	init_attr->srq = qp->ibqp.srq;
	init_attr->cap = attr->cap;
	if (qp->s_flags & QIB_S_SIGNAL_REQ_WR)
		init_attr->sq_sig_type = IB_SIGNAL_REQ_WR;
	else
		init_attr->sq_sig_type = IB_SIGNAL_ALL_WR;
	init_attr->qp_type = qp->ibqp.qp_type;
	init_attr->port_num = qp->port_num;
	return 0;
}

/**
 * qib_compute_aeth - compute the AETH (syndrome + MSN)
 * @qp: the queue pair to compute the AETH for
 *
 * Returns the AETH.
 */
__be32 qib_compute_aeth(struct qib_qp *qp)
{
	u32 aeth = qp->r_msn & QIB_MSN_MASK;

	if (qp->ibqp.srq) {
		/*
		 * Shared receive queues don't generate credits.
		 * Set the credit field to the invalid value.
		 */
		aeth |= QIB_AETH_CREDIT_INVAL << QIB_AETH_CREDIT_SHIFT;
	} else {
		u32 min, max, x;
		u32 credits;
		struct qib_rwq *wq = qp->r_rq.wq;
		u32 head;
		u32 tail;

		/* sanity check pointers before trusting them */
		head = wq->head;
		if (head >= qp->r_rq.size)
			head = 0;
		tail = wq->tail;
		if (tail >= qp->r_rq.size)
			tail = 0;
		/*
		 * Compute the number of credits available (RWQEs).
		 * XXX Not holding the r_rq.lock here so there is a small
		 * chance that the pair of reads are not atomic.
		 */
		credits = head - tail;
		if ((int)credits < 0)
			credits += qp->r_rq.size;
		/*
		 * Binary search the credit table to find the code to
		 * use.
		 */
		min = 0;
		max = 31;
		for (;;) {
			x = (min + max) / 2;
			if (credit_table[x] == credits)
				break;
			if (credit_table[x] > credits)
				max = x;
			else if (min == x)
				break;
			else
				min = x;
		}
		aeth |= x << QIB_AETH_CREDIT_SHIFT;
	}
	return cpu_to_be32(aeth);
}
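/*
 * The binary search above rounds down: it yields the largest credit
 * code whose table entry does not exceed the actual RWQE count, e.g.
 * 20 available RWQEs are advertised as code 8 (16 credits), so the
 * receive queue space is never over-reported.
 */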

/**
 * qib_create_qp - create a queue pair for a device
 * @ibpd: the protection domain whose device we create the queue pair for
 * @init_attr: the attributes of the queue pair
 * @udata: user data for libibverbs.so
 *
 * Returns the queue pair on success, otherwise returns an errno.
 *
 * Called by the ib_create_qp() core verbs function.
 */
struct ib_qp *qib_create_qp(struct ib_pd *ibpd,
			    struct ib_qp_init_attr *init_attr,
			    struct ib_udata *udata)
{
	struct qib_qp *qp;
	int err;
	struct qib_swqe *swq = NULL;
	struct qib_ibdev *dev;
	struct qib_devdata *dd;
	size_t sz;
	size_t sg_list_sz;
	struct ib_qp *ret;

	if (init_attr->cap.max_send_sge > ib_qib_max_sges ||
	    init_attr->cap.max_send_wr > ib_qib_max_qp_wrs) {
		ret = ERR_PTR(-EINVAL);
		goto bail;
	}

	/* Check receive queue parameters if no SRQ is specified. */
	if (!init_attr->srq) {
		if (init_attr->cap.max_recv_sge > ib_qib_max_sges ||
		    init_attr->cap.max_recv_wr > ib_qib_max_qp_wrs) {
			ret = ERR_PTR(-EINVAL);
			goto bail;
		}
		if (init_attr->cap.max_send_sge +
		    init_attr->cap.max_send_wr +
		    init_attr->cap.max_recv_sge +
		    init_attr->cap.max_recv_wr == 0) {
			ret = ERR_PTR(-EINVAL);
			goto bail;
		}
	}

	switch (init_attr->qp_type) {
	case IB_QPT_SMI:
	case IB_QPT_GSI:
		if (init_attr->port_num == 0 ||
		    init_attr->port_num > ibpd->device->phys_port_cnt) {
			ret = ERR_PTR(-EINVAL);
			goto bail;
		}
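		/* FALLTHROUGH -- SMI/GSI QPs share the setup path below */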
	case IB_QPT_UC:
	case IB_QPT_RC:
	case IB_QPT_UD:
		sz = sizeof(struct qib_sge) *
			init_attr->cap.max_send_sge +
			sizeof(struct qib_swqe);
		swq = vmalloc((init_attr->cap.max_send_wr + 1) * sz);
		if (swq == NULL) {
			ret = ERR_PTR(-ENOMEM);
			goto bail;
		}
		sz = sizeof(*qp);
		sg_list_sz = 0;
		if (init_attr->srq) {
			struct qib_srq *srq = to_isrq(init_attr->srq);

			if (srq->rq.max_sge > 1)
				sg_list_sz = sizeof(*qp->r_sg_list) *
					(srq->rq.max_sge - 1);
		} else if (init_attr->cap.max_recv_sge > 1)
			sg_list_sz = sizeof(*qp->r_sg_list) *
				(init_attr->cap.max_recv_sge - 1);
		qp = kzalloc(sz + sg_list_sz, GFP_KERNEL);
		if (!qp) {
			ret = ERR_PTR(-ENOMEM);
			goto bail_swq;
		}
		RCU_INIT_POINTER(qp->next, NULL);
		qp->s_hdr = kzalloc(sizeof(*qp->s_hdr), GFP_KERNEL);
		if (!qp->s_hdr) {
			ret = ERR_PTR(-ENOMEM);
			goto bail_qp;
		}
		qp->timeout_jiffies =
			usecs_to_jiffies((4096UL * (1UL << qp->timeout)) /
				1000UL);
		if (init_attr->srq)
			sz = 0;
		else {
			qp->r_rq.size = init_attr->cap.max_recv_wr + 1;
			qp->r_rq.max_sge = init_attr->cap.max_recv_sge;
			sz = (sizeof(struct ib_sge) * qp->r_rq.max_sge) +
				sizeof(struct qib_rwqe);
			qp->r_rq.wq = vmalloc_user(sizeof(struct qib_rwq) +
						   qp->r_rq.size * sz);
			if (!qp->r_rq.wq) {
				ret = ERR_PTR(-ENOMEM);
				goto bail_qp;
			}
		}

		/*
		 * ib_create_qp() will initialize qp->ibqp
		 * except for qp->ibqp.qp_num.
		 */
		spin_lock_init(&qp->r_lock);
		spin_lock_init(&qp->s_lock);
		spin_lock_init(&qp->r_rq.lock);
		atomic_set(&qp->refcount, 0);
		init_waitqueue_head(&qp->wait);
		init_waitqueue_head(&qp->wait_dma);
		init_timer(&qp->s_timer);
		qp->s_timer.data = (unsigned long)qp;
		INIT_WORK(&qp->s_work, qib_do_send);
		INIT_LIST_HEAD(&qp->iowait);
		INIT_LIST_HEAD(&qp->rspwait);
		qp->state = IB_QPS_RESET;
		qp->s_wq = swq;
		qp->s_size = init_attr->cap.max_send_wr + 1;
		qp->s_max_sge = init_attr->cap.max_send_sge;
		if (init_attr->sq_sig_type == IB_SIGNAL_REQ_WR)
			qp->s_flags = QIB_S_SIGNAL_REQ_WR;
		dev = to_idev(ibpd->device);
		dd = dd_from_dev(dev);
		err = alloc_qpn(dd, &dev->qpn_table, init_attr->qp_type,
				init_attr->port_num);
		if (err < 0) {
			ret = ERR_PTR(err);
			vfree(qp->r_rq.wq);
			goto bail_qp;
		}
		qp->ibqp.qp_num = err;
		qp->port_num = init_attr->port_num;
		qib_reset_qp(qp, init_attr->qp_type);
		break;

	default:
		/* Don't support raw QPs */
		ret = ERR_PTR(-ENOSYS);
		goto bail;
	}

	init_attr->cap.max_inline_data = 0;

	/*
	 * Return the address of the RWQ as the offset to mmap.
	 * See qib_mmap() for details.
	 */
	if (udata && udata->outlen >= sizeof(__u64)) {
		if (!qp->r_rq.wq) {
			__u64 offset = 0;

			err = ib_copy_to_udata(udata, &offset,
					       sizeof(offset));
			if (err) {
				ret = ERR_PTR(err);
				goto bail_ip;
			}
		} else {
			u32 s = sizeof(struct qib_rwq) + qp->r_rq.size * sz;

			qp->ip = qib_create_mmap_info(dev, s,
						      ibpd->uobject->context,
						      qp->r_rq.wq);
			if (!qp->ip) {
				ret = ERR_PTR(-ENOMEM);
				goto bail_ip;
			}

			err = ib_copy_to_udata(udata, &(qp->ip->offset),
					       sizeof(qp->ip->offset));
			if (err) {
				ret = ERR_PTR(err);
				goto bail_ip;
			}
		}
	}

	spin_lock(&dev->n_qps_lock);
	if (dev->n_qps_allocated == ib_qib_max_qps) {
		spin_unlock(&dev->n_qps_lock);
		ret = ERR_PTR(-ENOMEM);
		goto bail_ip;
	}

	dev->n_qps_allocated++;
	spin_unlock(&dev->n_qps_lock);

	if (qp->ip) {
		spin_lock_irq(&dev->pending_lock);
		list_add(&qp->ip->pending_mmaps, &dev->pending_mmaps);
		spin_unlock_irq(&dev->pending_lock);
	}

	ret = &qp->ibqp;
	goto bail;

bail_ip:
	if (qp->ip)
		kref_put(&qp->ip->ref, qib_release_mmap_info);
	else
		vfree(qp->r_rq.wq);
	free_qpn(&dev->qpn_table, qp->ibqp.qp_num);
bail_qp:
	kfree(qp->s_hdr);
	kfree(qp);
bail_swq:
	vfree(swq);
bail:
	return ret;
}

/**
 * qib_destroy_qp - destroy a queue pair
 * @ibqp: the queue pair to destroy
 *
 * Returns 0 on success.
 *
 * Note that this can be called while the QP is actively sending or
 * receiving!
 */
int qib_destroy_qp(struct ib_qp *ibqp)
{
	struct qib_qp *qp = to_iqp(ibqp);
	struct qib_ibdev *dev = to_idev(ibqp->device);

	/* Make sure HW and driver activity is stopped. */
	spin_lock_irq(&qp->s_lock);
	if (qp->state != IB_QPS_RESET) {
		qp->state = IB_QPS_RESET;
		spin_lock(&dev->pending_lock);
		if (!list_empty(&qp->iowait))
			list_del_init(&qp->iowait);
		spin_unlock(&dev->pending_lock);
		qp->s_flags &= ~(QIB_S_TIMER | QIB_S_ANY_WAIT);
		spin_unlock_irq(&qp->s_lock);
		cancel_work_sync(&qp->s_work);
		del_timer_sync(&qp->s_timer);
		wait_event(qp->wait_dma, !atomic_read(&qp->s_dma_busy));
		if (qp->s_tx) {
			qib_put_txreq(qp->s_tx);
			qp->s_tx = NULL;
		}
		remove_qp(dev, qp);
		wait_event(qp->wait, !atomic_read(&qp->refcount));
		clear_mr_refs(qp, 1);
	} else
		spin_unlock_irq(&qp->s_lock);

	/* all users cleaned up, mark it available */
	free_qpn(&dev->qpn_table, qp->ibqp.qp_num);
	spin_lock(&dev->n_qps_lock);
	dev->n_qps_allocated--;
	spin_unlock(&dev->n_qps_lock);

	if (qp->ip)
		kref_put(&qp->ip->ref, qib_release_mmap_info);
	else
		vfree(qp->r_rq.wq);
	vfree(qp->s_wq);
	kfree(qp->s_hdr);
	kfree(qp);
	return 0;
}

/**
 * qib_init_qpn_table - initialize the QP number table for a device
 * @dd: the device data structure
 * @qpt: the QPN table
 */
void qib_init_qpn_table(struct qib_devdata *dd, struct qib_qpn_table *qpt)
{
	spin_lock_init(&qpt->lock);
	qpt->last = 1;		/* start with QPN 2 */
	qpt->nmaps = 1;
	qpt->mask = dd->qpn_mask;
}

/**
 * qib_free_qpn_table - free the QP number table for a device
 * @qpt: the QPN table
 */
void qib_free_qpn_table(struct qib_qpn_table *qpt)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(qpt->map); i++)
		if (qpt->map[i].page)
			free_page((unsigned long) qpt->map[i].page);
}

/**
 * qib_get_credit - update the send credit state of a QP from an AETH
 * @qp: the QP whose send work queue may become unblocked
 * @aeth: the Acknowledge Extended Transport Header
 *
 * The QP s_lock should be held.
 */
void qib_get_credit(struct qib_qp *qp, u32 aeth)
{
	u32 credit = (aeth >> QIB_AETH_CREDIT_SHIFT) & QIB_AETH_CREDIT_MASK;

	/*
	 * If the credit is invalid, we can send
	 * as many packets as we like.  Otherwise, we have to
	 * honor the credit field.
	 */
	if (credit == QIB_AETH_CREDIT_INVAL) {
		if (!(qp->s_flags & QIB_S_UNLIMITED_CREDIT)) {
			qp->s_flags |= QIB_S_UNLIMITED_CREDIT;
			if (qp->s_flags & QIB_S_WAIT_SSN_CREDIT) {
				qp->s_flags &= ~QIB_S_WAIT_SSN_CREDIT;
				qib_schedule_send(qp);
			}
		}
	} else if (!(qp->s_flags & QIB_S_UNLIMITED_CREDIT)) {
		/* Compute new LSN (i.e., MSN + credit) */
		credit = (aeth + credit_table[credit]) & QIB_MSN_MASK;
		if (qib_cmp24(credit, qp->s_lsn) > 0) {
			qp->s_lsn = credit;
			if (qp->s_flags & QIB_S_WAIT_SSN_CREDIT) {
				qp->s_flags &= ~QIB_S_WAIT_SSN_CREDIT;
				qib_schedule_send(qp);
			}
		}
	}
}
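/*
 * Worked example: an AETH carrying credit code 8 with MSN m decodes
 * to 16 credits via credit_table[], so the new limit sequence number
 * becomes (m + 16) & QIB_MSN_MASK; if that advances s_lsn and the QP
 * was waiting on QIB_S_WAIT_SSN_CREDIT, the send engine is kicked.
 */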