/*
 * Copyright (c) 2012, 2013 Intel Corporation.  All rights reserved.
 * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/err.h>
#include <linux/vmalloc.h>
#include <linux/jhash.h>
#include <rdma/rdma_vt.h>
#ifdef CONFIG_DEBUG_FS
#include <linux/seq_file.h>
#endif

#include "qib.h"

#define BITS_PER_PAGE		(PAGE_SIZE*BITS_PER_BYTE)
#define BITS_PER_PAGE_MASK	(BITS_PER_PAGE-1)

static inline unsigned mk_qpn(struct qib_qpn_table *qpt,
			      struct qpn_map *map, unsigned off)
{
	return (map - qpt->map) * BITS_PER_PAGE + off;
}

static inline unsigned find_next_offset(struct qib_qpn_table *qpt,
					struct qpn_map *map, unsigned off,
					unsigned n)
{
	if (qpt->mask) {
		off++;
		if (((off & qpt->mask) >> 1) >= n)
			off = (off | qpt->mask) + 2;
	} else
		off = find_next_zero_bit(map->page, BITS_PER_PAGE, off);
	return off;
}
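
/*
 * Note on qpt->mask: it comes from dd->qpn_mask (see
 * qib_init_qpn_table()) and marks the low-order QPN bits that map a
 * QP onto a kernel receive context.  A QPN is only usable if
 * ((qpn & qpt->mask) >> 1) < dd->n_krcv_queues, so find_next_offset()
 * jumps with "(off | qpt->mask) + 2" straight to the next group of
 * QPNs rather than probing the unusable values one at a time.
 */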

/*
 * Convert the AETH credit code into the number of credits.
 */
static u32 credit_table[31] = {
	0,			/* 0 */
	1,			/* 1 */
	2,			/* 2 */
	3,			/* 3 */
	4,			/* 4 */
	6,			/* 5 */
	8,			/* 6 */
	12,			/* 7 */
	16,			/* 8 */
	24,			/* 9 */
	32,			/* A */
	48,			/* B */
	64,			/* C */
	96,			/* D */
	128,			/* E */
	192,			/* F */
	256,			/* 10 */
	384,			/* 11 */
	512,			/* 12 */
	768,			/* 13 */
	1024,			/* 14 */
	1536,			/* 15 */
	2048,			/* 16 */
	3072,			/* 17 */
	4096,			/* 18 */
	6144,			/* 19 */
	8192,			/* 1A */
	12288,			/* 1B */
	16384,			/* 1C */
	24576,			/* 1D */
	32768			/* 1E */
};
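
/*
 * The AETH credit field is a 5-bit code: values 0..30 index
 * credit_table[] above, while 31 (QIB_AETH_CREDIT_INVAL) is the
 * invalid code (see qib_compute_aeth()), which qib_get_credit()
 * treats as an unlimited credit.  Entries are exact at the low end
 * and grow roughly geometrically afterwards, so five bits can span
 * 0..32768 RWQEs; e.g. code 0x14 advertises 1024 credits.
 */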

static void get_map_page(struct qib_qpn_table *qpt, struct qpn_map *map,
			 gfp_t gfp)
{
	unsigned long page = get_zeroed_page(gfp);

	/*
	 * Free the page if someone raced with us installing it.
	 */

	spin_lock(&qpt->lock);
	if (map->page)
		free_page(page);
	else
		map->page = (void *)page;
	spin_unlock(&qpt->lock);
}

/*
 * Allocate the next available QPN or
 * zero/one for QP type IB_QPT_SMI/IB_QPT_GSI.
 */
static int alloc_qpn(struct qib_devdata *dd, struct qib_qpn_table *qpt,
		     enum ib_qp_type type, u8 port, gfp_t gfp)
{
	u32 i, offset, max_scan, qpn;
	struct qpn_map *map;
	u32 ret;

	if (type == IB_QPT_SMI || type == IB_QPT_GSI) {
		unsigned n;

		ret = type == IB_QPT_GSI;
		n = 1 << (ret + 2 * (port - 1));
		spin_lock(&qpt->lock);
		if (qpt->flags & n)
			ret = -EINVAL;
		else
			qpt->flags |= n;
		spin_unlock(&qpt->lock);
		goto bail;
	}

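	/*
	 * Ordinary QPNs from here on: the SMI/GSI branch above tracks
	 * QP0/QP1 with one bit each per port in qpt->flags, so the
	 * special QPNs never touch the bitmap.  The scan below starts
	 * just past the last QPN handed out and claims the first free
	 * bit it finds.
	 */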
	qpn = qpt->last + 2;
	if (qpn >= QPN_MAX)
		qpn = 2;
	if (qpt->mask && ((qpn & qpt->mask) >> 1) >= dd->n_krcv_queues)
		qpn = (qpn | qpt->mask) + 2;
	offset = qpn & BITS_PER_PAGE_MASK;
	map = &qpt->map[qpn / BITS_PER_PAGE];
	max_scan = qpt->nmaps - !offset;
	for (i = 0;;) {
		if (unlikely(!map->page)) {
			get_map_page(qpt, map, gfp);
			if (unlikely(!map->page))
				break;
		}
		do {
			if (!test_and_set_bit(offset, map->page)) {
				qpt->last = qpn;
				ret = qpn;
				goto bail;
			}
			offset = find_next_offset(qpt, map, offset,
						  dd->n_krcv_queues);
			qpn = mk_qpn(qpt, map, offset);
			/*
			 * This test differs from alloc_pidmap().
			 * If find_next_offset() does find a zero
			 * bit, we don't need to check for QPN
			 * wrapping around past our starting QPN.
			 * We just need to be sure we don't loop
			 * forever.
			 */
		} while (offset < BITS_PER_PAGE && qpn < QPN_MAX);
		/*
		 * In order to keep the number of pages allocated to a
		 * minimum, we scan all existing pages before increasing
		 * the size of the bitmap table.
		 */
		if (++i > max_scan) {
			if (qpt->nmaps == QPNMAP_ENTRIES)
				break;
			map = &qpt->map[qpt->nmaps++];
			offset = 0;
		} else if (map < &qpt->map[qpt->nmaps]) {
			++map;
			offset = 0;
		} else {
			map = &qpt->map[0];
			offset = 2;
		}
		qpn = mk_qpn(qpt, map, offset);
	}

	ret = -ENOMEM;

bail:
	return ret;
}

static void free_qpn(struct qib_qpn_table *qpt, u32 qpn)
{
	struct qpn_map *map;

	map = qpt->map + qpn / BITS_PER_PAGE;
	if (map->page)
		clear_bit(qpn & BITS_PER_PAGE_MASK, map->page);
}

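/*
 * Hash a QPN into a bucket of the QP table.  jhash_1word() is keyed
 * with a per-device value (dev->qp_rnd) so bucket placement is not
 * predictable from the QPN alone, and qp_table_size is a power of
 * two, so masking with (size - 1) always selects a valid bucket.
 */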
static inline unsigned qpn_hash(struct qib_ibdev *dev, u32 qpn)
{
	return jhash_1word(qpn, dev->qp_rnd) &
		(dev->qp_table_size - 1);
}

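/*
 * Concurrency scheme for the QP hash table: receive-path lookups
 * (qib_lookup_qpn()) run under rcu_read_lock() only, while writers
 * serialize on dev->qpt_lock and publish with rcu_assign_pointer().
 * remove_qp() waits out a grace period with synchronize_rcu() before
 * dropping the table's reference, so a reader that fetched the QP
 * just before removal still sees a valid structure.
 */
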
/*
 * Put the QP into the hash table.
 * The hash table holds a reference to the QP.
 */
static void insert_qp(struct qib_ibdev *dev, struct rvt_qp *qp)
{
	struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
	unsigned long flags;
	unsigned n = qpn_hash(dev, qp->ibqp.qp_num);

	atomic_inc(&qp->refcount);
	spin_lock_irqsave(&dev->qpt_lock, flags);

	if (qp->ibqp.qp_num == 0)
		rcu_assign_pointer(ibp->qp0, qp);
	else if (qp->ibqp.qp_num == 1)
		rcu_assign_pointer(ibp->qp1, qp);
	else {
		qp->next = dev->qp_table[n];
		rcu_assign_pointer(dev->qp_table[n], qp);
	}

	spin_unlock_irqrestore(&dev->qpt_lock, flags);
}

/*
 * Remove the QP from the table so it can't be found asynchronously by
 * the receive interrupt routine.
 */
static void remove_qp(struct qib_ibdev *dev, struct rvt_qp *qp)
{
	struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
	unsigned n = qpn_hash(dev, qp->ibqp.qp_num);
	unsigned long flags;
	int removed = 1;

	spin_lock_irqsave(&dev->qpt_lock, flags);

	if (rcu_dereference_protected(ibp->qp0,
			lockdep_is_held(&dev->qpt_lock)) == qp) {
		RCU_INIT_POINTER(ibp->qp0, NULL);
	} else if (rcu_dereference_protected(ibp->qp1,
			lockdep_is_held(&dev->qpt_lock)) == qp) {
		RCU_INIT_POINTER(ibp->qp1, NULL);
	} else {
		struct rvt_qp *q;
		struct rvt_qp __rcu **qpp;

		removed = 0;
		qpp = &dev->qp_table[n];
		for (; (q = rcu_dereference_protected(*qpp,
				lockdep_is_held(&dev->qpt_lock))) != NULL;
				qpp = &q->next)
			if (q == qp) {
				RCU_INIT_POINTER(*qpp,
					rcu_dereference_protected(qp->next,
					 lockdep_is_held(&dev->qpt_lock)));
				removed = 1;
				break;
			}
	}

	spin_unlock_irqrestore(&dev->qpt_lock, flags);
	if (removed) {
		synchronize_rcu();
		atomic_dec(&qp->refcount);
	}
}

/**
 * qib_free_all_qps - check for QPs still in use
 * @dd: the qlogic_ib device data
 *
 * There should not be any QPs still in use.
 * Free memory for table.
 */
unsigned qib_free_all_qps(struct qib_devdata *dd)
{
	struct qib_ibdev *dev = &dd->verbs_dev;
	unsigned long flags;
	struct rvt_qp *qp;
	unsigned n, qp_inuse = 0;

	for (n = 0; n < dd->num_pports; n++) {
		struct qib_ibport *ibp = &dd->pport[n].ibport_data;

		if (!qib_mcast_tree_empty(ibp))
			qp_inuse++;
		rcu_read_lock();
		if (rcu_dereference(ibp->qp0))
			qp_inuse++;
		if (rcu_dereference(ibp->qp1))
			qp_inuse++;
		rcu_read_unlock();
	}

	spin_lock_irqsave(&dev->qpt_lock, flags);
	for (n = 0; n < dev->qp_table_size; n++) {
		qp = rcu_dereference_protected(dev->qp_table[n],
			lockdep_is_held(&dev->qpt_lock));
		RCU_INIT_POINTER(dev->qp_table[n], NULL);

		for (; qp; qp = rcu_dereference_protected(qp->next,
					lockdep_is_held(&dev->qpt_lock)))
			qp_inuse++;
	}
	spin_unlock_irqrestore(&dev->qpt_lock, flags);
	synchronize_rcu();

	return qp_inuse;
}

/**
 * qib_lookup_qpn - return the QP with the given QPN
 * @ibp: the IB port
 * @qpn: the QP number to look up
 *
 * The caller is responsible for decrementing the QP reference count
 * when done.
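 *
 * Example caller pattern (an illustrative sketch, not code from this
 * file):
 *
 *	qp = qib_lookup_qpn(ibp, qp_num);
 *	if (qp) {
 *		... process the packet ...
 *		if (atomic_dec_and_test(&qp->refcount))
 *			wake_up(&qp->wait);
 *	}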
 */
struct rvt_qp *qib_lookup_qpn(struct qib_ibport *ibp, u32 qpn)
{
	struct rvt_qp *qp = NULL;

	rcu_read_lock();
	if (unlikely(qpn <= 1)) {
		if (qpn == 0)
			qp = rcu_dereference(ibp->qp0);
		else
			qp = rcu_dereference(ibp->qp1);
		if (qp)
			atomic_inc(&qp->refcount);
	} else {
		struct qib_ibdev *dev = &ppd_from_ibp(ibp)->dd->verbs_dev;
		unsigned n = qpn_hash(dev, qpn);

		for (qp = rcu_dereference(dev->qp_table[n]); qp;
			qp = rcu_dereference(qp->next))
			if (qp->ibqp.qp_num == qpn) {
				atomic_inc(&qp->refcount);
				break;
			}
	}
	rcu_read_unlock();
	return qp;
}

/**
 * qib_reset_qp - initialize the QP state to the reset state
 * @qp: the QP to reset
 * @type: the QP type
 */
static void qib_reset_qp(struct rvt_qp *qp, enum ib_qp_type type)
{
	struct qib_qp_priv *priv = qp->priv;

	qp->remote_qpn = 0;
	qp->qkey = 0;
	qp->qp_access_flags = 0;
	atomic_set(&priv->s_dma_busy, 0);
	qp->s_flags &= QIB_S_SIGNAL_REQ_WR;
	qp->s_hdrwords = 0;
	qp->s_wqe = NULL;
	qp->s_draining = 0;
	qp->s_next_psn = 0;
	qp->s_last_psn = 0;
	qp->s_sending_psn = 0;
	qp->s_sending_hpsn = 0;
	qp->s_psn = 0;
	qp->r_psn = 0;
	qp->r_msn = 0;
	if (type == IB_QPT_RC) {
		qp->s_state = IB_OPCODE_RC_SEND_LAST;
		qp->r_state = IB_OPCODE_RC_SEND_LAST;
	} else {
		qp->s_state = IB_OPCODE_UC_SEND_LAST;
		qp->r_state = IB_OPCODE_UC_SEND_LAST;
	}
	qp->s_ack_state = IB_OPCODE_RC_ACKNOWLEDGE;
	qp->r_nak_state = 0;
	qp->r_aflags = 0;
	qp->r_flags = 0;
	qp->s_head = 0;
	qp->s_tail = 0;
	qp->s_cur = 0;
	qp->s_acked = 0;
	qp->s_last = 0;
	qp->s_ssn = 1;
	qp->s_lsn = 0;
	qp->s_mig_state = IB_MIG_MIGRATED;
	memset(qp->s_ack_queue, 0, sizeof(qp->s_ack_queue));
	qp->r_head_ack_queue = 0;
	qp->s_tail_ack_queue = 0;
	qp->s_num_rd_atomic = 0;
	if (qp->r_rq.wq) {
		qp->r_rq.wq->head = 0;
		qp->r_rq.wq->tail = 0;
	}
	qp->r_sge.num_sge = 0;
}

static void clear_mr_refs(struct rvt_qp *qp, int clr_sends)
{
	unsigned n;

	if (test_and_clear_bit(QIB_R_REWIND_SGE, &qp->r_aflags))
		qib_put_ss(&qp->s_rdma_read_sge);

	qib_put_ss(&qp->r_sge);

	if (clr_sends) {
		while (qp->s_last != qp->s_head) {
			struct rvt_swqe *wqe = get_swqe_ptr(qp, qp->s_last);
			unsigned i;

			for (i = 0; i < wqe->wr.num_sge; i++) {
				struct rvt_sge *sge = &wqe->sg_list[i];

				rvt_put_mr(sge->mr);
			}
			if (qp->ibqp.qp_type == IB_QPT_UD ||
			    qp->ibqp.qp_type == IB_QPT_SMI ||
			    qp->ibqp.qp_type == IB_QPT_GSI)
				atomic_dec(&to_iah(wqe->ud_wr.ah)->refcount);
			if (++qp->s_last >= qp->s_size)
				qp->s_last = 0;
		}
		if (qp->s_rdma_mr) {
			rvt_put_mr(qp->s_rdma_mr);
			qp->s_rdma_mr = NULL;
		}
	}

	if (qp->ibqp.qp_type != IB_QPT_RC)
		return;

	for (n = 0; n < ARRAY_SIZE(qp->s_ack_queue); n++) {
		struct rvt_ack_entry *e = &qp->s_ack_queue[n];

		if (e->opcode == IB_OPCODE_RC_RDMA_READ_REQUEST &&
		    e->rdma_sge.mr) {
			rvt_put_mr(e->rdma_sge.mr);
			e->rdma_sge.mr = NULL;
		}
	}
}

/**
 * qib_error_qp - put a QP into the error state
 * @qp: the QP to put into the error state
 * @err: the receive completion error to signal if a RWQE is active
 *
 * Flushes both send and receive work queues.
 * Returns true if last WQE event should be generated.
 * The QP r_lock and s_lock should be held and interrupts disabled.
 * If we are already in error state, just return.
 */
int qib_error_qp(struct rvt_qp *qp, enum ib_wc_status err)
{
	struct qib_qp_priv *priv = qp->priv;
	struct qib_ibdev *dev = to_idev(qp->ibqp.device);
	struct ib_wc wc;
	int ret = 0;

	if (qp->state == IB_QPS_ERR || qp->state == IB_QPS_RESET)
		goto bail;

	qp->state = IB_QPS_ERR;

	if (qp->s_flags & (QIB_S_TIMER | QIB_S_WAIT_RNR)) {
		qp->s_flags &= ~(QIB_S_TIMER | QIB_S_WAIT_RNR);
		del_timer(&qp->s_timer);
	}

	if (qp->s_flags & QIB_S_ANY_WAIT_SEND)
		qp->s_flags &= ~QIB_S_ANY_WAIT_SEND;

	spin_lock(&dev->pending_lock);
	if (!list_empty(&priv->iowait) && !(qp->s_flags & QIB_S_BUSY)) {
		qp->s_flags &= ~QIB_S_ANY_WAIT_IO;
		list_del_init(&priv->iowait);
	}
	spin_unlock(&dev->pending_lock);

	if (!(qp->s_flags & QIB_S_BUSY)) {
		qp->s_hdrwords = 0;
		if (qp->s_rdma_mr) {
			rvt_put_mr(qp->s_rdma_mr);
			qp->s_rdma_mr = NULL;
		}
		if (priv->s_tx) {
			qib_put_txreq(priv->s_tx);
			priv->s_tx = NULL;
		}
	}

	/* Schedule the sending tasklet to drain the send work queue. */
	if (qp->s_last != qp->s_head)
		qib_schedule_send(qp);

	clear_mr_refs(qp, 0);

	memset(&wc, 0, sizeof(wc));
	wc.qp = &qp->ibqp;
	wc.opcode = IB_WC_RECV;

	if (test_and_clear_bit(QIB_R_WRID_VALID, &qp->r_aflags)) {
		wc.wr_id = qp->r_wr_id;
		wc.status = err;
		qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
	}
	wc.status = IB_WC_WR_FLUSH_ERR;

	if (qp->r_rq.wq) {
		struct rvt_rwq *wq;
		u32 head;
		u32 tail;

		spin_lock(&qp->r_rq.lock);

		/* sanity check pointers before trusting them */
		wq = qp->r_rq.wq;
		head = wq->head;
		if (head >= qp->r_rq.size)
			head = 0;
		tail = wq->tail;
		if (tail >= qp->r_rq.size)
			tail = 0;
		while (tail != head) {
			wc.wr_id = get_rwqe_ptr(&qp->r_rq, tail)->wr_id;
			if (++tail >= qp->r_rq.size)
				tail = 0;
			qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
		}
		wq->tail = tail;

		spin_unlock(&qp->r_rq.lock);
	} else if (qp->ibqp.event_handler)
		ret = 1;

bail:
	return ret;
}

/**
 * qib_modify_qp - modify the attributes of a queue pair
 * @ibqp: the queue pair whose attributes we're modifying
 * @attr: the new attributes
 * @attr_mask: the mask of attributes to modify
 * @udata: user data for libibverbs.so
 *
 * Returns 0 on success, otherwise returns an errno.
 */
int qib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		  int attr_mask, struct ib_udata *udata)
{
	struct qib_ibdev *dev = to_idev(ibqp->device);
	struct rvt_qp *qp = to_iqp(ibqp);
	struct qib_qp_priv *priv = qp->priv;
	enum ib_qp_state cur_state, new_state;
	struct ib_event ev;
	int lastwqe = 0;
	int mig = 0;
	int ret;
	u32 pmtu = 0; /* for gcc warning only */

	spin_lock_irq(&qp->r_lock);
	spin_lock(&qp->s_lock);

	cur_state = attr_mask & IB_QP_CUR_STATE ?
		attr->cur_qp_state : qp->state;
	new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;

	if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
				attr_mask, IB_LINK_LAYER_UNSPECIFIED))
		goto inval;

	if (attr_mask & IB_QP_AV) {
		if (attr->ah_attr.dlid >= be16_to_cpu(IB_MULTICAST_LID_BASE))
			goto inval;
		if (qib_check_ah(qp->ibqp.device, &attr->ah_attr))
			goto inval;
	}

	if (attr_mask & IB_QP_ALT_PATH) {
		if (attr->alt_ah_attr.dlid >=
		    be16_to_cpu(IB_MULTICAST_LID_BASE))
			goto inval;
		if (qib_check_ah(qp->ibqp.device, &attr->alt_ah_attr))
			goto inval;
		if (attr->alt_pkey_index >= qib_get_npkeys(dd_from_dev(dev)))
			goto inval;
	}

	if (attr_mask & IB_QP_PKEY_INDEX)
		if (attr->pkey_index >= qib_get_npkeys(dd_from_dev(dev)))
			goto inval;

	if (attr_mask & IB_QP_MIN_RNR_TIMER)
		if (attr->min_rnr_timer > 31)
			goto inval;

	if (attr_mask & IB_QP_PORT)
		if (qp->ibqp.qp_type == IB_QPT_SMI ||
		    qp->ibqp.qp_type == IB_QPT_GSI ||
		    attr->port_num == 0 ||
		    attr->port_num > ibqp->device->phys_port_cnt)
			goto inval;

	if (attr_mask & IB_QP_DEST_QPN)
		if (attr->dest_qp_num > QIB_QPN_MASK)
			goto inval;

	if (attr_mask & IB_QP_RETRY_CNT)
		if (attr->retry_cnt > 7)
			goto inval;

	if (attr_mask & IB_QP_RNR_RETRY)
		if (attr->rnr_retry > 7)
			goto inval;

	/*
	 * Don't allow invalid path_mtu values.  OK to set greater
	 * than the active mtu (or even the max_cap, if we have tuned
	 * that to a small mtu).  We'll set qp->path_mtu
	 * to the lesser of requested attribute mtu and active,
	 * for packetizing messages.
	 * Note that the QP port has to be set in INIT and MTU in RTR.
	 */
	if (attr_mask & IB_QP_PATH_MTU) {
		struct qib_devdata *dd = dd_from_dev(dev);
		int mtu, pidx = qp->port_num - 1;

		mtu = ib_mtu_enum_to_int(attr->path_mtu);
		if (mtu == -1)
			goto inval;
		if (mtu > dd->pport[pidx].ibmtu) {
			switch (dd->pport[pidx].ibmtu) {
			case 4096:
				pmtu = IB_MTU_4096;
				break;
			case 2048:
				pmtu = IB_MTU_2048;
				break;
			case 1024:
				pmtu = IB_MTU_1024;
				break;
			case 512:
				pmtu = IB_MTU_512;
				break;
			case 256:
				pmtu = IB_MTU_256;
				break;
			default:
				pmtu = IB_MTU_2048;
			}
		} else
			pmtu = attr->path_mtu;
	}

	if (attr_mask & IB_QP_PATH_MIG_STATE) {
		if (attr->path_mig_state == IB_MIG_REARM) {
			if (qp->s_mig_state == IB_MIG_ARMED)
				goto inval;
			if (new_state != IB_QPS_RTS)
				goto inval;
		} else if (attr->path_mig_state == IB_MIG_MIGRATED) {
			if (qp->s_mig_state == IB_MIG_REARM)
				goto inval;
			if (new_state != IB_QPS_RTS && new_state != IB_QPS_SQD)
				goto inval;
			if (qp->s_mig_state == IB_MIG_ARMED)
				mig = 1;
		} else
			goto inval;
	}

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		if (attr->max_dest_rd_atomic > QIB_MAX_RDMA_ATOMIC)
			goto inval;

	switch (new_state) {
	case IB_QPS_RESET:
		if (qp->state != IB_QPS_RESET) {
			qp->state = IB_QPS_RESET;
			spin_lock(&dev->pending_lock);
			if (!list_empty(&priv->iowait))
				list_del_init(&priv->iowait);
			spin_unlock(&dev->pending_lock);
			qp->s_flags &= ~(QIB_S_TIMER | QIB_S_ANY_WAIT);
			spin_unlock(&qp->s_lock);
			spin_unlock_irq(&qp->r_lock);
			/* Stop the sending work queue and retry timer */
			cancel_work_sync(&priv->s_work);
			del_timer_sync(&qp->s_timer);
			wait_event(priv->wait_dma,
				   !atomic_read(&priv->s_dma_busy));
			if (priv->s_tx) {
				qib_put_txreq(priv->s_tx);
				priv->s_tx = NULL;
			}
			remove_qp(dev, qp);
			wait_event(qp->wait, !atomic_read(&qp->refcount));
			spin_lock_irq(&qp->r_lock);
			spin_lock(&qp->s_lock);
			clear_mr_refs(qp, 1);
			qib_reset_qp(qp, ibqp->qp_type);
		}
		break;

	case IB_QPS_RTR:
		/* Allow event to retrigger if QP set to RTR more than once */
		qp->r_flags &= ~QIB_R_COMM_EST;
		qp->state = new_state;
		break;

	case IB_QPS_SQD:
		qp->s_draining = qp->s_last != qp->s_cur;
		qp->state = new_state;
		break;

	case IB_QPS_SQE:
		if (qp->ibqp.qp_type == IB_QPT_RC)
			goto inval;
		qp->state = new_state;
		break;

	case IB_QPS_ERR:
		lastwqe = qib_error_qp(qp, IB_WC_WR_FLUSH_ERR);
		break;

	default:
		qp->state = new_state;
		break;
	}

	if (attr_mask & IB_QP_PKEY_INDEX)
		qp->s_pkey_index = attr->pkey_index;

	if (attr_mask & IB_QP_PORT)
		qp->port_num = attr->port_num;

	if (attr_mask & IB_QP_DEST_QPN)
		qp->remote_qpn = attr->dest_qp_num;

	if (attr_mask & IB_QP_SQ_PSN) {
		qp->s_next_psn = attr->sq_psn & QIB_PSN_MASK;
		qp->s_psn = qp->s_next_psn;
		qp->s_sending_psn = qp->s_next_psn;
		qp->s_last_psn = qp->s_next_psn - 1;
		qp->s_sending_hpsn = qp->s_last_psn;
	}

	if (attr_mask & IB_QP_RQ_PSN)
		qp->r_psn = attr->rq_psn & QIB_PSN_MASK;

	if (attr_mask & IB_QP_ACCESS_FLAGS)
		qp->qp_access_flags = attr->qp_access_flags;

	if (attr_mask & IB_QP_AV) {
		qp->remote_ah_attr = attr->ah_attr;
		qp->s_srate = attr->ah_attr.static_rate;
	}

	if (attr_mask & IB_QP_ALT_PATH) {
		qp->alt_ah_attr = attr->alt_ah_attr;
		qp->s_alt_pkey_index = attr->alt_pkey_index;
	}

	if (attr_mask & IB_QP_PATH_MIG_STATE) {
		qp->s_mig_state = attr->path_mig_state;
		if (mig) {
			qp->remote_ah_attr = qp->alt_ah_attr;
			qp->port_num = qp->alt_ah_attr.port_num;
			qp->s_pkey_index = qp->s_alt_pkey_index;
		}
	}

	if (attr_mask & IB_QP_PATH_MTU) {
		qp->path_mtu = pmtu;
		qp->pmtu = ib_mtu_enum_to_int(pmtu);
	}

	if (attr_mask & IB_QP_RETRY_CNT) {
		qp->s_retry_cnt = attr->retry_cnt;
		qp->s_retry = attr->retry_cnt;
	}

	if (attr_mask & IB_QP_RNR_RETRY) {
		qp->s_rnr_retry_cnt = attr->rnr_retry;
		qp->s_rnr_retry = attr->rnr_retry;
	}

	if (attr_mask & IB_QP_MIN_RNR_TIMER)
		qp->r_min_rnr_timer = attr->min_rnr_timer;

	if (attr_mask & IB_QP_TIMEOUT) {
		qp->timeout = attr->timeout;
		qp->timeout_jiffies =
			usecs_to_jiffies((4096UL * (1UL << qp->timeout)) /
					 1000UL);
	}

	if (attr_mask & IB_QP_QKEY)
		qp->qkey = attr->qkey;

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		qp->r_max_rd_atomic = attr->max_dest_rd_atomic;

	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC)
		qp->s_max_rd_atomic = attr->max_rd_atomic;

	spin_unlock(&qp->s_lock);
	spin_unlock_irq(&qp->r_lock);

	if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
		insert_qp(dev, qp);

	if (lastwqe) {
		ev.device = qp->ibqp.device;
		ev.element.qp = &qp->ibqp;
		ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
		qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
	}
	if (mig) {
		ev.device = qp->ibqp.device;
		ev.element.qp = &qp->ibqp;
		ev.event = IB_EVENT_PATH_MIG;
		qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
	}
	ret = 0;
	goto bail;

inval:
	spin_unlock(&qp->s_lock);
	spin_unlock_irq(&qp->r_lock);
	ret = -EINVAL;

bail:
	return ret;
}

int qib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		 int attr_mask, struct ib_qp_init_attr *init_attr)
{
	struct rvt_qp *qp = to_iqp(ibqp);

	attr->qp_state = qp->state;
	attr->cur_qp_state = attr->qp_state;
	attr->path_mtu = qp->path_mtu;
	attr->path_mig_state = qp->s_mig_state;
	attr->qkey = qp->qkey;
	attr->rq_psn = qp->r_psn & QIB_PSN_MASK;
	attr->sq_psn = qp->s_next_psn & QIB_PSN_MASK;
	attr->dest_qp_num = qp->remote_qpn;
	attr->qp_access_flags = qp->qp_access_flags;
	attr->cap.max_send_wr = qp->s_size - 1;
	attr->cap.max_recv_wr = qp->ibqp.srq ? 0 : qp->r_rq.size - 1;
	attr->cap.max_send_sge = qp->s_max_sge;
	attr->cap.max_recv_sge = qp->r_rq.max_sge;
	attr->cap.max_inline_data = 0;
	attr->ah_attr = qp->remote_ah_attr;
	attr->alt_ah_attr = qp->alt_ah_attr;
	attr->pkey_index = qp->s_pkey_index;
	attr->alt_pkey_index = qp->s_alt_pkey_index;
	attr->en_sqd_async_notify = 0;
	attr->sq_draining = qp->s_draining;
	attr->max_rd_atomic = qp->s_max_rd_atomic;
	attr->max_dest_rd_atomic = qp->r_max_rd_atomic;
	attr->min_rnr_timer = qp->r_min_rnr_timer;
	attr->port_num = qp->port_num;
	attr->timeout = qp->timeout;
	attr->retry_cnt = qp->s_retry_cnt;
	attr->rnr_retry = qp->s_rnr_retry_cnt;
	attr->alt_port_num = qp->alt_ah_attr.port_num;
	attr->alt_timeout = qp->alt_timeout;

	init_attr->event_handler = qp->ibqp.event_handler;
	init_attr->qp_context = qp->ibqp.qp_context;
	init_attr->send_cq = qp->ibqp.send_cq;
	init_attr->recv_cq = qp->ibqp.recv_cq;
	init_attr->srq = qp->ibqp.srq;
	init_attr->cap = attr->cap;
	if (qp->s_flags & QIB_S_SIGNAL_REQ_WR)
		init_attr->sq_sig_type = IB_SIGNAL_REQ_WR;
	else
		init_attr->sq_sig_type = IB_SIGNAL_ALL_WR;
	init_attr->qp_type = qp->ibqp.qp_type;
	init_attr->port_num = qp->port_num;
	return 0;
}

/**
 * qib_compute_aeth - compute the AETH (syndrome + MSN)
 * @qp: the queue pair to compute the AETH for
 *
 * Returns the AETH.
 */
__be32 qib_compute_aeth(struct rvt_qp *qp)
{
	u32 aeth = qp->r_msn & QIB_MSN_MASK;

	if (qp->ibqp.srq) {
		/*
		 * Shared receive queues don't generate credits.
		 * Set the credit field to the invalid value.
		 */
		aeth |= QIB_AETH_CREDIT_INVAL << QIB_AETH_CREDIT_SHIFT;
	} else {
		u32 min, max, x;
		u32 credits;
		struct rvt_rwq *wq = qp->r_rq.wq;
		u32 head;
		u32 tail;

		/* sanity check pointers before trusting them */
		head = wq->head;
		if (head >= qp->r_rq.size)
			head = 0;
		tail = wq->tail;
		if (tail >= qp->r_rq.size)
			tail = 0;
		/*
		 * Compute the number of credits available (RWQEs).
		 * XXX Not holding the r_rq.lock here so there is a small
		 * chance that the pair of reads is not atomic.
		 */
		credits = head - tail;
		if ((int)credits < 0)
			credits += qp->r_rq.size;
		/*
		 * Binary search the credit table to find the code to
		 * use.
		 */
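		/*
		 * Worked example: with credits == 100 the search
		 * settles on x == 0xD (credit_table[0xD] == 96),
		 * i.e. the largest code whose table entry does not
		 * exceed the credits actually available.
		 */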
		min = 0;
		max = 31;
		for (;;) {
			x = (min + max) / 2;
			if (credit_table[x] == credits)
				break;
			if (credit_table[x] > credits)
				max = x;
			else if (min == x)
				break;
			else
				min = x;
		}
		aeth |= x << QIB_AETH_CREDIT_SHIFT;
	}
	return cpu_to_be32(aeth);
}

/**
 * qib_create_qp - create a queue pair for a device
 * @ibpd: the protection domain whose device we create the queue pair for
 * @init_attr: the attributes of the queue pair
 * @udata: user data for libibverbs.so
 *
 * Returns the queue pair on success, otherwise returns an errno.
 *
 * Called by the ib_create_qp() core verbs function.
 */
struct ib_qp *qib_create_qp(struct ib_pd *ibpd,
			    struct ib_qp_init_attr *init_attr,
			    struct ib_udata *udata)
{
	struct rvt_qp *qp;
	int err;
	struct rvt_swqe *swq = NULL;
	struct qib_ibdev *dev;
	struct qib_devdata *dd;
	size_t sz;
	size_t sg_list_sz;
	struct ib_qp *ret;
	gfp_t gfp;
	struct qib_qp_priv *priv;

	if (init_attr->cap.max_send_sge > ib_qib_max_sges ||
	    init_attr->cap.max_send_wr > ib_qib_max_qp_wrs ||
	    init_attr->create_flags & ~(IB_QP_CREATE_USE_GFP_NOIO))
		return ERR_PTR(-EINVAL);

	/* GFP_NOIO is applicable to RC QPs only */
	if (init_attr->create_flags & IB_QP_CREATE_USE_GFP_NOIO &&
	    init_attr->qp_type != IB_QPT_RC)
		return ERR_PTR(-EINVAL);

	gfp = init_attr->create_flags & IB_QP_CREATE_USE_GFP_NOIO ?
			GFP_NOIO : GFP_KERNEL;

	/* Check receive queue parameters if no SRQ is specified. */
	if (!init_attr->srq) {
		if (init_attr->cap.max_recv_sge > ib_qib_max_sges ||
		    init_attr->cap.max_recv_wr > ib_qib_max_qp_wrs) {
			ret = ERR_PTR(-EINVAL);
			goto bail;
		}
		if (init_attr->cap.max_send_sge +
		    init_attr->cap.max_send_wr +
		    init_attr->cap.max_recv_sge +
		    init_attr->cap.max_recv_wr == 0) {
			ret = ERR_PTR(-EINVAL);
			goto bail;
		}
	}

	switch (init_attr->qp_type) {
	case IB_QPT_SMI:
	case IB_QPT_GSI:
		if (init_attr->port_num == 0 ||
		    init_attr->port_num > ibpd->device->phys_port_cnt) {
			ret = ERR_PTR(-EINVAL);
			goto bail;
		}
	case IB_QPT_UC:
	case IB_QPT_RC:
	case IB_QPT_UD:
		sz = sizeof(struct rvt_sge) *
			init_attr->cap.max_send_sge +
			sizeof(struct rvt_swqe);
		swq = __vmalloc((init_attr->cap.max_send_wr + 1) * sz,
				gfp, PAGE_KERNEL);
		if (swq == NULL) {
			ret = ERR_PTR(-ENOMEM);
			goto bail;
		}
		sz = sizeof(*qp);
		sg_list_sz = 0;
		if (init_attr->srq) {
			struct qib_srq *srq = to_isrq(init_attr->srq);

			if (srq->rq.max_sge > 1)
				sg_list_sz = sizeof(*qp->r_sg_list) *
					(srq->rq.max_sge - 1);
		} else if (init_attr->cap.max_recv_sge > 1)
			sg_list_sz = sizeof(*qp->r_sg_list) *
				(init_attr->cap.max_recv_sge - 1);
		qp = kzalloc(sz + sg_list_sz, gfp);
		if (!qp) {
			ret = ERR_PTR(-ENOMEM);
			goto bail_swq;
		}
		RCU_INIT_POINTER(qp->next, NULL);
		priv = kzalloc(sizeof(*priv), gfp);
		if (!priv) {
			ret = ERR_PTR(-ENOMEM);
			goto bail_qp_hdr;
		}
		priv->owner = qp;
		priv->s_hdr = kzalloc(sizeof(*priv->s_hdr), gfp);
		if (!priv->s_hdr) {
			ret = ERR_PTR(-ENOMEM);
			goto bail_qp;
		}
		qp->priv = priv;
		qp->timeout_jiffies =
			usecs_to_jiffies((4096UL * (1UL << qp->timeout)) /
				1000UL);
		if (init_attr->srq)
			sz = 0;
		else {
			qp->r_rq.size = init_attr->cap.max_recv_wr + 1;
			qp->r_rq.max_sge = init_attr->cap.max_recv_sge;
			sz = (sizeof(struct ib_sge) * qp->r_rq.max_sge) +
				sizeof(struct rvt_rwqe);
			if (gfp != GFP_NOIO)
				qp->r_rq.wq = vmalloc_user(
						sizeof(struct rvt_rwq) +
						qp->r_rq.size * sz);
			else
				qp->r_rq.wq = __vmalloc(
						sizeof(struct rvt_rwq) +
						qp->r_rq.size * sz,
						gfp, PAGE_KERNEL);

			if (!qp->r_rq.wq) {
				ret = ERR_PTR(-ENOMEM);
				goto bail_qp;
			}
		}

		/*
		 * ib_create_qp() will initialize qp->ibqp
		 * except for qp->ibqp.qp_num.
		 */
		spin_lock_init(&qp->r_lock);
		spin_lock_init(&qp->s_lock);
		spin_lock_init(&qp->r_rq.lock);
		atomic_set(&qp->refcount, 0);
		init_waitqueue_head(&qp->wait);
		init_waitqueue_head(&priv->wait_dma);
		init_timer(&qp->s_timer);
		qp->s_timer.data = (unsigned long)qp;
		INIT_WORK(&priv->s_work, qib_do_send);
		INIT_LIST_HEAD(&priv->iowait);
		INIT_LIST_HEAD(&qp->rspwait);
		qp->state = IB_QPS_RESET;
		qp->s_wq = swq;
		qp->s_size = init_attr->cap.max_send_wr + 1;
		qp->s_max_sge = init_attr->cap.max_send_sge;
		if (init_attr->sq_sig_type == IB_SIGNAL_REQ_WR)
			qp->s_flags = QIB_S_SIGNAL_REQ_WR;
		dev = to_idev(ibpd->device);
		dd = dd_from_dev(dev);
		err = alloc_qpn(dd, &dev->qpn_table, init_attr->qp_type,
				init_attr->port_num, gfp);
		if (err < 0) {
			ret = ERR_PTR(err);
			vfree(qp->r_rq.wq);
			goto bail_qp;
		}
		qp->ibqp.qp_num = err;
		qp->port_num = init_attr->port_num;
		qib_reset_qp(qp, init_attr->qp_type);
		break;

	default:
		/* Don't support raw QPs */
		ret = ERR_PTR(-ENOSYS);
		goto bail;
	}

	init_attr->cap.max_inline_data = 0;

	/*
	 * Return the address of the RWQ as the offset to mmap.
	 * See qib_mmap() for details.
	 */
	if (udata && udata->outlen >= sizeof(__u64)) {
		if (!qp->r_rq.wq) {
			__u64 offset = 0;

			err = ib_copy_to_udata(udata, &offset,
					       sizeof(offset));
			if (err) {
				ret = ERR_PTR(err);
				goto bail_ip;
			}
		} else {
			u32 s = sizeof(struct rvt_rwq) + qp->r_rq.size * sz;

			qp->ip = qib_create_mmap_info(dev, s,
						      ibpd->uobject->context,
						      qp->r_rq.wq);
			if (!qp->ip) {
				ret = ERR_PTR(-ENOMEM);
				goto bail_ip;
			}

			err = ib_copy_to_udata(udata, &(qp->ip->offset),
					       sizeof(qp->ip->offset));
			if (err) {
				ret = ERR_PTR(err);
				goto bail_ip;
			}
		}
	}

	spin_lock(&dev->n_qps_lock);
	if (dev->n_qps_allocated == ib_qib_max_qps) {
		spin_unlock(&dev->n_qps_lock);
		ret = ERR_PTR(-ENOMEM);
		goto bail_ip;
	}

	dev->n_qps_allocated++;
	spin_unlock(&dev->n_qps_lock);

	if (qp->ip) {
		spin_lock_irq(&dev->pending_lock);
		list_add(&qp->ip->pending_mmaps, &dev->pending_mmaps);
		spin_unlock_irq(&dev->pending_lock);
	}

	ret = &qp->ibqp;
	goto bail;

bail_ip:
	if (qp->ip)
		kref_put(&qp->ip->ref, qib_release_mmap_info);
	else
		vfree(qp->r_rq.wq);
	free_qpn(&dev->qpn_table, qp->ibqp.qp_num);
bail_qp:
	kfree(priv->s_hdr);
	kfree(priv);
bail_qp_hdr:
	kfree(qp);
bail_swq:
	vfree(swq);
bail:
	return ret;
}

/**
 * qib_destroy_qp - destroy a queue pair
 * @ibqp: the queue pair to destroy
 *
 * Returns 0 on success.
 *
 * Note that this can be called while the QP is actively sending or
 * receiving!
 */
int qib_destroy_qp(struct ib_qp *ibqp)
{
	struct rvt_qp *qp = to_iqp(ibqp);
	struct qib_ibdev *dev = to_idev(ibqp->device);
	struct qib_qp_priv *priv = qp->priv;

	/* Make sure HW and driver activity is stopped. */
	spin_lock_irq(&qp->s_lock);
	if (qp->state != IB_QPS_RESET) {
		qp->state = IB_QPS_RESET;
		spin_lock(&dev->pending_lock);
		if (!list_empty(&priv->iowait))
			list_del_init(&priv->iowait);
		spin_unlock(&dev->pending_lock);
		qp->s_flags &= ~(QIB_S_TIMER | QIB_S_ANY_WAIT);
		spin_unlock_irq(&qp->s_lock);
		cancel_work_sync(&priv->s_work);
		del_timer_sync(&qp->s_timer);
		wait_event(priv->wait_dma, !atomic_read(&priv->s_dma_busy));
		if (priv->s_tx) {
			qib_put_txreq(priv->s_tx);
			priv->s_tx = NULL;
		}
		remove_qp(dev, qp);
		wait_event(qp->wait, !atomic_read(&qp->refcount));
		clear_mr_refs(qp, 1);
	} else
		spin_unlock_irq(&qp->s_lock);

	/* all users cleaned up, mark it available */
	free_qpn(&dev->qpn_table, qp->ibqp.qp_num);
	spin_lock(&dev->n_qps_lock);
	dev->n_qps_allocated--;
	spin_unlock(&dev->n_qps_lock);

	if (qp->ip)
		kref_put(&qp->ip->ref, qib_release_mmap_info);
	else
		vfree(qp->r_rq.wq);
	vfree(qp->s_wq);
	kfree(priv->s_hdr);
	kfree(priv);
	kfree(qp);
	return 0;
}

/**
 * qib_init_qpn_table - initialize the QP number table for a device
 * @dd: the qlogic_ib device data
 * @qpt: the QPN table
 */
void qib_init_qpn_table(struct qib_devdata *dd, struct qib_qpn_table *qpt)
{
	spin_lock_init(&qpt->lock);
	qpt->last = 1;		/* start with QPN 2 */
	qpt->nmaps = 1;
	qpt->mask = dd->qpn_mask;
}
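
/*
 * QPNs 0 and 1 are reserved for the per-port special QPs (SMI/GSI);
 * alloc_qpn() tracks those through qpt->flags instead of the bitmap,
 * and the bitmap scan never returns a value below 2 (on wrap it
 * restarts at offset 2 of map 0).
 */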

/**
 * qib_free_qpn_table - free the QP number table for a device
 * @qpt: the QPN table
 */
void qib_free_qpn_table(struct qib_qpn_table *qpt)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(qpt->map); i++)
		if (qpt->map[i].page)
			free_page((unsigned long) qpt->map[i].page);
}

/**
 * qib_get_credit - process the credit field of an incoming AETH
 * @qp: the QP whose send work queue may need scheduling
 * @aeth: the Acknowledge Extended Transport Header
 *
 * The QP s_lock should be held.
 */
void qib_get_credit(struct rvt_qp *qp, u32 aeth)
{
	u32 credit = (aeth >> QIB_AETH_CREDIT_SHIFT) & QIB_AETH_CREDIT_MASK;

	/*
	 * If the credit is invalid, we can send
	 * as many packets as we like.  Otherwise, we have to
	 * honor the credit field.
	 */
	if (credit == QIB_AETH_CREDIT_INVAL) {
		if (!(qp->s_flags & QIB_S_UNLIMITED_CREDIT)) {
			qp->s_flags |= QIB_S_UNLIMITED_CREDIT;
			if (qp->s_flags & QIB_S_WAIT_SSN_CREDIT) {
				qp->s_flags &= ~QIB_S_WAIT_SSN_CREDIT;
				qib_schedule_send(qp);
			}
		}
	} else if (!(qp->s_flags & QIB_S_UNLIMITED_CREDIT)) {
		/* Compute new LSN (i.e., MSN + credit) */
		credit = (aeth + credit_table[credit]) & QIB_MSN_MASK;
		if (qib_cmp24(credit, qp->s_lsn) > 0) {
			qp->s_lsn = credit;
			if (qp->s_flags & QIB_S_WAIT_SSN_CREDIT) {
				qp->s_flags &= ~QIB_S_WAIT_SSN_CREDIT;
				qib_schedule_send(qp);
			}
		}
	}
}

#ifdef CONFIG_DEBUG_FS

struct qib_qp_iter {
	struct qib_ibdev *dev;
	struct rvt_qp *qp;
	int n;
};

struct qib_qp_iter *qib_qp_iter_init(struct qib_ibdev *dev)
{
	struct qib_qp_iter *iter;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return NULL;

	iter->dev = dev;
	if (qib_qp_iter_next(iter)) {
		kfree(iter);
		return NULL;
	}

	return iter;
}

int qib_qp_iter_next(struct qib_qp_iter *iter)
{
	struct qib_ibdev *dev = iter->dev;
	int n = iter->n;
	int ret = 1;
	struct rvt_qp *pqp = iter->qp;
	struct rvt_qp *qp;

	for (; n < dev->qp_table_size; n++) {
		if (pqp)
			qp = rcu_dereference(pqp->next);
		else
			qp = rcu_dereference(dev->qp_table[n]);
		pqp = qp;
		if (qp) {
			iter->qp = qp;
			iter->n = n;
			return 0;
		}
	}
	return ret;
}

static const char * const qp_type_str[] = {
	"SMI", "GSI", "RC", "UC", "UD",
};

void qib_qp_iter_print(struct seq_file *s, struct qib_qp_iter *iter)
{
	struct rvt_swqe *wqe;
	struct rvt_qp *qp = iter->qp;
	struct qib_qp_priv *priv = qp->priv;

	wqe = get_swqe_ptr(qp, qp->s_last);
	seq_printf(s,
		   "N %d QP%u %s %u %u %u f=%x %u %u %u %u %u PSN %x %x %x %x %x (%u %u %u %u %u %u) QP%u LID %x\n",
		   iter->n,
		   qp->ibqp.qp_num,
		   qp_type_str[qp->ibqp.qp_type],
		   qp->state,
		   wqe->wr.opcode,
		   qp->s_hdrwords,
		   qp->s_flags,
		   atomic_read(&priv->s_dma_busy),
		   !list_empty(&priv->iowait),
		   qp->timeout,
		   wqe->ssn,
		   qp->s_lsn,
		   qp->s_last_psn,
		   qp->s_psn, qp->s_next_psn,
		   qp->s_sending_psn, qp->s_sending_hpsn,
		   qp->s_last, qp->s_acked, qp->s_cur,
		   qp->s_tail, qp->s_head, qp->s_size,
		   qp->remote_qpn,
		   qp->remote_ah_attr.dlid);
}

#endif