/*
 * Copyright (c) 2012, 2013 Intel Corporation. All rights reserved.
 * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/err.h>
#include <linux/vmalloc.h>
#include <linux/jhash.h>
#include <rdma/rdma_vt.h>
#ifdef CONFIG_DEBUG_FS
#include <linux/seq_file.h>
#endif

#include "qib.h"

/*
 * The mask field that was present in the now-deleted qib_qpn_table
 * has no counterpart in rvt_qpn_table, so the same field is defined
 * here as qpt_mask instead of being added to rvt_qpn_table. When
 * non-zero, it restricts allocated QPNs so that ((qpn & qpt_mask) >> 1)
 * indexes a valid kernel receive queue (see alloc_qpn() below).
 */
u16 qpt_mask;

static inline unsigned mk_qpn(struct rvt_qpn_table *qpt,
			      struct rvt_qpn_map *map, unsigned off)
{
	return (map - qpt->map) * RVT_BITS_PER_PAGE + off;
}

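/*
 * Find the next candidate QPN offset in @map at or after @off. When
 * qpt_mask is set, only offsets whose masked value, shifted down by
 * one, indexes one of the @n kernel receive queues are usable, so
 * out-of-range candidates are skipped by jumping to the next aligned
 * offset; otherwise this is a plain scan for the next zero bit.
 */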
static inline unsigned find_next_offset(struct rvt_qpn_table *qpt,
					struct rvt_qpn_map *map, unsigned off,
					unsigned n)
{
	if (qpt_mask) {
		off++;
		if (((off & qpt_mask) >> 1) >= n)
			off = (off | qpt_mask) + 2;
	} else {
		off = find_next_zero_bit(map->page, RVT_BITS_PER_PAGE, off);
	}
	return off;
}

/*
 * Convert the AETH credit code into the number of credits.
 */
static u32 credit_table[31] = {
	0,			/* 0 */
	1,			/* 1 */
	2,			/* 2 */
	3,			/* 3 */
	4,			/* 4 */
	6,			/* 5 */
	8,			/* 6 */
	12,			/* 7 */
	16,			/* 8 */
	24,			/* 9 */
	32,			/* A */
	48,			/* B */
	64,			/* C */
	96,			/* D */
	128,			/* E */
	192,			/* F */
	256,			/* 10 */
	384,			/* 11 */
	512,			/* 12 */
	768,			/* 13 */
	1024,			/* 14 */
	1536,			/* 15 */
	2048,			/* 16 */
	3072,			/* 17 */
	4096,			/* 18 */
	6144,			/* 19 */
	8192,			/* 1A */
	12288,			/* 1B */
	16384,			/* 1C */
	24576,			/* 1D */
	32768			/* 1E */
};
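
/*
 * The table grows roughly exponentially: two steps of the 5-bit AETH
 * credit code double the credit count (e.g. code 0x14 is 1024 credits
 * and 0x16 is 2048). Code 0x1F is deliberately absent; it is the
 * "invalid" code meaning unlimited credit (see qib_get_credit()).
 */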

static void get_map_page(struct rvt_qpn_table *qpt, struct rvt_qpn_map *map,
			 gfp_t gfp)
{
	unsigned long page = get_zeroed_page(gfp);

	/*
	 * Free the page if someone raced with us installing it.
	 */

	spin_lock(&qpt->lock);
	if (map->page)
		free_page(page);
	else
		map->page = (void *)page;
	spin_unlock(&qpt->lock);
}

/*
 * Allocate the next available QPN, or zero/one for QP type
 * IB_QPT_SMI/IB_QPT_GSI. Returns the allocated QPN on success,
 * -EINVAL if the SMI/GSI QPN for the port is already in use, or
 * -ENOMEM if no QPN (or bitmap page) is available.
 */
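/*
 * A minimal caller sketch (hypothetical; the real caller is the
 * rdmavt QP creation path):
 *
 *	ret = alloc_qpn(rdi, qpt, IB_QPT_RC, port, GFP_KERNEL);
 *	if (ret < 0)
 *		return ret;
 *	qp->ibqp.qp_num = ret;
 */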
int alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt,
	      enum ib_qp_type type, u8 port, gfp_t gfp)
{
	u32 i, offset, max_scan, qpn;
	struct rvt_qpn_map *map;
	u32 ret;
	struct qib_ibdev *verbs_dev = container_of(rdi, struct qib_ibdev, rdi);
	struct qib_devdata *dd = container_of(verbs_dev, struct qib_devdata,
					      verbs_dev);

	if (type == IB_QPT_SMI || type == IB_QPT_GSI) {
		unsigned n;

		ret = type == IB_QPT_GSI;
		n = 1 << (ret + 2 * (port - 1));
		spin_lock(&qpt->lock);
		if (qpt->flags & n)
			ret = -EINVAL;
		else
			qpt->flags |= n;
		spin_unlock(&qpt->lock);
		goto bail;
	}

	qpn = qpt->last + 2;
	if (qpn >= RVT_QPN_MAX)
		qpn = 2;
	if (qpt_mask && ((qpn & qpt_mask) >> 1) >= dd->n_krcv_queues)
		qpn = (qpn | qpt_mask) + 2;
	offset = qpn & RVT_BITS_PER_PAGE_MASK;
	map = &qpt->map[qpn / RVT_BITS_PER_PAGE];
	max_scan = qpt->nmaps - !offset;
	for (i = 0;;) {
		if (unlikely(!map->page)) {
			get_map_page(qpt, map, gfp);
			if (unlikely(!map->page))
				break;
		}
		do {
			if (!test_and_set_bit(offset, map->page)) {
				qpt->last = qpn;
				ret = qpn;
				goto bail;
			}
			offset = find_next_offset(qpt, map, offset,
						  dd->n_krcv_queues);
			qpn = mk_qpn(qpt, map, offset);
			/*
			 * This test differs from alloc_pidmap().
			 * If find_next_offset() does find a zero
			 * bit, we don't need to check for QPN
			 * wrapping around past our starting QPN.
			 * We just need to be sure we don't loop
			 * forever.
			 */
		} while (offset < RVT_BITS_PER_PAGE && qpn < RVT_QPN_MAX);
		/*
		 * In order to keep the number of pages allocated to a
		 * minimum, we scan all the existing pages before increasing
		 * the size of the bitmap table.
		 */
		if (++i > max_scan) {
			if (qpt->nmaps == RVT_QPNMAP_ENTRIES)
				break;
			map = &qpt->map[qpt->nmaps++];
			offset = 0;
		} else if (map < &qpt->map[qpt->nmaps]) {
			++map;
			offset = 0;
		} else {
			map = &qpt->map[0];
			offset = 2;
		}
		qpn = mk_qpn(qpt, map, offset);
	}

	ret = -ENOMEM;

bail:
	return ret;
}

static void free_qpn(struct rvt_qpn_table *qpt, u32 qpn)
{
	struct rvt_qpn_map *map;

	map = qpt->map + qpn / RVT_BITS_PER_PAGE;
	if (map->page)
		clear_bit(qpn & RVT_BITS_PER_PAGE_MASK, map->page);
}
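
/*
 * qpn_hash() below mixes the QPN with a per-device random seed
 * (dev->qp_rnd) so bucket placement in the QP hash table is not
 * predictable from allocation order; the jhash output is masked with
 * (qp_table_size - 1) to yield the bucket index.
 */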

static inline unsigned qpn_hash(struct qib_ibdev *dev, u32 qpn)
{
	return jhash_1word(qpn, dev->qp_rnd) &
		(dev->rdi.qp_dev->qp_table_size - 1);
}

/*
 * Put the QP into the hash table.
 * The hash table holds a reference to the QP.
 */
static void insert_qp(struct qib_ibdev *dev, struct rvt_qp *qp)
{
	struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
	unsigned long flags;
	unsigned n = qpn_hash(dev, qp->ibqp.qp_num);

	atomic_inc(&qp->refcount);
	spin_lock_irqsave(&dev->rdi.qp_dev->qpt_lock, flags);

	if (qp->ibqp.qp_num == 0)
		rcu_assign_pointer(ibp->rvp.qp[0], qp);
	else if (qp->ibqp.qp_num == 1)
		rcu_assign_pointer(ibp->rvp.qp[1], qp);
	else {
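		/*
		 * Link the new QP at the front of the bucket list, then
		 * publish it with rcu_assign_pointer() so concurrent
		 * RCU readers see a fully initialised ->next.
		 */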
		qp->next = dev->rdi.qp_dev->qp_table[n];
		rcu_assign_pointer(dev->rdi.qp_dev->qp_table[n], qp);
	}

	spin_unlock_irqrestore(&dev->rdi.qp_dev->qpt_lock, flags);
}

/*
 * Remove the QP from the table so it can't be found asynchronously by
 * the receive interrupt routine.
 */
static void remove_qp(struct qib_ibdev *dev, struct rvt_qp *qp)
{
	struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
	unsigned n = qpn_hash(dev, qp->ibqp.qp_num);
	unsigned long flags;
	int removed = 1;
	spinlock_t *qpt_lock_ptr; /* Pointer to make checkpatch happy */

	spin_lock_irqsave(&dev->rdi.qp_dev->qpt_lock, flags);

	qpt_lock_ptr = &dev->rdi.qp_dev->qpt_lock;
	if (rcu_dereference_protected(ibp->rvp.qp[0],
				      lockdep_is_held(qpt_lock_ptr)) == qp) {
		RCU_INIT_POINTER(ibp->rvp.qp[0], NULL);
	} else if (rcu_dereference_protected(ibp->rvp.qp[1],
		   lockdep_is_held(qpt_lock_ptr)) == qp) {
		RCU_INIT_POINTER(ibp->rvp.qp[1], NULL);
	} else {
		struct rvt_qp *q;
		struct rvt_qp __rcu **qpp;

		removed = 0;
		qpp = &dev->rdi.qp_dev->qp_table[n];
		for (; (q = rcu_dereference_protected(*qpp,
				lockdep_is_held(qpt_lock_ptr))) != NULL;
				qpp = &q->next)
			if (q == qp) {
				RCU_INIT_POINTER(*qpp,
					rcu_dereference_protected(qp->next,
					 lockdep_is_held(qpt_lock_ptr)));
				removed = 1;
				break;
			}
	}

	spin_unlock_irqrestore(&dev->rdi.qp_dev->qpt_lock, flags);
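	/*
	 * If the QP was found and unlinked, wait for any in-flight RCU
	 * readers (e.g. qib_lookup_qpn()) to finish with it before the
	 * table's reference is dropped.
	 */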
	if (removed) {
		synchronize_rcu();
		atomic_dec(&qp->refcount);
	}
}

/**
 * qib_free_all_qps - check for QPs still in use
 * @rdi: the rvt device info for this driver instance
 */
unsigned qib_free_all_qps(struct rvt_dev_info *rdi)
{
	struct qib_ibdev *verbs_dev = container_of(rdi, struct qib_ibdev, rdi);
	struct qib_devdata *dd = container_of(verbs_dev, struct qib_devdata,
					      verbs_dev);
	unsigned n, qp_inuse = 0;

	for (n = 0; n < dd->num_pports; n++) {
		struct qib_ibport *ibp = &dd->pport[n].ibport_data;

		if (!qib_mcast_tree_empty(ibp))
			qp_inuse++;
		rcu_read_lock();
		if (rcu_dereference(ibp->rvp.qp[0]))
			qp_inuse++;
		if (rcu_dereference(ibp->rvp.qp[1]))
			qp_inuse++;
		rcu_read_unlock();
	}
	return qp_inuse;
}

/**
 * qib_lookup_qpn - return the QP with the given QPN
 * @ibp: the IB port on which to look up the QPN
 * @qpn: the QP number to look up
 *
 * The caller is responsible for decrementing the QP reference count
 * when done.
 */
struct rvt_qp *qib_lookup_qpn(struct qib_ibport *ibp, u32 qpn)
{
	struct rvt_qp *qp = NULL;

	rcu_read_lock();
	if (unlikely(qpn <= 1)) {
		if (qpn == 0)
			qp = rcu_dereference(ibp->rvp.qp[0]);
		else
			qp = rcu_dereference(ibp->rvp.qp[1]);
		if (qp)
			atomic_inc(&qp->refcount);
	} else {
		struct qib_ibdev *dev = &ppd_from_ibp(ibp)->dd->verbs_dev;
		unsigned n = qpn_hash(dev, qpn);

		for (qp = rcu_dereference(dev->rdi.qp_dev->qp_table[n]); qp;
			qp = rcu_dereference(qp->next))
			if (qp->ibqp.qp_num == qpn) {
				atomic_inc(&qp->refcount);
				break;
			}
	}
	rcu_read_unlock();
	return qp;
}
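
/*
 * A hypothetical caller sketch showing the reference discipline the
 * comment above requires (the real callers live in the receive path):
 *
 *	qp = qib_lookup_qpn(ibp, qpn);
 *	if (qp) {
 *		... process the packet ...
 *		if (atomic_dec_and_test(&qp->refcount))
 *			wake_up(&qp->wait);
 *	}
 */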

void notify_qp_reset(struct rvt_qp *qp)
{
	struct qib_qp_priv *priv = qp->priv;

	atomic_set(&priv->s_dma_busy, 0);
}

static void clear_mr_refs(struct rvt_qp *qp, int clr_sends)
{
	unsigned n;

	if (test_and_clear_bit(RVT_R_REWIND_SGE, &qp->r_aflags))
		qib_put_ss(&qp->s_rdma_read_sge);

	qib_put_ss(&qp->r_sge);

	if (clr_sends) {
		while (qp->s_last != qp->s_head) {
			struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, qp->s_last);
			unsigned i;

			for (i = 0; i < wqe->wr.num_sge; i++) {
				struct rvt_sge *sge = &wqe->sg_list[i];

				rvt_put_mr(sge->mr);
			}
			if (qp->ibqp.qp_type == IB_QPT_UD ||
			    qp->ibqp.qp_type == IB_QPT_SMI ||
			    qp->ibqp.qp_type == IB_QPT_GSI)
				atomic_dec(
				 &ibah_to_rvtah(wqe->ud_wr.ah)->refcount);
			if (++qp->s_last >= qp->s_size)
				qp->s_last = 0;
		}
		if (qp->s_rdma_mr) {
			rvt_put_mr(qp->s_rdma_mr);
			qp->s_rdma_mr = NULL;
		}
	}

	if (qp->ibqp.qp_type != IB_QPT_RC)
		return;

	for (n = 0; n < ARRAY_SIZE(qp->s_ack_queue); n++) {
		struct rvt_ack_entry *e = &qp->s_ack_queue[n];

		if (e->opcode == IB_OPCODE_RC_RDMA_READ_REQUEST &&
		    e->rdma_sge.mr) {
			rvt_put_mr(e->rdma_sge.mr);
			e->rdma_sge.mr = NULL;
		}
	}
}

/**
 * qib_error_qp - put a QP into the error state
 * @qp: the QP to put into the error state
 * @err: the receive completion error to signal if a RWQE is active
 *
 * Flushes both send and receive work queues.
 * Returns true if the last WQE event should be generated.
 * The QP r_lock and s_lock should be held and interrupts disabled.
 * If we are already in the error state, just return.
 */
int qib_error_qp(struct rvt_qp *qp, enum ib_wc_status err)
{
	struct qib_qp_priv *priv = qp->priv;
	struct qib_ibdev *dev = to_idev(qp->ibqp.device);
	struct ib_wc wc;
	int ret = 0;

	if (qp->state == IB_QPS_ERR || qp->state == IB_QPS_RESET)
		goto bail;

	qp->state = IB_QPS_ERR;

	if (qp->s_flags & (RVT_S_TIMER | RVT_S_WAIT_RNR)) {
		qp->s_flags &= ~(RVT_S_TIMER | RVT_S_WAIT_RNR);
		del_timer(&qp->s_timer);
	}

	if (qp->s_flags & RVT_S_ANY_WAIT_SEND)
		qp->s_flags &= ~RVT_S_ANY_WAIT_SEND;

	spin_lock(&dev->rdi.pending_lock);
	if (!list_empty(&priv->iowait) && !(qp->s_flags & RVT_S_BUSY)) {
		qp->s_flags &= ~RVT_S_ANY_WAIT_IO;
		list_del_init(&priv->iowait);
	}
	spin_unlock(&dev->rdi.pending_lock);

	if (!(qp->s_flags & RVT_S_BUSY)) {
		qp->s_hdrwords = 0;
		if (qp->s_rdma_mr) {
			rvt_put_mr(qp->s_rdma_mr);
			qp->s_rdma_mr = NULL;
		}
		if (priv->s_tx) {
			qib_put_txreq(priv->s_tx);
			priv->s_tx = NULL;
		}
	}

	/* Schedule the sending tasklet to drain the send work queue. */
	if (qp->s_last != qp->s_head)
		qib_schedule_send(qp);

	clear_mr_refs(qp, 0);

	memset(&wc, 0, sizeof(wc));
	wc.qp = &qp->ibqp;
	wc.opcode = IB_WC_RECV;

	if (test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags)) {
		wc.wr_id = qp->r_wr_id;
		wc.status = err;
		rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1);
	}
	wc.status = IB_WC_WR_FLUSH_ERR;

	if (qp->r_rq.wq) {
		struct rvt_rwq *wq;
		u32 head;
		u32 tail;

		spin_lock(&qp->r_rq.lock);

		/* sanity check pointers before trusting them */
		wq = qp->r_rq.wq;
		head = wq->head;
		if (head >= qp->r_rq.size)
			head = 0;
		tail = wq->tail;
		if (tail >= qp->r_rq.size)
			tail = 0;
		while (tail != head) {
			wc.wr_id = get_rwqe_ptr(&qp->r_rq, tail)->wr_id;
			if (++tail >= qp->r_rq.size)
				tail = 0;
			rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1);
		}
		wq->tail = tail;

		spin_unlock(&qp->r_rq.lock);
	} else if (qp->ibqp.event_handler)
		ret = 1;

bail:
	return ret;
}

/**
 * qib_modify_qp - modify the attributes of a queue pair
 * @ibqp: the queue pair whose attributes we're modifying
 * @attr: the new attributes
 * @attr_mask: the mask of attributes to modify
 * @udata: user data for libibverbs.so
 *
 * Returns 0 on success, otherwise returns an errno.
 */
int qib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		  int attr_mask, struct ib_udata *udata)
{
	struct qib_ibdev *dev = to_idev(ibqp->device);
	struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
	struct qib_qp_priv *priv = qp->priv;
	enum ib_qp_state cur_state, new_state;
	struct ib_event ev;
	int lastwqe = 0;
	int mig = 0;
	int ret;
	u32 pmtu = 0; /* for gcc warning only */

	spin_lock_irq(&qp->r_lock);
	spin_lock(&qp->s_lock);

	cur_state = attr_mask & IB_QP_CUR_STATE ?
		attr->cur_qp_state : qp->state;
	new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;

	if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
				attr_mask, IB_LINK_LAYER_UNSPECIFIED))
		goto inval;

	if (attr_mask & IB_QP_AV) {
		if (attr->ah_attr.dlid >= be16_to_cpu(IB_MULTICAST_LID_BASE))
			goto inval;
		if (rvt_check_ah(qp->ibqp.device, &attr->ah_attr))
			goto inval;
	}

	if (attr_mask & IB_QP_ALT_PATH) {
		if (attr->alt_ah_attr.dlid >=
		    be16_to_cpu(IB_MULTICAST_LID_BASE))
			goto inval;
		if (rvt_check_ah(qp->ibqp.device, &attr->alt_ah_attr))
			goto inval;
		if (attr->alt_pkey_index >= qib_get_npkeys(dd_from_dev(dev)))
			goto inval;
	}

	if (attr_mask & IB_QP_PKEY_INDEX)
		if (attr->pkey_index >= qib_get_npkeys(dd_from_dev(dev)))
			goto inval;

	if (attr_mask & IB_QP_MIN_RNR_TIMER)
		if (attr->min_rnr_timer > 31)
			goto inval;

	if (attr_mask & IB_QP_PORT)
		if (qp->ibqp.qp_type == IB_QPT_SMI ||
		    qp->ibqp.qp_type == IB_QPT_GSI ||
		    attr->port_num == 0 ||
		    attr->port_num > ibqp->device->phys_port_cnt)
			goto inval;

	if (attr_mask & IB_QP_DEST_QPN)
		if (attr->dest_qp_num > QIB_QPN_MASK)
			goto inval;

	if (attr_mask & IB_QP_RETRY_CNT)
		if (attr->retry_cnt > 7)
			goto inval;

	if (attr_mask & IB_QP_RNR_RETRY)
		if (attr->rnr_retry > 7)
			goto inval;

	/*
	 * Don't allow invalid path_mtu values. It is OK to set it
	 * greater than the active mtu (or even the max_cap, if we have
	 * tuned that to a small mtu). We'll set qp->path_mtu to the
	 * lesser of the requested attribute mtu and the active mtu,
	 * for packetizing messages.
	 * Note that the QP port has to be set in INIT and the MTU in RTR.
	 */
	if (attr_mask & IB_QP_PATH_MTU) {
		struct qib_devdata *dd = dd_from_dev(dev);
		int mtu, pidx = qp->port_num - 1;

		mtu = ib_mtu_enum_to_int(attr->path_mtu);
		if (mtu == -1)
			goto inval;
		if (mtu > dd->pport[pidx].ibmtu) {
			switch (dd->pport[pidx].ibmtu) {
			case 4096:
				pmtu = IB_MTU_4096;
				break;
			case 2048:
				pmtu = IB_MTU_2048;
				break;
			case 1024:
				pmtu = IB_MTU_1024;
				break;
			case 512:
				pmtu = IB_MTU_512;
				break;
			case 256:
				pmtu = IB_MTU_256;
				break;
			default:
				pmtu = IB_MTU_2048;
			}
		} else
			pmtu = attr->path_mtu;
	}

	if (attr_mask & IB_QP_PATH_MIG_STATE) {
		if (attr->path_mig_state == IB_MIG_REARM) {
			if (qp->s_mig_state == IB_MIG_ARMED)
				goto inval;
			if (new_state != IB_QPS_RTS)
				goto inval;
		} else if (attr->path_mig_state == IB_MIG_MIGRATED) {
			if (qp->s_mig_state == IB_MIG_REARM)
				goto inval;
			if (new_state != IB_QPS_RTS && new_state != IB_QPS_SQD)
				goto inval;
			if (qp->s_mig_state == IB_MIG_ARMED)
				mig = 1;
		} else
			goto inval;
	}

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		if (attr->max_dest_rd_atomic > QIB_MAX_RDMA_ATOMIC)
			goto inval;

	switch (new_state) {
	case IB_QPS_RESET:
		if (qp->state != IB_QPS_RESET) {
			qp->state = IB_QPS_RESET;
			spin_lock(&dev->rdi.pending_lock);
			if (!list_empty(&priv->iowait))
				list_del_init(&priv->iowait);
			spin_unlock(&dev->rdi.pending_lock);
			qp->s_flags &= ~(RVT_S_TIMER | RVT_S_ANY_WAIT);
			spin_unlock(&qp->s_lock);
			spin_unlock_irq(&qp->r_lock);
			/* Stop the sending work queue and retry timer */
			cancel_work_sync(&priv->s_work);
			del_timer_sync(&qp->s_timer);
			wait_event(priv->wait_dma,
				   !atomic_read(&priv->s_dma_busy));
			if (priv->s_tx) {
				qib_put_txreq(priv->s_tx);
				priv->s_tx = NULL;
			}
			remove_qp(dev, qp);
			wait_event(qp->wait, !atomic_read(&qp->refcount));
			spin_lock_irq(&qp->r_lock);
			spin_lock(&qp->s_lock);
			clear_mr_refs(qp, 1);
			rvt_reset_qp(&dev->rdi, qp, ibqp->qp_type);
		}
		break;

	case IB_QPS_RTR:
		/* Allow event to retrigger if QP set to RTR more than once */
		qp->r_flags &= ~RVT_R_COMM_EST;
		qp->state = new_state;
		break;

	case IB_QPS_SQD:
		qp->s_draining = qp->s_last != qp->s_cur;
		qp->state = new_state;
		break;

	case IB_QPS_SQE:
		if (qp->ibqp.qp_type == IB_QPT_RC)
			goto inval;
		qp->state = new_state;
		break;

	case IB_QPS_ERR:
		lastwqe = qib_error_qp(qp, IB_WC_WR_FLUSH_ERR);
		break;

	default:
		qp->state = new_state;
		break;
	}

	if (attr_mask & IB_QP_PKEY_INDEX)
		qp->s_pkey_index = attr->pkey_index;

	if (attr_mask & IB_QP_PORT)
		qp->port_num = attr->port_num;

	if (attr_mask & IB_QP_DEST_QPN)
		qp->remote_qpn = attr->dest_qp_num;

	if (attr_mask & IB_QP_SQ_PSN) {
		qp->s_next_psn = attr->sq_psn & QIB_PSN_MASK;
		qp->s_psn = qp->s_next_psn;
		qp->s_sending_psn = qp->s_next_psn;
		qp->s_last_psn = qp->s_next_psn - 1;
		qp->s_sending_hpsn = qp->s_last_psn;
	}

	if (attr_mask & IB_QP_RQ_PSN)
		qp->r_psn = attr->rq_psn & QIB_PSN_MASK;

	if (attr_mask & IB_QP_ACCESS_FLAGS)
		qp->qp_access_flags = attr->qp_access_flags;

	if (attr_mask & IB_QP_AV) {
		qp->remote_ah_attr = attr->ah_attr;
		qp->s_srate = attr->ah_attr.static_rate;
	}

	if (attr_mask & IB_QP_ALT_PATH) {
		qp->alt_ah_attr = attr->alt_ah_attr;
		qp->s_alt_pkey_index = attr->alt_pkey_index;
	}

	if (attr_mask & IB_QP_PATH_MIG_STATE) {
		qp->s_mig_state = attr->path_mig_state;
		if (mig) {
			qp->remote_ah_attr = qp->alt_ah_attr;
			qp->port_num = qp->alt_ah_attr.port_num;
			qp->s_pkey_index = qp->s_alt_pkey_index;
		}
	}

	if (attr_mask & IB_QP_PATH_MTU) {
		qp->path_mtu = pmtu;
		qp->pmtu = ib_mtu_enum_to_int(pmtu);
	}

	if (attr_mask & IB_QP_RETRY_CNT) {
		qp->s_retry_cnt = attr->retry_cnt;
		qp->s_retry = attr->retry_cnt;
	}

	if (attr_mask & IB_QP_RNR_RETRY) {
		qp->s_rnr_retry_cnt = attr->rnr_retry;
		qp->s_rnr_retry = attr->rnr_retry;
	}

	if (attr_mask & IB_QP_MIN_RNR_TIMER)
		qp->r_min_rnr_timer = attr->min_rnr_timer;

	if (attr_mask & IB_QP_TIMEOUT) {
		qp->timeout = attr->timeout;
		qp->timeout_jiffies =
			usecs_to_jiffies((4096UL * (1UL << qp->timeout)) /
					 1000UL);
	}

	if (attr_mask & IB_QP_QKEY)
		qp->qkey = attr->qkey;

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		qp->r_max_rd_atomic = attr->max_dest_rd_atomic;

	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC)
		qp->s_max_rd_atomic = attr->max_rd_atomic;

	spin_unlock(&qp->s_lock);
	spin_unlock_irq(&qp->r_lock);

	if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
		insert_qp(dev, qp);

	if (lastwqe) {
		ev.device = qp->ibqp.device;
		ev.element.qp = &qp->ibqp;
		ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
		qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
	}
	if (mig) {
		ev.device = qp->ibqp.device;
		ev.element.qp = &qp->ibqp;
		ev.event = IB_EVENT_PATH_MIG;
		qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
	}
	ret = 0;
	goto bail;

inval:
	spin_unlock(&qp->s_lock);
	spin_unlock_irq(&qp->r_lock);
	ret = -EINVAL;

bail:
	return ret;
}

int qib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		 int attr_mask, struct ib_qp_init_attr *init_attr)
{
	struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);

	attr->qp_state = qp->state;
	attr->cur_qp_state = attr->qp_state;
	attr->path_mtu = qp->path_mtu;
	attr->path_mig_state = qp->s_mig_state;
	attr->qkey = qp->qkey;
	attr->rq_psn = qp->r_psn & QIB_PSN_MASK;
	attr->sq_psn = qp->s_next_psn & QIB_PSN_MASK;
	attr->dest_qp_num = qp->remote_qpn;
	attr->qp_access_flags = qp->qp_access_flags;
	attr->cap.max_send_wr = qp->s_size - 1;
	attr->cap.max_recv_wr = qp->ibqp.srq ? 0 : qp->r_rq.size - 1;
	attr->cap.max_send_sge = qp->s_max_sge;
	attr->cap.max_recv_sge = qp->r_rq.max_sge;
	attr->cap.max_inline_data = 0;
	attr->ah_attr = qp->remote_ah_attr;
	attr->alt_ah_attr = qp->alt_ah_attr;
	attr->pkey_index = qp->s_pkey_index;
	attr->alt_pkey_index = qp->s_alt_pkey_index;
	attr->en_sqd_async_notify = 0;
	attr->sq_draining = qp->s_draining;
	attr->max_rd_atomic = qp->s_max_rd_atomic;
	attr->max_dest_rd_atomic = qp->r_max_rd_atomic;
	attr->min_rnr_timer = qp->r_min_rnr_timer;
	attr->port_num = qp->port_num;
	attr->timeout = qp->timeout;
	attr->retry_cnt = qp->s_retry_cnt;
	attr->rnr_retry = qp->s_rnr_retry_cnt;
	attr->alt_port_num = qp->alt_ah_attr.port_num;
	attr->alt_timeout = qp->alt_timeout;

	init_attr->event_handler = qp->ibqp.event_handler;
	init_attr->qp_context = qp->ibqp.qp_context;
	init_attr->send_cq = qp->ibqp.send_cq;
	init_attr->recv_cq = qp->ibqp.recv_cq;
	init_attr->srq = qp->ibqp.srq;
	init_attr->cap = attr->cap;
	if (qp->s_flags & RVT_S_SIGNAL_REQ_WR)
		init_attr->sq_sig_type = IB_SIGNAL_REQ_WR;
	else
		init_attr->sq_sig_type = IB_SIGNAL_ALL_WR;
	init_attr->qp_type = qp->ibqp.qp_type;
	init_attr->port_num = qp->port_num;
	return 0;
}

/**
 * qib_compute_aeth - compute the AETH (syndrome + MSN)
 * @qp: the queue pair to compute the AETH for
 *
 * Returns the AETH.
 */
__be32 qib_compute_aeth(struct rvt_qp *qp)
{
	u32 aeth = qp->r_msn & QIB_MSN_MASK;

	if (qp->ibqp.srq) {
		/*
		 * Shared receive queues don't generate credits.
		 * Set the credit field to the invalid value.
		 */
		aeth |= QIB_AETH_CREDIT_INVAL << QIB_AETH_CREDIT_SHIFT;
	} else {
		u32 min, max, x;
		u32 credits;
		struct rvt_rwq *wq = qp->r_rq.wq;
		u32 head;
		u32 tail;

		/* sanity check pointers before trusting them */
		head = wq->head;
		if (head >= qp->r_rq.size)
			head = 0;
		tail = wq->tail;
		if (tail >= qp->r_rq.size)
			tail = 0;
		/*
		 * Compute the number of credits available (RWQEs).
		 * XXX Not holding the r_rq.lock here so there is a small
		 * chance that the pair of reads are not atomic.
		 */
		credits = head - tail;
		if ((int)credits < 0)
			credits += qp->r_rq.size;
		/*
		 * Binary search the credit table to find the code to
		 * use.
		 */
		min = 0;
		max = 31;
		for (;;) {
			x = (min + max) / 2;
			if (credit_table[x] == credits)
				break;
			if (credit_table[x] > credits)
				max = x;
			else if (min == x)
				break;
			else
				min = x;
		}
		aeth |= x << QIB_AETH_CREDIT_SHIFT;
	}
	return cpu_to_be32(aeth);
}
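
/*
 * Worked example for the binary search above: with 100 RWQEs
 * available, the loop converges on code 0xD (96 credits), the largest
 * table entry that does not exceed the actual count, so the responder
 * never advertises more credits than it really has.
 */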

void *qp_priv_alloc(struct rvt_dev_info *rdi, struct rvt_qp *qp, gfp_t gfp)
{
	struct qib_qp_priv *priv;

	priv = kzalloc(sizeof(*priv), gfp);
	if (!priv)
		return ERR_PTR(-ENOMEM);
	priv->owner = qp;

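	/* s_hdr is per-QP scratch space used to build packet headers. */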
	priv->s_hdr = kzalloc(sizeof(*priv->s_hdr), gfp);
	if (!priv->s_hdr) {
		kfree(priv);
		return ERR_PTR(-ENOMEM);
	}
	init_waitqueue_head(&priv->wait_dma);
	INIT_WORK(&priv->s_work, _qib_do_send);
	INIT_LIST_HEAD(&priv->iowait);

	return priv;
}

void qp_priv_free(struct rvt_dev_info *rdi, struct rvt_qp *qp)
{
	struct qib_qp_priv *priv = qp->priv;

	kfree(priv->s_hdr);
	kfree(priv);
}

/**
 * qib_destroy_qp - destroy a queue pair
 * @ibqp: the queue pair to destroy
 *
 * Returns 0 on success.
 *
 * Note that this can be called while the QP is actively sending or
 * receiving!
 */
int qib_destroy_qp(struct ib_qp *ibqp)
{
	struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
	struct qib_ibdev *dev = to_idev(ibqp->device);
	struct qib_qp_priv *priv = qp->priv;

	/* Make sure HW and driver activity is stopped. */
	spin_lock_irq(&qp->s_lock);
	if (qp->state != IB_QPS_RESET) {
		qp->state = IB_QPS_RESET;
		spin_lock(&dev->rdi.pending_lock);
		if (!list_empty(&priv->iowait))
			list_del_init(&priv->iowait);
		spin_unlock(&dev->rdi.pending_lock);
		qp->s_flags &= ~(RVT_S_TIMER | RVT_S_ANY_WAIT);
		spin_unlock_irq(&qp->s_lock);
		cancel_work_sync(&priv->s_work);
		del_timer_sync(&qp->s_timer);
		wait_event(priv->wait_dma, !atomic_read(&priv->s_dma_busy));
		if (priv->s_tx) {
			qib_put_txreq(priv->s_tx);
			priv->s_tx = NULL;
		}
		remove_qp(dev, qp);
		wait_event(qp->wait, !atomic_read(&qp->refcount));
		clear_mr_refs(qp, 1);
	} else
		spin_unlock_irq(&qp->s_lock);

	/* all users cleaned up, mark it available */
	free_qpn(&dev->rdi.qp_dev->qpn_table, qp->ibqp.qp_num);
	spin_lock(&dev->n_qps_lock);
	dev->n_qps_allocated--;
	spin_unlock(&dev->n_qps_lock);

	if (qp->ip)
		kref_put(&qp->ip->ref, rvt_release_mmap_info);
	else
		vfree(qp->r_rq.wq);
	vfree(qp->s_wq);
	kfree(priv->s_hdr);
	kfree(priv);
	kfree(qp);
	return 0;
}

/**
 * qib_get_credit - handle a credit update from an AETH
 * @qp: the QP whose credit limit to update
 * @aeth: the Acknowledge Extended Transport Header carrying the credit
 *
 * The QP s_lock should be held.
 */
void qib_get_credit(struct rvt_qp *qp, u32 aeth)
{
	u32 credit = (aeth >> QIB_AETH_CREDIT_SHIFT) & QIB_AETH_CREDIT_MASK;

	/*
	 * If the credit is invalid, we can send
	 * as many packets as we like.  Otherwise, we have to
	 * honor the credit field.
	 */
	if (credit == QIB_AETH_CREDIT_INVAL) {
		if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT)) {
			qp->s_flags |= RVT_S_UNLIMITED_CREDIT;
			if (qp->s_flags & RVT_S_WAIT_SSN_CREDIT) {
				qp->s_flags &= ~RVT_S_WAIT_SSN_CREDIT;
				qib_schedule_send(qp);
			}
		}
	} else if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT)) {
		/* Compute new LSN (i.e., MSN + credit) */
		credit = (aeth + credit_table[credit]) & QIB_MSN_MASK;
		if (qib_cmp24(credit, qp->s_lsn) > 0) {
			qp->s_lsn = credit;
			if (qp->s_flags & RVT_S_WAIT_SSN_CREDIT) {
				qp->s_flags &= ~RVT_S_WAIT_SSN_CREDIT;
				qib_schedule_send(qp);
			}
		}
	}
}

#ifdef CONFIG_DEBUG_FS

struct qib_qp_iter {
	struct qib_ibdev *dev;
	struct rvt_qp *qp;
	int n;
};

struct qib_qp_iter *qib_qp_iter_init(struct qib_ibdev *dev)
{
	struct qib_qp_iter *iter;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return NULL;

	iter->dev = dev;
	if (qib_qp_iter_next(iter)) {
		kfree(iter);
		return NULL;
	}

	return iter;
}

int qib_qp_iter_next(struct qib_qp_iter *iter)
{
	struct qib_ibdev *dev = iter->dev;
	int n = iter->n;
	int ret = 1;
	struct rvt_qp *pqp = iter->qp;
	struct rvt_qp *qp;

	for (; n < dev->rdi.qp_dev->qp_table_size; n++) {
		if (pqp)
			qp = rcu_dereference(pqp->next);
		else
			qp = rcu_dereference(dev->rdi.qp_dev->qp_table[n]);
		pqp = qp;
		if (qp) {
			iter->qp = qp;
			iter->n = n;
			return 0;
		}
	}
	return ret;
}
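
/*
 * Hypothetical sketch of how the iterator is meant to be driven (the
 * real user is the debugfs QP stats seq_file code); callers must hold
 * rcu_read_lock() across the iteration since qib_qp_iter_next() walks
 * the RCU-protected hash chains:
 *
 *	struct qib_qp_iter *iter = qib_qp_iter_init(dev);
 *
 *	if (iter) {
 *		do {
 *			qib_qp_iter_print(s, iter);
 *		} while (!qib_qp_iter_next(iter));
 *		kfree(iter);
 *	}
 */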

static const char * const qp_type_str[] = {
	"SMI", "GSI", "RC", "UC", "UD",
};

void qib_qp_iter_print(struct seq_file *s, struct qib_qp_iter *iter)
{
	struct rvt_swqe *wqe;
	struct rvt_qp *qp = iter->qp;
	struct qib_qp_priv *priv = qp->priv;

	wqe = rvt_get_swqe_ptr(qp, qp->s_last);
	seq_printf(s,
		   "N %d QP%u %s %u %u %u f=%x %u %u %u %u %u PSN %x %x %x %x %x (%u %u %u %u %u %u) QP%u LID %x\n",
		   iter->n,
		   qp->ibqp.qp_num,
		   qp_type_str[qp->ibqp.qp_type],
		   qp->state,
		   wqe->wr.opcode,
		   qp->s_hdrwords,
		   qp->s_flags,
		   atomic_read(&priv->s_dma_busy),
		   !list_empty(&priv->iowait),
		   qp->timeout,
		   wqe->ssn,
		   qp->s_lsn,
		   qp->s_last_psn,
		   qp->s_psn, qp->s_next_psn,
		   qp->s_sending_psn, qp->s_sending_hpsn,
		   qp->s_last, qp->s_acked, qp->s_cur,
		   qp->s_tail, qp->s_head, qp->s_size,
		   qp->remote_qpn,
		   qp->remote_ah_attr.dlid);
}

#endif