/*
 * Copyright (c) 2012, 2013 Intel Corporation.  All rights reserved.
 * Copyright (c) 2006 - 2012 QLogic Corporation.  All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/err.h>
#include <linux/vmalloc.h>
#include <linux/jhash.h>
#include <rdma/rdma_vt.h>
#ifdef CONFIG_DEBUG_FS
#include <linux/seq_file.h>
#endif

#include "qib.h"

/*
 * The mask field that was present in the now-deleted qib_qpn_table is
 * not present in rvt_qpn_table.  Define the same field here as
 * qpt_mask instead of adding a mask field to rvt_qpn_table.
 */
u16 qpt_mask;

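/* Translate a bitmap page and an offset within it back into a QPN. */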
static inline unsigned mk_qpn(struct rvt_qpn_table *qpt,
			      struct rvt_qpn_map *map, unsigned off)
{
	return (map - qpt->map) * RVT_BITS_PER_PAGE + off;
}

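/*
 * Find the next candidate offset in the current bitmap page.  With
 * qpt_mask set, QPNs are striped across kernel receive contexts
 * ((qpn & qpt_mask) >> 1 selects the context), so offsets that would
 * map past the n usable contexts are skipped.
 */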
static inline unsigned find_next_offset(struct rvt_qpn_table *qpt,
					struct rvt_qpn_map *map, unsigned off,
					unsigned n)
{
	if (qpt_mask) {
		off++;
		if (((off & qpt_mask) >> 1) >= n)
			off = (off | qpt_mask) + 2;
	} else {
		off = find_next_zero_bit(map->page, RVT_BITS_PER_PAGE, off);
	}
	return off;
}

/*
 * Convert the AETH credit code into the number of credits.
 */
static u32 credit_table[31] = {
	0,	/* 0 */
	1,	/* 1 */
	2,	/* 2 */
	3,	/* 3 */
	4,	/* 4 */
	6,	/* 5 */
	8,	/* 6 */
	12,	/* 7 */
	16,	/* 8 */
	24,	/* 9 */
	32,	/* A */
	48,	/* B */
	64,	/* C */
	96,	/* D */
	128,	/* E */
	192,	/* F */
	256,	/* 10 */
	384,	/* 11 */
	512,	/* 12 */
	768,	/* 13 */
	1024,	/* 14 */
	1536,	/* 15 */
	2048,	/* 16 */
	3072,	/* 17 */
	4096,	/* 18 */
	6144,	/* 19 */
	8192,	/* 1A */
	12288,	/* 1B */
	16384,	/* 1C */
	24576,	/* 1D */
	32768	/* 1E */
};

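/*
 * Allocate a zeroed QPN bitmap page on demand and install it under
 * qpt->lock.
 */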
static void get_map_page(struct rvt_qpn_table *qpt, struct rvt_qpn_map *map,
			 gfp_t gfp)
{
	unsigned long page = get_zeroed_page(gfp);

	/*
	 * Free the page if someone raced with us installing it.
	 */

	spin_lock(&qpt->lock);
	if (map->page)
		free_page(page);
	else
		map->page = (void *)page;
	spin_unlock(&qpt->lock);
}

/*
 * Allocate the next available QPN or
 * zero/one for QP type IB_QPT_SMI/IB_QPT_GSI.
 */
int alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt,
	      enum ib_qp_type type, u8 port, gfp_t gfp)
{
	u32 i, offset, max_scan, qpn;
	struct rvt_qpn_map *map;
	int ret;
	struct qib_ibdev *verbs_dev = container_of(rdi, struct qib_ibdev, rdi);
	struct qib_devdata *dd = container_of(verbs_dev, struct qib_devdata,
					      verbs_dev);

	if (type == IB_QPT_SMI || type == IB_QPT_GSI) {
		unsigned n;

		ret = type == IB_QPT_GSI;
		n = 1 << (ret + 2 * (port - 1));
		spin_lock(&qpt->lock);
		if (qpt->flags & n)
			ret = -EINVAL;
		else
			qpt->flags |= n;
		spin_unlock(&qpt->lock);
		goto bail;
	}

	qpn = qpt->last + 2;
	if (qpn >= RVT_QPN_MAX)
		qpn = 2;
	if (qpt_mask && ((qpn & qpt_mask) >> 1) >= dd->n_krcv_queues)
		qpn = (qpn | qpt_mask) + 2;
	offset = qpn & RVT_BITS_PER_PAGE_MASK;
	map = &qpt->map[qpn / RVT_BITS_PER_PAGE];
	max_scan = qpt->nmaps - !offset;
	for (i = 0;;) {
		if (unlikely(!map->page)) {
			get_map_page(qpt, map, gfp);
			if (unlikely(!map->page))
				break;
		}
		do {
			if (!test_and_set_bit(offset, map->page)) {
				qpt->last = qpn;
				ret = qpn;
				goto bail;
			}
			offset = find_next_offset(qpt, map, offset,
						  dd->n_krcv_queues);
			qpn = mk_qpn(qpt, map, offset);
			/*
			 * This test differs from alloc_pidmap().
			 * If find_next_offset() does find a zero
			 * bit, we don't need to check for QPN
			 * wrapping around past our starting QPN.
			 * We just need to be sure we don't loop
			 * forever.
			 */
		} while (offset < RVT_BITS_PER_PAGE && qpn < RVT_QPN_MAX);
		/*
		 * In order to keep the number of pages allocated to a
		 * minimum, we scan all the existing pages before increasing
		 * the size of the bitmap table.
		 */
		if (++i > max_scan) {
			if (qpt->nmaps == RVT_QPNMAP_ENTRIES)
				break;
			map = &qpt->map[qpt->nmaps++];
			offset = 0;
		} else if (map < &qpt->map[qpt->nmaps]) {
			++map;
			offset = 0;
		} else {
			map = &qpt->map[0];
			offset = 2;
		}
		qpn = mk_qpn(qpt, map, offset);
	}

	ret = -ENOMEM;

bail:
	return ret;
}

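/* Return a QPN to the bitmap so it can be allocated again. */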
static void free_qpn(struct rvt_qpn_table *qpt, u32 qpn)
{
	struct rvt_qpn_map *map;

	map = qpt->map + qpn / RVT_BITS_PER_PAGE;
	if (map->page)
		clear_bit(qpn & RVT_BITS_PER_PAGE_MASK, map->page);
}

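/*
 * Hash a QPN into the QP table.  dev->qp_rnd is believed to be a
 * per-device random seed, so bucket placement is not predictable
 * from the QPN alone.
 */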
static inline unsigned qpn_hash(struct qib_ibdev *dev, u32 qpn)
{
	return jhash_1word(qpn, dev->qp_rnd) &
		(dev->rdi.qp_dev->qp_table_size - 1);
}

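/*
 * QPs 0 and 1 (SMI/GSI) live in per-port pointers; every other QP
 * hashes into dev->rdi.qp_dev->qp_table.  Writers serialize on
 * qpt_lock, while readers such as qib_lookup_qpn() walk the chains
 * under RCU.
 */
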
/*
 * Put the QP into the hash table.
 * The hash table holds a reference to the QP.
 */
static void insert_qp(struct qib_ibdev *dev, struct rvt_qp *qp)
{
	struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
	unsigned long flags;
	unsigned n = qpn_hash(dev, qp->ibqp.qp_num);

	atomic_inc(&qp->refcount);
	spin_lock_irqsave(&dev->rdi.qp_dev->qpt_lock, flags);

	if (qp->ibqp.qp_num == 0)
		rcu_assign_pointer(ibp->rvp.qp[0], qp);
	else if (qp->ibqp.qp_num == 1)
		rcu_assign_pointer(ibp->rvp.qp[1], qp);
	else {
		qp->next = dev->rdi.qp_dev->qp_table[n];
		rcu_assign_pointer(dev->rdi.qp_dev->qp_table[n], qp);
	}

	spin_unlock_irqrestore(&dev->rdi.qp_dev->qpt_lock, flags);
}

/*
 * Remove the QP from the table so it can't be found asynchronously by
 * the receive interrupt routine.
 */
static void remove_qp(struct qib_ibdev *dev, struct rvt_qp *qp)
{
	struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
	unsigned n = qpn_hash(dev, qp->ibqp.qp_num);
	unsigned long flags;
	int removed = 1;
	spinlock_t *qpt_lock_ptr; /* Pointer to make checkpatch happy */

	spin_lock_irqsave(&dev->rdi.qp_dev->qpt_lock, flags);

	qpt_lock_ptr = &dev->rdi.qp_dev->qpt_lock;
	if (rcu_dereference_protected(ibp->rvp.qp[0],
				      lockdep_is_held(qpt_lock_ptr)) == qp) {
		RCU_INIT_POINTER(ibp->rvp.qp[0], NULL);
	} else if (rcu_dereference_protected(ibp->rvp.qp[1],
				lockdep_is_held(qpt_lock_ptr)) == qp) {
		RCU_INIT_POINTER(ibp->rvp.qp[1], NULL);
	} else {
		struct rvt_qp *q;
		struct rvt_qp __rcu **qpp;

		removed = 0;
		qpp = &dev->rdi.qp_dev->qp_table[n];
		for (; (q = rcu_dereference_protected(*qpp,
				lockdep_is_held(qpt_lock_ptr))) != NULL;
				qpp = &q->next)
			if (q == qp) {
				RCU_INIT_POINTER(*qpp,
					rcu_dereference_protected(qp->next,
					 lockdep_is_held(qpt_lock_ptr)));
				removed = 1;
				break;
			}
	}

	spin_unlock_irqrestore(&dev->rdi.qp_dev->qpt_lock, flags);
	if (removed) {
		synchronize_rcu();
		atomic_dec(&qp->refcount);
	}
}

/**
 * qib_free_all_qps - check for QPs still in use
 * @rdi: rvt device info structure
 *
 * Returns the number of QPs still in use.
 */
unsigned qib_free_all_qps(struct rvt_dev_info *rdi)
{
	struct qib_ibdev *verbs_dev = container_of(rdi, struct qib_ibdev, rdi);
	struct qib_devdata *dd = container_of(verbs_dev, struct qib_devdata,
					      verbs_dev);
	unsigned n, qp_inuse = 0;

	for (n = 0; n < dd->num_pports; n++) {
		struct qib_ibport *ibp = &dd->pport[n].ibport_data;

		rcu_read_lock();
		if (rcu_dereference(ibp->rvp.qp[0]))
			qp_inuse++;
		if (rcu_dereference(ibp->rvp.qp[1]))
			qp_inuse++;
		rcu_read_unlock();
	}
	return qp_inuse;
}

/**
 * qib_lookup_qpn - return the QP with the given QPN
 * @ibp: the IB port on which to look up the QPN
 * @qpn: the QP number to look up
 *
 * The caller is responsible for decrementing the QP reference count
 * when done.
 */
struct rvt_qp *qib_lookup_qpn(struct qib_ibport *ibp, u32 qpn)
{
	struct rvt_qp *qp = NULL;

	rcu_read_lock();
	if (unlikely(qpn <= 1)) {
		if (qpn == 0)
			qp = rcu_dereference(ibp->rvp.qp[0]);
		else
			qp = rcu_dereference(ibp->rvp.qp[1]);
		if (qp)
			atomic_inc(&qp->refcount);
	} else {
		struct qib_ibdev *dev = &ppd_from_ibp(ibp)->dd->verbs_dev;
		unsigned n = qpn_hash(dev, qpn);

		for (qp = rcu_dereference(dev->rdi.qp_dev->qp_table[n]); qp;
			qp = rcu_dereference(qp->next))
			if (qp->ibqp.qp_num == qpn) {
				atomic_inc(&qp->refcount);
				break;
			}
	}
	rcu_read_unlock();
	return qp;
}

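/*
 * Clear the qib-private QP state on reset.  This is presumably
 * installed as the notify_qp_reset callback in rdmavt's driver
 * function table, whose signature it matches.
 */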
void notify_qp_reset(struct rvt_qp *qp)
{
	struct qib_qp_priv *priv = qp->priv;

	atomic_set(&priv->s_dma_busy, 0);
}

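/*
 * Drop the memory region references held by the QP's receive side
 * and, if clr_sends is set, by any unfinished send work requests
 * and the responder's RDMA read queue.
 */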
static void clear_mr_refs(struct rvt_qp *qp, int clr_sends)
{
	unsigned n;

	if (test_and_clear_bit(RVT_R_REWIND_SGE, &qp->r_aflags))
		qib_put_ss(&qp->s_rdma_read_sge);

	qib_put_ss(&qp->r_sge);

	if (clr_sends) {
		while (qp->s_last != qp->s_head) {
			struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, qp->s_last);
			unsigned i;

			for (i = 0; i < wqe->wr.num_sge; i++) {
				struct rvt_sge *sge = &wqe->sg_list[i];

				rvt_put_mr(sge->mr);
			}
			if (qp->ibqp.qp_type == IB_QPT_UD ||
			    qp->ibqp.qp_type == IB_QPT_SMI ||
			    qp->ibqp.qp_type == IB_QPT_GSI)
				atomic_dec(
				 &ibah_to_rvtah(wqe->ud_wr.ah)->refcount);
			if (++qp->s_last >= qp->s_size)
				qp->s_last = 0;
		}
		if (qp->s_rdma_mr) {
			rvt_put_mr(qp->s_rdma_mr);
			qp->s_rdma_mr = NULL;
		}
	}

	if (qp->ibqp.qp_type != IB_QPT_RC)
		return;

	for (n = 0; n < ARRAY_SIZE(qp->s_ack_queue); n++) {
		struct rvt_ack_entry *e = &qp->s_ack_queue[n];

		if (e->opcode == IB_OPCODE_RC_RDMA_READ_REQUEST &&
		    e->rdma_sge.mr) {
			rvt_put_mr(e->rdma_sge.mr);
			e->rdma_sge.mr = NULL;
		}
	}
}

/**
 * qib_error_qp - put a QP into the error state
 * @qp: the QP to put into the error state
 * @err: the receive completion error to signal if a RWQE is active
 *
 * Flushes both send and receive work queues.
 * Returns true if last WQE event should be generated.
 * The QP r_lock and s_lock should be held and interrupts disabled.
 * If we are already in error state, just return.
 */
int qib_error_qp(struct rvt_qp *qp, enum ib_wc_status err)
{
	struct qib_qp_priv *priv = qp->priv;
	struct qib_ibdev *dev = to_idev(qp->ibqp.device);
	struct ib_wc wc;
	int ret = 0;

	if (qp->state == IB_QPS_ERR || qp->state == IB_QPS_RESET)
		goto bail;

	qp->state = IB_QPS_ERR;

	if (qp->s_flags & (RVT_S_TIMER | RVT_S_WAIT_RNR)) {
		qp->s_flags &= ~(RVT_S_TIMER | RVT_S_WAIT_RNR);
		del_timer(&qp->s_timer);
	}

	if (qp->s_flags & RVT_S_ANY_WAIT_SEND)
		qp->s_flags &= ~RVT_S_ANY_WAIT_SEND;

	spin_lock(&dev->rdi.pending_lock);
	if (!list_empty(&priv->iowait) && !(qp->s_flags & RVT_S_BUSY)) {
		qp->s_flags &= ~RVT_S_ANY_WAIT_IO;
		list_del_init(&priv->iowait);
	}
	spin_unlock(&dev->rdi.pending_lock);

	if (!(qp->s_flags & RVT_S_BUSY)) {
		qp->s_hdrwords = 0;
		if (qp->s_rdma_mr) {
			rvt_put_mr(qp->s_rdma_mr);
			qp->s_rdma_mr = NULL;
		}
		if (priv->s_tx) {
			qib_put_txreq(priv->s_tx);
			priv->s_tx = NULL;
		}
	}

	/* Schedule the sending tasklet to drain the send work queue. */
	if (qp->s_last != qp->s_head)
		qib_schedule_send(qp);

	clear_mr_refs(qp, 0);

	memset(&wc, 0, sizeof(wc));
	wc.qp = &qp->ibqp;
	wc.opcode = IB_WC_RECV;

	if (test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags)) {
		wc.wr_id = qp->r_wr_id;
		wc.status = err;
		rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1);
	}
	wc.status = IB_WC_WR_FLUSH_ERR;

	if (qp->r_rq.wq) {
		struct rvt_rwq *wq;
		u32 head;
		u32 tail;

		spin_lock(&qp->r_rq.lock);

		/* sanity check pointers before trusting them */
		wq = qp->r_rq.wq;
		head = wq->head;
		if (head >= qp->r_rq.size)
			head = 0;
		tail = wq->tail;
		if (tail >= qp->r_rq.size)
			tail = 0;
		while (tail != head) {
			wc.wr_id = get_rwqe_ptr(&qp->r_rq, tail)->wr_id;
			if (++tail >= qp->r_rq.size)
				tail = 0;
			rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1);
		}
		wq->tail = tail;

		spin_unlock(&qp->r_rq.lock);
	} else if (qp->ibqp.event_handler)
		ret = 1;

bail:
	return ret;
}

/**
 * qib_modify_qp - modify the attributes of a queue pair
 * @ibqp: the queue pair whose attributes we're modifying
 * @attr: the new attributes
 * @attr_mask: the mask of attributes to modify
 * @udata: user data for libibverbs.so
 *
 * Returns 0 on success, otherwise returns an errno.
 */
int qib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		  int attr_mask, struct ib_udata *udata)
{
	struct qib_ibdev *dev = to_idev(ibqp->device);
	struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
	struct qib_qp_priv *priv = qp->priv;
	enum ib_qp_state cur_state, new_state;
	struct ib_event ev;
	int lastwqe = 0;
	int mig = 0;
	int ret;
	u32 pmtu = 0; /* for gcc warning only */

	spin_lock_irq(&qp->r_lock);
	spin_lock(&qp->s_lock);

	cur_state = attr_mask & IB_QP_CUR_STATE ?
		attr->cur_qp_state : qp->state;
	new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;

	if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
				attr_mask, IB_LINK_LAYER_UNSPECIFIED))
		goto inval;

	if (attr_mask & IB_QP_AV) {
		if (attr->ah_attr.dlid >= be16_to_cpu(IB_MULTICAST_LID_BASE))
			goto inval;
		if (rvt_check_ah(qp->ibqp.device, &attr->ah_attr))
			goto inval;
	}

	if (attr_mask & IB_QP_ALT_PATH) {
		if (attr->alt_ah_attr.dlid >=
		    be16_to_cpu(IB_MULTICAST_LID_BASE))
			goto inval;
		if (rvt_check_ah(qp->ibqp.device, &attr->alt_ah_attr))
			goto inval;
		if (attr->alt_pkey_index >= qib_get_npkeys(dd_from_dev(dev)))
			goto inval;
	}

	if (attr_mask & IB_QP_PKEY_INDEX)
		if (attr->pkey_index >= qib_get_npkeys(dd_from_dev(dev)))
			goto inval;

	if (attr_mask & IB_QP_MIN_RNR_TIMER)
		if (attr->min_rnr_timer > 31)
			goto inval;

	if (attr_mask & IB_QP_PORT)
		if (qp->ibqp.qp_type == IB_QPT_SMI ||
		    qp->ibqp.qp_type == IB_QPT_GSI ||
		    attr->port_num == 0 ||
		    attr->port_num > ibqp->device->phys_port_cnt)
			goto inval;

	if (attr_mask & IB_QP_DEST_QPN)
		if (attr->dest_qp_num > QIB_QPN_MASK)
			goto inval;

	if (attr_mask & IB_QP_RETRY_CNT)
		if (attr->retry_cnt > 7)
			goto inval;

	if (attr_mask & IB_QP_RNR_RETRY)
		if (attr->rnr_retry > 7)
			goto inval;

	/*
	 * Don't allow invalid path_mtu values.  It is OK to set the mtu
	 * greater than the active mtu (or even max_cap, if we have tuned
	 * that to a small mtu).  We'll set qp->path_mtu to the lesser of
	 * the requested attribute mtu and the active mtu, for packetizing
	 * messages.
	 * Note that the QP port has to be set in INIT and MTU in RTR.
	 */
	if (attr_mask & IB_QP_PATH_MTU) {
		struct qib_devdata *dd = dd_from_dev(dev);
		int mtu, pidx = qp->port_num - 1;

		mtu = ib_mtu_enum_to_int(attr->path_mtu);
		if (mtu == -1)
			goto inval;
		if (mtu > dd->pport[pidx].ibmtu) {
			switch (dd->pport[pidx].ibmtu) {
			case 4096:
				pmtu = IB_MTU_4096;
				break;
			case 2048:
				pmtu = IB_MTU_2048;
				break;
			case 1024:
				pmtu = IB_MTU_1024;
				break;
			case 512:
				pmtu = IB_MTU_512;
				break;
			case 256:
				pmtu = IB_MTU_256;
				break;
			default:
				pmtu = IB_MTU_2048;
			}
		} else
			pmtu = attr->path_mtu;
	}

	if (attr_mask & IB_QP_PATH_MIG_STATE) {
		if (attr->path_mig_state == IB_MIG_REARM) {
			if (qp->s_mig_state == IB_MIG_ARMED)
				goto inval;
			if (new_state != IB_QPS_RTS)
				goto inval;
		} else if (attr->path_mig_state == IB_MIG_MIGRATED) {
			if (qp->s_mig_state == IB_MIG_REARM)
				goto inval;
			if (new_state != IB_QPS_RTS && new_state != IB_QPS_SQD)
				goto inval;
			if (qp->s_mig_state == IB_MIG_ARMED)
				mig = 1;
		} else
			goto inval;
	}

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		if (attr->max_dest_rd_atomic > QIB_MAX_RDMA_ATOMIC)
			goto inval;

	switch (new_state) {
	case IB_QPS_RESET:
		if (qp->state != IB_QPS_RESET) {
			qp->state = IB_QPS_RESET;
			spin_lock(&dev->rdi.pending_lock);
			if (!list_empty(&priv->iowait))
				list_del_init(&priv->iowait);
			spin_unlock(&dev->rdi.pending_lock);
			qp->s_flags &= ~(RVT_S_TIMER | RVT_S_ANY_WAIT);
			spin_unlock(&qp->s_lock);
			spin_unlock_irq(&qp->r_lock);
			/* Stop the sending work queue and retry timer */
			cancel_work_sync(&priv->s_work);
			del_timer_sync(&qp->s_timer);
			wait_event(priv->wait_dma,
				   !atomic_read(&priv->s_dma_busy));
			if (priv->s_tx) {
				qib_put_txreq(priv->s_tx);
				priv->s_tx = NULL;
			}
			remove_qp(dev, qp);
			wait_event(qp->wait, !atomic_read(&qp->refcount));
			spin_lock_irq(&qp->r_lock);
			spin_lock(&qp->s_lock);
			clear_mr_refs(qp, 1);
			rvt_reset_qp(&dev->rdi, qp, ibqp->qp_type);
		}
		break;

	case IB_QPS_RTR:
		/* Allow event to retrigger if QP set to RTR more than once */
		qp->r_flags &= ~RVT_R_COMM_EST;
		qp->state = new_state;
		break;

	case IB_QPS_SQD:
		qp->s_draining = qp->s_last != qp->s_cur;
		qp->state = new_state;
		break;

	case IB_QPS_SQE:
		if (qp->ibqp.qp_type == IB_QPT_RC)
			goto inval;
		qp->state = new_state;
		break;

	case IB_QPS_ERR:
		lastwqe = qib_error_qp(qp, IB_WC_WR_FLUSH_ERR);
		break;

	default:
		qp->state = new_state;
		break;
	}

	if (attr_mask & IB_QP_PKEY_INDEX)
		qp->s_pkey_index = attr->pkey_index;

	if (attr_mask & IB_QP_PORT)
		qp->port_num = attr->port_num;

	if (attr_mask & IB_QP_DEST_QPN)
		qp->remote_qpn = attr->dest_qp_num;

	if (attr_mask & IB_QP_SQ_PSN) {
		qp->s_next_psn = attr->sq_psn & QIB_PSN_MASK;
		qp->s_psn = qp->s_next_psn;
		qp->s_sending_psn = qp->s_next_psn;
		qp->s_last_psn = qp->s_next_psn - 1;
		qp->s_sending_hpsn = qp->s_last_psn;
	}

	if (attr_mask & IB_QP_RQ_PSN)
		qp->r_psn = attr->rq_psn & QIB_PSN_MASK;

	if (attr_mask & IB_QP_ACCESS_FLAGS)
		qp->qp_access_flags = attr->qp_access_flags;

	if (attr_mask & IB_QP_AV) {
		qp->remote_ah_attr = attr->ah_attr;
		qp->s_srate = attr->ah_attr.static_rate;
	}

	if (attr_mask & IB_QP_ALT_PATH) {
		qp->alt_ah_attr = attr->alt_ah_attr;
		qp->s_alt_pkey_index = attr->alt_pkey_index;
	}

	if (attr_mask & IB_QP_PATH_MIG_STATE) {
		qp->s_mig_state = attr->path_mig_state;
		if (mig) {
			qp->remote_ah_attr = qp->alt_ah_attr;
			qp->port_num = qp->alt_ah_attr.port_num;
			qp->s_pkey_index = qp->s_alt_pkey_index;
		}
	}

	if (attr_mask & IB_QP_PATH_MTU) {
		qp->path_mtu = pmtu;
		qp->pmtu = ib_mtu_enum_to_int(pmtu);
	}

	if (attr_mask & IB_QP_RETRY_CNT) {
		qp->s_retry_cnt = attr->retry_cnt;
		qp->s_retry = attr->retry_cnt;
	}

	if (attr_mask & IB_QP_RNR_RETRY) {
		qp->s_rnr_retry_cnt = attr->rnr_retry;
		qp->s_rnr_retry = attr->rnr_retry;
	}

	if (attr_mask & IB_QP_MIN_RNR_TIMER)
		qp->r_min_rnr_timer = attr->min_rnr_timer;

	if (attr_mask & IB_QP_TIMEOUT) {
		qp->timeout = attr->timeout;
		qp->timeout_jiffies =
			usecs_to_jiffies((4096UL * (1UL << qp->timeout)) /
				1000UL);
	}

	if (attr_mask & IB_QP_QKEY)
		qp->qkey = attr->qkey;

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		qp->r_max_rd_atomic = attr->max_dest_rd_atomic;

	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC)
		qp->s_max_rd_atomic = attr->max_rd_atomic;

	spin_unlock(&qp->s_lock);
	spin_unlock_irq(&qp->r_lock);

	if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
		insert_qp(dev, qp);

	if (lastwqe) {
		ev.device = qp->ibqp.device;
		ev.element.qp = &qp->ibqp;
		ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
		qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
	}
	if (mig) {
		ev.device = qp->ibqp.device;
		ev.element.qp = &qp->ibqp;
		ev.event = IB_EVENT_PATH_MIG;
		qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
	}
	ret = 0;
	goto bail;

inval:
	spin_unlock(&qp->s_lock);
	spin_unlock_irq(&qp->r_lock);
	ret = -EINVAL;

bail:
	return ret;
}

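/*
 * AETH layout, as used below: the low 24 bits (QIB_MSN_MASK) carry
 * the MSN, and the 5-bit credit code shifted in at
 * QIB_AETH_CREDIT_SHIFT indexes credit_table[].  For example,
 * advertising 48 free RWQEs encodes credit code 0xB.
 */
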
/**
 * qib_compute_aeth - compute the AETH (syndrome + MSN)
 * @qp: the queue pair to compute the AETH for
 *
 * Returns the AETH.
 */
__be32 qib_compute_aeth(struct rvt_qp *qp)
{
	u32 aeth = qp->r_msn & QIB_MSN_MASK;

	if (qp->ibqp.srq) {
		/*
		 * Shared receive queues don't generate credits.
		 * Set the credit field to the invalid value.
		 */
		aeth |= QIB_AETH_CREDIT_INVAL << QIB_AETH_CREDIT_SHIFT;
	} else {
		u32 min, max, x;
		u32 credits;
		struct rvt_rwq *wq = qp->r_rq.wq;
		u32 head;
		u32 tail;

		/* sanity check pointers before trusting them */
		head = wq->head;
		if (head >= qp->r_rq.size)
			head = 0;
		tail = wq->tail;
		if (tail >= qp->r_rq.size)
			tail = 0;
		/*
		 * Compute the number of credits available (RWQEs).
		 * XXX Not holding the r_rq.lock here so there is a small
		 * chance that the pair of reads are not atomic.
		 */
		credits = head - tail;
		if ((int)credits < 0)
			credits += qp->r_rq.size;
		/*
		 * Binary search the credit table to find the code to
		 * use.
		 */
		min = 0;
		max = 31;
		for (;;) {
			x = (min + max) / 2;
			if (credit_table[x] == credits)
				break;
			if (credit_table[x] > credits)
				max = x;
			else if (min == x)
				break;
			else
				min = x;
		}
		aeth |= x << QIB_AETH_CREDIT_SHIFT;
	}
	return cpu_to_be32(aeth);
}

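/*
 * qp_priv_alloc() and qp_priv_free() pair up to manage the
 * qib-specific portion (struct qib_qp_priv) of each QP; they are
 * presumably registered in rdmavt's driver function table so the
 * core invokes them on QP creation and destruction.
 */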
void *qp_priv_alloc(struct rvt_dev_info *rdi, struct rvt_qp *qp, gfp_t gfp)
{
	struct qib_qp_priv *priv;

	priv = kzalloc(sizeof(*priv), gfp);
	if (!priv)
		return ERR_PTR(-ENOMEM);
	priv->owner = qp;

	priv->s_hdr = kzalloc(sizeof(*priv->s_hdr), gfp);
	if (!priv->s_hdr) {
		kfree(priv);
		return ERR_PTR(-ENOMEM);
	}
	init_waitqueue_head(&priv->wait_dma);
	INIT_WORK(&priv->s_work, _qib_do_send);
	INIT_LIST_HEAD(&priv->iowait);

	return priv;
}

void qp_priv_free(struct rvt_dev_info *rdi, struct rvt_qp *qp)
{
	struct qib_qp_priv *priv = qp->priv;

	kfree(priv->s_hdr);
	kfree(priv);
}

/**
 * qib_destroy_qp - destroy a queue pair
 * @ibqp: the queue pair to destroy
 *
 * Returns 0 on success.
 *
 * Note that this can be called while the QP is actively sending or
 * receiving!
 */
int qib_destroy_qp(struct ib_qp *ibqp)
{
	struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
	struct qib_ibdev *dev = to_idev(ibqp->device);
	struct qib_qp_priv *priv = qp->priv;

	/* Make sure HW and driver activity is stopped. */
	spin_lock_irq(&qp->s_lock);
	if (qp->state != IB_QPS_RESET) {
		qp->state = IB_QPS_RESET;
		spin_lock(&dev->rdi.pending_lock);
		if (!list_empty(&priv->iowait))
			list_del_init(&priv->iowait);
		spin_unlock(&dev->rdi.pending_lock);
		qp->s_flags &= ~(RVT_S_TIMER | RVT_S_ANY_WAIT);
		spin_unlock_irq(&qp->s_lock);
		cancel_work_sync(&priv->s_work);
		del_timer_sync(&qp->s_timer);
		wait_event(priv->wait_dma, !atomic_read(&priv->s_dma_busy));
		if (priv->s_tx) {
			qib_put_txreq(priv->s_tx);
			priv->s_tx = NULL;
		}
		remove_qp(dev, qp);
		wait_event(qp->wait, !atomic_read(&qp->refcount));
		clear_mr_refs(qp, 1);
	} else
		spin_unlock_irq(&qp->s_lock);

	/* all users cleaned up, mark it available */
	free_qpn(&dev->rdi.qp_dev->qpn_table, qp->ibqp.qp_num);
	spin_lock(&dev->n_qps_lock);
	dev->n_qps_allocated--;
	spin_unlock(&dev->n_qps_lock);

	if (qp->ip)
		kref_put(&qp->ip->ref, rvt_release_mmap_info);
	else
		vfree(qp->r_rq.wq);
	vfree(qp->s_wq);
	kfree(priv->s_hdr);
	kfree(priv);
	kfree(qp);
	return 0;
}

/**
 * qib_get_credit - update the QP's available send credits
 * @qp: the QP whose send credits to update
 * @aeth: the Acknowledge Extended Transport Header
 *
 * The QP s_lock should be held.
 */
void qib_get_credit(struct rvt_qp *qp, u32 aeth)
{
	u32 credit = (aeth >> QIB_AETH_CREDIT_SHIFT) & QIB_AETH_CREDIT_MASK;

	/*
	 * If the credit is invalid, we can send
	 * as many packets as we like.  Otherwise, we have to
	 * honor the credit field.
	 */
	if (credit == QIB_AETH_CREDIT_INVAL) {
		if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT)) {
			qp->s_flags |= RVT_S_UNLIMITED_CREDIT;
			if (qp->s_flags & RVT_S_WAIT_SSN_CREDIT) {
				qp->s_flags &= ~RVT_S_WAIT_SSN_CREDIT;
				qib_schedule_send(qp);
			}
		}
	} else if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT)) {
		/* Compute new LSN (i.e., MSN + credit) */
		credit = (aeth + credit_table[credit]) & QIB_MSN_MASK;
		if (qib_cmp24(credit, qp->s_lsn) > 0) {
			qp->s_lsn = credit;
			if (qp->s_flags & RVT_S_WAIT_SSN_CREDIT) {
				qp->s_flags &= ~RVT_S_WAIT_SSN_CREDIT;
				qib_schedule_send(qp);
			}
		}
	}
}

#ifdef CONFIG_DEBUG_FS

struct qib_qp_iter {
	struct qib_ibdev *dev;
	struct rvt_qp *qp;
	int n;
};

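/*
 * Iterator over all QPs in the hash table, used by the debugfs QP
 * statistics code.  A usage sketch, assuming the caller holds
 * rcu_read_lock() across the calls (as a debugfs seq_file walker
 * would):
 *
 *	iter = qib_qp_iter_init(dev);
 *	if (iter) {
 *		do {
 *			qib_qp_iter_print(s, iter);
 *		} while (!qib_qp_iter_next(iter));
 *		kfree(iter);
 *	}
 */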
struct qib_qp_iter *qib_qp_iter_init(struct qib_ibdev *dev)
{
	struct qib_qp_iter *iter;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return NULL;

	iter->dev = dev;
	if (qib_qp_iter_next(iter)) {
		kfree(iter);
		return NULL;
	}

	return iter;
}

int qib_qp_iter_next(struct qib_qp_iter *iter)
{
	struct qib_ibdev *dev = iter->dev;
	int n = iter->n;
	int ret = 1;
	struct rvt_qp *pqp = iter->qp;
	struct rvt_qp *qp;

	for (; n < dev->rdi.qp_dev->qp_table_size; n++) {
		if (pqp)
			qp = rcu_dereference(pqp->next);
		else
			qp = rcu_dereference(dev->rdi.qp_dev->qp_table[n]);
		pqp = qp;
		if (qp) {
			iter->qp = qp;
			iter->n = n;
			return 0;
		}
	}
	return ret;
}

static const char * const qp_type_str[] = {
	"SMI", "GSI", "RC", "UC", "UD",
};

void qib_qp_iter_print(struct seq_file *s, struct qib_qp_iter *iter)
{
	struct rvt_swqe *wqe;
	struct rvt_qp *qp = iter->qp;
	struct qib_qp_priv *priv = qp->priv;

	wqe = rvt_get_swqe_ptr(qp, qp->s_last);
	seq_printf(s,
		   "N %d QP%u %s %u %u %u f=%x %u %u %u %u %u PSN %x %x %x %x %x (%u %u %u %u %u %u) QP%u LID %x\n",
		   iter->n,
		   qp->ibqp.qp_num,
		   qp_type_str[qp->ibqp.qp_type],
		   qp->state,
		   wqe->wr.opcode,
		   qp->s_hdrwords,
		   qp->s_flags,
		   atomic_read(&priv->s_dma_busy),
		   !list_empty(&priv->iowait),
		   qp->timeout,
		   wqe->ssn,
		   qp->s_lsn,
		   qp->s_last_psn,
		   qp->s_psn, qp->s_next_psn,
		   qp->s_sending_psn, qp->s_sending_hpsn,
		   qp->s_last, qp->s_acked, qp->s_cur,
		   qp->s_tail, qp->s_head, qp->s_size,
		   qp->remote_qpn,
		   qp->remote_ah_attr.dlid);
}

#endif