/*
 * Copyright (c) 2012, 2013 Intel Corporation.  All rights reserved.
 * Copyright (c) 2006 - 2012 QLogic Corporation. * All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/err.h>
#include <linux/vmalloc.h>
#include <rdma/rdma_vt.h>
#ifdef CONFIG_DEBUG_FS
#include <linux/seq_file.h>
#endif

#include "qib.h"

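/* Convert a QPN bitmap page and a bit offset within it back to a QPN. */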
static inline unsigned mk_qpn(struct rvt_qpn_table *qpt,
			      struct rvt_qpn_map *map, unsigned off)
{
	return (map - qpt->map) * RVT_BITS_PER_PAGE + off;
}

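/*
 * Find the next candidate bit offset in a QPN bitmap page.  When a QPN
 * mask is in use (low QPN bits select the kernel receive context), step
 * to the next offset whose encoded context index is below n; otherwise
 * simply search the page for the next zero bit.
 */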
static inline unsigned find_next_offset(struct rvt_qpn_table *qpt,
					struct rvt_qpn_map *map, unsigned off,
					unsigned n, u16 qpt_mask)
{
	if (qpt_mask) {
		off++;
		if (((off & qpt_mask) >> 1) >= n)
			off = (off | qpt_mask) + 2;
	} else {
		off = find_next_zero_bit(map->page, RVT_BITS_PER_PAGE, off);
	}
	return off;
}

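/*
 * Per-opcode parameters consumed by rdmavt when posting sends: the size of
 * each work request structure, the QP types allowed to post it, and any
 * special handling flags (e.g. atomic operations).
 */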
const struct rvt_operation_params qib_post_parms[RVT_OPERATION_MAX] = {
[IB_WR_RDMA_WRITE] = {
	.length = sizeof(struct ib_rdma_wr),
	.qpt_support = BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
},

[IB_WR_RDMA_READ] = {
	.length = sizeof(struct ib_rdma_wr),
	.qpt_support = BIT(IB_QPT_RC),
	.flags = RVT_OPERATION_ATOMIC,
},

[IB_WR_ATOMIC_CMP_AND_SWP] = {
	.length = sizeof(struct ib_atomic_wr),
	.qpt_support = BIT(IB_QPT_RC),
	.flags = RVT_OPERATION_ATOMIC | RVT_OPERATION_ATOMIC_SGE,
},

[IB_WR_ATOMIC_FETCH_AND_ADD] = {
	.length = sizeof(struct ib_atomic_wr),
	.qpt_support = BIT(IB_QPT_RC),
	.flags = RVT_OPERATION_ATOMIC | RVT_OPERATION_ATOMIC_SGE,
},

[IB_WR_RDMA_WRITE_WITH_IMM] = {
	.length = sizeof(struct ib_rdma_wr),
	.qpt_support = BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
},

[IB_WR_SEND] = {
	.length = sizeof(struct ib_send_wr),
	.qpt_support = BIT(IB_QPT_UD) | BIT(IB_QPT_SMI) | BIT(IB_QPT_GSI) |
		       BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
},

[IB_WR_SEND_WITH_IMM] = {
	.length = sizeof(struct ib_send_wr),
	.qpt_support = BIT(IB_QPT_UD) | BIT(IB_QPT_SMI) | BIT(IB_QPT_GSI) |
		       BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
},

};

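/*
 * Allocate a zeroed page for a QPN bitmap and install it in the map,
 * freeing the page again if another thread raced us and installed one.
 */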
static void get_map_page(struct rvt_qpn_table *qpt, struct rvt_qpn_map *map)
{
	unsigned long page = get_zeroed_page(GFP_KERNEL);

	/*
	 * Free the page if someone raced with us installing it.
	 */

	spin_lock(&qpt->lock);
	if (map->page)
		free_page(page);
	else
		map->page = (void *)page;
	spin_unlock(&qpt->lock);
}

/*
 * Allocate the next available QPN or
 * zero/one for QP type IB_QPT_SMI/IB_QPT_GSI.
 */
int qib_alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt,
		  enum ib_qp_type type, u8 port)
{
	u32 i, offset, max_scan, qpn;
	struct rvt_qpn_map *map;
	u32 ret;
	struct qib_ibdev *verbs_dev = container_of(rdi, struct qib_ibdev, rdi);
	struct qib_devdata *dd = container_of(verbs_dev, struct qib_devdata,
					      verbs_dev);
	u16 qpt_mask = dd->qpn_mask;

	if (type == IB_QPT_SMI || type == IB_QPT_GSI) {
		unsigned n;

		ret = type == IB_QPT_GSI;
		n = 1 << (ret + 2 * (port - 1));
		spin_lock(&qpt->lock);
		if (qpt->flags & n)
			ret = -EINVAL;
		else
			qpt->flags |= n;
		spin_unlock(&qpt->lock);
		goto bail;
	}

	qpn = qpt->last + 2;
	if (qpn >= RVT_QPN_MAX)
		qpn = 2;
	if (qpt_mask && ((qpn & qpt_mask) >> 1) >= dd->n_krcv_queues)
		qpn = (qpn | qpt_mask) + 2;
	offset = qpn & RVT_BITS_PER_PAGE_MASK;
	map = &qpt->map[qpn / RVT_BITS_PER_PAGE];
	max_scan = qpt->nmaps - !offset;
	for (i = 0;;) {
		if (unlikely(!map->page)) {
			get_map_page(qpt, map);
			if (unlikely(!map->page))
				break;
		}
		do {
			if (!test_and_set_bit(offset, map->page)) {
				qpt->last = qpn;
				ret = qpn;
				goto bail;
			}
			offset = find_next_offset(qpt, map, offset,
						  dd->n_krcv_queues, qpt_mask);
			qpn = mk_qpn(qpt, map, offset);
			/*
			 * This test differs from alloc_pidmap().
			 * If find_next_offset() does find a zero
			 * bit, we don't need to check for QPN
			 * wrapping around past our starting QPN.
			 * We just need to be sure we don't loop
			 * forever.
			 */
		} while (offset < RVT_BITS_PER_PAGE && qpn < RVT_QPN_MAX);
		/*
		 * In order to keep the number of pages allocated to a
		 * minimum, we scan all the existing pages before increasing
		 * the size of the bitmap table.
		 */
		if (++i > max_scan) {
			if (qpt->nmaps == RVT_QPNMAP_ENTRIES)
				break;
			map = &qpt->map[qpt->nmaps++];
			offset = 0;
		} else if (map < &qpt->map[qpt->nmaps]) {
			++map;
			offset = 0;
		} else {
			map = &qpt->map[0];
			offset = 2;
		}
		qpn = mk_qpn(qpt, map, offset);
	}

	ret = -ENOMEM;

bail:
	return ret;
}

/**
 * qib_free_all_qps - check for QPs still in use
 * @rdi: rvt device info structure
 *
 * Returns the number of QP0/QP1 QPs still in use across all ports.
 */
unsigned qib_free_all_qps(struct rvt_dev_info *rdi)
{
	struct qib_ibdev *verbs_dev = container_of(rdi, struct qib_ibdev, rdi);
	struct qib_devdata *dd = container_of(verbs_dev, struct qib_devdata,
					      verbs_dev);
	unsigned n, qp_inuse = 0;

	for (n = 0; n < dd->num_pports; n++) {
		struct qib_ibport *ibp = &dd->pport[n].ibport_data;

		rcu_read_lock();
		if (rcu_dereference(ibp->rvp.qp[0]))
			qp_inuse++;
		if (rcu_dereference(ibp->rvp.qp[1]))
			qp_inuse++;
		rcu_read_unlock();
	}
	return qp_inuse;
}

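/* Called by rdmavt when a QP is reset: clear the send DMA busy count. */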
void qib_notify_qp_reset(struct rvt_qp *qp)
{
	struct qib_qp_priv *priv = qp->priv;

	atomic_set(&priv->s_dma_busy, 0);
}

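/*
 * Called when a QP moves to the error state: take the QP off the I/O wait
 * list and, if the send engine is idle, drop any cached RDMA MR reference
 * and release the pending send DMA tx request.
 */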
void qib_notify_error_qp(struct rvt_qp *qp)
{
	struct qib_qp_priv *priv = qp->priv;
	struct qib_ibdev *dev = to_idev(qp->ibqp.device);

	spin_lock(&dev->rdi.pending_lock);
	if (!list_empty(&priv->iowait) && !(qp->s_flags & RVT_S_BUSY)) {
		qp->s_flags &= ~RVT_S_ANY_WAIT_IO;
		list_del_init(&priv->iowait);
	}
	spin_unlock(&dev->rdi.pending_lock);

	if (!(qp->s_flags & RVT_S_BUSY)) {
		qp->s_hdrwords = 0;
		if (qp->s_rdma_mr) {
			rvt_put_mr(qp->s_rdma_mr);
			qp->s_rdma_mr = NULL;
		}
		if (priv->s_tx) {
			qib_put_txreq(priv->s_tx);
			priv->s_tx = NULL;
		}
	}
}

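/*
 * Map a byte-count MTU to the corresponding IB MTU enum; unrecognized
 * values fall back to IB_MTU_2048.
 */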
static int mtu_to_enum(u32 mtu)
{
	int enum_mtu;

	switch (mtu) {
	case 4096:
		enum_mtu = IB_MTU_4096;
		break;
	case 2048:
		enum_mtu = IB_MTU_2048;
		break;
	case 1024:
		enum_mtu = IB_MTU_1024;
		break;
	case 512:
		enum_mtu = IB_MTU_512;
		break;
	case 256:
		enum_mtu = IB_MTU_256;
		break;
	default:
		enum_mtu = IB_MTU_2048;
	}
	return enum_mtu;
}

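/*
 * Validate the path MTU requested in the QP attributes and clamp it to the
 * physical port's MTU; returns an IB MTU enum, or -EINVAL if the requested
 * value is not a valid IB MTU.
 */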
int qib_get_pmtu_from_attr(struct rvt_dev_info *rdi, struct rvt_qp *qp,
			   struct ib_qp_attr *attr)
{
	int mtu, pmtu, pidx = qp->port_num - 1;
	struct qib_ibdev *verbs_dev = container_of(rdi, struct qib_ibdev, rdi);
	struct qib_devdata *dd = container_of(verbs_dev, struct qib_devdata,
					      verbs_dev);

	mtu = ib_mtu_enum_to_int(attr->path_mtu);
	if (mtu == -1)
		return -EINVAL;

	if (mtu > dd->pport[pidx].ibmtu)
		pmtu = mtu_to_enum(dd->pport[pidx].ibmtu);
	else
		pmtu = attr->path_mtu;
	return pmtu;
}

int qib_mtu_to_path_mtu(u32 mtu)
{
	return mtu_to_enum(mtu);
}

u32 qib_mtu_from_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp, u32 pmtu)
{
	return ib_mtu_enum_to_int(pmtu);
}

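/*
 * Allocate the qib-private portion of a QP: the driver QP state plus the
 * send header buffer, DMA-drain wait queue, send work item, and I/O wait
 * list entry.
 */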
void *qib_qp_priv_alloc(struct rvt_dev_info *rdi, struct rvt_qp *qp)
{
	struct qib_qp_priv *priv;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return ERR_PTR(-ENOMEM);
	priv->owner = qp;

	priv->s_hdr = kzalloc(sizeof(*priv->s_hdr), GFP_KERNEL);
	if (!priv->s_hdr) {
		kfree(priv);
		return ERR_PTR(-ENOMEM);
	}
	init_waitqueue_head(&priv->wait_dma);
	INIT_WORK(&priv->s_work, _qib_do_send);
	INIT_LIST_HEAD(&priv->iowait);

	return priv;
}

void qib_qp_priv_free(struct rvt_dev_info *rdi, struct rvt_qp *qp)
{
	struct qib_qp_priv *priv = qp->priv;

	kfree(priv->s_hdr);
	kfree(priv);
}

void qib_stop_send_queue(struct rvt_qp *qp)
{
	struct qib_qp_priv *priv = qp->priv;

	cancel_work_sync(&priv->s_work);
}

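/*
 * Wait for any in-flight send DMA on this QP to complete, then release the
 * pending tx request so the QP can be safely torn down or reset.
 */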
void qib_quiesce_qp(struct rvt_qp *qp)
{
	struct qib_qp_priv *priv = qp->priv;

	wait_event(priv->wait_dma, !atomic_read(&priv->s_dma_busy));
	if (priv->s_tx) {
		qib_put_txreq(priv->s_tx);
		priv->s_tx = NULL;
	}
}

void qib_flush_qp_waiters(struct rvt_qp *qp)
{
	struct qib_qp_priv *priv = qp->priv;
	struct qib_ibdev *dev = to_idev(qp->ibqp.device);

	spin_lock(&dev->rdi.pending_lock);
	if (!list_empty(&priv->iowait))
		list_del_init(&priv->iowait);
	spin_unlock(&dev->rdi.pending_lock);
}

/**
 * qib_check_send_wqe - validate wr/wqe
 * @qp: the qp
 * @wqe: the built wqe
 *
 * Validate the wr/wqe.  This is called
 * prior to inserting the wqe into
 * the ring, but after the wqe has been
 * set up.
 *
 * Returns 1 to force direct progress, 0 otherwise, -EINVAL on failure
 */
int qib_check_send_wqe(struct rvt_qp *qp,
		       struct rvt_swqe *wqe)
{
	struct rvt_ah *ah;
	int ret = 0;

	switch (qp->ibqp.qp_type) {
	case IB_QPT_RC:
	case IB_QPT_UC:
		if (wqe->length > 0x80000000U)
			return -EINVAL;
		break;
	case IB_QPT_SMI:
	case IB_QPT_GSI:
	case IB_QPT_UD:
		ah = ibah_to_rvtah(wqe->ud_wr.ah);
		if (wqe->length > (1 << ah->log_pmtu))
			return -EINVAL;
		/* progress hint */
		ret = 1;
		break;
	default:
		break;
	}
	return ret;
}

#ifdef CONFIG_DEBUG_FS

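/* debugfs iterator state for walking the device's QP hash table */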
struct qib_qp_iter {
	struct qib_ibdev *dev;
	struct rvt_qp *qp;
	int n;
};

struct qib_qp_iter *qib_qp_iter_init(struct qib_ibdev *dev)
{
	struct qib_qp_iter *iter;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return NULL;

	iter->dev = dev;

	return iter;
}

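/*
 * Advance the iterator to the next QP in the RCU-protected hash table.
 * Returns 0 when a QP was found (iter->qp and iter->n updated), 1 once the
 * table has been exhausted.
 */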
int qib_qp_iter_next(struct qib_qp_iter *iter)
{
	struct qib_ibdev *dev = iter->dev;
	int n = iter->n;
	int ret = 1;
	struct rvt_qp *pqp = iter->qp;
	struct rvt_qp *qp;

	for (; n < dev->rdi.qp_dev->qp_table_size; n++) {
		if (pqp)
			qp = rcu_dereference(pqp->next);
		else
			qp = rcu_dereference(dev->rdi.qp_dev->qp_table[n]);
		pqp = qp;
		if (qp) {
			iter->qp = qp;
			iter->n = n;
			return 0;
		}
	}
	return ret;
}

static const char * const qp_type_str[] = {
	"SMI", "GSI", "RC", "UC", "UD",
};

void qib_qp_iter_print(struct seq_file *s, struct qib_qp_iter *iter)
{
	struct rvt_swqe *wqe;
	struct rvt_qp *qp = iter->qp;
	struct qib_qp_priv *priv = qp->priv;

	wqe = rvt_get_swqe_ptr(qp, qp->s_last);
	seq_printf(s,
		   "N %d QP%u %s %u %u %u f=%x %u %u %u %u %u PSN %x %x %x %x %x (%u %u %u %u %u %u) QP%u LID %x\n",
		   iter->n,
		   qp->ibqp.qp_num,
		   qp_type_str[qp->ibqp.qp_type],
		   qp->state,
		   wqe->wr.opcode,
		   qp->s_hdrwords,
		   qp->s_flags,
		   atomic_read(&priv->s_dma_busy),
		   !list_empty(&priv->iowait),
		   qp->timeout,
		   wqe->ssn,
		   qp->s_lsn,
		   qp->s_last_psn,
		   qp->s_psn, qp->s_next_psn,
		   qp->s_sending_psn, qp->s_sending_hpsn,
		   qp->s_last, qp->s_acked, qp->s_cur,
		   qp->s_tail, qp->s_head, qp->s_size,
		   qp->remote_qpn,
		   rdma_ah_get_dlid(&qp->remote_ah_attr));
}

#endif