/*
 * Copyright (c) 2012, 2013 Intel Corporation.  All rights reserved.
 * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <rdma/ib_mad.h>
#include <rdma/ib_user_verbs.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/utsname.h>
#include <linux/rculist.h>
#include <linux/mm.h>
#include <linux/random.h>
#include <linux/vmalloc.h>
#include <rdma/rdma_vt.h>

#include "qib.h"
#include "qib_common.h"

static unsigned int ib_qib_qp_table_size = 256;
module_param_named(qp_table_size, ib_qib_qp_table_size, uint, S_IRUGO);
MODULE_PARM_DESC(qp_table_size, "QP table size");

static unsigned int qib_lkey_table_size = 16;
module_param_named(lkey_table_size, qib_lkey_table_size, uint,
		   S_IRUGO);
MODULE_PARM_DESC(lkey_table_size,
		 "LKEY table size in bits (2^n, 1 <= n <= 23)");

static unsigned int ib_qib_max_pds = 0xFFFF;
module_param_named(max_pds, ib_qib_max_pds, uint, S_IRUGO);
MODULE_PARM_DESC(max_pds,
		 "Maximum number of protection domains to support");

static unsigned int ib_qib_max_ahs = 0xFFFF;
module_param_named(max_ahs, ib_qib_max_ahs, uint, S_IRUGO);
MODULE_PARM_DESC(max_ahs, "Maximum number of address handles to support");

unsigned int ib_qib_max_cqes = 0x2FFFF;
module_param_named(max_cqes, ib_qib_max_cqes, uint, S_IRUGO);
MODULE_PARM_DESC(max_cqes,
		 "Maximum number of completion queue entries to support");

unsigned int ib_qib_max_cqs = 0x1FFFF;
module_param_named(max_cqs, ib_qib_max_cqs, uint, S_IRUGO);
MODULE_PARM_DESC(max_cqs, "Maximum number of completion queues to support");

unsigned int ib_qib_max_qp_wrs = 0x3FFF;
module_param_named(max_qp_wrs, ib_qib_max_qp_wrs, uint, S_IRUGO);
MODULE_PARM_DESC(max_qp_wrs, "Maximum number of QP WRs to support");

unsigned int ib_qib_max_qps = 16384;
module_param_named(max_qps, ib_qib_max_qps, uint, S_IRUGO);
MODULE_PARM_DESC(max_qps, "Maximum number of QPs to support");

unsigned int ib_qib_max_sges = 0x60;
module_param_named(max_sges, ib_qib_max_sges, uint, S_IRUGO);
MODULE_PARM_DESC(max_sges, "Maximum number of SGEs to support");

unsigned int ib_qib_max_mcast_grps = 16384;
module_param_named(max_mcast_grps, ib_qib_max_mcast_grps, uint, S_IRUGO);
MODULE_PARM_DESC(max_mcast_grps,
		 "Maximum number of multicast groups to support");

unsigned int ib_qib_max_mcast_qp_attached = 16;
module_param_named(max_mcast_qp_attached, ib_qib_max_mcast_qp_attached,
		   uint, S_IRUGO);
MODULE_PARM_DESC(max_mcast_qp_attached,
		 "Maximum number of attached QPs to support");

unsigned int ib_qib_max_srqs = 1024;
module_param_named(max_srqs, ib_qib_max_srqs, uint, S_IRUGO);
MODULE_PARM_DESC(max_srqs, "Maximum number of SRQs to support");

unsigned int ib_qib_max_srq_sges = 128;
module_param_named(max_srq_sges, ib_qib_max_srq_sges, uint, S_IRUGO);
MODULE_PARM_DESC(max_srq_sges, "Maximum number of SRQ SGEs to support");

unsigned int ib_qib_max_srq_wrs = 0x1FFFF;
module_param_named(max_srq_wrs, ib_qib_max_srq_wrs, uint, S_IRUGO);
MODULE_PARM_DESC(max_srq_wrs, "Maximum number of SRQ WRs to support");

static unsigned int ib_qib_disable_sma;
module_param_named(disable_sma, ib_qib_disable_sma, uint, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(disable_sma, "Disable the SMA");
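
/*
 * All of the limits above are load-time module parameters.  Purely as
 * an illustration (suitable values are workload dependent), they can
 * be overridden when loading the driver, e.g.:
 *
 *	modprobe ib_qib qp_table_size=512 lkey_table_size=17
 */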

/*
 * Note that it is OK to post send work requests in the SQE and ERR
 * states; qib_do_send() will process them and generate error
 * completions as per IB 1.2 C10-96.
 */
const int ib_qib_state_ops[IB_QPS_ERR + 1] = {
	[IB_QPS_RESET] = 0,
	[IB_QPS_INIT] = QIB_POST_RECV_OK,
	[IB_QPS_RTR] = QIB_POST_RECV_OK | QIB_PROCESS_RECV_OK,
	[IB_QPS_RTS] = QIB_POST_RECV_OK | QIB_PROCESS_RECV_OK |
	    QIB_POST_SEND_OK | QIB_PROCESS_SEND_OK |
	    QIB_PROCESS_NEXT_SEND_OK,
	[IB_QPS_SQD] = QIB_POST_RECV_OK | QIB_PROCESS_RECV_OK |
	    QIB_POST_SEND_OK | QIB_PROCESS_SEND_OK,
	[IB_QPS_SQE] = QIB_POST_RECV_OK | QIB_PROCESS_RECV_OK |
	    QIB_POST_SEND_OK | QIB_FLUSH_SEND,
	[IB_QPS_ERR] = QIB_POST_RECV_OK | QIB_FLUSH_RECV |
	    QIB_POST_SEND_OK | QIB_FLUSH_SEND,
};
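
/*
 * Sketch of how the table above is consulted elsewhere in this file:
 * index by the current QP state and test the capability bit of
 * interest, e.g.
 *
 *	if (unlikely(!(ib_qib_state_ops[qp->state] & QIB_POST_SEND_OK)))
 *		goto bail_inval;
 */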

/*
 * Translate ib_wr_opcode into ib_wc_opcode.
 */
const enum ib_wc_opcode ib_qib_wc_opcode[] = {
	[IB_WR_RDMA_WRITE] = IB_WC_RDMA_WRITE,
	[IB_WR_RDMA_WRITE_WITH_IMM] = IB_WC_RDMA_WRITE,
	[IB_WR_SEND] = IB_WC_SEND,
	[IB_WR_SEND_WITH_IMM] = IB_WC_SEND,
	[IB_WR_RDMA_READ] = IB_WC_RDMA_READ,
	[IB_WR_ATOMIC_CMP_AND_SWP] = IB_WC_COMP_SWAP,
	[IB_WR_ATOMIC_FETCH_AND_ADD] = IB_WC_FETCH_ADD
};
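
/*
 * When a send work queue entry completes, the opcode it was posted
 * with is translated through this table to the opcode reported in the
 * work completion, e.g. wc.opcode = ib_qib_wc_opcode[wqe->wr.opcode].
 */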

/*
 * System image GUID.
 */
__be64 ib_qib_sys_image_guid;

/**
 * qib_copy_sge - copy data to SGE memory
 * @ss: the SGE state
 * @data: the data to copy
 * @length: the length of the data
 * @release: boolean to release MR
 */
void qib_copy_sge(struct rvt_sge_state *ss, void *data, u32 length, int release)
{
	struct rvt_sge *sge = &ss->sge;

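	/*
	 * Walk the scatter/gather list: sge->sge_length tracks what is
	 * left of the current SGE and sge->length what is left of the
	 * current MR segment.  When a segment is consumed, advance
	 * through the MR's map[]/segs[] arrays; when a whole SGE is
	 * consumed, move on to the next entry in sg_list.
	 */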
	while (length) {
		u32 len = sge->length;

		if (len > length)
			len = length;
		if (len > sge->sge_length)
			len = sge->sge_length;
		BUG_ON(len == 0);
		memcpy(sge->vaddr, data, len);
		sge->vaddr += len;
		sge->length -= len;
		sge->sge_length -= len;
		if (sge->sge_length == 0) {
			if (release)
				rvt_put_mr(sge->mr);
			if (--ss->num_sge)
				*sge = *ss->sg_list++;
		} else if (sge->length == 0 && sge->mr->lkey) {
			if (++sge->n >= RVT_SEGSZ) {
				if (++sge->m >= sge->mr->mapsz)
					break;
				sge->n = 0;
			}
			sge->vaddr =
				sge->mr->map[sge->m]->segs[sge->n].vaddr;
			sge->length =
				sge->mr->map[sge->m]->segs[sge->n].length;
		}
		data += len;
		length -= len;
	}
}

/**
 * qib_skip_sge - skip over SGE memory - XXX almost dup of prev func
 * @ss: the SGE state
 * @length: the number of bytes to skip
 * @release: boolean to release MR
 */
void qib_skip_sge(struct rvt_sge_state *ss, u32 length, int release)
{
	struct rvt_sge *sge = &ss->sge;

	while (length) {
		u32 len = sge->length;

		if (len > length)
			len = length;
		if (len > sge->sge_length)
			len = sge->sge_length;
		BUG_ON(len == 0);
		sge->vaddr += len;
		sge->length -= len;
		sge->sge_length -= len;
		if (sge->sge_length == 0) {
			if (release)
				rvt_put_mr(sge->mr);
			if (--ss->num_sge)
				*sge = *ss->sg_list++;
		} else if (sge->length == 0 && sge->mr->lkey) {
			if (++sge->n >= RVT_SEGSZ) {
				if (++sge->m >= sge->mr->mapsz)
					break;
				sge->n = 0;
			}
			sge->vaddr =
				sge->mr->map[sge->m]->segs[sge->n].vaddr;
			sge->length =
				sge->mr->map[sge->m]->segs[sge->n].length;
		}
		length -= len;
	}
}

/*
 * Count the number of DMA descriptors needed to send length bytes of data.
 * Don't modify the rvt_sge_state to get the count.
 * Return zero if any of the segments is not aligned.
 */
static u32 qib_count_sge(struct rvt_sge_state *ss, u32 length)
{
	struct rvt_sge *sg_list = ss->sg_list;
	struct rvt_sge sge = ss->sge;
	u8 num_sge = ss->num_sge;
	u32 ndesc = 1;	/* count the header */

	while (length) {
		u32 len = sge.length;

		if (len > length)
			len = length;
		if (len > sge.sge_length)
			len = sge.sge_length;
		BUG_ON(len == 0);
		if (((long) sge.vaddr & (sizeof(u32) - 1)) ||
		    (len != length && (len & (sizeof(u32) - 1)))) {
			ndesc = 0;
			break;
		}
		ndesc++;
		sge.vaddr += len;
		sge.length -= len;
		sge.sge_length -= len;
		if (sge.sge_length == 0) {
			if (--num_sge)
				sge = *sg_list++;
		} else if (sge.length == 0 && sge.mr->lkey) {
			if (++sge.n >= RVT_SEGSZ) {
				if (++sge.m >= sge.mr->mapsz)
					break;
				sge.n = 0;
			}
			sge.vaddr =
				sge.mr->map[sge.m]->segs[sge.n].vaddr;
			sge.length =
				sge.mr->map[sge.m]->segs[sge.n].length;
		}
		length -= len;
	}
	return ndesc;
}

/*
 * Copy from the SGEs to the data buffer.
 */
static void qib_copy_from_sge(void *data, struct rvt_sge_state *ss, u32 length)
{
	struct rvt_sge *sge = &ss->sge;

	while (length) {
		u32 len = sge->length;

		if (len > length)
			len = length;
		if (len > sge->sge_length)
			len = sge->sge_length;
		BUG_ON(len == 0);
		memcpy(data, sge->vaddr, len);
		sge->vaddr += len;
		sge->length -= len;
		sge->sge_length -= len;
		if (sge->sge_length == 0) {
			if (--ss->num_sge)
				*sge = *ss->sg_list++;
		} else if (sge->length == 0 && sge->mr->lkey) {
			if (++sge->n >= RVT_SEGSZ) {
				if (++sge->m >= sge->mr->mapsz)
					break;
				sge->n = 0;
			}
			sge->vaddr =
				sge->mr->map[sge->m]->segs[sge->n].vaddr;
			sge->length =
				sge->mr->map[sge->m]->segs[sge->n].length;
		}
		data += len;
		length -= len;
	}
}

/**
 * qib_post_one_send - post one RC, UC, or UD send work request
 * @qp: the QP to post on
 * @wr: the work request to send
 * @scheduled: set to 1 if this call scheduled the send work itself
 */
static int qib_post_one_send(struct rvt_qp *qp, struct ib_send_wr *wr,
			     int *scheduled)
{
	struct rvt_swqe *wqe;
	u32 next;
	int i;
	int j;
	int acc;
	int ret;
	unsigned long flags;
	struct rvt_lkey_table *rkt;
	struct rvt_pd *pd;
	int avoid_schedule = 0;

342 spin_lock_irqsave(&qp->s_lock, flags);
343
344 /* Check that state is OK to post send. */
345 if (unlikely(!(ib_qib_state_ops[qp->state] & QIB_POST_SEND_OK)))
346 goto bail_inval;
347
348 /* IB spec says that num_sge == 0 is OK. */
349 if (wr->num_sge > qp->s_max_sge)
350 goto bail_inval;
351
352 /*
353 * Don't allow RDMA reads or atomic operations on UC or
354 * undefined operations.
355 * Make sure buffer is large enough to hold the result for atomics.
356 */
Dennis Dalessandro7c2e11f2016-01-22 12:45:59 -0800357 if (qp->ibqp.qp_type == IB_QPT_UC) {
Ralph Campbellf9315512010-05-23 21:44:54 -0700358 if ((unsigned) wr->opcode >= IB_WR_RDMA_READ)
359 goto bail_inval;
360 } else if (qp->ibqp.qp_type != IB_QPT_RC) {
361 /* Check IB_QPT_SMI, IB_QPT_GSI, IB_QPT_UD opcode */
362 if (wr->opcode != IB_WR_SEND &&
363 wr->opcode != IB_WR_SEND_WITH_IMM)
364 goto bail_inval;
365 /* Check UD destination address PD */
Christoph Hellwige622f2f2015-10-08 09:16:33 +0100366 if (qp->ibqp.pd != ud_wr(wr)->ah->pd)
Ralph Campbellf9315512010-05-23 21:44:54 -0700367 goto bail_inval;
368 } else if ((unsigned) wr->opcode > IB_WR_ATOMIC_FETCH_AND_ADD)
369 goto bail_inval;
370 else if (wr->opcode >= IB_WR_ATOMIC_CMP_AND_SWP &&
371 (wr->num_sge == 0 ||
372 wr->sg_list[0].length < sizeof(u64) ||
373 wr->sg_list[0].addr & (sizeof(u64) - 1)))
374 goto bail_inval;
375 else if (wr->opcode >= IB_WR_RDMA_READ && !qp->s_max_rd_atomic)
376 goto bail_inval;
377
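	/*
	 * The send queue is a ring; one slot is deliberately left
	 * unused so that head == last means empty while
	 * head + 1 == last means full.
	 */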
	next = qp->s_head + 1;
	if (next >= qp->s_size)
		next = 0;
	if (next == qp->s_last) {
		ret = -ENOMEM;
		goto bail;
	}

	rkt = &to_idev(qp->ibqp.device)->rdi.lkey_table;
	pd = ibpd_to_rvtpd(qp->ibqp.pd);
	wqe = get_swqe_ptr(qp, qp->s_head);

	if (qp->ibqp.qp_type != IB_QPT_UC &&
	    qp->ibqp.qp_type != IB_QPT_RC)
		memcpy(&wqe->ud_wr, ud_wr(wr), sizeof(wqe->ud_wr));
	else if (wr->opcode == IB_WR_REG_MR)
		memcpy(&wqe->reg_wr, reg_wr(wr),
			sizeof(wqe->reg_wr));
	else if (wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM ||
		 wr->opcode == IB_WR_RDMA_WRITE ||
		 wr->opcode == IB_WR_RDMA_READ)
		memcpy(&wqe->rdma_wr, rdma_wr(wr), sizeof(wqe->rdma_wr));
	else if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
		 wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD)
		memcpy(&wqe->atomic_wr, atomic_wr(wr), sizeof(wqe->atomic_wr));
	else
		memcpy(&wqe->wr, wr, sizeof(wqe->wr));

	wqe->length = 0;
	j = 0;
	if (wr->num_sge) {
		acc = wr->opcode >= IB_WR_RDMA_READ ?
			IB_ACCESS_LOCAL_WRITE : 0;
		for (i = 0; i < wr->num_sge; i++) {
			u32 length = wr->sg_list[i].length;
			int ok;

			if (length == 0)
				continue;
			ok = rvt_lkey_ok(rkt, pd, &wqe->sg_list[j],
					 &wr->sg_list[i], acc);
			if (!ok)
				goto bail_inval_free;
			wqe->length += length;
			j++;
		}
		wqe->wr.num_sge = j;
	}
	if (qp->ibqp.qp_type == IB_QPT_UC ||
	    qp->ibqp.qp_type == IB_QPT_RC) {
		if (wqe->length > 0x80000000U)
			goto bail_inval_free;
		if (wqe->length <= qp->pmtu)
			avoid_schedule = 1;
	} else if (wqe->length > (dd_from_ibdev(qp->ibqp.device)->pport +
		   qp->port_num - 1)->ibmtu) {
		goto bail_inval_free;
	} else {
		atomic_inc(&ibah_to_rvtah(ud_wr(wr)->ah)->refcount);
		avoid_schedule = 1;
	}
	wqe->ssn = qp->s_ssn++;
	qp->s_head = next;

	ret = 0;
	goto bail;

bail_inval_free:
	while (j) {
		struct rvt_sge *sge = &wqe->sg_list[--j];

		rvt_put_mr(sge->mr);
	}
bail_inval:
	ret = -EINVAL;
bail:
	if (!ret && !wr->next && !avoid_schedule &&
	    !qib_sdma_empty(
	       dd_from_ibdev(qp->ibqp.device)->pport + qp->port_num - 1)) {
		qib_schedule_send(qp);
		*scheduled = 1;
	}
	spin_unlock_irqrestore(&qp->s_lock, flags);
	return ret;
}

/**
 * qib_post_send - post a send on a QP
 * @ibqp: the QP to post the send on
 * @wr: the list of work requests to post
 * @bad_wr: the first bad WR is put here
 *
 * This may be called from interrupt context.
 */
static int qib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
			 struct ib_send_wr **bad_wr)
{
	struct rvt_qp *qp = to_iqp(ibqp);
	struct qib_qp_priv *priv = qp->priv;
	int err = 0;
	int scheduled = 0;

	for (; wr; wr = wr->next) {
		err = qib_post_one_send(qp, wr, &scheduled);
		if (err) {
			*bad_wr = wr;
			goto bail;
		}
	}

	/* Try to do the send work in the caller's context. */
	if (!scheduled)
		qib_do_send(&priv->s_work);

bail:
	return err;
}

/**
 * qib_post_receive - post a receive on a QP
 * @ibqp: the QP to post the receive on
 * @wr: the WR to post
 * @bad_wr: the first bad WR is put here
 *
 * This may be called from interrupt context.
 */
static int qib_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
			    struct ib_recv_wr **bad_wr)
{
	struct rvt_qp *qp = to_iqp(ibqp);
	struct rvt_rwq *wq = qp->r_rq.wq;
	unsigned long flags;
	int ret;

	/* Check that state is OK to post receive. */
	if (!(ib_qib_state_ops[qp->state] & QIB_POST_RECV_OK) || !wq) {
		*bad_wr = wr;
		ret = -EINVAL;
		goto bail;
	}

	for (; wr; wr = wr->next) {
		struct rvt_rwqe *wqe;
		u32 next;
		int i;

		if ((unsigned) wr->num_sge > qp->r_rq.max_sge) {
			*bad_wr = wr;
			ret = -EINVAL;
			goto bail;
		}

		spin_lock_irqsave(&qp->r_rq.lock, flags);
		next = wq->head + 1;
		if (next >= qp->r_rq.size)
			next = 0;
		if (next == wq->tail) {
			spin_unlock_irqrestore(&qp->r_rq.lock, flags);
			*bad_wr = wr;
			ret = -ENOMEM;
			goto bail;
		}

		wqe = get_rwqe_ptr(&qp->r_rq, wq->head);
		wqe->wr_id = wr->wr_id;
		wqe->num_sge = wr->num_sge;
		for (i = 0; i < wr->num_sge; i++)
			wqe->sg_list[i] = wr->sg_list[i];
		/* Make sure queue entry is written before the head index. */
		smp_wmb();
		wq->head = next;
		spin_unlock_irqrestore(&qp->r_rq.lock, flags);
	}
	ret = 0;

bail:
	return ret;
}

/**
 * qib_qp_rcv - process an incoming packet on a QP
 * @rcd: the context pointer
 * @hdr: the packet header
 * @has_grh: true if the packet has a GRH
 * @data: the packet data
 * @tlen: the packet length
 * @qp: the QP the packet came on
 *
 * This is called from qib_ib_rcv() to process an incoming packet
 * for the given QP.
 * Called at interrupt level.
 */
static void qib_qp_rcv(struct qib_ctxtdata *rcd, struct qib_ib_header *hdr,
		       int has_grh, void *data, u32 tlen, struct rvt_qp *qp)
{
	struct qib_ibport *ibp = &rcd->ppd->ibport_data;

	spin_lock(&qp->r_lock);

	/* Check for valid receive state. */
	if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK)) {
		ibp->rvp.n_pkt_drops++;
		goto unlock;
	}

	switch (qp->ibqp.qp_type) {
	case IB_QPT_SMI:
	case IB_QPT_GSI:
		if (ib_qib_disable_sma)
			break;
		/* FALLTHROUGH */
	case IB_QPT_UD:
		qib_ud_rcv(ibp, hdr, has_grh, data, tlen, qp);
		break;

	case IB_QPT_RC:
		qib_rc_rcv(rcd, hdr, has_grh, data, tlen, qp);
		break;

	case IB_QPT_UC:
		qib_uc_rcv(ibp, hdr, has_grh, data, tlen, qp);
		break;

	default:
		break;
	}

unlock:
	spin_unlock(&qp->r_lock);
}

/**
 * qib_ib_rcv - process an incoming packet
 * @rcd: the context pointer
 * @rhdr: the header of the packet
 * @data: the packet payload
 * @tlen: the packet length
 *
 * This is called from qib_kreceive() to process an incoming packet at
 * interrupt level. Tlen is the length of the header + data + CRC in bytes.
 */
void qib_ib_rcv(struct qib_ctxtdata *rcd, void *rhdr, void *data, u32 tlen)
{
	struct qib_pportdata *ppd = rcd->ppd;
	struct qib_ibport *ibp = &ppd->ibport_data;
	struct qib_ib_header *hdr = rhdr;
	struct qib_other_headers *ohdr;
	struct rvt_qp *qp;
	u32 qp_num;
	int lnh;
	u8 opcode;
	u16 lid;

	/* 24 == LRH+BTH+CRC */
	if (unlikely(tlen < 24))
		goto drop;

	/* Check for a valid destination LID (see ch. 7.11.1). */
	lid = be16_to_cpu(hdr->lrh[1]);
	if (lid < be16_to_cpu(IB_MULTICAST_LID_BASE)) {
		lid &= ~((1 << ppd->lmc) - 1);
		if (unlikely(lid != ppd->lid))
			goto drop;
	}

	/* Check for GRH */
	lnh = be16_to_cpu(hdr->lrh[0]) & 3;
	if (lnh == QIB_LRH_BTH)
		ohdr = &hdr->u.oth;
	else if (lnh == QIB_LRH_GRH) {
		u32 vtf;

		ohdr = &hdr->u.l.oth;
		if (hdr->u.l.grh.next_hdr != IB_GRH_NEXT_HDR)
			goto drop;
		vtf = be32_to_cpu(hdr->u.l.grh.version_tclass_flow);
		if ((vtf >> IB_GRH_VERSION_SHIFT) != IB_GRH_VERSION)
			goto drop;
	} else
		goto drop;

	opcode = (be32_to_cpu(ohdr->bth[0]) >> 24) & 0x7f;
#ifdef CONFIG_DEBUG_FS
	rcd->opstats->stats[opcode].n_bytes += tlen;
	rcd->opstats->stats[opcode].n_packets++;
#endif

	/* Get the destination QP number. */
	qp_num = be32_to_cpu(ohdr->bth[1]) & QIB_QPN_MASK;
	if (qp_num == QIB_MULTICAST_QPN) {
		struct qib_mcast *mcast;
		struct qib_mcast_qp *p;

		if (lnh != QIB_LRH_GRH)
			goto drop;
		mcast = qib_mcast_find(ibp, &hdr->u.l.grh.dgid);
		if (mcast == NULL)
			goto drop;
		this_cpu_inc(ibp->pmastats->n_multicast_rcv);
		list_for_each_entry_rcu(p, &mcast->qp_list, list)
			qib_qp_rcv(rcd, hdr, 1, data, tlen, p->qp);
		/*
		 * Notify qib_multicast_detach() if it is waiting for us
		 * to finish.
		 */
		if (atomic_dec_return(&mcast->refcount) <= 1)
			wake_up(&mcast->wait);
	} else {
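		/*
		 * Unicast fast path: cache the most recent QP looked up
		 * by this receive context so back-to-back packets for
		 * the same QPN avoid the cost of qib_lookup_qpn().
		 */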
		if (rcd->lookaside_qp) {
			if (rcd->lookaside_qpn != qp_num) {
				if (atomic_dec_and_test(
					&rcd->lookaside_qp->refcount))
					wake_up(
					 &rcd->lookaside_qp->wait);
				rcd->lookaside_qp = NULL;
			}
		}
		if (!rcd->lookaside_qp) {
			qp = qib_lookup_qpn(ibp, qp_num);
			if (!qp)
				goto drop;
			rcd->lookaside_qp = qp;
			rcd->lookaside_qpn = qp_num;
		} else
			qp = rcd->lookaside_qp;
		this_cpu_inc(ibp->pmastats->n_unicast_rcv);
		qib_qp_rcv(rcd, hdr, lnh == QIB_LRH_GRH, data, tlen, qp);
	}
	return;

drop:
	ibp->rvp.n_pkt_drops++;
}

/*
 * This is called from a timer to check for QPs
 * which need kernel memory in order to send a packet.
 */
static void mem_timer(unsigned long data)
{
	struct qib_ibdev *dev = (struct qib_ibdev *) data;
	struct list_head *list = &dev->memwait;
	struct rvt_qp *qp = NULL;
	struct qib_qp_priv *priv = NULL;
	unsigned long flags;

	spin_lock_irqsave(&dev->rdi.pending_lock, flags);
	if (!list_empty(list)) {
		priv = list_entry(list->next, struct qib_qp_priv, iowait);
		qp = priv->owner;
		list_del_init(&priv->iowait);
		atomic_inc(&qp->refcount);
		if (!list_empty(list))
			mod_timer(&dev->mem_timer, jiffies + 1);
	}
	spin_unlock_irqrestore(&dev->rdi.pending_lock, flags);

	if (qp) {
		spin_lock_irqsave(&qp->s_lock, flags);
		if (qp->s_flags & QIB_S_WAIT_KMEM) {
			qp->s_flags &= ~QIB_S_WAIT_KMEM;
			qib_schedule_send(qp);
		}
		spin_unlock_irqrestore(&qp->s_lock, flags);
		if (atomic_dec_and_test(&qp->refcount))
			wake_up(&qp->wait);
	}
}

static void update_sge(struct rvt_sge_state *ss, u32 length)
{
	struct rvt_sge *sge = &ss->sge;

	sge->vaddr += length;
	sge->length -= length;
	sge->sge_length -= length;
	if (sge->sge_length == 0) {
		if (--ss->num_sge)
			*sge = *ss->sg_list++;
	} else if (sge->length == 0 && sge->mr->lkey) {
		if (++sge->n >= RVT_SEGSZ) {
			if (++sge->m >= sge->mr->mapsz)
				return;
			sge->n = 0;
		}
		sge->vaddr = sge->mr->map[sge->m]->segs[sge->n].vaddr;
		sge->length = sge->mr->map[sge->m]->segs[sge->n].length;
	}
}

#ifdef __LITTLE_ENDIAN
static inline u32 get_upper_bits(u32 data, u32 shift)
{
	return data >> shift;
}

static inline u32 set_upper_bits(u32 data, u32 shift)
{
	return data << shift;
}

static inline u32 clear_upper_bytes(u32 data, u32 n, u32 off)
{
	data <<= ((sizeof(u32) - n) * BITS_PER_BYTE);
	data >>= ((sizeof(u32) - n - off) * BITS_PER_BYTE);
	return data;
}
#else
static inline u32 get_upper_bits(u32 data, u32 shift)
{
	return data << shift;
}

static inline u32 set_upper_bits(u32 data, u32 shift)
{
	return data >> shift;
}

static inline u32 clear_upper_bytes(u32 data, u32 n, u32 off)
{
	data >>= ((sizeof(u32) - n) * BITS_PER_BYTE);
	data <<= ((sizeof(u32) - n - off) * BITS_PER_BYTE);
	return data;
}
#endif
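
/*
 * Worked example for the little-endian helpers above:
 * clear_upper_bytes(0xDDCCBBAA, 2, 1) shifts left by 16 (dropping all
 * but the low two bytes) and then right by 8, yielding 0x00BBAA00,
 * i.e. the two valid bytes placed at byte offset 1 with every other
 * byte cleared.  copy_io() below uses these helpers to assemble
 * unaligned source bytes into whole dwords for the PIO buffer.
 */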

static void copy_io(u32 __iomem *piobuf, struct rvt_sge_state *ss,
		    u32 length, unsigned flush_wc)
{
	u32 extra = 0;
	u32 data = 0;
	u32 last;

	while (1) {
		u32 len = ss->sge.length;
		u32 off;

		if (len > length)
			len = length;
		if (len > ss->sge.sge_length)
			len = ss->sge.sge_length;
		BUG_ON(len == 0);
		/* If the source address is not aligned, try to align it. */
		off = (unsigned long)ss->sge.vaddr & (sizeof(u32) - 1);
		if (off) {
			u32 *addr = (u32 *)((unsigned long)ss->sge.vaddr &
					    ~(sizeof(u32) - 1));
			u32 v = get_upper_bits(*addr, off * BITS_PER_BYTE);
			u32 y;

			y = sizeof(u32) - off;
			if (len > y)
				len = y;
			if (len + extra >= sizeof(u32)) {
				data |= set_upper_bits(v, extra *
						       BITS_PER_BYTE);
				len = sizeof(u32) - extra;
				if (len == length) {
					last = data;
					break;
				}
				__raw_writel(data, piobuf);
				piobuf++;
				extra = 0;
				data = 0;
			} else {
				/* Clear unused upper bytes */
				data |= clear_upper_bytes(v, len, extra);
				if (len == length) {
					last = data;
					break;
				}
				extra += len;
			}
		} else if (extra) {
			/* Source address is aligned. */
			u32 *addr = (u32 *) ss->sge.vaddr;
			int shift = extra * BITS_PER_BYTE;
			int ushift = 32 - shift;
			u32 l = len;

			while (l >= sizeof(u32)) {
				u32 v = *addr;

				data |= set_upper_bits(v, shift);
				__raw_writel(data, piobuf);
				data = get_upper_bits(v, ushift);
				piobuf++;
				addr++;
				l -= sizeof(u32);
			}
			/*
			 * We still have 'extra' number of bytes leftover.
			 */
			if (l) {
				u32 v = *addr;

				if (l + extra >= sizeof(u32)) {
					data |= set_upper_bits(v, shift);
					len -= l + extra - sizeof(u32);
					if (len == length) {
						last = data;
						break;
					}
					__raw_writel(data, piobuf);
					piobuf++;
					extra = 0;
					data = 0;
				} else {
					/* Clear unused upper bytes */
					data |= clear_upper_bytes(v, l, extra);
					if (len == length) {
						last = data;
						break;
					}
					extra += l;
				}
			} else if (len == length) {
				last = data;
				break;
			}
		} else if (len == length) {
			u32 w;

			/*
			 * Need to round up for the last dword in the
			 * packet.
			 */
			w = (len + 3) >> 2;
			qib_pio_copy(piobuf, ss->sge.vaddr, w - 1);
			piobuf += w - 1;
			last = ((u32 *) ss->sge.vaddr)[w - 1];
			break;
		} else {
			u32 w = len >> 2;

			qib_pio_copy(piobuf, ss->sge.vaddr, w);
			piobuf += w;

			extra = len & (sizeof(u32) - 1);
			if (extra) {
				u32 v = ((u32 *) ss->sge.vaddr)[w];

				/* Clear unused upper bytes */
				data = clear_upper_bytes(v, extra, 0);
			}
		}
		update_sge(ss, len);
		length -= len;
	}
	/* Update address before sending packet. */
	update_sge(ss, length);
	if (flush_wc) {
		/* must flush early everything before trigger word */
		qib_flush_wc();
		__raw_writel(last, piobuf);
		/* be sure trigger word is written */
		qib_flush_wc();
	} else
		__raw_writel(last, piobuf);
}
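/*
 * Slow path for allocating a verbs txreq: takes both qp->s_lock and
 * the device's pending_lock so the QP can be queued on the txwait
 * list when the free list is empty.  Marked noinline to keep the
 * get_txreq() fast path below compact.
 */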
static noinline struct qib_verbs_txreq *__get_txreq(struct qib_ibdev *dev,
					   struct rvt_qp *qp)
{
	struct qib_qp_priv *priv = qp->priv;
	struct qib_verbs_txreq *tx;
	unsigned long flags;

	spin_lock_irqsave(&qp->s_lock, flags);
	spin_lock(&dev->rdi.pending_lock);

	if (!list_empty(&dev->txreq_free)) {
		struct list_head *l = dev->txreq_free.next;

		list_del(l);
		spin_unlock(&dev->rdi.pending_lock);
		spin_unlock_irqrestore(&qp->s_lock, flags);
		tx = list_entry(l, struct qib_verbs_txreq, txreq.list);
	} else {
		if (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK &&
		    list_empty(&priv->iowait)) {
			dev->n_txwait++;
			qp->s_flags |= QIB_S_WAIT_TX;
			list_add_tail(&priv->iowait, &dev->txwait);
		}
		qp->s_flags &= ~QIB_S_BUSY;
		spin_unlock(&dev->rdi.pending_lock);
		spin_unlock_irqrestore(&qp->s_lock, flags);
		tx = ERR_PTR(-EBUSY);
	}
	return tx;
}

static inline struct qib_verbs_txreq *get_txreq(struct qib_ibdev *dev,
					 struct rvt_qp *qp)
{
	struct qib_verbs_txreq *tx;
	unsigned long flags;

	spin_lock_irqsave(&dev->rdi.pending_lock, flags);
	/* assume the list is non-empty */
	if (likely(!list_empty(&dev->txreq_free))) {
		struct list_head *l = dev->txreq_free.next;

		list_del(l);
		spin_unlock_irqrestore(&dev->rdi.pending_lock, flags);
		tx = list_entry(l, struct qib_verbs_txreq, txreq.list);
	} else {
		/* call slow path to get the extra lock */
		spin_unlock_irqrestore(&dev->rdi.pending_lock, flags);
		tx = __get_txreq(dev, qp);
	}
	return tx;
}

void qib_put_txreq(struct qib_verbs_txreq *tx)
{
	struct qib_ibdev *dev;
	struct rvt_qp *qp;
	struct qib_qp_priv *priv;
	unsigned long flags;

	qp = tx->qp;
	dev = to_idev(qp->ibqp.device);

	if (atomic_dec_and_test(&qp->refcount))
		wake_up(&qp->wait);
	if (tx->mr) {
		rvt_put_mr(tx->mr);
		tx->mr = NULL;
	}
	if (tx->txreq.flags & QIB_SDMA_TXREQ_F_FREEBUF) {
		tx->txreq.flags &= ~QIB_SDMA_TXREQ_F_FREEBUF;
		dma_unmap_single(&dd_from_dev(dev)->pcidev->dev,
				 tx->txreq.addr, tx->hdr_dwords << 2,
				 DMA_TO_DEVICE);
		kfree(tx->align_buf);
	}

	spin_lock_irqsave(&dev->rdi.pending_lock, flags);

	/* Put struct back on free list */
	list_add(&tx->txreq.list, &dev->txreq_free);

	if (!list_empty(&dev->txwait)) {
		/* Wake up first QP wanting a free struct */
		priv = list_entry(dev->txwait.next, struct qib_qp_priv,
				  iowait);
		qp = priv->owner;
		list_del_init(&priv->iowait);
		atomic_inc(&qp->refcount);
		spin_unlock_irqrestore(&dev->rdi.pending_lock, flags);

		spin_lock_irqsave(&qp->s_lock, flags);
		if (qp->s_flags & QIB_S_WAIT_TX) {
			qp->s_flags &= ~QIB_S_WAIT_TX;
			qib_schedule_send(qp);
		}
		spin_unlock_irqrestore(&qp->s_lock, flags);

		if (atomic_dec_and_test(&qp->refcount))
			wake_up(&qp->wait);
	} else
		spin_unlock_irqrestore(&dev->rdi.pending_lock, flags);
}

/*
 * This is called when there are send DMA descriptors that might be
 * available.
 *
 * This is called with ppd->sdma_lock held.
 */
void qib_verbs_sdma_desc_avail(struct qib_pportdata *ppd, unsigned avail)
{
	struct rvt_qp *qp, *nqp;
	struct qib_qp_priv *qpp, *nqpp;
	struct rvt_qp *qps[20];
	struct qib_ibdev *dev;
	unsigned i, n;

	n = 0;
	dev = &ppd->dd->verbs_dev;
	spin_lock(&dev->rdi.pending_lock);

	/* Search wait list for first QP wanting DMA descriptors. */
	list_for_each_entry_safe(qpp, nqpp, &dev->dmawait, iowait) {
		qp = qpp->owner;
		nqp = nqpp->owner;
		if (qp->port_num != ppd->port)
			continue;
		if (n == ARRAY_SIZE(qps))
			break;
		if (qpp->s_tx->txreq.sg_count > avail)
			break;
		avail -= qpp->s_tx->txreq.sg_count;
		list_del_init(&qpp->iowait);
		atomic_inc(&qp->refcount);
		qps[n++] = qp;
	}

	spin_unlock(&dev->rdi.pending_lock);

	for (i = 0; i < n; i++) {
		qp = qps[i];
		spin_lock(&qp->s_lock);
		if (qp->s_flags & QIB_S_WAIT_DMA_DESC) {
			qp->s_flags &= ~QIB_S_WAIT_DMA_DESC;
			qib_schedule_send(qp);
		}
		spin_unlock(&qp->s_lock);
		if (atomic_dec_and_test(&qp->refcount))
			wake_up(&qp->wait);
	}
}

/*
 * This is called with ppd->sdma_lock held.
 */
static void sdma_complete(struct qib_sdma_txreq *cookie, int status)
{
	struct qib_verbs_txreq *tx =
		container_of(cookie, struct qib_verbs_txreq, txreq);
	struct rvt_qp *qp = tx->qp;
	struct qib_qp_priv *priv = qp->priv;

	spin_lock(&qp->s_lock);
	if (tx->wqe)
		qib_send_complete(qp, tx->wqe, IB_WC_SUCCESS);
	else if (qp->ibqp.qp_type == IB_QPT_RC) {
		struct qib_ib_header *hdr;

		if (tx->txreq.flags & QIB_SDMA_TXREQ_F_FREEBUF)
			hdr = &tx->align_buf->hdr;
		else {
			struct qib_ibdev *dev = to_idev(qp->ibqp.device);

			hdr = &dev->pio_hdrs[tx->hdr_inx].hdr;
		}
		qib_rc_send_complete(qp, hdr);
	}
	if (atomic_dec_and_test(&priv->s_dma_busy)) {
		if (qp->state == IB_QPS_RESET)
			wake_up(&priv->wait_dma);
		else if (qp->s_flags & QIB_S_WAIT_DMA) {
			qp->s_flags &= ~QIB_S_WAIT_DMA;
			qib_schedule_send(qp);
		}
	}
	spin_unlock(&qp->s_lock);

	qib_put_txreq(tx);
}

static int wait_kmem(struct qib_ibdev *dev, struct rvt_qp *qp)
{
	struct qib_qp_priv *priv = qp->priv;
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&qp->s_lock, flags);
	if (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK) {
		spin_lock(&dev->rdi.pending_lock);
		if (list_empty(&priv->iowait)) {
			if (list_empty(&dev->memwait))
				mod_timer(&dev->mem_timer, jiffies + 1);
			qp->s_flags |= QIB_S_WAIT_KMEM;
			list_add_tail(&priv->iowait, &dev->memwait);
		}
		spin_unlock(&dev->rdi.pending_lock);
		qp->s_flags &= ~QIB_S_BUSY;
		ret = -EBUSY;
	}
	spin_unlock_irqrestore(&qp->s_lock, flags);

	return ret;
}

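/*
 * Send a packet using the SDMA engine.  Three cases are handled
 * below: resending a previously constructed request (priv->s_tx),
 * gathering the payload straight from the SGE list when every segment
 * is suitably aligned, and otherwise copying the header and payload
 * into a single DMA-mapped bounce buffer.
 */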
static int qib_verbs_send_dma(struct rvt_qp *qp, struct qib_ib_header *hdr,
			      u32 hdrwords, struct rvt_sge_state *ss, u32 len,
			      u32 plen, u32 dwords)
{
	struct qib_qp_priv *priv = qp->priv;
	struct qib_ibdev *dev = to_idev(qp->ibqp.device);
	struct qib_devdata *dd = dd_from_dev(dev);
	struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	struct qib_verbs_txreq *tx;
	struct qib_pio_header *phdr;
	u32 control;
	u32 ndesc;
	int ret;

	tx = priv->s_tx;
	if (tx) {
		priv->s_tx = NULL;
		/* resend previously constructed packet */
		ret = qib_sdma_verbs_send(ppd, tx->ss, tx->dwords, tx);
		goto bail;
	}

	tx = get_txreq(dev, qp);
	if (IS_ERR(tx))
		goto bail_tx;

	control = dd->f_setpbc_control(ppd, plen, qp->s_srate,
				       be16_to_cpu(hdr->lrh[0]) >> 12);
	tx->qp = qp;
	atomic_inc(&qp->refcount);
	tx->wqe = qp->s_wqe;
	tx->mr = qp->s_rdma_mr;
	if (qp->s_rdma_mr)
		qp->s_rdma_mr = NULL;
	tx->txreq.callback = sdma_complete;
	if (dd->flags & QIB_HAS_SDMA_TIMEOUT)
		tx->txreq.flags = QIB_SDMA_TXREQ_F_HEADTOHOST;
	else
		tx->txreq.flags = QIB_SDMA_TXREQ_F_INTREQ;
	if (plen + 1 > dd->piosize2kmax_dwords)
		tx->txreq.flags |= QIB_SDMA_TXREQ_F_USELARGEBUF;

	if (len) {
		/*
		 * Don't try to DMA if it takes more descriptors than
		 * the queue holds.
		 */
		ndesc = qib_count_sge(ss, len);
		if (ndesc >= ppd->sdma_descq_cnt)
			ndesc = 0;
	} else
		ndesc = 1;
	if (ndesc) {
		phdr = &dev->pio_hdrs[tx->hdr_inx];
		phdr->pbc[0] = cpu_to_le32(plen);
		phdr->pbc[1] = cpu_to_le32(control);
		memcpy(&phdr->hdr, hdr, hdrwords << 2);
		tx->txreq.flags |= QIB_SDMA_TXREQ_F_FREEDESC;
		tx->txreq.sg_count = ndesc;
		tx->txreq.addr = dev->pio_hdrs_phys +
			tx->hdr_inx * sizeof(struct qib_pio_header);
		tx->hdr_dwords = hdrwords + 2; /* add PBC length */
		ret = qib_sdma_verbs_send(ppd, ss, dwords, tx);
		goto bail;
	}

	/* Allocate a buffer and copy the header and payload to it. */
	tx->hdr_dwords = plen + 1;
	phdr = kmalloc(tx->hdr_dwords << 2, GFP_ATOMIC);
	if (!phdr)
		goto err_tx;
	phdr->pbc[0] = cpu_to_le32(plen);
	phdr->pbc[1] = cpu_to_le32(control);
	memcpy(&phdr->hdr, hdr, hdrwords << 2);
	qib_copy_from_sge((u32 *) &phdr->hdr + hdrwords, ss, len);

	tx->txreq.addr = dma_map_single(&dd->pcidev->dev, phdr,
					tx->hdr_dwords << 2, DMA_TO_DEVICE);
	if (dma_mapping_error(&dd->pcidev->dev, tx->txreq.addr))
		goto map_err;
	tx->align_buf = phdr;
	tx->txreq.flags |= QIB_SDMA_TXREQ_F_FREEBUF;
	tx->txreq.sg_count = 1;
	ret = qib_sdma_verbs_send(ppd, NULL, 0, tx);
	goto unaligned;

map_err:
	kfree(phdr);
err_tx:
	qib_put_txreq(tx);
	ret = wait_kmem(dev, qp);
unaligned:
	ibp->rvp.n_unaligned++;
bail:
	return ret;
bail_tx:
	ret = PTR_ERR(tx);
	goto bail;
}

/*
 * If we are now in the error state, return zero to flush the
 * send work request.
 */
static int no_bufs_available(struct rvt_qp *qp)
{
	struct qib_qp_priv *priv = qp->priv;
	struct qib_ibdev *dev = to_idev(qp->ibqp.device);
	struct qib_devdata *dd;
	unsigned long flags;
	int ret = 0;

	/*
	 * Note that as soon as want_buffer() is called and
	 * possibly before it returns, qib_ib_piobufavail()
	 * could be called. Therefore, put QP on the I/O wait list before
	 * enabling the PIO avail interrupt.
	 */
	spin_lock_irqsave(&qp->s_lock, flags);
	if (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK) {
		spin_lock(&dev->rdi.pending_lock);
		if (list_empty(&priv->iowait)) {
			dev->n_piowait++;
			qp->s_flags |= QIB_S_WAIT_PIO;
			list_add_tail(&priv->iowait, &dev->piowait);
			dd = dd_from_dev(dev);
			dd->f_wantpiobuf_intr(dd, 1);
		}
		spin_unlock(&dev->rdi.pending_lock);
		qp->s_flags &= ~QIB_S_BUSY;
		ret = -EBUSY;
	}
	spin_unlock_irqrestore(&qp->s_lock, flags);
	return ret;
}

static int qib_verbs_send_pio(struct rvt_qp *qp, struct qib_ib_header *ibhdr,
			      u32 hdrwords, struct rvt_sge_state *ss, u32 len,
			      u32 plen, u32 dwords)
{
	struct qib_devdata *dd = dd_from_ibdev(qp->ibqp.device);
	struct qib_pportdata *ppd = dd->pport + qp->port_num - 1;
	u32 *hdr = (u32 *) ibhdr;
	u32 __iomem *piobuf_orig;
	u32 __iomem *piobuf;
	u64 pbc;
	unsigned long flags;
	unsigned flush_wc;
	u32 control;
	u32 pbufn;

	control = dd->f_setpbc_control(ppd, plen, qp->s_srate,
				       be16_to_cpu(ibhdr->lrh[0]) >> 12);
	pbc = ((u64) control << 32) | plen;
	piobuf = dd->f_getsendbuf(ppd, pbc, &pbufn);
	if (unlikely(piobuf == NULL))
		return no_bufs_available(qp);

	/*
	 * Write the pbc.
	 * We have to flush after the PBC for correctness on some cpus
	 * or WC buffer can be written out of order.
	 */
	writeq(pbc, piobuf);
	piobuf_orig = piobuf;
	piobuf += 2;

	flush_wc = dd->flags & QIB_PIO_FLUSH_WC;
	if (len == 0) {
		/*
		 * If there is just the header portion, must flush before
		 * writing last word of header for correctness, and after
		 * the last header word (trigger word).
		 */
		if (flush_wc) {
			qib_flush_wc();
			qib_pio_copy(piobuf, hdr, hdrwords - 1);
			qib_flush_wc();
			__raw_writel(hdr[hdrwords - 1], piobuf + hdrwords - 1);
			qib_flush_wc();
		} else
			qib_pio_copy(piobuf, hdr, hdrwords);
		goto done;
	}

	if (flush_wc)
		qib_flush_wc();
	qib_pio_copy(piobuf, hdr, hdrwords);
	piobuf += hdrwords;

	/* The common case is aligned and contained in one segment. */
	if (likely(ss->num_sge == 1 && len <= ss->sge.length &&
		   !((unsigned long)ss->sge.vaddr & (sizeof(u32) - 1)))) {
		u32 *addr = (u32 *) ss->sge.vaddr;

		/* Update address before sending packet. */
		update_sge(ss, len);
		if (flush_wc) {
			qib_pio_copy(piobuf, addr, dwords - 1);
			/* must flush early everything before trigger word */
			qib_flush_wc();
			__raw_writel(addr[dwords - 1], piobuf + dwords - 1);
			/* be sure trigger word is written */
			qib_flush_wc();
		} else
			qib_pio_copy(piobuf, addr, dwords);
		goto done;
	}
	copy_io(piobuf, ss, len, flush_wc);
done:
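	/*
	 * On chips that use a special trigger, the packet is launched
	 * by one final write of a magic pattern at a fixed offset in
	 * the buffer rather than by the last data word itself; that is
	 * what the QIB_USE_SPCL_TRIG block below does.
	 */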
	if (dd->flags & QIB_USE_SPCL_TRIG) {
		u32 spcl_off = (pbufn >= dd->piobcnt2k) ? 2047 : 1023;

		qib_flush_wc();
		__raw_writel(0xaebecede, piobuf_orig + spcl_off);
	}
	qib_sendbuf_done(dd, pbufn);
	if (qp->s_rdma_mr) {
		rvt_put_mr(qp->s_rdma_mr);
		qp->s_rdma_mr = NULL;
	}
	if (qp->s_wqe) {
		spin_lock_irqsave(&qp->s_lock, flags);
		qib_send_complete(qp, qp->s_wqe, IB_WC_SUCCESS);
		spin_unlock_irqrestore(&qp->s_lock, flags);
	} else if (qp->ibqp.qp_type == IB_QPT_RC) {
		spin_lock_irqsave(&qp->s_lock, flags);
		qib_rc_send_complete(qp, ibhdr);
		spin_unlock_irqrestore(&qp->s_lock, flags);
	}
	return 0;
}

/**
 * qib_verbs_send - send a packet
 * @qp: the QP to send on
 * @hdr: the packet header
 * @hdrwords: the number of 32-bit words in the header
 * @ss: the SGE to send
 * @len: the length of the packet in bytes
 *
 * Return zero if packet is sent or queued OK.
 * Return non-zero and clear qp->s_flags QIB_S_BUSY otherwise.
 */
int qib_verbs_send(struct rvt_qp *qp, struct qib_ib_header *hdr,
		   u32 hdrwords, struct rvt_sge_state *ss, u32 len)
{
	struct qib_devdata *dd = dd_from_ibdev(qp->ibqp.device);
	u32 plen;
	int ret;
	u32 dwords = (len + 3) >> 2;

	/*
	 * Calculate the send buffer trigger address.
	 * The +1 counts for the pbc control dword following the pbc length.
	 */
	plen = hdrwords + dwords + 1;
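	/*
	 * Example: a 13-dword header with a 100-byte payload gives
	 * dwords = (100 + 3) >> 2 = 25 and plen = 13 + 25 + 1 = 39
	 * dwords, including the PBC.
	 */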

	/*
	 * VL15 packets (IB_QPT_SMI) will always use PIO, so we
	 * can defer SDMA restart until link goes ACTIVE without
	 * worrying about just how we got there.
	 */
	if (qp->ibqp.qp_type == IB_QPT_SMI ||
	    !(dd->flags & QIB_HAS_SEND_DMA))
		ret = qib_verbs_send_pio(qp, hdr, hdrwords, ss, len,
					 plen, dwords);
	else
		ret = qib_verbs_send_dma(qp, hdr, hdrwords, ss, len,
					 plen, dwords);

	return ret;
}

int qib_snapshot_counters(struct qib_pportdata *ppd, u64 *swords,
			  u64 *rwords, u64 *spkts, u64 *rpkts,
			  u64 *xmit_wait)
{
	int ret;
	struct qib_devdata *dd = ppd->dd;

	if (!(dd->flags & QIB_PRESENT)) {
		/* no hardware, freeze, etc. */
		ret = -EINVAL;
		goto bail;
	}
	*swords = dd->f_portcntr(ppd, QIBPORTCNTR_WORDSEND);
	*rwords = dd->f_portcntr(ppd, QIBPORTCNTR_WORDRCV);
	*spkts = dd->f_portcntr(ppd, QIBPORTCNTR_PKTSEND);
	*rpkts = dd->f_portcntr(ppd, QIBPORTCNTR_PKTRCV);
	*xmit_wait = dd->f_portcntr(ppd, QIBPORTCNTR_SENDSTALL);

	ret = 0;

bail:
	return ret;
}

/**
 * qib_get_counters - get various chip counters
 * @ppd: the qlogic_ib port
 * @cntrs: counters are placed here
 *
 * Return the counters needed by recv_pma_get_portcounters().
 */
int qib_get_counters(struct qib_pportdata *ppd,
		     struct qib_verbs_counters *cntrs)
{
	int ret;

	if (!(ppd->dd->flags & QIB_PRESENT)) {
		/* no hardware, freeze, etc. */
		ret = -EINVAL;
		goto bail;
	}
	cntrs->symbol_error_counter =
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_IBSYMBOLERR);
	cntrs->link_error_recovery_counter =
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_IBLINKERRRECOV);
	/*
	 * The link downed counter counts when the other side downs the
	 * connection.  We add in the number of times we downed the link
	 * due to local link integrity errors to compensate.
	 */
	cntrs->link_downed_counter =
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_IBLINKDOWN);
	cntrs->port_rcv_errors =
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_RXDROPPKT) +
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_RCVOVFL) +
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_ERR_RLEN) +
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_INVALIDRLEN) +
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_ERRLINK) +
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_ERRICRC) +
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_ERRVCRC) +
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_ERRLPCRC) +
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_BADFORMAT);
	cntrs->port_rcv_errors +=
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_RXLOCALPHYERR);
	cntrs->port_rcv_errors +=
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_RXVLERR);
	cntrs->port_rcv_remphys_errors =
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_RCVEBP);
	cntrs->port_xmit_discards =
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_UNSUPVL);
	cntrs->port_xmit_data = ppd->dd->f_portcntr(ppd,
						    QIBPORTCNTR_WORDSEND);
	cntrs->port_rcv_data = ppd->dd->f_portcntr(ppd,
						   QIBPORTCNTR_WORDRCV);
	cntrs->port_xmit_packets = ppd->dd->f_portcntr(ppd,
						       QIBPORTCNTR_PKTSEND);
	cntrs->port_rcv_packets = ppd->dd->f_portcntr(ppd,
						      QIBPORTCNTR_PKTRCV);
	cntrs->local_link_integrity_errors =
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_LLI);
	cntrs->excessive_buffer_overrun_errors =
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_EXCESSBUFOVFL);
	cntrs->vl15_dropped =
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_VL15PKTDROP);

	ret = 0;

bail:
	return ret;
}

/**
 * qib_ib_piobufavail - callback when a PIO buffer is available
 * @dd: the device pointer
 *
 * This is called from qib_intr() at interrupt level when a PIO buffer is
 * available after qib_verbs_send() returned an error that no buffers were
 * available. Disable the interrupt if there are no more QPs waiting.
 */
void qib_ib_piobufavail(struct qib_devdata *dd)
{
	struct qib_ibdev *dev = &dd->verbs_dev;
	struct list_head *list;
	struct rvt_qp *qps[5];
	struct rvt_qp *qp;
	unsigned long flags;
	unsigned i, n;
	struct qib_qp_priv *priv;

	list = &dev->piowait;
	n = 0;

	/*
	 * Note: checking that the piowait list is empty and clearing
	 * the buffer available interrupt needs to be atomic or we
	 * could end up with QPs on the wait list with the interrupt
	 * disabled.
	 */
	spin_lock_irqsave(&dev->rdi.pending_lock, flags);
	while (!list_empty(list)) {
		if (n == ARRAY_SIZE(qps))
			goto full;
		priv = list_entry(list->next, struct qib_qp_priv, iowait);
		qp = priv->owner;
		list_del_init(&priv->iowait);
		atomic_inc(&qp->refcount);
		qps[n++] = qp;
	}
	dd->f_wantpiobuf_intr(dd, 0);
full:
	spin_unlock_irqrestore(&dev->rdi.pending_lock, flags);

	for (i = 0; i < n; i++) {
		qp = qps[i];

		spin_lock_irqsave(&qp->s_lock, flags);
		if (qp->s_flags & QIB_S_WAIT_PIO) {
			qp->s_flags &= ~QIB_S_WAIT_PIO;
			qib_schedule_send(qp);
		}
		spin_unlock_irqrestore(&qp->s_lock, flags);

		/* Notify qib_destroy_qp() if it is waiting. */
		if (atomic_dec_and_test(&qp->refcount))
			wake_up(&qp->wait);
	}
}
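
/*
 * The loop above deliberately moves waiters into a fixed-size local
 * array under rdi.pending_lock and only wakes them after the lock is
 * dropped: qib_schedule_send() needs qp->s_lock, which nests the other
 * way elsewhere in this file, and the refcount keeps each QP alive
 * across the unlocked window.  A generic sketch of the pattern (names
 * hypothetical):
 *
 *	spin_lock_irqsave(&wait_lock, flags);
 *	while (!list_empty(&waiters) && n < ARRAY_SIZE(local))
 *		local[n++] = dequeue_and_take_ref(&waiters);
 *	spin_unlock_irqrestore(&wait_lock, flags);
 *	for (i = 0; i < n; i++)
 *		wake_and_drop_ref(local[i]);
 */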

static int qib_query_port(struct ib_device *ibdev, u8 port,
			  struct ib_port_attr *props)
{
	struct qib_devdata *dd = dd_from_ibdev(ibdev);
	struct qib_ibport *ibp = to_iport(ibdev, port);
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	enum ib_mtu mtu;
	u16 lid = ppd->lid;

	memset(props, 0, sizeof(*props));
	props->lid = lid ? lid : be16_to_cpu(IB_LID_PERMISSIVE);
	props->lmc = ppd->lmc;
	props->sm_lid = ibp->rvp.sm_lid;
	props->sm_sl = ibp->rvp.sm_sl;
	props->state = dd->f_iblink_state(ppd->lastibcstat);
	props->phys_state = dd->f_ibphys_portstate(ppd->lastibcstat);
	props->port_cap_flags = ibp->rvp.port_cap_flags;
	props->gid_tbl_len = QIB_GUIDS_PER_PORT;
	props->max_msg_sz = 0x80000000;
	props->pkey_tbl_len = qib_get_npkeys(dd);
	props->bad_pkey_cntr = ibp->rvp.pkey_violations;
	props->qkey_viol_cntr = ibp->rvp.qkey_violations;
	props->active_width = ppd->link_width_active;
	/* See rate_show() */
	props->active_speed = ppd->link_speed_active;
	props->max_vl_num = qib_num_vls(ppd->vls_supported);
	props->init_type_reply = 0;

	props->max_mtu = qib_ibmtu ? qib_ibmtu : IB_MTU_4096;
	switch (ppd->ibmtu) {
	case 4096:
		mtu = IB_MTU_4096;
		break;
	case 2048:
		mtu = IB_MTU_2048;
		break;
	case 1024:
		mtu = IB_MTU_1024;
		break;
	case 512:
		mtu = IB_MTU_512;
		break;
	case 256:
		mtu = IB_MTU_256;
		break;
	default:
		mtu = IB_MTU_2048;
	}
	props->active_mtu = mtu;
	props->subnet_timeout = ibp->rvp.subnet_timeout;

	return 0;
}
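
/*
 * The switch above translates the byte MTU programmed into the
 * hardware into the IB enum.  The core provides the inverse mapping;
 * a minimal sketch of a round trip (illustrative only):
 *
 *	int bytes = ib_mtu_enum_to_int(props->active_mtu);
 *
 * where bytes matches ppd->ibmtu for the cases handled above.
 */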

static int qib_modify_device(struct ib_device *device,
			     int device_modify_mask,
			     struct ib_device_modify *device_modify)
{
	struct qib_devdata *dd = dd_from_ibdev(device);
	unsigned i;
	int ret;

	if (device_modify_mask & ~(IB_DEVICE_MODIFY_SYS_IMAGE_GUID |
				   IB_DEVICE_MODIFY_NODE_DESC)) {
		ret = -EOPNOTSUPP;
		goto bail;
	}

	if (device_modify_mask & IB_DEVICE_MODIFY_NODE_DESC) {
		memcpy(device->node_desc, device_modify->node_desc, 64);
		for (i = 0; i < dd->num_pports; i++) {
			struct qib_ibport *ibp = &dd->pport[i].ibport_data;

			qib_node_desc_chg(ibp);
		}
	}

	if (device_modify_mask & IB_DEVICE_MODIFY_SYS_IMAGE_GUID) {
		ib_qib_sys_image_guid =
			cpu_to_be64(device_modify->sys_image_guid);
		for (i = 0; i < dd->num_pports; i++) {
			struct qib_ibport *ibp = &dd->pport[i].ibport_data;

			qib_sys_guid_chg(ibp);
		}
	}

	ret = 0;

bail:
	return ret;
}
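
/*
 * A hedged sketch of a consumer reaching this through the core's
 * ib_modify_device() entry point (error handling elided).  Note the
 * node_desc copy above is a fixed 64 bytes, so the whole buffer should
 * be initialized:
 *
 *	struct ib_device_modify dm;
 *
 *	memset(&dm, 0, sizeof(dm));
 *	strlcpy(dm.node_desc, "my node", sizeof(dm.node_desc));
 *	ib_modify_device(device, IB_DEVICE_MODIFY_NODE_DESC, &dm);
 */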

static int qib_modify_port(struct ib_device *ibdev, u8 port,
			   int port_modify_mask, struct ib_port_modify *props)
{
	struct qib_ibport *ibp = to_iport(ibdev, port);
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);

	ibp->rvp.port_cap_flags |= props->set_port_cap_mask;
	ibp->rvp.port_cap_flags &= ~props->clr_port_cap_mask;
	if (props->set_port_cap_mask || props->clr_port_cap_mask)
		qib_cap_mask_chg(ibp);
	if (port_modify_mask & IB_PORT_SHUTDOWN)
		qib_set_linkstate(ppd, QIB_IB_LINKDOWN);
	if (port_modify_mask & IB_PORT_RESET_QKEY_CNTR)
		ibp->rvp.qkey_violations = 0;
	return 0;
}

static int qib_query_gid(struct ib_device *ibdev, u8 port,
			 int index, union ib_gid *gid)
{
	struct qib_devdata *dd = dd_from_ibdev(ibdev);
	int ret = 0;

	if (!port || port > dd->num_pports)
		ret = -EINVAL;
	else {
		struct qib_ibport *ibp = to_iport(ibdev, port);
		struct qib_pportdata *ppd = ppd_from_ibp(ibp);

		gid->global.subnet_prefix = ibp->rvp.gid_prefix;
		if (index == 0)
			gid->global.interface_id = ppd->guid;
		else if (index < QIB_GUIDS_PER_PORT)
			gid->global.interface_id = ibp->guids[index - 1];
		else
			ret = -EINVAL;
	}

	return ret;
}
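
/*
 * A port GID is the 64-bit subnet prefix followed by a 64-bit
 * interface ID: index 0 carries the port GUID, and higher indices the
 * alternate GUIDs the SM may assign.  Illustrative read, error
 * handling elided:
 *
 *	union ib_gid gid;
 *
 *	if (!qib_query_gid(ibdev, port, 0, &gid))
 *		pr_info("gid %llx:%llx\n",
 *			be64_to_cpu(gid.global.subnet_prefix),
 *			be64_to_cpu(gid.global.interface_id));
 */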

int qib_check_ah(struct ib_device *ibdev, struct ib_ah_attr *ah_attr)
{
	if (ah_attr->sl > 15)
		return -EINVAL;

	return 0;
}

static void qib_notify_new_ah(struct ib_device *ibdev,
			      struct ib_ah_attr *ah_attr,
			      struct rvt_ah *ah)
{
	struct qib_ibport *ibp;
	struct qib_pportdata *ppd;

	/*
	 * Do not trust reading anything from rvt_ah at this point as it is
	 * not fully set up yet.  We can, however, modify the fields we need
	 * to set.
	 */

	ibp = to_iport(ibdev, ah_attr->port_num);
	ppd = ppd_from_ibp(ibp);
	ah->vl = ibp->sl_to_vl[ah->attr.sl];
	ah->log_pmtu = ilog2(ppd->ibmtu);
}

struct ib_ah *qib_create_qp0_ah(struct qib_ibport *ibp, u16 dlid)
{
	struct ib_ah_attr attr;
	struct ib_ah *ah = ERR_PTR(-EINVAL);
	struct rvt_qp *qp0;

	memset(&attr, 0, sizeof(attr));
	attr.dlid = dlid;
	attr.port_num = ppd_from_ibp(ibp)->port;
	rcu_read_lock();
	qp0 = rcu_dereference(ibp->rvp.qp[0]);
	if (qp0)
		ah = ib_create_ah(qp0->ibqp.pd, &attr);
	rcu_read_unlock();
	return ah;
}
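
/*
 * Note the RCU pattern above: qp0 is looked up and used entirely
 * inside the read-side critical section, so it cannot be freed while
 * the AH is created from its PD.  A minimal caller sketch (the MAD
 * code uses this when addressing the SM; illustrative only):
 *
 *	struct ib_ah *ah = qib_create_qp0_ah(ibp, ibp->rvp.sm_lid);
 *
 *	if (!IS_ERR(ah)) {
 *		... send via QP0 ...
 *		ib_destroy_ah(ah);
 *	}
 */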

/**
 * qib_get_npkeys - return the size of the PKEY table for context 0
 * @dd: the qlogic_ib device
 */
unsigned qib_get_npkeys(struct qib_devdata *dd)
{
	return ARRAY_SIZE(dd->rcd[0]->pkeys);
}

/*
 * Return the indexed PKEY from the port PKEY table.
 * No need to validate rcd[ctxt]; the port is set up if we are here.
 */
unsigned qib_get_pkey(struct qib_ibport *ibp, unsigned index)
{
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	struct qib_devdata *dd = ppd->dd;
	unsigned ctxt = ppd->hw_pidx;
	unsigned ret;

	/* dd->rcd null if mini_init or some init failures */
	if (!dd->rcd || index >= ARRAY_SIZE(dd->rcd[ctxt]->pkeys))
		ret = 0;
	else
		ret = dd->rcd[ctxt]->pkeys[index];

	return ret;
}
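
/*
 * PKEY values match on their low 15 bits; bit 15 is only the
 * full/limited membership flag.  A hedged sketch of the usual
 * comparison over this table (helper name hypothetical):
 *
 *	static int pkey_matches(u16 a, u16 b)
 *	{
 *		return (a & 0x7fff) && (a & 0x7fff) == (b & 0x7fff);
 *	}
 */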

static void init_ibport(struct qib_pportdata *ppd)
{
	struct qib_verbs_counters cntrs;
	struct qib_ibport *ibp = &ppd->ibport_data;

	spin_lock_init(&ibp->rvp.lock);
	/* Set the prefix to the default value (see ch. 4.1.1) */
	ibp->rvp.gid_prefix = IB_DEFAULT_GID_PREFIX;
	ibp->rvp.sm_lid = be16_to_cpu(IB_LID_PERMISSIVE);
	ibp->rvp.port_cap_flags = IB_PORT_SYS_IMAGE_GUID_SUP |
		IB_PORT_CLIENT_REG_SUP | IB_PORT_SL_MAP_SUP |
		IB_PORT_TRAP_SUP | IB_PORT_AUTO_MIGR_SUP |
		IB_PORT_DR_NOTICE_SUP | IB_PORT_CAP_MASK_NOTICE_SUP |
		IB_PORT_OTHER_LOCAL_CHANGES_SUP;
	if (ppd->dd->flags & QIB_HAS_LINK_LATENCY)
		ibp->rvp.port_cap_flags |= IB_PORT_LINK_LATENCY_SUP;
	ibp->rvp.pma_counter_select[0] = IB_PMA_PORT_XMIT_DATA;
	ibp->rvp.pma_counter_select[1] = IB_PMA_PORT_RCV_DATA;
	ibp->rvp.pma_counter_select[2] = IB_PMA_PORT_XMIT_PKTS;
	ibp->rvp.pma_counter_select[3] = IB_PMA_PORT_RCV_PKTS;
	ibp->rvp.pma_counter_select[4] = IB_PMA_PORT_XMIT_WAIT;

	/* Snapshot current HW counters to "clear" them. */
	qib_get_counters(ppd, &cntrs);
	ibp->z_symbol_error_counter = cntrs.symbol_error_counter;
	ibp->z_link_error_recovery_counter =
		cntrs.link_error_recovery_counter;
	ibp->z_link_downed_counter = cntrs.link_downed_counter;
	ibp->z_port_rcv_errors = cntrs.port_rcv_errors;
	ibp->z_port_rcv_remphys_errors = cntrs.port_rcv_remphys_errors;
	ibp->z_port_xmit_discards = cntrs.port_xmit_discards;
	ibp->z_port_xmit_data = cntrs.port_xmit_data;
	ibp->z_port_rcv_data = cntrs.port_rcv_data;
	ibp->z_port_xmit_packets = cntrs.port_xmit_packets;
	ibp->z_port_rcv_packets = cntrs.port_rcv_packets;
	ibp->z_local_link_integrity_errors =
		cntrs.local_link_integrity_errors;
	ibp->z_excessive_buffer_overrun_errors =
		cntrs.excessive_buffer_overrun_errors;
	ibp->z_vl15_dropped = cntrs.vl15_dropped;
	RCU_INIT_POINTER(ibp->rvp.qp[0], NULL);
	RCU_INIT_POINTER(ibp->rvp.qp[1], NULL);
}

static int qib_port_immutable(struct ib_device *ibdev, u8 port_num,
			      struct ib_port_immutable *immutable)
{
	struct ib_port_attr attr;
	int err;

	err = qib_query_port(ibdev, port_num, &attr);
	if (err)
		return err;

	immutable->pkey_tbl_len = attr.pkey_tbl_len;
	immutable->gid_tbl_len = attr.gid_tbl_len;
	immutable->core_cap_flags = RDMA_CORE_PORT_IBA_IB;
	immutable->max_mad_size = IB_MGMT_MAD_SIZE;

	return 0;
}

/**
 * qib_fill_device_attr - Fill in rvt dev info device attributes.
 * @dd: the device data structure
 */
static void qib_fill_device_attr(struct qib_devdata *dd)
{
	struct rvt_dev_info *rdi = &dd->verbs_dev.rdi;

	memset(&rdi->dparms.props, 0, sizeof(rdi->dparms.props));

	rdi->dparms.props.max_pd = ib_qib_max_pds;
	rdi->dparms.props.max_ah = ib_qib_max_ahs;
	rdi->dparms.props.device_cap_flags = IB_DEVICE_BAD_PKEY_CNTR |
		IB_DEVICE_BAD_QKEY_CNTR | IB_DEVICE_SHUTDOWN_PORT |
		IB_DEVICE_SYS_IMAGE_GUID | IB_DEVICE_RC_RNR_NAK_GEN |
		IB_DEVICE_PORT_ACTIVE_EVENT | IB_DEVICE_SRQ_RESIZE;
	rdi->dparms.props.page_size_cap = PAGE_SIZE;
	rdi->dparms.props.vendor_id =
		QIB_SRC_OUI_1 << 16 | QIB_SRC_OUI_2 << 8 | QIB_SRC_OUI_3;
	rdi->dparms.props.vendor_part_id = dd->deviceid;
	rdi->dparms.props.hw_ver = dd->minrev;
	rdi->dparms.props.sys_image_guid = ib_qib_sys_image_guid;
	rdi->dparms.props.max_mr_size = ~0ULL;
	rdi->dparms.props.max_qp = ib_qib_max_qps;
	rdi->dparms.props.max_qp_wr = ib_qib_max_qp_wrs;
	rdi->dparms.props.max_sge = ib_qib_max_sges;
	rdi->dparms.props.max_sge_rd = ib_qib_max_sges;
	rdi->dparms.props.max_cq = ib_qib_max_cqs;
	rdi->dparms.props.max_cqe = ib_qib_max_cqes;
	rdi->dparms.props.max_mr = rdi->lkey_table.max;
	rdi->dparms.props.max_fmr = rdi->lkey_table.max;
	rdi->dparms.props.max_map_per_fmr = 32767;
	rdi->dparms.props.max_qp_rd_atom = QIB_MAX_RDMA_ATOMIC;
	rdi->dparms.props.max_qp_init_rd_atom = 255;
	rdi->dparms.props.max_srq = ib_qib_max_srqs;
	rdi->dparms.props.max_srq_wr = ib_qib_max_srq_wrs;
	rdi->dparms.props.max_srq_sge = ib_qib_max_srq_sges;
	rdi->dparms.props.atomic_cap = IB_ATOMIC_GLOB;
	rdi->dparms.props.max_pkeys = qib_get_npkeys(dd);
	rdi->dparms.props.max_mcast_grp = ib_qib_max_mcast_grps;
	rdi->dparms.props.max_mcast_qp_attach = ib_qib_max_mcast_qp_attached;
	rdi->dparms.props.max_total_mcast_qp_attach =
		rdi->dparms.props.max_mcast_qp_attach *
		rdi->dparms.props.max_mcast_grp;
}
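
/*
 * With ->query_device left NULL in qib_register_ib_device() below,
 * rdmavt answers device queries straight from rdi->dparms.props, so
 * the values filled in above are what consumers observe.  A hedged
 * sketch of that view (illustrative only):
 *
 *	struct ib_device_attr attr;
 *
 *	if (!ib_query_device(ibdev, &attr))
 *		pr_info("max_qp %d max_cqe %d\n", attr.max_qp, attr.max_cqe);
 */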

/**
 * qib_register_ib_device - register our device with the infiniband core
 * @dd: the device data structure
 * Return 0 on success, a negative errno on failure.
 */
int qib_register_ib_device(struct qib_devdata *dd)
{
	struct qib_ibdev *dev = &dd->verbs_dev;
	struct ib_device *ibdev = &dev->rdi.ibdev;
	struct qib_pportdata *ppd = dd->pport;
	unsigned i, ctxt;
	int ret;

	/* allocate parent object */
	dev->rdi.qp_dev = kzalloc(sizeof(*dev->rdi.qp_dev), GFP_KERNEL);
	if (!dev->rdi.qp_dev)
		return -ENOMEM;
	dev->rdi.qp_dev->qp_table_size = ib_qib_qp_table_size;
	dev->rdi.qp_dev->qp_table_bits = ilog2(ib_qib_qp_table_size);
	get_random_bytes(&dev->qp_rnd, sizeof(dev->qp_rnd));
	dev->rdi.qp_dev->qp_table = kmalloc_array(
				dev->rdi.qp_dev->qp_table_size,
				sizeof(*dev->rdi.qp_dev->qp_table),
				GFP_KERNEL);
	if (!dev->rdi.qp_dev->qp_table) {
		ret = -ENOMEM;
		goto err_qpt;
	}
	for (i = 0; i < dev->rdi.qp_dev->qp_table_size; i++)
		RCU_INIT_POINTER(dev->rdi.qp_dev->qp_table[i], NULL);

	for (i = 0; i < dd->num_pports; i++)
		init_ibport(ppd + i);

	/* Only need to initialize non-zero fields. */
	spin_lock_init(&dev->rdi.qp_dev->qpt_lock);
	spin_lock_init(&dev->n_cqs_lock);
	spin_lock_init(&dev->n_qps_lock);
	spin_lock_init(&dev->n_srqs_lock);
	spin_lock_init(&dev->n_mcast_grps_lock);
	init_timer(&dev->mem_timer);
	dev->mem_timer.function = mem_timer;
	dev->mem_timer.data = (unsigned long) dev;

	qib_init_qpn_table(dd, &dev->rdi.qp_dev->qpn_table);

	INIT_LIST_HEAD(&dev->piowait);
	INIT_LIST_HEAD(&dev->dmawait);
	INIT_LIST_HEAD(&dev->txwait);
	INIT_LIST_HEAD(&dev->memwait);
	INIT_LIST_HEAD(&dev->txreq_free);

	if (ppd->sdma_descq_cnt) {
		dev->pio_hdrs = dma_alloc_coherent(&dd->pcidev->dev,
						   ppd->sdma_descq_cnt *
						   sizeof(struct qib_pio_header),
						   &dev->pio_hdrs_phys,
						   GFP_KERNEL);
		if (!dev->pio_hdrs) {
			ret = -ENOMEM;
			goto err_hdrs;
		}
	}

	for (i = 0; i < ppd->sdma_descq_cnt; i++) {
		struct qib_verbs_txreq *tx;

		tx = kzalloc(sizeof(*tx), GFP_KERNEL);
		if (!tx) {
			ret = -ENOMEM;
			goto err_tx;
		}
		tx->hdr_inx = i;
		list_add(&tx->txreq.list, &dev->txreq_free);
	}

	/*
	 * The system image GUID is supposed to be the same for all
	 * IB HCAs in a single system but since there can be other
	 * device types in the system, we can't be sure this is unique.
	 */
	if (!ib_qib_sys_image_guid)
		ib_qib_sys_image_guid = ppd->guid;

	strlcpy(ibdev->name, "qib%d", IB_DEVICE_NAME_MAX);
	ibdev->owner = THIS_MODULE;
	ibdev->node_guid = ppd->guid;
	ibdev->uverbs_abi_ver = QIB_UVERBS_ABI_VERSION;
	ibdev->uverbs_cmd_mask =
		(1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
		(1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
		(1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
		(1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
		(1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
		(1ull << IB_USER_VERBS_CMD_CREATE_AH) |
		(1ull << IB_USER_VERBS_CMD_MODIFY_AH) |
		(1ull << IB_USER_VERBS_CMD_QUERY_AH) |
		(1ull << IB_USER_VERBS_CMD_DESTROY_AH) |
		(1ull << IB_USER_VERBS_CMD_REG_MR) |
		(1ull << IB_USER_VERBS_CMD_DEREG_MR) |
		(1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
		(1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
		(1ull << IB_USER_VERBS_CMD_RESIZE_CQ) |
		(1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
		(1ull << IB_USER_VERBS_CMD_POLL_CQ) |
		(1ull << IB_USER_VERBS_CMD_REQ_NOTIFY_CQ) |
		(1ull << IB_USER_VERBS_CMD_CREATE_QP) |
		(1ull << IB_USER_VERBS_CMD_QUERY_QP) |
		(1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
		(1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
		(1ull << IB_USER_VERBS_CMD_POST_SEND) |
		(1ull << IB_USER_VERBS_CMD_POST_RECV) |
		(1ull << IB_USER_VERBS_CMD_ATTACH_MCAST) |
		(1ull << IB_USER_VERBS_CMD_DETACH_MCAST) |
		(1ull << IB_USER_VERBS_CMD_CREATE_SRQ) |
		(1ull << IB_USER_VERBS_CMD_MODIFY_SRQ) |
		(1ull << IB_USER_VERBS_CMD_QUERY_SRQ) |
		(1ull << IB_USER_VERBS_CMD_DESTROY_SRQ) |
		(1ull << IB_USER_VERBS_CMD_POST_SRQ_RECV);
	ibdev->node_type = RDMA_NODE_IB_CA;
	ibdev->phys_port_cnt = dd->num_pports;
	ibdev->num_comp_vectors = 1;
	ibdev->dma_device = &dd->pcidev->dev;
	ibdev->query_device = NULL;
	ibdev->modify_device = qib_modify_device;
	ibdev->query_port = qib_query_port;
	ibdev->modify_port = qib_modify_port;
	ibdev->query_pkey = NULL;
	ibdev->query_gid = qib_query_gid;
	ibdev->alloc_ucontext = NULL;
	ibdev->dealloc_ucontext = NULL;
	ibdev->alloc_pd = NULL;
	ibdev->dealloc_pd = NULL;
	ibdev->create_ah = NULL;
	ibdev->destroy_ah = NULL;
	ibdev->modify_ah = NULL;
	ibdev->query_ah = NULL;
	ibdev->create_srq = qib_create_srq;
	ibdev->modify_srq = qib_modify_srq;
	ibdev->query_srq = qib_query_srq;
	ibdev->destroy_srq = qib_destroy_srq;
	ibdev->create_qp = qib_create_qp;
	ibdev->modify_qp = qib_modify_qp;
	ibdev->query_qp = qib_query_qp;
	ibdev->destroy_qp = qib_destroy_qp;
	ibdev->post_send = qib_post_send;
	ibdev->post_recv = qib_post_receive;
	ibdev->post_srq_recv = qib_post_srq_receive;
	ibdev->create_cq = qib_create_cq;
	ibdev->destroy_cq = qib_destroy_cq;
	ibdev->resize_cq = qib_resize_cq;
	ibdev->poll_cq = qib_poll_cq;
	ibdev->req_notify_cq = qib_req_notify_cq;
	ibdev->get_dma_mr = NULL;
	ibdev->reg_user_mr = NULL;
	ibdev->dereg_mr = NULL;
	ibdev->alloc_mr = NULL;
	ibdev->map_mr_sg = NULL;
	ibdev->alloc_fmr = NULL;
	ibdev->map_phys_fmr = NULL;
	ibdev->unmap_fmr = NULL;
	ibdev->dealloc_fmr = NULL;
	ibdev->attach_mcast = qib_multicast_attach;
	ibdev->detach_mcast = qib_multicast_detach;
	ibdev->process_mad = qib_process_mad;
	ibdev->mmap = NULL;
	ibdev->dma_ops = NULL;
	ibdev->get_port_immutable = qib_port_immutable;

	snprintf(ibdev->node_desc, sizeof(ibdev->node_desc),
		 "Intel Infiniband HCA %s", init_utsname()->nodename);

	/*
	 * Fill in rvt info object.
	 */
	dd->verbs_dev.rdi.driver_f.port_callback = qib_create_port_files;
	dd->verbs_dev.rdi.driver_f.get_card_name = qib_get_card_name;
	dd->verbs_dev.rdi.driver_f.get_pci_dev = qib_get_pci_dev;
	dd->verbs_dev.rdi.driver_f.check_ah = qib_check_ah;
	dd->verbs_dev.rdi.driver_f.notify_new_ah = qib_notify_new_ah;
	dd->verbs_dev.rdi.flags = (RVT_FLAG_QP_INIT_DRIVER |
				   RVT_FLAG_CQ_INIT_DRIVER);
	dd->verbs_dev.rdi.dparms.lkey_table_size = qib_lkey_table_size;
	dd->verbs_dev.rdi.dparms.nports = dd->num_pports;
	dd->verbs_dev.rdi.dparms.npkeys = qib_get_npkeys(dd);

	qib_fill_device_attr(dd);

	ppd = dd->pport;
	for (i = 0; i < dd->num_pports; i++, ppd++) {
		ctxt = ppd->hw_pidx;
		rvt_init_port(&dd->verbs_dev.rdi,
			      &ppd->ibport_data.rvp,
			      i,
			      dd->rcd[ctxt]->pkeys);
	}

	ret = rvt_register_device(&dd->verbs_dev.rdi);
	if (ret)
		goto err_reg;

	ret = qib_create_agents(dev);
	if (ret)
		goto err_agents;

	ret = qib_verbs_register_sysfs(dd);
	if (ret)
		goto err_class;

	goto bail;

err_class:
	qib_free_agents(dev);
err_agents:
	rvt_unregister_device(&dd->verbs_dev.rdi);
err_reg:
err_tx:
	while (!list_empty(&dev->txreq_free)) {
		struct list_head *l = dev->txreq_free.next;
		struct qib_verbs_txreq *tx;

		list_del(l);
		tx = list_entry(l, struct qib_verbs_txreq, txreq.list);
		kfree(tx);
	}
	/* use dd->pport here: ppd was advanced past the last port above */
	if (dd->pport->sdma_descq_cnt)
		dma_free_coherent(&dd->pcidev->dev,
				  dd->pport->sdma_descq_cnt *
				  sizeof(struct qib_pio_header),
				  dev->pio_hdrs, dev->pio_hdrs_phys);
err_hdrs:
	kfree(dev->rdi.qp_dev->qp_table);
err_qpt:
	kfree(dev->rdi.qp_dev);
	qib_dev_err(dd, "cannot register verbs: %d!\n", -ret);
bail:
	return ret;
}

void qib_unregister_ib_device(struct qib_devdata *dd)
{
	struct qib_ibdev *dev = &dd->verbs_dev;
	u32 qps_inuse;

	qib_verbs_unregister_sysfs(dd);

	qib_free_agents(dev);

	rvt_unregister_device(&dd->verbs_dev.rdi);

	if (!list_empty(&dev->piowait))
		qib_dev_err(dd, "piowait list not empty!\n");
	if (!list_empty(&dev->dmawait))
		qib_dev_err(dd, "dmawait list not empty!\n");
	if (!list_empty(&dev->txwait))
		qib_dev_err(dd, "txwait list not empty!\n");
	if (!list_empty(&dev->memwait))
		qib_dev_err(dd, "memwait list not empty!\n");

	qps_inuse = qib_free_all_qps(dd);
	if (qps_inuse)
		qib_dev_err(dd, "QP memory leak! %u still in use\n",
			    qps_inuse);

	del_timer_sync(&dev->mem_timer);
	qib_free_qpn_table(&dev->rdi.qp_dev->qpn_table);
	while (!list_empty(&dev->txreq_free)) {
		struct list_head *l = dev->txreq_free.next;
		struct qib_verbs_txreq *tx;

		list_del(l);
		tx = list_entry(l, struct qib_verbs_txreq, txreq.list);
		kfree(tx);
	}
	if (dd->pport->sdma_descq_cnt)
		dma_free_coherent(&dd->pcidev->dev,
				  dd->pport->sdma_descq_cnt *
				  sizeof(struct qib_pio_header),
				  dev->pio_hdrs, dev->pio_hdrs_phys);
	kfree(dev->rdi.qp_dev->qp_table);
	/* free the parent object allocated at registration time as well */
	kfree(dev->rdi.qp_dev);
}

/*
 * This must be called with s_lock held.
 */
void qib_schedule_send(struct rvt_qp *qp)
{
	struct qib_qp_priv *priv = qp->priv;

	if (qib_send_ok(qp)) {
		struct qib_ibport *ibp =
			to_iport(qp->ibqp.device, qp->port_num);
		struct qib_pportdata *ppd = ppd_from_ibp(ibp);

		queue_work(ppd->qib_wq, &priv->s_work);
	}
}
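
/*
 * A minimal sketch of the expected calling pattern, as in
 * qib_ib_piobufavail() above:
 *
 *	spin_lock_irqsave(&qp->s_lock, flags);
 *	if (...send is now possible...)
 *		qib_schedule_send(qp);
 *	spin_unlock_irqrestore(&qp->s_lock, flags);
 */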