/*
 * Copyright (c) 2012, 2013 Intel Corporation.  All rights reserved.
 * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <rdma/ib_mad.h>
#include <rdma/ib_user_verbs.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/utsname.h>
#include <linux/rculist.h>
#include <linux/mm.h>
#include <linux/random.h>
#include <linux/vmalloc.h>
#include <rdma/rdma_vt.h>

#include "qib.h"
#include "qib_common.h"

static unsigned int ib_qib_qp_table_size = 256;
module_param_named(qp_table_size, ib_qib_qp_table_size, uint, S_IRUGO);
MODULE_PARM_DESC(qp_table_size, "QP table size");

static unsigned int qib_lkey_table_size = 16;
module_param_named(lkey_table_size, qib_lkey_table_size, uint,
		   S_IRUGO);
MODULE_PARM_DESC(lkey_table_size,
		 "LKEY table size in bits (2^n, 1 <= n <= 23)");

static unsigned int ib_qib_max_pds = 0xFFFF;
module_param_named(max_pds, ib_qib_max_pds, uint, S_IRUGO);
MODULE_PARM_DESC(max_pds,
		 "Maximum number of protection domains to support");

static unsigned int ib_qib_max_ahs = 0xFFFF;
module_param_named(max_ahs, ib_qib_max_ahs, uint, S_IRUGO);
MODULE_PARM_DESC(max_ahs, "Maximum number of address handles to support");

unsigned int ib_qib_max_cqes = 0x2FFFF;
module_param_named(max_cqes, ib_qib_max_cqes, uint, S_IRUGO);
MODULE_PARM_DESC(max_cqes,
		 "Maximum number of completion queue entries to support");

unsigned int ib_qib_max_cqs = 0x1FFFF;
module_param_named(max_cqs, ib_qib_max_cqs, uint, S_IRUGO);
MODULE_PARM_DESC(max_cqs, "Maximum number of completion queues to support");

unsigned int ib_qib_max_qp_wrs = 0x3FFF;
module_param_named(max_qp_wrs, ib_qib_max_qp_wrs, uint, S_IRUGO);
MODULE_PARM_DESC(max_qp_wrs, "Maximum number of QP WRs to support");

unsigned int ib_qib_max_qps = 16384;
module_param_named(max_qps, ib_qib_max_qps, uint, S_IRUGO);
MODULE_PARM_DESC(max_qps, "Maximum number of QPs to support");

unsigned int ib_qib_max_sges = 0x60;
module_param_named(max_sges, ib_qib_max_sges, uint, S_IRUGO);
MODULE_PARM_DESC(max_sges, "Maximum number of SGEs to support");

unsigned int ib_qib_max_mcast_grps = 16384;
module_param_named(max_mcast_grps, ib_qib_max_mcast_grps, uint, S_IRUGO);
MODULE_PARM_DESC(max_mcast_grps,
		 "Maximum number of multicast groups to support");

unsigned int ib_qib_max_mcast_qp_attached = 16;
module_param_named(max_mcast_qp_attached, ib_qib_max_mcast_qp_attached,
		   uint, S_IRUGO);
MODULE_PARM_DESC(max_mcast_qp_attached,
		 "Maximum number of attached QPs to support");

unsigned int ib_qib_max_srqs = 1024;
module_param_named(max_srqs, ib_qib_max_srqs, uint, S_IRUGO);
MODULE_PARM_DESC(max_srqs, "Maximum number of SRQs to support");

unsigned int ib_qib_max_srq_sges = 128;
module_param_named(max_srq_sges, ib_qib_max_srq_sges, uint, S_IRUGO);
MODULE_PARM_DESC(max_srq_sges, "Maximum number of SRQ SGEs to support");

unsigned int ib_qib_max_srq_wrs = 0x1FFFF;
module_param_named(max_srq_wrs, ib_qib_max_srq_wrs, uint, S_IRUGO);
MODULE_PARM_DESC(max_srq_wrs, "Maximum number of SRQ WRs to support");

static unsigned int ib_qib_disable_sma;
module_param_named(disable_sma, ib_qib_disable_sma, uint, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(disable_sma, "Disable the SMA");

/*
 * Translate ib_wr_opcode into ib_wc_opcode.
 */
const enum ib_wc_opcode ib_qib_wc_opcode[] = {
	[IB_WR_RDMA_WRITE] = IB_WC_RDMA_WRITE,
	[IB_WR_RDMA_WRITE_WITH_IMM] = IB_WC_RDMA_WRITE,
	[IB_WR_SEND] = IB_WC_SEND,
	[IB_WR_SEND_WITH_IMM] = IB_WC_SEND,
	[IB_WR_RDMA_READ] = IB_WC_RDMA_READ,
	[IB_WR_ATOMIC_CMP_AND_SWP] = IB_WC_COMP_SWAP,
	[IB_WR_ATOMIC_FETCH_AND_ADD] = IB_WC_FETCH_ADD
};

/*
 * System image GUID.
 */
__be64 ib_qib_sys_image_guid;

/**
 * qib_copy_sge - copy data to SGE memory
 * @ss: the SGE state
 * @data: the data to copy
 * @length: the length of the data
 * @release: boolean to release MR
 */
void qib_copy_sge(struct rvt_sge_state *ss, void *data, u32 length, int release)
{
	struct rvt_sge *sge = &ss->sge;

	while (length) {
		u32 len = sge->length;

		if (len > length)
			len = length;
		if (len > sge->sge_length)
			len = sge->sge_length;
		BUG_ON(len == 0);
		memcpy(sge->vaddr, data, len);
		sge->vaddr += len;
		sge->length -= len;
		sge->sge_length -= len;
		if (sge->sge_length == 0) {
			if (release)
				rvt_put_mr(sge->mr);
			if (--ss->num_sge)
				*sge = *ss->sg_list++;
		} else if (sge->length == 0 && sge->mr->lkey) {
			if (++sge->n >= RVT_SEGSZ) {
				if (++sge->m >= sge->mr->mapsz)
					break;
				sge->n = 0;
			}
			sge->vaddr =
				sge->mr->map[sge->m]->segs[sge->n].vaddr;
			sge->length =
				sge->mr->map[sge->m]->segs[sge->n].length;
		}
		data += len;
		length -= len;
	}
}

/**
 * qib_skip_sge - skip over SGE memory - XXX almost dup of prev func
 * @ss: the SGE state
 * @length: the number of bytes to skip
 * @release: boolean to release MR
 */
void qib_skip_sge(struct rvt_sge_state *ss, u32 length, int release)
{
	struct rvt_sge *sge = &ss->sge;

	while (length) {
		u32 len = sge->length;

		if (len > length)
			len = length;
		if (len > sge->sge_length)
			len = sge->sge_length;
		BUG_ON(len == 0);
		sge->vaddr += len;
		sge->length -= len;
		sge->sge_length -= len;
		if (sge->sge_length == 0) {
			if (release)
				rvt_put_mr(sge->mr);
			if (--ss->num_sge)
				*sge = *ss->sg_list++;
		} else if (sge->length == 0 && sge->mr->lkey) {
			if (++sge->n >= RVT_SEGSZ) {
				if (++sge->m >= sge->mr->mapsz)
					break;
				sge->n = 0;
			}
			sge->vaddr =
				sge->mr->map[sge->m]->segs[sge->n].vaddr;
			sge->length =
				sge->mr->map[sge->m]->segs[sge->n].length;
		}
		length -= len;
	}
}

/*
 * Count the number of DMA descriptors needed to send length bytes of data.
 * Don't modify the rvt_sge_state to get the count.
 * Return zero if any of the segments is not aligned.
 */
static u32 qib_count_sge(struct rvt_sge_state *ss, u32 length)
{
	struct rvt_sge *sg_list = ss->sg_list;
	struct rvt_sge sge = ss->sge;
	u8 num_sge = ss->num_sge;
	u32 ndesc = 1;	/* count the header */

	while (length) {
		u32 len = sge.length;

		if (len > length)
			len = length;
		if (len > sge.sge_length)
			len = sge.sge_length;
		BUG_ON(len == 0);
		if (((long) sge.vaddr & (sizeof(u32) - 1)) ||
		    (len != length && (len & (sizeof(u32) - 1)))) {
			ndesc = 0;
			break;
		}
		ndesc++;
		sge.vaddr += len;
		sge.length -= len;
		sge.sge_length -= len;
		if (sge.sge_length == 0) {
			if (--num_sge)
				sge = *sg_list++;
		} else if (sge.length == 0 && sge.mr->lkey) {
			if (++sge.n >= RVT_SEGSZ) {
				if (++sge.m >= sge.mr->mapsz)
					break;
				sge.n = 0;
			}
			sge.vaddr =
				sge.mr->map[sge.m]->segs[sge.n].vaddr;
			sge.length =
				sge.mr->map[sge.m]->segs[sge.n].length;
		}
		length -= len;
	}
	return ndesc;
}

/*
 * Copy from the SGEs to the data buffer.
 */
static void qib_copy_from_sge(void *data, struct rvt_sge_state *ss, u32 length)
{
	struct rvt_sge *sge = &ss->sge;

	while (length) {
		u32 len = sge->length;

		if (len > length)
			len = length;
		if (len > sge->sge_length)
			len = sge->sge_length;
		BUG_ON(len == 0);
		memcpy(data, sge->vaddr, len);
		sge->vaddr += len;
		sge->length -= len;
		sge->sge_length -= len;
		if (sge->sge_length == 0) {
			if (--ss->num_sge)
				*sge = *ss->sg_list++;
		} else if (sge->length == 0 && sge->mr->lkey) {
			if (++sge->n >= RVT_SEGSZ) {
				if (++sge->m >= sge->mr->mapsz)
					break;
				sge->n = 0;
			}
			sge->vaddr =
				sge->mr->map[sge->m]->segs[sge->n].vaddr;
			sge->length =
				sge->mr->map[sge->m]->segs[sge->n].length;
		}
		data += len;
		length -= len;
	}
}

/**
 * qib_qp_rcv - processing an incoming packet on a QP
 * @rcd: the context pointer
 * @hdr: the packet header
 * @has_grh: true if the packet has a GRH
 * @data: the packet data
 * @tlen: the packet length
 * @qp: the QP the packet came on
 *
 * This is called from qib_ib_rcv() to process an incoming packet
 * for the given QP.
 * Called at interrupt level.
 */
static void qib_qp_rcv(struct qib_ctxtdata *rcd, struct qib_ib_header *hdr,
		       int has_grh, void *data, u32 tlen, struct rvt_qp *qp)
{
	struct qib_ibport *ibp = &rcd->ppd->ibport_data;

	spin_lock(&qp->r_lock);

	/* Check for valid receive state. */
	if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK)) {
		ibp->rvp.n_pkt_drops++;
		goto unlock;
	}

	switch (qp->ibqp.qp_type) {
	case IB_QPT_SMI:
	case IB_QPT_GSI:
		if (ib_qib_disable_sma)
			break;
		/* FALLTHROUGH */
	case IB_QPT_UD:
		qib_ud_rcv(ibp, hdr, has_grh, data, tlen, qp);
		break;

	case IB_QPT_RC:
		qib_rc_rcv(rcd, hdr, has_grh, data, tlen, qp);
		break;

	case IB_QPT_UC:
		qib_uc_rcv(ibp, hdr, has_grh, data, tlen, qp);
		break;

	default:
		break;
	}

unlock:
	spin_unlock(&qp->r_lock);
}

/**
 * qib_ib_rcv - process an incoming packet
 * @rcd: the context pointer
 * @rhdr: the header of the packet
 * @data: the packet payload
 * @tlen: the packet length
 *
 * This is called from qib_kreceive() to process an incoming packet at
 * interrupt level. Tlen is the length of the header + data + CRC in bytes.
 */
void qib_ib_rcv(struct qib_ctxtdata *rcd, void *rhdr, void *data, u32 tlen)
{
	struct qib_pportdata *ppd = rcd->ppd;
	struct qib_ibport *ibp = &ppd->ibport_data;
	struct qib_ib_header *hdr = rhdr;
	struct qib_other_headers *ohdr;
	struct rvt_qp *qp;
	u32 qp_num;
	int lnh;
	u8 opcode;
	u16 lid;

	/* 24 == LRH+BTH+CRC */
	if (unlikely(tlen < 24))
		goto drop;

	/* Check for a valid destination LID (see ch. 7.11.1). */
	lid = be16_to_cpu(hdr->lrh[1]);
	if (lid < be16_to_cpu(IB_MULTICAST_LID_BASE)) {
		lid &= ~((1 << ppd->lmc) - 1);
		if (unlikely(lid != ppd->lid))
			goto drop;
	}

	/* Check for GRH */
	lnh = be16_to_cpu(hdr->lrh[0]) & 3;
	if (lnh == QIB_LRH_BTH)
		ohdr = &hdr->u.oth;
	else if (lnh == QIB_LRH_GRH) {
		u32 vtf;

		ohdr = &hdr->u.l.oth;
		if (hdr->u.l.grh.next_hdr != IB_GRH_NEXT_HDR)
			goto drop;
		vtf = be32_to_cpu(hdr->u.l.grh.version_tclass_flow);
		if ((vtf >> IB_GRH_VERSION_SHIFT) != IB_GRH_VERSION)
			goto drop;
	} else
		goto drop;

	opcode = (be32_to_cpu(ohdr->bth[0]) >> 24) & 0x7f;
#ifdef CONFIG_DEBUG_FS
	rcd->opstats->stats[opcode].n_bytes += tlen;
	rcd->opstats->stats[opcode].n_packets++;
#endif

	/* Get the destination QP number. */
	qp_num = be32_to_cpu(ohdr->bth[1]) & QIB_QPN_MASK;
	if (qp_num == QIB_MULTICAST_QPN) {
		struct rvt_mcast *mcast;
		struct rvt_mcast_qp *p;

		if (lnh != QIB_LRH_GRH)
			goto drop;
		mcast = rvt_mcast_find(&ibp->rvp, &hdr->u.l.grh.dgid);
		if (mcast == NULL)
			goto drop;
		this_cpu_inc(ibp->pmastats->n_multicast_rcv);
		list_for_each_entry_rcu(p, &mcast->qp_list, list)
			qib_qp_rcv(rcd, hdr, 1, data, tlen, p->qp);
		/*
		 * Notify rvt_multicast_detach() if it is waiting for us
		 * to finish.
		 */
		if (atomic_dec_return(&mcast->refcount) <= 1)
			wake_up(&mcast->wait);
	} else {
		if (rcd->lookaside_qp) {
			if (rcd->lookaside_qpn != qp_num) {
				if (atomic_dec_and_test(
					&rcd->lookaside_qp->refcount))
					wake_up(
					 &rcd->lookaside_qp->wait);
				rcd->lookaside_qp = NULL;
			}
		}
		if (!rcd->lookaside_qp) {
			qp = qib_lookup_qpn(ibp, qp_num);
			if (!qp)
				goto drop;
			rcd->lookaside_qp = qp;
			rcd->lookaside_qpn = qp_num;
		} else
			qp = rcd->lookaside_qp;
		this_cpu_inc(ibp->pmastats->n_unicast_rcv);
		qib_qp_rcv(rcd, hdr, lnh == QIB_LRH_GRH, data, tlen, qp);
	}
	return;

drop:
	ibp->rvp.n_pkt_drops++;
}

/*
 * This is called from a timer to check for QPs
 * which need kernel memory in order to send a packet.
 */
static void mem_timer(unsigned long data)
{
	struct qib_ibdev *dev = (struct qib_ibdev *) data;
	struct list_head *list = &dev->memwait;
	struct rvt_qp *qp = NULL;
	struct qib_qp_priv *priv = NULL;
	unsigned long flags;

	spin_lock_irqsave(&dev->rdi.pending_lock, flags);
	if (!list_empty(list)) {
		priv = list_entry(list->next, struct qib_qp_priv, iowait);
		qp = priv->owner;
		list_del_init(&priv->iowait);
		atomic_inc(&qp->refcount);
		if (!list_empty(list))
			mod_timer(&dev->mem_timer, jiffies + 1);
	}
	spin_unlock_irqrestore(&dev->rdi.pending_lock, flags);

	if (qp) {
		spin_lock_irqsave(&qp->s_lock, flags);
		if (qp->s_flags & RVT_S_WAIT_KMEM) {
			qp->s_flags &= ~RVT_S_WAIT_KMEM;
			qib_schedule_send(qp);
		}
		spin_unlock_irqrestore(&qp->s_lock, flags);
		if (atomic_dec_and_test(&qp->refcount))
			wake_up(&qp->wait);
	}
}

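/*
 * Advance the SGE state past @length bytes that have already been copied,
 * stepping to the next SGE or MR segment as each one is consumed.
 */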
static void update_sge(struct rvt_sge_state *ss, u32 length)
{
	struct rvt_sge *sge = &ss->sge;

	sge->vaddr += length;
	sge->length -= length;
	sge->sge_length -= length;
	if (sge->sge_length == 0) {
		if (--ss->num_sge)
			*sge = *ss->sg_list++;
	} else if (sge->length == 0 && sge->mr->lkey) {
		if (++sge->n >= RVT_SEGSZ) {
			if (++sge->m >= sge->mr->mapsz)
				return;
			sge->n = 0;
		}
		sge->vaddr = sge->mr->map[sge->m]->segs[sge->n].vaddr;
		sge->length = sge->mr->map[sge->m]->segs[sge->n].length;
	}
}

#ifdef __LITTLE_ENDIAN
static inline u32 get_upper_bits(u32 data, u32 shift)
{
	return data >> shift;
}

static inline u32 set_upper_bits(u32 data, u32 shift)
{
	return data << shift;
}

static inline u32 clear_upper_bytes(u32 data, u32 n, u32 off)
{
	data <<= ((sizeof(u32) - n) * BITS_PER_BYTE);
	data >>= ((sizeof(u32) - n - off) * BITS_PER_BYTE);
	return data;
}
#else
static inline u32 get_upper_bits(u32 data, u32 shift)
{
	return data << shift;
}

static inline u32 set_upper_bits(u32 data, u32 shift)
{
	return data >> shift;
}

static inline u32 clear_upper_bytes(u32 data, u32 n, u32 off)
{
	data >>= ((sizeof(u32) - n) * BITS_PER_BYTE);
	data <<= ((sizeof(u32) - n - off) * BITS_PER_BYTE);
	return data;
}
#endif

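/*
 * Copy data from the SGE state into a PIO send buffer one dword at a time,
 * coalescing unaligned source bytes so that only whole 32-bit words are
 * written to the chip.  The last (trigger) word is written separately,
 * with write-combining flushes when @flush_wc is set.
 */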
static void copy_io(u32 __iomem *piobuf, struct rvt_sge_state *ss,
		    u32 length, unsigned flush_wc)
{
	u32 extra = 0;
	u32 data = 0;
	u32 last;

	while (1) {
		u32 len = ss->sge.length;
		u32 off;

		if (len > length)
			len = length;
		if (len > ss->sge.sge_length)
			len = ss->sge.sge_length;
		BUG_ON(len == 0);
		/* If the source address is not aligned, try to align it. */
		off = (unsigned long)ss->sge.vaddr & (sizeof(u32) - 1);
		if (off) {
			u32 *addr = (u32 *)((unsigned long)ss->sge.vaddr &
					    ~(sizeof(u32) - 1));
			u32 v = get_upper_bits(*addr, off * BITS_PER_BYTE);
			u32 y;

			y = sizeof(u32) - off;
			if (len > y)
				len = y;
			if (len + extra >= sizeof(u32)) {
				data |= set_upper_bits(v, extra *
						       BITS_PER_BYTE);
				len = sizeof(u32) - extra;
				if (len == length) {
					last = data;
					break;
				}
				__raw_writel(data, piobuf);
				piobuf++;
				extra = 0;
				data = 0;
			} else {
				/* Clear unused upper bytes */
				data |= clear_upper_bytes(v, len, extra);
				if (len == length) {
					last = data;
					break;
				}
				extra += len;
			}
		} else if (extra) {
			/* Source address is aligned. */
			u32 *addr = (u32 *) ss->sge.vaddr;
			int shift = extra * BITS_PER_BYTE;
			int ushift = 32 - shift;
			u32 l = len;

			while (l >= sizeof(u32)) {
				u32 v = *addr;

				data |= set_upper_bits(v, shift);
				__raw_writel(data, piobuf);
				data = get_upper_bits(v, ushift);
				piobuf++;
				addr++;
				l -= sizeof(u32);
			}
			/*
			 * We still have 'extra' number of bytes leftover.
			 */
			if (l) {
				u32 v = *addr;

				if (l + extra >= sizeof(u32)) {
					data |= set_upper_bits(v, shift);
					len -= l + extra - sizeof(u32);
					if (len == length) {
						last = data;
						break;
					}
					__raw_writel(data, piobuf);
					piobuf++;
					extra = 0;
					data = 0;
				} else {
					/* Clear unused upper bytes */
					data |= clear_upper_bytes(v, l, extra);
					if (len == length) {
						last = data;
						break;
					}
					extra += l;
				}
			} else if (len == length) {
				last = data;
				break;
			}
		} else if (len == length) {
			u32 w;

			/*
			 * Need to round up for the last dword in the
			 * packet.
			 */
			w = (len + 3) >> 2;
			qib_pio_copy(piobuf, ss->sge.vaddr, w - 1);
			piobuf += w - 1;
			last = ((u32 *) ss->sge.vaddr)[w - 1];
			break;
		} else {
			u32 w = len >> 2;

			qib_pio_copy(piobuf, ss->sge.vaddr, w);
			piobuf += w;

			extra = len & (sizeof(u32) - 1);
			if (extra) {
				u32 v = ((u32 *) ss->sge.vaddr)[w];

				/* Clear unused upper bytes */
				data = clear_upper_bytes(v, extra, 0);
			}
		}
		update_sge(ss, len);
		length -= len;
	}
	/* Update address before sending packet. */
	update_sge(ss, length);
	if (flush_wc) {
		/* must flush early everything before trigger word */
		qib_flush_wc();
		__raw_writel(last, piobuf);
		/* be sure trigger word is written */
		qib_flush_wc();
	} else
		__raw_writel(last, piobuf);
}

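/*
 * Slow path for allocating a verbs txreq: retry the free list while
 * holding qp->s_lock, and if it is still empty queue the QP on the
 * txwait list and return -EBUSY so the send is retried later.
 */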
static noinline struct qib_verbs_txreq *__get_txreq(struct qib_ibdev *dev,
					   struct rvt_qp *qp)
{
	struct qib_qp_priv *priv = qp->priv;
	struct qib_verbs_txreq *tx;
	unsigned long flags;

	spin_lock_irqsave(&qp->s_lock, flags);
	spin_lock(&dev->rdi.pending_lock);

	if (!list_empty(&dev->txreq_free)) {
		struct list_head *l = dev->txreq_free.next;

		list_del(l);
		spin_unlock(&dev->rdi.pending_lock);
		spin_unlock_irqrestore(&qp->s_lock, flags);
		tx = list_entry(l, struct qib_verbs_txreq, txreq.list);
	} else {
		if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK &&
		    list_empty(&priv->iowait)) {
			dev->n_txwait++;
			qp->s_flags |= RVT_S_WAIT_TX;
			list_add_tail(&priv->iowait, &dev->txwait);
		}
		qp->s_flags &= ~RVT_S_BUSY;
		spin_unlock(&dev->rdi.pending_lock);
		spin_unlock_irqrestore(&qp->s_lock, flags);
		tx = ERR_PTR(-EBUSY);
	}
	return tx;
}

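/*
 * Fast path: pop a txreq from the free list under the pending_lock only;
 * fall back to __get_txreq() when the list is empty.
 */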
static inline struct qib_verbs_txreq *get_txreq(struct qib_ibdev *dev,
					 struct rvt_qp *qp)
{
	struct qib_verbs_txreq *tx;
	unsigned long flags;

	spin_lock_irqsave(&dev->rdi.pending_lock, flags);
	/* assume the list non empty */
	if (likely(!list_empty(&dev->txreq_free))) {
		struct list_head *l = dev->txreq_free.next;

		list_del(l);
		spin_unlock_irqrestore(&dev->rdi.pending_lock, flags);
		tx = list_entry(l, struct qib_verbs_txreq, txreq.list);
	} else {
		/* call slow path to get the extra lock */
		spin_unlock_irqrestore(&dev->rdi.pending_lock, flags);
		tx = __get_txreq(dev, qp);
	}
	return tx;
}

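/*
 * Return a txreq to the free list and wake the first QP waiting for one.
 */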
void qib_put_txreq(struct qib_verbs_txreq *tx)
{
	struct qib_ibdev *dev;
	struct rvt_qp *qp;
	struct qib_qp_priv *priv;
	unsigned long flags;

	qp = tx->qp;
	dev = to_idev(qp->ibqp.device);

	if (atomic_dec_and_test(&qp->refcount))
		wake_up(&qp->wait);
	if (tx->mr) {
		rvt_put_mr(tx->mr);
		tx->mr = NULL;
	}
	if (tx->txreq.flags & QIB_SDMA_TXREQ_F_FREEBUF) {
		tx->txreq.flags &= ~QIB_SDMA_TXREQ_F_FREEBUF;
		dma_unmap_single(&dd_from_dev(dev)->pcidev->dev,
				 tx->txreq.addr, tx->hdr_dwords << 2,
				 DMA_TO_DEVICE);
		kfree(tx->align_buf);
	}

	spin_lock_irqsave(&dev->rdi.pending_lock, flags);

	/* Put struct back on free list */
	list_add(&tx->txreq.list, &dev->txreq_free);

	if (!list_empty(&dev->txwait)) {
		/* Wake up first QP wanting a free struct */
		priv = list_entry(dev->txwait.next, struct qib_qp_priv,
				  iowait);
		qp = priv->owner;
		list_del_init(&priv->iowait);
		atomic_inc(&qp->refcount);
		spin_unlock_irqrestore(&dev->rdi.pending_lock, flags);

		spin_lock_irqsave(&qp->s_lock, flags);
		if (qp->s_flags & RVT_S_WAIT_TX) {
			qp->s_flags &= ~RVT_S_WAIT_TX;
			qib_schedule_send(qp);
		}
		spin_unlock_irqrestore(&qp->s_lock, flags);

		if (atomic_dec_and_test(&qp->refcount))
			wake_up(&qp->wait);
	} else
		spin_unlock_irqrestore(&dev->rdi.pending_lock, flags);
}

/*
 * This is called when there are send DMA descriptors that might be
 * available.
 *
 * This is called with ppd->sdma_lock held.
 */
void qib_verbs_sdma_desc_avail(struct qib_pportdata *ppd, unsigned avail)
{
	struct rvt_qp *qp, *nqp;
	struct qib_qp_priv *qpp, *nqpp;
	struct rvt_qp *qps[20];
	struct qib_ibdev *dev;
	unsigned i, n;

	n = 0;
	dev = &ppd->dd->verbs_dev;
	spin_lock(&dev->rdi.pending_lock);

	/* Search wait list for first QP wanting DMA descriptors. */
	list_for_each_entry_safe(qpp, nqpp, &dev->dmawait, iowait) {
		qp = qpp->owner;
		nqp = nqpp->owner;
		if (qp->port_num != ppd->port)
			continue;
		if (n == ARRAY_SIZE(qps))
			break;
		if (qpp->s_tx->txreq.sg_count > avail)
			break;
		avail -= qpp->s_tx->txreq.sg_count;
		list_del_init(&qpp->iowait);
		atomic_inc(&qp->refcount);
		qps[n++] = qp;
	}

	spin_unlock(&dev->rdi.pending_lock);

	for (i = 0; i < n; i++) {
		qp = qps[i];
		spin_lock(&qp->s_lock);
		if (qp->s_flags & RVT_S_WAIT_DMA_DESC) {
			qp->s_flags &= ~RVT_S_WAIT_DMA_DESC;
			qib_schedule_send(qp);
		}
		spin_unlock(&qp->s_lock);
		if (atomic_dec_and_test(&qp->refcount))
			wake_up(&qp->wait);
	}
}

/*
 * This is called with ppd->sdma_lock held.
 */
static void sdma_complete(struct qib_sdma_txreq *cookie, int status)
{
	struct qib_verbs_txreq *tx =
		container_of(cookie, struct qib_verbs_txreq, txreq);
	struct rvt_qp *qp = tx->qp;
	struct qib_qp_priv *priv = qp->priv;

	spin_lock(&qp->s_lock);
	if (tx->wqe)
		qib_send_complete(qp, tx->wqe, IB_WC_SUCCESS);
	else if (qp->ibqp.qp_type == IB_QPT_RC) {
		struct qib_ib_header *hdr;

		if (tx->txreq.flags & QIB_SDMA_TXREQ_F_FREEBUF)
			hdr = &tx->align_buf->hdr;
		else {
			struct qib_ibdev *dev = to_idev(qp->ibqp.device);

			hdr = &dev->pio_hdrs[tx->hdr_inx].hdr;
		}
		qib_rc_send_complete(qp, hdr);
	}
	if (atomic_dec_and_test(&priv->s_dma_busy)) {
		if (qp->state == IB_QPS_RESET)
			wake_up(&priv->wait_dma);
		else if (qp->s_flags & RVT_S_WAIT_DMA) {
			qp->s_flags &= ~RVT_S_WAIT_DMA;
			qib_schedule_send(qp);
		}
	}
	spin_unlock(&qp->s_lock);

	qib_put_txreq(tx);
}

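/*
 * A kernel memory allocation for a send failed: queue the QP on the
 * memwait list and arm mem_timer.  Returns -EBUSY if the QP was queued
 * to be retried later, or 0 if the QP is no longer in a sendable state.
 */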
static int wait_kmem(struct qib_ibdev *dev, struct rvt_qp *qp)
{
	struct qib_qp_priv *priv = qp->priv;
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&qp->s_lock, flags);
	if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) {
		spin_lock(&dev->rdi.pending_lock);
		if (list_empty(&priv->iowait)) {
			if (list_empty(&dev->memwait))
				mod_timer(&dev->mem_timer, jiffies + 1);
			qp->s_flags |= RVT_S_WAIT_KMEM;
			list_add_tail(&priv->iowait, &dev->memwait);
		}
		spin_unlock(&dev->rdi.pending_lock);
		qp->s_flags &= ~RVT_S_BUSY;
		ret = -EBUSY;
	}
	spin_unlock_irqrestore(&qp->s_lock, flags);

	return ret;
}

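/*
 * Send a packet using the send DMA engine.  If the payload cannot be
 * expressed as aligned SDMA descriptors, the header and data are copied
 * into a single DMA-mapped bounce buffer instead.
 */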
static int qib_verbs_send_dma(struct rvt_qp *qp, struct qib_ib_header *hdr,
			      u32 hdrwords, struct rvt_sge_state *ss, u32 len,
			      u32 plen, u32 dwords)
{
	struct qib_qp_priv *priv = qp->priv;
	struct qib_ibdev *dev = to_idev(qp->ibqp.device);
	struct qib_devdata *dd = dd_from_dev(dev);
	struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	struct qib_verbs_txreq *tx;
	struct qib_pio_header *phdr;
	u32 control;
	u32 ndesc;
	int ret;

	tx = priv->s_tx;
	if (tx) {
		priv->s_tx = NULL;
		/* resend previously constructed packet */
		ret = qib_sdma_verbs_send(ppd, tx->ss, tx->dwords, tx);
		goto bail;
	}

	tx = get_txreq(dev, qp);
	if (IS_ERR(tx))
		goto bail_tx;

	control = dd->f_setpbc_control(ppd, plen, qp->s_srate,
				       be16_to_cpu(hdr->lrh[0]) >> 12);
	tx->qp = qp;
	atomic_inc(&qp->refcount);
	tx->wqe = qp->s_wqe;
	tx->mr = qp->s_rdma_mr;
	if (qp->s_rdma_mr)
		qp->s_rdma_mr = NULL;
	tx->txreq.callback = sdma_complete;
	if (dd->flags & QIB_HAS_SDMA_TIMEOUT)
		tx->txreq.flags = QIB_SDMA_TXREQ_F_HEADTOHOST;
	else
		tx->txreq.flags = QIB_SDMA_TXREQ_F_INTREQ;
	if (plen + 1 > dd->piosize2kmax_dwords)
		tx->txreq.flags |= QIB_SDMA_TXREQ_F_USELARGEBUF;

	if (len) {
		/*
		 * Don't try to DMA if it takes more descriptors than
		 * the queue holds.
		 */
		ndesc = qib_count_sge(ss, len);
		if (ndesc >= ppd->sdma_descq_cnt)
			ndesc = 0;
	} else
		ndesc = 1;
	if (ndesc) {
		phdr = &dev->pio_hdrs[tx->hdr_inx];
		phdr->pbc[0] = cpu_to_le32(plen);
		phdr->pbc[1] = cpu_to_le32(control);
		memcpy(&phdr->hdr, hdr, hdrwords << 2);
		tx->txreq.flags |= QIB_SDMA_TXREQ_F_FREEDESC;
		tx->txreq.sg_count = ndesc;
		tx->txreq.addr = dev->pio_hdrs_phys +
			tx->hdr_inx * sizeof(struct qib_pio_header);
		tx->hdr_dwords = hdrwords + 2; /* add PBC length */
		ret = qib_sdma_verbs_send(ppd, ss, dwords, tx);
		goto bail;
	}

	/* Allocate a buffer and copy the header and payload to it. */
	tx->hdr_dwords = plen + 1;
	phdr = kmalloc(tx->hdr_dwords << 2, GFP_ATOMIC);
	if (!phdr)
		goto err_tx;
	phdr->pbc[0] = cpu_to_le32(plen);
	phdr->pbc[1] = cpu_to_le32(control);
	memcpy(&phdr->hdr, hdr, hdrwords << 2);
	qib_copy_from_sge((u32 *) &phdr->hdr + hdrwords, ss, len);

	tx->txreq.addr = dma_map_single(&dd->pcidev->dev, phdr,
					tx->hdr_dwords << 2, DMA_TO_DEVICE);
	if (dma_mapping_error(&dd->pcidev->dev, tx->txreq.addr))
		goto map_err;
	tx->align_buf = phdr;
	tx->txreq.flags |= QIB_SDMA_TXREQ_F_FREEBUF;
	tx->txreq.sg_count = 1;
	ret = qib_sdma_verbs_send(ppd, NULL, 0, tx);
	goto unaligned;

map_err:
	kfree(phdr);
err_tx:
	qib_put_txreq(tx);
	ret = wait_kmem(dev, qp);
unaligned:
	ibp->rvp.n_unaligned++;
bail:
	return ret;
bail_tx:
	ret = PTR_ERR(tx);
	goto bail;
}

/*
 * If we are now in the error state, return zero to flush the
 * send work request.
 */
static int no_bufs_available(struct rvt_qp *qp)
{
	struct qib_qp_priv *priv = qp->priv;
	struct qib_ibdev *dev = to_idev(qp->ibqp.device);
	struct qib_devdata *dd;
	unsigned long flags;
	int ret = 0;

	/*
	 * Note that as soon as want_buffer() is called and
	 * possibly before it returns, qib_ib_piobufavail()
	 * could be called. Therefore, put QP on the I/O wait list before
	 * enabling the PIO avail interrupt.
	 */
	spin_lock_irqsave(&qp->s_lock, flags);
	if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) {
		spin_lock(&dev->rdi.pending_lock);
		if (list_empty(&priv->iowait)) {
			dev->n_piowait++;
			qp->s_flags |= RVT_S_WAIT_PIO;
			list_add_tail(&priv->iowait, &dev->piowait);
			dd = dd_from_dev(dev);
			dd->f_wantpiobuf_intr(dd, 1);
		}
		spin_unlock(&dev->rdi.pending_lock);
		qp->s_flags &= ~RVT_S_BUSY;
		ret = -EBUSY;
	}
	spin_unlock_irqrestore(&qp->s_lock, flags);
	return ret;
}

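/*
 * Send a packet by programmed I/O: write the PBC, header and payload
 * directly into a chip send buffer, using write-combining flushes around
 * the trigger word where the hardware requires them.
 */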
static int qib_verbs_send_pio(struct rvt_qp *qp, struct qib_ib_header *ibhdr,
			      u32 hdrwords, struct rvt_sge_state *ss, u32 len,
			      u32 plen, u32 dwords)
{
	struct qib_devdata *dd = dd_from_ibdev(qp->ibqp.device);
	struct qib_pportdata *ppd = dd->pport + qp->port_num - 1;
	u32 *hdr = (u32 *) ibhdr;
	u32 __iomem *piobuf_orig;
	u32 __iomem *piobuf;
	u64 pbc;
	unsigned long flags;
	unsigned flush_wc;
	u32 control;
	u32 pbufn;

	control = dd->f_setpbc_control(ppd, plen, qp->s_srate,
				       be16_to_cpu(ibhdr->lrh[0]) >> 12);
	pbc = ((u64) control << 32) | plen;
	piobuf = dd->f_getsendbuf(ppd, pbc, &pbufn);
	if (unlikely(piobuf == NULL))
		return no_bufs_available(qp);

	/*
	 * Write the pbc.
	 * We have to flush after the PBC for correctness on some cpus
	 * or WC buffer can be written out of order.
	 */
	writeq(pbc, piobuf);
	piobuf_orig = piobuf;
	piobuf += 2;

	flush_wc = dd->flags & QIB_PIO_FLUSH_WC;
	if (len == 0) {
		/*
		 * If there is just the header portion, must flush before
		 * writing last word of header for correctness, and after
		 * the last header word (trigger word).
		 */
		if (flush_wc) {
			qib_flush_wc();
			qib_pio_copy(piobuf, hdr, hdrwords - 1);
			qib_flush_wc();
			__raw_writel(hdr[hdrwords - 1], piobuf + hdrwords - 1);
			qib_flush_wc();
		} else
			qib_pio_copy(piobuf, hdr, hdrwords);
		goto done;
	}

	if (flush_wc)
		qib_flush_wc();
	qib_pio_copy(piobuf, hdr, hdrwords);
	piobuf += hdrwords;

	/* The common case is aligned and contained in one segment. */
	if (likely(ss->num_sge == 1 && len <= ss->sge.length &&
		   !((unsigned long)ss->sge.vaddr & (sizeof(u32) - 1)))) {
		u32 *addr = (u32 *) ss->sge.vaddr;

		/* Update address before sending packet. */
		update_sge(ss, len);
		if (flush_wc) {
			qib_pio_copy(piobuf, addr, dwords - 1);
			/* must flush early everything before trigger word */
			qib_flush_wc();
			__raw_writel(addr[dwords - 1], piobuf + dwords - 1);
			/* be sure trigger word is written */
			qib_flush_wc();
		} else
			qib_pio_copy(piobuf, addr, dwords);
		goto done;
	}
	copy_io(piobuf, ss, len, flush_wc);
done:
	if (dd->flags & QIB_USE_SPCL_TRIG) {
		u32 spcl_off = (pbufn >= dd->piobcnt2k) ? 2047 : 1023;

		qib_flush_wc();
		__raw_writel(0xaebecede, piobuf_orig + spcl_off);
	}
	qib_sendbuf_done(dd, pbufn);
	if (qp->s_rdma_mr) {
		rvt_put_mr(qp->s_rdma_mr);
		qp->s_rdma_mr = NULL;
	}
	if (qp->s_wqe) {
		spin_lock_irqsave(&qp->s_lock, flags);
		qib_send_complete(qp, qp->s_wqe, IB_WC_SUCCESS);
		spin_unlock_irqrestore(&qp->s_lock, flags);
	} else if (qp->ibqp.qp_type == IB_QPT_RC) {
		spin_lock_irqsave(&qp->s_lock, flags);
		qib_rc_send_complete(qp, ibhdr);
		spin_unlock_irqrestore(&qp->s_lock, flags);
	}
	return 0;
}

/**
 * qib_verbs_send - send a packet
 * @qp: the QP to send on
 * @hdr: the packet header
 * @hdrwords: the number of 32-bit words in the header
 * @ss: the SGE to send
 * @len: the length of the packet in bytes
 *
 * Return zero if packet is sent or queued OK.
 * Return non-zero and clear qp->s_flags RVT_S_BUSY otherwise.
 */
int qib_verbs_send(struct rvt_qp *qp, struct qib_ib_header *hdr,
		   u32 hdrwords, struct rvt_sge_state *ss, u32 len)
{
	struct qib_devdata *dd = dd_from_ibdev(qp->ibqp.device);
	u32 plen;
	int ret;
	u32 dwords = (len + 3) >> 2;

	/*
	 * Calculate the send buffer trigger address.
	 * The +1 counts for the pbc control dword following the pbc length.
	 */
	plen = hdrwords + dwords + 1;

	/*
	 * VL15 packets (IB_QPT_SMI) will always use PIO, so we
	 * can defer SDMA restart until link goes ACTIVE without
	 * worrying about just how we got there.
	 */
	if (qp->ibqp.qp_type == IB_QPT_SMI ||
	    !(dd->flags & QIB_HAS_SEND_DMA))
		ret = qib_verbs_send_pio(qp, hdr, hdrwords, ss, len,
					 plen, dwords);
	else
		ret = qib_verbs_send_dma(qp, hdr, hdrwords, ss, len,
					 plen, dwords);

	return ret;
}

int qib_snapshot_counters(struct qib_pportdata *ppd, u64 *swords,
			  u64 *rwords, u64 *spkts, u64 *rpkts,
			  u64 *xmit_wait)
{
	int ret;
	struct qib_devdata *dd = ppd->dd;

	if (!(dd->flags & QIB_PRESENT)) {
		/* no hardware, freeze, etc. */
		ret = -EINVAL;
		goto bail;
	}
	*swords = dd->f_portcntr(ppd, QIBPORTCNTR_WORDSEND);
	*rwords = dd->f_portcntr(ppd, QIBPORTCNTR_WORDRCV);
	*spkts = dd->f_portcntr(ppd, QIBPORTCNTR_PKTSEND);
	*rpkts = dd->f_portcntr(ppd, QIBPORTCNTR_PKTRCV);
	*xmit_wait = dd->f_portcntr(ppd, QIBPORTCNTR_SENDSTALL);

	ret = 0;

bail:
	return ret;
}

/**
 * qib_get_counters - get various chip counters
 * @ppd: the physical port data
 * @cntrs: counters are placed here
 *
 * Return the counters needed by recv_pma_get_portcounters().
 */
int qib_get_counters(struct qib_pportdata *ppd,
		     struct qib_verbs_counters *cntrs)
{
	int ret;

	if (!(ppd->dd->flags & QIB_PRESENT)) {
		/* no hardware, freeze, etc. */
		ret = -EINVAL;
		goto bail;
	}
	cntrs->symbol_error_counter =
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_IBSYMBOLERR);
	cntrs->link_error_recovery_counter =
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_IBLINKERRRECOV);
	/*
	 * The link downed counter counts when the other side downs the
	 * connection.  We add in the number of times we downed the link
	 * due to local link integrity errors to compensate.
	 */
	cntrs->link_downed_counter =
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_IBLINKDOWN);
	cntrs->port_rcv_errors =
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_RXDROPPKT) +
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_RCVOVFL) +
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_ERR_RLEN) +
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_INVALIDRLEN) +
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_ERRLINK) +
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_ERRICRC) +
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_ERRVCRC) +
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_ERRLPCRC) +
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_BADFORMAT);
	cntrs->port_rcv_errors +=
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_RXLOCALPHYERR);
	cntrs->port_rcv_errors +=
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_RXVLERR);
	cntrs->port_rcv_remphys_errors =
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_RCVEBP);
	cntrs->port_xmit_discards =
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_UNSUPVL);
	cntrs->port_xmit_data = ppd->dd->f_portcntr(ppd,
						    QIBPORTCNTR_WORDSEND);
	cntrs->port_rcv_data = ppd->dd->f_portcntr(ppd,
						   QIBPORTCNTR_WORDRCV);
	cntrs->port_xmit_packets = ppd->dd->f_portcntr(ppd,
						       QIBPORTCNTR_PKTSEND);
	cntrs->port_rcv_packets = ppd->dd->f_portcntr(ppd,
						      QIBPORTCNTR_PKTRCV);
	cntrs->local_link_integrity_errors =
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_LLI);
	cntrs->excessive_buffer_overrun_errors =
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_EXCESSBUFOVFL);
	cntrs->vl15_dropped =
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_VL15PKTDROP);

	ret = 0;

bail:
	return ret;
}

/**
 * qib_ib_piobufavail - callback when a PIO buffer is available
 * @dd: the device pointer
 *
 * This is called from qib_intr() at interrupt level when a PIO buffer is
 * available after qib_verbs_send() returned an error that no buffers were
 * available. Disable the interrupt if there are no more QPs waiting.
 */
void qib_ib_piobufavail(struct qib_devdata *dd)
{
	struct qib_ibdev *dev = &dd->verbs_dev;
	struct list_head *list;
	struct rvt_qp *qps[5];
	struct rvt_qp *qp;
	unsigned long flags;
	unsigned i, n;
	struct qib_qp_priv *priv;

	list = &dev->piowait;
	n = 0;

	/*
	 * Note: checking that the piowait list is empty and clearing
	 * the buffer available interrupt needs to be atomic or we
	 * could end up with QPs on the wait list with the interrupt
	 * disabled.
	 */
	spin_lock_irqsave(&dev->rdi.pending_lock, flags);
	while (!list_empty(list)) {
		if (n == ARRAY_SIZE(qps))
			goto full;
		priv = list_entry(list->next, struct qib_qp_priv, iowait);
		qp = priv->owner;
		list_del_init(&priv->iowait);
		atomic_inc(&qp->refcount);
		qps[n++] = qp;
	}
	dd->f_wantpiobuf_intr(dd, 0);
full:
	spin_unlock_irqrestore(&dev->rdi.pending_lock, flags);

	for (i = 0; i < n; i++) {
		qp = qps[i];

		spin_lock_irqsave(&qp->s_lock, flags);
		if (qp->s_flags & RVT_S_WAIT_PIO) {
			qp->s_flags &= ~RVT_S_WAIT_PIO;
			qib_schedule_send(qp);
		}
		spin_unlock_irqrestore(&qp->s_lock, flags);

		/* Notify qib_destroy_qp() if it is waiting. */
		if (atomic_dec_and_test(&qp->refcount))
			wake_up(&qp->wait);
	}
}

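/*
 * Fill in ib_port_attr for one port: LID, link state, MTU and
 * capability flags.
 */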
static int qib_query_port(struct ib_device *ibdev, u8 port,
			  struct ib_port_attr *props)
{
	struct qib_devdata *dd = dd_from_ibdev(ibdev);
	struct qib_ibport *ibp = to_iport(ibdev, port);
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	enum ib_mtu mtu;
	u16 lid = ppd->lid;

	memset(props, 0, sizeof(*props));
	props->lid = lid ? lid : be16_to_cpu(IB_LID_PERMISSIVE);
	props->lmc = ppd->lmc;
	props->sm_lid = ibp->rvp.sm_lid;
	props->sm_sl = ibp->rvp.sm_sl;
	props->state = dd->f_iblink_state(ppd->lastibcstat);
	props->phys_state = dd->f_ibphys_portstate(ppd->lastibcstat);
	props->port_cap_flags = ibp->rvp.port_cap_flags;
	props->gid_tbl_len = QIB_GUIDS_PER_PORT;
	props->max_msg_sz = 0x80000000;
	props->pkey_tbl_len = qib_get_npkeys(dd);
	props->bad_pkey_cntr = ibp->rvp.pkey_violations;
	props->qkey_viol_cntr = ibp->rvp.qkey_violations;
	props->active_width = ppd->link_width_active;
	/* See rate_show() */
	props->active_speed = ppd->link_speed_active;
	props->max_vl_num = qib_num_vls(ppd->vls_supported);
	props->init_type_reply = 0;

	props->max_mtu = qib_ibmtu ? qib_ibmtu : IB_MTU_4096;
	switch (ppd->ibmtu) {
	case 4096:
		mtu = IB_MTU_4096;
		break;
	case 2048:
		mtu = IB_MTU_2048;
		break;
	case 1024:
		mtu = IB_MTU_1024;
		break;
	case 512:
		mtu = IB_MTU_512;
		break;
	case 256:
		mtu = IB_MTU_256;
		break;
	default:
		mtu = IB_MTU_2048;
	}
	props->active_mtu = mtu;
	props->subnet_timeout = ibp->rvp.subnet_timeout;

	return 0;
}

static int qib_modify_device(struct ib_device *device,
			     int device_modify_mask,
			     struct ib_device_modify *device_modify)
{
	struct qib_devdata *dd = dd_from_ibdev(device);
	unsigned i;
	int ret;

	if (device_modify_mask & ~(IB_DEVICE_MODIFY_SYS_IMAGE_GUID |
				   IB_DEVICE_MODIFY_NODE_DESC)) {
		ret = -EOPNOTSUPP;
		goto bail;
	}

	if (device_modify_mask & IB_DEVICE_MODIFY_NODE_DESC) {
		memcpy(device->node_desc, device_modify->node_desc, 64);
		for (i = 0; i < dd->num_pports; i++) {
			struct qib_ibport *ibp = &dd->pport[i].ibport_data;

			qib_node_desc_chg(ibp);
		}
	}

	if (device_modify_mask & IB_DEVICE_MODIFY_SYS_IMAGE_GUID) {
		ib_qib_sys_image_guid =
			cpu_to_be64(device_modify->sys_image_guid);
		for (i = 0; i < dd->num_pports; i++) {
			struct qib_ibport *ibp = &dd->pport[i].ibport_data;

			qib_sys_guid_chg(ibp);
		}
	}

	ret = 0;

bail:
	return ret;
}

static int qib_modify_port(struct ib_device *ibdev, u8 port,
			   int port_modify_mask, struct ib_port_modify *props)
{
	struct qib_ibport *ibp = to_iport(ibdev, port);
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);

	ibp->rvp.port_cap_flags |= props->set_port_cap_mask;
	ibp->rvp.port_cap_flags &= ~props->clr_port_cap_mask;
	if (props->set_port_cap_mask || props->clr_port_cap_mask)
		qib_cap_mask_chg(ibp);
	if (port_modify_mask & IB_PORT_SHUTDOWN)
		qib_set_linkstate(ppd, QIB_IB_LINKDOWN);
	if (port_modify_mask & IB_PORT_RESET_QKEY_CNTR)
		ibp->rvp.qkey_violations = 0;
	return 0;
}

static int qib_query_gid(struct ib_device *ibdev, u8 port,
			 int index, union ib_gid *gid)
{
	struct qib_devdata *dd = dd_from_ibdev(ibdev);
	int ret = 0;

	if (!port || port > dd->num_pports)
		ret = -EINVAL;
	else {
		struct qib_ibport *ibp = to_iport(ibdev, port);
		struct qib_pportdata *ppd = ppd_from_ibp(ibp);

		gid->global.subnet_prefix = ibp->rvp.gid_prefix;
		if (index == 0)
			gid->global.interface_id = ppd->guid;
		else if (index < QIB_GUIDS_PER_PORT)
			gid->global.interface_id = ibp->guids[index - 1];
		else
			ret = -EINVAL;
	}

	return ret;
}

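/*
 * Validate address handle attributes; only SLs 0-15 are accepted.
 */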
int qib_check_ah(struct ib_device *ibdev, struct ib_ah_attr *ah_attr)
{
	if (ah_attr->sl > 15)
		return -EINVAL;

	return 0;
}

static void qib_notify_new_ah(struct ib_device *ibdev,
			      struct ib_ah_attr *ah_attr,
			      struct rvt_ah *ah)
{
	struct qib_ibport *ibp;
	struct qib_pportdata *ppd;

	/*
	 * Do not trust reading anything from rvt_ah at this point as it is not
	 * done being setup. We can however modify things which we need to set.
	 */

	ibp = to_iport(ibdev, ah_attr->port_num);
	ppd = ppd_from_ibp(ibp);
	ah->vl = ibp->sl_to_vl[ah->attr.sl];
	ah->log_pmtu = ilog2(ppd->ibmtu);
}

struct ib_ah *qib_create_qp0_ah(struct qib_ibport *ibp, u16 dlid)
{
	struct ib_ah_attr attr;
	struct ib_ah *ah = ERR_PTR(-EINVAL);
	struct rvt_qp *qp0;

	memset(&attr, 0, sizeof(attr));
	attr.dlid = dlid;
	attr.port_num = ppd_from_ibp(ibp)->port;
	rcu_read_lock();
	qp0 = rcu_dereference(ibp->rvp.qp[0]);
	if (qp0)
		ah = ib_create_ah(qp0->ibqp.pd, &attr);
	rcu_read_unlock();
	return ah;
}

/**
 * qib_get_npkeys - return the size of the PKEY table for context 0
 * @dd: the qlogic_ib device
 */
unsigned qib_get_npkeys(struct qib_devdata *dd)
{
	return ARRAY_SIZE(dd->rcd[0]->pkeys);
}

/*
 * Return the indexed PKEY from the port PKEY table.
 * No need to validate rcd[ctxt]; the port is setup if we are here.
 */
unsigned qib_get_pkey(struct qib_ibport *ibp, unsigned index)
{
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	struct qib_devdata *dd = ppd->dd;
	unsigned ctxt = ppd->hw_pidx;
	unsigned ret;

	/* dd->rcd null if mini_init or some init failures */
	if (!dd->rcd || index >= ARRAY_SIZE(dd->rcd[ctxt]->pkeys))
		ret = 0;
	else
		ret = dd->rcd[ctxt]->pkeys[index];

	return ret;
}

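/*
 * Initialize the per-port qib_ibport structure: default capability flags,
 * PMA counter selects, and a snapshot of the hardware counters so they
 * read as "cleared".
 */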
Ralph Campbellf9315512010-05-23 21:44:54 -07001531static void init_ibport(struct qib_pportdata *ppd)
1532{
1533 struct qib_verbs_counters cntrs;
1534 struct qib_ibport *ibp = &ppd->ibport_data;
1535
Harish Chegondif24a6d42016-01-22 12:56:02 -08001536 spin_lock_init(&ibp->rvp.lock);
Ralph Campbellf9315512010-05-23 21:44:54 -07001537 /* Set the prefix to the default value (see ch. 4.1.1) */
Harish Chegondif24a6d42016-01-22 12:56:02 -08001538 ibp->rvp.gid_prefix = IB_DEFAULT_GID_PREFIX;
1539 ibp->rvp.sm_lid = be16_to_cpu(IB_LID_PERMISSIVE);
1540 ibp->rvp.port_cap_flags = IB_PORT_SYS_IMAGE_GUID_SUP |
Ralph Campbellf9315512010-05-23 21:44:54 -07001541 IB_PORT_CLIENT_REG_SUP | IB_PORT_SL_MAP_SUP |
1542 IB_PORT_TRAP_SUP | IB_PORT_AUTO_MIGR_SUP |
1543 IB_PORT_DR_NOTICE_SUP | IB_PORT_CAP_MASK_NOTICE_SUP |
1544 IB_PORT_OTHER_LOCAL_CHANGES_SUP;
1545 if (ppd->dd->flags & QIB_HAS_LINK_LATENCY)
Harish Chegondif24a6d42016-01-22 12:56:02 -08001546 ibp->rvp.port_cap_flags |= IB_PORT_LINK_LATENCY_SUP;
1547 ibp->rvp.pma_counter_select[0] = IB_PMA_PORT_XMIT_DATA;
1548 ibp->rvp.pma_counter_select[1] = IB_PMA_PORT_RCV_DATA;
1549 ibp->rvp.pma_counter_select[2] = IB_PMA_PORT_XMIT_PKTS;
1550 ibp->rvp.pma_counter_select[3] = IB_PMA_PORT_RCV_PKTS;
1551 ibp->rvp.pma_counter_select[4] = IB_PMA_PORT_XMIT_WAIT;
Ralph Campbellf9315512010-05-23 21:44:54 -07001552
1553 /* Snapshot current HW counters to "clear" them. */
1554 qib_get_counters(ppd, &cntrs);
1555 ibp->z_symbol_error_counter = cntrs.symbol_error_counter;
1556 ibp->z_link_error_recovery_counter =
1557 cntrs.link_error_recovery_counter;
1558 ibp->z_link_downed_counter = cntrs.link_downed_counter;
1559 ibp->z_port_rcv_errors = cntrs.port_rcv_errors;
1560 ibp->z_port_rcv_remphys_errors = cntrs.port_rcv_remphys_errors;
1561 ibp->z_port_xmit_discards = cntrs.port_xmit_discards;
1562 ibp->z_port_xmit_data = cntrs.port_xmit_data;
1563 ibp->z_port_rcv_data = cntrs.port_rcv_data;
1564 ibp->z_port_xmit_packets = cntrs.port_xmit_packets;
1565 ibp->z_port_rcv_packets = cntrs.port_rcv_packets;
1566 ibp->z_local_link_integrity_errors =
1567 cntrs.local_link_integrity_errors;
1568 ibp->z_excessive_buffer_overrun_errors =
1569 cntrs.excessive_buffer_overrun_errors;
1570 ibp->z_vl15_dropped = cntrs.vl15_dropped;
Harish Chegondif24a6d42016-01-22 12:56:02 -08001571 RCU_INIT_POINTER(ibp->rvp.qp[0], NULL);
1572 RCU_INIT_POINTER(ibp->rvp.qp[1], NULL);
Ralph Campbellf9315512010-05-23 21:44:54 -07001573}
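
/*
 * Illustrative sketch (guarded out): the z_* snapshots taken above act
 * as a software "clear", so a PMA-style reader reports the difference
 * between the live hardware counter and the snapshot.  The helper name
 * is hypothetical.
 */
#if 0
static u64 qib_example_symbol_errors_since_clear(struct qib_ibport *ibp,
						 struct qib_pportdata *ppd)
{
	struct qib_verbs_counters cntrs;

	qib_get_counters(ppd, &cntrs);
	return cntrs.symbol_error_counter - ibp->z_symbol_error_counter;
}
#endif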
1574
Ira Weiny77386132015-05-13 20:02:58 -04001575static int qib_port_immutable(struct ib_device *ibdev, u8 port_num,
1576 struct ib_port_immutable *immutable)
1577{
1578 struct ib_port_attr attr;
1579 int err;
1580
1581 err = qib_query_port(ibdev, port_num, &attr);
1582 if (err)
1583 return err;
1584
1585 immutable->pkey_tbl_len = attr.pkey_tbl_len;
1586 immutable->gid_tbl_len = attr.gid_tbl_len;
Ira Weinyf9b22e32015-05-13 20:02:59 -04001587 immutable->core_cap_flags = RDMA_CORE_PORT_IBA_IB;
Ira Weiny337877a2015-06-06 14:38:29 -04001588 immutable->max_mad_size = IB_MGMT_MAD_SIZE;
Ira Weiny77386132015-05-13 20:02:58 -04001589
1590 return 0;
1591}
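
/*
 * Illustrative sketch (hypothetical, guarded out): the core calls the
 * get_port_immutable hook once per port at registration time and
 * caches the result; roughly the equivalent of the following.
 */
#if 0
static int qib_example_cache_immutable(struct ib_device *ibdev, u8 port)
{
	struct ib_port_immutable im;
	int err = qib_port_immutable(ibdev, port, &im);

	if (!err)
		pr_info("port %d: %u pkeys, %u gids\n",
			port, im.pkey_tbl_len, im.gid_tbl_len);
	return err;
}
#endif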
1592
Ralph Campbellf9315512010-05-23 21:44:54 -07001593/**
Harish Chegondi0aeddea2016-01-22 12:56:40 -08001594 * qib_fill_device_attr - Fill in rvt dev info device attributes.
1595 * @dd: the device data structure
1596 */
1597static void qib_fill_device_attr(struct qib_devdata *dd)
1598{
1599 struct rvt_dev_info *rdi = &dd->verbs_dev.rdi;
1600
1601 memset(&rdi->dparms.props, 0, sizeof(rdi->dparms.props));
1602
1603 rdi->dparms.props.max_pd = ib_qib_max_pds;
1604 rdi->dparms.props.max_ah = ib_qib_max_ahs;
1605 rdi->dparms.props.device_cap_flags = IB_DEVICE_BAD_PKEY_CNTR |
1606 IB_DEVICE_BAD_QKEY_CNTR | IB_DEVICE_SHUTDOWN_PORT |
1607 IB_DEVICE_SYS_IMAGE_GUID | IB_DEVICE_RC_RNR_NAK_GEN |
1608 IB_DEVICE_PORT_ACTIVE_EVENT | IB_DEVICE_SRQ_RESIZE;
1609 rdi->dparms.props.page_size_cap = PAGE_SIZE;
1610 rdi->dparms.props.vendor_id =
1611 QIB_SRC_OUI_1 << 16 | QIB_SRC_OUI_2 << 8 | QIB_SRC_OUI_3;
1612 rdi->dparms.props.vendor_part_id = dd->deviceid;
1613 rdi->dparms.props.hw_ver = dd->minrev;
1614 rdi->dparms.props.sys_image_guid = ib_qib_sys_image_guid;
1615 rdi->dparms.props.max_mr_size = ~0ULL;
1616 rdi->dparms.props.max_qp = ib_qib_max_qps;
1617 rdi->dparms.props.max_qp_wr = ib_qib_max_qp_wrs;
1618 rdi->dparms.props.max_sge = ib_qib_max_sges;
1619 rdi->dparms.props.max_sge_rd = ib_qib_max_sges;
1620 rdi->dparms.props.max_cq = ib_qib_max_cqs;
1621 rdi->dparms.props.max_cqe = ib_qib_max_cqes;
1622 rdi->dparms.props.max_ah = ib_qib_max_ahs;
1623 rdi->dparms.props.max_mr = rdi->lkey_table.max;
1624 rdi->dparms.props.max_fmr = rdi->lkey_table.max;
1625 rdi->dparms.props.max_map_per_fmr = 32767;
1626 rdi->dparms.props.max_qp_rd_atom = QIB_MAX_RDMA_ATOMIC;
1627 rdi->dparms.props.max_qp_init_rd_atom = 255;
1628 rdi->dparms.props.max_srq = ib_qib_max_srqs;
1629 rdi->dparms.props.max_srq_wr = ib_qib_max_srq_wrs;
1630 rdi->dparms.props.max_srq_sge = ib_qib_max_srq_sges;
1631 rdi->dparms.props.atomic_cap = IB_ATOMIC_GLOB;
1632 rdi->dparms.props.max_pkeys = qib_get_npkeys(dd);
1633 rdi->dparms.props.max_mcast_grp = ib_qib_max_mcast_grps;
1634 rdi->dparms.props.max_mcast_qp_attach = ib_qib_max_mcast_qp_attached;
1635 rdi->dparms.props.max_total_mcast_qp_attach =
1636 rdi->dparms.props.max_mcast_qp_attach *
1637 rdi->dparms.props.max_mcast_grp;
1638}
1639
1640/**
Ralph Campbellf9315512010-05-23 21:44:54 -07001641 * qib_register_ib_device - register our device with the infiniband core
1642 * @dd: the device data structure
1643 * Return 0 on success or a negative errno on failure.
1644 */
1645int qib_register_ib_device(struct qib_devdata *dd)
1646{
1647 struct qib_ibdev *dev = &dd->verbs_dev;
Dennis Dalessandro2dc05ab2016-01-22 12:44:29 -08001648 struct ib_device *ibdev = &dev->rdi.ibdev;
Ralph Campbellf9315512010-05-23 21:44:54 -07001649 struct qib_pportdata *ppd = dd->pport;
Harish Chegondi76fec3e2016-01-22 12:56:21 -08001650 unsigned i, ctxt;
Ralph Campbellf9315512010-05-23 21:44:54 -07001651 int ret;
1652
Mike Marciniszynaf061a62011-09-23 13:16:44 -04001653 get_random_bytes(&dev->qp_rnd, sizeof(dev->qp_rnd));
Ralph Campbellf9315512010-05-23 21:44:54 -07001654 for (i = 0; i < dd->num_pports; i++)
1655 init_ibport(ppd + i);
1656
1657 /* Only need to initialize non-zero fields. */
Ralph Campbellf9315512010-05-23 21:44:54 -07001658 spin_lock_init(&dev->n_qps_lock);
1659 spin_lock_init(&dev->n_srqs_lock);
Ralph Campbellf9315512010-05-23 21:44:54 -07001660 init_timer(&dev->mem_timer);
1661 dev->mem_timer.function = mem_timer;
1662 dev->mem_timer.data = (unsigned long) dev;
1663
Harish Chegondi47c7ea62016-01-22 12:56:52 -08001664 qpt_mask = dd->qpn_mask;
Ralph Campbellf9315512010-05-23 21:44:54 -07001665
Ralph Campbellf9315512010-05-23 21:44:54 -07001666 INIT_LIST_HEAD(&dev->piowait);
1667 INIT_LIST_HEAD(&dev->dmawait);
1668 INIT_LIST_HEAD(&dev->txwait);
1669 INIT_LIST_HEAD(&dev->memwait);
1670 INIT_LIST_HEAD(&dev->txreq_free);
1671
1672 if (ppd->sdma_descq_cnt) {
1673 dev->pio_hdrs = dma_alloc_coherent(&dd->pcidev->dev,
1674 ppd->sdma_descq_cnt *
1675 sizeof(struct qib_pio_header),
1676 &dev->pio_hdrs_phys,
1677 GFP_KERNEL);
1678 if (!dev->pio_hdrs) {
1679 ret = -ENOMEM;
1680 goto err_hdrs;
1681 }
1682 }
1683
1684 for (i = 0; i < ppd->sdma_descq_cnt; i++) {
1685 struct qib_verbs_txreq *tx;
1686
Mike Marciniszyn041af0b2015-01-16 10:50:32 -05001687 tx = kzalloc(sizeof(*tx), GFP_KERNEL);
Ralph Campbellf9315512010-05-23 21:44:54 -07001688 if (!tx) {
1689 ret = -ENOMEM;
1690 goto err_tx;
1691 }
1692 tx->hdr_inx = i;
1693 list_add(&tx->txreq.list, &dev->txreq_free);
1694 }
1695
1696 /*
1697 * The system image GUID is supposed to be the same for all
1698 * IB HCAs in a single system, but since there can be other
1699 * device types in the system, we can't be sure this is unique.
1700 */
1701 if (!ib_qib_sys_image_guid)
1702 ib_qib_sys_image_guid = ppd->guid;
1703
1704 strlcpy(ibdev->name, "qib%d", IB_DEVICE_NAME_MAX);
1705 ibdev->owner = THIS_MODULE;
1706 ibdev->node_guid = ppd->guid;
1707 ibdev->uverbs_abi_ver = QIB_UVERBS_ABI_VERSION;
1708 ibdev->uverbs_cmd_mask =
1709 (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
1710 (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
1711 (1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
1712 (1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
1713 (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
1714 (1ull << IB_USER_VERBS_CMD_CREATE_AH) |
1715 (1ull << IB_USER_VERBS_CMD_MODIFY_AH) |
1716 (1ull << IB_USER_VERBS_CMD_QUERY_AH) |
1717 (1ull << IB_USER_VERBS_CMD_DESTROY_AH) |
1718 (1ull << IB_USER_VERBS_CMD_REG_MR) |
1719 (1ull << IB_USER_VERBS_CMD_DEREG_MR) |
1720 (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
1721 (1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
1722 (1ull << IB_USER_VERBS_CMD_RESIZE_CQ) |
1723 (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
1724 (1ull << IB_USER_VERBS_CMD_POLL_CQ) |
1725 (1ull << IB_USER_VERBS_CMD_REQ_NOTIFY_CQ) |
1726 (1ull << IB_USER_VERBS_CMD_CREATE_QP) |
1727 (1ull << IB_USER_VERBS_CMD_QUERY_QP) |
1728 (1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
1729 (1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
1730 (1ull << IB_USER_VERBS_CMD_POST_SEND) |
1731 (1ull << IB_USER_VERBS_CMD_POST_RECV) |
1732 (1ull << IB_USER_VERBS_CMD_ATTACH_MCAST) |
1733 (1ull << IB_USER_VERBS_CMD_DETACH_MCAST) |
1734 (1ull << IB_USER_VERBS_CMD_CREATE_SRQ) |
1735 (1ull << IB_USER_VERBS_CMD_MODIFY_SRQ) |
1736 (1ull << IB_USER_VERBS_CMD_QUERY_SRQ) |
1737 (1ull << IB_USER_VERBS_CMD_DESTROY_SRQ) |
1738 (1ull << IB_USER_VERBS_CMD_POST_SRQ_RECV);
1739 ibdev->node_type = RDMA_NODE_IB_CA;
1740 ibdev->phys_port_cnt = dd->num_pports;
1741 ibdev->num_comp_vectors = 1;
1742 ibdev->dma_device = &dd->pcidev->dev;
Harish Chegondi0aeddea2016-01-22 12:56:40 -08001743 ibdev->query_device = NULL;
Ralph Campbellf9315512010-05-23 21:44:54 -07001744 ibdev->modify_device = qib_modify_device;
1745 ibdev->query_port = qib_query_port;
1746 ibdev->modify_port = qib_modify_port;
Harish Chegondi76fec3e2016-01-22 12:56:21 -08001747 ibdev->query_pkey = NULL;
Ralph Campbellf9315512010-05-23 21:44:54 -07001748 ibdev->query_gid = qib_query_gid;
Harish Chegondi1da0f7e2016-01-22 12:56:33 -08001749 ibdev->alloc_ucontext = NULL;
1750 ibdev->dealloc_ucontext = NULL;
Dennis Dalessandrof44728d2016-01-22 12:44:44 -08001751 ibdev->alloc_pd = NULL;
1752 ibdev->dealloc_pd = NULL;
Dennis Dalessandro96ab1ac2016-01-22 12:46:07 -08001753 ibdev->create_ah = NULL;
1754 ibdev->destroy_ah = NULL;
1755 ibdev->modify_ah = NULL;
1756 ibdev->query_ah = NULL;
Ralph Campbellf9315512010-05-23 21:44:54 -07001757 ibdev->create_srq = qib_create_srq;
1758 ibdev->modify_srq = qib_modify_srq;
1759 ibdev->query_srq = qib_query_srq;
1760 ibdev->destroy_srq = qib_destroy_srq;
Harish Chegondi47c7ea62016-01-22 12:56:52 -08001761 ibdev->create_qp = NULL;
Ralph Campbellf9315512010-05-23 21:44:54 -07001762 ibdev->modify_qp = qib_modify_qp;
1763 ibdev->query_qp = qib_query_qp;
1764 ibdev->destroy_qp = qib_destroy_qp;
Harish Chegondidb3ef0e2016-01-22 13:07:42 -08001765 ibdev->post_send = NULL;
Harish Chegondia7d34a42016-01-22 13:07:49 -08001766 ibdev->post_recv = NULL;
Ralph Campbellf9315512010-05-23 21:44:54 -07001767 ibdev->post_srq_recv = qib_post_srq_receive;
Harish Chegondi4bb88e52016-01-22 13:07:36 -08001768 ibdev->create_cq = NULL;
1769 ibdev->destroy_cq = NULL;
1770 ibdev->resize_cq = NULL;
1771 ibdev->poll_cq = NULL;
1772 ibdev->req_notify_cq = NULL;
Dennis Dalessandro7c2e11f2016-01-22 12:45:59 -08001773 ibdev->get_dma_mr = NULL;
1774 ibdev->reg_user_mr = NULL;
1775 ibdev->dereg_mr = NULL;
1776 ibdev->alloc_mr = NULL;
1777 ibdev->map_mr_sg = NULL;
1778 ibdev->alloc_fmr = NULL;
1779 ibdev->map_phys_fmr = NULL;
1780 ibdev->unmap_fmr = NULL;
1781 ibdev->dealloc_fmr = NULL;
Harish Chegondi18f6c582016-01-22 13:07:55 -08001782 ibdev->attach_mcast = NULL;
1783 ibdev->detach_mcast = NULL;
Ralph Campbellf9315512010-05-23 21:44:54 -07001784 ibdev->process_mad = qib_process_mad;
Harish Chegondicd182012016-01-22 12:56:14 -08001785 ibdev->mmap = NULL;
Dennis Dalessandroeb636ac2016-01-22 12:44:36 -08001786 ibdev->dma_ops = NULL;
Ira Weiny77386132015-05-13 20:02:58 -04001787 ibdev->get_port_immutable = qib_port_immutable;
Ralph Campbellf9315512010-05-23 21:44:54 -07001788
1789 snprintf(ibdev->node_desc, sizeof(ibdev->node_desc),
Vinit Agnihotrie2eed582013-03-14 18:13:41 +00001790 "Intel Infiniband HCA %s", init_utsname()->nodename);
Ralph Campbellf9315512010-05-23 21:44:54 -07001791
Dennis Dalessandro2dc05ab2016-01-22 12:44:29 -08001792 /*
1793 * Fill in rvt info object.
1794 */
1795 dd->verbs_dev.rdi.driver_f.port_callback = qib_create_port_files;
Dennis Dalessandro6a9df402016-01-22 12:45:20 -08001796 dd->verbs_dev.rdi.driver_f.get_card_name = qib_get_card_name;
1797 dd->verbs_dev.rdi.driver_f.get_pci_dev = qib_get_pci_dev;
Dennis Dalessandro96ab1ac2016-01-22 12:46:07 -08001798 dd->verbs_dev.rdi.driver_f.check_ah = qib_check_ah;
Harish Chegondi5418a5a2016-01-22 12:56:08 -08001799 dd->verbs_dev.rdi.driver_f.notify_new_ah = qib_notify_new_ah;
Harish Chegondi47c7ea62016-01-22 12:56:52 -08001800 dd->verbs_dev.rdi.driver_f.alloc_qpn = alloc_qpn;
1801 dd->verbs_dev.rdi.driver_f.qp_priv_alloc = qp_priv_alloc;
1802 dd->verbs_dev.rdi.driver_f.qp_priv_free = qp_priv_free;
1803 dd->verbs_dev.rdi.driver_f.free_all_qps = qib_free_all_qps;
1804 dd->verbs_dev.rdi.driver_f.notify_qp_reset = notify_qp_reset;
Harish Chegondidb3ef0e2016-01-22 13:07:42 -08001805 dd->verbs_dev.rdi.driver_f.do_send = qib_do_send;
1806 dd->verbs_dev.rdi.driver_f.schedule_send = qib_schedule_send;
Harish Chegondi47c7ea62016-01-22 12:56:52 -08001807
Harish Chegondi4bb88e52016-01-22 13:07:36 -08001808 dd->verbs_dev.rdi.flags = 0;
Harish Chegondi47c7ea62016-01-22 12:56:52 -08001809
Dennis Dalessandro7c2e11f2016-01-22 12:45:59 -08001810 dd->verbs_dev.rdi.dparms.lkey_table_size = qib_lkey_table_size;
Harish Chegondi47c7ea62016-01-22 12:56:52 -08001811 dd->verbs_dev.rdi.dparms.qp_table_size = ib_qib_qp_table_size;
1812 dd->verbs_dev.rdi.dparms.qpn_start = 1;
1813 dd->verbs_dev.rdi.dparms.qpn_res_start = QIB_KD_QP;
1814 dd->verbs_dev.rdi.dparms.qpn_res_end = QIB_KD_QP; /* Reserve one QP */
1815 dd->verbs_dev.rdi.dparms.qpn_inc = 1;
1816 dd->verbs_dev.rdi.dparms.qos_shift = 1;
Harish Chegondi76fec3e2016-01-22 12:56:21 -08001817 dd->verbs_dev.rdi.dparms.nports = dd->num_pports;
1818 dd->verbs_dev.rdi.dparms.npkeys = qib_get_npkeys(dd);
Harish Chegondi4bb88e52016-01-22 13:07:36 -08001819 dd->verbs_dev.rdi.dparms.node = dd->assigned_node_id;
1820 snprintf(dd->verbs_dev.rdi.dparms.cq_name,
1821 sizeof(dd->verbs_dev.rdi.dparms.cq_name),
1822 "qib_cq%d", dd->unit);
Harish Chegondi76fec3e2016-01-22 12:56:21 -08001823
Harish Chegondi0aeddea2016-01-22 12:56:40 -08001824 qib_fill_device_attr(dd);
1825
Harish Chegondi76fec3e2016-01-22 12:56:21 -08001826 ppd = dd->pport;
1827 for (i = 0; i < dd->num_pports; i++, ppd++) {
1828 ctxt = ppd->hw_pidx;
1829 rvt_init_port(&dd->verbs_dev.rdi,
1830 &ppd->ibport_data.rvp,
1831 i,
1832 dd->rcd[ctxt]->pkeys);
1833 }
Dennis Dalessandro2dc05ab2016-01-22 12:44:29 -08001834
1835 ret = rvt_register_device(&dd->verbs_dev.rdi);
Ralph Campbellf9315512010-05-23 21:44:54 -07001836 if (ret)
Dennis Dalessandro5196aa92016-01-22 13:07:30 -08001837 goto err_tx;
Ralph Campbellf9315512010-05-23 21:44:54 -07001838
Mike Marciniszync9bdad32013-03-28 18:17:20 +00001839 ret = qib_verbs_register_sysfs(dd);
1840 if (ret)
Ralph Campbellf9315512010-05-23 21:44:54 -07001841 goto err_class;
1842
Dennis Dalessandro5196aa92016-01-22 13:07:30 -08001843 return ret;
Ralph Campbellf9315512010-05-23 21:44:54 -07001844
1845err_class:
Dennis Dalessandro2dc05ab2016-01-22 12:44:29 -08001846 rvt_unregister_device(&dd->verbs_dev.rdi);
Ralph Campbellf9315512010-05-23 21:44:54 -07001847err_tx:
1848 while (!list_empty(&dev->txreq_free)) {
1849 struct list_head *l = dev->txreq_free.next;
1850 struct qib_verbs_txreq *tx;
1851
1852 list_del(l);
1853 tx = list_entry(l, struct qib_verbs_txreq, txreq.list);
1854 kfree(tx);
1855 }
1856 if (ppd->sdma_descq_cnt)
1857 dma_free_coherent(&dd->pcidev->dev,
1858 ppd->sdma_descq_cnt *
1859 sizeof(struct qib_pio_header),
1860 dev->pio_hdrs, dev->pio_hdrs_phys);
1861err_hdrs:
Ralph Campbellf9315512010-05-23 21:44:54 -07001862 qib_dev_err(dd, "cannot register verbs: %d!\n", -ret);
Ralph Campbellf9315512010-05-23 21:44:54 -07001863 return ret;
1864}
1865
1866void qib_unregister_ib_device(struct qib_devdata *dd)
1867{
1868 struct qib_ibdev *dev = &dd->verbs_dev;
Ralph Campbellf9315512010-05-23 21:44:54 -07001869
1870 qib_verbs_unregister_sysfs(dd);
1871
Dennis Dalessandro2dc05ab2016-01-22 12:44:29 -08001872 rvt_unregister_device(&dd->verbs_dev.rdi);
Ralph Campbellf9315512010-05-23 21:44:54 -07001873
1874 if (!list_empty(&dev->piowait))
1875 qib_dev_err(dd, "piowait list not empty!\n");
1876 if (!list_empty(&dev->dmawait))
1877 qib_dev_err(dd, "dmawait list not empty!\n");
1878 if (!list_empty(&dev->txwait))
1879 qib_dev_err(dd, "txwait list not empty!\n");
1880 if (!list_empty(&dev->memwait))
1881 qib_dev_err(dd, "memwait list not empty!\n");
Ralph Campbellf9315512010-05-23 21:44:54 -07001882
Ralph Campbellf9315512010-05-23 21:44:54 -07001883 del_timer_sync(&dev->mem_timer);
Ralph Campbellf9315512010-05-23 21:44:54 -07001884 while (!list_empty(&dev->txreq_free)) {
1885 struct list_head *l = dev->txreq_free.next;
1886 struct qib_verbs_txreq *tx;
1887
1888 list_del(l);
1889 tx = list_entry(l, struct qib_verbs_txreq, txreq.list);
1890 kfree(tx);
1891 }
1892 if (dd->pport->sdma_descq_cnt)
1893 dma_free_coherent(&dd->pcidev->dev,
1894 dd->pport->sdma_descq_cnt *
1895 sizeof(struct qib_pio_header),
1896 dev->pio_hdrs, dev->pio_hdrs_phys);
Ralph Campbellf9315512010-05-23 21:44:54 -07001897}
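
/*
 * Illustrative pairing sketch (hypothetical, guarded out): the chip
 * init/teardown code is expected to call these two entry points in
 * this order around the device's lifetime.
 */
#if 0
static int qib_example_verbs_lifetime(struct qib_devdata *dd)
{
	int ret = qib_register_ib_device(dd);

	if (ret)
		return ret;
	/* ... device is live; fastpath and MAD traffic run here ... */
	qib_unregister_ib_device(dd);
	return 0;
}
#endif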
Mike Marciniszyn551ace12012-07-19 13:03:56 +00001898
1899/*
1900 * This must be called with the QP s_lock held.
1901 */
Dennis Dalessandro7c2e11f2016-01-22 12:45:59 -08001902void qib_schedule_send(struct rvt_qp *qp)
Mike Marciniszyn551ace12012-07-19 13:03:56 +00001903{
Dennis Dalessandroffc26902016-01-22 12:45:11 -08001904 struct qib_qp_priv *priv = qp->priv;
Mike Marciniszyn551ace12012-07-19 13:03:56 +00001905 if (qib_send_ok(qp)) {
1906 struct qib_ibport *ibp =
1907 to_iport(qp->ibqp.device, qp->port_num);
1908 struct qib_pportdata *ppd = ppd_from_ibp(ibp);
1909
Dennis Dalessandroffc26902016-01-22 12:45:11 -08001910 queue_work(ppd->qib_wq, &priv->s_work);
Mike Marciniszyn551ace12012-07-19 13:03:56 +00001911 }
1912}
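
/*
 * Illustrative sketch (guarded out): the work item queued above is set
 * up when the QP's private data is allocated, so the per-port
 * workqueue ends up running the driver's send engine.  The worker name
 * and the priv->owner back-pointer used here are assumptions for the
 * sake of the example, as is qib_do_send() taking the rvt_qp the way
 * qib_schedule_send() above does.
 */
#if 0
static void qib_example_send_worker(struct work_struct *work)
{
	struct qib_qp_priv *priv =
		container_of(work, struct qib_qp_priv, s_work);

	qib_do_send(priv->owner);
}

static void qib_example_init_send_work(struct qib_qp_priv *priv)
{
	INIT_WORK(&priv->s_work, qib_example_send_worker);
}
#endif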