1/*
2 * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation.
3 * All rights reserved.
4 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
5 *
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
11 *
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
14 * conditions are met:
15 *
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
18 * disclaimer.
19 *
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
24 *
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 * SOFTWARE.
33 */
34
35#include <rdma/ib_mad.h>
36#include <rdma/ib_user_verbs.h>
37#include <linux/io.h>
38#include <linux/module.h>
 39#include <linux/utsname.h>
40#include <linux/rculist.h>
41#include <linux/mm.h>
42#include <linux/random.h>
 43
44#include "qib.h"
45#include "qib_common.h"
46
47static unsigned int ib_qib_qp_table_size = 256;
 48module_param_named(qp_table_size, ib_qib_qp_table_size, uint, S_IRUGO);
49MODULE_PARM_DESC(qp_table_size, "QP table size");
50
51unsigned int ib_qib_lkey_table_size = 16;
52module_param_named(lkey_table_size, ib_qib_lkey_table_size, uint,
53 S_IRUGO);
54MODULE_PARM_DESC(lkey_table_size,
55 "LKEY table size in bits (2^n, 1 <= n <= 23)");
56
57static unsigned int ib_qib_max_pds = 0xFFFF;
58module_param_named(max_pds, ib_qib_max_pds, uint, S_IRUGO);
59MODULE_PARM_DESC(max_pds,
60 "Maximum number of protection domains to support");
61
62static unsigned int ib_qib_max_ahs = 0xFFFF;
63module_param_named(max_ahs, ib_qib_max_ahs, uint, S_IRUGO);
64MODULE_PARM_DESC(max_ahs, "Maximum number of address handles to support");
65
66unsigned int ib_qib_max_cqes = 0x2FFFF;
67module_param_named(max_cqes, ib_qib_max_cqes, uint, S_IRUGO);
68MODULE_PARM_DESC(max_cqes,
69 "Maximum number of completion queue entries to support");
70
71unsigned int ib_qib_max_cqs = 0x1FFFF;
72module_param_named(max_cqs, ib_qib_max_cqs, uint, S_IRUGO);
73MODULE_PARM_DESC(max_cqs, "Maximum number of completion queues to support");
74
75unsigned int ib_qib_max_qp_wrs = 0x3FFF;
76module_param_named(max_qp_wrs, ib_qib_max_qp_wrs, uint, S_IRUGO);
77MODULE_PARM_DESC(max_qp_wrs, "Maximum number of QP WRs to support");
78
79unsigned int ib_qib_max_qps = 16384;
80module_param_named(max_qps, ib_qib_max_qps, uint, S_IRUGO);
81MODULE_PARM_DESC(max_qps, "Maximum number of QPs to support");
82
83unsigned int ib_qib_max_sges = 0x60;
84module_param_named(max_sges, ib_qib_max_sges, uint, S_IRUGO);
85MODULE_PARM_DESC(max_sges, "Maximum number of SGEs to support");
86
87unsigned int ib_qib_max_mcast_grps = 16384;
88module_param_named(max_mcast_grps, ib_qib_max_mcast_grps, uint, S_IRUGO);
89MODULE_PARM_DESC(max_mcast_grps,
90 "Maximum number of multicast groups to support");
91
92unsigned int ib_qib_max_mcast_qp_attached = 16;
93module_param_named(max_mcast_qp_attached, ib_qib_max_mcast_qp_attached,
94 uint, S_IRUGO);
95MODULE_PARM_DESC(max_mcast_qp_attached,
96 "Maximum number of attached QPs to support");
97
98unsigned int ib_qib_max_srqs = 1024;
99module_param_named(max_srqs, ib_qib_max_srqs, uint, S_IRUGO);
100MODULE_PARM_DESC(max_srqs, "Maximum number of SRQs to support");
101
102unsigned int ib_qib_max_srq_sges = 128;
103module_param_named(max_srq_sges, ib_qib_max_srq_sges, uint, S_IRUGO);
104MODULE_PARM_DESC(max_srq_sges, "Maximum number of SRQ SGEs to support");
105
106unsigned int ib_qib_max_srq_wrs = 0x1FFFF;
107module_param_named(max_srq_wrs, ib_qib_max_srq_wrs, uint, S_IRUGO);
108MODULE_PARM_DESC(max_srq_wrs, "Maximum number of SRQ WRs to support");
109
110static unsigned int ib_qib_disable_sma;
111module_param_named(disable_sma, ib_qib_disable_sma, uint, S_IWUSR | S_IRUGO);
112MODULE_PARM_DESC(disable_sma, "Disable the SMA");
113
114/*
115 * Note that it is OK to post send work requests in the SQE and ERR
116 * states; qib_do_send() will process them and generate error
117 * completions as per IB 1.2 C10-96.
118 */
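/*
 * Each entry is a bitmask of the operations permitted in that QP state;
 * callers test e.g. ib_qib_state_ops[qp->state] & QIB_POST_SEND_OK.
 */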
119const int ib_qib_state_ops[IB_QPS_ERR + 1] = {
120 [IB_QPS_RESET] = 0,
121 [IB_QPS_INIT] = QIB_POST_RECV_OK,
122 [IB_QPS_RTR] = QIB_POST_RECV_OK | QIB_PROCESS_RECV_OK,
123 [IB_QPS_RTS] = QIB_POST_RECV_OK | QIB_PROCESS_RECV_OK |
124 QIB_POST_SEND_OK | QIB_PROCESS_SEND_OK |
125 QIB_PROCESS_NEXT_SEND_OK,
126 [IB_QPS_SQD] = QIB_POST_RECV_OK | QIB_PROCESS_RECV_OK |
127 QIB_POST_SEND_OK | QIB_PROCESS_SEND_OK,
128 [IB_QPS_SQE] = QIB_POST_RECV_OK | QIB_PROCESS_RECV_OK |
129 QIB_POST_SEND_OK | QIB_FLUSH_SEND,
130 [IB_QPS_ERR] = QIB_POST_RECV_OK | QIB_FLUSH_RECV |
131 QIB_POST_SEND_OK | QIB_FLUSH_SEND,
132};
133
134struct qib_ucontext {
135 struct ib_ucontext ibucontext;
136};
137
138static inline struct qib_ucontext *to_iucontext(struct ib_ucontext
139 *ibucontext)
140{
141 return container_of(ibucontext, struct qib_ucontext, ibucontext);
142}
143
144/*
145 * Translate ib_wr_opcode into ib_wc_opcode.
146 */
147const enum ib_wc_opcode ib_qib_wc_opcode[] = {
148 [IB_WR_RDMA_WRITE] = IB_WC_RDMA_WRITE,
149 [IB_WR_RDMA_WRITE_WITH_IMM] = IB_WC_RDMA_WRITE,
150 [IB_WR_SEND] = IB_WC_SEND,
151 [IB_WR_SEND_WITH_IMM] = IB_WC_SEND,
152 [IB_WR_RDMA_READ] = IB_WC_RDMA_READ,
153 [IB_WR_ATOMIC_CMP_AND_SWP] = IB_WC_COMP_SWAP,
154 [IB_WR_ATOMIC_FETCH_AND_ADD] = IB_WC_FETCH_ADD
155};
156
157/*
158 * System image GUID.
159 */
160__be64 ib_qib_sys_image_guid;
161
162/**
163 * qib_copy_sge - copy data to SGE memory
164 * @ss: the SGE state
165 * @data: the data to copy
166 * @length: the length of the data
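 * @release: if true, drop the MR reference once an SGE is fully consumed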
167 */
168void qib_copy_sge(struct qib_sge_state *ss, void *data, u32 length, int release)
169{
170 struct qib_sge *sge = &ss->sge;
171
172 while (length) {
173 u32 len = sge->length;
174
175 if (len > length)
176 len = length;
177 if (len > sge->sge_length)
178 len = sge->sge_length;
179 BUG_ON(len == 0);
180 memcpy(sge->vaddr, data, len);
181 sge->vaddr += len;
182 sge->length -= len;
183 sge->sge_length -= len;
184 if (sge->sge_length == 0) {
185 if (release)
186 atomic_dec(&sge->mr->refcount);
187 if (--ss->num_sge)
188 *sge = *ss->sg_list++;
189 } else if (sge->length == 0 && sge->mr->lkey) {
190 if (++sge->n >= QIB_SEGSZ) {
191 if (++sge->m >= sge->mr->mapsz)
192 break;
193 sge->n = 0;
194 }
195 sge->vaddr =
196 sge->mr->map[sge->m]->segs[sge->n].vaddr;
197 sge->length =
198 sge->mr->map[sge->m]->segs[sge->n].length;
199 }
200 data += len;
201 length -= len;
202 }
203}
204
205/**
206 * qib_skip_sge - skip over SGE memory - XXX almost dup of prev func
207 * @ss: the SGE state
208 * @length: the number of bytes to skip
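 * @release: if true, drop the MR reference once an SGE is fully consumed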
209 */
210void qib_skip_sge(struct qib_sge_state *ss, u32 length, int release)
211{
212 struct qib_sge *sge = &ss->sge;
213
214 while (length) {
215 u32 len = sge->length;
216
217 if (len > length)
218 len = length;
219 if (len > sge->sge_length)
220 len = sge->sge_length;
221 BUG_ON(len == 0);
222 sge->vaddr += len;
223 sge->length -= len;
224 sge->sge_length -= len;
225 if (sge->sge_length == 0) {
226 if (release)
227 atomic_dec(&sge->mr->refcount);
228 if (--ss->num_sge)
229 *sge = *ss->sg_list++;
230 } else if (sge->length == 0 && sge->mr->lkey) {
231 if (++sge->n >= QIB_SEGSZ) {
232 if (++sge->m >= sge->mr->mapsz)
233 break;
234 sge->n = 0;
235 }
236 sge->vaddr =
237 sge->mr->map[sge->m]->segs[sge->n].vaddr;
238 sge->length =
239 sge->mr->map[sge->m]->segs[sge->n].length;
240 }
241 length -= len;
242 }
243}
244
245/*
246 * Count the number of DMA descriptors needed to send length bytes of data.
247 * Don't modify the qib_sge_state to get the count.
248 * Return zero if any of the segments is not aligned.
249 */
250static u32 qib_count_sge(struct qib_sge_state *ss, u32 length)
251{
252 struct qib_sge *sg_list = ss->sg_list;
253 struct qib_sge sge = ss->sge;
254 u8 num_sge = ss->num_sge;
255 u32 ndesc = 1; /* count the header */
256
257 while (length) {
258 u32 len = sge.length;
259
260 if (len > length)
261 len = length;
262 if (len > sge.sge_length)
263 len = sge.sge_length;
264 BUG_ON(len == 0);
265 if (((long) sge.vaddr & (sizeof(u32) - 1)) ||
266 (len != length && (len & (sizeof(u32) - 1)))) {
267 ndesc = 0;
268 break;
269 }
270 ndesc++;
271 sge.vaddr += len;
272 sge.length -= len;
273 sge.sge_length -= len;
274 if (sge.sge_length == 0) {
275 if (--num_sge)
276 sge = *sg_list++;
277 } else if (sge.length == 0 && sge.mr->lkey) {
278 if (++sge.n >= QIB_SEGSZ) {
279 if (++sge.m >= sge.mr->mapsz)
280 break;
281 sge.n = 0;
282 }
283 sge.vaddr =
284 sge.mr->map[sge.m]->segs[sge.n].vaddr;
285 sge.length =
286 sge.mr->map[sge.m]->segs[sge.n].length;
287 }
288 length -= len;
289 }
290 return ndesc;
291}
292
293/*
294 * Copy from the SGEs to the data buffer.
295 */
296static void qib_copy_from_sge(void *data, struct qib_sge_state *ss, u32 length)
297{
298 struct qib_sge *sge = &ss->sge;
299
300 while (length) {
301 u32 len = sge->length;
302
303 if (len > length)
304 len = length;
305 if (len > sge->sge_length)
306 len = sge->sge_length;
307 BUG_ON(len == 0);
308 memcpy(data, sge->vaddr, len);
309 sge->vaddr += len;
310 sge->length -= len;
311 sge->sge_length -= len;
312 if (sge->sge_length == 0) {
313 if (--ss->num_sge)
314 *sge = *ss->sg_list++;
315 } else if (sge->length == 0 && sge->mr->lkey) {
316 if (++sge->n >= QIB_SEGSZ) {
317 if (++sge->m >= sge->mr->mapsz)
318 break;
319 sge->n = 0;
320 }
321 sge->vaddr =
322 sge->mr->map[sge->m]->segs[sge->n].vaddr;
323 sge->length =
324 sge->mr->map[sge->m]->segs[sge->n].length;
325 }
326 data += len;
327 length -= len;
328 }
329}
330
331/**
332 * qib_post_one_send - post one RC, UC, or UD send work request
333 * @qp: the QP to post on
334 * @wr: the work request to send
335 */
336static int qib_post_one_send(struct qib_qp *qp, struct ib_send_wr *wr)
337{
338 struct qib_swqe *wqe;
339 u32 next;
340 int i;
341 int j;
342 int acc;
343 int ret;
344 unsigned long flags;
345 struct qib_lkey_table *rkt;
346 struct qib_pd *pd;
347
348 spin_lock_irqsave(&qp->s_lock, flags);
349
350 /* Check that state is OK to post send. */
351 if (unlikely(!(ib_qib_state_ops[qp->state] & QIB_POST_SEND_OK)))
352 goto bail_inval;
353
354 /* IB spec says that num_sge == 0 is OK. */
355 if (wr->num_sge > qp->s_max_sge)
356 goto bail_inval;
357
358 /*
 359 * Don't allow RDMA reads or atomic operations on UC QPs, or any
 360 * undefined opcodes.
361 * Make sure buffer is large enough to hold the result for atomics.
362 */
363 if (wr->opcode == IB_WR_FAST_REG_MR) {
364 if (qib_fast_reg_mr(qp, wr))
365 goto bail_inval;
366 } else if (qp->ibqp.qp_type == IB_QPT_UC) {
367 if ((unsigned) wr->opcode >= IB_WR_RDMA_READ)
368 goto bail_inval;
369 } else if (qp->ibqp.qp_type != IB_QPT_RC) {
370 /* Check IB_QPT_SMI, IB_QPT_GSI, IB_QPT_UD opcode */
371 if (wr->opcode != IB_WR_SEND &&
372 wr->opcode != IB_WR_SEND_WITH_IMM)
373 goto bail_inval;
374 /* Check UD destination address PD */
375 if (qp->ibqp.pd != wr->wr.ud.ah->pd)
376 goto bail_inval;
377 } else if ((unsigned) wr->opcode > IB_WR_ATOMIC_FETCH_AND_ADD)
378 goto bail_inval;
379 else if (wr->opcode >= IB_WR_ATOMIC_CMP_AND_SWP &&
380 (wr->num_sge == 0 ||
381 wr->sg_list[0].length < sizeof(u64) ||
382 wr->sg_list[0].addr & (sizeof(u64) - 1)))
383 goto bail_inval;
384 else if (wr->opcode >= IB_WR_RDMA_READ && !qp->s_max_rd_atomic)
385 goto bail_inval;
386
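	/*
	 * Advance the circular send queue head; if the next slot would
	 * collide with s_last, the queue is full and we return -ENOMEM.
	 */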
387 next = qp->s_head + 1;
388 if (next >= qp->s_size)
389 next = 0;
390 if (next == qp->s_last) {
391 ret = -ENOMEM;
392 goto bail;
393 }
394
395 rkt = &to_idev(qp->ibqp.device)->lk_table;
396 pd = to_ipd(qp->ibqp.pd);
397 wqe = get_swqe_ptr(qp, qp->s_head);
398 wqe->wr = *wr;
399 wqe->length = 0;
400 j = 0;
401 if (wr->num_sge) {
402 acc = wr->opcode >= IB_WR_RDMA_READ ?
403 IB_ACCESS_LOCAL_WRITE : 0;
404 for (i = 0; i < wr->num_sge; i++) {
405 u32 length = wr->sg_list[i].length;
406 int ok;
407
408 if (length == 0)
409 continue;
410 ok = qib_lkey_ok(rkt, pd, &wqe->sg_list[j],
411 &wr->sg_list[i], acc);
412 if (!ok)
413 goto bail_inval_free;
414 wqe->length += length;
415 j++;
416 }
417 wqe->wr.num_sge = j;
418 }
419 if (qp->ibqp.qp_type == IB_QPT_UC ||
420 qp->ibqp.qp_type == IB_QPT_RC) {
421 if (wqe->length > 0x80000000U)
422 goto bail_inval_free;
423 } else if (wqe->length > (dd_from_ibdev(qp->ibqp.device)->pport +
424 qp->port_num - 1)->ibmtu)
425 goto bail_inval_free;
426 else
427 atomic_inc(&to_iah(wr->wr.ud.ah)->refcount);
428 wqe->ssn = qp->s_ssn++;
429 qp->s_head = next;
430
431 ret = 0;
432 goto bail;
433
434bail_inval_free:
435 while (j) {
436 struct qib_sge *sge = &wqe->sg_list[--j];
437
438 atomic_dec(&sge->mr->refcount);
439 }
440bail_inval:
441 ret = -EINVAL;
442bail:
443 spin_unlock_irqrestore(&qp->s_lock, flags);
444 return ret;
445}
446
447/**
448 * qib_post_send - post a send on a QP
449 * @ibqp: the QP to post the send on
450 * @wr: the list of work requests to post
451 * @bad_wr: the first bad WR is put here
452 *
453 * This may be called from interrupt context.
454 */
455static int qib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
456 struct ib_send_wr **bad_wr)
457{
458 struct qib_qp *qp = to_iqp(ibqp);
459 int err = 0;
460
461 for (; wr; wr = wr->next) {
462 err = qib_post_one_send(qp, wr);
463 if (err) {
464 *bad_wr = wr;
465 goto bail;
466 }
467 }
468
469 /* Try to do the send work in the caller's context. */
470 qib_do_send(&qp->s_work);
471
472bail:
473 return err;
474}
475
476/**
477 * qib_post_receive - post a receive on a QP
478 * @ibqp: the QP to post the receive on
479 * @wr: the WR to post
480 * @bad_wr: the first bad WR is put here
481 *
482 * This may be called from interrupt context.
483 */
484static int qib_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
485 struct ib_recv_wr **bad_wr)
486{
487 struct qib_qp *qp = to_iqp(ibqp);
488 struct qib_rwq *wq = qp->r_rq.wq;
489 unsigned long flags;
490 int ret;
491
492 /* Check that state is OK to post receive. */
493 if (!(ib_qib_state_ops[qp->state] & QIB_POST_RECV_OK) || !wq) {
494 *bad_wr = wr;
495 ret = -EINVAL;
496 goto bail;
497 }
498
499 for (; wr; wr = wr->next) {
500 struct qib_rwqe *wqe;
501 u32 next;
502 int i;
503
504 if ((unsigned) wr->num_sge > qp->r_rq.max_sge) {
505 *bad_wr = wr;
506 ret = -EINVAL;
507 goto bail;
508 }
509
510 spin_lock_irqsave(&qp->r_rq.lock, flags);
511 next = wq->head + 1;
512 if (next >= qp->r_rq.size)
513 next = 0;
514 if (next == wq->tail) {
515 spin_unlock_irqrestore(&qp->r_rq.lock, flags);
516 *bad_wr = wr;
517 ret = -ENOMEM;
518 goto bail;
519 }
520
521 wqe = get_rwqe_ptr(&qp->r_rq, wq->head);
522 wqe->wr_id = wr->wr_id;
523 wqe->num_sge = wr->num_sge;
524 for (i = 0; i < wr->num_sge; i++)
525 wqe->sg_list[i] = wr->sg_list[i];
526 /* Make sure queue entry is written before the head index. */
527 smp_wmb();
528 wq->head = next;
529 spin_unlock_irqrestore(&qp->r_rq.lock, flags);
530 }
531 ret = 0;
532
533bail:
534 return ret;
535}
536
537/**
 538 * qib_qp_rcv - process an incoming packet on a QP
539 * @rcd: the context pointer
540 * @hdr: the packet header
541 * @has_grh: true if the packet has a GRH
542 * @data: the packet data
543 * @tlen: the packet length
544 * @qp: the QP the packet came on
545 *
546 * This is called from qib_ib_rcv() to process an incoming packet
547 * for the given QP.
548 * Called at interrupt level.
549 */
550static void qib_qp_rcv(struct qib_ctxtdata *rcd, struct qib_ib_header *hdr,
551 int has_grh, void *data, u32 tlen, struct qib_qp *qp)
552{
553 struct qib_ibport *ibp = &rcd->ppd->ibport_data;
554
555 spin_lock(&qp->r_lock);
 556
 557 /* Check for valid receive state. */
 558 if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK)) {
 559 ibp->n_pkt_drops++;
 560 goto unlock;
 561 }
562
563 switch (qp->ibqp.qp_type) {
564 case IB_QPT_SMI:
565 case IB_QPT_GSI:
566 if (ib_qib_disable_sma)
567 break;
568 /* FALLTHROUGH */
569 case IB_QPT_UD:
570 qib_ud_rcv(ibp, hdr, has_grh, data, tlen, qp);
571 break;
572
573 case IB_QPT_RC:
574 qib_rc_rcv(rcd, hdr, has_grh, data, tlen, qp);
575 break;
576
577 case IB_QPT_UC:
578 qib_uc_rcv(ibp, hdr, has_grh, data, tlen, qp);
579 break;
580
581 default:
582 break;
583 }
584
 585unlock:
 586 spin_unlock(&qp->r_lock);
587}
588
589/**
590 * qib_ib_rcv - process an incoming packet
591 * @rcd: the context pointer
592 * @rhdr: the header of the packet
593 * @data: the packet payload
594 * @tlen: the packet length
595 *
596 * This is called from qib_kreceive() to process an incoming packet at
597 * interrupt level. Tlen is the length of the header + data + CRC in bytes.
598 */
599void qib_ib_rcv(struct qib_ctxtdata *rcd, void *rhdr, void *data, u32 tlen)
600{
601 struct qib_pportdata *ppd = rcd->ppd;
602 struct qib_ibport *ibp = &ppd->ibport_data;
603 struct qib_ib_header *hdr = rhdr;
604 struct qib_other_headers *ohdr;
605 struct qib_qp *qp;
606 u32 qp_num;
607 int lnh;
608 u8 opcode;
609 u16 lid;
610
611 /* 24 == LRH+BTH+CRC */
612 if (unlikely(tlen < 24))
613 goto drop;
614
615 /* Check for a valid destination LID (see ch. 7.11.1). */
616 lid = be16_to_cpu(hdr->lrh[1]);
617 if (lid < QIB_MULTICAST_LID_BASE) {
618 lid &= ~((1 << ppd->lmc) - 1);
619 if (unlikely(lid != ppd->lid))
620 goto drop;
621 }
622
623 /* Check for GRH */
624 lnh = be16_to_cpu(hdr->lrh[0]) & 3;
625 if (lnh == QIB_LRH_BTH)
626 ohdr = &hdr->u.oth;
627 else if (lnh == QIB_LRH_GRH) {
628 u32 vtf;
629
630 ohdr = &hdr->u.l.oth;
631 if (hdr->u.l.grh.next_hdr != IB_GRH_NEXT_HDR)
632 goto drop;
633 vtf = be32_to_cpu(hdr->u.l.grh.version_tclass_flow);
634 if ((vtf >> IB_GRH_VERSION_SHIFT) != IB_GRH_VERSION)
635 goto drop;
636 } else
637 goto drop;
638
639 opcode = be32_to_cpu(ohdr->bth[0]) >> 24;
640 ibp->opstats[opcode & 0x7f].n_bytes += tlen;
641 ibp->opstats[opcode & 0x7f].n_packets++;
642
643 /* Get the destination QP number. */
644 qp_num = be32_to_cpu(ohdr->bth[1]) & QIB_QPN_MASK;
645 if (qp_num == QIB_MULTICAST_QPN) {
646 struct qib_mcast *mcast;
647 struct qib_mcast_qp *p;
648
649 if (lnh != QIB_LRH_GRH)
650 goto drop;
651 mcast = qib_mcast_find(ibp, &hdr->u.l.grh.dgid);
652 if (mcast == NULL)
653 goto drop;
654 ibp->n_multicast_rcv++;
655 list_for_each_entry_rcu(p, &mcast->qp_list, list)
656 qib_qp_rcv(rcd, hdr, 1, data, tlen, p->qp);
657 /*
658 * Notify qib_multicast_detach() if it is waiting for us
659 * to finish.
660 */
661 if (atomic_dec_return(&mcast->refcount) <= 1)
662 wake_up(&mcast->wait);
663 } else {
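		/*
		 * Unicast path: the receive context caches the last QP it
		 * looked up (rcd->lookaside_qp) so back-to-back packets for
		 * the same QPN skip the QPN table lookup; the cached
		 * reference is dropped when a different QPN arrives.
		 */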
664 if (rcd->lookaside_qp) {
665 if (rcd->lookaside_qpn != qp_num) {
666 if (atomic_dec_and_test(
667 &rcd->lookaside_qp->refcount))
668 wake_up(
669 &rcd->lookaside_qp->wait);
670 rcd->lookaside_qp = NULL;
671 }
672 }
673 if (!rcd->lookaside_qp) {
674 qp = qib_lookup_qpn(ibp, qp_num);
675 if (!qp)
676 goto drop;
677 rcd->lookaside_qp = qp;
678 rcd->lookaside_qpn = qp_num;
679 } else
680 qp = rcd->lookaside_qp;
681 ibp->n_unicast_rcv++;
 682 qib_qp_rcv(rcd, hdr, lnh == QIB_LRH_GRH, data, tlen, qp);
 683 }
684 return;
685
686drop:
687 ibp->n_pkt_drops++;
688}
689
690/*
691 * This is called from a timer to check for QPs
692 * which need kernel memory in order to send a packet.
693 */
694static void mem_timer(unsigned long data)
695{
696 struct qib_ibdev *dev = (struct qib_ibdev *) data;
697 struct list_head *list = &dev->memwait;
698 struct qib_qp *qp = NULL;
699 unsigned long flags;
700
701 spin_lock_irqsave(&dev->pending_lock, flags);
702 if (!list_empty(list)) {
703 qp = list_entry(list->next, struct qib_qp, iowait);
704 list_del_init(&qp->iowait);
705 atomic_inc(&qp->refcount);
706 if (!list_empty(list))
707 mod_timer(&dev->mem_timer, jiffies + 1);
708 }
709 spin_unlock_irqrestore(&dev->pending_lock, flags);
710
711 if (qp) {
712 spin_lock_irqsave(&qp->s_lock, flags);
713 if (qp->s_flags & QIB_S_WAIT_KMEM) {
714 qp->s_flags &= ~QIB_S_WAIT_KMEM;
715 qib_schedule_send(qp);
716 }
717 spin_unlock_irqrestore(&qp->s_lock, flags);
718 if (atomic_dec_and_test(&qp->refcount))
719 wake_up(&qp->wait);
720 }
721}
722
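/*
 * Advance the SGE state by @length bytes, stepping to the next SGE or
 * to the next segment of the current MR map when one is exhausted.
 */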
723static void update_sge(struct qib_sge_state *ss, u32 length)
724{
725 struct qib_sge *sge = &ss->sge;
726
727 sge->vaddr += length;
728 sge->length -= length;
729 sge->sge_length -= length;
730 if (sge->sge_length == 0) {
731 if (--ss->num_sge)
732 *sge = *ss->sg_list++;
733 } else if (sge->length == 0 && sge->mr->lkey) {
734 if (++sge->n >= QIB_SEGSZ) {
735 if (++sge->m >= sge->mr->mapsz)
736 return;
737 sge->n = 0;
738 }
739 sge->vaddr = sge->mr->map[sge->m]->segs[sge->n].vaddr;
740 sge->length = sge->mr->map[sge->m]->segs[sge->n].length;
741 }
742}
743
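/*
 * Helpers used by copy_io() to assemble unaligned payload bytes into
 * 32-bit words; the little- and big-endian variants below keep the
 * byte lanes correct on either host byte order.
 */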
744#ifdef __LITTLE_ENDIAN
745static inline u32 get_upper_bits(u32 data, u32 shift)
746{
747 return data >> shift;
748}
749
750static inline u32 set_upper_bits(u32 data, u32 shift)
751{
752 return data << shift;
753}
754
755static inline u32 clear_upper_bytes(u32 data, u32 n, u32 off)
756{
757 data <<= ((sizeof(u32) - n) * BITS_PER_BYTE);
758 data >>= ((sizeof(u32) - n - off) * BITS_PER_BYTE);
759 return data;
760}
761#else
762static inline u32 get_upper_bits(u32 data, u32 shift)
763{
764 return data << shift;
765}
766
767static inline u32 set_upper_bits(u32 data, u32 shift)
768{
769 return data >> shift;
770}
771
772static inline u32 clear_upper_bytes(u32 data, u32 n, u32 off)
773{
774 data >>= ((sizeof(u32) - n) * BITS_PER_BYTE);
775 data <<= ((sizeof(u32) - n - off) * BITS_PER_BYTE);
776 return data;
777}
778#endif
779
780static void copy_io(u32 __iomem *piobuf, struct qib_sge_state *ss,
781 u32 length, unsigned flush_wc)
782{
783 u32 extra = 0;
784 u32 data = 0;
785 u32 last;
786
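	/*
	 * Pack the (possibly unaligned) SGE data into aligned 32-bit PIO
	 * writes, carrying partial bytes across iterations in 'data' and
	 * 'extra', and holding back the final dword in 'last' so it can be
	 * written as the trigger word after any required write-combining
	 * flush.
	 */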
787 while (1) {
788 u32 len = ss->sge.length;
789 u32 off;
790
791 if (len > length)
792 len = length;
793 if (len > ss->sge.sge_length)
794 len = ss->sge.sge_length;
795 BUG_ON(len == 0);
796 /* If the source address is not aligned, try to align it. */
797 off = (unsigned long)ss->sge.vaddr & (sizeof(u32) - 1);
798 if (off) {
799 u32 *addr = (u32 *)((unsigned long)ss->sge.vaddr &
800 ~(sizeof(u32) - 1));
801 u32 v = get_upper_bits(*addr, off * BITS_PER_BYTE);
802 u32 y;
803
804 y = sizeof(u32) - off;
805 if (len > y)
806 len = y;
807 if (len + extra >= sizeof(u32)) {
808 data |= set_upper_bits(v, extra *
809 BITS_PER_BYTE);
810 len = sizeof(u32) - extra;
811 if (len == length) {
812 last = data;
813 break;
814 }
815 __raw_writel(data, piobuf);
816 piobuf++;
817 extra = 0;
818 data = 0;
819 } else {
820 /* Clear unused upper bytes */
821 data |= clear_upper_bytes(v, len, extra);
822 if (len == length) {
823 last = data;
824 break;
825 }
826 extra += len;
827 }
828 } else if (extra) {
829 /* Source address is aligned. */
830 u32 *addr = (u32 *) ss->sge.vaddr;
831 int shift = extra * BITS_PER_BYTE;
832 int ushift = 32 - shift;
833 u32 l = len;
834
835 while (l >= sizeof(u32)) {
836 u32 v = *addr;
837
838 data |= set_upper_bits(v, shift);
839 __raw_writel(data, piobuf);
840 data = get_upper_bits(v, ushift);
841 piobuf++;
842 addr++;
843 l -= sizeof(u32);
844 }
845 /*
846 * We still have 'extra' number of bytes leftover.
847 */
848 if (l) {
849 u32 v = *addr;
850
851 if (l + extra >= sizeof(u32)) {
852 data |= set_upper_bits(v, shift);
853 len -= l + extra - sizeof(u32);
854 if (len == length) {
855 last = data;
856 break;
857 }
858 __raw_writel(data, piobuf);
859 piobuf++;
860 extra = 0;
861 data = 0;
862 } else {
863 /* Clear unused upper bytes */
864 data |= clear_upper_bytes(v, l, extra);
865 if (len == length) {
866 last = data;
867 break;
868 }
869 extra += l;
870 }
871 } else if (len == length) {
872 last = data;
873 break;
874 }
875 } else if (len == length) {
876 u32 w;
877
878 /*
879 * Need to round up for the last dword in the
880 * packet.
881 */
882 w = (len + 3) >> 2;
883 qib_pio_copy(piobuf, ss->sge.vaddr, w - 1);
884 piobuf += w - 1;
885 last = ((u32 *) ss->sge.vaddr)[w - 1];
886 break;
887 } else {
888 u32 w = len >> 2;
889
890 qib_pio_copy(piobuf, ss->sge.vaddr, w);
891 piobuf += w;
892
893 extra = len & (sizeof(u32) - 1);
894 if (extra) {
895 u32 v = ((u32 *) ss->sge.vaddr)[w];
896
897 /* Clear unused upper bytes */
898 data = clear_upper_bytes(v, extra, 0);
899 }
900 }
901 update_sge(ss, len);
902 length -= len;
903 }
904 /* Update address before sending packet. */
905 update_sge(ss, length);
906 if (flush_wc) {
907 /* must flush early everything before trigger word */
908 qib_flush_wc();
909 __raw_writel(last, piobuf);
910 /* be sure trigger word is written */
911 qib_flush_wc();
912 } else
913 __raw_writel(last, piobuf);
914}
915
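/*
 * Get a free verbs tx request structure, or queue the QP on the
 * txwait list and return -EBUSY via *retp if none are available;
 * qib_put_txreq() wakes the first waiting QP when one is freed.
 */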
916static struct qib_verbs_txreq *get_txreq(struct qib_ibdev *dev,
917 struct qib_qp *qp, int *retp)
918{
919 struct qib_verbs_txreq *tx;
920 unsigned long flags;
921
922 spin_lock_irqsave(&qp->s_lock, flags);
923 spin_lock(&dev->pending_lock);
924
925 if (!list_empty(&dev->txreq_free)) {
926 struct list_head *l = dev->txreq_free.next;
927
928 list_del(l);
929 tx = list_entry(l, struct qib_verbs_txreq, txreq.list);
930 *retp = 0;
931 } else {
932 if (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK &&
933 list_empty(&qp->iowait)) {
934 dev->n_txwait++;
935 qp->s_flags |= QIB_S_WAIT_TX;
936 list_add_tail(&qp->iowait, &dev->txwait);
937 }
938 tx = NULL;
939 qp->s_flags &= ~QIB_S_BUSY;
940 *retp = -EBUSY;
941 }
942
943 spin_unlock(&dev->pending_lock);
944 spin_unlock_irqrestore(&qp->s_lock, flags);
945
946 return tx;
947}
948
949void qib_put_txreq(struct qib_verbs_txreq *tx)
950{
951 struct qib_ibdev *dev;
952 struct qib_qp *qp;
953 unsigned long flags;
954
955 qp = tx->qp;
956 dev = to_idev(qp->ibqp.device);
957
958 if (atomic_dec_and_test(&qp->refcount))
959 wake_up(&qp->wait);
960 if (tx->mr) {
961 atomic_dec(&tx->mr->refcount);
962 tx->mr = NULL;
963 }
964 if (tx->txreq.flags & QIB_SDMA_TXREQ_F_FREEBUF) {
965 tx->txreq.flags &= ~QIB_SDMA_TXREQ_F_FREEBUF;
966 dma_unmap_single(&dd_from_dev(dev)->pcidev->dev,
967 tx->txreq.addr, tx->hdr_dwords << 2,
968 DMA_TO_DEVICE);
969 kfree(tx->align_buf);
970 }
971
972 spin_lock_irqsave(&dev->pending_lock, flags);
973
974 /* Put struct back on free list */
975 list_add(&tx->txreq.list, &dev->txreq_free);
976
977 if (!list_empty(&dev->txwait)) {
978 /* Wake up first QP wanting a free struct */
979 qp = list_entry(dev->txwait.next, struct qib_qp, iowait);
980 list_del_init(&qp->iowait);
981 atomic_inc(&qp->refcount);
982 spin_unlock_irqrestore(&dev->pending_lock, flags);
983
984 spin_lock_irqsave(&qp->s_lock, flags);
985 if (qp->s_flags & QIB_S_WAIT_TX) {
986 qp->s_flags &= ~QIB_S_WAIT_TX;
987 qib_schedule_send(qp);
988 }
989 spin_unlock_irqrestore(&qp->s_lock, flags);
990
991 if (atomic_dec_and_test(&qp->refcount))
992 wake_up(&qp->wait);
993 } else
994 spin_unlock_irqrestore(&dev->pending_lock, flags);
995}
996
997/*
998 * This is called when there are send DMA descriptors that might be
999 * available.
1000 *
1001 * This is called with ppd->sdma_lock held.
1002 */
1003void qib_verbs_sdma_desc_avail(struct qib_pportdata *ppd, unsigned avail)
1004{
1005 struct qib_qp *qp, *nqp;
1006 struct qib_qp *qps[20];
1007 struct qib_ibdev *dev;
1008 unsigned i, n;
1009
1010 n = 0;
1011 dev = &ppd->dd->verbs_dev;
1012 spin_lock(&dev->pending_lock);
1013
1014 /* Search wait list for first QP wanting DMA descriptors. */
1015 list_for_each_entry_safe(qp, nqp, &dev->dmawait, iowait) {
1016 if (qp->port_num != ppd->port)
1017 continue;
1018 if (n == ARRAY_SIZE(qps))
1019 break;
1020 if (qp->s_tx->txreq.sg_count > avail)
1021 break;
1022 avail -= qp->s_tx->txreq.sg_count;
1023 list_del_init(&qp->iowait);
1024 atomic_inc(&qp->refcount);
1025 qps[n++] = qp;
1026 }
1027
1028 spin_unlock(&dev->pending_lock);
1029
1030 for (i = 0; i < n; i++) {
1031 qp = qps[i];
1032 spin_lock(&qp->s_lock);
1033 if (qp->s_flags & QIB_S_WAIT_DMA_DESC) {
1034 qp->s_flags &= ~QIB_S_WAIT_DMA_DESC;
1035 qib_schedule_send(qp);
1036 }
1037 spin_unlock(&qp->s_lock);
1038 if (atomic_dec_and_test(&qp->refcount))
1039 wake_up(&qp->wait);
1040 }
1041}
1042
1043/*
1044 * This is called with ppd->sdma_lock held.
1045 */
1046static void sdma_complete(struct qib_sdma_txreq *cookie, int status)
1047{
1048 struct qib_verbs_txreq *tx =
1049 container_of(cookie, struct qib_verbs_txreq, txreq);
1050 struct qib_qp *qp = tx->qp;
1051
1052 spin_lock(&qp->s_lock);
1053 if (tx->wqe)
1054 qib_send_complete(qp, tx->wqe, IB_WC_SUCCESS);
1055 else if (qp->ibqp.qp_type == IB_QPT_RC) {
1056 struct qib_ib_header *hdr;
1057
1058 if (tx->txreq.flags & QIB_SDMA_TXREQ_F_FREEBUF)
1059 hdr = &tx->align_buf->hdr;
1060 else {
1061 struct qib_ibdev *dev = to_idev(qp->ibqp.device);
1062
1063 hdr = &dev->pio_hdrs[tx->hdr_inx].hdr;
1064 }
1065 qib_rc_send_complete(qp, hdr);
1066 }
1067 if (atomic_dec_and_test(&qp->s_dma_busy)) {
1068 if (qp->state == IB_QPS_RESET)
1069 wake_up(&qp->wait_dma);
1070 else if (qp->s_flags & QIB_S_WAIT_DMA) {
1071 qp->s_flags &= ~QIB_S_WAIT_DMA;
1072 qib_schedule_send(qp);
1073 }
1074 }
1075 spin_unlock(&qp->s_lock);
1076
1077 qib_put_txreq(tx);
1078}
1079
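/*
 * Queue the QP on the memwait list and arm the memory timer so the
 * send is retried once kernel memory may be available again; returns
 * -EBUSY if the QP was queued, 0 otherwise.
 */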
1080static int wait_kmem(struct qib_ibdev *dev, struct qib_qp *qp)
1081{
1082 unsigned long flags;
1083 int ret = 0;
1084
1085 spin_lock_irqsave(&qp->s_lock, flags);
1086 if (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK) {
1087 spin_lock(&dev->pending_lock);
1088 if (list_empty(&qp->iowait)) {
1089 if (list_empty(&dev->memwait))
1090 mod_timer(&dev->mem_timer, jiffies + 1);
1091 qp->s_flags |= QIB_S_WAIT_KMEM;
1092 list_add_tail(&qp->iowait, &dev->memwait);
1093 }
1094 spin_unlock(&dev->pending_lock);
1095 qp->s_flags &= ~QIB_S_BUSY;
1096 ret = -EBUSY;
1097 }
1098 spin_unlock_irqrestore(&qp->s_lock, flags);
1099
1100 return ret;
1101}
1102
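/*
 * Send a packet using send DMA: resend a previously built tx if one is
 * pending, otherwise build one that points SDMA at the preallocated
 * PIO header array, or fall back to a DMA-mapped bounce buffer when
 * the payload cannot be described by the available descriptors.
 */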
1103static int qib_verbs_send_dma(struct qib_qp *qp, struct qib_ib_header *hdr,
1104 u32 hdrwords, struct qib_sge_state *ss, u32 len,
1105 u32 plen, u32 dwords)
1106{
1107 struct qib_ibdev *dev = to_idev(qp->ibqp.device);
1108 struct qib_devdata *dd = dd_from_dev(dev);
1109 struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
1110 struct qib_pportdata *ppd = ppd_from_ibp(ibp);
1111 struct qib_verbs_txreq *tx;
1112 struct qib_pio_header *phdr;
1113 u32 control;
1114 u32 ndesc;
1115 int ret;
1116
1117 tx = qp->s_tx;
1118 if (tx) {
1119 qp->s_tx = NULL;
1120 /* resend previously constructed packet */
1121 ret = qib_sdma_verbs_send(ppd, tx->ss, tx->dwords, tx);
1122 goto bail;
1123 }
1124
1125 tx = get_txreq(dev, qp, &ret);
1126 if (!tx)
1127 goto bail;
1128
1129 control = dd->f_setpbc_control(ppd, plen, qp->s_srate,
1130 be16_to_cpu(hdr->lrh[0]) >> 12);
1131 tx->qp = qp;
1132 atomic_inc(&qp->refcount);
1133 tx->wqe = qp->s_wqe;
1134 tx->mr = qp->s_rdma_mr;
1135 if (qp->s_rdma_mr)
1136 qp->s_rdma_mr = NULL;
1137 tx->txreq.callback = sdma_complete;
1138 if (dd->flags & QIB_HAS_SDMA_TIMEOUT)
1139 tx->txreq.flags = QIB_SDMA_TXREQ_F_HEADTOHOST;
1140 else
1141 tx->txreq.flags = QIB_SDMA_TXREQ_F_INTREQ;
1142 if (plen + 1 > dd->piosize2kmax_dwords)
1143 tx->txreq.flags |= QIB_SDMA_TXREQ_F_USELARGEBUF;
1144
1145 if (len) {
1146 /*
1147 * Don't try to DMA if it takes more descriptors than
1148 * the queue holds.
1149 */
1150 ndesc = qib_count_sge(ss, len);
1151 if (ndesc >= ppd->sdma_descq_cnt)
1152 ndesc = 0;
1153 } else
1154 ndesc = 1;
1155 if (ndesc) {
1156 phdr = &dev->pio_hdrs[tx->hdr_inx];
1157 phdr->pbc[0] = cpu_to_le32(plen);
1158 phdr->pbc[1] = cpu_to_le32(control);
1159 memcpy(&phdr->hdr, hdr, hdrwords << 2);
1160 tx->txreq.flags |= QIB_SDMA_TXREQ_F_FREEDESC;
1161 tx->txreq.sg_count = ndesc;
1162 tx->txreq.addr = dev->pio_hdrs_phys +
1163 tx->hdr_inx * sizeof(struct qib_pio_header);
1164 tx->hdr_dwords = hdrwords + 2; /* add PBC length */
1165 ret = qib_sdma_verbs_send(ppd, ss, dwords, tx);
1166 goto bail;
1167 }
1168
1169 /* Allocate a buffer and copy the header and payload to it. */
1170 tx->hdr_dwords = plen + 1;
1171 phdr = kmalloc(tx->hdr_dwords << 2, GFP_ATOMIC);
1172 if (!phdr)
1173 goto err_tx;
1174 phdr->pbc[0] = cpu_to_le32(plen);
1175 phdr->pbc[1] = cpu_to_le32(control);
1176 memcpy(&phdr->hdr, hdr, hdrwords << 2);
1177 qib_copy_from_sge((u32 *) &phdr->hdr + hdrwords, ss, len);
1178
1179 tx->txreq.addr = dma_map_single(&dd->pcidev->dev, phdr,
1180 tx->hdr_dwords << 2, DMA_TO_DEVICE);
1181 if (dma_mapping_error(&dd->pcidev->dev, tx->txreq.addr))
1182 goto map_err;
1183 tx->align_buf = phdr;
1184 tx->txreq.flags |= QIB_SDMA_TXREQ_F_FREEBUF;
1185 tx->txreq.sg_count = 1;
1186 ret = qib_sdma_verbs_send(ppd, NULL, 0, tx);
1187 goto unaligned;
1188
1189map_err:
1190 kfree(phdr);
1191err_tx:
1192 qib_put_txreq(tx);
1193 ret = wait_kmem(dev, qp);
1194unaligned:
1195 ibp->n_unaligned++;
1196bail:
1197 return ret;
1198}
1199
1200/*
1201 * If we are now in the error state, return zero to flush the
1202 * send work request.
1203 */
1204static int no_bufs_available(struct qib_qp *qp)
1205{
1206 struct qib_ibdev *dev = to_idev(qp->ibqp.device);
1207 struct qib_devdata *dd;
1208 unsigned long flags;
1209 int ret = 0;
1210
1211 /*
 1212 * Note that as soon as dd->f_wantpiobuf_intr() is called and
 1213 * possibly before it returns, qib_ib_piobufavail()
1214 * could be called. Therefore, put QP on the I/O wait list before
1215 * enabling the PIO avail interrupt.
1216 */
1217 spin_lock_irqsave(&qp->s_lock, flags);
1218 if (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK) {
1219 spin_lock(&dev->pending_lock);
1220 if (list_empty(&qp->iowait)) {
1221 dev->n_piowait++;
1222 qp->s_flags |= QIB_S_WAIT_PIO;
1223 list_add_tail(&qp->iowait, &dev->piowait);
1224 dd = dd_from_dev(dev);
1225 dd->f_wantpiobuf_intr(dd, 1);
1226 }
1227 spin_unlock(&dev->pending_lock);
1228 qp->s_flags &= ~QIB_S_BUSY;
1229 ret = -EBUSY;
1230 }
1231 spin_unlock_irqrestore(&qp->s_lock, flags);
1232 return ret;
1233}
1234
1235static int qib_verbs_send_pio(struct qib_qp *qp, struct qib_ib_header *ibhdr,
1236 u32 hdrwords, struct qib_sge_state *ss, u32 len,
1237 u32 plen, u32 dwords)
1238{
1239 struct qib_devdata *dd = dd_from_ibdev(qp->ibqp.device);
1240 struct qib_pportdata *ppd = dd->pport + qp->port_num - 1;
1241 u32 *hdr = (u32 *) ibhdr;
1242 u32 __iomem *piobuf_orig;
1243 u32 __iomem *piobuf;
1244 u64 pbc;
1245 unsigned long flags;
1246 unsigned flush_wc;
1247 u32 control;
1248 u32 pbufn;
1249
1250 control = dd->f_setpbc_control(ppd, plen, qp->s_srate,
1251 be16_to_cpu(ibhdr->lrh[0]) >> 12);
1252 pbc = ((u64) control << 32) | plen;
1253 piobuf = dd->f_getsendbuf(ppd, pbc, &pbufn);
1254 if (unlikely(piobuf == NULL))
1255 return no_bufs_available(qp);
1256
1257 /*
1258 * Write the pbc.
1259 * We have to flush after the PBC for correctness on some cpus
 1260 * or the WC buffer can be written out of order.
1261 */
1262 writeq(pbc, piobuf);
1263 piobuf_orig = piobuf;
1264 piobuf += 2;
1265
1266 flush_wc = dd->flags & QIB_PIO_FLUSH_WC;
1267 if (len == 0) {
1268 /*
1269 * If there is just the header portion, must flush before
1270 * writing last word of header for correctness, and after
1271 * the last header word (trigger word).
1272 */
1273 if (flush_wc) {
1274 qib_flush_wc();
1275 qib_pio_copy(piobuf, hdr, hdrwords - 1);
1276 qib_flush_wc();
1277 __raw_writel(hdr[hdrwords - 1], piobuf + hdrwords - 1);
1278 qib_flush_wc();
1279 } else
1280 qib_pio_copy(piobuf, hdr, hdrwords);
1281 goto done;
1282 }
1283
1284 if (flush_wc)
1285 qib_flush_wc();
1286 qib_pio_copy(piobuf, hdr, hdrwords);
1287 piobuf += hdrwords;
1288
1289 /* The common case is aligned and contained in one segment. */
1290 if (likely(ss->num_sge == 1 && len <= ss->sge.length &&
1291 !((unsigned long)ss->sge.vaddr & (sizeof(u32) - 1)))) {
1292 u32 *addr = (u32 *) ss->sge.vaddr;
1293
1294 /* Update address before sending packet. */
1295 update_sge(ss, len);
1296 if (flush_wc) {
1297 qib_pio_copy(piobuf, addr, dwords - 1);
1298 /* must flush early everything before trigger word */
1299 qib_flush_wc();
1300 __raw_writel(addr[dwords - 1], piobuf + dwords - 1);
1301 /* be sure trigger word is written */
1302 qib_flush_wc();
1303 } else
1304 qib_pio_copy(piobuf, addr, dwords);
1305 goto done;
1306 }
1307 copy_io(piobuf, ss, len, flush_wc);
1308done:
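	/*
	 * Chips flagged with QIB_USE_SPCL_TRIG require a special trigger
	 * word written near the end of the buffer (dword offset 1023 or
	 * 2047) after flushing write combining.
	 */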
1309 if (dd->flags & QIB_USE_SPCL_TRIG) {
1310 u32 spcl_off = (pbufn >= dd->piobcnt2k) ? 2047 : 1023;
1311 qib_flush_wc();
1312 __raw_writel(0xaebecede, piobuf_orig + spcl_off);
1313 }
1314 qib_sendbuf_done(dd, pbufn);
1315 if (qp->s_rdma_mr) {
1316 atomic_dec(&qp->s_rdma_mr->refcount);
1317 qp->s_rdma_mr = NULL;
1318 }
1319 if (qp->s_wqe) {
1320 spin_lock_irqsave(&qp->s_lock, flags);
1321 qib_send_complete(qp, qp->s_wqe, IB_WC_SUCCESS);
1322 spin_unlock_irqrestore(&qp->s_lock, flags);
1323 } else if (qp->ibqp.qp_type == IB_QPT_RC) {
1324 spin_lock_irqsave(&qp->s_lock, flags);
1325 qib_rc_send_complete(qp, ibhdr);
1326 spin_unlock_irqrestore(&qp->s_lock, flags);
1327 }
1328 return 0;
1329}
1330
1331/**
1332 * qib_verbs_send - send a packet
1333 * @qp: the QP to send on
1334 * @hdr: the packet header
1335 * @hdrwords: the number of 32-bit words in the header
1336 * @ss: the SGE to send
1337 * @len: the length of the packet in bytes
1338 *
1339 * Return zero if packet is sent or queued OK.
1340 * Return non-zero and clear qp->s_flags QIB_S_BUSY otherwise.
1341 */
1342int qib_verbs_send(struct qib_qp *qp, struct qib_ib_header *hdr,
1343 u32 hdrwords, struct qib_sge_state *ss, u32 len)
1344{
1345 struct qib_devdata *dd = dd_from_ibdev(qp->ibqp.device);
1346 u32 plen;
1347 int ret;
1348 u32 dwords = (len + 3) >> 2;
1349
1350 /*
1351 * Calculate the send buffer trigger address.
1352 * The +1 counts for the pbc control dword following the pbc length.
1353 */
1354 plen = hdrwords + dwords + 1;
1355
1356 /*
1357 * VL15 packets (IB_QPT_SMI) will always use PIO, so we
1358 * can defer SDMA restart until link goes ACTIVE without
1359 * worrying about just how we got there.
1360 */
1361 if (qp->ibqp.qp_type == IB_QPT_SMI ||
1362 !(dd->flags & QIB_HAS_SEND_DMA))
1363 ret = qib_verbs_send_pio(qp, hdr, hdrwords, ss, len,
1364 plen, dwords);
1365 else
1366 ret = qib_verbs_send_dma(qp, hdr, hdrwords, ss, len,
1367 plen, dwords);
1368
1369 return ret;
1370}
1371
1372int qib_snapshot_counters(struct qib_pportdata *ppd, u64 *swords,
1373 u64 *rwords, u64 *spkts, u64 *rpkts,
1374 u64 *xmit_wait)
1375{
1376 int ret;
1377 struct qib_devdata *dd = ppd->dd;
1378
1379 if (!(dd->flags & QIB_PRESENT)) {
1380 /* no hardware, freeze, etc. */
1381 ret = -EINVAL;
1382 goto bail;
1383 }
1384 *swords = dd->f_portcntr(ppd, QIBPORTCNTR_WORDSEND);
1385 *rwords = dd->f_portcntr(ppd, QIBPORTCNTR_WORDRCV);
1386 *spkts = dd->f_portcntr(ppd, QIBPORTCNTR_PKTSEND);
1387 *rpkts = dd->f_portcntr(ppd, QIBPORTCNTR_PKTRCV);
1388 *xmit_wait = dd->f_portcntr(ppd, QIBPORTCNTR_SENDSTALL);
1389
1390 ret = 0;
1391
1392bail:
1393 return ret;
1394}
1395
1396/**
1397 * qib_get_counters - get various chip counters
 1398 * @ppd: the physical port of the qlogic_ib device
1399 * @cntrs: counters are placed here
1400 *
1401 * Return the counters needed by recv_pma_get_portcounters().
1402 */
1403int qib_get_counters(struct qib_pportdata *ppd,
1404 struct qib_verbs_counters *cntrs)
1405{
1406 int ret;
1407
1408 if (!(ppd->dd->flags & QIB_PRESENT)) {
1409 /* no hardware, freeze, etc. */
1410 ret = -EINVAL;
1411 goto bail;
1412 }
1413 cntrs->symbol_error_counter =
1414 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_IBSYMBOLERR);
1415 cntrs->link_error_recovery_counter =
1416 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_IBLINKERRRECOV);
1417 /*
1418 * The link downed counter counts when the other side downs the
1419 * connection. We add in the number of times we downed the link
1420 * due to local link integrity errors to compensate.
1421 */
1422 cntrs->link_downed_counter =
1423 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_IBLINKDOWN);
1424 cntrs->port_rcv_errors =
1425 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_RXDROPPKT) +
1426 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_RCVOVFL) +
1427 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_ERR_RLEN) +
1428 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_INVALIDRLEN) +
1429 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_ERRLINK) +
1430 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_ERRICRC) +
1431 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_ERRVCRC) +
1432 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_ERRLPCRC) +
1433 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_BADFORMAT);
1434 cntrs->port_rcv_errors +=
1435 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_RXLOCALPHYERR);
1436 cntrs->port_rcv_errors +=
1437 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_RXVLERR);
1438 cntrs->port_rcv_remphys_errors =
1439 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_RCVEBP);
1440 cntrs->port_xmit_discards =
1441 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_UNSUPVL);
1442 cntrs->port_xmit_data = ppd->dd->f_portcntr(ppd,
1443 QIBPORTCNTR_WORDSEND);
1444 cntrs->port_rcv_data = ppd->dd->f_portcntr(ppd,
1445 QIBPORTCNTR_WORDRCV);
1446 cntrs->port_xmit_packets = ppd->dd->f_portcntr(ppd,
1447 QIBPORTCNTR_PKTSEND);
1448 cntrs->port_rcv_packets = ppd->dd->f_portcntr(ppd,
1449 QIBPORTCNTR_PKTRCV);
1450 cntrs->local_link_integrity_errors =
1451 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_LLI);
1452 cntrs->excessive_buffer_overrun_errors =
1453 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_EXCESSBUFOVFL);
1454 cntrs->vl15_dropped =
1455 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_VL15PKTDROP);
1456
1457 ret = 0;
1458
1459bail:
1460 return ret;
1461}
1462
1463/**
1464 * qib_ib_piobufavail - callback when a PIO buffer is available
1465 * @dd: the device pointer
1466 *
1467 * This is called from qib_intr() at interrupt level when a PIO buffer is
1468 * available after qib_verbs_send() returned an error that no buffers were
1469 * available. Disable the interrupt if there are no more QPs waiting.
1470 */
1471void qib_ib_piobufavail(struct qib_devdata *dd)
1472{
1473 struct qib_ibdev *dev = &dd->verbs_dev;
1474 struct list_head *list;
1475 struct qib_qp *qps[5];
1476 struct qib_qp *qp;
1477 unsigned long flags;
1478 unsigned i, n;
1479
1480 list = &dev->piowait;
1481 n = 0;
1482
1483 /*
1484 * Note: checking that the piowait list is empty and clearing
1485 * the buffer available interrupt needs to be atomic or we
1486 * could end up with QPs on the wait list with the interrupt
1487 * disabled.
1488 */
1489 spin_lock_irqsave(&dev->pending_lock, flags);
1490 while (!list_empty(list)) {
1491 if (n == ARRAY_SIZE(qps))
1492 goto full;
1493 qp = list_entry(list->next, struct qib_qp, iowait);
1494 list_del_init(&qp->iowait);
1495 atomic_inc(&qp->refcount);
1496 qps[n++] = qp;
1497 }
1498 dd->f_wantpiobuf_intr(dd, 0);
1499full:
1500 spin_unlock_irqrestore(&dev->pending_lock, flags);
1501
1502 for (i = 0; i < n; i++) {
1503 qp = qps[i];
1504
1505 spin_lock_irqsave(&qp->s_lock, flags);
1506 if (qp->s_flags & QIB_S_WAIT_PIO) {
1507 qp->s_flags &= ~QIB_S_WAIT_PIO;
1508 qib_schedule_send(qp);
1509 }
1510 spin_unlock_irqrestore(&qp->s_lock, flags);
1511
1512 /* Notify qib_destroy_qp() if it is waiting. */
1513 if (atomic_dec_and_test(&qp->refcount))
1514 wake_up(&qp->wait);
1515 }
1516}
1517
1518static int qib_query_device(struct ib_device *ibdev,
1519 struct ib_device_attr *props)
1520{
1521 struct qib_devdata *dd = dd_from_ibdev(ibdev);
1522 struct qib_ibdev *dev = to_idev(ibdev);
1523
1524 memset(props, 0, sizeof(*props));
1525
1526 props->device_cap_flags = IB_DEVICE_BAD_PKEY_CNTR |
1527 IB_DEVICE_BAD_QKEY_CNTR | IB_DEVICE_SHUTDOWN_PORT |
1528 IB_DEVICE_SYS_IMAGE_GUID | IB_DEVICE_RC_RNR_NAK_GEN |
1529 IB_DEVICE_PORT_ACTIVE_EVENT | IB_DEVICE_SRQ_RESIZE;
1530 props->page_size_cap = PAGE_SIZE;
1531 props->vendor_id =
1532 QIB_SRC_OUI_1 << 16 | QIB_SRC_OUI_2 << 8 | QIB_SRC_OUI_3;
1533 props->vendor_part_id = dd->deviceid;
1534 props->hw_ver = dd->minrev;
1535 props->sys_image_guid = ib_qib_sys_image_guid;
1536 props->max_mr_size = ~0ULL;
1537 props->max_qp = ib_qib_max_qps;
1538 props->max_qp_wr = ib_qib_max_qp_wrs;
1539 props->max_sge = ib_qib_max_sges;
1540 props->max_cq = ib_qib_max_cqs;
1541 props->max_ah = ib_qib_max_ahs;
1542 props->max_cqe = ib_qib_max_cqes;
1543 props->max_mr = dev->lk_table.max;
1544 props->max_fmr = dev->lk_table.max;
1545 props->max_map_per_fmr = 32767;
1546 props->max_pd = ib_qib_max_pds;
1547 props->max_qp_rd_atom = QIB_MAX_RDMA_ATOMIC;
1548 props->max_qp_init_rd_atom = 255;
1549 /* props->max_res_rd_atom */
1550 props->max_srq = ib_qib_max_srqs;
1551 props->max_srq_wr = ib_qib_max_srq_wrs;
1552 props->max_srq_sge = ib_qib_max_srq_sges;
1553 /* props->local_ca_ack_delay */
1554 props->atomic_cap = IB_ATOMIC_GLOB;
1555 props->max_pkeys = qib_get_npkeys(dd);
1556 props->max_mcast_grp = ib_qib_max_mcast_grps;
1557 props->max_mcast_qp_attach = ib_qib_max_mcast_qp_attached;
1558 props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
1559 props->max_mcast_grp;
1560
1561 return 0;
1562}
1563
1564static int qib_query_port(struct ib_device *ibdev, u8 port,
1565 struct ib_port_attr *props)
1566{
1567 struct qib_devdata *dd = dd_from_ibdev(ibdev);
1568 struct qib_ibport *ibp = to_iport(ibdev, port);
1569 struct qib_pportdata *ppd = ppd_from_ibp(ibp);
1570 enum ib_mtu mtu;
1571 u16 lid = ppd->lid;
1572
1573 memset(props, 0, sizeof(*props));
1574 props->lid = lid ? lid : be16_to_cpu(IB_LID_PERMISSIVE);
1575 props->lmc = ppd->lmc;
1576 props->sm_lid = ibp->sm_lid;
1577 props->sm_sl = ibp->sm_sl;
1578 props->state = dd->f_iblink_state(ppd->lastibcstat);
1579 props->phys_state = dd->f_ibphys_portstate(ppd->lastibcstat);
1580 props->port_cap_flags = ibp->port_cap_flags;
1581 props->gid_tbl_len = QIB_GUIDS_PER_PORT;
1582 props->max_msg_sz = 0x80000000;
1583 props->pkey_tbl_len = qib_get_npkeys(dd);
1584 props->bad_pkey_cntr = ibp->pkey_violations;
1585 props->qkey_viol_cntr = ibp->qkey_violations;
1586 props->active_width = ppd->link_width_active;
1587 /* See rate_show() */
1588 props->active_speed = ppd->link_speed_active;
1589 props->max_vl_num = qib_num_vls(ppd->vls_supported);
1590 props->init_type_reply = 0;
1591
1592 props->max_mtu = qib_ibmtu ? qib_ibmtu : IB_MTU_4096;
1593 switch (ppd->ibmtu) {
1594 case 4096:
1595 mtu = IB_MTU_4096;
1596 break;
1597 case 2048:
1598 mtu = IB_MTU_2048;
1599 break;
1600 case 1024:
1601 mtu = IB_MTU_1024;
1602 break;
1603 case 512:
1604 mtu = IB_MTU_512;
1605 break;
1606 case 256:
1607 mtu = IB_MTU_256;
1608 break;
1609 default:
1610 mtu = IB_MTU_2048;
1611 }
1612 props->active_mtu = mtu;
1613 props->subnet_timeout = ibp->subnet_timeout;
1614
1615 return 0;
1616}
1617
1618static int qib_modify_device(struct ib_device *device,
1619 int device_modify_mask,
1620 struct ib_device_modify *device_modify)
1621{
1622 struct qib_devdata *dd = dd_from_ibdev(device);
1623 unsigned i;
1624 int ret;
1625
1626 if (device_modify_mask & ~(IB_DEVICE_MODIFY_SYS_IMAGE_GUID |
1627 IB_DEVICE_MODIFY_NODE_DESC)) {
1628 ret = -EOPNOTSUPP;
1629 goto bail;
1630 }
1631
1632 if (device_modify_mask & IB_DEVICE_MODIFY_NODE_DESC) {
1633 memcpy(device->node_desc, device_modify->node_desc, 64);
1634 for (i = 0; i < dd->num_pports; i++) {
1635 struct qib_ibport *ibp = &dd->pport[i].ibport_data;
1636
1637 qib_node_desc_chg(ibp);
1638 }
1639 }
1640
1641 if (device_modify_mask & IB_DEVICE_MODIFY_SYS_IMAGE_GUID) {
1642 ib_qib_sys_image_guid =
1643 cpu_to_be64(device_modify->sys_image_guid);
1644 for (i = 0; i < dd->num_pports; i++) {
1645 struct qib_ibport *ibp = &dd->pport[i].ibport_data;
1646
1647 qib_sys_guid_chg(ibp);
1648 }
1649 }
1650
1651 ret = 0;
1652
1653bail:
1654 return ret;
1655}
1656
1657static int qib_modify_port(struct ib_device *ibdev, u8 port,
1658 int port_modify_mask, struct ib_port_modify *props)
1659{
1660 struct qib_ibport *ibp = to_iport(ibdev, port);
1661 struct qib_pportdata *ppd = ppd_from_ibp(ibp);
1662
1663 ibp->port_cap_flags |= props->set_port_cap_mask;
1664 ibp->port_cap_flags &= ~props->clr_port_cap_mask;
1665 if (props->set_port_cap_mask || props->clr_port_cap_mask)
1666 qib_cap_mask_chg(ibp);
1667 if (port_modify_mask & IB_PORT_SHUTDOWN)
1668 qib_set_linkstate(ppd, QIB_IB_LINKDOWN);
1669 if (port_modify_mask & IB_PORT_RESET_QKEY_CNTR)
1670 ibp->qkey_violations = 0;
1671 return 0;
1672}
1673
1674static int qib_query_gid(struct ib_device *ibdev, u8 port,
1675 int index, union ib_gid *gid)
1676{
1677 struct qib_devdata *dd = dd_from_ibdev(ibdev);
1678 int ret = 0;
1679
1680 if (!port || port > dd->num_pports)
1681 ret = -EINVAL;
1682 else {
1683 struct qib_ibport *ibp = to_iport(ibdev, port);
1684 struct qib_pportdata *ppd = ppd_from_ibp(ibp);
1685
1686 gid->global.subnet_prefix = ibp->gid_prefix;
1687 if (index == 0)
1688 gid->global.interface_id = ppd->guid;
1689 else if (index < QIB_GUIDS_PER_PORT)
1690 gid->global.interface_id = ibp->guids[index - 1];
1691 else
1692 ret = -EINVAL;
1693 }
1694
1695 return ret;
1696}
1697
1698static struct ib_pd *qib_alloc_pd(struct ib_device *ibdev,
1699 struct ib_ucontext *context,
1700 struct ib_udata *udata)
1701{
1702 struct qib_ibdev *dev = to_idev(ibdev);
1703 struct qib_pd *pd;
1704 struct ib_pd *ret;
1705
1706 /*
1707 * This is actually totally arbitrary. Some correctness tests
1708 * assume there's a maximum number of PDs that can be allocated.
1709 * We don't actually have this limit, but we fail the test if
1710 * we allow allocations of more than we report for this value.
1711 */
1712
1713 pd = kmalloc(sizeof *pd, GFP_KERNEL);
1714 if (!pd) {
1715 ret = ERR_PTR(-ENOMEM);
1716 goto bail;
1717 }
1718
1719 spin_lock(&dev->n_pds_lock);
1720 if (dev->n_pds_allocated == ib_qib_max_pds) {
1721 spin_unlock(&dev->n_pds_lock);
1722 kfree(pd);
1723 ret = ERR_PTR(-ENOMEM);
1724 goto bail;
1725 }
1726
1727 dev->n_pds_allocated++;
1728 spin_unlock(&dev->n_pds_lock);
1729
1730 /* ib_alloc_pd() will initialize pd->ibpd. */
1731 pd->user = udata != NULL;
1732
1733 ret = &pd->ibpd;
1734
1735bail:
1736 return ret;
1737}
1738
1739static int qib_dealloc_pd(struct ib_pd *ibpd)
1740{
1741 struct qib_pd *pd = to_ipd(ibpd);
1742 struct qib_ibdev *dev = to_idev(ibpd->device);
1743
1744 spin_lock(&dev->n_pds_lock);
1745 dev->n_pds_allocated--;
1746 spin_unlock(&dev->n_pds_lock);
1747
1748 kfree(pd);
1749
1750 return 0;
1751}
1752
1753int qib_check_ah(struct ib_device *ibdev, struct ib_ah_attr *ah_attr)
1754{
1755 /* A multicast address requires a GRH (see ch. 8.4.1). */
1756 if (ah_attr->dlid >= QIB_MULTICAST_LID_BASE &&
1757 ah_attr->dlid != QIB_PERMISSIVE_LID &&
1758 !(ah_attr->ah_flags & IB_AH_GRH))
1759 goto bail;
1760 if ((ah_attr->ah_flags & IB_AH_GRH) &&
1761 ah_attr->grh.sgid_index >= QIB_GUIDS_PER_PORT)
1762 goto bail;
1763 if (ah_attr->dlid == 0)
1764 goto bail;
1765 if (ah_attr->port_num < 1 ||
1766 ah_attr->port_num > ibdev->phys_port_cnt)
1767 goto bail;
1768 if (ah_attr->static_rate != IB_RATE_PORT_CURRENT &&
1769 ib_rate_to_mult(ah_attr->static_rate) < 0)
1770 goto bail;
1771 if (ah_attr->sl > 15)
1772 goto bail;
1773 return 0;
1774bail:
1775 return -EINVAL;
1776}
1777
1778/**
1779 * qib_create_ah - create an address handle
1780 * @pd: the protection domain
1781 * @ah_attr: the attributes of the AH
1782 *
1783 * This may be called from interrupt context.
1784 */
1785static struct ib_ah *qib_create_ah(struct ib_pd *pd,
1786 struct ib_ah_attr *ah_attr)
1787{
1788 struct qib_ah *ah;
1789 struct ib_ah *ret;
1790 struct qib_ibdev *dev = to_idev(pd->device);
1791 unsigned long flags;
1792
1793 if (qib_check_ah(pd->device, ah_attr)) {
1794 ret = ERR_PTR(-EINVAL);
1795 goto bail;
1796 }
1797
1798 ah = kmalloc(sizeof *ah, GFP_ATOMIC);
1799 if (!ah) {
1800 ret = ERR_PTR(-ENOMEM);
1801 goto bail;
1802 }
1803
1804 spin_lock_irqsave(&dev->n_ahs_lock, flags);
1805 if (dev->n_ahs_allocated == ib_qib_max_ahs) {
1806 spin_unlock_irqrestore(&dev->n_ahs_lock, flags);
1807 kfree(ah);
1808 ret = ERR_PTR(-ENOMEM);
1809 goto bail;
1810 }
1811
1812 dev->n_ahs_allocated++;
1813 spin_unlock_irqrestore(&dev->n_ahs_lock, flags);
1814
1815 /* ib_create_ah() will initialize ah->ibah. */
1816 ah->attr = *ah_attr;
1817 atomic_set(&ah->refcount, 0);
1818
1819 ret = &ah->ibah;
1820
1821bail:
1822 return ret;
1823}
1824
1825/**
1826 * qib_destroy_ah - destroy an address handle
1827 * @ibah: the AH to destroy
1828 *
1829 * This may be called from interrupt context.
1830 */
1831static int qib_destroy_ah(struct ib_ah *ibah)
1832{
1833 struct qib_ibdev *dev = to_idev(ibah->device);
1834 struct qib_ah *ah = to_iah(ibah);
1835 unsigned long flags;
1836
1837 if (atomic_read(&ah->refcount) != 0)
1838 return -EBUSY;
1839
1840 spin_lock_irqsave(&dev->n_ahs_lock, flags);
1841 dev->n_ahs_allocated--;
1842 spin_unlock_irqrestore(&dev->n_ahs_lock, flags);
1843
1844 kfree(ah);
1845
1846 return 0;
1847}
1848
1849static int qib_modify_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr)
1850{
1851 struct qib_ah *ah = to_iah(ibah);
1852
1853 if (qib_check_ah(ibah->device, ah_attr))
1854 return -EINVAL;
1855
1856 ah->attr = *ah_attr;
1857
1858 return 0;
1859}
1860
1861static int qib_query_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr)
1862{
1863 struct qib_ah *ah = to_iah(ibah);
1864
1865 *ah_attr = ah->attr;
1866
1867 return 0;
1868}
1869
1870/**
1871 * qib_get_npkeys - return the size of the PKEY table for context 0
1872 * @dd: the qlogic_ib device
1873 */
1874unsigned qib_get_npkeys(struct qib_devdata *dd)
1875{
1876 return ARRAY_SIZE(dd->rcd[0]->pkeys);
1877}
1878
1879/*
1880 * Return the indexed PKEY from the port PKEY table.
1881 * No need to validate rcd[ctxt]; the port is setup if we are here.
1882 */
1883unsigned qib_get_pkey(struct qib_ibport *ibp, unsigned index)
1884{
1885 struct qib_pportdata *ppd = ppd_from_ibp(ibp);
1886 struct qib_devdata *dd = ppd->dd;
1887 unsigned ctxt = ppd->hw_pidx;
1888 unsigned ret;
1889
1890 /* dd->rcd null if mini_init or some init failures */
1891 if (!dd->rcd || index >= ARRAY_SIZE(dd->rcd[ctxt]->pkeys))
1892 ret = 0;
1893 else
1894 ret = dd->rcd[ctxt]->pkeys[index];
1895
1896 return ret;
1897}
1898
1899static int qib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
1900 u16 *pkey)
1901{
1902 struct qib_devdata *dd = dd_from_ibdev(ibdev);
1903 int ret;
1904
1905 if (index >= qib_get_npkeys(dd)) {
1906 ret = -EINVAL;
1907 goto bail;
1908 }
1909
1910 *pkey = qib_get_pkey(to_iport(ibdev, port), index);
1911 ret = 0;
1912
1913bail:
1914 return ret;
1915}
1916
1917/**
 1918 * qib_alloc_ucontext - allocate a ucontext
1919 * @ibdev: the infiniband device
1920 * @udata: not used by the QLogic_IB driver
1921 */
1922
1923static struct ib_ucontext *qib_alloc_ucontext(struct ib_device *ibdev,
1924 struct ib_udata *udata)
1925{
1926 struct qib_ucontext *context;
1927 struct ib_ucontext *ret;
1928
1929 context = kmalloc(sizeof *context, GFP_KERNEL);
1930 if (!context) {
1931 ret = ERR_PTR(-ENOMEM);
1932 goto bail;
1933 }
1934
1935 ret = &context->ibucontext;
1936
1937bail:
1938 return ret;
1939}
1940
1941static int qib_dealloc_ucontext(struct ib_ucontext *context)
1942{
1943 kfree(to_iucontext(context));
1944 return 0;
1945}
1946
1947static void init_ibport(struct qib_pportdata *ppd)
1948{
1949 struct qib_verbs_counters cntrs;
1950 struct qib_ibport *ibp = &ppd->ibport_data;
1951
1952 spin_lock_init(&ibp->lock);
1953 /* Set the prefix to the default value (see ch. 4.1.1) */
1954 ibp->gid_prefix = IB_DEFAULT_GID_PREFIX;
1955 ibp->sm_lid = be16_to_cpu(IB_LID_PERMISSIVE);
1956 ibp->port_cap_flags = IB_PORT_SYS_IMAGE_GUID_SUP |
1957 IB_PORT_CLIENT_REG_SUP | IB_PORT_SL_MAP_SUP |
1958 IB_PORT_TRAP_SUP | IB_PORT_AUTO_MIGR_SUP |
1959 IB_PORT_DR_NOTICE_SUP | IB_PORT_CAP_MASK_NOTICE_SUP |
1960 IB_PORT_OTHER_LOCAL_CHANGES_SUP;
1961 if (ppd->dd->flags & QIB_HAS_LINK_LATENCY)
1962 ibp->port_cap_flags |= IB_PORT_LINK_LATENCY_SUP;
1963 ibp->pma_counter_select[0] = IB_PMA_PORT_XMIT_DATA;
1964 ibp->pma_counter_select[1] = IB_PMA_PORT_RCV_DATA;
1965 ibp->pma_counter_select[2] = IB_PMA_PORT_XMIT_PKTS;
1966 ibp->pma_counter_select[3] = IB_PMA_PORT_RCV_PKTS;
1967 ibp->pma_counter_select[4] = IB_PMA_PORT_XMIT_WAIT;
1968
1969 /* Snapshot current HW counters to "clear" them. */
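 /*
 * Sketch (not the driver's literal reporting code): the z_* fields
 * captured below act as a software baseline, so a later counter query
 * can present a "cleared" value as a delta, e.g.
 *
 *   symbol_errors = cntrs.symbol_error_counter -
 *                   ibp->z_symbol_error_counter;
 */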
1970 qib_get_counters(ppd, &cntrs);
1971 ibp->z_symbol_error_counter = cntrs.symbol_error_counter;
1972 ibp->z_link_error_recovery_counter =
1973 cntrs.link_error_recovery_counter;
1974 ibp->z_link_downed_counter = cntrs.link_downed_counter;
1975 ibp->z_port_rcv_errors = cntrs.port_rcv_errors;
1976 ibp->z_port_rcv_remphys_errors = cntrs.port_rcv_remphys_errors;
1977 ibp->z_port_xmit_discards = cntrs.port_xmit_discards;
1978 ibp->z_port_xmit_data = cntrs.port_xmit_data;
1979 ibp->z_port_rcv_data = cntrs.port_rcv_data;
1980 ibp->z_port_xmit_packets = cntrs.port_xmit_packets;
1981 ibp->z_port_rcv_packets = cntrs.port_rcv_packets;
1982 ibp->z_local_link_integrity_errors =
1983 cntrs.local_link_integrity_errors;
1984 ibp->z_excessive_buffer_overrun_errors =
1985 cntrs.excessive_buffer_overrun_errors;
1986 ibp->z_vl15_dropped = cntrs.vl15_dropped;
Mike Marciniszynaf061a62011-09-23 13:16:44 -04001987 RCU_INIT_POINTER(ibp->qp0, NULL);
1988 RCU_INIT_POINTER(ibp->qp1, NULL);
Ralph Campbellf9315512010-05-23 21:44:54 -07001989}
1990
1991/**
1992 * qib_register_ib_device - register our device with the infiniband core
1993 * @dd: the device data structure
1994 * Return 0 on success, or a negative errno on failure.
1995 */
1996int qib_register_ib_device(struct qib_devdata *dd)
1997{
1998 struct qib_ibdev *dev = &dd->verbs_dev;
1999 struct ib_device *ibdev = &dev->ibdev;
2000 struct qib_pportdata *ppd = dd->pport;
2001 unsigned i, lk_tab_size;
2002 int ret;
2003
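 /*
 * qp_rnd (set just below) is a random seed, presumably mixed into the
 * QPN hash so QP-table bucket placement is not predictable from user
 * space.  A lookup would then look roughly like this sketch (the real
 * hash helper lives in the QP handling code):
 *
 *   bucket = jhash_1word(qpn, dev->qp_rnd) & (dev->qp_table_size - 1);
 */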
2004 dev->qp_table_size = ib_qib_qp_table_size;
Mike Marciniszynaf061a62011-09-23 13:16:44 -04002005 get_random_bytes(&dev->qp_rnd, sizeof(dev->qp_rnd));
2006 dev->qp_table = kmalloc(dev->qp_table_size * sizeof *dev->qp_table,
Ralph Campbellf9315512010-05-23 21:44:54 -07002007 GFP_KERNEL);
2008 if (!dev->qp_table) {
2009 ret = -ENOMEM;
2010 goto err_qpt;
2011 }
Mike Marciniszynaf061a62011-09-23 13:16:44 -04002012 for (i = 0; i < dev->qp_table_size; i++)
2013 RCU_INIT_POINTER(dev->qp_table[i], NULL);
Ralph Campbellf9315512010-05-23 21:44:54 -07002014
2015 for (i = 0; i < dd->num_pports; i++)
2016 init_ibport(ppd + i);
2017
2018 /* Only need to initialize non-zero fields. */
2019 spin_lock_init(&dev->qpt_lock);
2020 spin_lock_init(&dev->n_pds_lock);
2021 spin_lock_init(&dev->n_ahs_lock);
2022 spin_lock_init(&dev->n_cqs_lock);
2023 spin_lock_init(&dev->n_qps_lock);
2024 spin_lock_init(&dev->n_srqs_lock);
2025 spin_lock_init(&dev->n_mcast_grps_lock);
2026 init_timer(&dev->mem_timer);
2027 dev->mem_timer.function = mem_timer;
2028 dev->mem_timer.data = (unsigned long) dev;
2029
2030 qib_init_qpn_table(dd, &dev->qpn_table);
2031
2032 /*
2033 * The top ib_qib_lkey_table_size bits are used to index the
2034 * table. The lower 8 bits can be owned by the user (copied from
2035 * the LKEY). The remaining bits act as a generation number or tag.
2036 */
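 /*
 * For example, with the default ib_qib_lkey_table_size of 16 the table
 * has 64K slots, and an LKEY would map to its slot roughly as (sketch
 * only; the real lookup is in the MR/key handling code):
 *
 *   index = lkey >> (32 - ib_qib_lkey_table_size);
 *
 * with the low 8 bits left to the caller and the bits in between acting
 * as the generation tag described above.
 */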
2037 spin_lock_init(&dev->lk_table.lock);
2038 dev->lk_table.max = 1 << ib_qib_lkey_table_size;
2039 lk_tab_size = dev->lk_table.max * sizeof(*dev->lk_table.table);
2040 dev->lk_table.table = (struct qib_mregion **)
2041 __get_free_pages(GFP_KERNEL, get_order(lk_tab_size));
2042 if (dev->lk_table.table == NULL) {
2043 ret = -ENOMEM;
2044 goto err_lk;
2045 }
2046 memset(dev->lk_table.table, 0, lk_tab_size);
2047 INIT_LIST_HEAD(&dev->pending_mmaps);
2048 spin_lock_init(&dev->pending_lock);
2049 dev->mmap_offset = PAGE_SIZE;
2050 spin_lock_init(&dev->mmap_offset_lock);
2051 INIT_LIST_HEAD(&dev->piowait);
2052 INIT_LIST_HEAD(&dev->dmawait);
2053 INIT_LIST_HEAD(&dev->txwait);
2054 INIT_LIST_HEAD(&dev->memwait);
2055 INIT_LIST_HEAD(&dev->txreq_free);
2056
2057 if (ppd->sdma_descq_cnt) {
2058 dev->pio_hdrs = dma_alloc_coherent(&dd->pcidev->dev,
2059 ppd->sdma_descq_cnt *
2060 sizeof(struct qib_pio_header),
2061 &dev->pio_hdrs_phys,
2062 GFP_KERNEL);
2063 if (!dev->pio_hdrs) {
2064 ret = -ENOMEM;
2065 goto err_hdrs;
2066 }
2067 }
2068
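 /*
 * Pre-allocate one verbs tx request per SDMA descriptor slot and park
 * them on txreq_free, so the send path can take one off the free list
 * instead of allocating on the fly.
 */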
2069 for (i = 0; i < ppd->sdma_descq_cnt; i++) {
2070 struct qib_verbs_txreq *tx;
2071
2072 tx = kzalloc(sizeof *tx, GFP_KERNEL);
2073 if (!tx) {
2074 ret = -ENOMEM;
2075 goto err_tx;
2076 }
2077 tx->hdr_inx = i;
2078 list_add(&tx->txreq.list, &dev->txreq_free);
2079 }
2080
2081 /*
2082 * The system image GUID is supposed to be the same for all
2083 * IB HCAs in a single system, but since there can be other
2084 * device types in the system, we can't be sure this is unique.
2085 */
2086 if (!ib_qib_sys_image_guid)
2087 ib_qib_sys_image_guid = ppd->guid;
2088
2089 strlcpy(ibdev->name, "qib%d", IB_DEVICE_NAME_MAX);
2090 ibdev->owner = THIS_MODULE;
2091 ibdev->node_guid = ppd->guid;
2092 ibdev->uverbs_abi_ver = QIB_UVERBS_ABI_VERSION;
2093 ibdev->uverbs_cmd_mask =
2094 (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
2095 (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
2096 (1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
2097 (1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
2098 (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
2099 (1ull << IB_USER_VERBS_CMD_CREATE_AH) |
2100 (1ull << IB_USER_VERBS_CMD_MODIFY_AH) |
2101 (1ull << IB_USER_VERBS_CMD_QUERY_AH) |
2102 (1ull << IB_USER_VERBS_CMD_DESTROY_AH) |
2103 (1ull << IB_USER_VERBS_CMD_REG_MR) |
2104 (1ull << IB_USER_VERBS_CMD_DEREG_MR) |
2105 (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
2106 (1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
2107 (1ull << IB_USER_VERBS_CMD_RESIZE_CQ) |
2108 (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
2109 (1ull << IB_USER_VERBS_CMD_POLL_CQ) |
2110 (1ull << IB_USER_VERBS_CMD_REQ_NOTIFY_CQ) |
2111 (1ull << IB_USER_VERBS_CMD_CREATE_QP) |
2112 (1ull << IB_USER_VERBS_CMD_QUERY_QP) |
2113 (1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
2114 (1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
2115 (1ull << IB_USER_VERBS_CMD_POST_SEND) |
2116 (1ull << IB_USER_VERBS_CMD_POST_RECV) |
2117 (1ull << IB_USER_VERBS_CMD_ATTACH_MCAST) |
2118 (1ull << IB_USER_VERBS_CMD_DETACH_MCAST) |
2119 (1ull << IB_USER_VERBS_CMD_CREATE_SRQ) |
2120 (1ull << IB_USER_VERBS_CMD_MODIFY_SRQ) |
2121 (1ull << IB_USER_VERBS_CMD_QUERY_SRQ) |
2122 (1ull << IB_USER_VERBS_CMD_DESTROY_SRQ) |
2123 (1ull << IB_USER_VERBS_CMD_POST_SRQ_RECV);
2124 ibdev->node_type = RDMA_NODE_IB_CA;
2125 ibdev->phys_port_cnt = dd->num_pports;
2126 ibdev->num_comp_vectors = 1;
2127 ibdev->dma_device = &dd->pcidev->dev;
2128 ibdev->query_device = qib_query_device;
2129 ibdev->modify_device = qib_modify_device;
2130 ibdev->query_port = qib_query_port;
2131 ibdev->modify_port = qib_modify_port;
2132 ibdev->query_pkey = qib_query_pkey;
2133 ibdev->query_gid = qib_query_gid;
2134 ibdev->alloc_ucontext = qib_alloc_ucontext;
2135 ibdev->dealloc_ucontext = qib_dealloc_ucontext;
2136 ibdev->alloc_pd = qib_alloc_pd;
2137 ibdev->dealloc_pd = qib_dealloc_pd;
2138 ibdev->create_ah = qib_create_ah;
2139 ibdev->destroy_ah = qib_destroy_ah;
2140 ibdev->modify_ah = qib_modify_ah;
2141 ibdev->query_ah = qib_query_ah;
2142 ibdev->create_srq = qib_create_srq;
2143 ibdev->modify_srq = qib_modify_srq;
2144 ibdev->query_srq = qib_query_srq;
2145 ibdev->destroy_srq = qib_destroy_srq;
2146 ibdev->create_qp = qib_create_qp;
2147 ibdev->modify_qp = qib_modify_qp;
2148 ibdev->query_qp = qib_query_qp;
2149 ibdev->destroy_qp = qib_destroy_qp;
2150 ibdev->post_send = qib_post_send;
2151 ibdev->post_recv = qib_post_receive;
2152 ibdev->post_srq_recv = qib_post_srq_receive;
2153 ibdev->create_cq = qib_create_cq;
2154 ibdev->destroy_cq = qib_destroy_cq;
2155 ibdev->resize_cq = qib_resize_cq;
2156 ibdev->poll_cq = qib_poll_cq;
2157 ibdev->req_notify_cq = qib_req_notify_cq;
2158 ibdev->get_dma_mr = qib_get_dma_mr;
2159 ibdev->reg_phys_mr = qib_reg_phys_mr;
2160 ibdev->reg_user_mr = qib_reg_user_mr;
2161 ibdev->dereg_mr = qib_dereg_mr;
2162 ibdev->alloc_fast_reg_mr = qib_alloc_fast_reg_mr;
2163 ibdev->alloc_fast_reg_page_list = qib_alloc_fast_reg_page_list;
2164 ibdev->free_fast_reg_page_list = qib_free_fast_reg_page_list;
2165 ibdev->alloc_fmr = qib_alloc_fmr;
2166 ibdev->map_phys_fmr = qib_map_phys_fmr;
2167 ibdev->unmap_fmr = qib_unmap_fmr;
2168 ibdev->dealloc_fmr = qib_dealloc_fmr;
2169 ibdev->attach_mcast = qib_multicast_attach;
2170 ibdev->detach_mcast = qib_multicast_detach;
2171 ibdev->process_mad = qib_process_mad;
2172 ibdev->mmap = qib_mmap;
2173 ibdev->dma_ops = &qib_dma_mapping_ops;
2174
2175 snprintf(ibdev->node_desc, sizeof(ibdev->node_desc),
2176 QIB_IDSTR " %s", init_utsname()->nodename);
2177
2178 ret = ib_register_device(ibdev, qib_create_port_files);
2179 if (ret)
2180 goto err_reg;
2181
2182 ret = qib_create_agents(dev);
2183 if (ret)
2184 goto err_agents;
2185
2186 ret = qib_verbs_register_sysfs(dd);
2187 if (ret)
 goto err_class;
2188
2189 goto bail;
2190
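 /*
 * Error unwind: each label below releases what was set up before the
 * corresponding failure point, in reverse order of setup.
 */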
2191err_class:
2192 qib_free_agents(dev);
2193err_agents:
2194 ib_unregister_device(ibdev);
2195err_reg:
2196err_tx:
2197 while (!list_empty(&dev->txreq_free)) {
2198 struct list_head *l = dev->txreq_free.next;
2199 struct qib_verbs_txreq *tx;
2200
2201 list_del(l);
2202 tx = list_entry(l, struct qib_verbs_txreq, txreq.list);
2203 kfree(tx);
2204 }
2205 if (ppd->sdma_descq_cnt)
2206 dma_free_coherent(&dd->pcidev->dev,
2207 ppd->sdma_descq_cnt *
2208 sizeof(struct qib_pio_header),
2209 dev->pio_hdrs, dev->pio_hdrs_phys);
2210err_hdrs:
2211 free_pages((unsigned long) dev->lk_table.table, get_order(lk_tab_size));
2212err_lk:
2213 kfree(dev->qp_table);
2214err_qpt:
2215 qib_dev_err(dd, "cannot register verbs: %d!\n", -ret);
2216bail:
2217 return ret;
2218}
2219
2220void qib_unregister_ib_device(struct qib_devdata *dd)
2221{
2222 struct qib_ibdev *dev = &dd->verbs_dev;
2223 struct ib_device *ibdev = &dev->ibdev;
2224 u32 qps_inuse;
2225 unsigned lk_tab_size;
2226
2227 qib_verbs_unregister_sysfs(dd);
2228
2229 qib_free_agents(dev);
2230
2231 ib_unregister_device(ibdev);
2232
2233 if (!list_empty(&dev->piowait))
2234 qib_dev_err(dd, "piowait list not empty!\n");
2235 if (!list_empty(&dev->dmawait))
2236 qib_dev_err(dd, "dmawait list not empty!\n");
2237 if (!list_empty(&dev->txwait))
2238 qib_dev_err(dd, "txwait list not empty!\n");
2239 if (!list_empty(&dev->memwait))
2240 qib_dev_err(dd, "memwait list not empty!\n");
2241 if (dev->dma_mr)
2242 qib_dev_err(dd, "DMA MR not NULL!\n");
2243
2244 qps_inuse = qib_free_all_qps(dd);
2245 if (qps_inuse)
2246 qib_dev_err(dd, "QP memory leak! %u still in use\n",
2247 qps_inuse);
2248
2249 del_timer_sync(&dev->mem_timer);
2250 qib_free_qpn_table(&dev->qpn_table);
2251 while (!list_empty(&dev->txreq_free)) {
2252 struct list_head *l = dev->txreq_free.next;
2253 struct qib_verbs_txreq *tx;
2254
2255 list_del(l);
2256 tx = list_entry(l, struct qib_verbs_txreq, txreq.list);
2257 kfree(tx);
2258 }
2259 if (dd->pport->sdma_descq_cnt)
2260 dma_free_coherent(&dd->pcidev->dev,
2261 dd->pport->sdma_descq_cnt *
2262 sizeof(struct qib_pio_header),
2263 dev->pio_hdrs, dev->pio_hdrs_phys);
2264 lk_tab_size = dev->lk_table.max * sizeof(*dev->lk_table.table);
2265 free_pages((unsigned long) dev->lk_table.table,
2266 get_order(lk_tab_size));
2267 kfree(dev->qp_table);
2268}