/*
 * Copyright (c) 2012, 2013 Intel Corporation.  All rights reserved.
 * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef QIB_VERBS_H
#define QIB_VERBS_H

#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/kref.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/completion.h>
#include <rdma/ib_pack.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/rdma_vt.h>

struct qib_ctxtdata;
struct qib_pportdata;
struct qib_devdata;
struct qib_verbs_txreq;

#define QIB_MAX_RDMA_ATOMIC     16
#define QIB_GUIDS_PER_PORT	5

#define QPN_MAX                 (1 << 24)
#define QPNMAP_ENTRIES          (QPN_MAX / PAGE_SIZE / BITS_PER_BYTE)
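
/*
 * Sizing note (illustrative, assuming 4 KiB pages): QPN_MAX is 2^24
 * possible QP numbers, and each qpn_map page tracks
 * PAGE_SIZE * BITS_PER_BYTE = 32768 QPNs as a bitmap, so
 * QPNMAP_ENTRIES works out to 2^24 / 32768 = 512 map pages.
 */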

/*
 * Increment this value if any changes that break userspace ABI
 * compatibility are made.
 */
#define QIB_UVERBS_ABI_VERSION       2

/*
 * Define an ib_cq_notify value that is not valid so we know when CQ
 * notifications are armed.
 */
#define IB_CQ_NONE      (IB_CQ_NEXT_COMP + 1)

#define IB_SEQ_NAK	(3 << 29)

/* AETH NAK opcode values */
#define IB_RNR_NAK                      0x20
#define IB_NAK_PSN_ERROR                0x60
#define IB_NAK_INVALID_REQUEST          0x61
#define IB_NAK_REMOTE_ACCESS_ERROR      0x62
#define IB_NAK_REMOTE_OPERATIONAL_ERROR 0x63
#define IB_NAK_INVALID_RD_REQUEST       0x64

/* Flags for checking QP state (see ib_qib_state_ops[]) */
#define QIB_POST_SEND_OK                0x01
#define QIB_POST_RECV_OK                0x02
#define QIB_PROCESS_RECV_OK             0x04
#define QIB_PROCESS_SEND_OK             0x08
#define QIB_PROCESS_NEXT_SEND_OK        0x10
#define QIB_FLUSH_SEND                  0x20
#define QIB_FLUSH_RECV                  0x40
#define QIB_PROCESS_OR_FLUSH_SEND \
	(QIB_PROCESS_SEND_OK | QIB_FLUSH_SEND)

/* IB Performance Manager status values */
#define IB_PMA_SAMPLE_STATUS_DONE       0x00
#define IB_PMA_SAMPLE_STATUS_STARTED    0x01
#define IB_PMA_SAMPLE_STATUS_RUNNING    0x02

/* Mandatory IB performance counter select values. */
#define IB_PMA_PORT_XMIT_DATA   cpu_to_be16(0x0001)
#define IB_PMA_PORT_RCV_DATA    cpu_to_be16(0x0002)
#define IB_PMA_PORT_XMIT_PKTS   cpu_to_be16(0x0003)
#define IB_PMA_PORT_RCV_PKTS    cpu_to_be16(0x0004)
#define IB_PMA_PORT_XMIT_WAIT   cpu_to_be16(0x0005)

#define QIB_VENDOR_IPG          cpu_to_be16(0xFFA0)

#define IB_BTH_REQ_ACK          (1 << 31)
#define IB_BTH_SOLICITED        (1 << 23)
#define IB_BTH_MIG_REQ          (1 << 22)
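
/*
 * Usage note (illustrative): IB_BTH_SOLICITED and IB_BTH_MIG_REQ are
 * OR'ed into the first BTH dword (bth[0], which also carries the opcode
 * and P_Key), while IB_BTH_REQ_ACK sets the AckReq bit in the third
 * dword (bth[2], which carries the 24-bit PSN).  For example, a request
 * that wants an explicit ACK and a solicited event would do roughly:
 *
 *	bth0 |= IB_BTH_SOLICITED;
 *	bth2 |= IB_BTH_REQ_ACK;
 */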

/* XXX Should be defined in ib_verbs.h enum ib_port_cap_flags */
#define IB_PORT_OTHER_LOCAL_CHANGES_SUP (1 << 26)

#define IB_GRH_VERSION          6
#define IB_GRH_VERSION_MASK     0xF
#define IB_GRH_VERSION_SHIFT    28
#define IB_GRH_TCLASS_MASK      0xFF
#define IB_GRH_TCLASS_SHIFT     20
#define IB_GRH_FLOW_MASK        0xFFFFF
#define IB_GRH_FLOW_SHIFT       0
#define IB_GRH_NEXT_HDR         0x1B
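
/*
 * Example (illustrative sketch): the first GRH dword packs the IP
 * version, traffic class, and flow label using the shifts above, roughly
 * as the header-building path (qib_make_grh()) does, where tclass and
 * flow_label come from the AH's ib_global_route:
 *
 *	grh->version_tclass_flow =
 *		cpu_to_be32((IB_GRH_VERSION << IB_GRH_VERSION_SHIFT) |
 *			    (tclass << IB_GRH_TCLASS_SHIFT) |
 *			    (flow_label << IB_GRH_FLOW_SHIFT));
 */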

#define IB_DEFAULT_GID_PREFIX   cpu_to_be64(0xfe80000000000000ULL)

/* Values for set/get portinfo VLCap OperationalVLs */
#define IB_VL_VL0       1
#define IB_VL_VL0_1     2
#define IB_VL_VL0_3     3
#define IB_VL_VL0_7     4
#define IB_VL_VL0_14    5

static inline int qib_num_vls(int vls)
{
	switch (vls) {
	default:
	case IB_VL_VL0:
		return 1;
	case IB_VL_VL0_1:
		return 2;
	case IB_VL_VL0_3:
		return 4;
	case IB_VL_VL0_7:
		return 8;
	case IB_VL_VL0_14:
		return 15;
	}
}

struct ib_reth {
	__be64 vaddr;
	__be32 rkey;
	__be32 length;
} __packed;

struct ib_atomic_eth {
	__be32 vaddr[2];        /* unaligned so access as 2 32-bit words */
	__be32 rkey;
	__be64 swap_data;
	__be64 compare_data;
} __packed;

struct qib_other_headers {
	__be32 bth[3];
	union {
		struct {
			__be32 deth[2];
			__be32 imm_data;
		} ud;
		struct {
			struct ib_reth reth;
			__be32 imm_data;
		} rc;
		struct {
			__be32 aeth;
			__be32 atomic_ack_eth[2];
		} at;
		__be32 imm_data;
		__be32 aeth;
		struct ib_atomic_eth atomic_eth;
	} u;
} __packed;

/*
 * Note that UD packets with a GRH header are 8+40+12+8 = 68 bytes
 * long (72 w/ imm_data).  Only the first 56 bytes of the IB header
 * will be in the eager header buffer.  The remaining 12 or 16 bytes
 * are in the data buffer.
 */
struct qib_ib_header {
	__be16 lrh[4];
	union {
		struct {
			struct ib_grh grh;
			struct qib_other_headers oth;
		} l;
		struct qib_other_headers oth;
	} u;
} __packed;

struct qib_pio_header {
	__le32 pbc[2];
	struct qib_ib_header hdr;
} __packed;

/*
 * There is one struct qib_mcast for each multicast GID.
 * All attached QPs are then stored as a list of
 * struct qib_mcast_qp.
 */
struct qib_mcast_qp {
	struct list_head list;
	struct qib_qp *qp;
};

struct qib_mcast {
	struct rb_node rb_node;
	union ib_gid mgid;
	struct list_head qp_list;
	wait_queue_head_t wait;
	atomic_t refcount;
	int n_attached;
};

/* Address Handle */
struct qib_ah {
	struct ib_ah ibah;
	struct ib_ah_attr attr;
	atomic_t refcount;
};

/*
 * This structure is used by qib_mmap() to validate an offset
 * when an mmap() request is made.  The vm_area_struct then uses
 * this as its vm_private_data.
 */
struct qib_mmap_info {
	struct list_head pending_mmaps;
	struct ib_ucontext *context;
	void *obj;
	__u64 offset;
	struct kref ref;
	unsigned size;
};

/*
 * This structure is used to contain the head pointer, tail pointer,
 * and completion queue entries as a single memory allocation so
 * it can be mmap'ed into user space.
 */
struct qib_cq_wc {
	u32 head;               /* index of next entry to fill */
	u32 tail;               /* index of next ib_poll_cq() entry */
	union {
		/* these are actually size ibcq.cqe + 1 */
		struct ib_uverbs_wc uqueue[0];
		struct ib_wc kqueue[0];
	};
};
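
/*
 * Indexing sketch (illustrative): the queue has ibcq.cqe + 1 slots so
 * that head == tail can unambiguously mean "empty".  A producer writes
 * at head and then advances it, wrapping after ibcq.cqe; a consumer
 * reads at tail and advances it the same way, e.g.:
 *
 *	next = (head == cq->ibcq.cqe) ? 0 : head + 1;
 */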

/*
 * The completion queue structure.
 */
struct qib_cq {
	struct ib_cq ibcq;
	struct kthread_work comptask;
	struct qib_devdata *dd;
	spinlock_t lock; /* protect changes in this struct */
	u8 notify;
	u8 triggered;
	struct qib_cq_wc *queue;
	struct qib_mmap_info *ip;
};

/*
 * A segment is a linear region of low physical memory.
 * XXX Maybe we should use phys addr here and kmap()/kunmap().
 * Used by the verbs layer.
 */
struct qib_seg {
	void *vaddr;
	size_t length;
};

/* The number of qib_segs that fit in a page. */
#define QIB_SEGSZ     (PAGE_SIZE / sizeof(struct qib_seg))

struct qib_segarray {
	struct qib_seg segs[QIB_SEGSZ];
};

struct qib_mregion {
	struct ib_pd *pd;       /* shares refcnt of ibmr.pd */
	u64 user_base;          /* User's address for this region */
	u64 iova;               /* IB start address of this region */
	size_t length;
	u32 lkey;
	u32 offset;             /* offset (bytes) to start of region */
	int access_flags;
	u32 max_segs;           /* number of qib_segs in all the arrays */
	u32 mapsz;              /* size of the map array */
	u8  page_shift;         /* 0 - non-uniform/non-power-of-2 sizes */
	u8  lkey_published;     /* in global table */
	struct completion comp; /* complete when refcount goes to zero */
	struct rcu_head list;
	atomic_t refcount;
	struct qib_segarray *map[0];    /* the segments */
};

/*
 * These keep track of the copy progress within a memory region.
 * Used by the verbs layer.
 */
struct qib_sge {
	struct qib_mregion *mr;
	void *vaddr;            /* kernel virtual address of segment */
	u32 sge_length;         /* length of the SGE */
	u32 length;             /* remaining length of the segment */
	u16 m;                  /* current index: mr->map[m] */
	u16 n;                  /* current index: mr->map[m]->segs[n] */
};
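
/*
 * Traversal sketch (illustrative): when a copy exhausts the current
 * segment, the (m, n) pair is advanced through the two-level map,
 * roughly as the copy routines do:
 *
 *	if (++sge->n >= QIB_SEGSZ) {
 *		sge->m++;
 *		sge->n = 0;
 *	}
 *	sge->vaddr = sge->mr->map[sge->m]->segs[sge->n].vaddr;
 *	sge->length = sge->mr->map[sge->m]->segs[sge->n].length;
 */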

/* Memory region */
struct qib_mr {
	struct ib_mr ibmr;
	struct ib_umem *umem;
	u64 *pages;
	u32 npages;
	struct qib_mregion mr;  /* must be last */
};

/*
 * Send work request queue entry.
 * The size of the sg_list is determined when the QP is created and stored
 * in qp->s_max_sge.
 */
struct qib_swqe {
	union {
		struct ib_send_wr wr;   /* don't use wr.sg_list */
		struct ib_ud_wr ud_wr;
		struct ib_reg_wr reg_wr;
		struct ib_rdma_wr rdma_wr;
		struct ib_atomic_wr atomic_wr;
	};
	u32 psn;                /* first packet sequence number */
	u32 lpsn;               /* last packet sequence number */
	u32 ssn;                /* send sequence number */
	u32 length;             /* total length of data in sg_list */
	struct qib_sge sg_list[0];
};

/*
 * Receive work request queue entry.
 * The size of the sg_list is determined when the QP (or SRQ) is created
 * and stored in qp->r_rq.max_sge (or srq->rq.max_sge).
 */
struct qib_rwqe {
	u64 wr_id;
	u8 num_sge;
	struct ib_sge sg_list[0];
};

/*
 * This structure is used to contain the head pointer, tail pointer,
 * and receive work queue entries as a single memory allocation so
 * it can be mmap'ed into user space.
 * Note that the wq array elements are variable size so you can't
 * just index into the array to get the N'th element;
 * use get_rwqe_ptr() instead.
 */
struct qib_rwq {
	u32 head;               /* new work requests posted to the head */
	u32 tail;               /* receives pull requests from here. */
	struct qib_rwqe wq[0];
};

struct qib_rq {
	struct qib_rwq *wq;
	u32 size;               /* size of RWQE array */
	u8 max_sge;
	spinlock_t lock /* protect changes in this struct */
		____cacheline_aligned_in_smp;
};

struct qib_srq {
	struct ib_srq ibsrq;
	struct qib_rq rq;
	struct qib_mmap_info *ip;
	/* send signal when number of RWQEs < limit */
	u32 limit;
};

struct qib_sge_state {
	struct qib_sge *sg_list;      /* next SGE to be used if any */
	struct qib_sge sge;   /* progress state for the current SGE */
	u32 total_len;
	u8 num_sge;
};

/*
 * This structure holds the information that the send tasklet needs
 * to send an RDMA read response or atomic operation.
 */
struct qib_ack_entry {
	u8 opcode;
	u8 sent;
	u32 psn;
	u32 lpsn;
	union {
		struct qib_sge rdma_sge;
		u64 atomic_data;
	};
};

/*
 * Variables prefixed with s_ are for the requester (sender).
 * Variables prefixed with r_ are for the responder (receiver).
 * Variables prefixed with ack_ are for responder replies.
 *
 * Common variables are protected by both r_rq.lock and s_lock in that order
 * which only happens in modify_qp() or changing the QP 'state'.
 */
struct qib_qp {
	struct ib_qp ibqp;
	/* read mostly fields above and below */
	struct ib_ah_attr remote_ah_attr;
	struct ib_ah_attr alt_ah_attr;
	struct qib_qp __rcu *next;      /* link list for QPN hash table */
	struct qib_swqe *s_wq;  /* send work queue */
	struct qib_mmap_info *ip;
	struct qib_ib_header *s_hdr;    /* next packet header to send */
	unsigned long timeout_jiffies;  /* computed from timeout */

	enum ib_mtu path_mtu;
	u32 remote_qpn;
	u32 pmtu;               /* decoded from path_mtu */
	u32 qkey;               /* QKEY for this QP (for UD or RD) */
	u32 s_size;             /* send work queue size */
	u32 s_rnr_timeout;      /* number of milliseconds for RNR timeout */

	u8 state;               /* QP state */
	u8 qp_access_flags;
	u8 alt_timeout;         /* Alternate path timeout for this QP */
	u8 timeout;             /* Timeout for this QP */
	u8 s_srate;
	u8 s_mig_state;
	u8 port_num;
	u8 s_pkey_index;        /* PKEY index to use */
	u8 s_alt_pkey_index;    /* Alternate path PKEY index to use */
	u8 r_max_rd_atomic;     /* max number of RDMA read/atomic to receive */
	u8 s_max_rd_atomic;     /* max number of RDMA read/atomic to send */
	u8 s_retry_cnt;         /* number of times to retry */
	u8 s_rnr_retry_cnt;
	u8 r_min_rnr_timer;     /* retry timeout value for RNR NAKs */
	u8 s_max_sge;           /* size of s_wq->sg_list */
	u8 s_draining;

	/* start of read/write fields */

	atomic_t refcount ____cacheline_aligned_in_smp;
	wait_queue_head_t wait;


	struct qib_ack_entry s_ack_queue[QIB_MAX_RDMA_ATOMIC + 1]
		____cacheline_aligned_in_smp;
	struct qib_sge_state s_rdma_read_sge;

	spinlock_t r_lock ____cacheline_aligned_in_smp;      /* used for APM */
	unsigned long r_aflags;
	u64 r_wr_id;            /* ID for current receive WQE */
	u32 r_ack_psn;          /* PSN for next ACK or atomic ACK */
	u32 r_len;              /* total length of r_sge */
	u32 r_rcv_len;          /* receive data len processed */
	u32 r_psn;              /* expected rcv packet sequence number */
	u32 r_msn;              /* message sequence number */

	u8 r_state;             /* opcode of last packet received */
	u8 r_flags;
	u8 r_head_ack_queue;    /* index into s_ack_queue[] */

	struct list_head rspwait;       /* link for waiting to respond */

	struct qib_sge_state r_sge;     /* current receive data */
	struct qib_rq r_rq;             /* receive work queue */

	spinlock_t s_lock ____cacheline_aligned_in_smp;
	struct qib_sge_state *s_cur_sge;
	u32 s_flags;
	struct qib_verbs_txreq *s_tx;
	struct qib_swqe *s_wqe;
	struct qib_sge_state s_sge;     /* current send request data */
	struct qib_mregion *s_rdma_mr;
	atomic_t s_dma_busy;
	u32 s_cur_size;         /* size of send packet in bytes */
	u32 s_len;              /* total length of s_sge */
	u32 s_rdma_read_len;    /* total length of s_rdma_read_sge */
	u32 s_next_psn;         /* PSN for next request */
	u32 s_last_psn;         /* last response PSN processed */
	u32 s_sending_psn;      /* lowest PSN that is being sent */
	u32 s_sending_hpsn;     /* highest PSN that is being sent */
	u32 s_psn;              /* current packet sequence number */
	u32 s_ack_rdma_psn;     /* PSN for sending RDMA read responses */
	u32 s_ack_psn;          /* PSN for acking sends and RDMA writes */
	u32 s_head;             /* new entries added here */
	u32 s_tail;             /* next entry to process */
	u32 s_cur;              /* current work queue entry */
	u32 s_acked;            /* last un-ACK'ed entry */
	u32 s_last;             /* last completed entry */
	u32 s_ssn;              /* SSN of tail entry */
	u32 s_lsn;              /* limit sequence number (credit) */
	u16 s_hdrwords;         /* size of s_hdr in 32 bit words */
	u16 s_rdma_ack_cnt;
	u8 s_state;             /* opcode of last packet sent */
	u8 s_ack_state;         /* opcode of packet to ACK */
	u8 s_nak_state;         /* non-zero if NAK is pending */
	u8 r_nak_state;         /* non-zero if NAK is pending */
	u8 s_retry;             /* requester retry counter */
	u8 s_rnr_retry;         /* requester RNR retry counter */
	u8 s_num_rd_atomic;     /* number of RDMA read/atomic pending */
	u8 s_tail_ack_queue;    /* index into s_ack_queue[] */

	struct qib_sge_state s_ack_rdma_sge;
	struct timer_list s_timer;
	struct list_head iowait;        /* link for wait PIO buf */

	struct work_struct s_work;

	wait_queue_head_t wait_dma;

	struct qib_sge r_sg_list[0] /* verified SGEs */
		____cacheline_aligned_in_smp;
};

/*
 * Atomic bit definitions for r_aflags.
 */
#define QIB_R_WRID_VALID        0
#define QIB_R_REWIND_SGE        1

/*
 * Bit definitions for r_flags.
 */
#define QIB_R_REUSE_SGE 0x01
#define QIB_R_RDMAR_SEQ 0x02
#define QIB_R_RSP_NAK   0x04
#define QIB_R_RSP_SEND  0x08
#define QIB_R_COMM_EST  0x10

/*
 * Bit definitions for s_flags.
 *
 * QIB_S_SIGNAL_REQ_WR - set if QP send WRs contain completion signaled
 * QIB_S_BUSY - send tasklet is processing the QP
 * QIB_S_TIMER - the RC retry timer is active
 * QIB_S_ACK_PENDING - an ACK is waiting to be sent after RDMA read/atomics
 * QIB_S_WAIT_FENCE - waiting for all prior RDMA read or atomic SWQEs
 *                         before processing the next SWQE
 * QIB_S_WAIT_RDMAR - waiting for an RDMA read or atomic SWQE to complete
 *                         before processing the next SWQE
 * QIB_S_WAIT_RNR - waiting for RNR timeout
 * QIB_S_WAIT_SSN_CREDIT - waiting for RC credits to process next SWQE
 * QIB_S_WAIT_DMA - waiting for send DMA queue to drain before generating
 *                  next send completion entry not via send DMA
 * QIB_S_WAIT_PIO - waiting for a send buffer to be available
 * QIB_S_WAIT_TX - waiting for a struct qib_verbs_txreq to be available
 * QIB_S_WAIT_DMA_DESC - waiting for DMA descriptors to be available
 * QIB_S_WAIT_KMEM - waiting for kernel memory to be available
 * QIB_S_WAIT_PSN - waiting for a packet to exit the send DMA queue
 * QIB_S_WAIT_ACK - waiting for an ACK packet before sending more requests
 * QIB_S_SEND_ONE - send one packet, request ACK, then wait for ACK
 */
#define QIB_S_SIGNAL_REQ_WR     0x0001
#define QIB_S_BUSY              0x0002
#define QIB_S_TIMER             0x0004
#define QIB_S_RESP_PENDING      0x0008
#define QIB_S_ACK_PENDING       0x0010
#define QIB_S_WAIT_FENCE        0x0020
#define QIB_S_WAIT_RDMAR        0x0040
#define QIB_S_WAIT_RNR          0x0080
#define QIB_S_WAIT_SSN_CREDIT   0x0100
#define QIB_S_WAIT_DMA          0x0200
#define QIB_S_WAIT_PIO          0x0400
#define QIB_S_WAIT_TX           0x0800
#define QIB_S_WAIT_DMA_DESC     0x1000
#define QIB_S_WAIT_KMEM         0x2000
#define QIB_S_WAIT_PSN          0x4000
#define QIB_S_WAIT_ACK          0x8000
#define QIB_S_SEND_ONE          0x10000
#define QIB_S_UNLIMITED_CREDIT  0x20000

/*
 * Wait flags that would prevent any packet type from being sent.
 */
#define QIB_S_ANY_WAIT_IO (QIB_S_WAIT_PIO | QIB_S_WAIT_TX | \
	QIB_S_WAIT_DMA_DESC | QIB_S_WAIT_KMEM)

/*
 * Wait flags that would prevent send work requests from making progress.
 */
#define QIB_S_ANY_WAIT_SEND (QIB_S_WAIT_FENCE | QIB_S_WAIT_RDMAR | \
	QIB_S_WAIT_RNR | QIB_S_WAIT_SSN_CREDIT | QIB_S_WAIT_DMA | \
	QIB_S_WAIT_PSN | QIB_S_WAIT_ACK)

#define QIB_S_ANY_WAIT (QIB_S_ANY_WAIT_IO | QIB_S_ANY_WAIT_SEND)

#define QIB_PSN_CREDIT  16

/*
 * Since struct qib_swqe is not a fixed size, we can't simply index into
 * struct qib_qp.s_wq.  This function does the array index computation.
 */
static inline struct qib_swqe *get_swqe_ptr(struct qib_qp *qp,
					    unsigned n)
{
	return (struct qib_swqe *)((char *)qp->s_wq +
				     (sizeof(struct qib_swqe) +
				      qp->s_max_sge *
				      sizeof(struct qib_sge)) * n);
}

/*
 * Since struct qib_rwqe is not a fixed size, we can't simply index into
 * struct qib_rwq.wq.  This function does the array index computation.
 */
static inline struct qib_rwqe *get_rwqe_ptr(struct qib_rq *rq, unsigned n)
{
	return (struct qib_rwqe *)
		((char *) rq->wq->wq +
		 (sizeof(struct qib_rwqe) +
		  rq->max_sge * sizeof(struct ib_sge)) * n);
}
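
/*
 * Stride example (illustrative, assuming a typical 64-bit build): with
 * max_sge = 2, each element occupies sizeof(struct qib_rwqe) +
 * 2 * sizeof(struct ib_sge) = 16 + 32 = 48 bytes, so get_rwqe_ptr(rq, n)
 * lands 48 * n bytes past rq->wq->wq.
 */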

/*
 * QPN-map pages start out as NULL, they get allocated upon
 * first use and are never deallocated.  This way,
 * large bitmaps are not allocated unless large numbers of QPs are used.
 */
struct qpn_map {
	void *page;
};

struct qib_qpn_table {
	spinlock_t lock; /* protect changes in this struct */
	unsigned flags;         /* flags for QP0/1 allocated for each port */
	u32 last;               /* last QP number allocated */
	u32 nmaps;              /* size of the map table */
	u16 limit;
	u16 mask;
	/* bit map of free QP numbers other than 0/1 */
	struct qpn_map map[QPNMAP_ENTRIES];
};

#define MAX_LKEY_TABLE_BITS 23

struct qib_lkey_table {
	spinlock_t lock; /* protect changes in this struct */
	u32 next;               /* next unused index (speeds search) */
	u32 gen;                /* generation count */
	u32 max;                /* size of the table */
	struct qib_mregion __rcu **table;
};

struct qib_opcode_stats {
	u64 n_packets;          /* number of packets */
	u64 n_bytes;            /* total number of bytes */
};

struct qib_opcode_stats_perctx {
	struct qib_opcode_stats stats[128];
};

struct qib_pma_counters {
	u64 n_unicast_xmit;     /* total unicast packets sent */
	u64 n_unicast_rcv;      /* total unicast packets received */
	u64 n_multicast_xmit;   /* total multicast packets sent */
	u64 n_multicast_rcv;    /* total multicast packets received */
};

struct qib_ibport {
	struct qib_qp __rcu *qp0;
	struct qib_qp __rcu *qp1;
	struct ib_mad_agent *send_agent;        /* agent for SMI (traps) */
	struct qib_ah *sm_ah;
	struct qib_ah *smi_ah;
	struct rb_root mcast_tree;
	spinlock_t lock;                /* protect changes in this struct */

	/* non-zero when timer is set */
	unsigned long mkey_lease_timeout;
	unsigned long trap_timeout;
	__be64 gid_prefix;      /* in network order */
	__be64 mkey;
	__be64 guids[QIB_GUIDS_PER_PORT - 1];   /* writable GUIDs */
	u64 tid;                /* TID for traps */
	struct qib_pma_counters __percpu *pmastats;
	u64 z_unicast_xmit;     /* starting count for PMA */
	u64 z_unicast_rcv;      /* starting count for PMA */
	u64 z_multicast_xmit;   /* starting count for PMA */
	u64 z_multicast_rcv;    /* starting count for PMA */
	u64 z_symbol_error_counter;             /* starting count for PMA */
	u64 z_link_error_recovery_counter;      /* starting count for PMA */
	u64 z_link_downed_counter;              /* starting count for PMA */
	u64 z_port_rcv_errors;                  /* starting count for PMA */
	u64 z_port_rcv_remphys_errors;          /* starting count for PMA */
	u64 z_port_xmit_discards;               /* starting count for PMA */
	u64 z_port_xmit_data;                   /* starting count for PMA */
	u64 z_port_rcv_data;                    /* starting count for PMA */
	u64 z_port_xmit_packets;                /* starting count for PMA */
	u64 z_port_rcv_packets;                 /* starting count for PMA */
	u32 z_local_link_integrity_errors;      /* starting count for PMA */
	u32 z_excessive_buffer_overrun_errors;  /* starting count for PMA */
	u32 z_vl15_dropped;                     /* starting count for PMA */
	u32 n_rc_resends;
	u32 n_rc_acks;
	u32 n_rc_qacks;
	u32 n_rc_delayed_comp;
	u32 n_seq_naks;
	u32 n_rdma_seq;
	u32 n_rnr_naks;
	u32 n_other_naks;
	u32 n_loop_pkts;
	u32 n_pkt_drops;
	u32 n_vl15_dropped;
	u32 n_rc_timeouts;
	u32 n_dmawait;
	u32 n_unaligned;
	u32 n_rc_dupreq;
	u32 n_rc_seqnak;
	u32 port_cap_flags;
	u32 pma_sample_start;
	u32 pma_sample_interval;
	__be16 pma_counter_select[5];
	u16 pma_tag;
	u16 pkey_violations;
	u16 qkey_violations;
	u16 mkey_violations;
	u16 mkey_lease_period;
	u16 sm_lid;
	u16 repress_traps;
	u8 sm_sl;
	u8 mkeyprot;
	u8 subnet_timeout;
	u8 vl_high_limit;
	u8 sl_to_vl[16];

};


struct qib_ibdev {
	struct rvt_dev_info rdi;
	struct list_head pending_mmaps;
	spinlock_t mmap_offset_lock; /* protect mmap_offset */
	u32 mmap_offset;
	struct qib_mregion __rcu *dma_mr;

	/* QP numbers are shared by all IB ports */
	struct qib_qpn_table qpn_table;
	struct qib_lkey_table lk_table;
	struct list_head piowait;       /* list for wait PIO buf */
	struct list_head dmawait;       /* list for wait DMA */
	struct list_head txwait;        /* list for wait qib_verbs_txreq */
	struct list_head memwait;       /* list for wait kernel memory */
	struct list_head txreq_free;
	struct timer_list mem_timer;
	struct qib_qp __rcu **qp_table;
	struct qib_pio_header *pio_hdrs;
	dma_addr_t pio_hdrs_phys;
	/* list of QPs waiting for RNR timer */
	spinlock_t pending_lock; /* protect wait lists, PMA counters, etc. */
	u32 qp_table_size; /* size of the hash table */
	u32 qp_rnd; /* random bytes for hash */
	spinlock_t qpt_lock;

	u32 n_piowait;
	u32 n_txwait;

	u32 n_pds_allocated;    /* number of PDs allocated for device */
	spinlock_t n_pds_lock;
	u32 n_ahs_allocated;    /* number of AHs allocated for device */
	spinlock_t n_ahs_lock;
	u32 n_cqs_allocated;    /* number of CQs allocated for device */
	spinlock_t n_cqs_lock;
	u32 n_qps_allocated;    /* number of QPs allocated for device */
	spinlock_t n_qps_lock;
	u32 n_srqs_allocated;   /* number of SRQs allocated for device */
	spinlock_t n_srqs_lock;
	u32 n_mcast_grps_allocated; /* number of mcast groups allocated */
	spinlock_t n_mcast_grps_lock;
#ifdef CONFIG_DEBUG_FS
	/* per HCA debugfs */
	struct dentry *qib_ibdev_dbg;
#endif
};

struct qib_verbs_counters {
	u64 symbol_error_counter;
	u64 link_error_recovery_counter;
	u64 link_downed_counter;
	u64 port_rcv_errors;
	u64 port_rcv_remphys_errors;
	u64 port_xmit_discards;
	u64 port_xmit_data;
	u64 port_rcv_data;
	u64 port_xmit_packets;
	u64 port_rcv_packets;
	u32 local_link_integrity_errors;
	u32 excessive_buffer_overrun_errors;
	u32 vl15_dropped;
};

static inline struct qib_mr *to_imr(struct ib_mr *ibmr)
{
	return container_of(ibmr, struct qib_mr, ibmr);
}

static inline struct qib_ah *to_iah(struct ib_ah *ibah)
{
	return container_of(ibah, struct qib_ah, ibah);
}

static inline struct qib_cq *to_icq(struct ib_cq *ibcq)
{
	return container_of(ibcq, struct qib_cq, ibcq);
}

static inline struct qib_srq *to_isrq(struct ib_srq *ibsrq)
{
	return container_of(ibsrq, struct qib_srq, ibsrq);
}

static inline struct qib_qp *to_iqp(struct ib_qp *ibqp)
{
	return container_of(ibqp, struct qib_qp, ibqp);
}

static inline struct qib_ibdev *to_idev(struct ib_device *ibdev)
{
	struct rvt_dev_info *rdi;

	rdi = container_of(ibdev, struct rvt_dev_info, ibdev);
	return container_of(rdi, struct qib_ibdev, rdi);
}

/*
 * Send if not busy or waiting for I/O and either
 * an RC response is pending or we can process send work requests.
 */
static inline int qib_send_ok(struct qib_qp *qp)
{
	return !(qp->s_flags & (QIB_S_BUSY | QIB_S_ANY_WAIT_IO)) &&
		(qp->s_hdrwords || (qp->s_flags & QIB_S_RESP_PENDING) ||
		 !(qp->s_flags & QIB_S_ANY_WAIT_SEND));
}

/*
 * This must be called with s_lock held.
 */
void qib_schedule_send(struct qib_qp *qp);

static inline int qib_pkey_ok(u16 pkey1, u16 pkey2)
{
	u16 p1 = pkey1 & 0x7FFF;
	u16 p2 = pkey2 & 0x7FFF;

	/*
	 * Low 15 bits must be non-zero and match, and
	 * one of the two must be a full member.
	 */
	return p1 && p1 == p2 && ((__s16)pkey1 < 0 || (__s16)pkey2 < 0);
}
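
/*
 * Example (illustrative): qib_pkey_ok(0x8001, 0x0001) is non-zero because
 * the low 15 bits match and 0x8001 has the full-member bit set, while
 * qib_pkey_ok(0x0001, 0x0001) is 0 because both keys are limited members.
 */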

void qib_bad_pqkey(struct qib_ibport *ibp, __be16 trap_num, u32 key, u32 sl,
		   u32 qp1, u32 qp2, __be16 lid1, __be16 lid2);
void qib_cap_mask_chg(struct qib_ibport *ibp);
void qib_sys_guid_chg(struct qib_ibport *ibp);
void qib_node_desc_chg(struct qib_ibport *ibp);
int qib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
		    const struct ib_wc *in_wc, const struct ib_grh *in_grh,
		    const struct ib_mad_hdr *in, size_t in_mad_size,
		    struct ib_mad_hdr *out, size_t *out_mad_size,
		    u16 *out_mad_pkey_index);
int qib_create_agents(struct qib_ibdev *dev);
void qib_free_agents(struct qib_ibdev *dev);

/*
 * Compare the lower 24 bits of the two values.
 * Returns an integer <, ==, or > than zero.
 */
static inline int qib_cmp24(u32 a, u32 b)
{
	return (((int) a) - ((int) b)) << 8;
}
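
/*
 * Example (illustrative): the shift discards the upper 8 bits, so only
 * the 24-bit PSN space matters and comparisons wrap correctly, e.g.
 * qib_cmp24(1, 0xFFFFFE) > 0 (PSN 1 is "after" 0xFFFFFE), while
 * qib_cmp24(5, 5) == 0.
 */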

struct qib_mcast *qib_mcast_find(struct qib_ibport *ibp, union ib_gid *mgid);

int qib_snapshot_counters(struct qib_pportdata *ppd, u64 *swords,
			  u64 *rwords, u64 *spkts, u64 *rpkts,
			  u64 *xmit_wait);

int qib_get_counters(struct qib_pportdata *ppd,
		     struct qib_verbs_counters *cntrs);

int qib_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid);

int qib_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid);

int qib_mcast_tree_empty(struct qib_ibport *ibp);

__be32 qib_compute_aeth(struct qib_qp *qp);

struct qib_qp *qib_lookup_qpn(struct qib_ibport *ibp, u32 qpn);

struct ib_qp *qib_create_qp(struct ib_pd *ibpd,
			    struct ib_qp_init_attr *init_attr,
			    struct ib_udata *udata);

int qib_destroy_qp(struct ib_qp *ibqp);

int qib_error_qp(struct qib_qp *qp, enum ib_wc_status err);

int qib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		  int attr_mask, struct ib_udata *udata);

int qib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		 int attr_mask, struct ib_qp_init_attr *init_attr);

unsigned qib_free_all_qps(struct qib_devdata *dd);

void qib_init_qpn_table(struct qib_devdata *dd, struct qib_qpn_table *qpt);

void qib_free_qpn_table(struct qib_qpn_table *qpt);

#ifdef CONFIG_DEBUG_FS

struct qib_qp_iter;

struct qib_qp_iter *qib_qp_iter_init(struct qib_ibdev *dev);

int qib_qp_iter_next(struct qib_qp_iter *iter);

void qib_qp_iter_print(struct seq_file *s, struct qib_qp_iter *iter);

#endif

void qib_get_credit(struct qib_qp *qp, u32 aeth);

unsigned qib_pkt_delay(u32 plen, u8 snd_mult, u8 rcv_mult);

void qib_verbs_sdma_desc_avail(struct qib_pportdata *ppd, unsigned avail);

void qib_put_txreq(struct qib_verbs_txreq *tx);

int qib_verbs_send(struct qib_qp *qp, struct qib_ib_header *hdr,
		   u32 hdrwords, struct qib_sge_state *ss, u32 len);

void qib_copy_sge(struct qib_sge_state *ss, void *data, u32 length,
		  int release);

void qib_skip_sge(struct qib_sge_state *ss, u32 length, int release);

void qib_uc_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
		int has_grh, void *data, u32 tlen, struct qib_qp *qp);

void qib_rc_rcv(struct qib_ctxtdata *rcd, struct qib_ib_header *hdr,
		int has_grh, void *data, u32 tlen, struct qib_qp *qp);

int qib_check_ah(struct ib_device *ibdev, struct ib_ah_attr *ah_attr);

struct ib_ah *qib_create_qp0_ah(struct qib_ibport *ibp, u16 dlid);

void qib_rc_rnr_retry(unsigned long arg);

void qib_rc_send_complete(struct qib_qp *qp, struct qib_ib_header *hdr);

void qib_rc_error(struct qib_qp *qp, enum ib_wc_status err);

int qib_post_ud_send(struct qib_qp *qp, struct ib_send_wr *wr);

void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
		int has_grh, void *data, u32 tlen, struct qib_qp *qp);

int qib_alloc_lkey(struct qib_mregion *mr, int dma_region);

void qib_free_lkey(struct qib_mregion *mr);

int qib_lkey_ok(struct qib_lkey_table *rkt, struct rvt_pd *pd,
		struct qib_sge *isge, struct ib_sge *sge, int acc);

int qib_rkey_ok(struct qib_qp *qp, struct qib_sge *sge,
		u32 len, u64 vaddr, u32 rkey, int acc);

int qib_post_srq_receive(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
			 struct ib_recv_wr **bad_wr);

struct ib_srq *qib_create_srq(struct ib_pd *ibpd,
			      struct ib_srq_init_attr *srq_init_attr,
			      struct ib_udata *udata);

int qib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
		   enum ib_srq_attr_mask attr_mask,
		   struct ib_udata *udata);

int qib_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr);

int qib_destroy_srq(struct ib_srq *ibsrq);

int qib_cq_init(struct qib_devdata *dd);

void qib_cq_exit(struct qib_devdata *dd);

void qib_cq_enter(struct qib_cq *cq, struct ib_wc *entry, int sig);

int qib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry);

struct ib_cq *qib_create_cq(struct ib_device *ibdev,
			    const struct ib_cq_init_attr *attr,
			    struct ib_ucontext *context,
			    struct ib_udata *udata);

int qib_destroy_cq(struct ib_cq *ibcq);

int qib_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_flags);

int qib_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata);

struct ib_mr *qib_get_dma_mr(struct ib_pd *pd, int acc);

struct ib_mr *qib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
			      u64 virt_addr, int mr_access_flags,
			      struct ib_udata *udata);

int qib_dereg_mr(struct ib_mr *ibmr);

struct ib_mr *qib_alloc_mr(struct ib_pd *pd,
			   enum ib_mr_type mr_type,
			   u32 max_entries);

int qib_map_mr_sg(struct ib_mr *ibmr,
		  struct scatterlist *sg,
		  int sg_nents);

int qib_reg_mr(struct qib_qp *qp, struct ib_reg_wr *wr);

struct ib_fmr *qib_alloc_fmr(struct ib_pd *pd, int mr_access_flags,
			     struct ib_fmr_attr *fmr_attr);

int qib_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
		     int list_len, u64 iova);

int qib_unmap_fmr(struct list_head *fmr_list);

int qib_dealloc_fmr(struct ib_fmr *ibfmr);

static inline void qib_get_mr(struct qib_mregion *mr)
{
	atomic_inc(&mr->refcount);
}

void mr_rcu_callback(struct rcu_head *list);

static inline void qib_put_mr(struct qib_mregion *mr)
{
	if (unlikely(atomic_dec_and_test(&mr->refcount)))
		call_rcu(&mr->list, mr_rcu_callback);
}

static inline void qib_put_ss(struct qib_sge_state *ss)
{
	while (ss->num_sge) {
		qib_put_mr(ss->sge.mr);
		if (--ss->num_sge)
			ss->sge = *ss->sg_list++;
	}
}


void qib_release_mmap_info(struct kref *ref);

struct qib_mmap_info *qib_create_mmap_info(struct qib_ibdev *dev, u32 size,
					   struct ib_ucontext *context,
					   void *obj);

void qib_update_mmap_info(struct qib_ibdev *dev, struct qib_mmap_info *ip,
			  u32 size, void *obj);

int qib_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);

int qib_get_rwqe(struct qib_qp *qp, int wr_id_only);

void qib_migrate_qp(struct qib_qp *qp);

int qib_ruc_check_hdr(struct qib_ibport *ibp, struct qib_ib_header *hdr,
		      int has_grh, struct qib_qp *qp, u32 bth0);

u32 qib_make_grh(struct qib_ibport *ibp, struct ib_grh *hdr,
		 struct ib_global_route *grh, u32 hwords, u32 nwords);

void qib_make_ruc_header(struct qib_qp *qp, struct qib_other_headers *ohdr,
			 u32 bth0, u32 bth2);

void qib_do_send(struct work_struct *work);

void qib_send_complete(struct qib_qp *qp, struct qib_swqe *wqe,
		       enum ib_wc_status status);

void qib_send_rc_ack(struct qib_qp *qp);

int qib_make_rc_req(struct qib_qp *qp);

int qib_make_uc_req(struct qib_qp *qp);

int qib_make_ud_req(struct qib_qp *qp);

int qib_register_ib_device(struct qib_devdata *);

void qib_unregister_ib_device(struct qib_devdata *);

void qib_ib_rcv(struct qib_ctxtdata *, void *, void *, u32);

void qib_ib_piobufavail(struct qib_devdata *);

unsigned qib_get_npkeys(struct qib_devdata *);

unsigned qib_get_pkey(struct qib_ibport *, unsigned);

extern const enum ib_wc_opcode ib_qib_wc_opcode[];

/*
 * Below are the HCA-independent IB PhysPortState values returned
 * by the f_ibphys_portstate() routine.
 */
#define IB_PHYSPORTSTATE_SLEEP 1
#define IB_PHYSPORTSTATE_POLL 2
#define IB_PHYSPORTSTATE_DISABLED 3
#define IB_PHYSPORTSTATE_CFG_TRAIN 4
#define IB_PHYSPORTSTATE_LINKUP 5
#define IB_PHYSPORTSTATE_LINK_ERR_RECOVER 6
#define IB_PHYSPORTSTATE_CFG_DEBOUNCE 8
#define IB_PHYSPORTSTATE_CFG_IDLE 0xB
#define IB_PHYSPORTSTATE_RECOVERY_RETRAIN 0xC
#define IB_PHYSPORTSTATE_RECOVERY_WAITRMT 0xE
#define IB_PHYSPORTSTATE_RECOVERY_IDLE 0xF
#define IB_PHYSPORTSTATE_CFG_ENH 0x10
#define IB_PHYSPORTSTATE_CFG_WAIT_ENH 0x13

extern const int ib_qib_state_ops[];

extern __be64 ib_qib_sys_image_guid;    /* in network order */

extern unsigned int ib_qib_lkey_table_size;

extern unsigned int ib_qib_max_cqes;

extern unsigned int ib_qib_max_cqs;

extern unsigned int ib_qib_max_qp_wrs;

extern unsigned int ib_qib_max_qps;

extern unsigned int ib_qib_max_sges;

extern unsigned int ib_qib_max_mcast_grps;

extern unsigned int ib_qib_max_mcast_qp_attached;

extern unsigned int ib_qib_max_srqs;

extern unsigned int ib_qib_max_srq_sges;

extern unsigned int ib_qib_max_srq_wrs;

extern const u32 ib_qib_rnr_table[];

#endif                          /* QIB_VERBS_H */