/*
 * Copyright (c) 2004 Mellanox Technologies Ltd.  All rights reserved.
 * Copyright (c) 2004 Infinicon Corporation.  All rights reserved.
 * Copyright (c) 2004 Intel Corporation.  All rights reserved.
 * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
 * Copyright (c) 2004 Voltaire Corporation.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#if !defined(IB_VERBS_H)
#define IB_VERBS_H

#include <linux/types.h>
#include <linux/device.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/rwsem.h>
#include <linux/scatterlist.h>
#include <linux/workqueue.h>
#include <uapi/linux/if_ether.h>

#include <linux/atomic.h>
#include <asm/uaccess.h>

extern struct workqueue_struct *ib_wq;

union ib_gid {
	u8	raw[16];
	struct {
		__be64	subnet_prefix;
		__be64	interface_id;
	} global;
};

enum rdma_node_type {
	/* IB values map to NodeInfo:NodeType. */
	RDMA_NODE_IB_CA = 1,
	RDMA_NODE_IB_SWITCH,
	RDMA_NODE_IB_ROUTER,
	RDMA_NODE_RNIC,
	RDMA_NODE_USNIC,
};

enum rdma_transport_type {
	RDMA_TRANSPORT_IB,
	RDMA_TRANSPORT_IWARP,
	RDMA_TRANSPORT_USNIC
};

enum rdma_transport_type
rdma_node_get_transport(enum rdma_node_type node_type) __attribute_const__;

enum rdma_link_layer {
	IB_LINK_LAYER_UNSPECIFIED,
	IB_LINK_LAYER_INFINIBAND,
	IB_LINK_LAYER_ETHERNET,
};

enum ib_device_cap_flags {
	IB_DEVICE_RESIZE_MAX_WR		= 1,
	IB_DEVICE_BAD_PKEY_CNTR		= (1<<1),
	IB_DEVICE_BAD_QKEY_CNTR		= (1<<2),
	IB_DEVICE_RAW_MULTI		= (1<<3),
	IB_DEVICE_AUTO_PATH_MIG		= (1<<4),
	IB_DEVICE_CHANGE_PHY_PORT	= (1<<5),
	IB_DEVICE_UD_AV_PORT_ENFORCE	= (1<<6),
	IB_DEVICE_CURR_QP_STATE_MOD	= (1<<7),
	IB_DEVICE_SHUTDOWN_PORT		= (1<<8),
	IB_DEVICE_INIT_TYPE		= (1<<9),
	IB_DEVICE_PORT_ACTIVE_EVENT	= (1<<10),
	IB_DEVICE_SYS_IMAGE_GUID	= (1<<11),
	IB_DEVICE_RC_RNR_NAK_GEN	= (1<<12),
	IB_DEVICE_SRQ_RESIZE		= (1<<13),
	IB_DEVICE_N_NOTIFY_CQ		= (1<<14),
	IB_DEVICE_LOCAL_DMA_LKEY	= (1<<15),
	IB_DEVICE_RESERVED		= (1<<16), /* old SEND_W_INV */
	IB_DEVICE_MEM_WINDOW		= (1<<17),
	/*
	 * Devices should set IB_DEVICE_UD_IP_CSUM if they support
	 * insertion of UDP and TCP checksums on outgoing UD IPoIB
	 * messages and can verify the validity of checksums for
	 * incoming messages.  Setting this flag implies that the
	 * IPoIB driver may set NETIF_F_IP_CSUM for datagram mode.
	 */
	IB_DEVICE_UD_IP_CSUM		= (1<<18),
	IB_DEVICE_UD_TSO		= (1<<19),
	IB_DEVICE_XRC			= (1<<20),
	IB_DEVICE_MEM_MGT_EXTENSIONS	= (1<<21),
	IB_DEVICE_BLOCK_MULTICAST_LOOPBACK = (1<<22),
	IB_DEVICE_MEM_WINDOW_TYPE_2A	= (1<<23),
	IB_DEVICE_MEM_WINDOW_TYPE_2B	= (1<<24),
	IB_DEVICE_MANAGED_FLOW_STEERING = (1<<29)
};
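
/*
 * Illustrative sketch, not part of this header: a consumer can test
 * the capability bits above after querying the device.  Only
 * ib_query_device() (declared later in this file) and the flag are
 * real; the helper name is hypothetical.
 */
#if 0	/* example only */
static bool example_dev_supports_ud_csum(struct ib_device *dev)
{
	struct ib_device_attr attr;

	if (ib_query_device(dev, &attr))
		return false;
	return !!(attr.device_cap_flags & IB_DEVICE_UD_IP_CSUM);
}
#endif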

enum ib_atomic_cap {
	IB_ATOMIC_NONE,
	IB_ATOMIC_HCA,
	IB_ATOMIC_GLOB
};

struct ib_device_attr {
	u64			fw_ver;
	__be64			sys_image_guid;
	u64			max_mr_size;
	u64			page_size_cap;
	u32			vendor_id;
	u32			vendor_part_id;
	u32			hw_ver;
	int			max_qp;
	int			max_qp_wr;
	int			device_cap_flags;
	int			max_sge;
	int			max_sge_rd;
	int			max_cq;
	int			max_cqe;
	int			max_mr;
	int			max_pd;
	int			max_qp_rd_atom;
	int			max_ee_rd_atom;
	int			max_res_rd_atom;
	int			max_qp_init_rd_atom;
	int			max_ee_init_rd_atom;
	enum ib_atomic_cap	atomic_cap;
	enum ib_atomic_cap	masked_atomic_cap;
	int			max_ee;
	int			max_rdd;
	int			max_mw;
	int			max_raw_ipv6_qp;
	int			max_raw_ethy_qp;
	int			max_mcast_grp;
	int			max_mcast_qp_attach;
	int			max_total_mcast_qp_attach;
	int			max_ah;
	int			max_fmr;
	int			max_map_per_fmr;
	int			max_srq;
	int			max_srq_wr;
	int			max_srq_sge;
	unsigned int		max_fast_reg_page_list_len;
	u16			max_pkeys;
	u8			local_ca_ack_delay;
};

enum ib_mtu {
	IB_MTU_256  = 1,
	IB_MTU_512  = 2,
	IB_MTU_1024 = 3,
	IB_MTU_2048 = 4,
	IB_MTU_4096 = 5
};

static inline int ib_mtu_enum_to_int(enum ib_mtu mtu)
{
	switch (mtu) {
	case IB_MTU_256:  return  256;
	case IB_MTU_512:  return  512;
	case IB_MTU_1024: return 1024;
	case IB_MTU_2048: return 2048;
	case IB_MTU_4096: return 4096;
	default:	  return -1;
	}
}

enum ib_port_state {
	IB_PORT_NOP		= 0,
	IB_PORT_DOWN		= 1,
	IB_PORT_INIT		= 2,
	IB_PORT_ARMED		= 3,
	IB_PORT_ACTIVE		= 4,
	IB_PORT_ACTIVE_DEFER	= 5
};

enum ib_port_cap_flags {
	IB_PORT_SM				= 1 <<  1,
	IB_PORT_NOTICE_SUP			= 1 <<  2,
	IB_PORT_TRAP_SUP			= 1 <<  3,
	IB_PORT_OPT_IPD_SUP			= 1 <<  4,
	IB_PORT_AUTO_MIGR_SUP			= 1 <<  5,
	IB_PORT_SL_MAP_SUP			= 1 <<  6,
	IB_PORT_MKEY_NVRAM			= 1 <<  7,
	IB_PORT_PKEY_NVRAM			= 1 <<  8,
	IB_PORT_LED_INFO_SUP			= 1 <<  9,
	IB_PORT_SM_DISABLED			= 1 << 10,
	IB_PORT_SYS_IMAGE_GUID_SUP		= 1 << 11,
	IB_PORT_PKEY_SW_EXT_PORT_TRAP_SUP	= 1 << 12,
	IB_PORT_EXTENDED_SPEEDS_SUP		= 1 << 14,
	IB_PORT_CM_SUP				= 1 << 16,
	IB_PORT_SNMP_TUNNEL_SUP			= 1 << 17,
	IB_PORT_REINIT_SUP			= 1 << 18,
	IB_PORT_DEVICE_MGMT_SUP			= 1 << 19,
	IB_PORT_VENDOR_CLASS_SUP		= 1 << 20,
	IB_PORT_DR_NOTICE_SUP			= 1 << 21,
	IB_PORT_CAP_MASK_NOTICE_SUP		= 1 << 22,
	IB_PORT_BOOT_MGMT_SUP			= 1 << 23,
	IB_PORT_LINK_LATENCY_SUP		= 1 << 24,
	IB_PORT_CLIENT_REG_SUP			= 1 << 25
};

enum ib_port_width {
	IB_WIDTH_1X	= 1,
	IB_WIDTH_4X	= 2,
	IB_WIDTH_8X	= 4,
	IB_WIDTH_12X	= 8
};

static inline int ib_width_enum_to_int(enum ib_port_width width)
{
	switch (width) {
	case IB_WIDTH_1X:  return  1;
	case IB_WIDTH_4X:  return  4;
	case IB_WIDTH_8X:  return  8;
	case IB_WIDTH_12X: return 12;
	default:	   return -1;
	}
}

enum ib_port_speed {
	IB_SPEED_SDR	= 1,
	IB_SPEED_DDR	= 2,
	IB_SPEED_QDR	= 4,
	IB_SPEED_FDR10	= 8,
	IB_SPEED_FDR	= 16,
	IB_SPEED_EDR	= 32
};
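
/*
 * Illustrative sketch, not part of this header: a port's nominal
 * bandwidth is the product of its lane count (active_width) and its
 * per-lane signaling rate (active_speed), both reported in struct
 * ib_port_attr below; e.g. 4X QDR is roughly 4 lanes * 10 Gbps =
 * 40 Gbps.  The helper and its rounded per-lane rates are
 * hypothetical approximations.
 */
#if 0	/* example only */
static int example_port_gbps(u8 active_width, u8 active_speed)
{
	int lanes = ib_width_enum_to_int(active_width);	/* 1, 4, 8 or 12 */
	int per_lane;					/* Gbps, rounded */

	switch (active_speed) {
	case IB_SPEED_SDR:   per_lane = 2;  break;	/* 2.5 Gbps */
	case IB_SPEED_DDR:   per_lane = 5;  break;
	case IB_SPEED_QDR:   per_lane = 10; break;
	case IB_SPEED_FDR10: per_lane = 10; break;
	case IB_SPEED_FDR:   per_lane = 14; break;
	case IB_SPEED_EDR:   per_lane = 25; break;
	default:	     return -1;
	}
	return lanes < 0 ? -1 : lanes * per_lane;
}
#endif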

struct ib_protocol_stats {
	/* TBD... */
};

struct iw_protocol_stats {
	u64	ipInReceives;
	u64	ipInHdrErrors;
	u64	ipInTooBigErrors;
	u64	ipInNoRoutes;
	u64	ipInAddrErrors;
	u64	ipInUnknownProtos;
	u64	ipInTruncatedPkts;
	u64	ipInDiscards;
	u64	ipInDelivers;
	u64	ipOutForwDatagrams;
	u64	ipOutRequests;
	u64	ipOutDiscards;
	u64	ipOutNoRoutes;
	u64	ipReasmTimeout;
	u64	ipReasmReqds;
	u64	ipReasmOKs;
	u64	ipReasmFails;
	u64	ipFragOKs;
	u64	ipFragFails;
	u64	ipFragCreates;
	u64	ipInMcastPkts;
	u64	ipOutMcastPkts;
	u64	ipInBcastPkts;
	u64	ipOutBcastPkts;

	u64	tcpRtoAlgorithm;
	u64	tcpRtoMin;
	u64	tcpRtoMax;
	u64	tcpMaxConn;
	u64	tcpActiveOpens;
	u64	tcpPassiveOpens;
	u64	tcpAttemptFails;
	u64	tcpEstabResets;
	u64	tcpCurrEstab;
	u64	tcpInSegs;
	u64	tcpOutSegs;
	u64	tcpRetransSegs;
	u64	tcpInErrs;
	u64	tcpOutRsts;
};

union rdma_protocol_stats {
	struct ib_protocol_stats	ib;
	struct iw_protocol_stats	iw;
};

struct ib_port_attr {
	enum ib_port_state	state;
	enum ib_mtu		max_mtu;
	enum ib_mtu		active_mtu;
	int			gid_tbl_len;
	u32			port_cap_flags;
	u32			max_msg_sz;
	u32			bad_pkey_cntr;
	u32			qkey_viol_cntr;
	u16			pkey_tbl_len;
	u16			lid;
	u16			sm_lid;
	u8			lmc;
	u8			max_vl_num;
	u8			sm_sl;
	u8			subnet_timeout;
	u8			init_type_reply;
	u8			active_width;
	u8			active_speed;
	u8			phys_state;
};

enum ib_device_modify_flags {
	IB_DEVICE_MODIFY_SYS_IMAGE_GUID	= 1 << 0,
	IB_DEVICE_MODIFY_NODE_DESC	= 1 << 1
};

struct ib_device_modify {
	u64	sys_image_guid;
	char	node_desc[64];
};

enum ib_port_modify_flags {
	IB_PORT_SHUTDOWN		= 1,
	IB_PORT_INIT_TYPE		= (1<<2),
	IB_PORT_RESET_QKEY_CNTR		= (1<<3)
};

struct ib_port_modify {
	u32	set_port_cap_mask;
	u32	clr_port_cap_mask;
	u8	init_type;
};

enum ib_event_type {
	IB_EVENT_CQ_ERR,
	IB_EVENT_QP_FATAL,
	IB_EVENT_QP_REQ_ERR,
	IB_EVENT_QP_ACCESS_ERR,
	IB_EVENT_COMM_EST,
	IB_EVENT_SQ_DRAINED,
	IB_EVENT_PATH_MIG,
	IB_EVENT_PATH_MIG_ERR,
	IB_EVENT_DEVICE_FATAL,
	IB_EVENT_PORT_ACTIVE,
	IB_EVENT_PORT_ERR,
	IB_EVENT_LID_CHANGE,
	IB_EVENT_PKEY_CHANGE,
	IB_EVENT_SM_CHANGE,
	IB_EVENT_SRQ_ERR,
	IB_EVENT_SRQ_LIMIT_REACHED,
	IB_EVENT_QP_LAST_WQE_REACHED,
	IB_EVENT_CLIENT_REREGISTER,
	IB_EVENT_GID_CHANGE,
};

struct ib_event {
	struct ib_device	*device;
	union {
		struct ib_cq	*cq;
		struct ib_qp	*qp;
		struct ib_srq	*srq;
		u8		port_num;
	} element;
	enum ib_event_type	event;
};

struct ib_event_handler {
	struct ib_device *device;
	void            (*handler)(struct ib_event_handler *, struct ib_event *);
	struct list_head  list;
};

#define INIT_IB_EVENT_HANDLER(_ptr, _device, _handler)		\
	do {							\
		(_ptr)->device  = _device;			\
		(_ptr)->handler = _handler;			\
		INIT_LIST_HEAD(&(_ptr)->list);			\
	} while (0)
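
/*
 * Illustrative sketch, not part of this header: typical use of the
 * macro above is to initialize a handler and register it with
 * ib_register_event_handler(), declared later in this file.  The
 * callback and function names are hypothetical, and the printk
 * helpers are assumed to be available.
 */
#if 0	/* example only */
static void example_event_cb(struct ib_event_handler *handler,
			     struct ib_event *event)
{
	if (event->event == IB_EVENT_PORT_ACTIVE)
		pr_info("%s: port %d became active\n",
			event->device->name, event->element.port_num);
}

static struct ib_event_handler example_handler;

static void example_watch_events(struct ib_device *device)
{
	INIT_IB_EVENT_HANDLER(&example_handler, device, example_event_cb);
	ib_register_event_handler(&example_handler);
}
#endif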

struct ib_global_route {
	union ib_gid	dgid;
	u32		flow_label;
	u8		sgid_index;
	u8		hop_limit;
	u8		traffic_class;
};

struct ib_grh {
	__be32		version_tclass_flow;
	__be16		paylen;
	u8		next_hdr;
	u8		hop_limit;
	union ib_gid	sgid;
	union ib_gid	dgid;
};

enum {
	IB_MULTICAST_QPN = 0xffffff
};

#define IB_LID_PERMISSIVE	cpu_to_be16(0xFFFF)

enum ib_ah_flags {
	IB_AH_GRH	= 1
};

enum ib_rate {
	IB_RATE_PORT_CURRENT = 0,
	IB_RATE_2_5_GBPS = 2,
	IB_RATE_5_GBPS   = 5,
	IB_RATE_10_GBPS  = 3,
	IB_RATE_20_GBPS  = 6,
	IB_RATE_30_GBPS  = 4,
	IB_RATE_40_GBPS  = 7,
	IB_RATE_60_GBPS  = 8,
	IB_RATE_80_GBPS  = 9,
	IB_RATE_120_GBPS = 10,
	IB_RATE_14_GBPS  = 11,
	IB_RATE_56_GBPS  = 12,
	IB_RATE_112_GBPS = 13,
	IB_RATE_168_GBPS = 14,
	IB_RATE_25_GBPS  = 15,
	IB_RATE_100_GBPS = 16,
	IB_RATE_200_GBPS = 17,
	IB_RATE_300_GBPS = 18
};

/**
 * ib_rate_to_mult - Convert the IB rate enum to a multiple of the
 * base rate of 2.5 Gbit/sec.  For example, IB_RATE_5_GBPS will be
 * converted to 2, since 5 Gbit/sec is 2 * 2.5 Gbit/sec.
 * @rate: rate to convert.
 */
int ib_rate_to_mult(enum ib_rate rate) __attribute_const__;

/**
 * ib_rate_to_mbps - Convert the IB rate enum to Mbps.
 * For example, IB_RATE_2_5_GBPS will be converted to 2500.
 * @rate: rate to convert.
 */
int ib_rate_to_mbps(enum ib_rate rate) __attribute_const__;

/**
 * mult_to_ib_rate - Convert a multiple of 2.5 Gbit/sec to an IB rate
 * enum.
 * @mult: multiple to convert.
 */
enum ib_rate mult_to_ib_rate(int mult) __attribute_const__;
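
/*
 * Worked example (comment only): ib_rate_to_mult(IB_RATE_10_GBPS)
 * returns 4, since 10 Gbit/sec is 4 * 2.5 Gbit/sec;
 * ib_rate_to_mbps(IB_RATE_10_GBPS) returns 10000; and
 * mult_to_ib_rate(4) maps back to IB_RATE_10_GBPS.
 */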

struct ib_ah_attr {
	struct ib_global_route	grh;
	u16			dlid;
	u8			sl;
	u8			src_path_bits;
	u8			static_rate;
	u8			ah_flags;
	u8			port_num;
	u8			dmac[ETH_ALEN];
	u16			vlan_id;
};

enum ib_wc_status {
	IB_WC_SUCCESS,
	IB_WC_LOC_LEN_ERR,
	IB_WC_LOC_QP_OP_ERR,
	IB_WC_LOC_EEC_OP_ERR,
	IB_WC_LOC_PROT_ERR,
	IB_WC_WR_FLUSH_ERR,
	IB_WC_MW_BIND_ERR,
	IB_WC_BAD_RESP_ERR,
	IB_WC_LOC_ACCESS_ERR,
	IB_WC_REM_INV_REQ_ERR,
	IB_WC_REM_ACCESS_ERR,
	IB_WC_REM_OP_ERR,
	IB_WC_RETRY_EXC_ERR,
	IB_WC_RNR_RETRY_EXC_ERR,
	IB_WC_LOC_RDD_VIOL_ERR,
	IB_WC_REM_INV_RD_REQ_ERR,
	IB_WC_REM_ABORT_ERR,
	IB_WC_INV_EECN_ERR,
	IB_WC_INV_EEC_STATE_ERR,
	IB_WC_FATAL_ERR,
	IB_WC_RESP_TIMEOUT_ERR,
	IB_WC_GENERAL_ERR
};

enum ib_wc_opcode {
	IB_WC_SEND,
	IB_WC_RDMA_WRITE,
	IB_WC_RDMA_READ,
	IB_WC_COMP_SWAP,
	IB_WC_FETCH_ADD,
	IB_WC_BIND_MW,
	IB_WC_LSO,
	IB_WC_LOCAL_INV,
	IB_WC_FAST_REG_MR,
	IB_WC_MASKED_COMP_SWAP,
	IB_WC_MASKED_FETCH_ADD,
/*
 * Set value of IB_WC_RECV so consumers can test if a completion is a
 * receive by testing (opcode & IB_WC_RECV).
 */
	IB_WC_RECV			= 1 << 7,
	IB_WC_RECV_RDMA_WITH_IMM
};

enum ib_wc_flags {
	IB_WC_GRH		= 1,
	IB_WC_WITH_IMM		= (1<<1),
	IB_WC_WITH_INVALIDATE	= (1<<2),
	IB_WC_IP_CSUM_OK	= (1<<3),
	IB_WC_WITH_SMAC		= (1<<4),
	IB_WC_WITH_VLAN		= (1<<5),
};

struct ib_wc {
	u64			wr_id;
	enum ib_wc_status	status;
	enum ib_wc_opcode	opcode;
	u32			vendor_err;
	u32			byte_len;
	struct ib_qp	       *qp;
	union {
		__be32		imm_data;
		u32		invalidate_rkey;
	} ex;
	u32			src_qp;
	int			wc_flags;
	u16			pkey_index;
	u16			slid;
	u8			sl;
	u8			dlid_path_bits;
	u8			port_num;	/* valid only for DR SMPs on switches */
	u8			smac[ETH_ALEN];
	u16			vlan_id;
};

enum ib_cq_notify_flags {
	IB_CQ_SOLICITED			= 1 << 0,
	IB_CQ_NEXT_COMP			= 1 << 1,
	IB_CQ_SOLICITED_MASK		= IB_CQ_SOLICITED | IB_CQ_NEXT_COMP,
	IB_CQ_REPORT_MISSED_EVENTS	= 1 << 2,
};
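
/*
 * Illustrative sketch, not part of this header: the usual way to
 * avoid missing completions is to drain the CQ, re-arm it with
 * IB_CQ_REPORT_MISSED_EVENTS, and drain again if the re-arm reports
 * possibly-missed completions.  ib_poll_cq() and ib_req_notify_cq()
 * are declared further down in the full header; the handler name is
 * hypothetical.
 */
#if 0	/* example only */
static void example_comp_handler(struct ib_cq *cq, void *cq_context)
{
	struct ib_wc wc;

	do {
		while (ib_poll_cq(cq, 1, &wc) > 0) {
			/* ... process wc.opcode / wc.status here ... */
		}
	} while (ib_req_notify_cq(cq, IB_CQ_NEXT_COMP |
				      IB_CQ_REPORT_MISSED_EVENTS) > 0);
}
#endif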
562
Sean Hefty96104ed2011-05-23 16:31:36 -0700563enum ib_srq_type {
Sean Hefty418d5132011-05-23 19:42:29 -0700564 IB_SRQT_BASIC,
565 IB_SRQT_XRC
Sean Hefty96104ed2011-05-23 16:31:36 -0700566};
567
Roland Dreierd41fcc62005-08-18 12:23:08 -0700568enum ib_srq_attr_mask {
569 IB_SRQ_MAX_WR = 1 << 0,
570 IB_SRQ_LIMIT = 1 << 1,
571};
572
573struct ib_srq_attr {
574 u32 max_wr;
575 u32 max_sge;
576 u32 srq_limit;
577};
578
579struct ib_srq_init_attr {
580 void (*event_handler)(struct ib_event *, void *);
581 void *srq_context;
582 struct ib_srq_attr attr;
Sean Hefty96104ed2011-05-23 16:31:36 -0700583 enum ib_srq_type srq_type;
Sean Hefty418d5132011-05-23 19:42:29 -0700584
585 union {
586 struct {
587 struct ib_xrcd *xrcd;
588 struct ib_cq *cq;
589 } xrc;
590 } ext;
Roland Dreierd41fcc62005-08-18 12:23:08 -0700591};
592
Linus Torvalds1da177e2005-04-16 15:20:36 -0700593struct ib_qp_cap {
594 u32 max_send_wr;
595 u32 max_recv_wr;
596 u32 max_send_sge;
597 u32 max_recv_sge;
598 u32 max_inline_data;
599};
600
601enum ib_sig_type {
602 IB_SIGNAL_ALL_WR,
603 IB_SIGNAL_REQ_WR
604};
605
606enum ib_qp_type {
607 /*
608 * IB_QPT_SMI and IB_QPT_GSI have to be the first two entries
609 * here (and in that order) since the MAD layer uses them as
610 * indices into a 2-entry table.
611 */
612 IB_QPT_SMI,
613 IB_QPT_GSI,
614
615 IB_QPT_RC,
616 IB_QPT_UC,
617 IB_QPT_UD,
618 IB_QPT_RAW_IPV6,
Sean Heftyb42b63c2011-05-23 19:59:25 -0700619 IB_QPT_RAW_ETHERTYPE,
Or Gerlitzc938a612012-03-01 12:17:51 +0200620 IB_QPT_RAW_PACKET = 8,
Sean Heftyb42b63c2011-05-23 19:59:25 -0700621 IB_QPT_XRC_INI = 9,
622 IB_QPT_XRC_TGT,
Jack Morgenstein0134f162013-07-07 17:25:52 +0300623 IB_QPT_MAX,
624 /* Reserve a range for qp types internal to the low level driver.
625 * These qp types will not be visible at the IB core layer, so the
626 * IB_QPT_MAX usages should not be affected in the core layer
627 */
628 IB_QPT_RESERVED1 = 0x1000,
629 IB_QPT_RESERVED2,
630 IB_QPT_RESERVED3,
631 IB_QPT_RESERVED4,
632 IB_QPT_RESERVED5,
633 IB_QPT_RESERVED6,
634 IB_QPT_RESERVED7,
635 IB_QPT_RESERVED8,
636 IB_QPT_RESERVED9,
637 IB_QPT_RESERVED10,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700638};
639
Eli Cohenb846f252008-04-16 21:09:27 -0700640enum ib_qp_create_flags {
Ron Livne47ee1b92008-07-14 23:48:48 -0700641 IB_QP_CREATE_IPOIB_UD_LSO = 1 << 0,
642 IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK = 1 << 1,
Jack Morgensteind2b57062012-08-03 08:40:37 +0000643 /* reserve bits 26-31 for low level drivers' internal use */
644 IB_QP_CREATE_RESERVED_START = 1 << 26,
645 IB_QP_CREATE_RESERVED_END = 1 << 31,
Eli Cohenb846f252008-04-16 21:09:27 -0700646};
647
Yishai Hadas73c40c62013-08-01 18:49:53 +0300648
649/*
650 * Note: users may not call ib_close_qp or ib_destroy_qp from the event_handler
651 * callback to destroy the passed in QP.
652 */
653
Linus Torvalds1da177e2005-04-16 15:20:36 -0700654struct ib_qp_init_attr {
655 void (*event_handler)(struct ib_event *, void *);
656 void *qp_context;
657 struct ib_cq *send_cq;
658 struct ib_cq *recv_cq;
659 struct ib_srq *srq;
Sean Heftyb42b63c2011-05-23 19:59:25 -0700660 struct ib_xrcd *xrcd; /* XRC TGT QPs only */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700661 struct ib_qp_cap cap;
662 enum ib_sig_type sq_sig_type;
663 enum ib_qp_type qp_type;
Eli Cohenb846f252008-04-16 21:09:27 -0700664 enum ib_qp_create_flags create_flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700665 u8 port_num; /* special QP types only */
666};
667
Sean Hefty0e0ec7e2011-08-08 15:31:51 -0700668struct ib_qp_open_attr {
669 void (*event_handler)(struct ib_event *, void *);
670 void *qp_context;
671 u32 qp_num;
672 enum ib_qp_type qp_type;
673};
674
Linus Torvalds1da177e2005-04-16 15:20:36 -0700675enum ib_rnr_timeout {
676 IB_RNR_TIMER_655_36 = 0,
677 IB_RNR_TIMER_000_01 = 1,
678 IB_RNR_TIMER_000_02 = 2,
679 IB_RNR_TIMER_000_03 = 3,
680 IB_RNR_TIMER_000_04 = 4,
681 IB_RNR_TIMER_000_06 = 5,
682 IB_RNR_TIMER_000_08 = 6,
683 IB_RNR_TIMER_000_12 = 7,
684 IB_RNR_TIMER_000_16 = 8,
685 IB_RNR_TIMER_000_24 = 9,
686 IB_RNR_TIMER_000_32 = 10,
687 IB_RNR_TIMER_000_48 = 11,
688 IB_RNR_TIMER_000_64 = 12,
689 IB_RNR_TIMER_000_96 = 13,
690 IB_RNR_TIMER_001_28 = 14,
691 IB_RNR_TIMER_001_92 = 15,
692 IB_RNR_TIMER_002_56 = 16,
693 IB_RNR_TIMER_003_84 = 17,
694 IB_RNR_TIMER_005_12 = 18,
695 IB_RNR_TIMER_007_68 = 19,
696 IB_RNR_TIMER_010_24 = 20,
697 IB_RNR_TIMER_015_36 = 21,
698 IB_RNR_TIMER_020_48 = 22,
699 IB_RNR_TIMER_030_72 = 23,
700 IB_RNR_TIMER_040_96 = 24,
701 IB_RNR_TIMER_061_44 = 25,
702 IB_RNR_TIMER_081_92 = 26,
703 IB_RNR_TIMER_122_88 = 27,
704 IB_RNR_TIMER_163_84 = 28,
705 IB_RNR_TIMER_245_76 = 29,
706 IB_RNR_TIMER_327_68 = 30,
707 IB_RNR_TIMER_491_52 = 31
708};
709
710enum ib_qp_attr_mask {
711 IB_QP_STATE = 1,
712 IB_QP_CUR_STATE = (1<<1),
713 IB_QP_EN_SQD_ASYNC_NOTIFY = (1<<2),
714 IB_QP_ACCESS_FLAGS = (1<<3),
715 IB_QP_PKEY_INDEX = (1<<4),
716 IB_QP_PORT = (1<<5),
717 IB_QP_QKEY = (1<<6),
718 IB_QP_AV = (1<<7),
719 IB_QP_PATH_MTU = (1<<8),
720 IB_QP_TIMEOUT = (1<<9),
721 IB_QP_RETRY_CNT = (1<<10),
722 IB_QP_RNR_RETRY = (1<<11),
723 IB_QP_RQ_PSN = (1<<12),
724 IB_QP_MAX_QP_RD_ATOMIC = (1<<13),
725 IB_QP_ALT_PATH = (1<<14),
726 IB_QP_MIN_RNR_TIMER = (1<<15),
727 IB_QP_SQ_PSN = (1<<16),
728 IB_QP_MAX_DEST_RD_ATOMIC = (1<<17),
729 IB_QP_PATH_MIG_STATE = (1<<18),
730 IB_QP_CAP = (1<<19),
Matan Barakdd5f03b2013-12-12 18:03:11 +0200731 IB_QP_DEST_QPN = (1<<20),
732 IB_QP_SMAC = (1<<21),
733 IB_QP_ALT_SMAC = (1<<22),
734 IB_QP_VID = (1<<23),
735 IB_QP_ALT_VID = (1<<24),
Linus Torvalds1da177e2005-04-16 15:20:36 -0700736};
737
738enum ib_qp_state {
739 IB_QPS_RESET,
740 IB_QPS_INIT,
741 IB_QPS_RTR,
742 IB_QPS_RTS,
743 IB_QPS_SQD,
744 IB_QPS_SQE,
745 IB_QPS_ERR
746};
747
748enum ib_mig_state {
749 IB_MIG_MIGRATED,
750 IB_MIG_REARM,
751 IB_MIG_ARMED
752};
753
Shani Michaeli7083e422013-02-06 16:19:12 +0000754enum ib_mw_type {
755 IB_MW_TYPE_1 = 1,
756 IB_MW_TYPE_2 = 2
757};
758
Linus Torvalds1da177e2005-04-16 15:20:36 -0700759struct ib_qp_attr {
760 enum ib_qp_state qp_state;
761 enum ib_qp_state cur_qp_state;
762 enum ib_mtu path_mtu;
763 enum ib_mig_state path_mig_state;
764 u32 qkey;
765 u32 rq_psn;
766 u32 sq_psn;
767 u32 dest_qp_num;
768 int qp_access_flags;
769 struct ib_qp_cap cap;
770 struct ib_ah_attr ah_attr;
771 struct ib_ah_attr alt_ah_attr;
772 u16 pkey_index;
773 u16 alt_pkey_index;
774 u8 en_sqd_async_notify;
775 u8 sq_draining;
776 u8 max_rd_atomic;
777 u8 max_dest_rd_atomic;
778 u8 min_rnr_timer;
779 u8 port_num;
780 u8 timeout;
781 u8 retry_cnt;
782 u8 rnr_retry;
783 u8 alt_port_num;
784 u8 alt_timeout;
Matan Barakdd5f03b2013-12-12 18:03:11 +0200785 u8 smac[ETH_ALEN];
786 u8 alt_smac[ETH_ALEN];
787 u16 vlan_id;
788 u16 alt_vlan_id;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700789};
790
791enum ib_wr_opcode {
792 IB_WR_RDMA_WRITE,
793 IB_WR_RDMA_WRITE_WITH_IMM,
794 IB_WR_SEND,
795 IB_WR_SEND_WITH_IMM,
796 IB_WR_RDMA_READ,
797 IB_WR_ATOMIC_CMP_AND_SWP,
Eli Cohenc93570f2008-04-16 21:09:27 -0700798 IB_WR_ATOMIC_FETCH_AND_ADD,
Roland Dreier0f39cf32008-04-16 21:09:32 -0700799 IB_WR_LSO,
800 IB_WR_SEND_WITH_INV,
Steve Wise00f7ec32008-07-14 23:48:45 -0700801 IB_WR_RDMA_READ_WITH_INV,
802 IB_WR_LOCAL_INV,
803 IB_WR_FAST_REG_MR,
Vladimir Sokolovsky5e80ba82010-04-14 17:23:01 +0300804 IB_WR_MASKED_ATOMIC_CMP_AND_SWP,
805 IB_WR_MASKED_ATOMIC_FETCH_AND_ADD,
Shani Michaeli7083e422013-02-06 16:19:12 +0000806 IB_WR_BIND_MW,
Jack Morgenstein0134f162013-07-07 17:25:52 +0300807 /* reserve values for low level drivers' internal use.
808 * These values will not be used at all in the ib core layer.
809 */
810 IB_WR_RESERVED1 = 0xf0,
811 IB_WR_RESERVED2,
812 IB_WR_RESERVED3,
813 IB_WR_RESERVED4,
814 IB_WR_RESERVED5,
815 IB_WR_RESERVED6,
816 IB_WR_RESERVED7,
817 IB_WR_RESERVED8,
818 IB_WR_RESERVED9,
819 IB_WR_RESERVED10,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700820};
821
822enum ib_send_flags {
823 IB_SEND_FENCE = 1,
824 IB_SEND_SIGNALED = (1<<1),
825 IB_SEND_SOLICITED = (1<<2),
Eli Cohene0605d92008-01-30 18:30:57 +0200826 IB_SEND_INLINE = (1<<3),
Jack Morgenstein0134f162013-07-07 17:25:52 +0300827 IB_SEND_IP_CSUM = (1<<4),
828
829 /* reserve bits 26-31 for low level drivers' internal use */
830 IB_SEND_RESERVED_START = (1 << 26),
831 IB_SEND_RESERVED_END = (1 << 31),
Linus Torvalds1da177e2005-04-16 15:20:36 -0700832};
833
834struct ib_sge {
835 u64 addr;
836 u32 length;
837 u32 lkey;
838};
839
Steve Wise00f7ec32008-07-14 23:48:45 -0700840struct ib_fast_reg_page_list {
841 struct ib_device *device;
842 u64 *page_list;
843 unsigned int max_page_list_len;
844};
845
Shani Michaeli7083e422013-02-06 16:19:12 +0000846/**
847 * struct ib_mw_bind_info - Parameters for a memory window bind operation.
848 * @mr: A memory region to bind the memory window to.
849 * @addr: The address where the memory window should begin.
850 * @length: The length of the memory window, in bytes.
851 * @mw_access_flags: Access flags from enum ib_access_flags for the window.
852 *
853 * This struct contains the shared parameters for type 1 and type 2
854 * memory window bind operations.
855 */
856struct ib_mw_bind_info {
857 struct ib_mr *mr;
858 u64 addr;
859 u64 length;
860 int mw_access_flags;
861};
862
Linus Torvalds1da177e2005-04-16 15:20:36 -0700863struct ib_send_wr {
864 struct ib_send_wr *next;
865 u64 wr_id;
866 struct ib_sge *sg_list;
867 int num_sge;
868 enum ib_wr_opcode opcode;
869 int send_flags;
Roland Dreier0f39cf32008-04-16 21:09:32 -0700870 union {
871 __be32 imm_data;
872 u32 invalidate_rkey;
873 } ex;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700874 union {
875 struct {
876 u64 remote_addr;
877 u32 rkey;
878 } rdma;
879 struct {
880 u64 remote_addr;
881 u64 compare_add;
882 u64 swap;
Vladimir Sokolovsky5e80ba82010-04-14 17:23:01 +0300883 u64 compare_add_mask;
884 u64 swap_mask;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700885 u32 rkey;
886 } atomic;
887 struct {
888 struct ib_ah *ah;
Eli Cohenc93570f2008-04-16 21:09:27 -0700889 void *header;
890 int hlen;
891 int mss;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700892 u32 remote_qpn;
893 u32 remote_qkey;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700894 u16 pkey_index; /* valid for GSI only */
895 u8 port_num; /* valid for DR SMPs on switch only */
896 } ud;
Steve Wise00f7ec32008-07-14 23:48:45 -0700897 struct {
898 u64 iova_start;
899 struct ib_fast_reg_page_list *page_list;
900 unsigned int page_shift;
901 unsigned int page_list_len;
902 u32 length;
903 int access_flags;
904 u32 rkey;
905 } fast_reg;
Shani Michaeli7083e422013-02-06 16:19:12 +0000906 struct {
907 struct ib_mw *mw;
908 /* The new rkey for the memory window. */
909 u32 rkey;
910 struct ib_mw_bind_info bind_info;
911 } bind_mw;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700912 } wr;
Sean Heftyb42b63c2011-05-23 19:59:25 -0700913 u32 xrc_remote_srq_num; /* XRC TGT QPs only */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700914};
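
/*
 * Illustrative sketch, not part of this header: filling in an
 * ib_send_wr for a signaled RDMA WRITE.  ib_post_send() is declared
 * further down in the full header; the function name, wr_id value,
 * and the assumption that memset() is available are hypothetical.
 */
#if 0	/* example only */
static int example_rdma_write(struct ib_qp *qp, struct ib_sge *sge,
			      u64 remote_addr, u32 rkey)
{
	struct ib_send_wr wr, *bad_wr;

	memset(&wr, 0, sizeof(wr));
	wr.wr_id	       = 1;	/* returned later in ib_wc.wr_id */
	wr.sg_list	       = sge;
	wr.num_sge	       = 1;
	wr.opcode	       = IB_WR_RDMA_WRITE;
	wr.send_flags	       = IB_SEND_SIGNALED;
	wr.wr.rdma.remote_addr = remote_addr;
	wr.wr.rdma.rkey	       = rkey;

	return ib_post_send(qp, &wr, &bad_wr);
}
#endif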

struct ib_recv_wr {
	struct ib_recv_wr      *next;
	u64			wr_id;
	struct ib_sge	       *sg_list;
	int			num_sge;
};

enum ib_access_flags {
	IB_ACCESS_LOCAL_WRITE	= 1,
	IB_ACCESS_REMOTE_WRITE	= (1<<1),
	IB_ACCESS_REMOTE_READ	= (1<<2),
	IB_ACCESS_REMOTE_ATOMIC	= (1<<3),
	IB_ACCESS_MW_BIND	= (1<<4),
	IB_ZERO_BASED		= (1<<5)
};

struct ib_phys_buf {
	u64	addr;
	u64	size;
};

struct ib_mr_attr {
	struct ib_pd   *pd;
	u64		device_virt_addr;
	u64		size;
	int		mr_access_flags;
	u32		lkey;
	u32		rkey;
};

enum ib_mr_rereg_flags {
	IB_MR_REREG_TRANS	= 1,
	IB_MR_REREG_PD		= (1<<1),
	IB_MR_REREG_ACCESS	= (1<<2)
};

/**
 * struct ib_mw_bind - Parameters for a type 1 memory window bind operation.
 * @wr_id: Work request id.
 * @send_flags: Flags from ib_send_flags enum.
 * @bind_info: More parameters of the bind operation.
 */
struct ib_mw_bind {
	u64			wr_id;
	int			send_flags;
	struct ib_mw_bind_info	bind_info;
};

struct ib_fmr_attr {
	int	max_pages;
	int	max_maps;
	u8	page_shift;
};

struct ib_ucontext {
	struct ib_device       *device;
	struct list_head	pd_list;
	struct list_head	mr_list;
	struct list_head	mw_list;
	struct list_head	cq_list;
	struct list_head	qp_list;
	struct list_head	srq_list;
	struct list_head	ah_list;
	struct list_head	xrcd_list;
	struct list_head	rule_list;
	int			closing;
};

struct ib_uobject {
	u64			user_handle;	/* handle given to us by userspace */
	struct ib_ucontext     *context;	/* associated user context */
	void		       *object;		/* containing object */
	struct list_head	list;		/* link to context's list */
	int			id;		/* index into kernel idr */
	struct kref		ref;
	struct rw_semaphore	mutex;		/* protects .live */
	int			live;
};

struct ib_udata {
	const void __user *inbuf;
	void __user *outbuf;
	size_t       inlen;
	size_t       outlen;
};

struct ib_pd {
	struct ib_device       *device;
	struct ib_uobject      *uobject;
	atomic_t		usecnt;	/* count all resources */
};

struct ib_xrcd {
	struct ib_device       *device;
	atomic_t		usecnt;	/* count all exposed resources */
	struct inode	       *inode;

	struct mutex		tgt_qp_mutex;
	struct list_head	tgt_qp_list;
};

struct ib_ah {
	struct ib_device	*device;
	struct ib_pd		*pd;
	struct ib_uobject	*uobject;
};

typedef void (*ib_comp_handler)(struct ib_cq *cq, void *cq_context);

struct ib_cq {
	struct ib_device       *device;
	struct ib_uobject      *uobject;
	ib_comp_handler   	comp_handler;
	void                  (*event_handler)(struct ib_event *, void *);
	void                   *cq_context;
	int               	cqe;
	atomic_t          	usecnt;	/* count number of work queues */
};

struct ib_srq {
	struct ib_device       *device;
	struct ib_pd	       *pd;
	struct ib_uobject      *uobject;
	void		      (*event_handler)(struct ib_event *, void *);
	void		       *srq_context;
	enum ib_srq_type	srq_type;
	atomic_t		usecnt;

	union {
		struct {
			struct ib_xrcd *xrcd;
			struct ib_cq   *cq;
			u32		srq_num;
		} xrc;
	} ext;
};

struct ib_qp {
	struct ib_device       *device;
	struct ib_pd	       *pd;
	struct ib_cq	       *send_cq;
	struct ib_cq	       *recv_cq;
	struct ib_srq	       *srq;
	struct ib_xrcd	       *xrcd;	/* XRC TGT QPs only */
	struct list_head	xrcd_list;
	/* count times opened, mcast attaches, flow attaches */
	atomic_t		usecnt;
	struct list_head	open_list;
	struct ib_qp           *real_qp;
	struct ib_uobject      *uobject;
	void                  (*event_handler)(struct ib_event *, void *);
	void		       *qp_context;
	u32			qp_num;
	enum ib_qp_type		qp_type;
};

struct ib_mr {
	struct ib_device  *device;
	struct ib_pd	  *pd;
	struct ib_uobject *uobject;
	u32		   lkey;
	u32		   rkey;
	atomic_t	   usecnt; /* count number of MWs */
};

struct ib_mw {
	struct ib_device	*device;
	struct ib_pd		*pd;
	struct ib_uobject	*uobject;
	u32			rkey;
	enum ib_mw_type         type;
};

struct ib_fmr {
	struct ib_device	*device;
	struct ib_pd		*pd;
	struct list_head	list;
	u32			lkey;
	u32			rkey;
};

/* Supported steering options */
enum ib_flow_attr_type {
	/* steering according to rule specifications */
	IB_FLOW_ATTR_NORMAL		= 0x0,
	/* default unicast and multicast rule -
	 * receive all Eth traffic which isn't steered to any QP
	 */
	IB_FLOW_ATTR_ALL_DEFAULT	= 0x1,
	/* default multicast rule -
	 * receive all Eth multicast traffic which isn't steered to any QP
	 */
	IB_FLOW_ATTR_MC_DEFAULT		= 0x2,
	/* sniffer rule - receive all port traffic */
	IB_FLOW_ATTR_SNIFFER		= 0x3
};

/* Supported steering header types */
enum ib_flow_spec_type {
	/* L2 headers */
	IB_FLOW_SPEC_ETH	= 0x20,
	/* L3 header */
	IB_FLOW_SPEC_IPV4	= 0x30,
	/* L4 headers */
	IB_FLOW_SPEC_TCP	= 0x40,
	IB_FLOW_SPEC_UDP	= 0x41
};

#define IB_FLOW_SPEC_SUPPORT_LAYERS 4

/* Flow steering rule priority is set according to its domain.
 * Lower domain value means higher priority.
 */
enum ib_flow_domain {
	IB_FLOW_DOMAIN_USER,
	IB_FLOW_DOMAIN_ETHTOOL,
	IB_FLOW_DOMAIN_RFS,
	IB_FLOW_DOMAIN_NIC,
	IB_FLOW_DOMAIN_NUM /* Must be last */
};

struct ib_flow_eth_filter {
	u8	dst_mac[6];
	u8	src_mac[6];
	__be16	ether_type;
	__be16	vlan_tag;
};

struct ib_flow_spec_eth {
	enum ib_flow_spec_type	  type;
	u16			  size;
	struct ib_flow_eth_filter val;
	struct ib_flow_eth_filter mask;
};

struct ib_flow_ipv4_filter {
	__be32	src_ip;
	__be32	dst_ip;
};

struct ib_flow_spec_ipv4 {
	enum ib_flow_spec_type	   type;
	u16			   size;
	struct ib_flow_ipv4_filter val;
	struct ib_flow_ipv4_filter mask;
};

struct ib_flow_tcp_udp_filter {
	__be16	dst_port;
	__be16	src_port;
};

struct ib_flow_spec_tcp_udp {
	enum ib_flow_spec_type	      type;
	u16			      size;
	struct ib_flow_tcp_udp_filter val;
	struct ib_flow_tcp_udp_filter mask;
};

union ib_flow_spec {
	struct {
		enum ib_flow_spec_type	type;
		u16			size;
	};
	struct ib_flow_spec_eth		eth;
	struct ib_flow_spec_ipv4	ipv4;
	struct ib_flow_spec_tcp_udp	tcp_udp;
};

struct ib_flow_attr {
	enum ib_flow_attr_type type;
	u16	     size;
	u16	     priority;
	u32	     flags;
	u8	     num_of_specs;
	u8	     port;
	/* The following are the optional layers according to the user's
	 * request (see the sketch after this struct):
	 * struct ib_flow_spec_xxx
	 * struct ib_flow_spec_yyy
	 */
};
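
/*
 * Illustrative sketch, not part of this header: because the variable
 * number of ib_flow_spec_xxx layers immediately follows struct
 * ib_flow_attr in memory, callers typically allocate one buffer for
 * both.  The sketch builds a single-spec Ethernet rule;
 * ib_create_flow() is declared further down in the full header, and
 * the function name and use of memset()/memcpy() are assumptions.
 */
#if 0	/* example only */
static struct ib_flow *example_steer_eth(struct ib_qp *qp,
					 const u8 *dst_mac, u8 port)
{
	struct {
		struct ib_flow_attr	attr;
		struct ib_flow_spec_eth	eth;
	} rule;

	memset(&rule, 0, sizeof(rule));
	rule.attr.type	       = IB_FLOW_ATTR_NORMAL;
	rule.attr.size	       = sizeof(rule);
	rule.attr.num_of_specs = 1;
	rule.attr.port	       = port;
	rule.eth.type	       = IB_FLOW_SPEC_ETH;
	rule.eth.size	       = sizeof(rule.eth);
	memcpy(rule.eth.val.dst_mac, dst_mac, 6);
	memset(rule.eth.mask.dst_mac, 0xff, 6);	/* match the full MAC */

	return ib_create_flow(qp, &rule.attr, IB_FLOW_DOMAIN_USER);
}
#endif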

struct ib_flow {
	struct ib_qp		*qp;
	struct ib_uobject	*uobject;
};

struct ib_mad;
struct ib_grh;

enum ib_process_mad_flags {
	IB_MAD_IGNORE_MKEY	= 1,
	IB_MAD_IGNORE_BKEY	= 2,
	IB_MAD_IGNORE_ALL	= IB_MAD_IGNORE_MKEY | IB_MAD_IGNORE_BKEY
};

enum ib_mad_result {
	IB_MAD_RESULT_FAILURE  = 0,      /* (!SUCCESS is the important flag) */
	IB_MAD_RESULT_SUCCESS  = 1 << 0, /* MAD was successfully processed   */
	IB_MAD_RESULT_REPLY    = 1 << 1, /* Reply packet needs to be sent    */
	IB_MAD_RESULT_CONSUMED = 1 << 2  /* Packet consumed: stop processing */
};

#define IB_DEVICE_NAME_MAX 64

struct ib_cache {
	rwlock_t                lock;
	struct ib_event_handler event_handler;
	struct ib_pkey_cache  **pkey_cache;
	struct ib_gid_cache   **gid_cache;
	u8                     *lmc_cache;
};

struct ib_dma_mapping_ops {
	int		(*mapping_error)(struct ib_device *dev,
					 u64 dma_addr);
	u64		(*map_single)(struct ib_device *dev,
				      void *ptr, size_t size,
				      enum dma_data_direction direction);
	void		(*unmap_single)(struct ib_device *dev,
					u64 addr, size_t size,
					enum dma_data_direction direction);
	u64		(*map_page)(struct ib_device *dev,
				    struct page *page, unsigned long offset,
				    size_t size,
				    enum dma_data_direction direction);
	void		(*unmap_page)(struct ib_device *dev,
				      u64 addr, size_t size,
				      enum dma_data_direction direction);
	int		(*map_sg)(struct ib_device *dev,
				  struct scatterlist *sg, int nents,
				  enum dma_data_direction direction);
	void		(*unmap_sg)(struct ib_device *dev,
				    struct scatterlist *sg, int nents,
				    enum dma_data_direction direction);
	u64		(*dma_address)(struct ib_device *dev,
				       struct scatterlist *sg);
	unsigned int	(*dma_len)(struct ib_device *dev,
				   struct scatterlist *sg);
	void		(*sync_single_for_cpu)(struct ib_device *dev,
					       u64 dma_handle,
					       size_t size,
					       enum dma_data_direction dir);
	void		(*sync_single_for_device)(struct ib_device *dev,
						  u64 dma_handle,
						  size_t size,
						  enum dma_data_direction dir);
	void		*(*alloc_coherent)(struct ib_device *dev,
					   size_t size,
					   u64 *dma_handle,
					   gfp_t flag);
	void		(*free_coherent)(struct ib_device *dev,
					 size_t size, void *cpu_addr,
					 u64 dma_handle);
};

struct iw_cm_verbs;

struct ib_device {
	struct device                *dma_device;

	char                          name[IB_DEVICE_NAME_MAX];

	struct list_head              event_handler_list;
	spinlock_t                    event_handler_lock;

	spinlock_t                    client_data_lock;
	struct list_head              core_list;
	struct list_head              client_data_list;

	struct ib_cache               cache;
	int                          *pkey_tbl_len;
	int                          *gid_tbl_len;

	int			      num_comp_vectors;

	struct iw_cm_verbs	     *iwcm;

	int		           (*get_protocol_stats)(struct ib_device *device,
							 union rdma_protocol_stats *stats);
	int		           (*query_device)(struct ib_device *device,
						   struct ib_device_attr *device_attr);
	int		           (*query_port)(struct ib_device *device,
						 u8 port_num,
						 struct ib_port_attr *port_attr);
	enum rdma_link_layer	   (*get_link_layer)(struct ib_device *device,
						     u8 port_num);
	int		           (*query_gid)(struct ib_device *device,
						u8 port_num, int index,
						union ib_gid *gid);
	int		           (*query_pkey)(struct ib_device *device,
						 u8 port_num, u16 index, u16 *pkey);
	int		           (*modify_device)(struct ib_device *device,
						    int device_modify_mask,
						    struct ib_device_modify *device_modify);
	int		           (*modify_port)(struct ib_device *device,
						  u8 port_num, int port_modify_mask,
						  struct ib_port_modify *port_modify);
	struct ib_ucontext *       (*alloc_ucontext)(struct ib_device *device,
						     struct ib_udata *udata);
	int                        (*dealloc_ucontext)(struct ib_ucontext *context);
	int                        (*mmap)(struct ib_ucontext *context,
					   struct vm_area_struct *vma);
	struct ib_pd *             (*alloc_pd)(struct ib_device *device,
					       struct ib_ucontext *context,
					       struct ib_udata *udata);
	int                        (*dealloc_pd)(struct ib_pd *pd);
	struct ib_ah *             (*create_ah)(struct ib_pd *pd,
						struct ib_ah_attr *ah_attr);
	int                        (*modify_ah)(struct ib_ah *ah,
						struct ib_ah_attr *ah_attr);
	int                        (*query_ah)(struct ib_ah *ah,
					       struct ib_ah_attr *ah_attr);
	int                        (*destroy_ah)(struct ib_ah *ah);
	struct ib_srq *            (*create_srq)(struct ib_pd *pd,
						 struct ib_srq_init_attr *srq_init_attr,
						 struct ib_udata *udata);
	int                        (*modify_srq)(struct ib_srq *srq,
						 struct ib_srq_attr *srq_attr,
						 enum ib_srq_attr_mask srq_attr_mask,
						 struct ib_udata *udata);
	int                        (*query_srq)(struct ib_srq *srq,
						struct ib_srq_attr *srq_attr);
	int                        (*destroy_srq)(struct ib_srq *srq);
	int                        (*post_srq_recv)(struct ib_srq *srq,
						    struct ib_recv_wr *recv_wr,
						    struct ib_recv_wr **bad_recv_wr);
	struct ib_qp *             (*create_qp)(struct ib_pd *pd,
						struct ib_qp_init_attr *qp_init_attr,
						struct ib_udata *udata);
	int                        (*modify_qp)(struct ib_qp *qp,
						struct ib_qp_attr *qp_attr,
						int qp_attr_mask,
						struct ib_udata *udata);
	int                        (*query_qp)(struct ib_qp *qp,
					       struct ib_qp_attr *qp_attr,
					       int qp_attr_mask,
					       struct ib_qp_init_attr *qp_init_attr);
	int                        (*destroy_qp)(struct ib_qp *qp);
	int                        (*post_send)(struct ib_qp *qp,
						struct ib_send_wr *send_wr,
						struct ib_send_wr **bad_send_wr);
	int                        (*post_recv)(struct ib_qp *qp,
						struct ib_recv_wr *recv_wr,
						struct ib_recv_wr **bad_recv_wr);
	struct ib_cq *             (*create_cq)(struct ib_device *device, int cqe,
						int comp_vector,
						struct ib_ucontext *context,
						struct ib_udata *udata);
	int                        (*modify_cq)(struct ib_cq *cq, u16 cq_count,
						u16 cq_period);
	int                        (*destroy_cq)(struct ib_cq *cq);
	int                        (*resize_cq)(struct ib_cq *cq, int cqe,
						struct ib_udata *udata);
	int                        (*poll_cq)(struct ib_cq *cq, int num_entries,
					      struct ib_wc *wc);
	int                        (*peek_cq)(struct ib_cq *cq, int wc_cnt);
	int                        (*req_notify_cq)(struct ib_cq *cq,
						    enum ib_cq_notify_flags flags);
	int                        (*req_ncomp_notif)(struct ib_cq *cq,
						      int wc_cnt);
	struct ib_mr *             (*get_dma_mr)(struct ib_pd *pd,
						 int mr_access_flags);
	struct ib_mr *             (*reg_phys_mr)(struct ib_pd *pd,
						  struct ib_phys_buf *phys_buf_array,
						  int num_phys_buf,
						  int mr_access_flags,
						  u64 *iova_start);
	struct ib_mr *             (*reg_user_mr)(struct ib_pd *pd,
						  u64 start, u64 length,
						  u64 virt_addr,
						  int mr_access_flags,
						  struct ib_udata *udata);
	int                        (*query_mr)(struct ib_mr *mr,
					       struct ib_mr_attr *mr_attr);
	int                        (*dereg_mr)(struct ib_mr *mr);
	struct ib_mr *		   (*alloc_fast_reg_mr)(struct ib_pd *pd,
							int max_page_list_len);
	struct ib_fast_reg_page_list * (*alloc_fast_reg_page_list)(struct ib_device *device,
								   int page_list_len);
	void			   (*free_fast_reg_page_list)(struct ib_fast_reg_page_list *page_list);
	int                        (*rereg_phys_mr)(struct ib_mr *mr,
						    int mr_rereg_mask,
						    struct ib_pd *pd,
						    struct ib_phys_buf *phys_buf_array,
						    int num_phys_buf,
						    int mr_access_flags,
						    u64 *iova_start);
	struct ib_mw *             (*alloc_mw)(struct ib_pd *pd,
					       enum ib_mw_type type);
	int                        (*bind_mw)(struct ib_qp *qp,
					      struct ib_mw *mw,
					      struct ib_mw_bind *mw_bind);
	int                        (*dealloc_mw)(struct ib_mw *mw);
	struct ib_fmr *	           (*alloc_fmr)(struct ib_pd *pd,
						int mr_access_flags,
						struct ib_fmr_attr *fmr_attr);
	int		           (*map_phys_fmr)(struct ib_fmr *fmr,
						   u64 *page_list, int list_len,
						   u64 iova);
	int		           (*unmap_fmr)(struct list_head *fmr_list);
	int		           (*dealloc_fmr)(struct ib_fmr *fmr);
	int                        (*attach_mcast)(struct ib_qp *qp,
						   union ib_gid *gid,
						   u16 lid);
	int                        (*detach_mcast)(struct ib_qp *qp,
						   union ib_gid *gid,
						   u16 lid);
	int                        (*process_mad)(struct ib_device *device,
						  int process_mad_flags,
						  u8 port_num,
						  struct ib_wc *in_wc,
						  struct ib_grh *in_grh,
						  struct ib_mad *in_mad,
						  struct ib_mad *out_mad);
	struct ib_xrcd *	   (*alloc_xrcd)(struct ib_device *device,
						 struct ib_ucontext *ucontext,
						 struct ib_udata *udata);
	int			   (*dealloc_xrcd)(struct ib_xrcd *xrcd);
	struct ib_flow *	   (*create_flow)(struct ib_qp *qp,
						  struct ib_flow_attr *flow_attr,
						  int domain);
	int			   (*destroy_flow)(struct ib_flow *flow_id);

	struct ib_dma_mapping_ops   *dma_ops;

	struct module               *owner;
	struct device                dev;
	struct kobject               *ports_parent;
	struct list_head             port_list;

	enum {
		IB_DEV_UNINITIALIZED,
		IB_DEV_REGISTERED,
		IB_DEV_UNREGISTERED
	}                            reg_state;

	int			     uverbs_abi_ver;
	u64			     uverbs_cmd_mask;
	u64			     uverbs_ex_cmd_mask;

	char			     node_desc[64];
	__be64			     node_guid;
	u32			     local_dma_lkey;
	u8                           node_type;
	u8                           phys_port_cnt;
};

struct ib_client {
	char  *name;
	void (*add)   (struct ib_device *);
	void (*remove)(struct ib_device *);

	struct list_head list;
};

struct ib_device *ib_alloc_device(size_t size);
void ib_dealloc_device(struct ib_device *device);

int ib_register_device(struct ib_device *device,
		       int (*port_callback)(struct ib_device *,
					    u8, struct kobject *));
void ib_unregister_device(struct ib_device *device);

int ib_register_client(struct ib_client *client);
void ib_unregister_client(struct ib_client *client);

void *ib_get_client_data(struct ib_device *device, struct ib_client *client);
void ib_set_client_data(struct ib_device *device, struct ib_client *client,
			void *data);

static inline int ib_copy_from_udata(void *dest, struct ib_udata *udata, size_t len)
{
	return copy_from_user(dest, udata->inbuf, len) ? -EFAULT : 0;
}

static inline int ib_copy_to_udata(struct ib_udata *udata, void *src, size_t len)
{
	return copy_to_user(udata->outbuf, src, len) ? -EFAULT : 0;
}
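
/*
 * Illustrative sketch, not part of this header: a driver's verb
 * implementation typically uses the helpers above to exchange
 * driver-private structures with userspace.  The response struct and
 * function name below are hypothetical.
 */
#if 0	/* example only */
struct example_create_cq_resp {
	__u32 cqn;
	__u32 reserved;
};

static int example_reply_cqn(struct ib_udata *udata, u32 cqn)
{
	struct example_create_cq_resp resp = { .cqn = cqn };

	if (udata->outlen < sizeof(resp))
		return -ENOSPC;	/* userspace buffer too small */
	return ib_copy_to_udata(udata, &resp, sizeof(resp));
}
#endif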
1497
Roland Dreier8a518662006-02-13 12:48:12 -08001498/**
1499 * ib_modify_qp_is_ok - Check that the supplied attribute mask
1500 * contains all required attributes and no attributes not allowed for
1501 * the given QP state transition.
1502 * @cur_state: Current QP state
1503 * @next_state: Next QP state
1504 * @type: QP type
1505 * @mask: Mask of supplied QP attributes
Matan Barakdd5f03b2013-12-12 18:03:11 +02001506 * @ll : link layer of port
Roland Dreier8a518662006-02-13 12:48:12 -08001507 *
1508 * This function is a helper function that a low-level driver's
1509 * modify_qp method can use to validate the consumer's input. It
1510 * checks that cur_state and next_state are valid QP states, that a
1511 * transition from cur_state to next_state is allowed by the IB spec,
1512 * and that the attribute mask supplied is allowed for the transition.
1513 */
1514int ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
Matan Barakdd5f03b2013-12-12 18:03:11 +02001515 enum ib_qp_type type, enum ib_qp_attr_mask mask,
1516 enum rdma_link_layer ll);
Roland Dreier8a518662006-02-13 12:48:12 -08001517
Linus Torvalds1da177e2005-04-16 15:20:36 -07001518int ib_register_event_handler (struct ib_event_handler *event_handler);
1519int ib_unregister_event_handler(struct ib_event_handler *event_handler);
1520void ib_dispatch_event(struct ib_event *event);
1521
1522int ib_query_device(struct ib_device *device,
1523 struct ib_device_attr *device_attr);
1524
1525int ib_query_port(struct ib_device *device,
1526 u8 port_num, struct ib_port_attr *port_attr);
1527
Eli Cohena3f5ada2010-09-27 17:51:10 -07001528enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device,
1529 u8 port_num);
1530
Linus Torvalds1da177e2005-04-16 15:20:36 -07001531int ib_query_gid(struct ib_device *device,
1532 u8 port_num, int index, union ib_gid *gid);
1533
1534int ib_query_pkey(struct ib_device *device,
1535 u8 port_num, u16 index, u16 *pkey);
1536
1537int ib_modify_device(struct ib_device *device,
1538 int device_modify_mask,
1539 struct ib_device_modify *device_modify);
1540
1541int ib_modify_port(struct ib_device *device,
1542 u8 port_num, int port_modify_mask,
1543 struct ib_port_modify *port_modify);
1544
Yosef Etigin5eb620c2007-05-14 07:26:51 +03001545int ib_find_gid(struct ib_device *device, union ib_gid *gid,
1546 u8 *port_num, u16 *index);
1547
1548int ib_find_pkey(struct ib_device *device,
1549 u8 port_num, u16 pkey, u16 *index);
1550
Linus Torvalds1da177e2005-04-16 15:20:36 -07001551/**
1552 * ib_alloc_pd - Allocates an unused protection domain.
1553 * @device: The device on which to allocate the protection domain.
1554 *
1555 * A protection domain object provides an association between QPs, shared
1556 * receive queues, address handles, memory regions, and memory windows.
1557 */
1558struct ib_pd *ib_alloc_pd(struct ib_device *device);
1559
1560/**
1561 * ib_dealloc_pd - Deallocates a protection domain.
1562 * @pd: The protection domain to deallocate.
1563 */
1564int ib_dealloc_pd(struct ib_pd *pd);
1565
1566/**
1567 * ib_create_ah - Creates an address handle for the given address vector.
1568 * @pd: The protection domain associated with the address handle.
1569 * @ah_attr: The attributes of the address vector.
1570 *
1571 * The address handle is used to reference a local or global destination
1572 * in all UD QP post sends.
1573 */
1574struct ib_ah *ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr);
1575
1576/**
Sean Hefty4e00d692006-06-17 20:37:39 -07001577 * ib_init_ah_from_wc - Initializes address handle attributes from a
1578 * work completion.
1579 * @device: Device on which the received message arrived.
1580 * @port_num: Port on which the received message arrived.
1581 * @wc: Work completion associated with the received message.
1582 * @grh: References the received global route header. This parameter is
1583 * ignored unless the work completion indicates that the GRH is valid.
1584 * @ah_attr: Returned attributes that can be used when creating an address
1585 * handle for replying to the message.
1586 */
1587int ib_init_ah_from_wc(struct ib_device *device, u8 port_num, struct ib_wc *wc,
1588 struct ib_grh *grh, struct ib_ah_attr *ah_attr);
1589
1590/**
Hal Rosenstock513789e2005-07-27 11:45:34 -07001591 * ib_create_ah_from_wc - Creates an address handle associated with the
1592 * sender of the specified work completion.
1593 * @pd: The protection domain associated with the address handle.
1594 * @wc: Work completion information associated with a received message.
1595 * @grh: References the received global route header. This parameter is
1596 * ignored unless the work completion indicates that the GRH is valid.
1597 * @port_num: The outbound port number to associate with the address.
1598 *
1599 * The address handle is used to reference a local or global destination
1600 * in all UD QP post sends.
1601 */
1602struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, struct ib_wc *wc,
1603 struct ib_grh *grh, u8 port_num);

/**
 * ib_modify_ah - Modifies the address vector associated with an address
 * handle.
 * @ah: The address handle to modify.
 * @ah_attr: The new address vector attributes to associate with the
 * address handle.
 */
int ib_modify_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr);

/**
 * ib_query_ah - Queries the address vector associated with an address
 * handle.
 * @ah: The address handle to query.
 * @ah_attr: The address vector attributes associated with the address
 * handle.
 */
int ib_query_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr);

/**
 * ib_destroy_ah - Destroys an address handle.
 * @ah: The address handle to destroy.
 */
int ib_destroy_ah(struct ib_ah *ah);

/**
 * ib_create_srq - Creates an SRQ associated with the specified protection
 * domain.
 * @pd: The protection domain associated with the SRQ.
 * @srq_init_attr: A list of initial attributes required to create the
 * SRQ.  If SRQ creation succeeds, then the attributes are updated to
 * the actual capabilities of the created SRQ.
 *
 * srq_attr->max_wr and srq_attr->max_sge are read to determine the
 * requested size of the SRQ, and are set to the actual values allocated
 * on return.  If ib_create_srq() succeeds, then max_wr and max_sge
 * will always be at least as large as the requested values.
 */
struct ib_srq *ib_create_srq(struct ib_pd *pd,
			     struct ib_srq_init_attr *srq_init_attr);
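
/*
 * Illustrative sketch (hypothetical helper, not part of the verbs API):
 * filling in ib_srq_init_attr before creating an SRQ.  The queue depth
 * and SGE count are arbitrary placeholders.
 */
static inline struct ib_srq *ib_example_create_srq(struct ib_pd *pd,
						   void (*handler)(struct ib_event *, void *),
						   void *ctx)
{
	struct ib_srq_init_attr attr = {};

	attr.event_handler = handler;	/* async events, e.g. SRQ limit */
	attr.srq_context   = ctx;
	attr.attr.max_wr   = 256;	/* requested depth; read attr back */
	attr.attr.max_sge  = 1;		/* after creation for actual values */

	return ib_create_srq(pd, &attr);
}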

/**
 * ib_modify_srq - Modifies the attributes for the specified SRQ.
 * @srq: The SRQ to modify.
 * @srq_attr: On input, specifies the SRQ attributes to modify.  On output,
 * the current values of selected SRQ attributes are returned.
 * @srq_attr_mask: A bit-mask used to specify which attributes of the SRQ
 * are being modified.
 *
 * The mask may contain IB_SRQ_MAX_WR to resize the SRQ and/or
 * IB_SRQ_LIMIT to set the SRQ's limit and request notification when
 * the number of receives queued drops below the limit.
 */
int ib_modify_srq(struct ib_srq *srq,
		  struct ib_srq_attr *srq_attr,
		  enum ib_srq_attr_mask srq_attr_mask);

/**
 * ib_query_srq - Returns the attribute list and current values for the
 * specified SRQ.
 * @srq: The SRQ to query.
 * @srq_attr: The attributes of the specified SRQ.
 */
int ib_query_srq(struct ib_srq *srq,
		 struct ib_srq_attr *srq_attr);

/**
 * ib_destroy_srq - Destroys the specified SRQ.
 * @srq: The SRQ to destroy.
 */
int ib_destroy_srq(struct ib_srq *srq);

/**
 * ib_post_srq_recv - Posts a list of work requests to the specified SRQ.
 * @srq: The SRQ to post the work request on.
 * @recv_wr: A list of work requests to post on the receive queue.
 * @bad_recv_wr: On an immediate failure, this parameter will reference
 * the work request that failed to be posted on the SRQ.
 */
static inline int ib_post_srq_recv(struct ib_srq *srq,
				   struct ib_recv_wr *recv_wr,
				   struct ib_recv_wr **bad_recv_wr)
{
	return srq->device->post_srq_recv(srq, recv_wr, bad_recv_wr);
}

/**
 * ib_create_qp - Creates a QP associated with the specified protection
 * domain.
 * @pd: The protection domain associated with the QP.
 * @qp_init_attr: A list of initial attributes required to create the
 * QP.  If QP creation succeeds, then the attributes are updated to
 * the actual capabilities of the created QP.
 */
struct ib_qp *ib_create_qp(struct ib_pd *pd,
			   struct ib_qp_init_attr *qp_init_attr);
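
/*
 * Illustrative sketch (hypothetical helper, not part of the verbs API):
 * minimal ib_qp_init_attr setup for an RC QP.  All capacities are
 * arbitrary placeholders.
 */
static inline struct ib_qp *ib_example_create_rc_qp(struct ib_pd *pd,
						    struct ib_cq *send_cq,
						    struct ib_cq *recv_cq)
{
	struct ib_qp_init_attr attr = {};

	attr.send_cq	      = send_cq;
	attr.recv_cq	      = recv_cq;
	attr.qp_type	      = IB_QPT_RC;
	attr.sq_sig_type      = IB_SIGNAL_REQ_WR; /* signal per-WR only */
	attr.cap.max_send_wr  = 64;
	attr.cap.max_recv_wr  = 64;
	attr.cap.max_send_sge = 1;
	attr.cap.max_recv_sge = 1;

	return ib_create_qp(pd, &attr);	/* attr.cap updated on success */
}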

/**
 * ib_modify_qp - Modifies the attributes for the specified QP and then
 * transitions the QP to the given state.
 * @qp: The QP to modify.
 * @qp_attr: On input, specifies the QP attributes to modify.  On output,
 * the current values of selected QP attributes are returned.
 * @qp_attr_mask: A bit-mask used to specify which attributes of the QP
 * are being modified.
 */
int ib_modify_qp(struct ib_qp *qp,
		 struct ib_qp_attr *qp_attr,
		 int qp_attr_mask);
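
/*
 * Illustrative sketch (hypothetical helper, not part of the verbs API):
 * the RESET->INIT transition an RC QP makes before it can receive.
 * The pkey index and access flags are placeholders.
 */
static inline int ib_example_qp_to_init(struct ib_qp *qp, u8 port_num)
{
	struct ib_qp_attr attr = {};

	attr.qp_state	     = IB_QPS_INIT;
	attr.pkey_index	     = 0;
	attr.port_num	     = port_num;
	attr.qp_access_flags = IB_ACCESS_REMOTE_WRITE;

	return ib_modify_qp(qp, &attr,
			    IB_QP_STATE | IB_QP_PKEY_INDEX |
			    IB_QP_PORT | IB_QP_ACCESS_FLAGS);
}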

/**
 * ib_query_qp - Returns the attribute list and current values for the
 * specified QP.
 * @qp: The QP to query.
 * @qp_attr: The attributes of the specified QP.
 * @qp_attr_mask: A bit-mask used to select specific attributes to query.
 * @qp_init_attr: Additional attributes of the selected QP.
 *
 * The qp_attr_mask may be used to limit the query to gathering only the
 * selected attributes.
 */
int ib_query_qp(struct ib_qp *qp,
		struct ib_qp_attr *qp_attr,
		int qp_attr_mask,
		struct ib_qp_init_attr *qp_init_attr);

/**
 * ib_destroy_qp - Destroys the specified QP.
 * @qp: The QP to destroy.
 */
int ib_destroy_qp(struct ib_qp *qp);

/**
 * ib_open_qp - Obtain a reference to an existing sharable QP.
 * @xrcd: XRC domain
 * @qp_open_attr: Attributes identifying the QP to open.
 *
 * Returns a reference to a sharable QP.
 */
struct ib_qp *ib_open_qp(struct ib_xrcd *xrcd,
			 struct ib_qp_open_attr *qp_open_attr);

/**
 * ib_close_qp - Release an external reference to a QP.
 * @qp: The QP handle to release.
 *
 * The opened QP handle is released by the caller.  The underlying
 * shared QP is not destroyed until all internal references are released.
 */
int ib_close_qp(struct ib_qp *qp);

/**
 * ib_post_send - Posts a list of work requests to the send queue of
 * the specified QP.
 * @qp: The QP to post the work request on.
 * @send_wr: A list of work requests to post on the send queue.
 * @bad_send_wr: On an immediate failure, this parameter will reference
 * the work request that failed to be posted on the QP.
 *
 * While IBA Vol. 1 section 11.4.1.1 specifies that if an immediate
 * error is returned, the QP state shall not be affected,
 * ib_post_send() will return an immediate error after queueing any
 * earlier work requests in the list.
 */
static inline int ib_post_send(struct ib_qp *qp,
			       struct ib_send_wr *send_wr,
			       struct ib_send_wr **bad_send_wr)
{
	return qp->device->post_send(qp, send_wr, bad_send_wr);
}
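
/*
 * Illustrative sketch (hypothetical helper, not part of the verbs API):
 * posting a single signaled SEND of one pre-registered buffer.  The
 * wr_id convention and parameters are assumptions.
 */
static inline int ib_example_post_send(struct ib_qp *qp, u64 dma_addr,
				       u32 len, u32 lkey)
{
	struct ib_sge sge = {};
	struct ib_send_wr wr = {};
	struct ib_send_wr *bad_wr;

	sge.addr   = dma_addr;	/* from ib_dma_map_single() or an MR */
	sge.length = len;
	sge.lkey   = lkey;

	wr.wr_id      = dma_addr;	/* echoed back in the ib_wc */
	wr.sg_list    = &sge;
	wr.num_sge    = 1;
	wr.opcode     = IB_WR_SEND;
	wr.send_flags = IB_SEND_SIGNALED;

	return ib_post_send(qp, &wr, &bad_wr);
}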

/**
 * ib_post_recv - Posts a list of work requests to the receive queue of
 * the specified QP.
 * @qp: The QP to post the work request on.
 * @recv_wr: A list of work requests to post on the receive queue.
 * @bad_recv_wr: On an immediate failure, this parameter will reference
 * the work request that failed to be posted on the QP.
 */
static inline int ib_post_recv(struct ib_qp *qp,
			       struct ib_recv_wr *recv_wr,
			       struct ib_recv_wr **bad_recv_wr)
{
	return qp->device->post_recv(qp, recv_wr, bad_recv_wr);
}
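
/*
 * Illustrative sketch (hypothetical helper, not part of the verbs API):
 * replenishing the receive queue with one buffer, under the same
 * assumptions as the send example above.
 */
static inline int ib_example_post_recv(struct ib_qp *qp, u64 dma_addr,
				       u32 len, u32 lkey)
{
	struct ib_sge sge = {};
	struct ib_recv_wr wr = {};
	struct ib_recv_wr *bad_wr;

	sge.addr   = dma_addr;
	sge.length = len;	/* on UD QPs, include 40 bytes for the GRH */
	sge.lkey   = lkey;

	wr.wr_id   = dma_addr;	/* identifies the buffer on completion */
	wr.sg_list = &sge;
	wr.num_sge = 1;

	return ib_post_recv(qp, &wr, &bad_wr);
}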

/**
 * ib_create_cq - Creates a CQ on the specified device.
 * @device: The device on which to create the CQ.
 * @comp_handler: A user-specified callback that is invoked when a
 * completion event occurs on the CQ.
 * @event_handler: A user-specified callback that is invoked when an
 * asynchronous event not associated with a completion occurs on the CQ.
 * @cq_context: Context associated with the CQ returned to the user via
 * the associated completion and event handlers.
 * @cqe: The minimum size of the CQ.
 * @comp_vector: Completion vector used to signal completion events.
 * Must be >= 0 and < context->num_comp_vectors.
 *
 * Users can examine the cq structure to determine the actual CQ size.
 */
struct ib_cq *ib_create_cq(struct ib_device *device,
			   ib_comp_handler comp_handler,
			   void (*event_handler)(struct ib_event *, void *),
			   void *cq_context, int cqe, int comp_vector);
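
/*
 * Illustrative sketch (hypothetical helper, not part of the verbs API):
 * creating a CQ sized for the send and receive depths used in the QP
 * example above, on completion vector 0, with no async event handler.
 */
static inline struct ib_cq *ib_example_create_cq(struct ib_device *device,
						 ib_comp_handler handler,
						 void *ctx)
{
	/* 128 CQEs requested; cq->cqe reports the actual size. */
	return ib_create_cq(device, handler, NULL, ctx, 128, 0);
}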

/**
 * ib_resize_cq - Modifies the capacity of the CQ.
 * @cq: The CQ to resize.
 * @cqe: The minimum size of the CQ.
 *
 * Users can examine the cq structure to determine the actual CQ size.
 */
int ib_resize_cq(struct ib_cq *cq, int cqe);

/**
 * ib_modify_cq - Modifies moderation params of the CQ
 * @cq: The CQ to modify.
 * @cq_count: number of CQEs that will trigger an event
 * @cq_period: max period of time in usec before triggering an event
 */
int ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period);

/**
 * ib_destroy_cq - Destroys the specified CQ.
 * @cq: The CQ to destroy.
 */
int ib_destroy_cq(struct ib_cq *cq);

/**
 * ib_poll_cq - poll a CQ for completion(s)
 * @cq: the CQ being polled
 * @num_entries: maximum number of completions to return
 * @wc: array of at least @num_entries &struct ib_wc where completions
 * will be returned
 *
 * Poll a CQ for (possibly multiple) completions.  If the return value
 * is < 0, an error occurred.  If the return value is >= 0, it is the
 * number of completions returned.  If the return value is
 * non-negative and < num_entries, then the CQ was emptied.
 */
static inline int ib_poll_cq(struct ib_cq *cq, int num_entries,
			     struct ib_wc *wc)
{
	return cq->device->poll_cq(cq, num_entries, wc);
}

/**
 * ib_peek_cq - Returns the number of unreaped completions currently
 * on the specified CQ.
 * @cq: The CQ to peek.
 * @wc_cnt: A minimum number of unreaped completions to check for.
 *
 * If the number of unreaped completions is greater than or equal to wc_cnt,
 * this function returns wc_cnt, otherwise, it returns the actual number of
 * unreaped completions.
 */
int ib_peek_cq(struct ib_cq *cq, int wc_cnt);

/**
 * ib_req_notify_cq - Request completion notification on a CQ.
 * @cq: The CQ to generate an event for.
 * @flags:
 *   Must contain exactly one of %IB_CQ_SOLICITED or %IB_CQ_NEXT_COMP
 *   to request an event on the next solicited event or next work
 *   completion of any type, respectively.  %IB_CQ_REPORT_MISSED_EVENTS
 *   may also be |ed in to request a hint about missed events, as
 *   described below.
 *
 * Return Value:
 *    < 0 means an error occurred while requesting notification
 *   == 0 means notification was requested successfully, and if
 *        IB_CQ_REPORT_MISSED_EVENTS was passed in, then no events
 *        were missed and it is safe to wait for another event.  In
 *        this case it is guaranteed that any work completions added
 *        to the CQ since the last CQ poll will trigger a completion
 *        notification event.
 *    > 0 is only returned if IB_CQ_REPORT_MISSED_EVENTS was passed
 *        in.  It means that the consumer must poll the CQ again to
 *        make sure it is empty to avoid missing an event because of a
 *        race between requesting notification and an entry being
 *        added to the CQ.  This return value means it is possible
 *        (but not guaranteed) that a work completion has been added
 *        to the CQ since the last poll without triggering a
 *        completion notification event.
 */
static inline int ib_req_notify_cq(struct ib_cq *cq,
				   enum ib_cq_notify_flags flags)
{
	return cq->device->req_notify_cq(cq, flags);
}
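
/*
 * Illustrative sketch (hypothetical helper, not part of the verbs API):
 * the drain-and-rearm loop built from ib_poll_cq() and
 * ib_req_notify_cq(), using IB_CQ_REPORT_MISSED_EVENTS to close the
 * race described above.  Per-completion handling is elided.
 */
static inline void ib_example_cq_event(struct ib_cq *cq)
{
	struct ib_wc wc;

	do {
		while (ib_poll_cq(cq, 1, &wc) > 0) {
			/* ... dispatch on wc.status / wc.opcode ... */
		}
		/*
		 * A return > 0 means a completion may have slipped in
		 * after the last poll, so go around again before
		 * sleeping.
		 */
	} while (ib_req_notify_cq(cq, IB_CQ_NEXT_COMP |
				      IB_CQ_REPORT_MISSED_EVENTS) > 0);
}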

/**
 * ib_req_ncomp_notif - Request completion notification when there are
 * at least the specified number of unreaped completions on the CQ.
 * @cq: The CQ to generate an event for.
 * @wc_cnt: The number of unreaped completions that should be on the
 * CQ before an event is generated.
 */
static inline int ib_req_ncomp_notif(struct ib_cq *cq, int wc_cnt)
{
	return cq->device->req_ncomp_notif ?
		cq->device->req_ncomp_notif(cq, wc_cnt) :
		-ENOSYS;
}

/**
 * ib_get_dma_mr - Returns a memory region for system memory that is
 * usable for DMA.
 * @pd: The protection domain associated with the memory region.
 * @mr_access_flags: Specifies the memory access rights.
 *
 * Note that the ib_dma_*() functions defined below must be used
 * to create/destroy addresses used with the Lkey or Rkey returned
 * by ib_get_dma_mr().
 */
struct ib_mr *ib_get_dma_mr(struct ib_pd *pd, int mr_access_flags);

/**
 * ib_dma_mapping_error - check a DMA addr for error
 * @dev: The device for which the dma_addr was created
 * @dma_addr: The DMA address to check
 */
static inline int ib_dma_mapping_error(struct ib_device *dev, u64 dma_addr)
{
	if (dev->dma_ops)
		return dev->dma_ops->mapping_error(dev, dma_addr);
	return dma_mapping_error(dev->dma_device, dma_addr);
}

/**
 * ib_dma_map_single - Map a kernel virtual address to DMA address
 * @dev: The device for which the dma_addr is to be created
 * @cpu_addr: The kernel virtual address
 * @size: The size of the region in bytes
 * @direction: The direction of the DMA
 */
static inline u64 ib_dma_map_single(struct ib_device *dev,
				    void *cpu_addr, size_t size,
				    enum dma_data_direction direction)
{
	if (dev->dma_ops)
		return dev->dma_ops->map_single(dev, cpu_addr, size, direction);
	return dma_map_single(dev->dma_device, cpu_addr, size, direction);
}

/**
 * ib_dma_unmap_single - Destroy a mapping created by ib_dma_map_single()
 * @dev: The device for which the DMA address was created
 * @addr: The DMA address
 * @size: The size of the region in bytes
 * @direction: The direction of the DMA
 */
static inline void ib_dma_unmap_single(struct ib_device *dev,
				       u64 addr, size_t size,
				       enum dma_data_direction direction)
{
	if (dev->dma_ops)
		dev->dma_ops->unmap_single(dev, addr, size, direction);
	else
		dma_unmap_single(dev->dma_device, addr, size, direction);
}
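
/*
 * Illustrative sketch (hypothetical helper, not part of the verbs API):
 * mapping a kernel buffer for an outbound transfer, checking the
 * mapping, and unmapping it once the corresponding completion has been
 * reaped.
 */
static inline int ib_example_single_dma(struct ib_device *dev, void *buf,
					size_t len)
{
	u64 dma_addr;

	dma_addr = ib_dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (ib_dma_mapping_error(dev, dma_addr))
		return -ENOMEM;

	/* ... post a send referencing dma_addr, reap its completion ... */

	ib_dma_unmap_single(dev, dma_addr, len, DMA_TO_DEVICE);
	return 0;
}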

static inline u64 ib_dma_map_single_attrs(struct ib_device *dev,
					  void *cpu_addr, size_t size,
					  enum dma_data_direction direction,
					  struct dma_attrs *attrs)
{
	return dma_map_single_attrs(dev->dma_device, cpu_addr, size,
				    direction, attrs);
}

static inline void ib_dma_unmap_single_attrs(struct ib_device *dev,
					     u64 addr, size_t size,
					     enum dma_data_direction direction,
					     struct dma_attrs *attrs)
{
	return dma_unmap_single_attrs(dev->dma_device, addr, size,
				      direction, attrs);
}

/**
 * ib_dma_map_page - Map a physical page to DMA address
 * @dev: The device for which the dma_addr is to be created
 * @page: The page to be mapped
 * @offset: The offset within the page
 * @size: The size of the region in bytes
 * @direction: The direction of the DMA
 */
static inline u64 ib_dma_map_page(struct ib_device *dev,
				  struct page *page,
				  unsigned long offset,
				  size_t size,
				  enum dma_data_direction direction)
{
	if (dev->dma_ops)
		return dev->dma_ops->map_page(dev, page, offset, size, direction);
	return dma_map_page(dev->dma_device, page, offset, size, direction);
}

/**
 * ib_dma_unmap_page - Destroy a mapping created by ib_dma_map_page()
 * @dev: The device for which the DMA address was created
 * @addr: The DMA address
 * @size: The size of the region in bytes
 * @direction: The direction of the DMA
 */
static inline void ib_dma_unmap_page(struct ib_device *dev,
				     u64 addr, size_t size,
				     enum dma_data_direction direction)
{
	if (dev->dma_ops)
		dev->dma_ops->unmap_page(dev, addr, size, direction);
	else
		dma_unmap_page(dev->dma_device, addr, size, direction);
}

/**
 * ib_dma_map_sg - Map a scatter/gather list to DMA addresses
 * @dev: The device for which the DMA addresses are to be created
 * @sg: The array of scatter/gather entries
 * @nents: The number of scatter/gather entries
 * @direction: The direction of the DMA
 */
static inline int ib_dma_map_sg(struct ib_device *dev,
				struct scatterlist *sg, int nents,
				enum dma_data_direction direction)
{
	if (dev->dma_ops)
		return dev->dma_ops->map_sg(dev, sg, nents, direction);
	return dma_map_sg(dev->dma_device, sg, nents, direction);
}

/**
 * ib_dma_unmap_sg - Unmap a scatter/gather list of DMA addresses
 * @dev: The device for which the DMA addresses were created
 * @sg: The array of scatter/gather entries
 * @nents: The number of scatter/gather entries
 * @direction: The direction of the DMA
 */
static inline void ib_dma_unmap_sg(struct ib_device *dev,
				   struct scatterlist *sg, int nents,
				   enum dma_data_direction direction)
{
	if (dev->dma_ops)
		dev->dma_ops->unmap_sg(dev, sg, nents, direction);
	else
		dma_unmap_sg(dev->dma_device, sg, nents, direction);
}

static inline int ib_dma_map_sg_attrs(struct ib_device *dev,
				      struct scatterlist *sg, int nents,
				      enum dma_data_direction direction,
				      struct dma_attrs *attrs)
{
	return dma_map_sg_attrs(dev->dma_device, sg, nents, direction, attrs);
}

static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev,
					 struct scatterlist *sg, int nents,
					 enum dma_data_direction direction,
					 struct dma_attrs *attrs)
{
	dma_unmap_sg_attrs(dev->dma_device, sg, nents, direction, attrs);
}

/**
 * ib_sg_dma_address - Return the DMA address from a scatter/gather entry
 * @dev: The device for which the DMA addresses were created
 * @sg: The scatter/gather entry
 */
static inline u64 ib_sg_dma_address(struct ib_device *dev,
				    struct scatterlist *sg)
{
	if (dev->dma_ops)
		return dev->dma_ops->dma_address(dev, sg);
	return sg_dma_address(sg);
}

/**
 * ib_sg_dma_len - Return the DMA length from a scatter/gather entry
 * @dev: The device for which the DMA addresses were created
 * @sg: The scatter/gather entry
 */
static inline unsigned int ib_sg_dma_len(struct ib_device *dev,
					 struct scatterlist *sg)
{
	if (dev->dma_ops)
		return dev->dma_ops->dma_len(dev, sg);
	return sg_dma_len(sg);
}
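
/*
 * Illustrative sketch (hypothetical helper, not part of the verbs API):
 * mapping a scatterlist and walking the mapped entries with the
 * ib_sg_dma_*() accessors rather than touching sg entries directly.
 */
static inline int ib_example_map_sg(struct ib_device *dev,
				    struct scatterlist *sgl, int nents,
				    enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i, mapped;

	mapped = ib_dma_map_sg(dev, sgl, nents, dir);
	if (!mapped)
		return -ENOMEM;	/* mapping failure is reported as 0 */

	for_each_sg(sgl, sg, mapped, i) {
		u64 addr = ib_sg_dma_address(dev, sg);
		unsigned int len = ib_sg_dma_len(dev, sg);

		/* ... fill an ib_sge or fast-reg page list entry ... */
		(void)addr;
		(void)len;
	}

	ib_dma_unmap_sg(dev, sgl, nents, dir);
	return 0;
}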

/**
 * ib_dma_sync_single_for_cpu - Prepare DMA region to be accessed by CPU
 * @dev: The device for which the DMA address was created
 * @addr: The DMA address
 * @size: The size of the region in bytes
 * @dir: The direction of the DMA
 */
static inline void ib_dma_sync_single_for_cpu(struct ib_device *dev,
					      u64 addr,
					      size_t size,
					      enum dma_data_direction dir)
{
	if (dev->dma_ops)
		dev->dma_ops->sync_single_for_cpu(dev, addr, size, dir);
	else
		dma_sync_single_for_cpu(dev->dma_device, addr, size, dir);
}

/**
 * ib_dma_sync_single_for_device - Prepare DMA region to be accessed by device
 * @dev: The device for which the DMA address was created
 * @addr: The DMA address
 * @size: The size of the region in bytes
 * @dir: The direction of the DMA
 */
static inline void ib_dma_sync_single_for_device(struct ib_device *dev,
						 u64 addr,
						 size_t size,
						 enum dma_data_direction dir)
{
	if (dev->dma_ops)
		dev->dma_ops->sync_single_for_device(dev, addr, size, dir);
	else
		dma_sync_single_for_device(dev->dma_device, addr, size, dir);
}

/**
 * ib_dma_alloc_coherent - Allocate memory and map it for DMA
 * @dev: The device for which the DMA address is requested
 * @size: The size of the region to allocate in bytes
 * @dma_handle: A pointer for returning the DMA address of the region
 * @flag: memory allocator flags
 */
static inline void *ib_dma_alloc_coherent(struct ib_device *dev,
					  size_t size,
					  u64 *dma_handle,
					  gfp_t flag)
{
	if (dev->dma_ops)
		return dev->dma_ops->alloc_coherent(dev, size, dma_handle, flag);
	else {
		dma_addr_t handle;
		void *ret;

		ret = dma_alloc_coherent(dev->dma_device, size, &handle, flag);
		*dma_handle = handle;
		return ret;
	}
}

/**
 * ib_dma_free_coherent - Free memory allocated by ib_dma_alloc_coherent()
 * @dev: The device for which the DMA addresses were allocated
 * @size: The size of the region
 * @cpu_addr: the address returned by ib_dma_alloc_coherent()
 * @dma_handle: the DMA address returned by ib_dma_alloc_coherent()
 */
static inline void ib_dma_free_coherent(struct ib_device *dev,
					size_t size, void *cpu_addr,
					u64 dma_handle)
{
	if (dev->dma_ops)
		dev->dma_ops->free_coherent(dev, size, cpu_addr, dma_handle);
	else
		dma_free_coherent(dev->dma_device, size, cpu_addr, dma_handle);
}
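
/*
 * Illustrative sketch (hypothetical helper, not part of the verbs API):
 * a coherent allocation for a small, long-lived structure shared with
 * the HCA.  The one-page size is an arbitrary placeholder; the memory
 * is later released with ib_dma_free_coherent().
 */
static inline void *ib_example_alloc_shared_page(struct ib_device *dev,
						 u64 *dma_handle)
{
	/* Coherent memory needs no ib_dma_sync_single_for_*() calls. */
	return ib_dma_alloc_coherent(dev, 4096, dma_handle, GFP_KERNEL);
}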

/**
 * ib_reg_phys_mr - Prepares a virtually addressed memory region for use
 * by an HCA.
 * @pd: The protection domain assigned to the registered region.
 * @phys_buf_array: Specifies a list of physical buffers to use in the
 * memory region.
 * @num_phys_buf: Specifies the size of the phys_buf_array.
 * @mr_access_flags: Specifies the memory access rights.
 * @iova_start: The offset of the region's starting I/O virtual address.
 */
struct ib_mr *ib_reg_phys_mr(struct ib_pd *pd,
			     struct ib_phys_buf *phys_buf_array,
			     int num_phys_buf,
			     int mr_access_flags,
			     u64 *iova_start);

/**
 * ib_rereg_phys_mr - Modifies the attributes of an existing memory region.
 * Conceptually, this call performs a deregister of the memory region
 * followed by a register of a physical memory region.  Where possible,
 * resources are reused instead of deallocated and reallocated.
 * @mr: The memory region to modify.
 * @mr_rereg_mask: A bit-mask used to indicate which of the following
 * properties of the memory region are being modified.
 * @pd: If %IB_MR_REREG_PD is set in mr_rereg_mask, this field specifies
 * the new protection domain to associate with the memory region,
 * otherwise, this parameter is ignored.
 * @phys_buf_array: If %IB_MR_REREG_TRANS is set in mr_rereg_mask, this
 * field specifies a list of physical buffers to use in the new
 * translation, otherwise, this parameter is ignored.
 * @num_phys_buf: If %IB_MR_REREG_TRANS is set in mr_rereg_mask, this
 * field specifies the size of the phys_buf_array, otherwise, this
 * parameter is ignored.
 * @mr_access_flags: If %IB_MR_REREG_ACCESS is set in mr_rereg_mask, this
 * field specifies the new memory access rights, otherwise, this
 * parameter is ignored.
 * @iova_start: The offset of the region's starting I/O virtual address.
 */
int ib_rereg_phys_mr(struct ib_mr *mr,
		     int mr_rereg_mask,
		     struct ib_pd *pd,
		     struct ib_phys_buf *phys_buf_array,
		     int num_phys_buf,
		     int mr_access_flags,
		     u64 *iova_start);

/**
 * ib_query_mr - Retrieves information about a specific memory region.
 * @mr: The memory region to retrieve information about.
 * @mr_attr: The attributes of the specified memory region.
 */
int ib_query_mr(struct ib_mr *mr, struct ib_mr_attr *mr_attr);

/**
 * ib_dereg_mr - Deregisters a memory region and removes it from the
 * HCA translation table.
 * @mr: The memory region to deregister.
 *
 * This function can fail if the memory region has memory windows bound
 * to it.
 */
int ib_dereg_mr(struct ib_mr *mr);

/**
 * ib_alloc_fast_reg_mr - Allocates a memory region usable with the
 * IB_WR_FAST_REG_MR send work request.
 * @pd: The protection domain associated with the region.
 * @max_page_list_len: requested max physical buffer list length to be
 * used with fast register work requests for this MR.
 */
struct ib_mr *ib_alloc_fast_reg_mr(struct ib_pd *pd, int max_page_list_len);

/**
 * ib_alloc_fast_reg_page_list - Allocates a page list array
 * @device: ib device pointer.
 * @page_list_len: size of the page list array to be allocated.
 *
 * This allocates and returns a struct ib_fast_reg_page_list * and a
 * page_list array that is at least page_list_len in size.  The actual
 * size is returned in max_page_list_len.  The caller is responsible
 * for initializing the contents of the page_list array before posting
 * a send work request with the IB_WR_FAST_REG_MR opcode.
 *
 * The page_list array entries must be translated using one of the
 * ib_dma_*() functions just like the addresses passed to
 * ib_map_phys_fmr().  Once the ib_post_send() is issued, the struct
 * ib_fast_reg_page_list must not be modified by the caller until the
 * IB_WC_FAST_REG_MR work request completes.
 */
struct ib_fast_reg_page_list *ib_alloc_fast_reg_page_list(
	struct ib_device *device, int page_list_len);

/**
 * ib_free_fast_reg_page_list - Deallocates a previously allocated
 * page list array.
 * @page_list: struct ib_fast_reg_page_list pointer to be deallocated.
 */
void ib_free_fast_reg_page_list(struct ib_fast_reg_page_list *page_list);

/**
 * ib_update_fast_reg_key - updates the key portion of the fast_reg MR
 * R_Key and L_Key.
 * @mr: struct ib_mr pointer to be updated.
 * @newkey: new key to be used.
 */
static inline void ib_update_fast_reg_key(struct ib_mr *mr, u8 newkey)
{
	mr->lkey = (mr->lkey & 0xffffff00) | newkey;
	mr->rkey = (mr->rkey & 0xffffff00) | newkey;
}

/**
 * ib_inc_rkey - Increments the key portion of the given rkey.  Can be used
 * for calculating a new rkey for type 2 memory windows.
 * @rkey: the rkey to increment.
 */
static inline u32 ib_inc_rkey(u32 rkey)
{
	const u32 mask = 0x000000ff;

	return ((rkey + 1) & mask) | (rkey & ~mask);
}
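
/*
 * Illustrative sketch (hypothetical helper, not part of the verbs API):
 * deriving the rkey a consumer would advertise for the next binding of
 * a type 2 memory window.  How the new rkey is then carried in the
 * bind work request is omitted here.
 */
static inline u32 ib_example_next_mw_rkey(struct ib_mw *mw)
{
	/* Only the consumer-owned low byte of the rkey changes. */
	return ib_inc_rkey(mw->rkey);
}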

/**
 * ib_alloc_mw - Allocates a memory window.
 * @pd: The protection domain associated with the memory window.
 * @type: The type of the memory window (1 or 2).
 */
struct ib_mw *ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type);

/**
 * ib_bind_mw - Posts a work request to the send queue of the specified
 * QP, which binds the memory window to the given address range and
 * remote access attributes.
 * @qp: QP to post the bind work request on.
 * @mw: The memory window to bind.
 * @mw_bind: Specifies information about the memory window, including
 * its address range, remote access rights, and associated memory region.
 *
 * If there is no immediate error, the function will update the rkey member
 * of the mw parameter to its new value.  The bind operation can still fail
 * asynchronously.
 */
static inline int ib_bind_mw(struct ib_qp *qp,
			     struct ib_mw *mw,
			     struct ib_mw_bind *mw_bind)
{
	/* XXX reference counting in corresponding MR? */
	return mw->device->bind_mw ?
		mw->device->bind_mw(qp, mw, mw_bind) :
		-ENOSYS;
}

/**
 * ib_dealloc_mw - Deallocates a memory window.
 * @mw: The memory window to deallocate.
 */
int ib_dealloc_mw(struct ib_mw *mw);

/**
 * ib_alloc_fmr - Allocates an unmapped fast memory region.
 * @pd: The protection domain associated with the unmapped region.
 * @mr_access_flags: Specifies the memory access rights.
 * @fmr_attr: Attributes of the unmapped region.
 *
 * A fast memory region must be mapped before it can be used as part of
 * a work request.
 */
struct ib_fmr *ib_alloc_fmr(struct ib_pd *pd,
			    int mr_access_flags,
			    struct ib_fmr_attr *fmr_attr);

/**
 * ib_map_phys_fmr - Maps a list of physical pages to a fast memory region.
 * @fmr: The fast memory region to associate with the pages.
 * @page_list: An array of physical pages to map to the fast memory region.
 * @list_len: The number of pages in page_list.
 * @iova: The I/O virtual address to use with the mapped region.
 */
static inline int ib_map_phys_fmr(struct ib_fmr *fmr,
				  u64 *page_list, int list_len,
				  u64 iova)
{
	return fmr->device->map_phys_fmr(fmr, page_list, list_len, iova);
}

/**
 * ib_unmap_fmr - Removes the mapping from a list of fast memory regions.
 * @fmr_list: A linked list of fast memory regions to unmap.
 */
int ib_unmap_fmr(struct list_head *fmr_list);

/**
 * ib_dealloc_fmr - Deallocates a fast memory region.
 * @fmr: The fast memory region to deallocate.
 */
int ib_dealloc_fmr(struct ib_fmr *fmr);

/**
 * ib_attach_mcast - Attaches the specified QP to a multicast group.
 * @qp: QP to attach to the multicast group.  The QP must be type
 * IB_QPT_UD.
 * @gid: Multicast group GID.
 * @lid: Multicast group LID in host byte order.
 *
 * In order to send and receive multicast packets, subnet
 * administration must have created the multicast group and configured
 * the fabric appropriately.  The port associated with the specified
 * QP must also be a member of the multicast group.
 */
int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);

/**
 * ib_detach_mcast - Detaches the specified QP from a multicast group.
 * @qp: QP to detach from the multicast group.
 * @gid: Multicast group GID.
 * @lid: Multicast group LID in host byte order.
 */
int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);

/**
 * ib_alloc_xrcd - Allocates an XRC domain.
 * @device: The device on which to allocate the XRC domain.
 */
struct ib_xrcd *ib_alloc_xrcd(struct ib_device *device);

/**
 * ib_dealloc_xrcd - Deallocates an XRC domain.
 * @xrcd: The XRC domain to deallocate.
 */
int ib_dealloc_xrcd(struct ib_xrcd *xrcd);

struct ib_flow *ib_create_flow(struct ib_qp *qp,
			       struct ib_flow_attr *flow_attr, int domain);
int ib_destroy_flow(struct ib_flow *flow_id);

static inline int ib_check_mr_access(int flags)
{
	/*
	 * Local write permission is required if remote write or
	 * remote atomic permission is also requested.
	 */
	if (flags & (IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_REMOTE_WRITE) &&
	    !(flags & IB_ACCESS_LOCAL_WRITE))
		return -EINVAL;

	return 0;
}

#endif /* IB_VERBS_H */