/*
 * Copyright (c) 2004 Mellanox Technologies Ltd.  All rights reserved.
 * Copyright (c) 2004 Infinicon Corporation.  All rights reserved.
 * Copyright (c) 2004 Intel Corporation.  All rights reserved.
 * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
 * Copyright (c) 2004 Voltaire Corporation.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#if !defined(IB_VERBS_H)
#define IB_VERBS_H

#include <linux/types.h>
#include <linux/device.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/rwsem.h>
#include <linux/scatterlist.h>
#include <linux/workqueue.h>
#include <uapi/linux/if_ether.h>

#include <linux/atomic.h>
#include <asm/uaccess.h>

extern struct workqueue_struct *ib_wq;

union ib_gid {
	u8	raw[16];
	struct {
		__be64	subnet_prefix;
		__be64	interface_id;
	} global;
};

enum rdma_node_type {
	/* IB values map to NodeInfo:NodeType. */
	RDMA_NODE_IB_CA		= 1,
	RDMA_NODE_IB_SWITCH,
	RDMA_NODE_IB_ROUTER,
	RDMA_NODE_RNIC,
	RDMA_NODE_USNIC,
	RDMA_NODE_USNIC_UDP,
};

enum rdma_transport_type {
	RDMA_TRANSPORT_IB,
	RDMA_TRANSPORT_IWARP,
	RDMA_TRANSPORT_USNIC,
	RDMA_TRANSPORT_USNIC_UDP
};

__attribute_const__ enum rdma_transport_type
rdma_node_get_transport(enum rdma_node_type node_type);

enum rdma_link_layer {
	IB_LINK_LAYER_UNSPECIFIED,
	IB_LINK_LAYER_INFINIBAND,
	IB_LINK_LAYER_ETHERNET,
};
enum ib_device_cap_flags {
	IB_DEVICE_RESIZE_MAX_WR		= 1,
	IB_DEVICE_BAD_PKEY_CNTR		= (1<<1),
	IB_DEVICE_BAD_QKEY_CNTR		= (1<<2),
	IB_DEVICE_RAW_MULTI		= (1<<3),
	IB_DEVICE_AUTO_PATH_MIG		= (1<<4),
	IB_DEVICE_CHANGE_PHY_PORT	= (1<<5),
	IB_DEVICE_UD_AV_PORT_ENFORCE	= (1<<6),
	IB_DEVICE_CURR_QP_STATE_MOD	= (1<<7),
	IB_DEVICE_SHUTDOWN_PORT		= (1<<8),
	IB_DEVICE_INIT_TYPE		= (1<<9),
	IB_DEVICE_PORT_ACTIVE_EVENT	= (1<<10),
	IB_DEVICE_SYS_IMAGE_GUID	= (1<<11),
	IB_DEVICE_RC_RNR_NAK_GEN	= (1<<12),
	IB_DEVICE_SRQ_RESIZE		= (1<<13),
	IB_DEVICE_N_NOTIFY_CQ		= (1<<14),
	IB_DEVICE_LOCAL_DMA_LKEY	= (1<<15),
	IB_DEVICE_RESERVED		= (1<<16), /* old SEND_W_INV */
	IB_DEVICE_MEM_WINDOW		= (1<<17),
	/*
	 * Devices should set IB_DEVICE_UD_IP_CSUM if they support
	 * insertion of UDP and TCP checksums on outgoing UD IPoIB
	 * messages and can verify the validity of checksums for
	 * incoming messages.  Setting this flag implies that the
	 * IPoIB driver may set NETIF_F_IP_CSUM for datagram mode.
	 */
	IB_DEVICE_UD_IP_CSUM		= (1<<18),
	IB_DEVICE_UD_TSO		= (1<<19),
	IB_DEVICE_XRC			= (1<<20),
	IB_DEVICE_MEM_MGT_EXTENSIONS	= (1<<21),
	IB_DEVICE_BLOCK_MULTICAST_LOOPBACK = (1<<22),
	IB_DEVICE_MEM_WINDOW_TYPE_2A	= (1<<23),
	IB_DEVICE_MEM_WINDOW_TYPE_2B	= (1<<24),
	IB_DEVICE_MANAGED_FLOW_STEERING = (1<<29),
	IB_DEVICE_SIGNATURE_HANDOVER	= (1<<30)
};

enum ib_signature_prot_cap {
	IB_PROT_T10DIF_TYPE_1 = 1,
	IB_PROT_T10DIF_TYPE_2 = 1 << 1,
	IB_PROT_T10DIF_TYPE_3 = 1 << 2,
};

enum ib_signature_guard_cap {
	IB_GUARD_T10DIF_CRC	= 1,
	IB_GUARD_T10DIF_CSUM	= 1 << 1,
};

enum ib_atomic_cap {
	IB_ATOMIC_NONE,
	IB_ATOMIC_HCA,
	IB_ATOMIC_GLOB
};

struct ib_device_attr {
	u64			fw_ver;
	__be64			sys_image_guid;
	u64			max_mr_size;
	u64			page_size_cap;
	u32			vendor_id;
	u32			vendor_part_id;
	u32			hw_ver;
	int			max_qp;
	int			max_qp_wr;
	int			device_cap_flags;
	int			max_sge;
	int			max_sge_rd;
	int			max_cq;
	int			max_cqe;
	int			max_mr;
	int			max_pd;
	int			max_qp_rd_atom;
	int			max_ee_rd_atom;
	int			max_res_rd_atom;
	int			max_qp_init_rd_atom;
	int			max_ee_init_rd_atom;
	enum ib_atomic_cap	atomic_cap;
	enum ib_atomic_cap	masked_atomic_cap;
	int			max_ee;
	int			max_rdd;
	int			max_mw;
	int			max_raw_ipv6_qp;
	int			max_raw_ethy_qp;
	int			max_mcast_grp;
	int			max_mcast_qp_attach;
	int			max_total_mcast_qp_attach;
	int			max_ah;
	int			max_fmr;
	int			max_map_per_fmr;
	int			max_srq;
	int			max_srq_wr;
	int			max_srq_sge;
	unsigned int		max_fast_reg_page_list_len;
	u16			max_pkeys;
	u8			local_ca_ack_delay;
	int			sig_prot_cap;
	int			sig_guard_cap;
};

enum ib_mtu {
	IB_MTU_256  = 1,
	IB_MTU_512  = 2,
	IB_MTU_1024 = 3,
	IB_MTU_2048 = 4,
	IB_MTU_4096 = 5
};

static inline int ib_mtu_enum_to_int(enum ib_mtu mtu)
{
	switch (mtu) {
	case IB_MTU_256:  return  256;
	case IB_MTU_512:  return  512;
	case IB_MTU_1024: return 1024;
	case IB_MTU_2048: return 2048;
	case IB_MTU_4096: return 4096;
	default:	  return -1;
	}
}

enum ib_port_state {
	IB_PORT_NOP		= 0,
	IB_PORT_DOWN		= 1,
	IB_PORT_INIT		= 2,
	IB_PORT_ARMED		= 3,
	IB_PORT_ACTIVE		= 4,
	IB_PORT_ACTIVE_DEFER	= 5
};

enum ib_port_cap_flags {
	IB_PORT_SM				= 1 <<  1,
	IB_PORT_NOTICE_SUP			= 1 <<  2,
	IB_PORT_TRAP_SUP			= 1 <<  3,
	IB_PORT_OPT_IPD_SUP			= 1 <<  4,
	IB_PORT_AUTO_MIGR_SUP			= 1 <<  5,
	IB_PORT_SL_MAP_SUP			= 1 <<  6,
	IB_PORT_MKEY_NVRAM			= 1 <<  7,
	IB_PORT_PKEY_NVRAM			= 1 <<  8,
	IB_PORT_LED_INFO_SUP			= 1 <<  9,
	IB_PORT_SM_DISABLED			= 1 << 10,
	IB_PORT_SYS_IMAGE_GUID_SUP		= 1 << 11,
	IB_PORT_PKEY_SW_EXT_PORT_TRAP_SUP	= 1 << 12,
	IB_PORT_EXTENDED_SPEEDS_SUP		= 1 << 14,
	IB_PORT_CM_SUP				= 1 << 16,
	IB_PORT_SNMP_TUNNEL_SUP			= 1 << 17,
	IB_PORT_REINIT_SUP			= 1 << 18,
	IB_PORT_DEVICE_MGMT_SUP			= 1 << 19,
	IB_PORT_VENDOR_CLASS_SUP		= 1 << 20,
	IB_PORT_DR_NOTICE_SUP			= 1 << 21,
	IB_PORT_CAP_MASK_NOTICE_SUP		= 1 << 22,
	IB_PORT_BOOT_MGMT_SUP			= 1 << 23,
	IB_PORT_LINK_LATENCY_SUP		= 1 << 24,
	IB_PORT_CLIENT_REG_SUP			= 1 << 25,
	IB_PORT_IP_BASED_GIDS			= 1 << 26
};

enum ib_port_width {
	IB_WIDTH_1X	= 1,
	IB_WIDTH_4X	= 2,
	IB_WIDTH_8X	= 4,
	IB_WIDTH_12X	= 8
};

static inline int ib_width_enum_to_int(enum ib_port_width width)
{
	switch (width) {
	case IB_WIDTH_1X:  return  1;
	case IB_WIDTH_4X:  return  4;
	case IB_WIDTH_8X:  return  8;
	case IB_WIDTH_12X: return 12;
	default:	   return -1;
	}
}

enum ib_port_speed {
	IB_SPEED_SDR	= 1,
	IB_SPEED_DDR	= 2,
	IB_SPEED_QDR	= 4,
	IB_SPEED_FDR10	= 8,
	IB_SPEED_FDR	= 16,
	IB_SPEED_EDR	= 32
};

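/*
 * Example: a minimal sketch (not part of the core API) combining
 * ib_port_attr.active_width and .active_speed into a link rate in
 * tenths of a Gb/sec.  The per-lane values are approximations derived
 * from the enum names (e.g. FDR is roughly 14 Gb/sec per lane), so
 * treat them as illustrative rather than authoritative.
 */
static inline int example_link_rate_tenths_gbps(u8 active_width,
						u8 active_speed)
{
	int lanes = ib_width_enum_to_int((enum ib_port_width)active_width);
	int per_lane;

	switch (active_speed) {
	case IB_SPEED_SDR:   per_lane =  25; break;
	case IB_SPEED_DDR:   per_lane =  50; break;
	case IB_SPEED_QDR:   per_lane = 100; break;
	case IB_SPEED_FDR10: per_lane = 100; break;
	case IB_SPEED_FDR:   per_lane = 140; break;
	case IB_SPEED_EDR:   per_lane = 250; break;
	default:	     return -1;
	}

	return lanes < 0 ? -1 : lanes * per_lane;
}
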
struct ib_protocol_stats {
	/* TBD... */
};

struct iw_protocol_stats {
	u64	ipInReceives;
	u64	ipInHdrErrors;
	u64	ipInTooBigErrors;
	u64	ipInNoRoutes;
	u64	ipInAddrErrors;
	u64	ipInUnknownProtos;
	u64	ipInTruncatedPkts;
	u64	ipInDiscards;
	u64	ipInDelivers;
	u64	ipOutForwDatagrams;
	u64	ipOutRequests;
	u64	ipOutDiscards;
	u64	ipOutNoRoutes;
	u64	ipReasmTimeout;
	u64	ipReasmReqds;
	u64	ipReasmOKs;
	u64	ipReasmFails;
	u64	ipFragOKs;
	u64	ipFragFails;
	u64	ipFragCreates;
	u64	ipInMcastPkts;
	u64	ipOutMcastPkts;
	u64	ipInBcastPkts;
	u64	ipOutBcastPkts;

	u64	tcpRtoAlgorithm;
	u64	tcpRtoMin;
	u64	tcpRtoMax;
	u64	tcpMaxConn;
	u64	tcpActiveOpens;
	u64	tcpPassiveOpens;
	u64	tcpAttemptFails;
	u64	tcpEstabResets;
	u64	tcpCurrEstab;
	u64	tcpInSegs;
	u64	tcpOutSegs;
	u64	tcpRetransSegs;
	u64	tcpInErrs;
	u64	tcpOutRsts;
};

union rdma_protocol_stats {
	struct ib_protocol_stats	ib;
	struct iw_protocol_stats	iw;
};

struct ib_port_attr {
	enum ib_port_state	state;
	enum ib_mtu		max_mtu;
	enum ib_mtu		active_mtu;
	int			gid_tbl_len;
	u32			port_cap_flags;
	u32			max_msg_sz;
	u32			bad_pkey_cntr;
	u32			qkey_viol_cntr;
	u16			pkey_tbl_len;
	u16			lid;
	u16			sm_lid;
	u8			lmc;
	u8			max_vl_num;
	u8			sm_sl;
	u8			subnet_timeout;
	u8			init_type_reply;
	u8			active_width;
	u8			active_speed;
	u8			phys_state;
};

enum ib_device_modify_flags {
	IB_DEVICE_MODIFY_SYS_IMAGE_GUID	= 1 << 0,
	IB_DEVICE_MODIFY_NODE_DESC	= 1 << 1
};

struct ib_device_modify {
	u64	sys_image_guid;
	char	node_desc[64];
};

enum ib_port_modify_flags {
	IB_PORT_SHUTDOWN	= 1,
	IB_PORT_INIT_TYPE	= (1<<2),
	IB_PORT_RESET_QKEY_CNTR	= (1<<3)
};

struct ib_port_modify {
	u32	set_port_cap_mask;
	u32	clr_port_cap_mask;
	u8	init_type;
};

enum ib_event_type {
	IB_EVENT_CQ_ERR,
	IB_EVENT_QP_FATAL,
	IB_EVENT_QP_REQ_ERR,
	IB_EVENT_QP_ACCESS_ERR,
	IB_EVENT_COMM_EST,
	IB_EVENT_SQ_DRAINED,
	IB_EVENT_PATH_MIG,
	IB_EVENT_PATH_MIG_ERR,
	IB_EVENT_DEVICE_FATAL,
	IB_EVENT_PORT_ACTIVE,
	IB_EVENT_PORT_ERR,
	IB_EVENT_LID_CHANGE,
	IB_EVENT_PKEY_CHANGE,
	IB_EVENT_SM_CHANGE,
	IB_EVENT_SRQ_ERR,
	IB_EVENT_SRQ_LIMIT_REACHED,
	IB_EVENT_QP_LAST_WQE_REACHED,
	IB_EVENT_CLIENT_REREGISTER,
	IB_EVENT_GID_CHANGE,
};

struct ib_event {
	struct ib_device	*device;
	union {
		struct ib_cq	*cq;
		struct ib_qp	*qp;
		struct ib_srq	*srq;
		u8		port_num;
	} element;
	enum ib_event_type	event;
};

struct ib_event_handler {
	struct ib_device *device;
	void            (*handler)(struct ib_event_handler *, struct ib_event *);
	struct list_head  list;
};

#define INIT_IB_EVENT_HANDLER(_ptr, _device, _handler)		\
	do {							\
		(_ptr)->device  = _device;			\
		(_ptr)->handler = _handler;			\
		INIT_LIST_HEAD(&(_ptr)->list);			\
	} while (0)

struct ib_global_route {
	union ib_gid	dgid;
	u32		flow_label;
	u8		sgid_index;
	u8		hop_limit;
	u8		traffic_class;
};

struct ib_grh {
	__be32		version_tclass_flow;
	__be16		paylen;
	u8		next_hdr;
	u8		hop_limit;
	union ib_gid	sgid;
	union ib_gid	dgid;
};

enum {
	IB_MULTICAST_QPN = 0xffffff
};

#define IB_LID_PERMISSIVE	cpu_to_be16(0xFFFF)

enum ib_ah_flags {
	IB_AH_GRH	= 1
};

enum ib_rate {
	IB_RATE_PORT_CURRENT = 0,
	IB_RATE_2_5_GBPS = 2,
	IB_RATE_5_GBPS   = 5,
	IB_RATE_10_GBPS  = 3,
	IB_RATE_20_GBPS  = 6,
	IB_RATE_30_GBPS  = 4,
	IB_RATE_40_GBPS  = 7,
	IB_RATE_60_GBPS  = 8,
	IB_RATE_80_GBPS  = 9,
	IB_RATE_120_GBPS = 10,
	IB_RATE_14_GBPS  = 11,
	IB_RATE_56_GBPS  = 12,
	IB_RATE_112_GBPS = 13,
	IB_RATE_168_GBPS = 14,
	IB_RATE_25_GBPS  = 15,
	IB_RATE_100_GBPS = 16,
	IB_RATE_200_GBPS = 17,
	IB_RATE_300_GBPS = 18
};

/**
 * ib_rate_to_mult - Convert the IB rate enum to a multiple of the
 * base rate of 2.5 Gbit/sec.  For example, IB_RATE_5_GBPS will be
 * converted to 2, since 5 Gbit/sec is 2 * 2.5 Gbit/sec.
 * @rate: rate to convert.
 */
__attribute_const__ int ib_rate_to_mult(enum ib_rate rate);

/**
 * ib_rate_to_mbps - Convert the IB rate enum to Mbps.
 * For example, IB_RATE_2_5_GBPS will be converted to 2500.
 * @rate: rate to convert.
 */
__attribute_const__ int ib_rate_to_mbps(enum ib_rate rate);

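/*
 * Example: a minimal sketch showing how the two conversion helpers
 * above relate.  Per the definitions in enum ib_rate, IB_RATE_40_GBPS
 * maps to a multiplier of 16, so this returns 40000 for it.
 */
static inline int example_rate_to_mbps_via_mult(enum ib_rate rate)
{
	int mult = ib_rate_to_mult(rate);

	return mult < 0 ? mult : mult * 2500;
}
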
enum ib_mr_create_flags {
	IB_MR_SIGNATURE_EN = 1,
};

/**
 * ib_mr_init_attr - Memory region init attributes passed to routine
 * ib_create_mr.
 * @max_reg_descriptors: max number of registration descriptors that
 *     may be used with registration work requests.
 * @flags: MR creation flags bit mask.
 */
struct ib_mr_init_attr {
	int		max_reg_descriptors;
	u32		flags;
};

enum ib_signature_type {
	IB_SIG_TYPE_T10_DIF,
};

/**
 * T10-DIF Signature types
 * T10-DIF types are defined by SCSI specifications.
 */
enum ib_t10_dif_type {
	IB_T10DIF_NONE,
	IB_T10DIF_TYPE1,
	IB_T10DIF_TYPE2,
	IB_T10DIF_TYPE3
};

/**
 * Signature T10-DIF block-guard types
 * IB_T10DIF_CRC: Corresponds to T10-PI mandated CRC checksum rules.
 * IB_T10DIF_CSUM: Corresponds to IP checksum rules.
 */
enum ib_t10_dif_bg_type {
	IB_T10DIF_CRC,
	IB_T10DIF_CSUM
};

/**
 * struct ib_t10_dif_domain - Parameters specific to a T10-DIF domain.
 * @type: T10-DIF type (0|1|2|3)
 * @bg_type: T10-DIF block guard type (CRC|CSUM)
 * @pi_interval: protection information interval.
 * @bg: seed of guard computation.
 * @app_tag: application tag of guard block
 * @ref_tag: initial guard block reference tag.
 * @type3_inc_reftag: the T10-DIF type 3 specification says nothing
 *     about the reference tag; it is the user's choice whether to
 *     increment it or not.
 */
struct ib_t10_dif_domain {
	enum ib_t10_dif_type	type;
	enum ib_t10_dif_bg_type bg_type;
	u16			pi_interval;
	u16			bg;
	u16			app_tag;
	u32			ref_tag;
	bool			type3_inc_reftag;
};

/**
 * struct ib_sig_domain - Parameters for signature domain
 * @sig_type: specific signature type
 * @sig: union of all signature domain attributes that may
 *     be used to set domain layout.
 */
struct ib_sig_domain {
	enum ib_signature_type sig_type;
	union {
		struct ib_t10_dif_domain dif;
	} sig;
};

/**
 * struct ib_sig_attrs - Parameters for signature handover operation
 * @check_mask: bitmask for signature byte check (8 bytes)
 * @mem: memory domain layout descriptor.
 * @wire: wire domain layout descriptor.
 */
struct ib_sig_attrs {
	u8			check_mask;
	struct ib_sig_domain	mem;
	struct ib_sig_domain	wire;
};

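/*
 * Example: a minimal sketch of filling in ib_sig_attrs for a T10-DIF
 * type 1 handover with a CRC block guard.  The 512-byte protection
 * interval and the reference tag are illustrative assumptions, and
 * memset() assumes <linux/string.h> is available.
 */
static inline void example_init_t10dif_attrs(struct ib_sig_attrs *attrs)
{
	memset(attrs, 0, sizeof(*attrs));

	attrs->mem.sig_type	       = IB_SIG_TYPE_T10_DIF;
	attrs->mem.sig.dif.type	       = IB_T10DIF_TYPE1;
	attrs->mem.sig.dif.bg_type     = IB_T10DIF_CRC;
	attrs->mem.sig.dif.pi_interval = 512;
	attrs->mem.sig.dif.ref_tag     = 0x12345678;

	/* In this sketch the wire domain simply mirrors the memory domain. */
	attrs->wire = attrs->mem;
}
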
enum ib_sig_err_type {
	IB_SIG_BAD_GUARD,
	IB_SIG_BAD_REFTAG,
	IB_SIG_BAD_APPTAG,
};

/**
 * struct ib_sig_err - signature error descriptor
 */
struct ib_sig_err {
	enum ib_sig_err_type	err_type;
	u32			expected;
	u32			actual;
	u64			sig_err_offset;
	u32			key;
};

enum ib_mr_status_check {
	IB_MR_CHECK_SIG_STATUS = 1,
};

/**
 * struct ib_mr_status - Memory region status container
 *
 * @fail_status: Bitmask of MR checks status. For each
 *     failed check a corresponding status bit is set.
 * @sig_err: Additional info for IB_MR_CHECK_SIG_STATUS
 *     failure.
 */
struct ib_mr_status {
	u32		    fail_status;
	struct ib_sig_err   sig_err;
};

/**
 * mult_to_ib_rate - Convert a multiple of 2.5 Gbit/sec to an IB rate
 * enum.
 * @mult: multiple to convert.
 */
__attribute_const__ enum ib_rate mult_to_ib_rate(int mult);

struct ib_ah_attr {
	struct ib_global_route	grh;
	u16			dlid;
	u8			sl;
	u8			src_path_bits;
	u8			static_rate;
	u8			ah_flags;
	u8			port_num;
	u8			dmac[ETH_ALEN];
	u16			vlan_id;
};

enum ib_wc_status {
	IB_WC_SUCCESS,
	IB_WC_LOC_LEN_ERR,
	IB_WC_LOC_QP_OP_ERR,
	IB_WC_LOC_EEC_OP_ERR,
	IB_WC_LOC_PROT_ERR,
	IB_WC_WR_FLUSH_ERR,
	IB_WC_MW_BIND_ERR,
	IB_WC_BAD_RESP_ERR,
	IB_WC_LOC_ACCESS_ERR,
	IB_WC_REM_INV_REQ_ERR,
	IB_WC_REM_ACCESS_ERR,
	IB_WC_REM_OP_ERR,
	IB_WC_RETRY_EXC_ERR,
	IB_WC_RNR_RETRY_EXC_ERR,
	IB_WC_LOC_RDD_VIOL_ERR,
	IB_WC_REM_INV_RD_REQ_ERR,
	IB_WC_REM_ABORT_ERR,
	IB_WC_INV_EECN_ERR,
	IB_WC_INV_EEC_STATE_ERR,
	IB_WC_FATAL_ERR,
	IB_WC_RESP_TIMEOUT_ERR,
	IB_WC_GENERAL_ERR
};

enum ib_wc_opcode {
	IB_WC_SEND,
	IB_WC_RDMA_WRITE,
	IB_WC_RDMA_READ,
	IB_WC_COMP_SWAP,
	IB_WC_FETCH_ADD,
	IB_WC_BIND_MW,
	IB_WC_LSO,
	IB_WC_LOCAL_INV,
	IB_WC_FAST_REG_MR,
	IB_WC_MASKED_COMP_SWAP,
	IB_WC_MASKED_FETCH_ADD,
/*
 * Set value of IB_WC_RECV so consumers can test if a completion is a
 * receive by testing (opcode & IB_WC_RECV).
 */
	IB_WC_RECV			= 1 << 7,
	IB_WC_RECV_RDMA_WITH_IMM
};

enum ib_wc_flags {
	IB_WC_GRH		= 1,
	IB_WC_WITH_IMM		= (1<<1),
	IB_WC_WITH_INVALIDATE	= (1<<2),
	IB_WC_IP_CSUM_OK	= (1<<3),
	IB_WC_WITH_SMAC		= (1<<4),
	IB_WC_WITH_VLAN		= (1<<5),
};

struct ib_wc {
	u64			wr_id;
	enum ib_wc_status	status;
	enum ib_wc_opcode	opcode;
	u32			vendor_err;
	u32			byte_len;
	struct ib_qp	       *qp;
	union {
		__be32		imm_data;
		u32		invalidate_rkey;
	} ex;
	u32			src_qp;
	int			wc_flags;
	u16			pkey_index;
	u16			slid;
	u8			sl;
	u8			dlid_path_bits;
	u8			port_num;	/* valid only for DR SMPs on switches */
	u8			smac[ETH_ALEN];
	u16			vlan_id;
};

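/*
 * Example: classifying a completion using the IB_WC_RECV convention
 * documented in enum ib_wc_opcode above.
 */
static inline bool example_wc_is_recv(const struct ib_wc *wc)
{
	return (wc->opcode & IB_WC_RECV) != 0;
}
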
enum ib_cq_notify_flags {
	IB_CQ_SOLICITED			= 1 << 0,
	IB_CQ_NEXT_COMP			= 1 << 1,
	IB_CQ_SOLICITED_MASK		= IB_CQ_SOLICITED | IB_CQ_NEXT_COMP,
	IB_CQ_REPORT_MISSED_EVENTS	= 1 << 2,
};

enum ib_srq_type {
	IB_SRQT_BASIC,
	IB_SRQT_XRC
};

enum ib_srq_attr_mask {
	IB_SRQ_MAX_WR	= 1 << 0,
	IB_SRQ_LIMIT	= 1 << 1,
};

struct ib_srq_attr {
	u32	max_wr;
	u32	max_sge;
	u32	srq_limit;
};

struct ib_srq_init_attr {
	void		      (*event_handler)(struct ib_event *, void *);
	void		       *srq_context;
	struct ib_srq_attr	attr;
	enum ib_srq_type	srq_type;

	union {
		struct {
			struct ib_xrcd *xrcd;
			struct ib_cq   *cq;
		} xrc;
	} ext;
};

struct ib_qp_cap {
	u32	max_send_wr;
	u32	max_recv_wr;
	u32	max_send_sge;
	u32	max_recv_sge;
	u32	max_inline_data;
};

enum ib_sig_type {
	IB_SIGNAL_ALL_WR,
	IB_SIGNAL_REQ_WR
};

enum ib_qp_type {
	/*
	 * IB_QPT_SMI and IB_QPT_GSI have to be the first two entries
	 * here (and in that order) since the MAD layer uses them as
	 * indices into a 2-entry table.
	 */
	IB_QPT_SMI,
	IB_QPT_GSI,

	IB_QPT_RC,
	IB_QPT_UC,
	IB_QPT_UD,
	IB_QPT_RAW_IPV6,
	IB_QPT_RAW_ETHERTYPE,
	IB_QPT_RAW_PACKET = 8,
	IB_QPT_XRC_INI = 9,
	IB_QPT_XRC_TGT,
	IB_QPT_MAX,
	/* Reserve a range for qp types internal to the low level driver.
	 * These qp types will not be visible at the IB core layer, so the
	 * IB_QPT_MAX usages should not be affected in the core layer
	 */
	IB_QPT_RESERVED1 = 0x1000,
	IB_QPT_RESERVED2,
	IB_QPT_RESERVED3,
	IB_QPT_RESERVED4,
	IB_QPT_RESERVED5,
	IB_QPT_RESERVED6,
	IB_QPT_RESERVED7,
	IB_QPT_RESERVED8,
	IB_QPT_RESERVED9,
	IB_QPT_RESERVED10,
};

enum ib_qp_create_flags {
	IB_QP_CREATE_IPOIB_UD_LSO		= 1 << 0,
	IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK	= 1 << 1,
	IB_QP_CREATE_NETIF_QP			= 1 << 5,
	IB_QP_CREATE_SIGNATURE_EN		= 1 << 6,
	IB_QP_CREATE_USE_GFP_NOIO		= 1 << 7,
	/* reserve bits 26-31 for low level drivers' internal use */
	IB_QP_CREATE_RESERVED_START		= 1 << 26,
	IB_QP_CREATE_RESERVED_END		= 1 << 31,
};


/*
 * Note: users may not call ib_close_qp or ib_destroy_qp from the event_handler
 * callback to destroy the passed in QP.
 */

struct ib_qp_init_attr {
	void                  (*event_handler)(struct ib_event *, void *);
	void		       *qp_context;
	struct ib_cq	       *send_cq;
	struct ib_cq	       *recv_cq;
	struct ib_srq	       *srq;
	struct ib_xrcd	       *xrcd;	  /* XRC TGT QPs only */
	struct ib_qp_cap	cap;
	enum ib_sig_type	sq_sig_type;
	enum ib_qp_type		qp_type;
	enum ib_qp_create_flags	create_flags;
	u8			port_num; /* special QP types only */
};

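/*
 * Example: a minimal sketch of filling in ib_qp_init_attr for an RC QP
 * that shares one CQ for send and receive.  The capability numbers are
 * illustrative; real consumers size them against ib_device_attr limits.
 * memset() assumes <linux/string.h> is available.
 */
static inline void example_init_rc_qp_attr(struct ib_qp_init_attr *attr,
					   struct ib_cq *cq)
{
	memset(attr, 0, sizeof(*attr));

	attr->send_cq	       = cq;
	attr->recv_cq	       = cq;
	attr->cap.max_send_wr  = 16;
	attr->cap.max_recv_wr  = 16;
	attr->cap.max_send_sge = 1;
	attr->cap.max_recv_sge = 1;
	attr->sq_sig_type      = IB_SIGNAL_REQ_WR;
	attr->qp_type	       = IB_QPT_RC;
}
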
struct ib_qp_open_attr {
	void                  (*event_handler)(struct ib_event *, void *);
	void		       *qp_context;
	u32			qp_num;
	enum ib_qp_type		qp_type;
};

enum ib_rnr_timeout {
	IB_RNR_TIMER_655_36 =  0,
	IB_RNR_TIMER_000_01 =  1,
	IB_RNR_TIMER_000_02 =  2,
	IB_RNR_TIMER_000_03 =  3,
	IB_RNR_TIMER_000_04 =  4,
	IB_RNR_TIMER_000_06 =  5,
	IB_RNR_TIMER_000_08 =  6,
	IB_RNR_TIMER_000_12 =  7,
	IB_RNR_TIMER_000_16 =  8,
	IB_RNR_TIMER_000_24 =  9,
	IB_RNR_TIMER_000_32 = 10,
	IB_RNR_TIMER_000_48 = 11,
	IB_RNR_TIMER_000_64 = 12,
	IB_RNR_TIMER_000_96 = 13,
	IB_RNR_TIMER_001_28 = 14,
	IB_RNR_TIMER_001_92 = 15,
	IB_RNR_TIMER_002_56 = 16,
	IB_RNR_TIMER_003_84 = 17,
	IB_RNR_TIMER_005_12 = 18,
	IB_RNR_TIMER_007_68 = 19,
	IB_RNR_TIMER_010_24 = 20,
	IB_RNR_TIMER_015_36 = 21,
	IB_RNR_TIMER_020_48 = 22,
	IB_RNR_TIMER_030_72 = 23,
	IB_RNR_TIMER_040_96 = 24,
	IB_RNR_TIMER_061_44 = 25,
	IB_RNR_TIMER_081_92 = 26,
	IB_RNR_TIMER_122_88 = 27,
	IB_RNR_TIMER_163_84 = 28,
	IB_RNR_TIMER_245_76 = 29,
	IB_RNR_TIMER_327_68 = 30,
	IB_RNR_TIMER_491_52 = 31
};

enum ib_qp_attr_mask {
	IB_QP_STATE			= 1,
	IB_QP_CUR_STATE			= (1<<1),
	IB_QP_EN_SQD_ASYNC_NOTIFY	= (1<<2),
	IB_QP_ACCESS_FLAGS		= (1<<3),
	IB_QP_PKEY_INDEX		= (1<<4),
	IB_QP_PORT			= (1<<5),
	IB_QP_QKEY			= (1<<6),
	IB_QP_AV			= (1<<7),
	IB_QP_PATH_MTU			= (1<<8),
	IB_QP_TIMEOUT			= (1<<9),
	IB_QP_RETRY_CNT			= (1<<10),
	IB_QP_RNR_RETRY			= (1<<11),
	IB_QP_RQ_PSN			= (1<<12),
	IB_QP_MAX_QP_RD_ATOMIC		= (1<<13),
	IB_QP_ALT_PATH			= (1<<14),
	IB_QP_MIN_RNR_TIMER		= (1<<15),
	IB_QP_SQ_PSN			= (1<<16),
	IB_QP_MAX_DEST_RD_ATOMIC	= (1<<17),
	IB_QP_PATH_MIG_STATE		= (1<<18),
	IB_QP_CAP			= (1<<19),
	IB_QP_DEST_QPN			= (1<<20),
	IB_QP_SMAC			= (1<<21),
	IB_QP_ALT_SMAC			= (1<<22),
	IB_QP_VID			= (1<<23),
	IB_QP_ALT_VID			= (1<<24),
};

enum ib_qp_state {
	IB_QPS_RESET,
	IB_QPS_INIT,
	IB_QPS_RTR,
	IB_QPS_RTS,
	IB_QPS_SQD,
	IB_QPS_SQE,
	IB_QPS_ERR
};

enum ib_mig_state {
	IB_MIG_MIGRATED,
	IB_MIG_REARM,
	IB_MIG_ARMED
};

enum ib_mw_type {
	IB_MW_TYPE_1 = 1,
	IB_MW_TYPE_2 = 2
};

struct ib_qp_attr {
	enum ib_qp_state	qp_state;
	enum ib_qp_state	cur_qp_state;
	enum ib_mtu		path_mtu;
	enum ib_mig_state	path_mig_state;
	u32			qkey;
	u32			rq_psn;
	u32			sq_psn;
	u32			dest_qp_num;
	int			qp_access_flags;
	struct ib_qp_cap	cap;
	struct ib_ah_attr	ah_attr;
	struct ib_ah_attr	alt_ah_attr;
	u16			pkey_index;
	u16			alt_pkey_index;
	u8			en_sqd_async_notify;
	u8			sq_draining;
	u8			max_rd_atomic;
	u8			max_dest_rd_atomic;
	u8			min_rnr_timer;
	u8			port_num;
	u8			timeout;
	u8			retry_cnt;
	u8			rnr_retry;
	u8			alt_port_num;
	u8			alt_timeout;
	u8			smac[ETH_ALEN];
	u8			alt_smac[ETH_ALEN];
	u16			vlan_id;
	u16			alt_vlan_id;
};

enum ib_wr_opcode {
	IB_WR_RDMA_WRITE,
	IB_WR_RDMA_WRITE_WITH_IMM,
	IB_WR_SEND,
	IB_WR_SEND_WITH_IMM,
	IB_WR_RDMA_READ,
	IB_WR_ATOMIC_CMP_AND_SWP,
	IB_WR_ATOMIC_FETCH_AND_ADD,
	IB_WR_LSO,
	IB_WR_SEND_WITH_INV,
	IB_WR_RDMA_READ_WITH_INV,
	IB_WR_LOCAL_INV,
	IB_WR_FAST_REG_MR,
	IB_WR_MASKED_ATOMIC_CMP_AND_SWP,
	IB_WR_MASKED_ATOMIC_FETCH_AND_ADD,
	IB_WR_BIND_MW,
	IB_WR_REG_SIG_MR,
	/* reserve values for low level drivers' internal use.
	 * These values will not be used at all in the ib core layer.
	 */
	IB_WR_RESERVED1 = 0xf0,
	IB_WR_RESERVED2,
	IB_WR_RESERVED3,
	IB_WR_RESERVED4,
	IB_WR_RESERVED5,
	IB_WR_RESERVED6,
	IB_WR_RESERVED7,
	IB_WR_RESERVED8,
	IB_WR_RESERVED9,
	IB_WR_RESERVED10,
};

enum ib_send_flags {
	IB_SEND_FENCE		= 1,
	IB_SEND_SIGNALED	= (1<<1),
	IB_SEND_SOLICITED	= (1<<2),
	IB_SEND_INLINE		= (1<<3),
	IB_SEND_IP_CSUM		= (1<<4),

	/* reserve bits 26-31 for low level drivers' internal use */
	IB_SEND_RESERVED_START	= (1 << 26),
	IB_SEND_RESERVED_END	= (1 << 31),
};

struct ib_sge {
	u64	addr;
	u32	length;
	u32	lkey;
};

struct ib_fast_reg_page_list {
	struct ib_device       *device;
	u64		       *page_list;
	unsigned int		max_page_list_len;
};

/**
 * struct ib_mw_bind_info - Parameters for a memory window bind operation.
 * @mr: A memory region to bind the memory window to.
 * @addr: The address where the memory window should begin.
 * @length: The length of the memory window, in bytes.
 * @mw_access_flags: Access flags from enum ib_access_flags for the window.
 *
 * This struct contains the shared parameters for type 1 and type 2
 * memory window bind operations.
 */
struct ib_mw_bind_info {
	struct ib_mr   *mr;
	u64		addr;
	u64		length;
	int		mw_access_flags;
};

struct ib_send_wr {
	struct ib_send_wr      *next;
	u64			wr_id;
	struct ib_sge	       *sg_list;
	int			num_sge;
	enum ib_wr_opcode	opcode;
	int			send_flags;
	union {
		__be32		imm_data;
		u32		invalidate_rkey;
	} ex;
	union {
		struct {
			u64	remote_addr;
			u32	rkey;
		} rdma;
		struct {
			u64	remote_addr;
			u64	compare_add;
			u64	swap;
			u64	compare_add_mask;
			u64	swap_mask;
			u32	rkey;
		} atomic;
		struct {
			struct ib_ah *ah;
			void   *header;
			int	hlen;
			int	mss;
			u32	remote_qpn;
			u32	remote_qkey;
			u16	pkey_index; /* valid for GSI only */
			u8	port_num;   /* valid for DR SMPs on switch only */
		} ud;
		struct {
			u64				iova_start;
			struct ib_fast_reg_page_list   *page_list;
			unsigned int			page_shift;
			unsigned int			page_list_len;
			u32				length;
			int				access_flags;
			u32				rkey;
		} fast_reg;
		struct {
			struct ib_mw            *mw;
			/* The new rkey for the memory window. */
			u32                      rkey;
			struct ib_mw_bind_info   bind_info;
		} bind_mw;
		struct {
			struct ib_sig_attrs    *sig_attrs;
			struct ib_mr	       *sig_mr;
			int			access_flags;
			struct ib_sge	       *prot;
		} sig_handover;
	} wr;
	u32			xrc_remote_srq_num;	/* XRC TGT QPs only */
};

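/*
 * Example: a minimal sketch of initializing an RDMA WRITE work request
 * through the rdma member of the wr union.  The SGE, remote address
 * and rkey are placeholders supplied by the caller; memset() assumes
 * <linux/string.h> is available.
 */
static inline void example_init_rdma_write(struct ib_send_wr *wr,
					   struct ib_sge *sge,
					   u64 remote_addr, u32 rkey)
{
	memset(wr, 0, sizeof(*wr));

	wr->wr_id		= 1;
	wr->sg_list		= sge;
	wr->num_sge		= 1;
	wr->opcode		= IB_WR_RDMA_WRITE;
	wr->send_flags		= IB_SEND_SIGNALED;
	wr->wr.rdma.remote_addr	= remote_addr;
	wr->wr.rdma.rkey	= rkey;
}
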
struct ib_recv_wr {
	struct ib_recv_wr      *next;
	u64			wr_id;
	struct ib_sge	       *sg_list;
	int			num_sge;
};

enum ib_access_flags {
	IB_ACCESS_LOCAL_WRITE	= 1,
	IB_ACCESS_REMOTE_WRITE	= (1<<1),
	IB_ACCESS_REMOTE_READ	= (1<<2),
	IB_ACCESS_REMOTE_ATOMIC	= (1<<3),
	IB_ACCESS_MW_BIND	= (1<<4),
	IB_ZERO_BASED		= (1<<5)
};

struct ib_phys_buf {
	u64	addr;
	u64	size;
};

struct ib_mr_attr {
	struct ib_pd	*pd;
	u64		device_virt_addr;
	u64		size;
	int		mr_access_flags;
	u32		lkey;
	u32		rkey;
};

enum ib_mr_rereg_flags {
	IB_MR_REREG_TRANS	= 1,
	IB_MR_REREG_PD		= (1<<1),
	IB_MR_REREG_ACCESS	= (1<<2),
	IB_MR_REREG_SUPPORTED	= ((IB_MR_REREG_ACCESS << 1) - 1)
};

/**
 * struct ib_mw_bind - Parameters for a type 1 memory window bind operation.
 * @wr_id:      Work request id.
 * @send_flags: Flags from ib_send_flags enum.
 * @bind_info:  More parameters of the bind operation.
 */
struct ib_mw_bind {
	u64                    wr_id;
	int                    send_flags;
	struct ib_mw_bind_info bind_info;
};

struct ib_fmr_attr {
	int	max_pages;
	int	max_maps;
	u8	page_shift;
};

struct ib_ucontext {
	struct ib_device       *device;
	struct list_head	pd_list;
	struct list_head	mr_list;
	struct list_head	mw_list;
	struct list_head	cq_list;
	struct list_head	qp_list;
	struct list_head	srq_list;
	struct list_head	ah_list;
	struct list_head	xrcd_list;
	struct list_head	rule_list;
	int			closing;
};

struct ib_uobject {
	u64			user_handle;	/* handle given to us by userspace */
	struct ib_ucontext     *context;	/* associated user context */
	void		       *object;		/* containing object */
	struct list_head	list;		/* link to context's list */
	int			id;		/* index into kernel idr */
	struct kref		ref;
	struct rw_semaphore	mutex;		/* protects .live */
	int			live;
};

struct ib_udata {
	const void __user *inbuf;
	void __user *outbuf;
	size_t       inlen;
	size_t       outlen;
};

struct ib_pd {
	struct ib_device       *device;
	struct ib_uobject      *uobject;
	atomic_t		usecnt; /* count all resources */
};

struct ib_xrcd {
	struct ib_device       *device;
	atomic_t		usecnt; /* count all exposed resources */
	struct inode	       *inode;

	struct mutex		tgt_qp_mutex;
	struct list_head	tgt_qp_list;
};

struct ib_ah {
	struct ib_device	*device;
	struct ib_pd		*pd;
	struct ib_uobject	*uobject;
};

typedef void (*ib_comp_handler)(struct ib_cq *cq, void *cq_context);

struct ib_cq {
	struct ib_device       *device;
	struct ib_uobject      *uobject;
	ib_comp_handler		comp_handler;
	void                  (*event_handler)(struct ib_event *, void *);
	void                   *cq_context;
	int			cqe;
	atomic_t		usecnt; /* count number of work queues */
};

struct ib_srq {
	struct ib_device       *device;
	struct ib_pd	       *pd;
	struct ib_uobject      *uobject;
	void		      (*event_handler)(struct ib_event *, void *);
	void		       *srq_context;
	enum ib_srq_type	srq_type;
	atomic_t		usecnt;

	union {
		struct {
			struct ib_xrcd *xrcd;
			struct ib_cq   *cq;
			u32		srq_num;
		} xrc;
	} ext;
};

struct ib_qp {
	struct ib_device       *device;
	struct ib_pd	       *pd;
	struct ib_cq	       *send_cq;
	struct ib_cq	       *recv_cq;
	struct ib_srq	       *srq;
	struct ib_xrcd	       *xrcd; /* XRC TGT QPs only */
	struct list_head	xrcd_list;
	/* count times opened, mcast attaches, flow attaches */
	atomic_t		usecnt;
	struct list_head	open_list;
	struct ib_qp           *real_qp;
	struct ib_uobject      *uobject;
	void                  (*event_handler)(struct ib_event *, void *);
	void		       *qp_context;
	u32			qp_num;
	enum ib_qp_type		qp_type;
};

struct ib_mr {
	struct ib_device  *device;
	struct ib_pd	  *pd;
	struct ib_uobject *uobject;
	u32		   lkey;
	u32		   rkey;
	atomic_t	   usecnt; /* count number of MWs */
};

struct ib_mw {
	struct ib_device	*device;
	struct ib_pd		*pd;
	struct ib_uobject	*uobject;
	u32			rkey;
	enum ib_mw_type         type;
};

struct ib_fmr {
	struct ib_device	*device;
	struct ib_pd		*pd;
	struct list_head	list;
	u32			lkey;
	u32			rkey;
};

/* Supported steering options */
enum ib_flow_attr_type {
	/* steering according to rule specifications */
	IB_FLOW_ATTR_NORMAL		= 0x0,
	/* default unicast and multicast rule -
	 * receive all Ethernet traffic which isn't steered to any QP
	 */
	IB_FLOW_ATTR_ALL_DEFAULT	= 0x1,
	/* default multicast rule -
	 * receive all Ethernet multicast traffic which isn't steered to any QP
	 */
	IB_FLOW_ATTR_MC_DEFAULT		= 0x2,
	/* sniffer rule - receive all port traffic */
	IB_FLOW_ATTR_SNIFFER		= 0x3
};

/* Supported steering header types */
enum ib_flow_spec_type {
	/* L2 headers */
	IB_FLOW_SPEC_ETH	= 0x20,
	IB_FLOW_SPEC_IB		= 0x22,
	/* L3 header */
	IB_FLOW_SPEC_IPV4	= 0x30,
	/* L4 headers */
	IB_FLOW_SPEC_TCP	= 0x40,
	IB_FLOW_SPEC_UDP	= 0x41
};
#define IB_FLOW_SPEC_LAYER_MASK	0xF0
#define IB_FLOW_SPEC_SUPPORT_LAYERS 4

/* Flow steering rule priority is set according to its domain.
 * Lower domain value means higher priority.
 */
enum ib_flow_domain {
	IB_FLOW_DOMAIN_USER,
	IB_FLOW_DOMAIN_ETHTOOL,
	IB_FLOW_DOMAIN_RFS,
	IB_FLOW_DOMAIN_NIC,
	IB_FLOW_DOMAIN_NUM /* Must be last */
};

struct ib_flow_eth_filter {
	u8	dst_mac[6];
	u8	src_mac[6];
	__be16	ether_type;
	__be16	vlan_tag;
};

struct ib_flow_spec_eth {
	enum ib_flow_spec_type	  type;
	u16			  size;
	struct ib_flow_eth_filter val;
	struct ib_flow_eth_filter mask;
};

struct ib_flow_ib_filter {
	__be16	dlid;
	__u8	sl;
};

struct ib_flow_spec_ib {
	enum ib_flow_spec_type	 type;
	u16			 size;
	struct ib_flow_ib_filter val;
	struct ib_flow_ib_filter mask;
};

struct ib_flow_ipv4_filter {
	__be32	src_ip;
	__be32	dst_ip;
};

struct ib_flow_spec_ipv4 {
	enum ib_flow_spec_type	   type;
	u16			   size;
	struct ib_flow_ipv4_filter val;
	struct ib_flow_ipv4_filter mask;
};

struct ib_flow_tcp_udp_filter {
	__be16	dst_port;
	__be16	src_port;
};

struct ib_flow_spec_tcp_udp {
	enum ib_flow_spec_type	      type;
	u16			      size;
	struct ib_flow_tcp_udp_filter val;
	struct ib_flow_tcp_udp_filter mask;
};

union ib_flow_spec {
	struct {
		enum ib_flow_spec_type	type;
		u16			size;
	};
	struct ib_flow_spec_eth		eth;
	struct ib_flow_spec_ib		ib;
	struct ib_flow_spec_ipv4        ipv4;
	struct ib_flow_spec_tcp_udp	tcp_udp;
};

struct ib_flow_attr {
	enum ib_flow_attr_type type;
	u16	     size;
	u16	     priority;
	u32	     flags;
	u8	     num_of_specs;
	u8	     port;
	/* The following are the optional layers according to the user's
	 * request (an example of appending one spec follows below):
	 * struct ib_flow_spec_xxx
	 * struct ib_flow_spec_yyy
	 */
};

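/*
 * Example: a minimal sketch of building an IB_FLOW_ATTR_NORMAL rule
 * with a single Ethernet spec appended directly after the header, as
 * the comment in ib_flow_attr describes.  kzalloc() assumes
 * <linux/slab.h>; the exact-match MAC mask is an illustrative choice.
 */
static inline struct ib_flow_attr *example_build_eth_flow(const u8 *dst_mac,
							  u8 port)
{
	struct ib_flow_attr *attr;
	struct ib_flow_spec_eth *eth;

	attr = kzalloc(sizeof(*attr) + sizeof(*eth), GFP_KERNEL);
	if (!attr)
		return NULL;

	attr->type	   = IB_FLOW_ATTR_NORMAL;
	attr->size	   = sizeof(*attr) + sizeof(*eth);
	attr->num_of_specs = 1;
	attr->port	   = port;

	eth = (struct ib_flow_spec_eth *)(attr + 1);
	eth->type = IB_FLOW_SPEC_ETH;
	eth->size = sizeof(*eth);
	memcpy(eth->val.dst_mac, dst_mac, ETH_ALEN);
	memset(eth->mask.dst_mac, 0xff, ETH_ALEN);

	return attr;
}
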
struct ib_flow {
	struct ib_qp		*qp;
	struct ib_uobject	*uobject;
};

struct ib_mad;
struct ib_grh;

enum ib_process_mad_flags {
	IB_MAD_IGNORE_MKEY	= 1,
	IB_MAD_IGNORE_BKEY	= 2,
	IB_MAD_IGNORE_ALL	= IB_MAD_IGNORE_MKEY | IB_MAD_IGNORE_BKEY
};

enum ib_mad_result {
	IB_MAD_RESULT_FAILURE  = 0,      /* (!SUCCESS is the important flag) */
	IB_MAD_RESULT_SUCCESS  = 1 << 0, /* MAD was successfully processed   */
	IB_MAD_RESULT_REPLY    = 1 << 1, /* Reply packet needs to be sent    */
	IB_MAD_RESULT_CONSUMED = 1 << 2  /* Packet consumed: stop processing */
};

#define IB_DEVICE_NAME_MAX 64

struct ib_cache {
	rwlock_t                lock;
	struct ib_event_handler event_handler;
	struct ib_pkey_cache  **pkey_cache;
	struct ib_gid_cache   **gid_cache;
	u8                     *lmc_cache;
};

struct ib_dma_mapping_ops {
	int		(*mapping_error)(struct ib_device *dev,
					 u64 dma_addr);
	u64		(*map_single)(struct ib_device *dev,
				      void *ptr, size_t size,
				      enum dma_data_direction direction);
	void		(*unmap_single)(struct ib_device *dev,
					u64 addr, size_t size,
					enum dma_data_direction direction);
	u64		(*map_page)(struct ib_device *dev,
				    struct page *page, unsigned long offset,
				    size_t size,
				    enum dma_data_direction direction);
	void		(*unmap_page)(struct ib_device *dev,
				      u64 addr, size_t size,
				      enum dma_data_direction direction);
	int		(*map_sg)(struct ib_device *dev,
				  struct scatterlist *sg, int nents,
				  enum dma_data_direction direction);
	void		(*unmap_sg)(struct ib_device *dev,
				    struct scatterlist *sg, int nents,
				    enum dma_data_direction direction);
	void		(*sync_single_for_cpu)(struct ib_device *dev,
					       u64 dma_handle,
					       size_t size,
					       enum dma_data_direction dir);
	void		(*sync_single_for_device)(struct ib_device *dev,
						  u64 dma_handle,
						  size_t size,
						  enum dma_data_direction dir);
	void		*(*alloc_coherent)(struct ib_device *dev,
					   size_t size,
					   u64 *dma_handle,
					   gfp_t flag);
	void		(*free_coherent)(struct ib_device *dev,
					 size_t size, void *cpu_addr,
					 u64 dma_handle);
};

struct iw_cm_verbs;

struct ib_device {
	struct device                *dma_device;

	char                          name[IB_DEVICE_NAME_MAX];

	struct list_head              event_handler_list;
	spinlock_t                    event_handler_lock;

	spinlock_t                    client_data_lock;
	struct list_head              core_list;
	struct list_head              client_data_list;

	struct ib_cache               cache;
	int                          *pkey_tbl_len;
	int                          *gid_tbl_len;

	int			      num_comp_vectors;

	struct iw_cm_verbs	     *iwcm;

	int		           (*get_protocol_stats)(struct ib_device *device,
							 union rdma_protocol_stats *stats);
	int		           (*query_device)(struct ib_device *device,
						   struct ib_device_attr *device_attr);
	int		           (*query_port)(struct ib_device *device,
						 u8 port_num,
						 struct ib_port_attr *port_attr);
	enum rdma_link_layer	   (*get_link_layer)(struct ib_device *device,
						     u8 port_num);
	int		           (*query_gid)(struct ib_device *device,
						u8 port_num, int index,
						union ib_gid *gid);
	int		           (*query_pkey)(struct ib_device *device,
						 u8 port_num, u16 index, u16 *pkey);
	int		           (*modify_device)(struct ib_device *device,
						    int device_modify_mask,
						    struct ib_device_modify *device_modify);
	int		           (*modify_port)(struct ib_device *device,
						  u8 port_num, int port_modify_mask,
						  struct ib_port_modify *port_modify);
	struct ib_ucontext *       (*alloc_ucontext)(struct ib_device *device,
						     struct ib_udata *udata);
	int                        (*dealloc_ucontext)(struct ib_ucontext *context);
	int                        (*mmap)(struct ib_ucontext *context,
					   struct vm_area_struct *vma);
	struct ib_pd *             (*alloc_pd)(struct ib_device *device,
					       struct ib_ucontext *context,
					       struct ib_udata *udata);
	int                        (*dealloc_pd)(struct ib_pd *pd);
	struct ib_ah *             (*create_ah)(struct ib_pd *pd,
						struct ib_ah_attr *ah_attr);
	int                        (*modify_ah)(struct ib_ah *ah,
						struct ib_ah_attr *ah_attr);
	int                        (*query_ah)(struct ib_ah *ah,
					       struct ib_ah_attr *ah_attr);
	int                        (*destroy_ah)(struct ib_ah *ah);
	struct ib_srq *            (*create_srq)(struct ib_pd *pd,
						 struct ib_srq_init_attr *srq_init_attr,
						 struct ib_udata *udata);
	int                        (*modify_srq)(struct ib_srq *srq,
						 struct ib_srq_attr *srq_attr,
						 enum ib_srq_attr_mask srq_attr_mask,
						 struct ib_udata *udata);
	int                        (*query_srq)(struct ib_srq *srq,
						struct ib_srq_attr *srq_attr);
	int                        (*destroy_srq)(struct ib_srq *srq);
	int                        (*post_srq_recv)(struct ib_srq *srq,
						    struct ib_recv_wr *recv_wr,
						    struct ib_recv_wr **bad_recv_wr);
	struct ib_qp *             (*create_qp)(struct ib_pd *pd,
						struct ib_qp_init_attr *qp_init_attr,
						struct ib_udata *udata);
	int                        (*modify_qp)(struct ib_qp *qp,
						struct ib_qp_attr *qp_attr,
						int qp_attr_mask,
						struct ib_udata *udata);
	int                        (*query_qp)(struct ib_qp *qp,
					       struct ib_qp_attr *qp_attr,
					       int qp_attr_mask,
					       struct ib_qp_init_attr *qp_init_attr);
	int                        (*destroy_qp)(struct ib_qp *qp);
	int                        (*post_send)(struct ib_qp *qp,
						struct ib_send_wr *send_wr,
						struct ib_send_wr **bad_send_wr);
	int                        (*post_recv)(struct ib_qp *qp,
						struct ib_recv_wr *recv_wr,
						struct ib_recv_wr **bad_recv_wr);
	struct ib_cq *             (*create_cq)(struct ib_device *device, int cqe,
						int comp_vector,
						struct ib_ucontext *context,
						struct ib_udata *udata);
	int                        (*modify_cq)(struct ib_cq *cq, u16 cq_count,
						u16 cq_period);
	int                        (*destroy_cq)(struct ib_cq *cq);
	int                        (*resize_cq)(struct ib_cq *cq, int cqe,
						struct ib_udata *udata);
	int                        (*poll_cq)(struct ib_cq *cq, int num_entries,
					      struct ib_wc *wc);
	int                        (*peek_cq)(struct ib_cq *cq, int wc_cnt);
	int                        (*req_notify_cq)(struct ib_cq *cq,
						    enum ib_cq_notify_flags flags);
	int                        (*req_ncomp_notif)(struct ib_cq *cq,
						      int wc_cnt);
	struct ib_mr *             (*get_dma_mr)(struct ib_pd *pd,
						 int mr_access_flags);
	struct ib_mr *             (*reg_phys_mr)(struct ib_pd *pd,
						  struct ib_phys_buf *phys_buf_array,
						  int num_phys_buf,
						  int mr_access_flags,
						  u64 *iova_start);
	struct ib_mr *             (*reg_user_mr)(struct ib_pd *pd,
						  u64 start, u64 length,
						  u64 virt_addr,
						  int mr_access_flags,
						  struct ib_udata *udata);
	int			   (*rereg_user_mr)(struct ib_mr *mr,
						    int flags,
						    u64 start, u64 length,
						    u64 virt_addr,
						    int mr_access_flags,
						    struct ib_pd *pd,
						    struct ib_udata *udata);
	int                        (*query_mr)(struct ib_mr *mr,
					       struct ib_mr_attr *mr_attr);
	int                        (*dereg_mr)(struct ib_mr *mr);
	int                        (*destroy_mr)(struct ib_mr *mr);
	struct ib_mr *		   (*create_mr)(struct ib_pd *pd,
						struct ib_mr_init_attr *mr_init_attr);
	struct ib_mr *		   (*alloc_fast_reg_mr)(struct ib_pd *pd,
							int max_page_list_len);
	struct ib_fast_reg_page_list * (*alloc_fast_reg_page_list)(struct ib_device *device,
								   int page_list_len);
	void			   (*free_fast_reg_page_list)(struct ib_fast_reg_page_list *page_list);
	int                        (*rereg_phys_mr)(struct ib_mr *mr,
						    int mr_rereg_mask,
						    struct ib_pd *pd,
						    struct ib_phys_buf *phys_buf_array,
						    int num_phys_buf,
						    int mr_access_flags,
						    u64 *iova_start);
	struct ib_mw *             (*alloc_mw)(struct ib_pd *pd,
					       enum ib_mw_type type);
	int                        (*bind_mw)(struct ib_qp *qp,
					      struct ib_mw *mw,
					      struct ib_mw_bind *mw_bind);
	int                        (*dealloc_mw)(struct ib_mw *mw);
	struct ib_fmr *	           (*alloc_fmr)(struct ib_pd *pd,
						int mr_access_flags,
						struct ib_fmr_attr *fmr_attr);
	int		           (*map_phys_fmr)(struct ib_fmr *fmr,
						   u64 *page_list, int list_len,
						   u64 iova);
	int		           (*unmap_fmr)(struct list_head *fmr_list);
	int		           (*dealloc_fmr)(struct ib_fmr *fmr);
	int                        (*attach_mcast)(struct ib_qp *qp,
						   union ib_gid *gid,
						   u16 lid);
	int                        (*detach_mcast)(struct ib_qp *qp,
						   union ib_gid *gid,
						   u16 lid);
	int                        (*process_mad)(struct ib_device *device,
						  int process_mad_flags,
						  u8 port_num,
						  struct ib_wc *in_wc,
						  struct ib_grh *in_grh,
						  struct ib_mad *in_mad,
						  struct ib_mad *out_mad);
	struct ib_xrcd *	   (*alloc_xrcd)(struct ib_device *device,
						 struct ib_ucontext *ucontext,
						 struct ib_udata *udata);
	int			   (*dealloc_xrcd)(struct ib_xrcd *xrcd);
	struct ib_flow *	   (*create_flow)(struct ib_qp *qp,
						  struct ib_flow_attr *flow_attr,
						  int domain);
	int			   (*destroy_flow)(struct ib_flow *flow_id);
	int			   (*check_mr_status)(struct ib_mr *mr, u32 check_mask,
						      struct ib_mr_status *mr_status);

	struct ib_dma_mapping_ops   *dma_ops;

	struct module               *owner;
	struct device                dev;
	struct kobject              *ports_parent;
	struct list_head             port_list;

	enum {
		IB_DEV_UNINITIALIZED,
		IB_DEV_REGISTERED,
		IB_DEV_UNREGISTERED
	} reg_state;

	int			     uverbs_abi_ver;
	u64			     uverbs_cmd_mask;
	u64			     uverbs_ex_cmd_mask;

	char			     node_desc[64];
	__be64			     node_guid;
	u32			     local_dma_lkey;
	u8                           node_type;
	u8                           phys_port_cnt;
};

struct ib_client {
	char  *name;
	void (*add)   (struct ib_device *);
	void (*remove)(struct ib_device *);

	struct list_head list;
};

struct ib_device *ib_alloc_device(size_t size);
void ib_dealloc_device(struct ib_device *device);

int ib_register_device(struct ib_device *device,
		       int (*port_callback)(struct ib_device *,
					    u8, struct kobject *));
void ib_unregister_device(struct ib_device *device);

int ib_register_client   (struct ib_client *client);
void ib_unregister_client(struct ib_client *client);

void *ib_get_client_data(struct ib_device *device, struct ib_client *client);
void  ib_set_client_data(struct ib_device *device, struct ib_client *client,
			 void *data);

static inline int ib_copy_from_udata(void *dest, struct ib_udata *udata, size_t len)
{
	return copy_from_user(dest, udata->inbuf, len) ? -EFAULT : 0;
}

static inline int ib_copy_to_udata(struct ib_udata *udata, void *src, size_t len)
{
	return copy_to_user(udata->outbuf, src, len) ? -EFAULT : 0;
}

Roland Dreier8a518662006-02-13 12:48:12 -08001672/**
1673 * ib_modify_qp_is_ok - Check that the supplied attribute mask
1674 * contains all required attributes and no attributes not allowed for
1675 * the given QP state transition.
1676 * @cur_state: Current QP state
1677 * @next_state: Next QP state
1678 * @type: QP type
1679 * @mask: Mask of supplied QP attributes
Matan Barakdd5f03b2013-12-12 18:03:11 +02001680 * @ll : link layer of port
Roland Dreier8a518662006-02-13 12:48:12 -08001681 *
1682 * This function is a helper function that a low-level driver's
1683 * modify_qp method can use to validate the consumer's input. It
1684 * checks that cur_state and next_state are valid QP states, that a
1685 * transition from cur_state to next_state is allowed by the IB spec,
1686 * and that the attribute mask supplied is allowed for the transition.
1687 */
1688int ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
Matan Barakdd5f03b2013-12-12 18:03:11 +02001689 enum ib_qp_type type, enum ib_qp_attr_mask mask,
1690 enum rdma_link_layer ll);
Roland Dreier8a518662006-02-13 12:48:12 -08001691
Linus Torvalds1da177e2005-04-16 15:20:36 -07001692int ib_register_event_handler (struct ib_event_handler *event_handler);
1693int ib_unregister_event_handler(struct ib_event_handler *event_handler);
1694void ib_dispatch_event(struct ib_event *event);
1695
1696int ib_query_device(struct ib_device *device,
1697 struct ib_device_attr *device_attr);
1698
1699int ib_query_port(struct ib_device *device,
1700 u8 port_num, struct ib_port_attr *port_attr);
1701
Eli Cohena3f5ada2010-09-27 17:51:10 -07001702enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device,
1703 u8 port_num);
1704
Linus Torvalds1da177e2005-04-16 15:20:36 -07001705int ib_query_gid(struct ib_device *device,
1706 u8 port_num, int index, union ib_gid *gid);
1707
1708int ib_query_pkey(struct ib_device *device,
1709 u8 port_num, u16 index, u16 *pkey);
1710
1711int ib_modify_device(struct ib_device *device,
1712 int device_modify_mask,
1713 struct ib_device_modify *device_modify);
1714
1715int ib_modify_port(struct ib_device *device,
1716 u8 port_num, int port_modify_mask,
1717 struct ib_port_modify *port_modify);
1718
Yosef Etigin5eb620c2007-05-14 07:26:51 +03001719int ib_find_gid(struct ib_device *device, union ib_gid *gid,
1720 u8 *port_num, u16 *index);
1721
1722int ib_find_pkey(struct ib_device *device,
1723 u8 port_num, u16 pkey, u16 *index);
1724
Linus Torvalds1da177e2005-04-16 15:20:36 -07001725/**
1726 * ib_alloc_pd - Allocates an unused protection domain.
1727 * @device: The device on which to allocate the protection domain.
1728 *
1729 * A protection domain object provides an association between QPs, shared
1730 * receive queues, address handles, memory regions, and memory windows.
1731 */
1732struct ib_pd *ib_alloc_pd(struct ib_device *device);
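
/*
 * Example: allocating and releasing a protection domain.  An illustrative
 * sketch only; "device" is a placeholder for a previously obtained
 * ib_device, and error handling is abbreviated.  ib_alloc_pd() returns
 * an ERR_PTR() value on failure.
 *
 *      struct ib_pd *pd = ib_alloc_pd(device);
 *      if (IS_ERR(pd))
 *              return PTR_ERR(pd);
 *      ...
 *      ib_dealloc_pd(pd);
 */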

/**
 * ib_dealloc_pd - Deallocates a protection domain.
 * @pd: The protection domain to deallocate.
 */
int ib_dealloc_pd(struct ib_pd *pd);

/**
 * ib_create_ah - Creates an address handle for the given address vector.
 * @pd: The protection domain associated with the address handle.
 * @ah_attr: The attributes of the address vector.
 *
 * The address handle is used to reference a local or global destination
 * in all UD QP post sends.
 */
struct ib_ah *ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr);

/**
 * ib_init_ah_from_wc - Initializes address handle attributes from a
 *   work completion.
 * @device: Device on which the received message arrived.
 * @port_num: Port on which the received message arrived.
 * @wc: Work completion associated with the received message.
 * @grh: References the received global route header.  This parameter is
 *   ignored unless the work completion indicates that the GRH is valid.
 * @ah_attr: Returned attributes that can be used when creating an address
 *   handle for replying to the message.
 */
int ib_init_ah_from_wc(struct ib_device *device, u8 port_num, struct ib_wc *wc,
                       struct ib_grh *grh, struct ib_ah_attr *ah_attr);

/**
 * ib_create_ah_from_wc - Creates an address handle associated with the
 *   sender of the specified work completion.
 * @pd: The protection domain associated with the address handle.
 * @wc: Work completion information associated with a received message.
 * @grh: References the received global route header.  This parameter is
 *   ignored unless the work completion indicates that the GRH is valid.
 * @port_num: The outbound port number to associate with the address.
 *
 * The address handle is used to reference a local or global destination
 * in all UD QP post sends.
 */
struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, struct ib_wc *wc,
                                   struct ib_grh *grh, u8 port_num);

/**
 * ib_modify_ah - Modifies the address vector associated with an address
 *   handle.
 * @ah: The address handle to modify.
 * @ah_attr: The new address vector attributes to associate with the
 *   address handle.
 */
int ib_modify_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr);

/**
 * ib_query_ah - Queries the address vector associated with an address
 *   handle.
 * @ah: The address handle to query.
 * @ah_attr: The address vector attributes associated with the address
 *   handle.
 */
int ib_query_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr);

/**
 * ib_destroy_ah - Destroys an address handle.
 * @ah: The address handle to destroy.
 */
int ib_destroy_ah(struct ib_ah *ah);

/**
 * ib_create_srq - Creates an SRQ associated with the specified protection
 *   domain.
 * @pd: The protection domain associated with the SRQ.
 * @srq_init_attr: A list of initial attributes required to create the
 *   SRQ.  If SRQ creation succeeds, then the attributes are updated to
 *   the actual capabilities of the created SRQ.
 *
 * srq_attr->max_wr and srq_attr->max_sge are read to determine the
 * requested size of the SRQ, and set to the actual values allocated
 * on return.  If ib_create_srq() succeeds, then max_wr and max_sge
 * will always be at least as large as the requested values.
 */
struct ib_srq *ib_create_srq(struct ib_pd *pd,
                             struct ib_srq_init_attr *srq_init_attr);
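
/*
 * Example: creating an SRQ.  An illustrative sketch only; the event
 * handler and queue sizes are placeholders.
 *
 *      struct ib_srq_init_attr srq_init_attr = {
 *              .event_handler = my_srq_event_handler,
 *              .attr = { .max_wr = 128, .max_sge = 1 },
 *      };
 *      struct ib_srq *srq = ib_create_srq(pd, &srq_init_attr);
 *      if (IS_ERR(srq))
 *              return PTR_ERR(srq);
 */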

/**
 * ib_modify_srq - Modifies the attributes for the specified SRQ.
 * @srq: The SRQ to modify.
 * @srq_attr: On input, specifies the SRQ attributes to modify.  On output,
 *   the current values of selected SRQ attributes are returned.
 * @srq_attr_mask: A bit-mask used to specify which attributes of the SRQ
 *   are being modified.
 *
 * The mask may contain IB_SRQ_MAX_WR to resize the SRQ and/or
 * IB_SRQ_LIMIT to set the SRQ's limit and request notification when
 * the number of receives queued drops below the limit.
 */
int ib_modify_srq(struct ib_srq *srq,
                  struct ib_srq_attr *srq_attr,
                  enum ib_srq_attr_mask srq_attr_mask);

/**
 * ib_query_srq - Returns the attribute list and current values for the
 *   specified SRQ.
 * @srq: The SRQ to query.
 * @srq_attr: The attributes of the specified SRQ.
 */
int ib_query_srq(struct ib_srq *srq,
                 struct ib_srq_attr *srq_attr);

/**
 * ib_destroy_srq - Destroys the specified SRQ.
 * @srq: The SRQ to destroy.
 */
int ib_destroy_srq(struct ib_srq *srq);

/**
 * ib_post_srq_recv - Posts a list of work requests to the specified SRQ.
 * @srq: The SRQ to post the work request on.
 * @recv_wr: A list of work requests to post on the receive queue.
 * @bad_recv_wr: On an immediate failure, this parameter will reference
 *   the work request that failed to be posted on the SRQ.
 */
static inline int ib_post_srq_recv(struct ib_srq *srq,
                                   struct ib_recv_wr *recv_wr,
                                   struct ib_recv_wr **bad_recv_wr)
{
        return srq->device->post_srq_recv(srq, recv_wr, bad_recv_wr);
}

/**
 * ib_create_qp - Creates a QP associated with the specified protection
 *   domain.
 * @pd: The protection domain associated with the QP.
 * @qp_init_attr: A list of initial attributes required to create the
 *   QP.  If QP creation succeeds, then the attributes are updated to
 *   the actual capabilities of the created QP.
 */
struct ib_qp *ib_create_qp(struct ib_pd *pd,
                           struct ib_qp_init_attr *qp_init_attr);
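
/*
 * Example: creating an RC QP.  An illustrative sketch only; the CQs and
 * capacities are placeholders chosen for the example.
 *
 *      struct ib_qp_init_attr init_attr = {
 *              .send_cq     = send_cq,
 *              .recv_cq     = recv_cq,
 *              .cap         = { .max_send_wr  = 64, .max_recv_wr  = 64,
 *                               .max_send_sge = 1,  .max_recv_sge = 1 },
 *              .sq_sig_type = IB_SIGNAL_REQ_WR,
 *              .qp_type     = IB_QPT_RC,
 *      };
 *      struct ib_qp *qp = ib_create_qp(pd, &init_attr);
 *      if (IS_ERR(qp))
 *              return PTR_ERR(qp);
 */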

/**
 * ib_modify_qp - Modifies the attributes for the specified QP and then
 *   transitions the QP to the given state.
 * @qp: The QP to modify.
 * @qp_attr: On input, specifies the QP attributes to modify.  On output,
 *   the current values of selected QP attributes are returned.
 * @qp_attr_mask: A bit-mask used to specify which attributes of the QP
 *   are being modified.
 */
int ib_modify_qp(struct ib_qp *qp,
                 struct ib_qp_attr *qp_attr,
                 int qp_attr_mask);
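
/*
 * Example: the RESET->INIT transition.  An illustrative sketch only; the
 * P_Key index, port number and access flags are placeholders, and later
 * transitions (INIT->RTR->RTS) follow the same pattern with their own
 * required attribute masks.
 *
 *      struct ib_qp_attr attr = {
 *              .qp_state        = IB_QPS_INIT,
 *              .pkey_index      = 0,
 *              .port_num        = 1,
 *              .qp_access_flags = IB_ACCESS_REMOTE_WRITE,
 *      };
 *      int ret = ib_modify_qp(qp, &attr,
 *                             IB_QP_STATE | IB_QP_PKEY_INDEX |
 *                             IB_QP_PORT | IB_QP_ACCESS_FLAGS);
 */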

/**
 * ib_query_qp - Returns the attribute list and current values for the
 *   specified QP.
 * @qp: The QP to query.
 * @qp_attr: The attributes of the specified QP.
 * @qp_attr_mask: A bit-mask used to select specific attributes to query.
 * @qp_init_attr: Additional attributes of the selected QP.
 *
 * The qp_attr_mask may be used to limit the query to gathering only the
 * selected attributes.
 */
int ib_query_qp(struct ib_qp *qp,
                struct ib_qp_attr *qp_attr,
                int qp_attr_mask,
                struct ib_qp_init_attr *qp_init_attr);

/**
 * ib_destroy_qp - Destroys the specified QP.
 * @qp: The QP to destroy.
 */
int ib_destroy_qp(struct ib_qp *qp);

/**
 * ib_open_qp - Obtain a reference to an existing sharable QP.
 * @xrcd: XRC domain
 * @qp_open_attr: Attributes identifying the QP to open.
 *
 * Returns a reference to a sharable QP.
 */
struct ib_qp *ib_open_qp(struct ib_xrcd *xrcd,
                         struct ib_qp_open_attr *qp_open_attr);

/**
 * ib_close_qp - Release an external reference to a QP.
 * @qp: The QP handle to release
 *
 * The opened QP handle is released by the caller.  The underlying
 * shared QP is not destroyed until all internal references are released.
 */
int ib_close_qp(struct ib_qp *qp);

/**
 * ib_post_send - Posts a list of work requests to the send queue of
 *   the specified QP.
 * @qp: The QP to post the work request on.
 * @send_wr: A list of work requests to post on the send queue.
 * @bad_send_wr: On an immediate failure, this parameter will reference
 *   the work request that failed to be posted on the QP.
 *
 * While IBA Vol. 1 section 11.4.1.1 specifies that if an immediate
 * error is returned, the QP state shall not be affected,
 * ib_post_send() will return an immediate error after queueing any
 * earlier work requests in the list.
 */
static inline int ib_post_send(struct ib_qp *qp,
                               struct ib_send_wr *send_wr,
                               struct ib_send_wr **bad_send_wr)
{
        return qp->device->post_send(qp, send_wr, bad_send_wr);
}
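
/*
 * Example: posting a single signaled send.  An illustrative sketch only;
 * "dma_addr", "len", "mr" and MY_SEND_WRID are placeholders, with the
 * buffer assumed to have been mapped via the ib_dma_*() helpers.
 *
 *      struct ib_sge sge = {
 *              .addr   = dma_addr,
 *              .length = len,
 *              .lkey   = mr->lkey,
 *      };
 *      struct ib_send_wr wr = {
 *              .wr_id      = MY_SEND_WRID,
 *              .sg_list    = &sge,
 *              .num_sge    = 1,
 *              .opcode     = IB_WR_SEND,
 *              .send_flags = IB_SEND_SIGNALED,
 *      }, *bad_wr;
 *      int ret = ib_post_send(qp, &wr, &bad_wr);
 */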

/**
 * ib_post_recv - Posts a list of work requests to the receive queue of
 *   the specified QP.
 * @qp: The QP to post the work request on.
 * @recv_wr: A list of work requests to post on the receive queue.
 * @bad_recv_wr: On an immediate failure, this parameter will reference
 *   the work request that failed to be posted on the QP.
 */
static inline int ib_post_recv(struct ib_qp *qp,
                               struct ib_recv_wr *recv_wr,
                               struct ib_recv_wr **bad_recv_wr)
{
        return qp->device->post_recv(qp, recv_wr, bad_recv_wr);
}

/**
 * ib_create_cq - Creates a CQ on the specified device.
 * @device: The device on which to create the CQ.
 * @comp_handler: A user-specified callback that is invoked when a
 *   completion event occurs on the CQ.
 * @event_handler: A user-specified callback that is invoked when an
 *   asynchronous event not associated with a completion occurs on the CQ.
 * @cq_context: Context associated with the CQ returned to the user via
 *   the associated completion and event handlers.
 * @cqe: The minimum size of the CQ.
 * @comp_vector: Completion vector used to signal completion events.
 *   Must be >= 0 and < context->num_comp_vectors.
 *
 * Users can examine the cq structure to determine the actual CQ size.
 */
struct ib_cq *ib_create_cq(struct ib_device *device,
                           ib_comp_handler comp_handler,
                           void (*event_handler)(struct ib_event *, void *),
                           void *cq_context, int cqe, int comp_vector);
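
/*
 * Example: creating a CQ.  An illustrative sketch only; "my_comp_handler",
 * "my_ctx" and the size of 256 entries are placeholders.
 *
 *      struct ib_cq *cq = ib_create_cq(device, my_comp_handler, NULL,
 *                                      my_ctx, 256, 0);
 *      if (IS_ERR(cq))
 *              return PTR_ERR(cq);
 */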

/**
 * ib_resize_cq - Modifies the capacity of the CQ.
 * @cq: The CQ to resize.
 * @cqe: The minimum size of the CQ.
 *
 * Users can examine the cq structure to determine the actual CQ size.
 */
int ib_resize_cq(struct ib_cq *cq, int cqe);

/**
 * ib_modify_cq - Modifies moderation params of the CQ
 * @cq: The CQ to modify.
 * @cq_count: number of CQEs that will trigger an event
 * @cq_period: max period of time in usec before triggering an event
 */
int ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period);

/**
 * ib_destroy_cq - Destroys the specified CQ.
 * @cq: The CQ to destroy.
 */
int ib_destroy_cq(struct ib_cq *cq);

/**
 * ib_poll_cq - poll a CQ for completion(s)
 * @cq: the CQ being polled
 * @num_entries: maximum number of completions to return
 * @wc: array of at least @num_entries &struct ib_wc where completions
 *   will be returned
 *
 * Poll a CQ for (possibly multiple) completions.  If the return value
 * is < 0, an error occurred.  If the return value is >= 0, it is the
 * number of completions returned.  If the return value is
 * non-negative and < num_entries, then the CQ was emptied.
 */
static inline int ib_poll_cq(struct ib_cq *cq, int num_entries,
                             struct ib_wc *wc)
{
        return cq->device->poll_cq(cq, num_entries, wc);
}
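
/*
 * Example: draining a CQ one completion at a time.  An illustrative
 * sketch only; real consumers typically poll a batch into an array.
 *
 *      struct ib_wc wc;
 *
 *      while (ib_poll_cq(cq, 1, &wc) > 0) {
 *              if (wc.status != IB_WC_SUCCESS)
 *                      pr_err("wr_id %llu failed with status %d\n",
 *                             wc.wr_id, wc.status);
 *              ...
 *      }
 */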

/**
 * ib_peek_cq - Returns the number of unreaped completions currently
 *   on the specified CQ.
 * @cq: The CQ to peek.
 * @wc_cnt: A minimum number of unreaped completions to check for.
 *
 * If the number of unreaped completions is greater than or equal to wc_cnt,
 * this function returns wc_cnt, otherwise, it returns the actual number of
 * unreaped completions.
 */
int ib_peek_cq(struct ib_cq *cq, int wc_cnt);

/**
 * ib_req_notify_cq - Request completion notification on a CQ.
 * @cq: The CQ to generate an event for.
 * @flags:
 *   Must contain exactly one of %IB_CQ_SOLICITED or %IB_CQ_NEXT_COMP
 *   to request an event on the next solicited event or next work
 *   completion of any type, respectively.  %IB_CQ_REPORT_MISSED_EVENTS
 *   may also be |ed in to request a hint about missed events, as
 *   described below.
 *
 * Return Value:
 *    < 0 means an error occurred while requesting notification
 *   == 0 means notification was requested successfully, and if
 *        IB_CQ_REPORT_MISSED_EVENTS was passed in, then no events
 *        were missed and it is safe to wait for another event.  In
 *        this case it is guaranteed that any work completions added
 *        to the CQ since the last CQ poll will trigger a completion
 *        notification event.
 *    > 0 is only returned if IB_CQ_REPORT_MISSED_EVENTS was passed
 *        in.  It means that the consumer must poll the CQ again to
 *        make sure it is empty to avoid missing an event because of a
 *        race between requesting notification and an entry being
 *        added to the CQ.  This return value means it is possible
 *        (but not guaranteed) that a work completion has been added
 *        to the CQ since the last poll without triggering a
 *        completion notification event.
 */
static inline int ib_req_notify_cq(struct ib_cq *cq,
                                   enum ib_cq_notify_flags flags)
{
        return cq->device->req_notify_cq(cq, flags);
}
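
/*
 * Example: the re-arm pattern that avoids the race described above.  An
 * illustrative sketch only; "process_completions" is a placeholder that
 * drains the CQ with ib_poll_cq().  As long as ib_req_notify_cq() returns
 * > 0 with IB_CQ_REPORT_MISSED_EVENTS set, the CQ must be polled again.
 *
 *      do {
 *              process_completions(cq);
 *      } while (ib_req_notify_cq(cq, IB_CQ_NEXT_COMP |
 *                                    IB_CQ_REPORT_MISSED_EVENTS) > 0);
 */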

/**
 * ib_req_ncomp_notif - Request completion notification when there are
 *   at least the specified number of unreaped completions on the CQ.
 * @cq: The CQ to generate an event for.
 * @wc_cnt: The number of unreaped completions that should be on the
 *   CQ before an event is generated.
 */
static inline int ib_req_ncomp_notif(struct ib_cq *cq, int wc_cnt)
{
        return cq->device->req_ncomp_notif ?
                cq->device->req_ncomp_notif(cq, wc_cnt) :
                -ENOSYS;
}

/**
 * ib_get_dma_mr - Returns a memory region for system memory that is
 *   usable for DMA.
 * @pd: The protection domain associated with the memory region.
 * @mr_access_flags: Specifies the memory access rights.
 *
 * Note that the ib_dma_*() functions defined below must be used
 * to create/destroy addresses used with the Lkey or Rkey returned
 * by ib_get_dma_mr().
 */
struct ib_mr *ib_get_dma_mr(struct ib_pd *pd, int mr_access_flags);

/**
 * ib_dma_mapping_error - check a DMA addr for error
 * @dev: The device for which the dma_addr was created
 * @dma_addr: The DMA address to check
 */
static inline int ib_dma_mapping_error(struct ib_device *dev, u64 dma_addr)
{
        if (dev->dma_ops)
                return dev->dma_ops->mapping_error(dev, dma_addr);
        return dma_mapping_error(dev->dma_device, dma_addr);
}

/**
 * ib_dma_map_single - Map a kernel virtual address to DMA address
 * @dev: The device for which the dma_addr is to be created
 * @cpu_addr: The kernel virtual address
 * @size: The size of the region in bytes
 * @direction: The direction of the DMA
 */
static inline u64 ib_dma_map_single(struct ib_device *dev,
                                    void *cpu_addr, size_t size,
                                    enum dma_data_direction direction)
{
        if (dev->dma_ops)
                return dev->dma_ops->map_single(dev, cpu_addr, size, direction);
        return dma_map_single(dev->dma_device, cpu_addr, size, direction);
}
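
/*
 * Example: mapping a kernel buffer for a send.  An illustrative sketch
 * only; "buf" and "len" are placeholders.
 *
 *      u64 dma_addr = ib_dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *      if (ib_dma_mapping_error(dev, dma_addr))
 *              return -ENOMEM;
 *      ...
 *      ib_dma_unmap_single(dev, dma_addr, len, DMA_TO_DEVICE);
 */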

/**
 * ib_dma_unmap_single - Destroy a mapping created by ib_dma_map_single()
 * @dev: The device for which the DMA address was created
 * @addr: The DMA address
 * @size: The size of the region in bytes
 * @direction: The direction of the DMA
 */
static inline void ib_dma_unmap_single(struct ib_device *dev,
                                       u64 addr, size_t size,
                                       enum dma_data_direction direction)
{
        if (dev->dma_ops)
                dev->dma_ops->unmap_single(dev, addr, size, direction);
        else
                dma_unmap_single(dev->dma_device, addr, size, direction);
}

static inline u64 ib_dma_map_single_attrs(struct ib_device *dev,
                                          void *cpu_addr, size_t size,
                                          enum dma_data_direction direction,
                                          struct dma_attrs *attrs)
{
        return dma_map_single_attrs(dev->dma_device, cpu_addr, size,
                                    direction, attrs);
}

static inline void ib_dma_unmap_single_attrs(struct ib_device *dev,
                                             u64 addr, size_t size,
                                             enum dma_data_direction direction,
                                             struct dma_attrs *attrs)
{
        return dma_unmap_single_attrs(dev->dma_device, addr, size,
                                      direction, attrs);
}

/**
 * ib_dma_map_page - Map a physical page to DMA address
 * @dev: The device for which the dma_addr is to be created
 * @page: The page to be mapped
 * @offset: The offset within the page
 * @size: The size of the region in bytes
 * @direction: The direction of the DMA
 */
static inline u64 ib_dma_map_page(struct ib_device *dev,
                                  struct page *page,
                                  unsigned long offset,
                                  size_t size,
                                  enum dma_data_direction direction)
{
        if (dev->dma_ops)
                return dev->dma_ops->map_page(dev, page, offset, size, direction);
        return dma_map_page(dev->dma_device, page, offset, size, direction);
}

/**
 * ib_dma_unmap_page - Destroy a mapping created by ib_dma_map_page()
 * @dev: The device for which the DMA address was created
 * @addr: The DMA address
 * @size: The size of the region in bytes
 * @direction: The direction of the DMA
 */
static inline void ib_dma_unmap_page(struct ib_device *dev,
                                     u64 addr, size_t size,
                                     enum dma_data_direction direction)
{
        if (dev->dma_ops)
                dev->dma_ops->unmap_page(dev, addr, size, direction);
        else
                dma_unmap_page(dev->dma_device, addr, size, direction);
}

/**
 * ib_dma_map_sg - Map a scatter/gather list to DMA addresses
 * @dev: The device for which the DMA addresses are to be created
 * @sg: The array of scatter/gather entries
 * @nents: The number of scatter/gather entries
 * @direction: The direction of the DMA
 */
static inline int ib_dma_map_sg(struct ib_device *dev,
                                struct scatterlist *sg, int nents,
                                enum dma_data_direction direction)
{
        if (dev->dma_ops)
                return dev->dma_ops->map_sg(dev, sg, nents, direction);
        return dma_map_sg(dev->dma_device, sg, nents, direction);
}
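
/*
 * Example: mapping a scatterlist.  An illustrative sketch only; "sgl" and
 * "nents" are placeholders, with the table assumed to have been filled
 * via sg_set_page().  A return of 0 indicates mapping failure, and the
 * unmap below (see ib_dma_unmap_sg()) must use the original nents.
 *
 *      int mapped = ib_dma_map_sg(dev, sgl, nents, DMA_BIDIRECTIONAL);
 *      if (!mapped)
 *              return -ENOMEM;
 *      ...
 *      ib_dma_unmap_sg(dev, sgl, nents, DMA_BIDIRECTIONAL);
 */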

/**
 * ib_dma_unmap_sg - Unmap a scatter/gather list of DMA addresses
 * @dev: The device for which the DMA addresses were created
 * @sg: The array of scatter/gather entries
 * @nents: The number of scatter/gather entries
 * @direction: The direction of the DMA
 */
static inline void ib_dma_unmap_sg(struct ib_device *dev,
                                   struct scatterlist *sg, int nents,
                                   enum dma_data_direction direction)
{
        if (dev->dma_ops)
                dev->dma_ops->unmap_sg(dev, sg, nents, direction);
        else
                dma_unmap_sg(dev->dma_device, sg, nents, direction);
}

static inline int ib_dma_map_sg_attrs(struct ib_device *dev,
                                      struct scatterlist *sg, int nents,
                                      enum dma_data_direction direction,
                                      struct dma_attrs *attrs)
{
        return dma_map_sg_attrs(dev->dma_device, sg, nents, direction, attrs);
}

static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev,
                                         struct scatterlist *sg, int nents,
                                         enum dma_data_direction direction,
                                         struct dma_attrs *attrs)
{
        dma_unmap_sg_attrs(dev->dma_device, sg, nents, direction, attrs);
}

/**
 * ib_sg_dma_address - Return the DMA address from a scatter/gather entry
 * @dev: The device for which the DMA addresses were created
 * @sg: The scatter/gather entry
 *
 * Note: this function is obsolete.  To do: change all occurrences of
 * ib_sg_dma_address() into sg_dma_address().
 */
static inline u64 ib_sg_dma_address(struct ib_device *dev,
                                    struct scatterlist *sg)
{
        return sg_dma_address(sg);
}

/**
 * ib_sg_dma_len - Return the DMA length from a scatter/gather entry
 * @dev: The device for which the DMA addresses were created
 * @sg: The scatter/gather entry
 *
 * Note: this function is obsolete.  To do: change all occurrences of
 * ib_sg_dma_len() into sg_dma_len().
 */
static inline unsigned int ib_sg_dma_len(struct ib_device *dev,
                                         struct scatterlist *sg)
{
        return sg_dma_len(sg);
}

/**
 * ib_dma_sync_single_for_cpu - Prepare DMA region to be accessed by CPU
 * @dev: The device for which the DMA address was created
 * @addr: The DMA address
 * @size: The size of the region in bytes
 * @dir: The direction of the DMA
 */
static inline void ib_dma_sync_single_for_cpu(struct ib_device *dev,
                                              u64 addr,
                                              size_t size,
                                              enum dma_data_direction dir)
{
        if (dev->dma_ops)
                dev->dma_ops->sync_single_for_cpu(dev, addr, size, dir);
        else
                dma_sync_single_for_cpu(dev->dma_device, addr, size, dir);
}

/**
 * ib_dma_sync_single_for_device - Prepare DMA region to be accessed by device
 * @dev: The device for which the DMA address was created
 * @addr: The DMA address
 * @size: The size of the region in bytes
 * @dir: The direction of the DMA
 */
static inline void ib_dma_sync_single_for_device(struct ib_device *dev,
                                                 u64 addr,
                                                 size_t size,
                                                 enum dma_data_direction dir)
{
        if (dev->dma_ops)
                dev->dma_ops->sync_single_for_device(dev, addr, size, dir);
        else
                dma_sync_single_for_device(dev->dma_device, addr, size, dir);
}

/**
 * ib_dma_alloc_coherent - Allocate memory and map it for DMA
 * @dev: The device for which the DMA address is requested
 * @size: The size of the region to allocate in bytes
 * @dma_handle: A pointer for returning the DMA address of the region
 * @flag: memory allocator flags
 */
static inline void *ib_dma_alloc_coherent(struct ib_device *dev,
                                          size_t size,
                                          u64 *dma_handle,
                                          gfp_t flag)
{
        if (dev->dma_ops)
                return dev->dma_ops->alloc_coherent(dev, size, dma_handle, flag);
        else {
                dma_addr_t handle;
                void *ret;

                ret = dma_alloc_coherent(dev->dma_device, size, &handle, flag);
                *dma_handle = handle;
                return ret;
        }
}

/**
 * ib_dma_free_coherent - Free memory allocated by ib_dma_alloc_coherent()
 * @dev: The device for which the DMA addresses were allocated
 * @size: The size of the region
 * @cpu_addr: the address returned by ib_dma_alloc_coherent()
 * @dma_handle: the DMA address returned by ib_dma_alloc_coherent()
 */
static inline void ib_dma_free_coherent(struct ib_device *dev,
                                        size_t size, void *cpu_addr,
                                        u64 dma_handle)
{
        if (dev->dma_ops)
                dev->dma_ops->free_coherent(dev, size, cpu_addr, dma_handle);
        else
                dma_free_coherent(dev->dma_device, size, cpu_addr, dma_handle);
}

/**
 * ib_reg_phys_mr - Prepares a virtually addressed memory region for use
 *   by an HCA.
 * @pd: The protection domain assigned to the registered region.
 * @phys_buf_array: Specifies a list of physical buffers to use in the
 *   memory region.
 * @num_phys_buf: Specifies the size of the phys_buf_array.
 * @mr_access_flags: Specifies the memory access rights.
 * @iova_start: The offset of the region's starting I/O virtual address.
 */
struct ib_mr *ib_reg_phys_mr(struct ib_pd *pd,
                             struct ib_phys_buf *phys_buf_array,
                             int num_phys_buf,
                             int mr_access_flags,
                             u64 *iova_start);

/**
 * ib_rereg_phys_mr - Modifies the attributes of an existing memory region.
 *   Conceptually, this call performs the functions of deregistering a
 *   memory region followed by registering a physical memory region.
 *   Where possible, resources are reused instead of deallocated and
 *   reallocated.
 * @mr: The memory region to modify.
 * @mr_rereg_mask: A bit-mask used to indicate which of the following
 *   properties of the memory region are being modified.
 * @pd: If %IB_MR_REREG_PD is set in mr_rereg_mask, this field specifies
 *   the new protection domain to associate with the memory region,
 *   otherwise, this parameter is ignored.
 * @phys_buf_array: If %IB_MR_REREG_TRANS is set in mr_rereg_mask, this
 *   field specifies a list of physical buffers to use in the new
 *   translation, otherwise, this parameter is ignored.
 * @num_phys_buf: If %IB_MR_REREG_TRANS is set in mr_rereg_mask, this
 *   field specifies the size of the phys_buf_array, otherwise, this
 *   parameter is ignored.
 * @mr_access_flags: If %IB_MR_REREG_ACCESS is set in mr_rereg_mask, this
 *   field specifies the new memory access rights, otherwise, this
 *   parameter is ignored.
 * @iova_start: The offset of the region's starting I/O virtual address.
 */
int ib_rereg_phys_mr(struct ib_mr *mr,
                     int mr_rereg_mask,
                     struct ib_pd *pd,
                     struct ib_phys_buf *phys_buf_array,
                     int num_phys_buf,
                     int mr_access_flags,
                     u64 *iova_start);

/**
 * ib_query_mr - Retrieves information about a specific memory region.
 * @mr: The memory region to retrieve information about.
 * @mr_attr: The attributes of the specified memory region.
 */
int ib_query_mr(struct ib_mr *mr, struct ib_mr_attr *mr_attr);

/**
 * ib_dereg_mr - Deregisters a memory region and removes it from the
 *   HCA translation table.
 * @mr: The memory region to deregister.
 *
 * This function can fail if the memory region has memory windows bound to it.
 */
int ib_dereg_mr(struct ib_mr *mr);

/**
 * ib_create_mr - Allocates a memory region that may be used for
 *   signature handover operations.
 * @pd: The protection domain associated with the region.
 * @mr_init_attr: memory region init attributes.
 */
struct ib_mr *ib_create_mr(struct ib_pd *pd,
                           struct ib_mr_init_attr *mr_init_attr);

/**
 * ib_destroy_mr - Destroys a memory region that was created using
 *   ib_create_mr and removes it from HW translation tables.
 * @mr: The memory region to destroy.
 *
 * This function can fail if the memory region has memory windows bound to it.
 */
int ib_destroy_mr(struct ib_mr *mr);

/**
 * ib_alloc_fast_reg_mr - Allocates a memory region usable with the
 *   IB_WR_FAST_REG_MR send work request.
 * @pd: The protection domain associated with the region.
 * @max_page_list_len: requested max physical buffer list length to be
 *   used with fast register work requests for this MR.
 */
struct ib_mr *ib_alloc_fast_reg_mr(struct ib_pd *pd, int max_page_list_len);

/**
 * ib_alloc_fast_reg_page_list - Allocates a page list array
 * @device: ib device pointer.
 * @page_list_len: size of the page list array to be allocated.
 *
 * This allocates and returns a struct ib_fast_reg_page_list * and a
 * page_list array that is at least page_list_len in size.  The actual
 * size is returned in max_page_list_len.  The caller is responsible
 * for initializing the contents of the page_list array before posting
 * a send work request with the IB_WR_FAST_REG_MR opcode.
 *
 * The page_list array entries must be translated using one of the
 * ib_dma_*() functions just like the addresses passed to
 * ib_map_phys_fmr().  Once the ib_post_send() is issued, the struct
 * ib_fast_reg_page_list must not be modified by the caller until the
 * IB_WR_FAST_REG_MR work request completes.
 */
struct ib_fast_reg_page_list *ib_alloc_fast_reg_page_list(
                        struct ib_device *device, int page_list_len);
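
/*
 * Example: posting a fast register work request.  An illustrative sketch
 * only; "iova", "npages", "len" and MY_FASTREG_WRID are placeholders, the
 * page_list array is assumed to have been filled with ib_dma_*()
 * translated addresses, and the key is refreshed first with
 * ib_update_fast_reg_key() (defined below).
 *
 *      struct ib_send_wr fr_wr = {
 *              .wr_id  = MY_FASTREG_WRID,
 *              .opcode = IB_WR_FAST_REG_MR,
 *      }, *bad_wr;
 *
 *      ib_update_fast_reg_key(mr, ++key);
 *      fr_wr.wr.fast_reg.iova_start    = iova;
 *      fr_wr.wr.fast_reg.page_list     = page_list;
 *      fr_wr.wr.fast_reg.page_list_len = npages;
 *      fr_wr.wr.fast_reg.page_shift    = PAGE_SHIFT;
 *      fr_wr.wr.fast_reg.length        = len;
 *      fr_wr.wr.fast_reg.access_flags  = IB_ACCESS_LOCAL_WRITE |
 *                                        IB_ACCESS_REMOTE_READ;
 *      fr_wr.wr.fast_reg.rkey          = mr->rkey;
 *      ret = ib_post_send(qp, &fr_wr, &bad_wr);
 */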

/**
 * ib_free_fast_reg_page_list - Deallocates a previously allocated
 *   page list array.
 * @page_list: struct ib_fast_reg_page_list pointer to be deallocated.
 */
void ib_free_fast_reg_page_list(struct ib_fast_reg_page_list *page_list);

/**
 * ib_update_fast_reg_key - updates the key portion of the fast_reg MR
 *   R_Key and L_Key.
 * @mr: struct ib_mr pointer to be updated.
 * @newkey: new key to be used.
 */
static inline void ib_update_fast_reg_key(struct ib_mr *mr, u8 newkey)
{
        mr->lkey = (mr->lkey & 0xffffff00) | newkey;
        mr->rkey = (mr->rkey & 0xffffff00) | newkey;
}

/**
 * ib_inc_rkey - increments the key portion of the given rkey.  Can be used
 *   for calculating a new rkey for type 2 memory windows.
 * @rkey: the rkey to increment.
 */
static inline u32 ib_inc_rkey(u32 rkey)
{
        const u32 mask = 0x000000ff;
        return ((rkey + 1) & mask) | (rkey & ~mask);
}

/**
 * ib_alloc_mw - Allocates a memory window.
 * @pd: The protection domain associated with the memory window.
 * @type: The type of the memory window (1 or 2).
 */
struct ib_mw *ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type);

/**
 * ib_bind_mw - Posts a work request to the send queue of the specified
 *   QP, which binds the memory window to the given address range and
 *   remote access attributes.
 * @qp: QP to post the bind work request on.
 * @mw: The memory window to bind.
 * @mw_bind: Specifies information about the memory window, including
 *   its address range, remote access rights, and associated memory region.
 *
 * If there is no immediate error, the function will update the rkey member
 * of the mw parameter to its new value.  The bind operation can still fail
 * asynchronously.
 */
static inline int ib_bind_mw(struct ib_qp *qp,
                             struct ib_mw *mw,
                             struct ib_mw_bind *mw_bind)
{
        /* XXX reference counting in corresponding MR? */
        return mw->device->bind_mw ?
                mw->device->bind_mw(qp, mw, mw_bind) :
                -ENOSYS;
}

/**
 * ib_dealloc_mw - Deallocates a memory window.
 * @mw: The memory window to deallocate.
 */
int ib_dealloc_mw(struct ib_mw *mw);

/**
 * ib_alloc_fmr - Allocates an unmapped fast memory region.
 * @pd: The protection domain associated with the unmapped region.
 * @mr_access_flags: Specifies the memory access rights.
 * @fmr_attr: Attributes of the unmapped region.
 *
 * A fast memory region must be mapped before it can be used as part of
 * a work request.
 */
struct ib_fmr *ib_alloc_fmr(struct ib_pd *pd,
                            int mr_access_flags,
                            struct ib_fmr_attr *fmr_attr);

/**
 * ib_map_phys_fmr - Maps a list of physical pages to a fast memory region.
 * @fmr: The fast memory region to associate with the pages.
 * @page_list: An array of physical pages to map to the fast memory region.
 * @list_len: The number of pages in page_list.
 * @iova: The I/O virtual address to use with the mapped region.
 */
static inline int ib_map_phys_fmr(struct ib_fmr *fmr,
                                  u64 *page_list, int list_len,
                                  u64 iova)
{
        return fmr->device->map_phys_fmr(fmr, page_list, list_len, iova);
}
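
/*
 * Example: the FMR lifecycle.  An illustrative sketch only; the sizes,
 * "page_dma_addrs", "npages" and "iova" are placeholders, and unmapping
 * is done in bulk by linking FMRs onto a list via fmr->list.
 *
 *      struct ib_fmr_attr fmr_attr = {
 *              .max_pages  = 64,
 *              .max_maps   = 32,
 *              .page_shift = PAGE_SHIFT,
 *      };
 *      struct ib_fmr *fmr = ib_alloc_fmr(pd, IB_ACCESS_LOCAL_WRITE,
 *                                        &fmr_attr);
 *      if (IS_ERR(fmr))
 *              return PTR_ERR(fmr);
 *      ret = ib_map_phys_fmr(fmr, page_dma_addrs, npages, iova);
 *      ...
 *      LIST_HEAD(fmr_list);
 *      list_add(&fmr->list, &fmr_list);
 *      ib_unmap_fmr(&fmr_list);
 */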

/**
 * ib_unmap_fmr - Removes the mapping from a list of fast memory regions.
 * @fmr_list: A linked list of fast memory regions to unmap.
 */
int ib_unmap_fmr(struct list_head *fmr_list);

/**
 * ib_dealloc_fmr - Deallocates a fast memory region.
 * @fmr: The fast memory region to deallocate.
 */
int ib_dealloc_fmr(struct ib_fmr *fmr);

/**
 * ib_attach_mcast - Attaches the specified QP to a multicast group.
 * @qp: QP to attach to the multicast group.  The QP must be type
 *   IB_QPT_UD.
 * @gid: Multicast group GID.
 * @lid: Multicast group LID in host byte order.
 *
 * In order to send and receive multicast packets, subnet
 * administration must have created the multicast group and configured
 * the fabric appropriately.  The port associated with the specified
 * QP must also be a member of the multicast group.
 */
int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);

/**
 * ib_detach_mcast - Detaches the specified QP from a multicast group.
 * @qp: QP to detach from the multicast group.
 * @gid: Multicast group GID.
 * @lid: Multicast group LID in host byte order.
 */
int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);

/**
 * ib_alloc_xrcd - Allocates an XRC domain.
 * @device: The device on which to allocate the XRC domain.
 */
struct ib_xrcd *ib_alloc_xrcd(struct ib_device *device);

/**
 * ib_dealloc_xrcd - Deallocates an XRC domain.
 * @xrcd: The XRC domain to deallocate.
 */
int ib_dealloc_xrcd(struct ib_xrcd *xrcd);

struct ib_flow *ib_create_flow(struct ib_qp *qp,
                               struct ib_flow_attr *flow_attr, int domain);
int ib_destroy_flow(struct ib_flow *flow_id);

static inline int ib_check_mr_access(int flags)
{
        /*
         * Local write permission is required if remote write or
         * remote atomic permission is also requested.
         */
        if (flags & (IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_REMOTE_WRITE) &&
            !(flags & IB_ACCESS_LOCAL_WRITE))
                return -EINVAL;

        return 0;
}

/**
 * ib_check_mr_status - lightweight check of MR status.
 *   This routine may provide status checks on a selected
 *   ib_mr.  The first use is for signature status checks.
 * @mr: A memory region.
 * @check_mask: Bitmask of which checks to perform from
 *   ib_mr_status_check enumeration.
 * @mr_status: The container of relevant status checks.
 *   Failed checks will be indicated in the status bitmask
 *   and the relevant info shall be in the error item.
 */
int ib_check_mr_status(struct ib_mr *mr, u32 check_mask,
                       struct ib_mr_status *mr_status);

#endif /* IB_VERBS_H */