/*
 * Copyright (c) 2004 Mellanox Technologies Ltd.  All rights reserved.
 * Copyright (c) 2004 Infinicon Corporation.  All rights reserved.
 * Copyright (c) 2004 Intel Corporation.  All rights reserved.
 * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
 * Copyright (c) 2004 Voltaire Corporation.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#if !defined(IB_VERBS_H)
#define IB_VERBS_H

#include <linux/types.h>
#include <linux/device.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/rwsem.h>
#include <linux/scatterlist.h>
#include <linux/workqueue.h>
#include <uapi/linux/if_ether.h>

#include <linux/atomic.h>
#include <linux/mmu_notifier.h>
#include <asm/uaccess.h>

extern struct workqueue_struct *ib_wq;

union ib_gid {
	u8	raw[16];
	struct {
		__be64	subnet_prefix;
		__be64	interface_id;
	} global;
};

enum rdma_node_type {
	/* IB values map to NodeInfo:NodeType. */
	RDMA_NODE_IB_CA		= 1,
	RDMA_NODE_IB_SWITCH,
	RDMA_NODE_IB_ROUTER,
	RDMA_NODE_RNIC,
	RDMA_NODE_USNIC,
	RDMA_NODE_USNIC_UDP,
};

enum rdma_transport_type {
	RDMA_TRANSPORT_IB,
	RDMA_TRANSPORT_IWARP,
	RDMA_TRANSPORT_USNIC,
	RDMA_TRANSPORT_USNIC_UDP
};

enum rdma_protocol_type {
	RDMA_PROTOCOL_IB,
	RDMA_PROTOCOL_IBOE,
	RDMA_PROTOCOL_IWARP,
	RDMA_PROTOCOL_USNIC_UDP
};

__attribute_const__ enum rdma_transport_type
rdma_node_get_transport(enum rdma_node_type node_type);

enum rdma_link_layer {
	IB_LINK_LAYER_UNSPECIFIED,
	IB_LINK_LAYER_INFINIBAND,
	IB_LINK_LAYER_ETHERNET,
};

enum ib_device_cap_flags {
	IB_DEVICE_RESIZE_MAX_WR		= 1,
	IB_DEVICE_BAD_PKEY_CNTR		= (1<<1),
	IB_DEVICE_BAD_QKEY_CNTR		= (1<<2),
	IB_DEVICE_RAW_MULTI		= (1<<3),
	IB_DEVICE_AUTO_PATH_MIG		= (1<<4),
	IB_DEVICE_CHANGE_PHY_PORT	= (1<<5),
	IB_DEVICE_UD_AV_PORT_ENFORCE	= (1<<6),
	IB_DEVICE_CURR_QP_STATE_MOD	= (1<<7),
	IB_DEVICE_SHUTDOWN_PORT		= (1<<8),
	IB_DEVICE_INIT_TYPE		= (1<<9),
	IB_DEVICE_PORT_ACTIVE_EVENT	= (1<<10),
	IB_DEVICE_SYS_IMAGE_GUID	= (1<<11),
	IB_DEVICE_RC_RNR_NAK_GEN	= (1<<12),
	IB_DEVICE_SRQ_RESIZE		= (1<<13),
	IB_DEVICE_N_NOTIFY_CQ		= (1<<14),
	IB_DEVICE_LOCAL_DMA_LKEY	= (1<<15),
	IB_DEVICE_RESERVED		= (1<<16), /* old SEND_W_INV */
	IB_DEVICE_MEM_WINDOW		= (1<<17),
	/*
	 * Devices should set IB_DEVICE_UD_IP_CSUM if they support
	 * insertion of UDP and TCP checksums on outgoing UD IPoIB
	 * messages and can verify the validity of checksums for
	 * incoming messages.  Setting this flag implies that the
	 * IPoIB driver may set NETIF_F_IP_CSUM for datagram mode.
	 */
	IB_DEVICE_UD_IP_CSUM		= (1<<18),
	IB_DEVICE_UD_TSO		= (1<<19),
	IB_DEVICE_XRC			= (1<<20),
	IB_DEVICE_MEM_MGT_EXTENSIONS	= (1<<21),
	IB_DEVICE_BLOCK_MULTICAST_LOOPBACK = (1<<22),
	IB_DEVICE_MEM_WINDOW_TYPE_2A	= (1<<23),
	IB_DEVICE_MEM_WINDOW_TYPE_2B	= (1<<24),
	IB_DEVICE_MANAGED_FLOW_STEERING = (1<<29),
	IB_DEVICE_SIGNATURE_HANDOVER	= (1<<30),
	IB_DEVICE_ON_DEMAND_PAGING	= (1<<31),
};

enum ib_signature_prot_cap {
	IB_PROT_T10DIF_TYPE_1 = 1,
	IB_PROT_T10DIF_TYPE_2 = 1 << 1,
	IB_PROT_T10DIF_TYPE_3 = 1 << 2,
};

enum ib_signature_guard_cap {
	IB_GUARD_T10DIF_CRC	= 1,
	IB_GUARD_T10DIF_CSUM	= 1 << 1,
};

enum ib_atomic_cap {
	IB_ATOMIC_NONE,
	IB_ATOMIC_HCA,
	IB_ATOMIC_GLOB
};

enum ib_odp_general_cap_bits {
	IB_ODP_SUPPORT = 1 << 0,
};

enum ib_odp_transport_cap_bits {
	IB_ODP_SUPPORT_SEND	= 1 << 0,
	IB_ODP_SUPPORT_RECV	= 1 << 1,
	IB_ODP_SUPPORT_WRITE	= 1 << 2,
	IB_ODP_SUPPORT_READ	= 1 << 3,
	IB_ODP_SUPPORT_ATOMIC	= 1 << 4,
};

struct ib_odp_caps {
	uint64_t general_caps;
	struct {
		uint32_t rc_odp_caps;
		uint32_t uc_odp_caps;
		uint32_t ud_odp_caps;
	} per_transport_caps;
};

struct ib_device_attr {
	u64			fw_ver;
	__be64			sys_image_guid;
	u64			max_mr_size;
	u64			page_size_cap;
	u32			vendor_id;
	u32			vendor_part_id;
	u32			hw_ver;
	int			max_qp;
	int			max_qp_wr;
	int			device_cap_flags;
	int			max_sge;
	int			max_sge_rd;
	int			max_cq;
	int			max_cqe;
	int			max_mr;
	int			max_pd;
	int			max_qp_rd_atom;
	int			max_ee_rd_atom;
	int			max_res_rd_atom;
	int			max_qp_init_rd_atom;
	int			max_ee_init_rd_atom;
	enum ib_atomic_cap	atomic_cap;
	enum ib_atomic_cap	masked_atomic_cap;
	int			max_ee;
	int			max_rdd;
	int			max_mw;
	int			max_raw_ipv6_qp;
	int			max_raw_ethy_qp;
	int			max_mcast_grp;
	int			max_mcast_qp_attach;
	int			max_total_mcast_qp_attach;
	int			max_ah;
	int			max_fmr;
	int			max_map_per_fmr;
	int			max_srq;
	int			max_srq_wr;
	int			max_srq_sge;
	unsigned int		max_fast_reg_page_list_len;
	u16			max_pkeys;
	u8			local_ca_ack_delay;
	int			sig_prot_cap;
	int			sig_guard_cap;
	struct ib_odp_caps	odp_caps;
};

enum ib_mtu {
	IB_MTU_256  = 1,
	IB_MTU_512  = 2,
	IB_MTU_1024 = 3,
	IB_MTU_2048 = 4,
	IB_MTU_4096 = 5
};

static inline int ib_mtu_enum_to_int(enum ib_mtu mtu)
{
	switch (mtu) {
	case IB_MTU_256:  return  256;
	case IB_MTU_512:  return  512;
	case IB_MTU_1024: return 1024;
	case IB_MTU_2048: return 2048;
	case IB_MTU_4096: return 4096;
	default:	  return -1;
	}
}

enum ib_port_state {
	IB_PORT_NOP		= 0,
	IB_PORT_DOWN		= 1,
	IB_PORT_INIT		= 2,
	IB_PORT_ARMED		= 3,
	IB_PORT_ACTIVE		= 4,
	IB_PORT_ACTIVE_DEFER	= 5
};

enum ib_port_cap_flags {
	IB_PORT_SM				= 1 <<  1,
	IB_PORT_NOTICE_SUP			= 1 <<  2,
	IB_PORT_TRAP_SUP			= 1 <<  3,
	IB_PORT_OPT_IPD_SUP			= 1 <<  4,
	IB_PORT_AUTO_MIGR_SUP			= 1 <<  5,
	IB_PORT_SL_MAP_SUP			= 1 <<  6,
	IB_PORT_MKEY_NVRAM			= 1 <<  7,
	IB_PORT_PKEY_NVRAM			= 1 <<  8,
	IB_PORT_LED_INFO_SUP			= 1 <<  9,
	IB_PORT_SM_DISABLED			= 1 << 10,
	IB_PORT_SYS_IMAGE_GUID_SUP		= 1 << 11,
	IB_PORT_PKEY_SW_EXT_PORT_TRAP_SUP	= 1 << 12,
	IB_PORT_EXTENDED_SPEEDS_SUP		= 1 << 14,
	IB_PORT_CM_SUP				= 1 << 16,
	IB_PORT_SNMP_TUNNEL_SUP			= 1 << 17,
	IB_PORT_REINIT_SUP			= 1 << 18,
	IB_PORT_DEVICE_MGMT_SUP			= 1 << 19,
	IB_PORT_VENDOR_CLASS_SUP		= 1 << 20,
	IB_PORT_DR_NOTICE_SUP			= 1 << 21,
	IB_PORT_CAP_MASK_NOTICE_SUP		= 1 << 22,
	IB_PORT_BOOT_MGMT_SUP			= 1 << 23,
	IB_PORT_LINK_LATENCY_SUP		= 1 << 24,
	IB_PORT_CLIENT_REG_SUP			= 1 << 25,
	IB_PORT_IP_BASED_GIDS			= 1 << 26
};

enum ib_port_width {
	IB_WIDTH_1X	= 1,
	IB_WIDTH_4X	= 2,
	IB_WIDTH_8X	= 4,
	IB_WIDTH_12X	= 8
};

static inline int ib_width_enum_to_int(enum ib_port_width width)
{
	switch (width) {
	case IB_WIDTH_1X:  return  1;
	case IB_WIDTH_4X:  return  4;
	case IB_WIDTH_8X:  return  8;
	case IB_WIDTH_12X: return 12;
	default:	   return -1;
	}
}

enum ib_port_speed {
	IB_SPEED_SDR	= 1,
	IB_SPEED_DDR	= 2,
	IB_SPEED_QDR	= 4,
	IB_SPEED_FDR10	= 8,
	IB_SPEED_FDR	= 16,
	IB_SPEED_EDR	= 32
};

struct ib_protocol_stats {
	/* TBD... */
};

struct iw_protocol_stats {
	u64	ipInReceives;
	u64	ipInHdrErrors;
	u64	ipInTooBigErrors;
	u64	ipInNoRoutes;
	u64	ipInAddrErrors;
	u64	ipInUnknownProtos;
	u64	ipInTruncatedPkts;
	u64	ipInDiscards;
	u64	ipInDelivers;
	u64	ipOutForwDatagrams;
	u64	ipOutRequests;
	u64	ipOutDiscards;
	u64	ipOutNoRoutes;
	u64	ipReasmTimeout;
	u64	ipReasmReqds;
	u64	ipReasmOKs;
	u64	ipReasmFails;
	u64	ipFragOKs;
	u64	ipFragFails;
	u64	ipFragCreates;
	u64	ipInMcastPkts;
	u64	ipOutMcastPkts;
	u64	ipInBcastPkts;
	u64	ipOutBcastPkts;

	u64	tcpRtoAlgorithm;
	u64	tcpRtoMin;
	u64	tcpRtoMax;
	u64	tcpMaxConn;
	u64	tcpActiveOpens;
	u64	tcpPassiveOpens;
	u64	tcpAttemptFails;
	u64	tcpEstabResets;
	u64	tcpCurrEstab;
	u64	tcpInSegs;
	u64	tcpOutSegs;
	u64	tcpRetransSegs;
	u64	tcpInErrs;
	u64	tcpOutRsts;
};

union rdma_protocol_stats {
	struct ib_protocol_stats	ib;
	struct iw_protocol_stats	iw;
};

/* Define bits for the various pieces of port functionality that the core
 * must support for the port.
 */
/* Management 0x00000FFF */
#define RDMA_CORE_CAP_IB_MAD		0x00000001
#define RDMA_CORE_CAP_IB_SMI		0x00000002
#define RDMA_CORE_CAP_IB_CM		0x00000004
#define RDMA_CORE_CAP_IW_CM		0x00000008
#define RDMA_CORE_CAP_IB_SA		0x00000010

/* Address format 0x000FF000 */
#define RDMA_CORE_CAP_AF_IB		0x00001000
#define RDMA_CORE_CAP_ETH_AH		0x00002000

/* Protocol 0xFFF00000 */
#define RDMA_CORE_CAP_PROT_IB		0x00100000
#define RDMA_CORE_CAP_PROT_ROCE		0x00200000
#define RDMA_CORE_CAP_PROT_IWARP	0x00400000

#define RDMA_CORE_PORT_IBA_IB		(RDMA_CORE_CAP_PROT_IB  \
					| RDMA_CORE_CAP_IB_MAD \
					| RDMA_CORE_CAP_IB_SMI \
					| RDMA_CORE_CAP_IB_CM  \
					| RDMA_CORE_CAP_IB_SA  \
					| RDMA_CORE_CAP_AF_IB)
#define RDMA_CORE_PORT_IBA_ROCE		(RDMA_CORE_CAP_PROT_ROCE \
					| RDMA_CORE_CAP_IB_MAD  \
					| RDMA_CORE_CAP_IB_CM   \
					| RDMA_CORE_CAP_IB_SA   \
					| RDMA_CORE_CAP_AF_IB   \
					| RDMA_CORE_CAP_ETH_AH)
#define RDMA_CORE_PORT_IWARP		(RDMA_CORE_CAP_PROT_IWARP \
					| RDMA_CORE_CAP_IW_CM)

struct ib_port_attr {
	enum ib_port_state	state;
	enum ib_mtu		max_mtu;
	enum ib_mtu		active_mtu;
	int			gid_tbl_len;
	u32			port_cap_flags;
	u32			max_msg_sz;
	u32			bad_pkey_cntr;
	u32			qkey_viol_cntr;
	u16			pkey_tbl_len;
	u16			lid;
	u16			sm_lid;
	u8			lmc;
	u8			max_vl_num;
	u8			sm_sl;
	u8			subnet_timeout;
	u8			init_type_reply;
	u8			active_width;
	u8			active_speed;
	u8			phys_state;
};
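
/*
 * Usage sketch (illustrative only): reading the port attributes above and
 * testing for link-up.  "device" and "port_num" are assumed to be valid;
 * ib_query_port() is declared later in this header.
 *
 *	struct ib_port_attr attr;
 *
 *	if (!ib_query_port(device, port_num, &attr) &&
 *	    attr.state == IB_PORT_ACTIVE)
 *		pr_info("port %d active, width enum %d, speed enum %d\n",
 *			port_num, attr.active_width, attr.active_speed);
 */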

enum ib_device_modify_flags {
	IB_DEVICE_MODIFY_SYS_IMAGE_GUID	= 1 << 0,
	IB_DEVICE_MODIFY_NODE_DESC	= 1 << 1
};

struct ib_device_modify {
	u64	sys_image_guid;
	char	node_desc[64];
};

enum ib_port_modify_flags {
	IB_PORT_SHUTDOWN		= 1,
	IB_PORT_INIT_TYPE		= (1<<2),
	IB_PORT_RESET_QKEY_CNTR		= (1<<3)
};

struct ib_port_modify {
	u32	set_port_cap_mask;
	u32	clr_port_cap_mask;
	u8	init_type;
};

enum ib_event_type {
	IB_EVENT_CQ_ERR,
	IB_EVENT_QP_FATAL,
	IB_EVENT_QP_REQ_ERR,
	IB_EVENT_QP_ACCESS_ERR,
	IB_EVENT_COMM_EST,
	IB_EVENT_SQ_DRAINED,
	IB_EVENT_PATH_MIG,
	IB_EVENT_PATH_MIG_ERR,
	IB_EVENT_DEVICE_FATAL,
	IB_EVENT_PORT_ACTIVE,
	IB_EVENT_PORT_ERR,
	IB_EVENT_LID_CHANGE,
	IB_EVENT_PKEY_CHANGE,
	IB_EVENT_SM_CHANGE,
	IB_EVENT_SRQ_ERR,
	IB_EVENT_SRQ_LIMIT_REACHED,
	IB_EVENT_QP_LAST_WQE_REACHED,
	IB_EVENT_CLIENT_REREGISTER,
	IB_EVENT_GID_CHANGE,
};

__attribute_const__ const char *ib_event_msg(enum ib_event_type event);

struct ib_event {
	struct ib_device	*device;
	union {
		struct ib_cq	*cq;
		struct ib_qp	*qp;
		struct ib_srq	*srq;
		u8		port_num;
	} element;
	enum ib_event_type	event;
};

struct ib_event_handler {
	struct ib_device *device;
	void            (*handler)(struct ib_event_handler *, struct ib_event *);
	struct list_head  list;
};

#define INIT_IB_EVENT_HANDLER(_ptr, _device, _handler)		\
	do {							\
		(_ptr)->device  = _device;			\
		(_ptr)->handler = _handler;			\
		INIT_LIST_HEAD(&(_ptr)->list);			\
	} while (0)
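
/*
 * Usage sketch (illustrative only): a consumer registers for asynchronous
 * events with the macro above.  "my_handler" and "my_event_cb" are
 * hypothetical names; ib_register_event_handler() is declared later in
 * this header.
 *
 *	static void my_event_cb(struct ib_event_handler *handler,
 *				struct ib_event *event)
 *	{
 *		pr_info("async event %d on %s\n", event->event,
 *			event->device->name);
 *	}
 *
 *	INIT_IB_EVENT_HANDLER(&my_handler, device, my_event_cb);
 *	ib_register_event_handler(&my_handler);
 */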

struct ib_global_route {
	union ib_gid	dgid;
	u32		flow_label;
	u8		sgid_index;
	u8		hop_limit;
	u8		traffic_class;
};

struct ib_grh {
	__be32		version_tclass_flow;
	__be16		paylen;
	u8		next_hdr;
	u8		hop_limit;
	union ib_gid	sgid;
	union ib_gid	dgid;
};

enum {
	IB_MULTICAST_QPN = 0xffffff
};

#define IB_LID_PERMISSIVE	cpu_to_be16(0xFFFF)

enum ib_ah_flags {
	IB_AH_GRH	= 1
};

enum ib_rate {
	IB_RATE_PORT_CURRENT = 0,
	IB_RATE_2_5_GBPS = 2,
	IB_RATE_5_GBPS   = 5,
	IB_RATE_10_GBPS  = 3,
	IB_RATE_20_GBPS  = 6,
	IB_RATE_30_GBPS  = 4,
	IB_RATE_40_GBPS  = 7,
	IB_RATE_60_GBPS  = 8,
	IB_RATE_80_GBPS  = 9,
	IB_RATE_120_GBPS = 10,
	IB_RATE_14_GBPS  = 11,
	IB_RATE_56_GBPS  = 12,
	IB_RATE_112_GBPS = 13,
	IB_RATE_168_GBPS = 14,
	IB_RATE_25_GBPS  = 15,
	IB_RATE_100_GBPS = 16,
	IB_RATE_200_GBPS = 17,
	IB_RATE_300_GBPS = 18
};

/**
 * ib_rate_to_mult - Convert the IB rate enum to a multiple of the
 * base rate of 2.5 Gbit/sec.  For example, IB_RATE_5_GBPS will be
 * converted to 2, since 5 Gbit/sec is 2 * 2.5 Gbit/sec.
 * @rate: rate to convert.
 */
__attribute_const__ int ib_rate_to_mult(enum ib_rate rate);

/**
 * ib_rate_to_mbps - Convert the IB rate enum to Mbps.
 * For example, IB_RATE_2_5_GBPS will be converted to 2500.
 * @rate: rate to convert.
 */
__attribute_const__ int ib_rate_to_mbps(enum ib_rate rate);

enum ib_mr_create_flags {
	IB_MR_SIGNATURE_EN = 1,
};

/**
 * ib_mr_init_attr - Memory region init attributes passed to routine
 *     ib_create_mr.
 * @max_reg_descriptors: max number of registration descriptors that
 *     may be used with registration work requests.
 * @flags: MR creation flags bit mask.
 */
struct ib_mr_init_attr {
	int	    max_reg_descriptors;
	u32	    flags;
};

/**
 * Signature types
 * IB_SIG_TYPE_NONE: Unprotected.
 * IB_SIG_TYPE_T10_DIF: Type T10-DIF
 */
enum ib_signature_type {
	IB_SIG_TYPE_NONE,
	IB_SIG_TYPE_T10_DIF,
};

/**
 * Signature T10-DIF block-guard types
 * IB_T10DIF_CRC: Corresponds to T10-PI mandated CRC checksum rules.
 * IB_T10DIF_CSUM: Corresponds to IP checksum rules.
 */
enum ib_t10_dif_bg_type {
	IB_T10DIF_CRC,
	IB_T10DIF_CSUM
};

/**
 * struct ib_t10_dif_domain - Parameters specific for T10-DIF
 *     domain.
 * @bg_type: T10-DIF block guard type (CRC|CSUM)
 * @pi_interval: protection information interval.
 * @bg: seed of guard computation.
 * @app_tag: application tag of guard block
 * @ref_tag: initial guard block reference tag.
 * @ref_remap: Indicate whether the reftag increments with each block
 * @app_escape: Indicate to skip block check if apptag=0xffff
 * @ref_escape: Indicate to skip block check if reftag=0xffffffff
 * @apptag_check_mask: check bitmask of application tag.
 */
struct ib_t10_dif_domain {
	enum ib_t10_dif_bg_type bg_type;
	u16			pi_interval;
	u16			bg;
	u16			app_tag;
	u32			ref_tag;
	bool			ref_remap;
	bool			app_escape;
	bool			ref_escape;
	u16			apptag_check_mask;
};

/**
 * struct ib_sig_domain - Parameters for signature domain
 * @sig_type: specific signature type
 * @sig: union of all signature domain attributes that may
 *     be used to set domain layout.
 */
struct ib_sig_domain {
	enum ib_signature_type sig_type;
	union {
		struct ib_t10_dif_domain dif;
	} sig;
};

/**
 * struct ib_sig_attrs - Parameters for signature handover operation
 * @check_mask: bitmask for signature byte check (8 bytes)
 * @mem: memory domain layout descriptor.
 * @wire: wire domain layout descriptor.
 */
struct ib_sig_attrs {
	u8			check_mask;
	struct ib_sig_domain	mem;
	struct ib_sig_domain	wire;
};
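
/*
 * Configuration sketch (illustrative only): protecting the wire domain
 * with T10-DIF type 1 (CRC guard, 512-byte interval, incrementing
 * reference tag) while leaving memory unprotected, roughly as a storage
 * initiator might.  The exact values are example assumptions, not
 * requirements of this API.
 *
 *	struct ib_sig_attrs sig_attrs = {
 *		.check_mask = 0xff,
 *		.mem  = { .sig_type = IB_SIG_TYPE_NONE },
 *		.wire = {
 *			.sig_type = IB_SIG_TYPE_T10_DIF,
 *			.sig.dif  = {
 *				.bg_type     = IB_T10DIF_CRC,
 *				.pi_interval = 512,
 *				.ref_tag     = 0,
 *				.ref_remap   = true,
 *			},
 *		},
 *	};
 */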

enum ib_sig_err_type {
	IB_SIG_BAD_GUARD,
	IB_SIG_BAD_REFTAG,
	IB_SIG_BAD_APPTAG,
};

/**
 * struct ib_sig_err - signature error descriptor
 */
struct ib_sig_err {
	enum ib_sig_err_type	err_type;
	u32			expected;
	u32			actual;
	u64			sig_err_offset;
	u32			key;
};

enum ib_mr_status_check {
	IB_MR_CHECK_SIG_STATUS = 1,
};

/**
 * struct ib_mr_status - Memory region status container
 *
 * @fail_status: Bitmask of MR checks status. For each
 *     failed check a corresponding status bit is set.
 * @sig_err: Additional info for IB_MR_CHECK_SIG_STATUS
 *     failure.
 */
struct ib_mr_status {
	u32		    fail_status;
	struct ib_sig_err   sig_err;
};
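
/*
 * Usage sketch (illustrative only): after a signature handover operation
 * completes, a consumer can ask whether the MR observed a signature error.
 * ib_check_mr_status() is declared later in this header.
 *
 *	struct ib_mr_status mr_status;
 *
 *	if (!ib_check_mr_status(mr, IB_MR_CHECK_SIG_STATUS, &mr_status) &&
 *	    (mr_status.fail_status & IB_MR_CHECK_SIG_STATUS))
 *		pr_err("signature error %d at offset %llu\n",
 *		       mr_status.sig_err.err_type,
 *		       mr_status.sig_err.sig_err_offset);
 */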

/**
 * mult_to_ib_rate - Convert a multiple of 2.5 Gbit/sec to an IB rate
 * enum.
 * @mult: multiple to convert.
 */
__attribute_const__ enum ib_rate mult_to_ib_rate(int mult);

struct ib_ah_attr {
	struct ib_global_route	grh;
	u16			dlid;
	u8			sl;
	u8			src_path_bits;
	u8			static_rate;
	u8			ah_flags;
	u8			port_num;
	u8			dmac[ETH_ALEN];
	u16			vlan_id;
};

enum ib_wc_status {
	IB_WC_SUCCESS,
	IB_WC_LOC_LEN_ERR,
	IB_WC_LOC_QP_OP_ERR,
	IB_WC_LOC_EEC_OP_ERR,
	IB_WC_LOC_PROT_ERR,
	IB_WC_WR_FLUSH_ERR,
	IB_WC_MW_BIND_ERR,
	IB_WC_BAD_RESP_ERR,
	IB_WC_LOC_ACCESS_ERR,
	IB_WC_REM_INV_REQ_ERR,
	IB_WC_REM_ACCESS_ERR,
	IB_WC_REM_OP_ERR,
	IB_WC_RETRY_EXC_ERR,
	IB_WC_RNR_RETRY_EXC_ERR,
	IB_WC_LOC_RDD_VIOL_ERR,
	IB_WC_REM_INV_RD_REQ_ERR,
	IB_WC_REM_ABORT_ERR,
	IB_WC_INV_EECN_ERR,
	IB_WC_INV_EEC_STATE_ERR,
	IB_WC_FATAL_ERR,
	IB_WC_RESP_TIMEOUT_ERR,
	IB_WC_GENERAL_ERR
};

__attribute_const__ const char *ib_wc_status_msg(enum ib_wc_status status);

enum ib_wc_opcode {
	IB_WC_SEND,
	IB_WC_RDMA_WRITE,
	IB_WC_RDMA_READ,
	IB_WC_COMP_SWAP,
	IB_WC_FETCH_ADD,
	IB_WC_BIND_MW,
	IB_WC_LSO,
	IB_WC_LOCAL_INV,
	IB_WC_FAST_REG_MR,
	IB_WC_MASKED_COMP_SWAP,
	IB_WC_MASKED_FETCH_ADD,
/*
 * Set value of IB_WC_RECV so consumers can test if a completion is a
 * receive by testing (opcode & IB_WC_RECV).
 */
	IB_WC_RECV			= 1 << 7,
	IB_WC_RECV_RDMA_WITH_IMM
};
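
/*
 * Polling sketch (illustrative only): the IB_WC_RECV bit trick described
 * above lets one branch cover both receive opcodes.  "handle_recv" and
 * "handle_send_comp" are hypothetical consumer helpers; ib_poll_cq() is
 * declared later in this header.
 *
 *	struct ib_wc wc;
 *
 *	while (ib_poll_cq(cq, 1, &wc) > 0) {
 *		if (wc.opcode & IB_WC_RECV)
 *			handle_recv(&wc);
 *		else
 *			handle_send_comp(&wc);
 *	}
 */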

enum ib_wc_flags {
	IB_WC_GRH		= 1,
	IB_WC_WITH_IMM		= (1<<1),
	IB_WC_WITH_INVALIDATE	= (1<<2),
	IB_WC_IP_CSUM_OK	= (1<<3),
	IB_WC_WITH_SMAC		= (1<<4),
	IB_WC_WITH_VLAN		= (1<<5),
};

struct ib_wc {
	u64			wr_id;
	enum ib_wc_status	status;
	enum ib_wc_opcode	opcode;
	u32			vendor_err;
	u32			byte_len;
	struct ib_qp	       *qp;
	union {
		__be32		imm_data;
		u32		invalidate_rkey;
	} ex;
	u32			src_qp;
	int			wc_flags;
	u16			pkey_index;
	u16			slid;
	u8			sl;
	u8			dlid_path_bits;
	u8			port_num;	/* valid only for DR SMPs on switches */
	u8			smac[ETH_ALEN];
	u16			vlan_id;
};

enum ib_cq_notify_flags {
	IB_CQ_SOLICITED			= 1 << 0,
	IB_CQ_NEXT_COMP			= 1 << 1,
	IB_CQ_SOLICITED_MASK		= IB_CQ_SOLICITED | IB_CQ_NEXT_COMP,
	IB_CQ_REPORT_MISSED_EVENTS	= 1 << 2,
};

enum ib_srq_type {
	IB_SRQT_BASIC,
	IB_SRQT_XRC
};

enum ib_srq_attr_mask {
	IB_SRQ_MAX_WR	= 1 << 0,
	IB_SRQ_LIMIT	= 1 << 1,
};

struct ib_srq_attr {
	u32	max_wr;
	u32	max_sge;
	u32	srq_limit;
};

struct ib_srq_init_attr {
	void		      (*event_handler)(struct ib_event *, void *);
	void		       *srq_context;
	struct ib_srq_attr	attr;
	enum ib_srq_type	srq_type;

	union {
		struct {
			struct ib_xrcd *xrcd;
			struct ib_cq   *cq;
		} xrc;
	} ext;
};

struct ib_qp_cap {
	u32	max_send_wr;
	u32	max_recv_wr;
	u32	max_send_sge;
	u32	max_recv_sge;
	u32	max_inline_data;
};

enum ib_sig_type {
	IB_SIGNAL_ALL_WR,
	IB_SIGNAL_REQ_WR
};

enum ib_qp_type {
	/*
	 * IB_QPT_SMI and IB_QPT_GSI have to be the first two entries
	 * here (and in that order) since the MAD layer uses them as
	 * indices into a 2-entry table.
	 */
	IB_QPT_SMI,
	IB_QPT_GSI,

	IB_QPT_RC,
	IB_QPT_UC,
	IB_QPT_UD,
	IB_QPT_RAW_IPV6,
	IB_QPT_RAW_ETHERTYPE,
	IB_QPT_RAW_PACKET = 8,
	IB_QPT_XRC_INI = 9,
	IB_QPT_XRC_TGT,
	IB_QPT_MAX,
	/* Reserve a range for qp types internal to the low level driver.
	 * These qp types will not be visible at the IB core layer, so the
	 * IB_QPT_MAX usages should not be affected in the core layer
	 */
	IB_QPT_RESERVED1 = 0x1000,
	IB_QPT_RESERVED2,
	IB_QPT_RESERVED3,
	IB_QPT_RESERVED4,
	IB_QPT_RESERVED5,
	IB_QPT_RESERVED6,
	IB_QPT_RESERVED7,
	IB_QPT_RESERVED8,
	IB_QPT_RESERVED9,
	IB_QPT_RESERVED10,
};

enum ib_qp_create_flags {
	IB_QP_CREATE_IPOIB_UD_LSO		= 1 << 0,
	IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK	= 1 << 1,
	IB_QP_CREATE_NETIF_QP			= 1 << 5,
	IB_QP_CREATE_SIGNATURE_EN		= 1 << 6,
	IB_QP_CREATE_USE_GFP_NOIO		= 1 << 7,
	/* reserve bits 26-31 for low level drivers' internal use */
	IB_QP_CREATE_RESERVED_START		= 1 << 26,
	IB_QP_CREATE_RESERVED_END		= 1 << 31,
};

/*
 * Note: users may not call ib_close_qp or ib_destroy_qp from the event_handler
 * callback to destroy the passed in QP.
 */

struct ib_qp_init_attr {
	void                  (*event_handler)(struct ib_event *, void *);
	void		       *qp_context;
	struct ib_cq	       *send_cq;
	struct ib_cq	       *recv_cq;
	struct ib_srq	       *srq;
	struct ib_xrcd	       *xrcd;	  /* XRC TGT QPs only */
	struct ib_qp_cap	cap;
	enum ib_sig_type	sq_sig_type;
	enum ib_qp_type		qp_type;
	enum ib_qp_create_flags	create_flags;
	u8			port_num; /* special QP types only */
};
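
/*
 * Creation sketch (illustrative only): filling the attributes above for a
 * small RC QP.  The capacities are arbitrary example values; ib_create_qp()
 * is declared later in this header.
 *
 *	struct ib_qp_init_attr init_attr = {
 *		.send_cq     = send_cq,
 *		.recv_cq     = recv_cq,
 *		.cap	     = {
 *			.max_send_wr  = 64,
 *			.max_recv_wr  = 64,
 *			.max_send_sge = 1,
 *			.max_recv_sge = 1,
 *		},
 *		.sq_sig_type = IB_SIGNAL_REQ_WR,
 *		.qp_type     = IB_QPT_RC,
 *	};
 *	struct ib_qp *qp = ib_create_qp(pd, &init_attr);
 */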

struct ib_qp_open_attr {
	void                  (*event_handler)(struct ib_event *, void *);
	void		       *qp_context;
	u32			qp_num;
	enum ib_qp_type		qp_type;
};

enum ib_rnr_timeout {
	IB_RNR_TIMER_655_36 =  0,
	IB_RNR_TIMER_000_01 =  1,
	IB_RNR_TIMER_000_02 =  2,
	IB_RNR_TIMER_000_03 =  3,
	IB_RNR_TIMER_000_04 =  4,
	IB_RNR_TIMER_000_06 =  5,
	IB_RNR_TIMER_000_08 =  6,
	IB_RNR_TIMER_000_12 =  7,
	IB_RNR_TIMER_000_16 =  8,
	IB_RNR_TIMER_000_24 =  9,
	IB_RNR_TIMER_000_32 = 10,
	IB_RNR_TIMER_000_48 = 11,
	IB_RNR_TIMER_000_64 = 12,
	IB_RNR_TIMER_000_96 = 13,
	IB_RNR_TIMER_001_28 = 14,
	IB_RNR_TIMER_001_92 = 15,
	IB_RNR_TIMER_002_56 = 16,
	IB_RNR_TIMER_003_84 = 17,
	IB_RNR_TIMER_005_12 = 18,
	IB_RNR_TIMER_007_68 = 19,
	IB_RNR_TIMER_010_24 = 20,
	IB_RNR_TIMER_015_36 = 21,
	IB_RNR_TIMER_020_48 = 22,
	IB_RNR_TIMER_030_72 = 23,
	IB_RNR_TIMER_040_96 = 24,
	IB_RNR_TIMER_061_44 = 25,
	IB_RNR_TIMER_081_92 = 26,
	IB_RNR_TIMER_122_88 = 27,
	IB_RNR_TIMER_163_84 = 28,
	IB_RNR_TIMER_245_76 = 29,
	IB_RNR_TIMER_327_68 = 30,
	IB_RNR_TIMER_491_52 = 31
};

enum ib_qp_attr_mask {
	IB_QP_STATE			= 1,
	IB_QP_CUR_STATE			= (1<<1),
	IB_QP_EN_SQD_ASYNC_NOTIFY	= (1<<2),
	IB_QP_ACCESS_FLAGS		= (1<<3),
	IB_QP_PKEY_INDEX		= (1<<4),
	IB_QP_PORT			= (1<<5),
	IB_QP_QKEY			= (1<<6),
	IB_QP_AV			= (1<<7),
	IB_QP_PATH_MTU			= (1<<8),
	IB_QP_TIMEOUT			= (1<<9),
	IB_QP_RETRY_CNT			= (1<<10),
	IB_QP_RNR_RETRY			= (1<<11),
	IB_QP_RQ_PSN			= (1<<12),
	IB_QP_MAX_QP_RD_ATOMIC		= (1<<13),
	IB_QP_ALT_PATH			= (1<<14),
	IB_QP_MIN_RNR_TIMER		= (1<<15),
	IB_QP_SQ_PSN			= (1<<16),
	IB_QP_MAX_DEST_RD_ATOMIC	= (1<<17),
	IB_QP_PATH_MIG_STATE		= (1<<18),
	IB_QP_CAP			= (1<<19),
	IB_QP_DEST_QPN			= (1<<20),
	IB_QP_SMAC			= (1<<21),
	IB_QP_ALT_SMAC			= (1<<22),
	IB_QP_VID			= (1<<23),
	IB_QP_ALT_VID			= (1<<24),
};

enum ib_qp_state {
	IB_QPS_RESET,
	IB_QPS_INIT,
	IB_QPS_RTR,
	IB_QPS_RTS,
	IB_QPS_SQD,
	IB_QPS_SQE,
	IB_QPS_ERR
};

enum ib_mig_state {
	IB_MIG_MIGRATED,
	IB_MIG_REARM,
	IB_MIG_ARMED
};

enum ib_mw_type {
	IB_MW_TYPE_1 = 1,
	IB_MW_TYPE_2 = 2
};

struct ib_qp_attr {
	enum ib_qp_state	qp_state;
	enum ib_qp_state	cur_qp_state;
	enum ib_mtu		path_mtu;
	enum ib_mig_state	path_mig_state;
	u32			qkey;
	u32			rq_psn;
	u32			sq_psn;
	u32			dest_qp_num;
	int			qp_access_flags;
	struct ib_qp_cap	cap;
	struct ib_ah_attr	ah_attr;
	struct ib_ah_attr	alt_ah_attr;
	u16			pkey_index;
	u16			alt_pkey_index;
	u8			en_sqd_async_notify;
	u8			sq_draining;
	u8			max_rd_atomic;
	u8			max_dest_rd_atomic;
	u8			min_rnr_timer;
	u8			port_num;
	u8			timeout;
	u8			retry_cnt;
	u8			rnr_retry;
	u8			alt_port_num;
	u8			alt_timeout;
	u8			smac[ETH_ALEN];
	u8			alt_smac[ETH_ALEN];
	u16			vlan_id;
	u16			alt_vlan_id;
};

enum ib_wr_opcode {
	IB_WR_RDMA_WRITE,
	IB_WR_RDMA_WRITE_WITH_IMM,
	IB_WR_SEND,
	IB_WR_SEND_WITH_IMM,
	IB_WR_RDMA_READ,
	IB_WR_ATOMIC_CMP_AND_SWP,
	IB_WR_ATOMIC_FETCH_AND_ADD,
	IB_WR_LSO,
	IB_WR_SEND_WITH_INV,
	IB_WR_RDMA_READ_WITH_INV,
	IB_WR_LOCAL_INV,
	IB_WR_FAST_REG_MR,
	IB_WR_MASKED_ATOMIC_CMP_AND_SWP,
	IB_WR_MASKED_ATOMIC_FETCH_AND_ADD,
	IB_WR_BIND_MW,
	IB_WR_REG_SIG_MR,
	/* reserve values for low level drivers' internal use.
	 * These values will not be used at all in the ib core layer.
	 */
	IB_WR_RESERVED1 = 0xf0,
	IB_WR_RESERVED2,
	IB_WR_RESERVED3,
	IB_WR_RESERVED4,
	IB_WR_RESERVED5,
	IB_WR_RESERVED6,
	IB_WR_RESERVED7,
	IB_WR_RESERVED8,
	IB_WR_RESERVED9,
	IB_WR_RESERVED10,
};

enum ib_send_flags {
	IB_SEND_FENCE		= 1,
	IB_SEND_SIGNALED	= (1<<1),
	IB_SEND_SOLICITED	= (1<<2),
	IB_SEND_INLINE		= (1<<3),
	IB_SEND_IP_CSUM		= (1<<4),

	/* reserve bits 26-31 for low level drivers' internal use */
	IB_SEND_RESERVED_START	= (1 << 26),
	IB_SEND_RESERVED_END	= (1 << 31),
};

struct ib_sge {
	u64	addr;
	u32	length;
	u32	lkey;
};

struct ib_fast_reg_page_list {
	struct ib_device       *device;
	u64		       *page_list;
	unsigned int		max_page_list_len;
};

/**
 * struct ib_mw_bind_info - Parameters for a memory window bind operation.
 * @mr: A memory region to bind the memory window to.
 * @addr: The address where the memory window should begin.
 * @length: The length of the memory window, in bytes.
 * @mw_access_flags: Access flags from enum ib_access_flags for the window.
 *
 * This struct contains the shared parameters for type 1 and type 2
 * memory window bind operations.
 */
struct ib_mw_bind_info {
	struct ib_mr   *mr;
	u64		addr;
	u64		length;
	int		mw_access_flags;
};

struct ib_send_wr {
	struct ib_send_wr      *next;
	u64			wr_id;
	struct ib_sge	       *sg_list;
	int			num_sge;
	enum ib_wr_opcode	opcode;
	int			send_flags;
	union {
		__be32		imm_data;
		u32		invalidate_rkey;
	} ex;
	union {
		struct {
			u64	remote_addr;
			u32	rkey;
		} rdma;
		struct {
			u64	remote_addr;
			u64	compare_add;
			u64	swap;
			u64	compare_add_mask;
			u64	swap_mask;
			u32	rkey;
		} atomic;
		struct {
			struct ib_ah *ah;
			void   *header;
			int	hlen;
			int	mss;
			u32	remote_qpn;
			u32	remote_qkey;
			u16	pkey_index; /* valid for GSI only */
			u8	port_num;   /* valid for DR SMPs on switch only */
		} ud;
		struct {
			u64				iova_start;
			struct ib_fast_reg_page_list   *page_list;
			unsigned int			page_shift;
			unsigned int			page_list_len;
			u32				length;
			int				access_flags;
			u32				rkey;
		} fast_reg;
		struct {
			struct ib_mw	       *mw;
			/* The new rkey for the memory window. */
			u32			rkey;
			struct ib_mw_bind_info	bind_info;
		} bind_mw;
		struct {
			struct ib_sig_attrs    *sig_attrs;
			struct ib_mr	       *sig_mr;
			int			access_flags;
			struct ib_sge	       *prot;
		} sig_handover;
	} wr;
	u32			xrc_remote_srq_num;	/* XRC TGT QPs only */
};
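
/*
 * Posting sketch (illustrative only): a one-segment signaled RDMA WRITE
 * using the structures above.  "dma_addr", "remote_addr" and "rkey" are
 * assumed to come from prior registration and connection setup;
 * ib_post_send() is declared later in this header.
 *
 *	struct ib_sge sge = {
 *		.addr	= dma_addr,
 *		.length	= len,
 *		.lkey	= mr->lkey,
 *	};
 *	struct ib_send_wr wr = {
 *		.wr_id	    = 1,
 *		.sg_list    = &sge,
 *		.num_sge    = 1,
 *		.opcode	    = IB_WR_RDMA_WRITE,
 *		.send_flags = IB_SEND_SIGNALED,
 *		.wr.rdma    = {
 *			.remote_addr = remote_addr,
 *			.rkey	     = rkey,
 *		},
 *	};
 *	struct ib_send_wr *bad_wr;
 *	int ret = ib_post_send(qp, &wr, &bad_wr);
 */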

struct ib_recv_wr {
	struct ib_recv_wr      *next;
	u64			wr_id;
	struct ib_sge	       *sg_list;
	int			num_sge;
};

enum ib_access_flags {
	IB_ACCESS_LOCAL_WRITE	= 1,
	IB_ACCESS_REMOTE_WRITE	= (1<<1),
	IB_ACCESS_REMOTE_READ	= (1<<2),
	IB_ACCESS_REMOTE_ATOMIC	= (1<<3),
	IB_ACCESS_MW_BIND	= (1<<4),
	IB_ZERO_BASED		= (1<<5),
	IB_ACCESS_ON_DEMAND	= (1<<6),
};

struct ib_phys_buf {
	u64	addr;
	u64	size;
};

struct ib_mr_attr {
	struct ib_pd   *pd;
	u64		device_virt_addr;
	u64		size;
	int		mr_access_flags;
	u32		lkey;
	u32		rkey;
};

enum ib_mr_rereg_flags {
	IB_MR_REREG_TRANS	= 1,
	IB_MR_REREG_PD		= (1<<1),
	IB_MR_REREG_ACCESS	= (1<<2),
	IB_MR_REREG_SUPPORTED	= ((IB_MR_REREG_ACCESS << 1) - 1)
};

/**
 * struct ib_mw_bind - Parameters for a type 1 memory window bind operation.
 * @wr_id:      Work request id.
 * @send_flags: Flags from ib_send_flags enum.
 * @bind_info:  More parameters of the bind operation.
 */
struct ib_mw_bind {
	u64			wr_id;
	int			send_flags;
	struct ib_mw_bind_info	bind_info;
};

struct ib_fmr_attr {
	int	max_pages;
	int	max_maps;
	u8	page_shift;
};

struct ib_umem;

struct ib_ucontext {
	struct ib_device       *device;
	struct list_head	pd_list;
	struct list_head	mr_list;
	struct list_head	mw_list;
	struct list_head	cq_list;
	struct list_head	qp_list;
	struct list_head	srq_list;
	struct list_head	ah_list;
	struct list_head	xrcd_list;
	struct list_head	rule_list;
	int			closing;

	struct pid	       *tgid;
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	struct rb_root		umem_tree;
	/*
	 * Protects .umem_tree, as well as odp_mrs_count and mmu
	 * notifier registration.
	 */
	struct rw_semaphore	umem_rwsem;
	void (*invalidate_range)(struct ib_umem *umem,
				 unsigned long start, unsigned long end);

	struct mmu_notifier	mn;
	atomic_t		notifier_count;
	/* A list of umems that don't have private mmu notifier counters yet. */
	struct list_head	no_private_counters;
	int			odp_mrs_count;
#endif
};

struct ib_uobject {
	u64			user_handle;	/* handle given to us by userspace */
	struct ib_ucontext     *context;	/* associated user context */
	void		       *object;		/* containing object */
	struct list_head	list;		/* link to context's list */
	int			id;		/* index into kernel idr */
	struct kref		ref;
	struct rw_semaphore	mutex;		/* protects .live */
	int			live;
};

struct ib_udata {
	const void __user *inbuf;
	void __user *outbuf;
	size_t       inlen;
	size_t       outlen;
};
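
/*
 * Usage sketch (illustrative only): a driver's verb implementation copies
 * its private command structure out of the udata above.
 * "my_create_cq_cmd" is a hypothetical driver ABI struct;
 * ib_copy_from_udata() is defined later in this header.
 *
 *	struct my_create_cq_cmd cmd;
 *
 *	if (ib_copy_from_udata(&cmd, udata, sizeof(cmd)))
 *		return ERR_PTR(-EFAULT);
 */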

struct ib_pd {
	struct ib_device       *device;
	struct ib_uobject      *uobject;
	atomic_t		usecnt; /* count all resources */
};

struct ib_xrcd {
	struct ib_device       *device;
	atomic_t		usecnt; /* count all exposed resources */
	struct inode	       *inode;

	struct mutex		tgt_qp_mutex;
	struct list_head	tgt_qp_list;
};

struct ib_ah {
	struct ib_device	*device;
	struct ib_pd		*pd;
	struct ib_uobject	*uobject;
};

typedef void (*ib_comp_handler)(struct ib_cq *cq, void *cq_context);

struct ib_cq {
	struct ib_device       *device;
	struct ib_uobject      *uobject;
	ib_comp_handler   	comp_handler;
	void                  (*event_handler)(struct ib_event *, void *);
	void                   *cq_context;
	int               	cqe;
	atomic_t          	usecnt; /* count number of work queues */
};

struct ib_srq {
	struct ib_device       *device;
	struct ib_pd	       *pd;
	struct ib_uobject      *uobject;
	void		      (*event_handler)(struct ib_event *, void *);
	void		       *srq_context;
	enum ib_srq_type	srq_type;
	atomic_t		usecnt;

	union {
		struct {
			struct ib_xrcd *xrcd;
			struct ib_cq   *cq;
			u32		srq_num;
		} xrc;
	} ext;
};

struct ib_qp {
	struct ib_device       *device;
	struct ib_pd	       *pd;
	struct ib_cq	       *send_cq;
	struct ib_cq	       *recv_cq;
	struct ib_srq	       *srq;
	struct ib_xrcd	       *xrcd; /* XRC TGT QPs only */
	struct list_head	xrcd_list;
	/* count times opened, mcast attaches, flow attaches */
	atomic_t		usecnt;
	struct list_head	open_list;
	struct ib_qp           *real_qp;
	struct ib_uobject      *uobject;
	void                  (*event_handler)(struct ib_event *, void *);
	void		       *qp_context;
	u32			qp_num;
	enum ib_qp_type		qp_type;
};

struct ib_mr {
	struct ib_device  *device;
	struct ib_pd	  *pd;
	struct ib_uobject *uobject;
	u32		   lkey;
	u32		   rkey;
	atomic_t	   usecnt; /* count number of MWs */
};

struct ib_mw {
	struct ib_device	*device;
	struct ib_pd		*pd;
	struct ib_uobject	*uobject;
	u32			rkey;
	enum ib_mw_type         type;
};

struct ib_fmr {
	struct ib_device	*device;
	struct ib_pd		*pd;
	struct list_head	list;
	u32			lkey;
	u32			rkey;
};

/* Supported steering options */
enum ib_flow_attr_type {
	/* steering according to rule specifications */
	IB_FLOW_ATTR_NORMAL		= 0x0,
	/* default unicast and multicast rule -
	 * receive all Eth traffic which isn't steered to any QP
	 */
	IB_FLOW_ATTR_ALL_DEFAULT	= 0x1,
	/* default multicast rule -
	 * receive all Eth multicast traffic which isn't steered to any QP
	 */
	IB_FLOW_ATTR_MC_DEFAULT		= 0x2,
	/* sniffer rule - receive all port traffic */
	IB_FLOW_ATTR_SNIFFER		= 0x3
};

/* Supported steering header types */
enum ib_flow_spec_type {
	/* L2 headers*/
	IB_FLOW_SPEC_ETH	= 0x20,
	IB_FLOW_SPEC_IB		= 0x22,
	/* L3 header*/
	IB_FLOW_SPEC_IPV4	= 0x30,
	/* L4 headers*/
	IB_FLOW_SPEC_TCP	= 0x40,
	IB_FLOW_SPEC_UDP	= 0x41
};
#define IB_FLOW_SPEC_LAYER_MASK	0xF0
#define IB_FLOW_SPEC_SUPPORT_LAYERS 4

/* Flow steering rule priority is set according to its domain.
 * Lower domain value means higher priority.
 */
enum ib_flow_domain {
	IB_FLOW_DOMAIN_USER,
	IB_FLOW_DOMAIN_ETHTOOL,
	IB_FLOW_DOMAIN_RFS,
	IB_FLOW_DOMAIN_NIC,
	IB_FLOW_DOMAIN_NUM /* Must be last */
};

struct ib_flow_eth_filter {
	u8	dst_mac[6];
	u8	src_mac[6];
	__be16	ether_type;
	__be16	vlan_tag;
};

struct ib_flow_spec_eth {
	enum ib_flow_spec_type	  type;
	u16			  size;
	struct ib_flow_eth_filter val;
	struct ib_flow_eth_filter mask;
};

struct ib_flow_ib_filter {
	__be16 dlid;
	__u8   sl;
};

struct ib_flow_spec_ib {
	enum ib_flow_spec_type	 type;
	u16			 size;
	struct ib_flow_ib_filter val;
	struct ib_flow_ib_filter mask;
};

struct ib_flow_ipv4_filter {
	__be32	src_ip;
	__be32	dst_ip;
};

struct ib_flow_spec_ipv4 {
	enum ib_flow_spec_type	   type;
	u16			   size;
	struct ib_flow_ipv4_filter val;
	struct ib_flow_ipv4_filter mask;
};

struct ib_flow_tcp_udp_filter {
	__be16	dst_port;
	__be16	src_port;
};

struct ib_flow_spec_tcp_udp {
	enum ib_flow_spec_type	      type;
	u16			      size;
	struct ib_flow_tcp_udp_filter val;
	struct ib_flow_tcp_udp_filter mask;
};

union ib_flow_spec {
	struct {
		enum ib_flow_spec_type	type;
		u16			size;
	};
	struct ib_flow_spec_eth		eth;
	struct ib_flow_spec_ib		ib;
	struct ib_flow_spec_ipv4        ipv4;
	struct ib_flow_spec_tcp_udp	tcp_udp;
};

struct ib_flow_attr {
	enum ib_flow_attr_type type;
	u16	     size;
	u16	     priority;
	u32	     flags;
	u8	     num_of_specs;
	u8	     port;
	/* Following are the optional layers according to user request
	 * struct ib_flow_spec_xxx
	 * struct ib_flow_spec_yyy
	 */
};
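
/*
 * Construction sketch (illustrative only): a flow attribute followed in
 * memory by a single Ethernet spec matching any frame (empty mask) on
 * port 1.  The wrapper struct is a hypothetical way to get the required
 * contiguous layout; ib_create_flow() is declared later in this header.
 *
 *	struct {
 *		struct ib_flow_attr	attr;
 *		struct ib_flow_spec_eth	eth;
 *	} rule = {
 *		.attr = {
 *			.type	      = IB_FLOW_ATTR_NORMAL,
 *			.size	      = sizeof(rule),
 *			.num_of_specs = 1,
 *			.port	      = 1,
 *		},
 *		.eth = {
 *			.type = IB_FLOW_SPEC_ETH,
 *			.size = sizeof(struct ib_flow_spec_eth),
 *		},
 *	};
 *	struct ib_flow *flow = ib_create_flow(qp, &rule.attr,
 *					      IB_FLOW_DOMAIN_USER);
 */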

struct ib_flow {
	struct ib_qp		*qp;
	struct ib_uobject	*uobject;
};

struct ib_mad;
struct ib_grh;

enum ib_process_mad_flags {
	IB_MAD_IGNORE_MKEY	= 1,
	IB_MAD_IGNORE_BKEY	= 2,
	IB_MAD_IGNORE_ALL	= IB_MAD_IGNORE_MKEY | IB_MAD_IGNORE_BKEY
};

enum ib_mad_result {
	IB_MAD_RESULT_FAILURE  = 0,      /* (!SUCCESS is the important flag) */
	IB_MAD_RESULT_SUCCESS  = 1 << 0, /* MAD was successfully processed   */
	IB_MAD_RESULT_REPLY    = 1 << 1, /* Reply packet needs to be sent    */
	IB_MAD_RESULT_CONSUMED = 1 << 2  /* Packet consumed: stop processing */
};

#define IB_DEVICE_NAME_MAX 64

struct ib_cache {
	rwlock_t                lock;
	struct ib_event_handler event_handler;
	struct ib_pkey_cache  **pkey_cache;
	struct ib_gid_cache   **gid_cache;
	u8                     *lmc_cache;
};

struct ib_dma_mapping_ops {
	int		(*mapping_error)(struct ib_device *dev,
					 u64 dma_addr);
	u64		(*map_single)(struct ib_device *dev,
				      void *ptr, size_t size,
				      enum dma_data_direction direction);
	void		(*unmap_single)(struct ib_device *dev,
					u64 addr, size_t size,
					enum dma_data_direction direction);
	u64		(*map_page)(struct ib_device *dev,
				    struct page *page, unsigned long offset,
				    size_t size,
				    enum dma_data_direction direction);
	void		(*unmap_page)(struct ib_device *dev,
				      u64 addr, size_t size,
				      enum dma_data_direction direction);
	int		(*map_sg)(struct ib_device *dev,
				  struct scatterlist *sg, int nents,
				  enum dma_data_direction direction);
	void		(*unmap_sg)(struct ib_device *dev,
				    struct scatterlist *sg, int nents,
				    enum dma_data_direction direction);
	void		(*sync_single_for_cpu)(struct ib_device *dev,
					       u64 dma_handle,
					       size_t size,
					       enum dma_data_direction dir);
	void		(*sync_single_for_device)(struct ib_device *dev,
						  u64 dma_handle,
						  size_t size,
						  enum dma_data_direction dir);
	void		*(*alloc_coherent)(struct ib_device *dev,
					   size_t size,
					   u64 *dma_handle,
					   gfp_t flag);
	void		(*free_coherent)(struct ib_device *dev,
					 size_t size, void *cpu_addr,
					 u64 dma_handle);
};
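
/*
 * Usage sketch (illustrative only): consumers do not call these ops
 * directly; the ib_dma_*() wrappers defined later in this header dispatch
 * to dev->dma_ops when a device provides one and fall back to the generic
 * DMA API otherwise.
 *
 *	u64 dma_addr = ib_dma_map_single(dev, buf, size, DMA_TO_DEVICE);
 *
 *	if (ib_dma_mapping_error(dev, dma_addr))
 *		return -ENOMEM;
 */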

struct iw_cm_verbs;

struct ib_port_immutable {
	int			pkey_tbl_len;
	int			gid_tbl_len;
	u32			core_cap_flags;
};
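
/*
 * Driver sketch (illustrative only, modeled loosely on in-tree drivers):
 * an IB HCA driver fills the immutable port data using the RDMA_CORE_*
 * capability bits defined earlier.  "my_get_port_immutable" is a
 * hypothetical name; ib_query_port() is declared later in this header.
 *
 *	static int my_get_port_immutable(struct ib_device *ibdev, u8 port_num,
 *					 struct ib_port_immutable *immutable)
 *	{
 *		struct ib_port_attr attr;
 *		int err = ib_query_port(ibdev, port_num, &attr);
 *
 *		if (err)
 *			return err;
 *		immutable->pkey_tbl_len   = attr.pkey_tbl_len;
 *		immutable->gid_tbl_len    = attr.gid_tbl_len;
 *		immutable->core_cap_flags = RDMA_CORE_PORT_IBA_IB;
 *		return 0;
 *	}
 */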

struct ib_device {
	struct device                *dma_device;

	char                          name[IB_DEVICE_NAME_MAX];

	struct list_head              event_handler_list;
	spinlock_t                    event_handler_lock;

	spinlock_t                    client_data_lock;
	struct list_head              core_list;
	struct list_head              client_data_list;

	struct ib_cache               cache;
	/**
	 * port_immutable is indexed by port number
	 */
	struct ib_port_immutable     *port_immutable;

	int			      num_comp_vectors;

	struct iw_cm_verbs	     *iwcm;

	int		           (*get_protocol_stats)(struct ib_device *device,
							 union rdma_protocol_stats *stats);
	int		           (*query_device)(struct ib_device *device,
						   struct ib_device_attr *device_attr);
	int		           (*query_port)(struct ib_device *device,
						 u8 port_num,
						 struct ib_port_attr *port_attr);
	enum rdma_link_layer	   (*get_link_layer)(struct ib_device *device,
						     u8 port_num);
	int		           (*query_gid)(struct ib_device *device,
						u8 port_num, int index,
						union ib_gid *gid);
	int		           (*query_pkey)(struct ib_device *device,
						 u8 port_num, u16 index, u16 *pkey);
	int		           (*modify_device)(struct ib_device *device,
						    int device_modify_mask,
						    struct ib_device_modify *device_modify);
	int		           (*modify_port)(struct ib_device *device,
						  u8 port_num, int port_modify_mask,
						  struct ib_port_modify *port_modify);
	struct ib_ucontext *	   (*alloc_ucontext)(struct ib_device *device,
						     struct ib_udata *udata);
	int		           (*dealloc_ucontext)(struct ib_ucontext *context);
	int		           (*mmap)(struct ib_ucontext *context,
					   struct vm_area_struct *vma);
	struct ib_pd *		   (*alloc_pd)(struct ib_device *device,
					       struct ib_ucontext *context,
					       struct ib_udata *udata);
	int		           (*dealloc_pd)(struct ib_pd *pd);
	struct ib_ah *		   (*create_ah)(struct ib_pd *pd,
						struct ib_ah_attr *ah_attr);
	int		           (*modify_ah)(struct ib_ah *ah,
						struct ib_ah_attr *ah_attr);
	int		           (*query_ah)(struct ib_ah *ah,
					       struct ib_ah_attr *ah_attr);
	int		           (*destroy_ah)(struct ib_ah *ah);
	struct ib_srq *		   (*create_srq)(struct ib_pd *pd,
						 struct ib_srq_init_attr *srq_init_attr,
						 struct ib_udata *udata);
	int		           (*modify_srq)(struct ib_srq *srq,
						 struct ib_srq_attr *srq_attr,
						 enum ib_srq_attr_mask srq_attr_mask,
						 struct ib_udata *udata);
	int		           (*query_srq)(struct ib_srq *srq,
						struct ib_srq_attr *srq_attr);
	int		           (*destroy_srq)(struct ib_srq *srq);
	int		           (*post_srq_recv)(struct ib_srq *srq,
						    struct ib_recv_wr *recv_wr,
						    struct ib_recv_wr **bad_recv_wr);
	struct ib_qp *		   (*create_qp)(struct ib_pd *pd,
						struct ib_qp_init_attr *qp_init_attr,
						struct ib_udata *udata);
	int		           (*modify_qp)(struct ib_qp *qp,
						struct ib_qp_attr *qp_attr,
						int qp_attr_mask,
						struct ib_udata *udata);
	int		           (*query_qp)(struct ib_qp *qp,
					       struct ib_qp_attr *qp_attr,
					       int qp_attr_mask,
					       struct ib_qp_init_attr *qp_init_attr);
	int		           (*destroy_qp)(struct ib_qp *qp);
	int		           (*post_send)(struct ib_qp *qp,
						struct ib_send_wr *send_wr,
						struct ib_send_wr **bad_send_wr);
	int		           (*post_recv)(struct ib_qp *qp,
						struct ib_recv_wr *recv_wr,
						struct ib_recv_wr **bad_recv_wr);
	struct ib_cq *		   (*create_cq)(struct ib_device *device, int cqe,
						int comp_vector,
						struct ib_ucontext *context,
						struct ib_udata *udata);
	int		           (*modify_cq)(struct ib_cq *cq, u16 cq_count,
						u16 cq_period);
	int		           (*destroy_cq)(struct ib_cq *cq);
	int		           (*resize_cq)(struct ib_cq *cq, int cqe,
						struct ib_udata *udata);
	int		           (*poll_cq)(struct ib_cq *cq, int num_entries,
					      struct ib_wc *wc);
	int		           (*peek_cq)(struct ib_cq *cq, int wc_cnt);
	int		           (*req_notify_cq)(struct ib_cq *cq,
						    enum ib_cq_notify_flags flags);
	int		           (*req_ncomp_notif)(struct ib_cq *cq,
						      int wc_cnt);
	struct ib_mr *		   (*get_dma_mr)(struct ib_pd *pd,
						 int mr_access_flags);
	struct ib_mr *		   (*reg_phys_mr)(struct ib_pd *pd,
						  struct ib_phys_buf *phys_buf_array,
						  int num_phys_buf,
						  int mr_access_flags,
						  u64 *iova_start);
	struct ib_mr *		   (*reg_user_mr)(struct ib_pd *pd,
						  u64 start, u64 length,
						  u64 virt_addr,
						  int mr_access_flags,
						  struct ib_udata *udata);
	int			   (*rereg_user_mr)(struct ib_mr *mr,
						    int flags,
						    u64 start, u64 length,
						    u64 virt_addr,
						    int mr_access_flags,
						    struct ib_pd *pd,
						    struct ib_udata *udata);
	int		           (*query_mr)(struct ib_mr *mr,
					       struct ib_mr_attr *mr_attr);
	int		           (*dereg_mr)(struct ib_mr *mr);
	int		           (*destroy_mr)(struct ib_mr *mr);
	struct ib_mr *		   (*create_mr)(struct ib_pd *pd,
						struct ib_mr_init_attr *mr_init_attr);
	struct ib_mr *		   (*alloc_fast_reg_mr)(struct ib_pd *pd,
							int max_page_list_len);
	struct ib_fast_reg_page_list * (*alloc_fast_reg_page_list)(struct ib_device *device,
								   int page_list_len);
	void			   (*free_fast_reg_page_list)(struct ib_fast_reg_page_list *page_list);
	int		           (*rereg_phys_mr)(struct ib_mr *mr,
						    int mr_rereg_mask,
						    struct ib_pd *pd,
						    struct ib_phys_buf *phys_buf_array,
						    int num_phys_buf,
						    int mr_access_flags,
						    u64 *iova_start);
	struct ib_mw *		   (*alloc_mw)(struct ib_pd *pd,
					       enum ib_mw_type type);
	int		           (*bind_mw)(struct ib_qp *qp,
					      struct ib_mw *mw,
					      struct ib_mw_bind *mw_bind);
	int		           (*dealloc_mw)(struct ib_mw *mw);
	struct ib_fmr *	           (*alloc_fmr)(struct ib_pd *pd,
						int mr_access_flags,
						struct ib_fmr_attr *fmr_attr);
	int		           (*map_phys_fmr)(struct ib_fmr *fmr,
						   u64 *page_list, int list_len,
						   u64 iova);
	int		           (*unmap_fmr)(struct list_head *fmr_list);
	int		           (*dealloc_fmr)(struct ib_fmr *fmr);
	int		           (*attach_mcast)(struct ib_qp *qp,
						   union ib_gid *gid,
						   u16 lid);
	int		           (*detach_mcast)(struct ib_qp *qp,
						   union ib_gid *gid,
						   u16 lid);
	int		           (*process_mad)(struct ib_device *device,
						  int process_mad_flags,
						  u8 port_num,
						  const struct ib_wc *in_wc,
						  const struct ib_grh *in_grh,
						  const struct ib_mad *in_mad,
						  struct ib_mad *out_mad);
	struct ib_xrcd *	   (*alloc_xrcd)(struct ib_device *device,
						 struct ib_ucontext *ucontext,
						 struct ib_udata *udata);
	int			   (*dealloc_xrcd)(struct ib_xrcd *xrcd);
	struct ib_flow *	   (*create_flow)(struct ib_qp *qp,
						  struct ib_flow_attr *flow_attr,
						  int domain);
	int			   (*destroy_flow)(struct ib_flow *flow_id);
	int			   (*check_mr_status)(struct ib_mr *mr, u32 check_mask,
						      struct ib_mr_status *mr_status);

	struct ib_dma_mapping_ops   *dma_ops;

	struct module               *owner;
	struct device                dev;
	struct kobject              *ports_parent;
	struct list_head             port_list;

	enum {
		IB_DEV_UNINITIALIZED,
		IB_DEV_REGISTERED,
		IB_DEV_UNREGISTERED
	} reg_state;

	int			     uverbs_abi_ver;
	u64			     uverbs_cmd_mask;
	u64			     uverbs_ex_cmd_mask;

	char			     node_desc[64];
	__be64			     node_guid;
	u32			     local_dma_lkey;
	u8			     node_type;
1730 u8 phys_port_cnt;
Ira Weiny77386132015-05-13 20:02:58 -04001731
1732 /**
1733 * The following mandatory functions are used only at device
1734 * registration. Keep functions such as these at the end of this
1735 * structure to avoid cache line misses when accessing struct ib_device
1736 * in fast paths.
1737 */
1738 int (*get_port_immutable)(struct ib_device *, u8, struct ib_port_immutable *);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001739};
1740
1741struct ib_client {
1742 char *name;
1743 void (*add) (struct ib_device *);
1744 void (*remove)(struct ib_device *);
1745
1746 struct list_head list;
1747};
1748
1749struct ib_device *ib_alloc_device(size_t size);
1750void ib_dealloc_device(struct ib_device *device);
1751
Ralph Campbell9a6edb62010-05-06 17:03:25 -07001752int ib_register_device(struct ib_device *device,
1753 int (*port_callback)(struct ib_device *,
1754 u8, struct kobject *));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001755void ib_unregister_device(struct ib_device *device);
1756
1757int ib_register_client (struct ib_client *client);
1758void ib_unregister_client(struct ib_client *client);
1759
1760void *ib_get_client_data(struct ib_device *device, struct ib_client *client);
1761void ib_set_client_data(struct ib_device *device, struct ib_client *client,
1762 void *data);
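
/*
 * Illustrative sketch (not part of the original header): a minimal
 * ib_client.  The core invokes ->add once for every device already
 * registered and for each device registered later; ->remove runs on
 * device or client unregistration.  "my_client", my_setup() and
 * my_teardown() are hypothetical names.
 *
 *	static struct ib_client my_client;
 *
 *	static void my_add(struct ib_device *device)
 *	{
 *		ib_set_client_data(device, &my_client, my_setup(device));
 *	}
 *
 *	static void my_remove(struct ib_device *device)
 *	{
 *		my_teardown(ib_get_client_data(device, &my_client));
 *	}
 *
 *	static struct ib_client my_client = {
 *		.name   = "my_client",
 *		.add    = my_add,
 *		.remove = my_remove,
 *	};
 *
 * ib_register_client(&my_client) then makes the client visible to all
 * current and future devices.
 */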

static inline int ib_copy_from_udata(void *dest, struct ib_udata *udata, size_t len)
{
	return copy_from_user(dest, udata->inbuf, len) ? -EFAULT : 0;
}

static inline int ib_copy_to_udata(struct ib_udata *udata, void *src, size_t len)
{
	return copy_to_user(udata->outbuf, src, len) ? -EFAULT : 0;
}
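
/*
 * Illustrative sketch (not part of the original header): a driver verb
 * would typically use the two helpers above to exchange ABI structures
 * with userspace, e.g. in its create_cq method.  "my_create_cq_cmd" and
 * "my_create_cq_resp" are hypothetical driver ABI structs.
 *
 *	struct my_create_cq_cmd  cmd;
 *	struct my_create_cq_resp resp;
 *
 *	if (ib_copy_from_udata(&cmd, udata, sizeof(cmd)))
 *		return ERR_PTR(-EFAULT);
 *
 *	... create the CQ, fill in resp ...
 *
 *	if (ib_copy_to_udata(udata, &resp, sizeof(resp)))
 *		return ERR_PTR(-EFAULT);
 */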

/**
 * ib_modify_qp_is_ok - Check that the supplied attribute mask
 * contains all required attributes and no attributes not allowed for
 * the given QP state transition.
 * @cur_state: Current QP state
 * @next_state: Next QP state
 * @type: QP type
 * @mask: Mask of supplied QP attributes
 * @ll: link layer of port
 *
 * This function is a helper function that a low-level driver's
 * modify_qp method can use to validate the consumer's input.  It
 * checks that cur_state and next_state are valid QP states, that a
 * transition from cur_state to next_state is allowed by the IB spec,
 * and that the attribute mask supplied is allowed for the transition.
 */
int ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
		       enum ib_qp_type type, enum ib_qp_attr_mask mask,
		       enum rdma_link_layer ll);

int ib_register_event_handler  (struct ib_event_handler *event_handler);
int ib_unregister_event_handler(struct ib_event_handler *event_handler);
void ib_dispatch_event(struct ib_event *event);

int ib_query_device(struct ib_device *device,
		    struct ib_device_attr *device_attr);

int ib_query_port(struct ib_device *device,
		  u8 port_num, struct ib_port_attr *port_attr);

enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device,
					      u8 port_num);

/**
 * rdma_start_port - Return the first valid port number for the device
 * specified
 *
 * @device: Device to be checked
 *
 * Return start port number
 */
static inline u8 rdma_start_port(const struct ib_device *device)
{
	return (device->node_type == RDMA_NODE_IB_SWITCH) ? 0 : 1;
}

/**
 * rdma_end_port - Return the last valid port number for the device
 * specified
 *
 * @device: Device to be checked
 *
 * Return last port number
 */
static inline u8 rdma_end_port(const struct ib_device *device)
{
	return (device->node_type == RDMA_NODE_IB_SWITCH) ?
		0 : device->phys_port_cnt;
}
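
/*
 * Illustrative sketch (not part of the original header): the two
 * helpers above bound the usual per-port loop, which is inclusive on
 * both ends (switches expose the single management port 0).
 * setup_port() is hypothetical.
 *
 *	u8 port;
 *
 *	for (port = rdma_start_port(device);
 *	     port <= rdma_end_port(device); port++)
 *		setup_port(device, port);
 */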

static inline bool rdma_protocol_ib(const struct ib_device *device, u8 port_num)
{
	return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_IB;
}

static inline bool rdma_protocol_roce(const struct ib_device *device, u8 port_num)
{
	return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_ROCE;
}

static inline bool rdma_protocol_iwarp(const struct ib_device *device, u8 port_num)
{
	return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_IWARP;
}

static inline bool rdma_ib_or_roce(const struct ib_device *device, u8 port_num)
{
	return device->port_immutable[port_num].core_cap_flags &
		(RDMA_CORE_CAP_PROT_IB | RDMA_CORE_CAP_PROT_ROCE);
}

/**
 * rdma_cap_ib_mad - Check if the port of a device supports InfiniBand
 * Management Datagrams.
 * @device: Device to check
 * @port_num: Port number to check
 *
 * Management Datagrams (MAD) are a required part of the InfiniBand
 * specification and are supported on all InfiniBand devices.  A slightly
 * extended version is also supported on OPA interfaces.
 *
 * Return: true if the port supports sending/receiving of MAD packets.
 */
static inline bool rdma_cap_ib_mad(const struct ib_device *device, u8 port_num)
{
	return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_MAD;
}

/**
 * rdma_cap_ib_smi - Check if the port of a device provides an InfiniBand
 * Subnet Management Agent (SMA) on the Subnet Management Interface (SMI).
 * @device: Device to check
 * @port_num: Port number to check
 *
 * Each InfiniBand node is required to provide a Subnet Management Agent
 * that the subnet manager can access.  Prior to the fabric being fully
 * configured by the subnet manager, the SMA is accessed via a well-known
 * interface called the Subnet Management Interface (SMI).  This interface
 * uses directed route packets to communicate with the SM to get around the
 * chicken and egg problem of the SM needing to know what's on the fabric
 * in order to configure the fabric, and needing to configure the fabric in
 * order to send packets to the devices on the fabric.  These directed
 * route packets do not need the fabric fully configured in order to reach
 * their destination.  The SMI is the only method allowed to send
 * directed route packets on an InfiniBand fabric.
 *
 * Return: true if the port provides an SMI.
 */
static inline bool rdma_cap_ib_smi(const struct ib_device *device, u8 port_num)
{
	return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_SMI;
}

/**
 * rdma_cap_ib_cm - Check if the port of a device supports the InfiniBand
 * Communication Manager.
 * @device: Device to check
 * @port_num: Port number to check
 *
 * The InfiniBand Communication Manager is one of many pre-defined General
 * Service Agents (GSA) that are accessed via the General Service
 * Interface (GSI).  Its role is to facilitate establishment of connections
 * between nodes as well as other management related tasks for established
 * connections.
 *
 * Return: true if the port supports an IB CM (this does not guarantee that
 * a CM is actually running however).
 */
static inline bool rdma_cap_ib_cm(const struct ib_device *device, u8 port_num)
{
	return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_CM;
}

/**
 * rdma_cap_iw_cm - Check if the port of a device supports the iWARP
 * Communication Manager.
 * @device: Device to check
 * @port_num: Port number to check
 *
 * Similar to above, but specific to iWARP connections, which use a
 * different management protocol than InfiniBand.
 *
 * Return: true if the port supports an iWARP CM (this does not guarantee that
 * a CM is actually running however).
 */
static inline bool rdma_cap_iw_cm(const struct ib_device *device, u8 port_num)
{
	return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IW_CM;
}

/**
 * rdma_cap_ib_sa - Check if the port of a device supports InfiniBand
 * Subnet Administration.
 * @device: Device to check
 * @port_num: Port number to check
 *
 * An InfiniBand Subnet Administration (SA) service is a pre-defined General
 * Service Agent (GSA) provided by the Subnet Manager (SM).  On InfiniBand
 * fabrics, devices should resolve routes to other hosts by contacting the
 * SA to query the proper route.
 *
 * Return: true if the port should act as a client to the fabric Subnet
 * Administration interface.  This does not imply that the SA service is
 * running locally.
 */
static inline bool rdma_cap_ib_sa(const struct ib_device *device, u8 port_num)
{
	return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_SA;
}

/**
 * rdma_cap_ib_mcast - Check if the port of a device supports InfiniBand
 * Multicast.
 * @device: Device to check
 * @port_num: Port number to check
 *
 * InfiniBand multicast registration is more complex than normal IPv4 or
 * IPv6 multicast registration.  Each Host Channel Adapter must register
 * with the Subnet Manager when it wishes to join a multicast group.  It
 * should do so only once regardless of how many queue pairs it subscribes
 * to this group.  And it should leave the group only after all queue pairs
 * attached to the group have been detached.
 *
 * Return: true if the port must undertake the additional administrative
 * overhead of registering/unregistering with the SM and tracking of the
 * total number of queue pairs attached to the multicast group.
 */
static inline bool rdma_cap_ib_mcast(const struct ib_device *device, u8 port_num)
{
	return rdma_cap_ib_sa(device, port_num);
}

/**
 * rdma_cap_af_ib - Check if the port of a device supports Native
 * InfiniBand Addressing.
 * @device: Device to check
 * @port_num: Port number to check
 *
 * InfiniBand addressing uses a port's GUID + Subnet Prefix to make a default
 * GID.  RoCE uses a different mechanism, but still generates a GID via
 * a prescribed mechanism and port specific data.
 *
 * Return: true if the port uses a GID address to identify devices on the
 * network.
 */
static inline bool rdma_cap_af_ib(const struct ib_device *device, u8 port_num)
{
	return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_AF_IB;
}

/**
 * rdma_cap_eth_ah - Check if the port of a device supports Ethernet
 * Address Handles.
 * @device: Device to check
 * @port_num: Port number to check
 *
 * RoCE is InfiniBand over Ethernet, and it uses a well defined technique
 * to fabricate GIDs over Ethernet/IP specific addresses native to the
 * port.  Normally, packet headers are generated by the sending host
 * adapter, but when sending connectionless datagrams, we must manually
 * inject the proper headers for the fabric we are communicating over.
 *
 * Return: true if we are running as a RoCE port and must force the
 * addition of a Global Route Header built from our Ethernet Address
 * Handle into our header list for connectionless packets.
 */
static inline bool rdma_cap_eth_ah(const struct ib_device *device, u8 port_num)
{
	return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_ETH_AH;
}

/**
 * rdma_cap_read_multi_sge - Check if the port of a device supports
 * RDMA Read with Multiple Scatter-Gather Entries.
 * @device: Device to check
 * @port_num: Port number to check
 *
 * iWARP has a restriction that RDMA READ requests may only have a single
 * Scatter/Gather Entry (SGE) in the work request.
 *
 * NOTE: although the linux kernel currently assumes all devices are either
 * single SGE RDMA READ devices or identical SGE maximums for RDMA READs and
 * WRITEs, according to Tom Talpey, this is not accurate.  There are some
 * devices out there that support more than a single SGE on RDMA READ
 * requests, but do not support the same number of SGEs as they do on
 * RDMA WRITE requests.  The linux kernel would need rearchitecting to
 * support these imbalanced READ/WRITE SGEs allowed devices.  So, for now,
 * suffice with either the device supports the same READ/WRITE SGEs, or
 * it only gets one READ sge.
 *
 * Return: true for any device that allows more than one SGE in RDMA READ
 * requests.
 */
static inline bool rdma_cap_read_multi_sge(struct ib_device *device,
					   u8 port_num)
{
	return !(device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_IWARP);
}
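
/*
 * Illustrative sketch (not part of the original header): core code is
 * expected to key per-port behavior off these capability helpers rather
 * than off the node type or link layer directly.  The called functions
 * are hypothetical.
 *
 *	if (rdma_cap_ib_mad(device, port))
 *		start_mad_agent(device, port);
 *	if (rdma_cap_ib_sa(device, port))
 *		resolve_route_via_sa(device, port);
 *	else if (rdma_protocol_iwarp(device, port))
 *		resolve_route_via_iw_cm(device, port);
 */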
2042
Linus Torvalds1da177e2005-04-16 15:20:36 -07002043int ib_query_gid(struct ib_device *device,
2044 u8 port_num, int index, union ib_gid *gid);
2045
2046int ib_query_pkey(struct ib_device *device,
2047 u8 port_num, u16 index, u16 *pkey);
2048
2049int ib_modify_device(struct ib_device *device,
2050 int device_modify_mask,
2051 struct ib_device_modify *device_modify);
2052
2053int ib_modify_port(struct ib_device *device,
2054 u8 port_num, int port_modify_mask,
2055 struct ib_port_modify *port_modify);
2056
Yosef Etigin5eb620c2007-05-14 07:26:51 +03002057int ib_find_gid(struct ib_device *device, union ib_gid *gid,
2058 u8 *port_num, u16 *index);
2059
2060int ib_find_pkey(struct ib_device *device,
2061 u8 port_num, u16 pkey, u16 *index);
2062
Linus Torvalds1da177e2005-04-16 15:20:36 -07002063/**
2064 * ib_alloc_pd - Allocates an unused protection domain.
2065 * @device: The device on which to allocate the protection domain.
2066 *
2067 * A protection domain object provides an association between QPs, shared
2068 * receive queues, address handles, memory regions, and memory windows.
2069 */
2070struct ib_pd *ib_alloc_pd(struct ib_device *device);
2071
2072/**
2073 * ib_dealloc_pd - Deallocates a protection domain.
2074 * @pd: The protection domain to deallocate.
2075 */
2076int ib_dealloc_pd(struct ib_pd *pd);
2077
2078/**
2079 * ib_create_ah - Creates an address handle for the given address vector.
2080 * @pd: The protection domain associated with the address handle.
2081 * @ah_attr: The attributes of the address vector.
2082 *
2083 * The address handle is used to reference a local or global destination
2084 * in all UD QP post sends.
2085 */
2086struct ib_ah *ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr);
2087
2088/**
Sean Hefty4e00d692006-06-17 20:37:39 -07002089 * ib_init_ah_from_wc - Initializes address handle attributes from a
2090 * work completion.
2091 * @device: Device on which the received message arrived.
2092 * @port_num: Port on which the received message arrived.
2093 * @wc: Work completion associated with the received message.
2094 * @grh: References the received global route header. This parameter is
2095 * ignored unless the work completion indicates that the GRH is valid.
2096 * @ah_attr: Returned attributes that can be used when creating an address
2097 * handle for replying to the message.
2098 */
Ira Weiny73cdaae2015-05-31 17:15:31 -04002099int ib_init_ah_from_wc(struct ib_device *device, u8 port_num,
2100 const struct ib_wc *wc, const struct ib_grh *grh,
2101 struct ib_ah_attr *ah_attr);
Sean Hefty4e00d692006-06-17 20:37:39 -07002102
2103/**
Hal Rosenstock513789e2005-07-27 11:45:34 -07002104 * ib_create_ah_from_wc - Creates an address handle associated with the
2105 * sender of the specified work completion.
2106 * @pd: The protection domain associated with the address handle.
2107 * @wc: Work completion information associated with a received message.
2108 * @grh: References the received global route header. This parameter is
2109 * ignored unless the work completion indicates that the GRH is valid.
2110 * @port_num: The outbound port number to associate with the address.
2111 *
2112 * The address handle is used to reference a local or global destination
2113 * in all UD QP post sends.
2114 */
Ira Weiny73cdaae2015-05-31 17:15:31 -04002115struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, const struct ib_wc *wc,
2116 const struct ib_grh *grh, u8 port_num);
Hal Rosenstock513789e2005-07-27 11:45:34 -07002117
2118/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07002119 * ib_modify_ah - Modifies the address vector associated with an address
2120 * handle.
2121 * @ah: The address handle to modify.
2122 * @ah_attr: The new address vector attributes to associate with the
2123 * address handle.
2124 */
2125int ib_modify_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr);
2126
2127/**
2128 * ib_query_ah - Queries the address vector associated with an address
2129 * handle.
2130 * @ah: The address handle to query.
2131 * @ah_attr: The address vector attributes associated with the address
2132 * handle.
2133 */
2134int ib_query_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr);
2135
2136/**
2137 * ib_destroy_ah - Destroys an address handle.
2138 * @ah: The address handle to destroy.
2139 */
2140int ib_destroy_ah(struct ib_ah *ah);
2141
2142/**
Roland Dreierd41fcc62005-08-18 12:23:08 -07002143 * ib_create_srq - Creates a SRQ associated with the specified protection
2144 * domain.
2145 * @pd: The protection domain associated with the SRQ.
Dotan Barakabb6e9b2006-02-23 12:13:51 -08002146 * @srq_init_attr: A list of initial attributes required to create the
2147 * SRQ. If SRQ creation succeeds, then the attributes are updated to
2148 * the actual capabilities of the created SRQ.
Roland Dreierd41fcc62005-08-18 12:23:08 -07002149 *
2150 * srq_attr->max_wr and srq_attr->max_sge are read the determine the
2151 * requested size of the SRQ, and set to the actual values allocated
2152 * on return. If ib_create_srq() succeeds, then max_wr and max_sge
2153 * will always be at least as large as the requested values.
2154 */
2155struct ib_srq *ib_create_srq(struct ib_pd *pd,
2156 struct ib_srq_init_attr *srq_init_attr);
2157
2158/**
2159 * ib_modify_srq - Modifies the attributes for the specified SRQ.
2160 * @srq: The SRQ to modify.
2161 * @srq_attr: On input, specifies the SRQ attributes to modify. On output,
2162 * the current values of selected SRQ attributes are returned.
2163 * @srq_attr_mask: A bit-mask used to specify which attributes of the SRQ
2164 * are being modified.
2165 *
2166 * The mask may contain IB_SRQ_MAX_WR to resize the SRQ and/or
2167 * IB_SRQ_LIMIT to set the SRQ's limit and request notification when
2168 * the number of receives queued drops below the limit.
2169 */
2170int ib_modify_srq(struct ib_srq *srq,
2171 struct ib_srq_attr *srq_attr,
2172 enum ib_srq_attr_mask srq_attr_mask);
2173
2174/**
2175 * ib_query_srq - Returns the attribute list and current values for the
2176 * specified SRQ.
2177 * @srq: The SRQ to query.
2178 * @srq_attr: The attributes of the specified SRQ.
2179 */
2180int ib_query_srq(struct ib_srq *srq,
2181 struct ib_srq_attr *srq_attr);
2182
2183/**
2184 * ib_destroy_srq - Destroys the specified SRQ.
2185 * @srq: The SRQ to destroy.
2186 */
2187int ib_destroy_srq(struct ib_srq *srq);
2188
2189/**
2190 * ib_post_srq_recv - Posts a list of work requests to the specified SRQ.
2191 * @srq: The SRQ to post the work request on.
2192 * @recv_wr: A list of work requests to post on the receive queue.
2193 * @bad_recv_wr: On an immediate failure, this parameter will reference
2194 * the work request that failed to be posted on the QP.
2195 */
2196static inline int ib_post_srq_recv(struct ib_srq *srq,
2197 struct ib_recv_wr *recv_wr,
2198 struct ib_recv_wr **bad_recv_wr)
2199{
2200 return srq->device->post_srq_recv(srq, recv_wr, bad_recv_wr);
2201}
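
/*
 * Illustrative sketch (not part of the original header): posting a
 * single receive buffer to an SRQ.  dma_addr is assumed to come from
 * one of the ib_dma_*() mapping helpers below, and "ctx" is a
 * hypothetical per-buffer cookie returned later in wc->wr_id.
 *
 *	struct ib_sge sge = {
 *		.addr   = dma_addr,
 *		.length = buf_len,
 *		.lkey   = mr->lkey,
 *	};
 *	struct ib_recv_wr wr = {
 *		.wr_id   = (uintptr_t)ctx,
 *		.sg_list = &sge,
 *		.num_sge = 1,
 *	}, *bad_wr;
 *
 *	ret = ib_post_srq_recv(srq, &wr, &bad_wr);
 */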

/**
 * ib_create_qp - Creates a QP associated with the specified protection
 *   domain.
 * @pd: The protection domain associated with the QP.
 * @qp_init_attr: A list of initial attributes required to create the
 *   QP.  If QP creation succeeds, then the attributes are updated to
 *   the actual capabilities of the created QP.
 */
struct ib_qp *ib_create_qp(struct ib_pd *pd,
			   struct ib_qp_init_attr *qp_init_attr);

/**
 * ib_modify_qp - Modifies the attributes for the specified QP and then
 *   transitions the QP to the given state.
 * @qp: The QP to modify.
 * @qp_attr: On input, specifies the QP attributes to modify.  On output,
 *   the current values of selected QP attributes are returned.
 * @qp_attr_mask: A bit-mask used to specify which attributes of the QP
 *   are being modified.
 */
int ib_modify_qp(struct ib_qp *qp,
		 struct ib_qp_attr *qp_attr,
		 int qp_attr_mask);
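
/*
 * Illustrative sketch (not part of the original header): the classic
 * RESET -> INIT transition.  The attribute values are placeholders;
 * the mask must list exactly the attributes required for the
 * transition (see ib_modify_qp_is_ok() above).
 *
 *	struct ib_qp_attr attr = {
 *		.qp_state        = IB_QPS_INIT,
 *		.pkey_index      = 0,
 *		.port_num        = port,
 *		.qp_access_flags = IB_ACCESS_REMOTE_WRITE,
 *	};
 *
 *	ret = ib_modify_qp(qp, &attr,
 *			   IB_QP_STATE | IB_QP_PKEY_INDEX |
 *			   IB_QP_PORT | IB_QP_ACCESS_FLAGS);
 */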

/**
 * ib_query_qp - Returns the attribute list and current values for the
 *   specified QP.
 * @qp: The QP to query.
 * @qp_attr: The attributes of the specified QP.
 * @qp_attr_mask: A bit-mask used to select specific attributes to query.
 * @qp_init_attr: Additional attributes of the selected QP.
 *
 * The qp_attr_mask may be used to limit the query to gathering only the
 * selected attributes.
 */
int ib_query_qp(struct ib_qp *qp,
		struct ib_qp_attr *qp_attr,
		int qp_attr_mask,
		struct ib_qp_init_attr *qp_init_attr);

/**
 * ib_destroy_qp - Destroys the specified QP.
 * @qp: The QP to destroy.
 */
int ib_destroy_qp(struct ib_qp *qp);

/**
 * ib_open_qp - Obtain a reference to an existing sharable QP.
 * @xrcd: XRC domain
 * @qp_open_attr: Attributes identifying the QP to open.
 *
 * Returns a reference to a sharable QP.
 */
struct ib_qp *ib_open_qp(struct ib_xrcd *xrcd,
			 struct ib_qp_open_attr *qp_open_attr);

/**
 * ib_close_qp - Release an external reference to a QP.
 * @qp: The QP handle to release
 *
 * The opened QP handle is released by the caller.  The underlying
 * shared QP is not destroyed until all internal references are released.
 */
int ib_close_qp(struct ib_qp *qp);

/**
 * ib_post_send - Posts a list of work requests to the send queue of
 *   the specified QP.
 * @qp: The QP to post the work request on.
 * @send_wr: A list of work requests to post on the send queue.
 * @bad_send_wr: On an immediate failure, this parameter will reference
 *   the work request that failed to be posted on the QP.
 *
 * While IBA Vol. 1 section 11.4.1.1 specifies that if an immediate
 * error is returned, the QP state shall not be affected,
 * ib_post_send() will return an immediate error after queueing any
 * earlier work requests in the list.
 */
static inline int ib_post_send(struct ib_qp *qp,
			       struct ib_send_wr *send_wr,
			       struct ib_send_wr **bad_send_wr)
{
	return qp->device->post_send(qp, send_wr, bad_send_wr);
}
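
/*
 * Illustrative sketch (not part of the original header): posting one
 * signaled RDMA WRITE.  In this version of the API the remote address
 * and rkey travel in the wr.rdma member of struct ib_send_wr; the sge
 * and the remote coordinates are assumed to be set up elsewhere.
 *
 *	struct ib_send_wr wr = {
 *		.wr_id      = (uintptr_t)ctx,
 *		.sg_list    = &sge,
 *		.num_sge    = 1,
 *		.opcode     = IB_WR_RDMA_WRITE,
 *		.send_flags = IB_SEND_SIGNALED,
 *	}, *bad_wr;
 *
 *	wr.wr.rdma.remote_addr = remote_addr;
 *	wr.wr.rdma.rkey        = rkey;
 *
 *	ret = ib_post_send(qp, &wr, &bad_wr);
 */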

/**
 * ib_post_recv - Posts a list of work requests to the receive queue of
 *   the specified QP.
 * @qp: The QP to post the work request on.
 * @recv_wr: A list of work requests to post on the receive queue.
 * @bad_recv_wr: On an immediate failure, this parameter will reference
 *   the work request that failed to be posted on the QP.
 */
static inline int ib_post_recv(struct ib_qp *qp,
			       struct ib_recv_wr *recv_wr,
			       struct ib_recv_wr **bad_recv_wr)
{
	return qp->device->post_recv(qp, recv_wr, bad_recv_wr);
}

/**
 * ib_create_cq - Creates a CQ on the specified device.
 * @device: The device on which to create the CQ.
 * @comp_handler: A user-specified callback that is invoked when a
 *   completion event occurs on the CQ.
 * @event_handler: A user-specified callback that is invoked when an
 *   asynchronous event not associated with a completion occurs on the CQ.
 * @cq_context: Context associated with the CQ returned to the user via
 *   the associated completion and event handlers.
 * @cqe: The minimum size of the CQ.
 * @comp_vector: Completion vector used to signal completion events.
 *   Must be >= 0 and < context->num_comp_vectors.
 *
 * Users can examine the cq structure to determine the actual CQ size.
 */
struct ib_cq *ib_create_cq(struct ib_device *device,
			   ib_comp_handler comp_handler,
			   void (*event_handler)(struct ib_event *, void *),
			   void *cq_context, int cqe, int comp_vector);

/**
 * ib_resize_cq - Modifies the capacity of the CQ.
 * @cq: The CQ to resize.
 * @cqe: The minimum size of the CQ.
 *
 * Users can examine the cq structure to determine the actual CQ size.
 */
int ib_resize_cq(struct ib_cq *cq, int cqe);

/**
 * ib_modify_cq - Modifies the moderation parameters of the CQ.
 * @cq: The CQ to modify.
 * @cq_count: number of CQEs that will trigger an event
 * @cq_period: max period of time in usec before triggering an event
 */
int ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period);

/**
 * ib_destroy_cq - Destroys the specified CQ.
 * @cq: The CQ to destroy.
 */
int ib_destroy_cq(struct ib_cq *cq);

/**
 * ib_poll_cq - poll a CQ for completion(s)
 * @cq: the CQ being polled
 * @num_entries: maximum number of completions to return
 * @wc: array of at least @num_entries &struct ib_wc where completions
 *   will be returned
 *
 * Poll a CQ for (possibly multiple) completions.  If the return value
 * is < 0, an error occurred.  If the return value is >= 0, it is the
 * number of completions returned.  If the return value is
 * non-negative and < num_entries, then the CQ was emptied.
 */
static inline int ib_poll_cq(struct ib_cq *cq, int num_entries,
			     struct ib_wc *wc)
{
	return cq->device->poll_cq(cq, num_entries, wc);
}

/**
 * ib_peek_cq - Returns the number of unreaped completions currently
 *   on the specified CQ.
 * @cq: The CQ to peek.
 * @wc_cnt: A minimum number of unreaped completions to check for.
 *
 * If the number of unreaped completions is greater than or equal to wc_cnt,
 * this function returns wc_cnt, otherwise, it returns the actual number of
 * unreaped completions.
 */
int ib_peek_cq(struct ib_cq *cq, int wc_cnt);

/**
 * ib_req_notify_cq - Request completion notification on a CQ.
 * @cq: The CQ to generate an event for.
 * @flags:
 *   Must contain exactly one of %IB_CQ_SOLICITED or %IB_CQ_NEXT_COMP
 *   to request an event on the next solicited event or next work
 *   completion of any type, respectively.  %IB_CQ_REPORT_MISSED_EVENTS
 *   may also be |ed in to request a hint about missed events, as
 *   described below.
 *
 * Return Value:
 *    < 0 means an error occurred while requesting notification
 *   == 0 means notification was requested successfully, and if
 *        IB_CQ_REPORT_MISSED_EVENTS was passed in, then no events
 *        were missed and it is safe to wait for another event.  In
 *        this case it is guaranteed that any work completions added
 *        to the CQ since the last CQ poll will trigger a completion
 *        notification event.
 *    > 0 is only returned if IB_CQ_REPORT_MISSED_EVENTS was passed
 *        in.  It means that the consumer must poll the CQ again to
 *        make sure it is empty to avoid missing an event because of a
 *        race between requesting notification and an entry being
 *        added to the CQ.  This return value means it is possible
 *        (but not guaranteed) that a work completion has been added
 *        to the CQ since the last poll without triggering a
 *        completion notification event.
 */
static inline int ib_req_notify_cq(struct ib_cq *cq,
				   enum ib_cq_notify_flags flags)
{
	return cq->device->req_notify_cq(cq, flags);
}
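
/*
 * Illustrative sketch (not part of the original header): the standard
 * poll / re-arm / re-poll loop that closes the race described above.
 * handle_wc() is hypothetical.
 *
 *	do {
 *		while (ib_poll_cq(cq, 1, &wc) > 0)
 *			handle_wc(&wc);
 *	} while (ib_req_notify_cq(cq, IB_CQ_NEXT_COMP |
 *				  IB_CQ_REPORT_MISSED_EVENTS) > 0);
 */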

/**
 * ib_req_ncomp_notif - Request completion notification when there are
 *   at least the specified number of unreaped completions on the CQ.
 * @cq: The CQ to generate an event for.
 * @wc_cnt: The number of unreaped completions that should be on the
 *   CQ before an event is generated.
 */
static inline int ib_req_ncomp_notif(struct ib_cq *cq, int wc_cnt)
{
	return cq->device->req_ncomp_notif ?
		cq->device->req_ncomp_notif(cq, wc_cnt) :
		-ENOSYS;
}

/**
 * ib_get_dma_mr - Returns a memory region for system memory that is
 *   usable for DMA.
 * @pd: The protection domain associated with the memory region.
 * @mr_access_flags: Specifies the memory access rights.
 *
 * Note that the ib_dma_*() functions defined below must be used
 * to create/destroy addresses used with the Lkey or Rkey returned
 * by ib_get_dma_mr().
 */
struct ib_mr *ib_get_dma_mr(struct ib_pd *pd, int mr_access_flags);

/**
 * ib_dma_mapping_error - check a DMA addr for error
 * @dev: The device for which the dma_addr was created
 * @dma_addr: The DMA address to check
 */
static inline int ib_dma_mapping_error(struct ib_device *dev, u64 dma_addr)
{
	if (dev->dma_ops)
		return dev->dma_ops->mapping_error(dev, dma_addr);
	return dma_mapping_error(dev->dma_device, dma_addr);
}

/**
 * ib_dma_map_single - Map a kernel virtual address to DMA address
 * @dev: The device for which the dma_addr is to be created
 * @cpu_addr: The kernel virtual address
 * @size: The size of the region in bytes
 * @direction: The direction of the DMA
 */
static inline u64 ib_dma_map_single(struct ib_device *dev,
				    void *cpu_addr, size_t size,
				    enum dma_data_direction direction)
{
	if (dev->dma_ops)
		return dev->dma_ops->map_single(dev, cpu_addr, size, direction);
	return dma_map_single(dev->dma_device, cpu_addr, size, direction);
}

/**
 * ib_dma_unmap_single - Destroy a mapping created by ib_dma_map_single()
 * @dev: The device for which the DMA address was created
 * @addr: The DMA address
 * @size: The size of the region in bytes
 * @direction: The direction of the DMA
 */
static inline void ib_dma_unmap_single(struct ib_device *dev,
				       u64 addr, size_t size,
				       enum dma_data_direction direction)
{
	if (dev->dma_ops)
		dev->dma_ops->unmap_single(dev, addr, size, direction);
	else
		dma_unmap_single(dev->dma_device, addr, size, direction);
}
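
/*
 * Illustrative sketch (not part of the original header): mapping a
 * kernel buffer for device access, checking the mapping, and undoing
 * it once the transfer is done.
 *
 *	u64 dma_addr;
 *
 *	dma_addr = ib_dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (ib_dma_mapping_error(dev, dma_addr))
 *		return -ENOMEM;
 *
 *	... post work requests referencing dma_addr ...
 *
 *	ib_dma_unmap_single(dev, dma_addr, len, DMA_TO_DEVICE);
 */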

static inline u64 ib_dma_map_single_attrs(struct ib_device *dev,
					  void *cpu_addr, size_t size,
					  enum dma_data_direction direction,
					  struct dma_attrs *attrs)
{
	return dma_map_single_attrs(dev->dma_device, cpu_addr, size,
				    direction, attrs);
}

static inline void ib_dma_unmap_single_attrs(struct ib_device *dev,
					     u64 addr, size_t size,
					     enum dma_data_direction direction,
					     struct dma_attrs *attrs)
{
	return dma_unmap_single_attrs(dev->dma_device, addr, size,
				      direction, attrs);
}

/**
 * ib_dma_map_page - Map a physical page to DMA address
 * @dev: The device for which the dma_addr is to be created
 * @page: The page to be mapped
 * @offset: The offset within the page
 * @size: The size of the region in bytes
 * @direction: The direction of the DMA
 */
static inline u64 ib_dma_map_page(struct ib_device *dev,
				  struct page *page,
				  unsigned long offset,
				  size_t size,
				  enum dma_data_direction direction)
{
	if (dev->dma_ops)
		return dev->dma_ops->map_page(dev, page, offset, size, direction);
	return dma_map_page(dev->dma_device, page, offset, size, direction);
}

/**
 * ib_dma_unmap_page - Destroy a mapping created by ib_dma_map_page()
 * @dev: The device for which the DMA address was created
 * @addr: The DMA address
 * @size: The size of the region in bytes
 * @direction: The direction of the DMA
 */
static inline void ib_dma_unmap_page(struct ib_device *dev,
				     u64 addr, size_t size,
				     enum dma_data_direction direction)
{
	if (dev->dma_ops)
		dev->dma_ops->unmap_page(dev, addr, size, direction);
	else
		dma_unmap_page(dev->dma_device, addr, size, direction);
}

/**
 * ib_dma_map_sg - Map a scatter/gather list to DMA addresses
 * @dev: The device for which the DMA addresses are to be created
 * @sg: The array of scatter/gather entries
 * @nents: The number of scatter/gather entries
 * @direction: The direction of the DMA
 */
static inline int ib_dma_map_sg(struct ib_device *dev,
				struct scatterlist *sg, int nents,
				enum dma_data_direction direction)
{
	if (dev->dma_ops)
		return dev->dma_ops->map_sg(dev, sg, nents, direction);
	return dma_map_sg(dev->dma_device, sg, nents, direction);
}

/**
 * ib_dma_unmap_sg - Unmap a scatter/gather list of DMA addresses
 * @dev: The device for which the DMA addresses were created
 * @sg: The array of scatter/gather entries
 * @nents: The number of scatter/gather entries
 * @direction: The direction of the DMA
 */
static inline void ib_dma_unmap_sg(struct ib_device *dev,
				   struct scatterlist *sg, int nents,
				   enum dma_data_direction direction)
{
	if (dev->dma_ops)
		dev->dma_ops->unmap_sg(dev, sg, nents, direction);
	else
		dma_unmap_sg(dev->dma_device, sg, nents, direction);
}

static inline int ib_dma_map_sg_attrs(struct ib_device *dev,
				      struct scatterlist *sg, int nents,
				      enum dma_data_direction direction,
				      struct dma_attrs *attrs)
{
	return dma_map_sg_attrs(dev->dma_device, sg, nents, direction, attrs);
}

static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev,
					 struct scatterlist *sg, int nents,
					 enum dma_data_direction direction,
					 struct dma_attrs *attrs)
{
	dma_unmap_sg_attrs(dev->dma_device, sg, nents, direction, attrs);
}

/**
 * ib_sg_dma_address - Return the DMA address from a scatter/gather entry
 * @dev: The device for which the DMA addresses were created
 * @sg: The scatter/gather entry
 *
 * Note: this function is obsolete.  To do: change all occurrences of
 * ib_sg_dma_address() into sg_dma_address().
 */
static inline u64 ib_sg_dma_address(struct ib_device *dev,
				    struct scatterlist *sg)
{
	return sg_dma_address(sg);
}

/**
 * ib_sg_dma_len - Return the DMA length from a scatter/gather entry
 * @dev: The device for which the DMA addresses were created
 * @sg: The scatter/gather entry
 *
 * Note: this function is obsolete.  To do: change all occurrences of
 * ib_sg_dma_len() into sg_dma_len().
 */
static inline unsigned int ib_sg_dma_len(struct ib_device *dev,
					 struct scatterlist *sg)
{
	return sg_dma_len(sg);
}

/**
 * ib_dma_sync_single_for_cpu - Prepare DMA region to be accessed by CPU
 * @dev: The device for which the DMA address was created
 * @addr: The DMA address
 * @size: The size of the region in bytes
 * @dir: The direction of the DMA
 */
static inline void ib_dma_sync_single_for_cpu(struct ib_device *dev,
					      u64 addr,
					      size_t size,
					      enum dma_data_direction dir)
{
	if (dev->dma_ops)
		dev->dma_ops->sync_single_for_cpu(dev, addr, size, dir);
	else
		dma_sync_single_for_cpu(dev->dma_device, addr, size, dir);
}

/**
 * ib_dma_sync_single_for_device - Prepare DMA region to be accessed by device
 * @dev: The device for which the DMA address was created
 * @addr: The DMA address
 * @size: The size of the region in bytes
 * @dir: The direction of the DMA
 */
static inline void ib_dma_sync_single_for_device(struct ib_device *dev,
						 u64 addr,
						 size_t size,
						 enum dma_data_direction dir)
{
	if (dev->dma_ops)
		dev->dma_ops->sync_single_for_device(dev, addr, size, dir);
	else
		dma_sync_single_for_device(dev->dma_device, addr, size, dir);
}

/**
 * ib_dma_alloc_coherent - Allocate memory and map it for DMA
 * @dev: The device for which the DMA address is requested
 * @size: The size of the region to allocate in bytes
 * @dma_handle: A pointer for returning the DMA address of the region
 * @flag: memory allocator flags
 */
static inline void *ib_dma_alloc_coherent(struct ib_device *dev,
					  size_t size,
					  u64 *dma_handle,
					  gfp_t flag)
{
	if (dev->dma_ops)
		return dev->dma_ops->alloc_coherent(dev, size, dma_handle, flag);
	else {
		dma_addr_t handle;
		void *ret;

		ret = dma_alloc_coherent(dev->dma_device, size, &handle, flag);
		*dma_handle = handle;
		return ret;
	}
}

/**
 * ib_dma_free_coherent - Free memory allocated by ib_dma_alloc_coherent()
 * @dev: The device for which the DMA addresses were allocated
 * @size: The size of the region
 * @cpu_addr: the address returned by ib_dma_alloc_coherent()
 * @dma_handle: the DMA address returned by ib_dma_alloc_coherent()
 */
static inline void ib_dma_free_coherent(struct ib_device *dev,
					size_t size, void *cpu_addr,
					u64 dma_handle)
{
	if (dev->dma_ops)
		dev->dma_ops->free_coherent(dev, size, cpu_addr, dma_handle);
	else
		dma_free_coherent(dev->dma_device, size, cpu_addr, dma_handle);
}

/**
 * ib_reg_phys_mr - Prepares a virtually addressed memory region for use
 *   by an HCA.
 * @pd: The protection domain assigned to the registered region.
 * @phys_buf_array: Specifies a list of physical buffers to use in the
 *   memory region.
 * @num_phys_buf: Specifies the size of the phys_buf_array.
 * @mr_access_flags: Specifies the memory access rights.
 * @iova_start: The offset of the region's starting I/O virtual address.
 */
struct ib_mr *ib_reg_phys_mr(struct ib_pd *pd,
			     struct ib_phys_buf *phys_buf_array,
			     int num_phys_buf,
			     int mr_access_flags,
			     u64 *iova_start);

/**
 * ib_rereg_phys_mr - Modifies the attributes of an existing memory region.
 *   Conceptually, this call performs the functions deregister memory region
 *   followed by register physical memory region.  Where possible,
 *   resources are reused instead of deallocated and reallocated.
 * @mr: The memory region to modify.
 * @mr_rereg_mask: A bit-mask used to indicate which of the following
 *   properties of the memory region are being modified.
 * @pd: If %IB_MR_REREG_PD is set in mr_rereg_mask, this field specifies
 *   the new protection domain to associate with the memory region,
 *   otherwise, this parameter is ignored.
 * @phys_buf_array: If %IB_MR_REREG_TRANS is set in mr_rereg_mask, this
 *   field specifies a list of physical buffers to use in the new
 *   translation, otherwise, this parameter is ignored.
 * @num_phys_buf: If %IB_MR_REREG_TRANS is set in mr_rereg_mask, this
 *   field specifies the size of the phys_buf_array, otherwise, this
 *   parameter is ignored.
 * @mr_access_flags: If %IB_MR_REREG_ACCESS is set in mr_rereg_mask, this
 *   field specifies the new memory access rights, otherwise, this
 *   parameter is ignored.
 * @iova_start: The offset of the region's starting I/O virtual address.
 */
int ib_rereg_phys_mr(struct ib_mr *mr,
		     int mr_rereg_mask,
		     struct ib_pd *pd,
		     struct ib_phys_buf *phys_buf_array,
		     int num_phys_buf,
		     int mr_access_flags,
		     u64 *iova_start);

/**
 * ib_query_mr - Retrieves information about a specific memory region.
 * @mr: The memory region to retrieve information about.
 * @mr_attr: The attributes of the specified memory region.
 */
int ib_query_mr(struct ib_mr *mr, struct ib_mr_attr *mr_attr);

/**
 * ib_dereg_mr - Deregisters a memory region and removes it from the
 *   HCA translation table.
 * @mr: The memory region to deregister.
 *
 * This function can fail, if the memory region has memory windows bound to it.
 */
int ib_dereg_mr(struct ib_mr *mr);

/**
 * ib_create_mr - Allocates a memory region that may be used for
 *   signature handover operations.
 * @pd: The protection domain associated with the region.
 * @mr_init_attr: memory region init attributes.
 */
struct ib_mr *ib_create_mr(struct ib_pd *pd,
			   struct ib_mr_init_attr *mr_init_attr);

/**
 * ib_destroy_mr - Destroys a memory region that was created using
 *   ib_create_mr and removes it from HW translation tables.
 * @mr: The memory region to destroy.
 *
 * This function can fail, if the memory region has memory windows bound to it.
 */
int ib_destroy_mr(struct ib_mr *mr);

/**
 * ib_alloc_fast_reg_mr - Allocates a memory region usable with the
 *   IB_WR_FAST_REG_MR send work request.
 * @pd: The protection domain associated with the region.
 * @max_page_list_len: requested max physical buffer list length to be
 *   used with fast register work requests for this MR.
 */
struct ib_mr *ib_alloc_fast_reg_mr(struct ib_pd *pd, int max_page_list_len);

/**
 * ib_alloc_fast_reg_page_list - Allocates a page list array
 * @device: ib device pointer.
 * @page_list_len: size of the page list array to be allocated.
 *
 * This allocates and returns a struct ib_fast_reg_page_list * and a
 * page_list array that is at least page_list_len in size.  The actual
 * size is returned in max_page_list_len.  The caller is responsible
 * for initializing the contents of the page_list array before posting
 * a send work request with the IB_WR_FAST_REG_MR opcode.
 *
 * The page_list array entries must be translated using one of the
 * ib_dma_*() functions just like the addresses passed to
 * ib_map_phys_fmr().  Once the ib_post_send() is issued, the struct
 * ib_fast_reg_page_list must not be modified by the caller until the
 * IB_WC_FAST_REG_MR work request completes.
 */
struct ib_fast_reg_page_list *ib_alloc_fast_reg_page_list(
			struct ib_device *device, int page_list_len);

/**
 * ib_free_fast_reg_page_list - Deallocates a previously allocated
 *   page list array.
 * @page_list: struct ib_fast_reg_page_list pointer to be deallocated.
 */
void ib_free_fast_reg_page_list(struct ib_fast_reg_page_list *page_list);

/**
 * ib_update_fast_reg_key - updates the key portion of the fast_reg MR
 *   R_Key and L_Key.
 * @mr: struct ib_mr pointer to be updated.
 * @newkey: new key to be used.
 */
static inline void ib_update_fast_reg_key(struct ib_mr *mr, u8 newkey)
{
	mr->lkey = (mr->lkey & 0xffffff00) | newkey;
	mr->rkey = (mr->rkey & 0xffffff00) | newkey;
}

/**
 * ib_inc_rkey - increments the key portion of the given rkey.  Can be used
 *   for calculating a new rkey for type 2 memory windows.
 * @rkey: the rkey to increment.
 */
static inline u32 ib_inc_rkey(u32 rkey)
{
	const u32 mask = 0x000000ff;
	return ((rkey + 1) & mask) | (rkey & ~mask);
}
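
/*
 * Illustrative sketch (not part of the original header): before
 * re-posting a fast register work request, consumers typically advance
 * the key byte so that stale remote references cannot match the new
 * registration:
 *
 *	u32 new_rkey = ib_inc_rkey(mr->rkey);
 *
 *	ib_update_fast_reg_key(mr, new_rkey & 0xff);
 */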
2827
2828/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07002829 * ib_alloc_mw - Allocates a memory window.
2830 * @pd: The protection domain associated with the memory window.
Shani Michaeli7083e422013-02-06 16:19:12 +00002831 * @type: The type of the memory window (1 or 2).
Linus Torvalds1da177e2005-04-16 15:20:36 -07002832 */
Shani Michaeli7083e422013-02-06 16:19:12 +00002833struct ib_mw *ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002834
2835/**
2836 * ib_bind_mw - Posts a work request to the send queue of the specified
2837 * QP, which binds the memory window to the given address range and
2838 * remote access attributes.
2839 * @qp: QP to post the bind work request on.
2840 * @mw: The memory window to bind.
2841 * @mw_bind: Specifies information about the memory window, including
2842 * its address range, remote access rights, and associated memory region.
Shani Michaeli7083e422013-02-06 16:19:12 +00002843 *
2844 * If there is no immediate error, the function will update the rkey member
2845 * of the mw parameter to its new value. The bind operation can still fail
2846 * asynchronously.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002847 */
2848static inline int ib_bind_mw(struct ib_qp *qp,
2849 struct ib_mw *mw,
2850 struct ib_mw_bind *mw_bind)
2851{
2852 /* XXX reference counting in corresponding MR? */
2853 return mw->device->bind_mw ?
2854 mw->device->bind_mw(qp, mw, mw_bind) :
2855 -ENOSYS;
2856}
2857
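/*
 * Example (illustrative sketch, assuming a valid qp and an already
 * registered mr): binding a type 1 memory window over part of an
 * existing MR. "iova", "length", and BIND_WRID are hypothetical
 * caller-side values.
 *
 *	struct ib_mw *mw;
 *	struct ib_mw_bind mw_bind;
 *	int ret;
 *
 *	mw = ib_alloc_mw(pd, IB_MW_TYPE_1);
 *	if (IS_ERR(mw))
 *		return PTR_ERR(mw);
 *
 *	memset(&mw_bind, 0, sizeof(mw_bind));
 *	mw_bind.wr_id			  = BIND_WRID;
 *	mw_bind.send_flags		  = IB_SEND_SIGNALED;
 *	mw_bind.bind_info.mr		  = mr;
 *	mw_bind.bind_info.addr		  = iova;
 *	mw_bind.bind_info.length	  = length;
 *	mw_bind.bind_info.mw_access_flags = IB_ACCESS_REMOTE_READ;
 *
 *	ret = ib_bind_mw(qp, mw, &mw_bind);
 *	if (!ret)
 *		advertise_rkey(mw->rkey);	(hypothetical helper)
 */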
/**
 * ib_dealloc_mw - Deallocates a memory window.
 * @mw: The memory window to deallocate.
 */
int ib_dealloc_mw(struct ib_mw *mw);

/**
 * ib_alloc_fmr - Allocates an unmapped fast memory region.
 * @pd: The protection domain associated with the unmapped region.
 * @mr_access_flags: Specifies the memory access rights.
 * @fmr_attr: Attributes of the unmapped region.
 *
 * A fast memory region must be mapped before it can be used as part of
 * a work request.
 */
struct ib_fmr *ib_alloc_fmr(struct ib_pd *pd,
			    int mr_access_flags,
			    struct ib_fmr_attr *fmr_attr);

/**
 * ib_map_phys_fmr - Maps a list of physical pages to a fast memory region.
 * @fmr: The fast memory region to associate with the pages.
 * @page_list: An array of physical pages to map to the fast memory region.
 * @list_len: The number of pages in page_list.
 * @iova: The I/O virtual address to use with the mapped region.
 */
static inline int ib_map_phys_fmr(struct ib_fmr *fmr,
				  u64 *page_list, int list_len,
				  u64 iova)
{
	return fmr->device->map_phys_fmr(fmr, page_list, list_len, iova);
}

/**
 * ib_unmap_fmr - Removes the mapping from a list of fast memory regions.
 * @fmr_list: A linked list of fast memory regions to unmap.
 */
int ib_unmap_fmr(struct list_head *fmr_list);

/**
 * ib_dealloc_fmr - Deallocates a fast memory region.
 * @fmr: The fast memory region to deallocate.
 */
int ib_dealloc_fmr(struct ib_fmr *fmr);

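/*
 * Example (illustrative sketch, assuming a valid pd and a DMA-mapped
 * page array): the usual FMR life cycle. "npages", "dma_pages", and
 * "io_addr" are hypothetical caller-side names.
 *
 *	struct ib_fmr_attr attr = {
 *		.max_pages	= npages,
 *		.max_maps	= 32,
 *		.page_shift	= PAGE_SHIFT,
 *	};
 *	struct ib_fmr *fmr;
 *	LIST_HEAD(fmr_list);
 *	int ret;
 *
 *	fmr = ib_alloc_fmr(pd, IB_ACCESS_LOCAL_WRITE, &attr);
 *	if (IS_ERR(fmr))
 *		return PTR_ERR(fmr);
 *	ret = ib_map_phys_fmr(fmr, dma_pages, npages, io_addr);
 *	...
 *	list_add_tail(&fmr->list, &fmr_list);
 *	ib_unmap_fmr(&fmr_list);
 *	ib_dealloc_fmr(fmr);
 */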
/**
 * ib_attach_mcast - Attaches the specified QP to a multicast group.
 * @qp: QP to attach to the multicast group. The QP must be type
 * IB_QPT_UD.
 * @gid: Multicast group GID.
 * @lid: Multicast group LID in host byte order.
 *
 * In order to send and receive multicast packets, subnet
 * administration must have created the multicast group and configured
 * the fabric appropriately. The port associated with the specified
 * QP must also be a member of the multicast group.
 */
int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);

/**
 * ib_detach_mcast - Detaches the specified QP from a multicast group.
 * @qp: QP to detach from the multicast group.
 * @gid: Multicast group GID.
 * @lid: Multicast group LID in host byte order.
 */
int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);

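/*
 * Example (illustrative sketch): attaching a UD QP to a group whose GID
 * and LID were obtained from a subnet administration join (e.g. via
 * ib_sa_join_multicast()). "mgid" and "mlid" are caller-provided.
 *
 *	int ret = ib_attach_mcast(qp, &mgid, mlid);
 *	if (ret)
 *		return ret;
 *	...
 *	ib_detach_mcast(qp, &mgid, mlid);
 */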
/**
 * ib_alloc_xrcd - Allocates an XRC domain.
 * @device: The device on which to allocate the XRC domain.
 */
struct ib_xrcd *ib_alloc_xrcd(struct ib_device *device);

/**
 * ib_dealloc_xrcd - Deallocates an XRC domain.
 * @xrcd: The XRC domain to deallocate.
 */
int ib_dealloc_xrcd(struct ib_xrcd *xrcd);

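/*
 * Example (illustrative sketch): an XRC domain is allocated once per
 * device and then referenced from XRC SRQ/QP init attributes (see the
 * ext.xrc fields of struct ib_srq_init_attr earlier in this header).
 *
 *	struct ib_xrcd *xrcd = ib_alloc_xrcd(device);
 *	if (IS_ERR(xrcd))
 *		return PTR_ERR(xrcd);
 *	...
 *	ib_dealloc_xrcd(xrcd);
 */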
struct ib_flow *ib_create_flow(struct ib_qp *qp,
			       struct ib_flow_attr *flow_attr, int domain);
int ib_destroy_flow(struct ib_flow *flow_id);

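/*
 * Example (illustrative sketch): steering all default unicast traffic
 * on port 1 to a QP. The flow_attr layout (header followed by optional
 * ib_flow_spec_* entries) and the enum values are as this header
 * defines them; a real rule would normally append matching specs and
 * set num_of_specs accordingly.
 *
 *	struct ib_flow_attr attr = {
 *		.type		= IB_FLOW_ATTR_ALL_DEFAULT,
 *		.size		= sizeof(attr),
 *		.priority	= 0,
 *		.num_of_specs	= 0,
 *		.port		= 1,
 *		.flags		= 0,
 *	};
 *	struct ib_flow *flow;
 *
 *	flow = ib_create_flow(qp, &attr, IB_FLOW_DOMAIN_USER);
 *	if (IS_ERR(flow))
 *		return PTR_ERR(flow);
 *	...
 *	ib_destroy_flow(flow);
 */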
static inline int ib_check_mr_access(int flags)
{
	/*
	 * Local write permission is required if remote write or
	 * remote atomic permission is also requested.
	 */
	if (flags & (IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_REMOTE_WRITE) &&
	    !(flags & IB_ACCESS_LOCAL_WRITE))
		return -EINVAL;

	return 0;
}

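/*
 * Example (illustrative sketch): validating access flags before a
 * registration call. The check below fails because remote write is
 * requested without local write.
 *
 *	if (ib_check_mr_access(IB_ACCESS_REMOTE_WRITE))
 *		return -EINVAL;
 */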
/**
 * ib_check_mr_status: lightweight check of MR status.
 * This routine may provide status checks on a selected
 * ib_mr. The first use is for signature status checks.
 *
 * @mr: A memory region.
 * @check_mask: Bitmask of which checks to perform from
 * ib_mr_status_check enumeration.
 * @mr_status: The container of relevant status checks.
 * Failed checks are indicated in the status bitmask
 * and the relevant info is placed in the error item.
 */
int ib_check_mr_status(struct ib_mr *mr, u32 check_mask,
		       struct ib_mr_status *mr_status);

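/*
 * Example (illustrative sketch): checking a signature-enabled MR after
 * I/O completes, using the check mask and status container declared
 * earlier in this header. "handle_sig_error" is a hypothetical caller
 * helper.
 *
 *	struct ib_mr_status mr_status;
 *	int ret;
 *
 *	ret = ib_check_mr_status(mr, IB_MR_CHECK_SIG_STATUS, &mr_status);
 *	if (ret)
 *		return ret;
 *	if (mr_status.fail_status & IB_MR_CHECK_SIG_STATUS)
 *		handle_sig_error(&mr_status.sig_err);
 */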
#endif /* IB_VERBS_H */