/*
 * Copyright (c) 2004 Mellanox Technologies Ltd.  All rights reserved.
 * Copyright (c) 2004 Infinicon Corporation.  All rights reserved.
 * Copyright (c) 2004 Intel Corporation.  All rights reserved.
 * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
 * Copyright (c) 2004 Voltaire Corporation.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc.  All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#if !defined(IB_VERBS_H)
#define IB_VERBS_H

#include <linux/types.h>
#include <linux/device.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/rwsem.h>
#include <linux/scatterlist.h>
#include <linux/workqueue.h>
#include <linux/socket.h>
#include <linux/irq_poll.h>
#include <uapi/linux/if_ether.h>
#include <net/ipv6.h>
#include <net/ip.h>
#include <linux/string.h>
#include <linux/slab.h>

#include <linux/if_link.h>
#include <linux/atomic.h>
#include <linux/mmu_notifier.h>
#include <asm/uaccess.h>

extern struct workqueue_struct *ib_wq;
extern struct workqueue_struct *ib_comp_wq;

union ib_gid {
	u8	raw[16];
	struct {
		__be64	subnet_prefix;
		__be64	interface_id;
	} global;
};

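/*
 * Usage sketch (illustrative, not part of this header's API): the union
 * lets one GID be viewed either as 16 raw bytes or as the two
 * big-endian halves the IB spec defines.  "guid" and "dest" below are
 * caller-supplied and shown only as assumptions:
 *
 *	union ib_gid gid;
 *
 *	memset(&gid, 0, sizeof(gid));
 *	gid.global.subnet_prefix = cpu_to_be64(0xfe80000000000000ULL);
 *	gid.global.interface_id  = cpu_to_be64(guid);
 *	memcpy(dest, gid.raw, sizeof(gid.raw));
 */
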
extern union ib_gid zgid;

enum ib_gid_type {
	/* If link layer is Ethernet, this is RoCE V1 */
	IB_GID_TYPE_IB        = 0,
	IB_GID_TYPE_ROCE      = 0,
	IB_GID_TYPE_ROCE_UDP_ENCAP = 1,
	IB_GID_TYPE_SIZE
};

#define ROCE_V2_UDP_DPORT      4791
struct ib_gid_attr {
	enum ib_gid_type	gid_type;
	struct net_device	*ndev;
};

enum rdma_node_type {
	/* IB values map to NodeInfo:NodeType. */
	RDMA_NODE_IB_CA 	= 1,
	RDMA_NODE_IB_SWITCH,
	RDMA_NODE_IB_ROUTER,
	RDMA_NODE_RNIC,
	RDMA_NODE_USNIC,
	RDMA_NODE_USNIC_UDP,
};

enum {
	/* set the local administered indication */
	IB_SA_WELL_KNOWN_GUID	= BIT_ULL(57) | 2,
};

enum rdma_transport_type {
	RDMA_TRANSPORT_IB,
	RDMA_TRANSPORT_IWARP,
	RDMA_TRANSPORT_USNIC,
	RDMA_TRANSPORT_USNIC_UDP
};

enum rdma_protocol_type {
	RDMA_PROTOCOL_IB,
	RDMA_PROTOCOL_IBOE,
	RDMA_PROTOCOL_IWARP,
	RDMA_PROTOCOL_USNIC_UDP
};

__attribute_const__ enum rdma_transport_type
rdma_node_get_transport(enum rdma_node_type node_type);

enum rdma_network_type {
	RDMA_NETWORK_IB,
	RDMA_NETWORK_ROCE_V1 = RDMA_NETWORK_IB,
	RDMA_NETWORK_IPV4,
	RDMA_NETWORK_IPV6
};

static inline enum ib_gid_type ib_network_to_gid_type(enum rdma_network_type network_type)
{
	if (network_type == RDMA_NETWORK_IPV4 ||
	    network_type == RDMA_NETWORK_IPV6)
		return IB_GID_TYPE_ROCE_UDP_ENCAP;

	/* IB_GID_TYPE_IB same as RDMA_NETWORK_ROCE_V1 */
	return IB_GID_TYPE_IB;
}

static inline enum rdma_network_type ib_gid_to_network_type(enum ib_gid_type gid_type,
							    union ib_gid *gid)
{
	if (gid_type == IB_GID_TYPE_IB)
		return RDMA_NETWORK_IB;

	if (ipv6_addr_v4mapped((struct in6_addr *)gid))
		return RDMA_NETWORK_IPV4;
	else
		return RDMA_NETWORK_IPV6;
}

enum rdma_link_layer {
	IB_LINK_LAYER_UNSPECIFIED,
	IB_LINK_LAYER_INFINIBAND,
	IB_LINK_LAYER_ETHERNET,
};

enum ib_device_cap_flags {
	IB_DEVICE_RESIZE_MAX_WR			= (1 << 0),
	IB_DEVICE_BAD_PKEY_CNTR			= (1 << 1),
	IB_DEVICE_BAD_QKEY_CNTR			= (1 << 2),
	IB_DEVICE_RAW_MULTI			= (1 << 3),
	IB_DEVICE_AUTO_PATH_MIG			= (1 << 4),
	IB_DEVICE_CHANGE_PHY_PORT		= (1 << 5),
	IB_DEVICE_UD_AV_PORT_ENFORCE		= (1 << 6),
	IB_DEVICE_CURR_QP_STATE_MOD		= (1 << 7),
	IB_DEVICE_SHUTDOWN_PORT			= (1 << 8),
	IB_DEVICE_INIT_TYPE			= (1 << 9),
	IB_DEVICE_PORT_ACTIVE_EVENT		= (1 << 10),
	IB_DEVICE_SYS_IMAGE_GUID		= (1 << 11),
	IB_DEVICE_RC_RNR_NAK_GEN		= (1 << 12),
	IB_DEVICE_SRQ_RESIZE			= (1 << 13),
	IB_DEVICE_N_NOTIFY_CQ			= (1 << 14),

	/*
	 * This device supports a per-device lkey or stag that can be
	 * used without performing a memory registration for the local
	 * memory.  Note that ULPs should never check this flag, but
	 * instead use the local_dma_lkey flag in the ib_pd structure,
	 * which will always contain a usable lkey.
	 */
	IB_DEVICE_LOCAL_DMA_LKEY		= (1 << 15),
	IB_DEVICE_RESERVED /* old SEND_W_INV */	= (1 << 16),
	IB_DEVICE_MEM_WINDOW			= (1 << 17),
	/*
	 * Devices should set IB_DEVICE_UD_IP_CSUM if they support
	 * insertion of UDP and TCP checksum on outgoing UD IPoIB
	 * messages and can verify the validity of checksum for
	 * incoming messages.  Setting this flag implies that the
	 * IPoIB driver may set NETIF_F_IP_CSUM for datagram mode.
	 */
	IB_DEVICE_UD_IP_CSUM			= (1 << 18),
	IB_DEVICE_UD_TSO			= (1 << 19),
	IB_DEVICE_XRC				= (1 << 20),

	/*
	 * This device supports the IB "base memory management extension",
	 * which includes support for fast registrations (IB_WR_REG_MR,
	 * IB_WR_LOCAL_INV and IB_WR_SEND_WITH_INV verbs).  This flag should
	 * also be set by any iWarp device which must support FRs to comply
	 * with the iWarp verbs spec.  iWarp devices also support the
	 * IB_WR_RDMA_READ_WITH_INV verb for RDMA READs that invalidate the
	 * stag.
	 */
	IB_DEVICE_MEM_MGT_EXTENSIONS		= (1 << 21),
	IB_DEVICE_BLOCK_MULTICAST_LOOPBACK	= (1 << 22),
	IB_DEVICE_MEM_WINDOW_TYPE_2A		= (1 << 23),
	IB_DEVICE_MEM_WINDOW_TYPE_2B		= (1 << 24),
	IB_DEVICE_RC_IP_CSUM			= (1 << 25),
	IB_DEVICE_RAW_IP_CSUM			= (1 << 26),
	/*
	 * Devices should set IB_DEVICE_CROSS_CHANNEL if they
	 * support execution of WQEs that involve synchronization
	 * of I/O operations with a single completion queue managed
	 * by hardware.
	 */
	IB_DEVICE_CROSS_CHANNEL			= (1 << 27),
	IB_DEVICE_MANAGED_FLOW_STEERING		= (1 << 29),
	IB_DEVICE_SIGNATURE_HANDOVER		= (1 << 30),
	IB_DEVICE_ON_DEMAND_PAGING		= (1ULL << 31),
	IB_DEVICE_SG_GAPS_REG			= (1ULL << 32),
	IB_DEVICE_VIRTUAL_FUNCTION		= (1ULL << 33),
	IB_DEVICE_RAW_SCATTER_FCS		= (1ULL << 34),
};

enum ib_signature_prot_cap {
	IB_PROT_T10DIF_TYPE_1 = 1,
	IB_PROT_T10DIF_TYPE_2 = 1 << 1,
	IB_PROT_T10DIF_TYPE_3 = 1 << 2,
};

enum ib_signature_guard_cap {
	IB_GUARD_T10DIF_CRC	= 1,
	IB_GUARD_T10DIF_CSUM	= 1 << 1,
};

enum ib_atomic_cap {
	IB_ATOMIC_NONE,
	IB_ATOMIC_HCA,
	IB_ATOMIC_GLOB
};

enum ib_odp_general_cap_bits {
	IB_ODP_SUPPORT = 1 << 0,
};

enum ib_odp_transport_cap_bits {
	IB_ODP_SUPPORT_SEND	= 1 << 0,
	IB_ODP_SUPPORT_RECV	= 1 << 1,
	IB_ODP_SUPPORT_WRITE	= 1 << 2,
	IB_ODP_SUPPORT_READ	= 1 << 3,
	IB_ODP_SUPPORT_ATOMIC	= 1 << 4,
};

struct ib_odp_caps {
	uint64_t general_caps;
	struct {
		uint32_t  rc_odp_caps;
		uint32_t  uc_odp_caps;
		uint32_t  ud_odp_caps;
	} per_transport_caps;
};

enum ib_cq_creation_flags {
	IB_CQ_FLAGS_TIMESTAMP_COMPLETION   = 1 << 0,
	IB_CQ_FLAGS_IGNORE_OVERRUN	   = 1 << 1,
};

struct ib_cq_init_attr {
	unsigned int	cqe;
	int		comp_vector;
	u32		flags;
};

struct ib_device_attr {
	u64			fw_ver;
	__be64			sys_image_guid;
	u64			max_mr_size;
	u64			page_size_cap;
	u32			vendor_id;
	u32			vendor_part_id;
	u32			hw_ver;
	int			max_qp;
	int			max_qp_wr;
	u64			device_cap_flags;
	int			max_sge;
	int			max_sge_rd;
	int			max_cq;
	int			max_cqe;
	int			max_mr;
	int			max_pd;
	int			max_qp_rd_atom;
	int			max_ee_rd_atom;
	int			max_res_rd_atom;
	int			max_qp_init_rd_atom;
	int			max_ee_init_rd_atom;
	enum ib_atomic_cap	atomic_cap;
	enum ib_atomic_cap	masked_atomic_cap;
	int			max_ee;
	int			max_rdd;
	int			max_mw;
	int			max_raw_ipv6_qp;
	int			max_raw_ethy_qp;
	int			max_mcast_grp;
	int			max_mcast_qp_attach;
	int			max_total_mcast_qp_attach;
	int			max_ah;
	int			max_fmr;
	int			max_map_per_fmr;
	int			max_srq;
	int			max_srq_wr;
	int			max_srq_sge;
	unsigned int		max_fast_reg_page_list_len;
	u16			max_pkeys;
	u8			local_ca_ack_delay;
	int			sig_prot_cap;
	int			sig_guard_cap;
	struct ib_odp_caps	odp_caps;
	uint64_t		timestamp_mask;
	uint64_t		hca_core_clock; /* in kHz */
};

enum ib_mtu {
	IB_MTU_256  = 1,
	IB_MTU_512  = 2,
	IB_MTU_1024 = 3,
	IB_MTU_2048 = 4,
	IB_MTU_4096 = 5
};

static inline int ib_mtu_enum_to_int(enum ib_mtu mtu)
{
	switch (mtu) {
	case IB_MTU_256:  return  256;
	case IB_MTU_512:  return  512;
	case IB_MTU_1024: return 1024;
	case IB_MTU_2048: return 2048;
	case IB_MTU_4096: return 4096;
	default:	  return -1;
	}
}

enum ib_port_state {
	IB_PORT_NOP		= 0,
	IB_PORT_DOWN		= 1,
	IB_PORT_INIT		= 2,
	IB_PORT_ARMED		= 3,
	IB_PORT_ACTIVE		= 4,
	IB_PORT_ACTIVE_DEFER	= 5
};

enum ib_port_cap_flags {
	IB_PORT_SM				= 1 <<  1,
	IB_PORT_NOTICE_SUP			= 1 <<  2,
	IB_PORT_TRAP_SUP			= 1 <<  3,
	IB_PORT_OPT_IPD_SUP			= 1 <<  4,
	IB_PORT_AUTO_MIGR_SUP			= 1 <<  5,
	IB_PORT_SL_MAP_SUP			= 1 <<  6,
	IB_PORT_MKEY_NVRAM			= 1 <<  7,
	IB_PORT_PKEY_NVRAM			= 1 <<  8,
	IB_PORT_LED_INFO_SUP			= 1 <<  9,
	IB_PORT_SM_DISABLED			= 1 << 10,
	IB_PORT_SYS_IMAGE_GUID_SUP		= 1 << 11,
	IB_PORT_PKEY_SW_EXT_PORT_TRAP_SUP	= 1 << 12,
	IB_PORT_EXTENDED_SPEEDS_SUP		= 1 << 14,
	IB_PORT_CM_SUP				= 1 << 16,
	IB_PORT_SNMP_TUNNEL_SUP			= 1 << 17,
	IB_PORT_REINIT_SUP			= 1 << 18,
	IB_PORT_DEVICE_MGMT_SUP			= 1 << 19,
	IB_PORT_VENDOR_CLASS_SUP		= 1 << 20,
	IB_PORT_DR_NOTICE_SUP			= 1 << 21,
	IB_PORT_CAP_MASK_NOTICE_SUP		= 1 << 22,
	IB_PORT_BOOT_MGMT_SUP			= 1 << 23,
	IB_PORT_LINK_LATENCY_SUP		= 1 << 24,
	IB_PORT_CLIENT_REG_SUP			= 1 << 25,
	IB_PORT_IP_BASED_GIDS			= 1 << 26,
};

enum ib_port_width {
	IB_WIDTH_1X	= 1,
	IB_WIDTH_4X	= 2,
	IB_WIDTH_8X	= 4,
	IB_WIDTH_12X	= 8
};

static inline int ib_width_enum_to_int(enum ib_port_width width)
{
	switch (width) {
	case IB_WIDTH_1X:  return  1;
	case IB_WIDTH_4X:  return  4;
	case IB_WIDTH_8X:  return  8;
	case IB_WIDTH_12X: return 12;
	default:	   return -1;
	}
}

enum ib_port_speed {
	IB_SPEED_SDR	= 1,
	IB_SPEED_DDR	= 2,
	IB_SPEED_QDR	= 4,
	IB_SPEED_FDR10	= 8,
	IB_SPEED_FDR	= 16,
	IB_SPEED_EDR	= 32
};

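/*
 * Informal example: the enum values above are bit flags, not Gb/sec
 * figures.  A port's aggregate rate is the lane count times the
 * per-lane signalling rate, e.g. a 4X EDR link runs at
 * ib_width_enum_to_int(IB_WIDTH_4X) * 25 Gb/sec = 100 Gb/sec.
 */
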
/**
 * struct rdma_hw_stats
 * @timestamp - Used by the core code to track when the last update was
 * @lifespan - Used by the core code to determine how old the counters
 *   should be before being updated again.  Stored in jiffies, defaults
 *   to 10 milliseconds, drivers can override the default by specifying
 *   their own value during their allocation routine.
 * @names - Array of pointers to static names used for the counters in
 *   the directory.
 * @num_counters - How many hardware counters there are.  If names is
 *   shorter than this number, a kernel oops will result.  Driver authors
 *   are encouraged to leave BUILD_BUG_ON(ARRAY_SIZE(@names) < num_counters)
 *   in their code to prevent this.
 * @value - Array of u64 counters that are accessed by the sysfs code and
 *   filled in by the drivers get_stats routine
 */
struct rdma_hw_stats {
	unsigned long	timestamp;
	unsigned long	lifespan;
	const char * const *names;
	int		num_counters;
	u64		value[];
};

#define RDMA_HW_STATS_DEFAULT_LIFESPAN 10
/**
 * rdma_alloc_hw_stats_struct - Helper function to allocate dynamic struct
 *   for drivers.
 * @names - Array of static const char *
 * @num_counters - How many elements in array
 * @lifespan - How many milliseconds between updates
 */
static inline struct rdma_hw_stats *rdma_alloc_hw_stats_struct(
		const char * const *names, int num_counters,
		unsigned long lifespan)
{
	struct rdma_hw_stats *stats;

	stats = kzalloc(sizeof(*stats) + num_counters * sizeof(u64),
			GFP_KERNEL);
	if (!stats)
		return NULL;
	stats->names = names;
	stats->num_counters = num_counters;
	stats->lifespan = msecs_to_jiffies(lifespan);

	return stats;
}

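/*
 * Sketch of a driver's stats-allocation routine using the helper above
 * (the driver name and counter list are illustrative only):
 *
 *	static const char * const foo_counter_names[] = {
 *		"rx_pkts",
 *		"tx_pkts",
 *	};
 *
 *	static struct rdma_hw_stats *foo_alloc_hw_stats(void)
 *	{
 *		BUILD_BUG_ON(ARRAY_SIZE(foo_counter_names) < 2);
 *		return rdma_alloc_hw_stats_struct(foo_counter_names,
 *					ARRAY_SIZE(foo_counter_names),
 *					RDMA_HW_STATS_DEFAULT_LIFESPAN);
 *	}
 */
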

/* Define bits for the various functionality this port needs to be supported by
 * the core.
 */
/* Management                           0x00000FFF */
#define RDMA_CORE_CAP_IB_MAD            0x00000001
#define RDMA_CORE_CAP_IB_SMI            0x00000002
#define RDMA_CORE_CAP_IB_CM             0x00000004
#define RDMA_CORE_CAP_IW_CM             0x00000008
#define RDMA_CORE_CAP_IB_SA             0x00000010
#define RDMA_CORE_CAP_OPA_MAD           0x00000020

/* Address format                       0x000FF000 */
#define RDMA_CORE_CAP_AF_IB             0x00001000
#define RDMA_CORE_CAP_ETH_AH            0x00002000

/* Protocol                             0xFFF00000 */
#define RDMA_CORE_CAP_PROT_IB           0x00100000
#define RDMA_CORE_CAP_PROT_ROCE         0x00200000
#define RDMA_CORE_CAP_PROT_IWARP        0x00400000
#define RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP 0x00800000

#define RDMA_CORE_PORT_IBA_IB          (RDMA_CORE_CAP_PROT_IB  \
					| RDMA_CORE_CAP_IB_MAD \
					| RDMA_CORE_CAP_IB_SMI \
					| RDMA_CORE_CAP_IB_CM  \
					| RDMA_CORE_CAP_IB_SA  \
					| RDMA_CORE_CAP_AF_IB)
#define RDMA_CORE_PORT_IBA_ROCE        (RDMA_CORE_CAP_PROT_ROCE \
					| RDMA_CORE_CAP_IB_MAD  \
					| RDMA_CORE_CAP_IB_CM   \
					| RDMA_CORE_CAP_AF_IB   \
					| RDMA_CORE_CAP_ETH_AH)
#define RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP			\
					(RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP \
					| RDMA_CORE_CAP_IB_MAD  \
					| RDMA_CORE_CAP_IB_CM   \
					| RDMA_CORE_CAP_AF_IB   \
					| RDMA_CORE_CAP_ETH_AH)
#define RDMA_CORE_PORT_IWARP           (RDMA_CORE_CAP_PROT_IWARP \
					| RDMA_CORE_CAP_IW_CM)
#define RDMA_CORE_PORT_INTEL_OPA       (RDMA_CORE_PORT_IBA_IB  \
					| RDMA_CORE_CAP_OPA_MAD)

struct ib_port_attr {
	u64			subnet_prefix;
	enum ib_port_state	state;
	enum ib_mtu		max_mtu;
	enum ib_mtu		active_mtu;
	int			gid_tbl_len;
	u32			port_cap_flags;
	u32			max_msg_sz;
	u32			bad_pkey_cntr;
	u32			qkey_viol_cntr;
	u16			pkey_tbl_len;
	u16			lid;
	u16			sm_lid;
	u8			lmc;
	u8			max_vl_num;
	u8			sm_sl;
	u8			subnet_timeout;
	u8			init_type_reply;
	u8			active_width;
	u8			active_speed;
	u8			phys_state;
	bool			grh_required;
};

enum ib_device_modify_flags {
	IB_DEVICE_MODIFY_SYS_IMAGE_GUID	= 1 << 0,
	IB_DEVICE_MODIFY_NODE_DESC	= 1 << 1
};

struct ib_device_modify {
	u64	sys_image_guid;
	char	node_desc[64];
};

enum ib_port_modify_flags {
	IB_PORT_SHUTDOWN		= 1,
	IB_PORT_INIT_TYPE		= (1<<2),
	IB_PORT_RESET_QKEY_CNTR		= (1<<3)
};

struct ib_port_modify {
	u32	set_port_cap_mask;
	u32	clr_port_cap_mask;
	u8	init_type;
};

enum ib_event_type {
	IB_EVENT_CQ_ERR,
	IB_EVENT_QP_FATAL,
	IB_EVENT_QP_REQ_ERR,
	IB_EVENT_QP_ACCESS_ERR,
	IB_EVENT_COMM_EST,
	IB_EVENT_SQ_DRAINED,
	IB_EVENT_PATH_MIG,
	IB_EVENT_PATH_MIG_ERR,
	IB_EVENT_DEVICE_FATAL,
	IB_EVENT_PORT_ACTIVE,
	IB_EVENT_PORT_ERR,
	IB_EVENT_LID_CHANGE,
	IB_EVENT_PKEY_CHANGE,
	IB_EVENT_SM_CHANGE,
	IB_EVENT_SRQ_ERR,
	IB_EVENT_SRQ_LIMIT_REACHED,
	IB_EVENT_QP_LAST_WQE_REACHED,
	IB_EVENT_CLIENT_REREGISTER,
	IB_EVENT_GID_CHANGE,
	IB_EVENT_WQ_FATAL,
};

const char *__attribute_const__ ib_event_msg(enum ib_event_type event);

struct ib_event {
	struct ib_device	*device;
	union {
		struct ib_cq	*cq;
		struct ib_qp	*qp;
		struct ib_srq	*srq;
		struct ib_wq	*wq;
		u8		port_num;
	} element;
	enum ib_event_type	event;
};

struct ib_event_handler {
	struct ib_device *device;
	void            (*handler)(struct ib_event_handler *, struct ib_event *);
	struct list_head  list;
};

#define INIT_IB_EVENT_HANDLER(_ptr, _device, _handler)		\
	do {							\
		(_ptr)->device  = _device;			\
		(_ptr)->handler = _handler;			\
		INIT_LIST_HEAD(&(_ptr)->list);			\
	} while (0)

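/*
 * Typical use (sketch; "priv" and my_event_handler are illustrative):
 * initialize an embedded handler with the macro above, then register
 * it with ib_register_event_handler():
 *
 *	static void my_event_handler(struct ib_event_handler *handler,
 *				     struct ib_event *event)
 *	{
 *		pr_info("async event %d on %s\n", event->event,
 *			event->device->name);
 *	}
 *
 *	INIT_IB_EVENT_HANDLER(&priv->event_handler, device,
 *			      my_event_handler);
 *	ib_register_event_handler(&priv->event_handler);
 */
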
struct ib_global_route {
	union ib_gid	dgid;
	u32		flow_label;
	u8		sgid_index;
	u8		hop_limit;
	u8		traffic_class;
};

struct ib_grh {
	__be32		version_tclass_flow;
	__be16		paylen;
	u8		next_hdr;
	u8		hop_limit;
	union ib_gid	sgid;
	union ib_gid	dgid;
};

union rdma_network_hdr {
	struct ib_grh ibgrh;
	struct {
		/* The IB spec states that if the payload is IPv4, the
		 * IPv4 header occupies the last 20 bytes of the
		 * 40-byte GRH slot.
		 */
		u8		reserved[20];
		struct iphdr	roce4grh;
	};
};

enum {
	IB_MULTICAST_QPN = 0xffffff
};

#define IB_LID_PERMISSIVE	cpu_to_be16(0xFFFF)
#define IB_MULTICAST_LID_BASE	cpu_to_be16(0xC000)

enum ib_ah_flags {
	IB_AH_GRH	= 1
};

enum ib_rate {
	IB_RATE_PORT_CURRENT = 0,
	IB_RATE_2_5_GBPS = 2,
	IB_RATE_5_GBPS   = 5,
	IB_RATE_10_GBPS  = 3,
	IB_RATE_20_GBPS  = 6,
	IB_RATE_30_GBPS  = 4,
	IB_RATE_40_GBPS  = 7,
	IB_RATE_60_GBPS  = 8,
	IB_RATE_80_GBPS  = 9,
	IB_RATE_120_GBPS = 10,
	IB_RATE_14_GBPS  = 11,
	IB_RATE_56_GBPS  = 12,
	IB_RATE_112_GBPS = 13,
	IB_RATE_168_GBPS = 14,
	IB_RATE_25_GBPS  = 15,
	IB_RATE_100_GBPS = 16,
	IB_RATE_200_GBPS = 17,
	IB_RATE_300_GBPS = 18
};

/**
 * ib_rate_to_mult - Convert the IB rate enum to a multiple of the
 * base rate of 2.5 Gbit/sec.  For example, IB_RATE_5_GBPS will be
 * converted to 2, since 5 Gbit/sec is 2 * 2.5 Gbit/sec.
 * @rate: rate to convert.
 */
__attribute_const__ int ib_rate_to_mult(enum ib_rate rate);

/**
 * ib_rate_to_mbps - Convert the IB rate enum to Mbps.
 * For example, IB_RATE_2_5_GBPS will be converted to 2500.
 * @rate: rate to convert.
 */
__attribute_const__ int ib_rate_to_mbps(enum ib_rate rate);

/**
 * enum ib_mr_type - memory region type
 * @IB_MR_TYPE_MEM_REG:       memory region that is used for
 *                            normal registration
 * @IB_MR_TYPE_SIGNATURE:     memory region that is used for
 *                            signature operations (data-integrity
 *                            capable regions)
 * @IB_MR_TYPE_SG_GAPS:       memory region that is capable of registering
 *                            any arbitrary sg lists (without
 *                            the normal mr constraints - see
 *                            ib_map_mr_sg)
 */
enum ib_mr_type {
	IB_MR_TYPE_MEM_REG,
	IB_MR_TYPE_SIGNATURE,
	IB_MR_TYPE_SG_GAPS,
};

/**
 * Signature types
 * IB_SIG_TYPE_NONE: Unprotected.
 * IB_SIG_TYPE_T10_DIF: Type T10-DIF
 */
enum ib_signature_type {
	IB_SIG_TYPE_NONE,
	IB_SIG_TYPE_T10_DIF,
};

/**
 * Signature T10-DIF block-guard types
 * IB_T10DIF_CRC: Corresponds to T10-PI mandated CRC checksum rules.
 * IB_T10DIF_CSUM: Corresponds to IP checksum rules.
 */
enum ib_t10_dif_bg_type {
	IB_T10DIF_CRC,
	IB_T10DIF_CSUM
};

/**
 * struct ib_t10_dif_domain - Parameters specific for T10-DIF
 *     domain.
 * @bg_type: T10-DIF block guard type (CRC|CSUM)
 * @pi_interval: protection information interval.
 * @bg: seed of guard computation.
 * @app_tag: application tag of guard block
 * @ref_tag: initial guard block reference tag.
 * @ref_remap: Indicate whether the reftag increments each block
 * @app_escape: Indicate to skip block check if apptag=0xffff
 * @ref_escape: Indicate to skip block check if reftag=0xffffffff
 * @apptag_check_mask: check bitmask of application tag.
 */
struct ib_t10_dif_domain {
	enum ib_t10_dif_bg_type bg_type;
	u16			pi_interval;
	u16			bg;
	u16			app_tag;
	u32			ref_tag;
	bool			ref_remap;
	bool			app_escape;
	bool			ref_escape;
	u16			apptag_check_mask;
};

/**
 * struct ib_sig_domain - Parameters for signature domain
 * @sig_type: specific signature type
 * @sig: union of all signature domain attributes that may
 *     be used to set domain layout.
 */
struct ib_sig_domain {
	enum ib_signature_type sig_type;
	union {
		struct ib_t10_dif_domain dif;
	} sig;
};

/**
 * struct ib_sig_attrs - Parameters for signature handover operation
 * @check_mask: bitmask for signature byte check (8 bytes)
 * @mem: memory domain layout descriptor.
 * @wire: wire domain layout descriptor.
 */
struct ib_sig_attrs {
	u8			check_mask;
	struct ib_sig_domain	mem;
	struct ib_sig_domain	wire;
};

enum ib_sig_err_type {
	IB_SIG_BAD_GUARD,
	IB_SIG_BAD_REFTAG,
	IB_SIG_BAD_APPTAG,
};

/**
 * struct ib_sig_err - signature error descriptor
 */
struct ib_sig_err {
	enum ib_sig_err_type	err_type;
	u32			expected;
	u32			actual;
	u64			sig_err_offset;
	u32			key;
};

enum ib_mr_status_check {
	IB_MR_CHECK_SIG_STATUS = 1,
};

/**
 * struct ib_mr_status - Memory region status container
 *
 * @fail_status: Bitmask of MR checks status. For each
 *     failed check a corresponding status bit is set.
 * @sig_err: Additional info for IB_MR_CHECK_SIG_STATUS
 *     failure.
 */
struct ib_mr_status {
	u32		    fail_status;
	struct ib_sig_err   sig_err;
};

/**
 * mult_to_ib_rate - Convert a multiple of 2.5 Gbit/sec to an IB rate
 * enum.
 * @mult: multiple to convert.
 */
__attribute_const__ enum ib_rate mult_to_ib_rate(int mult);

struct ib_ah_attr {
	struct ib_global_route	grh;
	u16			dlid;
	u8			sl;
	u8			src_path_bits;
	u8			static_rate;
	u8			ah_flags;
	u8			port_num;
	u8			dmac[ETH_ALEN];
};

enum ib_wc_status {
	IB_WC_SUCCESS,
	IB_WC_LOC_LEN_ERR,
	IB_WC_LOC_QP_OP_ERR,
	IB_WC_LOC_EEC_OP_ERR,
	IB_WC_LOC_PROT_ERR,
	IB_WC_WR_FLUSH_ERR,
	IB_WC_MW_BIND_ERR,
	IB_WC_BAD_RESP_ERR,
	IB_WC_LOC_ACCESS_ERR,
	IB_WC_REM_INV_REQ_ERR,
	IB_WC_REM_ACCESS_ERR,
	IB_WC_REM_OP_ERR,
	IB_WC_RETRY_EXC_ERR,
	IB_WC_RNR_RETRY_EXC_ERR,
	IB_WC_LOC_RDD_VIOL_ERR,
	IB_WC_REM_INV_RD_REQ_ERR,
	IB_WC_REM_ABORT_ERR,
	IB_WC_INV_EECN_ERR,
	IB_WC_INV_EEC_STATE_ERR,
	IB_WC_FATAL_ERR,
	IB_WC_RESP_TIMEOUT_ERR,
	IB_WC_GENERAL_ERR
};

const char *__attribute_const__ ib_wc_status_msg(enum ib_wc_status status);

enum ib_wc_opcode {
	IB_WC_SEND,
	IB_WC_RDMA_WRITE,
	IB_WC_RDMA_READ,
	IB_WC_COMP_SWAP,
	IB_WC_FETCH_ADD,
	IB_WC_LSO,
	IB_WC_LOCAL_INV,
	IB_WC_REG_MR,
	IB_WC_MASKED_COMP_SWAP,
	IB_WC_MASKED_FETCH_ADD,
/*
 * Set value of IB_WC_RECV so consumers can test if a completion is a
 * receive by testing (opcode & IB_WC_RECV).
 */
	IB_WC_RECV			= 1 << 7,
	IB_WC_RECV_RDMA_WITH_IMM
};

enum ib_wc_flags {
	IB_WC_GRH		= 1,
	IB_WC_WITH_IMM		= (1<<1),
	IB_WC_WITH_INVALIDATE	= (1<<2),
	IB_WC_IP_CSUM_OK	= (1<<3),
	IB_WC_WITH_SMAC		= (1<<4),
	IB_WC_WITH_VLAN		= (1<<5),
	IB_WC_WITH_NETWORK_HDR_TYPE	= (1<<6),
};

struct ib_wc {
	union {
		u64		wr_id;
		struct ib_cqe	*wr_cqe;
	};
	enum ib_wc_status	status;
	enum ib_wc_opcode	opcode;
	u32			vendor_err;
	u32			byte_len;
	struct ib_qp	       *qp;
	union {
		__be32		imm_data;
		u32		invalidate_rkey;
	} ex;
	u32			src_qp;
	int			wc_flags;
	u16			pkey_index;
	u16			slid;
	u8			sl;
	u8			dlid_path_bits;
	u8			port_num;	/* valid only for DR SMPs on switches */
	u8			smac[ETH_ALEN];
	u16			vlan_id;
	u8			network_hdr_type;
};

enum ib_cq_notify_flags {
	IB_CQ_SOLICITED			= 1 << 0,
	IB_CQ_NEXT_COMP			= 1 << 1,
	IB_CQ_SOLICITED_MASK		= IB_CQ_SOLICITED | IB_CQ_NEXT_COMP,
	IB_CQ_REPORT_MISSED_EVENTS	= 1 << 2,
};

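/*
 * Re-arm sketch: IB_CQ_REPORT_MISSED_EVENTS closes the race between
 * draining a CQ and re-arming it.  A consumer can loop like this
 * (handle_completion() is a placeholder; error handling omitted):
 *
 *	repoll:
 *		while (ib_poll_cq(cq, 1, &wc) > 0)
 *			handle_completion(&wc);
 *		if (ib_req_notify_cq(cq, IB_CQ_NEXT_COMP |
 *				     IB_CQ_REPORT_MISSED_EVENTS) > 0)
 *			goto repoll;	// completions slipped in after the drain
 */
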
enum ib_srq_type {
	IB_SRQT_BASIC,
	IB_SRQT_XRC
};

enum ib_srq_attr_mask {
	IB_SRQ_MAX_WR	= 1 << 0,
	IB_SRQ_LIMIT	= 1 << 1,
};

struct ib_srq_attr {
	u32	max_wr;
	u32	max_sge;
	u32	srq_limit;
};

struct ib_srq_init_attr {
	void		      (*event_handler)(struct ib_event *, void *);
	void		       *srq_context;
	struct ib_srq_attr	attr;
	enum ib_srq_type	srq_type;

	union {
		struct {
			struct ib_xrcd *xrcd;
			struct ib_cq   *cq;
		} xrc;
	} ext;
};

struct ib_qp_cap {
	u32	max_send_wr;
	u32	max_recv_wr;
	u32	max_send_sge;
	u32	max_recv_sge;
	u32	max_inline_data;

	/*
	 * Maximum number of rdma_rw_ctx structures in flight at a time.
	 * ib_create_qp() will calculate the right amount of needed WRs
	 * and MRs based on this.
	 */
	u32	max_rdma_ctxs;
};

enum ib_sig_type {
	IB_SIGNAL_ALL_WR,
	IB_SIGNAL_REQ_WR
};

enum ib_qp_type {
	/*
	 * IB_QPT_SMI and IB_QPT_GSI have to be the first two entries
	 * here (and in that order) since the MAD layer uses them as
	 * indices into a 2-entry table.
	 */
	IB_QPT_SMI,
	IB_QPT_GSI,

	IB_QPT_RC,
	IB_QPT_UC,
	IB_QPT_UD,
	IB_QPT_RAW_IPV6,
	IB_QPT_RAW_ETHERTYPE,
	IB_QPT_RAW_PACKET = 8,
	IB_QPT_XRC_INI = 9,
	IB_QPT_XRC_TGT,
	IB_QPT_MAX,
	/* Reserve a range for qp types internal to the low level driver.
	 * These qp types will not be visible at the IB core layer, so the
	 * IB_QPT_MAX usages should not be affected in the core layer
	 */
	IB_QPT_RESERVED1 = 0x1000,
	IB_QPT_RESERVED2,
	IB_QPT_RESERVED3,
	IB_QPT_RESERVED4,
	IB_QPT_RESERVED5,
	IB_QPT_RESERVED6,
	IB_QPT_RESERVED7,
	IB_QPT_RESERVED8,
	IB_QPT_RESERVED9,
	IB_QPT_RESERVED10,
};

enum ib_qp_create_flags {
	IB_QP_CREATE_IPOIB_UD_LSO		= 1 << 0,
	IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK	= 1 << 1,
	IB_QP_CREATE_CROSS_CHANNEL		= 1 << 2,
	IB_QP_CREATE_MANAGED_SEND		= 1 << 3,
	IB_QP_CREATE_MANAGED_RECV		= 1 << 4,
	IB_QP_CREATE_NETIF_QP			= 1 << 5,
	IB_QP_CREATE_SIGNATURE_EN		= 1 << 6,
	IB_QP_CREATE_USE_GFP_NOIO		= 1 << 7,
	IB_QP_CREATE_SCATTER_FCS		= 1 << 8,
	/* reserve bits 26-31 for low level drivers' internal use */
	IB_QP_CREATE_RESERVED_START		= 1 << 26,
	IB_QP_CREATE_RESERVED_END		= 1 << 31,
};

/*
 * Note: users may not call ib_close_qp or ib_destroy_qp from the event_handler
 * callback to destroy the passed in QP.
 */

struct ib_qp_init_attr {
	void                  (*event_handler)(struct ib_event *, void *);
	void		       *qp_context;
	struct ib_cq	       *send_cq;
	struct ib_cq	       *recv_cq;
	struct ib_srq	       *srq;
	struct ib_xrcd	       *xrcd;     /* XRC TGT QPs only */
	struct ib_qp_cap	cap;
	enum ib_sig_type	sq_sig_type;
	enum ib_qp_type		qp_type;
	enum ib_qp_create_flags	create_flags;

	/*
	 * Only needed for special QP types, or when using the RW API.
	 */
	u8			port_num;
	struct ib_rwq_ind_table *rwq_ind_tbl;
};

struct ib_qp_open_attr {
	void                  (*event_handler)(struct ib_event *, void *);
	void		       *qp_context;
	u32			qp_num;
	enum ib_qp_type		qp_type;
};

enum ib_rnr_timeout {
	IB_RNR_TIMER_655_36 =  0,
	IB_RNR_TIMER_000_01 =  1,
	IB_RNR_TIMER_000_02 =  2,
	IB_RNR_TIMER_000_03 =  3,
	IB_RNR_TIMER_000_04 =  4,
	IB_RNR_TIMER_000_06 =  5,
	IB_RNR_TIMER_000_08 =  6,
	IB_RNR_TIMER_000_12 =  7,
	IB_RNR_TIMER_000_16 =  8,
	IB_RNR_TIMER_000_24 =  9,
	IB_RNR_TIMER_000_32 = 10,
	IB_RNR_TIMER_000_48 = 11,
	IB_RNR_TIMER_000_64 = 12,
	IB_RNR_TIMER_000_96 = 13,
	IB_RNR_TIMER_001_28 = 14,
	IB_RNR_TIMER_001_92 = 15,
	IB_RNR_TIMER_002_56 = 16,
	IB_RNR_TIMER_003_84 = 17,
	IB_RNR_TIMER_005_12 = 18,
	IB_RNR_TIMER_007_68 = 19,
	IB_RNR_TIMER_010_24 = 20,
	IB_RNR_TIMER_015_36 = 21,
	IB_RNR_TIMER_020_48 = 22,
	IB_RNR_TIMER_030_72 = 23,
	IB_RNR_TIMER_040_96 = 24,
	IB_RNR_TIMER_061_44 = 25,
	IB_RNR_TIMER_081_92 = 26,
	IB_RNR_TIMER_122_88 = 27,
	IB_RNR_TIMER_163_84 = 28,
	IB_RNR_TIMER_245_76 = 29,
	IB_RNR_TIMER_327_68 = 30,
	IB_RNR_TIMER_491_52 = 31
};

enum ib_qp_attr_mask {
	IB_QP_STATE			= 1,
	IB_QP_CUR_STATE			= (1<<1),
	IB_QP_EN_SQD_ASYNC_NOTIFY	= (1<<2),
	IB_QP_ACCESS_FLAGS		= (1<<3),
	IB_QP_PKEY_INDEX		= (1<<4),
	IB_QP_PORT			= (1<<5),
	IB_QP_QKEY			= (1<<6),
	IB_QP_AV			= (1<<7),
	IB_QP_PATH_MTU			= (1<<8),
	IB_QP_TIMEOUT			= (1<<9),
	IB_QP_RETRY_CNT			= (1<<10),
	IB_QP_RNR_RETRY			= (1<<11),
	IB_QP_RQ_PSN			= (1<<12),
	IB_QP_MAX_QP_RD_ATOMIC		= (1<<13),
	IB_QP_ALT_PATH			= (1<<14),
	IB_QP_MIN_RNR_TIMER		= (1<<15),
	IB_QP_SQ_PSN			= (1<<16),
	IB_QP_MAX_DEST_RD_ATOMIC	= (1<<17),
	IB_QP_PATH_MIG_STATE		= (1<<18),
	IB_QP_CAP			= (1<<19),
	IB_QP_DEST_QPN			= (1<<20),
	IB_QP_RESERVED1			= (1<<21),
	IB_QP_RESERVED2			= (1<<22),
	IB_QP_RESERVED3			= (1<<23),
	IB_QP_RESERVED4			= (1<<24),
};

enum ib_qp_state {
	IB_QPS_RESET,
	IB_QPS_INIT,
	IB_QPS_RTR,
	IB_QPS_RTS,
	IB_QPS_SQD,
	IB_QPS_SQE,
	IB_QPS_ERR
};

enum ib_mig_state {
	IB_MIG_MIGRATED,
	IB_MIG_REARM,
	IB_MIG_ARMED
};

enum ib_mw_type {
	IB_MW_TYPE_1 = 1,
	IB_MW_TYPE_2 = 2
};

struct ib_qp_attr {
	enum ib_qp_state	qp_state;
	enum ib_qp_state	cur_qp_state;
	enum ib_mtu		path_mtu;
	enum ib_mig_state	path_mig_state;
	u32			qkey;
	u32			rq_psn;
	u32			sq_psn;
	u32			dest_qp_num;
	int			qp_access_flags;
	struct ib_qp_cap	cap;
	struct ib_ah_attr	ah_attr;
	struct ib_ah_attr	alt_ah_attr;
	u16			pkey_index;
	u16			alt_pkey_index;
	u8			en_sqd_async_notify;
	u8			sq_draining;
	u8			max_rd_atomic;
	u8			max_dest_rd_atomic;
	u8			min_rnr_timer;
	u8			port_num;
	u8			timeout;
	u8			retry_cnt;
	u8			rnr_retry;
	u8			alt_port_num;
	u8			alt_timeout;
};

enum ib_wr_opcode {
	IB_WR_RDMA_WRITE,
	IB_WR_RDMA_WRITE_WITH_IMM,
	IB_WR_SEND,
	IB_WR_SEND_WITH_IMM,
	IB_WR_RDMA_READ,
	IB_WR_ATOMIC_CMP_AND_SWP,
	IB_WR_ATOMIC_FETCH_AND_ADD,
	IB_WR_LSO,
	IB_WR_SEND_WITH_INV,
	IB_WR_RDMA_READ_WITH_INV,
	IB_WR_LOCAL_INV,
	IB_WR_REG_MR,
	IB_WR_MASKED_ATOMIC_CMP_AND_SWP,
	IB_WR_MASKED_ATOMIC_FETCH_AND_ADD,
	IB_WR_REG_SIG_MR,
	/* reserve values for low level drivers' internal use.
	 * These values will not be used at all in the ib core layer.
	 */
	IB_WR_RESERVED1 = 0xf0,
	IB_WR_RESERVED2,
	IB_WR_RESERVED3,
	IB_WR_RESERVED4,
	IB_WR_RESERVED5,
	IB_WR_RESERVED6,
	IB_WR_RESERVED7,
	IB_WR_RESERVED8,
	IB_WR_RESERVED9,
	IB_WR_RESERVED10,
};

enum ib_send_flags {
	IB_SEND_FENCE		= 1,
	IB_SEND_SIGNALED	= (1<<1),
	IB_SEND_SOLICITED	= (1<<2),
	IB_SEND_INLINE		= (1<<3),
	IB_SEND_IP_CSUM		= (1<<4),

	/* reserve bits 26-31 for low level drivers' internal use */
	IB_SEND_RESERVED_START	= (1 << 26),
	IB_SEND_RESERVED_END	= (1 << 31),
};

struct ib_sge {
	u64	addr;
	u32	length;
	u32	lkey;
};

struct ib_cqe {
	void (*done)(struct ib_cq *cq, struct ib_wc *wc);
};

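/*
 * The wr_cqe style (sketch): instead of stashing a wr_id, a consumer
 * embeds an ib_cqe in its own request structure and recovers the
 * request in the done callback.  "struct my_request" and
 * complete_request() are illustrative only:
 *
 *	struct my_request {
 *		struct ib_cqe	cqe;
 *		void		*buf;
 *	};
 *
 *	static void my_done(struct ib_cq *cq, struct ib_wc *wc)
 *	{
 *		struct my_request *req =
 *			container_of(wc->wr_cqe, struct my_request, cqe);
 *
 *		complete_request(req, wc->status);
 *	}
 */
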
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1195 | struct ib_send_wr { |
| 1196 | struct ib_send_wr *next; |
Christoph Hellwig | 14d3a3b | 2015-12-11 11:53:03 -0800 | [diff] [blame] | 1197 | union { |
| 1198 | u64 wr_id; |
| 1199 | struct ib_cqe *wr_cqe; |
| 1200 | }; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1201 | struct ib_sge *sg_list; |
| 1202 | int num_sge; |
| 1203 | enum ib_wr_opcode opcode; |
| 1204 | int send_flags; |
Roland Dreier | 0f39cf3 | 2008-04-16 21:09:32 -0700 | [diff] [blame] | 1205 | union { |
| 1206 | __be32 imm_data; |
| 1207 | u32 invalidate_rkey; |
| 1208 | } ex; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1209 | }; |
| 1210 | |
Christoph Hellwig | e622f2f | 2015-10-08 09:16:33 +0100 | [diff] [blame] | 1211 | struct ib_rdma_wr { |
| 1212 | struct ib_send_wr wr; |
| 1213 | u64 remote_addr; |
| 1214 | u32 rkey; |
| 1215 | }; |
| 1216 | |
| 1217 | static inline struct ib_rdma_wr *rdma_wr(struct ib_send_wr *wr) |
| 1218 | { |
| 1219 | return container_of(wr, struct ib_rdma_wr, wr); |
| 1220 | } |
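
/*
 * Illustrative sketch (not part of the original header): building and
 * posting a signaled RDMA WRITE through ib_rdma_wr.  The DMA address,
 * remote address and rkey parameters are hypothetical and assumed to be
 * set up by the caller; ib_post_send() is declared further down in this
 * header.
 */
static inline int example_post_rdma_write(struct ib_qp *qp, u64 local_addr,
					  u32 length, u32 lkey,
					  u64 remote_addr, u32 rkey)
{
	struct ib_sge sge = {
		.addr	= local_addr,		/* DMA-mapped local buffer */
		.length	= length,
		.lkey	= lkey,
	};
	struct ib_rdma_wr wr = {
		.wr = {
			.opcode		= IB_WR_RDMA_WRITE,
			.send_flags	= IB_SEND_SIGNALED,
			.sg_list	= &sge,
			.num_sge	= 1,
		},
		.remote_addr	= remote_addr,
		.rkey		= rkey,
	};
	struct ib_send_wr *bad_wr;

	/* The provider copies the WR and SGE before this returns. */
	return ib_post_send(qp, &wr.wr, &bad_wr);
}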
| 1221 | |
| 1222 | struct ib_atomic_wr { |
| 1223 | struct ib_send_wr wr; |
| 1224 | u64 remote_addr; |
| 1225 | u64 compare_add; |
| 1226 | u64 swap; |
| 1227 | u64 compare_add_mask; |
| 1228 | u64 swap_mask; |
| 1229 | u32 rkey; |
| 1230 | }; |
| 1231 | |
| 1232 | static inline struct ib_atomic_wr *atomic_wr(struct ib_send_wr *wr) |
| 1233 | { |
| 1234 | return container_of(wr, struct ib_atomic_wr, wr); |
| 1235 | } |
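
/*
 * Illustrative sketch (not part of the original header): initializing a
 * remote compare-and-swap.  The single SGE must describe an 8-byte,
 * DMA-mapped buffer that receives the value originally read from
 * remote_addr, which itself must be 8-byte aligned.  Names are
 * hypothetical.
 */
static inline void example_init_cmp_swp(struct ib_atomic_wr *wr,
					struct ib_sge *old_value_sge,
					u64 remote_addr, u32 rkey,
					u64 expect, u64 new_val)
{
	wr->wr.opcode	= IB_WR_ATOMIC_CMP_AND_SWP;
	wr->wr.sg_list	= old_value_sge;
	wr->wr.num_sge	= 1;
	wr->remote_addr	= remote_addr;
	wr->compare_add	= expect;	/* value the remote word must match */
	wr->swap	= new_val;	/* written only if the compare succeeds */
	wr->rkey	= rkey;
}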
| 1236 | |
| 1237 | struct ib_ud_wr { |
| 1238 | struct ib_send_wr wr; |
| 1239 | struct ib_ah *ah; |
| 1240 | void *header; |
| 1241 | int hlen; |
| 1242 | int mss; |
| 1243 | u32 remote_qpn; |
| 1244 | u32 remote_qkey; |
| 1245 | u16 pkey_index; /* valid for GSI only */ |
| 1246 | u8 port_num; /* valid for DR SMPs on switch only */ |
| 1247 | }; |
| 1248 | |
| 1249 | static inline struct ib_ud_wr *ud_wr(struct ib_send_wr *wr) |
| 1250 | { |
| 1251 | return container_of(wr, struct ib_ud_wr, wr); |
| 1252 | } |
| 1253 | |
Sagi Grimberg | 4c67e2b | 2015-10-13 19:11:24 +0300 | [diff] [blame] | 1254 | struct ib_reg_wr { |
| 1255 | struct ib_send_wr wr; |
| 1256 | struct ib_mr *mr; |
| 1257 | u32 key; |
| 1258 | int access; |
| 1259 | }; |
| 1260 | |
| 1261 | static inline struct ib_reg_wr *reg_wr(struct ib_send_wr *wr) |
| 1262 | { |
| 1263 | return container_of(wr, struct ib_reg_wr, wr); |
| 1264 | } |
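
/*
 * Illustrative sketch (not part of the original header): the fast
 * registration flow behind IB_WR_REG_MR.  The MR's pages are first laid
 * down with ib_map_mr_sg() (declared further down in this header), then
 * the registration itself is posted on the send queue.  The MR is assumed
 * to have been allocated with ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, ...);
 * all names are hypothetical and error handling is minimal.
 */
static inline int example_fast_reg(struct ib_qp *qp, struct ib_mr *mr,
				   struct scatterlist *sg, int sg_nents)
{
	struct ib_reg_wr wr = {
		.wr.opcode	= IB_WR_REG_MR,
		.mr		= mr,
		.key		= mr->rkey,
		.access		= IB_ACCESS_LOCAL_WRITE |
				  IB_ACCESS_REMOTE_READ |
				  IB_ACCESS_REMOTE_WRITE,
	};
	struct ib_send_wr *bad_wr;
	int n;

	n = ib_map_mr_sg(mr, sg, sg_nents, NULL, PAGE_SIZE);
	if (n != sg_nents)
		return n < 0 ? n : -EINVAL;

	return ib_post_send(qp, &wr.wr, &bad_wr);
}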
| 1265 | |
Christoph Hellwig | e622f2f | 2015-10-08 09:16:33 +0100 | [diff] [blame] | 1266 | struct ib_sig_handover_wr { |
| 1267 | struct ib_send_wr wr; |
| 1268 | struct ib_sig_attrs *sig_attrs; |
| 1269 | struct ib_mr *sig_mr; |
| 1270 | int access_flags; |
| 1271 | struct ib_sge *prot; |
| 1272 | }; |
| 1273 | |
| 1274 | static inline struct ib_sig_handover_wr *sig_handover_wr(struct ib_send_wr *wr) |
| 1275 | { |
| 1276 | return container_of(wr, struct ib_sig_handover_wr, wr); |
| 1277 | } |
| 1278 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1279 | struct ib_recv_wr { |
| 1280 | struct ib_recv_wr *next; |
Christoph Hellwig | 14d3a3b | 2015-12-11 11:53:03 -0800 | [diff] [blame] | 1281 | union { |
| 1282 | u64 wr_id; |
| 1283 | struct ib_cqe *wr_cqe; |
| 1284 | }; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1285 | struct ib_sge *sg_list; |
| 1286 | int num_sge; |
| 1287 | }; |
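
/*
 * Illustrative sketch (not part of the original header): posting a single
 * receive buffer.  The buffer is assumed to be DMA mapped already and the
 * PD's local_dma_lkey is used as the local key; ib_post_recv() is
 * declared further down in this header and the names are hypothetical.
 */
static inline int example_post_recv(struct ib_qp *qp, struct ib_cqe *cqe,
				    u64 dma_addr, u32 length)
{
	struct ib_sge sge = {
		.addr	= dma_addr,
		.length	= length,
		.lkey	= qp->pd->local_dma_lkey,
	};
	struct ib_recv_wr wr = {
		.wr_cqe		= cqe,	/* or .wr_id with the legacy polling API */
		.sg_list	= &sge,
		.num_sge	= 1,
	};
	struct ib_recv_wr *bad_wr;

	return ib_post_recv(qp, &wr, &bad_wr);
}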
| 1288 | |
| 1289 | enum ib_access_flags { |
| 1290 | IB_ACCESS_LOCAL_WRITE = 1, |
| 1291 | IB_ACCESS_REMOTE_WRITE = (1<<1), |
| 1292 | IB_ACCESS_REMOTE_READ = (1<<2), |
| 1293 | IB_ACCESS_REMOTE_ATOMIC = (1<<3), |
Shani Michaeli | 7083e42 | 2013-02-06 16:19:12 +0000 | [diff] [blame] | 1294 | IB_ACCESS_MW_BIND = (1<<4), |
Sagi Grimberg | 860f10a | 2014-12-11 17:04:16 +0200 | [diff] [blame] | 1295 | IB_ZERO_BASED = (1<<5), |
| 1296 | IB_ACCESS_ON_DEMAND = (1<<6), |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1297 | }; |
| 1298 | |
Christoph Hellwig | b7d3e0a | 2015-12-23 19:12:47 +0100 | [diff] [blame] | 1299 | /* |
| 1300 | * XXX: these are apparently used for ->rereg_user_mr, no idea why they |
| 1301 | * are hidden here instead of a uapi header! |
| 1302 | */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1303 | enum ib_mr_rereg_flags { |
| 1304 | IB_MR_REREG_TRANS = 1, |
| 1305 | IB_MR_REREG_PD = (1<<1), |
Matan Barak | 7e6edb9 | 2014-07-31 11:01:28 +0300 | [diff] [blame] | 1306 | IB_MR_REREG_ACCESS = (1<<2), |
| 1307 | IB_MR_REREG_SUPPORTED = ((IB_MR_REREG_ACCESS << 1) - 1) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1308 | }; |
| 1309 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1310 | struct ib_fmr_attr { |
| 1311 | int max_pages; |
| 1312 | int max_maps; |
Or Gerlitz | d36f34a | 2006-02-02 10:43:45 -0800 | [diff] [blame] | 1313 | u8 page_shift; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1314 | }; |
| 1315 | |
Haggai Eran | 882214e | 2014-12-11 17:04:18 +0200 | [diff] [blame] | 1316 | struct ib_umem; |
| 1317 | |
Roland Dreier | e2773c0 | 2005-07-07 17:57:10 -0700 | [diff] [blame] | 1318 | struct ib_ucontext { |
| 1319 | struct ib_device *device; |
| 1320 | struct list_head pd_list; |
| 1321 | struct list_head mr_list; |
| 1322 | struct list_head mw_list; |
| 1323 | struct list_head cq_list; |
| 1324 | struct list_head qp_list; |
| 1325 | struct list_head srq_list; |
| 1326 | struct list_head ah_list; |
Sean Hefty | 53d0bd1 | 2011-05-24 08:33:46 -0700 | [diff] [blame] | 1327 | struct list_head xrcd_list; |
Hadar Hen Zion | 436f2ad | 2013-08-14 13:58:30 +0300 | [diff] [blame] | 1328 | struct list_head rule_list; |
Yishai Hadas | f213c05 | 2016-05-23 15:20:49 +0300 | [diff] [blame] | 1329 | struct list_head wq_list; |
Yishai Hadas | de019a9 | 2016-05-23 15:20:52 +0300 | [diff] [blame] | 1330 | struct list_head rwq_ind_tbl_list; |
Roland Dreier | f7c6a7b | 2007-03-04 16:15:11 -0800 | [diff] [blame] | 1331 | int closing; |
Shachar Raindel | 8ada2c1 | 2014-12-11 17:04:17 +0200 | [diff] [blame] | 1332 | |
| 1333 | struct pid *tgid; |
Haggai Eran | 882214e | 2014-12-11 17:04:18 +0200 | [diff] [blame] | 1334 | #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING |
| 1335 | struct rb_root umem_tree; |
| 1336 | /* |
| 1337 | * Protects .umem_tree, as well as odp_mrs_count and |
| 1338 | * mmu notifier registration. |
| 1339 | */ |
| 1340 | struct rw_semaphore umem_rwsem; |
| 1341 | void (*invalidate_range)(struct ib_umem *umem, |
| 1342 | unsigned long start, unsigned long end); |
| 1343 | |
| 1344 | struct mmu_notifier mn; |
| 1345 | atomic_t notifier_count; |
| 1346 | /* A list of umems that don't have private mmu notifier counters yet. */ |
| 1347 | struct list_head no_private_counters; |
| 1348 | int odp_mrs_count; |
| 1349 | #endif |
Roland Dreier | e2773c0 | 2005-07-07 17:57:10 -0700 | [diff] [blame] | 1350 | }; |
| 1351 | |
| 1352 | struct ib_uobject { |
| 1353 | u64 user_handle; /* handle given to us by userspace */ |
| 1354 | struct ib_ucontext *context; /* associated user context */ |
Roland Dreier | 9ead190 | 2006-06-17 20:44:49 -0700 | [diff] [blame] | 1355 | void *object; /* containing object */ |
Roland Dreier | e2773c0 | 2005-07-07 17:57:10 -0700 | [diff] [blame] | 1356 | struct list_head list; /* link to context's list */ |
Roland Dreier | b3d636b | 2008-04-16 21:01:06 -0700 | [diff] [blame] | 1357 | int id; /* index into kernel idr */ |
Roland Dreier | 9ead190 | 2006-06-17 20:44:49 -0700 | [diff] [blame] | 1358 | struct kref ref; |
| 1359 | struct rw_semaphore mutex; /* protects .live */ |
Mike Marciniszyn | d144da8 | 2015-11-02 12:13:25 -0500 | [diff] [blame] | 1360 | struct rcu_head rcu; /* kfree_rcu() overhead */ |
Roland Dreier | 9ead190 | 2006-06-17 20:44:49 -0700 | [diff] [blame] | 1361 | int live; |
Roland Dreier | e2773c0 | 2005-07-07 17:57:10 -0700 | [diff] [blame] | 1362 | }; |
| 1363 | |
Roland Dreier | e2773c0 | 2005-07-07 17:57:10 -0700 | [diff] [blame] | 1364 | struct ib_udata { |
Yann Droneaud | 309243e | 2013-12-11 23:01:44 +0100 | [diff] [blame] | 1365 | const void __user *inbuf; |
Roland Dreier | e2773c0 | 2005-07-07 17:57:10 -0700 | [diff] [blame] | 1366 | void __user *outbuf; |
| 1367 | size_t inlen; |
| 1368 | size_t outlen; |
| 1369 | }; |
| 1370 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1371 | struct ib_pd { |
Jason Gunthorpe | 96249d7 | 2015-08-05 14:14:45 -0600 | [diff] [blame] | 1372 | u32 local_dma_lkey; |
Roland Dreier | e2773c0 | 2005-07-07 17:57:10 -0700 | [diff] [blame] | 1373 | struct ib_device *device; |
| 1374 | struct ib_uobject *uobject; |
| 1375 | atomic_t usecnt; /* count all resources */ |
Jason Gunthorpe | 96249d7 | 2015-08-05 14:14:45 -0600 | [diff] [blame] | 1376 | struct ib_mr *local_mr; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1377 | }; |
| 1378 | |
Sean Hefty | 59991f9 | 2011-05-23 17:52:46 -0700 | [diff] [blame] | 1379 | struct ib_xrcd { |
| 1380 | struct ib_device *device; |
Sean Hefty | d3d72d9 | 2011-05-26 23:06:44 -0700 | [diff] [blame] | 1381 | atomic_t usecnt; /* count all exposed resources */ |
Sean Hefty | 53d0bd1 | 2011-05-24 08:33:46 -0700 | [diff] [blame] | 1382 | struct inode *inode; |
Sean Hefty | d3d72d9 | 2011-05-26 23:06:44 -0700 | [diff] [blame] | 1383 | |
| 1384 | struct mutex tgt_qp_mutex; |
| 1385 | struct list_head tgt_qp_list; |
Sean Hefty | 59991f9 | 2011-05-23 17:52:46 -0700 | [diff] [blame] | 1386 | }; |
| 1387 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1388 | struct ib_ah { |
| 1389 | struct ib_device *device; |
| 1390 | struct ib_pd *pd; |
Roland Dreier | e2773c0 | 2005-07-07 17:57:10 -0700 | [diff] [blame] | 1391 | struct ib_uobject *uobject; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1392 | }; |
| 1393 | |
| 1394 | typedef void (*ib_comp_handler)(struct ib_cq *cq, void *cq_context); |
| 1395 | |
Christoph Hellwig | 14d3a3b | 2015-12-11 11:53:03 -0800 | [diff] [blame] | 1396 | enum ib_poll_context { |
| 1397 | IB_POLL_DIRECT, /* caller context, no hw completions */ |
| 1398 | IB_POLL_SOFTIRQ, /* poll from softirq context */ |
| 1399 | IB_POLL_WORKQUEUE, /* poll from workqueue */ |
| 1400 | }; |
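
/*
 * Illustrative sketch (not part of the original header): the poll context
 * chosen at CQ allocation decides who drives completion processing.  This
 * uses ib_alloc_cq(), declared further down in this header; the CQ depth
 * and private pointer are hypothetical.
 */
static inline struct ib_cq *example_create_cq(struct ib_device *dev,
					      void *priv)
{
	/*
	 * 128 CQEs on completion vector 0, with done() callbacks run from
	 * workqueue context.  IB_POLL_DIRECT would instead leave all
	 * polling to the caller via ib_process_cq_direct().
	 */
	return ib_alloc_cq(dev, priv, 128, 0, IB_POLL_WORKQUEUE);
}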
| 1401 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1402 | struct ib_cq { |
Roland Dreier | e2773c0 | 2005-07-07 17:57:10 -0700 | [diff] [blame] | 1403 | struct ib_device *device; |
| 1404 | struct ib_uobject *uobject; |
| 1405 | ib_comp_handler comp_handler; |
| 1406 | void (*event_handler)(struct ib_event *, void *); |
Dotan Barak | 4deccd6 | 2008-07-14 23:48:44 -0700 | [diff] [blame] | 1407 | void *cq_context; |
Roland Dreier | e2773c0 | 2005-07-07 17:57:10 -0700 | [diff] [blame] | 1408 | int cqe; |
| 1409 | atomic_t usecnt; /* count number of work queues */ |
Christoph Hellwig | 14d3a3b | 2015-12-11 11:53:03 -0800 | [diff] [blame] | 1410 | enum ib_poll_context poll_ctx; |
| 1411 | struct ib_wc *wc; |
| 1412 | union { |
| 1413 | struct irq_poll iop; |
| 1414 | struct work_struct work; |
| 1415 | }; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1416 | }; |
| 1417 | |
| 1418 | struct ib_srq { |
Roland Dreier | d41fcc6 | 2005-08-18 12:23:08 -0700 | [diff] [blame] | 1419 | struct ib_device *device; |
| 1420 | struct ib_pd *pd; |
| 1421 | struct ib_uobject *uobject; |
| 1422 | void (*event_handler)(struct ib_event *, void *); |
| 1423 | void *srq_context; |
Sean Hefty | 96104ed | 2011-05-23 16:31:36 -0700 | [diff] [blame] | 1424 | enum ib_srq_type srq_type; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1425 | atomic_t usecnt; |
Sean Hefty | 418d513 | 2011-05-23 19:42:29 -0700 | [diff] [blame] | 1426 | |
| 1427 | union { |
| 1428 | struct { |
| 1429 | struct ib_xrcd *xrcd; |
| 1430 | struct ib_cq *cq; |
| 1431 | u32 srq_num; |
| 1432 | } xrc; |
| 1433 | } ext; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1434 | }; |
| 1435 | |
Yishai Hadas | 5fd251c | 2016-05-23 15:20:48 +0300 | [diff] [blame] | 1436 | enum ib_wq_type { |
| 1437 | IB_WQT_RQ |
| 1438 | }; |
| 1439 | |
| 1440 | enum ib_wq_state { |
| 1441 | IB_WQS_RESET, |
| 1442 | IB_WQS_RDY, |
| 1443 | IB_WQS_ERR |
| 1444 | }; |
| 1445 | |
| 1446 | struct ib_wq { |
| 1447 | struct ib_device *device; |
| 1448 | struct ib_uobject *uobject; |
| 1449 | void *wq_context; |
| 1450 | void (*event_handler)(struct ib_event *, void *); |
| 1451 | struct ib_pd *pd; |
| 1452 | struct ib_cq *cq; |
| 1453 | u32 wq_num; |
| 1454 | enum ib_wq_state state; |
| 1455 | enum ib_wq_type wq_type; |
| 1456 | atomic_t usecnt; |
| 1457 | }; |
| 1458 | |
| 1459 | struct ib_wq_init_attr { |
| 1460 | void *wq_context; |
| 1461 | enum ib_wq_type wq_type; |
| 1462 | u32 max_wr; |
| 1463 | u32 max_sge; |
| 1464 | struct ib_cq *cq; |
| 1465 | void (*event_handler)(struct ib_event *, void *); |
| 1466 | }; |
| 1467 | |
| 1468 | enum ib_wq_attr_mask { |
| 1469 | IB_WQ_STATE = 1 << 0, |
| 1470 | IB_WQ_CUR_STATE = 1 << 1, |
| 1471 | }; |
| 1472 | |
| 1473 | struct ib_wq_attr { |
| 1474 | enum ib_wq_state wq_state; |
| 1475 | enum ib_wq_state curr_wq_state; |
| 1476 | }; |
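
/*
 * Illustrative sketch (not part of the original header): moving a work
 * queue from RESET to ready with the attribute mask above, using
 * ib_modify_wq() as declared further down in this header.
 */
static inline int example_wq_to_ready(struct ib_wq *wq)
{
	struct ib_wq_attr attr = {
		.wq_state = IB_WQS_RDY,
	};

	return ib_modify_wq(wq, &attr, IB_WQ_STATE);
}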
| 1477 | |
Yishai Hadas | 6d39786 | 2016-05-23 15:20:51 +0300 | [diff] [blame] | 1478 | struct ib_rwq_ind_table { |
| 1479 | struct ib_device *device; |
| 1480 | struct ib_uobject *uobject; |
| 1481 | atomic_t usecnt; |
| 1482 | u32 ind_tbl_num; |
| 1483 | u32 log_ind_tbl_size; |
| 1484 | struct ib_wq **ind_tbl; |
| 1485 | }; |
| 1486 | |
| 1487 | struct ib_rwq_ind_table_init_attr { |
| 1488 | u32 log_ind_tbl_size; |
| 1489 | /* Each entry is a pointer to a Receive Work Queue */ |
| 1490 | struct ib_wq **ind_tbl; |
| 1491 | }; |
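
/*
 * Illustrative sketch (not part of the original header): building an RSS
 * indirection table over a caller-provided WQ array with
 * ib_create_rwq_ind_table(), declared further down in this header.  The
 * table holds 1 << log_ind_tbl_size entries, so wqs is assumed
 * (hypothetically) to point at four receive work queues.
 */
static inline struct ib_rwq_ind_table *
example_create_ind_table(struct ib_device *device, struct ib_wq **wqs)
{
	struct ib_rwq_ind_table_init_attr init_attr = {
		.log_ind_tbl_size	= 2,	/* 2^2 == 4 entries */
		.ind_tbl		= wqs,
	};

	return ib_create_rwq_ind_table(device, &init_attr);
}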
| 1492 | |
Bart Van Assche | 632bc3f | 2016-07-21 13:03:30 -0700 | [diff] [blame] | 1493 | /* |
| 1494 | * @max_write_sge: Maximum SGE elements per RDMA WRITE request. |
| 1495 | * @max_read_sge: Maximum SGE elements per RDMA READ request. |
| 1496 | */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1497 | struct ib_qp { |
| 1498 | struct ib_device *device; |
| 1499 | struct ib_pd *pd; |
| 1500 | struct ib_cq *send_cq; |
| 1501 | struct ib_cq *recv_cq; |
Christoph Hellwig | fffb038 | 2016-05-03 18:01:07 +0200 | [diff] [blame] | 1502 | spinlock_t mr_lock; |
| 1503 | int mrs_used; |
Christoph Hellwig | a060b56 | 2016-05-03 18:01:09 +0200 | [diff] [blame] | 1504 | struct list_head rdma_mrs; |
Christoph Hellwig | 0e353e3 | 2016-05-03 18:01:12 +0200 | [diff] [blame] | 1505 | struct list_head sig_mrs; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1506 | struct ib_srq *srq; |
Sean Hefty | b42b63c | 2011-05-23 19:59:25 -0700 | [diff] [blame] | 1507 | struct ib_xrcd *xrcd; /* XRC TGT QPs only */ |
Sean Hefty | d3d72d9 | 2011-05-26 23:06:44 -0700 | [diff] [blame] | 1508 | struct list_head xrcd_list; |
Christoph Hellwig | fffb038 | 2016-05-03 18:01:07 +0200 | [diff] [blame] | 1509 | |
Hadar Hen Zion | 319a441 | 2013-08-07 14:01:59 +0300 | [diff] [blame] | 1510 | /* count times opened, mcast attaches, flow attaches */ |
| 1511 | atomic_t usecnt; |
Sean Hefty | 0e0ec7e | 2011-08-08 15:31:51 -0700 | [diff] [blame] | 1512 | struct list_head open_list; |
| 1513 | struct ib_qp *real_qp; |
Roland Dreier | e2773c0 | 2005-07-07 17:57:10 -0700 | [diff] [blame] | 1514 | struct ib_uobject *uobject; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1515 | void (*event_handler)(struct ib_event *, void *); |
| 1516 | void *qp_context; |
| 1517 | u32 qp_num; |
Bart Van Assche | 632bc3f | 2016-07-21 13:03:30 -0700 | [diff] [blame] | 1518 | u32 max_write_sge; |
| 1519 | u32 max_read_sge; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1520 | enum ib_qp_type qp_type; |
Yishai Hadas | a9017e2 | 2016-05-23 15:20:54 +0300 | [diff] [blame] | 1521 | struct ib_rwq_ind_table *rwq_ind_tbl; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1522 | }; |
| 1523 | |
| 1524 | struct ib_mr { |
Roland Dreier | e2773c0 | 2005-07-07 17:57:10 -0700 | [diff] [blame] | 1525 | struct ib_device *device; |
| 1526 | struct ib_pd *pd; |
Roland Dreier | e2773c0 | 2005-07-07 17:57:10 -0700 | [diff] [blame] | 1527 | u32 lkey; |
| 1528 | u32 rkey; |
Sagi Grimberg | 4c67e2b | 2015-10-13 19:11:24 +0300 | [diff] [blame] | 1529 | u64 iova; |
| 1530 | u32 length; |
| 1531 | unsigned int page_size; |
Steve Wise | d4a85c3 | 2016-05-03 18:01:08 +0200 | [diff] [blame] | 1532 | bool need_inval; |
Christoph Hellwig | fffb038 | 2016-05-03 18:01:07 +0200 | [diff] [blame] | 1533 | union { |
| 1534 | struct ib_uobject *uobject; /* user */ |
| 1535 | struct list_head qp_entry; /* FR */ |
| 1536 | }; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1537 | }; |
| 1538 | |
| 1539 | struct ib_mw { |
| 1540 | struct ib_device *device; |
| 1541 | struct ib_pd *pd; |
Roland Dreier | e2773c0 | 2005-07-07 17:57:10 -0700 | [diff] [blame] | 1542 | struct ib_uobject *uobject; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1543 | u32 rkey; |
Shani Michaeli | 7083e42 | 2013-02-06 16:19:12 +0000 | [diff] [blame] | 1544 | enum ib_mw_type type; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1545 | }; |
| 1546 | |
| 1547 | struct ib_fmr { |
| 1548 | struct ib_device *device; |
| 1549 | struct ib_pd *pd; |
| 1550 | struct list_head list; |
| 1551 | u32 lkey; |
| 1552 | u32 rkey; |
| 1553 | }; |
| 1554 | |
Hadar Hen Zion | 319a441 | 2013-08-07 14:01:59 +0300 | [diff] [blame] | 1555 | /* Supported steering options */ |
| 1556 | enum ib_flow_attr_type { |
| 1557 | /* steering according to rule specifications */ |
| 1558 | IB_FLOW_ATTR_NORMAL = 0x0, |
| 1559 | /* default unicast and multicast rule - |
| 1560 | * receive all Eth traffic which isn't steered to any QP |
| 1561 | */ |
| 1562 | IB_FLOW_ATTR_ALL_DEFAULT = 0x1, |
| 1563 | /* default multicast rule - |
| 1564 | * receive all Eth multicast traffic which isn't steered to any QP |
| 1565 | */ |
| 1566 | IB_FLOW_ATTR_MC_DEFAULT = 0x2, |
| 1567 | /* sniffer rule - receive all port traffic */ |
| 1568 | IB_FLOW_ATTR_SNIFFER = 0x3 |
| 1569 | }; |
| 1570 | |
| 1571 | /* Supported steering header types */ |
| 1572 | enum ib_flow_spec_type { |
| 1573 | /* L2 headers */ |
| 1574 | IB_FLOW_SPEC_ETH = 0x20, |
Matan Barak | 240ae00 | 2013-11-07 15:25:13 +0200 | [diff] [blame] | 1575 | IB_FLOW_SPEC_IB = 0x22, |
Hadar Hen Zion | 319a441 | 2013-08-07 14:01:59 +0300 | [diff] [blame] | 1576 | /* L3 header */ |
| 1577 | IB_FLOW_SPEC_IPV4 = 0x30, |
Maor Gottlieb | 4c2aae7 | 2016-06-17 15:14:50 +0300 | [diff] [blame] | 1578 | IB_FLOW_SPEC_IPV6 = 0x31, |
Hadar Hen Zion | 319a441 | 2013-08-07 14:01:59 +0300 | [diff] [blame] | 1579 | /* L4 headers */ |
| 1580 | IB_FLOW_SPEC_TCP = 0x40, |
| 1581 | IB_FLOW_SPEC_UDP = 0x41 |
| 1582 | }; |
Matan Barak | 240ae00 | 2013-11-07 15:25:13 +0200 | [diff] [blame] | 1583 | #define IB_FLOW_SPEC_LAYER_MASK 0xF0 |
Matan Barak | 22878db | 2013-09-01 18:39:52 +0300 | [diff] [blame] | 1584 | #define IB_FLOW_SPEC_SUPPORT_LAYERS 4 |
| 1585 | |
Hadar Hen Zion | 319a441 | 2013-08-07 14:01:59 +0300 | [diff] [blame] | 1586 | /* Flow steering rule priority is set according to its domain. |
| 1587 | * Lower domain value means higher priority. |
| 1588 | */ |
| 1589 | enum ib_flow_domain { |
| 1590 | IB_FLOW_DOMAIN_USER, |
| 1591 | IB_FLOW_DOMAIN_ETHTOOL, |
| 1592 | IB_FLOW_DOMAIN_RFS, |
| 1593 | IB_FLOW_DOMAIN_NIC, |
| 1594 | IB_FLOW_DOMAIN_NUM /* Must be last */ |
| 1595 | }; |
| 1596 | |
Marina Varshaver | a3100a7 | 2016-02-18 18:31:05 +0200 | [diff] [blame] | 1597 | enum ib_flow_flags { |
| 1598 | IB_FLOW_ATTR_FLAGS_DONT_TRAP = 1UL << 1, /* Continue match, no steal */ |
| 1599 | IB_FLOW_ATTR_FLAGS_RESERVED = 1UL << 2 /* Must be last */ |
| 1600 | }; |
| 1601 | |
Hadar Hen Zion | 319a441 | 2013-08-07 14:01:59 +0300 | [diff] [blame] | 1602 | struct ib_flow_eth_filter { |
| 1603 | u8 dst_mac[6]; |
| 1604 | u8 src_mac[6]; |
| 1605 | __be16 ether_type; |
| 1606 | __be16 vlan_tag; |
| 1607 | }; |
| 1608 | |
| 1609 | struct ib_flow_spec_eth { |
| 1610 | enum ib_flow_spec_type type; |
| 1611 | u16 size; |
| 1612 | struct ib_flow_eth_filter val; |
| 1613 | struct ib_flow_eth_filter mask; |
| 1614 | }; |
| 1615 | |
Matan Barak | 240ae00 | 2013-11-07 15:25:13 +0200 | [diff] [blame] | 1616 | struct ib_flow_ib_filter { |
| 1617 | __be16 dlid; |
| 1618 | __u8 sl; |
| 1619 | }; |
| 1620 | |
| 1621 | struct ib_flow_spec_ib { |
| 1622 | enum ib_flow_spec_type type; |
| 1623 | u16 size; |
| 1624 | struct ib_flow_ib_filter val; |
| 1625 | struct ib_flow_ib_filter mask; |
| 1626 | }; |
| 1627 | |
Hadar Hen Zion | 319a441 | 2013-08-07 14:01:59 +0300 | [diff] [blame] | 1628 | struct ib_flow_ipv4_filter { |
| 1629 | __be32 src_ip; |
| 1630 | __be32 dst_ip; |
| 1631 | }; |
| 1632 | |
| 1633 | struct ib_flow_spec_ipv4 { |
| 1634 | enum ib_flow_spec_type type; |
| 1635 | u16 size; |
| 1636 | struct ib_flow_ipv4_filter val; |
| 1637 | struct ib_flow_ipv4_filter mask; |
| 1638 | }; |
| 1639 | |
Maor Gottlieb | 4c2aae7 | 2016-06-17 15:14:50 +0300 | [diff] [blame] | 1640 | struct ib_flow_ipv6_filter { |
| 1641 | u8 src_ip[16]; |
| 1642 | u8 dst_ip[16]; |
| 1643 | }; |
| 1644 | |
| 1645 | struct ib_flow_spec_ipv6 { |
| 1646 | enum ib_flow_spec_type type; |
| 1647 | u16 size; |
| 1648 | struct ib_flow_ipv6_filter val; |
| 1649 | struct ib_flow_ipv6_filter mask; |
| 1650 | }; |
| 1651 | |
Hadar Hen Zion | 319a441 | 2013-08-07 14:01:59 +0300 | [diff] [blame] | 1652 | struct ib_flow_tcp_udp_filter { |
| 1653 | __be16 dst_port; |
| 1654 | __be16 src_port; |
| 1655 | }; |
| 1656 | |
| 1657 | struct ib_flow_spec_tcp_udp { |
| 1658 | enum ib_flow_spec_type type; |
| 1659 | u16 size; |
| 1660 | struct ib_flow_tcp_udp_filter val; |
| 1661 | struct ib_flow_tcp_udp_filter mask; |
| 1662 | }; |
| 1663 | |
| 1664 | union ib_flow_spec { |
| 1665 | struct { |
| 1666 | enum ib_flow_spec_type type; |
| 1667 | u16 size; |
| 1668 | }; |
| 1669 | struct ib_flow_spec_eth eth; |
Matan Barak | 240ae00 | 2013-11-07 15:25:13 +0200 | [diff] [blame] | 1670 | struct ib_flow_spec_ib ib; |
Hadar Hen Zion | 319a441 | 2013-08-07 14:01:59 +0300 | [diff] [blame] | 1671 | struct ib_flow_spec_ipv4 ipv4; |
| 1672 | struct ib_flow_spec_tcp_udp tcp_udp; |
Maor Gottlieb | 4c2aae7 | 2016-06-17 15:14:50 +0300 | [diff] [blame] | 1673 | struct ib_flow_spec_ipv6 ipv6; |
Hadar Hen Zion | 319a441 | 2013-08-07 14:01:59 +0300 | [diff] [blame] | 1674 | }; |
| 1675 | |
| 1676 | struct ib_flow_attr { |
| 1677 | enum ib_flow_attr_type type; |
| 1678 | u16 size; |
| 1679 | u16 priority; |
| 1680 | u32 flags; |
| 1681 | u8 num_of_specs; |
| 1682 | u8 port; |
| 1683 | /* The optional layers requested by the user follow here, laid out |
| 1684 | * contiguously as struct ib_flow_spec_xxx, struct ib_flow_spec_yyy, |
| 1685 | * and so on (see the sketch below struct ib_flow). |
| 1686 | */ |
| 1687 | }; |
| 1688 | |
| 1689 | struct ib_flow { |
| 1690 | struct ib_qp *qp; |
| 1691 | struct ib_uobject *uobject; |
| 1692 | }; |
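
/*
 * Illustrative sketch (not part of the original header): a normal-type
 * steering rule with one Ethernet spec laid out directly after the
 * ib_flow_attr, as the comment in struct ib_flow_attr describes.  The
 * destination MAC and port number are hypothetical; ib_create_flow() is
 * declared further down in this header.
 */
static inline struct ib_flow *example_steer_dmac(struct ib_qp *qp)
{
	struct {
		struct ib_flow_attr	attr;
		struct ib_flow_spec_eth	eth;
	} rule = {
		.attr = {
			.type		= IB_FLOW_ATTR_NORMAL,
			.size		= sizeof(rule),
			.num_of_specs	= 1,
			.port		= 1,
		},
		.eth = {
			.type	= IB_FLOW_SPEC_ETH,
			.size	= sizeof(struct ib_flow_spec_eth),
			.val	= { .dst_mac = { 0x00, 0x11, 0x22,
						 0x33, 0x44, 0x55 } },
			.mask	= { .dst_mac = { 0xff, 0xff, 0xff,
						 0xff, 0xff, 0xff } },
		},
	};

	return ib_create_flow(qp, &rule.attr, IB_FLOW_DOMAIN_USER);
}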
| 1693 | |
Ira Weiny | 4cd7c94 | 2015-06-06 14:38:31 -0400 | [diff] [blame] | 1694 | struct ib_mad_hdr; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1695 | struct ib_grh; |
| 1696 | |
| 1697 | enum ib_process_mad_flags { |
| 1698 | IB_MAD_IGNORE_MKEY = 1, |
| 1699 | IB_MAD_IGNORE_BKEY = 2, |
| 1700 | IB_MAD_IGNORE_ALL = IB_MAD_IGNORE_MKEY | IB_MAD_IGNORE_BKEY |
| 1701 | }; |
| 1702 | |
| 1703 | enum ib_mad_result { |
| 1704 | IB_MAD_RESULT_FAILURE = 0, /* (!SUCCESS is the important flag) */ |
| 1705 | IB_MAD_RESULT_SUCCESS = 1 << 0, /* MAD was successfully processed */ |
| 1706 | IB_MAD_RESULT_REPLY = 1 << 1, /* Reply packet needs to be sent */ |
| 1707 | IB_MAD_RESULT_CONSUMED = 1 << 2 /* Packet consumed: stop processing */ |
| 1708 | }; |
| 1709 | |
| 1710 | #define IB_DEVICE_NAME_MAX 64 |
| 1711 | |
| 1712 | struct ib_cache { |
| 1713 | rwlock_t lock; |
| 1714 | struct ib_event_handler event_handler; |
| 1715 | struct ib_pkey_cache **pkey_cache; |
Matan Barak | 03db3a2 | 2015-07-30 18:33:26 +0300 | [diff] [blame] | 1716 | struct ib_gid_table **gid_cache; |
Jack Morgenstein | 6fb9cdb | 2006-06-17 20:37:34 -0700 | [diff] [blame] | 1717 | u8 *lmc_cache; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1718 | }; |
| 1719 | |
Ralph Campbell | 9b51309 | 2006-12-12 14:27:41 -0800 | [diff] [blame] | 1720 | struct ib_dma_mapping_ops { |
| 1721 | int (*mapping_error)(struct ib_device *dev, |
| 1722 | u64 dma_addr); |
| 1723 | u64 (*map_single)(struct ib_device *dev, |
| 1724 | void *ptr, size_t size, |
| 1725 | enum dma_data_direction direction); |
| 1726 | void (*unmap_single)(struct ib_device *dev, |
| 1727 | u64 addr, size_t size, |
| 1728 | enum dma_data_direction direction); |
| 1729 | u64 (*map_page)(struct ib_device *dev, |
| 1730 | struct page *page, unsigned long offset, |
| 1731 | size_t size, |
| 1732 | enum dma_data_direction direction); |
| 1733 | void (*unmap_page)(struct ib_device *dev, |
| 1734 | u64 addr, size_t size, |
| 1735 | enum dma_data_direction direction); |
| 1736 | int (*map_sg)(struct ib_device *dev, |
| 1737 | struct scatterlist *sg, int nents, |
| 1738 | enum dma_data_direction direction); |
| 1739 | void (*unmap_sg)(struct ib_device *dev, |
| 1740 | struct scatterlist *sg, int nents, |
| 1741 | enum dma_data_direction direction); |
Parav Pandit | d970365 | 2016-09-28 20:25:47 +0000 | [diff] [blame^] | 1742 | int (*map_sg_attrs)(struct ib_device *dev, |
| 1743 | struct scatterlist *sg, int nents, |
| 1744 | enum dma_data_direction direction, |
| 1745 | unsigned long attrs); |
| 1746 | void (*unmap_sg_attrs)(struct ib_device *dev, |
| 1747 | struct scatterlist *sg, int nents, |
| 1748 | enum dma_data_direction direction, |
| 1749 | unsigned long attrs); |
Ralph Campbell | 9b51309 | 2006-12-12 14:27:41 -0800 | [diff] [blame] | 1750 | void (*sync_single_for_cpu)(struct ib_device *dev, |
| 1751 | u64 dma_handle, |
| 1752 | size_t size, |
Dotan Barak | 4deccd6 | 2008-07-14 23:48:44 -0700 | [diff] [blame] | 1753 | enum dma_data_direction dir); |
Ralph Campbell | 9b51309 | 2006-12-12 14:27:41 -0800 | [diff] [blame] | 1754 | void (*sync_single_for_device)(struct ib_device *dev, |
| 1755 | u64 dma_handle, |
| 1756 | size_t size, |
| 1757 | enum dma_data_direction dir); |
| 1758 | void *(*alloc_coherent)(struct ib_device *dev, |
| 1759 | size_t size, |
| 1760 | u64 *dma_handle, |
| 1761 | gfp_t flag); |
| 1762 | void (*free_coherent)(struct ib_device *dev, |
| 1763 | size_t size, void *cpu_addr, |
| 1764 | u64 dma_handle); |
| 1765 | }; |
| 1766 | |
Tom Tucker | 07ebafb | 2006-08-03 16:02:42 -0500 | [diff] [blame] | 1767 | struct iw_cm_verbs; |
| 1768 | |
Ira Weiny | 7738613 | 2015-05-13 20:02:58 -0400 | [diff] [blame] | 1769 | struct ib_port_immutable { |
| 1770 | int pkey_tbl_len; |
| 1771 | int gid_tbl_len; |
Ira Weiny | f9b22e3 | 2015-05-13 20:02:59 -0400 | [diff] [blame] | 1772 | u32 core_cap_flags; |
Ira Weiny | 337877a | 2015-06-06 14:38:29 -0400 | [diff] [blame] | 1773 | u32 max_mad_size; |
Ira Weiny | 7738613 | 2015-05-13 20:02:58 -0400 | [diff] [blame] | 1774 | }; |
| 1775 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1776 | struct ib_device { |
| 1777 | struct device *dma_device; |
| 1778 | |
| 1779 | char name[IB_DEVICE_NAME_MAX]; |
| 1780 | |
| 1781 | struct list_head event_handler_list; |
| 1782 | spinlock_t event_handler_lock; |
| 1783 | |
Alexander Chiang | 17a55f7 | 2010-02-02 19:09:16 +0000 | [diff] [blame] | 1784 | spinlock_t client_data_lock; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1785 | struct list_head core_list; |
Haggai Eran | 7c1eb45 | 2015-07-30 17:50:14 +0300 | [diff] [blame] | 1786 | /* Access to the client_data_list is protected by the client_data_lock |
| 1787 | * spinlock and the lists_rwsem read-write semaphore */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1788 | struct list_head client_data_list; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1789 | |
| 1790 | struct ib_cache cache; |
Ira Weiny | 7738613 | 2015-05-13 20:02:58 -0400 | [diff] [blame] | 1791 | /** |
| 1792 | * port_immutable is indexed by port number |
| 1793 | */ |
| 1794 | struct ib_port_immutable *port_immutable; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1795 | |
Michael S. Tsirkin | f4fd0b2 | 2007-05-03 13:48:47 +0300 | [diff] [blame] | 1796 | int num_comp_vectors; |
| 1797 | |
Tom Tucker | 07ebafb | 2006-08-03 16:02:42 -0500 | [diff] [blame] | 1798 | struct iw_cm_verbs *iwcm; |
| 1799 | |
Christoph Lameter | b40f475 | 2016-05-16 12:49:33 -0500 | [diff] [blame] | 1800 | /** |
| 1801 | * alloc_hw_stats - Allocate a struct rdma_hw_stats and fill in the |
| 1802 | * driver initialized data. The struct is kfree()'ed by the sysfs |
| 1803 | * core when the device is removed. A lifespan of -1 in the return |
| 1804 | * struct tells the core to set a default lifespan. |
| 1805 | */ |
| 1806 | struct rdma_hw_stats *(*alloc_hw_stats)(struct ib_device *device, |
| 1807 | u8 port_num); |
| 1808 | /** |
| 1809 | * get_hw_stats - Fill in the counter value(s) in the stats struct. |
| 1810 | * @index - The index in the value array we wish to have updated, or |
| 1811 | * num_counters if we want all stats updated |
| 1812 | * Return codes - |
| 1813 | * < 0 - Error, no counters updated |
| 1814 | * index - Updated the single counter pointed to by index |
| 1815 | * num_counters - Updated all counters (will reset the timestamp |
| 1816 | * and prevent further calls for lifespan milliseconds) |
| 1817 | * Drivers are allowed to update all counters in lieu of just the one |
| 1818 | * given in index, at their option; see the sketch after this struct. |
| 1819 | */ |
| 1820 | int (*get_hw_stats)(struct ib_device *device, |
| 1821 | struct rdma_hw_stats *stats, |
| 1822 | u8 port, int index); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1823 | int (*query_device)(struct ib_device *device, |
Matan Barak | 2528e33 | 2015-06-11 16:35:25 +0300 | [diff] [blame] | 1824 | struct ib_device_attr *device_attr, |
| 1825 | struct ib_udata *udata); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1826 | int (*query_port)(struct ib_device *device, |
| 1827 | u8 port_num, |
| 1828 | struct ib_port_attr *port_attr); |
Eli Cohen | a3f5ada | 2010-09-27 17:51:10 -0700 | [diff] [blame] | 1829 | enum rdma_link_layer (*get_link_layer)(struct ib_device *device, |
| 1830 | u8 port_num); |
Matan Barak | 03db3a2 | 2015-07-30 18:33:26 +0300 | [diff] [blame] | 1831 | /* When calling get_netdev, the HW vendor's driver should return the |
| 1832 | * net device of device @device at port @port_num or NULL if such |
| 1833 | * a net device doesn't exist. The vendor driver should call dev_hold |
| 1834 | * on this net device. The HW vendor's device driver must guarantee |
| 1835 | * that this function returns NULL before the net device reaches |
| 1836 | * NETDEV_UNREGISTER_FINAL state. |
| 1837 | */ |
| 1838 | struct net_device *(*get_netdev)(struct ib_device *device, |
| 1839 | u8 port_num); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1840 | int (*query_gid)(struct ib_device *device, |
| 1841 | u8 port_num, int index, |
| 1842 | union ib_gid *gid); |
Matan Barak | 03db3a2 | 2015-07-30 18:33:26 +0300 | [diff] [blame] | 1843 | /* When calling add_gid, the HW vendor's driver should |
| 1844 | * add the gid of device @device at gid index @index of |
| 1845 | * port @port_num to be @gid. Meta-info of that gid (for example, |
| 1846 | * the network device related to this gid) is available |
| 1847 | * at @attr. @context allows the HW vendor driver to store extra |
| 1848 | * information together with a GID entry. The HW vendor may allocate |
| 1849 | * memory to contain this information and store it in @context when a |
| 1850 | * new GID entry is written to. Params are consistent until the next |
| 1851 | * call of add_gid or delete_gid. The function should return 0 on |
| 1852 | * success or error otherwise. The function could be called |
| 1853 | * concurrently for different ports. This function is only called |
| 1854 | * when roce_gid_table is used. |
| 1855 | */ |
| 1856 | int (*add_gid)(struct ib_device *device, |
| 1857 | u8 port_num, |
| 1858 | unsigned int index, |
| 1859 | const union ib_gid *gid, |
| 1860 | const struct ib_gid_attr *attr, |
| 1861 | void **context); |
| 1862 | /* When calling del_gid, the HW vendor's driver should delete the |
| 1863 | * gid of device @device at gid index @index of port @port_num. |
| 1864 | * Upon the deletion of a GID entry, the HW vendor must free any |
| 1865 | * allocated memory. The caller will clear @context afterwards. |
| 1866 | * This function is only called when roce_gid_table is used. |
| 1867 | */ |
| 1868 | int (*del_gid)(struct ib_device *device, |
| 1869 | u8 port_num, |
| 1870 | unsigned int index, |
| 1871 | void **context); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1872 | int (*query_pkey)(struct ib_device *device, |
| 1873 | u8 port_num, u16 index, u16 *pkey); |
| 1874 | int (*modify_device)(struct ib_device *device, |
| 1875 | int device_modify_mask, |
| 1876 | struct ib_device_modify *device_modify); |
| 1877 | int (*modify_port)(struct ib_device *device, |
| 1878 | u8 port_num, int port_modify_mask, |
| 1879 | struct ib_port_modify *port_modify); |
Roland Dreier | e2773c0 | 2005-07-07 17:57:10 -0700 | [diff] [blame] | 1880 | struct ib_ucontext * (*alloc_ucontext)(struct ib_device *device, |
| 1881 | struct ib_udata *udata); |
| 1882 | int (*dealloc_ucontext)(struct ib_ucontext *context); |
| 1883 | int (*mmap)(struct ib_ucontext *context, |
| 1884 | struct vm_area_struct *vma); |
| 1885 | struct ib_pd * (*alloc_pd)(struct ib_device *device, |
| 1886 | struct ib_ucontext *context, |
| 1887 | struct ib_udata *udata); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1888 | int (*dealloc_pd)(struct ib_pd *pd); |
| 1889 | struct ib_ah * (*create_ah)(struct ib_pd *pd, |
| 1890 | struct ib_ah_attr *ah_attr); |
| 1891 | int (*modify_ah)(struct ib_ah *ah, |
| 1892 | struct ib_ah_attr *ah_attr); |
| 1893 | int (*query_ah)(struct ib_ah *ah, |
| 1894 | struct ib_ah_attr *ah_attr); |
| 1895 | int (*destroy_ah)(struct ib_ah *ah); |
Roland Dreier | d41fcc6 | 2005-08-18 12:23:08 -0700 | [diff] [blame] | 1896 | struct ib_srq * (*create_srq)(struct ib_pd *pd, |
| 1897 | struct ib_srq_init_attr *srq_init_attr, |
| 1898 | struct ib_udata *udata); |
| 1899 | int (*modify_srq)(struct ib_srq *srq, |
| 1900 | struct ib_srq_attr *srq_attr, |
Ralph Campbell | 9bc57e2 | 2006-08-11 14:58:09 -0700 | [diff] [blame] | 1901 | enum ib_srq_attr_mask srq_attr_mask, |
| 1902 | struct ib_udata *udata); |
Roland Dreier | d41fcc6 | 2005-08-18 12:23:08 -0700 | [diff] [blame] | 1903 | int (*query_srq)(struct ib_srq *srq, |
| 1904 | struct ib_srq_attr *srq_attr); |
| 1905 | int (*destroy_srq)(struct ib_srq *srq); |
| 1906 | int (*post_srq_recv)(struct ib_srq *srq, |
| 1907 | struct ib_recv_wr *recv_wr, |
| 1908 | struct ib_recv_wr **bad_recv_wr); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1909 | struct ib_qp * (*create_qp)(struct ib_pd *pd, |
Roland Dreier | e2773c0 | 2005-07-07 17:57:10 -0700 | [diff] [blame] | 1910 | struct ib_qp_init_attr *qp_init_attr, |
| 1911 | struct ib_udata *udata); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1912 | int (*modify_qp)(struct ib_qp *qp, |
| 1913 | struct ib_qp_attr *qp_attr, |
Ralph Campbell | 9bc57e2 | 2006-08-11 14:58:09 -0700 | [diff] [blame] | 1914 | int qp_attr_mask, |
| 1915 | struct ib_udata *udata); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1916 | int (*query_qp)(struct ib_qp *qp, |
| 1917 | struct ib_qp_attr *qp_attr, |
| 1918 | int qp_attr_mask, |
| 1919 | struct ib_qp_init_attr *qp_init_attr); |
| 1920 | int (*destroy_qp)(struct ib_qp *qp); |
| 1921 | int (*post_send)(struct ib_qp *qp, |
| 1922 | struct ib_send_wr *send_wr, |
| 1923 | struct ib_send_wr **bad_send_wr); |
| 1924 | int (*post_recv)(struct ib_qp *qp, |
| 1925 | struct ib_recv_wr *recv_wr, |
| 1926 | struct ib_recv_wr **bad_recv_wr); |
Matan Barak | bcf4c1e | 2015-06-11 16:35:20 +0300 | [diff] [blame] | 1927 | struct ib_cq * (*create_cq)(struct ib_device *device, |
| 1928 | const struct ib_cq_init_attr *attr, |
Roland Dreier | e2773c0 | 2005-07-07 17:57:10 -0700 | [diff] [blame] | 1929 | struct ib_ucontext *context, |
| 1930 | struct ib_udata *udata); |
Eli Cohen | 2dd5716 | 2008-04-16 21:09:33 -0700 | [diff] [blame] | 1931 | int (*modify_cq)(struct ib_cq *cq, u16 cq_count, |
| 1932 | u16 cq_period); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1933 | int (*destroy_cq)(struct ib_cq *cq); |
Roland Dreier | 33b9b3e | 2006-01-30 14:29:21 -0800 | [diff] [blame] | 1934 | int (*resize_cq)(struct ib_cq *cq, int cqe, |
| 1935 | struct ib_udata *udata); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1936 | int (*poll_cq)(struct ib_cq *cq, int num_entries, |
| 1937 | struct ib_wc *wc); |
| 1938 | int (*peek_cq)(struct ib_cq *cq, int wc_cnt); |
| 1939 | int (*req_notify_cq)(struct ib_cq *cq, |
Roland Dreier | ed23a72 | 2007-05-06 21:02:48 -0700 | [diff] [blame] | 1940 | enum ib_cq_notify_flags flags); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1941 | int (*req_ncomp_notif)(struct ib_cq *cq, |
| 1942 | int wc_cnt); |
| 1943 | struct ib_mr * (*get_dma_mr)(struct ib_pd *pd, |
| 1944 | int mr_access_flags); |
Roland Dreier | e2773c0 | 2005-07-07 17:57:10 -0700 | [diff] [blame] | 1945 | struct ib_mr * (*reg_user_mr)(struct ib_pd *pd, |
Roland Dreier | f7c6a7b | 2007-03-04 16:15:11 -0800 | [diff] [blame] | 1946 | u64 start, u64 length, |
| 1947 | u64 virt_addr, |
Roland Dreier | e2773c0 | 2005-07-07 17:57:10 -0700 | [diff] [blame] | 1948 | int mr_access_flags, |
| 1949 | struct ib_udata *udata); |
Matan Barak | 7e6edb9 | 2014-07-31 11:01:28 +0300 | [diff] [blame] | 1950 | int (*rereg_user_mr)(struct ib_mr *mr, |
| 1951 | int flags, |
| 1952 | u64 start, u64 length, |
| 1953 | u64 virt_addr, |
| 1954 | int mr_access_flags, |
| 1955 | struct ib_pd *pd, |
| 1956 | struct ib_udata *udata); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1957 | int (*dereg_mr)(struct ib_mr *mr); |
Sagi Grimberg | 9bee178 | 2015-07-30 10:32:35 +0300 | [diff] [blame] | 1958 | struct ib_mr * (*alloc_mr)(struct ib_pd *pd, |
| 1959 | enum ib_mr_type mr_type, |
| 1960 | u32 max_num_sg); |
Sagi Grimberg | 4c67e2b | 2015-10-13 19:11:24 +0300 | [diff] [blame] | 1961 | int (*map_mr_sg)(struct ib_mr *mr, |
| 1962 | struct scatterlist *sg, |
Christoph Hellwig | ff2ba99 | 2016-05-03 18:01:04 +0200 | [diff] [blame] | 1963 | int sg_nents, |
Bart Van Assche | 9aa8b32 | 2016-05-12 10:49:15 -0700 | [diff] [blame] | 1964 | unsigned int *sg_offset); |
Shani Michaeli | 7083e42 | 2013-02-06 16:19:12 +0000 | [diff] [blame] | 1965 | struct ib_mw * (*alloc_mw)(struct ib_pd *pd, |
Matan Barak | b2a239d | 2016-02-29 18:05:29 +0200 | [diff] [blame] | 1966 | enum ib_mw_type type, |
| 1967 | struct ib_udata *udata); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1968 | int (*dealloc_mw)(struct ib_mw *mw); |
| 1969 | struct ib_fmr * (*alloc_fmr)(struct ib_pd *pd, |
| 1970 | int mr_access_flags, |
| 1971 | struct ib_fmr_attr *fmr_attr); |
| 1972 | int (*map_phys_fmr)(struct ib_fmr *fmr, |
| 1973 | u64 *page_list, int list_len, |
| 1974 | u64 iova); |
| 1975 | int (*unmap_fmr)(struct list_head *fmr_list); |
| 1976 | int (*dealloc_fmr)(struct ib_fmr *fmr); |
| 1977 | int (*attach_mcast)(struct ib_qp *qp, |
| 1978 | union ib_gid *gid, |
| 1979 | u16 lid); |
| 1980 | int (*detach_mcast)(struct ib_qp *qp, |
| 1981 | union ib_gid *gid, |
| 1982 | u16 lid); |
| 1983 | int (*process_mad)(struct ib_device *device, |
| 1984 | int process_mad_flags, |
| 1985 | u8 port_num, |
Ira Weiny | a97e2d8 | 2015-05-31 17:15:30 -0400 | [diff] [blame] | 1986 | const struct ib_wc *in_wc, |
| 1987 | const struct ib_grh *in_grh, |
Ira Weiny | 4cd7c94 | 2015-06-06 14:38:31 -0400 | [diff] [blame] | 1988 | const struct ib_mad_hdr *in_mad, |
| 1989 | size_t in_mad_size, |
| 1990 | struct ib_mad_hdr *out_mad, |
| 1991 | size_t *out_mad_size, |
| 1992 | u16 *out_mad_pkey_index); |
Sean Hefty | 59991f9 | 2011-05-23 17:52:46 -0700 | [diff] [blame] | 1993 | struct ib_xrcd * (*alloc_xrcd)(struct ib_device *device, |
| 1994 | struct ib_ucontext *ucontext, |
| 1995 | struct ib_udata *udata); |
| 1996 | int (*dealloc_xrcd)(struct ib_xrcd *xrcd); |
Hadar Hen Zion | 319a441 | 2013-08-07 14:01:59 +0300 | [diff] [blame] | 1997 | struct ib_flow * (*create_flow)(struct ib_qp *qp, |
| 1998 | struct ib_flow_attr |
| 1999 | *flow_attr, |
| 2000 | int domain); |
| 2001 | int (*destroy_flow)(struct ib_flow *flow_id); |
Sagi Grimberg | 1b01d33 | 2014-02-23 14:19:05 +0200 | [diff] [blame] | 2002 | int (*check_mr_status)(struct ib_mr *mr, u32 check_mask, |
| 2003 | struct ib_mr_status *mr_status); |
Yishai Hadas | 036b106 | 2015-08-13 18:32:05 +0300 | [diff] [blame] | 2004 | void (*disassociate_ucontext)(struct ib_ucontext *ibcontext); |
Steve Wise | 765d677 | 2016-02-17 08:15:41 -0800 | [diff] [blame] | 2005 | void (*drain_rq)(struct ib_qp *qp); |
| 2006 | void (*drain_sq)(struct ib_qp *qp); |
Eli Cohen | 50174a7 | 2016-03-11 22:58:38 +0200 | [diff] [blame] | 2007 | int (*set_vf_link_state)(struct ib_device *device, int vf, u8 port, |
| 2008 | int state); |
| 2009 | int (*get_vf_config)(struct ib_device *device, int vf, u8 port, |
| 2010 | struct ifla_vf_info *ivf); |
| 2011 | int (*get_vf_stats)(struct ib_device *device, int vf, u8 port, |
| 2012 | struct ifla_vf_stats *stats); |
| 2013 | int (*set_vf_guid)(struct ib_device *device, int vf, u8 port, u64 guid, |
| 2014 | int type); |
Yishai Hadas | 5fd251c | 2016-05-23 15:20:48 +0300 | [diff] [blame] | 2015 | struct ib_wq * (*create_wq)(struct ib_pd *pd, |
| 2016 | struct ib_wq_init_attr *init_attr, |
| 2017 | struct ib_udata *udata); |
| 2018 | int (*destroy_wq)(struct ib_wq *wq); |
| 2019 | int (*modify_wq)(struct ib_wq *wq, |
| 2020 | struct ib_wq_attr *attr, |
| 2021 | u32 wq_attr_mask, |
| 2022 | struct ib_udata *udata); |
Yishai Hadas | 6d39786 | 2016-05-23 15:20:51 +0300 | [diff] [blame] | 2023 | struct ib_rwq_ind_table * (*create_rwq_ind_table)(struct ib_device *device, |
| 2024 | struct ib_rwq_ind_table_init_attr *init_attr, |
| 2025 | struct ib_udata *udata); |
| 2026 | int (*destroy_rwq_ind_table)(struct ib_rwq_ind_table *wq_ind_table); |
Ralph Campbell | 9b51309 | 2006-12-12 14:27:41 -0800 | [diff] [blame] | 2027 | struct ib_dma_mapping_ops *dma_ops; |
| 2028 | |
Roland Dreier | e2773c0 | 2005-07-07 17:57:10 -0700 | [diff] [blame] | 2029 | struct module *owner; |
Tony Jones | f4e91eb | 2008-02-22 00:13:36 +0100 | [diff] [blame] | 2030 | struct device dev; |
Greg Kroah-Hartman | 35be068 | 2007-12-17 15:54:39 -0400 | [diff] [blame] | 2031 | struct kobject *ports_parent; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2032 | struct list_head port_list; |
| 2033 | |
| 2034 | enum { |
| 2035 | IB_DEV_UNINITIALIZED, |
| 2036 | IB_DEV_REGISTERED, |
| 2037 | IB_DEV_UNREGISTERED |
| 2038 | } reg_state; |
| 2039 | |
Roland Dreier | 274c089 | 2005-09-29 14:17:48 -0700 | [diff] [blame] | 2040 | int uverbs_abi_ver; |
Alexander Chiang | 17a55f7 | 2010-02-02 19:09:16 +0000 | [diff] [blame] | 2041 | u64 uverbs_cmd_mask; |
Yann Droneaud | f21519b | 2013-11-06 23:21:49 +0100 | [diff] [blame] | 2042 | u64 uverbs_ex_cmd_mask; |
Roland Dreier | 274c089 | 2005-09-29 14:17:48 -0700 | [diff] [blame] | 2043 | |
Roland Dreier | c5bcbbb | 2006-02-02 09:47:14 -0800 | [diff] [blame] | 2044 | char node_desc[64]; |
Sean Hefty | cf311cd | 2006-01-10 07:39:34 -0800 | [diff] [blame] | 2045 | __be64 node_guid; |
Steve Wise | 96f15c0 | 2008-07-14 23:48:53 -0700 | [diff] [blame] | 2046 | u32 local_dma_lkey; |
Hal Rosenstock | 4139032 | 2015-06-29 09:57:00 -0400 | [diff] [blame] | 2047 | u16 is_switch:1; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2048 | u8 node_type; |
| 2049 | u8 phys_port_cnt; |
Ira Weiny | 3e153a9 | 2015-12-18 10:59:44 +0200 | [diff] [blame] | 2050 | struct ib_device_attr attrs; |
Christoph Lameter | b40f475 | 2016-05-16 12:49:33 -0500 | [diff] [blame] | 2051 | struct attribute_group *hw_stats_ag; |
| 2052 | struct rdma_hw_stats *hw_stats; |
Ira Weiny | 7738613 | 2015-05-13 20:02:58 -0400 | [diff] [blame] | 2053 | |
| 2054 | /** |
| 2055 | * The following mandatory functions are used only at device |
| 2056 | * registration. Keep functions such as these at the end of this |
| 2057 | * structure to avoid cache line misses when accessing struct ib_device |
| 2058 | * in fast paths. |
| 2059 | */ |
| 2060 | int (*get_port_immutable)(struct ib_device *, u8, struct ib_port_immutable *); |
Ira Weiny | 5fa76c2 | 2016-06-15 02:21:56 -0400 | [diff] [blame] | 2061 | void (*get_dev_fw_str)(struct ib_device *, char *str, size_t str_len); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2062 | }; |
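
/*
 * Illustrative sketch (not part of the original header): the
 * alloc_hw_stats/get_hw_stats contract documented inside struct ib_device
 * above.  The counter names and example_read_counter() are hypothetical;
 * rdma_alloc_hw_stats_struct() is the core helper declared earlier in
 * this header.
 */
u64 example_read_counter(struct ib_device *device, u8 port, int index);

static const char * const example_counter_names[] = {
	"example_rx_pkts",
	"example_tx_pkts",
};

static inline struct rdma_hw_stats *
example_alloc_hw_stats(struct ib_device *device, u8 port_num)
{
	/* Passing -1 as the lifespan would ask the core for its default. */
	return rdma_alloc_hw_stats_struct(example_counter_names,
					  ARRAY_SIZE(example_counter_names),
					  RDMA_HW_STATS_DEFAULT_LIFESPAN);
}

static inline int example_get_hw_stats(struct ib_device *device,
				       struct rdma_hw_stats *stats,
				       u8 port, int index)
{
	int i;

	/* Updating every counter in lieu of just @index is allowed ... */
	for (i = 0; i < stats->num_counters; i++)
		stats->value[i] = example_read_counter(device, port, i);

	/* ... as long as num_counters is returned to say so. */
	return stats->num_counters;
}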
| 2063 | |
| 2064 | struct ib_client { |
| 2065 | char *name; |
| 2066 | void (*add) (struct ib_device *); |
Haggai Eran | 7c1eb45 | 2015-07-30 17:50:14 +0300 | [diff] [blame] | 2067 | void (*remove)(struct ib_device *, void *client_data); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2068 | |
Yotam Kenneth | 9268f72 | 2015-07-30 17:50:15 +0300 | [diff] [blame] | 2069 | /* Returns the net_dev belonging to this ib_client and matching the |
| 2070 | * given parameters. |
| 2071 | * @dev: An RDMA device that the net_dev uses for communication. |
| 2072 | * @port: A physical port number on the RDMA device. |
| 2073 | * @pkey: P_Key that the net_dev uses if applicable. |
| 2074 | * @gid: A GID that the net_dev uses to communicate. |
| 2075 | * @addr: An IP address the net_dev is configured with. |
| 2076 | * @client_data: The device's client data set by ib_set_client_data(). |
| 2077 | * |
| 2078 | * An ib_client that implements a net_dev on top of RDMA devices |
| 2079 | * (such as IP over IB) should implement this callback, allowing the |
| 2080 | * rdma_cm module to find the right net_dev for a given request. |
| 2081 | * |
| 2082 | * The caller is responsible for calling dev_put on the returned |
| 2083 | * netdev. */ |
| 2084 | struct net_device *(*get_net_dev_by_params)( |
| 2085 | struct ib_device *dev, |
| 2086 | u8 port, |
| 2087 | u16 pkey, |
| 2088 | const union ib_gid *gid, |
| 2089 | const struct sockaddr *addr, |
| 2090 | void *client_data); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2091 | struct list_head list; |
| 2092 | }; |
| 2093 | |
| 2094 | struct ib_device *ib_alloc_device(size_t size); |
| 2095 | void ib_dealloc_device(struct ib_device *device); |
| 2096 | |
Ira Weiny | 5fa76c2 | 2016-06-15 02:21:56 -0400 | [diff] [blame] | 2097 | void ib_get_device_fw_str(struct ib_device *device, char *str, size_t str_len); |
| 2098 | |
Ralph Campbell | 9a6edb6 | 2010-05-06 17:03:25 -0700 | [diff] [blame] | 2099 | int ib_register_device(struct ib_device *device, |
| 2100 | int (*port_callback)(struct ib_device *, |
| 2101 | u8, struct kobject *)); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2102 | void ib_unregister_device(struct ib_device *device); |
| 2103 | |
| 2104 | int ib_register_client (struct ib_client *client); |
| 2105 | void ib_unregister_client(struct ib_client *client); |
| 2106 | |
| 2107 | void *ib_get_client_data(struct ib_device *device, struct ib_client *client); |
| 2108 | void ib_set_client_data(struct ib_device *device, struct ib_client *client, |
| 2109 | void *data); |
| 2110 | |
Roland Dreier | e2773c0 | 2005-07-07 17:57:10 -0700 | [diff] [blame] | 2111 | static inline int ib_copy_from_udata(void *dest, struct ib_udata *udata, size_t len) |
| 2112 | { |
| 2113 | return copy_from_user(dest, udata->inbuf, len) ? -EFAULT : 0; |
| 2114 | } |
| 2115 | |
| 2116 | static inline int ib_copy_to_udata(struct ib_udata *udata, void *src, size_t len) |
| 2117 | { |
Yann Droneaud | 43c61165 | 2015-02-05 22:10:18 +0100 | [diff] [blame] | 2118 | return copy_to_user(udata->outbuf, src, len) ? -EFAULT : 0; |
Roland Dreier | e2773c0 | 2005-07-07 17:57:10 -0700 | [diff] [blame] | 2119 | } |
| 2120 | |
Matan Barak | 301a721 | 2015-12-15 20:30:10 +0200 | [diff] [blame] | 2121 | static inline bool ib_is_udata_cleared(struct ib_udata *udata, |
| 2122 | size_t offset, |
| 2123 | size_t len) |
| 2124 | { |
| 2125 | const void __user *p = udata->inbuf + offset; |
Markus Elfring | 92d27ae | 2016-08-22 18:23:24 +0200 | [diff] [blame] | 2126 | bool ret; |
Matan Barak | 301a721 | 2015-12-15 20:30:10 +0200 | [diff] [blame] | 2127 | u8 *buf; |
| 2128 | |
| 2129 | if (len > USHRT_MAX) |
| 2130 | return false; |
| 2131 | |
Markus Elfring | 92d27ae | 2016-08-22 18:23:24 +0200 | [diff] [blame] | 2132 | buf = memdup_user(p, len); |
| 2133 | if (IS_ERR(buf)) |
Matan Barak | 301a721 | 2015-12-15 20:30:10 +0200 | [diff] [blame] | 2134 | return false; |
| 2135 | |
Matan Barak | 301a721 | 2015-12-15 20:30:10 +0200 | [diff] [blame] | 2136 | ret = !memchr_inv(buf, 0, len); |
Matan Barak | 301a721 | 2015-12-15 20:30:10 +0200 | [diff] [blame] | 2137 | kfree(buf); |
| 2138 | return ret; |
| 2139 | } |
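
/*
 * Illustrative sketch (not part of the original header): the typical
 * forward-compatibility check a driver builds on ib_is_udata_cleared().
 * If userspace handed in a command structure larger than the size this
 * kernel knows about, the unknown tail must be all zeroes or the request
 * is refused.  known_cmd_size would normally be sizeof() the driver's
 * command struct (hypothetical here).
 */
static inline int example_check_cmd_tail(struct ib_udata *udata,
					 size_t known_cmd_size)
{
	if (udata->inlen > known_cmd_size &&
	    !ib_is_udata_cleared(udata, known_cmd_size,
				 udata->inlen - known_cmd_size))
		return -EOPNOTSUPP;

	return 0;
}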
| 2140 | |
Roland Dreier | 8a51866 | 2006-02-13 12:48:12 -0800 | [diff] [blame] | 2141 | /** |
| 2142 | * ib_modify_qp_is_ok - Check that the supplied attribute mask |
| 2143 | * contains all required attributes and no attributes not allowed for |
| 2144 | * the given QP state transition. |
| 2145 | * @cur_state: Current QP state |
| 2146 | * @next_state: Next QP state |
| 2147 | * @type: QP type |
| 2148 | * @mask: Mask of supplied QP attributes |
Matan Barak | dd5f03b | 2013-12-12 18:03:11 +0200 | [diff] [blame] | 2149 | * @ll: link layer of port |
Roland Dreier | 8a51866 | 2006-02-13 12:48:12 -0800 | [diff] [blame] | 2150 | * |
| 2151 | * This function is a helper function that a low-level driver's |
| 2152 | * modify_qp method can use to validate the consumer's input. It |
| 2153 | * checks that cur_state and next_state are valid QP states, that a |
| 2154 | * transition from cur_state to next_state is allowed by the IB spec, |
| 2155 | * and that the attribute mask supplied is allowed for the transition. |
| 2156 | */ |
| 2157 | int ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state, |
Matan Barak | dd5f03b | 2013-12-12 18:03:11 +0200 | [diff] [blame] | 2158 | enum ib_qp_type type, enum ib_qp_attr_mask mask, |
| 2159 | enum rdma_link_layer ll); |
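
/*
 * Illustrative sketch (not part of the original header): how a driver's
 * modify_qp method might derive the state pair and validate it with
 * ib_modify_qp_is_ok().  example_current_state() and the fixed
 * InfiniBand link layer are hypothetical simplifications.
 */
enum ib_qp_state example_current_state(struct ib_qp *qp);

static inline int example_validate_modify(struct ib_qp *qp,
					  struct ib_qp_attr *attr,
					  int attr_mask)
{
	enum ib_qp_state cur, next;

	cur  = (attr_mask & IB_QP_CUR_STATE) ? attr->cur_qp_state :
					       example_current_state(qp);
	next = (attr_mask & IB_QP_STATE) ? attr->qp_state : cur;

	if (!ib_modify_qp_is_ok(cur, next, qp->qp_type, attr_mask,
				IB_LINK_LAYER_INFINIBAND))
		return -EINVAL;

	return 0;
}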
Roland Dreier | 8a51866 | 2006-02-13 12:48:12 -0800 | [diff] [blame] | 2160 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2161 | int ib_register_event_handler (struct ib_event_handler *event_handler); |
| 2162 | int ib_unregister_event_handler(struct ib_event_handler *event_handler); |
| 2163 | void ib_dispatch_event(struct ib_event *event); |
| 2164 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2165 | int ib_query_port(struct ib_device *device, |
| 2166 | u8 port_num, struct ib_port_attr *port_attr); |
| 2167 | |
Eli Cohen | a3f5ada | 2010-09-27 17:51:10 -0700 | [diff] [blame] | 2168 | enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device, |
| 2169 | u8 port_num); |
| 2170 | |
Ira Weiny | 0cf18d7 | 2015-05-13 20:02:55 -0400 | [diff] [blame] | 2171 | /** |
Hal Rosenstock | 4139032 | 2015-06-29 09:57:00 -0400 | [diff] [blame] | 2172 | * rdma_cap_ib_switch - Check if the device is IB switch |
| 2173 | * @device: Device to check |
| 2174 | * |
| 2175 | * The device driver is responsible for setting the is_switch bit |
| 2176 | * in the ib_device structure at init time. |
| 2177 | * |
| 2178 | * Return: true if the device is IB switch. |
| 2179 | */ |
| 2180 | static inline bool rdma_cap_ib_switch(const struct ib_device *device) |
| 2181 | { |
| 2182 | return device->is_switch; |
| 2183 | } |
| 2184 | |
| 2185 | /** |
Ira Weiny | 0cf18d7 | 2015-05-13 20:02:55 -0400 | [diff] [blame] | 2186 | * rdma_start_port - Return the first valid port number for the device |
| 2187 | * specified |
| 2188 | * |
| 2189 | * @device: Device to be checked |
| 2190 | * |
| 2191 | * Return start port number |
| 2192 | */ |
| 2193 | static inline u8 rdma_start_port(const struct ib_device *device) |
| 2194 | { |
Hal Rosenstock | 4139032 | 2015-06-29 09:57:00 -0400 | [diff] [blame] | 2195 | return rdma_cap_ib_switch(device) ? 0 : 1; |
Ira Weiny | 0cf18d7 | 2015-05-13 20:02:55 -0400 | [diff] [blame] | 2196 | } |
| 2197 | |
| 2198 | /** |
| 2199 | * rdma_end_port - Return the last valid port number for the device |
| 2200 | * specified |
| 2201 | * |
| 2202 | * @device: Device to be checked |
| 2203 | * |
| 2204 | * Return last port number |
| 2205 | */ |
| 2206 | static inline u8 rdma_end_port(const struct ib_device *device) |
| 2207 | { |
Hal Rosenstock | 4139032 | 2015-06-29 09:57:00 -0400 | [diff] [blame] | 2208 | return rdma_cap_ib_switch(device) ? 0 : device->phys_port_cnt; |
Ira Weiny | 0cf18d7 | 2015-05-13 20:02:55 -0400 | [diff] [blame] | 2209 | } |
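
/*
 * Illustrative sketch (not part of the original header): the canonical
 * way to walk every valid port, which covers both switches (port 0 only)
 * and HCAs (ports 1..phys_port_cnt).  The per-port body is hypothetical;
 * rdma_cap_ib_mad() is defined just below.
 */
static inline void example_for_each_port(struct ib_device *device)
{
	u8 port;

	for (port = rdma_start_port(device);
	     port <= rdma_end_port(device); port++) {
		if (!rdma_cap_ib_mad(device, port))
			continue;
		/* ... set up per-port MAD resources here ... */
	}
}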
| 2210 | |
Ira Weiny | 5ede928 | 2015-05-31 17:15:29 -0400 | [diff] [blame] | 2211 | static inline bool rdma_protocol_ib(const struct ib_device *device, u8 port_num) |
Michael Wang | de66be9 | 2015-05-05 14:50:19 +0200 | [diff] [blame] | 2212 | { |
Ira Weiny | f9b22e3 | 2015-05-13 20:02:59 -0400 | [diff] [blame] | 2213 | return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_IB; |
Michael Wang | de66be9 | 2015-05-05 14:50:19 +0200 | [diff] [blame] | 2214 | } |
| 2215 | |
Ira Weiny | 5ede928 | 2015-05-31 17:15:29 -0400 | [diff] [blame] | 2216 | static inline bool rdma_protocol_roce(const struct ib_device *device, u8 port_num) |
Michael Wang | de66be9 | 2015-05-05 14:50:19 +0200 | [diff] [blame] | 2217 | { |
Matan Barak | 7766a99 | 2015-12-23 14:56:50 +0200 | [diff] [blame] | 2218 | return device->port_immutable[port_num].core_cap_flags & |
| 2219 | (RDMA_CORE_CAP_PROT_ROCE | RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP); |
| 2220 | } |
| 2221 | |
| 2222 | static inline bool rdma_protocol_roce_udp_encap(const struct ib_device *device, u8 port_num) |
| 2223 | { |
| 2224 | return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP; |
| 2225 | } |
| 2226 | |
| 2227 | static inline bool rdma_protocol_roce_eth_encap(const struct ib_device *device, u8 port_num) |
| 2228 | { |
Ira Weiny | f9b22e3 | 2015-05-13 20:02:59 -0400 | [diff] [blame] | 2229 | return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_ROCE; |
Michael Wang | de66be9 | 2015-05-05 14:50:19 +0200 | [diff] [blame] | 2230 | } |
| 2231 | |
Ira Weiny | 5ede928 | 2015-05-31 17:15:29 -0400 | [diff] [blame] | 2232 | static inline bool rdma_protocol_iwarp(const struct ib_device *device, u8 port_num) |
Michael Wang | de66be9 | 2015-05-05 14:50:19 +0200 | [diff] [blame] | 2233 | { |
Ira Weiny | f9b22e3 | 2015-05-13 20:02:59 -0400 | [diff] [blame] | 2234 | return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_IWARP; |
Michael Wang | de66be9 | 2015-05-05 14:50:19 +0200 | [diff] [blame] | 2235 | } |
| 2236 | |
Ira Weiny | 5ede928 | 2015-05-31 17:15:29 -0400 | [diff] [blame] | 2237 | static inline bool rdma_ib_or_roce(const struct ib_device *device, u8 port_num) |
Michael Wang | de66be9 | 2015-05-05 14:50:19 +0200 | [diff] [blame] | 2238 | { |
Matan Barak | 7766a99 | 2015-12-23 14:56:50 +0200 | [diff] [blame] | 2239 | return rdma_protocol_ib(device, port_num) || |
| 2240 | rdma_protocol_roce(device, port_num); |
Michael Wang | de66be9 | 2015-05-05 14:50:19 +0200 | [diff] [blame] | 2241 | } |
| 2242 | |
Michael Wang | c757dea | 2015-05-05 14:50:32 +0200 | [diff] [blame] | 2243 | /** |
Michael Wang | 296ec00 | 2015-05-18 10:41:45 +0200 | [diff] [blame] | 2244 | * rdma_cap_ib_mad - Check if the port of a device supports InfiniBand |
Michael Wang | c757dea | 2015-05-05 14:50:32 +0200 | [diff] [blame] | 2245 | * Management Datagrams. |
Michael Wang | 296ec00 | 2015-05-18 10:41:45 +0200 | [diff] [blame] | 2246 | * @device: Device to check |
| 2247 | * @port_num: Port number to check |
Michael Wang | c757dea | 2015-05-05 14:50:32 +0200 | [diff] [blame] | 2248 | * |
Michael Wang | 296ec00 | 2015-05-18 10:41:45 +0200 | [diff] [blame] | 2249 | * Management Datagrams (MAD) are a required part of the InfiniBand |
| 2250 | * specification and are supported on all InfiniBand devices. A slightly |
| 2251 | * extended version is also supported on OPA interfaces. |
Michael Wang | c757dea | 2015-05-05 14:50:32 +0200 | [diff] [blame] | 2252 | * |
Michael Wang | 296ec00 | 2015-05-18 10:41:45 +0200 | [diff] [blame] | 2253 | * Return: true if the port supports sending/receiving of MAD packets. |
Michael Wang | c757dea | 2015-05-05 14:50:32 +0200 | [diff] [blame] | 2254 | */ |
Ira Weiny | 5ede928 | 2015-05-31 17:15:29 -0400 | [diff] [blame] | 2255 | static inline bool rdma_cap_ib_mad(const struct ib_device *device, u8 port_num) |
Michael Wang | c757dea | 2015-05-05 14:50:32 +0200 | [diff] [blame] | 2256 | { |
Ira Weiny | f9b22e3 | 2015-05-13 20:02:59 -0400 | [diff] [blame] | 2257 | return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_MAD; |
Michael Wang | c757dea | 2015-05-05 14:50:32 +0200 | [diff] [blame] | 2258 | } |
| 2259 | |
Michael Wang | 29541e3 | 2015-05-05 14:50:33 +0200 | [diff] [blame] | 2260 | /** |
Ira Weiny | 65995fe | 2015-06-06 14:38:32 -0400 | [diff] [blame] | 2261 | * rdma_cap_opa_mad - Check if the port of device provides support for OPA |
| 2262 | * Management Datagrams. |
| 2263 | * @device: Device to check |
| 2264 | * @port_num: Port number to check |
| 2265 | * |
| 2266 | * Intel OmniPath devices extend and/or replace the InfiniBand Management |
| 2267 | * datagrams with their own versions. These OPA MADs share many but not all of |
| 2268 | * the characteristics of InfiniBand MADs. |
| 2269 | * |
| 2270 | * OPA MADs differ in the following ways: |
| 2271 | * |
| 2272 | * 1) MADs are variable size up to 2K |
| 2273 | * IBTA defined MADs remain fixed at 256 bytes |
| 2274 | * 2) OPA SMPs must carry valid PKeys |
| 2275 | * 3) OPA SMP packets use a different format |
| 2276 | * |
| 2277 | * Return: true if the port supports OPA MAD packet formats. |
| 2278 | */ |
| 2279 | static inline bool rdma_cap_opa_mad(struct ib_device *device, u8 port_num) |
| 2280 | { |
| 2281 | return (device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_OPA_MAD) |
| 2282 | == RDMA_CORE_CAP_OPA_MAD; |
| 2283 | } |
| 2284 | |
| 2285 | /** |
Michael Wang | 296ec00 | 2015-05-18 10:41:45 +0200 | [diff] [blame] | 2286 | * rdma_cap_ib_smi - Check if the port of a device provides an InfiniBand |
| 2287 | * Subnet Management Agent (SMA) on the Subnet Management Interface (SMI). |
| 2288 | * @device: Device to check |
| 2289 | * @port_num: Port number to check |
Michael Wang | 29541e3 | 2015-05-05 14:50:33 +0200 | [diff] [blame] | 2290 | * |
Michael Wang | 296ec00 | 2015-05-18 10:41:45 +0200 | [diff] [blame] | 2291 | * Each InfiniBand node is required to provide a Subnet Management Agent |
| 2292 | * that the subnet manager can access. Prior to the fabric being fully |
| 2293 | * configured by the subnet manager, the SMA is accessed via a well known |
| 2294 | * interface called the Subnet Management Interface (SMI). This interface |
| 2295 | * uses directed route packets to communicate with the SM to get around the |
| 2296 | * chicken and egg problem of the SM needing to know what's on the fabric |
| 2297 | * in order to configure the fabric, and needing to configure the fabric in |
| 2298 | * order to send packets to the devices on the fabric. These directed |
| 2299 | * route packets do not need the fabric fully configured in order to reach |
| 2300 | * their destination. The SMI is the only method allowed to send |
| 2301 | * directed route packets on an InfiniBand fabric. |
Michael Wang | 29541e3 | 2015-05-05 14:50:33 +0200 | [diff] [blame] | 2302 | * |
Michael Wang | 296ec00 | 2015-05-18 10:41:45 +0200 | [diff] [blame] | 2303 | * Return: true if the port provides an SMI. |
Michael Wang | 29541e3 | 2015-05-05 14:50:33 +0200 | [diff] [blame] | 2304 | */ |
Ira Weiny | 5ede928 | 2015-05-31 17:15:29 -0400 | [diff] [blame] | 2305 | static inline bool rdma_cap_ib_smi(const struct ib_device *device, u8 port_num) |
Michael Wang | 29541e3 | 2015-05-05 14:50:33 +0200 | [diff] [blame] | 2306 | { |
Ira Weiny | f9b22e3 | 2015-05-13 20:02:59 -0400 | [diff] [blame] | 2307 | return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_SMI; |
Michael Wang | 29541e3 | 2015-05-05 14:50:33 +0200 | [diff] [blame] | 2308 | } |
| 2309 | |
Michael Wang | 72219cea | 2015-05-05 14:50:34 +0200 | [diff] [blame] | 2310 | /** |
| 2311 | * rdma_cap_ib_cm - Check if the port of a device supports the InfiniBand |
| 2312 | * Communication Manager. |
Michael Wang | 296ec00 | 2015-05-18 10:41:45 +0200 | [diff] [blame] | 2313 | * @device: Device to check |
| 2314 | * @port_num: Port number to check |
Michael Wang | 72219cea | 2015-05-05 14:50:34 +0200 | [diff] [blame] | 2315 | * |
Michael Wang | 296ec00 | 2015-05-18 10:41:45 +0200 | [diff] [blame] | 2316 | * The InfiniBand Communication Manager is one of many pre-defined General |
| 2317 | * Service Agents (GSA) that are accessed via the General Service |
| 2318 | * Interface (GSI). Its role is to facilitate establishment of connections |
| 2319 | * between nodes as well as other management related tasks for established |
| 2320 | * connections. |
Michael Wang | 72219cea | 2015-05-05 14:50:34 +0200 | [diff] [blame] | 2321 | * |
Michael Wang | 296ec00 | 2015-05-18 10:41:45 +0200 | [diff] [blame] | 2322 | * Return: true if the port supports an IB CM (this does not guarantee that |
| 2323 | * a CM is actually running however). |
Michael Wang | 72219cea | 2015-05-05 14:50:34 +0200 | [diff] [blame] | 2324 | */ |
Ira Weiny | 5ede928 | 2015-05-31 17:15:29 -0400 | [diff] [blame] | 2325 | static inline bool rdma_cap_ib_cm(const struct ib_device *device, u8 port_num) |
Michael Wang | 72219cea | 2015-05-05 14:50:34 +0200 | [diff] [blame] | 2326 | { |
Ira Weiny | f9b22e3 | 2015-05-13 20:02:59 -0400 | [diff] [blame] | 2327 | return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_CM; |
Michael Wang | 72219cea | 2015-05-05 14:50:34 +0200 | [diff] [blame] | 2328 | } |
| 2329 | |
Michael Wang | 0421533 | 2015-05-05 14:50:35 +0200 | [diff] [blame] | 2330 | /** |
| 2331 | * rdma_cap_iw_cm - Check if the port of a device supports the iWARP |
| 2332 | * Communication Manager. |
Michael Wang | 296ec00 | 2015-05-18 10:41:45 +0200 | [diff] [blame] | 2333 | * @device: Device to check |
| 2334 | * @port_num: Port number to check |
Michael Wang | 0421533 | 2015-05-05 14:50:35 +0200 | [diff] [blame] | 2335 | * |
Michael Wang | 296ec00 | 2015-05-18 10:41:45 +0200 | [diff] [blame] | 2336 | * Similar to above, but specific to iWARP connections which have a different |
| 2337 | * management protocol than InfiniBand. |
Michael Wang | 0421533 | 2015-05-05 14:50:35 +0200 | [diff] [blame] | 2338 | * |
Michael Wang | 296ec00 | 2015-05-18 10:41:45 +0200 | [diff] [blame] | 2339 | * Return: true if the port supports an iWARP CM (this does not guarantee that |
| 2340 | * a CM is actually running however). |
Michael Wang | 0421533 | 2015-05-05 14:50:35 +0200 | [diff] [blame] | 2341 | */ |
Ira Weiny | 5ede928 | 2015-05-31 17:15:29 -0400 | [diff] [blame] | 2342 | static inline bool rdma_cap_iw_cm(const struct ib_device *device, u8 port_num) |
Michael Wang | 0421533 | 2015-05-05 14:50:35 +0200 | [diff] [blame] | 2343 | { |
Ira Weiny | f9b22e3 | 2015-05-13 20:02:59 -0400 | [diff] [blame] | 2344 | return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IW_CM; |
Michael Wang | 0421533 | 2015-05-05 14:50:35 +0200 | [diff] [blame] | 2345 | } |
| 2346 | |
Michael Wang | fe53ba2 | 2015-05-05 14:50:36 +0200 | [diff] [blame] | 2347 | /** |
| 2348 | * rdma_cap_ib_sa - Check if the port of a device supports InfiniBand |
| 2349 | * Subnet Administration. |
Michael Wang | 296ec00 | 2015-05-18 10:41:45 +0200 | [diff] [blame] | 2350 | * @device: Device to check |
| 2351 | * @port_num: Port number to check |
Michael Wang | fe53ba2 | 2015-05-05 14:50:36 +0200 | [diff] [blame] | 2352 | * |
Michael Wang | 296ec00 | 2015-05-18 10:41:45 +0200 | [diff] [blame] | 2353 | * An InfiniBand Subnet Administration (SA) service is a pre-defined General |
| 2354 | * Service Agent (GSA) provided by the Subnet Manager (SM). On InfiniBand |
| 2355 | * fabrics, devices should resolve routes to other hosts by contacting the |
| 2356 | * SA to query the proper route. |
Michael Wang | fe53ba2 | 2015-05-05 14:50:36 +0200 | [diff] [blame] | 2357 | * |
Michael Wang | 296ec00 | 2015-05-18 10:41:45 +0200 | [diff] [blame] | 2358 | * Return: true if the port should act as a client to the fabric Subnet |
| 2359 | * Administration interface. This does not imply that the SA service is |
| 2360 | * running locally. |
Michael Wang | fe53ba2 | 2015-05-05 14:50:36 +0200 | [diff] [blame] | 2361 | */ |
Ira Weiny | 5ede928 | 2015-05-31 17:15:29 -0400 | [diff] [blame] | 2362 | static inline bool rdma_cap_ib_sa(const struct ib_device *device, u8 port_num) |
Michael Wang | fe53ba2 | 2015-05-05 14:50:36 +0200 | [diff] [blame] | 2363 | { |
Ira Weiny | f9b22e3 | 2015-05-13 20:02:59 -0400 | [diff] [blame] | 2364 | return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_SA; |
Michael Wang | fe53ba2 | 2015-05-05 14:50:36 +0200 | [diff] [blame] | 2365 | } |
| 2366 | |
Michael Wang | a31ad3b | 2015-05-05 14:50:37 +0200 | [diff] [blame] | 2367 | /** |
| 2368 | * rdma_cap_ib_mcast - Check if the port of a device supports InfiniBand |
| 2369 | * Multicast. |
Michael Wang | 296ec00 | 2015-05-18 10:41:45 +0200 | [diff] [blame] | 2370 | * @device: Device to check |
| 2371 | * @port_num: Port number to check |
Michael Wang | a31ad3b | 2015-05-05 14:50:37 +0200 | [diff] [blame] | 2372 | * |
Michael Wang | 296ec00 | 2015-05-18 10:41:45 +0200 | [diff] [blame] | 2373 | * InfiniBand multicast registration is more complex than normal IPv4 or |
| 2374 | * IPv6 multicast registration. Each Host Channel Adapter must register |
| 2375 | * with the Subnet Manager when it wishes to join a multicast group. It |
| 2376 | * should do so only once regardless of how many queue pairs it subscribes |
| 2377 | * to this group. And it should leave the group only after all queue pairs |
| 2378 | * attached to the group have been detached. |
Michael Wang | a31ad3b | 2015-05-05 14:50:37 +0200 | [diff] [blame] | 2379 | * |
Michael Wang | 296ec00 | 2015-05-18 10:41:45 +0200 | [diff] [blame] | 2380 | * Return: true if the port must undertake the additional administrative |
| 2381 | * overhead of registering/unregistering with the SM and tracking of the |
| 2382 | * total number of queue pairs attached to the multicast group. |
Michael Wang | a31ad3b | 2015-05-05 14:50:37 +0200 | [diff] [blame] | 2383 | */ |
Ira Weiny | 5ede928 | 2015-05-31 17:15:29 -0400 | [diff] [blame] | 2384 | static inline bool rdma_cap_ib_mcast(const struct ib_device *device, u8 port_num) |
Michael Wang | a31ad3b | 2015-05-05 14:50:37 +0200 | [diff] [blame] | 2385 | { |
| 2386 | return rdma_cap_ib_sa(device, port_num); |
| 2387 | } |
| 2388 | |
Michael Wang | bc0f1d7 | 2015-05-05 14:50:38 +0200 | [diff] [blame] | 2389 | /** |
Michael Wang | 30a74ef | 2015-05-05 14:50:39 +0200 | [diff] [blame] | 2390 | * rdma_cap_af_ib - Check if the port of a device supports |
| 2391 | * native InfiniBand addressing. |
Michael Wang | 296ec00 | 2015-05-18 10:41:45 +0200 | [diff] [blame] | 2392 | * @device: Device to check |
| 2393 | * @port_num: Port number to check |
Michael Wang | 30a74ef | 2015-05-05 14:50:39 +0200 | [diff] [blame] | 2394 | * |
Michael Wang | 296ec00 | 2015-05-18 10:41:45 +0200 | [diff] [blame] | 2395 | * InfiniBand addressing uses a port's GUID + Subnet Prefix to make a default |
| 2396 | * GID. RoCE uses a different mechanism, but still generates a GID via |
| 2397 | * a prescribed mechanism and port-specific data. |
Michael Wang | 30a74ef | 2015-05-05 14:50:39 +0200 | [diff] [blame] | 2398 | * |
Michael Wang | 296ec00 | 2015-05-18 10:41:45 +0200 | [diff] [blame] | 2399 | * Return: true if the port uses a GID address to identify devices on the |
| 2400 | * network. |
Michael Wang | 30a74ef | 2015-05-05 14:50:39 +0200 | [diff] [blame] | 2401 | */ |
Ira Weiny | 5ede928 | 2015-05-31 17:15:29 -0400 | [diff] [blame] | 2402 | static inline bool rdma_cap_af_ib(const struct ib_device *device, u8 port_num) |
Michael Wang | 30a74ef | 2015-05-05 14:50:39 +0200 | [diff] [blame] | 2403 | { |
Ira Weiny | f9b22e3 | 2015-05-13 20:02:59 -0400 | [diff] [blame] | 2404 | return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_AF_IB; |
Michael Wang | 30a74ef | 2015-05-05 14:50:39 +0200 | [diff] [blame] | 2405 | } |
| 2406 | |
| 2407 | /** |
Michael Wang | 227128f | 2015-05-05 14:50:40 +0200 | [diff] [blame] | 2408 | * rdma_cap_eth_ah - Check if the port of a device supports |
Michael Wang | 296ec00 | 2015-05-18 10:41:45 +0200 | [diff] [blame] | 2409 | * Ethernet Address Handles. |
| 2410 | * @device: Device to check |
| 2411 | * @port_num: Port number to check |
Michael Wang | 227128f | 2015-05-05 14:50:40 +0200 | [diff] [blame] | 2412 | * |
Michael Wang | 296ec00 | 2015-05-18 10:41:45 +0200 | [diff] [blame] | 2413 | * RoCE is InfiniBand over Ethernet, and it uses a well defined technique |
| 2414 | * to fabricate GIDs over Ethernet/IP specific addresses native to the |
| 2415 | * port. Normally, packet headers are generated by the sending host |
| 2416 | * adapter, but when sending connectionless datagrams, we must manually |
| 2417 | * inject the proper headers for the fabric we are communicating over. |
Michael Wang | 227128f | 2015-05-05 14:50:40 +0200 | [diff] [blame] | 2418 | * |
Michael Wang | 296ec00 | 2015-05-18 10:41:45 +0200 | [diff] [blame] | 2419 | * Return: true if we are running as a RoCE port and must force the |
| 2420 | * addition of a Global Route Header built from our Ethernet Address |
| 2421 | * Handle into our header list for connectionless packets. |
Michael Wang | 227128f | 2015-05-05 14:50:40 +0200 | [diff] [blame] | 2422 | */ |
Ira Weiny | 5ede928 | 2015-05-31 17:15:29 -0400 | [diff] [blame] | 2423 | static inline bool rdma_cap_eth_ah(const struct ib_device *device, u8 port_num) |
Michael Wang | 227128f | 2015-05-05 14:50:40 +0200 | [diff] [blame] | 2424 | { |
Ira Weiny | f9b22e3 | 2015-05-13 20:02:59 -0400 | [diff] [blame] | 2425 | return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_ETH_AH; |
Michael Wang | 227128f | 2015-05-05 14:50:40 +0200 | [diff] [blame] | 2426 | } |
| 2427 | |
| 2428 | /** |
Ira Weiny | 337877a | 2015-06-06 14:38:29 -0400 | [diff] [blame] | 2429 | * rdma_max_mad_size - Return the max MAD size required by this RDMA Port. |
| 2430 | * |
| 2431 | * @device: Device |
| 2432 | * @port_num: Port number |
| 2433 | * |
| 2434 | * This MAD size includes the MAD headers and MAD payload. No other headers |
| 2435 | * are included. |
| 2436 | * |
| 2437 | * Return the max MAD size required by the Port. Will return 0 if the port |
| 2438 | * does not support MADs. |
| 2439 | */ |
| 2440 | static inline size_t rdma_max_mad_size(const struct ib_device *device, u8 port_num) |
| 2441 | { |
| 2442 | return device->port_immutable[port_num].max_mad_size; |
| 2443 | } |
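
/*
 * Illustrative sketch (hypothetical helper; kzalloc() requires
 * <linux/slab.h>): a MAD consumer sizes its buffers from
 * rdma_max_mad_size() and skips ports without MAD support.
 */
static inline void *example_alloc_mad_buf(struct ib_device *device,
					  u8 port_num, gfp_t gfp)
{
	size_t size = rdma_max_mad_size(device, port_num);

	if (!size)		/* port does not support MADs */
		return NULL;
	return kzalloc(size, gfp);
}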
| 2444 | |
Matan Barak | 03db3a2 | 2015-07-30 18:33:26 +0300 | [diff] [blame] | 2445 | /** |
| 2446 | * rdma_cap_roce_gid_table - Check if the port of device uses roce_gid_table |
| 2447 | * @device: Device to check |
| 2448 | * @port_num: Port number to check |
| 2449 | * |
| 2450 | * RoCE GID table mechanism manages the various GIDs for a device. |
| 2451 | * |
| 2452 | * NOTE: if allocating the port's GID table has failed, this call will still |
| 2453 | * return true, but any RoCE GID table API will fail. |
| 2454 | * |
| 2455 | * Return: true if the port uses RoCE GID table mechanism in order to manage |
| 2456 | * its GIDs. |
| 2457 | */ |
| 2458 | static inline bool rdma_cap_roce_gid_table(const struct ib_device *device, |
| 2459 | u8 port_num) |
| 2460 | { |
| 2461 | return rdma_protocol_roce(device, port_num) && |
| 2462 | device->add_gid && device->del_gid; |
| 2463 | } |
| 2464 | |
Christoph Hellwig | 002516e | 2016-05-03 18:01:05 +0200 | [diff] [blame] | 2465 | /* |
| 2466 | * Check if the device supports READ W/ INVALIDATE. |
| 2467 | */ |
| 2468 | static inline bool rdma_cap_read_inv(struct ib_device *dev, u32 port_num) |
| 2469 | { |
| 2470 | /* |
| 2471 | * iWARP drivers must support READ W/ INVALIDATE. No other protocol |
| 2472 | * has support for it yet. |
| 2473 | */ |
| 2474 | return rdma_protocol_iwarp(dev, port_num); |
| 2475 | } |
| 2476 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2477 | int ib_query_gid(struct ib_device *device, |
Matan Barak | 55ee3ab | 2015-10-15 18:38:45 +0300 | [diff] [blame] | 2478 | u8 port_num, int index, union ib_gid *gid, |
| 2479 | struct ib_gid_attr *attr); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2480 | |
Eli Cohen | 50174a7 | 2016-03-11 22:58:38 +0200 | [diff] [blame] | 2481 | int ib_set_vf_link_state(struct ib_device *device, int vf, u8 port, |
| 2482 | int state); |
| 2483 | int ib_get_vf_config(struct ib_device *device, int vf, u8 port, |
| 2484 | struct ifla_vf_info *info); |
| 2485 | int ib_get_vf_stats(struct ib_device *device, int vf, u8 port, |
| 2486 | struct ifla_vf_stats *stats); |
| 2487 | int ib_set_vf_guid(struct ib_device *device, int vf, u8 port, u64 guid, |
| 2488 | int type); |
| 2489 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2490 | int ib_query_pkey(struct ib_device *device, |
| 2491 | u8 port_num, u16 index, u16 *pkey); |
| 2492 | |
| 2493 | int ib_modify_device(struct ib_device *device, |
| 2494 | int device_modify_mask, |
| 2495 | struct ib_device_modify *device_modify); |
| 2496 | |
| 2497 | int ib_modify_port(struct ib_device *device, |
| 2498 | u8 port_num, int port_modify_mask, |
| 2499 | struct ib_port_modify *port_modify); |
| 2500 | |
Yosef Etigin | 5eb620c | 2007-05-14 07:26:51 +0300 | [diff] [blame] | 2501 | int ib_find_gid(struct ib_device *device, union ib_gid *gid, |
Matan Barak | b39ffa1 | 2015-12-23 14:56:47 +0200 | [diff] [blame] | 2502 | enum ib_gid_type gid_type, struct net_device *ndev, |
| 2503 | u8 *port_num, u16 *index); |
Yosef Etigin | 5eb620c | 2007-05-14 07:26:51 +0300 | [diff] [blame] | 2504 | |
| 2505 | int ib_find_pkey(struct ib_device *device, |
| 2506 | u8 port_num, u16 pkey, u16 *index); |
| 2507 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2508 | struct ib_pd *ib_alloc_pd(struct ib_device *device); |
| 2509 | |
Jason Gunthorpe | 7dd7864 | 2015-08-05 14:34:31 -0600 | [diff] [blame] | 2510 | void ib_dealloc_pd(struct ib_pd *pd); |
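
/*
 * Illustrative sketch (hypothetical helper; IS_ERR()/PTR_ERR() come
 * from <linux/err.h>): verbs allocation calls return ERR_PTR values
 * rather than NULL on failure.
 */
static inline int example_setup_pd(struct ib_device *device,
				   struct ib_pd **pd_out)
{
	struct ib_pd *pd = ib_alloc_pd(device);

	if (IS_ERR(pd))
		return PTR_ERR(pd);
	*pd_out = pd;		/* released with ib_dealloc_pd() */
	return 0;
}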
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2511 | |
| 2512 | /** |
| 2513 | * ib_create_ah - Creates an address handle for the given address vector. |
| 2514 | * @pd: The protection domain associated with the address handle. |
| 2515 | * @ah_attr: The attributes of the address vector. |
| 2516 | * |
| 2517 | * The address handle is used to reference a local or global destination |
| 2518 | * in all UD QP post sends. |
| 2519 | */ |
| 2520 | struct ib_ah *ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr); |
| 2521 | |
| 2522 | /** |
Sean Hefty | 4e00d69 | 2006-06-17 20:37:39 -0700 | [diff] [blame] | 2523 | * ib_init_ah_from_wc - Initializes address handle attributes from a |
| 2524 | * work completion. |
| 2525 | * @device: Device on which the received message arrived. |
| 2526 | * @port_num: Port on which the received message arrived. |
| 2527 | * @wc: Work completion associated with the received message. |
| 2528 | * @grh: References the received global route header. This parameter is |
| 2529 | * ignored unless the work completion indicates that the GRH is valid. |
| 2530 | * @ah_attr: Returned attributes that can be used when creating an address |
| 2531 | * handle for replying to the message. |
| 2532 | */ |
Ira Weiny | 73cdaae | 2015-05-31 17:15:31 -0400 | [diff] [blame] | 2533 | int ib_init_ah_from_wc(struct ib_device *device, u8 port_num, |
| 2534 | const struct ib_wc *wc, const struct ib_grh *grh, |
| 2535 | struct ib_ah_attr *ah_attr); |
Sean Hefty | 4e00d69 | 2006-06-17 20:37:39 -0700 | [diff] [blame] | 2536 | |
| 2537 | /** |
Hal Rosenstock | 513789e | 2005-07-27 11:45:34 -0700 | [diff] [blame] | 2538 | * ib_create_ah_from_wc - Creates an address handle associated with the |
| 2539 | * sender of the specified work completion. |
| 2540 | * @pd: The protection domain associated with the address handle. |
| 2541 | * @wc: Work completion information associated with a received message. |
| 2542 | * @grh: References the received global route header. This parameter is |
| 2543 | * ignored unless the work completion indicates that the GRH is valid. |
| 2544 | * @port_num: The outbound port number to associate with the address. |
| 2545 | * |
| 2546 | * The address handle is used to reference a local or global destination |
| 2547 | * in all UD QP post sends. |
| 2548 | */ |
Ira Weiny | 73cdaae | 2015-05-31 17:15:31 -0400 | [diff] [blame] | 2549 | struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, const struct ib_wc *wc, |
| 2550 | const struct ib_grh *grh, u8 port_num); |
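
/*
 * Illustrative sketch (hypothetical helper): a UD service typically
 * builds the address handle for its reply straight from the received
 * work completion.
 */
static inline struct ib_ah *example_reply_ah(struct ib_pd *pd,
					     const struct ib_wc *wc,
					     const struct ib_grh *grh,
					     u8 port_num)
{
	/* grh is only examined when wc->wc_flags has IB_WC_GRH set */
	return ib_create_ah_from_wc(pd, wc, grh, port_num);
}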
Hal Rosenstock | 513789e | 2005-07-27 11:45:34 -0700 | [diff] [blame] | 2551 | |
| 2552 | /** |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2553 | * ib_modify_ah - Modifies the address vector associated with an address |
| 2554 | * handle. |
| 2555 | * @ah: The address handle to modify. |
| 2556 | * @ah_attr: The new address vector attributes to associate with the |
| 2557 | * address handle. |
| 2558 | */ |
| 2559 | int ib_modify_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr); |
| 2560 | |
| 2561 | /** |
| 2562 | * ib_query_ah - Queries the address vector associated with an address |
| 2563 | * handle. |
| 2564 | * @ah: The address handle to query. |
| 2565 | * @ah_attr: The address vector attributes associated with the address |
| 2566 | * handle. |
| 2567 | */ |
| 2568 | int ib_query_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr); |
| 2569 | |
| 2570 | /** |
| 2571 | * ib_destroy_ah - Destroys an address handle. |
| 2572 | * @ah: The address handle to destroy. |
| 2573 | */ |
| 2574 | int ib_destroy_ah(struct ib_ah *ah); |
| 2575 | |
| 2576 | /** |
Roland Dreier | d41fcc6 | 2005-08-18 12:23:08 -0700 | [diff] [blame] | 2577 | * ib_create_srq - Creates a SRQ associated with the specified protection |
| 2578 | * domain. |
| 2579 | * @pd: The protection domain associated with the SRQ. |
Dotan Barak | abb6e9b | 2006-02-23 12:13:51 -0800 | [diff] [blame] | 2580 | * @srq_init_attr: A list of initial attributes required to create the |
| 2581 | * SRQ. If SRQ creation succeeds, then the attributes are updated to |
| 2582 | * the actual capabilities of the created SRQ. |
Roland Dreier | d41fcc6 | 2005-08-18 12:23:08 -0700 | [diff] [blame] | 2583 | * |
| 2584 | * srq_attr->max_wr and srq_attr->max_sge are read to determine the |
| 2585 | * requested size of the SRQ, and set to the actual values allocated |
| 2586 | * on return. If ib_create_srq() succeeds, then max_wr and max_sge |
| 2587 | * will always be at least as large as the requested values. |
| 2588 | */ |
| 2589 | struct ib_srq *ib_create_srq(struct ib_pd *pd, |
| 2590 | struct ib_srq_init_attr *srq_init_attr); |
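
/*
 * Illustrative sketch (hypothetical helper; queue sizes are
 * placeholders): minimal SRQ creation.  The driver may round the
 * requested values up, as described above.
 */
static inline struct ib_srq *example_create_srq(struct ib_pd *pd)
{
	struct ib_srq_init_attr attr = {
		.attr = {
			.max_wr	 = 128,	/* requested receive depth */
			.max_sge = 1,	/* scatter entries per receive */
		},
	};

	return ib_create_srq(pd, &attr);
}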
| 2591 | |
| 2592 | /** |
| 2593 | * ib_modify_srq - Modifies the attributes for the specified SRQ. |
| 2594 | * @srq: The SRQ to modify. |
| 2595 | * @srq_attr: On input, specifies the SRQ attributes to modify. On output, |
| 2596 | * the current values of selected SRQ attributes are returned. |
| 2597 | * @srq_attr_mask: A bit-mask used to specify which attributes of the SRQ |
| 2598 | * are being modified. |
| 2599 | * |
| 2600 | * The mask may contain IB_SRQ_MAX_WR to resize the SRQ and/or |
| 2601 | * IB_SRQ_LIMIT to set the SRQ's limit and request notification when |
| 2602 | * the number of receives queued drops below the limit. |
| 2603 | */ |
| 2604 | int ib_modify_srq(struct ib_srq *srq, |
| 2605 | struct ib_srq_attr *srq_attr, |
| 2606 | enum ib_srq_attr_mask srq_attr_mask); |
| 2607 | |
| 2608 | /** |
| 2609 | * ib_query_srq - Returns the attribute list and current values for the |
| 2610 | * specified SRQ. |
| 2611 | * @srq: The SRQ to query. |
| 2612 | * @srq_attr: The attributes of the specified SRQ. |
| 2613 | */ |
| 2614 | int ib_query_srq(struct ib_srq *srq, |
| 2615 | struct ib_srq_attr *srq_attr); |
| 2616 | |
| 2617 | /** |
| 2618 | * ib_destroy_srq - Destroys the specified SRQ. |
| 2619 | * @srq: The SRQ to destroy. |
| 2620 | */ |
| 2621 | int ib_destroy_srq(struct ib_srq *srq); |
| 2622 | |
| 2623 | /** |
| 2624 | * ib_post_srq_recv - Posts a list of work requests to the specified SRQ. |
| 2625 | * @srq: The SRQ to post the work request on. |
| 2626 | * @recv_wr: A list of work requests to post on the receive queue. |
| 2627 | * @bad_recv_wr: On an immediate failure, this parameter will reference |
| 2628 | * the work request that failed to be posted on the QP. |
| 2629 | */ |
| 2630 | static inline int ib_post_srq_recv(struct ib_srq *srq, |
| 2631 | struct ib_recv_wr *recv_wr, |
| 2632 | struct ib_recv_wr **bad_recv_wr) |
| 2633 | { |
| 2634 | return srq->device->post_srq_recv(srq, recv_wr, bad_recv_wr); |
| 2635 | } |
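
/*
 * Illustrative sketch (hypothetical helper): posting one receive
 * buffer to an SRQ.  dma_addr/length/lkey describe a buffer that was
 * already mapped for DMA.
 */
static inline int example_post_srq_buf(struct ib_srq *srq, u64 dma_addr,
				       u32 length, u32 lkey, u64 wr_id)
{
	struct ib_sge sge = {
		.addr	= dma_addr,
		.length	= length,
		.lkey	= lkey,
	};
	struct ib_recv_wr wr = {
		.wr_id	 = wr_id,	/* echoed back in the ib_wc */
		.sg_list = &sge,
		.num_sge = 1,
	};
	struct ib_recv_wr *bad_wr;

	return ib_post_srq_recv(srq, &wr, &bad_wr);
}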
| 2636 | |
| 2637 | /** |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2638 | * ib_create_qp - Creates a QP associated with the specified protection |
| 2639 | * domain. |
| 2640 | * @pd: The protection domain associated with the QP. |
Dotan Barak | abb6e9b | 2006-02-23 12:13:51 -0800 | [diff] [blame] | 2641 | * @qp_init_attr: A list of initial attributes required to create the |
| 2642 | * QP. If QP creation succeeds, then the attributes are updated to |
| 2643 | * the actual capabilities of the created QP. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2644 | */ |
| 2645 | struct ib_qp *ib_create_qp(struct ib_pd *pd, |
| 2646 | struct ib_qp_init_attr *qp_init_attr); |
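
/*
 * Illustrative sketch (hypothetical helper; capability numbers are
 * placeholders): creating a bare RC QP.  The returned capabilities
 * may be larger than requested, as described above.
 */
static inline struct ib_qp *example_create_rc_qp(struct ib_pd *pd,
						 struct ib_cq *cq)
{
	struct ib_qp_init_attr attr = {
		.send_cq     = cq,
		.recv_cq     = cq,	/* send and recv may share a CQ */
		.cap = {
			.max_send_wr  = 64,
			.max_recv_wr  = 64,
			.max_send_sge = 1,
			.max_recv_sge = 1,
		},
		.sq_sig_type = IB_SIGNAL_REQ_WR, /* only flagged WRs signal */
		.qp_type     = IB_QPT_RC,
	};

	return ib_create_qp(pd, &attr);
}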
| 2647 | |
| 2648 | /** |
| 2649 | * ib_modify_qp - Modifies the attributes for the specified QP and then |
| 2650 | * transitions the QP to the given state. |
| 2651 | * @qp: The QP to modify. |
| 2652 | * @qp_attr: On input, specifies the QP attributes to modify. On output, |
| 2653 | * the current values of selected QP attributes are returned. |
| 2654 | * @qp_attr_mask: A bit-mask used to specify which attributes of the QP |
| 2655 | * are being modified. |
| 2656 | */ |
| 2657 | int ib_modify_qp(struct ib_qp *qp, |
| 2658 | struct ib_qp_attr *qp_attr, |
| 2659 | int qp_attr_mask); |
| 2660 | |
| 2661 | /** |
| 2662 | * ib_query_qp - Returns the attribute list and current values for the |
| 2663 | * specified QP. |
| 2664 | * @qp: The QP to query. |
| 2665 | * @qp_attr: The attributes of the specified QP. |
| 2666 | * @qp_attr_mask: A bit-mask used to select specific attributes to query. |
| 2667 | * @qp_init_attr: Additional attributes of the selected QP. |
| 2668 | * |
| 2669 | * The qp_attr_mask may be used to limit the query to gathering only the |
| 2670 | * selected attributes. |
| 2671 | */ |
| 2672 | int ib_query_qp(struct ib_qp *qp, |
| 2673 | struct ib_qp_attr *qp_attr, |
| 2674 | int qp_attr_mask, |
| 2675 | struct ib_qp_init_attr *qp_init_attr); |
| 2676 | |
| 2677 | /** |
| 2678 | * ib_destroy_qp - Destroys the specified QP. |
| 2679 | * @qp: The QP to destroy. |
| 2680 | */ |
| 2681 | int ib_destroy_qp(struct ib_qp *qp); |
| 2682 | |
| 2683 | /** |
Sean Hefty | 0e0ec7e | 2011-08-08 15:31:51 -0700 | [diff] [blame] | 2684 | * ib_open_qp - Obtain a reference to an existing sharable QP. |
| 2685 | * @xrcd: XRC domain |
| 2686 | * @qp_open_attr: Attributes identifying the QP to open. |
| 2687 | * |
| 2688 | * Returns a reference to a sharable QP. |
| 2689 | */ |
| 2690 | struct ib_qp *ib_open_qp(struct ib_xrcd *xrcd, |
| 2691 | struct ib_qp_open_attr *qp_open_attr); |
| 2692 | |
| 2693 | /** |
| 2694 | * ib_close_qp - Release an external reference to a QP. |
Sean Hefty | d3d72d9 | 2011-05-26 23:06:44 -0700 | [diff] [blame] | 2695 | * @qp: The QP handle to release |
| 2696 | * |
Sean Hefty | 0e0ec7e | 2011-08-08 15:31:51 -0700 | [diff] [blame] | 2697 | * The opened QP handle is released by the caller. The underlying |
| 2698 | * shared QP is not destroyed until all internal references are released. |
Sean Hefty | d3d72d9 | 2011-05-26 23:06:44 -0700 | [diff] [blame] | 2699 | */ |
Sean Hefty | 0e0ec7e | 2011-08-08 15:31:51 -0700 | [diff] [blame] | 2700 | int ib_close_qp(struct ib_qp *qp); |
Sean Hefty | d3d72d9 | 2011-05-26 23:06:44 -0700 | [diff] [blame] | 2701 | |
| 2702 | /** |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2703 | * ib_post_send - Posts a list of work requests to the send queue of |
| 2704 | * the specified QP. |
| 2705 | * @qp: The QP to post the work request on. |
| 2706 | * @send_wr: A list of work requests to post on the send queue. |
| 2707 | * @bad_send_wr: On an immediate failure, this parameter will reference |
| 2708 | * the work request that failed to be posted on the QP. |
Bart Van Assche | 55464d4 | 2009-12-09 14:20:04 -0800 | [diff] [blame] | 2709 | * |
| 2710 | * While IBA Vol. 1 section 11.4.1.1 specifies that if an immediate |
| 2711 | * error is returned, the QP state shall not be affected, |
| 2712 | * ib_post_send() will return an immediate error after queueing any |
| 2713 | * earlier work requests in the list. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2714 | */ |
| 2715 | static inline int ib_post_send(struct ib_qp *qp, |
| 2716 | struct ib_send_wr *send_wr, |
| 2717 | struct ib_send_wr **bad_send_wr) |
| 2718 | { |
| 2719 | return qp->device->post_send(qp, send_wr, bad_send_wr); |
| 2720 | } |
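
/*
 * Illustrative sketch (hypothetical helper): posting one signaled SEND
 * of a buffer already mapped for DMA, using the PD's local DMA lkey.
 */
static inline int example_post_send_buf(struct ib_qp *qp, u64 dma_addr,
					u32 length, u64 wr_id)
{
	struct ib_sge sge = {
		.addr	= dma_addr,
		.length	= length,
		.lkey	= qp->pd->local_dma_lkey,
	};
	struct ib_send_wr wr = {
		.wr_id	    = wr_id,
		.sg_list    = &sge,
		.num_sge    = 1,
		.opcode	    = IB_WR_SEND,
		.send_flags = IB_SEND_SIGNALED,	/* request a completion */
	};
	struct ib_send_wr *bad_wr;

	return ib_post_send(qp, &wr, &bad_wr);
}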
| 2721 | |
| 2722 | /** |
| 2723 | * ib_post_recv - Posts a list of work requests to the receive queue of |
| 2724 | * the specified QP. |
| 2725 | * @qp: The QP to post the work request on. |
| 2726 | * @recv_wr: A list of work requests to post on the receive queue. |
| 2727 | * @bad_recv_wr: On an immediate failure, this parameter will reference |
| 2728 | * the work request that failed to be posted on the QP. |
| 2729 | */ |
| 2730 | static inline int ib_post_recv(struct ib_qp *qp, |
| 2731 | struct ib_recv_wr *recv_wr, |
| 2732 | struct ib_recv_wr **bad_recv_wr) |
| 2733 | { |
| 2734 | return qp->device->post_recv(qp, recv_wr, bad_recv_wr); |
| 2735 | } |
| 2736 | |
Christoph Hellwig | 14d3a3b | 2015-12-11 11:53:03 -0800 | [diff] [blame] | 2737 | struct ib_cq *ib_alloc_cq(struct ib_device *dev, void *private, |
| 2738 | int nr_cqe, int comp_vector, enum ib_poll_context poll_ctx); |
| 2739 | void ib_free_cq(struct ib_cq *cq); |
| 2740 | int ib_process_cq_direct(struct ib_cq *cq, int budget); |
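
/*
 * Illustrative sketch (struct and function names are hypothetical):
 * CQs from ib_alloc_cq() dispatch completions through the ib_cqe
 * embedded in each work request instead of through wr_id.
 */
struct example_req {
	struct ib_cqe	cqe;	/* wr.wr_cqe points here when posting */
	/* ... per-request state ... */
};

static void example_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct example_req *req =
		container_of(wc->wr_cqe, struct example_req, cqe);

	/* wc->status reports success or failure for req */
	(void)req;
}

/* before posting: req->cqe.done = example_done; wr.wr_cqe = &req->cqe; */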
| 2741 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2742 | /** |
| 2743 | * ib_create_cq - Creates a CQ on the specified device. |
| 2744 | * @device: The device on which to create the CQ. |
| 2745 | * @comp_handler: A user-specified callback that is invoked when a |
| 2746 | * completion event occurs on the CQ. |
| 2747 | * @event_handler: A user-specified callback that is invoked when an |
| 2748 | * asynchronous event not associated with a completion occurs on the CQ. |
| 2749 | * @cq_context: Context associated with the CQ returned to the user via |
| 2750 | * the associated completion and event handlers. |
Matan Barak | 8e37210 | 2015-06-11 16:35:21 +0300 | [diff] [blame] | 2751 | * @cq_attr: The attributes the CQ should be created upon. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2752 | * |
| 2753 | * Users can examine the cq structure to determine the actual CQ size. |
| 2754 | */ |
| 2755 | struct ib_cq *ib_create_cq(struct ib_device *device, |
| 2756 | ib_comp_handler comp_handler, |
| 2757 | void (*event_handler)(struct ib_event *, void *), |
Matan Barak | 8e37210 | 2015-06-11 16:35:21 +0300 | [diff] [blame] | 2758 | void *cq_context, |
| 2759 | const struct ib_cq_init_attr *cq_attr); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2760 | |
| 2761 | /** |
| 2762 | * ib_resize_cq - Modifies the capacity of the CQ. |
| 2763 | * @cq: The CQ to resize. |
| 2764 | * @cqe: The minimum size of the CQ. |
| 2765 | * |
| 2766 | * Users can examine the cq structure to determine the actual CQ size. |
| 2767 | */ |
| 2768 | int ib_resize_cq(struct ib_cq *cq, int cqe); |
| 2769 | |
| 2770 | /** |
Eli Cohen | 2dd5716 | 2008-04-16 21:09:33 -0700 | [diff] [blame] | 2771 | * ib_modify_cq - Modifies moderation params of the CQ |
| 2772 | * @cq: The CQ to modify. |
| 2773 | * @cq_count: number of CQEs that will trigger an event |
| 2774 | * @cq_period: max period of time in usec before triggering an event |
| 2775 | * |
| 2776 | */ |
| 2777 | int ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period); |
| 2778 | |
| 2779 | /** |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2780 | * ib_destroy_cq - Destroys the specified CQ. |
| 2781 | * @cq: The CQ to destroy. |
| 2782 | */ |
| 2783 | int ib_destroy_cq(struct ib_cq *cq); |
| 2784 | |
| 2785 | /** |
| 2786 | * ib_poll_cq - poll a CQ for completion(s) |
| 2787 | * @cq:the CQ being polled |
| 2788 | * @num_entries:maximum number of completions to return |
| 2789 | * @wc:array of at least @num_entries &struct ib_wc where completions |
| 2790 | * will be returned |
| 2791 | * |
| 2792 | * Poll a CQ for (possibly multiple) completions. If the return value |
| 2793 | * is < 0, an error occurred. If the return value is >= 0, it is the |
| 2794 | * number of completions returned. If the return value is |
| 2795 | * non-negative and < num_entries, then the CQ was emptied. |
| 2796 | */ |
| 2797 | static inline int ib_poll_cq(struct ib_cq *cq, int num_entries, |
| 2798 | struct ib_wc *wc) |
| 2799 | { |
| 2800 | return cq->device->poll_cq(cq, num_entries, wc); |
| 2801 | } |
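
/*
 * Illustrative sketch (hypothetical helper): reaping completions one
 * at a time; real consumers usually poll into an array of ib_wc.
 */
static inline void example_reap_completions(struct ib_cq *cq)
{
	struct ib_wc wc;

	while (ib_poll_cq(cq, 1, &wc) > 0) {
		if (wc.status != IB_WC_SUCCESS)
			continue;	/* flush and other errors land here */
		/* process the successful completion */
	}
}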
| 2802 | |
| 2803 | /** |
| 2804 | * ib_peek_cq - Returns the number of unreaped completions currently |
| 2805 | * on the specified CQ. |
| 2806 | * @cq: The CQ to peek. |
| 2807 | * @wc_cnt: A minimum number of unreaped completions to check for. |
| 2808 | * |
| 2809 | * If the number of unreaped completions is greater than or equal to wc_cnt, |
| 2810 | * this function returns wc_cnt, otherwise, it returns the actual number of |
| 2811 | * unreaped completions. |
| 2812 | */ |
| 2813 | int ib_peek_cq(struct ib_cq *cq, int wc_cnt); |
| 2814 | |
| 2815 | /** |
| 2816 | * ib_req_notify_cq - Request completion notification on a CQ. |
| 2817 | * @cq: The CQ to generate an event for. |
Roland Dreier | ed23a72 | 2007-05-06 21:02:48 -0700 | [diff] [blame] | 2818 | * @flags: |
| 2819 | * Must contain exactly one of %IB_CQ_SOLICITED or %IB_CQ_NEXT_COMP |
| 2820 | * to request an event on the next solicited event or next work |
| 2821 | * completion of any type, respectively. %IB_CQ_REPORT_MISSED_EVENTS |
| 2822 | * may also be |ed in to request a hint about missed events, as |
| 2823 | * described below. |
| 2824 | * |
| 2825 | * Return Value: |
| 2826 | * < 0 means an error occurred while requesting notification |
| 2827 | * == 0 means notification was requested successfully, and if |
| 2828 | * IB_CQ_REPORT_MISSED_EVENTS was passed in, then no events |
| 2829 | * were missed and it is safe to wait for another event. In |
| 2830 | * this case is it guaranteed that any work completions added |
| 2831 | * this case it is guaranteed that any work completions added |
| 2832 | * notification event. |
| 2833 | * > 0 is only returned if IB_CQ_REPORT_MISSED_EVENTS was passed |
| 2834 | * in. It means that the consumer must poll the CQ again to |
| 2835 | * make sure it is empty to avoid missing an event because of a |
| 2836 | * race between requesting notification and an entry being |
| 2837 | * added to the CQ. This return value means it is possible |
| 2838 | * (but not guaranteed) that a work completion has been added |
| 2839 | * to the CQ since the last poll without triggering a |
| 2840 | * completion notification event. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2841 | */ |
| 2842 | static inline int ib_req_notify_cq(struct ib_cq *cq, |
Roland Dreier | ed23a72 | 2007-05-06 21:02:48 -0700 | [diff] [blame] | 2843 | enum ib_cq_notify_flags flags) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2844 | { |
Roland Dreier | ed23a72 | 2007-05-06 21:02:48 -0700 | [diff] [blame] | 2845 | return cq->device->req_notify_cq(cq, flags); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2846 | } |
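
/*
 * Illustrative sketch (hypothetical helper): the poll/re-arm loop
 * implied by the IB_CQ_REPORT_MISSED_EVENTS contract above: keep
 * polling until re-arming reports that no events were missed.
 */
static inline void example_poll_and_rearm(struct ib_cq *cq)
{
	struct ib_wc wc;

	do {
		while (ib_poll_cq(cq, 1, &wc) > 0) {
			/* process each completion */
		}
	} while (ib_req_notify_cq(cq, IB_CQ_NEXT_COMP |
				  IB_CQ_REPORT_MISSED_EVENTS) > 0);
}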
| 2847 | |
| 2848 | /** |
| 2849 | * ib_req_ncomp_notif - Request completion notification when there are |
| 2850 | * at least the specified number of unreaped completions on the CQ. |
| 2851 | * @cq: The CQ to generate an event for. |
| 2852 | * @wc_cnt: The number of unreaped completions that should be on the |
| 2853 | * CQ before an event is generated. |
| 2854 | */ |
| 2855 | static inline int ib_req_ncomp_notif(struct ib_cq *cq, int wc_cnt) |
| 2856 | { |
| 2857 | return cq->device->req_ncomp_notif ? |
| 2858 | cq->device->req_ncomp_notif(cq, wc_cnt) : |
| 2859 | -ENOSYS; |
| 2860 | } |
| 2861 | |
| 2862 | /** |
| 2863 | * ib_get_dma_mr - Returns a memory region for system memory that is |
| 2864 | * usable for DMA. |
| 2865 | * @pd: The protection domain associated with the memory region. |
| 2866 | * @mr_access_flags: Specifies the memory access rights. |
Ralph Campbell | 9b51309 | 2006-12-12 14:27:41 -0800 | [diff] [blame] | 2867 | * |
| 2868 | * Note that the ib_dma_*() functions defined below must be used |
| 2869 | * to create/destroy addresses used with the Lkey or Rkey returned |
| 2870 | * by ib_get_dma_mr(). |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2871 | */ |
| 2872 | struct ib_mr *ib_get_dma_mr(struct ib_pd *pd, int mr_access_flags); |
| 2873 | |
| 2874 | /** |
Ralph Campbell | 9b51309 | 2006-12-12 14:27:41 -0800 | [diff] [blame] | 2875 | * ib_dma_mapping_error - check a DMA addr for error |
| 2876 | * @dev: The device for which the dma_addr was created |
| 2877 | * @dma_addr: The DMA address to check |
| 2878 | */ |
| 2879 | static inline int ib_dma_mapping_error(struct ib_device *dev, u64 dma_addr) |
| 2880 | { |
Ben Collins | d1998ef | 2006-12-13 22:10:05 -0500 | [diff] [blame] | 2881 | if (dev->dma_ops) |
| 2882 | return dev->dma_ops->mapping_error(dev, dma_addr); |
FUJITA Tomonori | 8d8bb39 | 2008-07-25 19:44:49 -0700 | [diff] [blame] | 2883 | return dma_mapping_error(dev->dma_device, dma_addr); |
Ralph Campbell | 9b51309 | 2006-12-12 14:27:41 -0800 | [diff] [blame] | 2884 | } |
| 2885 | |
| 2886 | /** |
| 2887 | * ib_dma_map_single - Map a kernel virtual address to DMA address |
| 2888 | * @dev: The device for which the dma_addr is to be created |
| 2889 | * @cpu_addr: The kernel virtual address |
| 2890 | * @size: The size of the region in bytes |
| 2891 | * @direction: The direction of the DMA |
| 2892 | */ |
| 2893 | static inline u64 ib_dma_map_single(struct ib_device *dev, |
| 2894 | void *cpu_addr, size_t size, |
| 2895 | enum dma_data_direction direction) |
| 2896 | { |
Ben Collins | d1998ef | 2006-12-13 22:10:05 -0500 | [diff] [blame] | 2897 | if (dev->dma_ops) |
| 2898 | return dev->dma_ops->map_single(dev, cpu_addr, size, direction); |
| 2899 | return dma_map_single(dev->dma_device, cpu_addr, size, direction); |
Ralph Campbell | 9b51309 | 2006-12-12 14:27:41 -0800 | [diff] [blame] | 2900 | } |
| 2901 | |
| 2902 | /** |
| 2903 | * ib_dma_unmap_single - Destroy a mapping created by ib_dma_map_single() |
| 2904 | * @dev: The device for which the DMA address was created |
| 2905 | * @addr: The DMA address |
| 2906 | * @size: The size of the region in bytes |
| 2907 | * @direction: The direction of the DMA |
| 2908 | */ |
| 2909 | static inline void ib_dma_unmap_single(struct ib_device *dev, |
| 2910 | u64 addr, size_t size, |
| 2911 | enum dma_data_direction direction) |
| 2912 | { |
Ben Collins | d1998ef | 2006-12-13 22:10:05 -0500 | [diff] [blame] | 2913 | if (dev->dma_ops) |
| 2914 | dev->dma_ops->unmap_single(dev, addr, size, direction); |
| 2915 | else |
Ralph Campbell | 9b51309 | 2006-12-12 14:27:41 -0800 | [diff] [blame] | 2916 | dma_unmap_single(dev->dma_device, addr, size, direction); |
| 2917 | } |
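
/*
 * Illustrative sketch (hypothetical helper): the map/check/use/unmap
 * lifecycle.  Every mapping must be checked with
 * ib_dma_mapping_error() before the address is used.
 */
static inline int example_map_buf(struct ib_device *dev, void *buf,
				  size_t len, u64 *dma_addr)
{
	*dma_addr = ib_dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (ib_dma_mapping_error(dev, *dma_addr))
		return -ENOMEM;
	/* ... post WRs that reference *dma_addr, reap completions ... */
	return 0;
}

/* paired teardown: ib_dma_unmap_single(dev, dma_addr, len, DMA_TO_DEVICE); */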
| 2918 | |
Arthur Kepner | cb9fbc5 | 2008-04-29 01:00:34 -0700 | [diff] [blame] | 2919 | static inline u64 ib_dma_map_single_attrs(struct ib_device *dev, |
| 2920 | void *cpu_addr, size_t size, |
| 2921 | enum dma_data_direction direction, |
Krzysztof Kozlowski | 00085f1 | 2016-08-03 13:46:00 -0700 | [diff] [blame] | 2922 | unsigned long dma_attrs) |
Arthur Kepner | cb9fbc5 | 2008-04-29 01:00:34 -0700 | [diff] [blame] | 2923 | { |
| 2924 | return dma_map_single_attrs(dev->dma_device, cpu_addr, size, |
Krzysztof Kozlowski | 00085f1 | 2016-08-03 13:46:00 -0700 | [diff] [blame] | 2925 | direction, dma_attrs); |
Arthur Kepner | cb9fbc5 | 2008-04-29 01:00:34 -0700 | [diff] [blame] | 2926 | } |
| 2927 | |
| 2928 | static inline void ib_dma_unmap_single_attrs(struct ib_device *dev, |
| 2929 | u64 addr, size_t size, |
| 2930 | enum dma_data_direction direction, |
Krzysztof Kozlowski | 00085f1 | 2016-08-03 13:46:00 -0700 | [diff] [blame] | 2931 | unsigned long dma_attrs) |
Arthur Kepner | cb9fbc5 | 2008-04-29 01:00:34 -0700 | [diff] [blame] | 2932 | { |
| 2933 | return dma_unmap_single_attrs(dev->dma_device, addr, size, |
Krzysztof Kozlowski | 00085f1 | 2016-08-03 13:46:00 -0700 | [diff] [blame] | 2934 | direction, dma_attrs); |
Arthur Kepner | cb9fbc5 | 2008-04-29 01:00:34 -0700 | [diff] [blame] | 2935 | } |
| 2936 | |
Ralph Campbell | 9b51309 | 2006-12-12 14:27:41 -0800 | [diff] [blame] | 2937 | /** |
| 2938 | * ib_dma_map_page - Map a physical page to DMA address |
| 2939 | * @dev: The device for which the dma_addr is to be created |
| 2940 | * @page: The page to be mapped |
| 2941 | * @offset: The offset within the page |
| 2942 | * @size: The size of the region in bytes |
| 2943 | * @direction: The direction of the DMA |
| 2944 | */ |
| 2945 | static inline u64 ib_dma_map_page(struct ib_device *dev, |
| 2946 | struct page *page, |
| 2947 | unsigned long offset, |
| 2948 | size_t size, |
| 2949 | enum dma_data_direction direction) |
| 2950 | { |
Ben Collins | d1998ef | 2006-12-13 22:10:05 -0500 | [diff] [blame] | 2951 | if (dev->dma_ops) |
| 2952 | return dev->dma_ops->map_page(dev, page, offset, size, direction); |
| 2953 | return dma_map_page(dev->dma_device, page, offset, size, direction); |
Ralph Campbell | 9b51309 | 2006-12-12 14:27:41 -0800 | [diff] [blame] | 2954 | } |
| 2955 | |
| 2956 | /** |
| 2957 | * ib_dma_unmap_page - Destroy a mapping created by ib_dma_map_page() |
| 2958 | * @dev: The device for which the DMA address was created |
| 2959 | * @addr: The DMA address |
| 2960 | * @size: The size of the region in bytes |
| 2961 | * @direction: The direction of the DMA |
| 2962 | */ |
| 2963 | static inline void ib_dma_unmap_page(struct ib_device *dev, |
| 2964 | u64 addr, size_t size, |
| 2965 | enum dma_data_direction direction) |
| 2966 | { |
Ben Collins | d1998ef | 2006-12-13 22:10:05 -0500 | [diff] [blame] | 2967 | if (dev->dma_ops) |
| 2968 | dev->dma_ops->unmap_page(dev, addr, size, direction); |
| 2969 | else |
Ralph Campbell | 9b51309 | 2006-12-12 14:27:41 -0800 | [diff] [blame] | 2970 | dma_unmap_page(dev->dma_device, addr, size, direction); |
| 2971 | } |
| 2972 | |
| 2973 | /** |
| 2974 | * ib_dma_map_sg - Map a scatter/gather list to DMA addresses |
| 2975 | * @dev: The device for which the DMA addresses are to be created |
| 2976 | * @sg: The array of scatter/gather entries |
| 2977 | * @nents: The number of scatter/gather entries |
| 2978 | * @direction: The direction of the DMA |
| 2979 | */ |
| 2980 | static inline int ib_dma_map_sg(struct ib_device *dev, |
| 2981 | struct scatterlist *sg, int nents, |
| 2982 | enum dma_data_direction direction) |
| 2983 | { |
Ben Collins | d1998ef | 2006-12-13 22:10:05 -0500 | [diff] [blame] | 2984 | if (dev->dma_ops) |
| 2985 | return dev->dma_ops->map_sg(dev, sg, nents, direction); |
| 2986 | return dma_map_sg(dev->dma_device, sg, nents, direction); |
Ralph Campbell | 9b51309 | 2006-12-12 14:27:41 -0800 | [diff] [blame] | 2987 | } |
| 2988 | |
| 2989 | /** |
| 2990 | * ib_dma_unmap_sg - Unmap a scatter/gather list of DMA addresses |
| 2991 | * @dev: The device for which the DMA addresses were created |
| 2992 | * @sg: The array of scatter/gather entries |
| 2993 | * @nents: The number of scatter/gather entries |
| 2994 | * @direction: The direction of the DMA |
| 2995 | */ |
| 2996 | static inline void ib_dma_unmap_sg(struct ib_device *dev, |
| 2997 | struct scatterlist *sg, int nents, |
| 2998 | enum dma_data_direction direction) |
| 2999 | { |
Ben Collins | d1998ef | 2006-12-13 22:10:05 -0500 | [diff] [blame] | 3000 | if (dev->dma_ops) |
| 3001 | dev->dma_ops->unmap_sg(dev, sg, nents, direction); |
| 3002 | else |
Ralph Campbell | 9b51309 | 2006-12-12 14:27:41 -0800 | [diff] [blame] | 3003 | dma_unmap_sg(dev->dma_device, sg, nents, direction); |
| 3004 | } |
| 3005 | |
Arthur Kepner | cb9fbc5 | 2008-04-29 01:00:34 -0700 | [diff] [blame] | 3006 | static inline int ib_dma_map_sg_attrs(struct ib_device *dev, |
| 3007 | struct scatterlist *sg, int nents, |
| 3008 | enum dma_data_direction direction, |
Krzysztof Kozlowski | 00085f1 | 2016-08-03 13:46:00 -0700 | [diff] [blame] | 3009 | unsigned long dma_attrs) |
Arthur Kepner | cb9fbc5 | 2008-04-29 01:00:34 -0700 | [diff] [blame] | 3010 | { |
Parav Pandit | d970365 | 2016-09-28 20:25:47 +0000 | [diff] [blame^] | 3011 | if (dev->dma_ops) |
| 3012 | return dev->dma_ops->map_sg_attrs(dev, sg, nents, direction, |
| 3013 | dma_attrs); |
| 3014 | else |
| 3015 | return dma_map_sg_attrs(dev->dma_device, sg, nents, direction, |
| 3016 | dma_attrs); |
Arthur Kepner | cb9fbc5 | 2008-04-29 01:00:34 -0700 | [diff] [blame] | 3017 | } |
| 3018 | |
| 3019 | static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev, |
| 3020 | struct scatterlist *sg, int nents, |
| 3021 | enum dma_data_direction direction, |
Krzysztof Kozlowski | 00085f1 | 2016-08-03 13:46:00 -0700 | [diff] [blame] | 3022 | unsigned long dma_attrs) |
Arthur Kepner | cb9fbc5 | 2008-04-29 01:00:34 -0700 | [diff] [blame] | 3023 | { |
Parav Pandit | d970365 | 2016-09-28 20:25:47 +0000 | [diff] [blame^] | 3024 | if (dev->dma_ops) |
| 3025 | return dev->dma_ops->unmap_sg_attrs(dev, sg, nents, direction, |
| 3026 | dma_attrs); |
| 3027 | else |
| 3028 | dma_unmap_sg_attrs(dev->dma_device, sg, nents, direction, |
| 3029 | dma_attrs); |
Arthur Kepner | cb9fbc5 | 2008-04-29 01:00:34 -0700 | [diff] [blame] | 3030 | } |
Ralph Campbell | 9b51309 | 2006-12-12 14:27:41 -0800 | [diff] [blame] | 3031 | /** |
| 3032 | * ib_sg_dma_address - Return the DMA address from a scatter/gather entry |
| 3033 | * @dev: The device for which the DMA addresses were created |
| 3034 | * @sg: The scatter/gather entry |
Mike Marciniszyn | ea58a59 | 2014-03-28 13:26:59 -0400 | [diff] [blame] | 3035 | * |
| 3036 | * Note: this function is obsolete. To do: change all occurrences of |
| 3037 | * ib_sg_dma_address() into sg_dma_address(). |
Ralph Campbell | 9b51309 | 2006-12-12 14:27:41 -0800 | [diff] [blame] | 3038 | */ |
| 3039 | static inline u64 ib_sg_dma_address(struct ib_device *dev, |
| 3040 | struct scatterlist *sg) |
| 3041 | { |
Ben Collins | d1998ef | 2006-12-13 22:10:05 -0500 | [diff] [blame] | 3042 | return sg_dma_address(sg); |
Ralph Campbell | 9b51309 | 2006-12-12 14:27:41 -0800 | [diff] [blame] | 3043 | } |
| 3044 | |
| 3045 | /** |
| 3046 | * ib_sg_dma_len - Return the DMA length from a scatter/gather entry |
| 3047 | * @dev: The device for which the DMA addresses were created |
| 3048 | * @sg: The scatter/gather entry |
Mike Marciniszyn | ea58a59 | 2014-03-28 13:26:59 -0400 | [diff] [blame] | 3049 | * |
| 3050 | * Note: this function is obsolete. To do: change all occurrences of |
| 3051 | * ib_sg_dma_len() into sg_dma_len(). |
Ralph Campbell | 9b51309 | 2006-12-12 14:27:41 -0800 | [diff] [blame] | 3052 | */ |
| 3053 | static inline unsigned int ib_sg_dma_len(struct ib_device *dev, |
| 3054 | struct scatterlist *sg) |
| 3055 | { |
Ben Collins | d1998ef | 2006-12-13 22:10:05 -0500 | [diff] [blame] | 3056 | return sg_dma_len(sg); |
Ralph Campbell | 9b51309 | 2006-12-12 14:27:41 -0800 | [diff] [blame] | 3057 | } |
| 3058 | |
| 3059 | /** |
| 3060 | * ib_dma_sync_single_for_cpu - Prepare DMA region to be accessed by CPU |
| 3061 | * @dev: The device for which the DMA address was created |
| 3062 | * @addr: The DMA address |
| 3063 | * @size: The size of the region in bytes |
| 3064 | * @dir: The direction of the DMA |
| 3065 | */ |
| 3066 | static inline void ib_dma_sync_single_for_cpu(struct ib_device *dev, |
| 3067 | u64 addr, |
| 3068 | size_t size, |
| 3069 | enum dma_data_direction dir) |
| 3070 | { |
Ben Collins | d1998ef | 2006-12-13 22:10:05 -0500 | [diff] [blame] | 3071 | if (dev->dma_ops) |
| 3072 | dev->dma_ops->sync_single_for_cpu(dev, addr, size, dir); |
| 3073 | else |
Ralph Campbell | 9b51309 | 2006-12-12 14:27:41 -0800 | [diff] [blame] | 3074 | dma_sync_single_for_cpu(dev->dma_device, addr, size, dir); |
| 3075 | } |
| 3076 | |
| 3077 | /** |
| 3078 | * ib_dma_sync_single_for_device - Prepare DMA region to be accessed by device |
| 3079 | * @dev: The device for which the DMA address was created |
| 3080 | * @addr: The DMA address |
| 3081 | * @size: The size of the region in bytes |
| 3082 | * @dir: The direction of the DMA |
| 3083 | */ |
| 3084 | static inline void ib_dma_sync_single_for_device(struct ib_device *dev, |
| 3085 | u64 addr, |
| 3086 | size_t size, |
| 3087 | enum dma_data_direction dir) |
| 3088 | { |
Ben Collins | d1998ef | 2006-12-13 22:10:05 -0500 | [diff] [blame] | 3089 | if (dev->dma_ops) |
| 3090 | dev->dma_ops->sync_single_for_device(dev, addr, size, dir); |
| 3091 | else |
Ralph Campbell | 9b51309 | 2006-12-12 14:27:41 -0800 | [diff] [blame] | 3092 | dma_sync_single_for_device(dev->dma_device, addr, size, dir); |
| 3093 | } |
| 3094 | |
| 3095 | /** |
| 3096 | * ib_dma_alloc_coherent - Allocate memory and map it for DMA |
| 3097 | * @dev: The device for which the DMA address is requested |
| 3098 | * @size: The size of the region to allocate in bytes |
| 3099 | * @dma_handle: A pointer for returning the DMA address of the region |
| 3100 | * @flag: memory allocator flags |
| 3101 | */ |
| 3102 | static inline void *ib_dma_alloc_coherent(struct ib_device *dev, |
| 3103 | size_t size, |
| 3104 | u64 *dma_handle, |
| 3105 | gfp_t flag) |
| 3106 | { |
Ben Collins | d1998ef | 2006-12-13 22:10:05 -0500 | [diff] [blame] | 3107 | if (dev->dma_ops) |
| 3108 | return dev->dma_ops->alloc_coherent(dev, size, dma_handle, flag); |
Roland Dreier | c59a3da | 2006-12-15 13:57:26 -0800 | [diff] [blame] | 3109 | else { |
| 3110 | dma_addr_t handle; |
| 3111 | void *ret; |
| 3112 | |
| 3113 | ret = dma_alloc_coherent(dev->dma_device, size, &handle, flag); |
| 3114 | *dma_handle = handle; |
| 3115 | return ret; |
| 3116 | } |
Ralph Campbell | 9b51309 | 2006-12-12 14:27:41 -0800 | [diff] [blame] | 3117 | } |
| 3118 | |
| 3119 | /** |
| 3120 | * ib_dma_free_coherent - Free memory allocated by ib_dma_alloc_coherent() |
| 3121 | * @dev: The device for which the DMA addresses were allocated |
| 3122 | * @size: The size of the region |
| 3123 | * @cpu_addr: the address returned by ib_dma_alloc_coherent() |
| 3124 | * @dma_handle: the DMA address returned by ib_dma_alloc_coherent() |
| 3125 | */ |
| 3126 | static inline void ib_dma_free_coherent(struct ib_device *dev, |
| 3127 | size_t size, void *cpu_addr, |
| 3128 | u64 dma_handle) |
| 3129 | { |
Ben Collins | d1998ef | 2006-12-13 22:10:05 -0500 | [diff] [blame] | 3130 | if (dev->dma_ops) |
| 3131 | dev->dma_ops->free_coherent(dev, size, cpu_addr, dma_handle); |
| 3132 | else |
Ralph Campbell | 9b51309 | 2006-12-12 14:27:41 -0800 | [diff] [blame] | 3133 | dma_free_coherent(dev->dma_device, size, cpu_addr, dma_handle); |
| 3134 | } |
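
/*
 * Illustrative sketch (hypothetical helper): coherent allocations suit
 * small, long-lived structures shared between CPU and device, such as
 * a descriptor ring.
 */
static inline void *example_alloc_ring(struct ib_device *dev, size_t size,
				       u64 *dma_handle)
{
	return ib_dma_alloc_coherent(dev, size, dma_handle, GFP_KERNEL);
}

/* paired teardown: ib_dma_free_coherent(dev, size, ring, dma_handle); */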
| 3135 | |
| 3136 | /** |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3137 | * ib_dereg_mr - Deregisters a memory region and removes it from the |
| 3138 | * HCA translation table. |
| 3139 | * @mr: The memory region to deregister. |
Shani Michaeli | 7083e42 | 2013-02-06 16:19:12 +0000 | [diff] [blame] | 3140 | * |
| 3141 | * This function can fail if the memory region has memory windows bound to it. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3142 | */ |
| 3143 | int ib_dereg_mr(struct ib_mr *mr); |
| 3144 | |
Sagi Grimberg | 9bee178 | 2015-07-30 10:32:35 +0300 | [diff] [blame] | 3145 | struct ib_mr *ib_alloc_mr(struct ib_pd *pd, |
| 3146 | enum ib_mr_type mr_type, |
| 3147 | u32 max_num_sg); |
Steve Wise | 00f7ec3 | 2008-07-14 23:48:45 -0700 | [diff] [blame] | 3148 | |
| 3149 | /** |
Steve Wise | 00f7ec3 | 2008-07-14 23:48:45 -0700 | [diff] [blame] | 3150 | * ib_update_fast_reg_key - updates the key portion of the fast_reg MR |
| 3151 | * R_Key and L_Key. |
| 3152 | * @mr - struct ib_mr pointer to be updated. |
| 3153 | * @newkey - new key to be used. |
| 3154 | */ |
| 3155 | static inline void ib_update_fast_reg_key(struct ib_mr *mr, u8 newkey) |
| 3156 | { |
| 3157 | mr->lkey = (mr->lkey & 0xffffff00) | newkey; |
| 3158 | mr->rkey = (mr->rkey & 0xffffff00) | newkey; |
| 3159 | } |
| 3160 | |
| 3161 | /** |
Shani Michaeli | 7083e42 | 2013-02-06 16:19:12 +0000 | [diff] [blame] | 3162 | * ib_inc_rkey - increments the key portion of the given rkey. Can be used |
| 3163 | * for calculating a new rkey for type 2 memory windows. |
| 3164 | * @rkey - the rkey to increment. |
| 3165 | */ |
| 3166 | static inline u32 ib_inc_rkey(u32 rkey) |
| 3167 | { |
| 3168 | const u32 mask = 0x000000ff; |
| 3169 | return ((rkey + 1) & mask) | (rkey & ~mask); |
| 3170 | } |
| 3171 | |
| 3172 | /** |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3173 | * ib_alloc_fmr - Allocates an unmapped fast memory region. |
| 3174 | * @pd: The protection domain associated with the unmapped region. |
| 3175 | * @mr_access_flags: Specifies the memory access rights. |
| 3176 | * @fmr_attr: Attributes of the unmapped region. |
| 3177 | * |
| 3178 | * A fast memory region must be mapped before it can be used as part of |
| 3179 | * a work request. |
| 3180 | */ |
| 3181 | struct ib_fmr *ib_alloc_fmr(struct ib_pd *pd, |
| 3182 | int mr_access_flags, |
| 3183 | struct ib_fmr_attr *fmr_attr); |
| 3184 | |
| 3185 | /** |
| 3186 | * ib_map_phys_fmr - Maps a list of physical pages to a fast memory region. |
| 3187 | * @fmr: The fast memory region to associate with the pages. |
| 3188 | * @page_list: An array of physical pages to map to the fast memory region. |
| 3189 | * @list_len: The number of pages in page_list. |
| 3190 | * @iova: The I/O virtual address to use with the mapped region. |
| 3191 | */ |
| 3192 | static inline int ib_map_phys_fmr(struct ib_fmr *fmr, |
| 3193 | u64 *page_list, int list_len, |
| 3194 | u64 iova) |
| 3195 | { |
| 3196 | return fmr->device->map_phys_fmr(fmr, page_list, list_len, iova); |
| 3197 | } |
| 3198 | |
| 3199 | /** |
| 3200 | * ib_unmap_fmr - Removes the mapping from a list of fast memory regions. |
| 3201 | * @fmr_list: A linked list of fast memory regions to unmap. |
| 3202 | */ |
| 3203 | int ib_unmap_fmr(struct list_head *fmr_list); |
| 3204 | |
| 3205 | /** |
| 3206 | * ib_dealloc_fmr - Deallocates a fast memory region. |
| 3207 | * @fmr: The fast memory region to deallocate. |
| 3208 | */ |
| 3209 | int ib_dealloc_fmr(struct ib_fmr *fmr); |
| 3210 | |
| 3211 | /** |
| 3212 | * ib_attach_mcast - Attaches the specified QP to a multicast group. |
| 3213 | * @qp: QP to attach to the multicast group. The QP must be type |
| 3214 | * IB_QPT_UD. |
| 3215 | * @gid: Multicast group GID. |
| 3216 | * @lid: Multicast group LID in host byte order. |
| 3217 | * |
| 3218 | * In order to send and receive multicast packets, subnet |
| 3219 | * administration must have created the multicast group and configured |
| 3220 | * the fabric appropriately. The port associated with the specified |
| 3221 | * QP must also be a member of the multicast group. |
| 3222 | */ |
| 3223 | int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid); |
| 3224 | |
| 3225 | /** |
| 3226 | * ib_detach_mcast - Detaches the specified QP from a multicast group. |
| 3227 | * @qp: QP to detach from the multicast group. |
| 3228 | * @gid: Multicast group GID. |
| 3229 | * @lid: Multicast group LID in host byte order. |
| 3230 | */ |
| 3231 | int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid); |
| 3232 | |
Sean Hefty | 59991f9 | 2011-05-23 17:52:46 -0700 | [diff] [blame] | 3233 | /** |
| 3234 | * ib_alloc_xrcd - Allocates an XRC domain. |
| 3235 | * @device: The device on which to allocate the XRC domain. |
| 3236 | */ |
| 3237 | struct ib_xrcd *ib_alloc_xrcd(struct ib_device *device); |
| 3238 | |
| 3239 | /** |
| 3240 | * ib_dealloc_xrcd - Deallocates an XRC domain. |
| 3241 | * @xrcd: The XRC domain to deallocate. |
| 3242 | */ |
| 3243 | int ib_dealloc_xrcd(struct ib_xrcd *xrcd); |
| 3244 | |
Hadar Hen Zion | 319a441 | 2013-08-07 14:01:59 +0300 | [diff] [blame] | 3245 | struct ib_flow *ib_create_flow(struct ib_qp *qp, |
| 3246 | struct ib_flow_attr *flow_attr, int domain); |
| 3247 | int ib_destroy_flow(struct ib_flow *flow_id); |

static inline int ib_check_mr_access(int flags)
{
	/*
	 * Local write permission is required if remote write or
	 * remote atomic permission is also requested.
	 */
	if (flags & (IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_REMOTE_WRITE) &&
	    !(flags & IB_ACCESS_LOCAL_WRITE))
		return -EINVAL;

	return 0;
}
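
/*
 * Example (illustrative only):
 *
 *	ib_check_mr_access(IB_ACCESS_REMOTE_WRITE) returns -EINVAL,
 *	because remote write requires IB_ACCESS_LOCAL_WRITE as well;
 *	ib_check_mr_access(IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE)
 *	returns 0.
 */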

/**
 * ib_check_mr_status - Lightweight check of MR status.
 * This routine may provide status checks on a selected
 * ib_mr.  The first use case is signature status checking.
 *
 * @mr: A memory region.
 * @check_mask: Bitmask of which checks to perform from
 *   the ib_mr_status_check enumeration.
 * @mr_status: The container of relevant status checks.
 *   Failed checks will be indicated in the status bitmask
 *   and the relevant info will be in the error item.
 */
int ib_check_mr_status(struct ib_mr *mr, u32 check_mask,
		       struct ib_mr_status *mr_status);
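
/*
 * Example (sketch): after a signature-enabled transfer completes, check
 * whether the device reported a signature error.  Error handling
 * abbreviated; "mr" is assumed to exist.
 *
 *	struct ib_mr_status mr_status;
 *	int ret;
 *
 *	ret = ib_check_mr_status(mr, IB_MR_CHECK_SIG_STATUS, &mr_status);
 *	if (ret)
 *		goto err;
 *	if (mr_status.fail_status & IB_MR_CHECK_SIG_STATUS)
 *		pr_err("signature error type %d at offset %llu\n",
 *		       mr_status.sig_err.err_type,
 *		       mr_status.sig_err.sig_err_offset);
 */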

/**
 * ib_get_net_dev_by_params - Return the net_device matching the received
 *   connection parameters.
 * @dev: An RDMA device on which the request has been received.
 * @port: Port number on the RDMA device.
 * @pkey: The Pkey the request came on.
 * @gid: A GID that the net_dev uses to communicate.
 * @addr: Contains the IP address that the request specified as its
 *   destination.
 */
struct net_device *ib_get_net_dev_by_params(struct ib_device *dev, u8 port,
					    u16 pkey, const union ib_gid *gid,
					    const struct sockaddr *addr);
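
/*
 * Example (sketch): resolve the ingress net_device for a connection
 * request and drop the reference when done.  "device", "port", "pkey",
 * "gid" and "sin" are assumed to exist.
 *
 *	struct net_device *ndev;
 *
 *	ndev = ib_get_net_dev_by_params(device, port, pkey, &gid,
 *					(struct sockaddr *)&sin);
 *	if (ndev) {
 *		... use ndev (the reference is held for the caller) ...
 *		dev_put(ndev);
 *	}
 */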

struct ib_wq *ib_create_wq(struct ib_pd *pd,
			   struct ib_wq_init_attr *init_attr);
int ib_destroy_wq(struct ib_wq *wq);
int ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *attr,
		 u32 wq_attr_mask);
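
/*
 * Example (sketch): create a receive work queue and transition it to the
 * ready state.  Sizes are illustrative; "pd" and "cq" are assumed to
 * exist and error handling is abbreviated.
 *
 *	struct ib_wq_init_attr wq_init_attr = {
 *		.wq_type = IB_WQT_RQ,
 *		.max_wr	 = 256,
 *		.max_sge = 1,
 *		.cq	 = cq,
 *	};
 *	struct ib_wq_attr wq_attr = { .wq_state = IB_WQS_RDY };
 *	struct ib_wq *wq;
 *
 *	wq = ib_create_wq(pd, &wq_init_attr);
 *	if (IS_ERR(wq))
 *		return PTR_ERR(wq);
 *	if (ib_modify_wq(wq, &wq_attr, IB_WQ_STATE))
 *		ib_destroy_wq(wq);
 */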

struct ib_rwq_ind_table *
ib_create_rwq_ind_table(struct ib_device *device,
			struct ib_rwq_ind_table_init_attr *wq_ind_table_init_attr);
int ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *wq_ind_table);
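
/*
 * Example (sketch): build an RSS indirection table over previously
 * created receive WQs.  "wqs" is a hypothetical array of 2^3 WQ pointers;
 * error handling abbreviated.
 *
 *	struct ib_rwq_ind_table_init_attr init_attr = {
 *		.log_ind_tbl_size = 3,
 *		.ind_tbl	  = wqs,
 *	};
 *	struct ib_rwq_ind_table *ind_tbl;
 *
 *	ind_tbl = ib_create_rwq_ind_table(device, &init_attr);
 *	if (IS_ERR(ind_tbl))
 *		return PTR_ERR(ind_tbl);
 */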

/**
 * ib_map_mr_sg - Map the largest prefix of a dma mapped SG list to a
 *   memory region.
 * @mr: The memory region to map the pages to.
 * @sg: The dma mapped scatterlist.
 * @sg_nents: The number of entries in sg.
 * @sg_offset: Offset in bytes into sg.
 * @page_size: The desired page size of the MR's page vector.
 *
 * Returns the number of sg elements that were mapped to the memory
 * region, or a negative value on error.
 */
int ib_map_mr_sg(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
		 unsigned int *sg_offset, unsigned int page_size);

/*
 * Same as ib_map_mr_sg(), but the mapped region is given a zero-based
 * virtual address (mr->iova is reset to 0).
 */
static inline int
ib_map_mr_sg_zbva(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
		  unsigned int *sg_offset, unsigned int page_size)
{
	int n;

	n = ib_map_mr_sg(mr, sg, sg_nents, sg_offset, page_size);
	mr->iova = 0;

	return n;
}
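
/*
 * Example (sketch): register a dma-mapped SG list through an MR obtained
 * from ib_alloc_mr(), then post a registration work request.  "qp", "mr",
 * "sg" and "sg_nents" are assumed to exist; error handling abbreviated.
 *
 *	struct ib_send_wr *bad_wr;
 *	struct ib_reg_wr reg_wr = {};
 *	int n;
 *
 *	n = ib_map_mr_sg(mr, sg, sg_nents, NULL, PAGE_SIZE);
 *	if (n != sg_nents)
 *		return n < 0 ? n : -EINVAL;
 *
 *	reg_wr.wr.opcode = IB_WR_REG_MR;
 *	reg_wr.mr = mr;
 *	reg_wr.key = mr->rkey;
 *	reg_wr.access = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ;
 *	return ib_post_send(qp, &reg_wr.wr, &bad_wr);
 */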

/*
 * ib_sg_to_pages - Helper for drivers implementing map_mr_sg: walks the
 * largest prefix of the SG list that satisfies the MR's page size and
 * calls @set_page for each page address, returning the number of SG
 * entries consumed.
 */
int ib_sg_to_pages(struct ib_mr *mr, struct scatterlist *sgl, int sg_nents,
		   unsigned int *sg_offset, int (*set_page)(struct ib_mr *, u64));
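
/*
 * Example (sketch): a driver-side set_page callback for ib_sg_to_pages().
 * "struct my_mr" and to_my_mr() are hypothetical driver types.
 *
 *	static int my_mr_set_page(struct ib_mr *ibmr, u64 addr)
 *	{
 *		struct my_mr *mr = to_my_mr(ibmr);
 *
 *		if (unlikely(mr->npages == mr->max_pages))
 *			return -ENOMEM;
 *		mr->pages[mr->npages++] = addr;
 *		return 0;
 *	}
 *
 * The driver's map_mr_sg method then simply does:
 *
 *	return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, my_mr_set_page);
 */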

/*
 * Drain helpers: block until all previously posted work requests on the
 * receive queue, send queue, or both have been flushed and their
 * completions processed.  The QP is moved to the error state; the
 * associated CQs must use the IB_POLL_SOFTIRQ or IB_POLL_WORKQUEUE
 * polling contexts unless the driver provides its own drain handlers.
 */
void ib_drain_rq(struct ib_qp *qp);
void ib_drain_sq(struct ib_qp *qp);
void ib_drain_qp(struct ib_qp *qp);
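
/*
 * Example (sketch): typical teardown for a connected QP being taken out
 * of service.  ib_drain_qp() waits for all outstanding SQ and RQ
 * completions so the QP (and then its CQs) can be destroyed safely.
 *
 *	ib_drain_qp(qp);
 *	ib_destroy_qp(qp);
 */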

#endif /* IB_VERBS_H */