/*
 * Copyright (c) 2004 Mellanox Technologies Ltd.  All rights reserved.
 * Copyright (c) 2004 Infinicon Corporation.  All rights reserved.
 * Copyright (c) 2004 Intel Corporation.  All rights reserved.
 * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
 * Copyright (c) 2004 Voltaire Corporation.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005, 2006 Cisco Systems.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <net/addrconf.h>
#include <linux/security.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_cache.h>
#include <rdma/ib_addr.h>
#include <rdma/rw.h>

#include "core_priv.h"

static int ib_resolve_eth_dmac(struct ib_device *device,
                               struct rdma_ah_attr *ah_attr);

static const char * const ib_events[] = {
        [IB_EVENT_CQ_ERR]               = "CQ error",
        [IB_EVENT_QP_FATAL]             = "QP fatal error",
        [IB_EVENT_QP_REQ_ERR]           = "QP request error",
        [IB_EVENT_QP_ACCESS_ERR]        = "QP access error",
        [IB_EVENT_COMM_EST]             = "communication established",
        [IB_EVENT_SQ_DRAINED]           = "send queue drained",
        [IB_EVENT_PATH_MIG]             = "path migration successful",
        [IB_EVENT_PATH_MIG_ERR]         = "path migration error",
        [IB_EVENT_DEVICE_FATAL]         = "device fatal error",
        [IB_EVENT_PORT_ACTIVE]          = "port active",
        [IB_EVENT_PORT_ERR]             = "port error",
        [IB_EVENT_LID_CHANGE]           = "LID change",
        [IB_EVENT_PKEY_CHANGE]          = "P_key change",
        [IB_EVENT_SM_CHANGE]            = "SM change",
        [IB_EVENT_SRQ_ERR]              = "SRQ error",
        [IB_EVENT_SRQ_LIMIT_REACHED]    = "SRQ limit reached",
        [IB_EVENT_QP_LAST_WQE_REACHED]  = "last WQE reached",
        [IB_EVENT_CLIENT_REREGISTER]    = "client reregister",
        [IB_EVENT_GID_CHANGE]           = "GID changed",
};

const char *__attribute_const__ ib_event_msg(enum ib_event_type event)
{
        size_t index = event;

        return (index < ARRAY_SIZE(ib_events) && ib_events[index]) ?
                        ib_events[index] : "unrecognized event";
}
EXPORT_SYMBOL(ib_event_msg);
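
/*
 * Illustrative sketch (not part of this file): a client's async event
 * handler can use ib_event_msg() to log a readable event name.  The
 * signature matches the event_handler callbacks taken by QPs, CQs and
 * SRQs; example_event_handler is a hypothetical name.
 */
static void example_event_handler(struct ib_event *event, void *context)
{
        pr_info("%s: async event: %s\n", event->device->name,
                ib_event_msg(event->event));
}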

static const char * const wc_statuses[] = {
        [IB_WC_SUCCESS]                 = "success",
        [IB_WC_LOC_LEN_ERR]             = "local length error",
        [IB_WC_LOC_QP_OP_ERR]           = "local QP operation error",
        [IB_WC_LOC_EEC_OP_ERR]          = "local EE context operation error",
        [IB_WC_LOC_PROT_ERR]            = "local protection error",
        [IB_WC_WR_FLUSH_ERR]            = "WR flushed",
        [IB_WC_MW_BIND_ERR]             = "memory management operation error",
        [IB_WC_BAD_RESP_ERR]            = "bad response error",
        [IB_WC_LOC_ACCESS_ERR]          = "local access error",
        [IB_WC_REM_INV_REQ_ERR]         = "invalid request error",
        [IB_WC_REM_ACCESS_ERR]          = "remote access error",
        [IB_WC_REM_OP_ERR]              = "remote operation error",
        [IB_WC_RETRY_EXC_ERR]           = "transport retry counter exceeded",
        [IB_WC_RNR_RETRY_EXC_ERR]       = "RNR retry counter exceeded",
        [IB_WC_LOC_RDD_VIOL_ERR]        = "local RDD violation error",
        [IB_WC_REM_INV_RD_REQ_ERR]      = "remote invalid RD request",
        [IB_WC_REM_ABORT_ERR]           = "operation aborted",
        [IB_WC_INV_EECN_ERR]            = "invalid EE context number",
        [IB_WC_INV_EEC_STATE_ERR]       = "invalid EE context state",
        [IB_WC_FATAL_ERR]               = "fatal error",
        [IB_WC_RESP_TIMEOUT_ERR]        = "response timeout error",
        [IB_WC_GENERAL_ERR]             = "general error",
};

const char *__attribute_const__ ib_wc_status_msg(enum ib_wc_status status)
{
        size_t index = status;

        return (index < ARRAY_SIZE(wc_statuses) && wc_statuses[index]) ?
                        wc_statuses[index] : "unrecognized status";
}
EXPORT_SYMBOL(ib_wc_status_msg);
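
/*
 * Illustrative sketch (not part of this file): a poll loop can pair
 * ib_poll_cq() with ib_wc_status_msg() to report failed completions.
 * example_drain_cq is a hypothetical helper.
 */
static void example_drain_cq(struct ib_cq *cq)
{
        struct ib_wc wc;

        while (ib_poll_cq(cq, 1, &wc) > 0)
                if (wc.status != IB_WC_SUCCESS)
                        pr_warn("wr_id %llu failed: %s\n",
                                (unsigned long long)wc.wr_id,
                                ib_wc_status_msg(wc.status));
}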

__attribute_const__ int ib_rate_to_mult(enum ib_rate rate)
{
        switch (rate) {
        case IB_RATE_2_5_GBPS: return  1;
        case IB_RATE_5_GBPS:   return  2;
        case IB_RATE_10_GBPS:  return  4;
        case IB_RATE_20_GBPS:  return  8;
        case IB_RATE_30_GBPS:  return 12;
        case IB_RATE_40_GBPS:  return 16;
        case IB_RATE_60_GBPS:  return 24;
        case IB_RATE_80_GBPS:  return 32;
        case IB_RATE_120_GBPS: return 48;
        default:               return -1;
        }
}
EXPORT_SYMBOL(ib_rate_to_mult);

__attribute_const__ enum ib_rate mult_to_ib_rate(int mult)
{
        switch (mult) {
        case 1:  return IB_RATE_2_5_GBPS;
        case 2:  return IB_RATE_5_GBPS;
        case 4:  return IB_RATE_10_GBPS;
        case 8:  return IB_RATE_20_GBPS;
        case 12: return IB_RATE_30_GBPS;
        case 16: return IB_RATE_40_GBPS;
        case 24: return IB_RATE_60_GBPS;
        case 32: return IB_RATE_80_GBPS;
        case 48: return IB_RATE_120_GBPS;
        default: return IB_RATE_PORT_CURRENT;
        }
}
EXPORT_SYMBOL(mult_to_ib_rate);

__attribute_const__ int ib_rate_to_mbps(enum ib_rate rate)
{
        switch (rate) {
        case IB_RATE_2_5_GBPS: return 2500;
        case IB_RATE_5_GBPS:   return 5000;
        case IB_RATE_10_GBPS:  return 10000;
        case IB_RATE_20_GBPS:  return 20000;
        case IB_RATE_30_GBPS:  return 30000;
        case IB_RATE_40_GBPS:  return 40000;
        case IB_RATE_60_GBPS:  return 60000;
        case IB_RATE_80_GBPS:  return 80000;
        case IB_RATE_120_GBPS: return 120000;
        case IB_RATE_14_GBPS:  return 14062;
        case IB_RATE_56_GBPS:  return 56250;
        case IB_RATE_112_GBPS: return 112500;
        case IB_RATE_168_GBPS: return 168750;
        case IB_RATE_25_GBPS:  return 25781;
        case IB_RATE_100_GBPS: return 103125;
        case IB_RATE_200_GBPS: return 206250;
        case IB_RATE_300_GBPS: return 309375;
        default:               return -1;
        }
}
EXPORT_SYMBOL(ib_rate_to_mbps);

__attribute_const__ enum rdma_transport_type
rdma_node_get_transport(enum rdma_node_type node_type)
{
        if (node_type == RDMA_NODE_USNIC)
                return RDMA_TRANSPORT_USNIC;
        if (node_type == RDMA_NODE_USNIC_UDP)
                return RDMA_TRANSPORT_USNIC_UDP;
        if (node_type == RDMA_NODE_RNIC)
                return RDMA_TRANSPORT_IWARP;

        return RDMA_TRANSPORT_IB;
}
EXPORT_SYMBOL(rdma_node_get_transport);

enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device, u8 port_num)
{
        enum rdma_transport_type lt;

        if (device->get_link_layer)
                return device->get_link_layer(device, port_num);

        lt = rdma_node_get_transport(device->node_type);
        if (lt == RDMA_TRANSPORT_IB)
                return IB_LINK_LAYER_INFINIBAND;

        return IB_LINK_LAYER_ETHERNET;
}
EXPORT_SYMBOL(rdma_port_get_link_layer);
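
/*
 * Illustrative sketch (not part of this file): callers commonly branch
 * on the link layer, e.g. AHs on Ethernet (RoCE) ports always carry a
 * GRH.  example_port_needs_grh is a hypothetical helper.
 */
static bool example_port_needs_grh(struct ib_device *device, u8 port_num)
{
        return rdma_port_get_link_layer(device, port_num) ==
               IB_LINK_LAYER_ETHERNET;
}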

/* Protection domains */

/**
 * ib_alloc_pd - Allocates an unused protection domain.
 * @device: The device on which to allocate the protection domain.
 *
 * A protection domain object provides an association between QPs, shared
 * receive queues, address handles, memory regions, and memory windows.
 *
 * Every PD has a local_dma_lkey which can be used as the lkey value for local
 * memory operations.
 */
struct ib_pd *__ib_alloc_pd(struct ib_device *device, unsigned int flags,
                            const char *caller)
{
        struct ib_pd *pd;
        int mr_access_flags = 0;

        pd = device->alloc_pd(device, NULL, NULL);
        if (IS_ERR(pd))
                return pd;

        pd->device = device;
        pd->uobject = NULL;
        pd->__internal_mr = NULL;
        atomic_set(&pd->usecnt, 0);
        pd->flags = flags;

        if (device->attrs.device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY)
                pd->local_dma_lkey = device->local_dma_lkey;
        else
                mr_access_flags |= IB_ACCESS_LOCAL_WRITE;

        if (flags & IB_PD_UNSAFE_GLOBAL_RKEY) {
                pr_warn("%s: enabling unsafe global rkey\n", caller);
                mr_access_flags |= IB_ACCESS_REMOTE_READ | IB_ACCESS_REMOTE_WRITE;
        }

        if (mr_access_flags) {
                struct ib_mr *mr;

                mr = pd->device->get_dma_mr(pd, mr_access_flags);
                if (IS_ERR(mr)) {
                        ib_dealloc_pd(pd);
                        return ERR_CAST(mr);
                }

                mr->device = pd->device;
                mr->pd = pd;
                mr->uobject = NULL;
                mr->need_inval = false;

                pd->__internal_mr = mr;

                if (!(device->attrs.device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY))
                        pd->local_dma_lkey = pd->__internal_mr->lkey;

                if (flags & IB_PD_UNSAFE_GLOBAL_RKEY)
                        pd->unsafe_global_rkey = pd->__internal_mr->rkey;
        }

        return pd;
}
EXPORT_SYMBOL(__ib_alloc_pd);
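
/*
 * Illustrative sketch (not part of this file): kernel clients normally
 * reach __ib_alloc_pd() through the ib_alloc_pd() wrapper macro, which
 * supplies the module name seen in the unsafe-rkey warning above.
 * example_setup_pd is a hypothetical helper.
 */
static int example_setup_pd(struct ib_device *device, struct ib_pd **pd_out)
{
        struct ib_pd *pd = ib_alloc_pd(device, 0);

        if (IS_ERR(pd))
                return PTR_ERR(pd);

        *pd_out = pd;
        return 0;
}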

/**
 * ib_dealloc_pd - Deallocates a protection domain.
 * @pd: The protection domain to deallocate.
 *
 * It is an error to call this function while any resources in the pd still
 * exist.  The caller is responsible to synchronously destroy them and
 * guarantee no new allocations will happen.
 */
void ib_dealloc_pd(struct ib_pd *pd)
{
        int ret;

        if (pd->__internal_mr) {
                ret = pd->device->dereg_mr(pd->__internal_mr);
                WARN_ON(ret);
                pd->__internal_mr = NULL;
        }

        /* uverbs manipulates usecnt with proper locking, while the kabi
           requires the caller to guarantee we can't race here. */
        WARN_ON(atomic_read(&pd->usecnt));

        /* Making dealloc_pd a void return is a WIP, no driver should return
           an error here. */
        ret = pd->device->dealloc_pd(pd);
        WARN_ONCE(ret, "Infiniband HW driver failed dealloc_pd");
}
EXPORT_SYMBOL(ib_dealloc_pd);

/* Address handles */

static struct ib_ah *_rdma_create_ah(struct ib_pd *pd,
                                     struct rdma_ah_attr *ah_attr,
                                     struct ib_udata *udata)
{
        struct ib_ah *ah;

        ah = pd->device->create_ah(pd, ah_attr, udata);

        if (!IS_ERR(ah)) {
                ah->device  = pd->device;
                ah->pd      = pd;
                ah->uobject = NULL;
                ah->type    = ah_attr->type;
                atomic_inc(&pd->usecnt);
        }

        return ah;
}

struct ib_ah *rdma_create_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr)
{
        return _rdma_create_ah(pd, ah_attr, NULL);
}
EXPORT_SYMBOL(rdma_create_ah);

/**
 * rdma_create_user_ah - Creates an address handle for the given
 * address vector.
 * @pd: The protection domain associated with the address handle.
 * @ah_attr: The attributes of the address vector.
 * @udata: pointer to the user's input/output buffer information needed by
 * the provider driver.
 *
 * It resolves the destination MAC address for an ah_attr of the RoCE type.
 * Returns a new address handle on success, or an ERR_PTR-encoded error code
 * on failure.  The address handle is used to reference a local or global
 * destination in all UD QP post sends.
 */
struct ib_ah *rdma_create_user_ah(struct ib_pd *pd,
                                  struct rdma_ah_attr *ah_attr,
                                  struct ib_udata *udata)
{
        int err;

        if (ah_attr->type == RDMA_AH_ATTR_TYPE_ROCE) {
                err = ib_resolve_eth_dmac(pd->device, ah_attr);
                if (err)
                        return ERR_PTR(err);
        }

        return _rdma_create_ah(pd, ah_attr, udata);
}
EXPORT_SYMBOL(rdma_create_user_ah);

int ib_get_rdma_header_version(const union rdma_network_hdr *hdr)
{
        const struct iphdr *ip4h = (struct iphdr *)&hdr->roce4grh;
        struct iphdr ip4h_checked;
        const struct ipv6hdr *ip6h = (struct ipv6hdr *)&hdr->ibgrh;

        /* If it's IPv6, the version must be 6, otherwise, the first
         * 20 bytes (before the IPv4 header) are garbled.
         */
        if (ip6h->version != 6)
                return (ip4h->version == 4) ? 4 : 0;
        /* version may be 6 or 4 because the first 20 bytes could be garbled */

        /* RoCE v2 requires no options, thus header length
         * must be 5 words
         */
        if (ip4h->ihl != 5)
                return 6;

        /* Verify checksum.
         * We can't write on scattered buffers so we need to copy to
         * temp buffer.
         */
        memcpy(&ip4h_checked, ip4h, sizeof(ip4h_checked));
        ip4h_checked.check = 0;
        ip4h_checked.check = ip_fast_csum((u8 *)&ip4h_checked, 5);
        /* if IPv4 header checksum is OK, believe it */
        if (ip4h->check == ip4h_checked.check)
                return 4;
        return 6;
}
EXPORT_SYMBOL(ib_get_rdma_header_version);

static enum rdma_network_type ib_get_net_type_by_grh(struct ib_device *device,
                                                     u8 port_num,
                                                     const struct ib_grh *grh)
{
        int grh_version;

        if (rdma_protocol_ib(device, port_num))
                return RDMA_NETWORK_IB;

        grh_version = ib_get_rdma_header_version((union rdma_network_hdr *)grh);

        if (grh_version == 4)
                return RDMA_NETWORK_IPV4;

        if (grh->next_hdr == IPPROTO_UDP)
                return RDMA_NETWORK_IPV6;

        return RDMA_NETWORK_ROCE_V1;
}

struct find_gid_index_context {
        u16 vlan_id;
        enum ib_gid_type gid_type;
};

static bool find_gid_index(const union ib_gid *gid,
                           const struct ib_gid_attr *gid_attr,
                           void *context)
{
        struct find_gid_index_context *ctx =
                (struct find_gid_index_context *)context;

        if (ctx->gid_type != gid_attr->gid_type)
                return false;

        if ((!!(ctx->vlan_id != 0xffff) == !is_vlan_dev(gid_attr->ndev)) ||
            (is_vlan_dev(gid_attr->ndev) &&
             vlan_dev_vlan_id(gid_attr->ndev) != ctx->vlan_id))
                return false;

        return true;
}

static int get_sgid_index_from_eth(struct ib_device *device, u8 port_num,
                                   u16 vlan_id, const union ib_gid *sgid,
                                   enum ib_gid_type gid_type,
                                   u16 *gid_index)
{
        struct find_gid_index_context context = {.vlan_id = vlan_id,
                                                 .gid_type = gid_type};

        return ib_find_gid_by_filter(device, sgid, port_num, find_gid_index,
                                     &context, gid_index);
}

int ib_get_gids_from_rdma_hdr(const union rdma_network_hdr *hdr,
                              enum rdma_network_type net_type,
                              union ib_gid *sgid, union ib_gid *dgid)
{
        struct sockaddr_in src_in;
        struct sockaddr_in dst_in;
        __be32 src_saddr, dst_saddr;

        if (!sgid || !dgid)
                return -EINVAL;

        if (net_type == RDMA_NETWORK_IPV4) {
                memcpy(&src_in.sin_addr.s_addr,
                       &hdr->roce4grh.saddr, 4);
                memcpy(&dst_in.sin_addr.s_addr,
                       &hdr->roce4grh.daddr, 4);
                src_saddr = src_in.sin_addr.s_addr;
                dst_saddr = dst_in.sin_addr.s_addr;
                ipv6_addr_set_v4mapped(src_saddr,
                                       (struct in6_addr *)sgid);
                ipv6_addr_set_v4mapped(dst_saddr,
                                       (struct in6_addr *)dgid);
                return 0;
        } else if (net_type == RDMA_NETWORK_IPV6 ||
                   net_type == RDMA_NETWORK_IB) {
                *dgid = hdr->ibgrh.dgid;
                *sgid = hdr->ibgrh.sgid;
                return 0;
        } else {
                return -EINVAL;
        }
}
EXPORT_SYMBOL(ib_get_gids_from_rdma_hdr);

/*
 * This function creates an AH from the incoming packet.
 * The incoming packet carries the dgid of the receiving node (on which
 * this code is executing), and its sgid holds the GID of the sender.
 *
 * When resolving the destination MAC address, the received dgid is used
 * as the sgid and the received sgid as the dgid, because the received
 * sgid contains the destination's GID, i.e. whom to respond to.
 *
 * This is why, when calling rdma_addr_find_l2_eth_by_grh(), the position
 * of the dgid and sgid arguments does not match the order of the
 * parameters.
 */
int ib_init_ah_from_wc(struct ib_device *device, u8 port_num,
                       const struct ib_wc *wc, const struct ib_grh *grh,
                       struct rdma_ah_attr *ah_attr)
{
        u32 flow_class;
        u16 gid_index;
        int ret;
        enum rdma_network_type net_type = RDMA_NETWORK_IB;
        enum ib_gid_type gid_type = IB_GID_TYPE_IB;
        int hoplimit = 0xff;
        union ib_gid dgid;
        union ib_gid sgid;

        might_sleep();

        memset(ah_attr, 0, sizeof *ah_attr);
        ah_attr->type = rdma_ah_find_type(device, port_num);
        if (rdma_cap_eth_ah(device, port_num)) {
                if (wc->wc_flags & IB_WC_WITH_NETWORK_HDR_TYPE)
                        net_type = wc->network_hdr_type;
                else
                        net_type = ib_get_net_type_by_grh(device, port_num, grh);
                gid_type = ib_network_to_gid_type(net_type);
        }
        ret = ib_get_gids_from_rdma_hdr((union rdma_network_hdr *)grh, net_type,
                                        &sgid, &dgid);
        if (ret)
                return ret;

        if (rdma_protocol_roce(device, port_num)) {
                int if_index = 0;
                u16 vlan_id = wc->wc_flags & IB_WC_WITH_VLAN ?
                                wc->vlan_id : 0xffff;
                struct net_device *idev;
                struct net_device *resolved_dev;

                if (!(wc->wc_flags & IB_WC_GRH))
                        return -EPROTOTYPE;

                if (!device->get_netdev)
                        return -EOPNOTSUPP;

                idev = device->get_netdev(device, port_num);
                if (!idev)
                        return -ENODEV;

                ret = rdma_addr_find_l2_eth_by_grh(&dgid, &sgid,
                                                   ah_attr->roce.dmac,
                                                   wc->wc_flags & IB_WC_WITH_VLAN ?
                                                   NULL : &vlan_id,
                                                   &if_index, &hoplimit);
                if (ret) {
                        dev_put(idev);
                        return ret;
                }

                resolved_dev = dev_get_by_index(&init_net, if_index);
                rcu_read_lock();
                if (resolved_dev != idev && !rdma_is_upper_dev_rcu(idev,
                                                                   resolved_dev))
                        ret = -EHOSTUNREACH;
                rcu_read_unlock();
                dev_put(idev);
                dev_put(resolved_dev);
                if (ret)
                        return ret;

                ret = get_sgid_index_from_eth(device, port_num, vlan_id,
                                              &dgid, gid_type, &gid_index);
                if (ret)
                        return ret;
        }

        rdma_ah_set_dlid(ah_attr, wc->slid);
        rdma_ah_set_sl(ah_attr, wc->sl);
        rdma_ah_set_path_bits(ah_attr, wc->dlid_path_bits);
        rdma_ah_set_port_num(ah_attr, port_num);

        if (wc->wc_flags & IB_WC_GRH) {
                if (!rdma_cap_eth_ah(device, port_num)) {
                        if (dgid.global.interface_id != cpu_to_be64(IB_SA_WELL_KNOWN_GUID)) {
                                ret = ib_find_cached_gid_by_port(device, &dgid,
                                                                 IB_GID_TYPE_IB,
                                                                 port_num, NULL,
                                                                 &gid_index);
                                if (ret)
                                        return ret;
                        } else {
                                gid_index = 0;
                        }
                }

                flow_class = be32_to_cpu(grh->version_tclass_flow);
                rdma_ah_set_grh(ah_attr, &sgid,
                                flow_class & 0xFFFFF,
                                (u8)gid_index, hoplimit,
                                (flow_class >> 20) & 0xFF);
        }
        return 0;
}
EXPORT_SYMBOL(ib_init_ah_from_wc);

struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, const struct ib_wc *wc,
                                   const struct ib_grh *grh, u8 port_num)
{
        struct rdma_ah_attr ah_attr;
        int ret;

        ret = ib_init_ah_from_wc(pd->device, port_num, wc, grh, &ah_attr);
        if (ret)
                return ERR_PTR(ret);

        return rdma_create_ah(pd, &ah_attr);
}
EXPORT_SYMBOL(ib_create_ah_from_wc);
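
/*
 * Illustrative sketch (not part of this file): a UD responder can reply
 * to a sender by building an AH straight from the received completion.
 * example_ud_reply is a hypothetical helper; grh is only valid when
 * wc->wc_flags has IB_WC_GRH set.
 */
static int example_ud_reply(struct ib_pd *pd, const struct ib_wc *wc,
                            const struct ib_grh *grh, u8 port_num)
{
        struct ib_ah *ah = ib_create_ah_from_wc(pd, wc, grh, port_num);

        if (IS_ERR(ah))
                return PTR_ERR(ah);

        /* ... post a UD send referencing ah, then release it ... */

        return rdma_destroy_ah(ah);
}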

int rdma_modify_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr)
{
        if (ah->type != ah_attr->type)
                return -EINVAL;

        return ah->device->modify_ah ?
                ah->device->modify_ah(ah, ah_attr) :
                -ENOSYS;
}
EXPORT_SYMBOL(rdma_modify_ah);

int rdma_query_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr)
{
        return ah->device->query_ah ?
                ah->device->query_ah(ah, ah_attr) :
                -ENOSYS;
}
EXPORT_SYMBOL(rdma_query_ah);

int rdma_destroy_ah(struct ib_ah *ah)
{
        struct ib_pd *pd;
        int ret;

        pd = ah->pd;
        ret = ah->device->destroy_ah(ah);
        if (!ret)
                atomic_dec(&pd->usecnt);

        return ret;
}
EXPORT_SYMBOL(rdma_destroy_ah);

/* Shared receive queues */

struct ib_srq *ib_create_srq(struct ib_pd *pd,
                             struct ib_srq_init_attr *srq_init_attr)
{
        struct ib_srq *srq;

        if (!pd->device->create_srq)
                return ERR_PTR(-ENOSYS);

        srq = pd->device->create_srq(pd, srq_init_attr, NULL);

        if (!IS_ERR(srq)) {
                srq->device        = pd->device;
                srq->pd            = pd;
                srq->uobject       = NULL;
                srq->event_handler = srq_init_attr->event_handler;
                srq->srq_context   = srq_init_attr->srq_context;
                srq->srq_type      = srq_init_attr->srq_type;
                if (ib_srq_has_cq(srq->srq_type)) {
                        srq->ext.cq = srq_init_attr->ext.cq;
                        atomic_inc(&srq->ext.cq->usecnt);
                }
                if (srq->srq_type == IB_SRQT_XRC) {
                        srq->ext.xrc.xrcd = srq_init_attr->ext.xrc.xrcd;
                        atomic_inc(&srq->ext.xrc.xrcd->usecnt);
                }
                atomic_inc(&pd->usecnt);
                atomic_set(&srq->usecnt, 0);
        }

        return srq;
}
EXPORT_SYMBOL(ib_create_srq);
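
/*
 * Illustrative sketch (not part of this file): a basic SRQ only needs
 * queue limits and IB_SRQT_BASIC; XRC SRQs additionally set the ext.xrc
 * fields.  example_create_srq and the queue sizes are hypothetical.
 */
static struct ib_srq *example_create_srq(struct ib_pd *pd)
{
        struct ib_srq_init_attr init_attr = {
                .attr = {
                        .max_wr  = 256,
                        .max_sge = 1,
                },
                .srq_type = IB_SRQT_BASIC,
        };

        return ib_create_srq(pd, &init_attr);
}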

int ib_modify_srq(struct ib_srq *srq,
                  struct ib_srq_attr *srq_attr,
                  enum ib_srq_attr_mask srq_attr_mask)
{
        return srq->device->modify_srq ?
                srq->device->modify_srq(srq, srq_attr, srq_attr_mask, NULL) :
                -ENOSYS;
}
EXPORT_SYMBOL(ib_modify_srq);

int ib_query_srq(struct ib_srq *srq,
                 struct ib_srq_attr *srq_attr)
{
        return srq->device->query_srq ?
                srq->device->query_srq(srq, srq_attr) : -ENOSYS;
}
EXPORT_SYMBOL(ib_query_srq);

int ib_destroy_srq(struct ib_srq *srq)
{
        struct ib_pd *pd;
        enum ib_srq_type srq_type;
        struct ib_xrcd *uninitialized_var(xrcd);
        struct ib_cq *uninitialized_var(cq);
        int ret;

        if (atomic_read(&srq->usecnt))
                return -EBUSY;

        pd = srq->pd;
        srq_type = srq->srq_type;
        if (ib_srq_has_cq(srq_type))
                cq = srq->ext.cq;
        if (srq_type == IB_SRQT_XRC)
                xrcd = srq->ext.xrc.xrcd;

        ret = srq->device->destroy_srq(srq);
        if (!ret) {
                atomic_dec(&pd->usecnt);
                if (srq_type == IB_SRQT_XRC)
                        atomic_dec(&xrcd->usecnt);
                if (ib_srq_has_cq(srq_type))
                        atomic_dec(&cq->usecnt);
        }

        return ret;
}
EXPORT_SYMBOL(ib_destroy_srq);

/* Queue pairs */

static void __ib_shared_qp_event_handler(struct ib_event *event, void *context)
{
        struct ib_qp *qp = context;
        unsigned long flags;

        spin_lock_irqsave(&qp->device->event_handler_lock, flags);
        list_for_each_entry(event->element.qp, &qp->open_list, open_list)
                if (event->element.qp->event_handler)
                        event->element.qp->event_handler(event, event->element.qp->qp_context);
        spin_unlock_irqrestore(&qp->device->event_handler_lock, flags);
}

static void __ib_insert_xrcd_qp(struct ib_xrcd *xrcd, struct ib_qp *qp)
{
        mutex_lock(&xrcd->tgt_qp_mutex);
        list_add(&qp->xrcd_list, &xrcd->tgt_qp_list);
        mutex_unlock(&xrcd->tgt_qp_mutex);
}

static struct ib_qp *__ib_open_qp(struct ib_qp *real_qp,
                                  void (*event_handler)(struct ib_event *, void *),
                                  void *qp_context)
{
        struct ib_qp *qp;
        unsigned long flags;
        int err;

        qp = kzalloc(sizeof *qp, GFP_KERNEL);
        if (!qp)
                return ERR_PTR(-ENOMEM);

        qp->real_qp = real_qp;
        err = ib_open_shared_qp_security(qp, real_qp->device);
        if (err) {
                kfree(qp);
                return ERR_PTR(err);
        }

        atomic_inc(&real_qp->usecnt);
        qp->device = real_qp->device;
        qp->event_handler = event_handler;
        qp->qp_context = qp_context;
        qp->qp_num = real_qp->qp_num;
        qp->qp_type = real_qp->qp_type;

        spin_lock_irqsave(&real_qp->device->event_handler_lock, flags);
        list_add(&qp->open_list, &real_qp->open_list);
        spin_unlock_irqrestore(&real_qp->device->event_handler_lock, flags);

        return qp;
}

struct ib_qp *ib_open_qp(struct ib_xrcd *xrcd,
                         struct ib_qp_open_attr *qp_open_attr)
{
        struct ib_qp *qp, *real_qp;

        if (qp_open_attr->qp_type != IB_QPT_XRC_TGT)
                return ERR_PTR(-EINVAL);

        qp = ERR_PTR(-EINVAL);
        mutex_lock(&xrcd->tgt_qp_mutex);
        list_for_each_entry(real_qp, &xrcd->tgt_qp_list, xrcd_list) {
                if (real_qp->qp_num == qp_open_attr->qp_num) {
                        qp = __ib_open_qp(real_qp, qp_open_attr->event_handler,
                                          qp_open_attr->qp_context);
                        break;
                }
        }
        mutex_unlock(&xrcd->tgt_qp_mutex);
        return qp;
}
EXPORT_SYMBOL(ib_open_qp);

static struct ib_qp *ib_create_xrc_qp(struct ib_qp *qp,
                                      struct ib_qp_init_attr *qp_init_attr)
{
        struct ib_qp *real_qp = qp;

        qp->event_handler = __ib_shared_qp_event_handler;
        qp->qp_context = qp;
        qp->pd = NULL;
        qp->send_cq = qp->recv_cq = NULL;
        qp->srq = NULL;
        qp->xrcd = qp_init_attr->xrcd;
        atomic_inc(&qp_init_attr->xrcd->usecnt);
        INIT_LIST_HEAD(&qp->open_list);

        qp = __ib_open_qp(real_qp, qp_init_attr->event_handler,
                          qp_init_attr->qp_context);
        if (!IS_ERR(qp))
                __ib_insert_xrcd_qp(qp_init_attr->xrcd, real_qp);
        else
                real_qp->device->destroy_qp(real_qp);
        return qp;
}

struct ib_qp *ib_create_qp(struct ib_pd *pd,
                           struct ib_qp_init_attr *qp_init_attr)
{
        struct ib_device *device = pd ? pd->device : qp_init_attr->xrcd->device;
        struct ib_qp *qp;
        int ret;

        if (qp_init_attr->rwq_ind_tbl &&
            (qp_init_attr->recv_cq ||
             qp_init_attr->srq || qp_init_attr->cap.max_recv_wr ||
             qp_init_attr->cap.max_recv_sge))
                return ERR_PTR(-EINVAL);

        /*
         * If the caller is using the RDMA API, calculate the resources
         * needed for the RDMA READ/WRITE operations.
         *
         * Note that these callers need to pass in a port number.
         */
        if (qp_init_attr->cap.max_rdma_ctxs)
                rdma_rw_init_qp(device, qp_init_attr);

        qp = device->create_qp(pd, qp_init_attr, NULL);
        if (IS_ERR(qp))
                return qp;

        ret = ib_create_qp_security(qp, device);
        if (ret) {
                ib_destroy_qp(qp);
                return ERR_PTR(ret);
        }

        qp->device = device;
        qp->real_qp = qp;
        qp->uobject = NULL;
        qp->qp_type = qp_init_attr->qp_type;
        qp->rwq_ind_tbl = qp_init_attr->rwq_ind_tbl;

        atomic_set(&qp->usecnt, 0);
        qp->mrs_used = 0;
        spin_lock_init(&qp->mr_lock);
        INIT_LIST_HEAD(&qp->rdma_mrs);
        INIT_LIST_HEAD(&qp->sig_mrs);
        qp->port = 0;

        if (qp_init_attr->qp_type == IB_QPT_XRC_TGT)
                return ib_create_xrc_qp(qp, qp_init_attr);

        qp->event_handler = qp_init_attr->event_handler;
        qp->qp_context = qp_init_attr->qp_context;
        if (qp_init_attr->qp_type == IB_QPT_XRC_INI) {
                qp->recv_cq = NULL;
                qp->srq = NULL;
        } else {
                qp->recv_cq = qp_init_attr->recv_cq;
                if (qp_init_attr->recv_cq)
                        atomic_inc(&qp_init_attr->recv_cq->usecnt);
                qp->srq = qp_init_attr->srq;
                if (qp->srq)
                        atomic_inc(&qp_init_attr->srq->usecnt);
        }

        qp->pd = pd;
        qp->send_cq = qp_init_attr->send_cq;
        qp->xrcd = NULL;

        atomic_inc(&pd->usecnt);
        if (qp_init_attr->send_cq)
                atomic_inc(&qp_init_attr->send_cq->usecnt);
        if (qp_init_attr->rwq_ind_tbl)
                atomic_inc(&qp->rwq_ind_tbl->usecnt);

        if (qp_init_attr->cap.max_rdma_ctxs) {
                ret = rdma_rw_init_mrs(qp, qp_init_attr);
                if (ret) {
                        pr_err("failed to init MR pool ret= %d\n", ret);
                        ib_destroy_qp(qp);
                        return ERR_PTR(ret);
                }
        }

        /*
         * Note: all hw drivers guarantee that max_send_sge is lower than
         * the device RDMA WRITE SGE limit but not all hw drivers ensure that
         * max_send_sge <= max_sge_rd.
         */
        qp->max_write_sge = qp_init_attr->cap.max_send_sge;
        qp->max_read_sge = min_t(u32, qp_init_attr->cap.max_send_sge,
                                 device->attrs.max_sge_rd);

        return qp;
}
EXPORT_SYMBOL(ib_create_qp);
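
/*
 * Illustrative sketch (not part of this file): a minimal RC QP using one
 * CQ for both directions.  example_create_rc_qp and the queue sizes are
 * hypothetical.
 */
static struct ib_qp *example_create_rc_qp(struct ib_pd *pd, struct ib_cq *cq)
{
        struct ib_qp_init_attr init_attr = {
                .send_cq = cq,
                .recv_cq = cq,
                .cap = {
                        .max_send_wr  = 64,
                        .max_recv_wr  = 64,
                        .max_send_sge = 1,
                        .max_recv_sge = 1,
                },
                .sq_sig_type = IB_SIGNAL_REQ_WR,
                .qp_type     = IB_QPT_RC,
        };

        return ib_create_qp(pd, &init_attr);
}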

static const struct {
        int                     valid;
        enum ib_qp_attr_mask    req_param[IB_QPT_MAX];
        enum ib_qp_attr_mask    opt_param[IB_QPT_MAX];
} qp_state_table[IB_QPS_ERR + 1][IB_QPS_ERR + 1] = {
        [IB_QPS_RESET] = {
                [IB_QPS_RESET] = { .valid = 1 },
                [IB_QPS_INIT]  = {
                        .valid = 1,
                        .req_param = {
                                [IB_QPT_UD]  = (IB_QP_PKEY_INDEX |
                                                IB_QP_PORT |
                                                IB_QP_QKEY),
                                [IB_QPT_RAW_PACKET] = IB_QP_PORT,
                                [IB_QPT_UC]  = (IB_QP_PKEY_INDEX |
                                                IB_QP_PORT |
                                                IB_QP_ACCESS_FLAGS),
                                [IB_QPT_RC]  = (IB_QP_PKEY_INDEX |
                                                IB_QP_PORT |
                                                IB_QP_ACCESS_FLAGS),
                                [IB_QPT_XRC_INI] = (IB_QP_PKEY_INDEX |
                                                IB_QP_PORT |
                                                IB_QP_ACCESS_FLAGS),
                                [IB_QPT_XRC_TGT] = (IB_QP_PKEY_INDEX |
                                                IB_QP_PORT |
                                                IB_QP_ACCESS_FLAGS),
                                [IB_QPT_SMI] = (IB_QP_PKEY_INDEX |
                                                IB_QP_QKEY),
                                [IB_QPT_GSI] = (IB_QP_PKEY_INDEX |
                                                IB_QP_QKEY),
                        }
                },
        },
        [IB_QPS_INIT]  = {
                [IB_QPS_RESET] = { .valid = 1 },
                [IB_QPS_ERR] =   { .valid = 1 },
                [IB_QPS_INIT]  = {
                        .valid = 1,
                        .opt_param = {
                                [IB_QPT_UD]  = (IB_QP_PKEY_INDEX |
                                                IB_QP_PORT |
                                                IB_QP_QKEY),
                                [IB_QPT_UC]  = (IB_QP_PKEY_INDEX |
                                                IB_QP_PORT |
                                                IB_QP_ACCESS_FLAGS),
                                [IB_QPT_RC]  = (IB_QP_PKEY_INDEX |
                                                IB_QP_PORT |
                                                IB_QP_ACCESS_FLAGS),
                                [IB_QPT_XRC_INI] = (IB_QP_PKEY_INDEX |
                                                IB_QP_PORT |
                                                IB_QP_ACCESS_FLAGS),
                                [IB_QPT_XRC_TGT] = (IB_QP_PKEY_INDEX |
                                                IB_QP_PORT |
                                                IB_QP_ACCESS_FLAGS),
                                [IB_QPT_SMI] = (IB_QP_PKEY_INDEX |
                                                IB_QP_QKEY),
                                [IB_QPT_GSI] = (IB_QP_PKEY_INDEX |
                                                IB_QP_QKEY),
                        }
                },
                [IB_QPS_RTR]   = {
                        .valid = 1,
                        .req_param = {
                                [IB_QPT_UC]  = (IB_QP_AV |
                                                IB_QP_PATH_MTU |
                                                IB_QP_DEST_QPN |
                                                IB_QP_RQ_PSN),
                                [IB_QPT_RC]  = (IB_QP_AV |
                                                IB_QP_PATH_MTU |
                                                IB_QP_DEST_QPN |
                                                IB_QP_RQ_PSN |
                                                IB_QP_MAX_DEST_RD_ATOMIC |
                                                IB_QP_MIN_RNR_TIMER),
                                [IB_QPT_XRC_INI] = (IB_QP_AV |
                                                IB_QP_PATH_MTU |
                                                IB_QP_DEST_QPN |
                                                IB_QP_RQ_PSN),
                                [IB_QPT_XRC_TGT] = (IB_QP_AV |
                                                IB_QP_PATH_MTU |
                                                IB_QP_DEST_QPN |
                                                IB_QP_RQ_PSN |
                                                IB_QP_MAX_DEST_RD_ATOMIC |
                                                IB_QP_MIN_RNR_TIMER),
                        },
                        .opt_param = {
                                [IB_QPT_UD]  = (IB_QP_PKEY_INDEX |
                                                IB_QP_QKEY),
                                [IB_QPT_UC]  = (IB_QP_ALT_PATH |
                                                IB_QP_ACCESS_FLAGS |
                                                IB_QP_PKEY_INDEX),
                                [IB_QPT_RC]  = (IB_QP_ALT_PATH |
                                                IB_QP_ACCESS_FLAGS |
                                                IB_QP_PKEY_INDEX),
                                [IB_QPT_XRC_INI] = (IB_QP_ALT_PATH |
                                                IB_QP_ACCESS_FLAGS |
                                                IB_QP_PKEY_INDEX),
                                [IB_QPT_XRC_TGT] = (IB_QP_ALT_PATH |
                                                IB_QP_ACCESS_FLAGS |
                                                IB_QP_PKEY_INDEX),
                                [IB_QPT_SMI] = (IB_QP_PKEY_INDEX |
                                                IB_QP_QKEY),
                                [IB_QPT_GSI] = (IB_QP_PKEY_INDEX |
                                                IB_QP_QKEY),
                        },
                },
        },
        [IB_QPS_RTR]   = {
                [IB_QPS_RESET] = { .valid = 1 },
                [IB_QPS_ERR] =   { .valid = 1 },
                [IB_QPS_RTS]   = {
                        .valid = 1,
                        .req_param = {
                                [IB_QPT_UD]  = IB_QP_SQ_PSN,
                                [IB_QPT_UC]  = IB_QP_SQ_PSN,
                                [IB_QPT_RC]  = (IB_QP_TIMEOUT |
                                                IB_QP_RETRY_CNT |
                                                IB_QP_RNR_RETRY |
                                                IB_QP_SQ_PSN |
                                                IB_QP_MAX_QP_RD_ATOMIC),
                                [IB_QPT_XRC_INI] = (IB_QP_TIMEOUT |
                                                IB_QP_RETRY_CNT |
                                                IB_QP_RNR_RETRY |
                                                IB_QP_SQ_PSN |
                                                IB_QP_MAX_QP_RD_ATOMIC),
                                [IB_QPT_XRC_TGT] = (IB_QP_TIMEOUT |
                                                IB_QP_SQ_PSN),
                                [IB_QPT_SMI] = IB_QP_SQ_PSN,
                                [IB_QPT_GSI] = IB_QP_SQ_PSN,
                        },
                        .opt_param = {
                                [IB_QPT_UD]  = (IB_QP_CUR_STATE |
                                                IB_QP_QKEY),
                                [IB_QPT_UC]  = (IB_QP_CUR_STATE |
                                                IB_QP_ALT_PATH |
                                                IB_QP_ACCESS_FLAGS |
                                                IB_QP_PATH_MIG_STATE),
                                [IB_QPT_RC]  = (IB_QP_CUR_STATE |
                                                IB_QP_ALT_PATH |
                                                IB_QP_ACCESS_FLAGS |
                                                IB_QP_MIN_RNR_TIMER |
                                                IB_QP_PATH_MIG_STATE),
                                [IB_QPT_XRC_INI] = (IB_QP_CUR_STATE |
                                                IB_QP_ALT_PATH |
                                                IB_QP_ACCESS_FLAGS |
                                                IB_QP_PATH_MIG_STATE),
                                [IB_QPT_XRC_TGT] = (IB_QP_CUR_STATE |
                                                IB_QP_ALT_PATH |
                                                IB_QP_ACCESS_FLAGS |
                                                IB_QP_MIN_RNR_TIMER |
                                                IB_QP_PATH_MIG_STATE),
                                [IB_QPT_SMI] = (IB_QP_CUR_STATE |
                                                IB_QP_QKEY),
                                [IB_QPT_GSI] = (IB_QP_CUR_STATE |
                                                IB_QP_QKEY),
                                [IB_QPT_RAW_PACKET] = IB_QP_RATE_LIMIT,
                        }
                }
        },
        [IB_QPS_RTS]   = {
                [IB_QPS_RESET] = { .valid = 1 },
                [IB_QPS_ERR] =   { .valid = 1 },
                [IB_QPS_RTS]   = {
                        .valid = 1,
                        .opt_param = {
                                [IB_QPT_UD]  = (IB_QP_CUR_STATE |
                                                IB_QP_QKEY),
                                [IB_QPT_UC]  = (IB_QP_CUR_STATE |
                                                IB_QP_ACCESS_FLAGS |
                                                IB_QP_ALT_PATH |
                                                IB_QP_PATH_MIG_STATE),
                                [IB_QPT_RC]  = (IB_QP_CUR_STATE |
                                                IB_QP_ACCESS_FLAGS |
                                                IB_QP_ALT_PATH |
                                                IB_QP_PATH_MIG_STATE |
                                                IB_QP_MIN_RNR_TIMER),
                                [IB_QPT_XRC_INI] = (IB_QP_CUR_STATE |
                                                IB_QP_ACCESS_FLAGS |
                                                IB_QP_ALT_PATH |
                                                IB_QP_PATH_MIG_STATE),
                                [IB_QPT_XRC_TGT] = (IB_QP_CUR_STATE |
                                                IB_QP_ACCESS_FLAGS |
                                                IB_QP_ALT_PATH |
                                                IB_QP_PATH_MIG_STATE |
                                                IB_QP_MIN_RNR_TIMER),
                                [IB_QPT_SMI] = (IB_QP_CUR_STATE |
                                                IB_QP_QKEY),
                                [IB_QPT_GSI] = (IB_QP_CUR_STATE |
                                                IB_QP_QKEY),
                                [IB_QPT_RAW_PACKET] = IB_QP_RATE_LIMIT,
                        }
                },
                [IB_QPS_SQD]   = {
                        .valid = 1,
                        .opt_param = {
                                [IB_QPT_UD]  = IB_QP_EN_SQD_ASYNC_NOTIFY,
                                [IB_QPT_UC]  = IB_QP_EN_SQD_ASYNC_NOTIFY,
                                [IB_QPT_RC]  = IB_QP_EN_SQD_ASYNC_NOTIFY,
                                [IB_QPT_XRC_INI] = IB_QP_EN_SQD_ASYNC_NOTIFY,
                                [IB_QPT_XRC_TGT] = IB_QP_EN_SQD_ASYNC_NOTIFY, /* ??? */
                                [IB_QPT_SMI] = IB_QP_EN_SQD_ASYNC_NOTIFY,
                                [IB_QPT_GSI] = IB_QP_EN_SQD_ASYNC_NOTIFY
                        }
                },
        },
        [IB_QPS_SQD]   = {
                [IB_QPS_RESET] = { .valid = 1 },
                [IB_QPS_ERR] =   { .valid = 1 },
                [IB_QPS_RTS]   = {
                        .valid = 1,
                        .opt_param = {
                                [IB_QPT_UD]  = (IB_QP_CUR_STATE |
                                                IB_QP_QKEY),
| 1137 | [IB_QPT_UC] = (IB_QP_CUR_STATE | |
| 1138 | IB_QP_ALT_PATH | |
| 1139 | IB_QP_ACCESS_FLAGS | |
| 1140 | IB_QP_PATH_MIG_STATE), |
| 1141 | [IB_QPT_RC] = (IB_QP_CUR_STATE | |
| 1142 | IB_QP_ALT_PATH | |
| 1143 | IB_QP_ACCESS_FLAGS | |
| 1144 | IB_QP_MIN_RNR_TIMER | |
| 1145 | IB_QP_PATH_MIG_STATE), |
Sean Hefty | b42b63c | 2011-05-23 19:59:25 -0700 | [diff] [blame] | 1146 | [IB_QPT_XRC_INI] = (IB_QP_CUR_STATE | |
| 1147 | IB_QP_ALT_PATH | |
| 1148 | IB_QP_ACCESS_FLAGS | |
| 1149 | IB_QP_PATH_MIG_STATE), |
| 1150 | [IB_QPT_XRC_TGT] = (IB_QP_CUR_STATE | |
| 1151 | IB_QP_ALT_PATH | |
| 1152 | IB_QP_ACCESS_FLAGS | |
| 1153 | IB_QP_MIN_RNR_TIMER | |
| 1154 | IB_QP_PATH_MIG_STATE), |
Roland Dreier | 8a51866 | 2006-02-13 12:48:12 -0800 | [diff] [blame] | 1155 | [IB_QPT_SMI] = (IB_QP_CUR_STATE | |
| 1156 | IB_QP_QKEY), |
| 1157 | [IB_QPT_GSI] = (IB_QP_CUR_STATE | |
| 1158 | IB_QP_QKEY), |
| 1159 | } |
| 1160 | }, |
| 1161 | [IB_QPS_SQD] = { |
| 1162 | .valid = 1, |
| 1163 | .opt_param = { |
| 1164 | [IB_QPT_UD] = (IB_QP_PKEY_INDEX | |
| 1165 | IB_QP_QKEY), |
| 1166 | [IB_QPT_UC] = (IB_QP_AV | |
Roland Dreier | 8a51866 | 2006-02-13 12:48:12 -0800 | [diff] [blame] | 1167 | IB_QP_ALT_PATH | |
| 1168 | IB_QP_ACCESS_FLAGS | |
| 1169 | IB_QP_PKEY_INDEX | |
| 1170 | IB_QP_PATH_MIG_STATE), |
| 1171 | [IB_QPT_RC] = (IB_QP_PORT | |
| 1172 | IB_QP_AV | |
| 1173 | IB_QP_TIMEOUT | |
| 1174 | IB_QP_RETRY_CNT | |
| 1175 | IB_QP_RNR_RETRY | |
| 1176 | IB_QP_MAX_QP_RD_ATOMIC | |
| 1177 | IB_QP_MAX_DEST_RD_ATOMIC | |
Roland Dreier | 8a51866 | 2006-02-13 12:48:12 -0800 | [diff] [blame] | 1178 | IB_QP_ALT_PATH | |
| 1179 | IB_QP_ACCESS_FLAGS | |
| 1180 | IB_QP_PKEY_INDEX | |
| 1181 | IB_QP_MIN_RNR_TIMER | |
| 1182 | IB_QP_PATH_MIG_STATE), |
Sean Hefty | b42b63c | 2011-05-23 19:59:25 -0700 | [diff] [blame] | 1183 | [IB_QPT_XRC_INI] = (IB_QP_PORT | |
| 1184 | IB_QP_AV | |
| 1185 | IB_QP_TIMEOUT | |
| 1186 | IB_QP_RETRY_CNT | |
| 1187 | IB_QP_RNR_RETRY | |
| 1188 | IB_QP_MAX_QP_RD_ATOMIC | |
| 1189 | IB_QP_ALT_PATH | |
| 1190 | IB_QP_ACCESS_FLAGS | |
| 1191 | IB_QP_PKEY_INDEX | |
| 1192 | IB_QP_PATH_MIG_STATE), |
| 1193 | [IB_QPT_XRC_TGT] = (IB_QP_PORT | |
| 1194 | IB_QP_AV | |
| 1195 | IB_QP_TIMEOUT | |
| 1196 | IB_QP_MAX_DEST_RD_ATOMIC | |
| 1197 | IB_QP_ALT_PATH | |
| 1198 | IB_QP_ACCESS_FLAGS | |
| 1199 | IB_QP_PKEY_INDEX | |
| 1200 | IB_QP_MIN_RNR_TIMER | |
| 1201 | IB_QP_PATH_MIG_STATE), |
Roland Dreier | 8a51866 | 2006-02-13 12:48:12 -0800 | [diff] [blame] | 1202 | [IB_QPT_SMI] = (IB_QP_PKEY_INDEX | |
| 1203 | IB_QP_QKEY), |
| 1204 | [IB_QPT_GSI] = (IB_QP_PKEY_INDEX | |
| 1205 | IB_QP_QKEY), |
| 1206 | } |
| 1207 | } |
| 1208 | }, |
| 1209 | [IB_QPS_SQE] = { |
| 1210 | [IB_QPS_RESET] = { .valid = 1 }, |
| 1211 | [IB_QPS_ERR] = { .valid = 1 }, |
| 1212 | [IB_QPS_RTS] = { |
| 1213 | .valid = 1, |
| 1214 | .opt_param = { |
| 1215 | [IB_QPT_UD] = (IB_QP_CUR_STATE | |
| 1216 | IB_QP_QKEY), |
| 1217 | [IB_QPT_UC] = (IB_QP_CUR_STATE | |
| 1218 | IB_QP_ACCESS_FLAGS), |
| 1219 | [IB_QPT_SMI] = (IB_QP_CUR_STATE | |
| 1220 | IB_QP_QKEY), |
| 1221 | [IB_QPT_GSI] = (IB_QP_CUR_STATE | |
| 1222 | IB_QP_QKEY), |
| 1223 | } |
| 1224 | } |
| 1225 | }, |
| 1226 | [IB_QPS_ERR] = { |
| 1227 | [IB_QPS_RESET] = { .valid = 1 }, |
| 1228 | [IB_QPS_ERR] = { .valid = 1 } |
| 1229 | } |
| 1230 | }; |

int ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
		       enum ib_qp_type type, enum ib_qp_attr_mask mask,
		       enum rdma_link_layer ll)
{
	enum ib_qp_attr_mask req_param, opt_param;

	if (cur_state < 0 || cur_state > IB_QPS_ERR ||
	    next_state < 0 || next_state > IB_QPS_ERR)
		return 0;

	if (mask & IB_QP_CUR_STATE &&
	    cur_state != IB_QPS_RTR && cur_state != IB_QPS_RTS &&
	    cur_state != IB_QPS_SQD && cur_state != IB_QPS_SQE)
		return 0;

	if (!qp_state_table[cur_state][next_state].valid)
		return 0;

	req_param = qp_state_table[cur_state][next_state].req_param[type];
	opt_param = qp_state_table[cur_state][next_state].opt_param[type];

	if ((mask & req_param) != req_param)
		return 0;

	if (mask & ~(req_param | opt_param | IB_QP_STATE))
		return 0;

	return 1;
}
EXPORT_SYMBOL(ib_modify_qp_is_ok);
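
/*
 * Example (illustrative sketch, not part of this file's API): validating
 * an attribute mask against qp_state_table before issuing a transition.
 * The function name is hypothetical; ib_modify_qp_is_ok() is the real
 * helper exported above.
 */
static bool __maybe_unused example_check_rc_init_to_rtr(enum ib_qp_attr_mask mask)
{
	/*
	 * For an RC QP, INIT -> RTR requires at least AV, path MTU,
	 * destination QPN, RQ PSN, responder resources and the min RNR
	 * timer, per the req_param entry in the table above.
	 */
	return ib_modify_qp_is_ok(IB_QPS_INIT, IB_QPS_RTR, IB_QPT_RC,
				  mask | IB_QP_STATE,
				  IB_LINK_LAYER_INFINIBAND) == 1;
}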

static int ib_resolve_eth_dmac(struct ib_device *device,
			       struct rdma_ah_attr *ah_attr)
{
	int ret = 0;
	struct ib_global_route *grh;

	if (!rdma_is_port_valid(device, rdma_ah_get_port_num(ah_attr)))
		return -EINVAL;

	if (ah_attr->type != RDMA_AH_ATTR_TYPE_ROCE)
		return 0;

	grh = rdma_ah_retrieve_grh(ah_attr);

	if (rdma_link_local_addr((struct in6_addr *)grh->dgid.raw)) {
		rdma_get_ll_mac((struct in6_addr *)grh->dgid.raw,
				ah_attr->roce.dmac);
		return 0;
	}
	if (rdma_is_multicast_addr((struct in6_addr *)grh->dgid.raw)) {
		if (ipv6_addr_v4mapped((struct in6_addr *)grh->dgid.raw)) {
			__be32 addr = 0;

			memcpy(&addr, grh->dgid.raw + 12, 4);
			ip_eth_mc_map(addr, (char *)ah_attr->roce.dmac);
		} else {
			ipv6_eth_mc_map((struct in6_addr *)grh->dgid.raw,
					(char *)ah_attr->roce.dmac);
		}
	} else {
		union ib_gid sgid;
		struct ib_gid_attr sgid_attr;
		int ifindex;
		int hop_limit;

		ret = ib_query_gid(device,
				   rdma_ah_get_port_num(ah_attr),
				   grh->sgid_index,
				   &sgid, &sgid_attr);

		if (ret || !sgid_attr.ndev) {
			if (!ret)
				ret = -ENXIO;
			goto out;
		}

		ifindex = sgid_attr.ndev->ifindex;

		ret = rdma_addr_find_l2_eth_by_grh(&sgid, &grh->dgid,
						   ah_attr->roce.dmac,
						   NULL, &ifindex, &hop_limit);

		dev_put(sgid_attr.ndev);

		grh->hop_limit = hop_limit;
	}
out:
	return ret;
}

/**
 * ib_modify_qp_with_udata - Modifies the attributes for the specified QP.
 * @qp: The QP to modify.
 * @attr: On input, specifies the QP attributes to modify.  On output,
 *   the current values of selected QP attributes are returned.
 * @attr_mask: A bit-mask used to specify which attributes of the QP
 *   are being modified.
 * @udata: pointer to user's input/output buffer information
 *
 * Returns 0 on success and an appropriate error code on failure.
 */
int ib_modify_qp_with_udata(struct ib_qp *qp, struct ib_qp_attr *attr,
			    int attr_mask, struct ib_udata *udata)
{
	int ret;

	if (attr_mask & IB_QP_AV) {
		ret = ib_resolve_eth_dmac(qp->device, &attr->ah_attr);
		if (ret)
			return ret;
	}
	ret = ib_security_modify_qp(qp, attr, attr_mask, udata);
	if (!ret && (attr_mask & IB_QP_PORT))
		qp->port = attr->port_num;

	return ret;
}
EXPORT_SYMBOL(ib_modify_qp_with_udata);

int ib_get_eth_speed(struct ib_device *dev, u8 port_num, u8 *speed, u8 *width)
{
	int rc;
	u32 netdev_speed;
	struct net_device *netdev;
	struct ethtool_link_ksettings lksettings;

	if (rdma_port_get_link_layer(dev, port_num) != IB_LINK_LAYER_ETHERNET)
		return -EINVAL;

	if (!dev->get_netdev)
		return -EOPNOTSUPP;

	netdev = dev->get_netdev(dev, port_num);
	if (!netdev)
		return -ENODEV;

	rtnl_lock();
	rc = __ethtool_get_link_ksettings(netdev, &lksettings);
	rtnl_unlock();

	if (!rc) {
		netdev_speed = lksettings.base.speed;
	} else {
		netdev_speed = SPEED_1000;
		pr_warn("%s speed is unknown, defaulting to %d\n", netdev->name,
			netdev_speed);
	}

	/* Drop the netdev reference only after its name is no longer used. */
	dev_put(netdev);

	if (netdev_speed <= SPEED_1000) {
		*width = IB_WIDTH_1X;
		*speed = IB_SPEED_SDR;
	} else if (netdev_speed <= SPEED_10000) {
		*width = IB_WIDTH_1X;
		*speed = IB_SPEED_FDR10;
	} else if (netdev_speed <= SPEED_20000) {
		*width = IB_WIDTH_4X;
		*speed = IB_SPEED_DDR;
	} else if (netdev_speed <= SPEED_25000) {
		*width = IB_WIDTH_1X;
		*speed = IB_SPEED_EDR;
	} else if (netdev_speed <= SPEED_40000) {
		*width = IB_WIDTH_4X;
		*speed = IB_SPEED_FDR10;
	} else {
		*width = IB_WIDTH_4X;
		*speed = IB_SPEED_EDR;
	}

	return 0;
}
EXPORT_SYMBOL(ib_get_eth_speed);
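
/*
 * Example (illustrative sketch): how a RoCE driver's query_port handler
 * might derive its reported active speed and width from the underlying
 * Ethernet link. The function name is hypothetical.
 */
static int __maybe_unused example_fill_port_speed(struct ib_device *ibdev,
						  u8 port_num,
						  struct ib_port_attr *props)
{
	return ib_get_eth_speed(ibdev, port_num, &props->active_speed,
				&props->active_width);
}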

int ib_modify_qp(struct ib_qp *qp,
		 struct ib_qp_attr *qp_attr,
		 int qp_attr_mask)
{
	return ib_modify_qp_with_udata(qp, qp_attr, qp_attr_mask, NULL);
}
EXPORT_SYMBOL(ib_modify_qp);
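
/*
 * Example (illustrative sketch): moving a freshly created RC QP from
 * RESET to INIT. The attribute set matches the req_param entry for
 * IB_QPT_RC in qp_state_table above; the function name is hypothetical.
 */
static int __maybe_unused example_rc_qp_to_init(struct ib_qp *qp,
						u16 pkey_index, u8 port_num)
{
	struct ib_qp_attr attr = {
		.qp_state	 = IB_QPS_INIT,
		.pkey_index	 = pkey_index,
		.port_num	 = port_num,
		.qp_access_flags = IB_ACCESS_REMOTE_READ |
				   IB_ACCESS_REMOTE_WRITE,
	};

	return ib_modify_qp(qp, &attr,
			    IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT |
			    IB_QP_ACCESS_FLAGS);
}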

int ib_query_qp(struct ib_qp *qp,
		struct ib_qp_attr *qp_attr,
		int qp_attr_mask,
		struct ib_qp_init_attr *qp_init_attr)
{
	return qp->device->query_qp ?
		qp->device->query_qp(qp->real_qp, qp_attr, qp_attr_mask, qp_init_attr) :
		-ENOSYS;
}
EXPORT_SYMBOL(ib_query_qp);

int ib_close_qp(struct ib_qp *qp)
{
	struct ib_qp *real_qp;
	unsigned long flags;

	real_qp = qp->real_qp;
	if (real_qp == qp)
		return -EINVAL;

	spin_lock_irqsave(&real_qp->device->event_handler_lock, flags);
	list_del(&qp->open_list);
	spin_unlock_irqrestore(&real_qp->device->event_handler_lock, flags);

	atomic_dec(&real_qp->usecnt);
	ib_close_shared_qp_security(qp->qp_sec);
	kfree(qp);

	return 0;
}
EXPORT_SYMBOL(ib_close_qp);

static int __ib_destroy_shared_qp(struct ib_qp *qp)
{
	struct ib_xrcd *xrcd;
	struct ib_qp *real_qp;
	int ret;

	real_qp = qp->real_qp;
	xrcd = real_qp->xrcd;

	mutex_lock(&xrcd->tgt_qp_mutex);
	ib_close_qp(qp);
	if (atomic_read(&real_qp->usecnt) == 0)
		list_del(&real_qp->xrcd_list);
	else
		real_qp = NULL;
	mutex_unlock(&xrcd->tgt_qp_mutex);

	if (real_qp) {
		ret = ib_destroy_qp(real_qp);
		if (!ret)
			atomic_dec(&xrcd->usecnt);
		else
			__ib_insert_xrcd_qp(xrcd, real_qp);
	}

	return 0;
}

int ib_destroy_qp(struct ib_qp *qp)
{
	struct ib_pd *pd;
	struct ib_cq *scq, *rcq;
	struct ib_srq *srq;
	struct ib_rwq_ind_table *ind_tbl;
	struct ib_qp_security *sec;
	int ret;

	WARN_ON_ONCE(qp->mrs_used > 0);

	if (atomic_read(&qp->usecnt))
		return -EBUSY;

	if (qp->real_qp != qp)
		return __ib_destroy_shared_qp(qp);

	pd	= qp->pd;
	scq	= qp->send_cq;
	rcq	= qp->recv_cq;
	srq	= qp->srq;
	ind_tbl = qp->rwq_ind_tbl;
	sec	= qp->qp_sec;
	if (sec)
		ib_destroy_qp_security_begin(sec);

	if (!qp->uobject)
		rdma_rw_cleanup_mrs(qp);

	ret = qp->device->destroy_qp(qp);
	if (!ret) {
		if (pd)
			atomic_dec(&pd->usecnt);
		if (scq)
			atomic_dec(&scq->usecnt);
		if (rcq)
			atomic_dec(&rcq->usecnt);
		if (srq)
			atomic_dec(&srq->usecnt);
		if (ind_tbl)
			atomic_dec(&ind_tbl->usecnt);
		if (sec)
			ib_destroy_qp_security_end(sec);
	} else {
		if (sec)
			ib_destroy_qp_security_abort(sec);
	}

	return ret;
}
EXPORT_SYMBOL(ib_destroy_qp);
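
/*
 * Example (illustrative sketch): releasing a shared XRC target QP handle.
 * A consumer that obtained its handle via ib_open_qp() only drops its
 * reference with ib_close_qp(); calling ib_destroy_qp() on such a handle
 * is routed through __ib_destroy_shared_qp() and destroys the real QP
 * only once the last opener is gone. The function name is hypothetical.
 */
static void __maybe_unused example_put_shared_qp(struct ib_qp *qp)
{
	if (qp->real_qp != qp)
		ib_close_qp(qp);	/* drops this opener's reference */
	else
		ib_destroy_qp(qp);	/* owner handle, destroys the QP */
}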

/* Completion queues */

struct ib_cq *ib_create_cq(struct ib_device *device,
			   ib_comp_handler comp_handler,
			   void (*event_handler)(struct ib_event *, void *),
			   void *cq_context,
			   const struct ib_cq_init_attr *cq_attr)
{
	struct ib_cq *cq;

	cq = device->create_cq(device, cq_attr, NULL, NULL);

	if (!IS_ERR(cq)) {
		cq->device        = device;
		cq->uobject       = NULL;
		cq->comp_handler  = comp_handler;
		cq->event_handler = event_handler;
		cq->cq_context    = cq_context;
		atomic_set(&cq->usecnt, 0);
	}

	return cq;
}
EXPORT_SYMBOL(ib_create_cq);

int rdma_set_cq_moderation(struct ib_cq *cq, u16 cq_count, u16 cq_period)
{
	return cq->device->modify_cq ?
		cq->device->modify_cq(cq, cq_count, cq_period) : -ENOSYS;
}
EXPORT_SYMBOL(rdma_set_cq_moderation);
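
/*
 * Example (illustrative sketch): creating a CQ and then asking the
 * provider to coalesce interrupts. The handler, context, depth and
 * moderation values shown here are hypothetical; providers lacking
 * modify_cq return -ENOSYS from rdma_set_cq_moderation().
 */
static __maybe_unused struct ib_cq *example_create_moderated_cq(
		struct ib_device *device, ib_comp_handler handler, void *ctx)
{
	struct ib_cq_init_attr cq_attr = {
		.cqe	     = 256,	/* requested CQ depth */
		.comp_vector = 0,
	};
	struct ib_cq *cq;

	cq = ib_create_cq(device, handler, NULL, ctx, &cq_attr);
	if (IS_ERR(cq))
		return cq;

	/* Fire an event after 16 completions or 64 usecs, best effort. */
	rdma_set_cq_moderation(cq, 16, 64);
	return cq;
}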

int ib_destroy_cq(struct ib_cq *cq)
{
	if (atomic_read(&cq->usecnt))
		return -EBUSY;

	return cq->device->destroy_cq(cq);
}
EXPORT_SYMBOL(ib_destroy_cq);

int ib_resize_cq(struct ib_cq *cq, int cqe)
{
	return cq->device->resize_cq ?
		cq->device->resize_cq(cq, cqe, NULL) : -ENOSYS;
}
EXPORT_SYMBOL(ib_resize_cq);

/* Memory regions */

int ib_dereg_mr(struct ib_mr *mr)
{
	struct ib_pd *pd = mr->pd;
	int ret;

	ret = mr->device->dereg_mr(mr);
	if (!ret)
		atomic_dec(&pd->usecnt);

	return ret;
}
EXPORT_SYMBOL(ib_dereg_mr);

/**
 * ib_alloc_mr() - Allocates a memory region
 * @pd:            protection domain associated with the region
 * @mr_type:       memory region type
 * @max_num_sg:    maximum sg entries available for registration.
 *
 * Notes:
 * Memory registration page/sg lists must not exceed max_num_sg.
 * For mr_type IB_MR_TYPE_MEM_REG, the total length cannot exceed
 * max_num_sg * used_page_size.
 */
struct ib_mr *ib_alloc_mr(struct ib_pd *pd,
			  enum ib_mr_type mr_type,
			  u32 max_num_sg)
{
	struct ib_mr *mr;

	if (!pd->device->alloc_mr)
		return ERR_PTR(-ENOSYS);

	mr = pd->device->alloc_mr(pd, mr_type, max_num_sg);
	if (!IS_ERR(mr)) {
		mr->device  = pd->device;
		mr->pd      = pd;
		mr->uobject = NULL;
		atomic_inc(&pd->usecnt);
		mr->need_inval = false;
	}

	return mr;
}
EXPORT_SYMBOL(ib_alloc_mr);
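
/*
 * Example (illustrative sketch): allocating a fast-registration MR sized
 * for up to 32 scatter/gather pages and releasing it again. The names
 * are hypothetical; see ib_map_mr_sg() below for populating the MR.
 */
static int __maybe_unused example_mr_lifecycle(struct ib_pd *pd)
{
	struct ib_mr *mr;

	mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, 32);
	if (IS_ERR(mr))
		return PTR_ERR(mr);

	/* ... dma-map a buffer, ib_map_mr_sg(), post a REG_MR WR ... */

	return ib_dereg_mr(mr);
}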

/* "Fast" memory regions */

struct ib_fmr *ib_alloc_fmr(struct ib_pd *pd,
			    int mr_access_flags,
			    struct ib_fmr_attr *fmr_attr)
{
	struct ib_fmr *fmr;

	if (!pd->device->alloc_fmr)
		return ERR_PTR(-ENOSYS);

	fmr = pd->device->alloc_fmr(pd, mr_access_flags, fmr_attr);
	if (!IS_ERR(fmr)) {
		fmr->device = pd->device;
		fmr->pd     = pd;
		atomic_inc(&pd->usecnt);
	}

	return fmr;
}
EXPORT_SYMBOL(ib_alloc_fmr);

int ib_unmap_fmr(struct list_head *fmr_list)
{
	struct ib_fmr *fmr;

	if (list_empty(fmr_list))
		return 0;

	fmr = list_entry(fmr_list->next, struct ib_fmr, list);
	return fmr->device->unmap_fmr(fmr_list);
}
EXPORT_SYMBOL(ib_unmap_fmr);

int ib_dealloc_fmr(struct ib_fmr *fmr)
{
	struct ib_pd *pd;
	int ret;

	pd = fmr->pd;
	ret = fmr->device->dealloc_fmr(fmr);
	if (!ret)
		atomic_dec(&pd->usecnt);

	return ret;
}
EXPORT_SYMBOL(ib_dealloc_fmr);
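
/*
 * Example (illustrative sketch): the classic FMR pattern of allocating a
 * pool entry, mapping it, and batching unmaps. All names and sizes are
 * hypothetical; new code should prefer ib_alloc_mr()/ib_map_mr_sg().
 */
static int __maybe_unused example_fmr_usage(struct ib_pd *pd,
					    u64 *page_list, int npages,
					    u64 iova)
{
	struct ib_fmr_attr fmr_attr = {
		.max_pages  = 64,
		.max_maps   = 32,
		.page_shift = PAGE_SHIFT,
	};
	struct ib_fmr *fmr;
	LIST_HEAD(fmr_list);
	int ret, err;

	fmr = ib_alloc_fmr(pd, IB_ACCESS_LOCAL_WRITE, &fmr_attr);
	if (IS_ERR(fmr))
		return PTR_ERR(fmr);

	ret = ib_map_phys_fmr(fmr, page_list, npages, iova);
	if (!ret) {
		/* ... use fmr->lkey / fmr->rkey, then batch the unmap ... */
		list_add_tail(&fmr->list, &fmr_list);
		ret = ib_unmap_fmr(&fmr_list);
	}

	err = ib_dealloc_fmr(fmr);
	return ret ? ret : err;
}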

/* Multicast groups */

static bool is_valid_mcast_lid(struct ib_qp *qp, u16 lid)
{
	struct ib_qp_init_attr init_attr = {};
	struct ib_qp_attr attr = {};
	int num_eth_ports = 0;
	int port;

	/* If QP state >= init, it is assigned to a port and we can check this
	 * port only.
	 */
	if (!ib_query_qp(qp, &attr, IB_QP_STATE | IB_QP_PORT, &init_attr)) {
		if (attr.qp_state >= IB_QPS_INIT) {
			if (rdma_port_get_link_layer(qp->device, attr.port_num) !=
			    IB_LINK_LAYER_INFINIBAND)
				return true;
			goto lid_check;
		}
	}

	/* Can't get a quick answer, iterate over all ports.
	 * Port numbering is 1-based for channel adapters.
	 */
	for (port = rdma_start_port(qp->device);
	     port <= rdma_end_port(qp->device); port++)
		if (rdma_port_get_link_layer(qp->device, port) !=
		    IB_LINK_LAYER_INFINIBAND)
			num_eth_ports++;

	/* If we have at least one Ethernet port, the RoCE annex declares that
	 * the multicast LID should be ignored. We can't tell at this step if
	 * the QP belongs to an IB or Ethernet port.
	 */
	if (num_eth_ports)
		return true;

	/* If all the ports are IB, we can check according to IB spec. */
lid_check:
	return !(lid < be16_to_cpu(IB_MULTICAST_LID_BASE) ||
		 lid == be16_to_cpu(IB_LID_PERMISSIVE));
}

int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
{
	int ret;

	if (!qp->device->attach_mcast)
		return -ENOSYS;

	if (!rdma_is_multicast_addr((struct in6_addr *)gid->raw) ||
	    qp->qp_type != IB_QPT_UD || !is_valid_mcast_lid(qp, lid))
		return -EINVAL;

	ret = qp->device->attach_mcast(qp, gid, lid);
	if (!ret)
		atomic_inc(&qp->usecnt);
	return ret;
}
EXPORT_SYMBOL(ib_attach_mcast);

int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
{
	int ret;

	if (!qp->device->detach_mcast)
		return -ENOSYS;

	if (!rdma_is_multicast_addr((struct in6_addr *)gid->raw) ||
	    qp->qp_type != IB_QPT_UD || !is_valid_mcast_lid(qp, lid))
		return -EINVAL;

	ret = qp->device->detach_mcast(qp, gid, lid);
	if (!ret)
		atomic_dec(&qp->usecnt);
	return ret;
}
EXPORT_SYMBOL(ib_detach_mcast);
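
/*
 * Example (illustrative sketch): joining and leaving a multicast group on
 * a UD QP. The GID/LID pair would normally come from an SA join; here the
 * hypothetical caller passes them in.
 */
static int __maybe_unused example_mcast_join_leave(struct ib_qp *qp,
						   union ib_gid *mgid,
						   u16 mlid)
{
	int ret;

	ret = ib_attach_mcast(qp, mgid, mlid);
	if (ret)
		return ret;

	/* ... receive multicast traffic on the QP ... */

	return ib_detach_mcast(qp, mgid, mlid);
}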

struct ib_xrcd *ib_alloc_xrcd(struct ib_device *device)
{
	struct ib_xrcd *xrcd;

	if (!device->alloc_xrcd)
		return ERR_PTR(-ENOSYS);

	xrcd = device->alloc_xrcd(device, NULL, NULL);
	if (!IS_ERR(xrcd)) {
		xrcd->device = device;
		xrcd->inode = NULL;
		atomic_set(&xrcd->usecnt, 0);
		mutex_init(&xrcd->tgt_qp_mutex);
		INIT_LIST_HEAD(&xrcd->tgt_qp_list);
	}

	return xrcd;
}
EXPORT_SYMBOL(ib_alloc_xrcd);

int ib_dealloc_xrcd(struct ib_xrcd *xrcd)
{
	struct ib_qp *qp;
	int ret;

	if (atomic_read(&xrcd->usecnt))
		return -EBUSY;

	while (!list_empty(&xrcd->tgt_qp_list)) {
		qp = list_entry(xrcd->tgt_qp_list.next, struct ib_qp, xrcd_list);
		ret = ib_destroy_qp(qp);
		if (ret)
			return ret;
	}

	return xrcd->device->dealloc_xrcd(xrcd);
}
EXPORT_SYMBOL(ib_dealloc_xrcd);
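
/*
 * Example (illustrative sketch): allocating an XRC domain and creating an
 * XRC target QP inside it. ib_create_qp() lives earlier in this file;
 * CQ and event-handler wiring are omitted, and the names are hypothetical.
 */
static __maybe_unused struct ib_qp *example_create_xrc_tgt_qp(
		struct ib_device *device, struct ib_pd *pd)
{
	struct ib_qp_init_attr init_attr = {};
	struct ib_xrcd *xrcd;

	xrcd = ib_alloc_xrcd(device);
	if (IS_ERR(xrcd))
		return ERR_CAST(xrcd);

	init_attr.qp_type = IB_QPT_XRC_TGT;
	init_attr.xrcd	  = xrcd;

	return ib_create_qp(pd, &init_attr);
}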

/**
 * ib_create_wq - Creates a WQ associated with the specified protection
 * domain.
 * @pd: The protection domain associated with the WQ.
 * @wq_attr: A list of initial attributes required to create the
 * WQ. If WQ creation succeeds, then the attributes are updated to
 * the actual capabilities of the created WQ.
 *
 * wq_attr->max_wr and wq_attr->max_sge determine
 * the requested size of the WQ, and are set to the actual values allocated
 * on return.
 * If ib_create_wq() succeeds, then max_wr and max_sge will always be
 * at least as large as the requested values.
 */
struct ib_wq *ib_create_wq(struct ib_pd *pd,
			   struct ib_wq_init_attr *wq_attr)
{
	struct ib_wq *wq;

	if (!pd->device->create_wq)
		return ERR_PTR(-ENOSYS);

	wq = pd->device->create_wq(pd, wq_attr, NULL);
	if (!IS_ERR(wq)) {
		wq->event_handler = wq_attr->event_handler;
		wq->wq_context = wq_attr->wq_context;
		wq->wq_type = wq_attr->wq_type;
		wq->cq = wq_attr->cq;
		wq->device = pd->device;
		wq->pd = pd;
		wq->uobject = NULL;
		atomic_inc(&pd->usecnt);
		atomic_inc(&wq_attr->cq->usecnt);
		atomic_set(&wq->usecnt, 0);
	}
	return wq;
}
EXPORT_SYMBOL(ib_create_wq);

/**
 * ib_destroy_wq - Destroys the specified WQ.
 * @wq: The WQ to destroy.
 */
int ib_destroy_wq(struct ib_wq *wq)
{
	int err;
	struct ib_cq *cq = wq->cq;
	struct ib_pd *pd = wq->pd;

	if (atomic_read(&wq->usecnt))
		return -EBUSY;

	err = wq->device->destroy_wq(wq);
	if (!err) {
		atomic_dec(&pd->usecnt);
		atomic_dec(&cq->usecnt);
	}
	return err;
}
EXPORT_SYMBOL(ib_destroy_wq);

/**
 * ib_modify_wq - Modifies the specified WQ.
 * @wq: The WQ to modify.
 * @wq_attr: On input, specifies the WQ attributes to modify.
 *   On output, the current values of selected WQ attributes are returned.
 * @wq_attr_mask: A bit-mask used to specify which attributes of the WQ
 *   are being modified.
 */
int ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr,
		 u32 wq_attr_mask)
{
	int err;

	if (!wq->device->modify_wq)
		return -ENOSYS;

	err = wq->device->modify_wq(wq, wq_attr, wq_attr_mask, NULL);
	return err;
}
EXPORT_SYMBOL(ib_modify_wq);
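
/*
 * Example (illustrative sketch): creating a receive WQ and moving it to
 * the ready state so it can accept posted receives. The sizes, the CQ
 * and the function name are hypothetical.
 */
static __maybe_unused struct ib_wq *example_create_ready_rq_wq(
		struct ib_pd *pd, struct ib_cq *cq)
{
	struct ib_wq_init_attr init_attr = {
		.wq_type = IB_WQT_RQ,
		.max_wr	 = 128,
		.max_sge = 1,
		.cq	 = cq,
	};
	struct ib_wq_attr wq_attr = { .wq_state = IB_WQS_RDY };
	struct ib_wq *wq;
	int err;

	wq = ib_create_wq(pd, &init_attr);
	if (IS_ERR(wq))
		return wq;

	err = ib_modify_wq(wq, &wq_attr, IB_WQ_STATE);
	if (err) {
		ib_destroy_wq(wq);
		return ERR_PTR(err);
	}
	return wq;
}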

/*
 * ib_create_rwq_ind_table - Creates a RQ Indirection Table.
 * @device: The device on which to create the rwq indirection table.
 * @init_attr: A list of initial attributes required to
 * create the Indirection Table.
 *
 * Note: The lifetime of init_attr->ind_tbl must not be shorter than that
 * of the created ib_rwq_ind_table object; the caller is responsible
 * for its memory allocation/free.
 */
struct ib_rwq_ind_table *ib_create_rwq_ind_table(struct ib_device *device,
						 struct ib_rwq_ind_table_init_attr *init_attr)
{
	struct ib_rwq_ind_table *rwq_ind_table;
	int i;
	u32 table_size;

	if (!device->create_rwq_ind_table)
		return ERR_PTR(-ENOSYS);

	table_size = (1 << init_attr->log_ind_tbl_size);
	rwq_ind_table = device->create_rwq_ind_table(device,
						     init_attr, NULL);
	if (IS_ERR(rwq_ind_table))
		return rwq_ind_table;

	rwq_ind_table->ind_tbl = init_attr->ind_tbl;
	rwq_ind_table->log_ind_tbl_size = init_attr->log_ind_tbl_size;
	rwq_ind_table->device = device;
	rwq_ind_table->uobject = NULL;
	atomic_set(&rwq_ind_table->usecnt, 0);

	for (i = 0; i < table_size; i++)
		atomic_inc(&rwq_ind_table->ind_tbl[i]->usecnt);

	return rwq_ind_table;
}
EXPORT_SYMBOL(ib_create_rwq_ind_table);

/*
 * ib_destroy_rwq_ind_table - Destroys the specified Indirection Table.
 * @rwq_ind_table: The Indirection Table to destroy.
 */
int ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *rwq_ind_table)
{
	int err, i;
	u32 table_size = (1 << rwq_ind_table->log_ind_tbl_size);
	struct ib_wq **ind_tbl = rwq_ind_table->ind_tbl;

	if (atomic_read(&rwq_ind_table->usecnt))
		return -EBUSY;

	err = rwq_ind_table->device->destroy_rwq_ind_table(rwq_ind_table);
	if (!err) {
		for (i = 0; i < table_size; i++)
			atomic_dec(&ind_tbl[i]->usecnt);
	}

	return err;
}
EXPORT_SYMBOL(ib_destroy_rwq_ind_table);
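
/*
 * Example (illustrative sketch): building a 4-entry RSS indirection table
 * from ready WQs created as in the WQ example above. The wqs[] array is
 * assumed to hold 1 << log_ind_tbl_size entries and must outlive the
 * table; the function name is hypothetical.
 */
static __maybe_unused struct ib_rwq_ind_table *example_create_rss_table(
		struct ib_device *device, struct ib_wq **wqs)
{
	struct ib_rwq_ind_table_init_attr init_attr = {
		.log_ind_tbl_size = 2,	/* 1 << 2 == 4 WQs */
		.ind_tbl	  = wqs,
	};

	return ib_create_rwq_ind_table(device, &init_attr);
}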

struct ib_flow *ib_create_flow(struct ib_qp *qp,
			       struct ib_flow_attr *flow_attr,
			       int domain)
{
	struct ib_flow *flow_id;

	if (!qp->device->create_flow)
		return ERR_PTR(-ENOSYS);

	flow_id = qp->device->create_flow(qp, flow_attr, domain);
	if (!IS_ERR(flow_id)) {
		atomic_inc(&qp->usecnt);
		flow_id->qp = qp;
	}
	return flow_id;
}
EXPORT_SYMBOL(ib_create_flow);

int ib_destroy_flow(struct ib_flow *flow_id)
{
	int err;
	struct ib_qp *qp = flow_id->qp;

	err = qp->device->destroy_flow(flow_id);
	if (!err)
		atomic_dec(&qp->usecnt);
	return err;
}
EXPORT_SYMBOL(ib_destroy_flow);
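
/*
 * Example (illustrative sketch): steering all traffic for one destination
 * MAC to a QP. The anonymous container packs an Ethernet spec directly
 * after the attr header, which is the layout create_flow consumers use;
 * the names, port number and domain choice are hypothetical.
 */
static __maybe_unused struct ib_flow *example_steer_dmac(struct ib_qp *qp,
							 const u8 *dmac)
{
	struct {
		struct ib_flow_attr	attr;
		struct ib_flow_spec_eth eth;
	} rule = {
		.attr = {
			.type	      = IB_FLOW_ATTR_NORMAL,
			.size	      = sizeof(rule),
			.num_of_specs = 1,
			.port	      = 1,
		},
		.eth = {
			.type = IB_FLOW_SPEC_ETH,
			.size = sizeof(rule.eth),
		},
	};

	memcpy(rule.eth.val.dst_mac, dmac, ETH_ALEN);
	memset(rule.eth.mask.dst_mac, 0xff, ETH_ALEN);

	return ib_create_flow(qp, &rule.attr, IB_FLOW_DOMAIN_USER);
}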

int ib_check_mr_status(struct ib_mr *mr, u32 check_mask,
		       struct ib_mr_status *mr_status)
{
	return mr->device->check_mr_status ?
		mr->device->check_mr_status(mr, check_mask, mr_status) : -ENOSYS;
}
EXPORT_SYMBOL(ib_check_mr_status);
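
/*
 * Example (illustrative sketch): checking a signature-enabled MR for
 * protection-information errors after I/O completes. The function name
 * is hypothetical.
 */
static int __maybe_unused example_check_sig_mr(struct ib_mr *mr)
{
	struct ib_mr_status mr_status;
	int ret;

	ret = ib_check_mr_status(mr, IB_MR_CHECK_SIG_STATUS, &mr_status);
	if (ret)
		return ret;

	if (mr_status.fail_status & IB_MR_CHECK_SIG_STATUS)
		pr_err("signature error at offset %llu\n",
		       mr_status.sig_err.sig_err_offset);
	return 0;
}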

int ib_set_vf_link_state(struct ib_device *device, int vf, u8 port,
			 int state)
{
	if (!device->set_vf_link_state)
		return -ENOSYS;

	return device->set_vf_link_state(device, vf, port, state);
}
EXPORT_SYMBOL(ib_set_vf_link_state);

int ib_get_vf_config(struct ib_device *device, int vf, u8 port,
		     struct ifla_vf_info *info)
{
	if (!device->get_vf_config)
		return -ENOSYS;

	return device->get_vf_config(device, vf, port, info);
}
EXPORT_SYMBOL(ib_get_vf_config);

int ib_get_vf_stats(struct ib_device *device, int vf, u8 port,
		    struct ifla_vf_stats *stats)
{
	if (!device->get_vf_stats)
		return -ENOSYS;

	return device->get_vf_stats(device, vf, port, stats);
}
EXPORT_SYMBOL(ib_get_vf_stats);

int ib_set_vf_guid(struct ib_device *device, int vf, u8 port, u64 guid,
		   int type)
{
	if (!device->set_vf_guid)
		return -ENOSYS;

	return device->set_vf_guid(device, vf, port, guid, type);
}
EXPORT_SYMBOL(ib_set_vf_guid);

/**
 * ib_map_mr_sg() - Map the largest prefix of a dma mapped SG list
 *     and set it as the memory region.
 * @mr:            memory region
 * @sg:            dma mapped scatterlist
 * @sg_nents:      number of entries in sg
 * @sg_offset:     offset in bytes into sg
 * @page_size:     page vector desired page size
 *
 * Constraints:
 * - The first sg element is allowed to have an offset.
 * - Each sg element must either be aligned to page_size or virtually
 *   contiguous to the previous element. In case an sg element has a
 *   non-contiguous offset, the mapping prefix will not include it.
 * - The last sg element is allowed to have length less than page_size.
 * - If sg_nents total byte length exceeds the mr max_num_sg * page_size,
 *   then only max_num_sg entries will be mapped.
 * - If the MR was allocated with type IB_MR_TYPE_SG_GAPS, none of these
 *   constraints holds and the page_size argument is ignored.
 *
 * Returns the number of sg elements that were mapped to the memory region.
 *
 * After this completes successfully, the memory region
 * is ready for registration.
 */
int ib_map_mr_sg(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
		 unsigned int *sg_offset, unsigned int page_size)
{
	if (unlikely(!mr->device->map_mr_sg))
		return -ENOSYS;

	mr->page_size = page_size;

	return mr->device->map_mr_sg(mr, sg, sg_nents, sg_offset);
}
EXPORT_SYMBOL(ib_map_mr_sg);
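
/*
 * Example (illustrative sketch): DMA-mapping a buffer's scatterlist and
 * loading it into an MR allocated with ib_alloc_mr(). The page size and
 * names are hypothetical; a real consumer would follow this with an
 * IB_WR_REG_MR work request before using mr->lkey/rkey.
 */
static int __maybe_unused example_map_mr(struct ib_device *dev,
					 struct ib_mr *mr,
					 struct scatterlist *sgl, int nents)
{
	int mapped, n;

	mapped = ib_dma_map_sg(dev, sgl, nents, DMA_BIDIRECTIONAL);
	if (!mapped)
		return -ENOMEM;

	n = ib_map_mr_sg(mr, sgl, mapped, NULL, PAGE_SIZE);
	if (n < mapped) {
		/* Partial mapping (gap hit or MR too small); unwind. */
		ib_dma_unmap_sg(dev, sgl, nents, DMA_BIDIRECTIONAL);
		return n < 0 ? n : -EINVAL;
	}
	return 0;
}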

/**
 * ib_sg_to_pages() - Convert the largest prefix of a sg list
 *     to a page vector
 * @mr:            memory region
 * @sgl:           dma mapped scatterlist
 * @sg_nents:      number of entries in sg
 * @sg_offset_p:   IN:  start offset in bytes into sg
 *                 OUT: offset in bytes for element n of the sg of the first
 *                      byte that has not been processed where n is the return
 *                      value of this function.
 * @set_page:      driver page assignment function pointer
 *
 * Core service helper for drivers to convert the largest
 * prefix of given sg list to a page vector. The sg list
 * prefix converted is the prefix that meets the requirements
 * of ib_map_mr_sg.
 *
 * Returns the number of sg elements that were assigned to
 * a page vector.
 */
int ib_sg_to_pages(struct ib_mr *mr, struct scatterlist *sgl, int sg_nents,
		   unsigned int *sg_offset_p, int (*set_page)(struct ib_mr *, u64))
{
	struct scatterlist *sg;
	u64 last_end_dma_addr = 0;
	unsigned int sg_offset = sg_offset_p ? *sg_offset_p : 0;
	unsigned int last_page_off = 0;
	u64 page_mask = ~((u64)mr->page_size - 1);
	int i, ret;

	if (unlikely(sg_nents <= 0 || sg_offset > sg_dma_len(&sgl[0])))
		return -EINVAL;

	mr->iova = sg_dma_address(&sgl[0]) + sg_offset;
	mr->length = 0;

	for_each_sg(sgl, sg, sg_nents, i) {
		u64 dma_addr = sg_dma_address(sg) + sg_offset;
		u64 prev_addr = dma_addr;
		unsigned int dma_len = sg_dma_len(sg) - sg_offset;
		u64 end_dma_addr = dma_addr + dma_len;
		u64 page_addr = dma_addr & page_mask;

		/*
		 * For the second and later elements, check whether either the
		 * end of element i-1 or the start of element i is not aligned
		 * on a page boundary.
		 */
		if (i && (last_page_off != 0 || page_addr != dma_addr)) {
			/* Stop mapping if there is a gap. */
			if (last_end_dma_addr != dma_addr)
				break;

			/*
			 * Coalesce this element with the last. If it is small
			 * enough just update mr->length. Otherwise start
			 * mapping from the next page.
			 */
			goto next_page;
		}

		do {
			ret = set_page(mr, page_addr);
			if (unlikely(ret < 0)) {
				sg_offset = prev_addr - sg_dma_address(sg);
				mr->length += prev_addr - dma_addr;
				if (sg_offset_p)
					*sg_offset_p = sg_offset;
				return i || sg_offset ? i : ret;
			}
			prev_addr = page_addr;
next_page:
			page_addr += mr->page_size;
		} while (page_addr < end_dma_addr);

		mr->length += dma_len;
		last_end_dma_addr = end_dma_addr;
		last_page_off = end_dma_addr & ~page_mask;

		sg_offset = 0;
	}

	if (sg_offset_p)
		*sg_offset_p = 0;
	return i;
}
EXPORT_SYMBOL(ib_sg_to_pages);
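
/*
 * Example (illustrative sketch): how a driver might implement its
 * map_mr_sg hook on top of ib_sg_to_pages(). "struct example_mr" and its
 * fields are hypothetical stand-ins for a driver's private MR state; the
 * pages buffer is assumed to be max_pages entries long.
 */
struct example_mr {
	struct ib_mr	ibmr;
	u64		*pages;		/* page-list buffer */
	int		npages;
	int		max_pages;
};

static int example_set_page(struct ib_mr *ibmr, u64 addr)
{
	struct example_mr *emr = container_of(ibmr, struct example_mr, ibmr);

	if (emr->npages == emr->max_pages)
		return -ENOMEM;

	emr->pages[emr->npages++] = addr;
	return 0;
}

static int __maybe_unused example_map_mr_sg(struct ib_mr *ibmr,
					    struct scatterlist *sg,
					    int sg_nents,
					    unsigned int *sg_offset)
{
	struct example_mr *emr = container_of(ibmr, struct example_mr, ibmr);

	emr->npages = 0;
	return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset,
			      example_set_page);
}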

struct ib_drain_cqe {
	struct ib_cqe cqe;
	struct completion done;
};

static void ib_drain_qp_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_drain_cqe *cqe = container_of(wc->wr_cqe, struct ib_drain_cqe,
						cqe);

	complete(&cqe->done);
}

/*
 * Post a WR and block until its completion is reaped for the SQ.
 */
static void __ib_drain_sq(struct ib_qp *qp)
{
	struct ib_cq *cq = qp->send_cq;
	struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
	struct ib_drain_cqe sdrain;
	struct ib_send_wr swr = {}, *bad_swr;
	int ret;

	swr.wr_cqe = &sdrain.cqe;
	sdrain.cqe.done = ib_drain_qp_done;
	init_completion(&sdrain.done);

	ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
	if (ret) {
		WARN_ONCE(ret, "failed to drain send queue: %d\n", ret);
		return;
	}

	ret = ib_post_send(qp, &swr, &bad_swr);
	if (ret) {
		WARN_ONCE(ret, "failed to drain send queue: %d\n", ret);
		return;
	}

	if (cq->poll_ctx == IB_POLL_DIRECT)
		while (wait_for_completion_timeout(&sdrain.done, HZ / 10) <= 0)
			ib_process_cq_direct(cq, -1);
	else
		wait_for_completion(&sdrain.done);
}

/*
 * Post a WR and block until its completion is reaped for the RQ.
 */
static void __ib_drain_rq(struct ib_qp *qp)
{
	struct ib_cq *cq = qp->recv_cq;
	struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
	struct ib_drain_cqe rdrain;
	struct ib_recv_wr rwr = {}, *bad_rwr;
	int ret;

	rwr.wr_cqe = &rdrain.cqe;
	rdrain.cqe.done = ib_drain_qp_done;
	init_completion(&rdrain.done);

	ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
	if (ret) {
		WARN_ONCE(ret, "failed to drain recv queue: %d\n", ret);
		return;
	}

	ret = ib_post_recv(qp, &rwr, &bad_rwr);
	if (ret) {
		WARN_ONCE(ret, "failed to drain recv queue: %d\n", ret);
		return;
	}

	if (cq->poll_ctx == IB_POLL_DIRECT)
		while (wait_for_completion_timeout(&rdrain.done, HZ / 10) <= 0)
			ib_process_cq_direct(cq, -1);
	else
		wait_for_completion(&rdrain.done);
}

/**
 * ib_drain_sq() - Block until all SQ CQEs have been consumed by the
 *		   application.
 * @qp:            queue pair to drain
 *
 * If the device has a provider-specific drain function, then
 * call that.  Otherwise call the generic drain function
 * __ib_drain_sq().
 *
 * The caller must:
 *
 * ensure there is room in the CQ and SQ for the drain work request and
 * completion.
 *
 * allocate the CQ using ib_alloc_cq().
 *
 * ensure that there are no other contexts that are posting WRs concurrently.
 * Otherwise the drain is not guaranteed.
 */
void ib_drain_sq(struct ib_qp *qp)
{
	if (qp->device->drain_sq)
		qp->device->drain_sq(qp);
	else
		__ib_drain_sq(qp);
}
EXPORT_SYMBOL(ib_drain_sq);

/**
 * ib_drain_rq() - Block until all RQ CQEs have been consumed by the
 *		   application.
 * @qp:            queue pair to drain
 *
 * If the device has a provider-specific drain function, then
 * call that.  Otherwise call the generic drain function
 * __ib_drain_rq().
 *
 * The caller must:
 *
 * ensure there is room in the CQ and RQ for the drain work request and
 * completion.
 *
 * allocate the CQ using ib_alloc_cq().
 *
 * ensure that there are no other contexts that are posting WRs concurrently.
 * Otherwise the drain is not guaranteed.
 */
void ib_drain_rq(struct ib_qp *qp)
{
	if (qp->device->drain_rq)
		qp->device->drain_rq(qp);
	else
		__ib_drain_rq(qp);
}
EXPORT_SYMBOL(ib_drain_rq);

/**
 * ib_drain_qp() - Block until all CQEs have been consumed by the
 *		   application on both the RQ and SQ.
 * @qp:            queue pair to drain
 *
 * The caller must:
 *
 * ensure there is room in the CQ(s), SQ, and RQ for drain work requests
 * and completions.
 *
 * allocate the CQs using ib_alloc_cq().
 *
 * ensure that there are no other contexts that are posting WRs concurrently.
 * Otherwise the drain is not guaranteed.
 */
void ib_drain_qp(struct ib_qp *qp)
{
	ib_drain_sq(qp);
	if (!qp->srq)
		ib_drain_rq(qp);
}
EXPORT_SYMBOL(ib_drain_qp);
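
/*
 * Example (illustrative sketch): flushing a connected QP before tearing
 * it down, so no completion handlers run against freed state. Assumes
 * the CQs were allocated with ib_alloc_cq() and no other context is
 * still posting work requests; the function name is hypothetical.
 */
static void __maybe_unused example_flush_and_destroy(struct ib_qp *qp)
{
	/* Moves the QP to ERR internally and waits for the flush CQEs. */
	ib_drain_qp(qp);
	ib_destroy_qp(qp);
}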