/*
 * Copyright (c) 2004 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2004 Infinicon Corporation. All rights reserved.
 * Copyright (c) 2004 Intel Corporation. All rights reserved.
 * Copyright (c) 2004 Topspin Corporation. All rights reserved.
 * Copyright (c) 2004 Voltaire Corporation. All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005, 2006 Cisco Systems. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <net/addrconf.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_cache.h>
#include <rdma/ib_addr.h>
#include <rdma/rw.h>

#include "core_priv.h"

static const char * const ib_events[] = {
	[IB_EVENT_CQ_ERR]		= "CQ error",
	[IB_EVENT_QP_FATAL]		= "QP fatal error",
	[IB_EVENT_QP_REQ_ERR]		= "QP request error",
	[IB_EVENT_QP_ACCESS_ERR]	= "QP access error",
	[IB_EVENT_COMM_EST]		= "communication established",
	[IB_EVENT_SQ_DRAINED]		= "send queue drained",
	[IB_EVENT_PATH_MIG]		= "path migration successful",
	[IB_EVENT_PATH_MIG_ERR]		= "path migration error",
	[IB_EVENT_DEVICE_FATAL]		= "device fatal error",
	[IB_EVENT_PORT_ACTIVE]		= "port active",
	[IB_EVENT_PORT_ERR]		= "port error",
	[IB_EVENT_LID_CHANGE]		= "LID change",
	[IB_EVENT_PKEY_CHANGE]		= "P_key change",
	[IB_EVENT_SM_CHANGE]		= "SM change",
	[IB_EVENT_SRQ_ERR]		= "SRQ error",
	[IB_EVENT_SRQ_LIMIT_REACHED]	= "SRQ limit reached",
	[IB_EVENT_QP_LAST_WQE_REACHED]	= "last WQE reached",
	[IB_EVENT_CLIENT_REREGISTER]	= "client reregister",
	[IB_EVENT_GID_CHANGE]		= "GID changed",
};

const char *__attribute_const__ ib_event_msg(enum ib_event_type event)
{
	size_t index = event;

	return (index < ARRAY_SIZE(ib_events) && ib_events[index]) ?
			ib_events[index] : "unrecognized event";
}
EXPORT_SYMBOL(ib_event_msg);

static const char * const wc_statuses[] = {
	[IB_WC_SUCCESS]			= "success",
	[IB_WC_LOC_LEN_ERR]		= "local length error",
	[IB_WC_LOC_QP_OP_ERR]		= "local QP operation error",
	[IB_WC_LOC_EEC_OP_ERR]		= "local EE context operation error",
	[IB_WC_LOC_PROT_ERR]		= "local protection error",
	[IB_WC_WR_FLUSH_ERR]		= "WR flushed",
	[IB_WC_MW_BIND_ERR]		= "memory management operation error",
	[IB_WC_BAD_RESP_ERR]		= "bad response error",
	[IB_WC_LOC_ACCESS_ERR]		= "local access error",
	[IB_WC_REM_INV_REQ_ERR]		= "invalid request error",
	[IB_WC_REM_ACCESS_ERR]		= "remote access error",
	[IB_WC_REM_OP_ERR]		= "remote operation error",
	[IB_WC_RETRY_EXC_ERR]		= "transport retry counter exceeded",
	[IB_WC_RNR_RETRY_EXC_ERR]	= "RNR retry counter exceeded",
	[IB_WC_LOC_RDD_VIOL_ERR]	= "local RDD violation error",
	[IB_WC_REM_INV_RD_REQ_ERR]	= "remote invalid RD request",
	[IB_WC_REM_ABORT_ERR]		= "operation aborted",
	[IB_WC_INV_EECN_ERR]		= "invalid EE context number",
	[IB_WC_INV_EEC_STATE_ERR]	= "invalid EE context state",
	[IB_WC_FATAL_ERR]		= "fatal error",
	[IB_WC_RESP_TIMEOUT_ERR]	= "response timeout error",
	[IB_WC_GENERAL_ERR]		= "general error",
};

const char *__attribute_const__ ib_wc_status_msg(enum ib_wc_status status)
{
	size_t index = status;

	return (index < ARRAY_SIZE(wc_statuses) && wc_statuses[index]) ?
			wc_statuses[index] : "unrecognized status";
}
EXPORT_SYMBOL(ib_wc_status_msg);
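
/*
 * Illustrative use (a sketch, not code the rest of this file relies on):
 * both message helpers exist to keep error logs readable, e.g. in a
 * completion handler:
 *
 *	if (wc->status != IB_WC_SUCCESS)
 *		pr_err("%s: wr_id %llu failed: %s (%d)\n", __func__,
 *		       wc->wr_id, ib_wc_status_msg(wc->status), wc->status);
 */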

__attribute_const__ int ib_rate_to_mult(enum ib_rate rate)
{
	switch (rate) {
	case IB_RATE_2_5_GBPS: return  1;
	case IB_RATE_5_GBPS:   return  2;
	case IB_RATE_10_GBPS:  return  4;
	case IB_RATE_20_GBPS:  return  8;
	case IB_RATE_30_GBPS:  return 12;
	case IB_RATE_40_GBPS:  return 16;
	case IB_RATE_60_GBPS:  return 24;
	case IB_RATE_80_GBPS:  return 32;
	case IB_RATE_120_GBPS: return 48;
	default:	       return -1;
	}
}
EXPORT_SYMBOL(ib_rate_to_mult);

__attribute_const__ enum ib_rate mult_to_ib_rate(int mult)
{
	switch (mult) {
	case 1:  return IB_RATE_2_5_GBPS;
	case 2:  return IB_RATE_5_GBPS;
	case 4:  return IB_RATE_10_GBPS;
	case 8:  return IB_RATE_20_GBPS;
	case 12: return IB_RATE_30_GBPS;
	case 16: return IB_RATE_40_GBPS;
	case 24: return IB_RATE_60_GBPS;
	case 32: return IB_RATE_80_GBPS;
	case 48: return IB_RATE_120_GBPS;
	default: return IB_RATE_PORT_CURRENT;
	}
}
EXPORT_SYMBOL(mult_to_ib_rate);

__attribute_const__ int ib_rate_to_mbps(enum ib_rate rate)
{
	switch (rate) {
	case IB_RATE_2_5_GBPS: return 2500;
	case IB_RATE_5_GBPS:   return 5000;
	case IB_RATE_10_GBPS:  return 10000;
	case IB_RATE_20_GBPS:  return 20000;
	case IB_RATE_30_GBPS:  return 30000;
	case IB_RATE_40_GBPS:  return 40000;
	case IB_RATE_60_GBPS:  return 60000;
	case IB_RATE_80_GBPS:  return 80000;
	case IB_RATE_120_GBPS: return 120000;
	case IB_RATE_14_GBPS:  return 14062;
	case IB_RATE_56_GBPS:  return 56250;
	case IB_RATE_112_GBPS: return 112500;
	case IB_RATE_168_GBPS: return 168750;
	case IB_RATE_25_GBPS:  return 25781;
	case IB_RATE_100_GBPS: return 103125;
	case IB_RATE_200_GBPS: return 206250;
	case IB_RATE_300_GBPS: return 309375;
	default:	       return -1;
	}
}
EXPORT_SYMBOL(ib_rate_to_mbps);
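
/*
 * How the three rate helpers relate (values taken from the tables above):
 * the multiplier is expressed relative to the 2.5 Gbps SDR base rate, so
 *
 *	ib_rate_to_mult(IB_RATE_40_GBPS)  == 16
 *	mult_to_ib_rate(16)               == IB_RATE_40_GBPS
 *	ib_rate_to_mbps(IB_RATE_40_GBPS)  == 40000
 *
 * The extended rates (IB_RATE_14_GBPS and up) have no SDR multiple, so
 * ib_rate_to_mult() returns -1 for them while ib_rate_to_mbps() still
 * reports their signalling rate.
 */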

__attribute_const__ enum rdma_transport_type
rdma_node_get_transport(enum rdma_node_type node_type)
{
	switch (node_type) {
	case RDMA_NODE_IB_CA:
	case RDMA_NODE_IB_SWITCH:
	case RDMA_NODE_IB_ROUTER:
		return RDMA_TRANSPORT_IB;
	case RDMA_NODE_RNIC:
		return RDMA_TRANSPORT_IWARP;
	case RDMA_NODE_USNIC:
		return RDMA_TRANSPORT_USNIC;
	case RDMA_NODE_USNIC_UDP:
		return RDMA_TRANSPORT_USNIC_UDP;
	default:
		BUG();
		return 0;
	}
}
EXPORT_SYMBOL(rdma_node_get_transport);

enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device, u8 port_num)
{
	if (device->get_link_layer)
		return device->get_link_layer(device, port_num);

	switch (rdma_node_get_transport(device->node_type)) {
	case RDMA_TRANSPORT_IB:
		return IB_LINK_LAYER_INFINIBAND;
	case RDMA_TRANSPORT_IWARP:
	case RDMA_TRANSPORT_USNIC:
	case RDMA_TRANSPORT_USNIC_UDP:
		return IB_LINK_LAYER_ETHERNET;
	default:
		return IB_LINK_LAYER_UNSPECIFIED;
	}
}
EXPORT_SYMBOL(rdma_port_get_link_layer);
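
/*
 * Example (a sketch; check_roce_path() is a hypothetical caller-side
 * helper, not part of this file): consumers branch on the link layer
 * when the wire format matters:
 *
 *	if (rdma_port_get_link_layer(device, port_num) ==
 *	    IB_LINK_LAYER_ETHERNET)
 *		check_roce_path(device, port_num);
 */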

/* Protection domains */

/**
 * ib_alloc_pd - Allocates an unused protection domain.
 * @device: The device on which to allocate the protection domain.
 *
 * A protection domain object provides an association between QPs, shared
 * receive queues, address handles, memory regions, and memory windows.
 *
 * Every PD has a local_dma_lkey which can be used as the lkey value for local
 * memory operations.
 */
struct ib_pd *__ib_alloc_pd(struct ib_device *device, unsigned int flags,
		const char *caller)
{
	struct ib_pd *pd;
	int mr_access_flags = 0;

	pd = device->alloc_pd(device, NULL, NULL);
	if (IS_ERR(pd))
		return pd;

	pd->device = device;
	pd->uobject = NULL;
	pd->__internal_mr = NULL;
	atomic_set(&pd->usecnt, 0);
	pd->flags = flags;

	if (device->attrs.device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY)
		pd->local_dma_lkey = device->local_dma_lkey;
	else
		mr_access_flags |= IB_ACCESS_LOCAL_WRITE;

	if (flags & IB_PD_UNSAFE_GLOBAL_RKEY) {
		pr_warn("%s: enabling unsafe global rkey\n", caller);
		mr_access_flags |= IB_ACCESS_REMOTE_READ | IB_ACCESS_REMOTE_WRITE;
	}

	if (mr_access_flags) {
		struct ib_mr *mr;

		mr = pd->device->get_dma_mr(pd, mr_access_flags);
		if (IS_ERR(mr)) {
			ib_dealloc_pd(pd);
			return ERR_CAST(mr);
		}

		mr->device	= pd->device;
		mr->pd		= pd;
		mr->uobject	= NULL;
		mr->need_inval	= false;

		pd->__internal_mr = mr;

		if (!(device->attrs.device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY))
			pd->local_dma_lkey = pd->__internal_mr->lkey;

		if (flags & IB_PD_UNSAFE_GLOBAL_RKEY)
			pd->unsafe_global_rkey = pd->__internal_mr->rkey;
	}

	return pd;
}
EXPORT_SYMBOL(__ib_alloc_pd);
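
/*
 * Usage sketch, assuming the ib_alloc_pd() wrapper macro from
 * <rdma/ib_verbs.h>, which supplies KBUILD_MODNAME as @caller:
 *
 *	pd = ib_alloc_pd(device, 0);
 *	if (IS_ERR(pd))
 *		return PTR_ERR(pd);
 *
 * Passing IB_PD_UNSAFE_GLOBAL_RKEY instead of 0 additionally populates
 * pd->unsafe_global_rkey, at the cost of the warning logged above.
 */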

/**
 * ib_dealloc_pd - Deallocates a protection domain.
 * @pd: The protection domain to deallocate.
 *
 * It is an error to call this function while any resources in the pd still
 * exist. The caller is responsible to synchronously destroy them and
 * guarantee no new allocations will happen.
 */
void ib_dealloc_pd(struct ib_pd *pd)
{
	int ret;

	if (pd->__internal_mr) {
		ret = pd->device->dereg_mr(pd->__internal_mr);
		WARN_ON(ret);
		pd->__internal_mr = NULL;
	}

	/*
	 * uverbs manipulates usecnt with proper locking, while the kABI
	 * requires the caller to guarantee we can't race here.
	 */
	WARN_ON(atomic_read(&pd->usecnt));

	/*
	 * Making dealloc_pd a void return is a WIP; no driver should return
	 * an error here.
	 */
	ret = pd->device->dealloc_pd(pd);
	WARN_ONCE(ret, "Infiniband HW driver failed dealloc_pd");
}
EXPORT_SYMBOL(ib_dealloc_pd);

/* Address handles */

struct ib_ah *ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr)
{
	struct ib_ah *ah;

	ah = pd->device->create_ah(pd, ah_attr, NULL);

	if (!IS_ERR(ah)) {
		ah->device  = pd->device;
		ah->pd      = pd;
		ah->uobject = NULL;
		atomic_inc(&pd->usecnt);
	}

	return ah;
}
EXPORT_SYMBOL(ib_create_ah);

int ib_get_rdma_header_version(const union rdma_network_hdr *hdr)
{
	const struct iphdr *ip4h = (struct iphdr *)&hdr->roce4grh;
	struct iphdr ip4h_checked;
	const struct ipv6hdr *ip6h = (struct ipv6hdr *)&hdr->ibgrh;

	/* If it's IPv6, the version must be 6, otherwise, the first
	 * 20 bytes (before the IPv4 header) are garbled.
	 */
	if (ip6h->version != 6)
		return (ip4h->version == 4) ? 4 : 0;
	/* version may be 6 or 4 because the first 20 bytes could be garbled */

	/* RoCE v2 requires no options, thus header length
	 * must be 5 words
	 */
	if (ip4h->ihl != 5)
		return 6;

	/* Verify checksum.
	 * We can't write on scattered buffers so we need to copy to a
	 * temp buffer.
	 */
	memcpy(&ip4h_checked, ip4h, sizeof(ip4h_checked));
	ip4h_checked.check = 0;
	ip4h_checked.check = ip_fast_csum((u8 *)&ip4h_checked, 5);
	/* if IPv4 header checksum is OK, believe it */
	if (ip4h->check == ip4h_checked.check)
		return 4;
	return 6;
}
EXPORT_SYMBOL(ib_get_rdma_header_version);

static enum rdma_network_type ib_get_net_type_by_grh(struct ib_device *device,
						     u8 port_num,
						     const struct ib_grh *grh)
{
	int grh_version;

	if (rdma_protocol_ib(device, port_num))
		return RDMA_NETWORK_IB;

	grh_version = ib_get_rdma_header_version((union rdma_network_hdr *)grh);

	if (grh_version == 4)
		return RDMA_NETWORK_IPV4;

	if (grh->next_hdr == IPPROTO_UDP)
		return RDMA_NETWORK_IPV6;

	return RDMA_NETWORK_ROCE_V1;
}

struct find_gid_index_context {
	u16 vlan_id;
	enum ib_gid_type gid_type;
};

static bool find_gid_index(const union ib_gid *gid,
			   const struct ib_gid_attr *gid_attr,
			   void *context)
{
	struct find_gid_index_context *ctx =
		(struct find_gid_index_context *)context;

	if (ctx->gid_type != gid_attr->gid_type)
		return false;

	if ((!!(ctx->vlan_id != 0xffff) == !is_vlan_dev(gid_attr->ndev)) ||
	    (is_vlan_dev(gid_attr->ndev) &&
	     vlan_dev_vlan_id(gid_attr->ndev) != ctx->vlan_id))
		return false;

	return true;
}

static int get_sgid_index_from_eth(struct ib_device *device, u8 port_num,
				   u16 vlan_id, const union ib_gid *sgid,
				   enum ib_gid_type gid_type,
				   u16 *gid_index)
{
	struct find_gid_index_context context = {.vlan_id = vlan_id,
						 .gid_type = gid_type};

	return ib_find_gid_by_filter(device, sgid, port_num, find_gid_index,
				     &context, gid_index);
}

int ib_get_gids_from_rdma_hdr(const union rdma_network_hdr *hdr,
			      enum rdma_network_type net_type,
			      union ib_gid *sgid, union ib_gid *dgid)
{
	struct sockaddr_in src_in;
	struct sockaddr_in dst_in;
	__be32 src_saddr, dst_saddr;

	if (!sgid || !dgid)
		return -EINVAL;

	if (net_type == RDMA_NETWORK_IPV4) {
		memcpy(&src_in.sin_addr.s_addr,
		       &hdr->roce4grh.saddr, 4);
		memcpy(&dst_in.sin_addr.s_addr,
		       &hdr->roce4grh.daddr, 4);
		src_saddr = src_in.sin_addr.s_addr;
		dst_saddr = dst_in.sin_addr.s_addr;
		ipv6_addr_set_v4mapped(src_saddr,
				       (struct in6_addr *)sgid);
		ipv6_addr_set_v4mapped(dst_saddr,
				       (struct in6_addr *)dgid);
		return 0;
	} else if (net_type == RDMA_NETWORK_IPV6 ||
		   net_type == RDMA_NETWORK_IB) {
		*dgid = hdr->ibgrh.dgid;
		*sgid = hdr->ibgrh.sgid;
		return 0;
	} else {
		return -EINVAL;
	}
}
EXPORT_SYMBOL(ib_get_gids_from_rdma_hdr);

int ib_init_ah_from_wc(struct ib_device *device, u8 port_num,
		       const struct ib_wc *wc, const struct ib_grh *grh,
		       struct ib_ah_attr *ah_attr)
{
	u32 flow_class;
	u16 gid_index;
	int ret;
	enum rdma_network_type net_type = RDMA_NETWORK_IB;
	enum ib_gid_type gid_type = IB_GID_TYPE_IB;
	int hoplimit = 0xff;
	union ib_gid dgid;
	union ib_gid sgid;

	memset(ah_attr, 0, sizeof *ah_attr);
	if (rdma_cap_eth_ah(device, port_num)) {
		if (wc->wc_flags & IB_WC_WITH_NETWORK_HDR_TYPE)
			net_type = wc->network_hdr_type;
		else
			net_type = ib_get_net_type_by_grh(device, port_num, grh);
		gid_type = ib_network_to_gid_type(net_type);
	}
	ret = ib_get_gids_from_rdma_hdr((union rdma_network_hdr *)grh, net_type,
					&sgid, &dgid);
	if (ret)
		return ret;

	if (rdma_protocol_roce(device, port_num)) {
		int if_index = 0;
		u16 vlan_id = wc->wc_flags & IB_WC_WITH_VLAN ?
				wc->vlan_id : 0xffff;
		struct net_device *idev;
		struct net_device *resolved_dev;

		if (!(wc->wc_flags & IB_WC_GRH))
			return -EPROTOTYPE;

		if (!device->get_netdev)
			return -EOPNOTSUPP;

		idev = device->get_netdev(device, port_num);
		if (!idev)
			return -ENODEV;

		ret = rdma_addr_find_l2_eth_by_grh(&dgid, &sgid,
						   ah_attr->dmac,
						   wc->wc_flags & IB_WC_WITH_VLAN ?
						   NULL : &vlan_id,
						   &if_index, &hoplimit);
		if (ret) {
			dev_put(idev);
			return ret;
		}

		resolved_dev = dev_get_by_index(&init_net, if_index);
		if (resolved_dev->flags & IFF_LOOPBACK) {
			dev_put(resolved_dev);
			resolved_dev = idev;
			dev_hold(resolved_dev);
		}
		rcu_read_lock();
		if (resolved_dev != idev && !rdma_is_upper_dev_rcu(idev,
								   resolved_dev))
			ret = -EHOSTUNREACH;
		rcu_read_unlock();
		dev_put(idev);
		dev_put(resolved_dev);
		if (ret)
			return ret;

		ret = get_sgid_index_from_eth(device, port_num, vlan_id,
					      &dgid, gid_type, &gid_index);
		if (ret)
			return ret;
	}

	ah_attr->dlid = wc->slid;
	ah_attr->sl = wc->sl;
	ah_attr->src_path_bits = wc->dlid_path_bits;
	ah_attr->port_num = port_num;

	if (wc->wc_flags & IB_WC_GRH) {
		ah_attr->ah_flags = IB_AH_GRH;
		ah_attr->grh.dgid = sgid;

		if (!rdma_cap_eth_ah(device, port_num)) {
			if (dgid.global.interface_id != cpu_to_be64(IB_SA_WELL_KNOWN_GUID)) {
				ret = ib_find_cached_gid_by_port(device, &dgid,
								 IB_GID_TYPE_IB,
								 port_num, NULL,
								 &gid_index);
				if (ret)
					return ret;
			} else {
				gid_index = 0;
			}
		}

		ah_attr->grh.sgid_index = (u8) gid_index;
		flow_class = be32_to_cpu(grh->version_tclass_flow);
		ah_attr->grh.flow_label = flow_class & 0xFFFFF;
		ah_attr->grh.hop_limit = hoplimit;
		ah_attr->grh.traffic_class = (flow_class >> 20) & 0xFF;
	}
	return 0;
}
EXPORT_SYMBOL(ib_init_ah_from_wc);

struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, const struct ib_wc *wc,
				   const struct ib_grh *grh, u8 port_num)
{
	struct ib_ah_attr ah_attr;
	int ret;

	ret = ib_init_ah_from_wc(pd->device, port_num, wc, grh, &ah_attr);
	if (ret)
		return ERR_PTR(ret);

	return ib_create_ah(pd, &ah_attr);
}
EXPORT_SYMBOL(ib_create_ah_from_wc);
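
/*
 * Typical pattern (a sketch, assuming a UD receive buffer whose first 40
 * bytes hold the GRH): build a reply AH straight from the completion:
 *
 *	ah = ib_create_ah_from_wc(pd, wc, (struct ib_grh *)recv_buf,
 *				  port_num);
 *	if (IS_ERR(ah))
 *		return PTR_ERR(ah);
 *	... post the reply using ah, then ib_destroy_ah(ah) ...
 */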

int ib_modify_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr)
{
	return ah->device->modify_ah ?
		ah->device->modify_ah(ah, ah_attr) :
		-ENOSYS;
}
EXPORT_SYMBOL(ib_modify_ah);

int ib_query_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr)
{
	return ah->device->query_ah ?
		ah->device->query_ah(ah, ah_attr) :
		-ENOSYS;
}
EXPORT_SYMBOL(ib_query_ah);

int ib_destroy_ah(struct ib_ah *ah)
{
	struct ib_pd *pd;
	int ret;

	pd = ah->pd;
	ret = ah->device->destroy_ah(ah);
	if (!ret)
		atomic_dec(&pd->usecnt);

	return ret;
}
EXPORT_SYMBOL(ib_destroy_ah);

/* Shared receive queues */

struct ib_srq *ib_create_srq(struct ib_pd *pd,
			     struct ib_srq_init_attr *srq_init_attr)
{
	struct ib_srq *srq;

	if (!pd->device->create_srq)
		return ERR_PTR(-ENOSYS);

	srq = pd->device->create_srq(pd, srq_init_attr, NULL);

	if (!IS_ERR(srq)) {
		srq->device	   = pd->device;
		srq->pd		   = pd;
		srq->uobject	   = NULL;
		srq->event_handler = srq_init_attr->event_handler;
		srq->srq_context   = srq_init_attr->srq_context;
		srq->srq_type	   = srq_init_attr->srq_type;
		if (srq->srq_type == IB_SRQT_XRC) {
			srq->ext.xrc.xrcd = srq_init_attr->ext.xrc.xrcd;
			srq->ext.xrc.cq   = srq_init_attr->ext.xrc.cq;
			atomic_inc(&srq->ext.xrc.xrcd->usecnt);
			atomic_inc(&srq->ext.xrc.cq->usecnt);
		}
		atomic_inc(&pd->usecnt);
		atomic_set(&srq->usecnt, 0);
	}

	return srq;
}
EXPORT_SYMBOL(ib_create_srq);
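
/*
 * Minimal setup sketch (the sizes are illustrative, not requirements):
 *
 *	struct ib_srq_init_attr srq_attr = {
 *		.attr = {
 *			.max_wr	 = 256,
 *			.max_sge = 1,
 *		},
 *		.srq_type = IB_SRQT_BASIC,
 *	};
 *
 *	srq = ib_create_srq(pd, &srq_attr);
 *	if (IS_ERR(srq))
 *		return PTR_ERR(srq);
 */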

int ib_modify_srq(struct ib_srq *srq,
		  struct ib_srq_attr *srq_attr,
		  enum ib_srq_attr_mask srq_attr_mask)
{
	return srq->device->modify_srq ?
		srq->device->modify_srq(srq, srq_attr, srq_attr_mask, NULL) :
		-ENOSYS;
}
EXPORT_SYMBOL(ib_modify_srq);

int ib_query_srq(struct ib_srq *srq,
		 struct ib_srq_attr *srq_attr)
{
	return srq->device->query_srq ?
		srq->device->query_srq(srq, srq_attr) : -ENOSYS;
}
EXPORT_SYMBOL(ib_query_srq);

int ib_destroy_srq(struct ib_srq *srq)
{
	struct ib_pd *pd;
	enum ib_srq_type srq_type;
	struct ib_xrcd *uninitialized_var(xrcd);
	struct ib_cq *uninitialized_var(cq);
	int ret;

	if (atomic_read(&srq->usecnt))
		return -EBUSY;

	pd = srq->pd;
	srq_type = srq->srq_type;
	if (srq_type == IB_SRQT_XRC) {
		xrcd = srq->ext.xrc.xrcd;
		cq = srq->ext.xrc.cq;
	}

	ret = srq->device->destroy_srq(srq);
	if (!ret) {
		atomic_dec(&pd->usecnt);
		if (srq_type == IB_SRQT_XRC) {
			atomic_dec(&xrcd->usecnt);
			atomic_dec(&cq->usecnt);
		}
	}

	return ret;
}
EXPORT_SYMBOL(ib_destroy_srq);

/* Queue pairs */

static void __ib_shared_qp_event_handler(struct ib_event *event, void *context)
{
	struct ib_qp *qp = context;
	unsigned long flags;

	spin_lock_irqsave(&qp->device->event_handler_lock, flags);
	list_for_each_entry(event->element.qp, &qp->open_list, open_list)
		if (event->element.qp->event_handler)
			event->element.qp->event_handler(event, event->element.qp->qp_context);
	spin_unlock_irqrestore(&qp->device->event_handler_lock, flags);
}

static void __ib_insert_xrcd_qp(struct ib_xrcd *xrcd, struct ib_qp *qp)
{
	mutex_lock(&xrcd->tgt_qp_mutex);
	list_add(&qp->xrcd_list, &xrcd->tgt_qp_list);
	mutex_unlock(&xrcd->tgt_qp_mutex);
}

static struct ib_qp *__ib_open_qp(struct ib_qp *real_qp,
				  void (*event_handler)(struct ib_event *, void *),
				  void *qp_context)
{
	struct ib_qp *qp;
	unsigned long flags;

	qp = kzalloc(sizeof *qp, GFP_KERNEL);
	if (!qp)
		return ERR_PTR(-ENOMEM);

	qp->real_qp = real_qp;
	atomic_inc(&real_qp->usecnt);
	qp->device = real_qp->device;
	qp->event_handler = event_handler;
	qp->qp_context = qp_context;
	qp->qp_num = real_qp->qp_num;
	qp->qp_type = real_qp->qp_type;

	spin_lock_irqsave(&real_qp->device->event_handler_lock, flags);
	list_add(&qp->open_list, &real_qp->open_list);
	spin_unlock_irqrestore(&real_qp->device->event_handler_lock, flags);

	return qp;
}

struct ib_qp *ib_open_qp(struct ib_xrcd *xrcd,
			 struct ib_qp_open_attr *qp_open_attr)
{
	struct ib_qp *qp, *real_qp;

	if (qp_open_attr->qp_type != IB_QPT_XRC_TGT)
		return ERR_PTR(-EINVAL);

	qp = ERR_PTR(-EINVAL);
	mutex_lock(&xrcd->tgt_qp_mutex);
	list_for_each_entry(real_qp, &xrcd->tgt_qp_list, xrcd_list) {
		if (real_qp->qp_num == qp_open_attr->qp_num) {
			qp = __ib_open_qp(real_qp, qp_open_attr->event_handler,
					  qp_open_attr->qp_context);
			break;
		}
	}
	mutex_unlock(&xrcd->tgt_qp_mutex);
	return qp;
}
EXPORT_SYMBOL(ib_open_qp);

static struct ib_qp *ib_create_xrc_qp(struct ib_qp *qp,
		struct ib_qp_init_attr *qp_init_attr)
{
	struct ib_qp *real_qp = qp;

	qp->event_handler = __ib_shared_qp_event_handler;
	qp->qp_context = qp;
	qp->pd = NULL;
	qp->send_cq = qp->recv_cq = NULL;
	qp->srq = NULL;
	qp->xrcd = qp_init_attr->xrcd;
	atomic_inc(&qp_init_attr->xrcd->usecnt);
	INIT_LIST_HEAD(&qp->open_list);

	qp = __ib_open_qp(real_qp, qp_init_attr->event_handler,
			  qp_init_attr->qp_context);
	if (!IS_ERR(qp))
		__ib_insert_xrcd_qp(qp_init_attr->xrcd, real_qp);
	else
		real_qp->device->destroy_qp(real_qp);
	return qp;
}

struct ib_qp *ib_create_qp(struct ib_pd *pd,
			   struct ib_qp_init_attr *qp_init_attr)
{
	struct ib_device *device = pd ? pd->device : qp_init_attr->xrcd->device;
	struct ib_qp *qp;
	int ret;

	if (qp_init_attr->rwq_ind_tbl &&
	    (qp_init_attr->recv_cq ||
	    qp_init_attr->srq || qp_init_attr->cap.max_recv_wr ||
	    qp_init_attr->cap.max_recv_sge))
		return ERR_PTR(-EINVAL);

	/*
	 * If the caller is using the RDMA API, calculate the resources
	 * needed for the RDMA READ/WRITE operations.
	 *
	 * Note that these callers need to pass in a port number.
	 */
	if (qp_init_attr->cap.max_rdma_ctxs)
		rdma_rw_init_qp(device, qp_init_attr);

	qp = device->create_qp(pd, qp_init_attr, NULL);
	if (IS_ERR(qp))
		return qp;

	qp->device     = device;
	qp->real_qp    = qp;
	qp->uobject    = NULL;
	qp->qp_type    = qp_init_attr->qp_type;
	qp->rwq_ind_tbl = qp_init_attr->rwq_ind_tbl;

	atomic_set(&qp->usecnt, 0);
	qp->mrs_used = 0;
	spin_lock_init(&qp->mr_lock);
	INIT_LIST_HEAD(&qp->rdma_mrs);
	INIT_LIST_HEAD(&qp->sig_mrs);

	if (qp_init_attr->qp_type == IB_QPT_XRC_TGT)
		return ib_create_xrc_qp(qp, qp_init_attr);

	qp->event_handler = qp_init_attr->event_handler;
	qp->qp_context = qp_init_attr->qp_context;
	if (qp_init_attr->qp_type == IB_QPT_XRC_INI) {
		qp->recv_cq = NULL;
		qp->srq = NULL;
	} else {
		qp->recv_cq = qp_init_attr->recv_cq;
		if (qp_init_attr->recv_cq)
			atomic_inc(&qp_init_attr->recv_cq->usecnt);
		qp->srq = qp_init_attr->srq;
		if (qp->srq)
			atomic_inc(&qp_init_attr->srq->usecnt);
	}

	qp->pd	    = pd;
	qp->send_cq = qp_init_attr->send_cq;
	qp->xrcd    = NULL;

	atomic_inc(&pd->usecnt);
	if (qp_init_attr->send_cq)
		atomic_inc(&qp_init_attr->send_cq->usecnt);
	if (qp_init_attr->rwq_ind_tbl)
		atomic_inc(&qp->rwq_ind_tbl->usecnt);

	if (qp_init_attr->cap.max_rdma_ctxs) {
		ret = rdma_rw_init_mrs(qp, qp_init_attr);
		if (ret) {
			pr_err("failed to init MR pool ret= %d\n", ret);
			ib_destroy_qp(qp);
			return ERR_PTR(ret);
		}
	}

	/*
	 * Note: all hw drivers guarantee that max_send_sge is lower than
	 * the device RDMA WRITE SGE limit but not all hw drivers ensure that
	 * max_send_sge <= max_sge_rd.
	 */
	qp->max_write_sge = qp_init_attr->cap.max_send_sge;
	qp->max_read_sge = min_t(u32, qp_init_attr->cap.max_send_sge,
				 device->attrs.max_sge_rd);

	return qp;
}
EXPORT_SYMBOL(ib_create_qp);
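
/*
 * Sketch of a common RC QP setup (example values only):
 *
 *	struct ib_qp_init_attr qp_attr = {
 *		.send_cq = cq,
 *		.recv_cq = cq,
 *		.cap = {
 *			.max_send_wr  = 64,
 *			.max_recv_wr  = 64,
 *			.max_send_sge = 1,
 *			.max_recv_sge = 1,
 *		},
 *		.sq_sig_type = IB_SIGNAL_REQ_WR,
 *		.qp_type     = IB_QPT_RC,
 *	};
 *
 *	qp = ib_create_qp(pd, &qp_attr);
 *
 * ULPs that use the rdma_rw API additionally set cap.max_rdma_ctxs and
 * port_num so rdma_rw_init_qp() above can size the send queue.
 */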
| 864 | |
Roland Dreier | 8a51866 | 2006-02-13 12:48:12 -0800 | [diff] [blame] | 865 | static const struct { |
| 866 | int valid; |
Sean Hefty | b42b63c | 2011-05-23 19:59:25 -0700 | [diff] [blame] | 867 | enum ib_qp_attr_mask req_param[IB_QPT_MAX]; |
| 868 | enum ib_qp_attr_mask opt_param[IB_QPT_MAX]; |
Roland Dreier | 8a51866 | 2006-02-13 12:48:12 -0800 | [diff] [blame] | 869 | } qp_state_table[IB_QPS_ERR + 1][IB_QPS_ERR + 1] = { |
| 870 | [IB_QPS_RESET] = { |
| 871 | [IB_QPS_RESET] = { .valid = 1 }, |
Roland Dreier | 8a51866 | 2006-02-13 12:48:12 -0800 | [diff] [blame] | 872 | [IB_QPS_INIT] = { |
| 873 | .valid = 1, |
| 874 | .req_param = { |
| 875 | [IB_QPT_UD] = (IB_QP_PKEY_INDEX | |
| 876 | IB_QP_PORT | |
| 877 | IB_QP_QKEY), |
Or Gerlitz | c938a61 | 2012-03-01 12:17:51 +0200 | [diff] [blame] | 878 | [IB_QPT_RAW_PACKET] = IB_QP_PORT, |
Roland Dreier | 8a51866 | 2006-02-13 12:48:12 -0800 | [diff] [blame] | 879 | [IB_QPT_UC] = (IB_QP_PKEY_INDEX | |
| 880 | IB_QP_PORT | |
| 881 | IB_QP_ACCESS_FLAGS), |
| 882 | [IB_QPT_RC] = (IB_QP_PKEY_INDEX | |
| 883 | IB_QP_PORT | |
| 884 | IB_QP_ACCESS_FLAGS), |
Sean Hefty | b42b63c | 2011-05-23 19:59:25 -0700 | [diff] [blame] | 885 | [IB_QPT_XRC_INI] = (IB_QP_PKEY_INDEX | |
| 886 | IB_QP_PORT | |
| 887 | IB_QP_ACCESS_FLAGS), |
| 888 | [IB_QPT_XRC_TGT] = (IB_QP_PKEY_INDEX | |
| 889 | IB_QP_PORT | |
| 890 | IB_QP_ACCESS_FLAGS), |
Roland Dreier | 8a51866 | 2006-02-13 12:48:12 -0800 | [diff] [blame] | 891 | [IB_QPT_SMI] = (IB_QP_PKEY_INDEX | |
| 892 | IB_QP_QKEY), |
| 893 | [IB_QPT_GSI] = (IB_QP_PKEY_INDEX | |
| 894 | IB_QP_QKEY), |
| 895 | } |
| 896 | }, |
| 897 | }, |
| 898 | [IB_QPS_INIT] = { |
| 899 | [IB_QPS_RESET] = { .valid = 1 }, |
| 900 | [IB_QPS_ERR] = { .valid = 1 }, |
| 901 | [IB_QPS_INIT] = { |
| 902 | .valid = 1, |
| 903 | .opt_param = { |
| 904 | [IB_QPT_UD] = (IB_QP_PKEY_INDEX | |
| 905 | IB_QP_PORT | |
| 906 | IB_QP_QKEY), |
| 907 | [IB_QPT_UC] = (IB_QP_PKEY_INDEX | |
| 908 | IB_QP_PORT | |
| 909 | IB_QP_ACCESS_FLAGS), |
| 910 | [IB_QPT_RC] = (IB_QP_PKEY_INDEX | |
| 911 | IB_QP_PORT | |
| 912 | IB_QP_ACCESS_FLAGS), |
Sean Hefty | b42b63c | 2011-05-23 19:59:25 -0700 | [diff] [blame] | 913 | [IB_QPT_XRC_INI] = (IB_QP_PKEY_INDEX | |
| 914 | IB_QP_PORT | |
| 915 | IB_QP_ACCESS_FLAGS), |
| 916 | [IB_QPT_XRC_TGT] = (IB_QP_PKEY_INDEX | |
| 917 | IB_QP_PORT | |
| 918 | IB_QP_ACCESS_FLAGS), |
Roland Dreier | 8a51866 | 2006-02-13 12:48:12 -0800 | [diff] [blame] | 919 | [IB_QPT_SMI] = (IB_QP_PKEY_INDEX | |
| 920 | IB_QP_QKEY), |
| 921 | [IB_QPT_GSI] = (IB_QP_PKEY_INDEX | |
| 922 | IB_QP_QKEY), |
| 923 | } |
| 924 | }, |
| 925 | [IB_QPS_RTR] = { |
| 926 | .valid = 1, |
| 927 | .req_param = { |
| 928 | [IB_QPT_UC] = (IB_QP_AV | |
| 929 | IB_QP_PATH_MTU | |
| 930 | IB_QP_DEST_QPN | |
| 931 | IB_QP_RQ_PSN), |
| 932 | [IB_QPT_RC] = (IB_QP_AV | |
| 933 | IB_QP_PATH_MTU | |
| 934 | IB_QP_DEST_QPN | |
| 935 | IB_QP_RQ_PSN | |
| 936 | IB_QP_MAX_DEST_RD_ATOMIC | |
| 937 | IB_QP_MIN_RNR_TIMER), |
Sean Hefty | b42b63c | 2011-05-23 19:59:25 -0700 | [diff] [blame] | 938 | [IB_QPT_XRC_INI] = (IB_QP_AV | |
| 939 | IB_QP_PATH_MTU | |
| 940 | IB_QP_DEST_QPN | |
| 941 | IB_QP_RQ_PSN), |
| 942 | [IB_QPT_XRC_TGT] = (IB_QP_AV | |
| 943 | IB_QP_PATH_MTU | |
| 944 | IB_QP_DEST_QPN | |
| 945 | IB_QP_RQ_PSN | |
| 946 | IB_QP_MAX_DEST_RD_ATOMIC | |
| 947 | IB_QP_MIN_RNR_TIMER), |
Roland Dreier | 8a51866 | 2006-02-13 12:48:12 -0800 | [diff] [blame] | 948 | }, |
| 949 | .opt_param = { |
| 950 | [IB_QPT_UD] = (IB_QP_PKEY_INDEX | |
| 951 | IB_QP_QKEY), |
| 952 | [IB_QPT_UC] = (IB_QP_ALT_PATH | |
| 953 | IB_QP_ACCESS_FLAGS | |
| 954 | IB_QP_PKEY_INDEX), |
| 955 | [IB_QPT_RC] = (IB_QP_ALT_PATH | |
| 956 | IB_QP_ACCESS_FLAGS | |
| 957 | IB_QP_PKEY_INDEX), |
Sean Hefty | b42b63c | 2011-05-23 19:59:25 -0700 | [diff] [blame] | 958 | [IB_QPT_XRC_INI] = (IB_QP_ALT_PATH | |
| 959 | IB_QP_ACCESS_FLAGS | |
| 960 | IB_QP_PKEY_INDEX), |
| 961 | [IB_QPT_XRC_TGT] = (IB_QP_ALT_PATH | |
| 962 | IB_QP_ACCESS_FLAGS | |
| 963 | IB_QP_PKEY_INDEX), |
Roland Dreier | 8a51866 | 2006-02-13 12:48:12 -0800 | [diff] [blame] | 964 | [IB_QPT_SMI] = (IB_QP_PKEY_INDEX | |
| 965 | IB_QP_QKEY), |
| 966 | [IB_QPT_GSI] = (IB_QP_PKEY_INDEX | |
| 967 | IB_QP_QKEY), |
Matan Barak | dd5f03b | 2013-12-12 18:03:11 +0200 | [diff] [blame] | 968 | }, |
Matan Barak | dbf727d | 2015-10-15 18:38:51 +0300 | [diff] [blame] | 969 | }, |
Roland Dreier | 8a51866 | 2006-02-13 12:48:12 -0800 | [diff] [blame] | 970 | }, |
| 971 | [IB_QPS_RTR] = { |
| 972 | [IB_QPS_RESET] = { .valid = 1 }, |
| 973 | [IB_QPS_ERR] = { .valid = 1 }, |
| 974 | [IB_QPS_RTS] = { |
| 975 | .valid = 1, |
| 976 | .req_param = { |
| 977 | [IB_QPT_UD] = IB_QP_SQ_PSN, |
| 978 | [IB_QPT_UC] = IB_QP_SQ_PSN, |
| 979 | [IB_QPT_RC] = (IB_QP_TIMEOUT | |
| 980 | IB_QP_RETRY_CNT | |
| 981 | IB_QP_RNR_RETRY | |
| 982 | IB_QP_SQ_PSN | |
| 983 | IB_QP_MAX_QP_RD_ATOMIC), |
Sean Hefty | b42b63c | 2011-05-23 19:59:25 -0700 | [diff] [blame] | 984 | [IB_QPT_XRC_INI] = (IB_QP_TIMEOUT | |
| 985 | IB_QP_RETRY_CNT | |
| 986 | IB_QP_RNR_RETRY | |
| 987 | IB_QP_SQ_PSN | |
| 988 | IB_QP_MAX_QP_RD_ATOMIC), |
| 989 | [IB_QPT_XRC_TGT] = (IB_QP_TIMEOUT | |
| 990 | IB_QP_SQ_PSN), |
Roland Dreier | 8a51866 | 2006-02-13 12:48:12 -0800 | [diff] [blame] | 991 | [IB_QPT_SMI] = IB_QP_SQ_PSN, |
| 992 | [IB_QPT_GSI] = IB_QP_SQ_PSN, |
| 993 | }, |
| 994 | .opt_param = { |
| 995 | [IB_QPT_UD] = (IB_QP_CUR_STATE | |
| 996 | IB_QP_QKEY), |
| 997 | [IB_QPT_UC] = (IB_QP_CUR_STATE | |
| 998 | IB_QP_ALT_PATH | |
| 999 | IB_QP_ACCESS_FLAGS | |
| 1000 | IB_QP_PATH_MIG_STATE), |
| 1001 | [IB_QPT_RC] = (IB_QP_CUR_STATE | |
| 1002 | IB_QP_ALT_PATH | |
| 1003 | IB_QP_ACCESS_FLAGS | |
| 1004 | IB_QP_MIN_RNR_TIMER | |
| 1005 | IB_QP_PATH_MIG_STATE), |
Sean Hefty | b42b63c | 2011-05-23 19:59:25 -0700 | [diff] [blame] | 1006 | [IB_QPT_XRC_INI] = (IB_QP_CUR_STATE | |
| 1007 | IB_QP_ALT_PATH | |
| 1008 | IB_QP_ACCESS_FLAGS | |
| 1009 | IB_QP_PATH_MIG_STATE), |
| 1010 | [IB_QPT_XRC_TGT] = (IB_QP_CUR_STATE | |
| 1011 | IB_QP_ALT_PATH | |
| 1012 | IB_QP_ACCESS_FLAGS | |
| 1013 | IB_QP_MIN_RNR_TIMER | |
| 1014 | IB_QP_PATH_MIG_STATE), |
Roland Dreier | 8a51866 | 2006-02-13 12:48:12 -0800 | [diff] [blame] | 1015 | [IB_QPT_SMI] = (IB_QP_CUR_STATE | |
| 1016 | IB_QP_QKEY), |
| 1017 | [IB_QPT_GSI] = (IB_QP_CUR_STATE | |
| 1018 | IB_QP_QKEY), |
Bodong Wang | 528e5a1 | 2016-12-01 13:43:14 +0200 | [diff] [blame] | 1019 | [IB_QPT_RAW_PACKET] = IB_QP_RATE_LIMIT, |
Roland Dreier | 8a51866 | 2006-02-13 12:48:12 -0800 | [diff] [blame] | 1020 | } |
| 1021 | } |
| 1022 | }, |
| 1023 | [IB_QPS_RTS] = { |
| 1024 | [IB_QPS_RESET] = { .valid = 1 }, |
| 1025 | [IB_QPS_ERR] = { .valid = 1 }, |
| 1026 | [IB_QPS_RTS] = { |
| 1027 | .valid = 1, |
| 1028 | .opt_param = { |
| 1029 | [IB_QPT_UD] = (IB_QP_CUR_STATE | |
| 1030 | IB_QP_QKEY), |
Dotan Barak | 4546d31 | 2006-03-02 11:22:28 -0800 | [diff] [blame] | 1031 | [IB_QPT_UC] = (IB_QP_CUR_STATE | |
| 1032 | IB_QP_ACCESS_FLAGS | |
Roland Dreier | 8a51866 | 2006-02-13 12:48:12 -0800 | [diff] [blame] | 1033 | IB_QP_ALT_PATH | |
| 1034 | IB_QP_PATH_MIG_STATE), |
Dotan Barak | 4546d31 | 2006-03-02 11:22:28 -0800 | [diff] [blame] | 1035 | [IB_QPT_RC] = (IB_QP_CUR_STATE | |
| 1036 | IB_QP_ACCESS_FLAGS | |
Roland Dreier | 8a51866 | 2006-02-13 12:48:12 -0800 | [diff] [blame] | 1037 | IB_QP_ALT_PATH | |
| 1038 | IB_QP_PATH_MIG_STATE | |
| 1039 | IB_QP_MIN_RNR_TIMER), |
Sean Hefty | b42b63c | 2011-05-23 19:59:25 -0700 | [diff] [blame] | 1040 | [IB_QPT_XRC_INI] = (IB_QP_CUR_STATE | |
| 1041 | IB_QP_ACCESS_FLAGS | |
| 1042 | IB_QP_ALT_PATH | |
| 1043 | IB_QP_PATH_MIG_STATE), |
| 1044 | [IB_QPT_XRC_TGT] = (IB_QP_CUR_STATE | |
| 1045 | IB_QP_ACCESS_FLAGS | |
| 1046 | IB_QP_ALT_PATH | |
| 1047 | IB_QP_PATH_MIG_STATE | |
| 1048 | IB_QP_MIN_RNR_TIMER), |
Roland Dreier | 8a51866 | 2006-02-13 12:48:12 -0800 | [diff] [blame] | 1049 | [IB_QPT_SMI] = (IB_QP_CUR_STATE | |
| 1050 | IB_QP_QKEY), |
| 1051 | [IB_QPT_GSI] = (IB_QP_CUR_STATE | |
| 1052 | IB_QP_QKEY), |
Bodong Wang | 528e5a1 | 2016-12-01 13:43:14 +0200 | [diff] [blame] | 1053 | [IB_QPT_RAW_PACKET] = IB_QP_RATE_LIMIT, |
Roland Dreier | 8a51866 | 2006-02-13 12:48:12 -0800 | [diff] [blame] | 1054 | } |
| 1055 | }, |
| 1056 | [IB_QPS_SQD] = { |
| 1057 | .valid = 1, |
| 1058 | .opt_param = { |
| 1059 | [IB_QPT_UD] = IB_QP_EN_SQD_ASYNC_NOTIFY, |
| 1060 | [IB_QPT_UC] = IB_QP_EN_SQD_ASYNC_NOTIFY, |
| 1061 | [IB_QPT_RC] = IB_QP_EN_SQD_ASYNC_NOTIFY, |
Sean Hefty | b42b63c | 2011-05-23 19:59:25 -0700 | [diff] [blame] | 1062 | [IB_QPT_XRC_INI] = IB_QP_EN_SQD_ASYNC_NOTIFY, |
| 1063 | [IB_QPT_XRC_TGT] = IB_QP_EN_SQD_ASYNC_NOTIFY, /* ??? */ |
Roland Dreier | 8a51866 | 2006-02-13 12:48:12 -0800 | [diff] [blame] | 1064 | [IB_QPT_SMI] = IB_QP_EN_SQD_ASYNC_NOTIFY, |
| 1065 | [IB_QPT_GSI] = IB_QP_EN_SQD_ASYNC_NOTIFY |
| 1066 | } |
| 1067 | }, |
| 1068 | }, |
| 1069 | [IB_QPS_SQD] = { |
| 1070 | [IB_QPS_RESET] = { .valid = 1 }, |
| 1071 | [IB_QPS_ERR] = { .valid = 1 }, |
| 1072 | [IB_QPS_RTS] = { |
| 1073 | .valid = 1, |
| 1074 | .opt_param = { |
| 1075 | [IB_QPT_UD] = (IB_QP_CUR_STATE | |
| 1076 | IB_QP_QKEY), |
| 1077 | [IB_QPT_UC] = (IB_QP_CUR_STATE | |
| 1078 | IB_QP_ALT_PATH | |
| 1079 | IB_QP_ACCESS_FLAGS | |
| 1080 | IB_QP_PATH_MIG_STATE), |
| 1081 | [IB_QPT_RC] = (IB_QP_CUR_STATE | |
| 1082 | IB_QP_ALT_PATH | |
| 1083 | IB_QP_ACCESS_FLAGS | |
| 1084 | IB_QP_MIN_RNR_TIMER | |
| 1085 | IB_QP_PATH_MIG_STATE), |
Sean Hefty | b42b63c | 2011-05-23 19:59:25 -0700 | [diff] [blame] | 1086 | [IB_QPT_XRC_INI] = (IB_QP_CUR_STATE | |
| 1087 | IB_QP_ALT_PATH | |
| 1088 | IB_QP_ACCESS_FLAGS | |
| 1089 | IB_QP_PATH_MIG_STATE), |
| 1090 | [IB_QPT_XRC_TGT] = (IB_QP_CUR_STATE | |
| 1091 | IB_QP_ALT_PATH | |
| 1092 | IB_QP_ACCESS_FLAGS | |
| 1093 | IB_QP_MIN_RNR_TIMER | |
| 1094 | IB_QP_PATH_MIG_STATE), |
Roland Dreier | 8a51866 | 2006-02-13 12:48:12 -0800 | [diff] [blame] | 1095 | [IB_QPT_SMI] = (IB_QP_CUR_STATE | |
| 1096 | IB_QP_QKEY), |
| 1097 | [IB_QPT_GSI] = (IB_QP_CUR_STATE | |
| 1098 | IB_QP_QKEY), |
| 1099 | } |
| 1100 | }, |
| 1101 | [IB_QPS_SQD] = { |
| 1102 | .valid = 1, |
| 1103 | .opt_param = { |
| 1104 | [IB_QPT_UD] = (IB_QP_PKEY_INDEX | |
| 1105 | IB_QP_QKEY), |
| 1106 | [IB_QPT_UC] = (IB_QP_AV | |
Roland Dreier | 8a51866 | 2006-02-13 12:48:12 -0800 | [diff] [blame] | 1107 | IB_QP_ALT_PATH | |
| 1108 | IB_QP_ACCESS_FLAGS | |
| 1109 | IB_QP_PKEY_INDEX | |
| 1110 | IB_QP_PATH_MIG_STATE), |
| 1111 | [IB_QPT_RC] = (IB_QP_PORT | |
| 1112 | IB_QP_AV | |
| 1113 | IB_QP_TIMEOUT | |
| 1114 | IB_QP_RETRY_CNT | |
| 1115 | IB_QP_RNR_RETRY | |
| 1116 | IB_QP_MAX_QP_RD_ATOMIC | |
| 1117 | IB_QP_MAX_DEST_RD_ATOMIC | |
Roland Dreier | 8a51866 | 2006-02-13 12:48:12 -0800 | [diff] [blame] | 1118 | IB_QP_ALT_PATH | |
| 1119 | IB_QP_ACCESS_FLAGS | |
| 1120 | IB_QP_PKEY_INDEX | |
| 1121 | IB_QP_MIN_RNR_TIMER | |
| 1122 | IB_QP_PATH_MIG_STATE), |
Sean Hefty | b42b63c | 2011-05-23 19:59:25 -0700 | [diff] [blame] | 1123 | [IB_QPT_XRC_INI] = (IB_QP_PORT | |
| 1124 | IB_QP_AV | |
| 1125 | IB_QP_TIMEOUT | |
| 1126 | IB_QP_RETRY_CNT | |
| 1127 | IB_QP_RNR_RETRY | |
| 1128 | IB_QP_MAX_QP_RD_ATOMIC | |
| 1129 | IB_QP_ALT_PATH | |
| 1130 | IB_QP_ACCESS_FLAGS | |
| 1131 | IB_QP_PKEY_INDEX | |
| 1132 | IB_QP_PATH_MIG_STATE), |
| 1133 | [IB_QPT_XRC_TGT] = (IB_QP_PORT | |
| 1134 | IB_QP_AV | |
| 1135 | IB_QP_TIMEOUT | |
| 1136 | IB_QP_MAX_DEST_RD_ATOMIC | |
| 1137 | IB_QP_ALT_PATH | |
| 1138 | IB_QP_ACCESS_FLAGS | |
| 1139 | IB_QP_PKEY_INDEX | |
| 1140 | IB_QP_MIN_RNR_TIMER | |
| 1141 | IB_QP_PATH_MIG_STATE), |
Roland Dreier | 8a51866 | 2006-02-13 12:48:12 -0800 | [diff] [blame] | 1142 | [IB_QPT_SMI] = (IB_QP_PKEY_INDEX | |
| 1143 | IB_QP_QKEY), |
| 1144 | [IB_QPT_GSI] = (IB_QP_PKEY_INDEX | |
| 1145 | IB_QP_QKEY), |
| 1146 | } |
| 1147 | } |
| 1148 | }, |
| 1149 | [IB_QPS_SQE] = { |
| 1150 | [IB_QPS_RESET] = { .valid = 1 }, |
| 1151 | [IB_QPS_ERR] = { .valid = 1 }, |
| 1152 | [IB_QPS_RTS] = { |
| 1153 | .valid = 1, |
| 1154 | .opt_param = { |
| 1155 | [IB_QPT_UD] = (IB_QP_CUR_STATE | |
| 1156 | IB_QP_QKEY), |
| 1157 | [IB_QPT_UC] = (IB_QP_CUR_STATE | |
| 1158 | IB_QP_ACCESS_FLAGS), |
| 1159 | [IB_QPT_SMI] = (IB_QP_CUR_STATE | |
| 1160 | IB_QP_QKEY), |
| 1161 | [IB_QPT_GSI] = (IB_QP_CUR_STATE | |
| 1162 | IB_QP_QKEY), |
| 1163 | } |
| 1164 | } |
| 1165 | }, |
| 1166 | [IB_QPS_ERR] = { |
| 1167 | [IB_QPS_RESET] = { .valid = 1 }, |
| 1168 | [IB_QPS_ERR] = { .valid = 1 } |
| 1169 | } |
| 1170 | }; |
| 1171 | |
| 1172 | int ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state, |
Matan Barak | dd5f03b | 2013-12-12 18:03:11 +0200 | [diff] [blame] | 1173 | enum ib_qp_type type, enum ib_qp_attr_mask mask, |
| 1174 | enum rdma_link_layer ll) |
Roland Dreier | 8a51866 | 2006-02-13 12:48:12 -0800 | [diff] [blame] | 1175 | { |
| 1176 | enum ib_qp_attr_mask req_param, opt_param; |
| 1177 | |
| 1178 | if (cur_state < 0 || cur_state > IB_QPS_ERR || |
| 1179 | next_state < 0 || next_state > IB_QPS_ERR) |
| 1180 | return 0; |
| 1181 | |
| 1182 | if (mask & IB_QP_CUR_STATE && |
| 1183 | cur_state != IB_QPS_RTR && cur_state != IB_QPS_RTS && |
| 1184 | cur_state != IB_QPS_SQD && cur_state != IB_QPS_SQE) |
| 1185 | return 0; |
| 1186 | |
| 1187 | if (!qp_state_table[cur_state][next_state].valid) |
| 1188 | return 0; |
| 1189 | |
| 1190 | req_param = qp_state_table[cur_state][next_state].req_param[type]; |
| 1191 | opt_param = qp_state_table[cur_state][next_state].opt_param[type]; |
| 1192 | |
| 1193 | if ((mask & req_param) != req_param) |
| 1194 | return 0; |
| 1195 | |
| 1196 | if (mask & ~(req_param | opt_param | IB_QP_STATE)) |
| 1197 | return 0; |
| 1198 | |
| 1199 | return 1; |
| 1200 | } |
| 1201 | EXPORT_SYMBOL(ib_modify_qp_is_ok); |
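
/*
 * Example (illustrative sketch, not part of the original file): validating
 * a modify-QP request against qp_state_table before handing it to a
 * driver.  An RTS -> SQD transition on an RC QP may optionally carry
 * IB_QP_EN_SQD_ASYNC_NOTIFY, so this mask is accepted and the helper
 * returns 1.
 */
static int example_validate_rts_to_sqd(void)
{
	enum ib_qp_attr_mask mask = IB_QP_STATE | IB_QP_EN_SQD_ASYNC_NOTIFY;

	return ib_modify_qp_is_ok(IB_QPS_RTS, IB_QPS_SQD, IB_QPT_RC, mask,
				  IB_LINK_LAYER_INFINIBAND);
}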
| 1202 | |
Moni Shoua | c90ea9d | 2016-11-23 08:23:22 +0200 | [diff] [blame] | 1203 | int ib_resolve_eth_dmac(struct ib_device *device, |
| 1204 | struct ib_ah_attr *ah_attr) |
Or Gerlitz | ed4c54e | 2013-12-12 18:03:17 +0200 | [diff] [blame] | 1205 | { |
| 1206 | int ret = 0; |
Or Gerlitz | ed4c54e | 2013-12-12 18:03:17 +0200 | [diff] [blame] | 1207 | |
Moni Shoua | c90ea9d | 2016-11-23 08:23:22 +0200 | [diff] [blame] | 1208 | if (ah_attr->port_num < rdma_start_port(device) || |
| 1209 | ah_attr->port_num > rdma_end_port(device)) |
| 1210 | return -EINVAL; |
Matan Barak | dbf727d | 2015-10-15 18:38:51 +0300 | [diff] [blame] | 1211 | |
Moni Shoua | c90ea9d | 2016-11-23 08:23:22 +0200 | [diff] [blame] | 1212 | if (!rdma_cap_eth_ah(device, ah_attr->port_num)) |
| 1213 | return 0; |
Matan Barak | dbf727d | 2015-10-15 18:38:51 +0300 | [diff] [blame] | 1214 | |
Moni Shoua | c90ea9d | 2016-11-23 08:23:22 +0200 | [diff] [blame] | 1215 | if (rdma_link_local_addr((struct in6_addr *)ah_attr->grh.dgid.raw)) { |
| 1216 | rdma_get_ll_mac((struct in6_addr *)ah_attr->grh.dgid.raw, |
| 1217 | ah_attr->dmac); |
| 1218 | } else { |
| 1219 | union ib_gid sgid; |
| 1220 | struct ib_gid_attr sgid_attr; |
| 1221 | int ifindex; |
| 1222 | int hop_limit; |
Matan Barak | dbf727d | 2015-10-15 18:38:51 +0300 | [diff] [blame] | 1223 | |
Moni Shoua | c90ea9d | 2016-11-23 08:23:22 +0200 | [diff] [blame] | 1224 | ret = ib_query_gid(device, |
| 1225 | ah_attr->port_num, |
| 1226 | ah_attr->grh.sgid_index, |
| 1227 | &sgid, &sgid_attr); |
Matan Barak | dbf727d | 2015-10-15 18:38:51 +0300 | [diff] [blame] | 1228 | |
Moni Shoua | c90ea9d | 2016-11-23 08:23:22 +0200 | [diff] [blame] | 1229 | if (ret || !sgid_attr.ndev) { |
| 1230 | if (!ret) |
| 1231 | ret = -ENXIO; |
| 1232 | goto out; |
Or Gerlitz | ed4c54e | 2013-12-12 18:03:17 +0200 | [diff] [blame] | 1233 | } |
Moni Shoua | c90ea9d | 2016-11-23 08:23:22 +0200 | [diff] [blame] | 1234 | |
| 1235 | ifindex = sgid_attr.ndev->ifindex; |
| 1236 | |
| 1237 | ret = rdma_addr_find_l2_eth_by_grh(&sgid, |
| 1238 | &ah_attr->grh.dgid, |
| 1239 | ah_attr->dmac, |
| 1240 | NULL, &ifindex, &hop_limit); |
| 1241 | |
| 1242 | dev_put(sgid_attr.ndev); |
| 1243 | |
| 1244 | if (!ret)
| | 	ah_attr->grh.hop_limit = hop_limit;
Or Gerlitz | ed4c54e | 2013-12-12 18:03:17 +0200 | [diff] [blame] | 1245 | } |
| 1246 | out: |
| 1247 | return ret; |
| 1248 | } |
Matan Barak | dbf727d | 2015-10-15 18:38:51 +0300 | [diff] [blame] | 1249 | EXPORT_SYMBOL(ib_resolve_eth_dmac); |
Or Gerlitz | ed4c54e | 2013-12-12 18:03:17 +0200 | [diff] [blame] | 1250 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1251 | int ib_modify_qp(struct ib_qp *qp, |
| 1252 | struct ib_qp_attr *qp_attr, |
| 1253 | int qp_attr_mask) |
| 1254 | { |
Moni Shoua | c90ea9d | 2016-11-23 08:23:22 +0200 | [diff] [blame] | 1256 | if (qp_attr_mask & IB_QP_AV) { |
| 1257 | int ret; |
| 1258 | |
| 1259 | ret = ib_resolve_eth_dmac(qp->device, &qp_attr->ah_attr); |
| 1260 | if (ret) |
| 1261 | return ret; |
| 1262 | } |
Or Gerlitz | ed4c54e | 2013-12-12 18:03:17 +0200 | [diff] [blame] | 1263 | |
Sean Hefty | 0e0ec7e | 2011-08-08 15:31:51 -0700 | [diff] [blame] | 1264 | return qp->device->modify_qp(qp->real_qp, qp_attr, qp_attr_mask, NULL); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1265 | } |
| 1266 | EXPORT_SYMBOL(ib_modify_qp); |
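
/*
 * Example (illustrative sketch): moving a QP into the error state, the
 * same single-attribute transition the drain helpers at the bottom of
 * this file rely on.  Because only IB_QP_STATE is set in the mask, no
 * Ethernet L2 address resolution is triggered above.
 */
static int example_move_qp_to_err(struct ib_qp *qp)
{
	struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };

	return ib_modify_qp(qp, &attr, IB_QP_STATE);
}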
| 1267 | |
| 1268 | int ib_query_qp(struct ib_qp *qp, |
| 1269 | struct ib_qp_attr *qp_attr, |
| 1270 | int qp_attr_mask, |
| 1271 | struct ib_qp_init_attr *qp_init_attr) |
| 1272 | { |
| 1273 | return qp->device->query_qp ? |
Sean Hefty | 0e0ec7e | 2011-08-08 15:31:51 -0700 | [diff] [blame] | 1274 | qp->device->query_qp(qp->real_qp, qp_attr, qp_attr_mask, qp_init_attr) : |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1275 | -ENOSYS; |
| 1276 | } |
| 1277 | EXPORT_SYMBOL(ib_query_qp); |
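
/*
 * Example (illustrative sketch): reading back the current state of a QP.
 * qp_attr_mask is a hint; most providers fill the whole attribute
 * structure regardless, and the result lands in qp_attr.qp_state.
 */
static int example_read_qp_state(struct ib_qp *qp, enum ib_qp_state *state)
{
	struct ib_qp_attr qp_attr;
	struct ib_qp_init_attr qp_init_attr;
	int ret;

	ret = ib_query_qp(qp, &qp_attr, IB_QP_STATE, &qp_init_attr);
	if (!ret)
		*state = qp_attr.qp_state;
	return ret;
}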
| 1278 | |
Sean Hefty | 0e0ec7e | 2011-08-08 15:31:51 -0700 | [diff] [blame] | 1279 | int ib_close_qp(struct ib_qp *qp) |
| 1280 | { |
| 1281 | struct ib_qp *real_qp; |
| 1282 | unsigned long flags; |
| 1283 | |
| 1284 | real_qp = qp->real_qp; |
| 1285 | if (real_qp == qp) |
| 1286 | return -EINVAL; |
| 1287 | |
| 1288 | spin_lock_irqsave(&real_qp->device->event_handler_lock, flags); |
| 1289 | list_del(&qp->open_list); |
| 1290 | spin_unlock_irqrestore(&real_qp->device->event_handler_lock, flags); |
| 1291 | |
| 1292 | atomic_dec(&real_qp->usecnt); |
| 1293 | kfree(qp); |
| 1294 | |
| 1295 | return 0; |
| 1296 | } |
| 1297 | EXPORT_SYMBOL(ib_close_qp); |
| 1298 | |
| 1299 | static int __ib_destroy_shared_qp(struct ib_qp *qp) |
| 1300 | { |
| 1301 | struct ib_xrcd *xrcd; |
| 1302 | struct ib_qp *real_qp; |
| 1303 | int ret; |
| 1304 | |
| 1305 | real_qp = qp->real_qp; |
| 1306 | xrcd = real_qp->xrcd; |
| 1307 | |
| 1308 | mutex_lock(&xrcd->tgt_qp_mutex); |
| 1309 | ib_close_qp(qp); |
| 1310 | if (atomic_read(&real_qp->usecnt) == 0) |
| 1311 | list_del(&real_qp->xrcd_list); |
| 1312 | else |
| 1313 | real_qp = NULL; |
| 1314 | mutex_unlock(&xrcd->tgt_qp_mutex); |
| 1315 | |
| 1316 | if (real_qp) { |
| 1317 | ret = ib_destroy_qp(real_qp); |
| 1318 | if (!ret) |
| 1319 | atomic_dec(&xrcd->usecnt); |
| 1320 | else |
| 1321 | __ib_insert_xrcd_qp(xrcd, real_qp); |
| 1322 | } |
| 1323 | |
| 1324 | return 0; |
| 1325 | } |
| 1326 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1327 | int ib_destroy_qp(struct ib_qp *qp) |
| 1328 | { |
| 1329 | struct ib_pd *pd; |
| 1330 | struct ib_cq *scq, *rcq; |
| 1331 | struct ib_srq *srq; |
Yishai Hadas | a9017e2 | 2016-05-23 15:20:54 +0300 | [diff] [blame] | 1332 | struct ib_rwq_ind_table *ind_tbl; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1333 | int ret; |
| 1334 | |
Christoph Hellwig | fffb038 | 2016-05-03 18:01:07 +0200 | [diff] [blame] | 1335 | WARN_ON_ONCE(qp->mrs_used > 0); |
| 1336 | |
Sean Hefty | 0e0ec7e | 2011-08-08 15:31:51 -0700 | [diff] [blame] | 1337 | if (atomic_read(&qp->usecnt)) |
| 1338 | return -EBUSY; |
| 1339 | |
| 1340 | if (qp->real_qp != qp) |
| 1341 | return __ib_destroy_shared_qp(qp); |
| 1342 | |
Sean Hefty | b42b63c | 2011-05-23 19:59:25 -0700 | [diff] [blame] | 1343 | pd = qp->pd; |
| 1344 | scq = qp->send_cq; |
| 1345 | rcq = qp->recv_cq; |
| 1346 | srq = qp->srq; |
Yishai Hadas | a9017e2 | 2016-05-23 15:20:54 +0300 | [diff] [blame] | 1347 | ind_tbl = qp->rwq_ind_tbl; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1348 | |
Christoph Hellwig | a060b56 | 2016-05-03 18:01:09 +0200 | [diff] [blame] | 1349 | if (!qp->uobject) |
| 1350 | rdma_rw_cleanup_mrs(qp); |
| 1351 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1352 | ret = qp->device->destroy_qp(qp); |
| 1353 | if (!ret) { |
Sean Hefty | b42b63c | 2011-05-23 19:59:25 -0700 | [diff] [blame] | 1354 | if (pd) |
| 1355 | atomic_dec(&pd->usecnt); |
| 1356 | if (scq) |
| 1357 | atomic_dec(&scq->usecnt); |
| 1358 | if (rcq) |
| 1359 | atomic_dec(&rcq->usecnt); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1360 | if (srq) |
| 1361 | atomic_dec(&srq->usecnt); |
Yishai Hadas | a9017e2 | 2016-05-23 15:20:54 +0300 | [diff] [blame] | 1362 | if (ind_tbl) |
| 1363 | atomic_dec(&ind_tbl->usecnt); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1364 | } |
| 1365 | |
| 1366 | return ret; |
| 1367 | } |
| 1368 | EXPORT_SYMBOL(ib_destroy_qp); |
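
/*
 * Example (illustrative sketch): teardown ordering imposed by the usecnt
 * accounting above.  A CQ or SRQ still referenced by a QP reports -EBUSY
 * on destroy, so the QP must always be destroyed first; only then do the
 * CQ and SRQ usecnts drop back to zero.
 */
static void example_destroy_all(struct ib_qp *qp, struct ib_cq *cq,
				struct ib_srq *srq)
{
	WARN_ON(ib_destroy_qp(qp));	/* drops cq/srq usecnt */
	WARN_ON(ib_destroy_cq(cq));
	WARN_ON(ib_destroy_srq(srq));
}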
| 1369 | |
| 1370 | /* Completion queues */ |
| 1371 | |
| 1372 | struct ib_cq *ib_create_cq(struct ib_device *device, |
| 1373 | ib_comp_handler comp_handler, |
| 1374 | void (*event_handler)(struct ib_event *, void *), |
Matan Barak | 8e37210 | 2015-06-11 16:35:21 +0300 | [diff] [blame] | 1375 | void *cq_context, |
| 1376 | const struct ib_cq_init_attr *cq_attr) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1377 | { |
| 1378 | struct ib_cq *cq; |
| 1379 | |
Matan Barak | 8e37210 | 2015-06-11 16:35:21 +0300 | [diff] [blame] | 1380 | cq = device->create_cq(device, cq_attr, NULL, NULL); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1381 | |
| 1382 | if (!IS_ERR(cq)) { |
| 1383 | cq->device = device; |
Roland Dreier | b5e81bf | 2005-07-07 17:57:11 -0700 | [diff] [blame] | 1384 | cq->uobject = NULL; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1385 | cq->comp_handler = comp_handler; |
| 1386 | cq->event_handler = event_handler; |
| 1387 | cq->cq_context = cq_context; |
| 1388 | atomic_set(&cq->usecnt, 0); |
| 1389 | } |
| 1390 | |
| 1391 | return cq; |
| 1392 | } |
| 1393 | EXPORT_SYMBOL(ib_create_cq); |
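
/*
 * Example (illustrative sketch): creating a completion queue with room
 * for 256 entries on completion vector 0.  cq_context is opaque to the
 * core and is handed back to the completion handler on every event.
 */
static struct ib_cq *example_create_cq(struct ib_device *device,
				       ib_comp_handler handler, void *ctx)
{
	struct ib_cq_init_attr cq_attr = {
		.cqe	     = 256,	/* requested number of CQ entries */
		.comp_vector = 0,	/* completion interrupt vector */
	};

	return ib_create_cq(device, handler, NULL, ctx, &cq_attr);
}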
| 1394 | |
Eli Cohen | 2dd5716 | 2008-04-16 21:09:33 -0700 | [diff] [blame] | 1395 | int ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period) |
| 1396 | { |
| 1397 | return cq->device->modify_cq ? |
| 1398 | cq->device->modify_cq(cq, cq_count, cq_period) : -ENOSYS; |
| 1399 | } |
| 1400 | EXPORT_SYMBOL(ib_modify_cq); |
| 1401 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1402 | int ib_destroy_cq(struct ib_cq *cq) |
| 1403 | { |
| 1404 | if (atomic_read(&cq->usecnt)) |
| 1405 | return -EBUSY; |
| 1406 | |
| 1407 | return cq->device->destroy_cq(cq); |
| 1408 | } |
| 1409 | EXPORT_SYMBOL(ib_destroy_cq); |
| 1410 | |
Roland Dreier | a74cd4a | 2006-02-13 16:30:49 -0800 | [diff] [blame] | 1411 | int ib_resize_cq(struct ib_cq *cq, int cqe) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1412 | { |
Roland Dreier | 40de2e5 | 2005-11-08 11:10:25 -0800 | [diff] [blame] | 1413 | return cq->device->resize_cq ? |
Roland Dreier | 33b9b3e | 2006-01-30 14:29:21 -0800 | [diff] [blame] | 1414 | cq->device->resize_cq(cq, cqe, NULL) : -ENOSYS; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1415 | } |
| 1416 | EXPORT_SYMBOL(ib_resize_cq); |
| 1417 | |
| 1418 | /* Memory regions */ |
| 1419 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1420 | int ib_dereg_mr(struct ib_mr *mr) |
| 1421 | { |
Christoph Hellwig | ab67ed8 | 2015-12-23 19:12:54 +0100 | [diff] [blame] | 1422 | struct ib_pd *pd = mr->pd; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1423 | int ret; |
| 1424 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1425 | ret = mr->device->dereg_mr(mr); |
| 1426 | if (!ret) |
| 1427 | atomic_dec(&pd->usecnt); |
| 1428 | |
| 1429 | return ret; |
| 1430 | } |
| 1431 | EXPORT_SYMBOL(ib_dereg_mr); |
| 1432 | |
Sagi Grimberg | 9bee178 | 2015-07-30 10:32:35 +0300 | [diff] [blame] | 1433 | /** |
| 1434 | * ib_alloc_mr() - Allocates a memory region |
| 1435 | * @pd: protection domain associated with the region |
| 1436 | * @mr_type: memory region type |
| 1437 | * @max_num_sg: maximum sg entries available for registration. |
| 1438 | * |
| 1439 | * Notes: |
| 1440 | * Memory registration page/sg lists must not exceed max_num_sg.
| 1441 | * For mr_type IB_MR_TYPE_MEM_REG, the total length cannot exceed |
| 1442 | * max_num_sg * used_page_size. |
| 1443 | * |
| 1444 | */ |
| 1445 | struct ib_mr *ib_alloc_mr(struct ib_pd *pd, |
| 1446 | enum ib_mr_type mr_type, |
| 1447 | u32 max_num_sg) |
Sagi Grimberg | 17cd3a2 | 2014-02-23 14:19:04 +0200 | [diff] [blame] | 1448 | { |
| 1449 | struct ib_mr *mr; |
| 1450 | |
Sagi Grimberg | d9f272c | 2015-07-30 10:32:48 +0300 | [diff] [blame] | 1451 | if (!pd->device->alloc_mr) |
Sagi Grimberg | 17cd3a2 | 2014-02-23 14:19:04 +0200 | [diff] [blame] | 1452 | return ERR_PTR(-ENOSYS); |
| 1453 | |
Sagi Grimberg | d9f272c | 2015-07-30 10:32:48 +0300 | [diff] [blame] | 1454 | mr = pd->device->alloc_mr(pd, mr_type, max_num_sg); |
Sagi Grimberg | 17cd3a2 | 2014-02-23 14:19:04 +0200 | [diff] [blame] | 1455 | if (!IS_ERR(mr)) { |
| 1456 | mr->device = pd->device; |
| 1457 | mr->pd = pd; |
| 1458 | mr->uobject = NULL; |
| 1459 | atomic_inc(&pd->usecnt); |
Steve Wise | d4a85c3 | 2016-05-03 18:01:08 +0200 | [diff] [blame] | 1460 | mr->need_inval = false; |
Sagi Grimberg | 17cd3a2 | 2014-02-23 14:19:04 +0200 | [diff] [blame] | 1461 | } |
| 1462 | |
| 1463 | return mr; |
| 1464 | } |
Sagi Grimberg | 9bee178 | 2015-07-30 10:32:35 +0300 | [diff] [blame] | 1465 | EXPORT_SYMBOL(ib_alloc_mr); |
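
/*
 * Example (illustrative sketch): allocating a fast-registration MR able
 * to map up to 32 scatterlist entries.  The MR covers no memory yet; it
 * still has to be populated with ib_map_mr_sg() and activated by posting
 * an IB_WR_REG_MR work request.
 */
static struct ib_mr *example_alloc_reg_mr(struct ib_pd *pd)
{
	return ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, 32);
}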
Steve Wise | 00f7ec3 | 2008-07-14 23:48:45 -0700 | [diff] [blame] | 1466 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1467 | /* "Fast" memory regions */ |
| 1468 | |
| 1469 | struct ib_fmr *ib_alloc_fmr(struct ib_pd *pd, |
| 1470 | int mr_access_flags, |
| 1471 | struct ib_fmr_attr *fmr_attr) |
| 1472 | { |
| 1473 | struct ib_fmr *fmr; |
| 1474 | |
| 1475 | if (!pd->device->alloc_fmr) |
| 1476 | return ERR_PTR(-ENOSYS); |
| 1477 | |
| 1478 | fmr = pd->device->alloc_fmr(pd, mr_access_flags, fmr_attr); |
| 1479 | if (!IS_ERR(fmr)) { |
| 1480 | fmr->device = pd->device; |
| 1481 | fmr->pd = pd; |
| 1482 | atomic_inc(&pd->usecnt); |
| 1483 | } |
| 1484 | |
| 1485 | return fmr; |
| 1486 | } |
| 1487 | EXPORT_SYMBOL(ib_alloc_fmr); |
| 1488 | |
| 1489 | int ib_unmap_fmr(struct list_head *fmr_list) |
| 1490 | { |
| 1491 | struct ib_fmr *fmr; |
| 1492 | |
| 1493 | if (list_empty(fmr_list)) |
| 1494 | return 0; |
| 1495 | |
| 1496 | fmr = list_entry(fmr_list->next, struct ib_fmr, list); |
| 1497 | return fmr->device->unmap_fmr(fmr_list); |
| 1498 | } |
| 1499 | EXPORT_SYMBOL(ib_unmap_fmr); |
| 1500 | |
| 1501 | int ib_dealloc_fmr(struct ib_fmr *fmr) |
| 1502 | { |
| 1503 | struct ib_pd *pd; |
| 1504 | int ret; |
| 1505 | |
| 1506 | pd = fmr->pd; |
| 1507 | ret = fmr->device->dealloc_fmr(fmr); |
| 1508 | if (!ret) |
| 1509 | atomic_dec(&pd->usecnt); |
| 1510 | |
| 1511 | return ret; |
| 1512 | } |
| 1513 | EXPORT_SYMBOL(ib_dealloc_fmr); |
| 1514 | |
| 1515 | /* Multicast groups */ |
| 1516 | |
| 1517 | int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid) |
| 1518 | { |
Or Gerlitz | c3bccbfb | 2012-04-29 17:04:22 +0300 | [diff] [blame] | 1519 | int ret; |
| 1520 | |
Jack Morgenstein | 0c33aee | 2005-09-26 11:47:53 -0700 | [diff] [blame] | 1521 | if (!qp->device->attach_mcast) |
| 1522 | return -ENOSYS; |
| 1523 | if (gid->raw[0] != 0xff || qp->qp_type != IB_QPT_UD) |
| 1524 | return -EINVAL; |
| 1525 | |
Or Gerlitz | c3bccbfb | 2012-04-29 17:04:22 +0300 | [diff] [blame] | 1526 | ret = qp->device->attach_mcast(qp, gid, lid); |
| 1527 | if (!ret) |
| 1528 | atomic_inc(&qp->usecnt); |
| 1529 | return ret; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1530 | } |
| 1531 | EXPORT_SYMBOL(ib_attach_mcast); |
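
/*
 * Example (illustrative sketch): attaching a UD QP to a multicast group
 * previously joined through the SA.  The GID must be a multicast GID
 * (raw[0] == 0xff) and the QP must be of type IB_QPT_UD, or the checks
 * above fail with -EINVAL.
 */
static int example_join_mcast(struct ib_qp *qp, union ib_gid *mgid, u16 mlid)
{
	return ib_attach_mcast(qp, mgid, mlid);
}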
| 1532 | |
| 1533 | int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid) |
| 1534 | { |
Or Gerlitz | c3bccbfb | 2012-04-29 17:04:22 +0300 | [diff] [blame] | 1535 | int ret; |
| 1536 | |
Jack Morgenstein | 0c33aee | 2005-09-26 11:47:53 -0700 | [diff] [blame] | 1537 | if (!qp->device->detach_mcast) |
| 1538 | return -ENOSYS; |
| 1539 | if (gid->raw[0] != 0xff || qp->qp_type != IB_QPT_UD) |
| 1540 | return -EINVAL; |
| 1541 | |
Or Gerlitz | c3bccbfb | 2012-04-29 17:04:22 +0300 | [diff] [blame] | 1542 | ret = qp->device->detach_mcast(qp, gid, lid); |
| 1543 | if (!ret) |
| 1544 | atomic_dec(&qp->usecnt); |
| 1545 | return ret; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1546 | } |
| 1547 | EXPORT_SYMBOL(ib_detach_mcast); |
Sean Hefty | 59991f9 | 2011-05-23 17:52:46 -0700 | [diff] [blame] | 1548 | |
| 1549 | struct ib_xrcd *ib_alloc_xrcd(struct ib_device *device) |
| 1550 | { |
| 1551 | struct ib_xrcd *xrcd; |
| 1552 | |
| 1553 | if (!device->alloc_xrcd) |
| 1554 | return ERR_PTR(-ENOSYS); |
| 1555 | |
| 1556 | xrcd = device->alloc_xrcd(device, NULL, NULL); |
| 1557 | if (!IS_ERR(xrcd)) { |
| 1558 | xrcd->device = device; |
Sean Hefty | 53d0bd1 | 2011-05-24 08:33:46 -0700 | [diff] [blame] | 1559 | xrcd->inode = NULL; |
Sean Hefty | 59991f9 | 2011-05-23 17:52:46 -0700 | [diff] [blame] | 1560 | atomic_set(&xrcd->usecnt, 0); |
Sean Hefty | d3d72d9 | 2011-05-26 23:06:44 -0700 | [diff] [blame] | 1561 | mutex_init(&xrcd->tgt_qp_mutex); |
| 1562 | INIT_LIST_HEAD(&xrcd->tgt_qp_list); |
Sean Hefty | 59991f9 | 2011-05-23 17:52:46 -0700 | [diff] [blame] | 1563 | } |
| 1564 | |
| 1565 | return xrcd; |
| 1566 | } |
| 1567 | EXPORT_SYMBOL(ib_alloc_xrcd); |
| 1568 | |
| 1569 | int ib_dealloc_xrcd(struct ib_xrcd *xrcd) |
| 1570 | { |
Sean Hefty | d3d72d9 | 2011-05-26 23:06:44 -0700 | [diff] [blame] | 1571 | struct ib_qp *qp; |
| 1572 | int ret; |
| 1573 | |
Sean Hefty | 59991f9 | 2011-05-23 17:52:46 -0700 | [diff] [blame] | 1574 | if (atomic_read(&xrcd->usecnt)) |
| 1575 | return -EBUSY; |
| 1576 | |
Sean Hefty | d3d72d9 | 2011-05-26 23:06:44 -0700 | [diff] [blame] | 1577 | while (!list_empty(&xrcd->tgt_qp_list)) { |
| 1578 | qp = list_entry(xrcd->tgt_qp_list.next, struct ib_qp, xrcd_list); |
| 1579 | ret = ib_destroy_qp(qp); |
| 1580 | if (ret) |
| 1581 | return ret; |
| 1582 | } |
| 1583 | |
Sean Hefty | 59991f9 | 2011-05-23 17:52:46 -0700 | [diff] [blame] | 1584 | return xrcd->device->dealloc_xrcd(xrcd); |
| 1585 | } |
| 1586 | EXPORT_SYMBOL(ib_dealloc_xrcd); |
Hadar Hen Zion | 319a441 | 2013-08-07 14:01:59 +0300 | [diff] [blame] | 1587 | |
Yishai Hadas | 5fd251c | 2016-05-23 15:20:48 +0300 | [diff] [blame] | 1588 | /** |
| 1589 | * ib_create_wq - Creates a WQ associated with the specified protection |
| 1590 | * domain. |
| 1591 | * @pd: The protection domain associated with the WQ. |
| 1592 | * @wq_attr: A list of initial attributes required to create the
| 1593 | * WQ. If WQ creation succeeds, then the attributes are updated to
| 1594 | * the actual capabilities of the created WQ.
| 1595 | *
| 1596 | * wq_attr->max_wr and wq_attr->max_sge determine
| 1597 | * the requested size of the WQ, and are set to the actual values allocated
| 1598 | * on return.
| 1599 | * If ib_create_wq() succeeds, then max_wr and max_sge will always be |
| 1600 | * at least as large as the requested values. |
| 1601 | */ |
| 1602 | struct ib_wq *ib_create_wq(struct ib_pd *pd, |
| 1603 | struct ib_wq_init_attr *wq_attr) |
| 1604 | { |
| 1605 | struct ib_wq *wq; |
| 1606 | |
| 1607 | if (!pd->device->create_wq) |
| 1608 | return ERR_PTR(-ENOSYS); |
| 1609 | |
| 1610 | wq = pd->device->create_wq(pd, wq_attr, NULL); |
| 1611 | if (!IS_ERR(wq)) { |
| 1612 | wq->event_handler = wq_attr->event_handler; |
| 1613 | wq->wq_context = wq_attr->wq_context; |
| 1614 | wq->wq_type = wq_attr->wq_type; |
| 1615 | wq->cq = wq_attr->cq; |
| 1616 | wq->device = pd->device; |
| 1617 | wq->pd = pd; |
| 1618 | wq->uobject = NULL; |
| 1619 | atomic_inc(&pd->usecnt); |
| 1620 | atomic_inc(&wq_attr->cq->usecnt); |
| 1621 | atomic_set(&wq->usecnt, 0); |
| 1622 | } |
| 1623 | return wq; |
| 1624 | } |
| 1625 | EXPORT_SYMBOL(ib_create_wq); |
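
/*
 * Example (illustrative sketch): creating a receive work queue, e.g. as
 * one slice of an RSS set.  max_wr and max_sge are requests; on success
 * the values written back into wq_attr are at least as large.
 */
static struct ib_wq *example_create_rq(struct ib_pd *pd, struct ib_cq *cq)
{
	struct ib_wq_init_attr wq_attr = {
		.wq_type = IB_WQT_RQ,
		.max_wr	 = 128,		/* requested receive queue depth */
		.max_sge = 2,		/* requested scatter entries per WR */
		.cq	 = cq,
	};

	return ib_create_wq(pd, &wq_attr);
}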
| 1626 | |
| 1627 | /** |
| 1628 | * ib_destroy_wq - Destroys the specified WQ. |
| 1629 | * @wq: The WQ to destroy. |
| 1630 | */ |
| 1631 | int ib_destroy_wq(struct ib_wq *wq) |
| 1632 | { |
| 1633 | int err; |
| 1634 | struct ib_cq *cq = wq->cq; |
| 1635 | struct ib_pd *pd = wq->pd; |
| 1636 | |
| 1637 | if (atomic_read(&wq->usecnt)) |
| 1638 | return -EBUSY; |
| 1639 | |
| 1640 | err = wq->device->destroy_wq(wq); |
| 1641 | if (!err) { |
| 1642 | atomic_dec(&pd->usecnt); |
| 1643 | atomic_dec(&cq->usecnt); |
| 1644 | } |
| 1645 | return err; |
| 1646 | } |
| 1647 | EXPORT_SYMBOL(ib_destroy_wq); |
| 1648 | |
| 1649 | /** |
| 1650 | * ib_modify_wq - Modifies the specified WQ. |
| 1651 | * @wq: The WQ to modify. |
| 1652 | * @wq_attr: On input, specifies the WQ attributes to modify; on output,
| 1653 | * returns the current values of the selected WQ attributes.
| 1654 | * @wq_attr_mask: A bit-mask used to specify which attributes of the WQ
| 1655 | * are being modified.
| 1656 | */ |
| 1657 | int ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr, |
| 1658 | u32 wq_attr_mask) |
| 1659 | { |
| 1660 | int err; |
| 1661 | |
| 1662 | if (!wq->device->modify_wq) |
| 1663 | return -ENOSYS; |
| 1664 | |
| 1665 | err = wq->device->modify_wq(wq, wq_attr, wq_attr_mask, NULL); |
| 1666 | return err; |
| 1667 | } |
| 1668 | EXPORT_SYMBOL(ib_modify_wq); |
| 1669 | |
Yishai Hadas | 6d39786 | 2016-05-23 15:20:51 +0300 | [diff] [blame] | 1670 | /**
| 1671 | * ib_create_rwq_ind_table - Creates an RQ Indirection Table.
| 1672 | * @device: The device on which to create the rwq indirection table.
| 1673 | * @init_attr: A list of initial attributes required to
| 1674 | * create the Indirection Table.
| 1675 | *
| 1676 | * Note: init_attr->ind_tbl must outlive the created ib_rwq_ind_table
| 1677 | * object; the caller is responsible for allocating and freeing
| 1678 | * that array.
| 1679 | */ |
| 1680 | struct ib_rwq_ind_table *ib_create_rwq_ind_table(struct ib_device *device, |
| 1681 | struct ib_rwq_ind_table_init_attr *init_attr) |
| 1682 | { |
| 1683 | struct ib_rwq_ind_table *rwq_ind_table; |
| 1684 | int i; |
| 1685 | u32 table_size; |
| 1686 | |
| 1687 | if (!device->create_rwq_ind_table) |
| 1688 | return ERR_PTR(-ENOSYS); |
| 1689 | |
| 1690 | table_size = (1 << init_attr->log_ind_tbl_size); |
| 1691 | rwq_ind_table = device->create_rwq_ind_table(device, |
| 1692 | init_attr, NULL); |
| 1693 | if (IS_ERR(rwq_ind_table)) |
| 1694 | return rwq_ind_table; |
| 1695 | |
| 1696 | rwq_ind_table->ind_tbl = init_attr->ind_tbl; |
| 1697 | rwq_ind_table->log_ind_tbl_size = init_attr->log_ind_tbl_size; |
| 1698 | rwq_ind_table->device = device; |
| 1699 | rwq_ind_table->uobject = NULL; |
| 1700 | atomic_set(&rwq_ind_table->usecnt, 0); |
| 1701 | |
| 1702 | for (i = 0; i < table_size; i++) |
| 1703 | atomic_inc(&rwq_ind_table->ind_tbl[i]->usecnt); |
| 1704 | |
| 1705 | return rwq_ind_table; |
| 1706 | } |
| 1707 | EXPORT_SYMBOL(ib_create_rwq_ind_table); |
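
/*
 * Example (illustrative sketch): building a two-entry RSS indirection
 * table over caller-owned WQs.  Per the note above, the wqs array must
 * stay allocated for as long as the returned object exists.
 */
static struct ib_rwq_ind_table *
example_create_ind_table(struct ib_device *device, struct ib_wq **wqs)
{
	struct ib_rwq_ind_table_init_attr init_attr = {
		.log_ind_tbl_size = 1,	/* 1 << 1 == 2 entries */
		.ind_tbl	  = wqs,
	};

	return ib_create_rwq_ind_table(device, &init_attr);
}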
| 1708 | |
| 1709 | /**
| 1710 | * ib_destroy_rwq_ind_table - Destroys the specified Indirection Table.
| 1711 | * @rwq_ind_table: The Indirection Table to destroy.
| 1712 | */ |
| 1713 | int ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *rwq_ind_table) |
| 1714 | { |
| 1715 | int err, i; |
| 1716 | u32 table_size = (1 << rwq_ind_table->log_ind_tbl_size); |
| 1717 | struct ib_wq **ind_tbl = rwq_ind_table->ind_tbl; |
| 1718 | |
| 1719 | if (atomic_read(&rwq_ind_table->usecnt)) |
| 1720 | return -EBUSY; |
| 1721 | |
| 1722 | err = rwq_ind_table->device->destroy_rwq_ind_table(rwq_ind_table); |
| 1723 | if (!err) { |
| 1724 | for (i = 0; i < table_size; i++) |
| 1725 | atomic_dec(&ind_tbl[i]->usecnt); |
| 1726 | } |
| 1727 | |
| 1728 | return err; |
| 1729 | } |
| 1730 | EXPORT_SYMBOL(ib_destroy_rwq_ind_table); |
| 1731 | |
Hadar Hen Zion | 319a441 | 2013-08-07 14:01:59 +0300 | [diff] [blame] | 1732 | struct ib_flow *ib_create_flow(struct ib_qp *qp, |
| 1733 | struct ib_flow_attr *flow_attr, |
| 1734 | int domain) |
| 1735 | { |
| 1736 | struct ib_flow *flow_id;
| | 
| 1737 | if (!qp->device->create_flow)
| 1738 | return ERR_PTR(-ENOSYS); |
| 1739 | |
| 1740 | flow_id = qp->device->create_flow(qp, flow_attr, domain); |
Mark Bloch | 8ecc798 | 2016-10-27 16:36:30 +0300 | [diff] [blame] | 1741 | if (!IS_ERR(flow_id)) { |
Hadar Hen Zion | 319a441 | 2013-08-07 14:01:59 +0300 | [diff] [blame] | 1742 | atomic_inc(&qp->usecnt); |
Mark Bloch | 8ecc798 | 2016-10-27 16:36:30 +0300 | [diff] [blame] | 1743 | flow_id->qp = qp; |
| 1744 | } |
Hadar Hen Zion | 319a441 | 2013-08-07 14:01:59 +0300 | [diff] [blame] | 1745 | return flow_id; |
| 1746 | } |
| 1747 | EXPORT_SYMBOL(ib_create_flow); |
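
/*
 * Example (illustrative sketch): steering all port traffic to a QP with a
 * sniffer flow that carries no flow specifications.  Rules with
 * num_of_specs > 0 append ib_flow_spec_* entries directly after
 * ib_flow_attr and account for them in .size.
 */
static struct ib_flow *example_sniffer_flow(struct ib_qp *qp, u8 port)
{
	struct ib_flow_attr attr = {
		.type	      = IB_FLOW_ATTR_SNIFFER,
		.size	      = sizeof(attr),
		.num_of_specs = 0,	/* no ib_flow_spec_* entries follow */
		.port	      = port,
	};

	return ib_create_flow(qp, &attr, IB_FLOW_DOMAIN_USER);
}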
| 1748 | |
| 1749 | int ib_destroy_flow(struct ib_flow *flow_id) |
| 1750 | { |
| 1751 | int err; |
| 1752 | struct ib_qp *qp = flow_id->qp; |
| 1753 | |
| 1754 | err = qp->device->destroy_flow(flow_id); |
| 1755 | if (!err) |
| 1756 | atomic_dec(&qp->usecnt); |
| 1757 | return err; |
| 1758 | } |
| 1759 | EXPORT_SYMBOL(ib_destroy_flow); |
Sagi Grimberg | 1b01d33 | 2014-02-23 14:19:05 +0200 | [diff] [blame] | 1760 | |
| 1761 | int ib_check_mr_status(struct ib_mr *mr, u32 check_mask, |
| 1762 | struct ib_mr_status *mr_status) |
| 1763 | { |
| 1764 | return mr->device->check_mr_status ? |
| 1765 | mr->device->check_mr_status(mr, check_mask, mr_status) : -ENOSYS; |
| 1766 | } |
| 1767 | EXPORT_SYMBOL(ib_check_mr_status); |
Sagi Grimberg | 4c67e2b | 2015-10-13 19:11:24 +0300 | [diff] [blame] | 1768 | |
Eli Cohen | 50174a7 | 2016-03-11 22:58:38 +0200 | [diff] [blame] | 1769 | int ib_set_vf_link_state(struct ib_device *device, int vf, u8 port, |
| 1770 | int state) |
| 1771 | { |
| 1772 | if (!device->set_vf_link_state) |
| 1773 | return -ENOSYS; |
| 1774 | |
| 1775 | return device->set_vf_link_state(device, vf, port, state); |
| 1776 | } |
| 1777 | EXPORT_SYMBOL(ib_set_vf_link_state); |
| 1778 | |
| 1779 | int ib_get_vf_config(struct ib_device *device, int vf, u8 port, |
| 1780 | struct ifla_vf_info *info) |
| 1781 | { |
| 1782 | if (!device->get_vf_config) |
| 1783 | return -ENOSYS; |
| 1784 | |
| 1785 | return device->get_vf_config(device, vf, port, info); |
| 1786 | } |
| 1787 | EXPORT_SYMBOL(ib_get_vf_config); |
| 1788 | |
| 1789 | int ib_get_vf_stats(struct ib_device *device, int vf, u8 port, |
| 1790 | struct ifla_vf_stats *stats) |
| 1791 | { |
| 1792 | if (!device->get_vf_stats) |
| 1793 | return -ENOSYS; |
| 1794 | |
| 1795 | return device->get_vf_stats(device, vf, port, stats); |
| 1796 | } |
| 1797 | EXPORT_SYMBOL(ib_get_vf_stats); |
| 1798 | |
| 1799 | int ib_set_vf_guid(struct ib_device *device, int vf, u8 port, u64 guid, |
| 1800 | int type) |
| 1801 | { |
| 1802 | if (!device->set_vf_guid) |
| 1803 | return -ENOSYS; |
| 1804 | |
| 1805 | return device->set_vf_guid(device, vf, port, guid, type); |
| 1806 | } |
| 1807 | EXPORT_SYMBOL(ib_set_vf_guid); |
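
/*
 * Example (illustrative sketch): putting VF 0 on port 1 back into the
 * "auto" link state through the SR-IOV helpers above.  The constant
 * IFLA_VF_LINK_STATE_AUTO comes from <linux/if_link.h>; this sketch
 * assumes that header is reachable from the existing includes.
 */
static int example_vf_link_auto(struct ib_device *device)
{
	return ib_set_vf_link_state(device, 0, 1, IFLA_VF_LINK_STATE_AUTO);
}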
| 1808 | |
Sagi Grimberg | 4c67e2b | 2015-10-13 19:11:24 +0300 | [diff] [blame] | 1809 | /** |
| 1810 | * ib_map_mr_sg() - Map the largest prefix of a dma mapped SG list |
| 1811 | * and set the resulting page list in the memory region.
| 1812 | * @mr: memory region |
| 1813 | * @sg: dma mapped scatterlist |
| 1814 | * @sg_nents: number of entries in sg |
Christoph Hellwig | ff2ba99 | 2016-05-03 18:01:04 +0200 | [diff] [blame] | 1815 | * @sg_offset: offset in bytes into sg |
Sagi Grimberg | 4c67e2b | 2015-10-13 19:11:24 +0300 | [diff] [blame] | 1816 | * @page_size: page vector desired page size |
| 1817 | * |
| 1818 | * Constraints: |
| 1819 | * - The first sg element is allowed to have an offset. |
Bart Van Assche | 5274612 | 2016-09-26 09:09:42 -0700 | [diff] [blame] | 1820 | * - Each sg element must either be aligned to page_size or virtually |
| 1821 | * contiguous to the previous element. In case an sg element has a |
| 1822 | * non-contiguous offset, the mapping prefix will not include it. |
Sagi Grimberg | 4c67e2b | 2015-10-13 19:11:24 +0300 | [diff] [blame] | 1823 | * - The last sg element is allowed to have length less than page_size. |
| 1824 | * - If the sg_nents total byte length exceeds the mr max_num_sg * page_size,
| 1825 | * then only max_num_sg entries will be mapped. |
Bart Van Assche | 5274612 | 2016-09-26 09:09:42 -0700 | [diff] [blame] | 1826 | * - If the MR was allocated with type IB_MR_TYPE_SG_GAPS, none of these |
Sagi Grimberg | f5aa915 | 2016-02-29 19:07:32 +0200 | [diff] [blame] | 1827 | * constraints holds and the page_size argument is ignored. |
Sagi Grimberg | 4c67e2b | 2015-10-13 19:11:24 +0300 | [diff] [blame] | 1828 | * |
| 1829 | * Returns the number of sg elements that were mapped to the memory region. |
| 1830 | * |
| 1831 | * After this completes successfully, the memory region |
| 1832 | * is ready for registration. |
| 1833 | */ |
Christoph Hellwig | ff2ba99 | 2016-05-03 18:01:04 +0200 | [diff] [blame] | 1834 | int ib_map_mr_sg(struct ib_mr *mr, struct scatterlist *sg, int sg_nents, |
Bart Van Assche | 9aa8b32 | 2016-05-12 10:49:15 -0700 | [diff] [blame] | 1835 | unsigned int *sg_offset, unsigned int page_size) |
Sagi Grimberg | 4c67e2b | 2015-10-13 19:11:24 +0300 | [diff] [blame] | 1836 | { |
| 1837 | if (unlikely(!mr->device->map_mr_sg)) |
| 1838 | return -ENOSYS; |
| 1839 | |
| 1840 | mr->page_size = page_size; |
| 1841 | |
Christoph Hellwig | ff2ba99 | 2016-05-03 18:01:04 +0200 | [diff] [blame] | 1842 | return mr->device->map_mr_sg(mr, sg, sg_nents, sg_offset); |
Sagi Grimberg | 4c67e2b | 2015-10-13 19:11:24 +0300 | [diff] [blame] | 1843 | } |
| 1844 | EXPORT_SYMBOL(ib_map_mr_sg); |
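
/*
 * Example (illustrative sketch): mapping a DMA-mapped scatterlist into an
 * MR at PAGE_SIZE granularity and treating a partial mapping as a hard
 * error.  Real consumers (or the rdma/rw.h helpers) would instead
 * register the mapped prefix and continue with the remainder.
 */
static int example_map_whole_sg(struct ib_mr *mr, struct scatterlist *sg,
				int sg_nents)
{
	int n;

	n = ib_map_mr_sg(mr, sg, sg_nents, NULL, PAGE_SIZE);
	if (n < 0)
		return n;		/* provider or mapping failure */
	if (n < sg_nents)
		return -EINVAL;		/* a page gap stopped the mapping */
	return 0;
}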
| 1845 | |
| 1846 | /** |
| 1847 | * ib_sg_to_pages() - Convert the largest prefix of a sg list |
| 1848 | * to a page vector |
| 1849 | * @mr: memory region |
| 1850 | * @sgl: dma mapped scatterlist |
| 1851 | * @sg_nents: number of entries in sg |
Bart Van Assche | 9aa8b32 | 2016-05-12 10:49:15 -0700 | [diff] [blame] | 1852 | * @sg_offset_p: IN: start offset in bytes into sg |
| 1853 | * OUT: offset in bytes for element n of the sg of the first |
| 1854 | * byte that has not been processed where n is the return |
| 1855 | * value of this function. |
Sagi Grimberg | 4c67e2b | 2015-10-13 19:11:24 +0300 | [diff] [blame] | 1856 | * @set_page: driver page assignment function pointer |
| 1857 | * |
Bart Van Assche | 8f5ba10 | 2015-12-03 16:04:17 -0800 | [diff] [blame] | 1858 | * Core service helper for drivers to convert the largest |
Sagi Grimberg | 4c67e2b | 2015-10-13 19:11:24 +0300 | [diff] [blame] | 1859 | * prefix of given sg list to a page vector. The sg list |
| 1860 | * prefix converted is the prefix that meets the requirements
| 1861 | * of ib_map_mr_sg. |
| 1862 | * |
| 1863 | * Returns the number of sg elements that were assigned to |
| 1864 | * a page vector. |
| 1865 | */ |
Christoph Hellwig | ff2ba99 | 2016-05-03 18:01:04 +0200 | [diff] [blame] | 1866 | int ib_sg_to_pages(struct ib_mr *mr, struct scatterlist *sgl, int sg_nents, |
Bart Van Assche | 9aa8b32 | 2016-05-12 10:49:15 -0700 | [diff] [blame] | 1867 | unsigned int *sg_offset_p, int (*set_page)(struct ib_mr *, u64)) |
Sagi Grimberg | 4c67e2b | 2015-10-13 19:11:24 +0300 | [diff] [blame] | 1868 | { |
| 1869 | struct scatterlist *sg; |
Bart Van Assche | b6aeb98 | 2015-12-29 10:45:03 +0100 | [diff] [blame] | 1870 | u64 last_end_dma_addr = 0; |
Bart Van Assche | 9aa8b32 | 2016-05-12 10:49:15 -0700 | [diff] [blame] | 1871 | unsigned int sg_offset = sg_offset_p ? *sg_offset_p : 0; |
Sagi Grimberg | 4c67e2b | 2015-10-13 19:11:24 +0300 | [diff] [blame] | 1872 | unsigned int last_page_off = 0; |
| 1873 | u64 page_mask = ~((u64)mr->page_size - 1); |
Bart Van Assche | 8f5ba10 | 2015-12-03 16:04:17 -0800 | [diff] [blame] | 1874 | int i, ret; |
Sagi Grimberg | 4c67e2b | 2015-10-13 19:11:24 +0300 | [diff] [blame] | 1875 | |
Bart Van Assche | 9aa8b32 | 2016-05-12 10:49:15 -0700 | [diff] [blame] | 1876 | if (unlikely(sg_nents <= 0 || sg_offset > sg_dma_len(&sgl[0]))) |
| 1877 | return -EINVAL; |
| 1878 | |
Christoph Hellwig | ff2ba99 | 2016-05-03 18:01:04 +0200 | [diff] [blame] | 1879 | mr->iova = sg_dma_address(&sgl[0]) + sg_offset; |
Sagi Grimberg | 4c67e2b | 2015-10-13 19:11:24 +0300 | [diff] [blame] | 1880 | mr->length = 0; |
| 1881 | |
| 1882 | for_each_sg(sgl, sg, sg_nents, i) { |
Christoph Hellwig | ff2ba99 | 2016-05-03 18:01:04 +0200 | [diff] [blame] | 1883 | u64 dma_addr = sg_dma_address(sg) + sg_offset; |
Bart Van Assche | 9aa8b32 | 2016-05-12 10:49:15 -0700 | [diff] [blame] | 1884 | u64 prev_addr = dma_addr; |
Christoph Hellwig | ff2ba99 | 2016-05-03 18:01:04 +0200 | [diff] [blame] | 1885 | unsigned int dma_len = sg_dma_len(sg) - sg_offset; |
Sagi Grimberg | 4c67e2b | 2015-10-13 19:11:24 +0300 | [diff] [blame] | 1886 | u64 end_dma_addr = dma_addr + dma_len; |
| 1887 | u64 page_addr = dma_addr & page_mask; |
| 1888 | |
Bart Van Assche | 8f5ba10 | 2015-12-03 16:04:17 -0800 | [diff] [blame] | 1889 | /* |
| 1890 | * For the second and later elements, check whether either the |
| 1891 | * end of element i-1 or the start of element i is not aligned |
| 1892 | * on a page boundary. |
| 1893 | */ |
| 1894 | if (i && (last_page_off != 0 || page_addr != dma_addr)) { |
| 1895 | /* Stop mapping if there is a gap. */ |
| 1896 | if (last_end_dma_addr != dma_addr) |
| 1897 | break; |
Sagi Grimberg | 4c67e2b | 2015-10-13 19:11:24 +0300 | [diff] [blame] | 1898 | |
Bart Van Assche | 8f5ba10 | 2015-12-03 16:04:17 -0800 | [diff] [blame] | 1899 | /* |
| 1900 | * Coalesce this element with the last. If it is small |
| 1901 | * enough just update mr->length. Otherwise start |
| 1902 | * mapping from the next page. |
| 1903 | */ |
| 1904 | goto next_page; |
Sagi Grimberg | 4c67e2b | 2015-10-13 19:11:24 +0300 | [diff] [blame] | 1905 | } |
| 1906 | |
| 1907 | do { |
Bart Van Assche | 8f5ba10 | 2015-12-03 16:04:17 -0800 | [diff] [blame] | 1908 | ret = set_page(mr, page_addr); |
Bart Van Assche | 9aa8b32 | 2016-05-12 10:49:15 -0700 | [diff] [blame] | 1909 | if (unlikely(ret < 0)) { |
| 1910 | sg_offset = prev_addr - sg_dma_address(sg); |
| 1911 | mr->length += prev_addr - dma_addr; |
| 1912 | if (sg_offset_p) |
| 1913 | *sg_offset_p = sg_offset; |
| 1914 | return i || sg_offset ? i : ret; |
| 1915 | } |
| 1916 | prev_addr = page_addr; |
Bart Van Assche | 8f5ba10 | 2015-12-03 16:04:17 -0800 | [diff] [blame] | 1917 | next_page: |
Sagi Grimberg | 4c67e2b | 2015-10-13 19:11:24 +0300 | [diff] [blame] | 1918 | page_addr += mr->page_size; |
| 1919 | } while (page_addr < end_dma_addr); |
| 1920 | |
| 1921 | mr->length += dma_len; |
| 1922 | last_end_dma_addr = end_dma_addr; |
Sagi Grimberg | 4c67e2b | 2015-10-13 19:11:24 +0300 | [diff] [blame] | 1923 | last_page_off = end_dma_addr & ~page_mask; |
Christoph Hellwig | ff2ba99 | 2016-05-03 18:01:04 +0200 | [diff] [blame] | 1924 | |
| 1925 | sg_offset = 0; |
Sagi Grimberg | 4c67e2b | 2015-10-13 19:11:24 +0300 | [diff] [blame] | 1926 | } |
| 1927 | |
Bart Van Assche | 9aa8b32 | 2016-05-12 10:49:15 -0700 | [diff] [blame] | 1928 | if (sg_offset_p) |
| 1929 | *sg_offset_p = 0; |
Sagi Grimberg | 4c67e2b | 2015-10-13 19:11:24 +0300 | [diff] [blame] | 1930 | return i; |
| 1931 | } |
| 1932 | EXPORT_SYMBOL(ib_sg_to_pages); |
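
/*
 * Example (illustrative sketch): how a provider's map_mr_sg method would
 * typically use ib_sg_to_pages().  struct example_mr and its fixed page
 * array are hypothetical; set_page() is invoked once per page-sized block
 * of the mapped prefix, and returning a negative value makes
 * ib_sg_to_pages() stop and report how far it got.
 */
struct example_mr {
	struct ib_mr	ibmr;
	u64		pages[32];	/* DMA addresses, one per page */
	int		npages;
};

static int example_set_page(struct ib_mr *ibmr, u64 addr)
{
	struct example_mr *emr = container_of(ibmr, struct example_mr, ibmr);

	if (emr->npages >= ARRAY_SIZE(emr->pages))
		return -ENOMEM;

	emr->pages[emr->npages++] = addr;
	return 0;
}

static int example_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
			     int sg_nents, unsigned int *sg_offset)
{
	struct example_mr *emr = container_of(ibmr, struct example_mr, ibmr);

	emr->npages = 0;
	return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, example_set_page);
}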
Steve Wise | 765d677 | 2016-02-17 08:15:41 -0800 | [diff] [blame] | 1933 | |
| 1934 | struct ib_drain_cqe { |
| 1935 | struct ib_cqe cqe; |
| 1936 | struct completion done; |
| 1937 | }; |
| 1938 | |
| 1939 | static void ib_drain_qp_done(struct ib_cq *cq, struct ib_wc *wc) |
| 1940 | { |
| 1941 | struct ib_drain_cqe *cqe = container_of(wc->wr_cqe, struct ib_drain_cqe, |
| 1942 | cqe); |
| 1943 | |
| 1944 | complete(&cqe->done); |
| 1945 | } |
| 1946 | |
| 1947 | /* |
| 1948 | * Post a WR and block until its completion is reaped for the SQ. |
| 1949 | */ |
| 1950 | static void __ib_drain_sq(struct ib_qp *qp) |
| 1951 | { |
| 1952 | struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR }; |
| 1953 | struct ib_drain_cqe sdrain; |
| 1954 | struct ib_send_wr swr = {}, *bad_swr; |
| 1955 | int ret; |
| 1956 | |
| 1957 | if (WARN_ONCE(qp->send_cq->poll_ctx == IB_POLL_DIRECT,
| 1958 | "IB_POLL_DIRECT poll_ctx not supported for drain\n"))
| 1959 | return;
| 1962 | |
| 1963 | swr.wr_cqe = &sdrain.cqe; |
| 1964 | sdrain.cqe.done = ib_drain_qp_done; |
| 1965 | init_completion(&sdrain.done); |
| 1966 | |
| 1967 | ret = ib_modify_qp(qp, &attr, IB_QP_STATE); |
| 1968 | if (ret) { |
| 1969 | WARN_ONCE(ret, "failed to drain send queue: %d\n", ret); |
| 1970 | return; |
| 1971 | } |
| 1972 | |
| 1973 | ret = ib_post_send(qp, &swr, &bad_swr); |
| 1974 | if (ret) { |
| 1975 | WARN_ONCE(ret, "failed to drain send queue: %d\n", ret); |
| 1976 | return; |
| 1977 | } |
| 1978 | |
| 1979 | wait_for_completion(&sdrain.done); |
| 1980 | } |
| 1981 | |
| 1982 | /* |
| 1983 | * Post a WR and block until its completion is reaped for the RQ. |
| 1984 | */ |
| 1985 | static void __ib_drain_rq(struct ib_qp *qp) |
| 1986 | { |
| 1987 | struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR }; |
| 1988 | struct ib_drain_cqe rdrain; |
| 1989 | struct ib_recv_wr rwr = {}, *bad_rwr; |
| 1990 | int ret; |
| 1991 | |
| 1992 | if (WARN_ONCE(qp->recv_cq->poll_ctx == IB_POLL_DIRECT,
| 1993 | "IB_POLL_DIRECT poll_ctx not supported for drain\n"))
| 1994 | return;
| 1997 | |
| 1998 | rwr.wr_cqe = &rdrain.cqe; |
| 1999 | rdrain.cqe.done = ib_drain_qp_done; |
| 2000 | init_completion(&rdrain.done); |
| 2001 | |
| 2002 | ret = ib_modify_qp(qp, &attr, IB_QP_STATE); |
| 2003 | if (ret) { |
| 2004 | WARN_ONCE(ret, "failed to drain recv queue: %d\n", ret); |
| 2005 | return; |
| 2006 | } |
| 2007 | |
| 2008 | ret = ib_post_recv(qp, &rwr, &bad_rwr); |
| 2009 | if (ret) { |
| 2010 | WARN_ONCE(ret, "failed to drain recv queue: %d\n", ret); |
| 2011 | return; |
| 2012 | } |
| 2013 | |
| 2014 | wait_for_completion(&rdrain.done); |
| 2015 | } |
| 2016 | |
| 2017 | /** |
| 2018 | * ib_drain_sq() - Block until all SQ CQEs have been consumed by the |
| 2019 | * application. |
| 2020 | * @qp: queue pair to drain |
| 2021 | * |
| 2022 | * If the device has a provider-specific drain function, then |
| 2023 | * call that. Otherwise call the generic drain function |
| 2024 | * __ib_drain_sq(). |
| 2025 | * |
| 2026 | * The caller must: |
| 2027 | * |
| 2028 | * ensure there is room in the CQ and SQ for the drain work request and |
| 2029 | * completion. |
| 2030 | * |
| 2031 | * allocate the CQ using ib_alloc_cq() and the CQ poll context cannot be |
| 2032 | * IB_POLL_DIRECT. |
| 2033 | * |
| 2034 | * ensure that there are no other contexts that are posting WRs concurrently. |
| 2035 | * Otherwise the drain is not guaranteed. |
| 2036 | */ |
| 2037 | void ib_drain_sq(struct ib_qp *qp) |
| 2038 | { |
| 2039 | if (qp->device->drain_sq) |
| 2040 | qp->device->drain_sq(qp); |
| 2041 | else |
| 2042 | __ib_drain_sq(qp); |
| 2043 | } |
| 2044 | EXPORT_SYMBOL(ib_drain_sq); |
| 2045 | |
| 2046 | /** |
| 2047 | * ib_drain_rq() - Block until all RQ CQEs have been consumed by the |
| 2048 | * application. |
| 2049 | * @qp: queue pair to drain |
| 2050 | * |
| 2051 | * If the device has a provider-specific drain function, then |
| 2052 | * call that. Otherwise call the generic drain function |
| 2053 | * __ib_drain_rq(). |
| 2054 | * |
| 2055 | * The caller must: |
| 2056 | * |
| 2057 | * ensure there is room in the CQ and RQ for the drain work request and |
| 2058 | * completion. |
| 2059 | * |
| 2060 | * allocate the CQ using ib_alloc_cq() and the CQ poll context cannot be |
| 2061 | * IB_POLL_DIRECT. |
| 2062 | * |
| 2063 | * ensure that there are no other contexts that are posting WRs concurrently. |
| 2064 | * Otherwise the drain is not guaranteed. |
| 2065 | */ |
| 2066 | void ib_drain_rq(struct ib_qp *qp) |
| 2067 | { |
| 2068 | if (qp->device->drain_rq) |
| 2069 | qp->device->drain_rq(qp); |
| 2070 | else |
| 2071 | __ib_drain_rq(qp); |
| 2072 | } |
| 2073 | EXPORT_SYMBOL(ib_drain_rq); |
| 2074 | |
| 2075 | /** |
| 2076 | * ib_drain_qp() - Block until all CQEs have been consumed by the |
| 2077 | * application on both the RQ and SQ. |
| 2078 | * @qp: queue pair to drain |
| 2079 | * |
| 2080 | * The caller must: |
| 2081 | * |
| 2082 | * ensure there is room in the CQ(s), SQ, and RQ for drain work requests |
| 2083 | * and completions. |
| 2084 | * |
| 2085 | * allocate the CQs using ib_alloc_cq() and the CQ poll context cannot be |
| 2086 | * IB_POLL_DIRECT. |
| 2087 | * |
| 2088 | * ensure that there are no other contexts that are posting WRs concurrently. |
| 2089 | * Otherwise the drain is not guaranteed. |
| 2090 | */ |
| 2091 | void ib_drain_qp(struct ib_qp *qp) |
| 2092 | { |
| 2093 | ib_drain_sq(qp); |
Sagi Grimberg | 42235f8 | 2016-04-26 17:55:38 +0300 | [diff] [blame] | 2094 | if (!qp->srq) |
| 2095 | ib_drain_rq(qp); |
Steve Wise | 765d677 | 2016-02-17 08:15:41 -0800 | [diff] [blame] | 2096 | } |
| 2097 | EXPORT_SYMBOL(ib_drain_qp); |
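
/*
 * Example (illustrative sketch): the drain-before-destroy pattern ULPs
 * such as NVMe over Fabrics and iSER follow.  Once ib_drain_qp() returns,
 * every posted SQ and RQ work request has generated a (flush) completion
 * that was reaped, so buffers referenced by those WRs can be freed.
 */
static void example_disconnect(struct ib_qp *qp)
{
	ib_drain_qp(qp);		/* flush and reap outstanding WRs */
	WARN_ON(ib_destroy_qp(qp));	/* no completions can arrive now */
}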