/*
 * Copyright (c) 2004 Mellanox Technologies Ltd.  All rights reserved.
 * Copyright (c) 2004 Infinicon Corporation.  All rights reserved.
 * Copyright (c) 2004 Intel Corporation.  All rights reserved.
 * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
 * Copyright (c) 2004 Voltaire Corporation.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005, 2006 Cisco Systems.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <net/addrconf.h>
#include <linux/security.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_cache.h>
#include <rdma/ib_addr.h>
#include <rdma/rw.h>

#include "core_priv.h"

static int ib_resolve_eth_dmac(struct ib_device *device,
			       struct rdma_ah_attr *ah_attr);

static const char * const ib_events[] = {
	[IB_EVENT_CQ_ERR] = "CQ error",
	[IB_EVENT_QP_FATAL] = "QP fatal error",
	[IB_EVENT_QP_REQ_ERR] = "QP request error",
	[IB_EVENT_QP_ACCESS_ERR] = "QP access error",
	[IB_EVENT_COMM_EST] = "communication established",
	[IB_EVENT_SQ_DRAINED] = "send queue drained",
	[IB_EVENT_PATH_MIG] = "path migration successful",
	[IB_EVENT_PATH_MIG_ERR] = "path migration error",
	[IB_EVENT_DEVICE_FATAL] = "device fatal error",
	[IB_EVENT_PORT_ACTIVE] = "port active",
	[IB_EVENT_PORT_ERR] = "port error",
	[IB_EVENT_LID_CHANGE] = "LID change",
	[IB_EVENT_PKEY_CHANGE] = "P_key change",
	[IB_EVENT_SM_CHANGE] = "SM change",
	[IB_EVENT_SRQ_ERR] = "SRQ error",
	[IB_EVENT_SRQ_LIMIT_REACHED] = "SRQ limit reached",
	[IB_EVENT_QP_LAST_WQE_REACHED] = "last WQE reached",
	[IB_EVENT_CLIENT_REREGISTER] = "client reregister",
	[IB_EVENT_GID_CHANGE] = "GID changed",
};

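/**
 * ib_event_msg - Returns a human-readable string for an async event type.
 * @event: The event type to describe.
 *
 * Returns "unrecognized event" for values outside the table above.
 */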
const char *__attribute_const__ ib_event_msg(enum ib_event_type event)
{
	size_t index = event;

	return (index < ARRAY_SIZE(ib_events) && ib_events[index]) ?
			ib_events[index] : "unrecognized event";
}
EXPORT_SYMBOL(ib_event_msg);

static const char * const wc_statuses[] = {
	[IB_WC_SUCCESS] = "success",
	[IB_WC_LOC_LEN_ERR] = "local length error",
	[IB_WC_LOC_QP_OP_ERR] = "local QP operation error",
	[IB_WC_LOC_EEC_OP_ERR] = "local EE context operation error",
	[IB_WC_LOC_PROT_ERR] = "local protection error",
	[IB_WC_WR_FLUSH_ERR] = "WR flushed",
	[IB_WC_MW_BIND_ERR] = "memory management operation error",
	[IB_WC_BAD_RESP_ERR] = "bad response error",
	[IB_WC_LOC_ACCESS_ERR] = "local access error",
	[IB_WC_REM_INV_REQ_ERR] = "invalid request error",
	[IB_WC_REM_ACCESS_ERR] = "remote access error",
	[IB_WC_REM_OP_ERR] = "remote operation error",
	[IB_WC_RETRY_EXC_ERR] = "transport retry counter exceeded",
	[IB_WC_RNR_RETRY_EXC_ERR] = "RNR retry counter exceeded",
	[IB_WC_LOC_RDD_VIOL_ERR] = "local RDD violation error",
	[IB_WC_REM_INV_RD_REQ_ERR] = "remote invalid RD request",
	[IB_WC_REM_ABORT_ERR] = "operation aborted",
	[IB_WC_INV_EECN_ERR] = "invalid EE context number",
	[IB_WC_INV_EEC_STATE_ERR] = "invalid EE context state",
	[IB_WC_FATAL_ERR] = "fatal error",
	[IB_WC_RESP_TIMEOUT_ERR] = "response timeout error",
	[IB_WC_GENERAL_ERR] = "general error",
};

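/**
 * ib_wc_status_msg - Returns a human-readable string for a work
 *   completion status.
 * @status: The work completion status to describe.
 */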
const char *__attribute_const__ ib_wc_status_msg(enum ib_wc_status status)
{
	size_t index = status;

	return (index < ARRAY_SIZE(wc_statuses) && wc_statuses[index]) ?
			wc_statuses[index] : "unrecognized status";
}
EXPORT_SYMBOL(ib_wc_status_msg);

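/**
 * ib_rate_to_mult - Convert the IB rate enum to a multiple of the base
 *   rate of 2.5 Gbit/sec.  For example, IB_RATE_5_GBPS is converted to 2,
 *   since 5 Gbit/sec is 2 * 2.5 Gbit/sec.
 * @rate: rate to convert.
 */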
__attribute_const__ int ib_rate_to_mult(enum ib_rate rate)
{
	switch (rate) {
	case IB_RATE_2_5_GBPS: return   1;
	case IB_RATE_5_GBPS:   return   2;
	case IB_RATE_10_GBPS:  return   4;
	case IB_RATE_20_GBPS:  return   8;
	case IB_RATE_30_GBPS:  return  12;
	case IB_RATE_40_GBPS:  return  16;
	case IB_RATE_60_GBPS:  return  24;
	case IB_RATE_80_GBPS:  return  32;
	case IB_RATE_120_GBPS: return  48;
	case IB_RATE_14_GBPS:  return   6;
	case IB_RATE_56_GBPS:  return  22;
	case IB_RATE_112_GBPS: return  45;
	case IB_RATE_168_GBPS: return  67;
	case IB_RATE_25_GBPS:  return  10;
	case IB_RATE_100_GBPS: return  40;
	case IB_RATE_200_GBPS: return  80;
	case IB_RATE_300_GBPS: return 120;
	default:	       return  -1;
	}
}
EXPORT_SYMBOL(ib_rate_to_mult);

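/**
 * mult_to_ib_rate - Convert a multiple of 2.5 Gbit/sec to the
 *   corresponding IB rate enum.
 * @mult: multiple to convert.
 */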
__attribute_const__ enum ib_rate mult_to_ib_rate(int mult)
{
	switch (mult) {
	case 1:   return IB_RATE_2_5_GBPS;
	case 2:   return IB_RATE_5_GBPS;
	case 4:   return IB_RATE_10_GBPS;
	case 8:   return IB_RATE_20_GBPS;
	case 12:  return IB_RATE_30_GBPS;
	case 16:  return IB_RATE_40_GBPS;
	case 24:  return IB_RATE_60_GBPS;
	case 32:  return IB_RATE_80_GBPS;
	case 48:  return IB_RATE_120_GBPS;
	case 6:   return IB_RATE_14_GBPS;
	case 22:  return IB_RATE_56_GBPS;
	case 45:  return IB_RATE_112_GBPS;
	case 67:  return IB_RATE_168_GBPS;
	case 10:  return IB_RATE_25_GBPS;
	case 40:  return IB_RATE_100_GBPS;
	case 80:  return IB_RATE_200_GBPS;
	case 120: return IB_RATE_300_GBPS;
	default:  return IB_RATE_PORT_CURRENT;
	}
}
EXPORT_SYMBOL(mult_to_ib_rate);

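/**
 * ib_rate_to_mbps - Convert the IB rate enum to Mbit/sec.
 *   For example, IB_RATE_5_GBPS is converted to 5000.
 * @rate: rate to convert.
 */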
__attribute_const__ int ib_rate_to_mbps(enum ib_rate rate)
{
	switch (rate) {
	case IB_RATE_2_5_GBPS: return   2500;
	case IB_RATE_5_GBPS:   return   5000;
	case IB_RATE_10_GBPS:  return  10000;
	case IB_RATE_20_GBPS:  return  20000;
	case IB_RATE_30_GBPS:  return  30000;
	case IB_RATE_40_GBPS:  return  40000;
	case IB_RATE_60_GBPS:  return  60000;
	case IB_RATE_80_GBPS:  return  80000;
	case IB_RATE_120_GBPS: return 120000;
	case IB_RATE_14_GBPS:  return  14062;
	case IB_RATE_56_GBPS:  return  56250;
	case IB_RATE_112_GBPS: return 112500;
	case IB_RATE_168_GBPS: return 168750;
	case IB_RATE_25_GBPS:  return  25781;
	case IB_RATE_100_GBPS: return 103125;
	case IB_RATE_200_GBPS: return 206250;
	case IB_RATE_300_GBPS: return 309375;
	default:	       return     -1;
	}
}
EXPORT_SYMBOL(ib_rate_to_mbps);

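/**
 * rdma_node_get_transport - Returns the transport type used by the given
 *   node type.
 * @node_type: The node type to query.
 */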
__attribute_const__ enum rdma_transport_type
rdma_node_get_transport(enum rdma_node_type node_type)
{
	if (node_type == RDMA_NODE_USNIC)
		return RDMA_TRANSPORT_USNIC;
	if (node_type == RDMA_NODE_USNIC_UDP)
		return RDMA_TRANSPORT_USNIC_UDP;
	if (node_type == RDMA_NODE_RNIC)
		return RDMA_TRANSPORT_IWARP;

	return RDMA_TRANSPORT_IB;
}
EXPORT_SYMBOL(rdma_node_get_transport);

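/**
 * rdma_port_get_link_layer - Get the link layer protocol used by a port.
 * @device: Device associated with the port.
 * @port_num: Port number to query.
 *
 * Returns the link layer reported by the driver, or one inferred from the
 * node transport if the driver does not implement get_link_layer.
 */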
enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device, u8 port_num)
{
	enum rdma_transport_type lt;

	if (device->get_link_layer)
		return device->get_link_layer(device, port_num);

	lt = rdma_node_get_transport(device->node_type);
	if (lt == RDMA_TRANSPORT_IB)
		return IB_LINK_LAYER_INFINIBAND;

	return IB_LINK_LAYER_ETHERNET;
}
EXPORT_SYMBOL(rdma_port_get_link_layer);

/* Protection domains */

/**
 * ib_alloc_pd - Allocates an unused protection domain.
 * @device: The device on which to allocate the protection domain.
 *
 * A protection domain object provides an association between QPs, shared
 * receive queues, address handles, memory regions, and memory windows.
 *
 * Every PD has a local_dma_lkey which can be used as the lkey value for local
 * memory operations.
 */
struct ib_pd *__ib_alloc_pd(struct ib_device *device, unsigned int flags,
			    const char *caller)
{
	struct ib_pd *pd;
	int mr_access_flags = 0;

	pd = device->alloc_pd(device, NULL, NULL);
	if (IS_ERR(pd))
		return pd;

	pd->device = device;
	pd->uobject = NULL;
	pd->__internal_mr = NULL;
	atomic_set(&pd->usecnt, 0);
	pd->flags = flags;

	if (device->attrs.device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY)
		pd->local_dma_lkey = device->local_dma_lkey;
	else
		mr_access_flags |= IB_ACCESS_LOCAL_WRITE;

	if (flags & IB_PD_UNSAFE_GLOBAL_RKEY) {
		pr_warn("%s: enabling unsafe global rkey\n", caller);
		mr_access_flags |= IB_ACCESS_REMOTE_READ | IB_ACCESS_REMOTE_WRITE;
	}

	pd->res.type = RDMA_RESTRACK_PD;
	pd->res.kern_name = caller;
	rdma_restrack_add(&pd->res);

	if (mr_access_flags) {
		struct ib_mr *mr;

		mr = pd->device->get_dma_mr(pd, mr_access_flags);
		if (IS_ERR(mr)) {
			ib_dealloc_pd(pd);
			return ERR_CAST(mr);
		}

		mr->device = pd->device;
		mr->pd = pd;
		mr->uobject = NULL;
		mr->need_inval = false;

		pd->__internal_mr = mr;

		if (!(device->attrs.device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY))
			pd->local_dma_lkey = pd->__internal_mr->lkey;

		if (flags & IB_PD_UNSAFE_GLOBAL_RKEY)
			pd->unsafe_global_rkey = pd->__internal_mr->rkey;
	}

	return pd;
}
EXPORT_SYMBOL(__ib_alloc_pd);

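/*
 * Example (a minimal sketch, not part of this file): a ULP normally calls
 * the ib_alloc_pd() wrapper once per device and uses pd->local_dma_lkey
 * when building local SGEs:
 *
 *	struct ib_pd *pd = ib_alloc_pd(device, 0);
 *
 *	if (IS_ERR(pd))
 *		return PTR_ERR(pd);
 *	sge.lkey = pd->local_dma_lkey;
 */
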
/**
 * ib_dealloc_pd - Deallocates a protection domain.
 * @pd: The protection domain to deallocate.
 *
 * It is an error to call this function while any resources in the pd still
 * exist.  The caller is responsible to synchronously destroy them and
 * guarantee no new allocations will happen.
 */
void ib_dealloc_pd(struct ib_pd *pd)
{
	int ret;

	if (pd->__internal_mr) {
		ret = pd->device->dereg_mr(pd->__internal_mr);
		WARN_ON(ret);
		pd->__internal_mr = NULL;
	}

	/* uverbs manipulates usecnt with proper locking, while the kabi
	   requires the caller to guarantee we can't race here. */
	WARN_ON(atomic_read(&pd->usecnt));

	rdma_restrack_del(&pd->res);
	/* Making dealloc_pd a void return is a WIP, no driver should return
	   an error here. */
	ret = pd->device->dealloc_pd(pd);
	WARN_ONCE(ret, "Infiniband HW driver failed dealloc_pd");
}
EXPORT_SYMBOL(ib_dealloc_pd);

/* Address handles */

static struct ib_ah *_rdma_create_ah(struct ib_pd *pd,
				     struct rdma_ah_attr *ah_attr,
				     struct ib_udata *udata)
{
	struct ib_ah *ah;

	ah = pd->device->create_ah(pd, ah_attr, udata);

	if (!IS_ERR(ah)) {
		ah->device = pd->device;
		ah->pd = pd;
		ah->uobject = NULL;
		ah->type = ah_attr->type;
		atomic_inc(&pd->usecnt);
	}

	return ah;
}

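/**
 * rdma_create_ah - Creates an address handle for the given address vector.
 * @pd: The protection domain associated with the address handle.
 * @ah_attr: The attributes of the address vector.
 *
 * The address handle is used to reference a local or global destination
 * in all UD QP post sends.
 */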
struct ib_ah *rdma_create_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr)
{
	return _rdma_create_ah(pd, ah_attr, NULL);
}
EXPORT_SYMBOL(rdma_create_ah);

/**
 * rdma_create_user_ah - Creates an address handle for the given address
 *   vector, resolving the destination MAC address for RoCE-type attributes.
 * @pd: The protection domain associated with the address handle.
 * @ah_attr: The attributes of the address vector.
 * @udata: Pointer to the user's input/output buffer information needed by
 *   the provider driver.
 *
 * Returns the new address handle on success, or an ERR_PTR on failure.
 * The address handle is used to reference a local or global destination
 * in all UD QP post sends.
 */
struct ib_ah *rdma_create_user_ah(struct ib_pd *pd,
				  struct rdma_ah_attr *ah_attr,
				  struct ib_udata *udata)
{
	int err;

	if (ah_attr->type == RDMA_AH_ATTR_TYPE_ROCE) {
		err = ib_resolve_eth_dmac(pd->device, ah_attr);
		if (err)
			return ERR_PTR(err);
	}

	return _rdma_create_ah(pd, ah_attr, udata);
}
EXPORT_SYMBOL(rdma_create_user_ah);

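/**
 * ib_get_rdma_header_version - Get the header version of a received packet.
 * @hdr: The L3 header to parse.
 *
 * Returns 4 for a valid IPv4 header, 6 for an IPv6 header (or GRH), and 0
 * if the buffer holds neither.
 */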
int ib_get_rdma_header_version(const union rdma_network_hdr *hdr)
{
	const struct iphdr *ip4h = (struct iphdr *)&hdr->roce4grh;
	struct iphdr ip4h_checked;
	const struct ipv6hdr *ip6h = (struct ipv6hdr *)&hdr->ibgrh;

	/* If it's IPv6, the version must be 6, otherwise, the first
	 * 20 bytes (before the IPv4 header) are garbled.
	 */
	if (ip6h->version != 6)
		return (ip4h->version == 4) ? 4 : 0;
	/* version may be 6 or 4 because the first 20 bytes could be garbled */

	/* RoCE v2 requires no options, thus header length
	 * must be 5 words
	 */
	if (ip4h->ihl != 5)
		return 6;

	/* Verify checksum.
	 * We can't write on scattered buffers so we need to copy to
	 * temp buffer.
	 */
	memcpy(&ip4h_checked, ip4h, sizeof(ip4h_checked));
	ip4h_checked.check = 0;
	ip4h_checked.check = ip_fast_csum((u8 *)&ip4h_checked, 5);
	/* if IPv4 header checksum is OK, believe it */
	if (ip4h->check == ip4h_checked.check)
		return 4;
	return 6;
}
EXPORT_SYMBOL(ib_get_rdma_header_version);

static enum rdma_network_type ib_get_net_type_by_grh(struct ib_device *device,
						     u8 port_num,
						     const struct ib_grh *grh)
{
	int grh_version;

	if (rdma_protocol_ib(device, port_num))
		return RDMA_NETWORK_IB;

	grh_version = ib_get_rdma_header_version((union rdma_network_hdr *)grh);

	if (grh_version == 4)
		return RDMA_NETWORK_IPV4;

	if (grh->next_hdr == IPPROTO_UDP)
		return RDMA_NETWORK_IPV6;

	return RDMA_NETWORK_ROCE_V1;
}

struct find_gid_index_context {
	u16 vlan_id;
	enum ib_gid_type gid_type;
};

static bool find_gid_index(const union ib_gid *gid,
			   const struct ib_gid_attr *gid_attr,
			   void *context)
{
	struct find_gid_index_context *ctx = context;

	if (ctx->gid_type != gid_attr->gid_type)
		return false;

	if ((!!(ctx->vlan_id != 0xffff) == !is_vlan_dev(gid_attr->ndev)) ||
	    (is_vlan_dev(gid_attr->ndev) &&
	     vlan_dev_vlan_id(gid_attr->ndev) != ctx->vlan_id))
		return false;

	return true;
}

static int get_sgid_index_from_eth(struct ib_device *device, u8 port_num,
				   u16 vlan_id, const union ib_gid *sgid,
				   enum ib_gid_type gid_type,
				   u16 *gid_index)
{
	struct find_gid_index_context context = {.vlan_id = vlan_id,
						 .gid_type = gid_type};

	return ib_find_gid_by_filter(device, sgid, port_num, find_gid_index,
				     &context, gid_index);
}

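/**
 * ib_get_gids_from_rdma_hdr - Get the sgid and dgid from a GRH or IPv4
 *   header.
 * @hdr: The L3 header to parse.
 * @net_type: The type of header to parse.
 * @sgid: Place to store the source GID.
 * @dgid: Place to store the destination GID.
 */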
int ib_get_gids_from_rdma_hdr(const union rdma_network_hdr *hdr,
			      enum rdma_network_type net_type,
			      union ib_gid *sgid, union ib_gid *dgid)
{
	struct sockaddr_in src_in;
	struct sockaddr_in dst_in;
	__be32 src_saddr, dst_saddr;

	if (!sgid || !dgid)
		return -EINVAL;

	if (net_type == RDMA_NETWORK_IPV4) {
		memcpy(&src_in.sin_addr.s_addr,
		       &hdr->roce4grh.saddr, 4);
		memcpy(&dst_in.sin_addr.s_addr,
		       &hdr->roce4grh.daddr, 4);
		src_saddr = src_in.sin_addr.s_addr;
		dst_saddr = dst_in.sin_addr.s_addr;
		ipv6_addr_set_v4mapped(src_saddr,
				       (struct in6_addr *)sgid);
		ipv6_addr_set_v4mapped(dst_saddr,
				       (struct in6_addr *)dgid);
		return 0;
	} else if (net_type == RDMA_NETWORK_IPV6 ||
		   net_type == RDMA_NETWORK_IB) {
		*dgid = hdr->ibgrh.dgid;
		*sgid = hdr->ibgrh.sgid;
		return 0;
	} else {
		return -EINVAL;
	}
}
EXPORT_SYMBOL(ib_get_gids_from_rdma_hdr);

/* Resolve destination MAC address and hop limit for a unicast destination
 * GID entry, considering the source GID entry as well.
 * ah_attr must have a valid port_num and sgid_index.
 */
static int ib_resolve_unicast_gid_dmac(struct ib_device *device,
				       struct rdma_ah_attr *ah_attr)
{
	struct ib_gid_attr sgid_attr;
	struct ib_global_route *grh;
	int hop_limit = 0xff;
	union ib_gid sgid;
	int ret;

	grh = rdma_ah_retrieve_grh(ah_attr);

	ret = ib_query_gid(device,
			   rdma_ah_get_port_num(ah_attr),
			   grh->sgid_index,
			   &sgid, &sgid_attr);
	if (ret || !sgid_attr.ndev) {
		if (!ret)
			ret = -ENXIO;
		return ret;
	}

	/* If destination is link local and source GID is RoCEv1,
	 * IP stack is not used.
	 */
	if (rdma_link_local_addr((struct in6_addr *)grh->dgid.raw) &&
	    sgid_attr.gid_type == IB_GID_TYPE_ROCE) {
		rdma_get_ll_mac((struct in6_addr *)grh->dgid.raw,
				ah_attr->roce.dmac);
		goto done;
	}

	ret = rdma_addr_find_l2_eth_by_grh(&sgid, &grh->dgid,
					   ah_attr->roce.dmac,
					   sgid_attr.ndev, &hop_limit);
done:
	dev_put(sgid_attr.ndev);

	grh->hop_limit = hop_limit;
	return ret;
}

/*
 * This function initializes address handle attributes from an incoming
 * packet.  The packet's dgid is the GID of the receiving node (the node on
 * which this code executes) and its sgid is the GID of the sender.
 *
 * When resolving the destination MAC address, the two therefore swap
 * roles: the arrived dgid is used as the sgid and the arrived sgid as the
 * dgid, because the sgid names the destination the reply must be sent to.
 */
int ib_init_ah_attr_from_wc(struct ib_device *device, u8 port_num,
			    const struct ib_wc *wc, const struct ib_grh *grh,
			    struct rdma_ah_attr *ah_attr)
{
	u32 flow_class;
	u16 gid_index;
	int ret;
	enum rdma_network_type net_type = RDMA_NETWORK_IB;
	enum ib_gid_type gid_type = IB_GID_TYPE_IB;
	int hoplimit = 0xff;
	union ib_gid dgid;
	union ib_gid sgid;

	might_sleep();

	memset(ah_attr, 0, sizeof *ah_attr);
	ah_attr->type = rdma_ah_find_type(device, port_num);
	if (rdma_cap_eth_ah(device, port_num)) {
		if (wc->wc_flags & IB_WC_WITH_NETWORK_HDR_TYPE)
			net_type = wc->network_hdr_type;
		else
			net_type = ib_get_net_type_by_grh(device, port_num, grh);
		gid_type = ib_network_to_gid_type(net_type);
	}
	ret = ib_get_gids_from_rdma_hdr((union rdma_network_hdr *)grh, net_type,
					&sgid, &dgid);
	if (ret)
		return ret;

	rdma_ah_set_sl(ah_attr, wc->sl);
	rdma_ah_set_port_num(ah_attr, port_num);

	if (rdma_protocol_roce(device, port_num)) {
		u16 vlan_id = wc->wc_flags & IB_WC_WITH_VLAN ?
				wc->vlan_id : 0xffff;

		if (!(wc->wc_flags & IB_WC_GRH))
			return -EPROTOTYPE;

		ret = get_sgid_index_from_eth(device, port_num,
					      vlan_id, &dgid,
					      gid_type, &gid_index);
		if (ret)
			return ret;

		flow_class = be32_to_cpu(grh->version_tclass_flow);
		rdma_ah_set_grh(ah_attr, &sgid,
				flow_class & 0xFFFFF,
				(u8)gid_index, hoplimit,
				(flow_class >> 20) & 0xFF);
		return ib_resolve_unicast_gid_dmac(device, ah_attr);
	} else {
		rdma_ah_set_dlid(ah_attr, wc->slid);
		rdma_ah_set_path_bits(ah_attr, wc->dlid_path_bits);

		if (wc->wc_flags & IB_WC_GRH) {
			if (dgid.global.interface_id != cpu_to_be64(IB_SA_WELL_KNOWN_GUID)) {
				ret = ib_find_cached_gid_by_port(device, &dgid,
								 IB_GID_TYPE_IB,
								 port_num, NULL,
								 &gid_index);
				if (ret)
					return ret;
			} else {
				gid_index = 0;
			}

			flow_class = be32_to_cpu(grh->version_tclass_flow);
			rdma_ah_set_grh(ah_attr, &sgid,
					flow_class & 0xFFFFF,
					(u8)gid_index, hoplimit,
					(flow_class >> 20) & 0xFF);
		}
		return 0;
	}
}
EXPORT_SYMBOL(ib_init_ah_attr_from_wc);

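/**
 * ib_create_ah_from_wc - Creates an address handle associated with the
 *   sender of the specified work completion.
 * @pd: The protection domain associated with the address handle.
 * @wc: Work completion information associated with a received message.
 * @grh: References the received global route header.  This parameter is
 *   ignored unless the work completion indicates that the GRH is valid.
 * @port_num: The outbound port number to associate with the address.
 *
 * The address handle is used to reference a local or global destination
 * in all UD QP post sends.
 */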
struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, const struct ib_wc *wc,
				   const struct ib_grh *grh, u8 port_num)
{
	struct rdma_ah_attr ah_attr;
	int ret;

	ret = ib_init_ah_attr_from_wc(pd->device, port_num, wc, grh, &ah_attr);
	if (ret)
		return ERR_PTR(ret);

	return rdma_create_ah(pd, &ah_attr);
}
EXPORT_SYMBOL(ib_create_ah_from_wc);

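/**
 * rdma_modify_ah - Modifies the address vector associated with an address
 *   handle.
 * @ah: The address handle to modify.
 * @ah_attr: The new address vector attributes to associate with the
 *   address handle.
 */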
int rdma_modify_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr)
{
	if (ah->type != ah_attr->type)
		return -EINVAL;

	return ah->device->modify_ah ?
		ah->device->modify_ah(ah, ah_attr) :
		-EOPNOTSUPP;
}
EXPORT_SYMBOL(rdma_modify_ah);

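/**
 * rdma_query_ah - Queries the address vector associated with an address
 *   handle.
 * @ah: The address handle to query.
 * @ah_attr: The address vector attributes associated with the address
 *   handle.
 */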
int rdma_query_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr)
{
	return ah->device->query_ah ?
		ah->device->query_ah(ah, ah_attr) :
		-EOPNOTSUPP;
}
EXPORT_SYMBOL(rdma_query_ah);

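/**
 * rdma_destroy_ah - Destroys an address handle.
 * @ah: The address handle to destroy.
 */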
int rdma_destroy_ah(struct ib_ah *ah)
{
	struct ib_pd *pd;
	int ret;

	pd = ah->pd;
	ret = ah->device->destroy_ah(ah);
	if (!ret)
		atomic_dec(&pd->usecnt);

	return ret;
}
EXPORT_SYMBOL(rdma_destroy_ah);

/* Shared receive queues */

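/**
 * ib_create_srq - Creates a SRQ associated with the specified protection
 *   domain.
 * @pd: The protection domain associated with the SRQ.
 * @srq_init_attr: A list of initial attributes required to create the
 *   SRQ.  On success, the actual capabilities of the created SRQ are
 *   returned in srq_init_attr.
 */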
struct ib_srq *ib_create_srq(struct ib_pd *pd,
			     struct ib_srq_init_attr *srq_init_attr)
{
	struct ib_srq *srq;

	if (!pd->device->create_srq)
		return ERR_PTR(-EOPNOTSUPP);

	srq = pd->device->create_srq(pd, srq_init_attr, NULL);

	if (!IS_ERR(srq)) {
		srq->device = pd->device;
		srq->pd = pd;
		srq->uobject = NULL;
		srq->event_handler = srq_init_attr->event_handler;
		srq->srq_context = srq_init_attr->srq_context;
		srq->srq_type = srq_init_attr->srq_type;
		if (ib_srq_has_cq(srq->srq_type)) {
			srq->ext.cq = srq_init_attr->ext.cq;
			atomic_inc(&srq->ext.cq->usecnt);
		}
		if (srq->srq_type == IB_SRQT_XRC) {
			srq->ext.xrc.xrcd = srq_init_attr->ext.xrc.xrcd;
			atomic_inc(&srq->ext.xrc.xrcd->usecnt);
		}
		atomic_inc(&pd->usecnt);
		atomic_set(&srq->usecnt, 0);
	}

	return srq;
}
EXPORT_SYMBOL(ib_create_srq);

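/**
 * ib_modify_srq - Modifies the attributes for the specified SRQ.
 * @srq: The SRQ to modify.
 * @srq_attr: On input, specifies the SRQ attributes to modify.  On output,
 *   the current values of selected SRQ attributes are returned.
 * @srq_attr_mask: A bit-mask used to specify which attributes of the SRQ
 *   are being modified.
 */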
int ib_modify_srq(struct ib_srq *srq,
		  struct ib_srq_attr *srq_attr,
		  enum ib_srq_attr_mask srq_attr_mask)
{
	return srq->device->modify_srq ?
		srq->device->modify_srq(srq, srq_attr, srq_attr_mask, NULL) :
		-EOPNOTSUPP;
}
EXPORT_SYMBOL(ib_modify_srq);

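/**
 * ib_query_srq - Returns the attribute list and current values for the
 *   specified SRQ.
 * @srq: The SRQ to query.
 * @srq_attr: The attributes of the specified SRQ.
 */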
int ib_query_srq(struct ib_srq *srq,
		 struct ib_srq_attr *srq_attr)
{
	return srq->device->query_srq ?
		srq->device->query_srq(srq, srq_attr) : -EOPNOTSUPP;
}
EXPORT_SYMBOL(ib_query_srq);

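/**
 * ib_destroy_srq - Destroys the specified SRQ.
 * @srq: The SRQ to destroy.
 */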
int ib_destroy_srq(struct ib_srq *srq)
{
	struct ib_pd *pd;
	enum ib_srq_type srq_type;
	struct ib_xrcd *uninitialized_var(xrcd);
	struct ib_cq *uninitialized_var(cq);
	int ret;

	if (atomic_read(&srq->usecnt))
		return -EBUSY;

	pd = srq->pd;
	srq_type = srq->srq_type;
	if (ib_srq_has_cq(srq_type))
		cq = srq->ext.cq;
	if (srq_type == IB_SRQT_XRC)
		xrcd = srq->ext.xrc.xrcd;

	ret = srq->device->destroy_srq(srq);
	if (!ret) {
		atomic_dec(&pd->usecnt);
		if (srq_type == IB_SRQT_XRC)
			atomic_dec(&xrcd->usecnt);
		if (ib_srq_has_cq(srq_type))
			atomic_dec(&cq->usecnt);
	}

	return ret;
}
EXPORT_SYMBOL(ib_destroy_srq);

/* Queue pairs */

static void __ib_shared_qp_event_handler(struct ib_event *event, void *context)
{
	struct ib_qp *qp = context;
	unsigned long flags;

	spin_lock_irqsave(&qp->device->event_handler_lock, flags);
	list_for_each_entry(event->element.qp, &qp->open_list, open_list)
		if (event->element.qp->event_handler)
			event->element.qp->event_handler(event, event->element.qp->qp_context);
	spin_unlock_irqrestore(&qp->device->event_handler_lock, flags);
}

static void __ib_insert_xrcd_qp(struct ib_xrcd *xrcd, struct ib_qp *qp)
{
	mutex_lock(&xrcd->tgt_qp_mutex);
	list_add(&qp->xrcd_list, &xrcd->tgt_qp_list);
	mutex_unlock(&xrcd->tgt_qp_mutex);
}

static struct ib_qp *__ib_open_qp(struct ib_qp *real_qp,
				  void (*event_handler)(struct ib_event *, void *),
				  void *qp_context)
{
	struct ib_qp *qp;
	unsigned long flags;
	int err;

	qp = kzalloc(sizeof *qp, GFP_KERNEL);
	if (!qp)
		return ERR_PTR(-ENOMEM);

	qp->real_qp = real_qp;
	err = ib_open_shared_qp_security(qp, real_qp->device);
	if (err) {
		kfree(qp);
		return ERR_PTR(err);
	}

	atomic_inc(&real_qp->usecnt);
	qp->device = real_qp->device;
	qp->event_handler = event_handler;
	qp->qp_context = qp_context;
	qp->qp_num = real_qp->qp_num;
	qp->qp_type = real_qp->qp_type;

	spin_lock_irqsave(&real_qp->device->event_handler_lock, flags);
	list_add(&qp->open_list, &real_qp->open_list);
	spin_unlock_irqrestore(&real_qp->device->event_handler_lock, flags);

	return qp;
}

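/**
 * ib_open_qp - Obtains a reference to an existing sharable QP.
 * @xrcd: The XRC domain to which the QP belongs.
 * @qp_open_attr: The attributes identifying the QP to open.
 *
 * Returns a reference to a sharable QP.
 */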
struct ib_qp *ib_open_qp(struct ib_xrcd *xrcd,
			 struct ib_qp_open_attr *qp_open_attr)
{
	struct ib_qp *qp, *real_qp;

	if (qp_open_attr->qp_type != IB_QPT_XRC_TGT)
		return ERR_PTR(-EINVAL);

	qp = ERR_PTR(-EINVAL);
	mutex_lock(&xrcd->tgt_qp_mutex);
	list_for_each_entry(real_qp, &xrcd->tgt_qp_list, xrcd_list) {
		if (real_qp->qp_num == qp_open_attr->qp_num) {
			qp = __ib_open_qp(real_qp, qp_open_attr->event_handler,
					  qp_open_attr->qp_context);
			break;
		}
	}
	mutex_unlock(&xrcd->tgt_qp_mutex);
	return qp;
}
EXPORT_SYMBOL(ib_open_qp);

static struct ib_qp *ib_create_xrc_qp(struct ib_qp *qp,
				      struct ib_qp_init_attr *qp_init_attr)
{
	struct ib_qp *real_qp = qp;

	qp->event_handler = __ib_shared_qp_event_handler;
	qp->qp_context = qp;
	qp->pd = NULL;
	qp->send_cq = qp->recv_cq = NULL;
	qp->srq = NULL;
	qp->xrcd = qp_init_attr->xrcd;
	atomic_inc(&qp_init_attr->xrcd->usecnt);
	INIT_LIST_HEAD(&qp->open_list);

	qp = __ib_open_qp(real_qp, qp_init_attr->event_handler,
			  qp_init_attr->qp_context);
	if (!IS_ERR(qp))
		__ib_insert_xrcd_qp(qp_init_attr->xrcd, real_qp);
	else
		real_qp->device->destroy_qp(real_qp);
	return qp;
}

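/**
 * ib_create_qp - Creates a QP associated with the specified protection
 *   domain.
 * @pd: The protection domain associated with the QP.
 * @qp_init_attr: A list of initial attributes required to create the QP.
 *   If QP creation succeeds, then the attributes are updated to the
 *   actual capabilities of the created QP.
 */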
struct ib_qp *ib_create_qp(struct ib_pd *pd,
			   struct ib_qp_init_attr *qp_init_attr)
{
	struct ib_device *device = pd ? pd->device : qp_init_attr->xrcd->device;
	struct ib_qp *qp;
	int ret;

	if (qp_init_attr->rwq_ind_tbl &&
	    (qp_init_attr->recv_cq ||
	     qp_init_attr->srq || qp_init_attr->cap.max_recv_wr ||
	     qp_init_attr->cap.max_recv_sge))
		return ERR_PTR(-EINVAL);

	/*
	 * If the caller is using the RDMA API, calculate the resources
	 * needed for the RDMA READ/WRITE operations.
	 *
	 * Note that these callers need to pass in a port number.
	 */
	if (qp_init_attr->cap.max_rdma_ctxs)
		rdma_rw_init_qp(device, qp_init_attr);

	qp = _ib_create_qp(device, pd, qp_init_attr, NULL, NULL);
	if (IS_ERR(qp))
		return qp;

	ret = ib_create_qp_security(qp, device);
	if (ret) {
		ib_destroy_qp(qp);
		return ERR_PTR(ret);
	}

	qp->real_qp = qp;
	qp->qp_type = qp_init_attr->qp_type;
	qp->rwq_ind_tbl = qp_init_attr->rwq_ind_tbl;

	atomic_set(&qp->usecnt, 0);
	qp->mrs_used = 0;
	spin_lock_init(&qp->mr_lock);
	INIT_LIST_HEAD(&qp->rdma_mrs);
	INIT_LIST_HEAD(&qp->sig_mrs);
	qp->port = 0;

	if (qp_init_attr->qp_type == IB_QPT_XRC_TGT)
		return ib_create_xrc_qp(qp, qp_init_attr);

	qp->event_handler = qp_init_attr->event_handler;
	qp->qp_context = qp_init_attr->qp_context;
	if (qp_init_attr->qp_type == IB_QPT_XRC_INI) {
		qp->recv_cq = NULL;
		qp->srq = NULL;
	} else {
		qp->recv_cq = qp_init_attr->recv_cq;
		if (qp_init_attr->recv_cq)
			atomic_inc(&qp_init_attr->recv_cq->usecnt);
		qp->srq = qp_init_attr->srq;
		if (qp->srq)
			atomic_inc(&qp_init_attr->srq->usecnt);
	}

	qp->send_cq = qp_init_attr->send_cq;
	qp->xrcd = NULL;

	atomic_inc(&pd->usecnt);
	if (qp_init_attr->send_cq)
		atomic_inc(&qp_init_attr->send_cq->usecnt);
	if (qp_init_attr->rwq_ind_tbl)
		atomic_inc(&qp->rwq_ind_tbl->usecnt);

	if (qp_init_attr->cap.max_rdma_ctxs) {
		ret = rdma_rw_init_mrs(qp, qp_init_attr);
		if (ret) {
			pr_err("failed to init MR pool ret= %d\n", ret);
			ib_destroy_qp(qp);
			return ERR_PTR(ret);
		}
	}

	/*
	 * Note: all hw drivers guarantee that max_send_sge is lower than
	 * the device RDMA WRITE SGE limit but not all hw drivers ensure that
	 * max_send_sge <= max_sge_rd.
	 */
	qp->max_write_sge = qp_init_attr->cap.max_send_sge;
	qp->max_read_sge = min_t(u32, qp_init_attr->cap.max_send_sge,
				 device->attrs.max_sge_rd);

	return qp;
}
EXPORT_SYMBOL(ib_create_qp);

static const struct {
	int valid;
	enum ib_qp_attr_mask req_param[IB_QPT_MAX];
	enum ib_qp_attr_mask opt_param[IB_QPT_MAX];
} qp_state_table[IB_QPS_ERR + 1][IB_QPS_ERR + 1] = {
	[IB_QPS_RESET] = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_INIT]  = {
			.valid = 1,
			.req_param = {
				[IB_QPT_UD]  = (IB_QP_PKEY_INDEX |
						IB_QP_PORT |
						IB_QP_QKEY),
				[IB_QPT_RAW_PACKET] = IB_QP_PORT,
				[IB_QPT_UC]  = (IB_QP_PKEY_INDEX |
						IB_QP_PORT |
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_RC]  = (IB_QP_PKEY_INDEX |
						IB_QP_PORT |
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_XRC_INI] = (IB_QP_PKEY_INDEX |
						IB_QP_PORT |
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_XRC_TGT] = (IB_QP_PKEY_INDEX |
						IB_QP_PORT |
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_SMI] = (IB_QP_PKEY_INDEX |
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_PKEY_INDEX |
						IB_QP_QKEY),
			}
		},
	},
	[IB_QPS_INIT]  = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR]   = { .valid = 1 },
		[IB_QPS_INIT]  = {
			.valid = 1,
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_PKEY_INDEX |
						IB_QP_PORT |
						IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_PKEY_INDEX |
						IB_QP_PORT |
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_RC]  = (IB_QP_PKEY_INDEX |
						IB_QP_PORT |
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_XRC_INI] = (IB_QP_PKEY_INDEX |
						IB_QP_PORT |
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_XRC_TGT] = (IB_QP_PKEY_INDEX |
						IB_QP_PORT |
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_SMI] = (IB_QP_PKEY_INDEX |
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_PKEY_INDEX |
						IB_QP_QKEY),
			}
		},
		[IB_QPS_RTR]   = {
			.valid = 1,
			.req_param = {
				[IB_QPT_UC]  = (IB_QP_AV |
						IB_QP_PATH_MTU |
						IB_QP_DEST_QPN |
						IB_QP_RQ_PSN),
				[IB_QPT_RC]  = (IB_QP_AV |
						IB_QP_PATH_MTU |
						IB_QP_DEST_QPN |
						IB_QP_RQ_PSN |
						IB_QP_MAX_DEST_RD_ATOMIC |
						IB_QP_MIN_RNR_TIMER),
				[IB_QPT_XRC_INI] = (IB_QP_AV |
						IB_QP_PATH_MTU |
						IB_QP_DEST_QPN |
						IB_QP_RQ_PSN),
				[IB_QPT_XRC_TGT] = (IB_QP_AV |
						IB_QP_PATH_MTU |
						IB_QP_DEST_QPN |
						IB_QP_RQ_PSN |
						IB_QP_MAX_DEST_RD_ATOMIC |
						IB_QP_MIN_RNR_TIMER),
			},
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_PKEY_INDEX |
						IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_PKEY_INDEX),
				[IB_QPT_RC]  = (IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_PKEY_INDEX),
				[IB_QPT_XRC_INI] = (IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_PKEY_INDEX),
				[IB_QPT_XRC_TGT] = (IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_PKEY_INDEX),
				[IB_QPT_SMI] = (IB_QP_PKEY_INDEX |
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_PKEY_INDEX |
						IB_QP_QKEY),
			},
		},
	},
	[IB_QPS_RTR]   = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR]   = { .valid = 1 },
		[IB_QPS_RTS]   = {
			.valid = 1,
			.req_param = {
				[IB_QPT_UD]  = IB_QP_SQ_PSN,
				[IB_QPT_UC]  = IB_QP_SQ_PSN,
				[IB_QPT_RC]  = (IB_QP_TIMEOUT |
						IB_QP_RETRY_CNT |
						IB_QP_RNR_RETRY |
						IB_QP_SQ_PSN |
						IB_QP_MAX_QP_RD_ATOMIC),
				[IB_QPT_XRC_INI] = (IB_QP_TIMEOUT |
						IB_QP_RETRY_CNT |
						IB_QP_RNR_RETRY |
						IB_QP_SQ_PSN |
						IB_QP_MAX_QP_RD_ATOMIC),
				[IB_QPT_XRC_TGT] = (IB_QP_TIMEOUT |
						IB_QP_SQ_PSN),
				[IB_QPT_SMI] = IB_QP_SQ_PSN,
				[IB_QPT_GSI] = IB_QP_SQ_PSN,
			},
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_CUR_STATE |
						IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_CUR_STATE |
						IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_RC]  = (IB_QP_CUR_STATE |
						IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_MIN_RNR_TIMER |
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_XRC_INI] = (IB_QP_CUR_STATE |
						IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_XRC_TGT] = (IB_QP_CUR_STATE |
						IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_MIN_RNR_TIMER |
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_SMI] = (IB_QP_CUR_STATE |
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_CUR_STATE |
						IB_QP_QKEY),
				[IB_QPT_RAW_PACKET] = IB_QP_RATE_LIMIT,
			}
		}
	},
	[IB_QPS_RTS]   = {
| 1118 | [IB_QPS_RESET] = { .valid = 1 }, |
| 1119 | [IB_QPS_ERR] = { .valid = 1 }, |
| 1120 | [IB_QPS_RTS] = { |
| 1121 | .valid = 1, |
| 1122 | .opt_param = { |
| 1123 | [IB_QPT_UD] = (IB_QP_CUR_STATE | |
| 1124 | IB_QP_QKEY), |
Dotan Barak | 4546d31 | 2006-03-02 11:22:28 -0800 | [diff] [blame] | 1125 | [IB_QPT_UC] = (IB_QP_CUR_STATE | |
| 1126 | IB_QP_ACCESS_FLAGS | |
Roland Dreier | 8a51866 | 2006-02-13 12:48:12 -0800 | [diff] [blame] | 1127 | IB_QP_ALT_PATH | |
| 1128 | IB_QP_PATH_MIG_STATE), |
Dotan Barak | 4546d31 | 2006-03-02 11:22:28 -0800 | [diff] [blame] | 1129 | [IB_QPT_RC] = (IB_QP_CUR_STATE | |
| 1130 | IB_QP_ACCESS_FLAGS | |
Roland Dreier | 8a51866 | 2006-02-13 12:48:12 -0800 | [diff] [blame] | 1131 | IB_QP_ALT_PATH | |
| 1132 | IB_QP_PATH_MIG_STATE | |
| 1133 | IB_QP_MIN_RNR_TIMER), |
Sean Hefty | b42b63c | 2011-05-23 19:59:25 -0700 | [diff] [blame] | 1134 | [IB_QPT_XRC_INI] = (IB_QP_CUR_STATE | |
| 1135 | IB_QP_ACCESS_FLAGS | |
| 1136 | IB_QP_ALT_PATH | |
| 1137 | IB_QP_PATH_MIG_STATE), |
| 1138 | [IB_QPT_XRC_TGT] = (IB_QP_CUR_STATE | |
| 1139 | IB_QP_ACCESS_FLAGS | |
| 1140 | IB_QP_ALT_PATH | |
| 1141 | IB_QP_PATH_MIG_STATE | |
| 1142 | IB_QP_MIN_RNR_TIMER), |
Roland Dreier | 8a51866 | 2006-02-13 12:48:12 -0800 | [diff] [blame] | 1143 | [IB_QPT_SMI] = (IB_QP_CUR_STATE | |
| 1144 | IB_QP_QKEY), |
| 1145 | [IB_QPT_GSI] = (IB_QP_CUR_STATE | |
| 1146 | IB_QP_QKEY), |
Bodong Wang | 528e5a1 | 2016-12-01 13:43:14 +0200 | [diff] [blame] | 1147 | [IB_QPT_RAW_PACKET] = IB_QP_RATE_LIMIT, |
Roland Dreier | 8a51866 | 2006-02-13 12:48:12 -0800 | [diff] [blame] | 1148 | } |
| 1149 | }, |
| 1150 | [IB_QPS_SQD] = { |
| 1151 | .valid = 1, |
| 1152 | .opt_param = { |
| 1153 | [IB_QPT_UD] = IB_QP_EN_SQD_ASYNC_NOTIFY, |
| 1154 | [IB_QPT_UC] = IB_QP_EN_SQD_ASYNC_NOTIFY, |
| 1155 | [IB_QPT_RC] = IB_QP_EN_SQD_ASYNC_NOTIFY, |
Sean Hefty | b42b63c | 2011-05-23 19:59:25 -0700 | [diff] [blame] | 1156 | [IB_QPT_XRC_INI] = IB_QP_EN_SQD_ASYNC_NOTIFY, |
| 1157 | [IB_QPT_XRC_TGT] = IB_QP_EN_SQD_ASYNC_NOTIFY, /* ??? */ |
Roland Dreier | 8a51866 | 2006-02-13 12:48:12 -0800 | [diff] [blame] | 1158 | [IB_QPT_SMI] = IB_QP_EN_SQD_ASYNC_NOTIFY, |
| 1159 | [IB_QPT_GSI] = IB_QP_EN_SQD_ASYNC_NOTIFY |
| 1160 | } |
| 1161 | }, |
| 1162 | }, |
| 1163 | [IB_QPS_SQD] = { |
| 1164 | [IB_QPS_RESET] = { .valid = 1 }, |
| 1165 | [IB_QPS_ERR] = { .valid = 1 }, |
| 1166 | [IB_QPS_RTS] = { |
| 1167 | .valid = 1, |
| 1168 | .opt_param = { |
| 1169 | [IB_QPT_UD] = (IB_QP_CUR_STATE | |
| 1170 | IB_QP_QKEY), |
| 1171 | [IB_QPT_UC] = (IB_QP_CUR_STATE | |
| 1172 | IB_QP_ALT_PATH | |
| 1173 | IB_QP_ACCESS_FLAGS | |
| 1174 | IB_QP_PATH_MIG_STATE), |
| 1175 | [IB_QPT_RC] = (IB_QP_CUR_STATE | |
| 1176 | IB_QP_ALT_PATH | |
| 1177 | IB_QP_ACCESS_FLAGS | |
| 1178 | IB_QP_MIN_RNR_TIMER | |
| 1179 | IB_QP_PATH_MIG_STATE), |
Sean Hefty | b42b63c | 2011-05-23 19:59:25 -0700 | [diff] [blame] | 1180 | [IB_QPT_XRC_INI] = (IB_QP_CUR_STATE | |
| 1181 | IB_QP_ALT_PATH | |
| 1182 | IB_QP_ACCESS_FLAGS | |
| 1183 | IB_QP_PATH_MIG_STATE), |
| 1184 | [IB_QPT_XRC_TGT] = (IB_QP_CUR_STATE | |
| 1185 | IB_QP_ALT_PATH | |
| 1186 | IB_QP_ACCESS_FLAGS | |
| 1187 | IB_QP_MIN_RNR_TIMER | |
| 1188 | IB_QP_PATH_MIG_STATE), |
Roland Dreier | 8a51866 | 2006-02-13 12:48:12 -0800 | [diff] [blame] | 1189 | [IB_QPT_SMI] = (IB_QP_CUR_STATE | |
| 1190 | IB_QP_QKEY), |
| 1191 | [IB_QPT_GSI] = (IB_QP_CUR_STATE | |
| 1192 | IB_QP_QKEY), |
| 1193 | } |
| 1194 | }, |
| 1195 | [IB_QPS_SQD] = { |
| 1196 | .valid = 1, |
| 1197 | .opt_param = { |
| 1198 | [IB_QPT_UD] = (IB_QP_PKEY_INDEX | |
| 1199 | IB_QP_QKEY), |
| 1200 | [IB_QPT_UC] = (IB_QP_AV | |
Roland Dreier | 8a51866 | 2006-02-13 12:48:12 -0800 | [diff] [blame] | 1201 | IB_QP_ALT_PATH | |
| 1202 | IB_QP_ACCESS_FLAGS | |
| 1203 | IB_QP_PKEY_INDEX | |
| 1204 | IB_QP_PATH_MIG_STATE), |
| 1205 | [IB_QPT_RC] = (IB_QP_PORT | |
| 1206 | IB_QP_AV | |
| 1207 | IB_QP_TIMEOUT | |
| 1208 | IB_QP_RETRY_CNT | |
| 1209 | IB_QP_RNR_RETRY | |
| 1210 | IB_QP_MAX_QP_RD_ATOMIC | |
| 1211 | IB_QP_MAX_DEST_RD_ATOMIC | |
Roland Dreier | 8a51866 | 2006-02-13 12:48:12 -0800 | [diff] [blame] | 1212 | IB_QP_ALT_PATH | |
| 1213 | IB_QP_ACCESS_FLAGS | |
| 1214 | IB_QP_PKEY_INDEX | |
| 1215 | IB_QP_MIN_RNR_TIMER | |
| 1216 | IB_QP_PATH_MIG_STATE), |
Sean Hefty | b42b63c | 2011-05-23 19:59:25 -0700 | [diff] [blame] | 1217 | [IB_QPT_XRC_INI] = (IB_QP_PORT | |
| 1218 | IB_QP_AV | |
| 1219 | IB_QP_TIMEOUT | |
| 1220 | IB_QP_RETRY_CNT | |
| 1221 | IB_QP_RNR_RETRY | |
| 1222 | IB_QP_MAX_QP_RD_ATOMIC | |
| 1223 | IB_QP_ALT_PATH | |
| 1224 | IB_QP_ACCESS_FLAGS | |
| 1225 | IB_QP_PKEY_INDEX | |
| 1226 | IB_QP_PATH_MIG_STATE), |
| 1227 | [IB_QPT_XRC_TGT] = (IB_QP_PORT | |
| 1228 | IB_QP_AV | |
| 1229 | IB_QP_TIMEOUT | |
| 1230 | IB_QP_MAX_DEST_RD_ATOMIC | |
| 1231 | IB_QP_ALT_PATH | |
| 1232 | IB_QP_ACCESS_FLAGS | |
| 1233 | IB_QP_PKEY_INDEX | |
| 1234 | IB_QP_MIN_RNR_TIMER | |
| 1235 | IB_QP_PATH_MIG_STATE), |
Roland Dreier | 8a51866 | 2006-02-13 12:48:12 -0800 | [diff] [blame] | 1236 | [IB_QPT_SMI] = (IB_QP_PKEY_INDEX | |
| 1237 | IB_QP_QKEY), |
| 1238 | [IB_QPT_GSI] = (IB_QP_PKEY_INDEX | |
| 1239 | IB_QP_QKEY), |
| 1240 | } |
| 1241 | } |
| 1242 | }, |
| 1243 | [IB_QPS_SQE] = { |
| 1244 | [IB_QPS_RESET] = { .valid = 1 }, |
| 1245 | [IB_QPS_ERR] = { .valid = 1 }, |
| 1246 | [IB_QPS_RTS] = { |
| 1247 | .valid = 1, |
| 1248 | .opt_param = { |
| 1249 | [IB_QPT_UD] = (IB_QP_CUR_STATE | |
| 1250 | IB_QP_QKEY), |
| 1251 | [IB_QPT_UC] = (IB_QP_CUR_STATE | |
| 1252 | IB_QP_ACCESS_FLAGS), |
| 1253 | [IB_QPT_SMI] = (IB_QP_CUR_STATE | |
| 1254 | IB_QP_QKEY), |
| 1255 | [IB_QPT_GSI] = (IB_QP_CUR_STATE | |
| 1256 | IB_QP_QKEY), |
| 1257 | } |
| 1258 | } |
| 1259 | }, |
| 1260 | [IB_QPS_ERR] = { |
| 1261 | [IB_QPS_RESET] = { .valid = 1 }, |
| 1262 | [IB_QPS_ERR] = { .valid = 1 } |
| 1263 | } |
| 1264 | }; |
| 1265 | |
Leon Romanovsky | 19b1f54 | 2018-03-11 13:51:35 +0200 | [diff] [blame] | 1266 | bool ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state, |
| 1267 | enum ib_qp_type type, enum ib_qp_attr_mask mask, |
| 1268 | enum rdma_link_layer ll) |
Roland Dreier | 8a51866 | 2006-02-13 12:48:12 -0800 | [diff] [blame] | 1269 | { |
| 1270 | enum ib_qp_attr_mask req_param, opt_param; |
| 1271 | |
Roland Dreier | 8a51866 | 2006-02-13 12:48:12 -0800 | [diff] [blame] | 1272 | if (mask & IB_QP_CUR_STATE && |
| 1273 | cur_state != IB_QPS_RTR && cur_state != IB_QPS_RTS && |
| 1274 | cur_state != IB_QPS_SQD && cur_state != IB_QPS_SQE) |
Leon Romanovsky | 19b1f54 | 2018-03-11 13:51:35 +0200 | [diff] [blame] | 1275 | return false; |
Roland Dreier | 8a51866 | 2006-02-13 12:48:12 -0800 | [diff] [blame] | 1276 | |
| 1277 | if (!qp_state_table[cur_state][next_state].valid) |
Leon Romanovsky | 19b1f54 | 2018-03-11 13:51:35 +0200 | [diff] [blame] | 1278 | return false; |
Roland Dreier | 8a51866 | 2006-02-13 12:48:12 -0800 | [diff] [blame] | 1279 | |
| 1280 | req_param = qp_state_table[cur_state][next_state].req_param[type]; |
| 1281 | opt_param = qp_state_table[cur_state][next_state].opt_param[type]; |
| 1282 | |
| 1283 | if ((mask & req_param) != req_param) |
Leon Romanovsky | 19b1f54 | 2018-03-11 13:51:35 +0200 | [diff] [blame] | 1284 | return false; |
Roland Dreier | 8a51866 | 2006-02-13 12:48:12 -0800 | [diff] [blame] | 1285 | |
| 1286 | if (mask & ~(req_param | opt_param | IB_QP_STATE)) |
Leon Romanovsky | 19b1f54 | 2018-03-11 13:51:35 +0200 | [diff] [blame] | 1287 | return false; |
Roland Dreier | 8a51866 | 2006-02-13 12:48:12 -0800 | [diff] [blame] | 1288 | |
Leon Romanovsky | 19b1f54 | 2018-03-11 13:51:35 +0200 | [diff] [blame] | 1289 | return true; |
Roland Dreier | 8a51866 | 2006-02-13 12:48:12 -0800 | [diff] [blame] | 1290 | } |
| 1291 | EXPORT_SYMBOL(ib_modify_qp_is_ok); |
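/*
 * Usage sketch (illustrative, not part of this file): a driver or ULP could
 * validate the attribute mask for a RESET -> INIT transition on an RC QP
 * before touching the hardware. The mask values follow the qp_state_table
 * entries above; the surrounding variables are hypothetical.
 *
 *	struct ib_qp_attr attr = {
 *		.qp_state        = IB_QPS_INIT,
 *		.pkey_index      = 0,
 *		.port_num        = 1,
 *		.qp_access_flags = IB_ACCESS_REMOTE_WRITE,
 *	};
 *	int mask = IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT |
 *		   IB_QP_ACCESS_FLAGS;
 *
 *	if (!ib_modify_qp_is_ok(IB_QPS_RESET, IB_QPS_INIT, IB_QPT_RC, mask,
 *				IB_LINK_LAYER_INFINIBAND))
 *		return -EINVAL;
 */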
| 1292 | |
Parav Pandit | c0348eb | 2017-10-16 08:45:13 +0300 | [diff] [blame] | 1293 | static int ib_resolve_eth_dmac(struct ib_device *device, |
| 1294 | struct rdma_ah_attr *ah_attr) |
Or Gerlitz | ed4c54e | 2013-12-12 18:03:17 +0200 | [diff] [blame] | 1295 | { |
| 1296 | int ret = 0; |
Dasaratharaman Chandramouli | d8966fc | 2017-04-29 14:41:28 -0400 | [diff] [blame] | 1297 | struct ib_global_route *grh; |
Or Gerlitz | ed4c54e | 2013-12-12 18:03:17 +0200 | [diff] [blame] | 1298 | |
Dasaratharaman Chandramouli | d8966fc | 2017-04-29 14:41:28 -0400 | [diff] [blame] | 1299 | if (!rdma_is_port_valid(device, rdma_ah_get_port_num(ah_attr))) |
Moni Shoua | c90ea9d | 2016-11-23 08:23:22 +0200 | [diff] [blame] | 1300 | return -EINVAL; |
Matan Barak | dbf727d | 2015-10-15 18:38:51 +0300 | [diff] [blame] | 1301 | |
Dasaratharaman Chandramouli | d8966fc | 2017-04-29 14:41:28 -0400 | [diff] [blame] | 1302 | grh = rdma_ah_retrieve_grh(ah_attr); |
| 1303 | |
Noa Osherovich | 9636a56 | 2017-06-12 11:14:04 +0300 | [diff] [blame] | 1304 | if (rdma_is_multicast_addr((struct in6_addr *)ah_attr->grh.dgid.raw)) { |
| 1305 | if (ipv6_addr_v4mapped((struct in6_addr *)ah_attr->grh.dgid.raw)) { |
| 1306 | __be32 addr = 0; |
| 1307 | |
| 1308 | memcpy(&addr, ah_attr->grh.dgid.raw + 12, 4); |
| 1309 | ip_eth_mc_map(addr, (char *)ah_attr->roce.dmac); |
| 1310 | } else { |
| 1311 | ipv6_eth_mc_map((struct in6_addr *)ah_attr->grh.dgid.raw, |
| 1312 | (char *)ah_attr->roce.dmac); |
| 1313 | } |
Moni Shoua | c90ea9d | 2016-11-23 08:23:22 +0200 | [diff] [blame] | 1314 | } else { |
Parav Pandit | 1060f86 | 2017-11-14 14:51:49 +0200 | [diff] [blame] | 1315 | ret = ib_resolve_unicast_gid_dmac(device, ah_attr); |
Or Gerlitz | ed4c54e | 2013-12-12 18:03:17 +0200 | [diff] [blame] | 1316 | } |
Or Gerlitz | ed4c54e | 2013-12-12 18:03:17 +0200 | [diff] [blame] | 1317 | return ret; |
| 1318 | } |
Or Gerlitz | ed4c54e | 2013-12-12 18:03:17 +0200 | [diff] [blame] | 1319 | |
Parav Pandit | a512c2f | 2017-05-23 11:26:08 +0300 | [diff] [blame] | 1320 | /* |
Parav Pandit | b96ac05 | 2018-01-09 15:24:51 +0200 | [diff] [blame] | 1321 | * IB core internal function to perform QP attribute modification. |
Parav Pandit | a512c2f | 2017-05-23 11:26:08 +0300 | [diff] [blame] | 1322 | */ |
Parav Pandit | b96ac05 | 2018-01-09 15:24:51 +0200 | [diff] [blame] | 1323 | static int _ib_modify_qp(struct ib_qp *qp, struct ib_qp_attr *attr, |
| 1324 | int attr_mask, struct ib_udata *udata) |
Parav Pandit | a512c2f | 2017-05-23 11:26:08 +0300 | [diff] [blame] | 1325 | { |
Majd Dibbiny | 727b7e9 | 2017-11-14 14:51:56 +0200 | [diff] [blame] | 1326 | u8 port = attr_mask & IB_QP_PORT ? attr->port_num : qp->port; |
Parav Pandit | a512c2f | 2017-05-23 11:26:08 +0300 | [diff] [blame] | 1327 | int ret; |
| 1328 | |
Majd Dibbiny | 727b7e9 | 2017-11-14 14:51:56 +0200 | [diff] [blame] | 1329 | if (rdma_ib_or_roce(qp->device, port)) { |
| 1330 | if (attr_mask & IB_QP_RQ_PSN && attr->rq_psn & ~0xffffff) { |
| 1331 | pr_warn("%s: %s rq_psn overflow, masking to 24 bits\n", |
| 1332 | __func__, qp->device->name); |
| 1333 | attr->rq_psn &= 0xffffff; |
| 1334 | } |
| 1335 | |
| 1336 | if (attr_mask & IB_QP_SQ_PSN && attr->sq_psn & ~0xffffff) { |
| 1337 | pr_warn("%s: %s sq_psn overflow, masking to 24 bits\n", |
| 1338 | __func__, qp->device->name); |
| 1339 | attr->sq_psn &= 0xffffff; |
| 1340 | } |
| 1341 | } |
| 1342 | |
Noa Osherovich | 498ca3c | 2017-08-23 08:35:40 +0300 | [diff] [blame] | 1343 | ret = ib_security_modify_qp(qp, attr, attr_mask, udata); |
| 1344 | if (!ret && (attr_mask & IB_QP_PORT)) |
| 1345 | qp->port = attr->port_num; |
| 1346 | |
| 1347 | return ret; |
Parav Pandit | a512c2f | 2017-05-23 11:26:08 +0300 | [diff] [blame] | 1348 | } |
Parav Pandit | b96ac05 | 2018-01-09 15:24:51 +0200 | [diff] [blame] | 1349 | |
Parav Pandit | a6753c4 | 2018-01-09 15:24:53 +0200 | [diff] [blame] | 1350 | static bool is_qp_type_connected(const struct ib_qp *qp) |
| 1351 | { |
| 1352 | return (qp->qp_type == IB_QPT_UC || |
| 1353 | qp->qp_type == IB_QPT_RC || |
| 1354 | qp->qp_type == IB_QPT_XRC_INI || |
| 1355 | qp->qp_type == IB_QPT_XRC_TGT); |
| 1356 | } |
| 1357 | |
Parav Pandit | b96ac05 | 2018-01-09 15:24:51 +0200 | [diff] [blame] | 1358 | /** |
| 1359 | * ib_modify_qp_with_udata - Modifies the attributes for the specified QP. |
| 1360 | * @ib_qp: The QP to modify. |
| 1361 | * @attr: On input, specifies the QP attributes to modify. On output, |
| 1362 | * the current values of selected QP attributes are returned. |
| 1363 | * @attr_mask: A bit-mask used to specify which attributes of the QP |
| 1364 | * are being modified. |
| 1365 | * @udata: pointer to user's input/output buffer information |
| 1366 | * |
| 1367 | * Returns 0 on success, or an appropriate error code on error. |
| 1368 | */ |
| 1369 | int ib_modify_qp_with_udata(struct ib_qp *ib_qp, struct ib_qp_attr *attr, |
| 1370 | int attr_mask, struct ib_udata *udata) |
| 1371 | { |
| 1372 | struct ib_qp *qp = ib_qp->real_qp; |
| 1373 | int ret; |
| 1374 | |
Parav Pandit | f2290d6 | 2018-01-09 15:24:52 +0200 | [diff] [blame] | 1375 | if (attr_mask & IB_QP_AV && |
Parav Pandit | a6753c4 | 2018-01-09 15:24:53 +0200 | [diff] [blame] | 1376 | attr->ah_attr.type == RDMA_AH_ATTR_TYPE_ROCE && |
| 1377 | is_qp_type_connected(qp)) { |
Parav Pandit | b96ac05 | 2018-01-09 15:24:51 +0200 | [diff] [blame] | 1378 | ret = ib_resolve_eth_dmac(qp->device, &attr->ah_attr); |
| 1379 | if (ret) |
| 1380 | return ret; |
| 1381 | } |
| 1382 | return _ib_modify_qp(qp, attr, attr_mask, udata); |
| 1383 | } |
Parav Pandit | a512c2f | 2017-05-23 11:26:08 +0300 | [diff] [blame] | 1384 | EXPORT_SYMBOL(ib_modify_qp_with_udata); |
| 1385 | |
Yuval Shaia | d418619 | 2017-06-14 23:13:34 +0300 | [diff] [blame] | 1386 | int ib_get_eth_speed(struct ib_device *dev, u8 port_num, u8 *speed, u8 *width) |
| 1387 | { |
| 1388 | int rc; |
| 1389 | u32 netdev_speed; |
| 1390 | struct net_device *netdev; |
| 1391 | struct ethtool_link_ksettings lksettings; |
| 1392 | |
| 1393 | if (rdma_port_get_link_layer(dev, port_num) != IB_LINK_LAYER_ETHERNET) |
| 1394 | return -EINVAL; |
| 1395 | |
| 1396 | if (!dev->get_netdev) |
| 1397 | return -EOPNOTSUPP; |
| 1398 | |
| 1399 | netdev = dev->get_netdev(dev, port_num); |
| 1400 | if (!netdev) |
| 1401 | return -ENODEV; |
| 1402 | |
| 1403 | rtnl_lock(); |
| 1404 | rc = __ethtool_get_link_ksettings(netdev, &lksettings); |
| 1405 | rtnl_unlock(); |
| 1406 | |
| 1407 | if (!rc) { |
| 1408 | netdev_speed = lksettings.base.speed; |
| 1409 | } else { |
| 1410 | netdev_speed = SPEED_1000; |
| 1411 | pr_warn("%s speed is unknown, defaulting to %d\n", netdev->name, |
| 1412 | netdev_speed); |
| 1413 | } |
| 1414 |  |
| 1415 | dev_put(netdev); |
| 1416 | |
| 1417 | if (netdev_speed <= SPEED_1000) { |
| 1418 | *width = IB_WIDTH_1X; |
| 1419 | *speed = IB_SPEED_SDR; |
| 1420 | } else if (netdev_speed <= SPEED_10000) { |
| 1421 | *width = IB_WIDTH_1X; |
| 1422 | *speed = IB_SPEED_FDR10; |
| 1423 | } else if (netdev_speed <= SPEED_20000) { |
| 1424 | *width = IB_WIDTH_4X; |
| 1425 | *speed = IB_SPEED_DDR; |
| 1426 | } else if (netdev_speed <= SPEED_25000) { |
| 1427 | *width = IB_WIDTH_1X; |
| 1428 | *speed = IB_SPEED_EDR; |
| 1429 | } else if (netdev_speed <= SPEED_40000) { |
| 1430 | *width = IB_WIDTH_4X; |
| 1431 | *speed = IB_SPEED_FDR10; |
| 1432 | } else { |
| 1433 | *width = IB_WIDTH_4X; |
| 1434 | *speed = IB_SPEED_EDR; |
| 1435 | } |
| 1436 | |
| 1437 | return 0; |
| 1438 | } |
| 1439 | EXPORT_SYMBOL(ib_get_eth_speed); |
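/*
 * Usage sketch (illustrative): a RoCE driver's query_port() callback might
 * map the netdev link speed into IB speed/width this way; "dev", "port_num"
 * and "port_attr" are hypothetical locals of that callback.
 *
 *	int err = ib_get_eth_speed(&dev->ib_dev, port_num,
 *				   &port_attr->active_speed,
 *				   &port_attr->active_width);
 *	if (err)
 *		return err;
 */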
| 1440 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1441 | int ib_modify_qp(struct ib_qp *qp, |
| 1442 | struct ib_qp_attr *qp_attr, |
| 1443 | int qp_attr_mask) |
| 1444 | { |
Parav Pandit | b96ac05 | 2018-01-09 15:24:51 +0200 | [diff] [blame] | 1445 | return _ib_modify_qp(qp->real_qp, qp_attr, qp_attr_mask, NULL); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1446 | } |
| 1447 | EXPORT_SYMBOL(ib_modify_qp); |
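/*
 * Usage sketch (illustrative): a common use of ib_modify_qp() is forcing a
 * QP into the error state so outstanding work requests complete with a
 * flush status, exactly as __ib_drain_sq()/__ib_drain_rq() below do.
 *
 *	struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
 *	int ret;
 *
 *	ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
 */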
| 1448 | |
| 1449 | int ib_query_qp(struct ib_qp *qp, |
| 1450 | struct ib_qp_attr *qp_attr, |
| 1451 | int qp_attr_mask, |
| 1452 | struct ib_qp_init_attr *qp_init_attr) |
| 1453 | { |
| 1454 | return qp->device->query_qp ? |
Sean Hefty | 0e0ec7e | 2011-08-08 15:31:51 -0700 | [diff] [blame] | 1455 | qp->device->query_qp(qp->real_qp, qp_attr, qp_attr_mask, qp_init_attr) : |
Leon Romanovsky | 87915bf | 2018-02-21 18:12:44 +0200 | [diff] [blame] | 1456 | -EOPNOTSUPP; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1457 | } |
| 1458 | EXPORT_SYMBOL(ib_query_qp); |
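/*
 * Usage sketch (illustrative): reading back the current state of a QP.
 *
 *	struct ib_qp_attr attr;
 *	struct ib_qp_init_attr init_attr;
 *
 *	if (!ib_query_qp(qp, &attr, IB_QP_STATE, &init_attr))
 *		pr_debug("QP is in state %d\n", attr.qp_state);
 */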
| 1459 | |
Sean Hefty | 0e0ec7e | 2011-08-08 15:31:51 -0700 | [diff] [blame] | 1460 | int ib_close_qp(struct ib_qp *qp) |
| 1461 | { |
| 1462 | struct ib_qp *real_qp; |
| 1463 | unsigned long flags; |
| 1464 | |
| 1465 | real_qp = qp->real_qp; |
| 1466 | if (real_qp == qp) |
| 1467 | return -EINVAL; |
| 1468 | |
| 1469 | spin_lock_irqsave(&real_qp->device->event_handler_lock, flags); |
| 1470 | list_del(&qp->open_list); |
| 1471 | spin_unlock_irqrestore(&real_qp->device->event_handler_lock, flags); |
| 1472 | |
| 1473 | atomic_dec(&real_qp->usecnt); |
Moni Shoua | 4a50881 | 2017-12-24 13:54:58 +0200 | [diff] [blame] | 1474 | if (qp->qp_sec) |
| 1475 | ib_close_shared_qp_security(qp->qp_sec); |
Sean Hefty | 0e0ec7e | 2011-08-08 15:31:51 -0700 | [diff] [blame] | 1476 | kfree(qp); |
| 1477 | |
| 1478 | return 0; |
| 1479 | } |
| 1480 | EXPORT_SYMBOL(ib_close_qp); |
| 1481 | |
| 1482 | static int __ib_destroy_shared_qp(struct ib_qp *qp) |
| 1483 | { |
| 1484 | struct ib_xrcd *xrcd; |
| 1485 | struct ib_qp *real_qp; |
| 1486 | int ret; |
| 1487 | |
| 1488 | real_qp = qp->real_qp; |
| 1489 | xrcd = real_qp->xrcd; |
| 1490 | |
| 1491 | mutex_lock(&xrcd->tgt_qp_mutex); |
| 1492 | ib_close_qp(qp); |
| 1493 | if (atomic_read(&real_qp->usecnt) == 0) |
| 1494 | list_del(&real_qp->xrcd_list); |
| 1495 | else |
| 1496 | real_qp = NULL; |
| 1497 | mutex_unlock(&xrcd->tgt_qp_mutex); |
| 1498 | |
| 1499 | if (real_qp) { |
| 1500 | ret = ib_destroy_qp(real_qp); |
| 1501 | if (!ret) |
| 1502 | atomic_dec(&xrcd->usecnt); |
| 1503 | else |
| 1504 | __ib_insert_xrcd_qp(xrcd, real_qp); |
| 1505 | } |
| 1506 | |
| 1507 | return 0; |
| 1508 | } |
| 1509 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1510 | int ib_destroy_qp(struct ib_qp *qp) |
| 1511 | { |
| 1512 | struct ib_pd *pd; |
| 1513 | struct ib_cq *scq, *rcq; |
| 1514 | struct ib_srq *srq; |
Yishai Hadas | a9017e2 | 2016-05-23 15:20:54 +0300 | [diff] [blame] | 1515 | struct ib_rwq_ind_table *ind_tbl; |
Daniel Jurgens | d291f1a | 2017-05-19 15:48:52 +0300 | [diff] [blame] | 1516 | struct ib_qp_security *sec; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1517 | int ret; |
| 1518 | |
Christoph Hellwig | fffb038 | 2016-05-03 18:01:07 +0200 | [diff] [blame] | 1519 | WARN_ON_ONCE(qp->mrs_used > 0); |
| 1520 | |
Sean Hefty | 0e0ec7e | 2011-08-08 15:31:51 -0700 | [diff] [blame] | 1521 | if (atomic_read(&qp->usecnt)) |
| 1522 | return -EBUSY; |
| 1523 | |
| 1524 | if (qp->real_qp != qp) |
| 1525 | return __ib_destroy_shared_qp(qp); |
| 1526 | |
Sean Hefty | b42b63c | 2011-05-23 19:59:25 -0700 | [diff] [blame] | 1527 | pd = qp->pd; |
| 1528 | scq = qp->send_cq; |
| 1529 | rcq = qp->recv_cq; |
| 1530 | srq = qp->srq; |
Yishai Hadas | a9017e2 | 2016-05-23 15:20:54 +0300 | [diff] [blame] | 1531 | ind_tbl = qp->rwq_ind_tbl; |
Daniel Jurgens | d291f1a | 2017-05-19 15:48:52 +0300 | [diff] [blame] | 1532 | sec = qp->qp_sec; |
| 1533 | if (sec) |
| 1534 | ib_destroy_qp_security_begin(sec); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1535 | |
Christoph Hellwig | a060b56 | 2016-05-03 18:01:09 +0200 | [diff] [blame] | 1536 | if (!qp->uobject) |
| 1537 | rdma_rw_cleanup_mrs(qp); |
| 1538 | |
Leon Romanovsky | 78a0cd6 | 2018-01-28 11:17:21 +0200 | [diff] [blame] | 1539 | rdma_restrack_del(&qp->res); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1540 | ret = qp->device->destroy_qp(qp); |
| 1541 | if (!ret) { |
Sean Hefty | b42b63c | 2011-05-23 19:59:25 -0700 | [diff] [blame] | 1542 | if (pd) |
| 1543 | atomic_dec(&pd->usecnt); |
| 1544 | if (scq) |
| 1545 | atomic_dec(&scq->usecnt); |
| 1546 | if (rcq) |
| 1547 | atomic_dec(&rcq->usecnt); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1548 | if (srq) |
| 1549 | atomic_dec(&srq->usecnt); |
Yishai Hadas | a9017e2 | 2016-05-23 15:20:54 +0300 | [diff] [blame] | 1550 | if (ind_tbl) |
| 1551 | atomic_dec(&ind_tbl->usecnt); |
Daniel Jurgens | d291f1a | 2017-05-19 15:48:52 +0300 | [diff] [blame] | 1552 | if (sec) |
| 1553 | ib_destroy_qp_security_end(sec); |
| 1554 | } else { |
| 1555 | if (sec) |
| 1556 | ib_destroy_qp_security_abort(sec); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1557 | } |
| 1558 | |
| 1559 | return ret; |
| 1560 | } |
| 1561 | EXPORT_SYMBOL(ib_destroy_qp); |
| 1562 | |
| 1563 | /* Completion queues */ |
| 1564 | |
| 1565 | struct ib_cq *ib_create_cq(struct ib_device *device, |
| 1566 | ib_comp_handler comp_handler, |
| 1567 | void (*event_handler)(struct ib_event *, void *), |
Matan Barak | 8e37210 | 2015-06-11 16:35:21 +0300 | [diff] [blame] | 1568 | void *cq_context, |
| 1569 | const struct ib_cq_init_attr *cq_attr) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1570 | { |
| 1571 | struct ib_cq *cq; |
| 1572 | |
Matan Barak | 8e37210 | 2015-06-11 16:35:21 +0300 | [diff] [blame] | 1573 | cq = device->create_cq(device, cq_attr, NULL, NULL); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1574 | |
| 1575 | if (!IS_ERR(cq)) { |
| 1576 | cq->device = device; |
Roland Dreier | b5e81bf | 2005-07-07 17:57:11 -0700 | [diff] [blame] | 1577 | cq->uobject = NULL; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1578 | cq->comp_handler = comp_handler; |
| 1579 | cq->event_handler = event_handler; |
| 1580 | cq->cq_context = cq_context; |
| 1581 | atomic_set(&cq->usecnt, 0); |
Leon Romanovsky | 08f294a | 2018-01-28 11:17:22 +0200 | [diff] [blame] | 1582 | cq->res.type = RDMA_RESTRACK_CQ; |
| 1583 | rdma_restrack_add(&cq->res); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1584 | } |
| 1585 | |
| 1586 | return cq; |
| 1587 | } |
| 1588 | EXPORT_SYMBOL(ib_create_cq); |
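/*
 * Usage sketch (illustrative): creating a 256-entry CQ bound to completion
 * vector 0; "my_comp_handler" and "my_ctx" are hypothetical.
 *
 *	struct ib_cq_init_attr cq_attr = { .cqe = 256, .comp_vector = 0 };
 *	struct ib_cq *cq;
 *
 *	cq = ib_create_cq(device, my_comp_handler, NULL, my_ctx, &cq_attr);
 *	if (IS_ERR(cq))
 *		return PTR_ERR(cq);
 */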
| 1589 | |
Leon Romanovsky | 4190b4e | 2017-11-13 10:51:19 +0200 | [diff] [blame] | 1590 | int rdma_set_cq_moderation(struct ib_cq *cq, u16 cq_count, u16 cq_period) |
Eli Cohen | 2dd5716 | 2008-04-16 21:09:33 -0700 | [diff] [blame] | 1591 | { |
| 1592 | return cq->device->modify_cq ? |
Leon Romanovsky | 87915bf | 2018-02-21 18:12:44 +0200 | [diff] [blame] | 1593 | cq->device->modify_cq(cq, cq_count, cq_period) : -EOPNOTSUPP; |
Eli Cohen | 2dd5716 | 2008-04-16 21:09:33 -0700 | [diff] [blame] | 1594 | } |
Leon Romanovsky | 4190b4e | 2017-11-13 10:51:19 +0200 | [diff] [blame] | 1595 | EXPORT_SYMBOL(rdma_set_cq_moderation); |
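/*
 * Usage sketch (illustrative): request interrupt moderation so a completion
 * event fires only once 16 CQEs have arrived or the moderation period has
 * elapsed, whichever comes first. The period's units (typically
 * microseconds) are ultimately defined by the underlying driver.
 *
 *	int ret = rdma_set_cq_moderation(cq, 16, 64);
 */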
Eli Cohen | 2dd5716 | 2008-04-16 21:09:33 -0700 | [diff] [blame] | 1596 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1597 | int ib_destroy_cq(struct ib_cq *cq) |
| 1598 | { |
| 1599 | if (atomic_read(&cq->usecnt)) |
| 1600 | return -EBUSY; |
| 1601 | |
Leon Romanovsky | 08f294a | 2018-01-28 11:17:22 +0200 | [diff] [blame] | 1602 | rdma_restrack_del(&cq->res); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1603 | return cq->device->destroy_cq(cq); |
| 1604 | } |
| 1605 | EXPORT_SYMBOL(ib_destroy_cq); |
| 1606 | |
Roland Dreier | a74cd4a | 2006-02-13 16:30:49 -0800 | [diff] [blame] | 1607 | int ib_resize_cq(struct ib_cq *cq, int cqe) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1608 | { |
Roland Dreier | 40de2e5 | 2005-11-08 11:10:25 -0800 | [diff] [blame] | 1609 | return cq->device->resize_cq ? |
Leon Romanovsky | 87915bf | 2018-02-21 18:12:44 +0200 | [diff] [blame] | 1610 | cq->device->resize_cq(cq, cqe, NULL) : -EOPNOTSUPP; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1611 | } |
| 1612 | EXPORT_SYMBOL(ib_resize_cq); |
| 1613 | |
| 1614 | /* Memory regions */ |
| 1615 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1616 | int ib_dereg_mr(struct ib_mr *mr) |
| 1617 | { |
Christoph Hellwig | ab67ed8 | 2015-12-23 19:12:54 +0100 | [diff] [blame] | 1618 | struct ib_pd *pd = mr->pd; |
Ariel Levkovich | be934cc | 2018-04-05 18:53:25 +0300 | [diff] [blame] | 1619 | struct ib_dm *dm = mr->dm; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1620 | int ret; |
| 1621 | |
Steve Wise | fccec5b | 2018-03-01 13:58:13 -0800 | [diff] [blame] | 1622 | rdma_restrack_del(&mr->res); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1623 | ret = mr->device->dereg_mr(mr); |
Ariel Levkovich | be934cc | 2018-04-05 18:53:25 +0300 | [diff] [blame] | 1624 | if (!ret) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1625 | atomic_dec(&pd->usecnt); |
Ariel Levkovich | be934cc | 2018-04-05 18:53:25 +0300 | [diff] [blame] | 1626 | if (dm) |
| 1627 | atomic_dec(&dm->usecnt); |
| 1628 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1629 | |
| 1630 | return ret; |
| 1631 | } |
| 1632 | EXPORT_SYMBOL(ib_dereg_mr); |
| 1633 | |
Sagi Grimberg | 9bee178 | 2015-07-30 10:32:35 +0300 | [diff] [blame] | 1634 | /** |
| 1635 | * ib_alloc_mr() - Allocates a memory region |
| 1636 | * @pd: protection domain associated with the region |
| 1637 | * @mr_type: memory region type |
| 1638 | * @max_num_sg: maximum sg entries available for registration. |
| 1639 | * |
| 1640 | * Notes: |
| 1641 | * Memory registration page/sg lists must not exceed max_num_sg. |
| 1642 | * For mr_type IB_MR_TYPE_MEM_REG, the total length cannot exceed |
| 1643 | * max_num_sg * used_page_size. |
| 1644 | * |
| 1645 | */ |
| 1646 | struct ib_mr *ib_alloc_mr(struct ib_pd *pd, |
| 1647 | enum ib_mr_type mr_type, |
| 1648 | u32 max_num_sg) |
Sagi Grimberg | 17cd3a2 | 2014-02-23 14:19:04 +0200 | [diff] [blame] | 1649 | { |
| 1650 | struct ib_mr *mr; |
| 1651 | |
Sagi Grimberg | d9f272c | 2015-07-30 10:32:48 +0300 | [diff] [blame] | 1652 | if (!pd->device->alloc_mr) |
Leon Romanovsky | 87915bf | 2018-02-21 18:12:44 +0200 | [diff] [blame] | 1653 | return ERR_PTR(-EOPNOTSUPP); |
Sagi Grimberg | 17cd3a2 | 2014-02-23 14:19:04 +0200 | [diff] [blame] | 1654 | |
Sagi Grimberg | d9f272c | 2015-07-30 10:32:48 +0300 | [diff] [blame] | 1655 | mr = pd->device->alloc_mr(pd, mr_type, max_num_sg); |
Sagi Grimberg | 17cd3a2 | 2014-02-23 14:19:04 +0200 | [diff] [blame] | 1656 | if (!IS_ERR(mr)) { |
| 1657 | mr->device = pd->device; |
| 1658 | mr->pd = pd; |
Ariel Levkovich | 54e7e48 | 2018-04-26 15:42:55 +0300 | [diff] [blame] | 1659 | mr->dm = NULL; |
Sagi Grimberg | 17cd3a2 | 2014-02-23 14:19:04 +0200 | [diff] [blame] | 1660 | mr->uobject = NULL; |
| 1661 | atomic_inc(&pd->usecnt); |
Steve Wise | d4a85c3 | 2016-05-03 18:01:08 +0200 | [diff] [blame] | 1662 | mr->need_inval = false; |
Steve Wise | fccec5b | 2018-03-01 13:58:13 -0800 | [diff] [blame] | 1663 | mr->res.type = RDMA_RESTRACK_MR; |
| 1664 | rdma_restrack_add(&mr->res); |
Sagi Grimberg | 17cd3a2 | 2014-02-23 14:19:04 +0200 | [diff] [blame] | 1665 | } |
| 1666 | |
| 1667 | return mr; |
| 1668 | } |
Sagi Grimberg | 9bee178 | 2015-07-30 10:32:35 +0300 | [diff] [blame] | 1669 | EXPORT_SYMBOL(ib_alloc_mr); |
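/*
 * Usage sketch (illustrative): allocating a fast-registration MR that can
 * map up to 32 scatterlist entries; pair it with ib_map_mr_sg() below.
 *
 *	struct ib_mr *mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, 32);
 *
 *	if (IS_ERR(mr))
 *		return PTR_ERR(mr);
 */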
Steve Wise | 00f7ec3 | 2008-07-14 23:48:45 -0700 | [diff] [blame] | 1670 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1671 | /* "Fast" memory regions */ |
| 1672 | |
| 1673 | struct ib_fmr *ib_alloc_fmr(struct ib_pd *pd, |
| 1674 | int mr_access_flags, |
| 1675 | struct ib_fmr_attr *fmr_attr) |
| 1676 | { |
| 1677 | struct ib_fmr *fmr; |
| 1678 | |
| 1679 | if (!pd->device->alloc_fmr) |
Leon Romanovsky | 87915bf | 2018-02-21 18:12:44 +0200 | [diff] [blame] | 1680 | return ERR_PTR(-EOPNOTSUPP); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1681 | |
| 1682 | fmr = pd->device->alloc_fmr(pd, mr_access_flags, fmr_attr); |
| 1683 | if (!IS_ERR(fmr)) { |
| 1684 | fmr->device = pd->device; |
| 1685 | fmr->pd = pd; |
| 1686 | atomic_inc(&pd->usecnt); |
| 1687 | } |
| 1688 | |
| 1689 | return fmr; |
| 1690 | } |
| 1691 | EXPORT_SYMBOL(ib_alloc_fmr); |
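/*
 * Usage sketch (illustrative, legacy API): allocating an FMR; new code
 * should prefer ib_alloc_mr() and the generic RDMA R/W helpers instead.
 *
 *	struct ib_fmr_attr fmr_attr = {
 *		.max_pages  = 64,
 *		.max_maps   = 32,
 *		.page_shift = PAGE_SHIFT,
 *	};
 *	struct ib_fmr *fmr = ib_alloc_fmr(pd, IB_ACCESS_LOCAL_WRITE,
 *					  &fmr_attr);
 */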
| 1692 | |
| 1693 | int ib_unmap_fmr(struct list_head *fmr_list) |
| 1694 | { |
| 1695 | struct ib_fmr *fmr; |
| 1696 | |
| 1697 | if (list_empty(fmr_list)) |
| 1698 | return 0; |
| 1699 | |
| 1700 | fmr = list_entry(fmr_list->next, struct ib_fmr, list); |
| 1701 | return fmr->device->unmap_fmr(fmr_list); |
| 1702 | } |
| 1703 | EXPORT_SYMBOL(ib_unmap_fmr); |
| 1704 | |
| 1705 | int ib_dealloc_fmr(struct ib_fmr *fmr) |
| 1706 | { |
| 1707 | struct ib_pd *pd; |
| 1708 | int ret; |
| 1709 | |
| 1710 | pd = fmr->pd; |
| 1711 | ret = fmr->device->dealloc_fmr(fmr); |
| 1712 | if (!ret) |
| 1713 | atomic_dec(&pd->usecnt); |
| 1714 | |
| 1715 | return ret; |
| 1716 | } |
| 1717 | EXPORT_SYMBOL(ib_dealloc_fmr); |
| 1718 | |
| 1719 | /* Multicast groups */ |
| 1720 | |
Noa Osherovich | 5236333 | 2017-06-12 11:14:02 +0300 | [diff] [blame] | 1721 | static bool is_valid_mcast_lid(struct ib_qp *qp, u16 lid) |
| 1722 | { |
| 1723 | struct ib_qp_init_attr init_attr = {}; |
| 1724 | struct ib_qp_attr attr = {}; |
| 1725 | int num_eth_ports = 0; |
| 1726 | int port; |
| 1727 | |
| 1728 | /* If QP state >= init, it is assigned to a port and we can check this |
| 1729 | * port only. |
| 1730 | */ |
| 1731 | if (!ib_query_qp(qp, &attr, IB_QP_STATE | IB_QP_PORT, &init_attr)) { |
| 1732 | if (attr.qp_state >= IB_QPS_INIT) { |
Alex Estrin | e6f9bc3 | 2017-08-31 09:30:34 -0700 | [diff] [blame] | 1733 | if (rdma_port_get_link_layer(qp->device, attr.port_num) != |
Noa Osherovich | 5236333 | 2017-06-12 11:14:02 +0300 | [diff] [blame] | 1734 | IB_LINK_LAYER_INFINIBAND) |
| 1735 | return true; |
| 1736 | goto lid_check; |
| 1737 | } |
| 1738 | } |
| 1739 | |
| 1740 | /* Can't get a quick answer, iterate over all ports */ |
| 1741 | for (port = rdma_start_port(qp->device); port <= rdma_end_port(qp->device); port++) |
Alex Estrin | e6f9bc3 | 2017-08-31 09:30:34 -0700 | [diff] [blame] | 1742 | if (rdma_port_get_link_layer(qp->device, port) != |
Noa Osherovich | 5236333 | 2017-06-12 11:14:02 +0300 | [diff] [blame] | 1743 | IB_LINK_LAYER_INFINIBAND) |
| 1744 | num_eth_ports++; |
| 1745 | |
| 1746 | /* If we have at least one Ethernet port, RoCE annex declares that |
| 1747 | * multicast LID should be ignored. We can't tell at this step if the |
| 1748 | * QP belongs to an IB or Ethernet port. |
| 1749 | */ |
| 1750 | if (num_eth_ports) |
| 1751 | return true; |
| 1752 | |
| 1753 | /* If all the ports are IB, we can check according to IB spec. */ |
| 1754 | lid_check: |
| 1755 | return !(lid < be16_to_cpu(IB_MULTICAST_LID_BASE) || |
| 1756 | lid == be16_to_cpu(IB_LID_PERMISSIVE)); |
| 1757 | } |
| 1758 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1759 | int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid) |
| 1760 | { |
Or Gerlitz | c3bccbfb | 2012-04-29 17:04:22 +0300 | [diff] [blame] | 1761 | int ret; |
| 1762 | |
Jack Morgenstein | 0c33aee | 2005-09-26 11:47:53 -0700 | [diff] [blame] | 1763 | if (!qp->device->attach_mcast) |
Leon Romanovsky | 87915bf | 2018-02-21 18:12:44 +0200 | [diff] [blame] | 1764 | return -EOPNOTSUPP; |
Noa Osherovich | be1d325 | 2017-06-12 11:14:03 +0300 | [diff] [blame] | 1765 | |
| 1766 | if (!rdma_is_multicast_addr((struct in6_addr *)gid->raw) || |
| 1767 | qp->qp_type != IB_QPT_UD || !is_valid_mcast_lid(qp, lid)) |
Jack Morgenstein | 0c33aee | 2005-09-26 11:47:53 -0700 | [diff] [blame] | 1768 | return -EINVAL; |
| 1769 | |
Or Gerlitz | c3bccbfb | 2012-04-29 17:04:22 +0300 | [diff] [blame] | 1770 | ret = qp->device->attach_mcast(qp, gid, lid); |
| 1771 | if (!ret) |
| 1772 | atomic_inc(&qp->usecnt); |
| 1773 | return ret; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1774 | } |
| 1775 | EXPORT_SYMBOL(ib_attach_mcast); |
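/*
 * Usage sketch (illustrative): attaching a UD QP to a multicast group. On
 * IB the LID must lie in the multicast range checked by is_valid_mcast_lid()
 * above; on RoCE the LID is ignored. The mgid would typically come from an
 * SA multicast join; 0xc001 is a hypothetical multicast LID.
 *
 *	union ib_gid mgid;
 *	int ret;
 *
 *	ret = ib_attach_mcast(qp, &mgid, 0xc001);
 */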
| 1776 | |
| 1777 | int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid) |
| 1778 | { |
Or Gerlitz | c3bccbfb | 2012-04-29 17:04:22 +0300 | [diff] [blame] | 1779 | int ret; |
| 1780 | |
Jack Morgenstein | 0c33aee | 2005-09-26 11:47:53 -0700 | [diff] [blame] | 1781 | if (!qp->device->detach_mcast) |
Leon Romanovsky | 87915bf | 2018-02-21 18:12:44 +0200 | [diff] [blame] | 1782 | return -EOPNOTSUPP; |
Noa Osherovich | be1d325 | 2017-06-12 11:14:03 +0300 | [diff] [blame] | 1783 | |
| 1784 | if (!rdma_is_multicast_addr((struct in6_addr *)gid->raw) || |
| 1785 | qp->qp_type != IB_QPT_UD || !is_valid_mcast_lid(qp, lid)) |
Jack Morgenstein | 0c33aee | 2005-09-26 11:47:53 -0700 | [diff] [blame] | 1786 | return -EINVAL; |
| 1787 | |
Or Gerlitz | c3bccbfb | 2012-04-29 17:04:22 +0300 | [diff] [blame] | 1788 | ret = qp->device->detach_mcast(qp, gid, lid); |
| 1789 | if (!ret) |
| 1790 | atomic_dec(&qp->usecnt); |
| 1791 | return ret; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1792 | } |
| 1793 | EXPORT_SYMBOL(ib_detach_mcast); |
Sean Hefty | 59991f9 | 2011-05-23 17:52:46 -0700 | [diff] [blame] | 1794 | |
Leon Romanovsky | f66c8ba | 2018-01-28 11:17:19 +0200 | [diff] [blame] | 1795 | struct ib_xrcd *__ib_alloc_xrcd(struct ib_device *device, const char *caller) |
Sean Hefty | 59991f9 | 2011-05-23 17:52:46 -0700 | [diff] [blame] | 1796 | { |
| 1797 | struct ib_xrcd *xrcd; |
| 1798 | |
| 1799 | if (!device->alloc_xrcd) |
Leon Romanovsky | 87915bf | 2018-02-21 18:12:44 +0200 | [diff] [blame] | 1800 | return ERR_PTR(-EOPNOTSUPP); |
Sean Hefty | 59991f9 | 2011-05-23 17:52:46 -0700 | [diff] [blame] | 1801 | |
| 1802 | xrcd = device->alloc_xrcd(device, NULL, NULL); |
| 1803 | if (!IS_ERR(xrcd)) { |
| 1804 | xrcd->device = device; |
Sean Hefty | 53d0bd1 | 2011-05-24 08:33:46 -0700 | [diff] [blame] | 1805 | xrcd->inode = NULL; |
Sean Hefty | 59991f9 | 2011-05-23 17:52:46 -0700 | [diff] [blame] | 1806 | atomic_set(&xrcd->usecnt, 0); |
Sean Hefty | d3d72d9 | 2011-05-26 23:06:44 -0700 | [diff] [blame] | 1807 | mutex_init(&xrcd->tgt_qp_mutex); |
| 1808 | INIT_LIST_HEAD(&xrcd->tgt_qp_list); |
Sean Hefty | 59991f9 | 2011-05-23 17:52:46 -0700 | [diff] [blame] | 1809 | } |
| 1810 | |
| 1811 | return xrcd; |
| 1812 | } |
Leon Romanovsky | f66c8ba | 2018-01-28 11:17:19 +0200 | [diff] [blame] | 1813 | EXPORT_SYMBOL(__ib_alloc_xrcd); |
Sean Hefty | 59991f9 | 2011-05-23 17:52:46 -0700 | [diff] [blame] | 1814 | |
| 1815 | int ib_dealloc_xrcd(struct ib_xrcd *xrcd) |
| 1816 | { |
Sean Hefty | d3d72d9 | 2011-05-26 23:06:44 -0700 | [diff] [blame] | 1817 | struct ib_qp *qp; |
| 1818 | int ret; |
| 1819 | |
Sean Hefty | 59991f9 | 2011-05-23 17:52:46 -0700 | [diff] [blame] | 1820 | if (atomic_read(&xrcd->usecnt)) |
| 1821 | return -EBUSY; |
| 1822 | |
Sean Hefty | d3d72d9 | 2011-05-26 23:06:44 -0700 | [diff] [blame] | 1823 | while (!list_empty(&xrcd->tgt_qp_list)) { |
| 1824 | qp = list_entry(xrcd->tgt_qp_list.next, struct ib_qp, xrcd_list); |
| 1825 | ret = ib_destroy_qp(qp); |
| 1826 | if (ret) |
| 1827 | return ret; |
| 1828 | } |
| 1829 | |
Sean Hefty | 59991f9 | 2011-05-23 17:52:46 -0700 | [diff] [blame] | 1830 | return xrcd->device->dealloc_xrcd(xrcd); |
| 1831 | } |
| 1832 | EXPORT_SYMBOL(ib_dealloc_xrcd); |
Hadar Hen Zion | 319a441 | 2013-08-07 14:01:59 +0300 | [diff] [blame] | 1833 | |
Yishai Hadas | 5fd251c | 2016-05-23 15:20:48 +0300 | [diff] [blame] | 1834 | /** |
| 1835 | * ib_create_wq - Creates a WQ associated with the specified protection |
| 1836 | * domain. |
| 1837 | * @pd: The protection domain associated with the WQ. |
Randy Dunlap | 1f58621 | 2018-01-05 16:21:40 -0800 | [diff] [blame] | 1838 | * @wq_attr: A list of initial attributes required to create the |
Yishai Hadas | 5fd251c | 2016-05-23 15:20:48 +0300 | [diff] [blame] | 1839 | * WQ. If WQ creation succeeds, then the attributes are updated to |
| 1840 | * the actual capabilities of the created WQ. |
| 1841 | * |
Randy Dunlap | 1f58621 | 2018-01-05 16:21:40 -0800 | [diff] [blame] | 1842 | * wq_attr->max_wr and wq_attr->max_sge determine |
Yishai Hadas | 5fd251c | 2016-05-23 15:20:48 +0300 | [diff] [blame] | 1843 | * the requested size of the WQ, and set to the actual values allocated |
| 1844 | * on return. |
| 1845 | * If ib_create_wq() succeeds, then max_wr and max_sge will always be |
| 1846 | * at least as large as the requested values. |
| 1847 | */ |
| 1848 | struct ib_wq *ib_create_wq(struct ib_pd *pd, |
| 1849 | struct ib_wq_init_attr *wq_attr) |
| 1850 | { |
| 1851 | struct ib_wq *wq; |
| 1852 | |
| 1853 | if (!pd->device->create_wq) |
Leon Romanovsky | 87915bf | 2018-02-21 18:12:44 +0200 | [diff] [blame] | 1854 | return ERR_PTR(-EOPNOTSUPP); |
Yishai Hadas | 5fd251c | 2016-05-23 15:20:48 +0300 | [diff] [blame] | 1855 | |
| 1856 | wq = pd->device->create_wq(pd, wq_attr, NULL); |
| 1857 | if (!IS_ERR(wq)) { |
| 1858 | wq->event_handler = wq_attr->event_handler; |
| 1859 | wq->wq_context = wq_attr->wq_context; |
| 1860 | wq->wq_type = wq_attr->wq_type; |
| 1861 | wq->cq = wq_attr->cq; |
| 1862 | wq->device = pd->device; |
| 1863 | wq->pd = pd; |
| 1864 | wq->uobject = NULL; |
| 1865 | atomic_inc(&pd->usecnt); |
| 1866 | atomic_inc(&wq_attr->cq->usecnt); |
| 1867 | atomic_set(&wq->usecnt, 0); |
| 1868 | } |
| 1869 | return wq; |
| 1870 | } |
| 1871 | EXPORT_SYMBOL(ib_create_wq); |
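/*
 * Usage sketch (illustrative): creating a receive WQ, e.g. as one entry of
 * an RSS indirection table; "pd" and "cq" are assumed to exist already.
 *
 *	struct ib_wq_init_attr wq_attr = {
 *		.wq_type = IB_WQT_RQ,
 *		.max_wr  = 1024,
 *		.max_sge = 1,
 *		.cq      = cq,
 *	};
 *	struct ib_wq *wq = ib_create_wq(pd, &wq_attr);
 */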
| 1872 | |
| 1873 | /** |
| 1874 | * ib_destroy_wq - Destroys the specified WQ. |
| 1875 | * @wq: The WQ to destroy. |
| 1876 | */ |
| 1877 | int ib_destroy_wq(struct ib_wq *wq) |
| 1878 | { |
| 1879 | int err; |
| 1880 | struct ib_cq *cq = wq->cq; |
| 1881 | struct ib_pd *pd = wq->pd; |
| 1882 | |
| 1883 | if (atomic_read(&wq->usecnt)) |
| 1884 | return -EBUSY; |
| 1885 | |
| 1886 | err = wq->device->destroy_wq(wq); |
| 1887 | if (!err) { |
| 1888 | atomic_dec(&pd->usecnt); |
| 1889 | atomic_dec(&cq->usecnt); |
| 1890 | } |
| 1891 | return err; |
| 1892 | } |
| 1893 | EXPORT_SYMBOL(ib_destroy_wq); |
| 1894 | |
| 1895 | /** |
| 1896 | * ib_modify_wq - Modifies the specified WQ. |
| 1897 | * @wq: The WQ to modify. |
| 1898 | * @wq_attr: On input, specifies the WQ attributes to modify. On output, |
| 1899 | * the current values of selected WQ attributes are returned. |
| 1900 | * @wq_attr_mask: A bit-mask used to specify which attributes of the WQ |
| 1901 | * are being modified. |
| 1902 | */ |
| 1903 | int ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr, |
| 1904 | u32 wq_attr_mask) |
| 1905 | { |
| 1906 | int err; |
| 1907 | |
| 1908 | if (!wq->device->modify_wq) |
Leon Romanovsky | 87915bf | 2018-02-21 18:12:44 +0200 | [diff] [blame] | 1909 | return -EOPNOTSUPP; |
Yishai Hadas | 5fd251c | 2016-05-23 15:20:48 +0300 | [diff] [blame] | 1910 | |
| 1911 | err = wq->device->modify_wq(wq, wq_attr, wq_attr_mask, NULL); |
| 1912 | return err; |
| 1913 | } |
| 1914 | EXPORT_SYMBOL(ib_modify_wq); |
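/*
 * Usage sketch (illustrative): moving a freshly created WQ from RESET to
 * ready so it starts accepting receive work requests.
 *
 *	struct ib_wq_attr wq_attr = {
 *		.wq_state      = IB_WQS_RDY,
 *		.curr_wq_state = IB_WQS_RESET,
 *	};
 *	int ret;
 *
 *	ret = ib_modify_wq(wq, &wq_attr, IB_WQ_STATE | IB_WQ_CUR_STATE);
 */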
| 1915 | |
Yishai Hadas | 6d39786 | 2016-05-23 15:20:51 +0300 | [diff] [blame] | 1916 | /** |
| 1917 | * ib_create_rwq_ind_table - Creates a RQ Indirection Table. |
| 1918 | * @device: The device on which to create the rwq indirection table. |
| 1919 | * @init_attr: A list of initial attributes required to |
| 1920 | * create the Indirection Table. |
| 1921 | * |
| 1922 | * Note: The lifetime of init_attr->ind_tbl must be at least as long as |
| 1923 | * that of the created ib_rwq_ind_table object; the caller is responsible |
| 1924 | * for allocating and freeing its memory. |
| 1925 | */ |
| 1926 | struct ib_rwq_ind_table *ib_create_rwq_ind_table(struct ib_device *device, |
| 1927 | struct ib_rwq_ind_table_init_attr *init_attr) |
| 1928 | { |
| 1929 | struct ib_rwq_ind_table *rwq_ind_table; |
| 1930 | int i; |
| 1931 | u32 table_size; |
| 1932 | |
| 1933 | if (!device->create_rwq_ind_table) |
Leon Romanovsky | 87915bf | 2018-02-21 18:12:44 +0200 | [diff] [blame] | 1934 | return ERR_PTR(-EOPNOTSUPP); |
Yishai Hadas | 6d39786 | 2016-05-23 15:20:51 +0300 | [diff] [blame] | 1935 | |
| 1936 | table_size = (1 << init_attr->log_ind_tbl_size); |
| 1937 | rwq_ind_table = device->create_rwq_ind_table(device, |
| 1938 | init_attr, NULL); |
| 1939 | if (IS_ERR(rwq_ind_table)) |
| 1940 | return rwq_ind_table; |
| 1941 | |
| 1942 | rwq_ind_table->ind_tbl = init_attr->ind_tbl; |
| 1943 | rwq_ind_table->log_ind_tbl_size = init_attr->log_ind_tbl_size; |
| 1944 | rwq_ind_table->device = device; |
| 1945 | rwq_ind_table->uobject = NULL; |
| 1946 | atomic_set(&rwq_ind_table->usecnt, 0); |
| 1947 | |
| 1948 | for (i = 0; i < table_size; i++) |
| 1949 | atomic_inc(&rwq_ind_table->ind_tbl[i]->usecnt); |
| 1950 | |
| 1951 | return rwq_ind_table; |
| 1952 | } |
| 1953 | EXPORT_SYMBOL(ib_create_rwq_ind_table); |
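/*
 * Usage sketch (illustrative): building a four-entry (log size 2)
 * indirection table; wq0..wq3 are hypothetical WQs from earlier
 * ib_create_wq() calls, and per the note above wqs[] must outlive the
 * table.
 *
 *	struct ib_wq *wqs[4] = { wq0, wq1, wq2, wq3 };
 *	struct ib_rwq_ind_table_init_attr init_attr = {
 *		.log_ind_tbl_size = 2,
 *		.ind_tbl          = wqs,
 *	};
 *	struct ib_rwq_ind_table *ind_tbl =
 *		ib_create_rwq_ind_table(device, &init_attr);
 */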
| 1954 | |
| 1955 | /** |
| 1956 | * ib_destroy_rwq_ind_table - Destroys the specified Indirection Table. |
| 1957 | * @wq_ind_table: The Indirection Table to destroy. |
| 1958 | */ |
| 1959 | int ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *rwq_ind_table) |
| 1960 | { |
| 1961 | int err, i; |
| 1962 | u32 table_size = (1 << rwq_ind_table->log_ind_tbl_size); |
| 1963 | struct ib_wq **ind_tbl = rwq_ind_table->ind_tbl; |
| 1964 | |
| 1965 | if (atomic_read(&rwq_ind_table->usecnt)) |
| 1966 | return -EBUSY; |
| 1967 | |
| 1968 | err = rwq_ind_table->device->destroy_rwq_ind_table(rwq_ind_table); |
| 1969 | if (!err) { |
| 1970 | for (i = 0; i < table_size; i++) |
| 1971 | atomic_dec(&ind_tbl[i]->usecnt); |
| 1972 | } |
| 1973 | |
| 1974 | return err; |
| 1975 | } |
| 1976 | EXPORT_SYMBOL(ib_destroy_rwq_ind_table); |
| 1977 | |
Hadar Hen Zion | 319a441 | 2013-08-07 14:01:59 +0300 | [diff] [blame] | 1978 | struct ib_flow *ib_create_flow(struct ib_qp *qp, |
| 1979 | struct ib_flow_attr *flow_attr, |
| 1980 | int domain) |
| 1981 | { |
| 1982 | struct ib_flow *flow_id; |
| 1983 | if (!qp->device->create_flow) |
Leon Romanovsky | 87915bf | 2018-02-21 18:12:44 +0200 | [diff] [blame] | 1984 | return ERR_PTR(-EOPNOTSUPP); |
Hadar Hen Zion | 319a441 | 2013-08-07 14:01:59 +0300 | [diff] [blame] | 1985 | |
Matan Barak | 59082a3 | 2018-05-31 16:43:35 +0300 | [diff] [blame] | 1986 | flow_id = qp->device->create_flow(qp, flow_attr, domain, NULL); |
Mark Bloch | 8ecc798 | 2016-10-27 16:36:30 +0300 | [diff] [blame] | 1987 | if (!IS_ERR(flow_id)) { |
Hadar Hen Zion | 319a441 | 2013-08-07 14:01:59 +0300 | [diff] [blame] | 1988 | atomic_inc(&qp->usecnt); |
Mark Bloch | 8ecc798 | 2016-10-27 16:36:30 +0300 | [diff] [blame] | 1989 | flow_id->qp = qp; |
| 1990 | } |
Hadar Hen Zion | 319a441 | 2013-08-07 14:01:59 +0300 | [diff] [blame] | 1991 | return flow_id; |
| 1992 | } |
| 1993 | EXPORT_SYMBOL(ib_create_flow); |
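/*
 * Usage sketch (illustrative): attaching a minimal normal-priority steering
 * rule to a QP; a real rule would append flow specs (eth/ipv4/tcp) after
 * the header and account for them in .size and .num_of_specs.
 *
 *	struct ib_flow_attr flow_attr = {
 *		.type         = IB_FLOW_ATTR_NORMAL,
 *		.size         = sizeof(flow_attr),
 *		.priority     = 0,
 *		.num_of_specs = 0,
 *		.port         = 1,
 *	};
 *	struct ib_flow *flow = ib_create_flow(qp, &flow_attr,
 *					      IB_FLOW_DOMAIN_USER);
 */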
| 1994 | |
| 1995 | int ib_destroy_flow(struct ib_flow *flow_id) |
| 1996 | { |
| 1997 | int err; |
| 1998 | struct ib_qp *qp = flow_id->qp; |
| 1999 | |
| 2000 | err = qp->device->destroy_flow(flow_id); |
| 2001 | if (!err) |
| 2002 | atomic_dec(&qp->usecnt); |
| 2003 | return err; |
| 2004 | } |
| 2005 | EXPORT_SYMBOL(ib_destroy_flow); |
Sagi Grimberg | 1b01d33 | 2014-02-23 14:19:05 +0200 | [diff] [blame] | 2006 | |
| 2007 | int ib_check_mr_status(struct ib_mr *mr, u32 check_mask, |
| 2008 | struct ib_mr_status *mr_status) |
| 2009 | { |
| 2010 | return mr->device->check_mr_status ? |
Leon Romanovsky | 87915bf | 2018-02-21 18:12:44 +0200 | [diff] [blame] | 2011 | mr->device->check_mr_status(mr, check_mask, mr_status) : -EOPNOTSUPP; |
Sagi Grimberg | 1b01d33 | 2014-02-23 14:19:05 +0200 | [diff] [blame] | 2012 | } |
| 2013 | EXPORT_SYMBOL(ib_check_mr_status); |
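/*
 * Usage sketch (illustrative): after a signature-enabled I/O completes, a
 * ULP can ask whether the HCA detected a T10-DIF/signature error.
 *
 *	struct ib_mr_status mr_status;
 *	int ret;
 *
 *	ret = ib_check_mr_status(mr, IB_MR_CHECK_SIG_STATUS, &mr_status);
 *	if (!ret && (mr_status.fail_status & IB_MR_CHECK_SIG_STATUS))
 *		pr_err("signature error %d at offset %llu\n",
 *		       mr_status.sig_err.err_type,
 *		       (unsigned long long)mr_status.sig_err.sig_err_offset);
 */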
Sagi Grimberg | 4c67e2b | 2015-10-13 19:11:24 +0300 | [diff] [blame] | 2014 | |
Eli Cohen | 50174a7 | 2016-03-11 22:58:38 +0200 | [diff] [blame] | 2015 | int ib_set_vf_link_state(struct ib_device *device, int vf, u8 port, |
| 2016 | int state) |
| 2017 | { |
| 2018 | if (!device->set_vf_link_state) |
Leon Romanovsky | 87915bf | 2018-02-21 18:12:44 +0200 | [diff] [blame] | 2019 | return -EOPNOTSUPP; |
Eli Cohen | 50174a7 | 2016-03-11 22:58:38 +0200 | [diff] [blame] | 2020 | |
| 2021 | return device->set_vf_link_state(device, vf, port, state); |
| 2022 | } |
| 2023 | EXPORT_SYMBOL(ib_set_vf_link_state); |
| 2024 | |
| 2025 | int ib_get_vf_config(struct ib_device *device, int vf, u8 port, |
| 2026 | struct ifla_vf_info *info) |
| 2027 | { |
| 2028 | if (!device->get_vf_config) |
Leon Romanovsky | 87915bf | 2018-02-21 18:12:44 +0200 | [diff] [blame] | 2029 | return -EOPNOTSUPP; |
Eli Cohen | 50174a7 | 2016-03-11 22:58:38 +0200 | [diff] [blame] | 2030 | |
| 2031 | return device->get_vf_config(device, vf, port, info); |
| 2032 | } |
| 2033 | EXPORT_SYMBOL(ib_get_vf_config); |
| 2034 | |
| 2035 | int ib_get_vf_stats(struct ib_device *device, int vf, u8 port, |
| 2036 | struct ifla_vf_stats *stats) |
| 2037 | { |
| 2038 | if (!device->get_vf_stats) |
Leon Romanovsky | 87915bf | 2018-02-21 18:12:44 +0200 | [diff] [blame] | 2039 | return -EOPNOTSUPP; |
Eli Cohen | 50174a7 | 2016-03-11 22:58:38 +0200 | [diff] [blame] | 2040 | |
| 2041 | return device->get_vf_stats(device, vf, port, stats); |
| 2042 | } |
| 2043 | EXPORT_SYMBOL(ib_get_vf_stats); |
| 2044 | |
| 2045 | int ib_set_vf_guid(struct ib_device *device, int vf, u8 port, u64 guid, |
| 2046 | int type) |
| 2047 | { |
| 2048 | if (!device->set_vf_guid) |
Leon Romanovsky | 87915bf | 2018-02-21 18:12:44 +0200 | [diff] [blame] | 2049 | return -EOPNOTSUPP; |
Eli Cohen | 50174a7 | 2016-03-11 22:58:38 +0200 | [diff] [blame] | 2050 | |
| 2051 | return device->set_vf_guid(device, vf, port, guid, type); |
| 2052 | } |
| 2053 | EXPORT_SYMBOL(ib_set_vf_guid); |
| 2054 | |
Sagi Grimberg | 4c67e2b | 2015-10-13 19:11:24 +0300 | [diff] [blame] | 2055 | /** |
| 2056 | * ib_map_mr_sg() - Map the largest prefix of a dma mapped SG list |
| 2057 | * and set it on the memory region. |
| 2058 | * @mr: memory region |
| 2059 | * @sg: dma mapped scatterlist |
| 2060 | * @sg_nents: number of entries in sg |
Christoph Hellwig | ff2ba99 | 2016-05-03 18:01:04 +0200 | [diff] [blame] | 2061 | * @sg_offset: offset in bytes into sg |
Sagi Grimberg | 4c67e2b | 2015-10-13 19:11:24 +0300 | [diff] [blame] | 2062 | * @page_size: page vector desired page size |
| 2063 | * |
| 2064 | * Constraints: |
| 2065 | * - The first sg element is allowed to have an offset. |
Bart Van Assche | 5274612 | 2016-09-26 09:09:42 -0700 | [diff] [blame] | 2066 | * - Each sg element must either be aligned to page_size or virtually |
| 2067 | * contiguous to the previous element. In case an sg element has a |
| 2068 | * non-contiguous offset, the mapping prefix will not include it. |
Sagi Grimberg | 4c67e2b | 2015-10-13 19:11:24 +0300 | [diff] [blame] | 2069 | * - The last sg element is allowed to have length less than page_size. |
| 2070 | * - If sg_nents total byte length exceeds the mr max_num_sge * page_size |
| 2071 | * then only max_num_sg entries will be mapped. |
Bart Van Assche | 5274612 | 2016-09-26 09:09:42 -0700 | [diff] [blame] | 2072 | * - If the MR was allocated with type IB_MR_TYPE_SG_GAPS, none of these |
Sagi Grimberg | f5aa915 | 2016-02-29 19:07:32 +0200 | [diff] [blame] | 2073 | * constraints holds and the page_size argument is ignored. |
Sagi Grimberg | 4c67e2b | 2015-10-13 19:11:24 +0300 | [diff] [blame] | 2074 | * |
| 2075 | * Returns the number of sg elements that were mapped to the memory region. |
| 2076 | * |
| 2077 | * After this completes successfully, the memory region |
| 2078 | * is ready for registration. |
| 2079 | */ |
Christoph Hellwig | ff2ba99 | 2016-05-03 18:01:04 +0200 | [diff] [blame] | 2080 | int ib_map_mr_sg(struct ib_mr *mr, struct scatterlist *sg, int sg_nents, |
Bart Van Assche | 9aa8b32 | 2016-05-12 10:49:15 -0700 | [diff] [blame] | 2081 | unsigned int *sg_offset, unsigned int page_size) |
Sagi Grimberg | 4c67e2b | 2015-10-13 19:11:24 +0300 | [diff] [blame] | 2082 | { |
| 2083 | if (unlikely(!mr->device->map_mr_sg)) |
Leon Romanovsky | 87915bf | 2018-02-21 18:12:44 +0200 | [diff] [blame] | 2084 | return -EOPNOTSUPP; |
Sagi Grimberg | 4c67e2b | 2015-10-13 19:11:24 +0300 | [diff] [blame] | 2085 | |
| 2086 | mr->page_size = page_size; |
| 2087 | |
Christoph Hellwig | ff2ba99 | 2016-05-03 18:01:04 +0200 | [diff] [blame] | 2088 | return mr->device->map_mr_sg(mr, sg, sg_nents, sg_offset); |
Sagi Grimberg | 4c67e2b | 2015-10-13 19:11:24 +0300 | [diff] [blame] | 2089 | } |
| 2090 | EXPORT_SYMBOL(ib_map_mr_sg); |
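/*
 * Usage sketch (illustrative): mapping a DMA-mapped scatterlist into an MR
 * from ib_alloc_mr(); "sg" and "sg_cnt" come from a prior ib_dma_map_sg()
 * call, a NULL sg_offset means the prefix starts at offset zero, and a
 * short return count means only a prefix of the list was mapped.
 *
 *	int n = ib_map_mr_sg(mr, sg, sg_cnt, NULL, PAGE_SIZE);
 *
 *	if (n < sg_cnt)
 *		return n < 0 ? n : -EINVAL;
 */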
| 2091 | |
| 2092 | /** |
| 2093 | * ib_sg_to_pages() - Convert the largest prefix of a sg list |
| 2094 | * to a page vector |
| 2095 | * @mr: memory region |
| 2096 | * @sgl: dma mapped scatterlist |
| 2097 | * @sg_nents: number of entries in sg |
Bart Van Assche | 9aa8b32 | 2016-05-12 10:49:15 -0700 | [diff] [blame] | 2098 | * @sg_offset_p: IN: start offset in bytes into sg |
| 2099 | * OUT: offset in bytes for element n of the sg of the first |
| 2100 | * byte that has not been processed where n is the return |
| 2101 | * value of this function. |
Sagi Grimberg | 4c67e2b | 2015-10-13 19:11:24 +0300 | [diff] [blame] | 2102 | * @set_page: driver page assignment function pointer |
| 2103 | * |
Bart Van Assche | 8f5ba10 | 2015-12-03 16:04:17 -0800 | [diff] [blame] | 2104 | * Core service helper for drivers to convert the largest |
Sagi Grimberg | 4c67e2b | 2015-10-13 19:11:24 +0300 | [diff] [blame] | 2105 | * prefix of a given sg list to a page vector. The sg list |
| 2106 | * prefix converted is the prefix that meets the requirements |
| 2107 | * of ib_map_mr_sg. |
| 2108 | * |
| 2109 | * Returns the number of sg elements that were assigned to |
| 2110 | * a page vector. |
| 2111 | */ |
Christoph Hellwig | ff2ba99 | 2016-05-03 18:01:04 +0200 | [diff] [blame] | 2112 | int ib_sg_to_pages(struct ib_mr *mr, struct scatterlist *sgl, int sg_nents, |
Bart Van Assche | 9aa8b32 | 2016-05-12 10:49:15 -0700 | [diff] [blame] | 2113 | unsigned int *sg_offset_p, int (*set_page)(struct ib_mr *, u64)) |
Sagi Grimberg | 4c67e2b | 2015-10-13 19:11:24 +0300 | [diff] [blame] | 2114 | { |
| 2115 | struct scatterlist *sg; |
Bart Van Assche | b6aeb98 | 2015-12-29 10:45:03 +0100 | [diff] [blame] | 2116 | u64 last_end_dma_addr = 0; |
Bart Van Assche | 9aa8b32 | 2016-05-12 10:49:15 -0700 | [diff] [blame] | 2117 | unsigned int sg_offset = sg_offset_p ? *sg_offset_p : 0; |
Sagi Grimberg | 4c67e2b | 2015-10-13 19:11:24 +0300 | [diff] [blame] | 2118 | unsigned int last_page_off = 0; |
| 2119 | u64 page_mask = ~((u64)mr->page_size - 1); |
Bart Van Assche | 8f5ba10 | 2015-12-03 16:04:17 -0800 | [diff] [blame] | 2120 | int i, ret; |
Sagi Grimberg | 4c67e2b | 2015-10-13 19:11:24 +0300 | [diff] [blame] | 2121 | |
Bart Van Assche | 9aa8b32 | 2016-05-12 10:49:15 -0700 | [diff] [blame] | 2122 | if (unlikely(sg_nents <= 0 || sg_offset > sg_dma_len(&sgl[0]))) |
| 2123 | return -EINVAL; |
| 2124 | |
Christoph Hellwig | ff2ba99 | 2016-05-03 18:01:04 +0200 | [diff] [blame] | 2125 | mr->iova = sg_dma_address(&sgl[0]) + sg_offset; |
Sagi Grimberg | 4c67e2b | 2015-10-13 19:11:24 +0300 | [diff] [blame] | 2126 | mr->length = 0; |
| 2127 | |
| 2128 | for_each_sg(sgl, sg, sg_nents, i) { |
Christoph Hellwig | ff2ba99 | 2016-05-03 18:01:04 +0200 | [diff] [blame] | 2129 | u64 dma_addr = sg_dma_address(sg) + sg_offset; |
Bart Van Assche | 9aa8b32 | 2016-05-12 10:49:15 -0700 | [diff] [blame] | 2130 | u64 prev_addr = dma_addr; |
Christoph Hellwig | ff2ba99 | 2016-05-03 18:01:04 +0200 | [diff] [blame] | 2131 | unsigned int dma_len = sg_dma_len(sg) - sg_offset; |
Sagi Grimberg | 4c67e2b | 2015-10-13 19:11:24 +0300 | [diff] [blame] | 2132 | u64 end_dma_addr = dma_addr + dma_len; |
| 2133 | u64 page_addr = dma_addr & page_mask; |
| 2134 | |
Bart Van Assche | 8f5ba10 | 2015-12-03 16:04:17 -0800 | [diff] [blame] | 2135 | /* |
| 2136 | * For the second and later elements, check whether either the |
| 2137 | * end of element i-1 or the start of element i is not aligned |
| 2138 | * on a page boundary. |
| 2139 | */ |
| 2140 | if (i && (last_page_off != 0 || page_addr != dma_addr)) { |
| 2141 | /* Stop mapping if there is a gap. */ |
| 2142 | if (last_end_dma_addr != dma_addr) |
| 2143 | break; |
Sagi Grimberg | 4c67e2b | 2015-10-13 19:11:24 +0300 | [diff] [blame] | 2144 | |
Bart Van Assche | 8f5ba10 | 2015-12-03 16:04:17 -0800 | [diff] [blame] | 2145 | /* |
| 2146 | * Coalesce this element with the last. If it is small |
| 2147 | * enough just update mr->length. Otherwise start |
| 2148 | * mapping from the next page. |
| 2149 | */ |
| 2150 | goto next_page; |
Sagi Grimberg | 4c67e2b | 2015-10-13 19:11:24 +0300 | [diff] [blame] | 2151 | } |
| 2152 | |
| 2153 | do { |
Bart Van Assche | 8f5ba10 | 2015-12-03 16:04:17 -0800 | [diff] [blame] | 2154 | ret = set_page(mr, page_addr); |
Bart Van Assche | 9aa8b32 | 2016-05-12 10:49:15 -0700 | [diff] [blame] | 2155 | if (unlikely(ret < 0)) { |
| 2156 | sg_offset = prev_addr - sg_dma_address(sg); |
| 2157 | mr->length += prev_addr - dma_addr; |
| 2158 | if (sg_offset_p) |
| 2159 | *sg_offset_p = sg_offset; |
| 2160 | return i || sg_offset ? i : ret; |
| 2161 | } |
| 2162 | prev_addr = page_addr; |
Bart Van Assche | 8f5ba10 | 2015-12-03 16:04:17 -0800 | [diff] [blame] | 2163 | next_page: |
Sagi Grimberg | 4c67e2b | 2015-10-13 19:11:24 +0300 | [diff] [blame] | 2164 | page_addr += mr->page_size; |
| 2165 | } while (page_addr < end_dma_addr); |
| 2166 | |
| 2167 | mr->length += dma_len; |
| 2168 | last_end_dma_addr = end_dma_addr; |
Sagi Grimberg | 4c67e2b | 2015-10-13 19:11:24 +0300 | [diff] [blame] | 2169 | last_page_off = end_dma_addr & ~page_mask; |
Christoph Hellwig | ff2ba99 | 2016-05-03 18:01:04 +0200 | [diff] [blame] | 2170 | |
| 2171 | sg_offset = 0; |
Sagi Grimberg | 4c67e2b | 2015-10-13 19:11:24 +0300 | [diff] [blame] | 2172 | } |
| 2173 | |
Bart Van Assche | 9aa8b32 | 2016-05-12 10:49:15 -0700 | [diff] [blame] | 2174 | if (sg_offset_p) |
| 2175 | *sg_offset_p = 0; |
Sagi Grimberg | 4c67e2b | 2015-10-13 19:11:24 +0300 | [diff] [blame] | 2176 | return i; |
| 2177 | } |
| 2178 | EXPORT_SYMBOL(ib_sg_to_pages); |
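/*
 * Illustrative sketch, not part of this file: the usual way a driver
 * wires ib_sg_to_pages() into its ->map_mr_sg() hook is to pass a
 * set_page callback that appends each page's DMA address to a
 * driver-owned page list. "example_mr", its members, and both
 * functions below are hypothetical placeholders.
 */
struct example_mr {
	struct ib_mr	ibmr;
	u64		*pages;		/* page DMA addresses handed to the HW */
	int		npages;
	int		max_pages;
};

static int example_set_page(struct ib_mr *ibmr, u64 addr)
{
	struct example_mr *mr = container_of(ibmr, struct example_mr, ibmr);

	if (unlikely(mr->npages == mr->max_pages))
		return -ENOMEM;
	mr->pages[mr->npages++] = addr;
	return 0;
}

static int example_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
			     int sg_nents, unsigned int *sg_offset)
{
	struct example_mr *mr = container_of(ibmr, struct example_mr, ibmr);

	mr->npages = 0;
	return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset,
			      example_set_page);
}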
Steve Wise | 765d677 | 2016-02-17 08:15:41 -0800 | [diff] [blame] | 2179 | |
| 2180 | struct ib_drain_cqe { |
| 2181 | struct ib_cqe cqe; |
| 2182 | struct completion done; |
| 2183 | }; |
| 2184 | |
| 2185 | static void ib_drain_qp_done(struct ib_cq *cq, struct ib_wc *wc) |
| 2186 | { |
| 2187 | struct ib_drain_cqe *cqe = container_of(wc->wr_cqe, struct ib_drain_cqe, |
| 2188 | cqe); |
| 2189 | |
| 2190 | complete(&cqe->done); |
| 2191 | } |
| 2192 | |
| 2193 | /* |
 | 2194 |  * Post a WR on the SQ and block until its completion is reaped. |
| 2195 | */ |
| 2196 | static void __ib_drain_sq(struct ib_qp *qp) |
| 2197 | { |
Bart Van Assche | f039f44 | 2017-02-14 10:56:35 -0800 | [diff] [blame] | 2198 | struct ib_cq *cq = qp->send_cq; |
Steve Wise | 765d677 | 2016-02-17 08:15:41 -0800 | [diff] [blame] | 2199 | struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR }; |
| 2200 | struct ib_drain_cqe sdrain; |
Bart Van Assche | a1ae7d0 | 2018-03-01 14:00:28 -0800 | [diff] [blame] | 2201 | struct ib_send_wr *bad_swr; |
| 2202 | struct ib_rdma_wr swr = { |
| 2203 | .wr = { |
Andrew Morton | 6ee6877 | 2018-03-13 14:51:57 -0700 | [diff] [blame] | 2204 | .next = NULL, |
| 2205 | { .wr_cqe = &sdrain.cqe, }, |
Bart Van Assche | a1ae7d0 | 2018-03-01 14:00:28 -0800 | [diff] [blame] | 2206 | .opcode = IB_WR_RDMA_WRITE, |
Bart Van Assche | a1ae7d0 | 2018-03-01 14:00:28 -0800 | [diff] [blame] | 2207 | }, |
| 2208 | }; |
Steve Wise | 765d677 | 2016-02-17 08:15:41 -0800 | [diff] [blame] | 2209 | int ret; |
| 2210 | |
Steve Wise | 765d677 | 2016-02-17 08:15:41 -0800 | [diff] [blame] | 2211 | ret = ib_modify_qp(qp, &attr, IB_QP_STATE); |
| 2212 | if (ret) { |
| 2213 | WARN_ONCE(ret, "failed to drain send queue: %d\n", ret); |
| 2214 | return; |
| 2215 | } |
| 2216 | |
Max Gurtovoy | aaebd37 | 2018-01-14 17:07:48 +0200 | [diff] [blame] | 2217 | sdrain.cqe.done = ib_drain_qp_done; |
| 2218 | init_completion(&sdrain.done); |
| 2219 | |
Bart Van Assche | a1ae7d0 | 2018-03-01 14:00:28 -0800 | [diff] [blame] | 2220 | ret = ib_post_send(qp, &swr.wr, &bad_swr); |
Steve Wise | 765d677 | 2016-02-17 08:15:41 -0800 | [diff] [blame] | 2221 | if (ret) { |
| 2222 | WARN_ONCE(ret, "failed to drain send queue: %d\n", ret); |
| 2223 | return; |
| 2224 | } |
| 2225 | |
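	/*
	 * A CQ allocated with IB_POLL_DIRECT has no interrupt or workqueue
	 * context reaping it, so poll on the caller's behalf until the
	 * drain completion has been consumed.
	 */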
Bart Van Assche | f039f44 | 2017-02-14 10:56:35 -0800 | [diff] [blame] | 2226 | if (cq->poll_ctx == IB_POLL_DIRECT) |
| 2227 | while (wait_for_completion_timeout(&sdrain.done, HZ / 10) <= 0) |
| 2228 | ib_process_cq_direct(cq, -1); |
| 2229 | else |
| 2230 | wait_for_completion(&sdrain.done); |
Steve Wise | 765d677 | 2016-02-17 08:15:41 -0800 | [diff] [blame] | 2231 | } |
| 2232 | |
| 2233 | /* |
 | 2234 |  * Post a WR on the RQ and block until its completion is reaped. |
| 2235 | */ |
| 2236 | static void __ib_drain_rq(struct ib_qp *qp) |
| 2237 | { |
Bart Van Assche | f039f44 | 2017-02-14 10:56:35 -0800 | [diff] [blame] | 2238 | struct ib_cq *cq = qp->recv_cq; |
Steve Wise | 765d677 | 2016-02-17 08:15:41 -0800 | [diff] [blame] | 2239 | struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR }; |
| 2240 | struct ib_drain_cqe rdrain; |
| 2241 | struct ib_recv_wr rwr = {}, *bad_rwr; |
| 2242 | int ret; |
| 2243 | |
Steve Wise | 765d677 | 2016-02-17 08:15:41 -0800 | [diff] [blame] | 2244 | ret = ib_modify_qp(qp, &attr, IB_QP_STATE); |
| 2245 | if (ret) { |
| 2246 | WARN_ONCE(ret, "failed to drain recv queue: %d\n", ret); |
| 2247 | return; |
| 2248 | } |
| 2249 | |
Max Gurtovoy | aaebd37 | 2018-01-14 17:07:48 +0200 | [diff] [blame] | 2250 | rwr.wr_cqe = &rdrain.cqe; |
| 2251 | rdrain.cqe.done = ib_drain_qp_done; |
| 2252 | init_completion(&rdrain.done); |
| 2253 | |
Steve Wise | 765d677 | 2016-02-17 08:15:41 -0800 | [diff] [blame] | 2254 | ret = ib_post_recv(qp, &rwr, &bad_rwr); |
| 2255 | if (ret) { |
| 2256 | WARN_ONCE(ret, "failed to drain recv queue: %d\n", ret); |
| 2257 | return; |
| 2258 | } |
| 2259 | |
Bart Van Assche | f039f44 | 2017-02-14 10:56:35 -0800 | [diff] [blame] | 2260 | if (cq->poll_ctx == IB_POLL_DIRECT) |
| 2261 | while (wait_for_completion_timeout(&rdrain.done, HZ / 10) <= 0) |
| 2262 | ib_process_cq_direct(cq, -1); |
| 2263 | else |
| 2264 | wait_for_completion(&rdrain.done); |
Steve Wise | 765d677 | 2016-02-17 08:15:41 -0800 | [diff] [blame] | 2265 | } |
| 2266 | |
| 2267 | /** |
| 2268 | * ib_drain_sq() - Block until all SQ CQEs have been consumed by the |
| 2269 | * application. |
| 2270 | * @qp: queue pair to drain |
| 2271 | * |
| 2272 | * If the device has a provider-specific drain function, then |
| 2273 | * call that. Otherwise call the generic drain function |
| 2274 | * __ib_drain_sq(). |
| 2275 | * |
| 2276 | * The caller must: |
| 2277 | * |
| 2278 | * ensure there is room in the CQ and SQ for the drain work request and |
| 2279 | * completion. |
| 2280 | * |
Bart Van Assche | f039f44 | 2017-02-14 10:56:35 -0800 | [diff] [blame] | 2281 | * allocate the CQ using ib_alloc_cq(). |
Steve Wise | 765d677 | 2016-02-17 08:15:41 -0800 | [diff] [blame] | 2282 | * |
 | 2283 |  * ensure that no other contexts are posting WRs concurrently; |
 | 2284 |  * otherwise the drain is not guaranteed. |
| 2285 | */ |
| 2286 | void ib_drain_sq(struct ib_qp *qp) |
| 2287 | { |
| 2288 | if (qp->device->drain_sq) |
| 2289 | qp->device->drain_sq(qp); |
| 2290 | else |
| 2291 | __ib_drain_sq(qp); |
| 2292 | } |
| 2293 | EXPORT_SYMBOL(ib_drain_sq); |
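/*
 * Illustrative sketch, not part of this file: a ULP quiescing its send
 * path before freeing state that its send completion handler touches.
 * "my_conn" and its members are hypothetical placeholders.
 */
struct my_conn {
	struct ib_qp	*qp;
	void		*send_ctx;	/* dereferenced by send completions */
};

static void my_conn_quiesce_sends(struct my_conn *conn)
{
	/* The caller guarantees no new WRs are posted concurrently. */
	ib_drain_sq(conn->qp);

	/* Every SQ CQE has been reaped; per-send state is now unused. */
	kfree(conn->send_ctx);
	conn->send_ctx = NULL;
}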
| 2294 | |
| 2295 | /** |
| 2296 | * ib_drain_rq() - Block until all RQ CQEs have been consumed by the |
| 2297 | * application. |
| 2298 | * @qp: queue pair to drain |
| 2299 | * |
| 2300 | * If the device has a provider-specific drain function, then |
| 2301 | * call that. Otherwise call the generic drain function |
| 2302 | * __ib_drain_rq(). |
| 2303 | * |
| 2304 | * The caller must: |
| 2305 | * |
| 2306 | * ensure there is room in the CQ and RQ for the drain work request and |
| 2307 | * completion. |
| 2308 | * |
Bart Van Assche | f039f44 | 2017-02-14 10:56:35 -0800 | [diff] [blame] | 2309 | * allocate the CQ using ib_alloc_cq(). |
Steve Wise | 765d677 | 2016-02-17 08:15:41 -0800 | [diff] [blame] | 2310 | * |
 | 2311 |  * ensure that no other contexts are posting WRs concurrently; |
 | 2312 |  * otherwise the drain is not guaranteed. |
| 2313 | */ |
| 2314 | void ib_drain_rq(struct ib_qp *qp) |
| 2315 | { |
| 2316 | if (qp->device->drain_rq) |
| 2317 | qp->device->drain_rq(qp); |
| 2318 | else |
| 2319 | __ib_drain_rq(qp); |
| 2320 | } |
| 2321 | EXPORT_SYMBOL(ib_drain_rq); |
| 2322 | |
| 2323 | /** |
| 2324 | * ib_drain_qp() - Block until all CQEs have been consumed by the |
| 2325 | * application on both the RQ and SQ. |
| 2326 | * @qp: queue pair to drain |
| 2327 | * |
| 2328 | * The caller must: |
| 2329 | * |
| 2330 | * ensure there is room in the CQ(s), SQ, and RQ for drain work requests |
| 2331 | * and completions. |
| 2332 | * |
Bart Van Assche | f039f44 | 2017-02-14 10:56:35 -0800 | [diff] [blame] | 2333 | * allocate the CQs using ib_alloc_cq(). |
Steve Wise | 765d677 | 2016-02-17 08:15:41 -0800 | [diff] [blame] | 2334 | * |
 | 2335 |  * ensure that no other contexts are posting WRs concurrently; |
 | 2336 |  * otherwise the drain is not guaranteed. |
| 2337 | */ |
| 2338 | void ib_drain_qp(struct ib_qp *qp) |
| 2339 | { |
| 2340 | ib_drain_sq(qp); |
Sagi Grimberg | 42235f8 | 2016-04-26 17:55:38 +0300 | [diff] [blame] | 2341 | if (!qp->srq) |
| 2342 | ib_drain_rq(qp); |
Steve Wise | 765d677 | 2016-02-17 08:15:41 -0800 | [diff] [blame] | 2343 | } |
| 2344 | EXPORT_SYMBOL(ib_drain_qp); |
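/*
 * Illustrative sketch, not part of this file: typical teardown order
 * for a ULP whose CQ was allocated with ib_alloc_cq(). Draining
 * before destroying guarantees that no completion handler still
 * references connection state when the objects are freed.
 */
static void example_teardown(struct ib_qp *qp, struct ib_cq *cq)
{
	ib_drain_qp(qp);	/* reap all outstanding SQ and RQ CQEs */
	ib_destroy_qp(qp);
	ib_free_cq(cq);
}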