/*
 * Copyright (c) 2004 Mellanox Technologies Ltd.  All rights reserved.
 * Copyright (c) 2004 Infinicon Corporation.  All rights reserved.
 * Copyright (c) 2004 Intel Corporation.  All rights reserved.
 * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
 * Copyright (c) 2004 Voltaire Corporation.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005, 2006 Cisco Systems.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <net/addrconf.h>
#include <linux/security.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_cache.h>
#include <rdma/ib_addr.h>
#include <rdma/rw.h>

#include "core_priv.h"

static int ib_resolve_eth_dmac(struct ib_device *device,
                               struct rdma_ah_attr *ah_attr);

static const char * const ib_events[] = {
        [IB_EVENT_CQ_ERR]               = "CQ error",
        [IB_EVENT_QP_FATAL]             = "QP fatal error",
        [IB_EVENT_QP_REQ_ERR]           = "QP request error",
        [IB_EVENT_QP_ACCESS_ERR]        = "QP access error",
        [IB_EVENT_COMM_EST]             = "communication established",
        [IB_EVENT_SQ_DRAINED]           = "send queue drained",
        [IB_EVENT_PATH_MIG]             = "path migration successful",
        [IB_EVENT_PATH_MIG_ERR]         = "path migration error",
        [IB_EVENT_DEVICE_FATAL]         = "device fatal error",
        [IB_EVENT_PORT_ACTIVE]          = "port active",
        [IB_EVENT_PORT_ERR]             = "port error",
        [IB_EVENT_LID_CHANGE]           = "LID change",
        [IB_EVENT_PKEY_CHANGE]          = "P_key change",
        [IB_EVENT_SM_CHANGE]            = "SM change",
        [IB_EVENT_SRQ_ERR]              = "SRQ error",
        [IB_EVENT_SRQ_LIMIT_REACHED]    = "SRQ limit reached",
        [IB_EVENT_QP_LAST_WQE_REACHED]  = "last WQE reached",
        [IB_EVENT_CLIENT_REREGISTER]    = "client reregister",
        [IB_EVENT_GID_CHANGE]           = "GID changed",
};

const char *__attribute_const__ ib_event_msg(enum ib_event_type event)
{
        size_t index = event;

        return (index < ARRAY_SIZE(ib_events) && ib_events[index]) ?
                        ib_events[index] : "unrecognized event";
}
EXPORT_SYMBOL(ib_event_msg);

static const char * const wc_statuses[] = {
        [IB_WC_SUCCESS]                 = "success",
        [IB_WC_LOC_LEN_ERR]             = "local length error",
        [IB_WC_LOC_QP_OP_ERR]           = "local QP operation error",
        [IB_WC_LOC_EEC_OP_ERR]          = "local EE context operation error",
        [IB_WC_LOC_PROT_ERR]            = "local protection error",
        [IB_WC_WR_FLUSH_ERR]            = "WR flushed",
        [IB_WC_MW_BIND_ERR]             = "memory management operation error",
        [IB_WC_BAD_RESP_ERR]            = "bad response error",
        [IB_WC_LOC_ACCESS_ERR]          = "local access error",
        [IB_WC_REM_INV_REQ_ERR]         = "invalid request error",
        [IB_WC_REM_ACCESS_ERR]          = "remote access error",
        [IB_WC_REM_OP_ERR]              = "remote operation error",
        [IB_WC_RETRY_EXC_ERR]           = "transport retry counter exceeded",
        [IB_WC_RNR_RETRY_EXC_ERR]       = "RNR retry counter exceeded",
        [IB_WC_LOC_RDD_VIOL_ERR]        = "local RDD violation error",
        [IB_WC_REM_INV_RD_REQ_ERR]      = "remote invalid RD request",
        [IB_WC_REM_ABORT_ERR]           = "operation aborted",
        [IB_WC_INV_EECN_ERR]            = "invalid EE context number",
        [IB_WC_INV_EEC_STATE_ERR]       = "invalid EE context state",
        [IB_WC_FATAL_ERR]               = "fatal error",
        [IB_WC_RESP_TIMEOUT_ERR]        = "response timeout error",
        [IB_WC_GENERAL_ERR]             = "general error",
};

const char *__attribute_const__ ib_wc_status_msg(enum ib_wc_status status)
{
        size_t index = status;

        return (index < ARRAY_SIZE(wc_statuses) && wc_statuses[index]) ?
                        wc_statuses[index] : "unrecognized status";
}
EXPORT_SYMBOL(ib_wc_status_msg);

__attribute_const__ int ib_rate_to_mult(enum ib_rate rate)
{
        switch (rate) {
        case IB_RATE_2_5_GBPS: return   1;
        case IB_RATE_5_GBPS:   return   2;
        case IB_RATE_10_GBPS:  return   4;
        case IB_RATE_20_GBPS:  return   8;
        case IB_RATE_30_GBPS:  return  12;
        case IB_RATE_40_GBPS:  return  16;
        case IB_RATE_60_GBPS:  return  24;
        case IB_RATE_80_GBPS:  return  32;
        case IB_RATE_120_GBPS: return  48;
        case IB_RATE_14_GBPS:  return   6;
        case IB_RATE_56_GBPS:  return  22;
        case IB_RATE_112_GBPS: return  45;
        case IB_RATE_168_GBPS: return  67;
        case IB_RATE_25_GBPS:  return  10;
        case IB_RATE_100_GBPS: return  40;
        case IB_RATE_200_GBPS: return  80;
        case IB_RATE_300_GBPS: return 120;
        default:               return  -1;
        }
}
EXPORT_SYMBOL(ib_rate_to_mult);

__attribute_const__ enum ib_rate mult_to_ib_rate(int mult)
{
        switch (mult) {
        case 1:   return IB_RATE_2_5_GBPS;
        case 2:   return IB_RATE_5_GBPS;
        case 4:   return IB_RATE_10_GBPS;
        case 8:   return IB_RATE_20_GBPS;
        case 12:  return IB_RATE_30_GBPS;
        case 16:  return IB_RATE_40_GBPS;
        case 24:  return IB_RATE_60_GBPS;
        case 32:  return IB_RATE_80_GBPS;
        case 48:  return IB_RATE_120_GBPS;
        case 6:   return IB_RATE_14_GBPS;
        case 22:  return IB_RATE_56_GBPS;
        case 45:  return IB_RATE_112_GBPS;
        case 67:  return IB_RATE_168_GBPS;
        case 10:  return IB_RATE_25_GBPS;
        case 40:  return IB_RATE_100_GBPS;
        case 80:  return IB_RATE_200_GBPS;
        case 120: return IB_RATE_300_GBPS;
        default:  return IB_RATE_PORT_CURRENT;
        }
}
EXPORT_SYMBOL(mult_to_ib_rate);

__attribute_const__ int ib_rate_to_mbps(enum ib_rate rate)
{
        switch (rate) {
        case IB_RATE_2_5_GBPS: return 2500;
        case IB_RATE_5_GBPS:   return 5000;
        case IB_RATE_10_GBPS:  return 10000;
        case IB_RATE_20_GBPS:  return 20000;
        case IB_RATE_30_GBPS:  return 30000;
        case IB_RATE_40_GBPS:  return 40000;
        case IB_RATE_60_GBPS:  return 60000;
        case IB_RATE_80_GBPS:  return 80000;
        case IB_RATE_120_GBPS: return 120000;
        case IB_RATE_14_GBPS:  return 14062;
        case IB_RATE_56_GBPS:  return 56250;
        case IB_RATE_112_GBPS: return 112500;
        case IB_RATE_168_GBPS: return 168750;
        case IB_RATE_25_GBPS:  return 25781;
        case IB_RATE_100_GBPS: return 103125;
        case IB_RATE_200_GBPS: return 206250;
        case IB_RATE_300_GBPS: return 309375;
        default:               return -1;
        }
}
EXPORT_SYMBOL(ib_rate_to_mbps);
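
/*
 * Illustrative note (not from the original file): the multiplier returned
 * by ib_rate_to_mult() is relative to the 2.5 Gb/s base signalling rate,
 * so the three helpers above stay consistent with each other.  A
 * hypothetical sanity check, as a sketch:
 *
 *      WARN_ON(ib_rate_to_mult(IB_RATE_10_GBPS) != 4);
 *      WARN_ON(mult_to_ib_rate(4) != IB_RATE_10_GBPS);
 *      WARN_ON(ib_rate_to_mbps(IB_RATE_10_GBPS) != 10000);
 */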

__attribute_const__ enum rdma_transport_type
rdma_node_get_transport(enum rdma_node_type node_type)
{
        if (node_type == RDMA_NODE_USNIC)
                return RDMA_TRANSPORT_USNIC;
        if (node_type == RDMA_NODE_USNIC_UDP)
                return RDMA_TRANSPORT_USNIC_UDP;
        if (node_type == RDMA_NODE_RNIC)
                return RDMA_TRANSPORT_IWARP;

        return RDMA_TRANSPORT_IB;
}
EXPORT_SYMBOL(rdma_node_get_transport);

enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device, u8 port_num)
{
        enum rdma_transport_type lt;

        if (device->get_link_layer)
                return device->get_link_layer(device, port_num);

        lt = rdma_node_get_transport(device->node_type);
        if (lt == RDMA_TRANSPORT_IB)
                return IB_LINK_LAYER_INFINIBAND;

        return IB_LINK_LAYER_ETHERNET;
}
EXPORT_SYMBOL(rdma_port_get_link_layer);

/* Protection domains */

/**
 * ib_alloc_pd - Allocates an unused protection domain.
 * @device: The device on which to allocate the protection domain.
 *
 * A protection domain object provides an association between QPs, shared
 * receive queues, address handles, memory regions, and memory windows.
 *
 * Every PD has a local_dma_lkey which can be used as the lkey value for local
 * memory operations.
 */
struct ib_pd *__ib_alloc_pd(struct ib_device *device, unsigned int flags,
                            const char *caller)
{
        struct ib_pd *pd;
        int mr_access_flags = 0;

        pd = device->alloc_pd(device, NULL, NULL);
        if (IS_ERR(pd))
                return pd;

        pd->device = device;
        pd->uobject = NULL;
        pd->__internal_mr = NULL;
        atomic_set(&pd->usecnt, 0);
        pd->flags = flags;

        if (device->attrs.device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY)
                pd->local_dma_lkey = device->local_dma_lkey;
        else
                mr_access_flags |= IB_ACCESS_LOCAL_WRITE;

        if (flags & IB_PD_UNSAFE_GLOBAL_RKEY) {
                pr_warn("%s: enabling unsafe global rkey\n", caller);
                mr_access_flags |= IB_ACCESS_REMOTE_READ | IB_ACCESS_REMOTE_WRITE;
        }

        if (mr_access_flags) {
                struct ib_mr *mr;

                mr = pd->device->get_dma_mr(pd, mr_access_flags);
                if (IS_ERR(mr)) {
                        ib_dealloc_pd(pd);
                        return ERR_CAST(mr);
                }

                mr->device = pd->device;
                mr->pd = pd;
                mr->uobject = NULL;
                mr->need_inval = false;

                pd->__internal_mr = mr;

                if (!(device->attrs.device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY))
                        pd->local_dma_lkey = pd->__internal_mr->lkey;

                if (flags & IB_PD_UNSAFE_GLOBAL_RKEY)
                        pd->unsafe_global_rkey = pd->__internal_mr->rkey;
        }

        return pd;
}
EXPORT_SYMBOL(__ib_alloc_pd);

/**
 * ib_dealloc_pd - Deallocates a protection domain.
 * @pd: The protection domain to deallocate.
 *
 * It is an error to call this function while any resources in the pd still
 * exist.  The caller is responsible to synchronously destroy them and
 * guarantee no new allocations will happen.
 */
void ib_dealloc_pd(struct ib_pd *pd)
{
        int ret;

        if (pd->__internal_mr) {
                ret = pd->device->dereg_mr(pd->__internal_mr);
                WARN_ON(ret);
                pd->__internal_mr = NULL;
        }

        /* uverbs manipulates usecnt with proper locking, while the kabi
         * requires the caller to guarantee we can't race here. */
        WARN_ON(atomic_read(&pd->usecnt));

        /* Making dealloc_pd a void return is a WIP; no driver should return
         * an error here. */
        ret = pd->device->dealloc_pd(pd);
        WARN_ONCE(ret, "Infiniband HW driver failed dealloc_pd");
}
EXPORT_SYMBOL(ib_dealloc_pd);
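
/*
 * Usage sketch (illustrative only, not part of this file): a kernel ULP
 * typically allocates one PD per device via the ib_alloc_pd() wrapper
 * around __ib_alloc_pd(), and deallocates it only after every QP, CQ and
 * MR created under it has been destroyed.  The "my_" names below are
 * hypothetical:
 *
 *      struct ib_pd *my_pd = ib_alloc_pd(device, 0);
 *      if (IS_ERR(my_pd))
 *              return PTR_ERR(my_pd);
 *      ... create and later destroy resources under my_pd ...
 *      ib_dealloc_pd(my_pd);
 */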

/* Address handles */

static struct ib_ah *_rdma_create_ah(struct ib_pd *pd,
                                     struct rdma_ah_attr *ah_attr,
                                     struct ib_udata *udata)
{
        struct ib_ah *ah;

        ah = pd->device->create_ah(pd, ah_attr, udata);

        if (!IS_ERR(ah)) {
                ah->device  = pd->device;
                ah->pd      = pd;
                ah->uobject = NULL;
                ah->type    = ah_attr->type;
                atomic_inc(&pd->usecnt);
        }

        return ah;
}

struct ib_ah *rdma_create_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr)
{
        return _rdma_create_ah(pd, ah_attr, NULL);
}
EXPORT_SYMBOL(rdma_create_ah);

/**
 * rdma_create_user_ah - Creates an address handle for the given address
 * vector.  It resolves the destination MAC address for an ah_attr of
 * RoCE type.
 * @pd: The protection domain associated with the address handle.
 * @ah_attr: The attributes of the address vector.
 * @udata: pointer to the user's input/output buffer information needed by
 *         the provider driver.
 *
 * It returns the new address handle on success and an ERR_PTR on error.
 * The address handle is used to reference a local or global destination
 * in all UD QP post sends.
 */
struct ib_ah *rdma_create_user_ah(struct ib_pd *pd,
                                  struct rdma_ah_attr *ah_attr,
                                  struct ib_udata *udata)
{
        int err;

        if (ah_attr->type == RDMA_AH_ATTR_TYPE_ROCE) {
                err = ib_resolve_eth_dmac(pd->device, ah_attr);
                if (err)
                        return ERR_PTR(err);
        }

        return _rdma_create_ah(pd, ah_attr, udata);
}
EXPORT_SYMBOL(rdma_create_user_ah);

int ib_get_rdma_header_version(const union rdma_network_hdr *hdr)
{
        const struct iphdr *ip4h = (struct iphdr *)&hdr->roce4grh;
        struct iphdr ip4h_checked;
        const struct ipv6hdr *ip6h = (struct ipv6hdr *)&hdr->ibgrh;

        /* If it's IPv6, the version must be 6, otherwise, the first
         * 20 bytes (before the IPv4 header) are garbled.
         */
        if (ip6h->version != 6)
                return (ip4h->version == 4) ? 4 : 0;
        /* version may be 6 or 4 because the first 20 bytes could be garbled */

        /* RoCE v2 requires no options, thus header length
         * must be 5 words
         */
        if (ip4h->ihl != 5)
                return 6;

        /* Verify checksum.
         * We can't write on scattered buffers so we need to copy to
         * temp buffer.
         */
        memcpy(&ip4h_checked, ip4h, sizeof(ip4h_checked));
        ip4h_checked.check = 0;
        ip4h_checked.check = ip_fast_csum((u8 *)&ip4h_checked, 5);
        /* if IPv4 header checksum is OK, believe it */
        if (ip4h->check == ip4h_checked.check)
                return 4;
        return 6;
}
EXPORT_SYMBOL(ib_get_rdma_header_version);

static enum rdma_network_type ib_get_net_type_by_grh(struct ib_device *device,
                                                     u8 port_num,
                                                     const struct ib_grh *grh)
{
        int grh_version;

        if (rdma_protocol_ib(device, port_num))
                return RDMA_NETWORK_IB;

        grh_version = ib_get_rdma_header_version((union rdma_network_hdr *)grh);

        if (grh_version == 4)
                return RDMA_NETWORK_IPV4;

        if (grh->next_hdr == IPPROTO_UDP)
                return RDMA_NETWORK_IPV6;

        return RDMA_NETWORK_ROCE_V1;
}

struct find_gid_index_context {
        u16 vlan_id;
        enum ib_gid_type gid_type;
};

static bool find_gid_index(const union ib_gid *gid,
                           const struct ib_gid_attr *gid_attr,
                           void *context)
{
        struct find_gid_index_context *ctx = context;

        if (ctx->gid_type != gid_attr->gid_type)
                return false;

        if ((!!(ctx->vlan_id != 0xffff) == !is_vlan_dev(gid_attr->ndev)) ||
            (is_vlan_dev(gid_attr->ndev) &&
             vlan_dev_vlan_id(gid_attr->ndev) != ctx->vlan_id))
                return false;

        return true;
}

static int get_sgid_index_from_eth(struct ib_device *device, u8 port_num,
                                   u16 vlan_id, const union ib_gid *sgid,
                                   enum ib_gid_type gid_type,
                                   u16 *gid_index)
{
        struct find_gid_index_context context = {.vlan_id = vlan_id,
                                                 .gid_type = gid_type};

        return ib_find_gid_by_filter(device, sgid, port_num, find_gid_index,
                                     &context, gid_index);
}

int ib_get_gids_from_rdma_hdr(const union rdma_network_hdr *hdr,
                              enum rdma_network_type net_type,
                              union ib_gid *sgid, union ib_gid *dgid)
{
        struct sockaddr_in src_in;
        struct sockaddr_in dst_in;
        __be32 src_saddr, dst_saddr;

        if (!sgid || !dgid)
                return -EINVAL;

        if (net_type == RDMA_NETWORK_IPV4) {
                memcpy(&src_in.sin_addr.s_addr,
                       &hdr->roce4grh.saddr, 4);
                memcpy(&dst_in.sin_addr.s_addr,
                       &hdr->roce4grh.daddr, 4);
                src_saddr = src_in.sin_addr.s_addr;
                dst_saddr = dst_in.sin_addr.s_addr;
                ipv6_addr_set_v4mapped(src_saddr,
                                       (struct in6_addr *)sgid);
                ipv6_addr_set_v4mapped(dst_saddr,
                                       (struct in6_addr *)dgid);
                return 0;
        } else if (net_type == RDMA_NETWORK_IPV6 ||
                   net_type == RDMA_NETWORK_IB) {
                *dgid = hdr->ibgrh.dgid;
                *sgid = hdr->ibgrh.sgid;
                return 0;
        } else {
                return -EINVAL;
        }
}
EXPORT_SYMBOL(ib_get_gids_from_rdma_hdr);

/* Resolve destination MAC address and hop limit for a unicast destination
 * GID entry, considering the source GID entry as well.
 * ah_attr must have a valid port_num and sgid_index.
 */
static int ib_resolve_unicast_gid_dmac(struct ib_device *device,
                                       struct rdma_ah_attr *ah_attr)
{
        struct ib_gid_attr sgid_attr;
        struct ib_global_route *grh;
        int hop_limit = 0xff;
        union ib_gid sgid;
        int ret;

        grh = rdma_ah_retrieve_grh(ah_attr);

        ret = ib_query_gid(device,
                           rdma_ah_get_port_num(ah_attr),
                           grh->sgid_index,
                           &sgid, &sgid_attr);
        if (ret || !sgid_attr.ndev) {
                if (!ret)
                        ret = -ENXIO;
                return ret;
        }

        /* If destination is link local and source GID is RoCEv1,
         * IP stack is not used.
         */
        if (rdma_link_local_addr((struct in6_addr *)grh->dgid.raw) &&
            sgid_attr.gid_type == IB_GID_TYPE_ROCE) {
                rdma_get_ll_mac((struct in6_addr *)grh->dgid.raw,
                                ah_attr->roce.dmac);
                goto done;
        }

        ret = rdma_addr_find_l2_eth_by_grh(&sgid, &grh->dgid,
                                           ah_attr->roce.dmac,
                                           sgid_attr.ndev, &hop_limit);
done:
        dev_put(sgid_attr.ndev);

        grh->hop_limit = hop_limit;
        return ret;
}

/*
 * This function initializes address handle attributes from the incoming
 * packet.  The incoming packet has a dgid of the receiver node on which
 * this code is getting executed, and its sgid contains the GID of the
 * sender.
 *
 * When resolving the MAC address of the destination, the arrived dgid is
 * used as the sgid and the sgid is used as the dgid, because the sgid
 * contains the destination GID whom to respond to.
 */
int ib_init_ah_attr_from_wc(struct ib_device *device, u8 port_num,
                            const struct ib_wc *wc, const struct ib_grh *grh,
                            struct rdma_ah_attr *ah_attr)
{
        u32 flow_class;
        u16 gid_index;
        int ret;
        enum rdma_network_type net_type = RDMA_NETWORK_IB;
        enum ib_gid_type gid_type = IB_GID_TYPE_IB;
        int hoplimit = 0xff;
        union ib_gid dgid;
        union ib_gid sgid;

        might_sleep();

        memset(ah_attr, 0, sizeof *ah_attr);
        ah_attr->type = rdma_ah_find_type(device, port_num);
        if (rdma_cap_eth_ah(device, port_num)) {
                if (wc->wc_flags & IB_WC_WITH_NETWORK_HDR_TYPE)
                        net_type = wc->network_hdr_type;
                else
                        net_type = ib_get_net_type_by_grh(device, port_num, grh);
                gid_type = ib_network_to_gid_type(net_type);
        }
        ret = ib_get_gids_from_rdma_hdr((union rdma_network_hdr *)grh, net_type,
                                        &sgid, &dgid);
        if (ret)
                return ret;

        rdma_ah_set_sl(ah_attr, wc->sl);
        rdma_ah_set_port_num(ah_attr, port_num);

        if (rdma_protocol_roce(device, port_num)) {
                u16 vlan_id = wc->wc_flags & IB_WC_WITH_VLAN ?
                                wc->vlan_id : 0xffff;

                if (!(wc->wc_flags & IB_WC_GRH))
                        return -EPROTOTYPE;

                ret = get_sgid_index_from_eth(device, port_num,
                                              vlan_id, &dgid,
                                              gid_type, &gid_index);
                if (ret)
                        return ret;

                flow_class = be32_to_cpu(grh->version_tclass_flow);
                rdma_ah_set_grh(ah_attr, &sgid,
                                flow_class & 0xFFFFF,
                                (u8)gid_index, hoplimit,
                                (flow_class >> 20) & 0xFF);
                return ib_resolve_unicast_gid_dmac(device, ah_attr);
        } else {
                rdma_ah_set_dlid(ah_attr, wc->slid);
                rdma_ah_set_path_bits(ah_attr, wc->dlid_path_bits);

                if (wc->wc_flags & IB_WC_GRH) {
                        if (dgid.global.interface_id != cpu_to_be64(IB_SA_WELL_KNOWN_GUID)) {
                                ret = ib_find_cached_gid_by_port(device, &dgid,
                                                                 IB_GID_TYPE_IB,
                                                                 port_num, NULL,
                                                                 &gid_index);
                                if (ret)
                                        return ret;
                        } else {
                                gid_index = 0;
                        }

                        flow_class = be32_to_cpu(grh->version_tclass_flow);
                        rdma_ah_set_grh(ah_attr, &sgid,
                                        flow_class & 0xFFFFF,
                                        (u8)gid_index, hoplimit,
                                        (flow_class >> 20) & 0xFF);
                }
                return 0;
        }
}
EXPORT_SYMBOL(ib_init_ah_attr_from_wc);

struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, const struct ib_wc *wc,
                                   const struct ib_grh *grh, u8 port_num)
{
        struct rdma_ah_attr ah_attr;
        int ret;

        ret = ib_init_ah_attr_from_wc(pd->device, port_num, wc, grh, &ah_attr);
        if (ret)
                return ERR_PTR(ret);

        return rdma_create_ah(pd, &ah_attr);
}
EXPORT_SYMBOL(ib_create_ah_from_wc);
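
/*
 * Usage sketch (illustrative only, not part of this file): a UD responder
 * can build a reply path from nothing more than the completion and the GRH
 * of the received packet.  The "my_" names are hypothetical:
 *
 *      struct ib_ah *my_ah = ib_create_ah_from_wc(my_pd, &wc, grh, my_port);
 *      if (IS_ERR(my_ah))
 *              return PTR_ERR(my_ah);
 *      ... post a UD send whose wr.ud.ah = my_ah ...
 *      rdma_destroy_ah(my_ah);
 */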

int rdma_modify_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr)
{
        if (ah->type != ah_attr->type)
                return -EINVAL;

        return ah->device->modify_ah ?
                ah->device->modify_ah(ah, ah_attr) :
                -ENOSYS;
}
EXPORT_SYMBOL(rdma_modify_ah);

int rdma_query_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr)
{
        return ah->device->query_ah ?
                ah->device->query_ah(ah, ah_attr) :
                -ENOSYS;
}
EXPORT_SYMBOL(rdma_query_ah);

int rdma_destroy_ah(struct ib_ah *ah)
{
        struct ib_pd *pd;
        int ret;

        pd = ah->pd;
        ret = ah->device->destroy_ah(ah);
        if (!ret)
                atomic_dec(&pd->usecnt);

        return ret;
}
EXPORT_SYMBOL(rdma_destroy_ah);

/* Shared receive queues */

struct ib_srq *ib_create_srq(struct ib_pd *pd,
                             struct ib_srq_init_attr *srq_init_attr)
{
        struct ib_srq *srq;

        if (!pd->device->create_srq)
                return ERR_PTR(-ENOSYS);

        srq = pd->device->create_srq(pd, srq_init_attr, NULL);

        if (!IS_ERR(srq)) {
                srq->device        = pd->device;
                srq->pd            = pd;
                srq->uobject       = NULL;
                srq->event_handler = srq_init_attr->event_handler;
                srq->srq_context   = srq_init_attr->srq_context;
                srq->srq_type      = srq_init_attr->srq_type;
                if (ib_srq_has_cq(srq->srq_type)) {
                        srq->ext.cq = srq_init_attr->ext.cq;
                        atomic_inc(&srq->ext.cq->usecnt);
                }
                if (srq->srq_type == IB_SRQT_XRC) {
                        srq->ext.xrc.xrcd = srq_init_attr->ext.xrc.xrcd;
                        atomic_inc(&srq->ext.xrc.xrcd->usecnt);
                }
                atomic_inc(&pd->usecnt);
                atomic_set(&srq->usecnt, 0);
        }

        return srq;
}
EXPORT_SYMBOL(ib_create_srq);
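
/*
 * Usage sketch (illustrative only, not part of this file): a basic SRQ
 * shared by several QPs.  The capacities below are hypothetical:
 *
 *      struct ib_srq_init_attr my_srq_attr = {
 *              .attr = { .max_wr = 256, .max_sge = 1 },
 *              .srq_type = IB_SRQT_BASIC,
 *      };
 *      struct ib_srq *my_srq = ib_create_srq(my_pd, &my_srq_attr);
 *      if (IS_ERR(my_srq))
 *              return PTR_ERR(my_srq);
 */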

int ib_modify_srq(struct ib_srq *srq,
                  struct ib_srq_attr *srq_attr,
                  enum ib_srq_attr_mask srq_attr_mask)
{
        return srq->device->modify_srq ?
                srq->device->modify_srq(srq, srq_attr, srq_attr_mask, NULL) :
                -ENOSYS;
}
EXPORT_SYMBOL(ib_modify_srq);

int ib_query_srq(struct ib_srq *srq,
                 struct ib_srq_attr *srq_attr)
{
        return srq->device->query_srq ?
                srq->device->query_srq(srq, srq_attr) : -ENOSYS;
}
EXPORT_SYMBOL(ib_query_srq);

int ib_destroy_srq(struct ib_srq *srq)
{
        struct ib_pd *pd;
        enum ib_srq_type srq_type;
        struct ib_xrcd *uninitialized_var(xrcd);
        struct ib_cq *uninitialized_var(cq);
        int ret;

        if (atomic_read(&srq->usecnt))
                return -EBUSY;

        pd = srq->pd;
        srq_type = srq->srq_type;
        if (ib_srq_has_cq(srq_type))
                cq = srq->ext.cq;
        if (srq_type == IB_SRQT_XRC)
                xrcd = srq->ext.xrc.xrcd;

        ret = srq->device->destroy_srq(srq);
        if (!ret) {
                atomic_dec(&pd->usecnt);
                if (srq_type == IB_SRQT_XRC)
                        atomic_dec(&xrcd->usecnt);
                if (ib_srq_has_cq(srq_type))
                        atomic_dec(&cq->usecnt);
        }

        return ret;
}
EXPORT_SYMBOL(ib_destroy_srq);

/* Queue pairs */

static void __ib_shared_qp_event_handler(struct ib_event *event, void *context)
{
        struct ib_qp *qp = context;
        unsigned long flags;

        spin_lock_irqsave(&qp->device->event_handler_lock, flags);
        list_for_each_entry(event->element.qp, &qp->open_list, open_list)
                if (event->element.qp->event_handler)
                        event->element.qp->event_handler(event, event->element.qp->qp_context);
        spin_unlock_irqrestore(&qp->device->event_handler_lock, flags);
}

static void __ib_insert_xrcd_qp(struct ib_xrcd *xrcd, struct ib_qp *qp)
{
        mutex_lock(&xrcd->tgt_qp_mutex);
        list_add(&qp->xrcd_list, &xrcd->tgt_qp_list);
        mutex_unlock(&xrcd->tgt_qp_mutex);
}

static struct ib_qp *__ib_open_qp(struct ib_qp *real_qp,
                                  void (*event_handler)(struct ib_event *, void *),
                                  void *qp_context)
{
        struct ib_qp *qp;
        unsigned long flags;
        int err;

        qp = kzalloc(sizeof *qp, GFP_KERNEL);
        if (!qp)
                return ERR_PTR(-ENOMEM);

        qp->real_qp = real_qp;
        err = ib_open_shared_qp_security(qp, real_qp->device);
        if (err) {
                kfree(qp);
                return ERR_PTR(err);
        }

        atomic_inc(&real_qp->usecnt);
        qp->device = real_qp->device;
        qp->event_handler = event_handler;
        qp->qp_context = qp_context;
        qp->qp_num = real_qp->qp_num;
        qp->qp_type = real_qp->qp_type;

        spin_lock_irqsave(&real_qp->device->event_handler_lock, flags);
        list_add(&qp->open_list, &real_qp->open_list);
        spin_unlock_irqrestore(&real_qp->device->event_handler_lock, flags);

        return qp;
}

struct ib_qp *ib_open_qp(struct ib_xrcd *xrcd,
                         struct ib_qp_open_attr *qp_open_attr)
{
        struct ib_qp *qp, *real_qp;

        if (qp_open_attr->qp_type != IB_QPT_XRC_TGT)
                return ERR_PTR(-EINVAL);

        qp = ERR_PTR(-EINVAL);
        mutex_lock(&xrcd->tgt_qp_mutex);
        list_for_each_entry(real_qp, &xrcd->tgt_qp_list, xrcd_list) {
                if (real_qp->qp_num == qp_open_attr->qp_num) {
                        qp = __ib_open_qp(real_qp, qp_open_attr->event_handler,
                                          qp_open_attr->qp_context);
                        break;
                }
        }
        mutex_unlock(&xrcd->tgt_qp_mutex);
        return qp;
}
EXPORT_SYMBOL(ib_open_qp);

static struct ib_qp *ib_create_xrc_qp(struct ib_qp *qp,
                                      struct ib_qp_init_attr *qp_init_attr)
{
        struct ib_qp *real_qp = qp;

        qp->event_handler = __ib_shared_qp_event_handler;
        qp->qp_context = qp;
        qp->pd = NULL;
        qp->send_cq = qp->recv_cq = NULL;
        qp->srq = NULL;
        qp->xrcd = qp_init_attr->xrcd;
        atomic_inc(&qp_init_attr->xrcd->usecnt);
        INIT_LIST_HEAD(&qp->open_list);

        qp = __ib_open_qp(real_qp, qp_init_attr->event_handler,
                          qp_init_attr->qp_context);
        if (!IS_ERR(qp))
                __ib_insert_xrcd_qp(qp_init_attr->xrcd, real_qp);
        else
                real_qp->device->destroy_qp(real_qp);
        return qp;
}

struct ib_qp *ib_create_qp(struct ib_pd *pd,
                           struct ib_qp_init_attr *qp_init_attr)
{
        struct ib_device *device = pd ? pd->device : qp_init_attr->xrcd->device;
        struct ib_qp *qp;
        int ret;

        if (qp_init_attr->rwq_ind_tbl &&
            (qp_init_attr->recv_cq ||
            qp_init_attr->srq || qp_init_attr->cap.max_recv_wr ||
            qp_init_attr->cap.max_recv_sge))
                return ERR_PTR(-EINVAL);

        /*
         * If the caller is using the RDMA API, calculate the resources
         * needed for the RDMA READ/WRITE operations.
         *
         * Note that these callers need to pass in a port number.
         */
        if (qp_init_attr->cap.max_rdma_ctxs)
                rdma_rw_init_qp(device, qp_init_attr);

        qp = device->create_qp(pd, qp_init_attr, NULL);
        if (IS_ERR(qp))
                return qp;

        ret = ib_create_qp_security(qp, device);
        if (ret) {
                ib_destroy_qp(qp);
                return ERR_PTR(ret);
        }

        qp->device     = device;
        qp->real_qp    = qp;
        qp->uobject    = NULL;
        qp->qp_type    = qp_init_attr->qp_type;
        qp->rwq_ind_tbl = qp_init_attr->rwq_ind_tbl;

        atomic_set(&qp->usecnt, 0);
        qp->mrs_used = 0;
        spin_lock_init(&qp->mr_lock);
        INIT_LIST_HEAD(&qp->rdma_mrs);
        INIT_LIST_HEAD(&qp->sig_mrs);
        qp->port = 0;

        if (qp_init_attr->qp_type == IB_QPT_XRC_TGT)
                return ib_create_xrc_qp(qp, qp_init_attr);

        qp->event_handler = qp_init_attr->event_handler;
        qp->qp_context = qp_init_attr->qp_context;
        if (qp_init_attr->qp_type == IB_QPT_XRC_INI) {
                qp->recv_cq = NULL;
                qp->srq = NULL;
        } else {
                qp->recv_cq = qp_init_attr->recv_cq;
                if (qp_init_attr->recv_cq)
                        atomic_inc(&qp_init_attr->recv_cq->usecnt);
                qp->srq = qp_init_attr->srq;
                if (qp->srq)
                        atomic_inc(&qp_init_attr->srq->usecnt);
        }

        qp->pd      = pd;
        qp->send_cq = qp_init_attr->send_cq;
        qp->xrcd    = NULL;

        atomic_inc(&pd->usecnt);
        if (qp_init_attr->send_cq)
                atomic_inc(&qp_init_attr->send_cq->usecnt);
        if (qp_init_attr->rwq_ind_tbl)
                atomic_inc(&qp->rwq_ind_tbl->usecnt);

        if (qp_init_attr->cap.max_rdma_ctxs) {
                ret = rdma_rw_init_mrs(qp, qp_init_attr);
                if (ret) {
                        pr_err("failed to init MR pool ret= %d\n", ret);
                        ib_destroy_qp(qp);
                        return ERR_PTR(ret);
                }
        }

        /*
         * Note: all hw drivers guarantee that max_send_sge is lower than
         * the device RDMA WRITE SGE limit but not all hw drivers ensure that
         * max_send_sge <= max_sge_rd.
         */
        qp->max_write_sge = qp_init_attr->cap.max_send_sge;
        qp->max_read_sge = min_t(u32, qp_init_attr->cap.max_send_sge,
                                 device->attrs.max_sge_rd);

        return qp;
}
EXPORT_SYMBOL(ib_create_qp);
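
/*
 * Usage sketch (illustrative only, not part of this file): creating a
 * kernel RC QP on an existing PD and CQ.  The capacities and "my_" names
 * below are hypothetical:
 *
 *      struct ib_qp_init_attr my_init_attr = {
 *              .send_cq = my_cq,
 *              .recv_cq = my_cq,
 *              .cap = { .max_send_wr = 64, .max_recv_wr = 64,
 *                       .max_send_sge = 1, .max_recv_sge = 1 },
 *              .sq_sig_type = IB_SIGNAL_REQ_WR,
 *              .qp_type = IB_QPT_RC,
 *      };
 *      struct ib_qp *my_qp = ib_create_qp(my_pd, &my_init_attr);
 *      if (IS_ERR(my_qp))
 *              return PTR_ERR(my_qp);
 */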

static const struct {
        int                     valid;
        enum ib_qp_attr_mask    req_param[IB_QPT_MAX];
        enum ib_qp_attr_mask    opt_param[IB_QPT_MAX];
} qp_state_table[IB_QPS_ERR + 1][IB_QPS_ERR + 1] = {
        [IB_QPS_RESET] = {
                [IB_QPS_RESET] = { .valid = 1 },
                [IB_QPS_INIT]  = {
                        .valid = 1,
                        .req_param = {
                                [IB_QPT_UD]  = (IB_QP_PKEY_INDEX |
                                                IB_QP_PORT |
                                                IB_QP_QKEY),
                                [IB_QPT_RAW_PACKET] = IB_QP_PORT,
                                [IB_QPT_UC]  = (IB_QP_PKEY_INDEX |
                                                IB_QP_PORT |
                                                IB_QP_ACCESS_FLAGS),
                                [IB_QPT_RC]  = (IB_QP_PKEY_INDEX |
                                                IB_QP_PORT |
                                                IB_QP_ACCESS_FLAGS),
                                [IB_QPT_XRC_INI] = (IB_QP_PKEY_INDEX |
                                                IB_QP_PORT |
                                                IB_QP_ACCESS_FLAGS),
                                [IB_QPT_XRC_TGT] = (IB_QP_PKEY_INDEX |
                                                IB_QP_PORT |
                                                IB_QP_ACCESS_FLAGS),
                                [IB_QPT_SMI] = (IB_QP_PKEY_INDEX |
                                                IB_QP_QKEY),
                                [IB_QPT_GSI] = (IB_QP_PKEY_INDEX |
                                                IB_QP_QKEY),
                        }
                },
        },
        [IB_QPS_INIT]  = {
                [IB_QPS_RESET] = { .valid = 1 },
                [IB_QPS_ERR] =   { .valid = 1 },
                [IB_QPS_INIT]  = {
                        .valid = 1,
                        .opt_param = {
                                [IB_QPT_UD]  = (IB_QP_PKEY_INDEX |
                                                IB_QP_PORT |
                                                IB_QP_QKEY),
                                [IB_QPT_UC]  = (IB_QP_PKEY_INDEX |
                                                IB_QP_PORT |
                                                IB_QP_ACCESS_FLAGS),
                                [IB_QPT_RC]  = (IB_QP_PKEY_INDEX |
                                                IB_QP_PORT |
                                                IB_QP_ACCESS_FLAGS),
                                [IB_QPT_XRC_INI] = (IB_QP_PKEY_INDEX |
                                                IB_QP_PORT |
                                                IB_QP_ACCESS_FLAGS),
                                [IB_QPT_XRC_TGT] = (IB_QP_PKEY_INDEX |
                                                IB_QP_PORT |
                                                IB_QP_ACCESS_FLAGS),
                                [IB_QPT_SMI] = (IB_QP_PKEY_INDEX |
                                                IB_QP_QKEY),
                                [IB_QPT_GSI] = (IB_QP_PKEY_INDEX |
                                                IB_QP_QKEY),
                        }
                },
                [IB_QPS_RTR]   = {
                        .valid = 1,
                        .req_param = {
                                [IB_QPT_UC]  = (IB_QP_AV |
                                                IB_QP_PATH_MTU |
                                                IB_QP_DEST_QPN |
                                                IB_QP_RQ_PSN),
                                [IB_QPT_RC]  = (IB_QP_AV |
                                                IB_QP_PATH_MTU |
                                                IB_QP_DEST_QPN |
                                                IB_QP_RQ_PSN |
                                                IB_QP_MAX_DEST_RD_ATOMIC |
                                                IB_QP_MIN_RNR_TIMER),
                                [IB_QPT_XRC_INI] = (IB_QP_AV |
                                                IB_QP_PATH_MTU |
                                                IB_QP_DEST_QPN |
                                                IB_QP_RQ_PSN),
                                [IB_QPT_XRC_TGT] = (IB_QP_AV |
                                                IB_QP_PATH_MTU |
                                                IB_QP_DEST_QPN |
                                                IB_QP_RQ_PSN |
                                                IB_QP_MAX_DEST_RD_ATOMIC |
                                                IB_QP_MIN_RNR_TIMER),
                        },
                        .opt_param = {
                                [IB_QPT_UD]  = (IB_QP_PKEY_INDEX |
                                                IB_QP_QKEY),
                                [IB_QPT_UC]  = (IB_QP_ALT_PATH |
                                                IB_QP_ACCESS_FLAGS |
                                                IB_QP_PKEY_INDEX),
                                [IB_QPT_RC]  = (IB_QP_ALT_PATH |
                                                IB_QP_ACCESS_FLAGS |
                                                IB_QP_PKEY_INDEX),
                                [IB_QPT_XRC_INI] = (IB_QP_ALT_PATH |
                                                IB_QP_ACCESS_FLAGS |
                                                IB_QP_PKEY_INDEX),
                                [IB_QPT_XRC_TGT] = (IB_QP_ALT_PATH |
                                                IB_QP_ACCESS_FLAGS |
                                                IB_QP_PKEY_INDEX),
                                [IB_QPT_SMI] = (IB_QP_PKEY_INDEX |
                                                IB_QP_QKEY),
                                [IB_QPT_GSI] = (IB_QP_PKEY_INDEX |
                                                IB_QP_QKEY),
                        },
                },
        },
        [IB_QPS_RTR]   = {
                [IB_QPS_RESET] = { .valid = 1 },
                [IB_QPS_ERR] =   { .valid = 1 },
                [IB_QPS_RTS]   = {
                        .valid = 1,
                        .req_param = {
                                [IB_QPT_UD]  = IB_QP_SQ_PSN,
                                [IB_QPT_UC]  = IB_QP_SQ_PSN,
                                [IB_QPT_RC]  = (IB_QP_TIMEOUT |
                                                IB_QP_RETRY_CNT |
                                                IB_QP_RNR_RETRY |
                                                IB_QP_SQ_PSN |
                                                IB_QP_MAX_QP_RD_ATOMIC),
                                [IB_QPT_XRC_INI] = (IB_QP_TIMEOUT |
                                                IB_QP_RETRY_CNT |
                                                IB_QP_RNR_RETRY |
                                                IB_QP_SQ_PSN |
                                                IB_QP_MAX_QP_RD_ATOMIC),
                                [IB_QPT_XRC_TGT] = (IB_QP_TIMEOUT |
                                                IB_QP_SQ_PSN),
                                [IB_QPT_SMI] = IB_QP_SQ_PSN,
                                [IB_QPT_GSI] = IB_QP_SQ_PSN,
                        },
                        .opt_param = {
                                [IB_QPT_UD]  = (IB_QP_CUR_STATE |
                                                IB_QP_QKEY),
                                [IB_QPT_UC]  = (IB_QP_CUR_STATE |
                                                IB_QP_ALT_PATH |
                                                IB_QP_ACCESS_FLAGS |
                                                IB_QP_PATH_MIG_STATE),
                                [IB_QPT_RC]  = (IB_QP_CUR_STATE |
                                                IB_QP_ALT_PATH |
                                                IB_QP_ACCESS_FLAGS |
                                                IB_QP_MIN_RNR_TIMER |
                                                IB_QP_PATH_MIG_STATE),
                                [IB_QPT_XRC_INI] = (IB_QP_CUR_STATE |
                                                IB_QP_ALT_PATH |
                                                IB_QP_ACCESS_FLAGS |
                                                IB_QP_PATH_MIG_STATE),
                                [IB_QPT_XRC_TGT] = (IB_QP_CUR_STATE |
                                                IB_QP_ALT_PATH |
                                                IB_QP_ACCESS_FLAGS |
                                                IB_QP_MIN_RNR_TIMER |
                                                IB_QP_PATH_MIG_STATE),
                                [IB_QPT_SMI] = (IB_QP_CUR_STATE |
                                                IB_QP_QKEY),
                                [IB_QPT_GSI] = (IB_QP_CUR_STATE |
                                                IB_QP_QKEY),
                                [IB_QPT_RAW_PACKET] = IB_QP_RATE_LIMIT,
                        }
                }
        },
        [IB_QPS_RTS]   = {
                [IB_QPS_RESET] = { .valid = 1 },
                [IB_QPS_ERR] =   { .valid = 1 },
                [IB_QPS_RTS]   = {
                        .valid = 1,
                        .opt_param = {
                                [IB_QPT_UD]  = (IB_QP_CUR_STATE |
                                                IB_QP_QKEY),
                                [IB_QPT_UC]  = (IB_QP_CUR_STATE |
                                                IB_QP_ACCESS_FLAGS |
                                                IB_QP_ALT_PATH |
                                                IB_QP_PATH_MIG_STATE),
                                [IB_QPT_RC]  = (IB_QP_CUR_STATE |
                                                IB_QP_ACCESS_FLAGS |
                                                IB_QP_ALT_PATH |
                                                IB_QP_PATH_MIG_STATE |
                                                IB_QP_MIN_RNR_TIMER),
                                [IB_QPT_XRC_INI] = (IB_QP_CUR_STATE |
                                                IB_QP_ACCESS_FLAGS |
                                                IB_QP_ALT_PATH |
                                                IB_QP_PATH_MIG_STATE),
                                [IB_QPT_XRC_TGT] = (IB_QP_CUR_STATE |
                                                IB_QP_ACCESS_FLAGS |
                                                IB_QP_ALT_PATH |
                                                IB_QP_PATH_MIG_STATE |
                                                IB_QP_MIN_RNR_TIMER),
                                [IB_QPT_SMI] = (IB_QP_CUR_STATE |
                                                IB_QP_QKEY),
                                [IB_QPT_GSI] = (IB_QP_CUR_STATE |
                                                IB_QP_QKEY),
                                [IB_QPT_RAW_PACKET] = IB_QP_RATE_LIMIT,
                        }
                },
                [IB_QPS_SQD]   = {
                        .valid = 1,
                        .opt_param = {
                                [IB_QPT_UD]  = IB_QP_EN_SQD_ASYNC_NOTIFY,
                                [IB_QPT_UC]  = IB_QP_EN_SQD_ASYNC_NOTIFY,
                                [IB_QPT_RC]  = IB_QP_EN_SQD_ASYNC_NOTIFY,
                                [IB_QPT_XRC_INI] = IB_QP_EN_SQD_ASYNC_NOTIFY,
                                [IB_QPT_XRC_TGT] = IB_QP_EN_SQD_ASYNC_NOTIFY, /* ??? */
                                [IB_QPT_SMI] = IB_QP_EN_SQD_ASYNC_NOTIFY,
                                [IB_QPT_GSI] = IB_QP_EN_SQD_ASYNC_NOTIFY
                        }
                },
        },
        [IB_QPS_SQD]   = {
                [IB_QPS_RESET] = { .valid = 1 },
                [IB_QPS_ERR] =   { .valid = 1 },
                [IB_QPS_RTS]   = {
                        .valid = 1,
                        .opt_param = {
                                [IB_QPT_UD]  = (IB_QP_CUR_STATE |
                                                IB_QP_QKEY),
                                [IB_QPT_UC]  = (IB_QP_CUR_STATE |
                                                IB_QP_ALT_PATH |
                                                IB_QP_ACCESS_FLAGS |
                                                IB_QP_PATH_MIG_STATE),
                                [IB_QPT_RC]  = (IB_QP_CUR_STATE |
                                                IB_QP_ALT_PATH |
                                                IB_QP_ACCESS_FLAGS |
                                                IB_QP_MIN_RNR_TIMER |
                                                IB_QP_PATH_MIG_STATE),
                                [IB_QPT_XRC_INI] = (IB_QP_CUR_STATE |
                                                IB_QP_ALT_PATH |
                                                IB_QP_ACCESS_FLAGS |
                                                IB_QP_PATH_MIG_STATE),
                                [IB_QPT_XRC_TGT] = (IB_QP_CUR_STATE |
                                                IB_QP_ALT_PATH |
                                                IB_QP_ACCESS_FLAGS |
                                                IB_QP_MIN_RNR_TIMER |
                                                IB_QP_PATH_MIG_STATE),
                                [IB_QPT_SMI] = (IB_QP_CUR_STATE |
                                                IB_QP_QKEY),
                                [IB_QPT_GSI] = (IB_QP_CUR_STATE |
                                                IB_QP_QKEY),
                        }
                },
                [IB_QPS_SQD]   = {
                        .valid = 1,
                        .opt_param = {
                                [IB_QPT_UD]  = (IB_QP_PKEY_INDEX |
                                                IB_QP_QKEY),
                                [IB_QPT_UC]  = (IB_QP_AV |
                                                IB_QP_ALT_PATH |
                                                IB_QP_ACCESS_FLAGS |
                                                IB_QP_PKEY_INDEX |
                                                IB_QP_PATH_MIG_STATE),
                                [IB_QPT_RC]  = (IB_QP_PORT |
                                                IB_QP_AV |
                                                IB_QP_TIMEOUT |
                                                IB_QP_RETRY_CNT |
                                                IB_QP_RNR_RETRY |
                                                IB_QP_MAX_QP_RD_ATOMIC |
                                                IB_QP_MAX_DEST_RD_ATOMIC |
                                                IB_QP_ALT_PATH |
                                                IB_QP_ACCESS_FLAGS |
                                                IB_QP_PKEY_INDEX |
                                                IB_QP_MIN_RNR_TIMER |
                                                IB_QP_PATH_MIG_STATE),
                                [IB_QPT_XRC_INI] = (IB_QP_PORT |
                                                IB_QP_AV |
                                                IB_QP_TIMEOUT |
                                                IB_QP_RETRY_CNT |
                                                IB_QP_RNR_RETRY |
                                                IB_QP_MAX_QP_RD_ATOMIC |
                                                IB_QP_ALT_PATH |
                                                IB_QP_ACCESS_FLAGS |
                                                IB_QP_PKEY_INDEX |
                                                IB_QP_PATH_MIG_STATE),
                                [IB_QPT_XRC_TGT] = (IB_QP_PORT |
                                                IB_QP_AV |
                                                IB_QP_TIMEOUT |
                                                IB_QP_MAX_DEST_RD_ATOMIC |
                                                IB_QP_ALT_PATH |
                                                IB_QP_ACCESS_FLAGS |
                                                IB_QP_PKEY_INDEX |
                                                IB_QP_MIN_RNR_TIMER |
                                                IB_QP_PATH_MIG_STATE),
                                [IB_QPT_SMI] = (IB_QP_PKEY_INDEX |
                                                IB_QP_QKEY),
                                [IB_QPT_GSI] = (IB_QP_PKEY_INDEX |
                                                IB_QP_QKEY),
                        }
                }
        },
        [IB_QPS_SQE]   = {
                [IB_QPS_RESET] = { .valid = 1 },
                [IB_QPS_ERR] =   { .valid = 1 },
                [IB_QPS_RTS]   = {
                        .valid = 1,
                        .opt_param = {
                                [IB_QPT_UD]  = (IB_QP_CUR_STATE |
                                                IB_QP_QKEY),
                                [IB_QPT_UC]  = (IB_QP_CUR_STATE |
                                                IB_QP_ACCESS_FLAGS),
                                [IB_QPT_SMI] = (IB_QP_CUR_STATE |
                                                IB_QP_QKEY),
                                [IB_QPT_GSI] = (IB_QP_CUR_STATE |
                                                IB_QP_QKEY),
                        }
                }
        },
        [IB_QPS_ERR] = {
                [IB_QPS_RESET] = { .valid = 1 },
                [IB_QPS_ERR] =   { .valid = 1 }
        }
};

int ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
                       enum ib_qp_type type, enum ib_qp_attr_mask mask,
                       enum rdma_link_layer ll)
{
        enum ib_qp_attr_mask req_param, opt_param;

        if (cur_state  < 0 || cur_state  > IB_QPS_ERR ||
            next_state < 0 || next_state > IB_QPS_ERR)
                return 0;

        if (mask & IB_QP_CUR_STATE  &&
            cur_state != IB_QPS_RTR && cur_state != IB_QPS_RTS &&
            cur_state != IB_QPS_SQD && cur_state != IB_QPS_SQE)
                return 0;

        if (!qp_state_table[cur_state][next_state].valid)
                return 0;

        req_param = qp_state_table[cur_state][next_state].req_param[type];
        opt_param = qp_state_table[cur_state][next_state].opt_param[type];

        if ((mask & req_param) != req_param)
                return 0;

        if (mask & ~(req_param | opt_param | IB_QP_STATE))
                return 0;

        return 1;
}
EXPORT_SYMBOL(ib_modify_qp_is_ok);
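
/*
 * Illustrative example (not from the original file): drivers validate a
 * requested transition against qp_state_table before applying it, e.g.
 * for an RC QP moving from INIT to RTR on an InfiniBand link:
 *
 *      if (!ib_modify_qp_is_ok(IB_QPS_INIT, IB_QPS_RTR, IB_QPT_RC,
 *                              attr_mask, IB_LINK_LAYER_INFINIBAND))
 *              return -EINVAL;
 */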

static int ib_resolve_eth_dmac(struct ib_device *device,
                               struct rdma_ah_attr *ah_attr)
{
        int ret = 0;
        struct ib_global_route *grh;

        if (!rdma_is_port_valid(device, rdma_ah_get_port_num(ah_attr)))
                return -EINVAL;

        grh = rdma_ah_retrieve_grh(ah_attr);

        if (rdma_is_multicast_addr((struct in6_addr *)ah_attr->grh.dgid.raw)) {
                if (ipv6_addr_v4mapped((struct in6_addr *)ah_attr->grh.dgid.raw)) {
                        __be32 addr = 0;

                        memcpy(&addr, ah_attr->grh.dgid.raw + 12, 4);
                        ip_eth_mc_map(addr, (char *)ah_attr->roce.dmac);
                } else {
                        ipv6_eth_mc_map((struct in6_addr *)ah_attr->grh.dgid.raw,
                                        (char *)ah_attr->roce.dmac);
                }
        } else {
                ret = ib_resolve_unicast_gid_dmac(device, ah_attr);
        }
        return ret;
}

/*
 * IB core internal function to perform QP attributes modification.
 */
static int _ib_modify_qp(struct ib_qp *qp, struct ib_qp_attr *attr,
                         int attr_mask, struct ib_udata *udata)
{
        u8 port = attr_mask & IB_QP_PORT ? attr->port_num : qp->port;
        int ret;

        if (rdma_ib_or_roce(qp->device, port)) {
                if (attr_mask & IB_QP_RQ_PSN && attr->rq_psn & ~0xffffff) {
                        pr_warn("%s: %s rq_psn overflow, masking to 24 bits\n",
                                __func__, qp->device->name);
                        attr->rq_psn &= 0xffffff;
                }

                if (attr_mask & IB_QP_SQ_PSN && attr->sq_psn & ~0xffffff) {
                        pr_warn("%s: %s sq_psn overflow, masking to 24 bits\n",
                                __func__, qp->device->name);
                        attr->sq_psn &= 0xffffff;
                }
        }

        ret = ib_security_modify_qp(qp, attr, attr_mask, udata);
        if (!ret && (attr_mask & IB_QP_PORT))
                qp->port = attr->port_num;

        return ret;
}

/**
 * ib_modify_qp_with_udata - Modifies the attributes for the specified QP.
 * @ib_qp: The QP to modify.
 * @attr: On input, specifies the QP attributes to modify.  On output,
 *      the current values of selected QP attributes are returned.
 * @attr_mask: A bit-mask used to specify which attributes of the QP
 *      are being modified.
 * @udata: pointer to the user's input/output buffer information.
 *
 * It returns 0 on success and an appropriate error code on error.
 */
int ib_modify_qp_with_udata(struct ib_qp *ib_qp, struct ib_qp_attr *attr,
                            int attr_mask, struct ib_udata *udata)
{
        struct ib_qp *qp = ib_qp->real_qp;
        int ret;

        if (attr_mask & IB_QP_AV &&
            attr->ah_attr.type == RDMA_AH_ATTR_TYPE_ROCE) {
                ret = ib_resolve_eth_dmac(qp->device, &attr->ah_attr);
                if (ret)
                        return ret;
        }
        return _ib_modify_qp(qp, attr, attr_mask, udata);
}
EXPORT_SYMBOL(ib_modify_qp_with_udata);
1378
Yuval Shaiad4186192017-06-14 23:13:34 +03001379int ib_get_eth_speed(struct ib_device *dev, u8 port_num, u8 *speed, u8 *width)
1380{
1381 int rc;
1382 u32 netdev_speed;
1383 struct net_device *netdev;
1384 struct ethtool_link_ksettings lksettings;
1385
1386 if (rdma_port_get_link_layer(dev, port_num) != IB_LINK_LAYER_ETHERNET)
1387 return -EINVAL;
1388
1389 if (!dev->get_netdev)
1390 return -EOPNOTSUPP;
1391
1392 netdev = dev->get_netdev(dev, port_num);
1393 if (!netdev)
1394 return -ENODEV;
1395
1396 rtnl_lock();
1397 rc = __ethtool_get_link_ksettings(netdev, &lksettings);
1398 rtnl_unlock();
1399
1400 dev_put(netdev);
1401
1402 if (!rc) {
1403 netdev_speed = lksettings.base.speed;
1404 } else {
1405 netdev_speed = SPEED_1000;
1406 pr_warn("%s speed is unknown, defaulting to %d\n", netdev->name,
1407 netdev_speed);
1408 }
1409
1410 if (netdev_speed <= SPEED_1000) {
1411 *width = IB_WIDTH_1X;
1412 *speed = IB_SPEED_SDR;
1413 } else if (netdev_speed <= SPEED_10000) {
1414 *width = IB_WIDTH_1X;
1415 *speed = IB_SPEED_FDR10;
1416 } else if (netdev_speed <= SPEED_20000) {
1417 *width = IB_WIDTH_4X;
1418 *speed = IB_SPEED_DDR;
1419 } else if (netdev_speed <= SPEED_25000) {
1420 *width = IB_WIDTH_1X;
1421 *speed = IB_SPEED_EDR;
1422 } else if (netdev_speed <= SPEED_40000) {
1423 *width = IB_WIDTH_4X;
1424 *speed = IB_SPEED_FDR10;
1425 } else {
1426 *width = IB_WIDTH_4X;
1427 *speed = IB_SPEED_EDR;
1428 }
1429
1430 return 0;
1431}
1432EXPORT_SYMBOL(ib_get_eth_speed);
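
/*
 * Illustrative sketch (hypothetical helper, not used in this tree): a RoCE
 * driver's query_port() handler could use ib_get_eth_speed() to derive
 * active_speed/active_width from the underlying netdev.
 */
static __maybe_unused int example_query_port(struct ib_device *ibdev,
					     u8 port_num,
					     struct ib_port_attr *props)
{
	int err;

	/* Falls back to SDR/1X when ethtool cannot report a link speed. */
	err = ib_get_eth_speed(ibdev, port_num, &props->active_speed,
			       &props->active_width);
	if (err)
		return err;

	/* ... fill in the remaining ib_port_attr fields here ... */
	return 0;
}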
1433
Linus Torvalds1da177e2005-04-16 15:20:36 -07001434int ib_modify_qp(struct ib_qp *qp,
1435 struct ib_qp_attr *qp_attr,
1436 int qp_attr_mask)
1437{
Parav Panditb96ac052018-01-09 15:24:51 +02001438 return _ib_modify_qp(qp->real_qp, qp_attr, qp_attr_mask, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001439}
1440EXPORT_SYMBOL(ib_modify_qp);
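
/*
 * Illustrative sketch (hypothetical helper, not part of this file's API):
 * a kernel ULP moving a freshly created RC QP from RESET to INIT with
 * ib_modify_qp(). The port number and access flags are example values.
 */
static __maybe_unused int example_qp_to_init(struct ib_qp *qp)
{
	struct ib_qp_attr attr = {
		.qp_state	 = IB_QPS_INIT,
		.pkey_index	 = 0,
		.port_num	 = 1,
		.qp_access_flags = IB_ACCESS_REMOTE_READ |
				   IB_ACCESS_REMOTE_WRITE,
	};

	/* The mask tells the driver which fields of attr are valid. */
	return ib_modify_qp(qp, &attr,
			    IB_QP_STATE | IB_QP_PKEY_INDEX |
			    IB_QP_PORT | IB_QP_ACCESS_FLAGS);
}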
1441
1442int ib_query_qp(struct ib_qp *qp,
1443 struct ib_qp_attr *qp_attr,
1444 int qp_attr_mask,
1445 struct ib_qp_init_attr *qp_init_attr)
1446{
1447 return qp->device->query_qp ?
Sean Hefty0e0ec7e2011-08-08 15:31:51 -07001448 qp->device->query_qp(qp->real_qp, qp_attr, qp_attr_mask, qp_init_attr) :
Linus Torvalds1da177e2005-04-16 15:20:36 -07001449 -ENOSYS;
1450}
1451EXPORT_SYMBOL(ib_query_qp);
1452
Sean Hefty0e0ec7e2011-08-08 15:31:51 -07001453int ib_close_qp(struct ib_qp *qp)
1454{
1455 struct ib_qp *real_qp;
1456 unsigned long flags;
1457
1458 real_qp = qp->real_qp;
1459 if (real_qp == qp)
1460 return -EINVAL;
1461
1462 spin_lock_irqsave(&real_qp->device->event_handler_lock, flags);
1463 list_del(&qp->open_list);
1464 spin_unlock_irqrestore(&real_qp->device->event_handler_lock, flags);
1465
1466 atomic_dec(&real_qp->usecnt);
Moni Shoua4a508812017-12-24 13:54:58 +02001467 if (qp->qp_sec)
1468 ib_close_shared_qp_security(qp->qp_sec);
Sean Hefty0e0ec7e2011-08-08 15:31:51 -07001469 kfree(qp);
1470
1471 return 0;
1472}
1473EXPORT_SYMBOL(ib_close_qp);
1474
1475static int __ib_destroy_shared_qp(struct ib_qp *qp)
1476{
1477 struct ib_xrcd *xrcd;
1478 struct ib_qp *real_qp;
1479 int ret;
1480
1481 real_qp = qp->real_qp;
1482 xrcd = real_qp->xrcd;
1483
1484 mutex_lock(&xrcd->tgt_qp_mutex);
1485 ib_close_qp(qp);
1486 if (atomic_read(&real_qp->usecnt) == 0)
1487 list_del(&real_qp->xrcd_list);
1488 else
1489 real_qp = NULL;
1490 mutex_unlock(&xrcd->tgt_qp_mutex);
1491
1492 if (real_qp) {
1493 ret = ib_destroy_qp(real_qp);
1494 if (!ret)
1495 atomic_dec(&xrcd->usecnt);
1496 else
1497 __ib_insert_xrcd_qp(xrcd, real_qp);
1498 }
1499
1500 return 0;
1501}
1502
Linus Torvalds1da177e2005-04-16 15:20:36 -07001503int ib_destroy_qp(struct ib_qp *qp)
1504{
1505 struct ib_pd *pd;
1506 struct ib_cq *scq, *rcq;
1507 struct ib_srq *srq;
Yishai Hadasa9017e22016-05-23 15:20:54 +03001508 struct ib_rwq_ind_table *ind_tbl;
Daniel Jurgensd291f1a2017-05-19 15:48:52 +03001509 struct ib_qp_security *sec;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001510 int ret;
1511
Christoph Hellwigfffb0382016-05-03 18:01:07 +02001512 WARN_ON_ONCE(qp->mrs_used > 0);
1513
Sean Hefty0e0ec7e2011-08-08 15:31:51 -07001514 if (atomic_read(&qp->usecnt))
1515 return -EBUSY;
1516
1517 if (qp->real_qp != qp)
1518 return __ib_destroy_shared_qp(qp);
1519
Sean Heftyb42b63c2011-05-23 19:59:25 -07001520 pd = qp->pd;
1521 scq = qp->send_cq;
1522 rcq = qp->recv_cq;
1523 srq = qp->srq;
Yishai Hadasa9017e22016-05-23 15:20:54 +03001524 ind_tbl = qp->rwq_ind_tbl;
Daniel Jurgensd291f1a2017-05-19 15:48:52 +03001525 sec = qp->qp_sec;
1526 if (sec)
1527 ib_destroy_qp_security_begin(sec);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001528
Christoph Hellwiga060b562016-05-03 18:01:09 +02001529 if (!qp->uobject)
1530 rdma_rw_cleanup_mrs(qp);
1531
Linus Torvalds1da177e2005-04-16 15:20:36 -07001532 ret = qp->device->destroy_qp(qp);
1533 if (!ret) {
Sean Heftyb42b63c2011-05-23 19:59:25 -07001534 if (pd)
1535 atomic_dec(&pd->usecnt);
1536 if (scq)
1537 atomic_dec(&scq->usecnt);
1538 if (rcq)
1539 atomic_dec(&rcq->usecnt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001540 if (srq)
1541 atomic_dec(&srq->usecnt);
Yishai Hadasa9017e22016-05-23 15:20:54 +03001542 if (ind_tbl)
1543 atomic_dec(&ind_tbl->usecnt);
Daniel Jurgensd291f1a2017-05-19 15:48:52 +03001544 if (sec)
1545 ib_destroy_qp_security_end(sec);
1546 } else {
1547 if (sec)
1548 ib_destroy_qp_security_abort(sec);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001549 }
1550
1551 return ret;
1552}
1553EXPORT_SYMBOL(ib_destroy_qp);
1554
1555/* Completion queues */
1556
1557struct ib_cq *ib_create_cq(struct ib_device *device,
1558 ib_comp_handler comp_handler,
1559 void (*event_handler)(struct ib_event *, void *),
Matan Barak8e372102015-06-11 16:35:21 +03001560 void *cq_context,
1561 const struct ib_cq_init_attr *cq_attr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001562{
1563 struct ib_cq *cq;
1564
Matan Barak8e372102015-06-11 16:35:21 +03001565 cq = device->create_cq(device, cq_attr, NULL, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001566
1567 if (!IS_ERR(cq)) {
1568 cq->device = device;
Roland Dreierb5e81bf2005-07-07 17:57:11 -07001569 cq->uobject = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001570 cq->comp_handler = comp_handler;
1571 cq->event_handler = event_handler;
1572 cq->cq_context = cq_context;
1573 atomic_set(&cq->usecnt, 0);
1574 }
1575
1576 return cq;
1577}
1578EXPORT_SYMBOL(ib_create_cq);
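
/*
 * Illustrative sketch (hypothetical names): creating a plain CQ with
 * ib_create_cq(). The CQE count and comp_vector are example values.
 * Consumers that want the ib_cqe based poll API should use ib_alloc_cq()
 * from cq.c instead.
 */
static __maybe_unused struct ib_cq *example_create_cq(struct ib_device *dev,
						      ib_comp_handler handler,
						      void *ctx)
{
	struct ib_cq_init_attr attr = {
		.cqe	     = 128,	/* minimum number of CQ entries wanted */
		.comp_vector = 0,	/* completion interrupt vector to use */
	};

	/* No async event handler in this sketch, hence the NULL argument. */
	return ib_create_cq(dev, handler, NULL, ctx, &attr);
}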
1579
Leon Romanovsky4190b4e2017-11-13 10:51:19 +02001580int rdma_set_cq_moderation(struct ib_cq *cq, u16 cq_count, u16 cq_period)
Eli Cohen2dd57162008-04-16 21:09:33 -07001581{
1582 return cq->device->modify_cq ?
1583 cq->device->modify_cq(cq, cq_count, cq_period) : -ENOSYS;
1584}
Leon Romanovsky4190b4e2017-11-13 10:51:19 +02001585EXPORT_SYMBOL(rdma_set_cq_moderation);
Eli Cohen2dd57162008-04-16 21:09:33 -07001586
Linus Torvalds1da177e2005-04-16 15:20:36 -07001587int ib_destroy_cq(struct ib_cq *cq)
1588{
1589 if (atomic_read(&cq->usecnt))
1590 return -EBUSY;
1591
1592 return cq->device->destroy_cq(cq);
1593}
1594EXPORT_SYMBOL(ib_destroy_cq);
1595
Roland Dreiera74cd4a2006-02-13 16:30:49 -08001596int ib_resize_cq(struct ib_cq *cq, int cqe)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001597{
Roland Dreier40de2e52005-11-08 11:10:25 -08001598 return cq->device->resize_cq ?
Roland Dreier33b9b3e2006-01-30 14:29:21 -08001599 cq->device->resize_cq(cq, cqe, NULL) : -ENOSYS;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001600}
1601EXPORT_SYMBOL(ib_resize_cq);
1602
1603/* Memory regions */
1604
Linus Torvalds1da177e2005-04-16 15:20:36 -07001605int ib_dereg_mr(struct ib_mr *mr)
1606{
Christoph Hellwigab67ed82015-12-23 19:12:54 +01001607 struct ib_pd *pd = mr->pd;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001608 int ret;
1609
Linus Torvalds1da177e2005-04-16 15:20:36 -07001610 ret = mr->device->dereg_mr(mr);
1611 if (!ret)
1612 atomic_dec(&pd->usecnt);
1613
1614 return ret;
1615}
1616EXPORT_SYMBOL(ib_dereg_mr);
1617
Sagi Grimberg9bee1782015-07-30 10:32:35 +03001618/**
1619 * ib_alloc_mr() - Allocates a memory region
1620 * @pd: protection domain associated with the region
1621 * @mr_type: memory region type
1622 * @max_num_sg: maximum sg entries available for registration.
1623 *
1624 * Notes:
1625 * Memory registration page/sg lists must not exceed max_num_sg.
1626 * For mr_type IB_MR_TYPE_MEM_REG, the total length cannot exceed
1627 * max_num_sg * used_page_size.
1628 *
1629 */
1630struct ib_mr *ib_alloc_mr(struct ib_pd *pd,
1631 enum ib_mr_type mr_type,
1632 u32 max_num_sg)
Sagi Grimberg17cd3a22014-02-23 14:19:04 +02001633{
1634 struct ib_mr *mr;
1635
Sagi Grimbergd9f272c2015-07-30 10:32:48 +03001636 if (!pd->device->alloc_mr)
Sagi Grimberg17cd3a22014-02-23 14:19:04 +02001637 return ERR_PTR(-ENOSYS);
1638
Sagi Grimbergd9f272c2015-07-30 10:32:48 +03001639 mr = pd->device->alloc_mr(pd, mr_type, max_num_sg);
Sagi Grimberg17cd3a22014-02-23 14:19:04 +02001640 if (!IS_ERR(mr)) {
1641 mr->device = pd->device;
1642 mr->pd = pd;
1643 mr->uobject = NULL;
1644 atomic_inc(&pd->usecnt);
Steve Wised4a85c32016-05-03 18:01:08 +02001645 mr->need_inval = false;
Sagi Grimberg17cd3a22014-02-23 14:19:04 +02001646 }
1647
1648 return mr;
1649}
Sagi Grimberg9bee1782015-07-30 10:32:35 +03001650EXPORT_SYMBOL(ib_alloc_mr);
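
/*
 * Illustrative sketch (hypothetical helper): allocating a
 * fast-registration MR that may map up to 16 sg entries, per the
 * constraints documented above. The count is an example value.
 */
static __maybe_unused struct ib_mr *example_alloc_reg_mr(struct ib_pd *pd)
{
	/* Registrations through this MR use at most 16 page/sg entries. */
	return ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, 16);
}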
Steve Wise00f7ec32008-07-14 23:48:45 -07001651
Linus Torvalds1da177e2005-04-16 15:20:36 -07001652/* "Fast" memory regions */
1653
1654struct ib_fmr *ib_alloc_fmr(struct ib_pd *pd,
1655 int mr_access_flags,
1656 struct ib_fmr_attr *fmr_attr)
1657{
1658 struct ib_fmr *fmr;
1659
1660 if (!pd->device->alloc_fmr)
1661 return ERR_PTR(-ENOSYS);
1662
1663 fmr = pd->device->alloc_fmr(pd, mr_access_flags, fmr_attr);
1664 if (!IS_ERR(fmr)) {
1665 fmr->device = pd->device;
1666 fmr->pd = pd;
1667 atomic_inc(&pd->usecnt);
1668 }
1669
1670 return fmr;
1671}
1672EXPORT_SYMBOL(ib_alloc_fmr);
1673
1674int ib_unmap_fmr(struct list_head *fmr_list)
1675{
1676 struct ib_fmr *fmr;
1677
1678 if (list_empty(fmr_list))
1679 return 0;
1680
1681 fmr = list_entry(fmr_list->next, struct ib_fmr, list);
1682 return fmr->device->unmap_fmr(fmr_list);
1683}
1684EXPORT_SYMBOL(ib_unmap_fmr);
1685
1686int ib_dealloc_fmr(struct ib_fmr *fmr)
1687{
1688 struct ib_pd *pd;
1689 int ret;
1690
1691 pd = fmr->pd;
1692 ret = fmr->device->dealloc_fmr(fmr);
1693 if (!ret)
1694 atomic_dec(&pd->usecnt);
1695
1696 return ret;
1697}
1698EXPORT_SYMBOL(ib_dealloc_fmr);
1699
1700/* Multicast groups */
1701
Noa Osherovich52363332017-06-12 11:14:02 +03001702static bool is_valid_mcast_lid(struct ib_qp *qp, u16 lid)
1703{
1704 struct ib_qp_init_attr init_attr = {};
1705 struct ib_qp_attr attr = {};
1706 int num_eth_ports = 0;
1707 int port;
1708
1709 /* If the QP state is >= INIT, the QP is assigned to a port and we
1710 * need to check only this port.
1711 */
1712 if (!ib_query_qp(qp, &attr, IB_QP_STATE | IB_QP_PORT, &init_attr)) {
1713 if (attr.qp_state >= IB_QPS_INIT) {
Alex Estrine6f9bc32017-08-31 09:30:34 -07001714 if (rdma_port_get_link_layer(qp->device, attr.port_num) !=
Noa Osherovich52363332017-06-12 11:14:02 +03001715 IB_LINK_LAYER_INFINIBAND)
1716 return true;
1717 goto lid_check;
1718 }
1719 }
1720
1721 /* Can't get a quick answer, iterate over all ports */
1722 for (port = 0; port < qp->device->phys_port_cnt; port++)
Alex Estrine6f9bc32017-08-31 09:30:34 -07001723 if (rdma_port_get_link_layer(qp->device, port) !=
Noa Osherovich52363332017-06-12 11:14:02 +03001724 IB_LINK_LAYER_INFINIBAND)
1725 num_eth_ports++;
1726
1727 /* If we have at least one Ethernet port, the RoCE annex declares that
1728 * multicast LID should be ignored. We can't tell at this step if the
1729 * QP belongs to an IB or Ethernet port.
1730 */
1731 if (num_eth_ports)
1732 return true;
1733
1734 /* If all the ports are IB, we can check according to the IB spec. */
1735lid_check:
1736 return !(lid < be16_to_cpu(IB_MULTICAST_LID_BASE) ||
1737 lid == be16_to_cpu(IB_LID_PERMISSIVE));
1738}
1739
Linus Torvalds1da177e2005-04-16 15:20:36 -07001740int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
1741{
Or Gerlitzc3bccbfb2012-04-29 17:04:22 +03001742 int ret;
1743
Jack Morgenstein0c33aee2005-09-26 11:47:53 -07001744 if (!qp->device->attach_mcast)
1745 return -ENOSYS;
Noa Osherovichbe1d3252017-06-12 11:14:03 +03001746
1747 if (!rdma_is_multicast_addr((struct in6_addr *)gid->raw) ||
1748 qp->qp_type != IB_QPT_UD || !is_valid_mcast_lid(qp, lid))
Jack Morgenstein0c33aee2005-09-26 11:47:53 -07001749 return -EINVAL;
1750
Or Gerlitzc3bccbfb2012-04-29 17:04:22 +03001751 ret = qp->device->attach_mcast(qp, gid, lid);
1752 if (!ret)
1753 atomic_inc(&qp->usecnt);
1754 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001755}
1756EXPORT_SYMBOL(ib_attach_mcast);
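
/*
 * Illustrative sketch (hypothetical helper): attaching a UD QP to a
 * multicast group and detaching it again. A real consumer obtains the
 * MGID/MLID from the SA, e.g. via ib_sa_join_multicast().
 */
static __maybe_unused int example_mcast_cycle(struct ib_qp *qp,
					      union ib_gid *mgid, u16 mlid)
{
	int ret;

	/* Fails with -EINVAL unless qp is UD and mgid is a multicast GID. */
	ret = ib_attach_mcast(qp, mgid, mlid);
	if (ret)
		return ret;

	/* ... receive multicast traffic ... */

	return ib_detach_mcast(qp, mgid, mlid);
}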
1757
1758int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
1759{
Or Gerlitzc3bccbfb2012-04-29 17:04:22 +03001760 int ret;
1761
Jack Morgenstein0c33aee2005-09-26 11:47:53 -07001762 if (!qp->device->detach_mcast)
1763 return -ENOSYS;
Noa Osherovichbe1d3252017-06-12 11:14:03 +03001764
1765 if (!rdma_is_multicast_addr((struct in6_addr *)gid->raw) ||
1766 qp->qp_type != IB_QPT_UD || !is_valid_mcast_lid(qp, lid))
Jack Morgenstein0c33aee2005-09-26 11:47:53 -07001767 return -EINVAL;
1768
Or Gerlitzc3bccbfb2012-04-29 17:04:22 +03001769 ret = qp->device->detach_mcast(qp, gid, lid);
1770 if (!ret)
1771 atomic_dec(&qp->usecnt);
1772 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001773}
1774EXPORT_SYMBOL(ib_detach_mcast);
Sean Hefty59991f92011-05-23 17:52:46 -07001775
1776struct ib_xrcd *ib_alloc_xrcd(struct ib_device *device)
1777{
1778 struct ib_xrcd *xrcd;
1779
1780 if (!device->alloc_xrcd)
1781 return ERR_PTR(-ENOSYS);
1782
1783 xrcd = device->alloc_xrcd(device, NULL, NULL);
1784 if (!IS_ERR(xrcd)) {
1785 xrcd->device = device;
Sean Hefty53d0bd12011-05-24 08:33:46 -07001786 xrcd->inode = NULL;
Sean Hefty59991f92011-05-23 17:52:46 -07001787 atomic_set(&xrcd->usecnt, 0);
Sean Heftyd3d72d92011-05-26 23:06:44 -07001788 mutex_init(&xrcd->tgt_qp_mutex);
1789 INIT_LIST_HEAD(&xrcd->tgt_qp_list);
Sean Hefty59991f92011-05-23 17:52:46 -07001790 }
1791
1792 return xrcd;
1793}
1794EXPORT_SYMBOL(ib_alloc_xrcd);
1795
1796int ib_dealloc_xrcd(struct ib_xrcd *xrcd)
1797{
Sean Heftyd3d72d92011-05-26 23:06:44 -07001798 struct ib_qp *qp;
1799 int ret;
1800
Sean Hefty59991f92011-05-23 17:52:46 -07001801 if (atomic_read(&xrcd->usecnt))
1802 return -EBUSY;
1803
Sean Heftyd3d72d92011-05-26 23:06:44 -07001804 while (!list_empty(&xrcd->tgt_qp_list)) {
1805 qp = list_entry(xrcd->tgt_qp_list.next, struct ib_qp, xrcd_list);
1806 ret = ib_destroy_qp(qp);
1807 if (ret)
1808 return ret;
1809 }
1810
Sean Hefty59991f92011-05-23 17:52:46 -07001811 return xrcd->device->dealloc_xrcd(xrcd);
1812}
1813EXPORT_SYMBOL(ib_dealloc_xrcd);
Hadar Hen Zion319a4412013-08-07 14:01:59 +03001814
Yishai Hadas5fd251c2016-05-23 15:20:48 +03001815/**
1816 * ib_create_wq - Creates a WQ associated with the specified protection
1817 * domain.
1818 * @pd: The protection domain associated with the WQ.
Randy Dunlap1f586212018-01-05 16:21:40 -08001819 * @wq_attr: A list of initial attributes required to create the
Yishai Hadas5fd251c2016-05-23 15:20:48 +03001820 * WQ. If WQ creation succeeds, then the attributes are updated to
1821 * the actual capabilities of the created WQ.
1822 *
Randy Dunlap1f586212018-01-05 16:21:40 -08001823 * wq_attr->max_wr and wq_attr->max_sge determine
Yishai Hadas5fd251c2016-05-23 15:20:48 +03001824 * the requested size of the WQ, and are set to the actual values allocated
1825 * on return.
1826 * If ib_create_wq() succeeds, then max_wr and max_sge will always be
1827 * at least as large as the requested values.
1828 */
1829struct ib_wq *ib_create_wq(struct ib_pd *pd,
1830 struct ib_wq_init_attr *wq_attr)
1831{
1832 struct ib_wq *wq;
1833
1834 if (!pd->device->create_wq)
1835 return ERR_PTR(-ENOSYS);
1836
1837 wq = pd->device->create_wq(pd, wq_attr, NULL);
1838 if (!IS_ERR(wq)) {
1839 wq->event_handler = wq_attr->event_handler;
1840 wq->wq_context = wq_attr->wq_context;
1841 wq->wq_type = wq_attr->wq_type;
1842 wq->cq = wq_attr->cq;
1843 wq->device = pd->device;
1844 wq->pd = pd;
1845 wq->uobject = NULL;
1846 atomic_inc(&pd->usecnt);
1847 atomic_inc(&wq_attr->cq->usecnt);
1848 atomic_set(&wq->usecnt, 0);
1849 }
1850 return wq;
1851}
1852EXPORT_SYMBOL(ib_create_wq);
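
/*
 * Illustrative sketch (hypothetical helper): creating a receive WQ, for
 * instance for use in an RSS indirection table. The sizes are example
 * values; as documented above, max_wr/max_sge are updated to the actually
 * allocated values on success.
 */
static __maybe_unused struct ib_wq *example_create_rq(struct ib_pd *pd,
						      struct ib_cq *cq)
{
	struct ib_wq_init_attr attr = {
		.wq_type = IB_WQT_RQ,
		.max_wr	 = 256,		/* requested receive WR capacity */
		.max_sge = 1,		/* requested sges per receive WR */
		.cq	 = cq,
	};

	return ib_create_wq(pd, &attr);
}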
1853
1854/**
1855 * ib_destroy_wq - Destroys the specified WQ.
1856 * @wq: The WQ to destroy.
1857 */
1858int ib_destroy_wq(struct ib_wq *wq)
1859{
1860 int err;
1861 struct ib_cq *cq = wq->cq;
1862 struct ib_pd *pd = wq->pd;
1863
1864 if (atomic_read(&wq->usecnt))
1865 return -EBUSY;
1866
1867 err = wq->device->destroy_wq(wq);
1868 if (!err) {
1869 atomic_dec(&pd->usecnt);
1870 atomic_dec(&cq->usecnt);
1871 }
1872 return err;
1873}
1874EXPORT_SYMBOL(ib_destroy_wq);
1875
1876/**
1877 * ib_modify_wq - Modifies the specified WQ.
1878 * @wq: The WQ to modify.
1879 * @wq_attr: On input, specifies the WQ attributes to modify. On output,
1880 * the current values of selected WQ attributes are returned.
1881 * @wq_attr_mask: A bit-mask used to specify which attributes of the WQ
1882 * are being modified.
1883 */
1884int ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr,
1885 u32 wq_attr_mask)
1886{
1887 int err;
1888
1889 if (!wq->device->modify_wq)
1890 return -ENOSYS;
1891
1892 err = wq->device->modify_wq(wq, wq_attr, wq_attr_mask, NULL);
1893 return err;
1894}
1895EXPORT_SYMBOL(ib_modify_wq);
1896
Yishai Hadas6d397862016-05-23 15:20:51 +03001897/**
1898 * ib_create_rwq_ind_table - Creates a RQ Indirection Table.
1899 * @device: The device on which to create the rwq indirection table.
1900 * @ib_rwq_ind_table_init_attr: A list of initial attributes required to
1901 * create the Indirection Table.
1902 *
1903 * Note: The lifetime of ib_rwq_ind_table_init_attr->ind_tbl must not be
1904 * shorter than that of the created ib_rwq_ind_table object; the caller is
1905 * responsible for its memory allocation/free.
1906 */
1907struct ib_rwq_ind_table *ib_create_rwq_ind_table(struct ib_device *device,
1908 struct ib_rwq_ind_table_init_attr *init_attr)
1909{
1910 struct ib_rwq_ind_table *rwq_ind_table;
1911 int i;
1912 u32 table_size;
1913
1914 if (!device->create_rwq_ind_table)
1915 return ERR_PTR(-ENOSYS);
1916
1917 table_size = (1 << init_attr->log_ind_tbl_size);
1918 rwq_ind_table = device->create_rwq_ind_table(device,
1919 init_attr, NULL);
1920 if (IS_ERR(rwq_ind_table))
1921 return rwq_ind_table;
1922
1923 rwq_ind_table->ind_tbl = init_attr->ind_tbl;
1924 rwq_ind_table->log_ind_tbl_size = init_attr->log_ind_tbl_size;
1925 rwq_ind_table->device = device;
1926 rwq_ind_table->uobject = NULL;
1927 atomic_set(&rwq_ind_table->usecnt, 0);
1928
1929 for (i = 0; i < table_size; i++)
1930 atomic_inc(&rwq_ind_table->ind_tbl[i]->usecnt);
1931
1932 return rwq_ind_table;
1933}
1934EXPORT_SYMBOL(ib_create_rwq_ind_table);
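
/*
 * Illustrative sketch (hypothetical helper): building an RSS indirection
 * table over four WQs. Per the note above, the wqs array must outlive the
 * returned table; the log size is an example value.
 */
static __maybe_unused struct ib_rwq_ind_table *
example_create_ind_table(struct ib_device *dev, struct ib_wq **wqs)
{
	struct ib_rwq_ind_table_init_attr attr = {
		.log_ind_tbl_size = 2,	/* table spans 1 << 2 = 4 WQs */
		.ind_tbl	  = wqs,
	};

	return ib_create_rwq_ind_table(dev, &attr);
}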
1935
1936/**
1937 * ib_destroy_rwq_ind_table - Destroys the specified Indirection Table.
1938 * @rwq_ind_table: The Indirection Table to destroy.
1939 */
1940int ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *rwq_ind_table)
1941{
1942 int err, i;
1943 u32 table_size = (1 << rwq_ind_table->log_ind_tbl_size);
1944 struct ib_wq **ind_tbl = rwq_ind_table->ind_tbl;
1945
1946 if (atomic_read(&rwq_ind_table->usecnt))
1947 return -EBUSY;
1948
1949 err = rwq_ind_table->device->destroy_rwq_ind_table(rwq_ind_table);
1950 if (!err) {
1951 for (i = 0; i < table_size; i++)
1952 atomic_dec(&ind_tbl[i]->usecnt);
1953 }
1954
1955 return err;
1956}
1957EXPORT_SYMBOL(ib_destroy_rwq_ind_table);
1958
Hadar Hen Zion319a4412013-08-07 14:01:59 +03001959struct ib_flow *ib_create_flow(struct ib_qp *qp,
1960 struct ib_flow_attr *flow_attr,
1961 int domain)
1962{
1963 struct ib_flow *flow_id;
1964 if (!qp->device->create_flow)
1965 return ERR_PTR(-ENOSYS);
1966
1967 flow_id = qp->device->create_flow(qp, flow_attr, domain);
Mark Bloch8ecc7982016-10-27 16:36:30 +03001968 if (!IS_ERR(flow_id)) {
Hadar Hen Zion319a4412013-08-07 14:01:59 +03001969 atomic_inc(&qp->usecnt);
Mark Bloch8ecc7982016-10-27 16:36:30 +03001970 flow_id->qp = qp;
1971 }
Hadar Hen Zion319a4412013-08-07 14:01:59 +03001972 return flow_id;
1973}
1974EXPORT_SYMBOL(ib_create_flow);
1975
1976int ib_destroy_flow(struct ib_flow *flow_id)
1977{
1978 int err;
1979 struct ib_qp *qp = flow_id->qp;
1980
1981 err = qp->device->destroy_flow(flow_id);
1982 if (!err)
1983 atomic_dec(&qp->usecnt);
1984 return err;
1985}
1986EXPORT_SYMBOL(ib_destroy_flow);
Sagi Grimberg1b01d332014-02-23 14:19:05 +02001987
1988int ib_check_mr_status(struct ib_mr *mr, u32 check_mask,
1989 struct ib_mr_status *mr_status)
1990{
1991 return mr->device->check_mr_status ?
1992 mr->device->check_mr_status(mr, check_mask, mr_status) : -ENOSYS;
1993}
1994EXPORT_SYMBOL(ib_check_mr_status);
Sagi Grimberg4c67e2b2015-10-13 19:11:24 +03001995
Eli Cohen50174a72016-03-11 22:58:38 +02001996int ib_set_vf_link_state(struct ib_device *device, int vf, u8 port,
1997 int state)
1998{
1999 if (!device->set_vf_link_state)
2000 return -ENOSYS;
2001
2002 return device->set_vf_link_state(device, vf, port, state);
2003}
2004EXPORT_SYMBOL(ib_set_vf_link_state);
2005
2006int ib_get_vf_config(struct ib_device *device, int vf, u8 port,
2007 struct ifla_vf_info *info)
2008{
2009 if (!device->get_vf_config)
2010 return -ENOSYS;
2011
2012 return device->get_vf_config(device, vf, port, info);
2013}
2014EXPORT_SYMBOL(ib_get_vf_config);
2015
2016int ib_get_vf_stats(struct ib_device *device, int vf, u8 port,
2017 struct ifla_vf_stats *stats)
2018{
2019 if (!device->get_vf_stats)
2020 return -ENOSYS;
2021
2022 return device->get_vf_stats(device, vf, port, stats);
2023}
2024EXPORT_SYMBOL(ib_get_vf_stats);
2025
2026int ib_set_vf_guid(struct ib_device *device, int vf, u8 port, u64 guid,
2027 int type)
2028{
2029 if (!device->set_vf_guid)
2030 return -ENOSYS;
2031
2032 return device->set_vf_guid(device, vf, port, guid, type);
2033}
2034EXPORT_SYMBOL(ib_set_vf_guid);
2035
Sagi Grimberg4c67e2b2015-10-13 19:11:24 +03002036/**
2037 * ib_map_mr_sg() - Map the largest prefix of a dma mapped SG list
2038 * and set it as the memory region.
2039 * @mr: memory region
2040 * @sg: dma mapped scatterlist
2041 * @sg_nents: number of entries in sg
Christoph Hellwigff2ba992016-05-03 18:01:04 +02002042 * @sg_offset: offset in bytes into sg
Sagi Grimberg4c67e2b2015-10-13 19:11:24 +03002043 * @page_size: desired page size of the page vector
2044 *
2045 * Constraints:
2046 * - The first sg element is allowed to have an offset.
Bart Van Assche52746122016-09-26 09:09:42 -07002047 * - Each sg element must either be aligned to page_size or virtually
2048 * contiguous to the previous element. In case an sg element has a
2049 * non-contiguous offset, the mapping prefix will not include it.
Sagi Grimberg4c67e2b2015-10-13 19:11:24 +03002050 * - The last sg element is allowed to have length less than page_size.
2051 * - If the sg_nents total byte length exceeds the MR's max_num_sg * page_size,
2052 * then only max_num_sg entries will be mapped.
Bart Van Assche52746122016-09-26 09:09:42 -07002053 * - If the MR was allocated with type IB_MR_TYPE_SG_GAPS, none of these
Sagi Grimbergf5aa9152016-02-29 19:07:32 +02002054 * constraints holds and the page_size argument is ignored.
Sagi Grimberg4c67e2b2015-10-13 19:11:24 +03002055 *
2056 * Returns the number of sg elements that were mapped to the memory region.
2057 *
2058 * After this completes successfully, the memory region
2059 * is ready for registration.
2060 */
Christoph Hellwigff2ba992016-05-03 18:01:04 +02002061int ib_map_mr_sg(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
Bart Van Assche9aa8b322016-05-12 10:49:15 -07002062 unsigned int *sg_offset, unsigned int page_size)
Sagi Grimberg4c67e2b2015-10-13 19:11:24 +03002063{
2064 if (unlikely(!mr->device->map_mr_sg))
2065 return -ENOSYS;
2066
2067 mr->page_size = page_size;
2068
Christoph Hellwigff2ba992016-05-03 18:01:04 +02002069 return mr->device->map_mr_sg(mr, sg, sg_nents, sg_offset);
Sagi Grimberg4c67e2b2015-10-13 19:11:24 +03002070}
2071EXPORT_SYMBOL(ib_map_mr_sg);
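
/*
 * Illustrative sketch (hypothetical helper): mapping a dma-mapped sg list
 * into an MR and preparing the IB_WR_REG_MR work request that registers
 * it. Using PAGE_SIZE as the MR page size and local-write access are
 * assumptions for the example; posting the WR is left to the caller.
 */
static __maybe_unused int example_reg_mr(struct ib_mr *mr,
					 struct scatterlist *sg, int sg_nents,
					 struct ib_reg_wr *reg_wr)
{
	int n;

	/* A NULL sg_offset means the first sg element is used from byte 0. */
	n = ib_map_mr_sg(mr, sg, sg_nents, NULL, PAGE_SIZE);
	if (n < sg_nents)
		return n < 0 ? n : -EINVAL;	/* error or partial mapping */

	reg_wr->wr.opcode = IB_WR_REG_MR;
	reg_wr->mr	  = mr;
	reg_wr->key	  = mr->rkey;
	reg_wr->access	  = IB_ACCESS_LOCAL_WRITE;
	return 0;
}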
2072
2073/**
2074 * ib_sg_to_pages() - Convert the largest prefix of a sg list
2075 * to a page vector
2076 * @mr: memory region
2077 * @sgl: dma mapped scatterlist
2078 * @sg_nents: number of entries in sg
Bart Van Assche9aa8b322016-05-12 10:49:15 -07002079 * @sg_offset_p: IN: start offset in bytes into sg
2080 * OUT: offset in bytes for element n of the sg of the first
2081 * byte that has not been processed, where n is the return
2082 * value of this function.
Sagi Grimberg4c67e2b2015-10-13 19:11:24 +03002083 * @set_page: driver page assignment function pointer
2084 *
Bart Van Assche8f5ba102015-12-03 16:04:17 -08002085 * Core service helper for drivers to convert the largest
Sagi Grimberg4c67e2b2015-10-13 19:11:24 +03002086 * prefix of a given sg list to a page vector. The sg list
2087 * prefix converted is the prefix that meets the requirements
2088 * of ib_map_mr_sg.
2089 *
2090 * Returns the number of sg elements that were assigned to
2091 * a page vector.
2092 */
Christoph Hellwigff2ba992016-05-03 18:01:04 +02002093int ib_sg_to_pages(struct ib_mr *mr, struct scatterlist *sgl, int sg_nents,
Bart Van Assche9aa8b322016-05-12 10:49:15 -07002094 unsigned int *sg_offset_p, int (*set_page)(struct ib_mr *, u64))
Sagi Grimberg4c67e2b2015-10-13 19:11:24 +03002095{
2096 struct scatterlist *sg;
Bart Van Asscheb6aeb982015-12-29 10:45:03 +01002097 u64 last_end_dma_addr = 0;
Bart Van Assche9aa8b322016-05-12 10:49:15 -07002098 unsigned int sg_offset = sg_offset_p ? *sg_offset_p : 0;
Sagi Grimberg4c67e2b2015-10-13 19:11:24 +03002099 unsigned int last_page_off = 0;
2100 u64 page_mask = ~((u64)mr->page_size - 1);
Bart Van Assche8f5ba102015-12-03 16:04:17 -08002101 int i, ret;
Sagi Grimberg4c67e2b2015-10-13 19:11:24 +03002102
Bart Van Assche9aa8b322016-05-12 10:49:15 -07002103 if (unlikely(sg_nents <= 0 || sg_offset > sg_dma_len(&sgl[0])))
2104 return -EINVAL;
2105
Christoph Hellwigff2ba992016-05-03 18:01:04 +02002106 mr->iova = sg_dma_address(&sgl[0]) + sg_offset;
Sagi Grimberg4c67e2b2015-10-13 19:11:24 +03002107 mr->length = 0;
2108
2109 for_each_sg(sgl, sg, sg_nents, i) {
Christoph Hellwigff2ba992016-05-03 18:01:04 +02002110 u64 dma_addr = sg_dma_address(sg) + sg_offset;
Bart Van Assche9aa8b322016-05-12 10:49:15 -07002111 u64 prev_addr = dma_addr;
Christoph Hellwigff2ba992016-05-03 18:01:04 +02002112 unsigned int dma_len = sg_dma_len(sg) - sg_offset;
Sagi Grimberg4c67e2b2015-10-13 19:11:24 +03002113 u64 end_dma_addr = dma_addr + dma_len;
2114 u64 page_addr = dma_addr & page_mask;
2115
Bart Van Assche8f5ba102015-12-03 16:04:17 -08002116 /*
2117 * For the second and later elements, check whether either the
2118 * end of element i-1 or the start of element i is not aligned
2119 * on a page boundary.
2120 */
2121 if (i && (last_page_off != 0 || page_addr != dma_addr)) {
2122 /* Stop mapping if there is a gap. */
2123 if (last_end_dma_addr != dma_addr)
2124 break;
Sagi Grimberg4c67e2b2015-10-13 19:11:24 +03002125
Bart Van Assche8f5ba102015-12-03 16:04:17 -08002126 /*
2127 * Coalesce this element with the last. If it is small
2128 * enough just update mr->length. Otherwise start
2129 * mapping from the next page.
2130 */
2131 goto next_page;
Sagi Grimberg4c67e2b2015-10-13 19:11:24 +03002132 }
2133
2134 do {
Bart Van Assche8f5ba102015-12-03 16:04:17 -08002135 ret = set_page(mr, page_addr);
Bart Van Assche9aa8b322016-05-12 10:49:15 -07002136 if (unlikely(ret < 0)) {
2137 sg_offset = prev_addr - sg_dma_address(sg);
2138 mr->length += prev_addr - dma_addr;
2139 if (sg_offset_p)
2140 *sg_offset_p = sg_offset;
2141 return i || sg_offset ? i : ret;
2142 }
2143 prev_addr = page_addr;
Bart Van Assche8f5ba102015-12-03 16:04:17 -08002144next_page:
Sagi Grimberg4c67e2b2015-10-13 19:11:24 +03002145 page_addr += mr->page_size;
2146 } while (page_addr < end_dma_addr);
2147
2148 mr->length += dma_len;
2149 last_end_dma_addr = end_dma_addr;
Sagi Grimberg4c67e2b2015-10-13 19:11:24 +03002150 last_page_off = end_dma_addr & ~page_mask;
Christoph Hellwigff2ba992016-05-03 18:01:04 +02002151
2152 sg_offset = 0;
Sagi Grimberg4c67e2b2015-10-13 19:11:24 +03002153 }
2154
Bart Van Assche9aa8b322016-05-12 10:49:15 -07002155 if (sg_offset_p)
2156 *sg_offset_p = 0;
Sagi Grimberg4c67e2b2015-10-13 19:11:24 +03002157 return i;
2158}
2159EXPORT_SYMBOL(ib_sg_to_pages);
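
/*
 * Illustrative sketch: the set_page() callback a driver might pass to
 * ib_sg_to_pages(), storing each page address into a driver-private
 * array. "struct example_mr" is a hypothetical wrapper, not a type used
 * anywhere in this tree.
 */
struct example_mr {
	struct ib_mr	ibmr;
	u64		*pages;		/* page-list buffer, max_pages deep */
	int		npages;
	int		max_pages;
};

static __maybe_unused int example_set_page(struct ib_mr *ibmr, u64 addr)
{
	struct example_mr *emr = container_of(ibmr, struct example_mr, ibmr);

	/* A negative return makes ib_sg_to_pages() stop at this element. */
	if (emr->npages == emr->max_pages)
		return -ENOMEM;

	emr->pages[emr->npages++] = addr;
	return 0;
}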
Steve Wise765d6772016-02-17 08:15:41 -08002160
2161struct ib_drain_cqe {
2162 struct ib_cqe cqe;
2163 struct completion done;
2164};
2165
2166static void ib_drain_qp_done(struct ib_cq *cq, struct ib_wc *wc)
2167{
2168 struct ib_drain_cqe *cqe = container_of(wc->wr_cqe, struct ib_drain_cqe,
2169 cqe);
2170
2171 complete(&cqe->done);
2172}
2173
2174/*
2175 * Post a WR on the SQ and block until its completion is reaped.
2176 */
2177static void __ib_drain_sq(struct ib_qp *qp)
2178{
Bart Van Asschef039f442017-02-14 10:56:35 -08002179 struct ib_cq *cq = qp->send_cq;
Steve Wise765d6772016-02-17 08:15:41 -08002180 struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
2181 struct ib_drain_cqe sdrain;
2182 struct ib_send_wr swr = {}, *bad_swr;
2183 int ret;
2184
Steve Wise765d6772016-02-17 08:15:41 -08002185 swr.wr_cqe = &sdrain.cqe;
2186 sdrain.cqe.done = ib_drain_qp_done;
2187 init_completion(&sdrain.done);
2188
2189 ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
2190 if (ret) {
2191 WARN_ONCE(ret, "failed to drain send queue: %d\n", ret);
2192 return;
2193 }
2194
2195 ret = ib_post_send(qp, &swr, &bad_swr);
2196 if (ret) {
2197 WARN_ONCE(ret, "failed to drain send queue: %d\n", ret);
2198 return;
2199 }
2200
Bart Van Asschef039f442017-02-14 10:56:35 -08002201 if (cq->poll_ctx == IB_POLL_DIRECT)
2202 while (wait_for_completion_timeout(&sdrain.done, HZ / 10) <= 0)
2203 ib_process_cq_direct(cq, -1);
2204 else
2205 wait_for_completion(&sdrain.done);
Steve Wise765d6772016-02-17 08:15:41 -08002206}
2207
2208/*
2209 * Post a WR on the RQ and block until its completion is reaped.
2210 */
2211static void __ib_drain_rq(struct ib_qp *qp)
2212{
Bart Van Asschef039f442017-02-14 10:56:35 -08002213 struct ib_cq *cq = qp->recv_cq;
Steve Wise765d6772016-02-17 08:15:41 -08002214 struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
2215 struct ib_drain_cqe rdrain;
2216 struct ib_recv_wr rwr = {}, *bad_rwr;
2217 int ret;
2218
Steve Wise765d6772016-02-17 08:15:41 -08002219 rwr.wr_cqe = &rdrain.cqe;
2220 rdrain.cqe.done = ib_drain_qp_done;
2221 init_completion(&rdrain.done);
2222
2223 ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
2224 if (ret) {
2225 WARN_ONCE(ret, "failed to drain recv queue: %d\n", ret);
2226 return;
2227 }
2228
2229 ret = ib_post_recv(qp, &rwr, &bad_rwr);
2230 if (ret) {
2231 WARN_ONCE(ret, "failed to drain recv queue: %d\n", ret);
2232 return;
2233 }
2234
Bart Van Asschef039f442017-02-14 10:56:35 -08002235 if (cq->poll_ctx == IB_POLL_DIRECT)
2236 while (wait_for_completion_timeout(&rdrain.done, HZ / 10) <= 0)
2237 ib_process_cq_direct(cq, -1);
2238 else
2239 wait_for_completion(&rdrain.done);
Steve Wise765d6772016-02-17 08:15:41 -08002240}
2241
2242/**
2243 * ib_drain_sq() - Block until all SQ CQEs have been consumed by the
2244 * application.
2245 * @qp: queue pair to drain
2246 *
2247 * If the device has a provider-specific drain function, then
2248 * call that. Otherwise call the generic drain function
2249 * __ib_drain_sq().
2250 *
2251 * The caller must:
2252 *
2253 * ensure there is room in the CQ and SQ for the drain work request and
2254 * completion.
2255 *
Bart Van Asschef039f442017-02-14 10:56:35 -08002256 * allocate the CQ using ib_alloc_cq().
Steve Wise765d6772016-02-17 08:15:41 -08002257 *
2258 * ensure that there are no other contexts that are posting WRs concurrently.
2259 * Otherwise the drain is not guaranteed.
2260 */
2261void ib_drain_sq(struct ib_qp *qp)
2262{
2263 if (qp->device->drain_sq)
2264 qp->device->drain_sq(qp);
2265 else
2266 __ib_drain_sq(qp);
2267}
2268EXPORT_SYMBOL(ib_drain_sq);
2269
2270/**
2271 * ib_drain_rq() - Block until all RQ CQEs have been consumed by the
2272 * application.
2273 * @qp: queue pair to drain
2274 *
2275 * If the device has a provider-specific drain function, then
2276 * call that. Otherwise call the generic drain function
2277 * __ib_drain_rq().
2278 *
2279 * The caller must:
2280 *
2281 * ensure there is room in the CQ and RQ for the drain work request and
2282 * completion.
2283 *
Bart Van Asschef039f442017-02-14 10:56:35 -08002284 * allocate the CQ using ib_alloc_cq().
Steve Wise765d6772016-02-17 08:15:41 -08002285 *
2286 * ensure that there are no other contexts that are posting WRs concurrently.
2287 * Otherwise the drain is not guaranteed.
2288 */
2289void ib_drain_rq(struct ib_qp *qp)
2290{
2291 if (qp->device->drain_rq)
2292 qp->device->drain_rq(qp);
2293 else
2294 __ib_drain_rq(qp);
2295}
2296EXPORT_SYMBOL(ib_drain_rq);
2297
2298/**
2299 * ib_drain_qp() - Block until all CQEs have been consumed by the
2300 * application on both the RQ and SQ.
2301 * @qp: queue pair to drain
2302 *
2303 * The caller must:
2304 *
2305 * ensure there is room in the CQ(s), SQ, and RQ for drain work requests
2306 * and completions.
2307 *
Bart Van Asschef039f442017-02-14 10:56:35 -08002308 * allocate the CQs using ib_alloc_cq().
Steve Wise765d6772016-02-17 08:15:41 -08002309 *
2310 * ensure that there are no other contexts that are posting WRs concurrently.
2311 * Otherwise the drain is not guaranteed.
2312 */
2313void ib_drain_qp(struct ib_qp *qp)
2314{
2315 ib_drain_sq(qp);
Sagi Grimberg42235f82016-04-26 17:55:38 +03002316 if (!qp->srq)
2317 ib_drain_rq(qp);
Steve Wise765d6772016-02-17 08:15:41 -08002318}
2319EXPORT_SYMBOL(ib_drain_qp);
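
/*
 * Illustrative sketch (hypothetical helper): the typical teardown order
 * for a ULP that allocated its CQs with ib_alloc_cq(), as the drain
 * helpers above require.
 */
static __maybe_unused void example_teardown_qp(struct ib_qp *qp)
{
	/* Move the QP to the error state and flush both work queues. */
	ib_drain_qp(qp);
	ib_destroy_qp(qp);
}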