/*
 * Copyright (c) 2004 Mellanox Technologies Ltd.  All rights reserved.
 * Copyright (c) 2004 Infinicon Corporation.  All rights reserved.
 * Copyright (c) 2004 Intel Corporation.  All rights reserved.
 * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
 * Copyright (c) 2004 Voltaire Corporation.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005, 2006 Cisco Systems.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <net/addrconf.h>
#include <linux/security.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_cache.h>
#include <rdma/ib_addr.h>
#include <rdma/rw.h>

#include "core_priv.h"

static int ib_resolve_eth_dmac(struct ib_device *device,
                               struct rdma_ah_attr *ah_attr);

static const char * const ib_events[] = {
        [IB_EVENT_CQ_ERR]               = "CQ error",
        [IB_EVENT_QP_FATAL]             = "QP fatal error",
        [IB_EVENT_QP_REQ_ERR]           = "QP request error",
        [IB_EVENT_QP_ACCESS_ERR]        = "QP access error",
        [IB_EVENT_COMM_EST]             = "communication established",
        [IB_EVENT_SQ_DRAINED]           = "send queue drained",
        [IB_EVENT_PATH_MIG]             = "path migration successful",
        [IB_EVENT_PATH_MIG_ERR]         = "path migration error",
        [IB_EVENT_DEVICE_FATAL]         = "device fatal error",
        [IB_EVENT_PORT_ACTIVE]          = "port active",
        [IB_EVENT_PORT_ERR]             = "port error",
        [IB_EVENT_LID_CHANGE]           = "LID change",
        [IB_EVENT_PKEY_CHANGE]          = "P_key change",
        [IB_EVENT_SM_CHANGE]            = "SM change",
        [IB_EVENT_SRQ_ERR]              = "SRQ error",
        [IB_EVENT_SRQ_LIMIT_REACHED]    = "SRQ limit reached",
        [IB_EVENT_QP_LAST_WQE_REACHED]  = "last WQE reached",
        [IB_EVENT_CLIENT_REREGISTER]    = "client reregister",
        [IB_EVENT_GID_CHANGE]           = "GID changed",
};

const char *__attribute_const__ ib_event_msg(enum ib_event_type event)
{
        size_t index = event;

        return (index < ARRAY_SIZE(ib_events) && ib_events[index]) ?
                        ib_events[index] : "unrecognized event";
}
EXPORT_SYMBOL(ib_event_msg);

static const char * const wc_statuses[] = {
        [IB_WC_SUCCESS]                 = "success",
        [IB_WC_LOC_LEN_ERR]             = "local length error",
        [IB_WC_LOC_QP_OP_ERR]           = "local QP operation error",
        [IB_WC_LOC_EEC_OP_ERR]          = "local EE context operation error",
        [IB_WC_LOC_PROT_ERR]            = "local protection error",
        [IB_WC_WR_FLUSH_ERR]            = "WR flushed",
        [IB_WC_MW_BIND_ERR]             = "memory management operation error",
        [IB_WC_BAD_RESP_ERR]            = "bad response error",
        [IB_WC_LOC_ACCESS_ERR]          = "local access error",
        [IB_WC_REM_INV_REQ_ERR]         = "invalid request error",
        [IB_WC_REM_ACCESS_ERR]          = "remote access error",
        [IB_WC_REM_OP_ERR]              = "remote operation error",
        [IB_WC_RETRY_EXC_ERR]           = "transport retry counter exceeded",
        [IB_WC_RNR_RETRY_EXC_ERR]       = "RNR retry counter exceeded",
        [IB_WC_LOC_RDD_VIOL_ERR]        = "local RDD violation error",
        [IB_WC_REM_INV_RD_REQ_ERR]      = "remote invalid RD request",
        [IB_WC_REM_ABORT_ERR]           = "operation aborted",
        [IB_WC_INV_EECN_ERR]            = "invalid EE context number",
        [IB_WC_INV_EEC_STATE_ERR]       = "invalid EE context state",
        [IB_WC_FATAL_ERR]               = "fatal error",
        [IB_WC_RESP_TIMEOUT_ERR]        = "response timeout error",
        [IB_WC_GENERAL_ERR]             = "general error",
};

const char *__attribute_const__ ib_wc_status_msg(enum ib_wc_status status)
{
        size_t index = status;

        return (index < ARRAY_SIZE(wc_statuses) && wc_statuses[index]) ?
                        wc_statuses[index] : "unrecognized status";
}
EXPORT_SYMBOL(ib_wc_status_msg);

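/*
 * Example (illustrative sketch, not part of the core API): a consumer's
 * completion handler can use ib_wc_status_msg() to log failed work
 * completions.  The "my_cq_handler" name and its context are hypothetical.
 *
 *      static void my_cq_handler(struct ib_cq *cq, void *ctx)
 *      {
 *              struct ib_wc wc;
 *
 *              while (ib_poll_cq(cq, 1, &wc) > 0) {
 *                      if (wc.status != IB_WC_SUCCESS)
 *                              pr_err("wr_id %llu failed: %s\n", wc.wr_id,
 *                                     ib_wc_status_msg(wc.status));
 *              }
 *      }
 */
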
__attribute_const__ int ib_rate_to_mult(enum ib_rate rate)
{
        switch (rate) {
        case IB_RATE_2_5_GBPS: return   1;
        case IB_RATE_5_GBPS:   return   2;
        case IB_RATE_10_GBPS:  return   4;
        case IB_RATE_20_GBPS:  return   8;
        case IB_RATE_30_GBPS:  return  12;
        case IB_RATE_40_GBPS:  return  16;
        case IB_RATE_60_GBPS:  return  24;
        case IB_RATE_80_GBPS:  return  32;
        case IB_RATE_120_GBPS: return  48;
        case IB_RATE_14_GBPS:  return   6;
        case IB_RATE_56_GBPS:  return  22;
        case IB_RATE_112_GBPS: return  45;
        case IB_RATE_168_GBPS: return  67;
        case IB_RATE_25_GBPS:  return  10;
        case IB_RATE_100_GBPS: return  40;
        case IB_RATE_200_GBPS: return  80;
        case IB_RATE_300_GBPS: return 120;
        default:               return  -1;
        }
}
EXPORT_SYMBOL(ib_rate_to_mult);

__attribute_const__ enum ib_rate mult_to_ib_rate(int mult)
{
        switch (mult) {
        case 1:   return IB_RATE_2_5_GBPS;
        case 2:   return IB_RATE_5_GBPS;
        case 4:   return IB_RATE_10_GBPS;
        case 8:   return IB_RATE_20_GBPS;
        case 12:  return IB_RATE_30_GBPS;
        case 16:  return IB_RATE_40_GBPS;
        case 24:  return IB_RATE_60_GBPS;
        case 32:  return IB_RATE_80_GBPS;
        case 48:  return IB_RATE_120_GBPS;
        case 6:   return IB_RATE_14_GBPS;
        case 22:  return IB_RATE_56_GBPS;
        case 45:  return IB_RATE_112_GBPS;
        case 67:  return IB_RATE_168_GBPS;
        case 10:  return IB_RATE_25_GBPS;
        case 40:  return IB_RATE_100_GBPS;
        case 80:  return IB_RATE_200_GBPS;
        case 120: return IB_RATE_300_GBPS;
        default:  return IB_RATE_PORT_CURRENT;
        }
}
EXPORT_SYMBOL(mult_to_ib_rate);

__attribute_const__ int ib_rate_to_mbps(enum ib_rate rate)
{
        switch (rate) {
        case IB_RATE_2_5_GBPS: return 2500;
        case IB_RATE_5_GBPS:   return 5000;
        case IB_RATE_10_GBPS:  return 10000;
        case IB_RATE_20_GBPS:  return 20000;
        case IB_RATE_30_GBPS:  return 30000;
        case IB_RATE_40_GBPS:  return 40000;
        case IB_RATE_60_GBPS:  return 60000;
        case IB_RATE_80_GBPS:  return 80000;
        case IB_RATE_120_GBPS: return 120000;
        case IB_RATE_14_GBPS:  return 14062;
        case IB_RATE_56_GBPS:  return 56250;
        case IB_RATE_112_GBPS: return 112500;
        case IB_RATE_168_GBPS: return 168750;
        case IB_RATE_25_GBPS:  return 25781;
        case IB_RATE_100_GBPS: return 103125;
        case IB_RATE_200_GBPS: return 206250;
        case IB_RATE_300_GBPS: return 309375;
        default:               return -1;
        }
}
EXPORT_SYMBOL(ib_rate_to_mbps);

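/*
 * Illustrative note: the "mult" above is the rate expressed as a multiple
 * of the 2.5 Gb/s base link speed, so the helpers round-trip:
 *
 *      int mult = ib_rate_to_mult(IB_RATE_100_GBPS);   // 40
 *      enum ib_rate rate = mult_to_ib_rate(mult);      // IB_RATE_100_GBPS
 *      int mbps = ib_rate_to_mbps(rate);               // 103125 actual Mb/s
 */
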
__attribute_const__ enum rdma_transport_type
rdma_node_get_transport(enum rdma_node_type node_type)
{
        if (node_type == RDMA_NODE_USNIC)
                return RDMA_TRANSPORT_USNIC;
        if (node_type == RDMA_NODE_USNIC_UDP)
                return RDMA_TRANSPORT_USNIC_UDP;
        if (node_type == RDMA_NODE_RNIC)
                return RDMA_TRANSPORT_IWARP;

        return RDMA_TRANSPORT_IB;
}
EXPORT_SYMBOL(rdma_node_get_transport);

enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device, u8 port_num)
{
        enum rdma_transport_type lt;

        if (device->get_link_layer)
                return device->get_link_layer(device, port_num);

        lt = rdma_node_get_transport(device->node_type);
        if (lt == RDMA_TRANSPORT_IB)
                return IB_LINK_LAYER_INFINIBAND;

        return IB_LINK_LAYER_ETHERNET;
}
EXPORT_SYMBOL(rdma_port_get_link_layer);

/* Protection domains */

/**
 * ib_alloc_pd - Allocates an unused protection domain.
 * @device: The device on which to allocate the protection domain.
 *
 * A protection domain object provides an association between QPs, shared
 * receive queues, address handles, memory regions, and memory windows.
 *
 * Every PD has a local_dma_lkey which can be used as the lkey value for local
 * memory operations.
 */
struct ib_pd *__ib_alloc_pd(struct ib_device *device, unsigned int flags,
                            const char *caller)
{
        struct ib_pd *pd;
        int mr_access_flags = 0;

        pd = device->alloc_pd(device, NULL, NULL);
        if (IS_ERR(pd))
                return pd;

        pd->device = device;
        pd->uobject = NULL;
        pd->__internal_mr = NULL;
        atomic_set(&pd->usecnt, 0);
        pd->flags = flags;

        if (device->attrs.device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY)
                pd->local_dma_lkey = device->local_dma_lkey;
        else
                mr_access_flags |= IB_ACCESS_LOCAL_WRITE;

        if (flags & IB_PD_UNSAFE_GLOBAL_RKEY) {
                pr_warn("%s: enabling unsafe global rkey\n", caller);
                mr_access_flags |= IB_ACCESS_REMOTE_READ | IB_ACCESS_REMOTE_WRITE;
        }

        pd->res.type = RDMA_RESTRACK_PD;
        pd->res.kern_name = caller;
        rdma_restrack_add(&pd->res);

        if (mr_access_flags) {
                struct ib_mr *mr;

                mr = pd->device->get_dma_mr(pd, mr_access_flags);
                if (IS_ERR(mr)) {
                        ib_dealloc_pd(pd);
                        return ERR_CAST(mr);
                }

                mr->device = pd->device;
                mr->pd = pd;
                mr->uobject = NULL;
                mr->need_inval = false;

                pd->__internal_mr = mr;

                if (!(device->attrs.device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY))
                        pd->local_dma_lkey = pd->__internal_mr->lkey;

                if (flags & IB_PD_UNSAFE_GLOBAL_RKEY)
                        pd->unsafe_global_rkey = pd->__internal_mr->rkey;
        }

        return pd;
}
EXPORT_SYMBOL(__ib_alloc_pd);

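/*
 * Example (hedged sketch of typical consumer usage): a ULP normally
 * allocates one PD per device through the ib_alloc_pd() wrapper macro and
 * releases it with ib_dealloc_pd() once every QP, MR, and AH created under
 * it has been destroyed.
 *
 *      struct ib_pd *pd = ib_alloc_pd(device, 0);
 *
 *      if (IS_ERR(pd))
 *              return PTR_ERR(pd);
 *      ...
 *      ib_dealloc_pd(pd);
 */
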
/**
 * ib_dealloc_pd - Deallocates a protection domain.
 * @pd: The protection domain to deallocate.
 *
 * It is an error to call this function while any resources in the pd still
 * exist.  The caller is responsible to synchronously destroy them and
 * guarantee no new allocations will happen.
 */
void ib_dealloc_pd(struct ib_pd *pd)
{
        int ret;

        if (pd->__internal_mr) {
                ret = pd->device->dereg_mr(pd->__internal_mr);
                WARN_ON(ret);
                pd->__internal_mr = NULL;
        }

        /* uverbs manipulates usecnt with proper locking, while the kabi
           requires the caller to guarantee we can't race here. */
        WARN_ON(atomic_read(&pd->usecnt));

        rdma_restrack_del(&pd->res);
        /* Making dealloc_pd a void return is a WIP, no driver should return
           an error here. */
        ret = pd->device->dealloc_pd(pd);
        WARN_ONCE(ret, "Infiniband HW driver failed dealloc_pd");
}
EXPORT_SYMBOL(ib_dealloc_pd);

/* Address handles */

static struct ib_ah *_rdma_create_ah(struct ib_pd *pd,
                                     struct rdma_ah_attr *ah_attr,
                                     struct ib_udata *udata)
{
        struct ib_ah *ah;

        ah = pd->device->create_ah(pd, ah_attr, udata);

        if (!IS_ERR(ah)) {
                ah->device  = pd->device;
                ah->pd      = pd;
                ah->uobject = NULL;
                ah->type    = ah_attr->type;
                atomic_inc(&pd->usecnt);
        }

        return ah;
}

struct ib_ah *rdma_create_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr)
{
        return _rdma_create_ah(pd, ah_attr, NULL);
}
EXPORT_SYMBOL(rdma_create_ah);

/**
 * rdma_create_user_ah - Creates an address handle for the given address
 * vector on behalf of a userspace consumer.
 * It resolves the destination mac address for an ah attribute of RoCE type.
 * @pd: The protection domain associated with the address handle.
 * @ah_attr: The attributes of the address vector.
 * @udata: pointer to the user's input/output buffer information needed by
 * the provider driver.
 *
 * It returns a valid address handle on success and an error pointer on
 * failure.  The address handle is used to reference a local or global
 * destination in all UD QP post sends.
 */
struct ib_ah *rdma_create_user_ah(struct ib_pd *pd,
                                  struct rdma_ah_attr *ah_attr,
                                  struct ib_udata *udata)
{
        int err;

        if (ah_attr->type == RDMA_AH_ATTR_TYPE_ROCE) {
                err = ib_resolve_eth_dmac(pd->device, ah_attr);
                if (err)
                        return ERR_PTR(err);
        }

        return _rdma_create_ah(pd, ah_attr, udata);
}
EXPORT_SYMBOL(rdma_create_user_ah);

int ib_get_rdma_header_version(const union rdma_network_hdr *hdr)
{
        const struct iphdr *ip4h = (struct iphdr *)&hdr->roce4grh;
        struct iphdr ip4h_checked;
        const struct ipv6hdr *ip6h = (struct ipv6hdr *)&hdr->ibgrh;

        /* If it's IPv6, the version must be 6, otherwise, the first
         * 20 bytes (before the IPv4 header) are garbled.
         */
        if (ip6h->version != 6)
                return (ip4h->version == 4) ? 4 : 0;
        /* version may be 6 or 4 because the first 20 bytes could be garbled */

        /* RoCE v2 requires no options, thus header length
         * must be 5 words
         */
        if (ip4h->ihl != 5)
                return 6;

        /* Verify checksum.
         * We can't write on scattered buffers so we need to copy to
         * temp buffer.
         */
        memcpy(&ip4h_checked, ip4h, sizeof(ip4h_checked));
        ip4h_checked.check = 0;
        ip4h_checked.check = ip_fast_csum((u8 *)&ip4h_checked, 5);
        /* if IPv4 header checksum is OK, believe it */
        if (ip4h->check == ip4h_checked.check)
                return 4;
        return 6;
}
EXPORT_SYMBOL(ib_get_rdma_header_version);

static enum rdma_network_type ib_get_net_type_by_grh(struct ib_device *device,
                                                     u8 port_num,
                                                     const struct ib_grh *grh)
{
        int grh_version;

        if (rdma_protocol_ib(device, port_num))
                return RDMA_NETWORK_IB;

        grh_version = ib_get_rdma_header_version((union rdma_network_hdr *)grh);

        if (grh_version == 4)
                return RDMA_NETWORK_IPV4;

        if (grh->next_hdr == IPPROTO_UDP)
                return RDMA_NETWORK_IPV6;

        return RDMA_NETWORK_ROCE_V1;
}

struct find_gid_index_context {
        u16 vlan_id;
        enum ib_gid_type gid_type;
};

static bool find_gid_index(const union ib_gid *gid,
                           const struct ib_gid_attr *gid_attr,
                           void *context)
{
        struct find_gid_index_context *ctx = context;

        if (ctx->gid_type != gid_attr->gid_type)
                return false;

        if ((!!(ctx->vlan_id != 0xffff) == !is_vlan_dev(gid_attr->ndev)) ||
            (is_vlan_dev(gid_attr->ndev) &&
             vlan_dev_vlan_id(gid_attr->ndev) != ctx->vlan_id))
                return false;

        return true;
}

static int get_sgid_index_from_eth(struct ib_device *device, u8 port_num,
                                   u16 vlan_id, const union ib_gid *sgid,
                                   enum ib_gid_type gid_type,
                                   u16 *gid_index)
{
        struct find_gid_index_context context = {.vlan_id = vlan_id,
                                                 .gid_type = gid_type};

        return ib_find_gid_by_filter(device, sgid, port_num, find_gid_index,
                                     &context, gid_index);
}

int ib_get_gids_from_rdma_hdr(const union rdma_network_hdr *hdr,
                              enum rdma_network_type net_type,
                              union ib_gid *sgid, union ib_gid *dgid)
{
        struct sockaddr_in src_in;
        struct sockaddr_in dst_in;
        __be32 src_saddr, dst_saddr;

        if (!sgid || !dgid)
                return -EINVAL;

        if (net_type == RDMA_NETWORK_IPV4) {
                memcpy(&src_in.sin_addr.s_addr,
                       &hdr->roce4grh.saddr, 4);
                memcpy(&dst_in.sin_addr.s_addr,
                       &hdr->roce4grh.daddr, 4);
                src_saddr = src_in.sin_addr.s_addr;
                dst_saddr = dst_in.sin_addr.s_addr;
                ipv6_addr_set_v4mapped(src_saddr,
                                       (struct in6_addr *)sgid);
                ipv6_addr_set_v4mapped(dst_saddr,
                                       (struct in6_addr *)dgid);
                return 0;
        } else if (net_type == RDMA_NETWORK_IPV6 ||
                   net_type == RDMA_NETWORK_IB) {
                *dgid = hdr->ibgrh.dgid;
                *sgid = hdr->ibgrh.sgid;
                return 0;
        } else {
                return -EINVAL;
        }
}
EXPORT_SYMBOL(ib_get_gids_from_rdma_hdr);

/* Resolve destination mac address and hop limit for unicast destination
 * GID entry, considering the source GID entry as well.
 * ah_attr must have a valid port_num and sgid_index.
 */
static int ib_resolve_unicast_gid_dmac(struct ib_device *device,
                                       struct rdma_ah_attr *ah_attr)
{
        struct ib_gid_attr sgid_attr;
        struct ib_global_route *grh;
        int hop_limit = 0xff;
        union ib_gid sgid;
        int ret;

        grh = rdma_ah_retrieve_grh(ah_attr);

        ret = ib_query_gid(device,
                           rdma_ah_get_port_num(ah_attr),
                           grh->sgid_index,
                           &sgid, &sgid_attr);
        if (ret || !sgid_attr.ndev) {
                if (!ret)
                        ret = -ENXIO;
                return ret;
        }

        /* If destination is link local and source GID is RoCEv1,
         * IP stack is not used.
         */
        if (rdma_link_local_addr((struct in6_addr *)grh->dgid.raw) &&
            sgid_attr.gid_type == IB_GID_TYPE_ROCE) {
                rdma_get_ll_mac((struct in6_addr *)grh->dgid.raw,
                                ah_attr->roce.dmac);
                goto done;
        }

        ret = rdma_addr_find_l2_eth_by_grh(&sgid, &grh->dgid,
                                           ah_attr->roce.dmac,
                                           sgid_attr.ndev, &hop_limit);
done:
        dev_put(sgid_attr.ndev);

        grh->hop_limit = hop_limit;
        return ret;
}

/*
 * This function initializes address handle attributes from an incoming
 * packet.  The incoming packet has a dgid of the receiving node on which
 * this code is getting executed, and its sgid contains the GID of the
 * sender.
 *
 * When resolving the destination mac address, the arrived dgid is used
 * as the sgid, and the sgid is used as the dgid, because the sgid holds
 * the destination GID to respond to.
 */
int ib_init_ah_attr_from_wc(struct ib_device *device, u8 port_num,
                            const struct ib_wc *wc, const struct ib_grh *grh,
                            struct rdma_ah_attr *ah_attr)
{
        u32 flow_class;
        u16 gid_index;
        int ret;
        enum rdma_network_type net_type = RDMA_NETWORK_IB;
        enum ib_gid_type gid_type = IB_GID_TYPE_IB;
        int hoplimit = 0xff;
        union ib_gid dgid;
        union ib_gid sgid;

        might_sleep();

        memset(ah_attr, 0, sizeof *ah_attr);
        ah_attr->type = rdma_ah_find_type(device, port_num);
        if (rdma_cap_eth_ah(device, port_num)) {
                if (wc->wc_flags & IB_WC_WITH_NETWORK_HDR_TYPE)
                        net_type = wc->network_hdr_type;
                else
                        net_type = ib_get_net_type_by_grh(device, port_num, grh);
                gid_type = ib_network_to_gid_type(net_type);
        }
        ret = ib_get_gids_from_rdma_hdr((union rdma_network_hdr *)grh, net_type,
                                        &sgid, &dgid);
        if (ret)
                return ret;

        rdma_ah_set_sl(ah_attr, wc->sl);
        rdma_ah_set_port_num(ah_attr, port_num);

        if (rdma_protocol_roce(device, port_num)) {
                u16 vlan_id = wc->wc_flags & IB_WC_WITH_VLAN ?
                                wc->vlan_id : 0xffff;

                if (!(wc->wc_flags & IB_WC_GRH))
                        return -EPROTOTYPE;

                ret = get_sgid_index_from_eth(device, port_num,
                                              vlan_id, &dgid,
                                              gid_type, &gid_index);
                if (ret)
                        return ret;

                flow_class = be32_to_cpu(grh->version_tclass_flow);
                rdma_ah_set_grh(ah_attr, &sgid,
                                flow_class & 0xFFFFF,
                                (u8)gid_index, hoplimit,
                                (flow_class >> 20) & 0xFF);
                return ib_resolve_unicast_gid_dmac(device, ah_attr);
        } else {
                rdma_ah_set_dlid(ah_attr, wc->slid);
                rdma_ah_set_path_bits(ah_attr, wc->dlid_path_bits);

                if (wc->wc_flags & IB_WC_GRH) {
                        if (dgid.global.interface_id != cpu_to_be64(IB_SA_WELL_KNOWN_GUID)) {
                                ret = ib_find_cached_gid_by_port(device, &dgid,
                                                                 IB_GID_TYPE_IB,
                                                                 port_num, NULL,
                                                                 &gid_index);
                                if (ret)
                                        return ret;
                        } else {
                                gid_index = 0;
                        }

                        flow_class = be32_to_cpu(grh->version_tclass_flow);
                        rdma_ah_set_grh(ah_attr, &sgid,
                                        flow_class & 0xFFFFF,
                                        (u8)gid_index, hoplimit,
                                        (flow_class >> 20) & 0xFF);
                }
                return 0;
        }
}
EXPORT_SYMBOL(ib_init_ah_attr_from_wc);

struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, const struct ib_wc *wc,
                                   const struct ib_grh *grh, u8 port_num)
{
        struct rdma_ah_attr ah_attr;
        int ret;

        ret = ib_init_ah_attr_from_wc(pd->device, port_num, wc, grh, &ah_attr);
        if (ret)
                return ERR_PTR(ret);

        return rdma_create_ah(pd, &ah_attr);
}
EXPORT_SYMBOL(ib_create_ah_from_wc);

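/*
 * Example (hedged sketch): a UD service that wants to reply to the sender
 * of a received datagram can build the reverse-path AH directly from the
 * work completion and the GRH in the receive buffer.  This assumes the
 * usual UD layout where the first 40 bytes of the buffer hold the GRH;
 * "rx_buf" and "port_num" are hypothetical caller state.
 *
 *      struct ib_grh *grh = (struct ib_grh *)rx_buf;
 *      struct ib_ah *ah;
 *
 *      ah = ib_create_ah_from_wc(pd, &wc, grh, port_num);
 *      if (!IS_ERR(ah)) {
 *              ... post UD sends addressed with "ah" ...
 *              rdma_destroy_ah(ah);
 *      }
 */
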
int rdma_modify_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr)
{
        if (ah->type != ah_attr->type)
                return -EINVAL;

        return ah->device->modify_ah ?
                ah->device->modify_ah(ah, ah_attr) :
                -ENOSYS;
}
EXPORT_SYMBOL(rdma_modify_ah);

int rdma_query_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr)
{
        return ah->device->query_ah ?
                ah->device->query_ah(ah, ah_attr) :
                -ENOSYS;
}
EXPORT_SYMBOL(rdma_query_ah);

int rdma_destroy_ah(struct ib_ah *ah)
{
        struct ib_pd *pd;
        int ret;

        pd = ah->pd;
        ret = ah->device->destroy_ah(ah);
        if (!ret)
                atomic_dec(&pd->usecnt);

        return ret;
}
EXPORT_SYMBOL(rdma_destroy_ah);

/* Shared receive queues */

struct ib_srq *ib_create_srq(struct ib_pd *pd,
                             struct ib_srq_init_attr *srq_init_attr)
{
        struct ib_srq *srq;

        if (!pd->device->create_srq)
                return ERR_PTR(-ENOSYS);

        srq = pd->device->create_srq(pd, srq_init_attr, NULL);

        if (!IS_ERR(srq)) {
                srq->device        = pd->device;
                srq->pd            = pd;
                srq->uobject       = NULL;
                srq->event_handler = srq_init_attr->event_handler;
                srq->srq_context   = srq_init_attr->srq_context;
                srq->srq_type      = srq_init_attr->srq_type;
                if (ib_srq_has_cq(srq->srq_type)) {
                        srq->ext.cq = srq_init_attr->ext.cq;
                        atomic_inc(&srq->ext.cq->usecnt);
                }
                if (srq->srq_type == IB_SRQT_XRC) {
                        srq->ext.xrc.xrcd = srq_init_attr->ext.xrc.xrcd;
                        atomic_inc(&srq->ext.xrc.xrcd->usecnt);
                }
                atomic_inc(&pd->usecnt);
                atomic_set(&srq->usecnt, 0);
        }

        return srq;
}
EXPORT_SYMBOL(ib_create_srq);

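/*
 * Example (hedged sketch): creating a basic SRQ that several QPs can share.
 * The handler name and capacity values are illustrative only.
 *
 *      struct ib_srq_init_attr srq_attr = {
 *              .event_handler = my_srq_event_handler,   // hypothetical
 *              .attr = {
 *                      .max_wr  = 256,
 *                      .max_sge = 1,
 *              },
 *              .srq_type = IB_SRQT_BASIC,
 *      };
 *      struct ib_srq *srq = ib_create_srq(pd, &srq_attr);
 *
 *      if (IS_ERR(srq))
 *              return PTR_ERR(srq);
 */
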
int ib_modify_srq(struct ib_srq *srq,
                  struct ib_srq_attr *srq_attr,
                  enum ib_srq_attr_mask srq_attr_mask)
{
        return srq->device->modify_srq ?
                srq->device->modify_srq(srq, srq_attr, srq_attr_mask, NULL) :
                -ENOSYS;
}
EXPORT_SYMBOL(ib_modify_srq);

int ib_query_srq(struct ib_srq *srq,
                 struct ib_srq_attr *srq_attr)
{
        return srq->device->query_srq ?
                srq->device->query_srq(srq, srq_attr) : -ENOSYS;
}
EXPORT_SYMBOL(ib_query_srq);

int ib_destroy_srq(struct ib_srq *srq)
{
        struct ib_pd *pd;
        enum ib_srq_type srq_type;
        struct ib_xrcd *uninitialized_var(xrcd);
        struct ib_cq *uninitialized_var(cq);
        int ret;

        if (atomic_read(&srq->usecnt))
                return -EBUSY;

        pd = srq->pd;
        srq_type = srq->srq_type;
        if (ib_srq_has_cq(srq_type))
                cq = srq->ext.cq;
        if (srq_type == IB_SRQT_XRC)
                xrcd = srq->ext.xrc.xrcd;

        ret = srq->device->destroy_srq(srq);
        if (!ret) {
                atomic_dec(&pd->usecnt);
                if (srq_type == IB_SRQT_XRC)
                        atomic_dec(&xrcd->usecnt);
                if (ib_srq_has_cq(srq_type))
                        atomic_dec(&cq->usecnt);
        }

        return ret;
}
EXPORT_SYMBOL(ib_destroy_srq);

/* Queue pairs */

static void __ib_shared_qp_event_handler(struct ib_event *event, void *context)
{
        struct ib_qp *qp = context;
        unsigned long flags;

        spin_lock_irqsave(&qp->device->event_handler_lock, flags);
        list_for_each_entry(event->element.qp, &qp->open_list, open_list)
                if (event->element.qp->event_handler)
                        event->element.qp->event_handler(event, event->element.qp->qp_context);
        spin_unlock_irqrestore(&qp->device->event_handler_lock, flags);
}

static void __ib_insert_xrcd_qp(struct ib_xrcd *xrcd, struct ib_qp *qp)
{
        mutex_lock(&xrcd->tgt_qp_mutex);
        list_add(&qp->xrcd_list, &xrcd->tgt_qp_list);
        mutex_unlock(&xrcd->tgt_qp_mutex);
}

static struct ib_qp *__ib_open_qp(struct ib_qp *real_qp,
                                  void (*event_handler)(struct ib_event *, void *),
                                  void *qp_context)
{
        struct ib_qp *qp;
        unsigned long flags;
        int err;

        qp = kzalloc(sizeof *qp, GFP_KERNEL);
        if (!qp)
                return ERR_PTR(-ENOMEM);

        qp->real_qp = real_qp;
        err = ib_open_shared_qp_security(qp, real_qp->device);
        if (err) {
                kfree(qp);
                return ERR_PTR(err);
        }

        atomic_inc(&real_qp->usecnt);
        qp->device = real_qp->device;
        qp->event_handler = event_handler;
        qp->qp_context = qp_context;
        qp->qp_num = real_qp->qp_num;
        qp->qp_type = real_qp->qp_type;

        spin_lock_irqsave(&real_qp->device->event_handler_lock, flags);
        list_add(&qp->open_list, &real_qp->open_list);
        spin_unlock_irqrestore(&real_qp->device->event_handler_lock, flags);

        return qp;
}

struct ib_qp *ib_open_qp(struct ib_xrcd *xrcd,
                         struct ib_qp_open_attr *qp_open_attr)
{
        struct ib_qp *qp, *real_qp;

        if (qp_open_attr->qp_type != IB_QPT_XRC_TGT)
                return ERR_PTR(-EINVAL);

        qp = ERR_PTR(-EINVAL);
        mutex_lock(&xrcd->tgt_qp_mutex);
        list_for_each_entry(real_qp, &xrcd->tgt_qp_list, xrcd_list) {
                if (real_qp->qp_num == qp_open_attr->qp_num) {
                        qp = __ib_open_qp(real_qp, qp_open_attr->event_handler,
                                          qp_open_attr->qp_context);
                        break;
                }
        }
        mutex_unlock(&xrcd->tgt_qp_mutex);
        return qp;
}
EXPORT_SYMBOL(ib_open_qp);

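/*
 * Example (hedged sketch): attaching to an existing XRC target QP by QP
 * number.  Each ib_open_qp() reference must be dropped with ib_close_qp().
 * "my_qp_event_handler" and "tgt_qp_num" are hypothetical caller state.
 *
 *      struct ib_qp_open_attr open_attr = {
 *              .event_handler = my_qp_event_handler,
 *              .qp_num  = tgt_qp_num,
 *              .qp_type = IB_QPT_XRC_TGT,
 *      };
 *      struct ib_qp *qp = ib_open_qp(xrcd, &open_attr);
 *
 *      if (IS_ERR(qp))
 *              return PTR_ERR(qp);
 *      ...
 *      ib_close_qp(qp);
 */
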
static struct ib_qp *ib_create_xrc_qp(struct ib_qp *qp,
                                      struct ib_qp_init_attr *qp_init_attr)
{
        struct ib_qp *real_qp = qp;

        qp->event_handler = __ib_shared_qp_event_handler;
        qp->qp_context = qp;
        qp->pd = NULL;
        qp->send_cq = qp->recv_cq = NULL;
        qp->srq = NULL;
        qp->xrcd = qp_init_attr->xrcd;
        atomic_inc(&qp_init_attr->xrcd->usecnt);
        INIT_LIST_HEAD(&qp->open_list);

        qp = __ib_open_qp(real_qp, qp_init_attr->event_handler,
                          qp_init_attr->qp_context);
        if (!IS_ERR(qp))
                __ib_insert_xrcd_qp(qp_init_attr->xrcd, real_qp);
        else
                real_qp->device->destroy_qp(real_qp);
        return qp;
}

struct ib_qp *ib_create_qp(struct ib_pd *pd,
                           struct ib_qp_init_attr *qp_init_attr)
{
        struct ib_device *device = pd ? pd->device : qp_init_attr->xrcd->device;
        struct ib_qp *qp;
        int ret;

        if (qp_init_attr->rwq_ind_tbl &&
            (qp_init_attr->recv_cq ||
            qp_init_attr->srq || qp_init_attr->cap.max_recv_wr ||
            qp_init_attr->cap.max_recv_sge))
                return ERR_PTR(-EINVAL);

        /*
         * If the caller is using the RDMA API, calculate the resources
         * needed for the RDMA READ/WRITE operations.
         *
         * Note that these callers need to pass in a port number.
         */
        if (qp_init_attr->cap.max_rdma_ctxs)
                rdma_rw_init_qp(device, qp_init_attr);

        qp = _ib_create_qp(device, pd, qp_init_attr, NULL);
        if (IS_ERR(qp))
                return qp;

        ret = ib_create_qp_security(qp, device);
        if (ret) {
                ib_destroy_qp(qp);
                return ERR_PTR(ret);
        }

        qp->real_qp = qp;
        qp->uobject = NULL;
        qp->qp_type = qp_init_attr->qp_type;
        qp->rwq_ind_tbl = qp_init_attr->rwq_ind_tbl;

        atomic_set(&qp->usecnt, 0);
        qp->mrs_used = 0;
        spin_lock_init(&qp->mr_lock);
        INIT_LIST_HEAD(&qp->rdma_mrs);
        INIT_LIST_HEAD(&qp->sig_mrs);
        qp->port = 0;

        if (qp_init_attr->qp_type == IB_QPT_XRC_TGT)
                return ib_create_xrc_qp(qp, qp_init_attr);

        qp->event_handler = qp_init_attr->event_handler;
        qp->qp_context = qp_init_attr->qp_context;
        if (qp_init_attr->qp_type == IB_QPT_XRC_INI) {
                qp->recv_cq = NULL;
                qp->srq = NULL;
        } else {
                qp->recv_cq = qp_init_attr->recv_cq;
                if (qp_init_attr->recv_cq)
                        atomic_inc(&qp_init_attr->recv_cq->usecnt);
                qp->srq = qp_init_attr->srq;
                if (qp->srq)
                        atomic_inc(&qp_init_attr->srq->usecnt);
        }

        qp->send_cq = qp_init_attr->send_cq;
        qp->xrcd    = NULL;

        atomic_inc(&pd->usecnt);
        if (qp_init_attr->send_cq)
                atomic_inc(&qp_init_attr->send_cq->usecnt);
        if (qp_init_attr->rwq_ind_tbl)
                atomic_inc(&qp->rwq_ind_tbl->usecnt);

        if (qp_init_attr->cap.max_rdma_ctxs) {
                ret = rdma_rw_init_mrs(qp, qp_init_attr);
                if (ret) {
                        pr_err("failed to init MR pool ret= %d\n", ret);
                        ib_destroy_qp(qp);
                        return ERR_PTR(ret);
                }
        }

        /*
         * Note: all hw drivers guarantee that max_send_sge is lower than
         * the device RDMA WRITE SGE limit but not all hw drivers ensure that
         * max_send_sge <= max_sge_rd.
         */
        qp->max_write_sge = qp_init_attr->cap.max_send_sge;
        qp->max_read_sge = min_t(u32, qp_init_attr->cap.max_send_sge,
                                 device->attrs.max_sge_rd);

        return qp;
}
EXPORT_SYMBOL(ib_create_qp);

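/*
 * Example (hedged sketch): creating an RC QP for a kernel consumer.  The
 * capacity values are illustrative; real consumers size them against
 * device->attrs, and "my_qp_event_handler" is hypothetical.
 *
 *      struct ib_qp_init_attr init_attr = {
 *              .event_handler = my_qp_event_handler,
 *              .send_cq = cq,
 *              .recv_cq = cq,
 *              .cap = {
 *                      .max_send_wr  = 128,
 *                      .max_recv_wr  = 128,
 *                      .max_send_sge = 2,
 *                      .max_recv_sge = 2,
 *              },
 *              .sq_sig_type = IB_SIGNAL_REQ_WR,
 *              .qp_type = IB_QPT_RC,
 *      };
 *      struct ib_qp *qp = ib_create_qp(pd, &init_attr);
 *
 *      if (IS_ERR(qp))
 *              return PTR_ERR(qp);
 */
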
static const struct {
        int                     valid;
        enum ib_qp_attr_mask    req_param[IB_QPT_MAX];
        enum ib_qp_attr_mask    opt_param[IB_QPT_MAX];
} qp_state_table[IB_QPS_ERR + 1][IB_QPS_ERR + 1] = {
        [IB_QPS_RESET] = {
                [IB_QPS_RESET] = { .valid = 1 },
                [IB_QPS_INIT]  = {
                        .valid = 1,
                        .req_param = {
                                [IB_QPT_UD]  = (IB_QP_PKEY_INDEX |
                                                IB_QP_PORT       |
                                                IB_QP_QKEY),
                                [IB_QPT_RAW_PACKET] = IB_QP_PORT,
                                [IB_QPT_UC]  = (IB_QP_PKEY_INDEX |
                                                IB_QP_PORT       |
                                                IB_QP_ACCESS_FLAGS),
                                [IB_QPT_RC]  = (IB_QP_PKEY_INDEX |
                                                IB_QP_PORT       |
                                                IB_QP_ACCESS_FLAGS),
                                [IB_QPT_XRC_INI] = (IB_QP_PKEY_INDEX |
                                                IB_QP_PORT       |
                                                IB_QP_ACCESS_FLAGS),
                                [IB_QPT_XRC_TGT] = (IB_QP_PKEY_INDEX |
                                                IB_QP_PORT       |
                                                IB_QP_ACCESS_FLAGS),
                                [IB_QPT_SMI] = (IB_QP_PKEY_INDEX |
                                                IB_QP_QKEY),
                                [IB_QPT_GSI] = (IB_QP_PKEY_INDEX |
                                                IB_QP_QKEY),
                        }
                },
        },
        [IB_QPS_INIT]  = {
                [IB_QPS_RESET] = { .valid = 1 },
                [IB_QPS_ERR] =   { .valid = 1 },
                [IB_QPS_INIT]  = {
                        .valid = 1,
                        .opt_param = {
                                [IB_QPT_UD]  = (IB_QP_PKEY_INDEX |
                                                IB_QP_PORT       |
                                                IB_QP_QKEY),
                                [IB_QPT_UC]  = (IB_QP_PKEY_INDEX |
                                                IB_QP_PORT       |
                                                IB_QP_ACCESS_FLAGS),
                                [IB_QPT_RC]  = (IB_QP_PKEY_INDEX |
                                                IB_QP_PORT       |
                                                IB_QP_ACCESS_FLAGS),
                                [IB_QPT_XRC_INI] = (IB_QP_PKEY_INDEX |
                                                IB_QP_PORT       |
                                                IB_QP_ACCESS_FLAGS),
                                [IB_QPT_XRC_TGT] = (IB_QP_PKEY_INDEX |
                                                IB_QP_PORT       |
                                                IB_QP_ACCESS_FLAGS),
                                [IB_QPT_SMI] = (IB_QP_PKEY_INDEX |
                                                IB_QP_QKEY),
                                [IB_QPT_GSI] = (IB_QP_PKEY_INDEX |
                                                IB_QP_QKEY),
                        }
                },
                [IB_QPS_RTR]   = {
                        .valid = 1,
                        .req_param = {
                                [IB_QPT_UC]  = (IB_QP_AV         |
                                                IB_QP_PATH_MTU   |
                                                IB_QP_DEST_QPN   |
                                                IB_QP_RQ_PSN),
                                [IB_QPT_RC]  = (IB_QP_AV         |
                                                IB_QP_PATH_MTU   |
                                                IB_QP_DEST_QPN   |
                                                IB_QP_RQ_PSN     |
                                                IB_QP_MAX_DEST_RD_ATOMIC |
                                                IB_QP_MIN_RNR_TIMER),
                                [IB_QPT_XRC_INI] = (IB_QP_AV     |
                                                IB_QP_PATH_MTU   |
                                                IB_QP_DEST_QPN   |
                                                IB_QP_RQ_PSN),
                                [IB_QPT_XRC_TGT] = (IB_QP_AV     |
                                                IB_QP_PATH_MTU   |
                                                IB_QP_DEST_QPN   |
                                                IB_QP_RQ_PSN     |
                                                IB_QP_MAX_DEST_RD_ATOMIC |
                                                IB_QP_MIN_RNR_TIMER),
                        },
                        .opt_param = {
                                [IB_QPT_UD]  = (IB_QP_PKEY_INDEX |
                                                IB_QP_QKEY),
                                [IB_QPT_UC]  = (IB_QP_ALT_PATH     |
                                                IB_QP_ACCESS_FLAGS |
                                                IB_QP_PKEY_INDEX),
                                [IB_QPT_RC]  = (IB_QP_ALT_PATH     |
                                                IB_QP_ACCESS_FLAGS |
                                                IB_QP_PKEY_INDEX),
                                [IB_QPT_XRC_INI] = (IB_QP_ALT_PATH |
                                                IB_QP_ACCESS_FLAGS |
                                                IB_QP_PKEY_INDEX),
                                [IB_QPT_XRC_TGT] = (IB_QP_ALT_PATH |
                                                IB_QP_ACCESS_FLAGS |
                                                IB_QP_PKEY_INDEX),
                                [IB_QPT_SMI] = (IB_QP_PKEY_INDEX |
                                                IB_QP_QKEY),
                                [IB_QPT_GSI] = (IB_QP_PKEY_INDEX |
                                                IB_QP_QKEY),
                        },
                },
        },
        [IB_QPS_RTR]   = {
                [IB_QPS_RESET] = { .valid = 1 },
                [IB_QPS_ERR] =   { .valid = 1 },
                [IB_QPS_RTS]   = {
                        .valid = 1,
                        .req_param = {
                                [IB_QPT_UD]  = IB_QP_SQ_PSN,
                                [IB_QPT_UC]  = IB_QP_SQ_PSN,
                                [IB_QPT_RC]  = (IB_QP_TIMEOUT    |
                                                IB_QP_RETRY_CNT  |
                                                IB_QP_RNR_RETRY  |
                                                IB_QP_SQ_PSN     |
                                                IB_QP_MAX_QP_RD_ATOMIC),
                                [IB_QPT_XRC_INI] = (IB_QP_TIMEOUT |
                                                IB_QP_RETRY_CNT  |
                                                IB_QP_RNR_RETRY  |
                                                IB_QP_SQ_PSN     |
                                                IB_QP_MAX_QP_RD_ATOMIC),
                                [IB_QPT_XRC_TGT] = (IB_QP_TIMEOUT |
                                                IB_QP_SQ_PSN),
                                [IB_QPT_SMI] = IB_QP_SQ_PSN,
                                [IB_QPT_GSI] = IB_QP_SQ_PSN,
                        },
                        .opt_param = {
                                [IB_QPT_UD]  = (IB_QP_CUR_STATE |
                                                IB_QP_QKEY),
                                [IB_QPT_UC]  = (IB_QP_CUR_STATE    |
                                                IB_QP_ALT_PATH     |
                                                IB_QP_ACCESS_FLAGS |
                                                IB_QP_PATH_MIG_STATE),
                                [IB_QPT_RC]  = (IB_QP_CUR_STATE    |
                                                IB_QP_ALT_PATH     |
                                                IB_QP_ACCESS_FLAGS |
                                                IB_QP_MIN_RNR_TIMER |
                                                IB_QP_PATH_MIG_STATE),
                                [IB_QPT_XRC_INI] = (IB_QP_CUR_STATE |
                                                IB_QP_ALT_PATH     |
                                                IB_QP_ACCESS_FLAGS |
                                                IB_QP_PATH_MIG_STATE),
                                [IB_QPT_XRC_TGT] = (IB_QP_CUR_STATE |
                                                IB_QP_ALT_PATH     |
                                                IB_QP_ACCESS_FLAGS |
                                                IB_QP_MIN_RNR_TIMER |
                                                IB_QP_PATH_MIG_STATE),
                                [IB_QPT_SMI] = (IB_QP_CUR_STATE |
                                                IB_QP_QKEY),
                                [IB_QPT_GSI] = (IB_QP_CUR_STATE |
                                                IB_QP_QKEY),
                                [IB_QPT_RAW_PACKET] = IB_QP_RATE_LIMIT,
                        }
                }
        },
        [IB_QPS_RTS]   = {
                [IB_QPS_RESET] = { .valid = 1 },
                [IB_QPS_ERR] =   { .valid = 1 },
                [IB_QPS_RTS]   = {
                        .valid = 1,
                        .opt_param = {
                                [IB_QPT_UD]  = (IB_QP_CUR_STATE |
                                                IB_QP_QKEY),
                                [IB_QPT_UC]  = (IB_QP_CUR_STATE    |
                                                IB_QP_ACCESS_FLAGS |
                                                IB_QP_ALT_PATH     |
                                                IB_QP_PATH_MIG_STATE),
                                [IB_QPT_RC]  = (IB_QP_CUR_STATE    |
                                                IB_QP_ACCESS_FLAGS |
                                                IB_QP_ALT_PATH     |
                                                IB_QP_PATH_MIG_STATE |
                                                IB_QP_MIN_RNR_TIMER),
                                [IB_QPT_XRC_INI] = (IB_QP_CUR_STATE |
                                                IB_QP_ACCESS_FLAGS |
                                                IB_QP_ALT_PATH     |
                                                IB_QP_PATH_MIG_STATE),
                                [IB_QPT_XRC_TGT] = (IB_QP_CUR_STATE |
                                                IB_QP_ACCESS_FLAGS |
                                                IB_QP_ALT_PATH     |
                                                IB_QP_PATH_MIG_STATE |
                                                IB_QP_MIN_RNR_TIMER),
                                [IB_QPT_SMI] = (IB_QP_CUR_STATE |
                                                IB_QP_QKEY),
                                [IB_QPT_GSI] = (IB_QP_CUR_STATE |
                                                IB_QP_QKEY),
                                [IB_QPT_RAW_PACKET] = IB_QP_RATE_LIMIT,
                        }
                },
                [IB_QPS_SQD]   = {
                        .valid = 1,
                        .opt_param = {
                                [IB_QPT_UD]  = IB_QP_EN_SQD_ASYNC_NOTIFY,
                                [IB_QPT_UC]  = IB_QP_EN_SQD_ASYNC_NOTIFY,
                                [IB_QPT_RC]  = IB_QP_EN_SQD_ASYNC_NOTIFY,
                                [IB_QPT_XRC_INI] = IB_QP_EN_SQD_ASYNC_NOTIFY,
                                [IB_QPT_XRC_TGT] = IB_QP_EN_SQD_ASYNC_NOTIFY, /* ??? */
                                [IB_QPT_SMI] = IB_QP_EN_SQD_ASYNC_NOTIFY,
                                [IB_QPT_GSI] = IB_QP_EN_SQD_ASYNC_NOTIFY
                        }
                },
        },
        [IB_QPS_SQD]   = {
                [IB_QPS_RESET] = { .valid = 1 },
                [IB_QPS_ERR] =   { .valid = 1 },
                [IB_QPS_RTS]   = {
                        .valid = 1,
                        .opt_param = {
                                [IB_QPT_UD]  = (IB_QP_CUR_STATE |
                                                IB_QP_QKEY),
                                [IB_QPT_UC]  = (IB_QP_CUR_STATE    |
                                                IB_QP_ALT_PATH     |
                                                IB_QP_ACCESS_FLAGS |
                                                IB_QP_PATH_MIG_STATE),
                                [IB_QPT_RC]  = (IB_QP_CUR_STATE    |
                                                IB_QP_ALT_PATH     |
                                                IB_QP_ACCESS_FLAGS |
                                                IB_QP_MIN_RNR_TIMER |
                                                IB_QP_PATH_MIG_STATE),
                                [IB_QPT_XRC_INI] = (IB_QP_CUR_STATE |
                                                IB_QP_ALT_PATH     |
                                                IB_QP_ACCESS_FLAGS |
                                                IB_QP_PATH_MIG_STATE),
                                [IB_QPT_XRC_TGT] = (IB_QP_CUR_STATE |
                                                IB_QP_ALT_PATH     |
                                                IB_QP_ACCESS_FLAGS |
                                                IB_QP_MIN_RNR_TIMER |
                                                IB_QP_PATH_MIG_STATE),
                                [IB_QPT_SMI] = (IB_QP_CUR_STATE |
                                                IB_QP_QKEY),
                                [IB_QPT_GSI] = (IB_QP_CUR_STATE |
                                                IB_QP_QKEY),
                        }
                },
                [IB_QPS_SQD]   = {
                        .valid = 1,
                        .opt_param = {
                                [IB_QPT_UD]  = (IB_QP_PKEY_INDEX |
                                                IB_QP_QKEY),
                                [IB_QPT_UC]  = (IB_QP_AV          |
                                                IB_QP_ALT_PATH    |
                                                IB_QP_ACCESS_FLAGS |
                                                IB_QP_PKEY_INDEX  |
                                                IB_QP_PATH_MIG_STATE),
                                [IB_QPT_RC]  = (IB_QP_PORT        |
                                                IB_QP_AV          |
                                                IB_QP_TIMEOUT     |
                                                IB_QP_RETRY_CNT   |
                                                IB_QP_RNR_RETRY   |
                                                IB_QP_MAX_QP_RD_ATOMIC |
                                                IB_QP_MAX_DEST_RD_ATOMIC |
                                                IB_QP_ALT_PATH    |
                                                IB_QP_ACCESS_FLAGS |
                                                IB_QP_PKEY_INDEX  |
                                                IB_QP_MIN_RNR_TIMER |
                                                IB_QP_PATH_MIG_STATE),
                                [IB_QPT_XRC_INI] = (IB_QP_PORT    |
                                                IB_QP_AV          |
                                                IB_QP_TIMEOUT     |
                                                IB_QP_RETRY_CNT   |
                                                IB_QP_RNR_RETRY   |
                                                IB_QP_MAX_QP_RD_ATOMIC |
                                                IB_QP_ALT_PATH    |
                                                IB_QP_ACCESS_FLAGS |
                                                IB_QP_PKEY_INDEX  |
                                                IB_QP_PATH_MIG_STATE),
                                [IB_QPT_XRC_TGT] = (IB_QP_PORT    |
                                                IB_QP_AV          |
                                                IB_QP_TIMEOUT     |
                                                IB_QP_MAX_DEST_RD_ATOMIC |
                                                IB_QP_ALT_PATH    |
                                                IB_QP_ACCESS_FLAGS |
                                                IB_QP_PKEY_INDEX  |
                                                IB_QP_MIN_RNR_TIMER |
                                                IB_QP_PATH_MIG_STATE),
                                [IB_QPT_SMI] = (IB_QP_PKEY_INDEX |
                                                IB_QP_QKEY),
                                [IB_QPT_GSI] = (IB_QP_PKEY_INDEX |
                                                IB_QP_QKEY),
                        }
                }
        },
        [IB_QPS_SQE]   = {
                [IB_QPS_RESET] = { .valid = 1 },
                [IB_QPS_ERR] =   { .valid = 1 },
                [IB_QPS_RTS]   = {
                        .valid = 1,
                        .opt_param = {
                                [IB_QPT_UD]  = (IB_QP_CUR_STATE |
                                                IB_QP_QKEY),
                                [IB_QPT_UC]  = (IB_QP_CUR_STATE |
                                                IB_QP_ACCESS_FLAGS),
                                [IB_QPT_SMI] = (IB_QP_CUR_STATE |
                                                IB_QP_QKEY),
                                [IB_QPT_GSI] = (IB_QP_CUR_STATE |
                                                IB_QP_QKEY),
                        }
                }
        },
        [IB_QPS_ERR] = {
                [IB_QPS_RESET] = { .valid = 1 },
                [IB_QPS_ERR] =   { .valid = 1 }
        }
};

int ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
                       enum ib_qp_type type, enum ib_qp_attr_mask mask,
                       enum rdma_link_layer ll)
{
        enum ib_qp_attr_mask req_param, opt_param;

        if (cur_state  < 0 || cur_state  > IB_QPS_ERR ||
            next_state < 0 || next_state > IB_QPS_ERR)
                return 0;

        if (mask & IB_QP_CUR_STATE  &&
            cur_state != IB_QPS_RTR && cur_state != IB_QPS_RTS &&
            cur_state != IB_QPS_SQD && cur_state != IB_QPS_SQE)
                return 0;

        if (!qp_state_table[cur_state][next_state].valid)
                return 0;

        req_param = qp_state_table[cur_state][next_state].req_param[type];
        opt_param = qp_state_table[cur_state][next_state].opt_param[type];

        if ((mask & req_param) != req_param)
                return 0;

        if (mask & ~(req_param | opt_param | IB_QP_STATE))
                return 0;

        return 1;
}
EXPORT_SYMBOL(ib_modify_qp_is_ok);

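/*
 * Example (hedged sketch): hardware drivers typically consult
 * ib_modify_qp_is_ok() from their modify_qp hook before applying
 * attributes, e.g.:
 *
 *      if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
 *                              attr_mask, IB_LINK_LAYER_UNSPECIFIED))
 *              return -EINVAL;
 */
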
static int ib_resolve_eth_dmac(struct ib_device *device,
                               struct rdma_ah_attr *ah_attr)
{
        int ret = 0;
        struct ib_global_route *grh;

        if (!rdma_is_port_valid(device, rdma_ah_get_port_num(ah_attr)))
                return -EINVAL;

        grh = rdma_ah_retrieve_grh(ah_attr);

        if (rdma_is_multicast_addr((struct in6_addr *)ah_attr->grh.dgid.raw)) {
                if (ipv6_addr_v4mapped((struct in6_addr *)ah_attr->grh.dgid.raw)) {
                        __be32 addr = 0;

                        memcpy(&addr, ah_attr->grh.dgid.raw + 12, 4);
                        ip_eth_mc_map(addr, (char *)ah_attr->roce.dmac);
                } else {
                        ipv6_eth_mc_map((struct in6_addr *)ah_attr->grh.dgid.raw,
                                        (char *)ah_attr->roce.dmac);
                }
        } else {
                ret = ib_resolve_unicast_gid_dmac(device, ah_attr);
        }
        return ret;
}

/*
 * IB core internal function to perform QP attribute modification.
 */
static int _ib_modify_qp(struct ib_qp *qp, struct ib_qp_attr *attr,
                         int attr_mask, struct ib_udata *udata)
{
        u8 port = attr_mask & IB_QP_PORT ? attr->port_num : qp->port;
        int ret;

        if (rdma_ib_or_roce(qp->device, port)) {
                if (attr_mask & IB_QP_RQ_PSN && attr->rq_psn & ~0xffffff) {
                        pr_warn("%s: %s rq_psn overflow, masking to 24 bits\n",
                                __func__, qp->device->name);
                        attr->rq_psn &= 0xffffff;
                }

                if (attr_mask & IB_QP_SQ_PSN && attr->sq_psn & ~0xffffff) {
                        pr_warn("%s: %s sq_psn overflow, masking to 24 bits\n",
                                __func__, qp->device->name);
                        attr->sq_psn &= 0xffffff;
                }
        }

        ret = ib_security_modify_qp(qp, attr, attr_mask, udata);
        if (!ret && (attr_mask & IB_QP_PORT))
                qp->port = attr->port_num;

        return ret;
}

static bool is_qp_type_connected(const struct ib_qp *qp)
{
        return (qp->qp_type == IB_QPT_UC ||
                qp->qp_type == IB_QPT_RC ||
                qp->qp_type == IB_QPT_XRC_INI ||
                qp->qp_type == IB_QPT_XRC_TGT);
}

/**
 * ib_modify_qp_with_udata - Modifies the attributes for the specified QP.
 * @ib_qp: The QP to modify.
 * @attr: On input, specifies the QP attributes to modify.  On output,
 *   the current values of selected QP attributes are returned.
 * @attr_mask: A bit-mask used to specify which attributes of the QP
 *   are being modified.
 * @udata: pointer to the user's input/output buffer information.
 *
 * It returns 0 on success and returns appropriate error code on error.
 */
int ib_modify_qp_with_udata(struct ib_qp *ib_qp, struct ib_qp_attr *attr,
                            int attr_mask, struct ib_udata *udata)
{
        struct ib_qp *qp = ib_qp->real_qp;
        int ret;

        if (attr_mask & IB_QP_AV &&
            attr->ah_attr.type == RDMA_AH_ATTR_TYPE_ROCE &&
            is_qp_type_connected(qp)) {
                ret = ib_resolve_eth_dmac(qp->device, &attr->ah_attr);
                if (ret)
                        return ret;
        }
        return _ib_modify_qp(qp, attr, attr_mask, udata);
}
EXPORT_SYMBOL(ib_modify_qp_with_udata);

Yuval Shaiad4186192017-06-14 23:13:34 +03001391int ib_get_eth_speed(struct ib_device *dev, u8 port_num, u8 *speed, u8 *width)
1392{
1393 int rc;
1394 u32 netdev_speed;
1395 struct net_device *netdev;
1396 struct ethtool_link_ksettings lksettings;
1397
1398 if (rdma_port_get_link_layer(dev, port_num) != IB_LINK_LAYER_ETHERNET)
1399 return -EINVAL;
1400
1401 if (!dev->get_netdev)
1402 return -EOPNOTSUPP;
1403
1404 netdev = dev->get_netdev(dev, port_num);
1405 if (!netdev)
1406 return -ENODEV;
1407
1408 rtnl_lock();
1409 rc = __ethtool_get_link_ksettings(netdev, &lksettings);
1410 rtnl_unlock();
1411
1412 dev_put(netdev);
1413
1414 if (!rc) {
1415 netdev_speed = lksettings.base.speed;
1416 } else {
1417 netdev_speed = SPEED_1000;
1418 pr_warn("%s speed is unknown, defaulting to %d\n", netdev->name,
1419 netdev_speed);
1420 }
1421
1422 if (netdev_speed <= SPEED_1000) {
1423 *width = IB_WIDTH_1X;
1424 *speed = IB_SPEED_SDR;
1425 } else if (netdev_speed <= SPEED_10000) {
1426 *width = IB_WIDTH_1X;
1427 *speed = IB_SPEED_FDR10;
1428 } else if (netdev_speed <= SPEED_20000) {
1429 *width = IB_WIDTH_4X;
1430 *speed = IB_SPEED_DDR;
1431 } else if (netdev_speed <= SPEED_25000) {
1432 *width = IB_WIDTH_1X;
1433 *speed = IB_SPEED_EDR;
1434 } else if (netdev_speed <= SPEED_40000) {
1435 *width = IB_WIDTH_4X;
1436 *speed = IB_SPEED_FDR10;
1437 } else {
1438 *width = IB_WIDTH_4X;
1439 *speed = IB_SPEED_EDR;
1440 }
1441
1442 return 0;
1443}
1444EXPORT_SYMBOL(ib_get_eth_speed);
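
/*
 * Illustrative sketch, not part of the original file: a RoCE driver's
 * query_port() handler can delegate speed/width reporting to
 * ib_get_eth_speed(). The example_ prefix marks this as a hypothetical
 * helper, not an existing driver function.
 */
static int __maybe_unused example_query_port_speed(struct ib_device *ibdev,
                                                   u8 port_num,
                                                   struct ib_port_attr *props)
{
        return ib_get_eth_speed(ibdev, port_num, &props->active_speed,
                                &props->active_width);
}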

int ib_modify_qp(struct ib_qp *qp,
                 struct ib_qp_attr *qp_attr,
                 int qp_attr_mask)
{
        return _ib_modify_qp(qp->real_qp, qp_attr, qp_attr_mask, NULL);
}
EXPORT_SYMBOL(ib_modify_qp);
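
/*
 * Illustrative sketch, not part of the original file: a typical kernel
 * caller moving a freshly created RC QP to INIT with ib_modify_qp().
 * The pkey index and access flags are assumptions chosen for the example.
 */
static int __maybe_unused example_qp_to_init(struct ib_qp *qp, u8 port)
{
        struct ib_qp_attr attr = {
                .qp_state        = IB_QPS_INIT,
                .pkey_index      = 0,
                .port_num        = port,
                .qp_access_flags = IB_ACCESS_REMOTE_READ |
                                   IB_ACCESS_REMOTE_WRITE,
        };

        return ib_modify_qp(qp, &attr, IB_QP_STATE | IB_QP_PKEY_INDEX |
                            IB_QP_PORT | IB_QP_ACCESS_FLAGS);
}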

int ib_query_qp(struct ib_qp *qp,
                struct ib_qp_attr *qp_attr,
                int qp_attr_mask,
                struct ib_qp_init_attr *qp_init_attr)
{
        return qp->device->query_qp ?
                qp->device->query_qp(qp->real_qp, qp_attr, qp_attr_mask, qp_init_attr) :
                -ENOSYS;
}
EXPORT_SYMBOL(ib_query_qp);

int ib_close_qp(struct ib_qp *qp)
{
        struct ib_qp *real_qp;
        unsigned long flags;

        real_qp = qp->real_qp;
        if (real_qp == qp)
                return -EINVAL;

        spin_lock_irqsave(&real_qp->device->event_handler_lock, flags);
        list_del(&qp->open_list);
        spin_unlock_irqrestore(&real_qp->device->event_handler_lock, flags);

        atomic_dec(&real_qp->usecnt);
        if (qp->qp_sec)
                ib_close_shared_qp_security(qp->qp_sec);
        kfree(qp);

        return 0;
}
EXPORT_SYMBOL(ib_close_qp);

static int __ib_destroy_shared_qp(struct ib_qp *qp)
{
        struct ib_xrcd *xrcd;
        struct ib_qp *real_qp;
        int ret;

        real_qp = qp->real_qp;
        xrcd = real_qp->xrcd;

        mutex_lock(&xrcd->tgt_qp_mutex);
        ib_close_qp(qp);
        if (atomic_read(&real_qp->usecnt) == 0)
                list_del(&real_qp->xrcd_list);
        else
                real_qp = NULL;
        mutex_unlock(&xrcd->tgt_qp_mutex);

        if (real_qp) {
                ret = ib_destroy_qp(real_qp);
                if (!ret)
                        atomic_dec(&xrcd->usecnt);
                else
                        __ib_insert_xrcd_qp(xrcd, real_qp);
        }

        return 0;
}

int ib_destroy_qp(struct ib_qp *qp)
{
        struct ib_pd *pd;
        struct ib_cq *scq, *rcq;
        struct ib_srq *srq;
        struct ib_rwq_ind_table *ind_tbl;
        struct ib_qp_security *sec;
        int ret;

        WARN_ON_ONCE(qp->mrs_used > 0);

        if (atomic_read(&qp->usecnt))
                return -EBUSY;

        if (qp->real_qp != qp)
                return __ib_destroy_shared_qp(qp);

        pd      = qp->pd;
        scq     = qp->send_cq;
        rcq     = qp->recv_cq;
        srq     = qp->srq;
        ind_tbl = qp->rwq_ind_tbl;
        sec     = qp->qp_sec;
        if (sec)
                ib_destroy_qp_security_begin(sec);

        if (!qp->uobject)
                rdma_rw_cleanup_mrs(qp);

        rdma_restrack_del(&qp->res);
        ret = qp->device->destroy_qp(qp);
        if (!ret) {
                if (pd)
                        atomic_dec(&pd->usecnt);
                if (scq)
                        atomic_dec(&scq->usecnt);
                if (rcq)
                        atomic_dec(&rcq->usecnt);
                if (srq)
                        atomic_dec(&srq->usecnt);
                if (ind_tbl)
                        atomic_dec(&ind_tbl->usecnt);
                if (sec)
                        ib_destroy_qp_security_end(sec);
        } else {
                if (sec)
                        ib_destroy_qp_security_abort(sec);
        }

        return ret;
}
EXPORT_SYMBOL(ib_destroy_qp);

/* Completion queues */

struct ib_cq *ib_create_cq(struct ib_device *device,
                           ib_comp_handler comp_handler,
                           void (*event_handler)(struct ib_event *, void *),
                           void *cq_context,
                           const struct ib_cq_init_attr *cq_attr)
{
        struct ib_cq *cq;

        cq = device->create_cq(device, cq_attr, NULL, NULL);

        if (!IS_ERR(cq)) {
                cq->device        = device;
                cq->uobject       = NULL;
                cq->comp_handler  = comp_handler;
                cq->event_handler = event_handler;
                cq->cq_context    = cq_context;
                atomic_set(&cq->usecnt, 0);
                cq->res.type = RDMA_RESTRACK_CQ;
                rdma_restrack_add(&cq->res);
        }

        return cq;
}
EXPORT_SYMBOL(ib_create_cq);
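
/*
 * Illustrative sketch, not part of the original file: creating a plain
 * 256-entry CQ on comp_vector 0. The completion handler and context are
 * assumptions supplied by the (hypothetical) caller.
 */
static struct ib_cq *__maybe_unused example_create_cq(struct ib_device *dev,
                                                      ib_comp_handler comp,
                                                      void *ctx)
{
        struct ib_cq_init_attr cq_attr = {
                .cqe         = 256,
                .comp_vector = 0,
        };

        return ib_create_cq(dev, comp, NULL, ctx, &cq_attr);
}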

int rdma_set_cq_moderation(struct ib_cq *cq, u16 cq_count, u16 cq_period)
{
        return cq->device->modify_cq ?
                cq->device->modify_cq(cq, cq_count, cq_period) : -ENOSYS;
}
EXPORT_SYMBOL(rdma_set_cq_moderation);

int ib_destroy_cq(struct ib_cq *cq)
{
        if (atomic_read(&cq->usecnt))
                return -EBUSY;

        rdma_restrack_del(&cq->res);
        return cq->device->destroy_cq(cq);
}
EXPORT_SYMBOL(ib_destroy_cq);

int ib_resize_cq(struct ib_cq *cq, int cqe)
{
        return cq->device->resize_cq ?
                cq->device->resize_cq(cq, cqe, NULL) : -ENOSYS;
}
EXPORT_SYMBOL(ib_resize_cq);

/* Memory regions */

int ib_dereg_mr(struct ib_mr *mr)
{
        struct ib_pd *pd = mr->pd;
        int ret;

        ret = mr->device->dereg_mr(mr);
        if (!ret)
                atomic_dec(&pd->usecnt);

        return ret;
}
EXPORT_SYMBOL(ib_dereg_mr);

/**
 * ib_alloc_mr() - Allocates a memory region
 * @pd: protection domain associated with the region
 * @mr_type: memory region type
 * @max_num_sg: maximum sg entries available for registration.
 *
 * Notes:
 * Memory registration page/sg lists must not exceed max_num_sg.
 * For mr_type IB_MR_TYPE_MEM_REG, the total length cannot exceed
 * max_num_sg * used_page_size.
 */
struct ib_mr *ib_alloc_mr(struct ib_pd *pd,
                          enum ib_mr_type mr_type,
                          u32 max_num_sg)
{
        struct ib_mr *mr;

        if (!pd->device->alloc_mr)
                return ERR_PTR(-ENOSYS);

        mr = pd->device->alloc_mr(pd, mr_type, max_num_sg);
        if (!IS_ERR(mr)) {
                mr->device  = pd->device;
                mr->pd      = pd;
                mr->uobject = NULL;
                atomic_inc(&pd->usecnt);
                mr->need_inval = false;
        }

        return mr;
}
EXPORT_SYMBOL(ib_alloc_mr);
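
/*
 * Illustrative sketch, not part of the original file: allocating a
 * fast-registration MR able to map up to 32 scatterlist entries; the
 * limit of 32 is an assumption chosen for the example.
 */
static struct ib_mr *__maybe_unused example_alloc_reg_mr(struct ib_pd *pd)
{
        return ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, 32);
}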

/* "Fast" memory regions */

struct ib_fmr *ib_alloc_fmr(struct ib_pd *pd,
                            int mr_access_flags,
                            struct ib_fmr_attr *fmr_attr)
{
        struct ib_fmr *fmr;

        if (!pd->device->alloc_fmr)
                return ERR_PTR(-ENOSYS);

        fmr = pd->device->alloc_fmr(pd, mr_access_flags, fmr_attr);
        if (!IS_ERR(fmr)) {
                fmr->device = pd->device;
                fmr->pd     = pd;
                atomic_inc(&pd->usecnt);
        }

        return fmr;
}
EXPORT_SYMBOL(ib_alloc_fmr);

int ib_unmap_fmr(struct list_head *fmr_list)
{
        struct ib_fmr *fmr;

        if (list_empty(fmr_list))
                return 0;

        fmr = list_entry(fmr_list->next, struct ib_fmr, list);
        return fmr->device->unmap_fmr(fmr_list);
}
EXPORT_SYMBOL(ib_unmap_fmr);

int ib_dealloc_fmr(struct ib_fmr *fmr)
{
        struct ib_pd *pd;
        int ret;

        pd = fmr->pd;
        ret = fmr->device->dealloc_fmr(fmr);
        if (!ret)
                atomic_dec(&pd->usecnt);

        return ret;
}
EXPORT_SYMBOL(ib_dealloc_fmr);

/* Multicast groups */

static bool is_valid_mcast_lid(struct ib_qp *qp, u16 lid)
{
        struct ib_qp_init_attr init_attr = {};
        struct ib_qp_attr attr = {};
        int num_eth_ports = 0;
        int port;

        /* If QP state >= init, it is assigned to a port and we can check this
         * port only.
         */
        if (!ib_query_qp(qp, &attr, IB_QP_STATE | IB_QP_PORT, &init_attr)) {
                if (attr.qp_state >= IB_QPS_INIT) {
                        if (rdma_port_get_link_layer(qp->device, attr.port_num) !=
                            IB_LINK_LAYER_INFINIBAND)
                                return true;
                        goto lid_check;
                }
        }

        /* Can't get a quick answer, iterate over all ports */
        for (port = 0; port < qp->device->phys_port_cnt; port++)
                if (rdma_port_get_link_layer(qp->device, port) !=
                    IB_LINK_LAYER_INFINIBAND)
                        num_eth_ports++;

        /* If we have at least one Ethernet port, the RoCE annex declares that
         * the multicast LID should be ignored. We can't tell at this step if
         * the QP belongs to an IB or Ethernet port.
         */
        if (num_eth_ports)
                return true;

        /* If all the ports are IB, we can check according to IB spec. */
lid_check:
        return !(lid < be16_to_cpu(IB_MULTICAST_LID_BASE) ||
                 lid == be16_to_cpu(IB_LID_PERMISSIVE));
}

int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
{
        int ret;

        if (!qp->device->attach_mcast)
                return -ENOSYS;

        if (!rdma_is_multicast_addr((struct in6_addr *)gid->raw) ||
            qp->qp_type != IB_QPT_UD || !is_valid_mcast_lid(qp, lid))
                return -EINVAL;

        ret = qp->device->attach_mcast(qp, gid, lid);
        if (!ret)
                atomic_inc(&qp->usecnt);
        return ret;
}
EXPORT_SYMBOL(ib_attach_mcast);
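
/*
 * Illustrative sketch, not part of the original file: attaching a UD QP
 * to a multicast group. The MGID and MLID would normally come from an
 * SA multicast join response; here they are assumed inputs.
 */
static int __maybe_unused example_attach_mcast(struct ib_qp *qp,
                                               union ib_gid *mgid, u16 mlid)
{
        if (qp->qp_type != IB_QPT_UD)
                return -EINVAL;

        return ib_attach_mcast(qp, mgid, mlid);
}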

int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
{
        int ret;

        if (!qp->device->detach_mcast)
                return -ENOSYS;

        if (!rdma_is_multicast_addr((struct in6_addr *)gid->raw) ||
            qp->qp_type != IB_QPT_UD || !is_valid_mcast_lid(qp, lid))
                return -EINVAL;

        ret = qp->device->detach_mcast(qp, gid, lid);
        if (!ret)
                atomic_dec(&qp->usecnt);
        return ret;
}
EXPORT_SYMBOL(ib_detach_mcast);

struct ib_xrcd *__ib_alloc_xrcd(struct ib_device *device, const char *caller)
{
        struct ib_xrcd *xrcd;

        if (!device->alloc_xrcd)
                return ERR_PTR(-ENOSYS);

        xrcd = device->alloc_xrcd(device, NULL, NULL);
        if (!IS_ERR(xrcd)) {
                xrcd->device = device;
                xrcd->inode = NULL;
                atomic_set(&xrcd->usecnt, 0);
                mutex_init(&xrcd->tgt_qp_mutex);
                INIT_LIST_HEAD(&xrcd->tgt_qp_list);
        }

        return xrcd;
}
EXPORT_SYMBOL(__ib_alloc_xrcd);

int ib_dealloc_xrcd(struct ib_xrcd *xrcd)
{
        struct ib_qp *qp;
        int ret;

        if (atomic_read(&xrcd->usecnt))
                return -EBUSY;

        while (!list_empty(&xrcd->tgt_qp_list)) {
                qp = list_entry(xrcd->tgt_qp_list.next, struct ib_qp, xrcd_list);
                ret = ib_destroy_qp(qp);
                if (ret)
                        return ret;
        }

        return xrcd->device->dealloc_xrcd(xrcd);
}
EXPORT_SYMBOL(ib_dealloc_xrcd);

/**
 * ib_create_wq - Creates a WQ associated with the specified protection
 * domain.
 * @pd: The protection domain associated with the WQ.
 * @wq_attr: A list of initial attributes required to create the
 * WQ. If WQ creation succeeds, then the attributes are updated to
 * the actual capabilities of the created WQ.
 *
 * wq_attr->max_wr and wq_attr->max_sge determine
 * the requested size of the WQ, and are set to the actual values allocated
 * on return.
 * If ib_create_wq() succeeds, then max_wr and max_sge will always be
 * at least as large as the requested values.
 */
struct ib_wq *ib_create_wq(struct ib_pd *pd,
                           struct ib_wq_init_attr *wq_attr)
{
        struct ib_wq *wq;

        if (!pd->device->create_wq)
                return ERR_PTR(-ENOSYS);

        wq = pd->device->create_wq(pd, wq_attr, NULL);
        if (!IS_ERR(wq)) {
                wq->event_handler = wq_attr->event_handler;
                wq->wq_context = wq_attr->wq_context;
                wq->wq_type = wq_attr->wq_type;
                wq->cq = wq_attr->cq;
                wq->device = pd->device;
                wq->pd = pd;
                wq->uobject = NULL;
                atomic_inc(&pd->usecnt);
                atomic_inc(&wq_attr->cq->usecnt);
                atomic_set(&wq->usecnt, 0);
        }
        return wq;
}
EXPORT_SYMBOL(ib_create_wq);
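
/*
 * Illustrative sketch, not part of the original file: creating a receive
 * WQ backed by an existing CQ. The requested sizes are assumptions; as
 * documented above, max_wr/max_sge are updated to the allocated values.
 */
static struct ib_wq *__maybe_unused example_create_rq(struct ib_pd *pd,
                                                      struct ib_cq *cq)
{
        struct ib_wq_init_attr wq_attr = {
                .wq_type = IB_WQT_RQ,
                .max_wr  = 128,
                .max_sge = 1,
                .cq      = cq,
        };

        return ib_create_wq(pd, &wq_attr);
}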

/**
 * ib_destroy_wq - Destroys the specified WQ.
 * @wq: The WQ to destroy.
 */
int ib_destroy_wq(struct ib_wq *wq)
{
        int err;
        struct ib_cq *cq = wq->cq;
        struct ib_pd *pd = wq->pd;

        if (atomic_read(&wq->usecnt))
                return -EBUSY;

        err = wq->device->destroy_wq(wq);
        if (!err) {
                atomic_dec(&pd->usecnt);
                atomic_dec(&cq->usecnt);
        }
        return err;
}
EXPORT_SYMBOL(ib_destroy_wq);

/**
 * ib_modify_wq - Modifies the specified WQ.
 * @wq: The WQ to modify.
 * @wq_attr: On input, specifies the WQ attributes to modify.
 *   On output, the current values of selected WQ attributes are returned.
 * @wq_attr_mask: A bit-mask used to specify which attributes of the WQ
 *   are being modified.
 */
int ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr,
                 u32 wq_attr_mask)
{
        int err;

        if (!wq->device->modify_wq)
                return -ENOSYS;

        err = wq->device->modify_wq(wq, wq_attr, wq_attr_mask, NULL);
        return err;
}
EXPORT_SYMBOL(ib_modify_wq);

/*
 * ib_create_rwq_ind_table - Creates an RQ Indirection Table.
 * @device: The device on which to create the rwq indirection table.
 * @init_attr: A list of initial attributes required to
 * create the Indirection Table.
 *
 * Note: The lifetime of init_attr->ind_tbl must not be shorter than that
 * of the created ib_rwq_ind_table object; the caller is responsible
 * for its memory allocation/free.
 */
struct ib_rwq_ind_table *ib_create_rwq_ind_table(struct ib_device *device,
                                                 struct ib_rwq_ind_table_init_attr *init_attr)
{
        struct ib_rwq_ind_table *rwq_ind_table;
        int i;
        u32 table_size;

        if (!device->create_rwq_ind_table)
                return ERR_PTR(-ENOSYS);

        table_size = (1 << init_attr->log_ind_tbl_size);
        rwq_ind_table = device->create_rwq_ind_table(device,
                                                     init_attr, NULL);
        if (IS_ERR(rwq_ind_table))
                return rwq_ind_table;

        rwq_ind_table->ind_tbl = init_attr->ind_tbl;
        rwq_ind_table->log_ind_tbl_size = init_attr->log_ind_tbl_size;
        rwq_ind_table->device = device;
        rwq_ind_table->uobject = NULL;
        atomic_set(&rwq_ind_table->usecnt, 0);

        for (i = 0; i < table_size; i++)
                atomic_inc(&rwq_ind_table->ind_tbl[i]->usecnt);

        return rwq_ind_table;
}
EXPORT_SYMBOL(ib_create_rwq_ind_table);
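
/*
 * Illustrative sketch, not part of the original file: building a
 * four-entry RSS indirection table from caller-owned WQ pointers. As
 * noted above, the wqs array must outlive the returned table.
 */
static struct ib_rwq_ind_table *__maybe_unused
example_create_ind_table(struct ib_device *dev, struct ib_wq **wqs)
{
        struct ib_rwq_ind_table_init_attr init_attr = {
                .log_ind_tbl_size = 2,  /* table size = 1 << 2 = 4 */
                .ind_tbl          = wqs,
        };

        return ib_create_rwq_ind_table(dev, &init_attr);
}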

/*
 * ib_destroy_rwq_ind_table - Destroys the specified Indirection Table.
 * @rwq_ind_table: The Indirection Table to destroy.
 */
int ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *rwq_ind_table)
{
        int err, i;
        u32 table_size = (1 << rwq_ind_table->log_ind_tbl_size);
        struct ib_wq **ind_tbl = rwq_ind_table->ind_tbl;

        if (atomic_read(&rwq_ind_table->usecnt))
                return -EBUSY;

        err = rwq_ind_table->device->destroy_rwq_ind_table(rwq_ind_table);
        if (!err) {
                for (i = 0; i < table_size; i++)
                        atomic_dec(&ind_tbl[i]->usecnt);
        }

        return err;
}
EXPORT_SYMBOL(ib_destroy_rwq_ind_table);

struct ib_flow *ib_create_flow(struct ib_qp *qp,
                               struct ib_flow_attr *flow_attr,
                               int domain)
{
        struct ib_flow *flow_id;

        if (!qp->device->create_flow)
                return ERR_PTR(-ENOSYS);

        flow_id = qp->device->create_flow(qp, flow_attr, domain);
        if (!IS_ERR(flow_id)) {
                atomic_inc(&qp->usecnt);
                flow_id->qp = qp;
        }
        return flow_id;
}
EXPORT_SYMBOL(ib_create_flow);

int ib_destroy_flow(struct ib_flow *flow_id)
{
        int err;
        struct ib_qp *qp = flow_id->qp;

        err = qp->device->destroy_flow(flow_id);
        if (!err)
                atomic_dec(&qp->usecnt);
        return err;
}
EXPORT_SYMBOL(ib_destroy_flow);

int ib_check_mr_status(struct ib_mr *mr, u32 check_mask,
                       struct ib_mr_status *mr_status)
{
        return mr->device->check_mr_status ?
                mr->device->check_mr_status(mr, check_mask, mr_status) : -ENOSYS;
}
EXPORT_SYMBOL(ib_check_mr_status);
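
/*
 * Illustrative sketch, not part of the original file: after a
 * signature-enabled transfer, a ULP can ask whether the MR recorded a
 * T10-DIF guard/reftag error before completing the I/O.
 */
static bool __maybe_unused example_mr_has_sig_err(struct ib_mr *mr)
{
        struct ib_mr_status mr_status;

        if (ib_check_mr_status(mr, IB_MR_CHECK_SIG_STATUS, &mr_status))
                return false;

        return mr_status.fail_status & IB_MR_CHECK_SIG_STATUS;
}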

int ib_set_vf_link_state(struct ib_device *device, int vf, u8 port,
                         int state)
{
        if (!device->set_vf_link_state)
                return -ENOSYS;

        return device->set_vf_link_state(device, vf, port, state);
}
EXPORT_SYMBOL(ib_set_vf_link_state);

int ib_get_vf_config(struct ib_device *device, int vf, u8 port,
                     struct ifla_vf_info *info)
{
        if (!device->get_vf_config)
                return -ENOSYS;

        return device->get_vf_config(device, vf, port, info);
}
EXPORT_SYMBOL(ib_get_vf_config);

int ib_get_vf_stats(struct ib_device *device, int vf, u8 port,
                    struct ifla_vf_stats *stats)
{
        if (!device->get_vf_stats)
                return -ENOSYS;

        return device->get_vf_stats(device, vf, port, stats);
}
EXPORT_SYMBOL(ib_get_vf_stats);

int ib_set_vf_guid(struct ib_device *device, int vf, u8 port, u64 guid,
                   int type)
{
        if (!device->set_vf_guid)
                return -ENOSYS;

        return device->set_vf_guid(device, vf, port, guid, type);
}
EXPORT_SYMBOL(ib_set_vf_guid);

/**
 * ib_map_mr_sg() - Map the largest prefix of a dma mapped SG list
 * and set it on the memory region.
 * @mr: memory region
 * @sg: dma mapped scatterlist
 * @sg_nents: number of entries in sg
 * @sg_offset: offset in bytes into sg
 * @page_size: page vector desired page size
 *
 * Constraints:
 * - The first sg element is allowed to have an offset.
 * - Each sg element must either be aligned to page_size or virtually
 *   contiguous to the previous element. In case an sg element has a
 *   non-contiguous offset, the mapping prefix will not include it.
 * - The last sg element is allowed to have length less than page_size.
 * - If the total byte length of sg_nents exceeds the MR's max_num_sg *
 *   page_size, then only max_num_sg entries will be mapped.
 * - If the MR was allocated with type IB_MR_TYPE_SG_GAPS, none of these
 *   constraints holds and the page_size argument is ignored.
 *
 * Returns the number of sg elements that were mapped to the memory region.
 *
 * After this completes successfully, the memory region
 * is ready for registration.
 */
int ib_map_mr_sg(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
                 unsigned int *sg_offset, unsigned int page_size)
{
        if (unlikely(!mr->device->map_mr_sg))
                return -ENOSYS;

        mr->page_size = page_size;

        return mr->device->map_mr_sg(mr, sg, sg_nents, sg_offset);
}
EXPORT_SYMBOL(ib_map_mr_sg);
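
/*
 * Illustrative sketch, not part of the original file: the usual fast
 * registration flow. The scatterlist is assumed to be DMA mapped
 * already; once ib_map_mr_sg() succeeds, the MR is registered by
 * posting an IB_WR_REG_MR work request on the QP.
 */
static int __maybe_unused example_fast_reg(struct ib_qp *qp, struct ib_mr *mr,
                                           struct scatterlist *sg, int sg_nents)
{
        struct ib_reg_wr reg_wr = {};
        struct ib_send_wr *bad_wr;
        int n;

        n = ib_map_mr_sg(mr, sg, sg_nents, NULL, PAGE_SIZE);
        if (n < sg_nents)
                return n < 0 ? n : -EINVAL;

        reg_wr.wr.opcode = IB_WR_REG_MR;
        reg_wr.mr        = mr;
        reg_wr.key       = mr->lkey;
        reg_wr.access    = IB_ACCESS_LOCAL_WRITE;

        return ib_post_send(qp, &reg_wr.wr, &bad_wr);
}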

/**
 * ib_sg_to_pages() - Convert the largest prefix of a sg list
 * to a page vector
 * @mr: memory region
 * @sgl: dma mapped scatterlist
 * @sg_nents: number of entries in sg
 * @sg_offset_p: IN: start offset in bytes into sg
 *               OUT: offset in bytes for element n of the sg of the first
 *                    byte that has not been processed, where n is the return
 *                    value of this function.
 * @set_page: driver page assignment function pointer
 *
 * Core service helper for drivers to convert the largest
 * prefix of given sg list to a page vector. The sg list
 * prefix converted is the prefix that meets the requirements
 * of ib_map_mr_sg.
 *
 * Returns the number of sg elements that were assigned to
 * a page vector.
 */
int ib_sg_to_pages(struct ib_mr *mr, struct scatterlist *sgl, int sg_nents,
                   unsigned int *sg_offset_p, int (*set_page)(struct ib_mr *, u64))
{
        struct scatterlist *sg;
        u64 last_end_dma_addr = 0;
        unsigned int sg_offset = sg_offset_p ? *sg_offset_p : 0;
        unsigned int last_page_off = 0;
        u64 page_mask = ~((u64)mr->page_size - 1);
        int i, ret;

        if (unlikely(sg_nents <= 0 || sg_offset > sg_dma_len(&sgl[0])))
                return -EINVAL;

        mr->iova = sg_dma_address(&sgl[0]) + sg_offset;
        mr->length = 0;

        for_each_sg(sgl, sg, sg_nents, i) {
                u64 dma_addr = sg_dma_address(sg) + sg_offset;
                u64 prev_addr = dma_addr;
                unsigned int dma_len = sg_dma_len(sg) - sg_offset;
                u64 end_dma_addr = dma_addr + dma_len;
                u64 page_addr = dma_addr & page_mask;

                /*
                 * For the second and later elements, check whether either the
                 * end of element i-1 or the start of element i is not aligned
                 * on a page boundary.
                 */
                if (i && (last_page_off != 0 || page_addr != dma_addr)) {
                        /* Stop mapping if there is a gap. */
                        if (last_end_dma_addr != dma_addr)
                                break;

                        /*
                         * Coalesce this element with the last. If it is small
                         * enough just update mr->length. Otherwise start
                         * mapping from the next page.
                         */
                        goto next_page;
                }

                do {
                        ret = set_page(mr, page_addr);
                        if (unlikely(ret < 0)) {
                                sg_offset = prev_addr - sg_dma_address(sg);
                                mr->length += prev_addr - dma_addr;
                                if (sg_offset_p)
                                        *sg_offset_p = sg_offset;
                                return i || sg_offset ? i : ret;
                        }
                        prev_addr = page_addr;
next_page:
                        page_addr += mr->page_size;
                } while (page_addr < end_dma_addr);

                mr->length += dma_len;
                last_end_dma_addr = end_dma_addr;
                last_page_off = end_dma_addr & ~page_mask;

                sg_offset = 0;
        }

        if (sg_offset_p)
                *sg_offset_p = 0;
        return i;
}
EXPORT_SYMBOL(ib_sg_to_pages);
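
/*
 * Illustrative sketch, not part of the original file: how a driver's
 * map_mr_sg() method typically wraps ib_sg_to_pages(), collecting page
 * addresses through its set_page callback. The example_mr container and
 * its page list are hypothetical.
 */
struct example_mr {
        struct ib_mr ibmr;
        u64 *pages;
        int npages;
        int max_pages;
};

static int example_set_page(struct ib_mr *ibmr, u64 addr)
{
        struct example_mr *emr = container_of(ibmr, struct example_mr, ibmr);

        if (emr->npages == emr->max_pages)
                return -ENOMEM;

        emr->pages[emr->npages++] = addr;
        return 0;
}

static int __maybe_unused example_map_mr_sg(struct ib_mr *ibmr,
                                            struct scatterlist *sg, int sg_nents,
                                            unsigned int *sg_offset)
{
        struct example_mr *emr = container_of(ibmr, struct example_mr, ibmr);

        emr->npages = 0;
        return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, example_set_page);
}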

struct ib_drain_cqe {
        struct ib_cqe cqe;
        struct completion done;
};

static void ib_drain_qp_done(struct ib_cq *cq, struct ib_wc *wc)
{
        struct ib_drain_cqe *cqe = container_of(wc->wr_cqe, struct ib_drain_cqe,
                                                cqe);

        complete(&cqe->done);
}

/*
 * Post a WR and block until its completion is reaped for the SQ.
 */
static void __ib_drain_sq(struct ib_qp *qp)
{
        struct ib_cq *cq = qp->send_cq;
        struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
        struct ib_drain_cqe sdrain;
        struct ib_send_wr swr = {}, *bad_swr;
        int ret;

        ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
        if (ret) {
                WARN_ONCE(ret, "failed to drain send queue: %d\n", ret);
                return;
        }

        swr.wr_cqe = &sdrain.cqe;
        sdrain.cqe.done = ib_drain_qp_done;
        init_completion(&sdrain.done);

        ret = ib_post_send(qp, &swr, &bad_swr);
        if (ret) {
                WARN_ONCE(ret, "failed to drain send queue: %d\n", ret);
                return;
        }

        if (cq->poll_ctx == IB_POLL_DIRECT)
                while (wait_for_completion_timeout(&sdrain.done, HZ / 10) <= 0)
                        ib_process_cq_direct(cq, -1);
        else
                wait_for_completion(&sdrain.done);
}

/*
 * Post a WR and block until its completion is reaped for the RQ.
 */
static void __ib_drain_rq(struct ib_qp *qp)
{
        struct ib_cq *cq = qp->recv_cq;
        struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
        struct ib_drain_cqe rdrain;
        struct ib_recv_wr rwr = {}, *bad_rwr;
        int ret;

        ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
        if (ret) {
                WARN_ONCE(ret, "failed to drain recv queue: %d\n", ret);
                return;
        }

        rwr.wr_cqe = &rdrain.cqe;
        rdrain.cqe.done = ib_drain_qp_done;
        init_completion(&rdrain.done);

        ret = ib_post_recv(qp, &rwr, &bad_rwr);
        if (ret) {
                WARN_ONCE(ret, "failed to drain recv queue: %d\n", ret);
                return;
        }

        if (cq->poll_ctx == IB_POLL_DIRECT)
                while (wait_for_completion_timeout(&rdrain.done, HZ / 10) <= 0)
                        ib_process_cq_direct(cq, -1);
        else
                wait_for_completion(&rdrain.done);
}

/**
 * ib_drain_sq() - Block until all SQ CQEs have been consumed by the
 *                 application.
 * @qp:            queue pair to drain
 *
 * If the device has a provider-specific drain function, then
 * call that.  Otherwise call the generic drain function
 * __ib_drain_sq().
 *
 * The caller must:
 *
 * ensure there is room in the CQ and SQ for the drain work request and
 * completion.
 *
 * allocate the CQ using ib_alloc_cq().
 *
 * ensure that there are no other contexts that are posting WRs concurrently.
 * Otherwise the drain is not guaranteed.
 */
void ib_drain_sq(struct ib_qp *qp)
{
        if (qp->device->drain_sq)
                qp->device->drain_sq(qp);
        else
                __ib_drain_sq(qp);
}
EXPORT_SYMBOL(ib_drain_sq);

/**
 * ib_drain_rq() - Block until all RQ CQEs have been consumed by the
 *                 application.
 * @qp:            queue pair to drain
 *
 * If the device has a provider-specific drain function, then
 * call that.  Otherwise call the generic drain function
 * __ib_drain_rq().
 *
 * The caller must:
 *
 * ensure there is room in the CQ and RQ for the drain work request and
 * completion.
 *
 * allocate the CQ using ib_alloc_cq().
 *
 * ensure that there are no other contexts that are posting WRs concurrently.
 * Otherwise the drain is not guaranteed.
 */
void ib_drain_rq(struct ib_qp *qp)
{
        if (qp->device->drain_rq)
                qp->device->drain_rq(qp);
        else
                __ib_drain_rq(qp);
}
EXPORT_SYMBOL(ib_drain_rq);

/**
 * ib_drain_qp() - Block until all CQEs have been consumed by the
 *                 application on both the RQ and SQ.
 * @qp:            queue pair to drain
 *
 * The caller must:
 *
 * ensure there is room in the CQ(s), SQ, and RQ for drain work requests
 * and completions.
 *
 * allocate the CQs using ib_alloc_cq().
 *
 * ensure that there are no other contexts that are posting WRs concurrently.
 * Otherwise the drain is not guaranteed.
 */
void ib_drain_qp(struct ib_qp *qp)
{
        ib_drain_sq(qp);
        if (!qp->srq)
                ib_drain_rq(qp);
}
EXPORT_SYMBOL(ib_drain_qp);
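
/*
 * Illustrative sketch, not part of the original file: the usual teardown
 * order for a ULP. Draining flushes outstanding WRs so that no
 * completion can reference freed memory once the QP is destroyed.
 */
static void __maybe_unused example_teardown_qp(struct ib_qp *qp)
{
        ib_drain_qp(qp);        /* flushes SQ, and RQ unless an SRQ is used */
        ib_destroy_qp(qp);
}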