/*
 * Copyright (c) 2004 Mellanox Technologies Ltd.  All rights reserved.
 * Copyright (c) 2004 Infinicon Corporation.  All rights reserved.
 * Copyright (c) 2004 Intel Corporation.  All rights reserved.
 * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
 * Copyright (c) 2004 Voltaire Corporation.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005, 2006 Cisco Systems.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <net/addrconf.h>
#include <linux/security.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_cache.h>
#include <rdma/ib_addr.h>
#include <rdma/rw.h>

#include "core_priv.h"

static int ib_resolve_eth_dmac(struct ib_device *device,
			       struct rdma_ah_attr *ah_attr);

static const char * const ib_events[] = {
	[IB_EVENT_CQ_ERR]		= "CQ error",
	[IB_EVENT_QP_FATAL]		= "QP fatal error",
	[IB_EVENT_QP_REQ_ERR]		= "QP request error",
	[IB_EVENT_QP_ACCESS_ERR]	= "QP access error",
	[IB_EVENT_COMM_EST]		= "communication established",
	[IB_EVENT_SQ_DRAINED]		= "send queue drained",
	[IB_EVENT_PATH_MIG]		= "path migration successful",
	[IB_EVENT_PATH_MIG_ERR]		= "path migration error",
	[IB_EVENT_DEVICE_FATAL]		= "device fatal error",
	[IB_EVENT_PORT_ACTIVE]		= "port active",
	[IB_EVENT_PORT_ERR]		= "port error",
	[IB_EVENT_LID_CHANGE]		= "LID change",
	[IB_EVENT_PKEY_CHANGE]		= "P_key change",
	[IB_EVENT_SM_CHANGE]		= "SM change",
	[IB_EVENT_SRQ_ERR]		= "SRQ error",
	[IB_EVENT_SRQ_LIMIT_REACHED]	= "SRQ limit reached",
	[IB_EVENT_QP_LAST_WQE_REACHED]	= "last WQE reached",
	[IB_EVENT_CLIENT_REREGISTER]	= "client reregister",
	[IB_EVENT_GID_CHANGE]		= "GID changed",
};

const char *__attribute_const__ ib_event_msg(enum ib_event_type event)
{
	size_t index = event;

	return (index < ARRAY_SIZE(ib_events) && ib_events[index]) ?
			ib_events[index] : "unrecognized event";
}
EXPORT_SYMBOL(ib_event_msg);

static const char * const wc_statuses[] = {
	[IB_WC_SUCCESS]			= "success",
	[IB_WC_LOC_LEN_ERR]		= "local length error",
	[IB_WC_LOC_QP_OP_ERR]		= "local QP operation error",
	[IB_WC_LOC_EEC_OP_ERR]		= "local EE context operation error",
	[IB_WC_LOC_PROT_ERR]		= "local protection error",
	[IB_WC_WR_FLUSH_ERR]		= "WR flushed",
	[IB_WC_MW_BIND_ERR]		= "memory management operation error",
	[IB_WC_BAD_RESP_ERR]		= "bad response error",
	[IB_WC_LOC_ACCESS_ERR]		= "local access error",
	[IB_WC_REM_INV_REQ_ERR]		= "invalid request error",
	[IB_WC_REM_ACCESS_ERR]		= "remote access error",
	[IB_WC_REM_OP_ERR]		= "remote operation error",
	[IB_WC_RETRY_EXC_ERR]		= "transport retry counter exceeded",
	[IB_WC_RNR_RETRY_EXC_ERR]	= "RNR retry counter exceeded",
	[IB_WC_LOC_RDD_VIOL_ERR]	= "local RDD violation error",
	[IB_WC_REM_INV_RD_REQ_ERR]	= "remote invalid RD request",
	[IB_WC_REM_ABORT_ERR]		= "operation aborted",
	[IB_WC_INV_EECN_ERR]		= "invalid EE context number",
	[IB_WC_INV_EEC_STATE_ERR]	= "invalid EE context state",
	[IB_WC_FATAL_ERR]		= "fatal error",
	[IB_WC_RESP_TIMEOUT_ERR]	= "response timeout error",
	[IB_WC_GENERAL_ERR]		= "general error",
};

const char *__attribute_const__ ib_wc_status_msg(enum ib_wc_status status)
{
	size_t index = status;

	return (index < ARRAY_SIZE(wc_statuses) && wc_statuses[index]) ?
			wc_statuses[index] : "unrecognized status";
}
EXPORT_SYMBOL(ib_wc_status_msg);

__attribute_const__ int ib_rate_to_mult(enum ib_rate rate)
{
	switch (rate) {
	case IB_RATE_2_5_GBPS: return  1;
	case IB_RATE_5_GBPS:   return  2;
	case IB_RATE_10_GBPS:  return  4;
	case IB_RATE_20_GBPS:  return  8;
	case IB_RATE_30_GBPS:  return 12;
	case IB_RATE_40_GBPS:  return 16;
	case IB_RATE_60_GBPS:  return 24;
	case IB_RATE_80_GBPS:  return 32;
	case IB_RATE_120_GBPS: return 48;
	default:	       return -1;
	}
}
EXPORT_SYMBOL(ib_rate_to_mult);

__attribute_const__ enum ib_rate mult_to_ib_rate(int mult)
{
	switch (mult) {
	case 1:  return IB_RATE_2_5_GBPS;
	case 2:  return IB_RATE_5_GBPS;
	case 4:  return IB_RATE_10_GBPS;
	case 8:  return IB_RATE_20_GBPS;
	case 12: return IB_RATE_30_GBPS;
	case 16: return IB_RATE_40_GBPS;
	case 24: return IB_RATE_60_GBPS;
	case 32: return IB_RATE_80_GBPS;
	case 48: return IB_RATE_120_GBPS;
	default: return IB_RATE_PORT_CURRENT;
	}
}
EXPORT_SYMBOL(mult_to_ib_rate);

__attribute_const__ int ib_rate_to_mbps(enum ib_rate rate)
{
	switch (rate) {
	case IB_RATE_2_5_GBPS: return 2500;
	case IB_RATE_5_GBPS:   return 5000;
	case IB_RATE_10_GBPS:  return 10000;
	case IB_RATE_20_GBPS:  return 20000;
	case IB_RATE_30_GBPS:  return 30000;
	case IB_RATE_40_GBPS:  return 40000;
	case IB_RATE_60_GBPS:  return 60000;
	case IB_RATE_80_GBPS:  return 80000;
	case IB_RATE_120_GBPS: return 120000;
	case IB_RATE_14_GBPS:  return 14062;
	case IB_RATE_56_GBPS:  return 56250;
	case IB_RATE_112_GBPS: return 112500;
	case IB_RATE_168_GBPS: return 168750;
	case IB_RATE_25_GBPS:  return 25781;
	case IB_RATE_100_GBPS: return 103125;
	case IB_RATE_200_GBPS: return 206250;
	case IB_RATE_300_GBPS: return 309375;
	default:	       return -1;
	}
}
EXPORT_SYMBOL(ib_rate_to_mbps);

__attribute_const__ enum rdma_transport_type
rdma_node_get_transport(enum rdma_node_type node_type)
{

	if (node_type == RDMA_NODE_USNIC)
		return RDMA_TRANSPORT_USNIC;
	if (node_type == RDMA_NODE_USNIC_UDP)
		return RDMA_TRANSPORT_USNIC_UDP;
	if (node_type == RDMA_NODE_RNIC)
		return RDMA_TRANSPORT_IWARP;

	return RDMA_TRANSPORT_IB;
}
EXPORT_SYMBOL(rdma_node_get_transport);

enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device, u8 port_num)
{
	enum rdma_transport_type lt;
	if (device->get_link_layer)
		return device->get_link_layer(device, port_num);

	lt = rdma_node_get_transport(device->node_type);
	if (lt == RDMA_TRANSPORT_IB)
		return IB_LINK_LAYER_INFINIBAND;

	return IB_LINK_LAYER_ETHERNET;
}
EXPORT_SYMBOL(rdma_port_get_link_layer);

/* Protection domains */

/**
 * ib_alloc_pd - Allocates an unused protection domain.
 * @device: The device on which to allocate the protection domain.
 *
 * A protection domain object provides an association between QPs, shared
 * receive queues, address handles, memory regions, and memory windows.
 *
 * Every PD has a local_dma_lkey which can be used as the lkey value for local
 * memory operations.
 */
struct ib_pd *__ib_alloc_pd(struct ib_device *device, unsigned int flags,
			    const char *caller)
{
	struct ib_pd *pd;
	int mr_access_flags = 0;

	pd = device->alloc_pd(device, NULL, NULL);
	if (IS_ERR(pd))
		return pd;

	pd->device = device;
	pd->uobject = NULL;
	pd->__internal_mr = NULL;
	atomic_set(&pd->usecnt, 0);
	pd->flags = flags;

	if (device->attrs.device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY)
		pd->local_dma_lkey = device->local_dma_lkey;
	else
		mr_access_flags |= IB_ACCESS_LOCAL_WRITE;

	if (flags & IB_PD_UNSAFE_GLOBAL_RKEY) {
		pr_warn("%s: enabling unsafe global rkey\n", caller);
		mr_access_flags |= IB_ACCESS_REMOTE_READ | IB_ACCESS_REMOTE_WRITE;
	}

	if (mr_access_flags) {
		struct ib_mr *mr;

		mr = pd->device->get_dma_mr(pd, mr_access_flags);
		if (IS_ERR(mr)) {
			ib_dealloc_pd(pd);
			return ERR_CAST(mr);
		}

		mr->device	= pd->device;
		mr->pd		= pd;
		mr->uobject	= NULL;
		mr->need_inval	= false;

		pd->__internal_mr = mr;

		if (!(device->attrs.device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY))
			pd->local_dma_lkey = pd->__internal_mr->lkey;

		if (flags & IB_PD_UNSAFE_GLOBAL_RKEY)
			pd->unsafe_global_rkey = pd->__internal_mr->rkey;
	}

	return pd;
}
EXPORT_SYMBOL(__ib_alloc_pd);
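
/*
 * Illustrative usage sketch (editorial addition, not part of the upstream
 * file): a kernel ULP would normally allocate a PD through the ib_alloc_pd()
 * wrapper, which is assumed here to pass the module name as @caller:
 *
 *	pd = ib_alloc_pd(device, 0);
 *	if (IS_ERR(pd))
 *		return PTR_ERR(pd);
 *	...
 *	ib_dealloc_pd(pd);
 */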

/**
 * ib_dealloc_pd - Deallocates a protection domain.
 * @pd: The protection domain to deallocate.
 *
 * It is an error to call this function while any resources in the pd still
 * exist.  The caller is responsible to synchronously destroy them and
 * guarantee no new allocations will happen.
 */
void ib_dealloc_pd(struct ib_pd *pd)
{
	int ret;

	if (pd->__internal_mr) {
		ret = pd->device->dereg_mr(pd->__internal_mr);
		WARN_ON(ret);
		pd->__internal_mr = NULL;
	}

	/* uverbs manipulates usecnt with proper locking, while the kabi
	   requires the caller to guarantee we can't race here. */
	WARN_ON(atomic_read(&pd->usecnt));

	/* Making dealloc_pd a void return is a WIP, no driver should return
	   an error here. */
	ret = pd->device->dealloc_pd(pd);
	WARN_ONCE(ret, "Infiniband HW driver failed dealloc_pd");
}
EXPORT_SYMBOL(ib_dealloc_pd);

/* Address handles */

static struct ib_ah *_rdma_create_ah(struct ib_pd *pd,
				     struct rdma_ah_attr *ah_attr,
				     struct ib_udata *udata)
{
	struct ib_ah *ah;

	ah = pd->device->create_ah(pd, ah_attr, udata);

	if (!IS_ERR(ah)) {
		ah->device  = pd->device;
		ah->pd      = pd;
		ah->uobject = NULL;
		ah->type    = ah_attr->type;
		atomic_inc(&pd->usecnt);
	}

	return ah;
}

struct ib_ah *rdma_create_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr)
{
	return _rdma_create_ah(pd, ah_attr, NULL);
}
EXPORT_SYMBOL(rdma_create_ah);

/**
 * rdma_create_user_ah - Creates an address handle for the given address
 * vector, resolving the destination MAC address for AH attributes of
 * RoCE type.
 * @pd: The protection domain associated with the address handle.
 * @ah_attr: The attributes of the address vector.
 * @udata: pointer to the user's input/output buffer information needed by
 *         the provider driver.
 *
 * Returns the new address handle on success, or an ERR_PTR on error.
 * The address handle is used to reference a local or global destination
 * in all UD QP post sends.
 */
struct ib_ah *rdma_create_user_ah(struct ib_pd *pd,
				  struct rdma_ah_attr *ah_attr,
				  struct ib_udata *udata)
{
	int err;

	if (ah_attr->type == RDMA_AH_ATTR_TYPE_ROCE) {
		err = ib_resolve_eth_dmac(pd->device, ah_attr);
		if (err)
			return ERR_PTR(err);
	}

	return _rdma_create_ah(pd, ah_attr, udata);
}
EXPORT_SYMBOL(rdma_create_user_ah);

int ib_get_rdma_header_version(const union rdma_network_hdr *hdr)
{
	const struct iphdr *ip4h = (struct iphdr *)&hdr->roce4grh;
	struct iphdr ip4h_checked;
	const struct ipv6hdr *ip6h = (struct ipv6hdr *)&hdr->ibgrh;

	/* If it's IPv6, the version must be 6, otherwise, the first
	 * 20 bytes (before the IPv4 header) are garbled.
	 */
	if (ip6h->version != 6)
		return (ip4h->version == 4) ? 4 : 0;
	/* version may be 6 or 4 because the first 20 bytes could be garbled */

	/* RoCE v2 requires no options, thus header length
	 * must be 5 words
	 */
	if (ip4h->ihl != 5)
		return 6;

	/* Verify checksum.
	 * We can't write on scattered buffers so we need to copy to
	 * temp buffer.
	 */
	memcpy(&ip4h_checked, ip4h, sizeof(ip4h_checked));
	ip4h_checked.check = 0;
	ip4h_checked.check = ip_fast_csum((u8 *)&ip4h_checked, 5);
	/* if IPv4 header checksum is OK, believe it */
	if (ip4h->check == ip4h_checked.check)
		return 4;
	return 6;
}
EXPORT_SYMBOL(ib_get_rdma_header_version);

static enum rdma_network_type ib_get_net_type_by_grh(struct ib_device *device,
						     u8 port_num,
						     const struct ib_grh *grh)
{
	int grh_version;

	if (rdma_protocol_ib(device, port_num))
		return RDMA_NETWORK_IB;

	grh_version = ib_get_rdma_header_version((union rdma_network_hdr *)grh);

	if (grh_version == 4)
		return RDMA_NETWORK_IPV4;

	if (grh->next_hdr == IPPROTO_UDP)
		return RDMA_NETWORK_IPV6;

	return RDMA_NETWORK_ROCE_V1;
}

struct find_gid_index_context {
	u16 vlan_id;
	enum ib_gid_type gid_type;
};

static bool find_gid_index(const union ib_gid *gid,
			   const struct ib_gid_attr *gid_attr,
			   void *context)
{
	struct find_gid_index_context *ctx =
		(struct find_gid_index_context *)context;

	if (ctx->gid_type != gid_attr->gid_type)
		return false;

	if ((!!(ctx->vlan_id != 0xffff) == !is_vlan_dev(gid_attr->ndev)) ||
	    (is_vlan_dev(gid_attr->ndev) &&
	     vlan_dev_vlan_id(gid_attr->ndev) != ctx->vlan_id))
		return false;

	return true;
}

static int get_sgid_index_from_eth(struct ib_device *device, u8 port_num,
				   u16 vlan_id, const union ib_gid *sgid,
				   enum ib_gid_type gid_type,
				   u16 *gid_index)
{
	struct find_gid_index_context context = {.vlan_id = vlan_id,
						 .gid_type = gid_type};

	return ib_find_gid_by_filter(device, sgid, port_num, find_gid_index,
				     &context, gid_index);
}

int ib_get_gids_from_rdma_hdr(const union rdma_network_hdr *hdr,
			      enum rdma_network_type net_type,
			      union ib_gid *sgid, union ib_gid *dgid)
{
	struct sockaddr_in src_in;
	struct sockaddr_in dst_in;
	__be32 src_saddr, dst_saddr;

	if (!sgid || !dgid)
		return -EINVAL;

	if (net_type == RDMA_NETWORK_IPV4) {
		memcpy(&src_in.sin_addr.s_addr,
		       &hdr->roce4grh.saddr, 4);
		memcpy(&dst_in.sin_addr.s_addr,
		       &hdr->roce4grh.daddr, 4);
		src_saddr = src_in.sin_addr.s_addr;
		dst_saddr = dst_in.sin_addr.s_addr;
		ipv6_addr_set_v4mapped(src_saddr,
				       (struct in6_addr *)sgid);
		ipv6_addr_set_v4mapped(dst_saddr,
				       (struct in6_addr *)dgid);
		return 0;
	} else if (net_type == RDMA_NETWORK_IPV6 ||
		   net_type == RDMA_NETWORK_IB) {
		*dgid = hdr->ibgrh.dgid;
		*sgid = hdr->ibgrh.sgid;
		return 0;
	} else {
		return -EINVAL;
	}
}
EXPORT_SYMBOL(ib_get_gids_from_rdma_hdr);

/* Resolve destination mac address and hop limit for unicast destination
 * GID entry, considering the source GID entry as well.
 * ah_attribute must have valid port_num, sgid_index.
 */
static int ib_resolve_unicast_gid_dmac(struct ib_device *device,
				       struct rdma_ah_attr *ah_attr)
{
	struct ib_gid_attr sgid_attr;
	struct ib_global_route *grh;
	int hop_limit = 0xff;
	union ib_gid sgid;
	int ret;

	grh = rdma_ah_retrieve_grh(ah_attr);

	ret = ib_query_gid(device,
			   rdma_ah_get_port_num(ah_attr),
			   grh->sgid_index,
			   &sgid, &sgid_attr);
	if (ret || !sgid_attr.ndev) {
		if (!ret)
			ret = -ENXIO;
		return ret;
	}

	/* If destination is link local and source GID is RoCEv1,
	 * IP stack is not used.
	 */
	if (rdma_link_local_addr((struct in6_addr *)grh->dgid.raw) &&
	    sgid_attr.gid_type == IB_GID_TYPE_ROCE) {
		rdma_get_ll_mac((struct in6_addr *)grh->dgid.raw,
				ah_attr->roce.dmac);
		goto done;
	}

	ret = rdma_addr_find_l2_eth_by_grh(&sgid, &grh->dgid,
					   ah_attr->roce.dmac,
					   sgid_attr.ndev, &hop_limit);
done:
	dev_put(sgid_attr.ndev);

	grh->hop_limit = hop_limit;
	return ret;
}

/*
 * This function creates an ah from the incoming packet.
 * The incoming packet has the dgid of the receiver node on which this code is
 * getting executed, and sgid contains the GID of the sender.
 *
 * When resolving the mac address of the destination, the arrived dgid is used
 * as sgid, and sgid is used as dgid, because sgid contains the destination's
 * GID whom to respond to.
 */
int ib_init_ah_from_wc(struct ib_device *device, u8 port_num,
		       const struct ib_wc *wc, const struct ib_grh *grh,
		       struct rdma_ah_attr *ah_attr)
{
	u32 flow_class;
	u16 gid_index;
	int ret;
	enum rdma_network_type net_type = RDMA_NETWORK_IB;
	enum ib_gid_type gid_type = IB_GID_TYPE_IB;
	int hoplimit = 0xff;
	union ib_gid dgid;
	union ib_gid sgid;

	might_sleep();

	memset(ah_attr, 0, sizeof *ah_attr);
	ah_attr->type = rdma_ah_find_type(device, port_num);
	if (rdma_cap_eth_ah(device, port_num)) {
		if (wc->wc_flags & IB_WC_WITH_NETWORK_HDR_TYPE)
			net_type = wc->network_hdr_type;
		else
			net_type = ib_get_net_type_by_grh(device, port_num, grh);
		gid_type = ib_network_to_gid_type(net_type);
	}
	ret = ib_get_gids_from_rdma_hdr((union rdma_network_hdr *)grh, net_type,
					&sgid, &dgid);
	if (ret)
		return ret;

	rdma_ah_set_sl(ah_attr, wc->sl);
	rdma_ah_set_port_num(ah_attr, port_num);

	if (rdma_protocol_roce(device, port_num)) {
		u16 vlan_id = wc->wc_flags & IB_WC_WITH_VLAN ?
				wc->vlan_id : 0xffff;

		if (!(wc->wc_flags & IB_WC_GRH))
			return -EPROTOTYPE;

		ret = get_sgid_index_from_eth(device, port_num,
					      vlan_id, &dgid,
					      gid_type, &gid_index);
		if (ret)
			return ret;

		flow_class = be32_to_cpu(grh->version_tclass_flow);
		rdma_ah_set_grh(ah_attr, &sgid,
				flow_class & 0xFFFFF,
				(u8)gid_index, hoplimit,
				(flow_class >> 20) & 0xFF);
		return ib_resolve_unicast_gid_dmac(device, ah_attr);
	} else {
		rdma_ah_set_dlid(ah_attr, wc->slid);
		rdma_ah_set_path_bits(ah_attr, wc->dlid_path_bits);

		if (wc->wc_flags & IB_WC_GRH) {
			if (dgid.global.interface_id != cpu_to_be64(IB_SA_WELL_KNOWN_GUID)) {
				ret = ib_find_cached_gid_by_port(device, &dgid,
								 IB_GID_TYPE_IB,
								 port_num, NULL,
								 &gid_index);
				if (ret)
					return ret;
			} else {
				gid_index = 0;
			}

			flow_class = be32_to_cpu(grh->version_tclass_flow);
			rdma_ah_set_grh(ah_attr, &sgid,
					flow_class & 0xFFFFF,
					(u8)gid_index, hoplimit,
					(flow_class >> 20) & 0xFF);
		}
		return 0;
	}
}
EXPORT_SYMBOL(ib_init_ah_from_wc);

struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, const struct ib_wc *wc,
				   const struct ib_grh *grh, u8 port_num)
{
	struct rdma_ah_attr ah_attr;
	int ret;

	ret = ib_init_ah_from_wc(pd->device, port_num, wc, grh, &ah_attr);
	if (ret)
		return ERR_PTR(ret);

	return rdma_create_ah(pd, &ah_attr);
}
EXPORT_SYMBOL(ib_create_ah_from_wc);
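
/*
 * Illustrative usage sketch (editorial addition, not part of the upstream
 * file): a UD service replying to a received datagram can build an AH
 * directly from the completion, assuming @recv_buf points at the buffer
 * that held the GRH:
 *
 *	ah = ib_create_ah_from_wc(pd, &wc, (struct ib_grh *)recv_buf,
 *				  port_num);
 *	if (IS_ERR(ah))
 *		return PTR_ERR(ah);
 *	... post the reply using ah, then rdma_destroy_ah(ah) ...
 */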

int rdma_modify_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr)
{
	if (ah->type != ah_attr->type)
		return -EINVAL;

	return ah->device->modify_ah ?
		ah->device->modify_ah(ah, ah_attr) :
		-ENOSYS;
}
EXPORT_SYMBOL(rdma_modify_ah);

int rdma_query_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr)
{
	return ah->device->query_ah ?
		ah->device->query_ah(ah, ah_attr) :
		-ENOSYS;
}
EXPORT_SYMBOL(rdma_query_ah);

int rdma_destroy_ah(struct ib_ah *ah)
{
	struct ib_pd *pd;
	int ret;

	pd = ah->pd;
	ret = ah->device->destroy_ah(ah);
	if (!ret)
		atomic_dec(&pd->usecnt);

	return ret;
}
EXPORT_SYMBOL(rdma_destroy_ah);

/* Shared receive queues */

struct ib_srq *ib_create_srq(struct ib_pd *pd,
			     struct ib_srq_init_attr *srq_init_attr)
{
	struct ib_srq *srq;

	if (!pd->device->create_srq)
		return ERR_PTR(-ENOSYS);

	srq = pd->device->create_srq(pd, srq_init_attr, NULL);

	if (!IS_ERR(srq)) {
		srq->device	   = pd->device;
		srq->pd		   = pd;
		srq->uobject	   = NULL;
		srq->event_handler = srq_init_attr->event_handler;
		srq->srq_context   = srq_init_attr->srq_context;
		srq->srq_type	   = srq_init_attr->srq_type;
		if (ib_srq_has_cq(srq->srq_type)) {
			srq->ext.cq = srq_init_attr->ext.cq;
			atomic_inc(&srq->ext.cq->usecnt);
		}
		if (srq->srq_type == IB_SRQT_XRC) {
			srq->ext.xrc.xrcd = srq_init_attr->ext.xrc.xrcd;
			atomic_inc(&srq->ext.xrc.xrcd->usecnt);
		}
		atomic_inc(&pd->usecnt);
		atomic_set(&srq->usecnt, 0);
	}

	return srq;
}
EXPORT_SYMBOL(ib_create_srq);
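
/*
 * Illustrative usage sketch (editorial addition, not part of the upstream
 * file): a basic SRQ only needs the limits in the init attributes; the
 * numbers below are arbitrary examples.
 *
 *	struct ib_srq_init_attr srq_attr = {
 *		.attr	  = { .max_wr = 256, .max_sge = 1 },
 *		.srq_type = IB_SRQT_BASIC,
 *	};
 *
 *	srq = ib_create_srq(pd, &srq_attr);
 *	if (IS_ERR(srq))
 *		return PTR_ERR(srq);
 */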

int ib_modify_srq(struct ib_srq *srq,
		  struct ib_srq_attr *srq_attr,
		  enum ib_srq_attr_mask srq_attr_mask)
{
	return srq->device->modify_srq ?
		srq->device->modify_srq(srq, srq_attr, srq_attr_mask, NULL) :
		-ENOSYS;
}
EXPORT_SYMBOL(ib_modify_srq);

int ib_query_srq(struct ib_srq *srq,
		 struct ib_srq_attr *srq_attr)
{
	return srq->device->query_srq ?
		srq->device->query_srq(srq, srq_attr) : -ENOSYS;
}
EXPORT_SYMBOL(ib_query_srq);

int ib_destroy_srq(struct ib_srq *srq)
{
	struct ib_pd *pd;
	enum ib_srq_type srq_type;
	struct ib_xrcd *uninitialized_var(xrcd);
	struct ib_cq *uninitialized_var(cq);
	int ret;

	if (atomic_read(&srq->usecnt))
		return -EBUSY;

	pd = srq->pd;
	srq_type = srq->srq_type;
	if (ib_srq_has_cq(srq_type))
		cq = srq->ext.cq;
	if (srq_type == IB_SRQT_XRC)
		xrcd = srq->ext.xrc.xrcd;

	ret = srq->device->destroy_srq(srq);
	if (!ret) {
		atomic_dec(&pd->usecnt);
		if (srq_type == IB_SRQT_XRC)
			atomic_dec(&xrcd->usecnt);
		if (ib_srq_has_cq(srq_type))
			atomic_dec(&cq->usecnt);
	}

	return ret;
}
EXPORT_SYMBOL(ib_destroy_srq);

/* Queue pairs */

static void __ib_shared_qp_event_handler(struct ib_event *event, void *context)
{
	struct ib_qp *qp = context;
	unsigned long flags;

	spin_lock_irqsave(&qp->device->event_handler_lock, flags);
	list_for_each_entry(event->element.qp, &qp->open_list, open_list)
		if (event->element.qp->event_handler)
			event->element.qp->event_handler(event, event->element.qp->qp_context);
	spin_unlock_irqrestore(&qp->device->event_handler_lock, flags);
}

static void __ib_insert_xrcd_qp(struct ib_xrcd *xrcd, struct ib_qp *qp)
{
	mutex_lock(&xrcd->tgt_qp_mutex);
	list_add(&qp->xrcd_list, &xrcd->tgt_qp_list);
	mutex_unlock(&xrcd->tgt_qp_mutex);
}

static struct ib_qp *__ib_open_qp(struct ib_qp *real_qp,
				  void (*event_handler)(struct ib_event *, void *),
				  void *qp_context)
{
	struct ib_qp *qp;
	unsigned long flags;
	int err;

	qp = kzalloc(sizeof *qp, GFP_KERNEL);
	if (!qp)
		return ERR_PTR(-ENOMEM);

	qp->real_qp = real_qp;
	err = ib_open_shared_qp_security(qp, real_qp->device);
	if (err) {
		kfree(qp);
		return ERR_PTR(err);
	}

	qp->real_qp = real_qp;
	atomic_inc(&real_qp->usecnt);
	qp->device = real_qp->device;
	qp->event_handler = event_handler;
	qp->qp_context = qp_context;
	qp->qp_num = real_qp->qp_num;
	qp->qp_type = real_qp->qp_type;

	spin_lock_irqsave(&real_qp->device->event_handler_lock, flags);
	list_add(&qp->open_list, &real_qp->open_list);
	spin_unlock_irqrestore(&real_qp->device->event_handler_lock, flags);

	return qp;
}

struct ib_qp *ib_open_qp(struct ib_xrcd *xrcd,
			 struct ib_qp_open_attr *qp_open_attr)
{
	struct ib_qp *qp, *real_qp;

	if (qp_open_attr->qp_type != IB_QPT_XRC_TGT)
		return ERR_PTR(-EINVAL);

	qp = ERR_PTR(-EINVAL);
	mutex_lock(&xrcd->tgt_qp_mutex);
	list_for_each_entry(real_qp, &xrcd->tgt_qp_list, xrcd_list) {
		if (real_qp->qp_num == qp_open_attr->qp_num) {
			qp = __ib_open_qp(real_qp, qp_open_attr->event_handler,
					  qp_open_attr->qp_context);
			break;
		}
	}
	mutex_unlock(&xrcd->tgt_qp_mutex);
	return qp;
}
EXPORT_SYMBOL(ib_open_qp);

static struct ib_qp *ib_create_xrc_qp(struct ib_qp *qp,
				      struct ib_qp_init_attr *qp_init_attr)
{
	struct ib_qp *real_qp = qp;

	qp->event_handler = __ib_shared_qp_event_handler;
	qp->qp_context = qp;
	qp->pd = NULL;
	qp->send_cq = qp->recv_cq = NULL;
	qp->srq = NULL;
	qp->xrcd = qp_init_attr->xrcd;
	atomic_inc(&qp_init_attr->xrcd->usecnt);
	INIT_LIST_HEAD(&qp->open_list);

	qp = __ib_open_qp(real_qp, qp_init_attr->event_handler,
			  qp_init_attr->qp_context);
	if (!IS_ERR(qp))
		__ib_insert_xrcd_qp(qp_init_attr->xrcd, real_qp);
	else
		real_qp->device->destroy_qp(real_qp);
	return qp;
}

struct ib_qp *ib_create_qp(struct ib_pd *pd,
			   struct ib_qp_init_attr *qp_init_attr)
{
	struct ib_device *device = pd ? pd->device : qp_init_attr->xrcd->device;
	struct ib_qp *qp;
	int ret;

	if (qp_init_attr->rwq_ind_tbl &&
	    (qp_init_attr->recv_cq ||
	    qp_init_attr->srq || qp_init_attr->cap.max_recv_wr ||
	    qp_init_attr->cap.max_recv_sge))
		return ERR_PTR(-EINVAL);

	/*
	 * If the caller is using the RDMA API, calculate the resources
	 * needed for the RDMA READ/WRITE operations.
	 *
	 * Note that these callers need to pass in a port number.
	 */
	if (qp_init_attr->cap.max_rdma_ctxs)
		rdma_rw_init_qp(device, qp_init_attr);

	qp = device->create_qp(pd, qp_init_attr, NULL);
	if (IS_ERR(qp))
		return qp;

	ret = ib_create_qp_security(qp, device);
	if (ret) {
		ib_destroy_qp(qp);
		return ERR_PTR(ret);
	}

	qp->device     = device;
	qp->real_qp    = qp;
	qp->uobject    = NULL;
	qp->qp_type    = qp_init_attr->qp_type;
	qp->rwq_ind_tbl = qp_init_attr->rwq_ind_tbl;

	atomic_set(&qp->usecnt, 0);
	qp->mrs_used = 0;
	spin_lock_init(&qp->mr_lock);
	INIT_LIST_HEAD(&qp->rdma_mrs);
	INIT_LIST_HEAD(&qp->sig_mrs);
	qp->port = 0;

	if (qp_init_attr->qp_type == IB_QPT_XRC_TGT)
		return ib_create_xrc_qp(qp, qp_init_attr);

	qp->event_handler = qp_init_attr->event_handler;
	qp->qp_context = qp_init_attr->qp_context;
	if (qp_init_attr->qp_type == IB_QPT_XRC_INI) {
		qp->recv_cq = NULL;
		qp->srq = NULL;
	} else {
		qp->recv_cq = qp_init_attr->recv_cq;
		if (qp_init_attr->recv_cq)
			atomic_inc(&qp_init_attr->recv_cq->usecnt);
		qp->srq = qp_init_attr->srq;
		if (qp->srq)
			atomic_inc(&qp_init_attr->srq->usecnt);
	}

	qp->pd	    = pd;
	qp->send_cq = qp_init_attr->send_cq;
	qp->xrcd    = NULL;

	atomic_inc(&pd->usecnt);
	if (qp_init_attr->send_cq)
		atomic_inc(&qp_init_attr->send_cq->usecnt);
	if (qp_init_attr->rwq_ind_tbl)
		atomic_inc(&qp->rwq_ind_tbl->usecnt);

	if (qp_init_attr->cap.max_rdma_ctxs) {
		ret = rdma_rw_init_mrs(qp, qp_init_attr);
		if (ret) {
			pr_err("failed to init MR pool ret= %d\n", ret);
			ib_destroy_qp(qp);
			return ERR_PTR(ret);
		}
	}

	/*
	 * Note: all hw drivers guarantee that max_send_sge is lower than
	 * the device RDMA WRITE SGE limit but not all hw drivers ensure that
	 * max_send_sge <= max_sge_rd.
	 */
	qp->max_write_sge = qp_init_attr->cap.max_send_sge;
	qp->max_read_sge = min_t(u32, qp_init_attr->cap.max_send_sge,
				 device->attrs.max_sge_rd);

	return qp;
}
EXPORT_SYMBOL(ib_create_qp);
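
/*
 * Illustrative usage sketch (editorial addition, not part of the upstream
 * file): a ULP creating an RC QP fills in ib_qp_init_attr with its CQs and
 * capabilities; the limits below are arbitrary examples.
 *
 *	struct ib_qp_init_attr init_attr = {
 *		.send_cq     = send_cq,
 *		.recv_cq     = recv_cq,
 *		.cap	     = { .max_send_wr = 64, .max_recv_wr = 64,
 *				 .max_send_sge = 1, .max_recv_sge = 1 },
 *		.sq_sig_type = IB_SIGNAL_REQ_WR,
 *		.qp_type     = IB_QPT_RC,
 *	};
 *
 *	qp = ib_create_qp(pd, &init_attr);
 *	if (IS_ERR(qp))
 *		return PTR_ERR(qp);
 */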

static const struct {
	int			valid;
	enum ib_qp_attr_mask	req_param[IB_QPT_MAX];
	enum ib_qp_attr_mask	opt_param[IB_QPT_MAX];
} qp_state_table[IB_QPS_ERR + 1][IB_QPS_ERR + 1] = {
	[IB_QPS_RESET] = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_INIT]  = {
			.valid = 1,
			.req_param = {
				[IB_QPT_UD]  = (IB_QP_PKEY_INDEX |
						IB_QP_PORT |
						IB_QP_QKEY),
				[IB_QPT_RAW_PACKET] = IB_QP_PORT,
				[IB_QPT_UC]  = (IB_QP_PKEY_INDEX |
						IB_QP_PORT |
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_RC]  = (IB_QP_PKEY_INDEX |
						IB_QP_PORT |
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_XRC_INI] = (IB_QP_PKEY_INDEX |
						IB_QP_PORT |
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_XRC_TGT] = (IB_QP_PKEY_INDEX |
						IB_QP_PORT |
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_SMI] = (IB_QP_PKEY_INDEX |
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_PKEY_INDEX |
						IB_QP_QKEY),
			}
		},
	},
	[IB_QPS_INIT]  = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR] =   { .valid = 1 },
		[IB_QPS_INIT]  = {
			.valid = 1,
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_PKEY_INDEX |
						IB_QP_PORT |
						IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_PKEY_INDEX |
						IB_QP_PORT |
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_RC]  = (IB_QP_PKEY_INDEX |
						IB_QP_PORT |
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_XRC_INI] = (IB_QP_PKEY_INDEX |
						IB_QP_PORT |
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_XRC_TGT] = (IB_QP_PKEY_INDEX |
						IB_QP_PORT |
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_SMI] = (IB_QP_PKEY_INDEX |
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_PKEY_INDEX |
						IB_QP_QKEY),
			}
		},
		[IB_QPS_RTR]   = {
			.valid = 1,
			.req_param = {
				[IB_QPT_UC]  = (IB_QP_AV |
						IB_QP_PATH_MTU |
						IB_QP_DEST_QPN |
						IB_QP_RQ_PSN),
				[IB_QPT_RC]  = (IB_QP_AV |
						IB_QP_PATH_MTU |
						IB_QP_DEST_QPN |
						IB_QP_RQ_PSN |
						IB_QP_MAX_DEST_RD_ATOMIC |
						IB_QP_MIN_RNR_TIMER),
				[IB_QPT_XRC_INI] = (IB_QP_AV |
						IB_QP_PATH_MTU |
						IB_QP_DEST_QPN |
						IB_QP_RQ_PSN),
				[IB_QPT_XRC_TGT] = (IB_QP_AV |
						IB_QP_PATH_MTU |
						IB_QP_DEST_QPN |
						IB_QP_RQ_PSN |
						IB_QP_MAX_DEST_RD_ATOMIC |
						IB_QP_MIN_RNR_TIMER),
			},
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_PKEY_INDEX |
						IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_PKEY_INDEX),
				[IB_QPT_RC]  = (IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_PKEY_INDEX),
				[IB_QPT_XRC_INI] = (IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_PKEY_INDEX),
				[IB_QPT_XRC_TGT] = (IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_PKEY_INDEX),
				[IB_QPT_SMI] = (IB_QP_PKEY_INDEX |
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_PKEY_INDEX |
						IB_QP_QKEY),
			},
		},
	},
	[IB_QPS_RTR]   = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR] =   { .valid = 1 },
		[IB_QPS_RTS]   = {
			.valid = 1,
			.req_param = {
				[IB_QPT_UD]  = IB_QP_SQ_PSN,
				[IB_QPT_UC]  = IB_QP_SQ_PSN,
				[IB_QPT_RC]  = (IB_QP_TIMEOUT |
						IB_QP_RETRY_CNT |
						IB_QP_RNR_RETRY |
						IB_QP_SQ_PSN |
						IB_QP_MAX_QP_RD_ATOMIC),
				[IB_QPT_XRC_INI] = (IB_QP_TIMEOUT |
						IB_QP_RETRY_CNT |
						IB_QP_RNR_RETRY |
						IB_QP_SQ_PSN |
						IB_QP_MAX_QP_RD_ATOMIC),
				[IB_QPT_XRC_TGT] = (IB_QP_TIMEOUT |
						IB_QP_SQ_PSN),
				[IB_QPT_SMI] = IB_QP_SQ_PSN,
				[IB_QPT_GSI] = IB_QP_SQ_PSN,
			},
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_CUR_STATE |
						IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_CUR_STATE |
						IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_RC]  = (IB_QP_CUR_STATE |
						IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_MIN_RNR_TIMER |
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_XRC_INI] = (IB_QP_CUR_STATE |
						IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_XRC_TGT] = (IB_QP_CUR_STATE |
						IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_MIN_RNR_TIMER |
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_SMI] = (IB_QP_CUR_STATE |
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_CUR_STATE |
						IB_QP_QKEY),
				[IB_QPT_RAW_PACKET] = IB_QP_RATE_LIMIT,
			}
		}
	},
	[IB_QPS_RTS]   = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR] =   { .valid = 1 },
		[IB_QPS_RTS]   = {
			.valid = 1,
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_CUR_STATE |
						IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_CUR_STATE |
						IB_QP_ACCESS_FLAGS |
						IB_QP_ALT_PATH |
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_RC]  = (IB_QP_CUR_STATE |
						IB_QP_ACCESS_FLAGS |
						IB_QP_ALT_PATH |
						IB_QP_PATH_MIG_STATE |
						IB_QP_MIN_RNR_TIMER),
				[IB_QPT_XRC_INI] = (IB_QP_CUR_STATE |
						IB_QP_ACCESS_FLAGS |
						IB_QP_ALT_PATH |
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_XRC_TGT] = (IB_QP_CUR_STATE |
						IB_QP_ACCESS_FLAGS |
						IB_QP_ALT_PATH |
						IB_QP_PATH_MIG_STATE |
						IB_QP_MIN_RNR_TIMER),
				[IB_QPT_SMI] = (IB_QP_CUR_STATE |
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_CUR_STATE |
						IB_QP_QKEY),
				[IB_QPT_RAW_PACKET] = IB_QP_RATE_LIMIT,
			}
		},
		[IB_QPS_SQD]   = {
			.valid = 1,
			.opt_param = {
				[IB_QPT_UD]  = IB_QP_EN_SQD_ASYNC_NOTIFY,
				[IB_QPT_UC]  = IB_QP_EN_SQD_ASYNC_NOTIFY,
				[IB_QPT_RC]  = IB_QP_EN_SQD_ASYNC_NOTIFY,
				[IB_QPT_XRC_INI] = IB_QP_EN_SQD_ASYNC_NOTIFY,
				[IB_QPT_XRC_TGT] = IB_QP_EN_SQD_ASYNC_NOTIFY, /* ??? */
				[IB_QPT_SMI] = IB_QP_EN_SQD_ASYNC_NOTIFY,
				[IB_QPT_GSI] = IB_QP_EN_SQD_ASYNC_NOTIFY
			}
		},
	},
	[IB_QPS_SQD]   = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR] =   { .valid = 1 },
		[IB_QPS_RTS]   = {
			.valid = 1,
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_CUR_STATE |
						IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_CUR_STATE |
						IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_RC]  = (IB_QP_CUR_STATE |
						IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_MIN_RNR_TIMER |
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_XRC_INI] = (IB_QP_CUR_STATE |
						IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_XRC_TGT] = (IB_QP_CUR_STATE |
						IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_MIN_RNR_TIMER |
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_SMI] = (IB_QP_CUR_STATE |
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_CUR_STATE |
						IB_QP_QKEY),
			}
		},
		[IB_QPS_SQD]   = {
			.valid = 1,
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_PKEY_INDEX |
						IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_AV |
						IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_PKEY_INDEX |
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_RC]  = (IB_QP_PORT |
						IB_QP_AV |
						IB_QP_TIMEOUT |
						IB_QP_RETRY_CNT |
						IB_QP_RNR_RETRY |
						IB_QP_MAX_QP_RD_ATOMIC |
						IB_QP_MAX_DEST_RD_ATOMIC |
						IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_PKEY_INDEX |
						IB_QP_MIN_RNR_TIMER |
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_XRC_INI] = (IB_QP_PORT |
						IB_QP_AV |
						IB_QP_TIMEOUT |
						IB_QP_RETRY_CNT |
						IB_QP_RNR_RETRY |
						IB_QP_MAX_QP_RD_ATOMIC |
						IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_PKEY_INDEX |
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_XRC_TGT] = (IB_QP_PORT |
						IB_QP_AV |
						IB_QP_TIMEOUT |
						IB_QP_MAX_DEST_RD_ATOMIC |
						IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_PKEY_INDEX |
						IB_QP_MIN_RNR_TIMER |
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_SMI] = (IB_QP_PKEY_INDEX |
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_PKEY_INDEX |
						IB_QP_QKEY),
			}
		}
	},
	[IB_QPS_SQE]   = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR] =   { .valid = 1 },
		[IB_QPS_RTS]   = {
			.valid = 1,
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_CUR_STATE |
						IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_CUR_STATE |
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_SMI] = (IB_QP_CUR_STATE |
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_CUR_STATE |
						IB_QP_QKEY),
			}
		}
	},
	[IB_QPS_ERR] = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR] =   { .valid = 1 }
	}
};

int ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
		       enum ib_qp_type type, enum ib_qp_attr_mask mask,
		       enum rdma_link_layer ll)
{
	enum ib_qp_attr_mask req_param, opt_param;

	if (cur_state  < 0 || cur_state  > IB_QPS_ERR ||
	    next_state < 0 || next_state > IB_QPS_ERR)
		return 0;

	if (mask & IB_QP_CUR_STATE  &&
	    cur_state != IB_QPS_RTR && cur_state != IB_QPS_RTS &&
	    cur_state != IB_QPS_SQD && cur_state != IB_QPS_SQE)
		return 0;

	if (!qp_state_table[cur_state][next_state].valid)
		return 0;

	req_param = qp_state_table[cur_state][next_state].req_param[type];
	opt_param = qp_state_table[cur_state][next_state].opt_param[type];

	if ((mask & req_param) != req_param)
		return 0;

	if (mask & ~(req_param | opt_param | IB_QP_STATE))
		return 0;

	return 1;
}
EXPORT_SYMBOL(ib_modify_qp_is_ok);
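
/*
 * Illustrative usage sketch (editorial addition, not part of the upstream
 * file): a driver validating an INIT->RTR transition on an RC QP might do:
 *
 *	if (!ib_modify_qp_is_ok(IB_QPS_INIT, IB_QPS_RTR, IB_QPT_RC,
 *				attr_mask, IB_LINK_LAYER_INFINIBAND))
 *		return -EINVAL;
 */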

static int ib_resolve_eth_dmac(struct ib_device *device,
			       struct rdma_ah_attr *ah_attr)
{
	int ret = 0;
	struct ib_global_route *grh;

	if (!rdma_is_port_valid(device, rdma_ah_get_port_num(ah_attr)))
		return -EINVAL;

	if (ah_attr->type != RDMA_AH_ATTR_TYPE_ROCE)
		return 0;

	grh = rdma_ah_retrieve_grh(ah_attr);

	if (rdma_is_multicast_addr((struct in6_addr *)ah_attr->grh.dgid.raw)) {
		if (ipv6_addr_v4mapped((struct in6_addr *)ah_attr->grh.dgid.raw)) {
			__be32 addr = 0;

			memcpy(&addr, ah_attr->grh.dgid.raw + 12, 4);
			ip_eth_mc_map(addr, (char *)ah_attr->roce.dmac);
		} else {
			ipv6_eth_mc_map((struct in6_addr *)ah_attr->grh.dgid.raw,
					(char *)ah_attr->roce.dmac);
		}
	} else {
		ret = ib_resolve_unicast_gid_dmac(device, ah_attr);
	}
	return ret;
}

/**
 * ib_modify_qp_with_udata - Modifies the attributes for the specified QP.
 * @qp: The QP to modify.
 * @attr: On input, specifies the QP attributes to modify.  On output,
 *   the current values of selected QP attributes are returned.
 * @attr_mask: A bit-mask used to specify which attributes of the QP
 *   are being modified.
 * @udata: pointer to the user's input/output buffer information.
 *
 * It returns 0 on success and returns appropriate error code on error.
 */
int ib_modify_qp_with_udata(struct ib_qp *qp, struct ib_qp_attr *attr,
			    int attr_mask, struct ib_udata *udata)
{
	int ret;

	if (attr_mask & IB_QP_AV) {
		ret = ib_resolve_eth_dmac(qp->device, &attr->ah_attr);
		if (ret)
			return ret;
	}
	ret = ib_security_modify_qp(qp, attr, attr_mask, udata);
	if (!ret && (attr_mask & IB_QP_PORT))
		qp->port = attr->port_num;

	return ret;
}
EXPORT_SYMBOL(ib_modify_qp_with_udata);

int ib_get_eth_speed(struct ib_device *dev, u8 port_num, u8 *speed, u8 *width)
{
	int rc;
	u32 netdev_speed;
	struct net_device *netdev;
	struct ethtool_link_ksettings lksettings;

	if (rdma_port_get_link_layer(dev, port_num) != IB_LINK_LAYER_ETHERNET)
		return -EINVAL;

	if (!dev->get_netdev)
		return -EOPNOTSUPP;

	netdev = dev->get_netdev(dev, port_num);
	if (!netdev)
		return -ENODEV;

	rtnl_lock();
	rc = __ethtool_get_link_ksettings(netdev, &lksettings);
	rtnl_unlock();

	dev_put(netdev);

	if (!rc) {
		netdev_speed = lksettings.base.speed;
	} else {
		netdev_speed = SPEED_1000;
		pr_warn("%s speed is unknown, defaulting to %d\n", netdev->name,
			netdev_speed);
	}

	if (netdev_speed <= SPEED_1000) {
		*width = IB_WIDTH_1X;
		*speed = IB_SPEED_SDR;
	} else if (netdev_speed <= SPEED_10000) {
		*width = IB_WIDTH_1X;
		*speed = IB_SPEED_FDR10;
	} else if (netdev_speed <= SPEED_20000) {
		*width = IB_WIDTH_4X;
		*speed = IB_SPEED_DDR;
	} else if (netdev_speed <= SPEED_25000) {
		*width = IB_WIDTH_1X;
		*speed = IB_SPEED_EDR;
	} else if (netdev_speed <= SPEED_40000) {
		*width = IB_WIDTH_4X;
		*speed = IB_SPEED_FDR10;
	} else {
		*width = IB_WIDTH_4X;
		*speed = IB_SPEED_EDR;
	}

	return 0;
}
EXPORT_SYMBOL(ib_get_eth_speed);

int ib_modify_qp(struct ib_qp *qp,
		 struct ib_qp_attr *qp_attr,
		 int qp_attr_mask)
{
	return ib_modify_qp_with_udata(qp, qp_attr, qp_attr_mask, NULL);
}
EXPORT_SYMBOL(ib_modify_qp);

int ib_query_qp(struct ib_qp *qp,
		struct ib_qp_attr *qp_attr,
		int qp_attr_mask,
		struct ib_qp_init_attr *qp_init_attr)
{
	return qp->device->query_qp ?
		qp->device->query_qp(qp->real_qp, qp_attr, qp_attr_mask, qp_init_attr) :
		-ENOSYS;
}
EXPORT_SYMBOL(ib_query_qp);

int ib_close_qp(struct ib_qp *qp)
{
	struct ib_qp *real_qp;
	unsigned long flags;

	real_qp = qp->real_qp;
	if (real_qp == qp)
		return -EINVAL;

	spin_lock_irqsave(&real_qp->device->event_handler_lock, flags);
	list_del(&qp->open_list);
	spin_unlock_irqrestore(&real_qp->device->event_handler_lock, flags);

	atomic_dec(&real_qp->usecnt);
	ib_close_shared_qp_security(qp->qp_sec);
	kfree(qp);

	return 0;
}
EXPORT_SYMBOL(ib_close_qp);

static int __ib_destroy_shared_qp(struct ib_qp *qp)
{
	struct ib_xrcd *xrcd;
	struct ib_qp *real_qp;
	int ret;

	real_qp = qp->real_qp;
	xrcd = real_qp->xrcd;

	mutex_lock(&xrcd->tgt_qp_mutex);
	ib_close_qp(qp);
	if (atomic_read(&real_qp->usecnt) == 0)
		list_del(&real_qp->xrcd_list);
	else
		real_qp = NULL;
	mutex_unlock(&xrcd->tgt_qp_mutex);

	if (real_qp) {
		ret = ib_destroy_qp(real_qp);
		if (!ret)
			atomic_dec(&xrcd->usecnt);
		else
			__ib_insert_xrcd_qp(xrcd, real_qp);
	}

	return 0;
}

int ib_destroy_qp(struct ib_qp *qp)
{
	struct ib_pd *pd;
	struct ib_cq *scq, *rcq;
	struct ib_srq *srq;
	struct ib_rwq_ind_table *ind_tbl;
	struct ib_qp_security *sec;
	int ret;

	WARN_ON_ONCE(qp->mrs_used > 0);

	if (atomic_read(&qp->usecnt))
		return -EBUSY;

	if (qp->real_qp != qp)
		return __ib_destroy_shared_qp(qp);

	pd   = qp->pd;
	scq  = qp->send_cq;
	rcq  = qp->recv_cq;
	srq  = qp->srq;
	ind_tbl = qp->rwq_ind_tbl;
	sec  = qp->qp_sec;
	if (sec)
		ib_destroy_qp_security_begin(sec);

	if (!qp->uobject)
		rdma_rw_cleanup_mrs(qp);

	ret = qp->device->destroy_qp(qp);
	if (!ret) {
		if (pd)
			atomic_dec(&pd->usecnt);
		if (scq)
			atomic_dec(&scq->usecnt);
		if (rcq)
			atomic_dec(&rcq->usecnt);
		if (srq)
			atomic_dec(&srq->usecnt);
		if (ind_tbl)
			atomic_dec(&ind_tbl->usecnt);
		if (sec)
			ib_destroy_qp_security_end(sec);
	} else {
		if (sec)
			ib_destroy_qp_security_abort(sec);
	}

	return ret;
}
EXPORT_SYMBOL(ib_destroy_qp);
1513
1514/* Completion queues */
1515
1516struct ib_cq *ib_create_cq(struct ib_device *device,
1517 ib_comp_handler comp_handler,
1518 void (*event_handler)(struct ib_event *, void *),
Matan Barak8e372102015-06-11 16:35:21 +03001519 void *cq_context,
1520 const struct ib_cq_init_attr *cq_attr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001521{
1522 struct ib_cq *cq;
1523
Matan Barak8e372102015-06-11 16:35:21 +03001524 cq = device->create_cq(device, cq_attr, NULL, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001525
1526 if (!IS_ERR(cq)) {
1527 cq->device = device;
Roland Dreierb5e81bf2005-07-07 17:57:11 -07001528 cq->uobject = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001529 cq->comp_handler = comp_handler;
1530 cq->event_handler = event_handler;
1531 cq->cq_context = cq_context;
1532 atomic_set(&cq->usecnt, 0);
1533 }
1534
1535 return cq;
1536}
1537EXPORT_SYMBOL(ib_create_cq);
1538
Leon Romanovsky4190b4e2017-11-13 10:51:19 +02001539int rdma_set_cq_moderation(struct ib_cq *cq, u16 cq_count, u16 cq_period)
Eli Cohen2dd57162008-04-16 21:09:33 -07001540{
1541 return cq->device->modify_cq ?
1542 cq->device->modify_cq(cq, cq_count, cq_period) : -ENOSYS;
1543}
Leon Romanovsky4190b4e2017-11-13 10:51:19 +02001544EXPORT_SYMBOL(rdma_set_cq_moderation);
Eli Cohen2dd57162008-04-16 21:09:33 -07001545
Linus Torvalds1da177e2005-04-16 15:20:36 -07001546int ib_destroy_cq(struct ib_cq *cq)
1547{
1548 if (atomic_read(&cq->usecnt))
1549 return -EBUSY;
1550
1551 return cq->device->destroy_cq(cq);
1552}
1553EXPORT_SYMBOL(ib_destroy_cq);
1554
Roland Dreiera74cd4a2006-02-13 16:30:49 -08001555int ib_resize_cq(struct ib_cq *cq, int cqe)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001556{
Roland Dreier40de2e52005-11-08 11:10:25 -08001557 return cq->device->resize_cq ?
Roland Dreier33b9b3e2006-01-30 14:29:21 -08001558 cq->device->resize_cq(cq, cqe, NULL) : -ENOSYS;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001559}
1560EXPORT_SYMBOL(ib_resize_cq);
1561
1562/* Memory regions */
1563
Linus Torvalds1da177e2005-04-16 15:20:36 -07001564int ib_dereg_mr(struct ib_mr *mr)
1565{
Christoph Hellwigab67ed82015-12-23 19:12:54 +01001566 struct ib_pd *pd = mr->pd;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001567 int ret;
1568
Linus Torvalds1da177e2005-04-16 15:20:36 -07001569 ret = mr->device->dereg_mr(mr);
1570 if (!ret)
1571 atomic_dec(&pd->usecnt);
1572
1573 return ret;
1574}
1575EXPORT_SYMBOL(ib_dereg_mr);
1576
Sagi Grimberg9bee1782015-07-30 10:32:35 +03001577/**
1578 * ib_alloc_mr() - Allocates a memory region
1579 * @pd: protection domain associated with the region
1580 * @mr_type: memory region type
1581 * @max_num_sg: maximum sg entries available for registration.
1582 *
1583 * Notes:
1584 * Memory registration page/sg lists must not exceed max_num_sg.
1585 * For mr_type IB_MR_TYPE_MEM_REG, the total length cannot exceed
1586 * max_num_sg * used_page_size.
1587 *
1588 */
1589struct ib_mr *ib_alloc_mr(struct ib_pd *pd,
1590 enum ib_mr_type mr_type,
1591 u32 max_num_sg)
Sagi Grimberg17cd3a22014-02-23 14:19:04 +02001592{
1593 struct ib_mr *mr;
1594
Sagi Grimbergd9f272c2015-07-30 10:32:48 +03001595 if (!pd->device->alloc_mr)
Sagi Grimberg17cd3a22014-02-23 14:19:04 +02001596 return ERR_PTR(-ENOSYS);
1597
Sagi Grimbergd9f272c2015-07-30 10:32:48 +03001598 mr = pd->device->alloc_mr(pd, mr_type, max_num_sg);
Sagi Grimberg17cd3a22014-02-23 14:19:04 +02001599 if (!IS_ERR(mr)) {
1600 mr->device = pd->device;
1601 mr->pd = pd;
1602 mr->uobject = NULL;
1603 atomic_inc(&pd->usecnt);
Steve Wised4a85c32016-05-03 18:01:08 +02001604 mr->need_inval = false;
Sagi Grimberg17cd3a22014-02-23 14:19:04 +02001605 }
1606
1607 return mr;
1608}
Sagi Grimberg9bee1782015-07-30 10:32:35 +03001609EXPORT_SYMBOL(ib_alloc_mr);
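/*
 * Example (illustrative sketch, not part of this file): allocating a
 * fast-registration MR that can map up to 16 pages. The surrounding
 * context and error handling are hypothetical.
 */
static struct ib_mr *example_alloc_reg_mr(struct ib_pd *pd)
{
	struct ib_mr *mr;

	mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, 16);
	if (IS_ERR(mr))
		return mr;

	/*
	 * mr->lkey/mr->rkey become usable once the MR has been mapped with
	 * ib_map_mr_sg() and registered via an IB_WR_REG_MR work request.
	 */
	return mr;
}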
Steve Wise00f7ec32008-07-14 23:48:45 -07001610
Linus Torvalds1da177e2005-04-16 15:20:36 -07001611/* "Fast" memory regions */
1612
1613struct ib_fmr *ib_alloc_fmr(struct ib_pd *pd,
1614 int mr_access_flags,
1615 struct ib_fmr_attr *fmr_attr)
1616{
1617 struct ib_fmr *fmr;
1618
1619 if (!pd->device->alloc_fmr)
1620 return ERR_PTR(-ENOSYS);
1621
1622 fmr = pd->device->alloc_fmr(pd, mr_access_flags, fmr_attr);
1623 if (!IS_ERR(fmr)) {
1624 fmr->device = pd->device;
1625 fmr->pd = pd;
1626 atomic_inc(&pd->usecnt);
1627 }
1628
1629 return fmr;
1630}
1631EXPORT_SYMBOL(ib_alloc_fmr);
1632
1633int ib_unmap_fmr(struct list_head *fmr_list)
1634{
1635 struct ib_fmr *fmr;
1636
1637 if (list_empty(fmr_list))
1638 return 0;
1639
1640 fmr = list_entry(fmr_list->next, struct ib_fmr, list);
1641 return fmr->device->unmap_fmr(fmr_list);
1642}
1643EXPORT_SYMBOL(ib_unmap_fmr);
1644
1645int ib_dealloc_fmr(struct ib_fmr *fmr)
1646{
1647 struct ib_pd *pd;
1648 int ret;
1649
1650 pd = fmr->pd;
1651 ret = fmr->device->dealloc_fmr(fmr);
1652 if (!ret)
1653 atomic_dec(&pd->usecnt);
1654
1655 return ret;
1656}
1657EXPORT_SYMBOL(ib_dealloc_fmr);
1658
1659/* Multicast groups */
1660
Noa Osherovich52363332017-06-12 11:14:02 +03001661static bool is_valid_mcast_lid(struct ib_qp *qp, u16 lid)
1662{
1663 struct ib_qp_init_attr init_attr = {};
1664 struct ib_qp_attr attr = {};
1665 int num_eth_ports = 0;
1666 int port;
1667
1668 /* If QP state >= init, it is assigned to a port and we can check this
1669 * port only.
1670 */
1671 if (!ib_query_qp(qp, &attr, IB_QP_STATE | IB_QP_PORT, &init_attr)) {
1672 if (attr.qp_state >= IB_QPS_INIT) {
Alex Estrine6f9bc32017-08-31 09:30:34 -07001673 if (rdma_port_get_link_layer(qp->device, attr.port_num) !=
Noa Osherovich52363332017-06-12 11:14:02 +03001674 IB_LINK_LAYER_INFINIBAND)
1675 return true;
1676 goto lid_check;
1677 }
1678 }
1679
1680 /* Can't get a quick answer, iterate over all ports */
1681 for (port = 0; port < qp->device->phys_port_cnt; port++)
Alex Estrine6f9bc32017-08-31 09:30:34 -07001682 if (rdma_port_get_link_layer(qp->device, port) !=
Noa Osherovich52363332017-06-12 11:14:02 +03001683 IB_LINK_LAYER_INFINIBAND)
1684 num_eth_ports++;
1685
1686	/* If we have at least one Ethernet port, the RoCE annex declares that
1687	 * the multicast LID should be ignored. We can't tell at this step if the
1688 * QP belongs to an IB or Ethernet port.
1689 */
1690 if (num_eth_ports)
1691 return true;
1692
1693 /* If all the ports are IB, we can check according to IB spec. */
1694lid_check:
1695 return !(lid < be16_to_cpu(IB_MULTICAST_LID_BASE) ||
1696 lid == be16_to_cpu(IB_LID_PERMISSIVE));
1697}
1698
Linus Torvalds1da177e2005-04-16 15:20:36 -07001699int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
1700{
Or Gerlitzc3bccbfb2012-04-29 17:04:22 +03001701 int ret;
1702
Jack Morgenstein0c33aee2005-09-26 11:47:53 -07001703 if (!qp->device->attach_mcast)
1704 return -ENOSYS;
Noa Osherovichbe1d3252017-06-12 11:14:03 +03001705
1706 if (!rdma_is_multicast_addr((struct in6_addr *)gid->raw) ||
1707 qp->qp_type != IB_QPT_UD || !is_valid_mcast_lid(qp, lid))
Jack Morgenstein0c33aee2005-09-26 11:47:53 -07001708 return -EINVAL;
1709
Or Gerlitzc3bccbfb2012-04-29 17:04:22 +03001710 ret = qp->device->attach_mcast(qp, gid, lid);
1711 if (!ret)
1712 atomic_inc(&qp->usecnt);
1713 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001714}
1715EXPORT_SYMBOL(ib_attach_mcast);
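/*
 * Example (illustrative sketch, not part of this file): attaching a UD QP
 * to a multicast group. The MGID comes from the caller and the MLID value
 * is hypothetical; per is_valid_mcast_lid() above, on IB ports it must be
 * in the multicast LID range, while RoCE ports ignore it.
 */
static int example_join_mcast(struct ib_qp *ud_qp, union ib_gid *mgid)
{
	u16 mlid = 0xc001;	/* within the IB multicast LID range */

	return ib_attach_mcast(ud_qp, mgid, mlid);
}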
1716
1717int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
1718{
Or Gerlitzc3bccbfb2012-04-29 17:04:22 +03001719 int ret;
1720
Jack Morgenstein0c33aee2005-09-26 11:47:53 -07001721 if (!qp->device->detach_mcast)
1722 return -ENOSYS;
Noa Osherovichbe1d3252017-06-12 11:14:03 +03001723
1724 if (!rdma_is_multicast_addr((struct in6_addr *)gid->raw) ||
1725 qp->qp_type != IB_QPT_UD || !is_valid_mcast_lid(qp, lid))
Jack Morgenstein0c33aee2005-09-26 11:47:53 -07001726 return -EINVAL;
1727
Or Gerlitzc3bccbfb2012-04-29 17:04:22 +03001728 ret = qp->device->detach_mcast(qp, gid, lid);
1729 if (!ret)
1730 atomic_dec(&qp->usecnt);
1731 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001732}
1733EXPORT_SYMBOL(ib_detach_mcast);
Sean Hefty59991f92011-05-23 17:52:46 -07001734
1735struct ib_xrcd *ib_alloc_xrcd(struct ib_device *device)
1736{
1737 struct ib_xrcd *xrcd;
1738
1739 if (!device->alloc_xrcd)
1740 return ERR_PTR(-ENOSYS);
1741
1742 xrcd = device->alloc_xrcd(device, NULL, NULL);
1743 if (!IS_ERR(xrcd)) {
1744 xrcd->device = device;
Sean Hefty53d0bd12011-05-24 08:33:46 -07001745 xrcd->inode = NULL;
Sean Hefty59991f92011-05-23 17:52:46 -07001746 atomic_set(&xrcd->usecnt, 0);
Sean Heftyd3d72d92011-05-26 23:06:44 -07001747 mutex_init(&xrcd->tgt_qp_mutex);
1748 INIT_LIST_HEAD(&xrcd->tgt_qp_list);
Sean Hefty59991f92011-05-23 17:52:46 -07001749 }
1750
1751 return xrcd;
1752}
1753EXPORT_SYMBOL(ib_alloc_xrcd);
1754
1755int ib_dealloc_xrcd(struct ib_xrcd *xrcd)
1756{
Sean Heftyd3d72d92011-05-26 23:06:44 -07001757 struct ib_qp *qp;
1758 int ret;
1759
Sean Hefty59991f92011-05-23 17:52:46 -07001760 if (atomic_read(&xrcd->usecnt))
1761 return -EBUSY;
1762
Sean Heftyd3d72d92011-05-26 23:06:44 -07001763 while (!list_empty(&xrcd->tgt_qp_list)) {
1764 qp = list_entry(xrcd->tgt_qp_list.next, struct ib_qp, xrcd_list);
1765 ret = ib_destroy_qp(qp);
1766 if (ret)
1767 return ret;
1768 }
1769
Sean Hefty59991f92011-05-23 17:52:46 -07001770 return xrcd->device->dealloc_xrcd(xrcd);
1771}
1772EXPORT_SYMBOL(ib_dealloc_xrcd);
Hadar Hen Zion319a4412013-08-07 14:01:59 +03001773
Yishai Hadas5fd251c2016-05-23 15:20:48 +03001774/**
1775 * ib_create_wq - Creates a WQ associated with the specified protection
1776 * domain.
1777 * @pd: The protection domain associated with the WQ.
1778 * @wq_attr: A list of initial attributes required to create the
1779 * WQ. If WQ creation succeeds, then the attributes are updated to
1780 * the actual capabilities of the created WQ.
1781 *
1782 * wq_attr->max_wr and wq_attr->max_sge determine
1783 * the requested size of the WQ, and are set to the actual values allocated
1784 * on return.
1785 * If ib_create_wq() succeeds, then max_wr and max_sge will always be
1786 * at least as large as the requested values.
1787 */
1788struct ib_wq *ib_create_wq(struct ib_pd *pd,
1789 struct ib_wq_init_attr *wq_attr)
1790{
1791 struct ib_wq *wq;
1792
1793 if (!pd->device->create_wq)
1794 return ERR_PTR(-ENOSYS);
1795
1796 wq = pd->device->create_wq(pd, wq_attr, NULL);
1797 if (!IS_ERR(wq)) {
1798 wq->event_handler = wq_attr->event_handler;
1799 wq->wq_context = wq_attr->wq_context;
1800 wq->wq_type = wq_attr->wq_type;
1801 wq->cq = wq_attr->cq;
1802 wq->device = pd->device;
1803 wq->pd = pd;
1804 wq->uobject = NULL;
1805 atomic_inc(&pd->usecnt);
1806 atomic_inc(&wq_attr->cq->usecnt);
1807 atomic_set(&wq->usecnt, 0);
1808 }
1809 return wq;
1810}
1811EXPORT_SYMBOL(ib_create_wq);
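/*
 * Example (illustrative sketch, not part of this file): creating a receive
 * WQ, e.g. for use in an RSS indirection table. The queue sizes and the
 * caller-provided CQ are hypothetical; on success the attr fields hold the
 * actual values allocated.
 */
static struct ib_wq *example_create_rq(struct ib_pd *pd, struct ib_cq *cq)
{
	struct ib_wq_init_attr attr = {
		.wq_type = IB_WQT_RQ,
		.max_wr	 = 256,
		.max_sge = 1,
		.cq	 = cq,
	};

	return ib_create_wq(pd, &attr);
}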
1812
1813/**
1814 * ib_destroy_wq - Destroys the specified WQ.
1815 * @wq: The WQ to destroy.
1816 */
1817int ib_destroy_wq(struct ib_wq *wq)
1818{
1819 int err;
1820 struct ib_cq *cq = wq->cq;
1821 struct ib_pd *pd = wq->pd;
1822
1823 if (atomic_read(&wq->usecnt))
1824 return -EBUSY;
1825
1826 err = wq->device->destroy_wq(wq);
1827 if (!err) {
1828 atomic_dec(&pd->usecnt);
1829 atomic_dec(&cq->usecnt);
1830 }
1831 return err;
1832}
1833EXPORT_SYMBOL(ib_destroy_wq);
1834
1835/**
1836 * ib_modify_wq - Modifies the specified WQ.
1837 * @wq: The WQ to modify.
1838 * @wq_attr: On input, specifies the WQ attributes to modify.
1839 * @wq_attr_mask: A bit-mask used to specify which attributes of the WQ
1840 * are being modified.
1841 * On output, the current values of selected WQ attributes are returned.
1842 */
1843int ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr,
1844 u32 wq_attr_mask)
1845{
1846 int err;
1847
1848 if (!wq->device->modify_wq)
1849 return -ENOSYS;
1850
1851 err = wq->device->modify_wq(wq, wq_attr, wq_attr_mask, NULL);
1852 return err;
1853}
1854EXPORT_SYMBOL(ib_modify_wq);
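/*
 * Example (illustrative sketch, not part of this file): moving a freshly
 * created WQ from RESET to ready so it can start receiving.
 */
static int example_wq_to_ready(struct ib_wq *wq)
{
	struct ib_wq_attr attr = { .wq_state = IB_WQS_RDY };

	return ib_modify_wq(wq, &attr, IB_WQ_STATE);
}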
1855
Yishai Hadas6d397862016-05-23 15:20:51 +03001856/*
1857 * ib_create_rwq_ind_table - Creates a RQ Indirection Table.
1858 * @device: The device on which to create the rwq indirection table.
1859 * @ib_rwq_ind_table_init_attr: A list of initial attributes required to
1860 * create the Indirection Table.
1861 *
1862 * Note: The lifetime of ib_rwq_ind_table_init_attr->ind_tbl must not be shorter
1863 * than that of the created ib_rwq_ind_table object, and the caller is responsible
1864 * for its memory allocation/free.
1865 */
1866struct ib_rwq_ind_table *ib_create_rwq_ind_table(struct ib_device *device,
1867 struct ib_rwq_ind_table_init_attr *init_attr)
1868{
1869 struct ib_rwq_ind_table *rwq_ind_table;
1870 int i;
1871 u32 table_size;
1872
1873 if (!device->create_rwq_ind_table)
1874 return ERR_PTR(-ENOSYS);
1875
1876 table_size = (1 << init_attr->log_ind_tbl_size);
1877 rwq_ind_table = device->create_rwq_ind_table(device,
1878 init_attr, NULL);
1879 if (IS_ERR(rwq_ind_table))
1880 return rwq_ind_table;
1881
1882 rwq_ind_table->ind_tbl = init_attr->ind_tbl;
1883 rwq_ind_table->log_ind_tbl_size = init_attr->log_ind_tbl_size;
1884 rwq_ind_table->device = device;
1885 rwq_ind_table->uobject = NULL;
1886 atomic_set(&rwq_ind_table->usecnt, 0);
1887
1888 for (i = 0; i < table_size; i++)
1889 atomic_inc(&rwq_ind_table->ind_tbl[i]->usecnt);
1890
1891 return rwq_ind_table;
1892}
1893EXPORT_SYMBOL(ib_create_rwq_ind_table);
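/*
 * Example (illustrative sketch, not part of this file): building a
 * four-entry RSS indirection table from already created WQs. The wqs[]
 * array is assumed to stay allocated at least as long as the table, as
 * noted in the comment above.
 */
static struct ib_rwq_ind_table *example_create_ind_tbl(struct ib_device *dev,
							struct ib_wq **wqs)
{
	struct ib_rwq_ind_table_init_attr attr = {
		.log_ind_tbl_size = 2,		/* 1 << 2 = 4 WQs */
		.ind_tbl	  = wqs,
	};

	return ib_create_rwq_ind_table(dev, &attr);
}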
1894
1895/*
1896 * ib_destroy_rwq_ind_table - Destroys the specified Indirection Table.
1897 * @wq_ind_table: The Indirection Table to destroy.
1898*/
1899int ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *rwq_ind_table)
1900{
1901 int err, i;
1902 u32 table_size = (1 << rwq_ind_table->log_ind_tbl_size);
1903 struct ib_wq **ind_tbl = rwq_ind_table->ind_tbl;
1904
1905 if (atomic_read(&rwq_ind_table->usecnt))
1906 return -EBUSY;
1907
1908 err = rwq_ind_table->device->destroy_rwq_ind_table(rwq_ind_table);
1909 if (!err) {
1910 for (i = 0; i < table_size; i++)
1911 atomic_dec(&ind_tbl[i]->usecnt);
1912 }
1913
1914 return err;
1915}
1916EXPORT_SYMBOL(ib_destroy_rwq_ind_table);
1917
Hadar Hen Zion319a4412013-08-07 14:01:59 +03001918struct ib_flow *ib_create_flow(struct ib_qp *qp,
1919 struct ib_flow_attr *flow_attr,
1920 int domain)
1921{
1922 struct ib_flow *flow_id;
1923 if (!qp->device->create_flow)
1924 return ERR_PTR(-ENOSYS);
1925
1926 flow_id = qp->device->create_flow(qp, flow_attr, domain);
Mark Bloch8ecc7982016-10-27 16:36:30 +03001927 if (!IS_ERR(flow_id)) {
Hadar Hen Zion319a4412013-08-07 14:01:59 +03001928 atomic_inc(&qp->usecnt);
Mark Bloch8ecc7982016-10-27 16:36:30 +03001929 flow_id->qp = qp;
1930 }
Hadar Hen Zion319a4412013-08-07 14:01:59 +03001931 return flow_id;
1932}
1933EXPORT_SYMBOL(ib_create_flow);
1934
1935int ib_destroy_flow(struct ib_flow *flow_id)
1936{
1937 int err;
1938 struct ib_qp *qp = flow_id->qp;
1939
1940 err = qp->device->destroy_flow(flow_id);
1941 if (!err)
1942 atomic_dec(&qp->usecnt);
1943 return err;
1944}
1945EXPORT_SYMBOL(ib_destroy_flow);
Sagi Grimberg1b01d332014-02-23 14:19:05 +02001946
1947int ib_check_mr_status(struct ib_mr *mr, u32 check_mask,
1948 struct ib_mr_status *mr_status)
1949{
1950 return mr->device->check_mr_status ?
1951 mr->device->check_mr_status(mr, check_mask, mr_status) : -ENOSYS;
1952}
1953EXPORT_SYMBOL(ib_check_mr_status);
Sagi Grimberg4c67e2b2015-10-13 19:11:24 +03001954
Eli Cohen50174a72016-03-11 22:58:38 +02001955int ib_set_vf_link_state(struct ib_device *device, int vf, u8 port,
1956 int state)
1957{
1958 if (!device->set_vf_link_state)
1959 return -ENOSYS;
1960
1961 return device->set_vf_link_state(device, vf, port, state);
1962}
1963EXPORT_SYMBOL(ib_set_vf_link_state);
1964
1965int ib_get_vf_config(struct ib_device *device, int vf, u8 port,
1966 struct ifla_vf_info *info)
1967{
1968 if (!device->get_vf_config)
1969 return -ENOSYS;
1970
1971 return device->get_vf_config(device, vf, port, info);
1972}
1973EXPORT_SYMBOL(ib_get_vf_config);
1974
1975int ib_get_vf_stats(struct ib_device *device, int vf, u8 port,
1976 struct ifla_vf_stats *stats)
1977{
1978 if (!device->get_vf_stats)
1979 return -ENOSYS;
1980
1981 return device->get_vf_stats(device, vf, port, stats);
1982}
1983EXPORT_SYMBOL(ib_get_vf_stats);
1984
1985int ib_set_vf_guid(struct ib_device *device, int vf, u8 port, u64 guid,
1986 int type)
1987{
1988 if (!device->set_vf_guid)
1989 return -ENOSYS;
1990
1991 return device->set_vf_guid(device, vf, port, guid, type);
1992}
1993EXPORT_SYMBOL(ib_set_vf_guid);
1994
Sagi Grimberg4c67e2b2015-10-13 19:11:24 +03001995/**
1996 * ib_map_mr_sg() - Map the largest prefix of a dma mapped SG list
1997 * and set it in the memory region.
1998 * @mr: memory region
1999 * @sg: dma mapped scatterlist
2000 * @sg_nents: number of entries in sg
Christoph Hellwigff2ba992016-05-03 18:01:04 +02002001 * @sg_offset: offset in bytes into sg
Sagi Grimberg4c67e2b2015-10-13 19:11:24 +03002002 * @page_size: page vector desired page size
2003 *
2004 * Constraints:
2005 * - The first sg element is allowed to have an offset.
Bart Van Assche52746122016-09-26 09:09:42 -07002006 * - Each sg element must either be aligned to page_size or virtually
2007 * contiguous to the previous element. In case an sg element has a
2008 * non-contiguous offset, the mapping prefix will not include it.
Sagi Grimberg4c67e2b2015-10-13 19:11:24 +03002009 * - The last sg element is allowed to have length less than page_size.
2010 * - If sg_nents total byte length exceeds the mr max_num_sg * page_size
2011 * then only max_num_sg entries will be mapped.
Bart Van Assche52746122016-09-26 09:09:42 -07002012 * - If the MR was allocated with type IB_MR_TYPE_SG_GAPS, none of these
Sagi Grimbergf5aa9152016-02-29 19:07:32 +02002013 * constraints holds and the page_size argument is ignored.
Sagi Grimberg4c67e2b2015-10-13 19:11:24 +03002014 *
2015 * Returns the number of sg elements that were mapped to the memory region.
2016 *
2017 * After this completes successfully, the memory region
2018 * is ready for registration.
2019 */
Christoph Hellwigff2ba992016-05-03 18:01:04 +02002020int ib_map_mr_sg(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
Bart Van Assche9aa8b322016-05-12 10:49:15 -07002021 unsigned int *sg_offset, unsigned int page_size)
Sagi Grimberg4c67e2b2015-10-13 19:11:24 +03002022{
2023 if (unlikely(!mr->device->map_mr_sg))
2024 return -ENOSYS;
2025
2026 mr->page_size = page_size;
2027
Christoph Hellwigff2ba992016-05-03 18:01:04 +02002028 return mr->device->map_mr_sg(mr, sg, sg_nents, sg_offset);
Sagi Grimberg4c67e2b2015-10-13 19:11:24 +03002029}
2030EXPORT_SYMBOL(ib_map_mr_sg);
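/*
 * Example (illustrative sketch, not part of this file): mapping a
 * DMA-mapped scatterlist onto an MR and posting the registration work
 * request. The QP, sg list and key management are hypothetical; real
 * consumers typically also rotate the key with ib_update_fast_reg_key().
 */
static int example_register_mr(struct ib_qp *qp, struct ib_mr *mr,
			       struct scatterlist *sg, int sg_nents)
{
	struct ib_reg_wr reg = {};
	struct ib_send_wr *bad_wr;
	int n;

	n = ib_map_mr_sg(mr, sg, sg_nents, NULL, PAGE_SIZE);
	if (n < sg_nents)
		return n < 0 ? n : -EINVAL;

	reg.wr.opcode = IB_WR_REG_MR;
	reg.mr	      = mr;
	reg.key	      = mr->rkey;
	reg.access    = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ;

	return ib_post_send(qp, &reg.wr, &bad_wr);
}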
2031
2032/**
2033 * ib_sg_to_pages() - Convert the largest prefix of a sg list
2034 * to a page vector
2035 * @mr: memory region
2036 * @sgl: dma mapped scatterlist
2037 * @sg_nents: number of entries in sg
Bart Van Assche9aa8b322016-05-12 10:49:15 -07002038 * @sg_offset_p: IN: start offset in bytes into sg
2039 * OUT: offset in bytes for element n of the sg of the first
2040 * byte that has not been processed where n is the return
2041 * value of this function.
Sagi Grimberg4c67e2b2015-10-13 19:11:24 +03002042 * @set_page: driver page assignment function pointer
2043 *
Bart Van Assche8f5ba102015-12-03 16:04:17 -08002044 * Core service helper for drivers to convert the largest
Sagi Grimberg4c67e2b2015-10-13 19:11:24 +03002045 * prefix of given sg list to a page vector. The sg list
2046 * prefix converted is the prefix that meet the requirements
2047 * of ib_map_mr_sg.
2048 *
2049 * Returns the number of sg elements that were assigned to
2050 * a page vector.
2051 */
Christoph Hellwigff2ba992016-05-03 18:01:04 +02002052int ib_sg_to_pages(struct ib_mr *mr, struct scatterlist *sgl, int sg_nents,
Bart Van Assche9aa8b322016-05-12 10:49:15 -07002053 unsigned int *sg_offset_p, int (*set_page)(struct ib_mr *, u64))
Sagi Grimberg4c67e2b2015-10-13 19:11:24 +03002054{
2055 struct scatterlist *sg;
Bart Van Asscheb6aeb982015-12-29 10:45:03 +01002056 u64 last_end_dma_addr = 0;
Bart Van Assche9aa8b322016-05-12 10:49:15 -07002057 unsigned int sg_offset = sg_offset_p ? *sg_offset_p : 0;
Sagi Grimberg4c67e2b2015-10-13 19:11:24 +03002058 unsigned int last_page_off = 0;
2059 u64 page_mask = ~((u64)mr->page_size - 1);
Bart Van Assche8f5ba102015-12-03 16:04:17 -08002060 int i, ret;
Sagi Grimberg4c67e2b2015-10-13 19:11:24 +03002061
Bart Van Assche9aa8b322016-05-12 10:49:15 -07002062 if (unlikely(sg_nents <= 0 || sg_offset > sg_dma_len(&sgl[0])))
2063 return -EINVAL;
2064
Christoph Hellwigff2ba992016-05-03 18:01:04 +02002065 mr->iova = sg_dma_address(&sgl[0]) + sg_offset;
Sagi Grimberg4c67e2b2015-10-13 19:11:24 +03002066 mr->length = 0;
2067
2068 for_each_sg(sgl, sg, sg_nents, i) {
Christoph Hellwigff2ba992016-05-03 18:01:04 +02002069 u64 dma_addr = sg_dma_address(sg) + sg_offset;
Bart Van Assche9aa8b322016-05-12 10:49:15 -07002070 u64 prev_addr = dma_addr;
Christoph Hellwigff2ba992016-05-03 18:01:04 +02002071 unsigned int dma_len = sg_dma_len(sg) - sg_offset;
Sagi Grimberg4c67e2b2015-10-13 19:11:24 +03002072 u64 end_dma_addr = dma_addr + dma_len;
2073 u64 page_addr = dma_addr & page_mask;
2074
Bart Van Assche8f5ba102015-12-03 16:04:17 -08002075 /*
2076 * For the second and later elements, check whether either the
2077 * end of element i-1 or the start of element i is not aligned
2078 * on a page boundary.
2079 */
2080 if (i && (last_page_off != 0 || page_addr != dma_addr)) {
2081 /* Stop mapping if there is a gap. */
2082 if (last_end_dma_addr != dma_addr)
2083 break;
Sagi Grimberg4c67e2b2015-10-13 19:11:24 +03002084
Bart Van Assche8f5ba102015-12-03 16:04:17 -08002085 /*
2086 * Coalesce this element with the last. If it is small
2087 * enough just update mr->length. Otherwise start
2088 * mapping from the next page.
2089 */
2090 goto next_page;
Sagi Grimberg4c67e2b2015-10-13 19:11:24 +03002091 }
2092
2093 do {
Bart Van Assche8f5ba102015-12-03 16:04:17 -08002094 ret = set_page(mr, page_addr);
Bart Van Assche9aa8b322016-05-12 10:49:15 -07002095 if (unlikely(ret < 0)) {
2096 sg_offset = prev_addr - sg_dma_address(sg);
2097 mr->length += prev_addr - dma_addr;
2098 if (sg_offset_p)
2099 *sg_offset_p = sg_offset;
2100 return i || sg_offset ? i : ret;
2101 }
2102 prev_addr = page_addr;
Bart Van Assche8f5ba102015-12-03 16:04:17 -08002103next_page:
Sagi Grimberg4c67e2b2015-10-13 19:11:24 +03002104 page_addr += mr->page_size;
2105 } while (page_addr < end_dma_addr);
2106
2107 mr->length += dma_len;
2108 last_end_dma_addr = end_dma_addr;
Sagi Grimberg4c67e2b2015-10-13 19:11:24 +03002109 last_page_off = end_dma_addr & ~page_mask;
Christoph Hellwigff2ba992016-05-03 18:01:04 +02002110
2111 sg_offset = 0;
Sagi Grimberg4c67e2b2015-10-13 19:11:24 +03002112 }
2113
Bart Van Assche9aa8b322016-05-12 10:49:15 -07002114 if (sg_offset_p)
2115 *sg_offset_p = 0;
Sagi Grimberg4c67e2b2015-10-13 19:11:24 +03002116 return i;
2117}
2118EXPORT_SYMBOL(ib_sg_to_pages);
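/*
 * Example (illustrative sketch, not part of this file): how a provider's
 * map_mr_sg handler typically wraps ib_sg_to_pages(). The example_mr
 * structure, its page array and the embedding of struct ib_mr are all
 * hypothetical.
 */
struct example_mr {
	struct ib_mr	ibmr;
	u64		*pages;
	u32		npages;
	u32		max_pages;
};

static int example_set_page(struct ib_mr *ibmr, u64 addr)
{
	struct example_mr *mr = container_of(ibmr, struct example_mr, ibmr);

	if (mr->npages == mr->max_pages)
		return -ENOMEM;

	mr->pages[mr->npages++] = addr;
	return 0;
}

static int example_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
			     int sg_nents, unsigned int *sg_offset)
{
	struct example_mr *mr = container_of(ibmr, struct example_mr, ibmr);

	mr->npages = 0;
	return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset,
			      example_set_page);
}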
Steve Wise765d6772016-02-17 08:15:41 -08002119
2120struct ib_drain_cqe {
2121 struct ib_cqe cqe;
2122 struct completion done;
2123};
2124
2125static void ib_drain_qp_done(struct ib_cq *cq, struct ib_wc *wc)
2126{
2127 struct ib_drain_cqe *cqe = container_of(wc->wr_cqe, struct ib_drain_cqe,
2128 cqe);
2129
2130 complete(&cqe->done);
2131}
2132
2133/*
2134 * Post a WR and block until its completion is reaped for the SQ.
2135 */
2136static void __ib_drain_sq(struct ib_qp *qp)
2137{
Bart Van Asschef039f442017-02-14 10:56:35 -08002138 struct ib_cq *cq = qp->send_cq;
Steve Wise765d6772016-02-17 08:15:41 -08002139 struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
2140 struct ib_drain_cqe sdrain;
2141 struct ib_send_wr swr = {}, *bad_swr;
2142 int ret;
2143
Steve Wise765d6772016-02-17 08:15:41 -08002144 swr.wr_cqe = &sdrain.cqe;
2145 sdrain.cqe.done = ib_drain_qp_done;
2146 init_completion(&sdrain.done);
2147
2148 ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
2149 if (ret) {
2150 WARN_ONCE(ret, "failed to drain send queue: %d\n", ret);
2151 return;
2152 }
2153
2154 ret = ib_post_send(qp, &swr, &bad_swr);
2155 if (ret) {
2156 WARN_ONCE(ret, "failed to drain send queue: %d\n", ret);
2157 return;
2158 }
2159
Bart Van Asschef039f442017-02-14 10:56:35 -08002160 if (cq->poll_ctx == IB_POLL_DIRECT)
2161 while (wait_for_completion_timeout(&sdrain.done, HZ / 10) <= 0)
2162 ib_process_cq_direct(cq, -1);
2163 else
2164 wait_for_completion(&sdrain.done);
Steve Wise765d6772016-02-17 08:15:41 -08002165}
2166
2167/*
2168 * Post a WR and block until its completion is reaped for the RQ.
2169 */
2170static void __ib_drain_rq(struct ib_qp *qp)
2171{
Bart Van Asschef039f442017-02-14 10:56:35 -08002172 struct ib_cq *cq = qp->recv_cq;
Steve Wise765d6772016-02-17 08:15:41 -08002173 struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
2174 struct ib_drain_cqe rdrain;
2175 struct ib_recv_wr rwr = {}, *bad_rwr;
2176 int ret;
2177
Steve Wise765d6772016-02-17 08:15:41 -08002178 rwr.wr_cqe = &rdrain.cqe;
2179 rdrain.cqe.done = ib_drain_qp_done;
2180 init_completion(&rdrain.done);
2181
2182 ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
2183 if (ret) {
2184 WARN_ONCE(ret, "failed to drain recv queue: %d\n", ret);
2185 return;
2186 }
2187
2188 ret = ib_post_recv(qp, &rwr, &bad_rwr);
2189 if (ret) {
2190 WARN_ONCE(ret, "failed to drain recv queue: %d\n", ret);
2191 return;
2192 }
2193
Bart Van Asschef039f442017-02-14 10:56:35 -08002194 if (cq->poll_ctx == IB_POLL_DIRECT)
2195 while (wait_for_completion_timeout(&rdrain.done, HZ / 10) <= 0)
2196 ib_process_cq_direct(cq, -1);
2197 else
2198 wait_for_completion(&rdrain.done);
Steve Wise765d6772016-02-17 08:15:41 -08002199}
2200
2201/**
2202 * ib_drain_sq() - Block until all SQ CQEs have been consumed by the
2203 * application.
2204 * @qp: queue pair to drain
2205 *
2206 * If the device has a provider-specific drain function, then
2207 * call that. Otherwise call the generic drain function
2208 * __ib_drain_sq().
2209 *
2210 * The caller must:
2211 *
2212 * ensure there is room in the CQ and SQ for the drain work request and
2213 * completion.
2214 *
Bart Van Asschef039f442017-02-14 10:56:35 -08002215 * allocate the CQ using ib_alloc_cq().
Steve Wise765d6772016-02-17 08:15:41 -08002216 *
2217 * ensure that there are no other contexts that are posting WRs concurrently.
2218 * Otherwise the drain is not guaranteed.
2219 */
2220void ib_drain_sq(struct ib_qp *qp)
2221{
2222 if (qp->device->drain_sq)
2223 qp->device->drain_sq(qp);
2224 else
2225 __ib_drain_sq(qp);
2226}
2227EXPORT_SYMBOL(ib_drain_sq);
2228
2229/**
2230 * ib_drain_rq() - Block until all RQ CQEs have been consumed by the
2231 * application.
2232 * @qp: queue pair to drain
2233 *
2234 * If the device has a provider-specific drain function, then
2235 * call that. Otherwise call the generic drain function
2236 * __ib_drain_rq().
2237 *
2238 * The caller must:
2239 *
2240 * ensure there is room in the CQ and RQ for the drain work request and
2241 * completion.
2242 *
Bart Van Asschef039f442017-02-14 10:56:35 -08002243 * allocate the CQ using ib_alloc_cq().
Steve Wise765d6772016-02-17 08:15:41 -08002244 *
2245 * ensure that there are no other contexts that are posting WRs concurrently.
2246 * Otherwise the drain is not guaranteed.
2247 */
2248void ib_drain_rq(struct ib_qp *qp)
2249{
2250 if (qp->device->drain_rq)
2251 qp->device->drain_rq(qp);
2252 else
2253 __ib_drain_rq(qp);
2254}
2255EXPORT_SYMBOL(ib_drain_rq);
2256
2257/**
2258 * ib_drain_qp() - Block until all CQEs have been consumed by the
2259 * application on both the RQ and SQ.
2260 * @qp: queue pair to drain
2261 *
2262 * The caller must:
2263 *
2264 * ensure there is room in the CQ(s), SQ, and RQ for drain work requests
2265 * and completions.
2266 *
Bart Van Asschef039f442017-02-14 10:56:35 -08002267 * allocate the CQs using ib_alloc_cq().
Steve Wise765d6772016-02-17 08:15:41 -08002268 *
2269 * ensure that there are no other contexts that are posting WRs concurrently.
2270 * Otherwise the drain is not guaranteed.
2271 */
2272void ib_drain_qp(struct ib_qp *qp)
2273{
2274 ib_drain_sq(qp);
Sagi Grimberg42235f82016-04-26 17:55:38 +03002275 if (!qp->srq)
2276 ib_drain_rq(qp);
Steve Wise765d6772016-02-17 08:15:41 -08002277}
2278EXPORT_SYMBOL(ib_drain_qp);
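/*
 * Example (illustrative sketch, not part of this file): typical teardown
 * order for a connected QP whose CQs were allocated with ib_alloc_cq().
 * Error handling is omitted and the surrounding context is hypothetical.
 */
static void example_teardown_qp(struct ib_qp *qp, struct ib_cq *cq)
{
	ib_drain_qp(qp);	/* flush any outstanding SQ/RQ work requests */
	ib_destroy_qp(qp);
	ib_free_cq(cq);
}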