/*
 * Copyright (c) 2004 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2004 Infinicon Corporation. All rights reserved.
 * Copyright (c) 2004 Intel Corporation. All rights reserved.
 * Copyright (c) 2004 Topspin Corporation. All rights reserved.
 * Copyright (c) 2004 Voltaire Corporation. All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005, 2006 Cisco Systems. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <net/addrconf.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_cache.h>
#include <rdma/ib_addr.h>
#include <rdma/rw.h>

#include "core_priv.h"

static const char * const ib_events[] = {
	[IB_EVENT_CQ_ERR] = "CQ error",
	[IB_EVENT_QP_FATAL] = "QP fatal error",
	[IB_EVENT_QP_REQ_ERR] = "QP request error",
	[IB_EVENT_QP_ACCESS_ERR] = "QP access error",
	[IB_EVENT_COMM_EST] = "communication established",
	[IB_EVENT_SQ_DRAINED] = "send queue drained",
	[IB_EVENT_PATH_MIG] = "path migration successful",
	[IB_EVENT_PATH_MIG_ERR] = "path migration error",
	[IB_EVENT_DEVICE_FATAL] = "device fatal error",
	[IB_EVENT_PORT_ACTIVE] = "port active",
	[IB_EVENT_PORT_ERR] = "port error",
	[IB_EVENT_LID_CHANGE] = "LID change",
	[IB_EVENT_PKEY_CHANGE] = "P_key change",
	[IB_EVENT_SM_CHANGE] = "SM change",
	[IB_EVENT_SRQ_ERR] = "SRQ error",
	[IB_EVENT_SRQ_LIMIT_REACHED] = "SRQ limit reached",
	[IB_EVENT_QP_LAST_WQE_REACHED] = "last WQE reached",
	[IB_EVENT_CLIENT_REREGISTER] = "client reregister",
	[IB_EVENT_GID_CHANGE] = "GID changed",
};

const char *__attribute_const__ ib_event_msg(enum ib_event_type event)
{
	size_t index = event;

	return (index < ARRAY_SIZE(ib_events) && ib_events[index]) ?
			ib_events[index] : "unrecognized event";
}
EXPORT_SYMBOL(ib_event_msg);

static const char * const wc_statuses[] = {
	[IB_WC_SUCCESS] = "success",
	[IB_WC_LOC_LEN_ERR] = "local length error",
	[IB_WC_LOC_QP_OP_ERR] = "local QP operation error",
	[IB_WC_LOC_EEC_OP_ERR] = "local EE context operation error",
	[IB_WC_LOC_PROT_ERR] = "local protection error",
	[IB_WC_WR_FLUSH_ERR] = "WR flushed",
	[IB_WC_MW_BIND_ERR] = "memory management operation error",
	[IB_WC_BAD_RESP_ERR] = "bad response error",
	[IB_WC_LOC_ACCESS_ERR] = "local access error",
	[IB_WC_REM_INV_REQ_ERR] = "invalid request error",
	[IB_WC_REM_ACCESS_ERR] = "remote access error",
	[IB_WC_REM_OP_ERR] = "remote operation error",
	[IB_WC_RETRY_EXC_ERR] = "transport retry counter exceeded",
	[IB_WC_RNR_RETRY_EXC_ERR] = "RNR retry counter exceeded",
	[IB_WC_LOC_RDD_VIOL_ERR] = "local RDD violation error",
	[IB_WC_REM_INV_RD_REQ_ERR] = "remote invalid RD request",
	[IB_WC_REM_ABORT_ERR] = "operation aborted",
	[IB_WC_INV_EECN_ERR] = "invalid EE context number",
	[IB_WC_INV_EEC_STATE_ERR] = "invalid EE context state",
	[IB_WC_FATAL_ERR] = "fatal error",
	[IB_WC_RESP_TIMEOUT_ERR] = "response timeout error",
	[IB_WC_GENERAL_ERR] = "general error",
};

const char *__attribute_const__ ib_wc_status_msg(enum ib_wc_status status)
{
	size_t index = status;

	return (index < ARRAY_SIZE(wc_statuses) && wc_statuses[index]) ?
			wc_statuses[index] : "unrecognized status";
}
EXPORT_SYMBOL(ib_wc_status_msg);

__attribute_const__ int ib_rate_to_mult(enum ib_rate rate)
{
	switch (rate) {
	case IB_RATE_2_5_GBPS: return 1;
	case IB_RATE_5_GBPS:   return 2;
	case IB_RATE_10_GBPS:  return 4;
	case IB_RATE_20_GBPS:  return 8;
	case IB_RATE_30_GBPS:  return 12;
	case IB_RATE_40_GBPS:  return 16;
	case IB_RATE_60_GBPS:  return 24;
	case IB_RATE_80_GBPS:  return 32;
	case IB_RATE_120_GBPS: return 48;
	default:	       return -1;
	}
}
EXPORT_SYMBOL(ib_rate_to_mult);

__attribute_const__ enum ib_rate mult_to_ib_rate(int mult)
{
	switch (mult) {
	case 1:  return IB_RATE_2_5_GBPS;
	case 2:  return IB_RATE_5_GBPS;
	case 4:  return IB_RATE_10_GBPS;
	case 8:  return IB_RATE_20_GBPS;
	case 12: return IB_RATE_30_GBPS;
	case 16: return IB_RATE_40_GBPS;
	case 24: return IB_RATE_60_GBPS;
	case 32: return IB_RATE_80_GBPS;
	case 48: return IB_RATE_120_GBPS;
	default: return IB_RATE_PORT_CURRENT;
	}
}
EXPORT_SYMBOL(mult_to_ib_rate);

__attribute_const__ int ib_rate_to_mbps(enum ib_rate rate)
{
	switch (rate) {
	case IB_RATE_2_5_GBPS: return 2500;
	case IB_RATE_5_GBPS:   return 5000;
	case IB_RATE_10_GBPS:  return 10000;
	case IB_RATE_20_GBPS:  return 20000;
	case IB_RATE_30_GBPS:  return 30000;
	case IB_RATE_40_GBPS:  return 40000;
	case IB_RATE_60_GBPS:  return 60000;
	case IB_RATE_80_GBPS:  return 80000;
	case IB_RATE_120_GBPS: return 120000;
	case IB_RATE_14_GBPS:  return 14062;
	case IB_RATE_56_GBPS:  return 56250;
	case IB_RATE_112_GBPS: return 112500;
	case IB_RATE_168_GBPS: return 168750;
	case IB_RATE_25_GBPS:  return 25781;
	case IB_RATE_100_GBPS: return 103125;
	case IB_RATE_200_GBPS: return 206250;
	case IB_RATE_300_GBPS: return 309375;
	default:	       return -1;
	}
}
EXPORT_SYMBOL(ib_rate_to_mbps);

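/*
 * Example (informational, derived from the tables above): IB_RATE_40_GBPS is
 * 16 times the 2.5 Gb/s base rate, so ib_rate_to_mult(IB_RATE_40_GBPS)
 * returns 16 and ib_rate_to_mbps(IB_RATE_40_GBPS) returns 40000.
 */
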
__attribute_const__ enum rdma_transport_type
rdma_node_get_transport(enum rdma_node_type node_type)
{
	switch (node_type) {
	case RDMA_NODE_IB_CA:
	case RDMA_NODE_IB_SWITCH:
	case RDMA_NODE_IB_ROUTER:
		return RDMA_TRANSPORT_IB;
	case RDMA_NODE_RNIC:
		return RDMA_TRANSPORT_IWARP;
	case RDMA_NODE_USNIC:
		return RDMA_TRANSPORT_USNIC;
	case RDMA_NODE_USNIC_UDP:
		return RDMA_TRANSPORT_USNIC_UDP;
	default:
		BUG();
		return 0;
	}
}
EXPORT_SYMBOL(rdma_node_get_transport);

enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device, u8 port_num)
{
	if (device->get_link_layer)
		return device->get_link_layer(device, port_num);

	switch (rdma_node_get_transport(device->node_type)) {
	case RDMA_TRANSPORT_IB:
		return IB_LINK_LAYER_INFINIBAND;
	case RDMA_TRANSPORT_IWARP:
	case RDMA_TRANSPORT_USNIC:
	case RDMA_TRANSPORT_USNIC_UDP:
		return IB_LINK_LAYER_ETHERNET;
	default:
		return IB_LINK_LAYER_UNSPECIFIED;
	}
}
EXPORT_SYMBOL(rdma_port_get_link_layer);

/* Protection domains */

/**
 * ib_alloc_pd - Allocates an unused protection domain.
 * @device: The device on which to allocate the protection domain.
 *
 * A protection domain object provides an association between QPs, shared
 * receive queues, address handles, memory regions, and memory windows.
 *
 * Every PD has a local_dma_lkey which can be used as the lkey value for local
 * memory operations.
 */
struct ib_pd *__ib_alloc_pd(struct ib_device *device, unsigned int flags,
			    const char *caller)
{
	struct ib_pd *pd;
	int mr_access_flags = 0;

	pd = device->alloc_pd(device, NULL, NULL);
	if (IS_ERR(pd))
		return pd;

	pd->device = device;
	pd->uobject = NULL;
	pd->__internal_mr = NULL;
	atomic_set(&pd->usecnt, 0);
	pd->flags = flags;

	if (device->attrs.device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY)
		pd->local_dma_lkey = device->local_dma_lkey;
	else
		mr_access_flags |= IB_ACCESS_LOCAL_WRITE;

	if (flags & IB_PD_UNSAFE_GLOBAL_RKEY) {
		pr_warn("%s: enabling unsafe global rkey\n", caller);
		mr_access_flags |= IB_ACCESS_REMOTE_READ | IB_ACCESS_REMOTE_WRITE;
	}

	if (mr_access_flags) {
		struct ib_mr *mr;

		mr = pd->device->get_dma_mr(pd, mr_access_flags);
		if (IS_ERR(mr)) {
			ib_dealloc_pd(pd);
			return ERR_CAST(mr);
		}

		mr->device = pd->device;
		mr->pd = pd;
		mr->uobject = NULL;
		mr->need_inval = false;

		pd->__internal_mr = mr;

		if (!(device->attrs.device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY))
			pd->local_dma_lkey = pd->__internal_mr->lkey;

		if (flags & IB_PD_UNSAFE_GLOBAL_RKEY)
			pd->unsafe_global_rkey = pd->__internal_mr->rkey;
	}

	return pd;
}
EXPORT_SYMBOL(__ib_alloc_pd);

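/*
 * Usage sketch: kernel consumers normally call the ib_alloc_pd(device, flags)
 * wrapper from <rdma/ib_verbs.h>, which supplies the module name as @caller.
 * Illustrative only, error handling elided:
 *
 *	struct ib_pd *pd = ib_alloc_pd(device, 0);
 *	if (IS_ERR(pd))
 *		return PTR_ERR(pd);
 */
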
/**
 * ib_dealloc_pd - Deallocates a protection domain.
 * @pd: The protection domain to deallocate.
 *
 * It is an error to call this function while any resources in the pd still
 * exist.  The caller is responsible to synchronously destroy them and
 * guarantee no new allocations will happen.
 */
void ib_dealloc_pd(struct ib_pd *pd)
{
	int ret;

	if (pd->__internal_mr) {
		ret = pd->device->dereg_mr(pd->__internal_mr);
		WARN_ON(ret);
		pd->__internal_mr = NULL;
	}

	/* uverbs manipulates usecnt with proper locking, while the kabi
	   requires the caller to guarantee we can't race here. */
	WARN_ON(atomic_read(&pd->usecnt));

	/* Making dealloc_pd a void return is a WIP, no driver should return
	   an error here. */
	ret = pd->device->dealloc_pd(pd);
	WARN_ONCE(ret, "Infiniband HW driver failed dealloc_pd");
}
EXPORT_SYMBOL(ib_dealloc_pd);

/* Address handles */

struct ib_ah *rdma_create_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr)
{
	struct ib_ah *ah;

	ah = pd->device->create_ah(pd, ah_attr, NULL);

	if (!IS_ERR(ah)) {
		ah->device = pd->device;
		ah->pd = pd;
		ah->uobject = NULL;
		atomic_inc(&pd->usecnt);
	}

	return ah;
}
EXPORT_SYMBOL(rdma_create_ah);

int ib_get_rdma_header_version(const union rdma_network_hdr *hdr)
{
	const struct iphdr *ip4h = (struct iphdr *)&hdr->roce4grh;
	struct iphdr ip4h_checked;
	const struct ipv6hdr *ip6h = (struct ipv6hdr *)&hdr->ibgrh;

	/* If it's IPv6, the version must be 6, otherwise, the first
	 * 20 bytes (before the IPv4 header) are garbled.
	 */
	if (ip6h->version != 6)
		return (ip4h->version == 4) ? 4 : 0;
	/* version may be 6 or 4 because the first 20 bytes could be garbled */

	/* RoCE v2 requires no options, thus header length
	 * must be 5 words
	 */
	if (ip4h->ihl != 5)
		return 6;

	/* Verify checksum.
	 * We can't write on scattered buffers so we need to copy to
	 * temp buffer.
	 */
	memcpy(&ip4h_checked, ip4h, sizeof(ip4h_checked));
	ip4h_checked.check = 0;
	ip4h_checked.check = ip_fast_csum((u8 *)&ip4h_checked, 5);
	/* if IPv4 header checksum is OK, believe it */
	if (ip4h->check == ip4h_checked.check)
		return 4;
	return 6;
}
EXPORT_SYMBOL(ib_get_rdma_header_version);

static enum rdma_network_type ib_get_net_type_by_grh(struct ib_device *device,
						     u8 port_num,
						     const struct ib_grh *grh)
{
	int grh_version;

	if (rdma_protocol_ib(device, port_num))
		return RDMA_NETWORK_IB;

	grh_version = ib_get_rdma_header_version((union rdma_network_hdr *)grh);

	if (grh_version == 4)
		return RDMA_NETWORK_IPV4;

	if (grh->next_hdr == IPPROTO_UDP)
		return RDMA_NETWORK_IPV6;

	return RDMA_NETWORK_ROCE_V1;
}

struct find_gid_index_context {
	u16 vlan_id;
	enum ib_gid_type gid_type;
};

static bool find_gid_index(const union ib_gid *gid,
			   const struct ib_gid_attr *gid_attr,
			   void *context)
{
	struct find_gid_index_context *ctx =
		(struct find_gid_index_context *)context;

	if (ctx->gid_type != gid_attr->gid_type)
		return false;

	if ((!!(ctx->vlan_id != 0xffff) == !is_vlan_dev(gid_attr->ndev)) ||
	    (is_vlan_dev(gid_attr->ndev) &&
	     vlan_dev_vlan_id(gid_attr->ndev) != ctx->vlan_id))
		return false;

	return true;
}

static int get_sgid_index_from_eth(struct ib_device *device, u8 port_num,
				   u16 vlan_id, const union ib_gid *sgid,
				   enum ib_gid_type gid_type,
				   u16 *gid_index)
{
	struct find_gid_index_context context = {.vlan_id = vlan_id,
						 .gid_type = gid_type};

	return ib_find_gid_by_filter(device, sgid, port_num, find_gid_index,
				     &context, gid_index);
}

int ib_get_gids_from_rdma_hdr(const union rdma_network_hdr *hdr,
			      enum rdma_network_type net_type,
			      union ib_gid *sgid, union ib_gid *dgid)
{
	struct sockaddr_in src_in;
	struct sockaddr_in dst_in;
	__be32 src_saddr, dst_saddr;

	if (!sgid || !dgid)
		return -EINVAL;

	if (net_type == RDMA_NETWORK_IPV4) {
		memcpy(&src_in.sin_addr.s_addr,
		       &hdr->roce4grh.saddr, 4);
		memcpy(&dst_in.sin_addr.s_addr,
		       &hdr->roce4grh.daddr, 4);
		src_saddr = src_in.sin_addr.s_addr;
		dst_saddr = dst_in.sin_addr.s_addr;
		ipv6_addr_set_v4mapped(src_saddr,
				       (struct in6_addr *)sgid);
		ipv6_addr_set_v4mapped(dst_saddr,
				       (struct in6_addr *)dgid);
		return 0;
	} else if (net_type == RDMA_NETWORK_IPV6 ||
		   net_type == RDMA_NETWORK_IB) {
		*dgid = hdr->ibgrh.dgid;
		*sgid = hdr->ibgrh.sgid;
		return 0;
	} else {
		return -EINVAL;
	}
}
EXPORT_SYMBOL(ib_get_gids_from_rdma_hdr);

int ib_init_ah_from_wc(struct ib_device *device, u8 port_num,
		       const struct ib_wc *wc, const struct ib_grh *grh,
		       struct rdma_ah_attr *ah_attr)
{
	u32 flow_class;
	u16 gid_index;
	int ret;
	enum rdma_network_type net_type = RDMA_NETWORK_IB;
	enum ib_gid_type gid_type = IB_GID_TYPE_IB;
	int hoplimit = 0xff;
	union ib_gid dgid;
	union ib_gid sgid;

	memset(ah_attr, 0, sizeof *ah_attr);
	if (rdma_cap_eth_ah(device, port_num)) {
		if (wc->wc_flags & IB_WC_WITH_NETWORK_HDR_TYPE)
			net_type = wc->network_hdr_type;
		else
			net_type = ib_get_net_type_by_grh(device, port_num, grh);
		gid_type = ib_network_to_gid_type(net_type);
	}
	ret = ib_get_gids_from_rdma_hdr((union rdma_network_hdr *)grh, net_type,
					&sgid, &dgid);
	if (ret)
		return ret;

	if (rdma_protocol_roce(device, port_num)) {
		int if_index = 0;
		u16 vlan_id = wc->wc_flags & IB_WC_WITH_VLAN ?
				wc->vlan_id : 0xffff;
		struct net_device *idev;
		struct net_device *resolved_dev;

		if (!(wc->wc_flags & IB_WC_GRH))
			return -EPROTOTYPE;

		if (!device->get_netdev)
			return -EOPNOTSUPP;

		idev = device->get_netdev(device, port_num);
		if (!idev)
			return -ENODEV;

		ret = rdma_addr_find_l2_eth_by_grh(&dgid, &sgid,
						   ah_attr->dmac,
						   wc->wc_flags & IB_WC_WITH_VLAN ?
						   NULL : &vlan_id,
						   &if_index, &hoplimit);
		if (ret) {
			dev_put(idev);
			return ret;
		}

		resolved_dev = dev_get_by_index(&init_net, if_index);
		if (resolved_dev->flags & IFF_LOOPBACK) {
			dev_put(resolved_dev);
			resolved_dev = idev;
			dev_hold(resolved_dev);
		}
		rcu_read_lock();
		if (resolved_dev != idev && !rdma_is_upper_dev_rcu(idev,
								   resolved_dev))
			ret = -EHOSTUNREACH;
		rcu_read_unlock();
		dev_put(idev);
		dev_put(resolved_dev);
		if (ret)
			return ret;

		ret = get_sgid_index_from_eth(device, port_num, vlan_id,
					      &dgid, gid_type, &gid_index);
		if (ret)
			return ret;
	}

	rdma_ah_set_dlid(ah_attr, wc->slid);
	rdma_ah_set_sl(ah_attr, wc->sl);
	rdma_ah_set_path_bits(ah_attr, wc->dlid_path_bits);
	rdma_ah_set_port_num(ah_attr, port_num);

	if (wc->wc_flags & IB_WC_GRH) {
		if (!rdma_cap_eth_ah(device, port_num)) {
			if (dgid.global.interface_id != cpu_to_be64(IB_SA_WELL_KNOWN_GUID)) {
				ret = ib_find_cached_gid_by_port(device, &dgid,
								 IB_GID_TYPE_IB,
								 port_num, NULL,
								 &gid_index);
				if (ret)
					return ret;
			} else {
				gid_index = 0;
			}
		}

		flow_class = be32_to_cpu(grh->version_tclass_flow);
		rdma_ah_set_grh(ah_attr, &sgid,
				flow_class & 0xFFFFF,
				(u8)gid_index, hoplimit,
				(flow_class >> 20) & 0xFF);

	}
	return 0;
}
EXPORT_SYMBOL(ib_init_ah_from_wc);

struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, const struct ib_wc *wc,
				   const struct ib_grh *grh, u8 port_num)
{
	struct rdma_ah_attr ah_attr;
	int ret;

	ret = ib_init_ah_from_wc(pd->device, port_num, wc, grh, &ah_attr);
	if (ret)
		return ERR_PTR(ret);

	return rdma_create_ah(pd, &ah_attr);
}
EXPORT_SYMBOL(ib_create_ah_from_wc);

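/*
 * Usage sketch: a UD service answering an incoming request can build the
 * reply address handle straight from the receive completion and the GRH at
 * the start of the receive buffer; "grh_buf" below is an illustrative name,
 * error handling elided:
 *
 *	ah = ib_create_ah_from_wc(pd, wc, grh_buf, port_num);
 *	...post the reply using ah, then rdma_destroy_ah(ah)...
 */
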
int rdma_modify_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr)
{
	return ah->device->modify_ah ?
		ah->device->modify_ah(ah, ah_attr) :
		-ENOSYS;
}
EXPORT_SYMBOL(rdma_modify_ah);

int rdma_query_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr)
{
	return ah->device->query_ah ?
		ah->device->query_ah(ah, ah_attr) :
		-ENOSYS;
}
EXPORT_SYMBOL(rdma_query_ah);

int rdma_destroy_ah(struct ib_ah *ah)
{
	struct ib_pd *pd;
	int ret;

	pd = ah->pd;
	ret = ah->device->destroy_ah(ah);
	if (!ret)
		atomic_dec(&pd->usecnt);

	return ret;
}
EXPORT_SYMBOL(rdma_destroy_ah);

/* Shared receive queues */

struct ib_srq *ib_create_srq(struct ib_pd *pd,
			     struct ib_srq_init_attr *srq_init_attr)
{
	struct ib_srq *srq;

	if (!pd->device->create_srq)
		return ERR_PTR(-ENOSYS);

	srq = pd->device->create_srq(pd, srq_init_attr, NULL);

	if (!IS_ERR(srq)) {
		srq->device = pd->device;
		srq->pd = pd;
		srq->uobject = NULL;
		srq->event_handler = srq_init_attr->event_handler;
		srq->srq_context = srq_init_attr->srq_context;
		srq->srq_type = srq_init_attr->srq_type;
		if (srq->srq_type == IB_SRQT_XRC) {
			srq->ext.xrc.xrcd = srq_init_attr->ext.xrc.xrcd;
			srq->ext.xrc.cq = srq_init_attr->ext.xrc.cq;
			atomic_inc(&srq->ext.xrc.xrcd->usecnt);
			atomic_inc(&srq->ext.xrc.cq->usecnt);
		}
		atomic_inc(&pd->usecnt);
		atomic_set(&srq->usecnt, 0);
	}

	return srq;
}
EXPORT_SYMBOL(ib_create_srq);

int ib_modify_srq(struct ib_srq *srq,
		  struct ib_srq_attr *srq_attr,
		  enum ib_srq_attr_mask srq_attr_mask)
{
	return srq->device->modify_srq ?
		srq->device->modify_srq(srq, srq_attr, srq_attr_mask, NULL) :
		-ENOSYS;
}
EXPORT_SYMBOL(ib_modify_srq);

int ib_query_srq(struct ib_srq *srq,
		 struct ib_srq_attr *srq_attr)
{
	return srq->device->query_srq ?
		srq->device->query_srq(srq, srq_attr) : -ENOSYS;
}
EXPORT_SYMBOL(ib_query_srq);

int ib_destroy_srq(struct ib_srq *srq)
{
	struct ib_pd *pd;
	enum ib_srq_type srq_type;
	struct ib_xrcd *uninitialized_var(xrcd);
	struct ib_cq *uninitialized_var(cq);
	int ret;

	if (atomic_read(&srq->usecnt))
		return -EBUSY;

	pd = srq->pd;
	srq_type = srq->srq_type;
	if (srq_type == IB_SRQT_XRC) {
		xrcd = srq->ext.xrc.xrcd;
		cq = srq->ext.xrc.cq;
	}

	ret = srq->device->destroy_srq(srq);
	if (!ret) {
		atomic_dec(&pd->usecnt);
		if (srq_type == IB_SRQT_XRC) {
			atomic_dec(&xrcd->usecnt);
			atomic_dec(&cq->usecnt);
		}
	}

	return ret;
}
EXPORT_SYMBOL(ib_destroy_srq);

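/*
 * Usage sketch (illustrative sizes, error handling elided): an SRQ is created
 * against a PD and then shared by several QPs through qp_init_attr.srq:
 *
 *	struct ib_srq_init_attr sattr = {
 *		.attr = { .max_wr = 256, .max_sge = 1 },
 *	};
 *	srq = ib_create_srq(pd, &sattr);
 */
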
/* Queue pairs */

static void __ib_shared_qp_event_handler(struct ib_event *event, void *context)
{
	struct ib_qp *qp = context;
	unsigned long flags;

	spin_lock_irqsave(&qp->device->event_handler_lock, flags);
	list_for_each_entry(event->element.qp, &qp->open_list, open_list)
		if (event->element.qp->event_handler)
			event->element.qp->event_handler(event, event->element.qp->qp_context);
	spin_unlock_irqrestore(&qp->device->event_handler_lock, flags);
}

static void __ib_insert_xrcd_qp(struct ib_xrcd *xrcd, struct ib_qp *qp)
{
	mutex_lock(&xrcd->tgt_qp_mutex);
	list_add(&qp->xrcd_list, &xrcd->tgt_qp_list);
	mutex_unlock(&xrcd->tgt_qp_mutex);
}

static struct ib_qp *__ib_open_qp(struct ib_qp *real_qp,
				  void (*event_handler)(struct ib_event *, void *),
				  void *qp_context)
{
	struct ib_qp *qp;
	unsigned long flags;

	qp = kzalloc(sizeof *qp, GFP_KERNEL);
	if (!qp)
		return ERR_PTR(-ENOMEM);

	qp->real_qp = real_qp;
	atomic_inc(&real_qp->usecnt);
	qp->device = real_qp->device;
	qp->event_handler = event_handler;
	qp->qp_context = qp_context;
	qp->qp_num = real_qp->qp_num;
	qp->qp_type = real_qp->qp_type;

	spin_lock_irqsave(&real_qp->device->event_handler_lock, flags);
	list_add(&qp->open_list, &real_qp->open_list);
	spin_unlock_irqrestore(&real_qp->device->event_handler_lock, flags);

	return qp;
}

struct ib_qp *ib_open_qp(struct ib_xrcd *xrcd,
			 struct ib_qp_open_attr *qp_open_attr)
{
	struct ib_qp *qp, *real_qp;

	if (qp_open_attr->qp_type != IB_QPT_XRC_TGT)
		return ERR_PTR(-EINVAL);

	qp = ERR_PTR(-EINVAL);
	mutex_lock(&xrcd->tgt_qp_mutex);
	list_for_each_entry(real_qp, &xrcd->tgt_qp_list, xrcd_list) {
		if (real_qp->qp_num == qp_open_attr->qp_num) {
			qp = __ib_open_qp(real_qp, qp_open_attr->event_handler,
					  qp_open_attr->qp_context);
			break;
		}
	}
	mutex_unlock(&xrcd->tgt_qp_mutex);
	return qp;
}
EXPORT_SYMBOL(ib_open_qp);

static struct ib_qp *ib_create_xrc_qp(struct ib_qp *qp,
				      struct ib_qp_init_attr *qp_init_attr)
{
	struct ib_qp *real_qp = qp;

	qp->event_handler = __ib_shared_qp_event_handler;
	qp->qp_context = qp;
	qp->pd = NULL;
	qp->send_cq = qp->recv_cq = NULL;
	qp->srq = NULL;
	qp->xrcd = qp_init_attr->xrcd;
	atomic_inc(&qp_init_attr->xrcd->usecnt);
	INIT_LIST_HEAD(&qp->open_list);

	qp = __ib_open_qp(real_qp, qp_init_attr->event_handler,
			  qp_init_attr->qp_context);
	if (!IS_ERR(qp))
		__ib_insert_xrcd_qp(qp_init_attr->xrcd, real_qp);
	else
		real_qp->device->destroy_qp(real_qp);
	return qp;
}

struct ib_qp *ib_create_qp(struct ib_pd *pd,
			   struct ib_qp_init_attr *qp_init_attr)
{
	struct ib_device *device = pd ? pd->device : qp_init_attr->xrcd->device;
	struct ib_qp *qp;
	int ret;

	if (qp_init_attr->rwq_ind_tbl &&
	    (qp_init_attr->recv_cq ||
	    qp_init_attr->srq || qp_init_attr->cap.max_recv_wr ||
	    qp_init_attr->cap.max_recv_sge))
		return ERR_PTR(-EINVAL);

	/*
	 * If the caller is using the RDMA API calculate the resources
	 * needed for the RDMA READ/WRITE operations.
	 *
	 * Note that these callers need to pass in a port number.
	 */
	if (qp_init_attr->cap.max_rdma_ctxs)
		rdma_rw_init_qp(device, qp_init_attr);

	qp = device->create_qp(pd, qp_init_attr, NULL);
	if (IS_ERR(qp))
		return qp;

	qp->device = device;
	qp->real_qp = qp;
	qp->uobject = NULL;
	qp->qp_type = qp_init_attr->qp_type;
	qp->rwq_ind_tbl = qp_init_attr->rwq_ind_tbl;

	atomic_set(&qp->usecnt, 0);
	qp->mrs_used = 0;
	spin_lock_init(&qp->mr_lock);
	INIT_LIST_HEAD(&qp->rdma_mrs);
	INIT_LIST_HEAD(&qp->sig_mrs);

	if (qp_init_attr->qp_type == IB_QPT_XRC_TGT)
		return ib_create_xrc_qp(qp, qp_init_attr);

	qp->event_handler = qp_init_attr->event_handler;
	qp->qp_context = qp_init_attr->qp_context;
	if (qp_init_attr->qp_type == IB_QPT_XRC_INI) {
		qp->recv_cq = NULL;
		qp->srq = NULL;
	} else {
		qp->recv_cq = qp_init_attr->recv_cq;
		if (qp_init_attr->recv_cq)
			atomic_inc(&qp_init_attr->recv_cq->usecnt);
		qp->srq = qp_init_attr->srq;
		if (qp->srq)
			atomic_inc(&qp_init_attr->srq->usecnt);
	}

	qp->pd = pd;
	qp->send_cq = qp_init_attr->send_cq;
	qp->xrcd = NULL;

	atomic_inc(&pd->usecnt);
	if (qp_init_attr->send_cq)
		atomic_inc(&qp_init_attr->send_cq->usecnt);
	if (qp_init_attr->rwq_ind_tbl)
		atomic_inc(&qp->rwq_ind_tbl->usecnt);

	if (qp_init_attr->cap.max_rdma_ctxs) {
		ret = rdma_rw_init_mrs(qp, qp_init_attr);
		if (ret) {
			pr_err("failed to init MR pool ret= %d\n", ret);
			ib_destroy_qp(qp);
			return ERR_PTR(ret);
		}
	}

	/*
	 * Note: all hw drivers guarantee that max_send_sge is lower than
	 * the device RDMA WRITE SGE limit but not all hw drivers ensure that
	 * max_send_sge <= max_sge_rd.
	 */
	qp->max_write_sge = qp_init_attr->cap.max_send_sge;
	qp->max_read_sge = min_t(u32, qp_init_attr->cap.max_send_sge,
				 device->attrs.max_sge_rd);

	return qp;
}
EXPORT_SYMBOL(ib_create_qp);

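/*
 * Usage sketch (illustrative values, error handling elided); my_qp_event is a
 * placeholder callback name:
 *
 *	struct ib_qp_init_attr qpia = {
 *		.event_handler = my_qp_event,
 *		.send_cq = cq, .recv_cq = cq,
 *		.cap = { .max_send_wr = 64, .max_recv_wr = 64,
 *			 .max_send_sge = 1, .max_recv_sge = 1 },
 *		.sq_sig_type = IB_SIGNAL_REQ_WR,
 *		.qp_type = IB_QPT_RC,
 *	};
 *	qp = ib_create_qp(pd, &qpia);
 */
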
static const struct {
	int			valid;
	enum ib_qp_attr_mask	req_param[IB_QPT_MAX];
	enum ib_qp_attr_mask	opt_param[IB_QPT_MAX];
} qp_state_table[IB_QPS_ERR + 1][IB_QPS_ERR + 1] = {
	[IB_QPS_RESET] = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_INIT]  = {
			.valid = 1,
			.req_param = {
				[IB_QPT_UD]  = (IB_QP_PKEY_INDEX |
						IB_QP_PORT |
						IB_QP_QKEY),
				[IB_QPT_RAW_PACKET] = IB_QP_PORT,
				[IB_QPT_UC]  = (IB_QP_PKEY_INDEX |
						IB_QP_PORT |
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_RC]  = (IB_QP_PKEY_INDEX |
						IB_QP_PORT |
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_XRC_INI] = (IB_QP_PKEY_INDEX |
						IB_QP_PORT |
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_XRC_TGT] = (IB_QP_PKEY_INDEX |
						IB_QP_PORT |
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_SMI] = (IB_QP_PKEY_INDEX |
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_PKEY_INDEX |
						IB_QP_QKEY),
			}
		},
	},
	[IB_QPS_INIT]  = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR]   = { .valid = 1 },
		[IB_QPS_INIT]  = {
			.valid = 1,
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_PKEY_INDEX |
						IB_QP_PORT |
						IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_PKEY_INDEX |
						IB_QP_PORT |
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_RC]  = (IB_QP_PKEY_INDEX |
						IB_QP_PORT |
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_XRC_INI] = (IB_QP_PKEY_INDEX |
						IB_QP_PORT |
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_XRC_TGT] = (IB_QP_PKEY_INDEX |
						IB_QP_PORT |
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_SMI] = (IB_QP_PKEY_INDEX |
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_PKEY_INDEX |
						IB_QP_QKEY),
			}
		},
		[IB_QPS_RTR]   = {
			.valid = 1,
			.req_param = {
				[IB_QPT_UC]  = (IB_QP_AV |
						IB_QP_PATH_MTU |
						IB_QP_DEST_QPN |
						IB_QP_RQ_PSN),
				[IB_QPT_RC]  = (IB_QP_AV |
						IB_QP_PATH_MTU |
						IB_QP_DEST_QPN |
						IB_QP_RQ_PSN |
						IB_QP_MAX_DEST_RD_ATOMIC |
						IB_QP_MIN_RNR_TIMER),
				[IB_QPT_XRC_INI] = (IB_QP_AV |
						IB_QP_PATH_MTU |
						IB_QP_DEST_QPN |
						IB_QP_RQ_PSN),
				[IB_QPT_XRC_TGT] = (IB_QP_AV |
						IB_QP_PATH_MTU |
						IB_QP_DEST_QPN |
						IB_QP_RQ_PSN |
						IB_QP_MAX_DEST_RD_ATOMIC |
						IB_QP_MIN_RNR_TIMER),
			},
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_PKEY_INDEX |
						IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_PKEY_INDEX),
				[IB_QPT_RC]  = (IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_PKEY_INDEX),
				[IB_QPT_XRC_INI] = (IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_PKEY_INDEX),
				[IB_QPT_XRC_TGT] = (IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_PKEY_INDEX),
				[IB_QPT_SMI] = (IB_QP_PKEY_INDEX |
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_PKEY_INDEX |
						IB_QP_QKEY),
			},
		},
	},
	[IB_QPS_RTR]   = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR]   = { .valid = 1 },
		[IB_QPS_RTS]   = {
			.valid = 1,
			.req_param = {
				[IB_QPT_UD]  = IB_QP_SQ_PSN,
				[IB_QPT_UC]  = IB_QP_SQ_PSN,
				[IB_QPT_RC]  = (IB_QP_TIMEOUT |
						IB_QP_RETRY_CNT |
						IB_QP_RNR_RETRY |
						IB_QP_SQ_PSN |
						IB_QP_MAX_QP_RD_ATOMIC),
				[IB_QPT_XRC_INI] = (IB_QP_TIMEOUT |
						IB_QP_RETRY_CNT |
						IB_QP_RNR_RETRY |
						IB_QP_SQ_PSN |
						IB_QP_MAX_QP_RD_ATOMIC),
				[IB_QPT_XRC_TGT] = (IB_QP_TIMEOUT |
						IB_QP_SQ_PSN),
				[IB_QPT_SMI] = IB_QP_SQ_PSN,
				[IB_QPT_GSI] = IB_QP_SQ_PSN,
			},
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_CUR_STATE |
						IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_CUR_STATE |
						IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_RC]  = (IB_QP_CUR_STATE |
						IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_MIN_RNR_TIMER |
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_XRC_INI] = (IB_QP_CUR_STATE |
						IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_XRC_TGT] = (IB_QP_CUR_STATE |
						IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_MIN_RNR_TIMER |
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_SMI] = (IB_QP_CUR_STATE |
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_CUR_STATE |
						IB_QP_QKEY),
				[IB_QPT_RAW_PACKET] = IB_QP_RATE_LIMIT,
			}
		}
	},
	[IB_QPS_RTS]   = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR]   = { .valid = 1 },
		[IB_QPS_RTS]   = {
			.valid = 1,
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_CUR_STATE |
						IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_CUR_STATE |
						IB_QP_ACCESS_FLAGS |
						IB_QP_ALT_PATH |
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_RC]  = (IB_QP_CUR_STATE |
						IB_QP_ACCESS_FLAGS |
						IB_QP_ALT_PATH |
						IB_QP_PATH_MIG_STATE |
						IB_QP_MIN_RNR_TIMER),
				[IB_QPT_XRC_INI] = (IB_QP_CUR_STATE |
						IB_QP_ACCESS_FLAGS |
						IB_QP_ALT_PATH |
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_XRC_TGT] = (IB_QP_CUR_STATE |
						IB_QP_ACCESS_FLAGS |
						IB_QP_ALT_PATH |
						IB_QP_PATH_MIG_STATE |
						IB_QP_MIN_RNR_TIMER),
				[IB_QPT_SMI] = (IB_QP_CUR_STATE |
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_CUR_STATE |
						IB_QP_QKEY),
				[IB_QPT_RAW_PACKET] = IB_QP_RATE_LIMIT,
			}
		},
		[IB_QPS_SQD]   = {
			.valid = 1,
			.opt_param = {
				[IB_QPT_UD]  = IB_QP_EN_SQD_ASYNC_NOTIFY,
				[IB_QPT_UC]  = IB_QP_EN_SQD_ASYNC_NOTIFY,
				[IB_QPT_RC]  = IB_QP_EN_SQD_ASYNC_NOTIFY,
				[IB_QPT_XRC_INI] = IB_QP_EN_SQD_ASYNC_NOTIFY,
				[IB_QPT_XRC_TGT] = IB_QP_EN_SQD_ASYNC_NOTIFY, /* ??? */
				[IB_QPT_SMI] = IB_QP_EN_SQD_ASYNC_NOTIFY,
				[IB_QPT_GSI] = IB_QP_EN_SQD_ASYNC_NOTIFY
			}
		},
	},
	[IB_QPS_SQD]   = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR]   = { .valid = 1 },
		[IB_QPS_RTS]   = {
			.valid = 1,
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_CUR_STATE |
						IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_CUR_STATE |
						IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_RC]  = (IB_QP_CUR_STATE |
						IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_MIN_RNR_TIMER |
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_XRC_INI] = (IB_QP_CUR_STATE |
						IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_XRC_TGT] = (IB_QP_CUR_STATE |
						IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_MIN_RNR_TIMER |
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_SMI] = (IB_QP_CUR_STATE |
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_CUR_STATE |
						IB_QP_QKEY),
			}
		},
		[IB_QPS_SQD]   = {
			.valid = 1,
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_PKEY_INDEX |
						IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_AV |
						IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_PKEY_INDEX |
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_RC]  = (IB_QP_PORT |
						IB_QP_AV |
						IB_QP_TIMEOUT |
						IB_QP_RETRY_CNT |
						IB_QP_RNR_RETRY |
						IB_QP_MAX_QP_RD_ATOMIC |
						IB_QP_MAX_DEST_RD_ATOMIC |
						IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_PKEY_INDEX |
						IB_QP_MIN_RNR_TIMER |
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_XRC_INI] = (IB_QP_PORT |
						IB_QP_AV |
						IB_QP_TIMEOUT |
						IB_QP_RETRY_CNT |
						IB_QP_RNR_RETRY |
						IB_QP_MAX_QP_RD_ATOMIC |
						IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_PKEY_INDEX |
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_XRC_TGT] = (IB_QP_PORT |
						IB_QP_AV |
						IB_QP_TIMEOUT |
						IB_QP_MAX_DEST_RD_ATOMIC |
						IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_PKEY_INDEX |
						IB_QP_MIN_RNR_TIMER |
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_SMI] = (IB_QP_PKEY_INDEX |
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_PKEY_INDEX |
						IB_QP_QKEY),
			}
		}
	},
	[IB_QPS_SQE]   = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR]   = { .valid = 1 },
		[IB_QPS_RTS]   = {
			.valid = 1,
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_CUR_STATE |
						IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_CUR_STATE |
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_SMI] = (IB_QP_CUR_STATE |
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_CUR_STATE |
						IB_QP_QKEY),
			}
		}
	},
	[IB_QPS_ERR] = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR]   = { .valid = 1 }
	}
};

int ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
		       enum ib_qp_type type, enum ib_qp_attr_mask mask,
		       enum rdma_link_layer ll)
{
	enum ib_qp_attr_mask req_param, opt_param;

	if (cur_state < 0 || cur_state > IB_QPS_ERR ||
	    next_state < 0 || next_state > IB_QPS_ERR)
		return 0;

	if (mask & IB_QP_CUR_STATE &&
	    cur_state != IB_QPS_RTR && cur_state != IB_QPS_RTS &&
	    cur_state != IB_QPS_SQD && cur_state != IB_QPS_SQE)
		return 0;

	if (!qp_state_table[cur_state][next_state].valid)
		return 0;

	req_param = qp_state_table[cur_state][next_state].req_param[type];
	opt_param = qp_state_table[cur_state][next_state].opt_param[type];

	if ((mask & req_param) != req_param)
		return 0;

	if (mask & ~(req_param | opt_param | IB_QP_STATE))
		return 0;

	return 1;
}
EXPORT_SYMBOL(ib_modify_qp_is_ok);

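/*
 * Example (follows the state table above): a RESET->INIT transition on an RC
 * QP requires at least IB_QP_PKEY_INDEX, IB_QP_PORT and IB_QP_ACCESS_FLAGS,
 * so a mask of IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT |
 * IB_QP_ACCESS_FLAGS passes the check, while dropping IB_QP_PORT makes
 * ib_modify_qp_is_ok() return 0.
 */
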
int ib_resolve_eth_dmac(struct ib_device *device,
			struct rdma_ah_attr *ah_attr)
{
	int ret = 0;
	struct ib_global_route *grh;

	if (!rdma_is_port_valid(device, rdma_ah_get_port_num(ah_attr)))
		return -EINVAL;

	if (!rdma_cap_eth_ah(device, rdma_ah_get_port_num(ah_attr)))
		return 0;

	grh = rdma_ah_retrieve_grh(ah_attr);

	if (rdma_link_local_addr((struct in6_addr *)grh->dgid.raw)) {
		rdma_get_ll_mac((struct in6_addr *)grh->dgid.raw,
				ah_attr->dmac);
	} else {
		union ib_gid sgid;
		struct ib_gid_attr sgid_attr;
		int ifindex;
		int hop_limit;

		ret = ib_query_gid(device,
				   rdma_ah_get_port_num(ah_attr),
				   grh->sgid_index,
				   &sgid, &sgid_attr);

		if (ret || !sgid_attr.ndev) {
			if (!ret)
				ret = -ENXIO;
			goto out;
		}

		ifindex = sgid_attr.ndev->ifindex;

		ret = rdma_addr_find_l2_eth_by_grh(&sgid, &grh->dgid,
						   ah_attr->dmac,
						   NULL, &ifindex, &hop_limit);

		dev_put(sgid_attr.ndev);

		grh->hop_limit = hop_limit;
	}
out:
	return ret;
}
EXPORT_SYMBOL(ib_resolve_eth_dmac);

int ib_modify_qp(struct ib_qp *qp,
		 struct ib_qp_attr *qp_attr,
		 int qp_attr_mask)
{

	if (qp_attr_mask & IB_QP_AV) {
		int ret;

		ret = ib_resolve_eth_dmac(qp->device, &qp_attr->ah_attr);
		if (ret)
			return ret;
	}

	return qp->device->modify_qp(qp->real_qp, qp_attr, qp_attr_mask, NULL);
}
EXPORT_SYMBOL(ib_modify_qp);

int ib_query_qp(struct ib_qp *qp,
		struct ib_qp_attr *qp_attr,
		int qp_attr_mask,
		struct ib_qp_init_attr *qp_init_attr)
{
	return qp->device->query_qp ?
		qp->device->query_qp(qp->real_qp, qp_attr, qp_attr_mask, qp_init_attr) :
		-ENOSYS;
}
EXPORT_SYMBOL(ib_query_qp);

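/*
 * Usage sketch (illustrative, error handling elided): driving a new RC QP to
 * INIT per the state table above:
 *
 *	struct ib_qp_attr attr = {
 *		.qp_state = IB_QPS_INIT,
 *		.pkey_index = 0,
 *		.port_num = 1,
 *		.qp_access_flags = IB_ACCESS_REMOTE_WRITE,
 *	};
 *	ret = ib_modify_qp(qp, &attr, IB_QP_STATE | IB_QP_PKEY_INDEX |
 *			   IB_QP_PORT | IB_QP_ACCESS_FLAGS);
 */
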
int ib_close_qp(struct ib_qp *qp)
{
	struct ib_qp *real_qp;
	unsigned long flags;

	real_qp = qp->real_qp;
	if (real_qp == qp)
		return -EINVAL;

	spin_lock_irqsave(&real_qp->device->event_handler_lock, flags);
	list_del(&qp->open_list);
	spin_unlock_irqrestore(&real_qp->device->event_handler_lock, flags);

	atomic_dec(&real_qp->usecnt);
	kfree(qp);

	return 0;
}
EXPORT_SYMBOL(ib_close_qp);

static int __ib_destroy_shared_qp(struct ib_qp *qp)
{
	struct ib_xrcd *xrcd;
	struct ib_qp *real_qp;
	int ret;

	real_qp = qp->real_qp;
	xrcd = real_qp->xrcd;

	mutex_lock(&xrcd->tgt_qp_mutex);
	ib_close_qp(qp);
	if (atomic_read(&real_qp->usecnt) == 0)
		list_del(&real_qp->xrcd_list);
	else
		real_qp = NULL;
	mutex_unlock(&xrcd->tgt_qp_mutex);

	if (real_qp) {
		ret = ib_destroy_qp(real_qp);
		if (!ret)
			atomic_dec(&xrcd->usecnt);
		else
			__ib_insert_xrcd_qp(xrcd, real_qp);
	}

	return 0;
}

int ib_destroy_qp(struct ib_qp *qp)
{
	struct ib_pd *pd;
	struct ib_cq *scq, *rcq;
	struct ib_srq *srq;
	struct ib_rwq_ind_table *ind_tbl;
	int ret;

	WARN_ON_ONCE(qp->mrs_used > 0);

	if (atomic_read(&qp->usecnt))
		return -EBUSY;

	if (qp->real_qp != qp)
		return __ib_destroy_shared_qp(qp);

	pd = qp->pd;
	scq = qp->send_cq;
	rcq = qp->recv_cq;
	srq = qp->srq;
	ind_tbl = qp->rwq_ind_tbl;

	if (!qp->uobject)
		rdma_rw_cleanup_mrs(qp);

	ret = qp->device->destroy_qp(qp);
	if (!ret) {
		if (pd)
			atomic_dec(&pd->usecnt);
		if (scq)
			atomic_dec(&scq->usecnt);
		if (rcq)
			atomic_dec(&rcq->usecnt);
		if (srq)
			atomic_dec(&srq->usecnt);
		if (ind_tbl)
			atomic_dec(&ind_tbl->usecnt);
	}

	return ret;
}
EXPORT_SYMBOL(ib_destroy_qp);

/* Completion queues */

struct ib_cq *ib_create_cq(struct ib_device *device,
			   ib_comp_handler comp_handler,
			   void (*event_handler)(struct ib_event *, void *),
			   void *cq_context,
			   const struct ib_cq_init_attr *cq_attr)
{
	struct ib_cq *cq;

	cq = device->create_cq(device, cq_attr, NULL, NULL);

	if (!IS_ERR(cq)) {
		cq->device = device;
		cq->uobject = NULL;
		cq->comp_handler = comp_handler;
		cq->event_handler = event_handler;
		cq->cq_context = cq_context;
		atomic_set(&cq->usecnt, 0);
	}

	return cq;
}
EXPORT_SYMBOL(ib_create_cq);

int ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
{
	return cq->device->modify_cq ?
		cq->device->modify_cq(cq, cq_count, cq_period) : -ENOSYS;
}
EXPORT_SYMBOL(ib_modify_cq);

int ib_destroy_cq(struct ib_cq *cq)
{
	if (atomic_read(&cq->usecnt))
		return -EBUSY;

	return cq->device->destroy_cq(cq);
}
EXPORT_SYMBOL(ib_destroy_cq);

int ib_resize_cq(struct ib_cq *cq, int cqe)
{
	return cq->device->resize_cq ?
		cq->device->resize_cq(cq, cqe, NULL) : -ENOSYS;
}
EXPORT_SYMBOL(ib_resize_cq);

/* Memory regions */

int ib_dereg_mr(struct ib_mr *mr)
{
	struct ib_pd *pd = mr->pd;
	int ret;

	ret = mr->device->dereg_mr(mr);
	if (!ret)
		atomic_dec(&pd->usecnt);

	return ret;
}
EXPORT_SYMBOL(ib_dereg_mr);

/**
 * ib_alloc_mr() - Allocates a memory region
 * @pd:            protection domain associated with the region
 * @mr_type:       memory region type
 * @max_num_sg:    maximum sg entries available for registration.
 *
 * Notes:
 * Memory registration page/sg lists must not exceed max_num_sg.
 * For mr_type IB_MR_TYPE_MEM_REG, the total length cannot exceed
 * max_num_sg * used_page_size.
 *
 */
struct ib_mr *ib_alloc_mr(struct ib_pd *pd,
			  enum ib_mr_type mr_type,
			  u32 max_num_sg)
{
	struct ib_mr *mr;

	if (!pd->device->alloc_mr)
		return ERR_PTR(-ENOSYS);

	mr = pd->device->alloc_mr(pd, mr_type, max_num_sg);
	if (!IS_ERR(mr)) {
		mr->device = pd->device;
		mr->pd = pd;
		mr->uobject = NULL;
		atomic_inc(&pd->usecnt);
		mr->need_inval = false;
	}

	return mr;
}
EXPORT_SYMBOL(ib_alloc_mr);

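/*
 * Usage sketch (illustrative, error handling elided): a fast-registration MR
 * is allocated once and re-mapped to a scatterlist before each use:
 *
 *	mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, 16);
 *	...
 *	n = ib_map_mr_sg(mr, sgl, sg_nents, NULL, PAGE_SIZE);
 *	...post an IB_WR_REG_MR work request, then use mr->lkey / mr->rkey...
 */
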
Linus Torvalds1da177e2005-04-16 15:20:36 -07001467/* "Fast" memory regions */
1468
1469struct ib_fmr *ib_alloc_fmr(struct ib_pd *pd,
1470 int mr_access_flags,
1471 struct ib_fmr_attr *fmr_attr)
1472{
1473 struct ib_fmr *fmr;
1474
1475 if (!pd->device->alloc_fmr)
1476 return ERR_PTR(-ENOSYS);
1477
1478 fmr = pd->device->alloc_fmr(pd, mr_access_flags, fmr_attr);
1479 if (!IS_ERR(fmr)) {
1480 fmr->device = pd->device;
1481 fmr->pd = pd;
1482 atomic_inc(&pd->usecnt);
1483 }
1484
1485 return fmr;
1486}
1487EXPORT_SYMBOL(ib_alloc_fmr);
1488
1489int ib_unmap_fmr(struct list_head *fmr_list)
1490{
1491 struct ib_fmr *fmr;
1492
1493 if (list_empty(fmr_list))
1494 return 0;
1495
1496 fmr = list_entry(fmr_list->next, struct ib_fmr, list);
1497 return fmr->device->unmap_fmr(fmr_list);
1498}
1499EXPORT_SYMBOL(ib_unmap_fmr);
1500
1501int ib_dealloc_fmr(struct ib_fmr *fmr)
1502{
1503 struct ib_pd *pd;
1504 int ret;
1505
1506 pd = fmr->pd;
1507 ret = fmr->device->dealloc_fmr(fmr);
1508 if (!ret)
1509 atomic_dec(&pd->usecnt);
1510
1511 return ret;
1512}
1513EXPORT_SYMBOL(ib_dealloc_fmr);
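/*
 * Illustrative example (a sketch; FMRs are a legacy interface and the
 * attribute values below are assumptions): allocate an FMR, later unmap all
 * FMRs queued on a caller-private list, then free it.
 *
 *	struct ib_fmr_attr fmr_attr = {
 *		.max_pages	= 64,
 *		.max_maps	= 32,
 *		.page_shift	= PAGE_SHIFT,
 *	};
 *	struct ib_fmr *fmr;
 *
 *	fmr = ib_alloc_fmr(pd, IB_ACCESS_LOCAL_WRITE, &fmr_attr);
 *	if (IS_ERR(fmr))
 *		return PTR_ERR(fmr);
 *	...
 *	list_add_tail(&fmr->list, &fmr_list);
 *	ib_unmap_fmr(&fmr_list);
 *	ib_dealloc_fmr(fmr);
 */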
1514
1515/* Multicast groups */
1516
1517int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
1518{
Or Gerlitzc3bccbfb2012-04-29 17:04:22 +03001519 int ret;
1520
Jack Morgenstein0c33aee2005-09-26 11:47:53 -07001521 if (!qp->device->attach_mcast)
1522 return -ENOSYS;
Michael J. Ruhl8561eae2017-04-09 10:15:51 -07001523 if (gid->raw[0] != 0xff || qp->qp_type != IB_QPT_UD ||
1524 lid < be16_to_cpu(IB_MULTICAST_LID_BASE) ||
1525 lid == be16_to_cpu(IB_LID_PERMISSIVE))
Jack Morgenstein0c33aee2005-09-26 11:47:53 -07001526 return -EINVAL;
1527
Or Gerlitzc3bccbfb2012-04-29 17:04:22 +03001528 ret = qp->device->attach_mcast(qp, gid, lid);
1529 if (!ret)
1530 atomic_inc(&qp->usecnt);
1531 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001532}
1533EXPORT_SYMBOL(ib_attach_mcast);
1534
1535int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
1536{
Or Gerlitzc3bccbfb2012-04-29 17:04:22 +03001537 int ret;
1538
Jack Morgenstein0c33aee2005-09-26 11:47:53 -07001539 if (!qp->device->detach_mcast)
1540 return -ENOSYS;
Michael J. Ruhl8561eae2017-04-09 10:15:51 -07001541 if (gid->raw[0] != 0xff || qp->qp_type != IB_QPT_UD ||
1542 lid < be16_to_cpu(IB_MULTICAST_LID_BASE) ||
1543 lid == be16_to_cpu(IB_LID_PERMISSIVE))
Jack Morgenstein0c33aee2005-09-26 11:47:53 -07001544 return -EINVAL;
1545
Or Gerlitzc3bccbfb2012-04-29 17:04:22 +03001546 ret = qp->device->detach_mcast(qp, gid, lid);
1547 if (!ret)
1548 atomic_dec(&qp->usecnt);
1549 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001550}
1551EXPORT_SYMBOL(ib_detach_mcast);
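/*
 * Illustrative example (a sketch with assumed values): a UD QP joining and
 * later leaving a multicast group. The MGID must begin with 0xff and the LID
 * must fall inside the multicast range, otherwise the checks above fail with
 * -EINVAL.
 *
 *	union ib_gid mgid;	// filled in elsewhere, e.g. from an SA join reply
 *	u16 mlid = 0xc001;	// assumed multicast LID
 *
 *	ret = ib_attach_mcast(qp, &mgid, mlid);
 *	if (ret)
 *		return ret;
 *	...
 *	ib_detach_mcast(qp, &mgid, mlid);
 */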
Sean Hefty59991f92011-05-23 17:52:46 -07001552
1553struct ib_xrcd *ib_alloc_xrcd(struct ib_device *device)
1554{
1555 struct ib_xrcd *xrcd;
1556
1557 if (!device->alloc_xrcd)
1558 return ERR_PTR(-ENOSYS);
1559
1560 xrcd = device->alloc_xrcd(device, NULL, NULL);
1561 if (!IS_ERR(xrcd)) {
1562 xrcd->device = device;
Sean Hefty53d0bd12011-05-24 08:33:46 -07001563 xrcd->inode = NULL;
Sean Hefty59991f92011-05-23 17:52:46 -07001564 atomic_set(&xrcd->usecnt, 0);
Sean Heftyd3d72d92011-05-26 23:06:44 -07001565 mutex_init(&xrcd->tgt_qp_mutex);
1566 INIT_LIST_HEAD(&xrcd->tgt_qp_list);
Sean Hefty59991f92011-05-23 17:52:46 -07001567 }
1568
1569 return xrcd;
1570}
1571EXPORT_SYMBOL(ib_alloc_xrcd);
1572
1573int ib_dealloc_xrcd(struct ib_xrcd *xrcd)
1574{
Sean Heftyd3d72d92011-05-26 23:06:44 -07001575 struct ib_qp *qp;
1576 int ret;
1577
Sean Hefty59991f92011-05-23 17:52:46 -07001578 if (atomic_read(&xrcd->usecnt))
1579 return -EBUSY;
1580
Sean Heftyd3d72d92011-05-26 23:06:44 -07001581 while (!list_empty(&xrcd->tgt_qp_list)) {
1582 qp = list_entry(xrcd->tgt_qp_list.next, struct ib_qp, xrcd_list);
1583 ret = ib_destroy_qp(qp);
1584 if (ret)
1585 return ret;
1586 }
1587
Sean Hefty59991f92011-05-23 17:52:46 -07001588 return xrcd->device->dealloc_xrcd(xrcd);
1589}
1590EXPORT_SYMBOL(ib_dealloc_xrcd);
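/*
 * Illustrative example (a sketch): a kernel consumer that needs an XRC domain
 * can allocate one per device and release it once no target QPs remain.
 *
 *	struct ib_xrcd *xrcd;
 *
 *	xrcd = ib_alloc_xrcd(device);
 *	if (IS_ERR(xrcd))
 *		return PTR_ERR(xrcd);
 *	...
 *	ib_dealloc_xrcd(xrcd);
 */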
Hadar Hen Zion319a4412013-08-07 14:01:59 +03001591
Yishai Hadas5fd251c2016-05-23 15:20:48 +03001592/**
1593 * ib_create_wq - Creates a WQ associated with the specified protection
1594 * domain.
1595 * @pd: The protection domain associated with the WQ.
 1596 * @wq_attr: A list of initial attributes required to create the
1597 * WQ. If WQ creation succeeds, then the attributes are updated to
1598 * the actual capabilities of the created WQ.
1599 *
 1600 * wq_attr->max_wr and wq_attr->max_sge determine
 1601 * the requested size of the WQ, and are set to the actual values allocated
 1602 * on return.
1603 * If ib_create_wq() succeeds, then max_wr and max_sge will always be
1604 * at least as large as the requested values.
1605 */
1606struct ib_wq *ib_create_wq(struct ib_pd *pd,
1607 struct ib_wq_init_attr *wq_attr)
1608{
1609 struct ib_wq *wq;
1610
1611 if (!pd->device->create_wq)
1612 return ERR_PTR(-ENOSYS);
1613
1614 wq = pd->device->create_wq(pd, wq_attr, NULL);
1615 if (!IS_ERR(wq)) {
1616 wq->event_handler = wq_attr->event_handler;
1617 wq->wq_context = wq_attr->wq_context;
1618 wq->wq_type = wq_attr->wq_type;
1619 wq->cq = wq_attr->cq;
1620 wq->device = pd->device;
1621 wq->pd = pd;
1622 wq->uobject = NULL;
1623 atomic_inc(&pd->usecnt);
1624 atomic_inc(&wq_attr->cq->usecnt);
1625 atomic_set(&wq->usecnt, 0);
1626 }
1627 return wq;
1628}
1629EXPORT_SYMBOL(ib_create_wq);
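/*
 * Illustrative example (a sketch; the queue sizes and the "recv_cq" CQ are
 * assumptions): creating a receive WQ and destroying it again.
 *
 *	struct ib_wq_init_attr wq_attr = {
 *		.wq_type	= IB_WQT_RQ,
 *		.max_wr		= 128,
 *		.max_sge	= 2,
 *		.cq		= recv_cq,
 *	};
 *	struct ib_wq *wq;
 *
 *	wq = ib_create_wq(pd, &wq_attr);
 *	if (IS_ERR(wq))
 *		return PTR_ERR(wq);
 *	...
 *	ib_destroy_wq(wq);
 */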
1630
1631/**
1632 * ib_destroy_wq - Destroys the specified WQ.
1633 * @wq: The WQ to destroy.
1634 */
1635int ib_destroy_wq(struct ib_wq *wq)
1636{
1637 int err;
1638 struct ib_cq *cq = wq->cq;
1639 struct ib_pd *pd = wq->pd;
1640
1641 if (atomic_read(&wq->usecnt))
1642 return -EBUSY;
1643
1644 err = wq->device->destroy_wq(wq);
1645 if (!err) {
1646 atomic_dec(&pd->usecnt);
1647 atomic_dec(&cq->usecnt);
1648 }
1649 return err;
1650}
1651EXPORT_SYMBOL(ib_destroy_wq);
1652
1653/**
1654 * ib_modify_wq - Modifies the specified WQ.
1655 * @wq: The WQ to modify.
1656 * @wq_attr: On input, specifies the WQ attributes to modify.
1657 * @wq_attr_mask: A bit-mask used to specify which attributes of the WQ
1658 * are being modified.
1659 * On output, the current values of selected WQ attributes are returned.
1660 */
1661int ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr,
1662 u32 wq_attr_mask)
1663{
1664 int err;
1665
1666 if (!wq->device->modify_wq)
1667 return -ENOSYS;
1668
1669 err = wq->device->modify_wq(wq, wq_attr, wq_attr_mask, NULL);
1670 return err;
1671}
1672EXPORT_SYMBOL(ib_modify_wq);
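/*
 * Illustrative example (a sketch; error handling around it is assumed):
 * moving a freshly created WQ to the ready state.
 *
 *	struct ib_wq_attr attr = { .wq_state = IB_WQS_RDY };
 *
 *	ret = ib_modify_wq(wq, &attr, IB_WQ_STATE);
 *	if (ret)
 *		return ret;
 */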
1673
Yishai Hadas6d397862016-05-23 15:20:51 +03001674/*
 1675 * ib_create_rwq_ind_table - Creates an RQ Indirection Table.
1676 * @device: The device on which to create the rwq indirection table.
1677 * @ib_rwq_ind_table_init_attr: A list of initial attributes required to
1678 * create the Indirection Table.
1679 *
 1680 * Note: The lifetime of ib_rwq_ind_table_init_attr->ind_tbl must not be shorter
 1681 * than that of the created ib_rwq_ind_table object; the caller is responsible
 1682 * for allocating and freeing it.
1683 */
1684struct ib_rwq_ind_table *ib_create_rwq_ind_table(struct ib_device *device,
1685 struct ib_rwq_ind_table_init_attr *init_attr)
1686{
1687 struct ib_rwq_ind_table *rwq_ind_table;
1688 int i;
1689 u32 table_size;
1690
1691 if (!device->create_rwq_ind_table)
1692 return ERR_PTR(-ENOSYS);
1693
1694 table_size = (1 << init_attr->log_ind_tbl_size);
1695 rwq_ind_table = device->create_rwq_ind_table(device,
1696 init_attr, NULL);
1697 if (IS_ERR(rwq_ind_table))
1698 return rwq_ind_table;
1699
1700 rwq_ind_table->ind_tbl = init_attr->ind_tbl;
1701 rwq_ind_table->log_ind_tbl_size = init_attr->log_ind_tbl_size;
1702 rwq_ind_table->device = device;
1703 rwq_ind_table->uobject = NULL;
1704 atomic_set(&rwq_ind_table->usecnt, 0);
1705
1706 for (i = 0; i < table_size; i++)
1707 atomic_inc(&rwq_ind_table->ind_tbl[i]->usecnt);
1708
1709 return rwq_ind_table;
1710}
1711EXPORT_SYMBOL(ib_create_rwq_ind_table);
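/*
 * Illustrative example (a sketch, assuming the caller already owns four ready
 * WQs in a wqs[] array): building a four-entry indirection table for RSS.
 *
 *	struct ib_rwq_ind_table_init_attr init_attr = {
 *		.log_ind_tbl_size = 2,	// 1 << 2 == 4 entries
 *		.ind_tbl	  = wqs,
 *	};
 *	struct ib_rwq_ind_table *ind_tbl;
 *
 *	ind_tbl = ib_create_rwq_ind_table(device, &init_attr);
 *	if (IS_ERR(ind_tbl))
 *		return PTR_ERR(ind_tbl);
 *	...
 *	ib_destroy_rwq_ind_table(ind_tbl);
 */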
1712
1713/*
1714 * ib_destroy_rwq_ind_table - Destroys the specified Indirection Table.
 1715 * @rwq_ind_table: The Indirection Table to destroy.
 1716 */
1717int ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *rwq_ind_table)
1718{
1719 int err, i;
1720 u32 table_size = (1 << rwq_ind_table->log_ind_tbl_size);
1721 struct ib_wq **ind_tbl = rwq_ind_table->ind_tbl;
1722
1723 if (atomic_read(&rwq_ind_table->usecnt))
1724 return -EBUSY;
1725
1726 err = rwq_ind_table->device->destroy_rwq_ind_table(rwq_ind_table);
1727 if (!err) {
1728 for (i = 0; i < table_size; i++)
1729 atomic_dec(&ind_tbl[i]->usecnt);
1730 }
1731
1732 return err;
1733}
1734EXPORT_SYMBOL(ib_destroy_rwq_ind_table);
1735
Hadar Hen Zion319a4412013-08-07 14:01:59 +03001736struct ib_flow *ib_create_flow(struct ib_qp *qp,
1737 struct ib_flow_attr *flow_attr,
1738 int domain)
1739{
1740 struct ib_flow *flow_id;
1741 if (!qp->device->create_flow)
1742 return ERR_PTR(-ENOSYS);
1743
1744 flow_id = qp->device->create_flow(qp, flow_attr, domain);
Mark Bloch8ecc7982016-10-27 16:36:30 +03001745 if (!IS_ERR(flow_id)) {
Hadar Hen Zion319a4412013-08-07 14:01:59 +03001746 atomic_inc(&qp->usecnt);
Mark Bloch8ecc7982016-10-27 16:36:30 +03001747 flow_id->qp = qp;
1748 }
Hadar Hen Zion319a4412013-08-07 14:01:59 +03001749 return flow_id;
1750}
1751EXPORT_SYMBOL(ib_create_flow);
1752
1753int ib_destroy_flow(struct ib_flow *flow_id)
1754{
1755 int err;
1756 struct ib_qp *qp = flow_id->qp;
1757
1758 err = qp->device->destroy_flow(flow_id);
1759 if (!err)
1760 atomic_dec(&qp->usecnt);
1761 return err;
1762}
1763EXPORT_SYMBOL(ib_destroy_flow);
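/*
 * Illustrative example (a sketch with assumed values): attaching a simple
 * sniffer steering rule, carrying no extra flow specs, to a QP on port 1 and
 * removing it again.
 *
 *	struct ib_flow_attr flow_attr = {
 *		.type		= IB_FLOW_ATTR_SNIFFER,
 *		.size		= sizeof(flow_attr),
 *		.num_of_specs	= 0,
 *		.port		= 1,
 *	};
 *	struct ib_flow *flow;
 *
 *	flow = ib_create_flow(qp, &flow_attr, IB_FLOW_DOMAIN_USER);
 *	if (IS_ERR(flow))
 *		return PTR_ERR(flow);
 *	...
 *	ib_destroy_flow(flow);
 */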
Sagi Grimberg1b01d332014-02-23 14:19:05 +02001764
1765int ib_check_mr_status(struct ib_mr *mr, u32 check_mask,
1766 struct ib_mr_status *mr_status)
1767{
1768 return mr->device->check_mr_status ?
1769 mr->device->check_mr_status(mr, check_mask, mr_status) : -ENOSYS;
1770}
1771EXPORT_SYMBOL(ib_check_mr_status);
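/*
 * Illustrative example (a sketch): after I/O on a signature-enabled MR, a ULP
 * can ask whether a protection-information error was detected.
 *
 *	struct ib_mr_status mr_status;
 *
 *	ret = ib_check_mr_status(mr, IB_MR_CHECK_SIG_STATUS, &mr_status);
 *	if (ret)
 *		return ret;
 *	if (mr_status.fail_status & IB_MR_CHECK_SIG_STATUS)
 *		pr_err("signature error type %d\n", mr_status.sig_err.err_type);
 */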
Sagi Grimberg4c67e2b2015-10-13 19:11:24 +03001772
Eli Cohen50174a72016-03-11 22:58:38 +02001773int ib_set_vf_link_state(struct ib_device *device, int vf, u8 port,
1774 int state)
1775{
1776 if (!device->set_vf_link_state)
1777 return -ENOSYS;
1778
1779 return device->set_vf_link_state(device, vf, port, state);
1780}
1781EXPORT_SYMBOL(ib_set_vf_link_state);
1782
1783int ib_get_vf_config(struct ib_device *device, int vf, u8 port,
1784 struct ifla_vf_info *info)
1785{
1786 if (!device->get_vf_config)
1787 return -ENOSYS;
1788
1789 return device->get_vf_config(device, vf, port, info);
1790}
1791EXPORT_SYMBOL(ib_get_vf_config);
1792
1793int ib_get_vf_stats(struct ib_device *device, int vf, u8 port,
1794 struct ifla_vf_stats *stats)
1795{
1796 if (!device->get_vf_stats)
1797 return -ENOSYS;
1798
1799 return device->get_vf_stats(device, vf, port, stats);
1800}
1801EXPORT_SYMBOL(ib_get_vf_stats);
1802
1803int ib_set_vf_guid(struct ib_device *device, int vf, u8 port, u64 guid,
1804 int type)
1805{
1806 if (!device->set_vf_guid)
1807 return -ENOSYS;
1808
1809 return device->set_vf_guid(device, vf, port, guid, type);
1810}
1811EXPORT_SYMBOL(ib_set_vf_guid);
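/*
 * Illustrative example (a sketch, assuming VF 0 on port 1): the SR-IOV
 * helpers are thin wrappers around the driver callbacks, so callers simply
 * treat -ENOSYS as "no SR-IOV support on this device".
 *
 *	struct ifla_vf_info info;
 *
 *	ret = ib_get_vf_config(device, 0, 1, &info);
 *	if (ret)
 *		return ret;
 *	ret = ib_set_vf_link_state(device, 0, 1, IFLA_VF_LINK_STATE_ENABLE);
 */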
1812
Sagi Grimberg4c67e2b2015-10-13 19:11:24 +03001813/**
1814 * ib_map_mr_sg() - Map the largest prefix of a dma mapped SG list
 1815 * and set it on the memory region.
1816 * @mr: memory region
1817 * @sg: dma mapped scatterlist
1818 * @sg_nents: number of entries in sg
Christoph Hellwigff2ba992016-05-03 18:01:04 +02001819 * @sg_offset: offset in bytes into sg
Sagi Grimberg4c67e2b2015-10-13 19:11:24 +03001820 * @page_size: page vector desired page size
1821 *
1822 * Constraints:
1823 * - The first sg element is allowed to have an offset.
Bart Van Assche52746122016-09-26 09:09:42 -07001824 * - Each sg element must either be aligned to page_size or virtually
1825 * contiguous to the previous element. In case an sg element has a
1826 * non-contiguous offset, the mapping prefix will not include it.
Sagi Grimberg4c67e2b2015-10-13 19:11:24 +03001827 * - The last sg element is allowed to have length less than page_size.
 1828 * - If sg_nents total byte length exceeds the mr max_num_sg * page_size
1829 * then only max_num_sg entries will be mapped.
Bart Van Assche52746122016-09-26 09:09:42 -07001830 * - If the MR was allocated with type IB_MR_TYPE_SG_GAPS, none of these
Sagi Grimbergf5aa9152016-02-29 19:07:32 +02001831 * constraints holds and the page_size argument is ignored.
Sagi Grimberg4c67e2b2015-10-13 19:11:24 +03001832 *
1833 * Returns the number of sg elements that were mapped to the memory region.
1834 *
1835 * After this completes successfully, the memory region
1836 * is ready for registration.
1837 */
Christoph Hellwigff2ba992016-05-03 18:01:04 +02001838int ib_map_mr_sg(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
Bart Van Assche9aa8b322016-05-12 10:49:15 -07001839 unsigned int *sg_offset, unsigned int page_size)
Sagi Grimberg4c67e2b2015-10-13 19:11:24 +03001840{
1841 if (unlikely(!mr->device->map_mr_sg))
1842 return -ENOSYS;
1843
1844 mr->page_size = page_size;
1845
Christoph Hellwigff2ba992016-05-03 18:01:04 +02001846 return mr->device->map_mr_sg(mr, sg, sg_nents, sg_offset);
Sagi Grimberg4c67e2b2015-10-13 19:11:24 +03001847}
1848EXPORT_SYMBOL(ib_map_mr_sg);
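/*
 * Illustrative example (a sketch, assuming sg/sg_nents were already DMA
 * mapped with dma_map_sg() and that mr was allocated for at least sg_nents
 * entries):
 *
 *	n = ib_map_mr_sg(mr, sg, sg_nents, NULL, PAGE_SIZE);
 *	if (n != sg_nents)
 *		return n < 0 ? n : -EINVAL;	// partial mapping: handle or fail
 *
 * After a successful call the MR can be registered with an IB_WR_REG_MR work
 * request built from mr->iova, mr->length and mr->page_size.
 */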
1849
1850/**
1851 * ib_sg_to_pages() - Convert the largest prefix of a sg list
1852 * to a page vector
1853 * @mr: memory region
1854 * @sgl: dma mapped scatterlist
1855 * @sg_nents: number of entries in sg
Bart Van Assche9aa8b322016-05-12 10:49:15 -07001856 * @sg_offset_p: IN: start offset in bytes into sg
1857 * OUT: offset in bytes for element n of the sg of the first
1858 * byte that has not been processed where n is the return
1859 * value of this function.
Sagi Grimberg4c67e2b2015-10-13 19:11:24 +03001860 * @set_page: driver page assignment function pointer
1861 *
Bart Van Assche8f5ba102015-12-03 16:04:17 -08001862 * Core service helper for drivers to convert the largest
Sagi Grimberg4c67e2b2015-10-13 19:11:24 +03001863 * prefix of the given sg list to a page vector. The sg list
 1864 * prefix converted is the prefix that meets the requirements
1865 * of ib_map_mr_sg.
1866 *
1867 * Returns the number of sg elements that were assigned to
1868 * a page vector.
1869 */
Christoph Hellwigff2ba992016-05-03 18:01:04 +02001870int ib_sg_to_pages(struct ib_mr *mr, struct scatterlist *sgl, int sg_nents,
Bart Van Assche9aa8b322016-05-12 10:49:15 -07001871 unsigned int *sg_offset_p, int (*set_page)(struct ib_mr *, u64))
Sagi Grimberg4c67e2b2015-10-13 19:11:24 +03001872{
1873 struct scatterlist *sg;
Bart Van Asscheb6aeb982015-12-29 10:45:03 +01001874 u64 last_end_dma_addr = 0;
Bart Van Assche9aa8b322016-05-12 10:49:15 -07001875 unsigned int sg_offset = sg_offset_p ? *sg_offset_p : 0;
Sagi Grimberg4c67e2b2015-10-13 19:11:24 +03001876 unsigned int last_page_off = 0;
1877 u64 page_mask = ~((u64)mr->page_size - 1);
Bart Van Assche8f5ba102015-12-03 16:04:17 -08001878 int i, ret;
Sagi Grimberg4c67e2b2015-10-13 19:11:24 +03001879
Bart Van Assche9aa8b322016-05-12 10:49:15 -07001880 if (unlikely(sg_nents <= 0 || sg_offset > sg_dma_len(&sgl[0])))
1881 return -EINVAL;
1882
Christoph Hellwigff2ba992016-05-03 18:01:04 +02001883 mr->iova = sg_dma_address(&sgl[0]) + sg_offset;
Sagi Grimberg4c67e2b2015-10-13 19:11:24 +03001884 mr->length = 0;
1885
1886 for_each_sg(sgl, sg, sg_nents, i) {
Christoph Hellwigff2ba992016-05-03 18:01:04 +02001887 u64 dma_addr = sg_dma_address(sg) + sg_offset;
Bart Van Assche9aa8b322016-05-12 10:49:15 -07001888 u64 prev_addr = dma_addr;
Christoph Hellwigff2ba992016-05-03 18:01:04 +02001889 unsigned int dma_len = sg_dma_len(sg) - sg_offset;
Sagi Grimberg4c67e2b2015-10-13 19:11:24 +03001890 u64 end_dma_addr = dma_addr + dma_len;
1891 u64 page_addr = dma_addr & page_mask;
1892
Bart Van Assche8f5ba102015-12-03 16:04:17 -08001893 /*
1894 * For the second and later elements, check whether either the
1895 * end of element i-1 or the start of element i is not aligned
1896 * on a page boundary.
1897 */
1898 if (i && (last_page_off != 0 || page_addr != dma_addr)) {
1899 /* Stop mapping if there is a gap. */
1900 if (last_end_dma_addr != dma_addr)
1901 break;
Sagi Grimberg4c67e2b2015-10-13 19:11:24 +03001902
Bart Van Assche8f5ba102015-12-03 16:04:17 -08001903 /*
1904 * Coalesce this element with the last. If it is small
1905 * enough just update mr->length. Otherwise start
1906 * mapping from the next page.
1907 */
1908 goto next_page;
Sagi Grimberg4c67e2b2015-10-13 19:11:24 +03001909 }
1910
1911 do {
Bart Van Assche8f5ba102015-12-03 16:04:17 -08001912 ret = set_page(mr, page_addr);
Bart Van Assche9aa8b322016-05-12 10:49:15 -07001913 if (unlikely(ret < 0)) {
1914 sg_offset = prev_addr - sg_dma_address(sg);
1915 mr->length += prev_addr - dma_addr;
1916 if (sg_offset_p)
1917 *sg_offset_p = sg_offset;
1918 return i || sg_offset ? i : ret;
1919 }
1920 prev_addr = page_addr;
Bart Van Assche8f5ba102015-12-03 16:04:17 -08001921next_page:
Sagi Grimberg4c67e2b2015-10-13 19:11:24 +03001922 page_addr += mr->page_size;
1923 } while (page_addr < end_dma_addr);
1924
1925 mr->length += dma_len;
1926 last_end_dma_addr = end_dma_addr;
Sagi Grimberg4c67e2b2015-10-13 19:11:24 +03001927 last_page_off = end_dma_addr & ~page_mask;
Christoph Hellwigff2ba992016-05-03 18:01:04 +02001928
1929 sg_offset = 0;
Sagi Grimberg4c67e2b2015-10-13 19:11:24 +03001930 }
1931
Bart Van Assche9aa8b322016-05-12 10:49:15 -07001932 if (sg_offset_p)
1933 *sg_offset_p = 0;
Sagi Grimberg4c67e2b2015-10-13 19:11:24 +03001934 return i;
1935}
1936EXPORT_SYMBOL(ib_sg_to_pages);
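/*
 * Illustrative example (a sketch): a driver's map_mr_sg callback typically
 * wraps ib_sg_to_pages() with a set_page routine that appends each page
 * address to a driver-private page list. The structure and helper names
 * below ("example_mr", "to_example_mr") are hypothetical.
 *
 *	static int example_set_page(struct ib_mr *ibmr, u64 addr)
 *	{
 *		struct example_mr *mr = to_example_mr(ibmr);
 *
 *		if (mr->npages == mr->max_pages)
 *			return -ENOMEM;
 *		mr->pages[mr->npages++] = addr;
 *		return 0;
 *	}
 *
 *	ret = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, example_set_page);
 */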
Steve Wise765d6772016-02-17 08:15:41 -08001937
1938struct ib_drain_cqe {
1939 struct ib_cqe cqe;
1940 struct completion done;
1941};
1942
1943static void ib_drain_qp_done(struct ib_cq *cq, struct ib_wc *wc)
1944{
1945 struct ib_drain_cqe *cqe = container_of(wc->wr_cqe, struct ib_drain_cqe,
1946 cqe);
1947
1948 complete(&cqe->done);
1949}
1950
1951/*
1952 * Post a WR and block until its completion is reaped for the SQ.
1953 */
1954static void __ib_drain_sq(struct ib_qp *qp)
1955{
Bart Van Asschef039f442017-02-14 10:56:35 -08001956 struct ib_cq *cq = qp->send_cq;
Steve Wise765d6772016-02-17 08:15:41 -08001957 struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
1958 struct ib_drain_cqe sdrain;
1959 struct ib_send_wr swr = {}, *bad_swr;
1960 int ret;
1961
Steve Wise765d6772016-02-17 08:15:41 -08001962 swr.wr_cqe = &sdrain.cqe;
1963 sdrain.cqe.done = ib_drain_qp_done;
1964 init_completion(&sdrain.done);
1965
1966 ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
1967 if (ret) {
1968 WARN_ONCE(ret, "failed to drain send queue: %d\n", ret);
1969 return;
1970 }
1971
1972 ret = ib_post_send(qp, &swr, &bad_swr);
1973 if (ret) {
1974 WARN_ONCE(ret, "failed to drain send queue: %d\n", ret);
1975 return;
1976 }
1977
Bart Van Asschef039f442017-02-14 10:56:35 -08001978 if (cq->poll_ctx == IB_POLL_DIRECT)
1979 while (wait_for_completion_timeout(&sdrain.done, HZ / 10) <= 0)
1980 ib_process_cq_direct(cq, -1);
1981 else
1982 wait_for_completion(&sdrain.done);
Steve Wise765d6772016-02-17 08:15:41 -08001983}
1984
1985/*
1986 * Post a WR and block until its completion is reaped for the RQ.
1987 */
1988static void __ib_drain_rq(struct ib_qp *qp)
1989{
Bart Van Asschef039f442017-02-14 10:56:35 -08001990 struct ib_cq *cq = qp->recv_cq;
Steve Wise765d6772016-02-17 08:15:41 -08001991 struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
1992 struct ib_drain_cqe rdrain;
1993 struct ib_recv_wr rwr = {}, *bad_rwr;
1994 int ret;
1995
Steve Wise765d6772016-02-17 08:15:41 -08001996 rwr.wr_cqe = &rdrain.cqe;
1997 rdrain.cqe.done = ib_drain_qp_done;
1998 init_completion(&rdrain.done);
1999
2000 ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
2001 if (ret) {
2002 WARN_ONCE(ret, "failed to drain recv queue: %d\n", ret);
2003 return;
2004 }
2005
2006 ret = ib_post_recv(qp, &rwr, &bad_rwr);
2007 if (ret) {
2008 WARN_ONCE(ret, "failed to drain recv queue: %d\n", ret);
2009 return;
2010 }
2011
Bart Van Asschef039f442017-02-14 10:56:35 -08002012 if (cq->poll_ctx == IB_POLL_DIRECT)
2013 while (wait_for_completion_timeout(&rdrain.done, HZ / 10) <= 0)
2014 ib_process_cq_direct(cq, -1);
2015 else
2016 wait_for_completion(&rdrain.done);
Steve Wise765d6772016-02-17 08:15:41 -08002017}
2018
2019/**
2020 * ib_drain_sq() - Block until all SQ CQEs have been consumed by the
2021 * application.
2022 * @qp: queue pair to drain
2023 *
2024 * If the device has a provider-specific drain function, then
2025 * call that. Otherwise call the generic drain function
2026 * __ib_drain_sq().
2027 *
2028 * The caller must:
2029 *
2030 * ensure there is room in the CQ and SQ for the drain work request and
2031 * completion.
2032 *
Bart Van Asschef039f442017-02-14 10:56:35 -08002033 * allocate the CQ using ib_alloc_cq().
Steve Wise765d6772016-02-17 08:15:41 -08002034 *
2035 * ensure that there are no other contexts that are posting WRs concurrently.
2036 * Otherwise the drain is not guaranteed.
2037 */
2038void ib_drain_sq(struct ib_qp *qp)
2039{
2040 if (qp->device->drain_sq)
2041 qp->device->drain_sq(qp);
2042 else
2043 __ib_drain_sq(qp);
2044}
2045EXPORT_SYMBOL(ib_drain_sq);
2046
2047/**
2048 * ib_drain_rq() - Block until all RQ CQEs have been consumed by the
2049 * application.
2050 * @qp: queue pair to drain
2051 *
2052 * If the device has a provider-specific drain function, then
2053 * call that. Otherwise call the generic drain function
2054 * __ib_drain_rq().
2055 *
2056 * The caller must:
2057 *
2058 * ensure there is room in the CQ and RQ for the drain work request and
2059 * completion.
2060 *
Bart Van Asschef039f442017-02-14 10:56:35 -08002061 * allocate the CQ using ib_alloc_cq().
Steve Wise765d6772016-02-17 08:15:41 -08002062 *
2063 * ensure that there are no other contexts that are posting WRs concurrently.
2064 * Otherwise the drain is not guaranteed.
2065 */
2066void ib_drain_rq(struct ib_qp *qp)
2067{
2068 if (qp->device->drain_rq)
2069 qp->device->drain_rq(qp);
2070 else
2071 __ib_drain_rq(qp);
2072}
2073EXPORT_SYMBOL(ib_drain_rq);
2074
2075/**
2076 * ib_drain_qp() - Block until all CQEs have been consumed by the
2077 * application on both the RQ and SQ.
2078 * @qp: queue pair to drain
2079 *
2080 * The caller must:
2081 *
2082 * ensure there is room in the CQ(s), SQ, and RQ for drain work requests
2083 * and completions.
2084 *
Bart Van Asschef039f442017-02-14 10:56:35 -08002085 * allocate the CQs using ib_alloc_cq().
Steve Wise765d6772016-02-17 08:15:41 -08002086 *
2087 * ensure that there are no other contexts that are posting WRs concurrently.
2088 * Otherwise the drain is not guaranteed.
2089 */
2090void ib_drain_qp(struct ib_qp *qp)
2091{
2092 ib_drain_sq(qp);
Sagi Grimberg42235f82016-04-26 17:55:38 +03002093 if (!qp->srq)
2094 ib_drain_rq(qp);
Steve Wise765d6772016-02-17 08:15:41 -08002095}
2096EXPORT_SYMBOL(ib_drain_qp);