/*
 * Copyright (c) 2004 Mellanox Technologies Ltd.  All rights reserved.
 * Copyright (c) 2004 Infinicon Corporation.  All rights reserved.
 * Copyright (c) 2004 Intel Corporation.  All rights reserved.
 * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
 * Copyright (c) 2004 Voltaire Corporation.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005, 2006 Cisco Systems.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <net/addrconf.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_cache.h>
#include <rdma/ib_addr.h>
#include <rdma/rw.h>

#include "core_priv.h"

static const char * const ib_events[] = {
	[IB_EVENT_CQ_ERR]		= "CQ error",
	[IB_EVENT_QP_FATAL]		= "QP fatal error",
	[IB_EVENT_QP_REQ_ERR]		= "QP request error",
	[IB_EVENT_QP_ACCESS_ERR]	= "QP access error",
	[IB_EVENT_COMM_EST]		= "communication established",
	[IB_EVENT_SQ_DRAINED]		= "send queue drained",
	[IB_EVENT_PATH_MIG]		= "path migration successful",
	[IB_EVENT_PATH_MIG_ERR]		= "path migration error",
	[IB_EVENT_DEVICE_FATAL]		= "device fatal error",
	[IB_EVENT_PORT_ACTIVE]		= "port active",
	[IB_EVENT_PORT_ERR]		= "port error",
	[IB_EVENT_LID_CHANGE]		= "LID change",
	[IB_EVENT_PKEY_CHANGE]		= "P_key change",
	[IB_EVENT_SM_CHANGE]		= "SM change",
	[IB_EVENT_SRQ_ERR]		= "SRQ error",
	[IB_EVENT_SRQ_LIMIT_REACHED]	= "SRQ limit reached",
	[IB_EVENT_QP_LAST_WQE_REACHED]	= "last WQE reached",
	[IB_EVENT_CLIENT_REREGISTER]	= "client reregister",
	[IB_EVENT_GID_CHANGE]		= "GID changed",
};

const char *__attribute_const__ ib_event_msg(enum ib_event_type event)
{
	size_t index = event;

	return (index < ARRAY_SIZE(ib_events) && ib_events[index]) ?
			ib_events[index] : "unrecognized event";
}
EXPORT_SYMBOL(ib_event_msg);
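
/*
 * Usage sketch (illustrative only, not part of the upstream file): an
 * async event handler can turn the event code into a readable string.
 * The my_event_handler() name below is hypothetical.
 *
 *	static void my_event_handler(struct ib_event_handler *handler,
 *				     struct ib_event *event)
 *	{
 *		pr_info("async event on %s: %s\n",
 *			event->device->name, ib_event_msg(event->event));
 *	}
 */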

static const char * const wc_statuses[] = {
	[IB_WC_SUCCESS]			= "success",
	[IB_WC_LOC_LEN_ERR]		= "local length error",
	[IB_WC_LOC_QP_OP_ERR]		= "local QP operation error",
	[IB_WC_LOC_EEC_OP_ERR]		= "local EE context operation error",
	[IB_WC_LOC_PROT_ERR]		= "local protection error",
	[IB_WC_WR_FLUSH_ERR]		= "WR flushed",
	[IB_WC_MW_BIND_ERR]		= "memory management operation error",
	[IB_WC_BAD_RESP_ERR]		= "bad response error",
	[IB_WC_LOC_ACCESS_ERR]		= "local access error",
	[IB_WC_REM_INV_REQ_ERR]		= "invalid request error",
	[IB_WC_REM_ACCESS_ERR]		= "remote access error",
	[IB_WC_REM_OP_ERR]		= "remote operation error",
	[IB_WC_RETRY_EXC_ERR]		= "transport retry counter exceeded",
	[IB_WC_RNR_RETRY_EXC_ERR]	= "RNR retry counter exceeded",
	[IB_WC_LOC_RDD_VIOL_ERR]	= "local RDD violation error",
	[IB_WC_REM_INV_RD_REQ_ERR]	= "remote invalid RD request",
	[IB_WC_REM_ABORT_ERR]		= "operation aborted",
	[IB_WC_INV_EECN_ERR]		= "invalid EE context number",
	[IB_WC_INV_EEC_STATE_ERR]	= "invalid EE context state",
	[IB_WC_FATAL_ERR]		= "fatal error",
	[IB_WC_RESP_TIMEOUT_ERR]	= "response timeout error",
	[IB_WC_GENERAL_ERR]		= "general error",
};

const char *__attribute_const__ ib_wc_status_msg(enum ib_wc_status status)
{
	size_t index = status;

	return (index < ARRAY_SIZE(wc_statuses) && wc_statuses[index]) ?
			wc_statuses[index] : "unrecognized status";
}
EXPORT_SYMBOL(ib_wc_status_msg);

__attribute_const__ int ib_rate_to_mult(enum ib_rate rate)
{
	switch (rate) {
	case IB_RATE_2_5_GBPS: return  1;
	case IB_RATE_5_GBPS:   return  2;
	case IB_RATE_10_GBPS:  return  4;
	case IB_RATE_20_GBPS:  return  8;
	case IB_RATE_30_GBPS:  return 12;
	case IB_RATE_40_GBPS:  return 16;
	case IB_RATE_60_GBPS:  return 24;
	case IB_RATE_80_GBPS:  return 32;
	case IB_RATE_120_GBPS: return 48;
	default:	       return -1;
	}
}
EXPORT_SYMBOL(ib_rate_to_mult);

__attribute_const__ enum ib_rate mult_to_ib_rate(int mult)
{
	switch (mult) {
	case 1:  return IB_RATE_2_5_GBPS;
	case 2:  return IB_RATE_5_GBPS;
	case 4:  return IB_RATE_10_GBPS;
	case 8:  return IB_RATE_20_GBPS;
	case 12: return IB_RATE_30_GBPS;
	case 16: return IB_RATE_40_GBPS;
	case 24: return IB_RATE_60_GBPS;
	case 32: return IB_RATE_80_GBPS;
	case 48: return IB_RATE_120_GBPS;
	default: return IB_RATE_PORT_CURRENT;
	}
}
EXPORT_SYMBOL(mult_to_ib_rate);
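
/*
 * Illustrative sketch (not part of the upstream file): the two helpers
 * above convert between an ib_rate value and its multiple of the 2.5 Gb/s
 * base rate, so for the rates they both cover they are inverses:
 *
 *	int mult = ib_rate_to_mult(IB_RATE_40_GBPS);	// 16
 *	enum ib_rate rate = mult_to_ib_rate(mult);	// IB_RATE_40_GBPS
 *
 * Rates with no exact 2.5 Gb/s multiple (e.g. IB_RATE_14_GBPS) return -1
 * from ib_rate_to_mult() and are only representable via ib_rate_to_mbps().
 */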

__attribute_const__ int ib_rate_to_mbps(enum ib_rate rate)
{
	switch (rate) {
	case IB_RATE_2_5_GBPS: return 2500;
	case IB_RATE_5_GBPS:   return 5000;
	case IB_RATE_10_GBPS:  return 10000;
	case IB_RATE_20_GBPS:  return 20000;
	case IB_RATE_30_GBPS:  return 30000;
	case IB_RATE_40_GBPS:  return 40000;
	case IB_RATE_60_GBPS:  return 60000;
	case IB_RATE_80_GBPS:  return 80000;
	case IB_RATE_120_GBPS: return 120000;
	case IB_RATE_14_GBPS:  return 14062;
	case IB_RATE_56_GBPS:  return 56250;
	case IB_RATE_112_GBPS: return 112500;
	case IB_RATE_168_GBPS: return 168750;
	case IB_RATE_25_GBPS:  return 25781;
	case IB_RATE_100_GBPS: return 103125;
	case IB_RATE_200_GBPS: return 206250;
	case IB_RATE_300_GBPS: return 309375;
	default:	       return -1;
	}
}
EXPORT_SYMBOL(ib_rate_to_mbps);

__attribute_const__ enum rdma_transport_type
rdma_node_get_transport(enum rdma_node_type node_type)
{
	switch (node_type) {
	case RDMA_NODE_IB_CA:
	case RDMA_NODE_IB_SWITCH:
	case RDMA_NODE_IB_ROUTER:
		return RDMA_TRANSPORT_IB;
	case RDMA_NODE_RNIC:
		return RDMA_TRANSPORT_IWARP;
	case RDMA_NODE_USNIC:
		return RDMA_TRANSPORT_USNIC;
	case RDMA_NODE_USNIC_UDP:
		return RDMA_TRANSPORT_USNIC_UDP;
	default:
		BUG();
		return 0;
	}
}
EXPORT_SYMBOL(rdma_node_get_transport);

enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device, u8 port_num)
{
	if (device->get_link_layer)
		return device->get_link_layer(device, port_num);

	switch (rdma_node_get_transport(device->node_type)) {
	case RDMA_TRANSPORT_IB:
		return IB_LINK_LAYER_INFINIBAND;
	case RDMA_TRANSPORT_IWARP:
	case RDMA_TRANSPORT_USNIC:
	case RDMA_TRANSPORT_USNIC_UDP:
		return IB_LINK_LAYER_ETHERNET;
	default:
		return IB_LINK_LAYER_UNSPECIFIED;
	}
}
EXPORT_SYMBOL(rdma_port_get_link_layer);

/* Protection domains */

/**
 * ib_alloc_pd - Allocates an unused protection domain.
 * @device: The device on which to allocate the protection domain.
 *
 * A protection domain object provides an association between QPs, shared
 * receive queues, address handles, memory regions, and memory windows.
 *
 * Every PD has a local_dma_lkey which can be used as the lkey value for local
 * memory operations.
 */
struct ib_pd *ib_alloc_pd(struct ib_device *device)
{
	struct ib_pd *pd;

	pd = device->alloc_pd(device, NULL, NULL);
	if (IS_ERR(pd))
		return pd;

	pd->device = device;
	pd->uobject = NULL;
	pd->local_mr = NULL;
	atomic_set(&pd->usecnt, 0);

	if (device->attrs.device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY)
		pd->local_dma_lkey = device->local_dma_lkey;
	else {
		struct ib_mr *mr;

		mr = ib_get_dma_mr(pd, IB_ACCESS_LOCAL_WRITE);
		if (IS_ERR(mr)) {
			ib_dealloc_pd(pd);
			return (struct ib_pd *)mr;
		}

		pd->local_mr = mr;
		pd->local_dma_lkey = pd->local_mr->lkey;
	}
	return pd;
}
EXPORT_SYMBOL(ib_alloc_pd);
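
/*
 * Usage sketch (illustrative only, not part of the upstream file): kernel
 * ULPs typically allocate one PD per device at setup time and check the
 * result with IS_ERR(), since ib_alloc_pd() never returns NULL:
 *
 *	struct ib_pd *pd = ib_alloc_pd(device);
 *	if (IS_ERR(pd))
 *		return PTR_ERR(pd);
 *	// ... create QPs/CQs/MRs under this PD, using pd->local_dma_lkey
 *	//     for locally DMA-mapped buffers ...
 *	ib_dealloc_pd(pd);
 */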

/**
 * ib_dealloc_pd - Deallocates a protection domain.
 * @pd: The protection domain to deallocate.
 *
 * It is an error to call this function while any resources in the pd still
 * exist.  The caller is responsible to synchronously destroy them and
 * guarantee no new allocations will happen.
 */
void ib_dealloc_pd(struct ib_pd *pd)
{
	int ret;

	if (pd->local_mr) {
		ret = ib_dereg_mr(pd->local_mr);
		WARN_ON(ret);
		pd->local_mr = NULL;
	}

	/* uverbs manipulates usecnt with proper locking, while the kabi
	 * requires the caller to guarantee we can't race here.
	 */
	WARN_ON(atomic_read(&pd->usecnt));

	/* Making dealloc_pd a void return is a WIP, no driver should return
	 * an error here.
	 */
	ret = pd->device->dealloc_pd(pd);
	WARN_ONCE(ret, "Infiniband HW driver failed dealloc_pd");
}
EXPORT_SYMBOL(ib_dealloc_pd);

/* Address handles */

struct ib_ah *ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr)
{
	struct ib_ah *ah;

	ah = pd->device->create_ah(pd, ah_attr);

	if (!IS_ERR(ah)) {
		ah->device  = pd->device;
		ah->pd      = pd;
		ah->uobject = NULL;
		atomic_inc(&pd->usecnt);
	}

	return ah;
}
EXPORT_SYMBOL(ib_create_ah);

static int ib_get_header_version(const union rdma_network_hdr *hdr)
{
	const struct iphdr *ip4h = (struct iphdr *)&hdr->roce4grh;
	struct iphdr ip4h_checked;
	const struct ipv6hdr *ip6h = (struct ipv6hdr *)&hdr->ibgrh;

	/* If it's IPv6, the version must be 6, otherwise, the first
	 * 20 bytes (before the IPv4 header) are garbled.
	 */
	if (ip6h->version != 6)
		return (ip4h->version == 4) ? 4 : 0;
	/* version may be 6 or 4 because the first 20 bytes could be garbled */

	/* RoCE v2 requires no options, thus header length
	 * must be 5 words
	 */
	if (ip4h->ihl != 5)
		return 6;

	/* Verify checksum.
	 * We can't write on scattered buffers so we need to copy to
	 * temp buffer.
	 */
	memcpy(&ip4h_checked, ip4h, sizeof(ip4h_checked));
	ip4h_checked.check = 0;
	ip4h_checked.check = ip_fast_csum((u8 *)&ip4h_checked, 5);
	/* if IPv4 header checksum is OK, believe it */
	if (ip4h->check == ip4h_checked.check)
		return 4;
	return 6;
}

static enum rdma_network_type ib_get_net_type_by_grh(struct ib_device *device,
						     u8 port_num,
						     const struct ib_grh *grh)
{
	int grh_version;

	if (rdma_protocol_ib(device, port_num))
		return RDMA_NETWORK_IB;

	grh_version = ib_get_header_version((union rdma_network_hdr *)grh);

	if (grh_version == 4)
		return RDMA_NETWORK_IPV4;

	if (grh->next_hdr == IPPROTO_UDP)
		return RDMA_NETWORK_IPV6;

	return RDMA_NETWORK_ROCE_V1;
}

struct find_gid_index_context {
	u16 vlan_id;
	enum ib_gid_type gid_type;
};

static bool find_gid_index(const union ib_gid *gid,
			   const struct ib_gid_attr *gid_attr,
			   void *context)
{
	struct find_gid_index_context *ctx =
		(struct find_gid_index_context *)context;

	if (ctx->gid_type != gid_attr->gid_type)
		return false;

	if ((!!(ctx->vlan_id != 0xffff) == !is_vlan_dev(gid_attr->ndev)) ||
	    (is_vlan_dev(gid_attr->ndev) &&
	     vlan_dev_vlan_id(gid_attr->ndev) != ctx->vlan_id))
		return false;

	return true;
}

static int get_sgid_index_from_eth(struct ib_device *device, u8 port_num,
				   u16 vlan_id, const union ib_gid *sgid,
				   enum ib_gid_type gid_type,
				   u16 *gid_index)
{
	struct find_gid_index_context context = {.vlan_id = vlan_id,
						 .gid_type = gid_type};

	return ib_find_gid_by_filter(device, sgid, port_num, find_gid_index,
				     &context, gid_index);
}

static int get_gids_from_rdma_hdr(union rdma_network_hdr *hdr,
				  enum rdma_network_type net_type,
				  union ib_gid *sgid, union ib_gid *dgid)
{
	struct sockaddr_in src_in;
	struct sockaddr_in dst_in;
	__be32 src_saddr, dst_saddr;

	if (!sgid || !dgid)
		return -EINVAL;

	if (net_type == RDMA_NETWORK_IPV4) {
		memcpy(&src_in.sin_addr.s_addr,
		       &hdr->roce4grh.saddr, 4);
		memcpy(&dst_in.sin_addr.s_addr,
		       &hdr->roce4grh.daddr, 4);
		src_saddr = src_in.sin_addr.s_addr;
		dst_saddr = dst_in.sin_addr.s_addr;
		ipv6_addr_set_v4mapped(src_saddr,
				       (struct in6_addr *)sgid);
		ipv6_addr_set_v4mapped(dst_saddr,
				       (struct in6_addr *)dgid);
		return 0;
	} else if (net_type == RDMA_NETWORK_IPV6 ||
		   net_type == RDMA_NETWORK_IB) {
		*dgid = hdr->ibgrh.dgid;
		*sgid = hdr->ibgrh.sgid;
		return 0;
	} else {
		return -EINVAL;
	}
}

int ib_init_ah_from_wc(struct ib_device *device, u8 port_num,
		       const struct ib_wc *wc, const struct ib_grh *grh,
		       struct ib_ah_attr *ah_attr)
{
	u32 flow_class;
	u16 gid_index;
	int ret;
	enum rdma_network_type net_type = RDMA_NETWORK_IB;
	enum ib_gid_type gid_type = IB_GID_TYPE_IB;
	int hoplimit = 0xff;
	union ib_gid dgid;
	union ib_gid sgid;

	memset(ah_attr, 0, sizeof *ah_attr);
	if (rdma_cap_eth_ah(device, port_num)) {
		if (wc->wc_flags & IB_WC_WITH_NETWORK_HDR_TYPE)
			net_type = wc->network_hdr_type;
		else
			net_type = ib_get_net_type_by_grh(device, port_num, grh);
		gid_type = ib_network_to_gid_type(net_type);
	}
	ret = get_gids_from_rdma_hdr((union rdma_network_hdr *)grh, net_type,
				     &sgid, &dgid);
	if (ret)
		return ret;

	if (rdma_protocol_roce(device, port_num)) {
		int if_index = 0;
		u16 vlan_id = wc->wc_flags & IB_WC_WITH_VLAN ?
				wc->vlan_id : 0xffff;
		struct net_device *idev;
		struct net_device *resolved_dev;

		if (!(wc->wc_flags & IB_WC_GRH))
			return -EPROTOTYPE;

		if (!device->get_netdev)
			return -EOPNOTSUPP;

		idev = device->get_netdev(device, port_num);
		if (!idev)
			return -ENODEV;

		ret = rdma_addr_find_l2_eth_by_grh(&dgid, &sgid,
						   ah_attr->dmac,
						   wc->wc_flags & IB_WC_WITH_VLAN ?
						   NULL : &vlan_id,
						   &if_index, &hoplimit);
		if (ret) {
			dev_put(idev);
			return ret;
		}

		resolved_dev = dev_get_by_index(&init_net, if_index);
		/* Guard against a stale ifindex: dev_get_by_index() may
		 * return NULL, which the original code dereferenced.
		 */
		if (!resolved_dev) {
			dev_put(idev);
			return -ENODEV;
		}
		if (resolved_dev->flags & IFF_LOOPBACK) {
			dev_put(resolved_dev);
			resolved_dev = idev;
			dev_hold(resolved_dev);
		}
		rcu_read_lock();
		if (resolved_dev != idev && !rdma_is_upper_dev_rcu(idev,
								   resolved_dev))
			ret = -EHOSTUNREACH;
		rcu_read_unlock();
		dev_put(idev);
		dev_put(resolved_dev);
		if (ret)
			return ret;

		ret = get_sgid_index_from_eth(device, port_num, vlan_id,
					      &dgid, gid_type, &gid_index);
		if (ret)
			return ret;
	}

	ah_attr->dlid = wc->slid;
	ah_attr->sl = wc->sl;
	ah_attr->src_path_bits = wc->dlid_path_bits;
	ah_attr->port_num = port_num;

	if (wc->wc_flags & IB_WC_GRH) {
		ah_attr->ah_flags = IB_AH_GRH;
		ah_attr->grh.dgid = sgid;

		if (!rdma_cap_eth_ah(device, port_num)) {
			ret = ib_find_cached_gid_by_port(device, &dgid,
							 IB_GID_TYPE_IB,
							 port_num, NULL,
							 &gid_index);
			if (ret)
				return ret;
		}

		ah_attr->grh.sgid_index = (u8) gid_index;
		flow_class = be32_to_cpu(grh->version_tclass_flow);
		ah_attr->grh.flow_label = flow_class & 0xFFFFF;
		ah_attr->grh.hop_limit = hoplimit;
		ah_attr->grh.traffic_class = (flow_class >> 20) & 0xFF;
	}
	return 0;
}
EXPORT_SYMBOL(ib_init_ah_from_wc);

struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, const struct ib_wc *wc,
				   const struct ib_grh *grh, u8 port_num)
{
	struct ib_ah_attr ah_attr;
	int ret;

	ret = ib_init_ah_from_wc(pd->device, port_num, wc, grh, &ah_attr);
	if (ret)
		return ERR_PTR(ret);

	return ib_create_ah(pd, &ah_attr);
}
EXPORT_SYMBOL(ib_create_ah_from_wc);
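
/*
 * Usage sketch (illustrative only, not part of the upstream file): a UD
 * service replying to a received datagram can build the reverse-path AH
 * straight from the completion.  recv_buf is a hypothetical receive buffer;
 * its first 40 bytes hold the GRH when IB_WC_GRH is set in wc->wc_flags.
 *
 *	struct ib_grh *grh = (struct ib_grh *)recv_buf;
 *	struct ib_ah *ah;
 *
 *	ah = ib_create_ah_from_wc(pd, wc, grh, port_num);
 *	if (IS_ERR(ah))
 *		return PTR_ERR(ah);
 *	// ... post the reply using this AH, then ib_destroy_ah(ah) ...
 */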

int ib_modify_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr)
{
	return ah->device->modify_ah ?
		ah->device->modify_ah(ah, ah_attr) :
		-ENOSYS;
}
EXPORT_SYMBOL(ib_modify_ah);

int ib_query_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr)
{
	return ah->device->query_ah ?
		ah->device->query_ah(ah, ah_attr) :
		-ENOSYS;
}
EXPORT_SYMBOL(ib_query_ah);

int ib_destroy_ah(struct ib_ah *ah)
{
	struct ib_pd *pd;
	int ret;

	pd = ah->pd;
	ret = ah->device->destroy_ah(ah);
	if (!ret)
		atomic_dec(&pd->usecnt);

	return ret;
}
EXPORT_SYMBOL(ib_destroy_ah);

/* Shared receive queues */

struct ib_srq *ib_create_srq(struct ib_pd *pd,
			     struct ib_srq_init_attr *srq_init_attr)
{
	struct ib_srq *srq;

	if (!pd->device->create_srq)
		return ERR_PTR(-ENOSYS);

	srq = pd->device->create_srq(pd, srq_init_attr, NULL);

	if (!IS_ERR(srq)) {
		srq->device        = pd->device;
		srq->pd            = pd;
		srq->uobject       = NULL;
		srq->event_handler = srq_init_attr->event_handler;
		srq->srq_context   = srq_init_attr->srq_context;
		srq->srq_type      = srq_init_attr->srq_type;
		if (srq->srq_type == IB_SRQT_XRC) {
			srq->ext.xrc.xrcd = srq_init_attr->ext.xrc.xrcd;
			srq->ext.xrc.cq   = srq_init_attr->ext.xrc.cq;
			atomic_inc(&srq->ext.xrc.xrcd->usecnt);
			atomic_inc(&srq->ext.xrc.cq->usecnt);
		}
		atomic_inc(&pd->usecnt);
		atomic_set(&srq->usecnt, 0);
	}

	return srq;
}
EXPORT_SYMBOL(ib_create_srq);
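
/*
 * Usage sketch (illustrative only, not part of the upstream file):
 * creating a basic (non-XRC) SRQ shared by several QPs.  The srq_limit
 * below only takes effect once armed via ib_modify_srq() with
 * IB_SRQ_LIMIT; my_srq_event_handler() is hypothetical.
 *
 *	struct ib_srq_init_attr attr = {
 *		.event_handler = my_srq_event_handler,
 *		.attr = {
 *			.max_wr    = 256,
 *			.max_sge   = 1,
 *			.srq_limit = 16,
 *		},
 *		.srq_type = IB_SRQT_BASIC,
 *	};
 *	struct ib_srq *srq = ib_create_srq(pd, &attr);
 *	if (IS_ERR(srq))
 *		return PTR_ERR(srq);
 */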

int ib_modify_srq(struct ib_srq *srq,
		  struct ib_srq_attr *srq_attr,
		  enum ib_srq_attr_mask srq_attr_mask)
{
	return srq->device->modify_srq ?
		srq->device->modify_srq(srq, srq_attr, srq_attr_mask, NULL) :
		-ENOSYS;
}
EXPORT_SYMBOL(ib_modify_srq);

int ib_query_srq(struct ib_srq *srq,
		 struct ib_srq_attr *srq_attr)
{
	return srq->device->query_srq ?
		srq->device->query_srq(srq, srq_attr) : -ENOSYS;
}
EXPORT_SYMBOL(ib_query_srq);

int ib_destroy_srq(struct ib_srq *srq)
{
	struct ib_pd *pd;
	enum ib_srq_type srq_type;
	struct ib_xrcd *uninitialized_var(xrcd);
	struct ib_cq *uninitialized_var(cq);
	int ret;

	if (atomic_read(&srq->usecnt))
		return -EBUSY;

	pd = srq->pd;
	srq_type = srq->srq_type;
	if (srq_type == IB_SRQT_XRC) {
		xrcd = srq->ext.xrc.xrcd;
		cq = srq->ext.xrc.cq;
	}

	ret = srq->device->destroy_srq(srq);
	if (!ret) {
		atomic_dec(&pd->usecnt);
		if (srq_type == IB_SRQT_XRC) {
			atomic_dec(&xrcd->usecnt);
			atomic_dec(&cq->usecnt);
		}
	}

	return ret;
}
EXPORT_SYMBOL(ib_destroy_srq);

/* Queue pairs */

static void __ib_shared_qp_event_handler(struct ib_event *event, void *context)
{
	struct ib_qp *qp = context;
	unsigned long flags;

	spin_lock_irqsave(&qp->device->event_handler_lock, flags);
	list_for_each_entry(event->element.qp, &qp->open_list, open_list)
		if (event->element.qp->event_handler)
			event->element.qp->event_handler(event, event->element.qp->qp_context);
	spin_unlock_irqrestore(&qp->device->event_handler_lock, flags);
}

static void __ib_insert_xrcd_qp(struct ib_xrcd *xrcd, struct ib_qp *qp)
{
	mutex_lock(&xrcd->tgt_qp_mutex);
	list_add(&qp->xrcd_list, &xrcd->tgt_qp_list);
	mutex_unlock(&xrcd->tgt_qp_mutex);
}

static struct ib_qp *__ib_open_qp(struct ib_qp *real_qp,
				  void (*event_handler)(struct ib_event *, void *),
				  void *qp_context)
{
	struct ib_qp *qp;
	unsigned long flags;

	qp = kzalloc(sizeof *qp, GFP_KERNEL);
	if (!qp)
		return ERR_PTR(-ENOMEM);

	qp->real_qp = real_qp;
	atomic_inc(&real_qp->usecnt);
	qp->device = real_qp->device;
	qp->event_handler = event_handler;
	qp->qp_context = qp_context;
	qp->qp_num = real_qp->qp_num;
	qp->qp_type = real_qp->qp_type;

	spin_lock_irqsave(&real_qp->device->event_handler_lock, flags);
	list_add(&qp->open_list, &real_qp->open_list);
	spin_unlock_irqrestore(&real_qp->device->event_handler_lock, flags);

	return qp;
}

struct ib_qp *ib_open_qp(struct ib_xrcd *xrcd,
			 struct ib_qp_open_attr *qp_open_attr)
{
	struct ib_qp *qp, *real_qp;

	if (qp_open_attr->qp_type != IB_QPT_XRC_TGT)
		return ERR_PTR(-EINVAL);

	qp = ERR_PTR(-EINVAL);
	mutex_lock(&xrcd->tgt_qp_mutex);
	list_for_each_entry(real_qp, &xrcd->tgt_qp_list, xrcd_list) {
		if (real_qp->qp_num == qp_open_attr->qp_num) {
			qp = __ib_open_qp(real_qp, qp_open_attr->event_handler,
					  qp_open_attr->qp_context);
			break;
		}
	}
	mutex_unlock(&xrcd->tgt_qp_mutex);
	return qp;
}
EXPORT_SYMBOL(ib_open_qp);

static struct ib_qp *ib_create_xrc_qp(struct ib_qp *qp,
				      struct ib_qp_init_attr *qp_init_attr)
{
	struct ib_qp *real_qp = qp;

	qp->event_handler = __ib_shared_qp_event_handler;
	qp->qp_context = qp;
	qp->pd = NULL;
	qp->send_cq = qp->recv_cq = NULL;
	qp->srq = NULL;
	qp->xrcd = qp_init_attr->xrcd;
	atomic_inc(&qp_init_attr->xrcd->usecnt);
	INIT_LIST_HEAD(&qp->open_list);

	qp = __ib_open_qp(real_qp, qp_init_attr->event_handler,
			  qp_init_attr->qp_context);
	if (!IS_ERR(qp))
		__ib_insert_xrcd_qp(qp_init_attr->xrcd, real_qp);
	else
		real_qp->device->destroy_qp(real_qp);
	return qp;
}

struct ib_qp *ib_create_qp(struct ib_pd *pd,
			   struct ib_qp_init_attr *qp_init_attr)
{
	struct ib_device *device = pd ? pd->device : qp_init_attr->xrcd->device;
	struct ib_qp *qp;
	int ret;

	/*
	 * If the caller is using the RDMA READ/WRITE API, calculate the
	 * resources needed for the RDMA READ/WRITE operations.
	 *
	 * Note that these callers need to pass in a port number.
	 */
	if (qp_init_attr->cap.max_rdma_ctxs)
		rdma_rw_init_qp(device, qp_init_attr);

	qp = device->create_qp(pd, qp_init_attr, NULL);
	if (IS_ERR(qp))
		return qp;

	qp->device  = device;
	qp->real_qp = qp;
	qp->uobject = NULL;
	qp->qp_type = qp_init_attr->qp_type;

	atomic_set(&qp->usecnt, 0);
	qp->mrs_used = 0;
	spin_lock_init(&qp->mr_lock);
	INIT_LIST_HEAD(&qp->rdma_mrs);
	INIT_LIST_HEAD(&qp->sig_mrs);

	if (qp_init_attr->qp_type == IB_QPT_XRC_TGT)
		return ib_create_xrc_qp(qp, qp_init_attr);

	qp->event_handler = qp_init_attr->event_handler;
	qp->qp_context = qp_init_attr->qp_context;
	if (qp_init_attr->qp_type == IB_QPT_XRC_INI) {
		qp->recv_cq = NULL;
		qp->srq = NULL;
	} else {
		qp->recv_cq = qp_init_attr->recv_cq;
		atomic_inc(&qp_init_attr->recv_cq->usecnt);
		qp->srq = qp_init_attr->srq;
		if (qp->srq)
			atomic_inc(&qp_init_attr->srq->usecnt);
	}

	qp->pd      = pd;
	qp->send_cq = qp_init_attr->send_cq;
	qp->xrcd    = NULL;

	atomic_inc(&pd->usecnt);
	atomic_inc(&qp_init_attr->send_cq->usecnt);

	if (qp_init_attr->cap.max_rdma_ctxs) {
		ret = rdma_rw_init_mrs(qp, qp_init_attr);
		if (ret) {
			pr_err("failed to init MR pool ret= %d\n", ret);
			ib_destroy_qp(qp);
			qp = ERR_PTR(ret);
		}
	}

	return qp;
}
EXPORT_SYMBOL(ib_create_qp);
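
/*
 * Usage sketch (illustrative only, not part of the upstream file):
 * creating a basic RC QP on an existing PD and CQ pair;
 * my_qp_event_handler() is hypothetical.
 *
 *	struct ib_qp_init_attr init_attr = {
 *		.event_handler = my_qp_event_handler,
 *		.send_cq = send_cq,
 *		.recv_cq = recv_cq,
 *		.cap = {
 *			.max_send_wr  = 64,
 *			.max_recv_wr  = 64,
 *			.max_send_sge = 1,
 *			.max_recv_sge = 1,
 *		},
 *		.sq_sig_type = IB_SIGNAL_REQ_WR,
 *		.qp_type     = IB_QPT_RC,
 *	};
 *	struct ib_qp *qp = ib_create_qp(pd, &init_attr);
 *	if (IS_ERR(qp))
 *		return PTR_ERR(qp);
 */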

static const struct {
	int			valid;
	enum ib_qp_attr_mask	req_param[IB_QPT_MAX];
	enum ib_qp_attr_mask	opt_param[IB_QPT_MAX];
} qp_state_table[IB_QPS_ERR + 1][IB_QPS_ERR + 1] = {
	[IB_QPS_RESET] = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_INIT]  = {
			.valid = 1,
			.req_param = {
				[IB_QPT_UD]  = (IB_QP_PKEY_INDEX |
						IB_QP_PORT |
						IB_QP_QKEY),
				[IB_QPT_RAW_PACKET] = IB_QP_PORT,
				[IB_QPT_UC]  = (IB_QP_PKEY_INDEX |
						IB_QP_PORT |
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_RC]  = (IB_QP_PKEY_INDEX |
						IB_QP_PORT |
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_XRC_INI] = (IB_QP_PKEY_INDEX |
						IB_QP_PORT |
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_XRC_TGT] = (IB_QP_PKEY_INDEX |
						IB_QP_PORT |
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_SMI] = (IB_QP_PKEY_INDEX |
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_PKEY_INDEX |
						IB_QP_QKEY),
			}
		},
	},
	[IB_QPS_INIT]  = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR]   = { .valid = 1 },
		[IB_QPS_INIT]  = {
			.valid = 1,
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_PKEY_INDEX |
						IB_QP_PORT |
						IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_PKEY_INDEX |
						IB_QP_PORT |
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_RC]  = (IB_QP_PKEY_INDEX |
						IB_QP_PORT |
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_XRC_INI] = (IB_QP_PKEY_INDEX |
						IB_QP_PORT |
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_XRC_TGT] = (IB_QP_PKEY_INDEX |
						IB_QP_PORT |
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_SMI] = (IB_QP_PKEY_INDEX |
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_PKEY_INDEX |
						IB_QP_QKEY),
			}
		},
		[IB_QPS_RTR]   = {
			.valid = 1,
			.req_param = {
				[IB_QPT_UC]  = (IB_QP_AV |
						IB_QP_PATH_MTU |
						IB_QP_DEST_QPN |
						IB_QP_RQ_PSN),
				[IB_QPT_RC]  = (IB_QP_AV |
						IB_QP_PATH_MTU |
						IB_QP_DEST_QPN |
						IB_QP_RQ_PSN |
						IB_QP_MAX_DEST_RD_ATOMIC |
						IB_QP_MIN_RNR_TIMER),
				[IB_QPT_XRC_INI] = (IB_QP_AV |
						IB_QP_PATH_MTU |
						IB_QP_DEST_QPN |
						IB_QP_RQ_PSN),
				[IB_QPT_XRC_TGT] = (IB_QP_AV |
						IB_QP_PATH_MTU |
						IB_QP_DEST_QPN |
						IB_QP_RQ_PSN |
						IB_QP_MAX_DEST_RD_ATOMIC |
						IB_QP_MIN_RNR_TIMER),
			},
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_PKEY_INDEX |
						IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_PKEY_INDEX),
				[IB_QPT_RC]  = (IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_PKEY_INDEX),
				[IB_QPT_XRC_INI] = (IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_PKEY_INDEX),
				[IB_QPT_XRC_TGT] = (IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_PKEY_INDEX),
				[IB_QPT_SMI] = (IB_QP_PKEY_INDEX |
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_PKEY_INDEX |
						IB_QP_QKEY),
			},
		},
	},
	[IB_QPS_RTR]   = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR]   = { .valid = 1 },
		[IB_QPS_RTS]   = {
			.valid = 1,
			.req_param = {
				[IB_QPT_UD]  = IB_QP_SQ_PSN,
				[IB_QPT_UC]  = IB_QP_SQ_PSN,
				[IB_QPT_RC]  = (IB_QP_TIMEOUT |
						IB_QP_RETRY_CNT |
						IB_QP_RNR_RETRY |
						IB_QP_SQ_PSN |
						IB_QP_MAX_QP_RD_ATOMIC),
				[IB_QPT_XRC_INI] = (IB_QP_TIMEOUT |
						IB_QP_RETRY_CNT |
						IB_QP_RNR_RETRY |
						IB_QP_SQ_PSN |
						IB_QP_MAX_QP_RD_ATOMIC),
				[IB_QPT_XRC_TGT] = (IB_QP_TIMEOUT |
						IB_QP_SQ_PSN),
				[IB_QPT_SMI] = IB_QP_SQ_PSN,
				[IB_QPT_GSI] = IB_QP_SQ_PSN,
			},
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_CUR_STATE |
						IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_CUR_STATE |
						IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_RC]  = (IB_QP_CUR_STATE |
						IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_MIN_RNR_TIMER |
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_XRC_INI] = (IB_QP_CUR_STATE |
						IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_XRC_TGT] = (IB_QP_CUR_STATE |
						IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_MIN_RNR_TIMER |
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_SMI] = (IB_QP_CUR_STATE |
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_CUR_STATE |
						IB_QP_QKEY),
			}
		}
	},
	[IB_QPS_RTS]   = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR]   = { .valid = 1 },
		[IB_QPS_RTS]   = {
			.valid = 1,
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_CUR_STATE |
						IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_CUR_STATE |
						IB_QP_ACCESS_FLAGS |
						IB_QP_ALT_PATH |
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_RC]  = (IB_QP_CUR_STATE |
						IB_QP_ACCESS_FLAGS |
						IB_QP_ALT_PATH |
						IB_QP_PATH_MIG_STATE |
						IB_QP_MIN_RNR_TIMER),
				[IB_QPT_XRC_INI] = (IB_QP_CUR_STATE |
						IB_QP_ACCESS_FLAGS |
						IB_QP_ALT_PATH |
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_XRC_TGT] = (IB_QP_CUR_STATE |
						IB_QP_ACCESS_FLAGS |
						IB_QP_ALT_PATH |
						IB_QP_PATH_MIG_STATE |
						IB_QP_MIN_RNR_TIMER),
				[IB_QPT_SMI] = (IB_QP_CUR_STATE |
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_CUR_STATE |
						IB_QP_QKEY),
			}
		},
		[IB_QPS_SQD]   = {
			.valid = 1,
			.opt_param = {
				[IB_QPT_UD]  = IB_QP_EN_SQD_ASYNC_NOTIFY,
				[IB_QPT_UC]  = IB_QP_EN_SQD_ASYNC_NOTIFY,
				[IB_QPT_RC]  = IB_QP_EN_SQD_ASYNC_NOTIFY,
				[IB_QPT_XRC_INI] = IB_QP_EN_SQD_ASYNC_NOTIFY,
				[IB_QPT_XRC_TGT] = IB_QP_EN_SQD_ASYNC_NOTIFY, /* ??? */
				[IB_QPT_SMI] = IB_QP_EN_SQD_ASYNC_NOTIFY,
				[IB_QPT_GSI] = IB_QP_EN_SQD_ASYNC_NOTIFY
			}
		},
	},
	[IB_QPS_SQD]   = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR]   = { .valid = 1 },
		[IB_QPS_RTS]   = {
			.valid = 1,
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_CUR_STATE |
						IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_CUR_STATE |
						IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_RC]  = (IB_QP_CUR_STATE |
						IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_MIN_RNR_TIMER |
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_XRC_INI] = (IB_QP_CUR_STATE |
						IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_XRC_TGT] = (IB_QP_CUR_STATE |
						IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_MIN_RNR_TIMER |
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_SMI] = (IB_QP_CUR_STATE |
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_CUR_STATE |
						IB_QP_QKEY),
			}
		},
		[IB_QPS_SQD]   = {
			.valid = 1,
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_PKEY_INDEX |
						IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_AV |
						IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_PKEY_INDEX |
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_RC]  = (IB_QP_PORT |
						IB_QP_AV |
						IB_QP_TIMEOUT |
						IB_QP_RETRY_CNT |
						IB_QP_RNR_RETRY |
						IB_QP_MAX_QP_RD_ATOMIC |
						IB_QP_MAX_DEST_RD_ATOMIC |
						IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_PKEY_INDEX |
						IB_QP_MIN_RNR_TIMER |
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_XRC_INI] = (IB_QP_PORT |
						IB_QP_AV |
						IB_QP_TIMEOUT |
						IB_QP_RETRY_CNT |
						IB_QP_RNR_RETRY |
						IB_QP_MAX_QP_RD_ATOMIC |
						IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_PKEY_INDEX |
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_XRC_TGT] = (IB_QP_PORT |
						IB_QP_AV |
						IB_QP_TIMEOUT |
						IB_QP_MAX_DEST_RD_ATOMIC |
						IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_PKEY_INDEX |
						IB_QP_MIN_RNR_TIMER |
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_SMI] = (IB_QP_PKEY_INDEX |
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_PKEY_INDEX |
						IB_QP_QKEY),
			}
		}
	},
	[IB_QPS_SQE]   = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR]   = { .valid = 1 },
		[IB_QPS_RTS]   = {
			.valid = 1,
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_CUR_STATE |
						IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_CUR_STATE |
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_SMI] = (IB_QP_CUR_STATE |
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_CUR_STATE |
						IB_QP_QKEY),
			}
		}
	},
	[IB_QPS_ERR] = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR]   = { .valid = 1 }
	}
};

int ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
		       enum ib_qp_type type, enum ib_qp_attr_mask mask,
		       enum rdma_link_layer ll)
{
	enum ib_qp_attr_mask req_param, opt_param;

	if (cur_state  < 0 || cur_state  > IB_QPS_ERR ||
	    next_state < 0 || next_state > IB_QPS_ERR)
		return 0;

	if (mask & IB_QP_CUR_STATE &&
	    cur_state != IB_QPS_RTR && cur_state != IB_QPS_RTS &&
	    cur_state != IB_QPS_SQD && cur_state != IB_QPS_SQE)
		return 0;

	if (!qp_state_table[cur_state][next_state].valid)
		return 0;

	req_param = qp_state_table[cur_state][next_state].req_param[type];
	opt_param = qp_state_table[cur_state][next_state].opt_param[type];

	if ((mask & req_param) != req_param)
		return 0;

	if (mask & ~(req_param | opt_param | IB_QP_STATE))
		return 0;

	return 1;
}
EXPORT_SYMBOL(ib_modify_qp_is_ok);
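
/*
 * Usage sketch (illustrative only, not part of the upstream file): a driver
 * validating an INIT->RTR transition on an RC QP against the table above:
 *
 *	if (!ib_modify_qp_is_ok(IB_QPS_INIT, IB_QPS_RTR, IB_QPT_RC,
 *				attr_mask, IB_LINK_LAYER_INFINIBAND))
 *		return -EINVAL;
 *
 * The check passes only if attr_mask contains every required attribute for
 * the transition (here IB_QP_AV, IB_QP_PATH_MTU, IB_QP_DEST_QPN,
 * IB_QP_RQ_PSN, IB_QP_MAX_DEST_RD_ATOMIC and IB_QP_MIN_RNR_TIMER) and
 * nothing outside the required + optional sets.
 */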

int ib_resolve_eth_dmac(struct ib_qp *qp,
			struct ib_qp_attr *qp_attr, int *qp_attr_mask)
{
	int ret = 0;

	if (*qp_attr_mask & IB_QP_AV) {
		if (qp_attr->ah_attr.port_num < rdma_start_port(qp->device) ||
		    qp_attr->ah_attr.port_num > rdma_end_port(qp->device))
			return -EINVAL;

		if (!rdma_cap_eth_ah(qp->device, qp_attr->ah_attr.port_num))
			return 0;

		if (rdma_link_local_addr((struct in6_addr *)qp_attr->ah_attr.grh.dgid.raw)) {
			rdma_get_ll_mac((struct in6_addr *)qp_attr->ah_attr.grh.dgid.raw,
					qp_attr->ah_attr.dmac);
		} else {
			union ib_gid		sgid;
			struct ib_gid_attr	sgid_attr;
			int			ifindex;
			int			hop_limit;

			ret = ib_query_gid(qp->device,
					   qp_attr->ah_attr.port_num,
					   qp_attr->ah_attr.grh.sgid_index,
					   &sgid, &sgid_attr);

			if (ret || !sgid_attr.ndev) {
				if (!ret)
					ret = -ENXIO;
				goto out;
			}

			ifindex = sgid_attr.ndev->ifindex;

			ret = rdma_addr_find_l2_eth_by_grh(&sgid,
							   &qp_attr->ah_attr.grh.dgid,
							   qp_attr->ah_attr.dmac,
							   NULL, &ifindex, &hop_limit);

			dev_put(sgid_attr.ndev);

			qp_attr->ah_attr.grh.hop_limit = hop_limit;
		}
	}
out:
	return ret;
}
EXPORT_SYMBOL(ib_resolve_eth_dmac);

int ib_modify_qp(struct ib_qp *qp,
		 struct ib_qp_attr *qp_attr,
		 int qp_attr_mask)
{
	int ret;

	ret = ib_resolve_eth_dmac(qp, qp_attr, &qp_attr_mask);
	if (ret)
		return ret;

	return qp->device->modify_qp(qp->real_qp, qp_attr, qp_attr_mask, NULL);
}
EXPORT_SYMBOL(ib_modify_qp);
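
/*
 * Usage sketch (illustrative only, not part of the upstream file): the
 * classic RESET->INIT step of bringing up an RC QP; the later INIT->RTR
 * and RTR->RTS transitions follow the same pattern with the masks that
 * qp_state_table above requires:
 *
 *	struct ib_qp_attr attr = {
 *		.qp_state        = IB_QPS_INIT,
 *		.pkey_index      = 0,
 *		.port_num        = 1,
 *		.qp_access_flags = IB_ACCESS_REMOTE_READ |
 *				   IB_ACCESS_REMOTE_WRITE,
 *	};
 *	int ret = ib_modify_qp(qp, &attr,
 *			       IB_QP_STATE | IB_QP_PKEY_INDEX |
 *			       IB_QP_PORT | IB_QP_ACCESS_FLAGS);
 */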

int ib_query_qp(struct ib_qp *qp,
		struct ib_qp_attr *qp_attr,
		int qp_attr_mask,
		struct ib_qp_init_attr *qp_init_attr)
{
	return qp->device->query_qp ?
		qp->device->query_qp(qp->real_qp, qp_attr, qp_attr_mask, qp_init_attr) :
		-ENOSYS;
}
EXPORT_SYMBOL(ib_query_qp);

int ib_close_qp(struct ib_qp *qp)
{
	struct ib_qp *real_qp;
	unsigned long flags;

	real_qp = qp->real_qp;
	if (real_qp == qp)
		return -EINVAL;

	spin_lock_irqsave(&real_qp->device->event_handler_lock, flags);
	list_del(&qp->open_list);
	spin_unlock_irqrestore(&real_qp->device->event_handler_lock, flags);

	atomic_dec(&real_qp->usecnt);
	kfree(qp);

	return 0;
}
EXPORT_SYMBOL(ib_close_qp);

static int __ib_destroy_shared_qp(struct ib_qp *qp)
{
	struct ib_xrcd *xrcd;
	struct ib_qp *real_qp;
	int ret;

	real_qp = qp->real_qp;
	xrcd = real_qp->xrcd;

	mutex_lock(&xrcd->tgt_qp_mutex);
	ib_close_qp(qp);
	if (atomic_read(&real_qp->usecnt) == 0)
		list_del(&real_qp->xrcd_list);
	else
		real_qp = NULL;
	mutex_unlock(&xrcd->tgt_qp_mutex);

	if (real_qp) {
		ret = ib_destroy_qp(real_qp);
		if (!ret)
			atomic_dec(&xrcd->usecnt);
		else
			__ib_insert_xrcd_qp(xrcd, real_qp);
	}

	return 0;
}

int ib_destroy_qp(struct ib_qp *qp)
{
	struct ib_pd *pd;
	struct ib_cq *scq, *rcq;
	struct ib_srq *srq;
	int ret;

	WARN_ON_ONCE(qp->mrs_used > 0);

	if (atomic_read(&qp->usecnt))
		return -EBUSY;

	if (qp->real_qp != qp)
		return __ib_destroy_shared_qp(qp);

	pd  = qp->pd;
	scq = qp->send_cq;
	rcq = qp->recv_cq;
	srq = qp->srq;

	if (!qp->uobject)
		rdma_rw_cleanup_mrs(qp);

	ret = qp->device->destroy_qp(qp);
	if (!ret) {
		if (pd)
			atomic_dec(&pd->usecnt);
		if (scq)
			atomic_dec(&scq->usecnt);
		if (rcq)
			atomic_dec(&rcq->usecnt);
		if (srq)
			atomic_dec(&srq->usecnt);
	}

	return ret;
}
EXPORT_SYMBOL(ib_destroy_qp);

/* Completion queues */

struct ib_cq *ib_create_cq(struct ib_device *device,
			   ib_comp_handler comp_handler,
			   void (*event_handler)(struct ib_event *, void *),
			   void *cq_context,
			   const struct ib_cq_init_attr *cq_attr)
{
	struct ib_cq *cq;

	cq = device->create_cq(device, cq_attr, NULL, NULL);

	if (!IS_ERR(cq)) {
		cq->device        = device;
		cq->uobject       = NULL;
		cq->comp_handler  = comp_handler;
		cq->event_handler = event_handler;
		cq->cq_context    = cq_context;
		atomic_set(&cq->usecnt, 0);
	}

	return cq;
}
EXPORT_SYMBOL(ib_create_cq);

int ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
{
	return cq->device->modify_cq ?
		cq->device->modify_cq(cq, cq_count, cq_period) : -ENOSYS;
}
EXPORT_SYMBOL(ib_modify_cq);

int ib_destroy_cq(struct ib_cq *cq)
{
	if (atomic_read(&cq->usecnt))
		return -EBUSY;

	return cq->device->destroy_cq(cq);
}
EXPORT_SYMBOL(ib_destroy_cq);

int ib_resize_cq(struct ib_cq *cq, int cqe)
{
	return cq->device->resize_cq ?
		cq->device->resize_cq(cq, cqe, NULL) : -ENOSYS;
}
EXPORT_SYMBOL(ib_resize_cq);

/* Memory regions */

struct ib_mr *ib_get_dma_mr(struct ib_pd *pd, int mr_access_flags)
{
	struct ib_mr *mr;
	int err;

	err = ib_check_mr_access(mr_access_flags);
	if (err)
		return ERR_PTR(err);

	mr = pd->device->get_dma_mr(pd, mr_access_flags);

	if (!IS_ERR(mr)) {
		mr->device  = pd->device;
		mr->pd      = pd;
		mr->uobject = NULL;
		atomic_inc(&pd->usecnt);
		mr->need_inval = false;
	}

	return mr;
}
EXPORT_SYMBOL(ib_get_dma_mr);

int ib_dereg_mr(struct ib_mr *mr)
{
	struct ib_pd *pd = mr->pd;
	int ret;

	ret = mr->device->dereg_mr(mr);
	if (!ret)
		atomic_dec(&pd->usecnt);

	return ret;
}
EXPORT_SYMBOL(ib_dereg_mr);

/**
 * ib_alloc_mr() - Allocates a memory region
 * @pd:            protection domain associated with the region
 * @mr_type:       memory region type
 * @max_num_sg:    maximum sg entries available for registration.
 *
 * Notes:
 * Memory registration page/sg lists must not exceed max_num_sg.
 * For mr_type IB_MR_TYPE_MEM_REG, the total length cannot exceed
 * max_num_sg * used_page_size.
 */
struct ib_mr *ib_alloc_mr(struct ib_pd *pd,
			  enum ib_mr_type mr_type,
			  u32 max_num_sg)
{
	struct ib_mr *mr;

	if (!pd->device->alloc_mr)
		return ERR_PTR(-ENOSYS);

	mr = pd->device->alloc_mr(pd, mr_type, max_num_sg);
	if (!IS_ERR(mr)) {
		mr->device  = pd->device;
		mr->pd      = pd;
		mr->uobject = NULL;
		atomic_inc(&pd->usecnt);
		mr->need_inval = false;
	}

	return mr;
}
EXPORT_SYMBOL(ib_alloc_mr);
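
/*
 * Usage sketch (illustrative only, not part of the upstream file):
 * allocating a fast-registration MR able to map up to 32 SG entries, for
 * later use with ib_map_mr_sg() and an IB_WR_REG_MR work request:
 *
 *	struct ib_mr *mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, 32);
 *	if (IS_ERR(mr))
 *		return PTR_ERR(mr);
 *	// ... ib_map_mr_sg(mr, sg, nents, NULL, PAGE_SIZE), then post
 *	//     the REG_MR work request ...
 *	ib_dereg_mr(mr);
 */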

/* "Fast" memory regions */

struct ib_fmr *ib_alloc_fmr(struct ib_pd *pd,
			    int mr_access_flags,
			    struct ib_fmr_attr *fmr_attr)
{
	struct ib_fmr *fmr;

	if (!pd->device->alloc_fmr)
		return ERR_PTR(-ENOSYS);

	fmr = pd->device->alloc_fmr(pd, mr_access_flags, fmr_attr);
	if (!IS_ERR(fmr)) {
		fmr->device = pd->device;
		fmr->pd     = pd;
		atomic_inc(&pd->usecnt);
	}

	return fmr;
}
EXPORT_SYMBOL(ib_alloc_fmr);

int ib_unmap_fmr(struct list_head *fmr_list)
{
	struct ib_fmr *fmr;

	if (list_empty(fmr_list))
		return 0;

	fmr = list_entry(fmr_list->next, struct ib_fmr, list);
	return fmr->device->unmap_fmr(fmr_list);
}
EXPORT_SYMBOL(ib_unmap_fmr);

int ib_dealloc_fmr(struct ib_fmr *fmr)
{
	struct ib_pd *pd;
	int ret;

	pd = fmr->pd;
	ret = fmr->device->dealloc_fmr(fmr);
	if (!ret)
		atomic_dec(&pd->usecnt);

	return ret;
}
EXPORT_SYMBOL(ib_dealloc_fmr);

/* Multicast groups */

int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
{
	int ret;

	if (!qp->device->attach_mcast)
		return -ENOSYS;
	if (gid->raw[0] != 0xff || qp->qp_type != IB_QPT_UD)
		return -EINVAL;

	ret = qp->device->attach_mcast(qp, gid, lid);
	if (!ret)
		atomic_inc(&qp->usecnt);
	return ret;
}
EXPORT_SYMBOL(ib_attach_mcast);

int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
{
	int ret;

	if (!qp->device->detach_mcast)
		return -ENOSYS;
	if (gid->raw[0] != 0xff || qp->qp_type != IB_QPT_UD)
		return -EINVAL;

	ret = qp->device->detach_mcast(qp, gid, lid);
	if (!ret)
		atomic_dec(&qp->usecnt);
	return ret;
}
EXPORT_SYMBOL(ib_detach_mcast);

struct ib_xrcd *ib_alloc_xrcd(struct ib_device *device)
{
	struct ib_xrcd *xrcd;

	if (!device->alloc_xrcd)
		return ERR_PTR(-ENOSYS);

	xrcd = device->alloc_xrcd(device, NULL, NULL);
	if (!IS_ERR(xrcd)) {
		xrcd->device = device;
		xrcd->inode = NULL;
		atomic_set(&xrcd->usecnt, 0);
		mutex_init(&xrcd->tgt_qp_mutex);
		INIT_LIST_HEAD(&xrcd->tgt_qp_list);
	}

	return xrcd;
}
EXPORT_SYMBOL(ib_alloc_xrcd);

int ib_dealloc_xrcd(struct ib_xrcd *xrcd)
{
	struct ib_qp *qp;
	int ret;

	if (atomic_read(&xrcd->usecnt))
		return -EBUSY;

	while (!list_empty(&xrcd->tgt_qp_list)) {
		qp = list_entry(xrcd->tgt_qp_list.next, struct ib_qp, xrcd_list);
		ret = ib_destroy_qp(qp);
		if (ret)
			return ret;
	}

	return xrcd->device->dealloc_xrcd(xrcd);
}
EXPORT_SYMBOL(ib_dealloc_xrcd);

struct ib_flow *ib_create_flow(struct ib_qp *qp,
			       struct ib_flow_attr *flow_attr,
			       int domain)
{
	struct ib_flow *flow_id;

	if (!qp->device->create_flow)
		return ERR_PTR(-ENOSYS);

	flow_id = qp->device->create_flow(qp, flow_attr, domain);
	if (!IS_ERR(flow_id))
		atomic_inc(&qp->usecnt);
	return flow_id;
}
EXPORT_SYMBOL(ib_create_flow);

int ib_destroy_flow(struct ib_flow *flow_id)
{
	int err;
	struct ib_qp *qp = flow_id->qp;

	err = qp->device->destroy_flow(flow_id);
	if (!err)
		atomic_dec(&qp->usecnt);
	return err;
}
EXPORT_SYMBOL(ib_destroy_flow);

int ib_check_mr_status(struct ib_mr *mr, u32 check_mask,
		       struct ib_mr_status *mr_status)
{
	return mr->device->check_mr_status ?
		mr->device->check_mr_status(mr, check_mask, mr_status) : -ENOSYS;
}
EXPORT_SYMBOL(ib_check_mr_status);

int ib_set_vf_link_state(struct ib_device *device, int vf, u8 port,
			 int state)
{
	if (!device->set_vf_link_state)
		return -ENOSYS;

	return device->set_vf_link_state(device, vf, port, state);
}
EXPORT_SYMBOL(ib_set_vf_link_state);

int ib_get_vf_config(struct ib_device *device, int vf, u8 port,
		     struct ifla_vf_info *info)
{
	if (!device->get_vf_config)
		return -ENOSYS;

	return device->get_vf_config(device, vf, port, info);
}
EXPORT_SYMBOL(ib_get_vf_config);

int ib_get_vf_stats(struct ib_device *device, int vf, u8 port,
		    struct ifla_vf_stats *stats)
{
	if (!device->get_vf_stats)
		return -ENOSYS;

	return device->get_vf_stats(device, vf, port, stats);
}
EXPORT_SYMBOL(ib_get_vf_stats);

int ib_set_vf_guid(struct ib_device *device, int vf, u8 port, u64 guid,
		   int type)
{
	if (!device->set_vf_guid)
		return -ENOSYS;

	return device->set_vf_guid(device, vf, port, guid, type);
}
EXPORT_SYMBOL(ib_set_vf_guid);
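
/*
 * Usage sketch for the SR-IOV helpers above; the VF index, port number
 * and link state are illustrative values:
 *
 *	struct ifla_vf_info info;
 *	int ret;
 *
 *	// force VF 0 on port 1 up, as "ip link set ... vf 0 state enable" would
 *	ret = ib_set_vf_link_state(device, 0, 1, IFLA_VF_LINK_STATE_ENABLE);
 *	if (ret)
 *		return ret;
 *
 *	ret = ib_get_vf_config(device, 0, 1, &info);	// read back VF config
 */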

/**
 * ib_map_mr_sg() - Map the largest prefix of a dma mapped SG list
 *     and set it as the memory region.
 * @mr: memory region
 * @sg: dma mapped scatterlist
 * @sg_nents: number of entries in sg
 * @sg_offset: offset in bytes into sg
 * @page_size: page vector desired page size
 *
 * Constraints:
 * - The first sg element is allowed to have an offset.
 * - Each sg element must be aligned to page_size (or physically
 *   contiguous to the previous element). In case an sg element has a
 *   non-contiguous offset, the mapping prefix will not include it.
 * - The last sg element is allowed to have length less than page_size.
 * - If sg_nents total byte length exceeds the mr max_num_sg * page_size
 *   then only max_num_sg entries will be mapped.
 * - If the MR was allocated with type IB_MR_TYPE_SG_GAPS_REG, none of
 *   these constraints holds and the page_size argument is ignored.
 *
 * Returns the number of sg elements that were mapped to the memory region.
 *
 * After this completes successfully, the memory region
 * is ready for registration.
 */
int ib_map_mr_sg(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
		 unsigned int *sg_offset, unsigned int page_size)
{
	if (unlikely(!mr->device->map_mr_sg))
		return -ENOSYS;

	mr->page_size = page_size;

	return mr->device->map_mr_sg(mr, sg, sg_nents, sg_offset);
}
EXPORT_SYMBOL(ib_map_mr_sg);
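
/*
 * Usage sketch: the SG list must be DMA mapped before it is handed to
 * ib_map_mr_sg().  "dev", "mr" and "sg_table" are assumed to exist in
 * the caller; a return value smaller than nents means only a prefix was
 * mapped and the caller needs additional registrations for the rest.
 *
 *	int nents, mapped;
 *
 *	nents = ib_dma_map_sg(dev, sg_table.sgl, sg_table.nents,
 *			      DMA_TO_DEVICE);
 *	if (!nents)
 *		return -ENOMEM;
 *
 *	mapped = ib_map_mr_sg(mr, sg_table.sgl, nents, NULL, PAGE_SIZE);
 *	if (mapped < 0)
 *		return mapped;
 */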

/**
 * ib_sg_to_pages() - Convert the largest prefix of a sg list
 *     to a page vector
 * @mr: memory region
 * @sgl: dma mapped scatterlist
 * @sg_nents: number of entries in sg
 * @sg_offset_p: IN:  start offset in bytes into sg
 *               OUT: offset in bytes for element n of the sg of the first
 *                    byte that has not been processed where n is the return
 *                    value of this function.
 * @set_page: driver page assignment function pointer
 *
 * Core service helper for drivers to convert the largest
 * prefix of given sg list to a page vector. The sg list
 * prefix converted is the prefix that meets the requirements
 * of ib_map_mr_sg.
 *
 * Returns the number of sg elements that were assigned to
 * a page vector.
 */
int ib_sg_to_pages(struct ib_mr *mr, struct scatterlist *sgl, int sg_nents,
		   unsigned int *sg_offset_p, int (*set_page)(struct ib_mr *, u64))
{
	struct scatterlist *sg;
	u64 last_end_dma_addr = 0;
	unsigned int sg_offset = sg_offset_p ? *sg_offset_p : 0;
	unsigned int last_page_off = 0;
	u64 page_mask = ~((u64)mr->page_size - 1);
	int i, ret;

	if (unlikely(sg_nents <= 0 || sg_offset > sg_dma_len(&sgl[0])))
		return -EINVAL;

	mr->iova = sg_dma_address(&sgl[0]) + sg_offset;
	mr->length = 0;

	for_each_sg(sgl, sg, sg_nents, i) {
		u64 dma_addr = sg_dma_address(sg) + sg_offset;
		u64 prev_addr = dma_addr;
		unsigned int dma_len = sg_dma_len(sg) - sg_offset;
		u64 end_dma_addr = dma_addr + dma_len;
		u64 page_addr = dma_addr & page_mask;

		/*
		 * For the second and later elements, check whether either the
		 * end of element i-1 or the start of element i is not aligned
		 * on a page boundary.
		 */
		if (i && (last_page_off != 0 || page_addr != dma_addr)) {
			/* Stop mapping if there is a gap. */
			if (last_end_dma_addr != dma_addr)
				break;

			/*
			 * Coalesce this element with the last. If it is small
			 * enough just update mr->length. Otherwise start
			 * mapping from the next page.
			 */
			goto next_page;
		}

		do {
			ret = set_page(mr, page_addr);
			if (unlikely(ret < 0)) {
				sg_offset = prev_addr - sg_dma_address(sg);
				mr->length += prev_addr - dma_addr;
				if (sg_offset_p)
					*sg_offset_p = sg_offset;
				return i || sg_offset ? i : ret;
			}
			prev_addr = page_addr;
next_page:
			page_addr += mr->page_size;
		} while (page_addr < end_dma_addr);

		mr->length += dma_len;
		last_end_dma_addr = end_dma_addr;
		last_page_off = end_dma_addr & ~page_mask;

		sg_offset = 0;
	}

	if (sg_offset_p)
		*sg_offset_p = 0;
	return i;
}
EXPORT_SYMBOL(ib_sg_to_pages);
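
/*
 * Driver-side sketch: a provider's map_mr_sg method typically wraps
 * ib_sg_to_pages() with a set_page callback that records each page
 * address in the provider's own page list.  All "my_*" names are
 * hypothetical.
 *
 *	static int my_set_page(struct ib_mr *ibmr, u64 addr)
 *	{
 *		struct my_mr *mr = to_my_mr(ibmr);
 *
 *		if (mr->npages == mr->max_pages)
 *			return -ENOMEM;
 *		mr->pages[mr->npages++] = addr;
 *		return 0;
 *	}
 *
 *	static int my_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
 *				int sg_nents, unsigned int *sg_offset)
 *	{
 *		struct my_mr *mr = to_my_mr(ibmr);
 *
 *		mr->npages = 0;
 *		return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset,
 *				      my_set_page);
 *	}
 */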

struct ib_drain_cqe {
	struct ib_cqe	cqe;
	struct completion done;
};

static void ib_drain_qp_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_drain_cqe *cqe = container_of(wc->wr_cqe, struct ib_drain_cqe,
						cqe);

	complete(&cqe->done);
}

/*
 * Post a WR and block until its completion is reaped for the SQ.
 */
static void __ib_drain_sq(struct ib_qp *qp)
{
	struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
	struct ib_drain_cqe sdrain;
	struct ib_send_wr swr = {}, *bad_swr;
	int ret;

	if (WARN_ONCE(qp->send_cq->poll_ctx == IB_POLL_DIRECT,
		      "IB_POLL_DIRECT poll_ctx not supported for drain\n"))
		return;

	swr.wr_cqe = &sdrain.cqe;
	sdrain.cqe.done = ib_drain_qp_done;
	init_completion(&sdrain.done);

	ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
	if (ret) {
		WARN_ONCE(ret, "failed to drain send queue: %d\n", ret);
		return;
	}

	ret = ib_post_send(qp, &swr, &bad_swr);
	if (ret) {
		WARN_ONCE(ret, "failed to drain send queue: %d\n", ret);
		return;
	}

	wait_for_completion(&sdrain.done);
}

/*
 * Post a WR and block until its completion is reaped for the RQ.
 */
static void __ib_drain_rq(struct ib_qp *qp)
{
	struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
	struct ib_drain_cqe rdrain;
	struct ib_recv_wr rwr = {}, *bad_rwr;
	int ret;

	if (WARN_ONCE(qp->recv_cq->poll_ctx == IB_POLL_DIRECT,
		      "IB_POLL_DIRECT poll_ctx not supported for drain\n"))
		return;

	rwr.wr_cqe = &rdrain.cqe;
	rdrain.cqe.done = ib_drain_qp_done;
	init_completion(&rdrain.done);

	ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
	if (ret) {
		WARN_ONCE(ret, "failed to drain recv queue: %d\n", ret);
		return;
	}

	ret = ib_post_recv(qp, &rwr, &bad_rwr);
	if (ret) {
		WARN_ONCE(ret, "failed to drain recv queue: %d\n", ret);
		return;
	}

	wait_for_completion(&rdrain.done);
}

/**
 * ib_drain_sq() - Block until all SQ CQEs have been consumed by the
 *		   application.
 * @qp: queue pair to drain
 *
 * If the device has a provider-specific drain function, then
 * call that. Otherwise call the generic drain function
 * __ib_drain_sq().
 *
 * The caller must:
 *
 * ensure there is room in the CQ and SQ for the drain work request and
 * completion.
 *
 * allocate the CQ using ib_alloc_cq() with a poll context other than
 * IB_POLL_DIRECT.
 *
 * ensure that there are no other contexts that are posting WRs concurrently.
 * Otherwise the drain is not guaranteed.
 */
void ib_drain_sq(struct ib_qp *qp)
{
	if (qp->device->drain_sq)
		qp->device->drain_sq(qp);
	else
		__ib_drain_sq(qp);
}
EXPORT_SYMBOL(ib_drain_sq);

/**
 * ib_drain_rq() - Block until all RQ CQEs have been consumed by the
 *		   application.
 * @qp: queue pair to drain
 *
 * If the device has a provider-specific drain function, then
 * call that. Otherwise call the generic drain function
 * __ib_drain_rq().
 *
 * The caller must:
 *
 * ensure there is room in the CQ and RQ for the drain work request and
 * completion.
 *
 * allocate the CQ using ib_alloc_cq() with a poll context other than
 * IB_POLL_DIRECT.
 *
 * ensure that there are no other contexts that are posting WRs concurrently.
 * Otherwise the drain is not guaranteed.
 */
void ib_drain_rq(struct ib_qp *qp)
{
	if (qp->device->drain_rq)
		qp->device->drain_rq(qp);
	else
		__ib_drain_rq(qp);
}
EXPORT_SYMBOL(ib_drain_rq);

/**
 * ib_drain_qp() - Block until all CQEs have been consumed by the
 *		   application on both the RQ and SQ.
 * @qp: queue pair to drain
 *
 * The caller must:
 *
 * ensure there is room in the CQ(s), SQ, and RQ for drain work requests
 * and completions.
 *
 * allocate the CQs using ib_alloc_cq() with a poll context other than
 * IB_POLL_DIRECT.
 *
 * ensure that there are no other contexts that are posting WRs concurrently.
 * Otherwise the drain is not guaranteed.
 *
 * If the QP is associated with an SRQ, only the SQ is drained: receive
 * work requests are posted to the SRQ, so the QP has no RQ of its own
 * to drain.
 */
void ib_drain_qp(struct ib_qp *qp)
{
	ib_drain_sq(qp);
	if (!qp->srq)
		ib_drain_rq(qp);
}
EXPORT_SYMBOL(ib_drain_qp);
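
/*
 * Usage sketch: a typical ULP teardown path drains the QP (which moves
 * it to the error state and waits for all outstanding completions)
 * before destroying it and freeing its CQ:
 *
 *	ib_drain_qp(qp);	// drains the SQ, and the RQ unless an SRQ is used
 *	ib_destroy_qp(qp);
 *	ib_free_cq(cq);
 */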