/*
 * Copyright (c) 2004 Mellanox Technologies Ltd.  All rights reserved.
 * Copyright (c) 2004 Infinicon Corporation.  All rights reserved.
 * Copyright (c) 2004 Intel Corporation.  All rights reserved.
 * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
 * Copyright (c) 2004 Voltaire Corporation.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005, 2006 Cisco Systems.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <net/addrconf.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_cache.h>
#include <rdma/ib_addr.h>

#include "core_priv.h"

static const char * const ib_events[] = {
	[IB_EVENT_CQ_ERR]		= "CQ error",
	[IB_EVENT_QP_FATAL]		= "QP fatal error",
	[IB_EVENT_QP_REQ_ERR]		= "QP request error",
	[IB_EVENT_QP_ACCESS_ERR]	= "QP access error",
	[IB_EVENT_COMM_EST]		= "communication established",
	[IB_EVENT_SQ_DRAINED]		= "send queue drained",
	[IB_EVENT_PATH_MIG]		= "path migration successful",
	[IB_EVENT_PATH_MIG_ERR]		= "path migration error",
	[IB_EVENT_DEVICE_FATAL]		= "device fatal error",
	[IB_EVENT_PORT_ACTIVE]		= "port active",
	[IB_EVENT_PORT_ERR]		= "port error",
	[IB_EVENT_LID_CHANGE]		= "LID change",
	[IB_EVENT_PKEY_CHANGE]		= "P_key change",
	[IB_EVENT_SM_CHANGE]		= "SM change",
	[IB_EVENT_SRQ_ERR]		= "SRQ error",
	[IB_EVENT_SRQ_LIMIT_REACHED]	= "SRQ limit reached",
	[IB_EVENT_QP_LAST_WQE_REACHED]	= "last WQE reached",
	[IB_EVENT_CLIENT_REREGISTER]	= "client reregister",
	[IB_EVENT_GID_CHANGE]		= "GID changed",
};

const char *ib_event_msg(enum ib_event_type event)
{
	size_t index = event;

	return (index < ARRAY_SIZE(ib_events) && ib_events[index]) ?
			ib_events[index] : "unrecognized event";
}
EXPORT_SYMBOL(ib_event_msg);

static const char * const wc_statuses[] = {
	[IB_WC_SUCCESS]			= "success",
	[IB_WC_LOC_LEN_ERR]		= "local length error",
	[IB_WC_LOC_QP_OP_ERR]		= "local QP operation error",
	[IB_WC_LOC_EEC_OP_ERR]		= "local EE context operation error",
	[IB_WC_LOC_PROT_ERR]		= "local protection error",
	[IB_WC_WR_FLUSH_ERR]		= "WR flushed",
	[IB_WC_MW_BIND_ERR]		= "memory management operation error",
	[IB_WC_BAD_RESP_ERR]		= "bad response error",
	[IB_WC_LOC_ACCESS_ERR]		= "local access error",
	[IB_WC_REM_INV_REQ_ERR]		= "invalid request error",
	[IB_WC_REM_ACCESS_ERR]		= "remote access error",
	[IB_WC_REM_OP_ERR]		= "remote operation error",
	[IB_WC_RETRY_EXC_ERR]		= "transport retry counter exceeded",
	[IB_WC_RNR_RETRY_EXC_ERR]	= "RNR retry counter exceeded",
	[IB_WC_LOC_RDD_VIOL_ERR]	= "local RDD violation error",
	[IB_WC_REM_INV_RD_REQ_ERR]	= "remote invalid RD request",
	[IB_WC_REM_ABORT_ERR]		= "operation aborted",
	[IB_WC_INV_EECN_ERR]		= "invalid EE context number",
	[IB_WC_INV_EEC_STATE_ERR]	= "invalid EE context state",
	[IB_WC_FATAL_ERR]		= "fatal error",
	[IB_WC_RESP_TIMEOUT_ERR]	= "response timeout error",
	[IB_WC_GENERAL_ERR]		= "general error",
};

const char *ib_wc_status_msg(enum ib_wc_status status)
{
	size_t index = status;

	return (index < ARRAY_SIZE(wc_statuses) && wc_statuses[index]) ?
			wc_statuses[index] : "unrecognized status";
}
EXPORT_SYMBOL(ib_wc_status_msg);

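/*
 * Illustrative usage sketch (not part of the original file): a consumer
 * might route the two message helpers above into its logging, e.g. when
 * draining a CQ or in an async event handler.  The "example_" names are
 * hypothetical; only ib_event_msg() and ib_wc_status_msg() are real API.
 */
static __maybe_unused void example_log_wc(const struct ib_wc *wc)
{
	if (wc->status != IB_WC_SUCCESS)
		pr_err("WR %llu failed: %s (%d)\n",
		       (unsigned long long)wc->wr_id,
		       ib_wc_status_msg(wc->status), wc->status);
}

static __maybe_unused void example_log_async_event(struct ib_event *event,
						   void *context)
{
	pr_info("%s: async event: %s\n", event->device->name,
		ib_event_msg(event->event));
}
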
__attribute_const__ int ib_rate_to_mult(enum ib_rate rate)
{
	switch (rate) {
	case IB_RATE_2_5_GBPS: return  1;
	case IB_RATE_5_GBPS:   return  2;
	case IB_RATE_10_GBPS:  return  4;
	case IB_RATE_20_GBPS:  return  8;
	case IB_RATE_30_GBPS:  return 12;
	case IB_RATE_40_GBPS:  return 16;
	case IB_RATE_60_GBPS:  return 24;
	case IB_RATE_80_GBPS:  return 32;
	case IB_RATE_120_GBPS: return 48;
	default:	       return -1;
	}
}
EXPORT_SYMBOL(ib_rate_to_mult);

__attribute_const__ enum ib_rate mult_to_ib_rate(int mult)
{
	switch (mult) {
	case 1:  return IB_RATE_2_5_GBPS;
	case 2:  return IB_RATE_5_GBPS;
	case 4:  return IB_RATE_10_GBPS;
	case 8:  return IB_RATE_20_GBPS;
	case 12: return IB_RATE_30_GBPS;
	case 16: return IB_RATE_40_GBPS;
	case 24: return IB_RATE_60_GBPS;
	case 32: return IB_RATE_80_GBPS;
	case 48: return IB_RATE_120_GBPS;
	default: return IB_RATE_PORT_CURRENT;
	}
}
EXPORT_SYMBOL(mult_to_ib_rate);

__attribute_const__ int ib_rate_to_mbps(enum ib_rate rate)
{
	switch (rate) {
	case IB_RATE_2_5_GBPS: return 2500;
	case IB_RATE_5_GBPS:   return 5000;
	case IB_RATE_10_GBPS:  return 10000;
	case IB_RATE_20_GBPS:  return 20000;
	case IB_RATE_30_GBPS:  return 30000;
	case IB_RATE_40_GBPS:  return 40000;
	case IB_RATE_60_GBPS:  return 60000;
	case IB_RATE_80_GBPS:  return 80000;
	case IB_RATE_120_GBPS: return 120000;
	case IB_RATE_14_GBPS:  return 14062;
	case IB_RATE_56_GBPS:  return 56250;
	case IB_RATE_112_GBPS: return 112500;
	case IB_RATE_168_GBPS: return 168750;
	case IB_RATE_25_GBPS:  return 25781;
	case IB_RATE_100_GBPS: return 103125;
	case IB_RATE_200_GBPS: return 206250;
	case IB_RATE_300_GBPS: return 309375;
	default:	       return -1;
	}
}
EXPORT_SYMBOL(ib_rate_to_mbps);

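/*
 * Illustrative sketch (not part of the original file): ib_rate_to_mult()
 * expresses a static rate as a multiple of the 2.5 Gbps base rate, and
 * mult_to_ib_rate() inverts it for the base IB rates, so a round trip
 * preserves the rate.  The "example_" name is hypothetical.
 */
static __maybe_unused int example_rate_round_trip(void)
{
	int mult = ib_rate_to_mult(IB_RATE_40_GBPS);	/* 16 */

	if (mult < 0)
		return -EINVAL;

	/* 16 * 2.5 Gbps == 40 Gbps, so this recovers IB_RATE_40_GBPS. */
	return mult_to_ib_rate(mult) == IB_RATE_40_GBPS ? 0 : -EINVAL;
}
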
__attribute_const__ enum rdma_transport_type
rdma_node_get_transport(enum rdma_node_type node_type)
{
	switch (node_type) {
	case RDMA_NODE_IB_CA:
	case RDMA_NODE_IB_SWITCH:
	case RDMA_NODE_IB_ROUTER:
		return RDMA_TRANSPORT_IB;
	case RDMA_NODE_RNIC:
		return RDMA_TRANSPORT_IWARP;
	case RDMA_NODE_USNIC:
		return RDMA_TRANSPORT_USNIC;
	case RDMA_NODE_USNIC_UDP:
		return RDMA_TRANSPORT_USNIC_UDP;
	default:
		BUG();
		return 0;
	}
}
EXPORT_SYMBOL(rdma_node_get_transport);

enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device, u8 port_num)
{
	if (device->get_link_layer)
		return device->get_link_layer(device, port_num);

	switch (rdma_node_get_transport(device->node_type)) {
	case RDMA_TRANSPORT_IB:
		return IB_LINK_LAYER_INFINIBAND;
	case RDMA_TRANSPORT_IWARP:
	case RDMA_TRANSPORT_USNIC:
	case RDMA_TRANSPORT_USNIC_UDP:
		return IB_LINK_LAYER_ETHERNET;
	default:
		return IB_LINK_LAYER_UNSPECIFIED;
	}
}
EXPORT_SYMBOL(rdma_port_get_link_layer);

/* Protection domains */

/**
 * ib_alloc_pd - Allocates an unused protection domain.
 * @device: The device on which to allocate the protection domain.
 *
 * A protection domain object provides an association between QPs, shared
 * receive queues, address handles, memory regions, and memory windows.
 *
 * Every PD has a local_dma_lkey which can be used as the lkey value for local
 * memory operations.
 */
struct ib_pd *ib_alloc_pd(struct ib_device *device)
{
	struct ib_pd *pd;
	struct ib_device_attr devattr;
	int rc;

	rc = ib_query_device(device, &devattr);
	if (rc)
		return ERR_PTR(rc);

	pd = device->alloc_pd(device, NULL, NULL);
	if (IS_ERR(pd))
		return pd;

	pd->device = device;
	pd->uobject = NULL;
	pd->local_mr = NULL;
	atomic_set(&pd->usecnt, 0);

	if (devattr.device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY)
		pd->local_dma_lkey = device->local_dma_lkey;
	else {
		struct ib_mr *mr;

		mr = ib_get_dma_mr(pd, IB_ACCESS_LOCAL_WRITE);
		if (IS_ERR(mr)) {
			ib_dealloc_pd(pd);
			return (struct ib_pd *)mr;
		}

		pd->local_mr = mr;
		pd->local_dma_lkey = pd->local_mr->lkey;
	}
	return pd;
}
EXPORT_SYMBOL(ib_alloc_pd);

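/*
 * Illustrative usage sketch (not part of the original file): a kernel ULP
 * typically allocates one PD per device at setup time and then uses
 * pd->local_dma_lkey as the lkey of its local SGEs.  The "example_" name
 * is hypothetical.
 */
static __maybe_unused struct ib_pd *example_setup_pd(struct ib_device *device)
{
	struct ib_pd *pd;

	pd = ib_alloc_pd(device);
	if (IS_ERR(pd))
		return pd;	/* propagate the ERR_PTR() to the caller */

	/* pd->local_dma_lkey is now valid for ib_sge.lkey. */
	return pd;
}
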
/**
 * ib_dealloc_pd - Deallocates a protection domain.
 * @pd: The protection domain to deallocate.
 *
 * It is an error to call this function while any resources in the pd still
 * exist.  The caller is responsible for synchronously destroying them and
 * guaranteeing no new allocations will happen.
 */
void ib_dealloc_pd(struct ib_pd *pd)
{
	int ret;

	if (pd->local_mr) {
		ret = ib_dereg_mr(pd->local_mr);
		WARN_ON(ret);
		pd->local_mr = NULL;
	}

	/* uverbs manipulates usecnt with proper locking, while the kabi
	   requires the caller to guarantee we can't race here. */
	WARN_ON(atomic_read(&pd->usecnt));

	/* Making dealloc_pd a void return is a WIP, no driver should return
	   an error here. */
	ret = pd->device->dealloc_pd(pd);
	WARN_ONCE(ret, "Infiniband HW driver failed dealloc_pd");
}
EXPORT_SYMBOL(ib_dealloc_pd);

/* Address handles */

struct ib_ah *ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr)
{
	struct ib_ah *ah;

	ah = pd->device->create_ah(pd, ah_attr);

	if (!IS_ERR(ah)) {
		ah->device  = pd->device;
		ah->pd      = pd;
		ah->uobject = NULL;
		atomic_inc(&pd->usecnt);
	}

	return ah;
}
EXPORT_SYMBOL(ib_create_ah);

struct find_gid_index_context {
	u16 vlan_id;
};

static bool find_gid_index(const union ib_gid *gid,
			   const struct ib_gid_attr *gid_attr,
			   void *context)
{
	struct find_gid_index_context *ctx =
		(struct find_gid_index_context *)context;

	if ((!!(ctx->vlan_id != 0xffff) == !is_vlan_dev(gid_attr->ndev)) ||
	    (is_vlan_dev(gid_attr->ndev) &&
	     vlan_dev_vlan_id(gid_attr->ndev) != ctx->vlan_id))
		return false;

	return true;
}

static int get_sgid_index_from_eth(struct ib_device *device, u8 port_num,
				   u16 vlan_id, const union ib_gid *sgid,
				   u16 *gid_index)
{
	struct find_gid_index_context context = {.vlan_id = vlan_id};

	return ib_find_gid_by_filter(device, sgid, port_num, find_gid_index,
				     &context, gid_index);
}

int ib_init_ah_from_wc(struct ib_device *device, u8 port_num,
		       const struct ib_wc *wc, const struct ib_grh *grh,
		       struct ib_ah_attr *ah_attr)
{
	u32 flow_class;
	u16 gid_index;
	int ret;

	memset(ah_attr, 0, sizeof *ah_attr);
	if (rdma_cap_eth_ah(device, port_num)) {
		u16 vlan_id = wc->wc_flags & IB_WC_WITH_VLAN ?
				wc->vlan_id : 0xffff;

		if (!(wc->wc_flags & IB_WC_GRH))
			return -EPROTOTYPE;

		if (!(wc->wc_flags & IB_WC_WITH_SMAC) ||
		    !(wc->wc_flags & IB_WC_WITH_VLAN)) {
			ret = rdma_addr_find_dmac_by_grh(&grh->dgid, &grh->sgid,
							 ah_attr->dmac,
							 wc->wc_flags & IB_WC_WITH_VLAN ?
							 NULL : &vlan_id,
							 0);
			if (ret)
				return ret;
		}

		ret = get_sgid_index_from_eth(device, port_num, vlan_id,
					      &grh->dgid, &gid_index);
		if (ret)
			return ret;

		if (wc->wc_flags & IB_WC_WITH_SMAC)
			memcpy(ah_attr->dmac, wc->smac, ETH_ALEN);
	}

	ah_attr->dlid = wc->slid;
	ah_attr->sl = wc->sl;
	ah_attr->src_path_bits = wc->dlid_path_bits;
	ah_attr->port_num = port_num;

	if (wc->wc_flags & IB_WC_GRH) {
		ah_attr->ah_flags = IB_AH_GRH;
		ah_attr->grh.dgid = grh->sgid;

		if (!rdma_cap_eth_ah(device, port_num)) {
			ret = ib_find_cached_gid_by_port(device, &grh->dgid,
							 port_num, NULL,
							 &gid_index);
			if (ret)
				return ret;
		}

		ah_attr->grh.sgid_index = (u8) gid_index;
		flow_class = be32_to_cpu(grh->version_tclass_flow);
		ah_attr->grh.flow_label = flow_class & 0xFFFFF;
		ah_attr->grh.hop_limit = 0xFF;
		ah_attr->grh.traffic_class = (flow_class >> 20) & 0xFF;
	}
	return 0;
}
EXPORT_SYMBOL(ib_init_ah_from_wc);

struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, const struct ib_wc *wc,
				   const struct ib_grh *grh, u8 port_num)
{
	struct ib_ah_attr ah_attr;
	int ret;

	ret = ib_init_ah_from_wc(pd->device, port_num, wc, grh, &ah_attr);
	if (ret)
		return ERR_PTR(ret);

	return ib_create_ah(pd, &ah_attr);
}
EXPORT_SYMBOL(ib_create_ah_from_wc);

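/*
 * Illustrative usage sketch (not part of the original file): a UD
 * responder can rebuild the sender's address from a received completion
 * plus its GRH and answer on the same path; this open-codes what
 * ib_create_ah_from_wc() does above.  "example_" names are hypothetical.
 */
static __maybe_unused struct ib_ah *example_reply_ah(struct ib_pd *pd, u8 port_num,
						     const struct ib_wc *wc,
						     const struct ib_grh *grh)
{
	struct ib_ah_attr ah_attr;
	int ret;

	ret = ib_init_ah_from_wc(pd->device, port_num, wc, grh, &ah_attr);
	if (ret)
		return ERR_PTR(ret);

	/* ah_attr now mirrors the remote sender; free with ib_destroy_ah(). */
	return ib_create_ah(pd, &ah_attr);
}
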
int ib_modify_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr)
{
	return ah->device->modify_ah ?
		ah->device->modify_ah(ah, ah_attr) :
		-ENOSYS;
}
EXPORT_SYMBOL(ib_modify_ah);

int ib_query_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr)
{
	return ah->device->query_ah ?
		ah->device->query_ah(ah, ah_attr) :
		-ENOSYS;
}
EXPORT_SYMBOL(ib_query_ah);

int ib_destroy_ah(struct ib_ah *ah)
{
	struct ib_pd *pd;
	int ret;

	pd = ah->pd;
	ret = ah->device->destroy_ah(ah);
	if (!ret)
		atomic_dec(&pd->usecnt);

	return ret;
}
EXPORT_SYMBOL(ib_destroy_ah);

/* Shared receive queues */

struct ib_srq *ib_create_srq(struct ib_pd *pd,
			     struct ib_srq_init_attr *srq_init_attr)
{
	struct ib_srq *srq;

	if (!pd->device->create_srq)
		return ERR_PTR(-ENOSYS);

	srq = pd->device->create_srq(pd, srq_init_attr, NULL);

	if (!IS_ERR(srq)) {
		srq->device        = pd->device;
		srq->pd            = pd;
		srq->uobject       = NULL;
		srq->event_handler = srq_init_attr->event_handler;
		srq->srq_context   = srq_init_attr->srq_context;
		srq->srq_type      = srq_init_attr->srq_type;
		if (srq->srq_type == IB_SRQT_XRC) {
			srq->ext.xrc.xrcd = srq_init_attr->ext.xrc.xrcd;
			srq->ext.xrc.cq   = srq_init_attr->ext.xrc.cq;
			atomic_inc(&srq->ext.xrc.xrcd->usecnt);
			atomic_inc(&srq->ext.xrc.cq->usecnt);
		}
		atomic_inc(&pd->usecnt);
		atomic_set(&srq->usecnt, 0);
	}

	return srq;
}
EXPORT_SYMBOL(ib_create_srq);

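/*
 * Illustrative usage sketch (not part of the original file): creating a
 * basic (non-XRC) SRQ.  The queue depths are arbitrary example values;
 * real consumers bound them by the device's max_srq_wr/max_srq_sge.
 */
static __maybe_unused struct ib_srq *example_create_basic_srq(struct ib_pd *pd)
{
	struct ib_srq_init_attr srq_init_attr = {
		.attr = {
			.max_wr	 = 128,		/* hypothetical depth */
			.max_sge = 1,
		},
	};

	return ib_create_srq(pd, &srq_init_attr);
}
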
int ib_modify_srq(struct ib_srq *srq,
		  struct ib_srq_attr *srq_attr,
		  enum ib_srq_attr_mask srq_attr_mask)
{
	return srq->device->modify_srq ?
		srq->device->modify_srq(srq, srq_attr, srq_attr_mask, NULL) :
		-ENOSYS;
}
EXPORT_SYMBOL(ib_modify_srq);

int ib_query_srq(struct ib_srq *srq,
		 struct ib_srq_attr *srq_attr)
{
	return srq->device->query_srq ?
		srq->device->query_srq(srq, srq_attr) : -ENOSYS;
}
EXPORT_SYMBOL(ib_query_srq);

int ib_destroy_srq(struct ib_srq *srq)
{
	struct ib_pd *pd;
	enum ib_srq_type srq_type;
	struct ib_xrcd *uninitialized_var(xrcd);
	struct ib_cq *uninitialized_var(cq);
	int ret;

	if (atomic_read(&srq->usecnt))
		return -EBUSY;

	pd = srq->pd;
	srq_type = srq->srq_type;
	if (srq_type == IB_SRQT_XRC) {
		xrcd = srq->ext.xrc.xrcd;
		cq = srq->ext.xrc.cq;
	}

	ret = srq->device->destroy_srq(srq);
	if (!ret) {
		atomic_dec(&pd->usecnt);
		if (srq_type == IB_SRQT_XRC) {
			atomic_dec(&xrcd->usecnt);
			atomic_dec(&cq->usecnt);
		}
	}

	return ret;
}
EXPORT_SYMBOL(ib_destroy_srq);

/* Queue pairs */

static void __ib_shared_qp_event_handler(struct ib_event *event, void *context)
{
	struct ib_qp *qp = context;
	unsigned long flags;

	spin_lock_irqsave(&qp->device->event_handler_lock, flags);
	list_for_each_entry(event->element.qp, &qp->open_list, open_list)
		if (event->element.qp->event_handler)
			event->element.qp->event_handler(event, event->element.qp->qp_context);
	spin_unlock_irqrestore(&qp->device->event_handler_lock, flags);
}

static void __ib_insert_xrcd_qp(struct ib_xrcd *xrcd, struct ib_qp *qp)
{
	mutex_lock(&xrcd->tgt_qp_mutex);
	list_add(&qp->xrcd_list, &xrcd->tgt_qp_list);
	mutex_unlock(&xrcd->tgt_qp_mutex);
}

static struct ib_qp *__ib_open_qp(struct ib_qp *real_qp,
				  void (*event_handler)(struct ib_event *, void *),
				  void *qp_context)
{
	struct ib_qp *qp;
	unsigned long flags;

	qp = kzalloc(sizeof *qp, GFP_KERNEL);
	if (!qp)
		return ERR_PTR(-ENOMEM);

	qp->real_qp = real_qp;
	atomic_inc(&real_qp->usecnt);
	qp->device = real_qp->device;
	qp->event_handler = event_handler;
	qp->qp_context = qp_context;
	qp->qp_num = real_qp->qp_num;
	qp->qp_type = real_qp->qp_type;

	spin_lock_irqsave(&real_qp->device->event_handler_lock, flags);
	list_add(&qp->open_list, &real_qp->open_list);
	spin_unlock_irqrestore(&real_qp->device->event_handler_lock, flags);

	return qp;
}

struct ib_qp *ib_open_qp(struct ib_xrcd *xrcd,
			 struct ib_qp_open_attr *qp_open_attr)
{
	struct ib_qp *qp, *real_qp;

	if (qp_open_attr->qp_type != IB_QPT_XRC_TGT)
		return ERR_PTR(-EINVAL);

	qp = ERR_PTR(-EINVAL);
	mutex_lock(&xrcd->tgt_qp_mutex);
	list_for_each_entry(real_qp, &xrcd->tgt_qp_list, xrcd_list) {
		if (real_qp->qp_num == qp_open_attr->qp_num) {
			qp = __ib_open_qp(real_qp, qp_open_attr->event_handler,
					  qp_open_attr->qp_context);
			break;
		}
	}
	mutex_unlock(&xrcd->tgt_qp_mutex);
	return qp;
}
EXPORT_SYMBOL(ib_open_qp);

struct ib_qp *ib_create_qp(struct ib_pd *pd,
			   struct ib_qp_init_attr *qp_init_attr)
{
	struct ib_qp *qp, *real_qp;
	struct ib_device *device;

	device = pd ? pd->device : qp_init_attr->xrcd->device;
	qp = device->create_qp(pd, qp_init_attr, NULL);

	if (!IS_ERR(qp)) {
		qp->device  = device;
		qp->real_qp = qp;
		qp->uobject = NULL;
		qp->qp_type = qp_init_attr->qp_type;

		atomic_set(&qp->usecnt, 0);
		if (qp_init_attr->qp_type == IB_QPT_XRC_TGT) {
			qp->event_handler = __ib_shared_qp_event_handler;
			qp->qp_context = qp;
			qp->pd = NULL;
			qp->send_cq = qp->recv_cq = NULL;
			qp->srq = NULL;
			qp->xrcd = qp_init_attr->xrcd;
			atomic_inc(&qp_init_attr->xrcd->usecnt);
			INIT_LIST_HEAD(&qp->open_list);

			real_qp = qp;
			qp = __ib_open_qp(real_qp, qp_init_attr->event_handler,
					  qp_init_attr->qp_context);
			if (!IS_ERR(qp))
				__ib_insert_xrcd_qp(qp_init_attr->xrcd, real_qp);
			else
				real_qp->device->destroy_qp(real_qp);
		} else {
			qp->event_handler = qp_init_attr->event_handler;
			qp->qp_context = qp_init_attr->qp_context;
			if (qp_init_attr->qp_type == IB_QPT_XRC_INI) {
				qp->recv_cq = NULL;
				qp->srq = NULL;
			} else {
				qp->recv_cq = qp_init_attr->recv_cq;
				atomic_inc(&qp_init_attr->recv_cq->usecnt);
				qp->srq = qp_init_attr->srq;
				if (qp->srq)
					atomic_inc(&qp_init_attr->srq->usecnt);
			}

			qp->pd      = pd;
			qp->send_cq = qp_init_attr->send_cq;
			qp->xrcd    = NULL;

			atomic_inc(&pd->usecnt);
			atomic_inc(&qp_init_attr->send_cq->usecnt);
		}
	}

	return qp;
}
EXPORT_SYMBOL(ib_create_qp);

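/*
 * Illustrative usage sketch (not part of the original file): creating an
 * RC QP over existing CQs.  The capabilities are arbitrary example
 * values; real consumers size them from ib_query_device() results.
 */
static __maybe_unused struct ib_qp *example_create_rc_qp(struct ib_pd *pd,
							 struct ib_cq *cq)
{
	struct ib_qp_init_attr qp_init_attr = {
		.send_cq     = cq,
		.recv_cq     = cq,
		.cap = {
			.max_send_wr  = 16,	/* hypothetical depths */
			.max_recv_wr  = 16,
			.max_send_sge = 1,
			.max_recv_sge = 1,
		},
		.sq_sig_type = IB_SIGNAL_REQ_WR,
		.qp_type     = IB_QPT_RC,
	};

	return ib_create_qp(pd, &qp_init_attr);
}
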
static const struct {
	int			valid;
	enum ib_qp_attr_mask	req_param[IB_QPT_MAX];
	enum ib_qp_attr_mask	opt_param[IB_QPT_MAX];
} qp_state_table[IB_QPS_ERR + 1][IB_QPS_ERR + 1] = {
	[IB_QPS_RESET] = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_INIT]  = {
			.valid = 1,
			.req_param = {
				[IB_QPT_UD]  = (IB_QP_PKEY_INDEX |
						IB_QP_PORT |
						IB_QP_QKEY),
				[IB_QPT_RAW_PACKET] = IB_QP_PORT,
				[IB_QPT_UC]  = (IB_QP_PKEY_INDEX |
						IB_QP_PORT |
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_RC]  = (IB_QP_PKEY_INDEX |
						IB_QP_PORT |
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_XRC_INI] = (IB_QP_PKEY_INDEX |
						IB_QP_PORT |
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_XRC_TGT] = (IB_QP_PKEY_INDEX |
						IB_QP_PORT |
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_SMI] = (IB_QP_PKEY_INDEX |
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_PKEY_INDEX |
						IB_QP_QKEY),
			}
		},
	},
	[IB_QPS_INIT]  = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR]   = { .valid = 1 },
		[IB_QPS_INIT]  = {
			.valid = 1,
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_PKEY_INDEX |
						IB_QP_PORT |
						IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_PKEY_INDEX |
						IB_QP_PORT |
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_RC]  = (IB_QP_PKEY_INDEX |
						IB_QP_PORT |
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_XRC_INI] = (IB_QP_PKEY_INDEX |
						IB_QP_PORT |
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_XRC_TGT] = (IB_QP_PKEY_INDEX |
						IB_QP_PORT |
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_SMI] = (IB_QP_PKEY_INDEX |
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_PKEY_INDEX |
						IB_QP_QKEY),
			}
		},
		[IB_QPS_RTR]   = {
			.valid = 1,
			.req_param = {
				[IB_QPT_UC]  = (IB_QP_AV |
						IB_QP_PATH_MTU |
						IB_QP_DEST_QPN |
						IB_QP_RQ_PSN),
				[IB_QPT_RC]  = (IB_QP_AV |
						IB_QP_PATH_MTU |
						IB_QP_DEST_QPN |
						IB_QP_RQ_PSN |
						IB_QP_MAX_DEST_RD_ATOMIC |
						IB_QP_MIN_RNR_TIMER),
				[IB_QPT_XRC_INI] = (IB_QP_AV |
						IB_QP_PATH_MTU |
						IB_QP_DEST_QPN |
						IB_QP_RQ_PSN),
				[IB_QPT_XRC_TGT] = (IB_QP_AV |
						IB_QP_PATH_MTU |
						IB_QP_DEST_QPN |
						IB_QP_RQ_PSN |
						IB_QP_MAX_DEST_RD_ATOMIC |
						IB_QP_MIN_RNR_TIMER),
			},
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_PKEY_INDEX |
						IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_PKEY_INDEX),
				[IB_QPT_RC]  = (IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_PKEY_INDEX),
				[IB_QPT_XRC_INI] = (IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_PKEY_INDEX),
				[IB_QPT_XRC_TGT] = (IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_PKEY_INDEX),
				[IB_QPT_SMI] = (IB_QP_PKEY_INDEX |
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_PKEY_INDEX |
						IB_QP_QKEY),
			},
		},
	},
	[IB_QPS_RTR]   = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR]   = { .valid = 1 },
		[IB_QPS_RTS]   = {
			.valid = 1,
			.req_param = {
				[IB_QPT_UD]  = IB_QP_SQ_PSN,
				[IB_QPT_UC]  = IB_QP_SQ_PSN,
				[IB_QPT_RC]  = (IB_QP_TIMEOUT |
						IB_QP_RETRY_CNT |
						IB_QP_RNR_RETRY |
						IB_QP_SQ_PSN |
						IB_QP_MAX_QP_RD_ATOMIC),
				[IB_QPT_XRC_INI] = (IB_QP_TIMEOUT |
						IB_QP_RETRY_CNT |
						IB_QP_RNR_RETRY |
						IB_QP_SQ_PSN |
						IB_QP_MAX_QP_RD_ATOMIC),
				[IB_QPT_XRC_TGT] = (IB_QP_TIMEOUT |
						IB_QP_SQ_PSN),
				[IB_QPT_SMI] = IB_QP_SQ_PSN,
				[IB_QPT_GSI] = IB_QP_SQ_PSN,
			},
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_CUR_STATE |
						IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_CUR_STATE |
						IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_RC]  = (IB_QP_CUR_STATE |
						IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_MIN_RNR_TIMER |
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_XRC_INI] = (IB_QP_CUR_STATE |
						IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_XRC_TGT] = (IB_QP_CUR_STATE |
						IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_MIN_RNR_TIMER |
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_SMI] = (IB_QP_CUR_STATE |
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_CUR_STATE |
						IB_QP_QKEY),
			}
		}
	},
	[IB_QPS_RTS]   = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR]   = { .valid = 1 },
		[IB_QPS_RTS]   = {
			.valid = 1,
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_CUR_STATE |
						IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_CUR_STATE |
						IB_QP_ACCESS_FLAGS |
						IB_QP_ALT_PATH |
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_RC]  = (IB_QP_CUR_STATE |
						IB_QP_ACCESS_FLAGS |
						IB_QP_ALT_PATH |
						IB_QP_PATH_MIG_STATE |
						IB_QP_MIN_RNR_TIMER),
				[IB_QPT_XRC_INI] = (IB_QP_CUR_STATE |
						IB_QP_ACCESS_FLAGS |
						IB_QP_ALT_PATH |
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_XRC_TGT] = (IB_QP_CUR_STATE |
						IB_QP_ACCESS_FLAGS |
						IB_QP_ALT_PATH |
						IB_QP_PATH_MIG_STATE |
						IB_QP_MIN_RNR_TIMER),
				[IB_QPT_SMI] = (IB_QP_CUR_STATE |
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_CUR_STATE |
						IB_QP_QKEY),
			}
		},
		[IB_QPS_SQD]   = {
			.valid = 1,
			.opt_param = {
				[IB_QPT_UD]  = IB_QP_EN_SQD_ASYNC_NOTIFY,
				[IB_QPT_UC]  = IB_QP_EN_SQD_ASYNC_NOTIFY,
				[IB_QPT_RC]  = IB_QP_EN_SQD_ASYNC_NOTIFY,
				[IB_QPT_XRC_INI] = IB_QP_EN_SQD_ASYNC_NOTIFY,
				[IB_QPT_XRC_TGT] = IB_QP_EN_SQD_ASYNC_NOTIFY, /* ??? */
				[IB_QPT_SMI] = IB_QP_EN_SQD_ASYNC_NOTIFY,
				[IB_QPT_GSI] = IB_QP_EN_SQD_ASYNC_NOTIFY
			}
		},
	},
	[IB_QPS_SQD]   = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR]   = { .valid = 1 },
		[IB_QPS_RTS]   = {
			.valid = 1,
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_CUR_STATE |
						IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_CUR_STATE |
						IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_RC]  = (IB_QP_CUR_STATE |
						IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_MIN_RNR_TIMER |
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_XRC_INI] = (IB_QP_CUR_STATE |
						IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_XRC_TGT] = (IB_QP_CUR_STATE |
						IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_MIN_RNR_TIMER |
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_SMI] = (IB_QP_CUR_STATE |
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_CUR_STATE |
						IB_QP_QKEY),
			}
		},
		[IB_QPS_SQD]   = {
			.valid = 1,
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_PKEY_INDEX |
						IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_AV |
						IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_PKEY_INDEX |
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_RC]  = (IB_QP_PORT |
						IB_QP_AV |
						IB_QP_TIMEOUT |
						IB_QP_RETRY_CNT |
						IB_QP_RNR_RETRY |
						IB_QP_MAX_QP_RD_ATOMIC |
						IB_QP_MAX_DEST_RD_ATOMIC |
						IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_PKEY_INDEX |
						IB_QP_MIN_RNR_TIMER |
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_XRC_INI] = (IB_QP_PORT |
						IB_QP_AV |
						IB_QP_TIMEOUT |
						IB_QP_RETRY_CNT |
						IB_QP_RNR_RETRY |
						IB_QP_MAX_QP_RD_ATOMIC |
						IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_PKEY_INDEX |
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_XRC_TGT] = (IB_QP_PORT |
						IB_QP_AV |
						IB_QP_TIMEOUT |
						IB_QP_MAX_DEST_RD_ATOMIC |
						IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_PKEY_INDEX |
						IB_QP_MIN_RNR_TIMER |
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_SMI] = (IB_QP_PKEY_INDEX |
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_PKEY_INDEX |
						IB_QP_QKEY),
			}
		}
	},
	[IB_QPS_SQE]   = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR]   = { .valid = 1 },
		[IB_QPS_RTS]   = {
			.valid = 1,
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_CUR_STATE |
						IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_CUR_STATE |
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_SMI] = (IB_QP_CUR_STATE |
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_CUR_STATE |
						IB_QP_QKEY),
			}
		}
	},
	[IB_QPS_ERR] = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR]   = { .valid = 1 }
	}
};

int ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
		       enum ib_qp_type type, enum ib_qp_attr_mask mask,
		       enum rdma_link_layer ll)
{
	enum ib_qp_attr_mask req_param, opt_param;

	if (cur_state  < 0 || cur_state  > IB_QPS_ERR ||
	    next_state < 0 || next_state > IB_QPS_ERR)
		return 0;

	if (mask & IB_QP_CUR_STATE &&
	    cur_state != IB_QPS_RTR && cur_state != IB_QPS_RTS &&
	    cur_state != IB_QPS_SQD && cur_state != IB_QPS_SQE)
		return 0;

	if (!qp_state_table[cur_state][next_state].valid)
		return 0;

	req_param = qp_state_table[cur_state][next_state].req_param[type];
	opt_param = qp_state_table[cur_state][next_state].opt_param[type];

	if ((mask & req_param) != req_param)
		return 0;

	if (mask & ~(req_param | opt_param | IB_QP_STATE))
		return 0;

	return 1;
}
EXPORT_SYMBOL(ib_modify_qp_is_ok);

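/*
 * Illustrative usage sketch (not part of the original file): validating
 * the mandatory RESET -> INIT transition of an RC QP against the table
 * above.  The mask below is IB_QP_STATE plus exactly the req_param entry
 * for [IB_QPS_RESET][IB_QPS_INIT] and IB_QPT_RC.
 */
static __maybe_unused int example_check_reset_to_init(void)
{
	enum ib_qp_attr_mask mask = IB_QP_STATE | IB_QP_PKEY_INDEX |
				    IB_QP_PORT | IB_QP_ACCESS_FLAGS;

	return ib_modify_qp_is_ok(IB_QPS_RESET, IB_QPS_INIT, IB_QPT_RC, mask,
				  IB_LINK_LAYER_INFINIBAND) ? 0 : -EINVAL;
}
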
int ib_resolve_eth_dmac(struct ib_qp *qp,
			struct ib_qp_attr *qp_attr, int *qp_attr_mask)
{
	int ret = 0;

	if (*qp_attr_mask & IB_QP_AV) {
		if (qp_attr->ah_attr.port_num < rdma_start_port(qp->device) ||
		    qp_attr->ah_attr.port_num > rdma_end_port(qp->device))
			return -EINVAL;

		if (!rdma_cap_eth_ah(qp->device, qp_attr->ah_attr.port_num))
			return 0;

		if (rdma_link_local_addr((struct in6_addr *)qp_attr->ah_attr.grh.dgid.raw)) {
			rdma_get_ll_mac((struct in6_addr *)qp_attr->ah_attr.grh.dgid.raw,
					qp_attr->ah_attr.dmac);
		} else {
			union ib_gid		sgid;
			struct ib_gid_attr	sgid_attr;
			int			ifindex;

			ret = ib_query_gid(qp->device,
					   qp_attr->ah_attr.port_num,
					   qp_attr->ah_attr.grh.sgid_index,
					   &sgid, &sgid_attr);

			if (ret || !sgid_attr.ndev) {
				if (!ret)
					ret = -ENXIO;
				goto out;
			}

			ifindex = sgid_attr.ndev->ifindex;

			ret = rdma_addr_find_dmac_by_grh(&sgid,
							 &qp_attr->ah_attr.grh.dgid,
							 qp_attr->ah_attr.dmac,
							 NULL, ifindex);

			dev_put(sgid_attr.ndev);
		}
	}
out:
	return ret;
}
EXPORT_SYMBOL(ib_resolve_eth_dmac);


int ib_modify_qp(struct ib_qp *qp,
		 struct ib_qp_attr *qp_attr,
		 int qp_attr_mask)
{
	int ret;

	ret = ib_resolve_eth_dmac(qp, qp_attr, &qp_attr_mask);
	if (ret)
		return ret;

	return qp->device->modify_qp(qp->real_qp, qp_attr, qp_attr_mask, NULL);
}
EXPORT_SYMBOL(ib_modify_qp);

int ib_query_qp(struct ib_qp *qp,
		struct ib_qp_attr *qp_attr,
		int qp_attr_mask,
		struct ib_qp_init_attr *qp_init_attr)
{
	return qp->device->query_qp ?
		qp->device->query_qp(qp->real_qp, qp_attr, qp_attr_mask, qp_init_attr) :
		-ENOSYS;
}
EXPORT_SYMBOL(ib_query_qp);

int ib_close_qp(struct ib_qp *qp)
{
	struct ib_qp *real_qp;
	unsigned long flags;

	real_qp = qp->real_qp;
	if (real_qp == qp)
		return -EINVAL;

	spin_lock_irqsave(&real_qp->device->event_handler_lock, flags);
	list_del(&qp->open_list);
	spin_unlock_irqrestore(&real_qp->device->event_handler_lock, flags);

	atomic_dec(&real_qp->usecnt);
	kfree(qp);

	return 0;
}
EXPORT_SYMBOL(ib_close_qp);

static int __ib_destroy_shared_qp(struct ib_qp *qp)
{
	struct ib_xrcd *xrcd;
	struct ib_qp *real_qp;
	int ret;

	real_qp = qp->real_qp;
	xrcd = real_qp->xrcd;

	mutex_lock(&xrcd->tgt_qp_mutex);
	ib_close_qp(qp);
	if (atomic_read(&real_qp->usecnt) == 0)
		list_del(&real_qp->xrcd_list);
	else
		real_qp = NULL;
	mutex_unlock(&xrcd->tgt_qp_mutex);

	if (real_qp) {
		ret = ib_destroy_qp(real_qp);
		if (!ret)
			atomic_dec(&xrcd->usecnt);
		else
			__ib_insert_xrcd_qp(xrcd, real_qp);
	}

	return 0;
}

int ib_destroy_qp(struct ib_qp *qp)
{
	struct ib_pd *pd;
	struct ib_cq *scq, *rcq;
	struct ib_srq *srq;
	int ret;

	if (atomic_read(&qp->usecnt))
		return -EBUSY;

	if (qp->real_qp != qp)
		return __ib_destroy_shared_qp(qp);

	pd  = qp->pd;
	scq = qp->send_cq;
	rcq = qp->recv_cq;
	srq = qp->srq;

	ret = qp->device->destroy_qp(qp);
	if (!ret) {
		if (pd)
			atomic_dec(&pd->usecnt);
		if (scq)
			atomic_dec(&scq->usecnt);
		if (rcq)
			atomic_dec(&rcq->usecnt);
		if (srq)
			atomic_dec(&srq->usecnt);
	}

	return ret;
}
EXPORT_SYMBOL(ib_destroy_qp);

1153
1154struct ib_cq *ib_create_cq(struct ib_device *device,
1155 ib_comp_handler comp_handler,
1156 void (*event_handler)(struct ib_event *, void *),
Matan Barak8e372102015-06-11 16:35:21 +03001157 void *cq_context,
1158 const struct ib_cq_init_attr *cq_attr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001159{
1160 struct ib_cq *cq;
1161
Matan Barak8e372102015-06-11 16:35:21 +03001162 cq = device->create_cq(device, cq_attr, NULL, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001163
1164 if (!IS_ERR(cq)) {
1165 cq->device = device;
Roland Dreierb5e81bf2005-07-07 17:57:11 -07001166 cq->uobject = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001167 cq->comp_handler = comp_handler;
1168 cq->event_handler = event_handler;
1169 cq->cq_context = cq_context;
1170 atomic_set(&cq->usecnt, 0);
1171 }
1172
1173 return cq;
1174}
1175EXPORT_SYMBOL(ib_create_cq);
1176
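/*
 * Illustrative usage sketch (not part of the original file): creating a
 * CQ through the ib_cq_init_attr form taken above.  The depth is an
 * arbitrary example value, completion vector 0 is requested, and NULL
 * handlers are legal for a purely polled CQ.
 */
static __maybe_unused struct ib_cq *example_create_polled_cq(struct ib_device *device,
							     void *context)
{
	struct ib_cq_init_attr cq_attr = {
		.cqe	     = 256,	/* hypothetical depth */
		.comp_vector = 0,
	};

	return ib_create_cq(device, NULL, NULL, context, &cq_attr);
}
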
int ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
{
	return cq->device->modify_cq ?
		cq->device->modify_cq(cq, cq_count, cq_period) : -ENOSYS;
}
EXPORT_SYMBOL(ib_modify_cq);

int ib_destroy_cq(struct ib_cq *cq)
{
	if (atomic_read(&cq->usecnt))
		return -EBUSY;

	return cq->device->destroy_cq(cq);
}
EXPORT_SYMBOL(ib_destroy_cq);

int ib_resize_cq(struct ib_cq *cq, int cqe)
{
	return cq->device->resize_cq ?
		cq->device->resize_cq(cq, cqe, NULL) : -ENOSYS;
}
EXPORT_SYMBOL(ib_resize_cq);

/* Memory regions */

struct ib_mr *ib_get_dma_mr(struct ib_pd *pd, int mr_access_flags)
{
	struct ib_mr *mr;
	int err;

	err = ib_check_mr_access(mr_access_flags);
	if (err)
		return ERR_PTR(err);

	mr = pd->device->get_dma_mr(pd, mr_access_flags);

	if (!IS_ERR(mr)) {
		mr->device  = pd->device;
		mr->pd      = pd;
		mr->uobject = NULL;
		atomic_inc(&pd->usecnt);
		atomic_set(&mr->usecnt, 0);
	}

	return mr;
}
EXPORT_SYMBOL(ib_get_dma_mr);

int ib_query_mr(struct ib_mr *mr, struct ib_mr_attr *mr_attr)
{
	return mr->device->query_mr ?
		mr->device->query_mr(mr, mr_attr) : -ENOSYS;
}
EXPORT_SYMBOL(ib_query_mr);

int ib_dereg_mr(struct ib_mr *mr)
{
	struct ib_pd *pd;
	int ret;

	if (atomic_read(&mr->usecnt))
		return -EBUSY;

	pd = mr->pd;
	ret = mr->device->dereg_mr(mr);
	if (!ret)
		atomic_dec(&pd->usecnt);

	return ret;
}
EXPORT_SYMBOL(ib_dereg_mr);

/**
 * ib_alloc_mr() - Allocates a memory region
 * @pd:            protection domain associated with the region
 * @mr_type:       memory region type
 * @max_num_sg:    maximum sg entries available for registration.
 *
 * Notes:
 * Memory registration page/sg lists must not exceed max_num_sg.
 * For mr_type IB_MR_TYPE_MEM_REG, the total length cannot exceed
 * max_num_sg * used_page_size.
 *
 */
struct ib_mr *ib_alloc_mr(struct ib_pd *pd,
			  enum ib_mr_type mr_type,
			  u32 max_num_sg)
{
	struct ib_mr *mr;

	if (!pd->device->alloc_mr)
		return ERR_PTR(-ENOSYS);

	mr = pd->device->alloc_mr(pd, mr_type, max_num_sg);
	if (!IS_ERR(mr)) {
		mr->device  = pd->device;
		mr->pd      = pd;
		mr->uobject = NULL;
		atomic_inc(&pd->usecnt);
		atomic_set(&mr->usecnt, 0);
	}

	return mr;
}
EXPORT_SYMBOL(ib_alloc_mr);

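/*
 * Illustrative usage sketch (not part of the original file): allocating a
 * registration MR sized for a hypothetical 16-entry page list, as a
 * fast-registration consumer would before building its registration work
 * request.
 */
static __maybe_unused struct ib_mr *example_alloc_reg_mr(struct ib_pd *pd)
{
	return ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, 16);
}
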
struct ib_fast_reg_page_list *ib_alloc_fast_reg_page_list(struct ib_device *device,
							  int max_page_list_len)
{
	struct ib_fast_reg_page_list *page_list;

	if (!device->alloc_fast_reg_page_list)
		return ERR_PTR(-ENOSYS);

	page_list = device->alloc_fast_reg_page_list(device, max_page_list_len);

	if (!IS_ERR(page_list)) {
		page_list->device = device;
		page_list->max_page_list_len = max_page_list_len;
	}

	return page_list;
}
EXPORT_SYMBOL(ib_alloc_fast_reg_page_list);

void ib_free_fast_reg_page_list(struct ib_fast_reg_page_list *page_list)
{
	page_list->device->free_fast_reg_page_list(page_list);
}
EXPORT_SYMBOL(ib_free_fast_reg_page_list);

/* Memory windows */

struct ib_mw *ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type)
{
	struct ib_mw *mw;

	if (!pd->device->alloc_mw)
		return ERR_PTR(-ENOSYS);

	mw = pd->device->alloc_mw(pd, type);
	if (!IS_ERR(mw)) {
		mw->device  = pd->device;
		mw->pd      = pd;
		mw->uobject = NULL;
		mw->type    = type;
		atomic_inc(&pd->usecnt);
	}

	return mw;
}
EXPORT_SYMBOL(ib_alloc_mw);

int ib_dealloc_mw(struct ib_mw *mw)
{
	struct ib_pd *pd;
	int ret;

	pd = mw->pd;
	ret = mw->device->dealloc_mw(mw);
	if (!ret)
		atomic_dec(&pd->usecnt);

	return ret;
}
EXPORT_SYMBOL(ib_dealloc_mw);

/* "Fast" memory regions */

struct ib_fmr *ib_alloc_fmr(struct ib_pd *pd,
			    int mr_access_flags,
			    struct ib_fmr_attr *fmr_attr)
{
	struct ib_fmr *fmr;

	if (!pd->device->alloc_fmr)
		return ERR_PTR(-ENOSYS);

	fmr = pd->device->alloc_fmr(pd, mr_access_flags, fmr_attr);
	if (!IS_ERR(fmr)) {
		fmr->device = pd->device;
		fmr->pd     = pd;
		atomic_inc(&pd->usecnt);
	}

	return fmr;
}
EXPORT_SYMBOL(ib_alloc_fmr);

int ib_unmap_fmr(struct list_head *fmr_list)
{
	struct ib_fmr *fmr;

	if (list_empty(fmr_list))
		return 0;

	fmr = list_entry(fmr_list->next, struct ib_fmr, list);
	return fmr->device->unmap_fmr(fmr_list);
}
EXPORT_SYMBOL(ib_unmap_fmr);

int ib_dealloc_fmr(struct ib_fmr *fmr)
{
	struct ib_pd *pd;
	int ret;

	pd = fmr->pd;
	ret = fmr->device->dealloc_fmr(fmr);
	if (!ret)
		atomic_dec(&pd->usecnt);

	return ret;
}
EXPORT_SYMBOL(ib_dealloc_fmr);

/* Multicast groups */

int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
{
	int ret;

	if (!qp->device->attach_mcast)
		return -ENOSYS;
	if (gid->raw[0] != 0xff || qp->qp_type != IB_QPT_UD)
		return -EINVAL;

	ret = qp->device->attach_mcast(qp, gid, lid);
	if (!ret)
		atomic_inc(&qp->usecnt);
	return ret;
}
EXPORT_SYMBOL(ib_attach_mcast);

int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
{
	int ret;

	if (!qp->device->detach_mcast)
		return -ENOSYS;
	if (gid->raw[0] != 0xff || qp->qp_type != IB_QPT_UD)
		return -EINVAL;

	ret = qp->device->detach_mcast(qp, gid, lid);
	if (!ret)
		atomic_dec(&qp->usecnt);
	return ret;
}
EXPORT_SYMBOL(ib_detach_mcast);

struct ib_xrcd *ib_alloc_xrcd(struct ib_device *device)
{
	struct ib_xrcd *xrcd;

	if (!device->alloc_xrcd)
		return ERR_PTR(-ENOSYS);

	xrcd = device->alloc_xrcd(device, NULL, NULL);
	if (!IS_ERR(xrcd)) {
		xrcd->device = device;
		xrcd->inode = NULL;
		atomic_set(&xrcd->usecnt, 0);
		mutex_init(&xrcd->tgt_qp_mutex);
		INIT_LIST_HEAD(&xrcd->tgt_qp_list);
	}

	return xrcd;
}
EXPORT_SYMBOL(ib_alloc_xrcd);

int ib_dealloc_xrcd(struct ib_xrcd *xrcd)
{
	struct ib_qp *qp;
	int ret;

	if (atomic_read(&xrcd->usecnt))
		return -EBUSY;

	while (!list_empty(&xrcd->tgt_qp_list)) {
		qp = list_entry(xrcd->tgt_qp_list.next, struct ib_qp, xrcd_list);
		ret = ib_destroy_qp(qp);
		if (ret)
			return ret;
	}

	return xrcd->device->dealloc_xrcd(xrcd);
}
EXPORT_SYMBOL(ib_dealloc_xrcd);

struct ib_flow *ib_create_flow(struct ib_qp *qp,
			       struct ib_flow_attr *flow_attr,
			       int domain)
{
	struct ib_flow *flow_id;

	if (!qp->device->create_flow)
		return ERR_PTR(-ENOSYS);

	flow_id = qp->device->create_flow(qp, flow_attr, domain);
	if (!IS_ERR(flow_id))
		atomic_inc(&qp->usecnt);
	return flow_id;
}
EXPORT_SYMBOL(ib_create_flow);

int ib_destroy_flow(struct ib_flow *flow_id)
{
	int err;
	struct ib_qp *qp = flow_id->qp;

	err = qp->device->destroy_flow(flow_id);
	if (!err)
		atomic_dec(&qp->usecnt);
	return err;
}
EXPORT_SYMBOL(ib_destroy_flow);

int ib_check_mr_status(struct ib_mr *mr, u32 check_mask,
		       struct ib_mr_status *mr_status)
{
	return mr->device->check_mr_status ?
		mr->device->check_mr_status(mr, check_mask, mr_status) : -ENOSYS;
}
EXPORT_SYMBOL(ib_check_mr_status);