/*
 * Copyright (c) 2004 Mellanox Technologies Ltd.  All rights reserved.
 * Copyright (c) 2004 Infinicon Corporation.  All rights reserved.
 * Copyright (c) 2004 Intel Corporation.  All rights reserved.
 * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
 * Copyright (c) 2004 Voltaire Corporation.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005, 2006 Cisco Systems.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <net/addrconf.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_cache.h>
#include <rdma/ib_addr.h>

#include "core_priv.h"

static const char * const ib_events[] = {
	[IB_EVENT_CQ_ERR]		= "CQ error",
	[IB_EVENT_QP_FATAL]		= "QP fatal error",
	[IB_EVENT_QP_REQ_ERR]		= "QP request error",
	[IB_EVENT_QP_ACCESS_ERR]	= "QP access error",
	[IB_EVENT_COMM_EST]		= "communication established",
	[IB_EVENT_SQ_DRAINED]		= "send queue drained",
	[IB_EVENT_PATH_MIG]		= "path migration successful",
	[IB_EVENT_PATH_MIG_ERR]		= "path migration error",
	[IB_EVENT_DEVICE_FATAL]		= "device fatal error",
	[IB_EVENT_PORT_ACTIVE]		= "port active",
	[IB_EVENT_PORT_ERR]		= "port error",
	[IB_EVENT_LID_CHANGE]		= "LID change",
	[IB_EVENT_PKEY_CHANGE]		= "P_key change",
	[IB_EVENT_SM_CHANGE]		= "SM change",
	[IB_EVENT_SRQ_ERR]		= "SRQ error",
	[IB_EVENT_SRQ_LIMIT_REACHED]	= "SRQ limit reached",
	[IB_EVENT_QP_LAST_WQE_REACHED]	= "last WQE reached",
	[IB_EVENT_CLIENT_REREGISTER]	= "client reregister",
	[IB_EVENT_GID_CHANGE]		= "GID changed",
};

const char *__attribute_const__ ib_event_msg(enum ib_event_type event)
{
	size_t index = event;

	return (index < ARRAY_SIZE(ib_events) && ib_events[index]) ?
			ib_events[index] : "unrecognized event";
}
EXPORT_SYMBOL(ib_event_msg);

static const char * const wc_statuses[] = {
	[IB_WC_SUCCESS]			= "success",
	[IB_WC_LOC_LEN_ERR]		= "local length error",
	[IB_WC_LOC_QP_OP_ERR]		= "local QP operation error",
	[IB_WC_LOC_EEC_OP_ERR]		= "local EE context operation error",
	[IB_WC_LOC_PROT_ERR]		= "local protection error",
	[IB_WC_WR_FLUSH_ERR]		= "WR flushed",
	[IB_WC_MW_BIND_ERR]		= "memory management operation error",
	[IB_WC_BAD_RESP_ERR]		= "bad response error",
	[IB_WC_LOC_ACCESS_ERR]		= "local access error",
	[IB_WC_REM_INV_REQ_ERR]		= "invalid request error",
	[IB_WC_REM_ACCESS_ERR]		= "remote access error",
	[IB_WC_REM_OP_ERR]		= "remote operation error",
	[IB_WC_RETRY_EXC_ERR]		= "transport retry counter exceeded",
	[IB_WC_RNR_RETRY_EXC_ERR]	= "RNR retry counter exceeded",
	[IB_WC_LOC_RDD_VIOL_ERR]	= "local RDD violation error",
	[IB_WC_REM_INV_RD_REQ_ERR]	= "remote invalid RD request",
	[IB_WC_REM_ABORT_ERR]		= "operation aborted",
	[IB_WC_INV_EECN_ERR]		= "invalid EE context number",
	[IB_WC_INV_EEC_STATE_ERR]	= "invalid EE context state",
	[IB_WC_FATAL_ERR]		= "fatal error",
	[IB_WC_RESP_TIMEOUT_ERR]	= "response timeout error",
	[IB_WC_GENERAL_ERR]		= "general error",
};

const char *__attribute_const__ ib_wc_status_msg(enum ib_wc_status status)
{
	size_t index = status;

	return (index < ARRAY_SIZE(wc_statuses) && wc_statuses[index]) ?
			wc_statuses[index] : "unrecognized status";
}
EXPORT_SYMBOL(ib_wc_status_msg);

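/*
 * Illustrative sketch (not part of this file; the handler name and CQ
 * setup are hypothetical): a consumer would typically use
 * ib_wc_status_msg() when logging failed completions.
 *
 *	static void example_comp_handler(struct ib_cq *cq, void *ctx)
 *	{
 *		struct ib_wc wc;
 *
 *		while (ib_poll_cq(cq, 1, &wc) > 0) {
 *			if (wc.status != IB_WC_SUCCESS)
 *				pr_err("wr_id %llu failed: %s\n",
 *				       wc.wr_id, ib_wc_status_msg(wc.status));
 *		}
 *	}
 */
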
__attribute_const__ int ib_rate_to_mult(enum ib_rate rate)
{
	switch (rate) {
	case IB_RATE_2_5_GBPS: return  1;
	case IB_RATE_5_GBPS:   return  2;
	case IB_RATE_10_GBPS:  return  4;
	case IB_RATE_20_GBPS:  return  8;
	case IB_RATE_30_GBPS:  return 12;
	case IB_RATE_40_GBPS:  return 16;
	case IB_RATE_60_GBPS:  return 24;
	case IB_RATE_80_GBPS:  return 32;
	case IB_RATE_120_GBPS: return 48;
	default:	       return -1;
	}
}
EXPORT_SYMBOL(ib_rate_to_mult);

__attribute_const__ enum ib_rate mult_to_ib_rate(int mult)
{
	switch (mult) {
	case 1:  return IB_RATE_2_5_GBPS;
	case 2:  return IB_RATE_5_GBPS;
	case 4:  return IB_RATE_10_GBPS;
	case 8:  return IB_RATE_20_GBPS;
	case 12: return IB_RATE_30_GBPS;
	case 16: return IB_RATE_40_GBPS;
	case 24: return IB_RATE_60_GBPS;
	case 32: return IB_RATE_80_GBPS;
	case 48: return IB_RATE_120_GBPS;
	default: return IB_RATE_PORT_CURRENT;
	}
}
EXPORT_SYMBOL(mult_to_ib_rate);

__attribute_const__ int ib_rate_to_mbps(enum ib_rate rate)
{
	switch (rate) {
	case IB_RATE_2_5_GBPS: return 2500;
	case IB_RATE_5_GBPS:   return 5000;
	case IB_RATE_10_GBPS:  return 10000;
	case IB_RATE_20_GBPS:  return 20000;
	case IB_RATE_30_GBPS:  return 30000;
	case IB_RATE_40_GBPS:  return 40000;
	case IB_RATE_60_GBPS:  return 60000;
	case IB_RATE_80_GBPS:  return 80000;
	case IB_RATE_120_GBPS: return 120000;
	case IB_RATE_14_GBPS:  return 14062;
	case IB_RATE_56_GBPS:  return 56250;
	case IB_RATE_112_GBPS: return 112500;
	case IB_RATE_168_GBPS: return 168750;
	case IB_RATE_25_GBPS:  return 25781;
	case IB_RATE_100_GBPS: return 103125;
	case IB_RATE_200_GBPS: return 206250;
	case IB_RATE_300_GBPS: return 309375;
	default:	       return -1;
	}
}
EXPORT_SYMBOL(ib_rate_to_mbps);

__attribute_const__ enum rdma_transport_type
rdma_node_get_transport(enum rdma_node_type node_type)
{
	switch (node_type) {
	case RDMA_NODE_IB_CA:
	case RDMA_NODE_IB_SWITCH:
	case RDMA_NODE_IB_ROUTER:
		return RDMA_TRANSPORT_IB;
	case RDMA_NODE_RNIC:
		return RDMA_TRANSPORT_IWARP;
	case RDMA_NODE_USNIC:
		return RDMA_TRANSPORT_USNIC;
	case RDMA_NODE_USNIC_UDP:
		return RDMA_TRANSPORT_USNIC_UDP;
	default:
		BUG();
		return 0;
	}
}
EXPORT_SYMBOL(rdma_node_get_transport);

enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device, u8 port_num)
{
	if (device->get_link_layer)
		return device->get_link_layer(device, port_num);

	switch (rdma_node_get_transport(device->node_type)) {
	case RDMA_TRANSPORT_IB:
		return IB_LINK_LAYER_INFINIBAND;
	case RDMA_TRANSPORT_IWARP:
	case RDMA_TRANSPORT_USNIC:
	case RDMA_TRANSPORT_USNIC_UDP:
		return IB_LINK_LAYER_ETHERNET;
	default:
		return IB_LINK_LAYER_UNSPECIFIED;
	}
}
EXPORT_SYMBOL(rdma_port_get_link_layer);

/* Protection domains */

/**
 * ib_alloc_pd - Allocates an unused protection domain.
 * @device: The device on which to allocate the protection domain.
 *
 * A protection domain object provides an association between QPs, shared
 * receive queues, address handles, memory regions, and memory windows.
 *
 * Every PD has a local_dma_lkey which can be used as the lkey value for local
 * memory operations.
 */
struct ib_pd *ib_alloc_pd(struct ib_device *device)
{
	struct ib_pd *pd;

	pd = device->alloc_pd(device, NULL, NULL);
	if (IS_ERR(pd))
		return pd;

	pd->device = device;
	pd->uobject = NULL;
	pd->local_mr = NULL;
	atomic_set(&pd->usecnt, 0);

	if (device->attrs.device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY)
		pd->local_dma_lkey = device->local_dma_lkey;
	else {
		struct ib_mr *mr;

		mr = ib_get_dma_mr(pd, IB_ACCESS_LOCAL_WRITE);
		if (IS_ERR(mr)) {
			ib_dealloc_pd(pd);
			return (struct ib_pd *)mr;
		}

		pd->local_mr = mr;
		pd->local_dma_lkey = pd->local_mr->lkey;
	}
	return pd;
}
EXPORT_SYMBOL(ib_alloc_pd);

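/*
 * Illustrative usage sketch (hypothetical caller; error handling trimmed):
 *
 *	struct ib_pd *pd = ib_alloc_pd(device);
 *
 *	if (IS_ERR(pd))
 *		return PTR_ERR(pd);
 *	... create QPs/CQs/MRs under this PD, using pd->local_dma_lkey
 *	    for local SGEs ...
 *	ib_dealloc_pd(pd);
 */
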
/**
 * ib_dealloc_pd - Deallocates a protection domain.
 * @pd: The protection domain to deallocate.
 *
 * It is an error to call this function while any resources in the pd still
 * exist.  The caller is responsible for synchronously destroying them and
 * guaranteeing that no new allocations will happen.
 */
void ib_dealloc_pd(struct ib_pd *pd)
{
	int ret;

	if (pd->local_mr) {
		ret = ib_dereg_mr(pd->local_mr);
		WARN_ON(ret);
		pd->local_mr = NULL;
	}

	/* uverbs manipulates usecnt with proper locking, while the kabi
	   requires the caller to guarantee we can't race here. */
	WARN_ON(atomic_read(&pd->usecnt));

	/* Making dealloc_pd a void return is a WIP, no driver should return
	   an error here. */
	ret = pd->device->dealloc_pd(pd);
	WARN_ONCE(ret, "Infiniband HW driver failed dealloc_pd");
}
EXPORT_SYMBOL(ib_dealloc_pd);

/* Address handles */

struct ib_ah *ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr)
{
	struct ib_ah *ah;

	ah = pd->device->create_ah(pd, ah_attr);

	if (!IS_ERR(ah)) {
		ah->device = pd->device;
		ah->pd = pd;
		ah->uobject = NULL;
		atomic_inc(&pd->usecnt);
	}

	return ah;
}
EXPORT_SYMBOL(ib_create_ah);

struct find_gid_index_context {
	u16 vlan_id;
};

static bool find_gid_index(const union ib_gid *gid,
			   const struct ib_gid_attr *gid_attr,
			   void *context)
{
	struct find_gid_index_context *ctx =
		(struct find_gid_index_context *)context;

	if ((!!(ctx->vlan_id != 0xffff) == !is_vlan_dev(gid_attr->ndev)) ||
	    (is_vlan_dev(gid_attr->ndev) &&
	     vlan_dev_vlan_id(gid_attr->ndev) != ctx->vlan_id))
		return false;

	return true;
}

static int get_sgid_index_from_eth(struct ib_device *device, u8 port_num,
				   u16 vlan_id, const union ib_gid *sgid,
				   u16 *gid_index)
{
	struct find_gid_index_context context = {.vlan_id = vlan_id};

	return ib_find_gid_by_filter(device, sgid, port_num, find_gid_index,
				     &context, gid_index);
}

int ib_init_ah_from_wc(struct ib_device *device, u8 port_num,
		       const struct ib_wc *wc, const struct ib_grh *grh,
		       struct ib_ah_attr *ah_attr)
{
	u32 flow_class;
	u16 gid_index;
	int ret;

	memset(ah_attr, 0, sizeof *ah_attr);
	if (rdma_cap_eth_ah(device, port_num)) {
		u16 vlan_id = wc->wc_flags & IB_WC_WITH_VLAN ?
				wc->vlan_id : 0xffff;

		if (!(wc->wc_flags & IB_WC_GRH))
			return -EPROTOTYPE;

		if (!(wc->wc_flags & IB_WC_WITH_SMAC) ||
		    !(wc->wc_flags & IB_WC_WITH_VLAN)) {
			ret = rdma_addr_find_dmac_by_grh(&grh->dgid, &grh->sgid,
							 ah_attr->dmac,
							 wc->wc_flags & IB_WC_WITH_VLAN ?
							 NULL : &vlan_id,
							 0);
			if (ret)
				return ret;
		}

		ret = get_sgid_index_from_eth(device, port_num, vlan_id,
					      &grh->dgid, &gid_index);
		if (ret)
			return ret;

		if (wc->wc_flags & IB_WC_WITH_SMAC)
			memcpy(ah_attr->dmac, wc->smac, ETH_ALEN);
	}

	ah_attr->dlid = wc->slid;
	ah_attr->sl = wc->sl;
	ah_attr->src_path_bits = wc->dlid_path_bits;
	ah_attr->port_num = port_num;

	if (wc->wc_flags & IB_WC_GRH) {
		ah_attr->ah_flags = IB_AH_GRH;
		ah_attr->grh.dgid = grh->sgid;

		if (!rdma_cap_eth_ah(device, port_num)) {
			ret = ib_find_cached_gid_by_port(device, &grh->dgid,
							 IB_GID_TYPE_IB,
							 port_num, NULL,
							 &gid_index);
			if (ret)
				return ret;
		}

		ah_attr->grh.sgid_index = (u8) gid_index;
		flow_class = be32_to_cpu(grh->version_tclass_flow);
		ah_attr->grh.flow_label = flow_class & 0xFFFFF;
		ah_attr->grh.hop_limit = 0xFF;
		ah_attr->grh.traffic_class = (flow_class >> 20) & 0xFF;
	}
	return 0;
}
EXPORT_SYMBOL(ib_init_ah_from_wc);

struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, const struct ib_wc *wc,
				   const struct ib_grh *grh, u8 port_num)
{
	struct ib_ah_attr ah_attr;
	int ret;

	ret = ib_init_ah_from_wc(pd->device, port_num, wc, grh, &ah_attr);
	if (ret)
		return ERR_PTR(ret);

	return ib_create_ah(pd, &ah_attr);
}
EXPORT_SYMBOL(ib_create_ah_from_wc);

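/*
 * Illustrative sketch (hypothetical UD responder; variable names assumed):
 * after polling a receive completion, an AH for the reply can be built
 * directly from the WC and the received GRH.
 *
 *	struct ib_ah *ah = ib_create_ah_from_wc(pd, &wc, grh, port_num);
 *
 *	if (IS_ERR(ah))
 *		return PTR_ERR(ah);
 *	... post the reply using this AH, then ib_destroy_ah(ah) ...
 */
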
int ib_modify_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr)
{
	return ah->device->modify_ah ?
		ah->device->modify_ah(ah, ah_attr) :
		-ENOSYS;
}
EXPORT_SYMBOL(ib_modify_ah);

int ib_query_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr)
{
	return ah->device->query_ah ?
		ah->device->query_ah(ah, ah_attr) :
		-ENOSYS;
}
EXPORT_SYMBOL(ib_query_ah);

int ib_destroy_ah(struct ib_ah *ah)
{
	struct ib_pd *pd;
	int ret;

	pd = ah->pd;
	ret = ah->device->destroy_ah(ah);
	if (!ret)
		atomic_dec(&pd->usecnt);

	return ret;
}
EXPORT_SYMBOL(ib_destroy_ah);

/* Shared receive queues */

struct ib_srq *ib_create_srq(struct ib_pd *pd,
			     struct ib_srq_init_attr *srq_init_attr)
{
	struct ib_srq *srq;

	if (!pd->device->create_srq)
		return ERR_PTR(-ENOSYS);

	srq = pd->device->create_srq(pd, srq_init_attr, NULL);

	if (!IS_ERR(srq)) {
		srq->device = pd->device;
		srq->pd = pd;
		srq->uobject = NULL;
		srq->event_handler = srq_init_attr->event_handler;
		srq->srq_context = srq_init_attr->srq_context;
		srq->srq_type = srq_init_attr->srq_type;
		if (srq->srq_type == IB_SRQT_XRC) {
			srq->ext.xrc.xrcd = srq_init_attr->ext.xrc.xrcd;
			srq->ext.xrc.cq = srq_init_attr->ext.xrc.cq;
			atomic_inc(&srq->ext.xrc.xrcd->usecnt);
			atomic_inc(&srq->ext.xrc.cq->usecnt);
		}
		atomic_inc(&pd->usecnt);
		atomic_set(&srq->usecnt, 0);
	}

	return srq;
}
EXPORT_SYMBOL(ib_create_srq);

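/*
 * Illustrative sketch (hypothetical sizing values): a basic SRQ is
 * described entirely by its init attributes before creation.
 *
 *	struct ib_srq_init_attr srq_attr = {
 *		.attr = {
 *			.max_wr  = 256,
 *			.max_sge = 1,
 *		},
 *	};
 *	struct ib_srq *srq = ib_create_srq(pd, &srq_attr);
 *
 *	if (IS_ERR(srq))
 *		return PTR_ERR(srq);
 */
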
int ib_modify_srq(struct ib_srq *srq,
		  struct ib_srq_attr *srq_attr,
		  enum ib_srq_attr_mask srq_attr_mask)
{
	return srq->device->modify_srq ?
		srq->device->modify_srq(srq, srq_attr, srq_attr_mask, NULL) :
		-ENOSYS;
}
EXPORT_SYMBOL(ib_modify_srq);

int ib_query_srq(struct ib_srq *srq,
		 struct ib_srq_attr *srq_attr)
{
	return srq->device->query_srq ?
		srq->device->query_srq(srq, srq_attr) : -ENOSYS;
}
EXPORT_SYMBOL(ib_query_srq);

int ib_destroy_srq(struct ib_srq *srq)
{
	struct ib_pd *pd;
	enum ib_srq_type srq_type;
	struct ib_xrcd *uninitialized_var(xrcd);
	struct ib_cq *uninitialized_var(cq);
	int ret;

	if (atomic_read(&srq->usecnt))
		return -EBUSY;

	pd = srq->pd;
	srq_type = srq->srq_type;
	if (srq_type == IB_SRQT_XRC) {
		xrcd = srq->ext.xrc.xrcd;
		cq = srq->ext.xrc.cq;
	}

	ret = srq->device->destroy_srq(srq);
	if (!ret) {
		atomic_dec(&pd->usecnt);
		if (srq_type == IB_SRQT_XRC) {
			atomic_dec(&xrcd->usecnt);
			atomic_dec(&cq->usecnt);
		}
	}

	return ret;
}
EXPORT_SYMBOL(ib_destroy_srq);

/* Queue pairs */

static void __ib_shared_qp_event_handler(struct ib_event *event, void *context)
{
	struct ib_qp *qp = context;
	unsigned long flags;

	spin_lock_irqsave(&qp->device->event_handler_lock, flags);
	list_for_each_entry(event->element.qp, &qp->open_list, open_list)
		if (event->element.qp->event_handler)
			event->element.qp->event_handler(event, event->element.qp->qp_context);
	spin_unlock_irqrestore(&qp->device->event_handler_lock, flags);
}

static void __ib_insert_xrcd_qp(struct ib_xrcd *xrcd, struct ib_qp *qp)
{
	mutex_lock(&xrcd->tgt_qp_mutex);
	list_add(&qp->xrcd_list, &xrcd->tgt_qp_list);
	mutex_unlock(&xrcd->tgt_qp_mutex);
}

static struct ib_qp *__ib_open_qp(struct ib_qp *real_qp,
				  void (*event_handler)(struct ib_event *, void *),
				  void *qp_context)
{
	struct ib_qp *qp;
	unsigned long flags;

	qp = kzalloc(sizeof *qp, GFP_KERNEL);
	if (!qp)
		return ERR_PTR(-ENOMEM);

	qp->real_qp = real_qp;
	atomic_inc(&real_qp->usecnt);
	qp->device = real_qp->device;
	qp->event_handler = event_handler;
	qp->qp_context = qp_context;
	qp->qp_num = real_qp->qp_num;
	qp->qp_type = real_qp->qp_type;

	spin_lock_irqsave(&real_qp->device->event_handler_lock, flags);
	list_add(&qp->open_list, &real_qp->open_list);
	spin_unlock_irqrestore(&real_qp->device->event_handler_lock, flags);

	return qp;
}

struct ib_qp *ib_open_qp(struct ib_xrcd *xrcd,
			 struct ib_qp_open_attr *qp_open_attr)
{
	struct ib_qp *qp, *real_qp;

	if (qp_open_attr->qp_type != IB_QPT_XRC_TGT)
		return ERR_PTR(-EINVAL);

	qp = ERR_PTR(-EINVAL);
	mutex_lock(&xrcd->tgt_qp_mutex);
	list_for_each_entry(real_qp, &xrcd->tgt_qp_list, xrcd_list) {
		if (real_qp->qp_num == qp_open_attr->qp_num) {
			qp = __ib_open_qp(real_qp, qp_open_attr->event_handler,
					  qp_open_attr->qp_context);
			break;
		}
	}
	mutex_unlock(&xrcd->tgt_qp_mutex);
	return qp;
}
EXPORT_SYMBOL(ib_open_qp);

struct ib_qp *ib_create_qp(struct ib_pd *pd,
			   struct ib_qp_init_attr *qp_init_attr)
{
	struct ib_qp *qp, *real_qp;
	struct ib_device *device;

	device = pd ? pd->device : qp_init_attr->xrcd->device;
	qp = device->create_qp(pd, qp_init_attr, NULL);

	if (!IS_ERR(qp)) {
		qp->device = device;
		qp->real_qp = qp;
		qp->uobject = NULL;
		qp->qp_type = qp_init_attr->qp_type;

		atomic_set(&qp->usecnt, 0);
		if (qp_init_attr->qp_type == IB_QPT_XRC_TGT) {
			qp->event_handler = __ib_shared_qp_event_handler;
			qp->qp_context = qp;
			qp->pd = NULL;
			qp->send_cq = qp->recv_cq = NULL;
			qp->srq = NULL;
			qp->xrcd = qp_init_attr->xrcd;
			atomic_inc(&qp_init_attr->xrcd->usecnt);
			INIT_LIST_HEAD(&qp->open_list);

			real_qp = qp;
			qp = __ib_open_qp(real_qp, qp_init_attr->event_handler,
					  qp_init_attr->qp_context);
			if (!IS_ERR(qp))
				__ib_insert_xrcd_qp(qp_init_attr->xrcd, real_qp);
			else
				real_qp->device->destroy_qp(real_qp);
		} else {
			qp->event_handler = qp_init_attr->event_handler;
			qp->qp_context = qp_init_attr->qp_context;
			if (qp_init_attr->qp_type == IB_QPT_XRC_INI) {
				qp->recv_cq = NULL;
				qp->srq = NULL;
			} else {
				qp->recv_cq = qp_init_attr->recv_cq;
				atomic_inc(&qp_init_attr->recv_cq->usecnt);
				qp->srq = qp_init_attr->srq;
				if (qp->srq)
					atomic_inc(&qp_init_attr->srq->usecnt);
			}

			qp->pd = pd;
			qp->send_cq = qp_init_attr->send_cq;
			qp->xrcd = NULL;

			atomic_inc(&pd->usecnt);
			atomic_inc(&qp_init_attr->send_cq->usecnt);
		}
	}

	return qp;
}
EXPORT_SYMBOL(ib_create_qp);

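/*
 * Illustrative sketch (hypothetical sizing; one CQ shared for send and
 * receive): a kernel consumer fills struct ib_qp_init_attr and calls
 * ib_create_qp() against a PD.
 *
 *	struct ib_qp_init_attr init_attr = {
 *		.send_cq = cq,
 *		.recv_cq = cq,
 *		.cap = {
 *			.max_send_wr  = 64,
 *			.max_recv_wr  = 64,
 *			.max_send_sge = 1,
 *			.max_recv_sge = 1,
 *		},
 *		.sq_sig_type = IB_SIGNAL_REQ_WR,
 *		.qp_type     = IB_QPT_RC,
 *	};
 *	struct ib_qp *qp = ib_create_qp(pd, &init_attr);
 *
 *	if (IS_ERR(qp))
 *		return PTR_ERR(qp);
 */
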
static const struct {
	int			valid;
	enum ib_qp_attr_mask	req_param[IB_QPT_MAX];
	enum ib_qp_attr_mask	opt_param[IB_QPT_MAX];
} qp_state_table[IB_QPS_ERR + 1][IB_QPS_ERR + 1] = {
	[IB_QPS_RESET] = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_INIT]  = {
			.valid = 1,
			.req_param = {
				[IB_QPT_UD]  = (IB_QP_PKEY_INDEX	|
						IB_QP_PORT		|
						IB_QP_QKEY),
				[IB_QPT_RAW_PACKET] = IB_QP_PORT,
				[IB_QPT_UC]  = (IB_QP_PKEY_INDEX	|
						IB_QP_PORT		|
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_RC]  = (IB_QP_PKEY_INDEX	|
						IB_QP_PORT		|
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_XRC_INI] = (IB_QP_PKEY_INDEX	|
						IB_QP_PORT		|
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_XRC_TGT] = (IB_QP_PKEY_INDEX	|
						IB_QP_PORT		|
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_SMI] = (IB_QP_PKEY_INDEX	|
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_PKEY_INDEX	|
						IB_QP_QKEY),
			}
		},
	},
	[IB_QPS_INIT]  = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR] =   { .valid = 1 },
		[IB_QPS_INIT]  = {
			.valid = 1,
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_PKEY_INDEX	|
						IB_QP_PORT		|
						IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_PKEY_INDEX	|
						IB_QP_PORT		|
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_RC]  = (IB_QP_PKEY_INDEX	|
						IB_QP_PORT		|
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_XRC_INI] = (IB_QP_PKEY_INDEX	|
						IB_QP_PORT		|
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_XRC_TGT] = (IB_QP_PKEY_INDEX	|
						IB_QP_PORT		|
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_SMI] = (IB_QP_PKEY_INDEX	|
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_PKEY_INDEX	|
						IB_QP_QKEY),
			}
		},
		[IB_QPS_RTR]   = {
			.valid = 1,
			.req_param = {
				[IB_QPT_UC]  = (IB_QP_AV		|
						IB_QP_PATH_MTU		|
						IB_QP_DEST_QPN		|
						IB_QP_RQ_PSN),
				[IB_QPT_RC]  = (IB_QP_AV		|
						IB_QP_PATH_MTU		|
						IB_QP_DEST_QPN		|
						IB_QP_RQ_PSN		|
						IB_QP_MAX_DEST_RD_ATOMIC |
						IB_QP_MIN_RNR_TIMER),
				[IB_QPT_XRC_INI] = (IB_QP_AV		|
						IB_QP_PATH_MTU		|
						IB_QP_DEST_QPN		|
						IB_QP_RQ_PSN),
				[IB_QPT_XRC_TGT] = (IB_QP_AV		|
						IB_QP_PATH_MTU		|
						IB_QP_DEST_QPN		|
						IB_QP_RQ_PSN		|
						IB_QP_MAX_DEST_RD_ATOMIC |
						IB_QP_MIN_RNR_TIMER),
			},
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_PKEY_INDEX	|
						IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_ALT_PATH		|
						IB_QP_ACCESS_FLAGS	|
						IB_QP_PKEY_INDEX),
				[IB_QPT_RC]  = (IB_QP_ALT_PATH		|
						IB_QP_ACCESS_FLAGS	|
						IB_QP_PKEY_INDEX),
				[IB_QPT_XRC_INI] = (IB_QP_ALT_PATH	|
						IB_QP_ACCESS_FLAGS	|
						IB_QP_PKEY_INDEX),
				[IB_QPT_XRC_TGT] = (IB_QP_ALT_PATH	|
						IB_QP_ACCESS_FLAGS	|
						IB_QP_PKEY_INDEX),
				[IB_QPT_SMI] = (IB_QP_PKEY_INDEX	|
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_PKEY_INDEX	|
						IB_QP_QKEY),
			},
		},
	},
	[IB_QPS_RTR]   = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR] =   { .valid = 1 },
		[IB_QPS_RTS]   = {
			.valid = 1,
			.req_param = {
				[IB_QPT_UD]  = IB_QP_SQ_PSN,
				[IB_QPT_UC]  = IB_QP_SQ_PSN,
				[IB_QPT_RC]  = (IB_QP_TIMEOUT		|
						IB_QP_RETRY_CNT		|
						IB_QP_RNR_RETRY		|
						IB_QP_SQ_PSN		|
						IB_QP_MAX_QP_RD_ATOMIC),
				[IB_QPT_XRC_INI] = (IB_QP_TIMEOUT	|
						IB_QP_RETRY_CNT		|
						IB_QP_RNR_RETRY		|
						IB_QP_SQ_PSN		|
						IB_QP_MAX_QP_RD_ATOMIC),
				[IB_QPT_XRC_TGT] = (IB_QP_TIMEOUT	|
						IB_QP_SQ_PSN),
				[IB_QPT_SMI] = IB_QP_SQ_PSN,
				[IB_QPT_GSI] = IB_QP_SQ_PSN,
			},
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_CUR_STATE		|
						IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_CUR_STATE		|
						IB_QP_ALT_PATH		|
						IB_QP_ACCESS_FLAGS	|
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_RC]  = (IB_QP_CUR_STATE		|
						IB_QP_ALT_PATH		|
						IB_QP_ACCESS_FLAGS	|
						IB_QP_MIN_RNR_TIMER	|
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_XRC_INI] = (IB_QP_CUR_STATE	|
						IB_QP_ALT_PATH		|
						IB_QP_ACCESS_FLAGS	|
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_XRC_TGT] = (IB_QP_CUR_STATE	|
						IB_QP_ALT_PATH		|
						IB_QP_ACCESS_FLAGS	|
						IB_QP_MIN_RNR_TIMER	|
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_SMI] = (IB_QP_CUR_STATE		|
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_CUR_STATE		|
						IB_QP_QKEY),
			}
		}
	},
	[IB_QPS_RTS]   = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR] =   { .valid = 1 },
		[IB_QPS_RTS]   = {
			.valid = 1,
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_CUR_STATE		|
						IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_CUR_STATE		|
						IB_QP_ACCESS_FLAGS	|
						IB_QP_ALT_PATH		|
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_RC]  = (IB_QP_CUR_STATE		|
						IB_QP_ACCESS_FLAGS	|
						IB_QP_ALT_PATH		|
						IB_QP_PATH_MIG_STATE	|
						IB_QP_MIN_RNR_TIMER),
				[IB_QPT_XRC_INI] = (IB_QP_CUR_STATE	|
						IB_QP_ACCESS_FLAGS	|
						IB_QP_ALT_PATH		|
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_XRC_TGT] = (IB_QP_CUR_STATE	|
						IB_QP_ACCESS_FLAGS	|
						IB_QP_ALT_PATH		|
						IB_QP_PATH_MIG_STATE	|
						IB_QP_MIN_RNR_TIMER),
				[IB_QPT_SMI] = (IB_QP_CUR_STATE		|
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_CUR_STATE		|
						IB_QP_QKEY),
			}
		},
		[IB_QPS_SQD]   = {
			.valid = 1,
			.opt_param = {
				[IB_QPT_UD]  = IB_QP_EN_SQD_ASYNC_NOTIFY,
				[IB_QPT_UC]  = IB_QP_EN_SQD_ASYNC_NOTIFY,
				[IB_QPT_RC]  = IB_QP_EN_SQD_ASYNC_NOTIFY,
				[IB_QPT_XRC_INI] = IB_QP_EN_SQD_ASYNC_NOTIFY,
				[IB_QPT_XRC_TGT] = IB_QP_EN_SQD_ASYNC_NOTIFY, /* ??? */
				[IB_QPT_SMI] = IB_QP_EN_SQD_ASYNC_NOTIFY,
				[IB_QPT_GSI] = IB_QP_EN_SQD_ASYNC_NOTIFY
			}
		},
	},
	[IB_QPS_SQD]   = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR] =   { .valid = 1 },
		[IB_QPS_RTS]   = {
			.valid = 1,
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_CUR_STATE		|
						IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_CUR_STATE		|
						IB_QP_ALT_PATH		|
						IB_QP_ACCESS_FLAGS	|
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_RC]  = (IB_QP_CUR_STATE		|
						IB_QP_ALT_PATH		|
						IB_QP_ACCESS_FLAGS	|
						IB_QP_MIN_RNR_TIMER	|
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_XRC_INI] = (IB_QP_CUR_STATE	|
						IB_QP_ALT_PATH		|
						IB_QP_ACCESS_FLAGS	|
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_XRC_TGT] = (IB_QP_CUR_STATE	|
						IB_QP_ALT_PATH		|
						IB_QP_ACCESS_FLAGS	|
						IB_QP_MIN_RNR_TIMER	|
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_SMI] = (IB_QP_CUR_STATE		|
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_CUR_STATE		|
						IB_QP_QKEY),
			}
		},
		[IB_QPS_SQD]   = {
			.valid = 1,
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_PKEY_INDEX	|
						IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_AV		|
						IB_QP_ALT_PATH		|
						IB_QP_ACCESS_FLAGS	|
						IB_QP_PKEY_INDEX	|
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_RC]  = (IB_QP_PORT		|
						IB_QP_AV		|
						IB_QP_TIMEOUT		|
						IB_QP_RETRY_CNT		|
						IB_QP_RNR_RETRY		|
						IB_QP_MAX_QP_RD_ATOMIC	|
						IB_QP_MAX_DEST_RD_ATOMIC |
						IB_QP_ALT_PATH		|
						IB_QP_ACCESS_FLAGS	|
						IB_QP_PKEY_INDEX	|
						IB_QP_MIN_RNR_TIMER	|
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_XRC_INI] = (IB_QP_PORT		|
						IB_QP_AV		|
						IB_QP_TIMEOUT		|
						IB_QP_RETRY_CNT		|
						IB_QP_RNR_RETRY		|
						IB_QP_MAX_QP_RD_ATOMIC	|
						IB_QP_ALT_PATH		|
						IB_QP_ACCESS_FLAGS	|
						IB_QP_PKEY_INDEX	|
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_XRC_TGT] = (IB_QP_PORT		|
						IB_QP_AV		|
						IB_QP_TIMEOUT		|
						IB_QP_MAX_DEST_RD_ATOMIC |
						IB_QP_ALT_PATH		|
						IB_QP_ACCESS_FLAGS	|
						IB_QP_PKEY_INDEX	|
						IB_QP_MIN_RNR_TIMER	|
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_SMI] = (IB_QP_PKEY_INDEX	|
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_PKEY_INDEX	|
						IB_QP_QKEY),
			}
		}
	},
	[IB_QPS_SQE]   = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR] =   { .valid = 1 },
		[IB_QPS_RTS]   = {
			.valid = 1,
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_CUR_STATE		|
						IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_CUR_STATE		|
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_SMI] = (IB_QP_CUR_STATE		|
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_CUR_STATE		|
						IB_QP_QKEY),
			}
		}
	},
	[IB_QPS_ERR] = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR] =   { .valid = 1 }
	}
};

int ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
		       enum ib_qp_type type, enum ib_qp_attr_mask mask,
		       enum rdma_link_layer ll)
{
	enum ib_qp_attr_mask req_param, opt_param;

	if (cur_state  < 0 || cur_state  > IB_QPS_ERR ||
	    next_state < 0 || next_state > IB_QPS_ERR)
		return 0;

	if (mask & IB_QP_CUR_STATE &&
	    cur_state != IB_QPS_RTR && cur_state != IB_QPS_RTS &&
	    cur_state != IB_QPS_SQD && cur_state != IB_QPS_SQE)
		return 0;

	if (!qp_state_table[cur_state][next_state].valid)
		return 0;

	req_param = qp_state_table[cur_state][next_state].req_param[type];
	opt_param = qp_state_table[cur_state][next_state].opt_param[type];

	if ((mask & req_param) != req_param)
		return 0;

	if (mask & ~(req_param | opt_param | IB_QP_STATE))
		return 0;

	return 1;
}
EXPORT_SYMBOL(ib_modify_qp_is_ok);

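/*
 * Illustrative sketch (hypothetical mask values): validating an
 * INIT -> RTR transition for an RC QP on an InfiniBand link before
 * passing the mask to the driver.
 *
 *	if (!ib_modify_qp_is_ok(IB_QPS_INIT, IB_QPS_RTR, IB_QPT_RC,
 *				IB_QP_STATE | IB_QP_AV | IB_QP_PATH_MTU |
 *				IB_QP_DEST_QPN | IB_QP_RQ_PSN |
 *				IB_QP_MAX_DEST_RD_ATOMIC |
 *				IB_QP_MIN_RNR_TIMER,
 *				IB_LINK_LAYER_INFINIBAND))
 *		return -EINVAL;
 */
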
int ib_resolve_eth_dmac(struct ib_qp *qp,
			struct ib_qp_attr *qp_attr, int *qp_attr_mask)
{
	int ret = 0;

	if (*qp_attr_mask & IB_QP_AV) {
		if (qp_attr->ah_attr.port_num < rdma_start_port(qp->device) ||
		    qp_attr->ah_attr.port_num > rdma_end_port(qp->device))
			return -EINVAL;

		if (!rdma_cap_eth_ah(qp->device, qp_attr->ah_attr.port_num))
			return 0;

		if (rdma_link_local_addr((struct in6_addr *)qp_attr->ah_attr.grh.dgid.raw)) {
			rdma_get_ll_mac((struct in6_addr *)qp_attr->ah_attr.grh.dgid.raw,
					qp_attr->ah_attr.dmac);
		} else {
			union ib_gid		sgid;
			struct ib_gid_attr	sgid_attr;
			int			ifindex;

			ret = ib_query_gid(qp->device,
					   qp_attr->ah_attr.port_num,
					   qp_attr->ah_attr.grh.sgid_index,
					   &sgid, &sgid_attr);

			if (ret || !sgid_attr.ndev) {
				if (!ret)
					ret = -ENXIO;
				goto out;
			}

			ifindex = sgid_attr.ndev->ifindex;

			ret = rdma_addr_find_dmac_by_grh(&sgid,
							 &qp_attr->ah_attr.grh.dgid,
							 qp_attr->ah_attr.dmac,
							 NULL, ifindex);

			dev_put(sgid_attr.ndev);
		}
	}
out:
	return ret;
}
EXPORT_SYMBOL(ib_resolve_eth_dmac);

int ib_modify_qp(struct ib_qp *qp,
		 struct ib_qp_attr *qp_attr,
		 int qp_attr_mask)
{
	int ret;

	ret = ib_resolve_eth_dmac(qp, qp_attr, &qp_attr_mask);
	if (ret)
		return ret;

	return qp->device->modify_qp(qp->real_qp, qp_attr, qp_attr_mask, NULL);
}
EXPORT_SYMBOL(ib_modify_qp);

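/*
 * Illustrative sketch (hypothetical port/pkey values): moving a freshly
 * created QP from RESET to INIT.
 *
 *	struct ib_qp_attr attr = {
 *		.qp_state        = IB_QPS_INIT,
 *		.pkey_index      = 0,
 *		.port_num        = 1,
 *		.qp_access_flags = IB_ACCESS_REMOTE_WRITE,
 *	};
 *
 *	ret = ib_modify_qp(qp, &attr,
 *			   IB_QP_STATE | IB_QP_PKEY_INDEX |
 *			   IB_QP_PORT | IB_QP_ACCESS_FLAGS);
 */
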
int ib_query_qp(struct ib_qp *qp,
		struct ib_qp_attr *qp_attr,
		int qp_attr_mask,
		struct ib_qp_init_attr *qp_init_attr)
{
	return qp->device->query_qp ?
		qp->device->query_qp(qp->real_qp, qp_attr, qp_attr_mask, qp_init_attr) :
		-ENOSYS;
}
EXPORT_SYMBOL(ib_query_qp);

int ib_close_qp(struct ib_qp *qp)
{
	struct ib_qp *real_qp;
	unsigned long flags;

	real_qp = qp->real_qp;
	if (real_qp == qp)
		return -EINVAL;

	spin_lock_irqsave(&real_qp->device->event_handler_lock, flags);
	list_del(&qp->open_list);
	spin_unlock_irqrestore(&real_qp->device->event_handler_lock, flags);

	atomic_dec(&real_qp->usecnt);
	kfree(qp);

	return 0;
}
EXPORT_SYMBOL(ib_close_qp);

static int __ib_destroy_shared_qp(struct ib_qp *qp)
{
	struct ib_xrcd *xrcd;
	struct ib_qp *real_qp;
	int ret;

	real_qp = qp->real_qp;
	xrcd = real_qp->xrcd;

	mutex_lock(&xrcd->tgt_qp_mutex);
	ib_close_qp(qp);
	if (atomic_read(&real_qp->usecnt) == 0)
		list_del(&real_qp->xrcd_list);
	else
		real_qp = NULL;
	mutex_unlock(&xrcd->tgt_qp_mutex);

	if (real_qp) {
		ret = ib_destroy_qp(real_qp);
		if (!ret)
			atomic_dec(&xrcd->usecnt);
		else
			__ib_insert_xrcd_qp(xrcd, real_qp);
	}

	return 0;
}

int ib_destroy_qp(struct ib_qp *qp)
{
	struct ib_pd *pd;
	struct ib_cq *scq, *rcq;
	struct ib_srq *srq;
	int ret;

	if (atomic_read(&qp->usecnt))
		return -EBUSY;

	if (qp->real_qp != qp)
		return __ib_destroy_shared_qp(qp);

	pd = qp->pd;
	scq = qp->send_cq;
	rcq = qp->recv_cq;
	srq = qp->srq;

	ret = qp->device->destroy_qp(qp);
	if (!ret) {
		if (pd)
			atomic_dec(&pd->usecnt);
		if (scq)
			atomic_dec(&scq->usecnt);
		if (rcq)
			atomic_dec(&rcq->usecnt);
		if (srq)
			atomic_dec(&srq->usecnt);
	}

	return ret;
}
EXPORT_SYMBOL(ib_destroy_qp);

/* Completion queues */

struct ib_cq *ib_create_cq(struct ib_device *device,
			   ib_comp_handler comp_handler,
			   void (*event_handler)(struct ib_event *, void *),
			   void *cq_context,
			   const struct ib_cq_init_attr *cq_attr)
{
	struct ib_cq *cq;

	cq = device->create_cq(device, cq_attr, NULL, NULL);

	if (!IS_ERR(cq)) {
		cq->device = device;
		cq->uobject = NULL;
		cq->comp_handler = comp_handler;
		cq->event_handler = event_handler;
		cq->cq_context = cq_context;
		atomic_set(&cq->usecnt, 0);
	}

	return cq;
}
EXPORT_SYMBOL(ib_create_cq);

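/*
 * Illustrative sketch (hypothetical handler and sizing): create a CQ
 * with 256 entries on completion vector 0.
 *
 *	struct ib_cq_init_attr cq_attr = {
 *		.cqe         = 256,
 *		.comp_vector = 0,
 *	};
 *	struct ib_cq *cq = ib_create_cq(device, example_comp_handler,
 *					NULL, my_context, &cq_attr);
 *
 *	if (IS_ERR(cq))
 *		return PTR_ERR(cq);
 */
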
int ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
{
	return cq->device->modify_cq ?
		cq->device->modify_cq(cq, cq_count, cq_period) : -ENOSYS;
}
EXPORT_SYMBOL(ib_modify_cq);

int ib_destroy_cq(struct ib_cq *cq)
{
	if (atomic_read(&cq->usecnt))
		return -EBUSY;

	return cq->device->destroy_cq(cq);
}
EXPORT_SYMBOL(ib_destroy_cq);

int ib_resize_cq(struct ib_cq *cq, int cqe)
{
	return cq->device->resize_cq ?
		cq->device->resize_cq(cq, cqe, NULL) : -ENOSYS;
}
EXPORT_SYMBOL(ib_resize_cq);

/* Memory regions */

struct ib_mr *ib_get_dma_mr(struct ib_pd *pd, int mr_access_flags)
{
	struct ib_mr *mr;
	int err;

	err = ib_check_mr_access(mr_access_flags);
	if (err)
		return ERR_PTR(err);

	mr = pd->device->get_dma_mr(pd, mr_access_flags);

	if (!IS_ERR(mr)) {
		mr->device = pd->device;
		mr->pd = pd;
		mr->uobject = NULL;
		atomic_inc(&pd->usecnt);
		atomic_set(&mr->usecnt, 0);
	}

	return mr;
}
EXPORT_SYMBOL(ib_get_dma_mr);

int ib_query_mr(struct ib_mr *mr, struct ib_mr_attr *mr_attr)
{
	return mr->device->query_mr ?
		mr->device->query_mr(mr, mr_attr) : -ENOSYS;
}
EXPORT_SYMBOL(ib_query_mr);

int ib_dereg_mr(struct ib_mr *mr)
{
	struct ib_pd *pd;
	int ret;

	if (atomic_read(&mr->usecnt))
		return -EBUSY;

	pd = mr->pd;
	ret = mr->device->dereg_mr(mr);
	if (!ret)
		atomic_dec(&pd->usecnt);

	return ret;
}
EXPORT_SYMBOL(ib_dereg_mr);

/**
 * ib_alloc_mr() - Allocates a memory region
 * @pd:            protection domain associated with the region
 * @mr_type:       memory region type
 * @max_num_sg:    maximum sg entries available for registration.
 *
 * Notes:
 * Memory registration page/sg lists must not exceed max_num_sg.
 * For mr_type IB_MR_TYPE_MEM_REG, the total length cannot exceed
 * max_num_sg * used_page_size.
 *
 */
struct ib_mr *ib_alloc_mr(struct ib_pd *pd,
			  enum ib_mr_type mr_type,
			  u32 max_num_sg)
{
	struct ib_mr *mr;

	if (!pd->device->alloc_mr)
		return ERR_PTR(-ENOSYS);

	mr = pd->device->alloc_mr(pd, mr_type, max_num_sg);
	if (!IS_ERR(mr)) {
		mr->device = pd->device;
		mr->pd = pd;
		mr->uobject = NULL;
		atomic_inc(&pd->usecnt);
		atomic_set(&mr->usecnt, 0);
	}

	return mr;
}
EXPORT_SYMBOL(ib_alloc_mr);

/* Memory windows */

struct ib_mw *ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type)
{
	struct ib_mw *mw;

	if (!pd->device->alloc_mw)
		return ERR_PTR(-ENOSYS);

	mw = pd->device->alloc_mw(pd, type);
	if (!IS_ERR(mw)) {
		mw->device = pd->device;
		mw->pd = pd;
		mw->uobject = NULL;
		mw->type = type;
		atomic_inc(&pd->usecnt);
	}

	return mw;
}
EXPORT_SYMBOL(ib_alloc_mw);

int ib_dealloc_mw(struct ib_mw *mw)
{
	struct ib_pd *pd;
	int ret;

	pd = mw->pd;
	ret = mw->device->dealloc_mw(mw);
	if (!ret)
		atomic_dec(&pd->usecnt);

	return ret;
}
EXPORT_SYMBOL(ib_dealloc_mw);

/* "Fast" memory regions */

struct ib_fmr *ib_alloc_fmr(struct ib_pd *pd,
			    int mr_access_flags,
			    struct ib_fmr_attr *fmr_attr)
{
	struct ib_fmr *fmr;

	if (!pd->device->alloc_fmr)
		return ERR_PTR(-ENOSYS);

	fmr = pd->device->alloc_fmr(pd, mr_access_flags, fmr_attr);
	if (!IS_ERR(fmr)) {
		fmr->device = pd->device;
		fmr->pd = pd;
		atomic_inc(&pd->usecnt);
	}

	return fmr;
}
EXPORT_SYMBOL(ib_alloc_fmr);

int ib_unmap_fmr(struct list_head *fmr_list)
{
	struct ib_fmr *fmr;

	if (list_empty(fmr_list))
		return 0;

	fmr = list_entry(fmr_list->next, struct ib_fmr, list);
	return fmr->device->unmap_fmr(fmr_list);
}
EXPORT_SYMBOL(ib_unmap_fmr);

int ib_dealloc_fmr(struct ib_fmr *fmr)
{
	struct ib_pd *pd;
	int ret;

	pd = fmr->pd;
	ret = fmr->device->dealloc_fmr(fmr);
	if (!ret)
		atomic_dec(&pd->usecnt);

	return ret;
}
EXPORT_SYMBOL(ib_dealloc_fmr);

/* Multicast groups */

int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
{
	int ret;

	if (!qp->device->attach_mcast)
		return -ENOSYS;
	if (gid->raw[0] != 0xff || qp->qp_type != IB_QPT_UD)
		return -EINVAL;

	ret = qp->device->attach_mcast(qp, gid, lid);
	if (!ret)
		atomic_inc(&qp->usecnt);
	return ret;
}
EXPORT_SYMBOL(ib_attach_mcast);

int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
{
	int ret;

	if (!qp->device->detach_mcast)
		return -ENOSYS;
	if (gid->raw[0] != 0xff || qp->qp_type != IB_QPT_UD)
		return -EINVAL;

	ret = qp->device->detach_mcast(qp, gid, lid);
	if (!ret)
		atomic_dec(&qp->usecnt);
	return ret;
}
EXPORT_SYMBOL(ib_detach_mcast);

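/*
 * Illustrative sketch (hypothetical MGID/MLID values, e.g. obtained from
 * an SA multicast join): attaching and later detaching a UD QP.
 *
 *	ret = ib_attach_mcast(qp, &mgid, mlid);
 *	if (ret)
 *		return ret;
 *	... receive multicast traffic on the QP ...
 *	ret = ib_detach_mcast(qp, &mgid, mlid);
 */
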
struct ib_xrcd *ib_alloc_xrcd(struct ib_device *device)
{
	struct ib_xrcd *xrcd;

	if (!device->alloc_xrcd)
		return ERR_PTR(-ENOSYS);

	xrcd = device->alloc_xrcd(device, NULL, NULL);
	if (!IS_ERR(xrcd)) {
		xrcd->device = device;
		xrcd->inode = NULL;
		atomic_set(&xrcd->usecnt, 0);
		mutex_init(&xrcd->tgt_qp_mutex);
		INIT_LIST_HEAD(&xrcd->tgt_qp_list);
	}

	return xrcd;
}
EXPORT_SYMBOL(ib_alloc_xrcd);

int ib_dealloc_xrcd(struct ib_xrcd *xrcd)
{
	struct ib_qp *qp;
	int ret;

	if (atomic_read(&xrcd->usecnt))
		return -EBUSY;

	while (!list_empty(&xrcd->tgt_qp_list)) {
		qp = list_entry(xrcd->tgt_qp_list.next, struct ib_qp, xrcd_list);
		ret = ib_destroy_qp(qp);
		if (ret)
			return ret;
	}

	return xrcd->device->dealloc_xrcd(xrcd);
}
EXPORT_SYMBOL(ib_dealloc_xrcd);

struct ib_flow *ib_create_flow(struct ib_qp *qp,
			       struct ib_flow_attr *flow_attr,
			       int domain)
{
	struct ib_flow *flow_id;

	if (!qp->device->create_flow)
		return ERR_PTR(-ENOSYS);

	flow_id = qp->device->create_flow(qp, flow_attr, domain);
	if (!IS_ERR(flow_id))
		atomic_inc(&qp->usecnt);
	return flow_id;
}
EXPORT_SYMBOL(ib_create_flow);

int ib_destroy_flow(struct ib_flow *flow_id)
{
	int err;
	struct ib_qp *qp = flow_id->qp;

	err = qp->device->destroy_flow(flow_id);
	if (!err)
		atomic_dec(&qp->usecnt);
	return err;
}
EXPORT_SYMBOL(ib_destroy_flow);

int ib_check_mr_status(struct ib_mr *mr, u32 check_mask,
		       struct ib_mr_status *mr_status)
{
	return mr->device->check_mr_status ?
		mr->device->check_mr_status(mr, check_mask, mr_status) : -ENOSYS;
}
EXPORT_SYMBOL(ib_check_mr_status);

/**
 * ib_map_mr_sg() - Map the largest prefix of a dma mapped SG list
 *     and set it as the memory region.
 * @mr:            memory region
 * @sg:            dma mapped scatterlist
 * @sg_nents:      number of entries in sg
 * @page_size:     page vector desired page size
 *
 * Constraints:
 * - The first sg element is allowed to have an offset.
 * - Each sg element must be aligned to page_size (or physically
 *   contiguous to the previous element). In case an sg element has a
 *   non-contiguous offset, the mapping prefix will not include it.
 * - The last sg element is allowed to have length less than page_size.
 * - If sg_nents total byte length exceeds the mr max_num_sg * page_size
 *   then only max_num_sg entries will be mapped.
 *
 * Returns the number of sg elements that were mapped to the memory region.
 *
 * After this completes successfully, the memory region
 * is ready for registration.
 */
int ib_map_mr_sg(struct ib_mr *mr,
		 struct scatterlist *sg,
		 int sg_nents,
		 unsigned int page_size)
{
	if (unlikely(!mr->device->map_mr_sg))
		return -ENOSYS;

	mr->page_size = page_size;

	return mr->device->map_mr_sg(mr, sg, sg_nents);
}
EXPORT_SYMBOL(ib_map_mr_sg);

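/*
 * Illustrative sketch (hypothetical names; assumes the scatterlist has
 * already been DMA mapped): map a scatterlist into an MR allocated with
 * ib_alloc_mr().
 *
 *	int n = ib_map_mr_sg(mr, sgl, nents, PAGE_SIZE);
 *
 *	if (n < nents)
 *		return -EINVAL;	// or register the partial prefix
 */
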
/**
 * ib_sg_to_pages() - Convert the largest prefix of a sg list
 *     to a page vector
 * @mr:            memory region
 * @sgl:           dma mapped scatterlist
 * @sg_nents:      number of entries in sg
 * @set_page:      driver page assignment function pointer
 *
 * Core service helper for drivers to convert the largest
 * prefix of given sg list to a page vector. The sg list
 * prefix converted is the prefix that meets the requirements
 * of ib_map_mr_sg.
 *
 * Returns the number of sg elements that were assigned to
 * a page vector.
 */
int ib_sg_to_pages(struct ib_mr *mr,
		   struct scatterlist *sgl,
		   int sg_nents,
		   int (*set_page)(struct ib_mr *, u64))
{
	struct scatterlist *sg;
	u64 last_end_dma_addr = 0, last_page_addr = 0;
	unsigned int last_page_off = 0;
	u64 page_mask = ~((u64)mr->page_size - 1);
	int i, ret;

	mr->iova = sg_dma_address(&sgl[0]);
	mr->length = 0;

	for_each_sg(sgl, sg, sg_nents, i) {
		u64 dma_addr = sg_dma_address(sg);
		unsigned int dma_len = sg_dma_len(sg);
		u64 end_dma_addr = dma_addr + dma_len;
		u64 page_addr = dma_addr & page_mask;

		/*
		 * For the second and later elements, check whether either the
		 * end of element i-1 or the start of element i is not aligned
		 * on a page boundary.
		 */
		if (i && (last_page_off != 0 || page_addr != dma_addr)) {
			/* Stop mapping if there is a gap. */
			if (last_end_dma_addr != dma_addr)
				break;

			/*
			 * Coalesce this element with the last. If it is small
			 * enough just update mr->length. Otherwise start
			 * mapping from the next page.
			 */
			goto next_page;
		}

		do {
			ret = set_page(mr, page_addr);
			if (unlikely(ret < 0))
				return i ? : ret;
next_page:
			page_addr += mr->page_size;
		} while (page_addr < end_dma_addr);

		mr->length += dma_len;
		last_end_dma_addr = end_dma_addr;
		last_page_addr = end_dma_addr & page_mask;
		last_page_off = end_dma_addr & ~page_mask;
	}

	return i;
}
EXPORT_SYMBOL(ib_sg_to_pages);
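/*
 * Illustrative sketch (hypothetical driver types and helpers): a
 * driver-side set_page callback passed to ib_sg_to_pages() typically
 * stores each page address into the driver's private page list.
 *
 *	static int example_set_page(struct ib_mr *ibmr, u64 addr)
 *	{
 *		struct example_mr *mr = to_example_mr(ibmr);
 *
 *		if (unlikely(mr->npages == mr->max_pages))
 *			return -ENOMEM;
 *
 *		mr->pages[mr->npages++] = addr;
 *		return 0;
 *	}
 *
 * A driver's map_mr_sg method would then simply be:
 *
 *	return ib_sg_to_pages(ibmr, sg, sg_nents, example_set_page);
 */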