blob: 0caf465f317c28b5191d207eb9622ffa517220c5 [file] [log] [blame]
Sean Heftye51060f2006-06-17 20:37:29 -07001/*
2 * Copyright (c) 2005 Voltaire Inc. All rights reserved.
3 * Copyright (c) 2002-2005, Network Appliance, Inc. All rights reserved.
4 * Copyright (c) 1999-2005, Mellanox Technologies, Inc. All rights reserved.
5 * Copyright (c) 2005-2006 Intel Corporation. All rights reserved.
6 *
Sean Heftya9474912008-07-14 23:48:43 -07007 * This software is available to you under a choice of one of two
8 * licenses. You may choose to be licensed under the terms of the GNU
9 * General Public License (GPL) Version 2, available from the file
10 * COPYING in the main directory of this source tree, or the
11 * OpenIB.org BSD license below:
Sean Heftye51060f2006-06-17 20:37:29 -070012 *
Sean Heftya9474912008-07-14 23:48:43 -070013 * Redistribution and use in source and binary forms, with or
14 * without modification, are permitted provided that the following
15 * conditions are met:
Sean Heftye51060f2006-06-17 20:37:29 -070016 *
Sean Heftya9474912008-07-14 23:48:43 -070017 * - Redistributions of source code must retain the above
18 * copyright notice, this list of conditions and the following
19 * disclaimer.
Sean Heftye51060f2006-06-17 20:37:29 -070020 *
Sean Heftya9474912008-07-14 23:48:43 -070021 * - Redistributions in binary form must reproduce the above
22 * copyright notice, this list of conditions and the following
23 * disclaimer in the documentation and/or other materials
24 * provided with the distribution.
Sean Heftye51060f2006-06-17 20:37:29 -070025 *
Sean Heftya9474912008-07-14 23:48:43 -070026 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
27 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
28 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
29 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
30 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
31 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
32 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
33 * SOFTWARE.
Sean Heftye51060f2006-06-17 20:37:29 -070034 */
35
36#include <linux/completion.h>
37#include <linux/in.h>
38#include <linux/in6.h>
39#include <linux/mutex.h>
40#include <linux/random.h>
41#include <linux/idr.h>
Tom Tucker07ebafb2006-08-03 16:02:42 -050042#include <linux/inetdevice.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090043#include <linux/slab.h>
Paul Gortmakere4dd23d2011-05-27 15:35:46 -040044#include <linux/module.h>
Amir Vadai366cddb2012-04-04 21:33:29 +000045#include <net/route.h>
Sean Heftye51060f2006-06-17 20:37:29 -070046
47#include <net/tcp.h>
Aleksey Senin1f5175a2008-12-24 10:16:45 -080048#include <net/ipv6.h>
Sean Heftye51060f2006-06-17 20:37:29 -070049
50#include <rdma/rdma_cm.h>
51#include <rdma/rdma_cm_ib.h>
Nir Muchtar753f6182011-01-03 15:33:53 +000052#include <rdma/rdma_netlink.h>
Sean Hefty2e2d1902013-05-29 10:09:09 -070053#include <rdma/ib.h>
Sean Heftye51060f2006-06-17 20:37:29 -070054#include <rdma/ib_cache.h>
55#include <rdma/ib_cm.h>
56#include <rdma/ib_sa.h>
Tom Tucker07ebafb2006-08-03 16:02:42 -050057#include <rdma/iw_cm.h>
Sean Heftye51060f2006-06-17 20:37:29 -070058
59MODULE_AUTHOR("Sean Hefty");
60MODULE_DESCRIPTION("Generic RDMA CM Agent");
61MODULE_LICENSE("Dual BSD/GPL");
62
63#define CMA_CM_RESPONSE_TIMEOUT 20
Michael S. Tsirkind5bb7592006-09-13 15:01:54 +030064#define CMA_MAX_CM_RETRIES 15
Sean Heftydcb3f972007-08-01 14:47:16 -070065#define CMA_CM_MRA_SETTING (IB_CM_MRA_FLAG_DELAY | 24)
Eli Cohen3c86aa72010-10-13 21:26:51 +020066#define CMA_IBOE_PACKET_LIFETIME 18
Sean Heftye51060f2006-06-17 20:37:29 -070067
68static void cma_add_one(struct ib_device *device);
69static void cma_remove_one(struct ib_device *device);
70
/* Registration with the IB core: cma_add_one/cma_remove_one run for each
 * RDMA device as it appears/disappears. */
static struct ib_client cma_client = {
	.name   = "cma",
	.add    = cma_add_one,
	.remove = cma_remove_one
};
76
Michael S. Tsirkinc1a0b232006-08-21 16:40:12 -070077static struct ib_sa_client sa_client;
Sean Hefty7a118df2006-10-31 11:12:59 -080078static struct rdma_addr_client addr_client;
Sean Heftye51060f2006-06-17 20:37:29 -070079static LIST_HEAD(dev_list);
80static LIST_HEAD(listen_any_list);
81static DEFINE_MUTEX(lock);
82static struct workqueue_struct *cma_wq;
Sean Heftye51060f2006-06-17 20:37:29 -070083static DEFINE_IDR(tcp_ps);
Sean Hefty628e5f62006-11-30 16:44:16 -080084static DEFINE_IDR(udp_ps);
Sean Heftyc8f6a362007-02-15 17:00:18 -080085static DEFINE_IDR(ipoib_ps);
Sean Hefty2d2e9412011-05-28 21:56:39 -070086static DEFINE_IDR(ib_ps);
Sean Heftye51060f2006-06-17 20:37:29 -070087
/* Per-IB-device state tracked by the CMA. */
struct cma_device {
	struct list_head	list;		/* entry in global dev_list */
	struct ib_device	*device;
	struct completion	comp;		/* fired when refcount hits zero */
	atomic_t		refcount;
	struct list_head	id_list;	/* rdma_id_privates attached to this device */
};
95
/* One entry per bound port within a port space (tcp_ps/udp_ps/...). */
struct rdma_bind_list {
	struct idr		*ps;		/* owning port-space idr */
	struct hlist_head	owners;		/* ids sharing this port */
	unsigned short		port;
};
101
/* Bit positions for rdma_id_private.options. */
enum {
	CMA_OPTION_AFONLY,
};
105
Sean Heftye51060f2006-06-17 20:37:29 -0700106/*
107 * Device removal can occur at anytime, so we need extra handling to
108 * serialize notifying the user of device removal with other callbacks.
109 * We do this by disabling removal notification while a callback is in process,
110 * and reporting it after the callback completes.
111 */
/* Private state behind a public rdma_cm_id. */
struct rdma_id_private {
	struct rdma_cm_id	id;

	struct rdma_bind_list	*bind_list;
	struct hlist_node	node;		/* entry in bind_list->owners */
	struct list_head	list; /* listen_any_list or cma_device.list */
	struct list_head	listen_list; /* per device listens */
	struct cma_device	*cma_dev;
	struct list_head	mc_list;	/* joined multicast groups */

	int			internal_id;
	enum rdma_cm_state	state;		/* guarded by 'lock' spinlock */
	spinlock_t		lock;
	struct mutex		qp_mutex;	/* serializes id.qp create/modify/destroy */

	struct completion	comp;		/* fired when refcount hits zero */
	atomic_t		refcount;
	struct mutex		handler_mutex;	/* serializes event callbacks vs removal */

	int			backlog;
	int			timeout_ms;
	struct ib_sa_query	*query;
	int			query_id;
	union {
		struct ib_cm_id	*ib;
		struct iw_cm_id	*iw;
	} cm_id;

	u32			seq_num;
	u32			qkey;
	u32			qp_num;
	pid_t			owner;		/* pid of creating task */
	u32			options;	/* CMA_OPTION_* bits */
	u8			srq;
	u8			tos;
	u8			reuseaddr;
	u8			afonly;
};
150
/* State for one multicast join on an id. */
struct cma_multicast {
	struct rdma_id_private *id_priv;
	union {
		struct ib_sa_multicast *ib;
	} multicast;
	struct list_head	list;		/* entry in id_priv->mc_list */
	void			*context;
	struct sockaddr_storage	addr;
	struct kref		mcref;		/* released via release_mc() */
};
161
/* Deferred state transition + event delivery, queued on cma_wq. */
struct cma_work {
	struct work_struct	work;
	struct rdma_id_private	*id;
	enum rdma_cm_state	old_state;
	enum rdma_cm_state	new_state;
	struct rdma_cm_event	event;
};
169
/* Deferred event delivery for netdevice changes (no state transition). */
struct cma_ndev_work {
	struct work_struct	work;
	struct rdma_id_private	*id;
	struct rdma_cm_event	event;
};
175
/* Deferred multicast-join completion for IBoE (RoCE) ports. */
struct iboe_mcast_work {
	struct work_struct	work;
	struct rdma_id_private	*id;
	struct cma_multicast	*mc;
};
181
/* Wire-format address in the CMA private-data header; IPv4 occupies the
 * last 4 bytes of the 16-byte field. */
union cma_ip_addr {
	struct in6_addr ip6;
	struct {
		__be32 pad[3];
		__be32 addr;
	} ip4;
};
189
/* Private-data header exchanged during connection setup. */
struct cma_hdr {
	u8 cma_version;
	u8 ip_version;	/* IP version: 7:4 */
	__be16 port;
	union cma_ip_addr src_addr;
	union cma_ip_addr dst_addr;
};
197
Sean Heftye51060f2006-06-17 20:37:29 -0700198#define CMA_VERSION 0x00
Sean Heftye51060f2006-06-17 20:37:29 -0700199
Nir Muchtar550e5ca2011-05-20 11:46:11 -0700200static int cma_comp(struct rdma_id_private *id_priv, enum rdma_cm_state comp)
Sean Heftye51060f2006-06-17 20:37:29 -0700201{
202 unsigned long flags;
203 int ret;
204
205 spin_lock_irqsave(&id_priv->lock, flags);
206 ret = (id_priv->state == comp);
207 spin_unlock_irqrestore(&id_priv->lock, flags);
208 return ret;
209}
210
211static int cma_comp_exch(struct rdma_id_private *id_priv,
Nir Muchtar550e5ca2011-05-20 11:46:11 -0700212 enum rdma_cm_state comp, enum rdma_cm_state exch)
Sean Heftye51060f2006-06-17 20:37:29 -0700213{
214 unsigned long flags;
215 int ret;
216
217 spin_lock_irqsave(&id_priv->lock, flags);
218 if ((ret = (id_priv->state == comp)))
219 id_priv->state = exch;
220 spin_unlock_irqrestore(&id_priv->lock, flags);
221 return ret;
222}
223
Nir Muchtar550e5ca2011-05-20 11:46:11 -0700224static enum rdma_cm_state cma_exch(struct rdma_id_private *id_priv,
225 enum rdma_cm_state exch)
Sean Heftye51060f2006-06-17 20:37:29 -0700226{
227 unsigned long flags;
Nir Muchtar550e5ca2011-05-20 11:46:11 -0700228 enum rdma_cm_state old;
Sean Heftye51060f2006-06-17 20:37:29 -0700229
230 spin_lock_irqsave(&id_priv->lock, flags);
231 old = id_priv->state;
232 id_priv->state = exch;
233 spin_unlock_irqrestore(&id_priv->lock, flags);
234 return old;
235}
236
/* Extract the IP version from the high nibble of cma_hdr.ip_version. */
static inline u8 cma_get_ip_ver(struct cma_hdr *hdr)
{
	return hdr->ip_version >> 4;
}
241
/* Store @ip_ver in the high nibble, preserving the low nibble. */
static inline void cma_set_ip_ver(struct cma_hdr *hdr, u8 ip_ver)
{
	hdr->ip_version = (ip_ver << 4) | (hdr->ip_version & 0xF);
}
246
/*
 * Bind an id to a device: take a device reference, publish device and
 * transport on the public id, and link the id onto the device's id_list.
 * NOTE(review): callers in view invoke this under the global 'lock' mutex
 * (cma_acquire_dev) — assume that is required for the list update; confirm
 * for cma_resolve_ib_dev's caller.
 */
static void cma_attach_to_dev(struct rdma_id_private *id_priv,
			      struct cma_device *cma_dev)
{
	atomic_inc(&cma_dev->refcount);
	id_priv->cma_dev = cma_dev;
	id_priv->id.device = cma_dev->device;
	id_priv->id.route.addr.dev_addr.transport =
		rdma_node_get_transport(cma_dev->device->node_type);
	list_add_tail(&id_priv->list, &cma_dev->id_list);
}
257
/* Drop a device reference; wake waiters (device removal) on last put. */
static inline void cma_deref_dev(struct cma_device *cma_dev)
{
	if (atomic_dec_and_test(&cma_dev->refcount))
		complete(&cma_dev->comp);
}
263
/* kref release for a cma_multicast; frees the IB record and the wrapper. */
static inline void release_mc(struct kref *kref)
{
	struct cma_multicast *mc = container_of(kref, struct cma_multicast, mcref);

	kfree(mc->multicast.ib);
	kfree(mc);
}
271
/* Detach an id from its device under the global mutex and drop the
 * device reference taken by cma_attach_to_dev(). */
static void cma_release_dev(struct rdma_id_private *id_priv)
{
	mutex_lock(&lock);
	list_del(&id_priv->list);
	cma_deref_dev(id_priv->cma_dev);
	id_priv->cma_dev = NULL;
	mutex_unlock(&lock);
}
280
/* Generic-sockaddr view of the id's source address. */
static inline struct sockaddr *cma_src_addr(struct rdma_id_private *id_priv)
{
	return (struct sockaddr *) &id_priv->id.route.addr.src_addr;
}
285
/* Generic-sockaddr view of the id's destination address. */
static inline struct sockaddr *cma_dst_addr(struct rdma_id_private *id_priv)
{
	return (struct sockaddr *) &id_priv->id.route.addr.dst_addr;
}
290
/* Address family of the id's source address (AF_INET/AF_INET6/AF_IB). */
static inline unsigned short cma_family(struct rdma_id_private *id_priv)
{
	return id_priv->id.route.addr.src_addr.ss_family;
}
295
/*
 * Set the id's qkey.  If already set, a nonzero @qkey must match (-EINVAL
 * otherwise).  @qkey == 0 selects a default: RDMA_UDP_QKEY for UDP/IB port
 * spaces, or the broadcast group's qkey (via an SA query) for IPoIB.
 */
static int cma_set_qkey(struct rdma_id_private *id_priv, u32 qkey)
{
	struct ib_sa_mcmember_rec rec;
	int ret = 0;

	if (id_priv->qkey) {
		/* Already chosen: only an identical explicit qkey is OK. */
		if (qkey && id_priv->qkey != qkey)
			return -EINVAL;
		return 0;
	}

	if (qkey) {
		id_priv->qkey = qkey;
		return 0;
	}

	switch (id_priv->id.ps) {
	case RDMA_PS_UDP:
	case RDMA_PS_IB:
		id_priv->qkey = RDMA_UDP_QKEY;
		break;
	case RDMA_PS_IPOIB:
		/* IPoIB interop: use the qkey of the broadcast MC group. */
		ib_addr_get_mgid(&id_priv->id.route.addr.dev_addr, &rec.mgid);
		ret = ib_sa_get_mcmember_rec(id_priv->id.device,
					     id_priv->id.port_num, &rec.mgid,
					     &rec);
		if (!ret)
			id_priv->qkey = be32_to_cpu(rec.qkey);
		break;
	default:
		break;
	}
	return ret;
}
330
/* Fill a dev_addr from a native AF_IB address (GID + pkey). */
static void cma_translate_ib(struct sockaddr_ib *sib, struct rdma_dev_addr *dev_addr)
{
	dev_addr->dev_type = ARPHRD_INFINIBAND;
	rdma_addr_set_sgid(dev_addr, (union ib_gid *) &sib->sib_addr);
	ib_addr_set_pkey(dev_addr, ntohs(sib->sib_pkey));
}
337
338static int cma_translate_addr(struct sockaddr *addr, struct rdma_dev_addr *dev_addr)
339{
340 int ret;
341
342 if (addr->sa_family != AF_IB) {
Matan Barakdd5f03b2013-12-12 18:03:11 +0200343 ret = rdma_translate_ip(addr, dev_addr, NULL);
Sean Hefty680f9202013-05-29 10:09:12 -0700344 } else {
345 cma_translate_ib((struct sockaddr_ib *) addr, dev_addr);
346 ret = 0;
347 }
348
349 return ret;
350}
351
/*
 * Find and attach the RDMA device/port that owns the id's resolved source
 * address.  If @listen_id_priv is given, try its device/port first (the
 * incoming request arrived there); otherwise scan every registered device.
 * Returns 0 and attaches the id on success, -ENODEV if no port matches.
 * Holds the global 'lock' mutex across the scan and the attach.
 */
static int cma_acquire_dev(struct rdma_id_private *id_priv,
			   struct rdma_id_private *listen_id_priv)
{
	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
	struct cma_device *cma_dev;
	union ib_gid gid, iboe_gid;
	int ret = -ENODEV;
	u8 port, found_port;
	enum rdma_link_layer dev_ll = dev_addr->dev_type == ARPHRD_INFINIBAND ?
		IB_LINK_LAYER_INFINIBAND : IB_LINK_LAYER_ETHERNET;

	/* IPoIB requires a real InfiniBand link. */
	if (dev_ll != IB_LINK_LAYER_INFINIBAND &&
	    id_priv->id.ps == RDMA_PS_IPOIB)
		return -EINVAL;

	mutex_lock(&lock);
	/* Two candidate GIDs: the IBoE (RoCE) form and the raw source GID. */
	iboe_addr_get_sgid(dev_addr, &iboe_gid);
	memcpy(&gid, dev_addr->src_dev_addr +
	       rdma_addr_gid_offset(dev_addr), sizeof gid);

	/* Fast path: prefer the listener's own device/port when link layers match. */
	if (listen_id_priv &&
	    rdma_port_get_link_layer(listen_id_priv->id.device,
				     listen_id_priv->id.port_num) == dev_ll) {
		cma_dev = listen_id_priv->cma_dev;
		port = listen_id_priv->id.port_num;
		if (rdma_node_get_transport(cma_dev->device->node_type) == RDMA_TRANSPORT_IB &&
		    rdma_port_get_link_layer(cma_dev->device, port) == IB_LINK_LAYER_ETHERNET)
			ret = ib_find_cached_gid(cma_dev->device, &iboe_gid,
						 &found_port, NULL);
		else
			ret = ib_find_cached_gid(cma_dev->device, &gid,
						 &found_port, NULL);

		if (!ret && (port == found_port)) {
			id_priv->id.port_num = found_port;
			goto out;
		}
	}

	/* Slow path: scan every port of every device with a matching link layer. */
	list_for_each_entry(cma_dev, &dev_list, list) {
		for (port = 1; port <= cma_dev->device->phys_port_cnt; ++port) {
			/* Already tried above. */
			if (listen_id_priv &&
			    listen_id_priv->cma_dev == cma_dev &&
			    listen_id_priv->id.port_num == port)
				continue;
			if (rdma_port_get_link_layer(cma_dev->device, port) == dev_ll) {
				if (rdma_node_get_transport(cma_dev->device->node_type) == RDMA_TRANSPORT_IB &&
				    rdma_port_get_link_layer(cma_dev->device, port) == IB_LINK_LAYER_ETHERNET)
					ret = ib_find_cached_gid(cma_dev->device, &iboe_gid, &found_port, NULL);
				else
					ret = ib_find_cached_gid(cma_dev->device, &gid, &found_port, NULL);

				if (!ret && (port == found_port)) {
					id_priv->id.port_num = found_port;
					goto out;
				}
			}
		}
	}

out:
	if (!ret)
		cma_attach_to_dev(id_priv, cma_dev);

	mutex_unlock(&lock);
	return ret;
}
417
/*
 * Select the source IB device and address to reach the destination IB address.
 *
 * Scans IB-transport devices for a port carrying the destination pkey.
 * An exact GID match wins immediately; otherwise the first port on the
 * destination's subnet prefix is remembered as a fallback.  On success the
 * chosen SGID is copied into the id's source address.
 */
static int cma_resolve_ib_dev(struct rdma_id_private *id_priv)
{
	struct cma_device *cma_dev, *cur_dev;
	struct sockaddr_ib *addr;
	union ib_gid gid, sgid, *dgid;
	u16 pkey, index;
	u8 p;
	int i;

	cma_dev = NULL;
	addr = (struct sockaddr_ib *) cma_dst_addr(id_priv);
	dgid = (union ib_gid *) &addr->sib_addr;
	pkey = ntohs(addr->sib_pkey);

	list_for_each_entry(cur_dev, &dev_list, list) {
		if (rdma_node_get_transport(cur_dev->device->node_type) != RDMA_TRANSPORT_IB)
			continue;

		for (p = 1; p <= cur_dev->device->phys_port_cnt; ++p) {
			/* Port must carry the destination partition key. */
			if (ib_find_cached_pkey(cur_dev->device, p, pkey, &index))
				continue;

			/* Walk the port's GID table until the cache runs out. */
			for (i = 0; !ib_get_cached_gid(cur_dev->device, p, i, &gid); i++) {
				if (!memcmp(&gid, dgid, sizeof(gid))) {
					cma_dev = cur_dev;
					sgid = gid;
					id_priv->id.port_num = p;
					goto found;
				}

				/* First same-subnet port is the fallback. */
				if (!cma_dev && (gid.global.subnet_prefix ==
						 dgid->global.subnet_prefix)) {
					cma_dev = cur_dev;
					sgid = gid;
					id_priv->id.port_num = p;
				}
			}
		}
	}

	if (!cma_dev)
		return -ENODEV;

found:
	cma_attach_to_dev(id_priv, cma_dev);
	addr = (struct sockaddr_ib *) cma_src_addr(id_priv);
	memcpy(&addr->sib_addr, &sgid, sizeof sgid);
	cma_translate_ib(addr, &id_priv->id.route.addr.dev_addr);
	return 0;
}
471
/* Drop an id reference; wake the destroy path on the last put. */
static void cma_deref_id(struct rdma_id_private *id_priv)
{
	if (atomic_dec_and_test(&id_priv->refcount))
		complete(&id_priv->comp);
}
477
/*
 * Block device-removal notification while a callback runs: take the
 * handler mutex and verify the id is still in @state.
 * Returns 0 with handler_mutex HELD (caller must unlock), or -EINVAL
 * with the mutex released if the state changed.
 */
static int cma_disable_callback(struct rdma_id_private *id_priv,
				enum rdma_cm_state state)
{
	mutex_lock(&id_priv->handler_mutex);
	if (id_priv->state != state) {
		mutex_unlock(&id_priv->handler_mutex);
		return -EINVAL;
	}
	return 0;
}
488
/*
 * Allocate and initialize an RDMA CM id in the RDMA_CM_IDLE state.
 * @event_handler: callback for CM events; @context: opaque user pointer;
 * @ps: port space; @qp_type: IB_QPT_UD for datagram, else connected.
 * Returns the public id, or ERR_PTR(-ENOMEM).  The embedded refcount
 * starts at 1; released via the id's destroy path.
 */
struct rdma_cm_id *rdma_create_id(rdma_cm_event_handler event_handler,
				  void *context, enum rdma_port_space ps,
				  enum ib_qp_type qp_type)
{
	struct rdma_id_private *id_priv;

	id_priv = kzalloc(sizeof *id_priv, GFP_KERNEL);
	if (!id_priv)
		return ERR_PTR(-ENOMEM);

	id_priv->owner = task_pid_nr(current);
	id_priv->state = RDMA_CM_IDLE;
	id_priv->id.context = context;
	id_priv->id.event_handler = event_handler;
	id_priv->id.ps = ps;
	id_priv->id.qp_type = qp_type;
	spin_lock_init(&id_priv->lock);
	mutex_init(&id_priv->qp_mutex);
	init_completion(&id_priv->comp);
	atomic_set(&id_priv->refcount, 1);
	mutex_init(&id_priv->handler_mutex);
	INIT_LIST_HEAD(&id_priv->listen_list);
	INIT_LIST_HEAD(&id_priv->mc_list);
	/* Random starting PSN for the QP sequence number. */
	get_random_bytes(&id_priv->seq_num, sizeof id_priv->seq_num);

	return &id_priv->id;
}
EXPORT_SYMBOL(rdma_create_id);
517
/*
 * Bring a UD QP through INIT -> RTR -> RTS immediately: UD QPs need no
 * peer handshake, so the full transition is done at creation time.
 */
static int cma_init_ud_qp(struct rdma_id_private *id_priv, struct ib_qp *qp)
{
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;

	qp_attr.qp_state = IB_QPS_INIT;
	ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
	if (ret)
		return ret;

	ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
	if (ret)
		return ret;

	qp_attr.qp_state = IB_QPS_RTR;
	ret = ib_modify_qp(qp, &qp_attr, IB_QP_STATE);
	if (ret)
		return ret;

	qp_attr.qp_state = IB_QPS_RTS;
	qp_attr.sq_psn = 0;
	ret = ib_modify_qp(qp, &qp_attr, IB_QP_STATE | IB_QP_SQ_PSN);

	return ret;
}
543
/*
 * Move a connected QP to INIT only; RTR/RTS happen later during the
 * connection handshake (see cma_modify_qp_rtr/_rts).
 */
static int cma_init_conn_qp(struct rdma_id_private *id_priv, struct ib_qp *qp)
{
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;

	qp_attr.qp_state = IB_QPS_INIT;
	ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
	if (ret)
		return ret;

	return ib_modify_qp(qp, &qp_attr, qp_attr_mask);
}
556
/*
 * Create a QP on @pd for @id and initialize it per the id's qp_type.
 * The PD must belong to the id's resolved device.  On success the QP is
 * published on the public id; on failure it is destroyed before return.
 */
int rdma_create_qp(struct rdma_cm_id *id, struct ib_pd *pd,
		   struct ib_qp_init_attr *qp_init_attr)
{
	struct rdma_id_private *id_priv;
	struct ib_qp *qp;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (id->device != pd->device)
		return -EINVAL;

	qp = ib_create_qp(pd, qp_init_attr);
	if (IS_ERR(qp))
		return PTR_ERR(qp);

	if (id->qp_type == IB_QPT_UD)
		ret = cma_init_ud_qp(id_priv, qp);
	else
		ret = cma_init_conn_qp(id_priv, qp);
	if (ret)
		goto err;

	id->qp = qp;
	id_priv->qp_num = qp->qp_num;
	id_priv->srq = (qp->srq != NULL);
	return 0;
err:
	ib_destroy_qp(qp);
	return ret;
}
EXPORT_SYMBOL(rdma_create_qp);
588
/* Destroy the id's QP under qp_mutex and clear the public pointer. */
void rdma_destroy_qp(struct rdma_cm_id *id)
{
	struct rdma_id_private *id_priv;

	id_priv = container_of(id, struct rdma_id_private, id);
	mutex_lock(&id_priv->qp_mutex);
	ib_destroy_qp(id_priv->id.qp);
	id_priv->id.qp = NULL;
	mutex_unlock(&id_priv->qp_mutex);
}
EXPORT_SYMBOL(rdma_destroy_qp);
600
/*
 * Transition the id's QP to RTR (via INIT), filling attributes from the
 * CM state.  For RoCE (IB transport over Ethernet link layer), also
 * resolve the source MAC from the SGID.  A missing QP is not an error
 * (user manages the QP).  Serialized against rdma_destroy_qp by qp_mutex.
 */
static int cma_modify_qp_rtr(struct rdma_id_private *id_priv,
			     struct rdma_conn_param *conn_param)
{
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;
	union ib_gid sgid;

	mutex_lock(&id_priv->qp_mutex);
	if (!id_priv->id.qp) {
		ret = 0;
		goto out;
	}

	/* Need to update QP attributes from default values. */
	qp_attr.qp_state = IB_QPS_INIT;
	ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
	if (ret)
		goto out;

	ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask);
	if (ret)
		goto out;

	qp_attr.qp_state = IB_QPS_RTR;
	ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
	if (ret)
		goto out;

	ret = ib_query_gid(id_priv->id.device, id_priv->id.port_num,
			   qp_attr.ah_attr.grh.sgid_index, &sgid);
	if (ret)
		goto out;

	/* RoCE: the QP needs the source MAC matching our SGID. */
	if (rdma_node_get_transport(id_priv->cma_dev->device->node_type)
	    == RDMA_TRANSPORT_IB &&
	    rdma_port_get_link_layer(id_priv->id.device, id_priv->id.port_num)
	    == IB_LINK_LAYER_ETHERNET) {
		ret = rdma_addr_find_smac_by_sgid(&sgid, qp_attr.smac, NULL);

		if (ret)
			goto out;
	}
	if (conn_param)
		qp_attr.max_dest_rd_atomic = conn_param->responder_resources;
	ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask);
out:
	mutex_unlock(&id_priv->qp_mutex);
	return ret;
}
650
/*
 * Transition the id's QP to RTS.  A missing QP is not an error (user
 * manages the QP).  Serialized by qp_mutex.
 */
static int cma_modify_qp_rts(struct rdma_id_private *id_priv,
			     struct rdma_conn_param *conn_param)
{
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;

	mutex_lock(&id_priv->qp_mutex);
	if (!id_priv->id.qp) {
		ret = 0;
		goto out;
	}

	qp_attr.qp_state = IB_QPS_RTS;
	ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
	if (ret)
		goto out;

	if (conn_param)
		qp_attr.max_rd_atomic = conn_param->initiator_depth;
	ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask);
out:
	mutex_unlock(&id_priv->qp_mutex);
	return ret;
}
675
/* Force the id's QP into the error state (flushes outstanding work). */
static int cma_modify_qp_err(struct rdma_id_private *id_priv)
{
	struct ib_qp_attr qp_attr;
	int ret;

	mutex_lock(&id_priv->qp_mutex);
	if (!id_priv->id.qp) {
		ret = 0;
		goto out;
	}

	qp_attr.qp_state = IB_QPS_ERR;
	ret = ib_modify_qp(id_priv->id.qp, &qp_attr, IB_QP_STATE);
out:
	mutex_unlock(&id_priv->qp_mutex);
	return ret;
}
693
/*
 * Fill INIT-state QP attributes for an IB-transport id: pkey index, port,
 * and either the qkey (UD) or access flags (connected).
 * On RoCE ports the default pkey 0xffff is used instead of the dev_addr's.
 */
static int cma_ib_init_qp_attr(struct rdma_id_private *id_priv,
			       struct ib_qp_attr *qp_attr, int *qp_attr_mask)
{
	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
	int ret;
	u16 pkey;

	if (rdma_port_get_link_layer(id_priv->id.device, id_priv->id.port_num) ==
	    IB_LINK_LAYER_INFINIBAND)
		pkey = ib_addr_get_pkey(dev_addr);
	else
		pkey = 0xffff;

	ret = ib_find_cached_pkey(id_priv->id.device, id_priv->id.port_num,
				  pkey, &qp_attr->pkey_index);
	if (ret)
		return ret;

	qp_attr->port_num = id_priv->id.port_num;
	*qp_attr_mask = IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT;

	if (id_priv->id.qp_type == IB_QPT_UD) {
		/* qkey 0 => pick the default for this port space. */
		ret = cma_set_qkey(id_priv, 0);
		if (ret)
			return ret;

		qp_attr->qkey = id_priv->qkey;
		*qp_attr_mask |= IB_QP_QKEY;
	} else {
		qp_attr->qp_access_flags = 0;
		*qp_attr_mask |= IB_QP_ACCESS_FLAGS;
	}
	return 0;
}
728
/*
 * Fill QP attributes/mask for the QP state requested in qp_attr->qp_state,
 * dispatching on the id's transport (IB vs iWARP).  Before a CM id exists
 * (or for UD), attributes are derived locally; otherwise the transport's
 * CM provides them.  Returns -ENOSYS for unknown transports.
 */
int rdma_init_qp_attr(struct rdma_cm_id *id, struct ib_qp_attr *qp_attr,
		      int *qp_attr_mask)
{
	struct rdma_id_private *id_priv;
	int ret = 0;

	id_priv = container_of(id, struct rdma_id_private, id);
	switch (rdma_node_get_transport(id_priv->id.device->node_type)) {
	case RDMA_TRANSPORT_IB:
		if (!id_priv->cm_id.ib || (id_priv->id.qp_type == IB_QPT_UD))
			ret = cma_ib_init_qp_attr(id_priv, qp_attr, qp_attr_mask);
		else
			ret = ib_cm_init_qp_attr(id_priv->cm_id.ib, qp_attr,
						 qp_attr_mask);

		/* Receive PSN must match the sender's starting PSN. */
		if (qp_attr->qp_state == IB_QPS_RTR)
			qp_attr->rq_psn = id_priv->seq_num;
		break;
	case RDMA_TRANSPORT_IWARP:
		if (!id_priv->cm_id.iw) {
			qp_attr->qp_access_flags = 0;
			*qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS;
		} else
			ret = iw_cm_init_qp_attr(id_priv->cm_id.iw, qp_attr,
						 qp_attr_mask);
		break;
	default:
		ret = -ENOSYS;
		break;
	}

	return ret;
}
EXPORT_SYMBOL(rdma_init_qp_attr);
763
764static inline int cma_zero_addr(struct sockaddr *addr)
765{
Sean Hefty2e2d1902013-05-29 10:09:09 -0700766 switch (addr->sa_family) {
767 case AF_INET:
768 return ipv4_is_zeronet(((struct sockaddr_in *)addr)->sin_addr.s_addr);
769 case AF_INET6:
770 return ipv6_addr_any(&((struct sockaddr_in6 *) addr)->sin6_addr);
771 case AF_IB:
772 return ib_addr_any(&((struct sockaddr_ib *) addr)->sib_addr);
773 default:
774 return 0;
Sean Heftye51060f2006-06-17 20:37:29 -0700775 }
776}
777
778static inline int cma_loopback_addr(struct sockaddr *addr)
779{
Sean Hefty2e2d1902013-05-29 10:09:09 -0700780 switch (addr->sa_family) {
781 case AF_INET:
782 return ipv4_is_loopback(((struct sockaddr_in *) addr)->sin_addr.s_addr);
783 case AF_INET6:
784 return ipv6_addr_loopback(&((struct sockaddr_in6 *) addr)->sin6_addr);
785 case AF_IB:
786 return ib_addr_loopback(&((struct sockaddr_ib *) addr)->sib_addr);
787 default:
788 return 0;
789 }
Sean Heftye51060f2006-06-17 20:37:29 -0700790}
791
/* Non-zero when @addr is a wildcard or loopback address. */
static inline int cma_any_addr(struct sockaddr *addr)
{
	if (cma_zero_addr(addr))
		return 1;
	return cma_loopback_addr(addr) != 0;
}
796
Hefty, Sean43b752d2011-05-09 22:06:10 -0700797static int cma_addr_cmp(struct sockaddr *src, struct sockaddr *dst)
798{
799 if (src->sa_family != dst->sa_family)
800 return -1;
801
802 switch (src->sa_family) {
803 case AF_INET:
804 return ((struct sockaddr_in *) src)->sin_addr.s_addr !=
805 ((struct sockaddr_in *) dst)->sin_addr.s_addr;
Sean Hefty2e2d1902013-05-29 10:09:09 -0700806 case AF_INET6:
Hefty, Sean43b752d2011-05-09 22:06:10 -0700807 return ipv6_addr_cmp(&((struct sockaddr_in6 *) src)->sin6_addr,
808 &((struct sockaddr_in6 *) dst)->sin6_addr);
Sean Hefty2e2d1902013-05-29 10:09:09 -0700809 default:
810 return ib_addr_cmp(&((struct sockaddr_ib *) src)->sib_addr,
811 &((struct sockaddr_ib *) dst)->sib_addr);
Hefty, Sean43b752d2011-05-09 22:06:10 -0700812 }
813}
814
Sean Hefty58afdcb2013-05-29 10:09:11 -0700815static __be16 cma_port(struct sockaddr *addr)
Sean Hefty628e5f62006-11-30 16:44:16 -0800816{
Sean Hefty58afdcb2013-05-29 10:09:11 -0700817 struct sockaddr_ib *sib;
818
819 switch (addr->sa_family) {
820 case AF_INET:
Sean Hefty628e5f62006-11-30 16:44:16 -0800821 return ((struct sockaddr_in *) addr)->sin_port;
Sean Hefty58afdcb2013-05-29 10:09:11 -0700822 case AF_INET6:
Sean Hefty628e5f62006-11-30 16:44:16 -0800823 return ((struct sockaddr_in6 *) addr)->sin6_port;
Sean Hefty58afdcb2013-05-29 10:09:11 -0700824 case AF_IB:
825 sib = (struct sockaddr_ib *) addr;
826 return htons((u16) (be64_to_cpu(sib->sib_sid) &
827 be64_to_cpu(sib->sib_sid_mask)));
828 default:
829 return 0;
830 }
Sean Hefty628e5f62006-11-30 16:44:16 -0800831}
832
/* Non-zero when @addr carries no port (i.e. the wildcard port). */
static inline int cma_any_port(struct sockaddr *addr)
{
	return cma_port(addr) == 0;
}
837
/*
 * Populate the AF_IB source/destination addresses of a new connection @id
 * from the listener's bound address plus the path record of the incoming
 * request.  The source GID comes from path->sgid, the destination from
 * path->dgid; pkey and flow label are taken from the path record.
 */
static void cma_save_ib_info(struct rdma_cm_id *id, struct rdma_cm_id *listen_id,
			     struct ib_sa_path_rec *path)
{
	struct sockaddr_ib *listen_ib, *ib;

	listen_ib = (struct sockaddr_ib *) &listen_id->route.addr.src_addr;
	ib = (struct sockaddr_ib *) &id->route.addr.src_addr;
	ib->sib_family = listen_ib->sib_family;
	ib->sib_pkey = path->pkey;
	ib->sib_flowinfo = path->flow_label;
	memcpy(&ib->sib_addr, &path->sgid, 16);	/* GIDs are 16 bytes */
	ib->sib_sid = listen_ib->sib_sid;
	/* Service id is now fully resolved: match every bit. */
	ib->sib_sid_mask = cpu_to_be64(0xffffffffffffffffULL);
	ib->sib_scope_id = listen_ib->sib_scope_id;

	ib = (struct sockaddr_ib *) &id->route.addr.dst_addr;
	ib->sib_family = listen_ib->sib_family;
	ib->sib_pkey = path->pkey;
	ib->sib_flowinfo = path->flow_label;
	memcpy(&ib->sib_addr, &path->dgid, 16);
	/* NOTE(review): dst sid/sid_mask/scope_id are left untouched here. */
}
859
/*
 * Populate the IPv4 source/destination addresses of a new connection @id
 * from the listener's bound address and the private-data CMA header.
 * Note the swap: the header's dst_addr is our local (source) address and
 * its src_addr is the remote peer.
 */
static void cma_save_ip4_info(struct rdma_cm_id *id, struct rdma_cm_id *listen_id,
			      struct cma_hdr *hdr)
{
	struct sockaddr_in *listen4, *ip4;

	listen4 = (struct sockaddr_in *) &listen_id->route.addr.src_addr;
	ip4 = (struct sockaddr_in *) &id->route.addr.src_addr;
	ip4->sin_family = listen4->sin_family;
	ip4->sin_addr.s_addr = hdr->dst_addr.ip4.addr;
	ip4->sin_port = listen4->sin_port;

	ip4 = (struct sockaddr_in *) &id->route.addr.dst_addr;
	ip4->sin_family = listen4->sin_family;
	ip4->sin_addr.s_addr = hdr->src_addr.ip4.addr;
	ip4->sin_port = hdr->port;	/* peer's port from the wire header */
}
876
/*
 * IPv6 counterpart of cma_save_ip4_info(): fill the new id's source and
 * destination addresses from the listener and the CMA header (header
 * dst = local, header src = remote peer).
 */
static void cma_save_ip6_info(struct rdma_cm_id *id, struct rdma_cm_id *listen_id,
			      struct cma_hdr *hdr)
{
	struct sockaddr_in6 *listen6, *ip6;

	listen6 = (struct sockaddr_in6 *) &listen_id->route.addr.src_addr;
	ip6 = (struct sockaddr_in6 *) &id->route.addr.src_addr;
	ip6->sin6_family = listen6->sin6_family;
	ip6->sin6_addr = hdr->dst_addr.ip6;
	ip6->sin6_port = listen6->sin6_port;

	ip6 = (struct sockaddr_in6 *) &id->route.addr.dst_addr;
	ip6->sin6_family = listen6->sin6_family;
	ip6->sin6_addr = hdr->src_addr.ip6;
	ip6->sin6_port = hdr->port;	/* peer's port from the wire header */
}
893
/*
 * Derive the new connection id's network addresses from an incoming CM
 * event.  AF_IB listeners take the addresses from the REQ's primary path
 * record (no CMA header is exchanged); everything else parses the CMA
 * header carried in the private data.
 *
 * Returns 0 on success, -EINVAL on a version/IP-version mismatch.
 */
static int cma_save_net_info(struct rdma_cm_id *id, struct rdma_cm_id *listen_id,
			     struct ib_cm_event *ib_event)
{
	struct cma_hdr *hdr;

	if ((listen_id->route.addr.src_addr.ss_family == AF_IB) &&
	    (ib_event->event == IB_CM_REQ_RECEIVED)) {
		cma_save_ib_info(id, listen_id, ib_event->param.req_rcvd.primary_path);
		return 0;
	}

	hdr = ib_event->private_data;
	if (hdr->cma_version != CMA_VERSION)
		return -EINVAL;

	switch (cma_get_ip_ver(hdr)) {
	case 4:
		cma_save_ip4_info(id, listen_id, hdr);
		break;
	case 6:
		cma_save_ip6_info(id, listen_id, hdr);
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
921
Sean Heftye8160e12013-05-29 10:09:22 -0700922static inline int cma_user_data_offset(struct rdma_id_private *id_priv)
Sean Heftye51060f2006-06-17 20:37:29 -0700923{
Sean Heftye8160e12013-05-29 10:09:22 -0700924 return cma_family(id_priv) == AF_IB ? 0 : sizeof(struct cma_hdr);
Sean Heftye51060f2006-06-17 20:37:29 -0700925}
926
/*
 * Cancel an outstanding route (path record) query, if any.  Only
 * InfiniBand link layers issue SA queries; other link layers have
 * nothing to cancel.
 */
static void cma_cancel_route(struct rdma_id_private *id_priv)
{
	switch (rdma_port_get_link_layer(id_priv->id.device, id_priv->id.port_num)) {
	case IB_LINK_LAYER_INFINIBAND:
		if (id_priv->query)
			ib_sa_cancel_query(id_priv->query_id, id_priv->query);
		break;
	default:
		break;
	}
}
938
/*
 * Tear down all per-device listen ids spawned by a wildcard listen.
 * The global lock must be dropped around rdma_destroy_id() (it may
 * sleep and re-enter CMA paths that take the lock), so the list is
 * drained one entry at a time, re-acquiring the lock each iteration.
 */
static void cma_cancel_listens(struct rdma_id_private *id_priv)
{
	struct rdma_id_private *dev_id_priv;

	/*
	 * Remove from listen_any_list to prevent added devices from spawning
	 * additional listen requests.
	 */
	mutex_lock(&lock);
	list_del(&id_priv->list);

	while (!list_empty(&id_priv->listen_list)) {
		dev_id_priv = list_entry(id_priv->listen_list.next,
					 struct rdma_id_private, listen_list);
		/* sync with device removal to avoid duplicate destruction */
		list_del_init(&dev_id_priv->list);
		list_del(&dev_id_priv->listen_list);
		mutex_unlock(&lock);

		rdma_destroy_id(&dev_id_priv->id);
		mutex_lock(&lock);
	}
	mutex_unlock(&lock);
}
963
/*
 * Abort whatever asynchronous operation is pending for the id, based on
 * the state it was in when destruction began.  States with no pending
 * async work need no cancellation.
 */
static void cma_cancel_operation(struct rdma_id_private *id_priv,
				 enum rdma_cm_state state)
{
	switch (state) {
	case RDMA_CM_ADDR_QUERY:
		rdma_addr_cancel(&id_priv->id.route.addr.dev_addr);
		break;
	case RDMA_CM_ROUTE_QUERY:
		cma_cancel_route(id_priv);
		break;
	case RDMA_CM_LISTEN:
		/* Wildcard listens with no bound device fanned out per-device ids. */
		if (cma_any_addr(cma_src_addr(id_priv)) && !id_priv->cma_dev)
			cma_cancel_listens(id_priv);
		break;
	default:
		break;
	}
}
982
/*
 * Drop the id's membership in its port bind list; free the bind list
 * (and release the port number from the idr) when this was the last
 * owner.  Protected by the global lock.
 */
static void cma_release_port(struct rdma_id_private *id_priv)
{
	struct rdma_bind_list *bind_list = id_priv->bind_list;

	if (!bind_list)
		return;

	mutex_lock(&lock);
	hlist_del(&id_priv->node);
	if (hlist_empty(&bind_list->owners)) {
		idr_remove(bind_list->ps, bind_list->port);
		kfree(bind_list);
	}
	mutex_unlock(&lock);
}
998
/*
 * Leave every multicast group the id has joined.  IB-link groups were
 * joined through the SA and are freed directly; Ethernet (RoCE) groups
 * are reference-counted and released via kref.
 */
static void cma_leave_mc_groups(struct rdma_id_private *id_priv)
{
	struct cma_multicast *mc;

	while (!list_empty(&id_priv->mc_list)) {
		mc = container_of(id_priv->mc_list.next,
				  struct cma_multicast, list);
		list_del(&mc->list);
		switch (rdma_port_get_link_layer(id_priv->cma_dev->device, id_priv->id.port_num)) {
		case IB_LINK_LAYER_INFINIBAND:
			ib_sa_free_multicast(mc->multicast.ib);
			kfree(mc);
			break;
		case IB_LINK_LAYER_ETHERNET:
			kref_put(&mc->mcref, release_mc);
			break;
		default:
			break;
		}
	}
}
1020
/*
 * rdma_destroy_id - destroy an RDMA CM id and release its resources.
 *
 * Moves the id to DESTROYING (capturing the prior state so any pending
 * async operation can be cancelled), waits out any running event-handler
 * callback, tears down the underlying IB/iWarp CM id, leaves multicast
 * groups, releases the device and port, then waits for the refcount to
 * drain before freeing.
 */
void rdma_destroy_id(struct rdma_cm_id *id)
{
	struct rdma_id_private *id_priv;
	enum rdma_cm_state state;

	id_priv = container_of(id, struct rdma_id_private, id);
	state = cma_exch(id_priv, RDMA_CM_DESTROYING);
	cma_cancel_operation(id_priv, state);

	/*
	 * Wait for any active callback to finish.  New callbacks will find
	 * the id_priv state set to destroying and abort.
	 */
	mutex_lock(&id_priv->handler_mutex);
	mutex_unlock(&id_priv->handler_mutex);

	if (id_priv->cma_dev) {
		switch (rdma_node_get_transport(id_priv->id.device->node_type)) {
		case RDMA_TRANSPORT_IB:
			if (id_priv->cm_id.ib)
				ib_destroy_cm_id(id_priv->cm_id.ib);
			break;
		case RDMA_TRANSPORT_IWARP:
			if (id_priv->cm_id.iw)
				iw_destroy_cm_id(id_priv->cm_id.iw);
			break;
		default:
			break;
		}
		cma_leave_mc_groups(id_priv);
		cma_release_dev(id_priv);
	}

	cma_release_port(id_priv);
	cma_deref_id(id_priv);
	/* Block until every outstanding reference has been dropped. */
	wait_for_completion(&id_priv->comp);

	/* Internally-created ids hold a reference on their parent (context). */
	if (id_priv->internal_id)
		cma_deref_id(id_priv->id.context);

	kfree(id_priv->id.route.path_rec);
	kfree(id_priv);
}
EXPORT_SYMBOL(rdma_destroy_id);
1065
/*
 * Complete the active side of connection establishment after a REP:
 * transition the QP through RTR and RTS, then send the RTU.  On any
 * failure the QP is moved to the error state and a REJ is sent to the
 * peer.  Returns 0 on success or the first failing step's errno.
 */
static int cma_rep_recv(struct rdma_id_private *id_priv)
{
	int ret;

	ret = cma_modify_qp_rtr(id_priv, NULL);
	if (ret)
		goto reject;

	ret = cma_modify_qp_rts(id_priv, NULL);
	if (ret)
		goto reject;

	ret = ib_send_cm_rtu(id_priv->cm_id.ib, NULL, 0);
	if (ret)
		goto reject;

	return 0;
reject:
	cma_modify_qp_err(id_priv);
	ib_send_cm_rej(id_priv->cm_id.ib, IB_CM_REJ_CONSUMER_DEFINED,
		       NULL, 0, NULL, 0);
	return ret;
}
1089
Sean Heftya1b1b612006-11-30 16:33:14 -08001090static void cma_set_rep_event_data(struct rdma_cm_event *event,
1091 struct ib_cm_rep_event_param *rep_data,
1092 void *private_data)
1093{
1094 event->param.conn.private_data = private_data;
1095 event->param.conn.private_data_len = IB_CM_REP_PRIVATE_DATA_SIZE;
1096 event->param.conn.responder_resources = rep_data->responder_resources;
1097 event->param.conn.initiator_depth = rep_data->initiator_depth;
1098 event->param.conn.flow_control = rep_data->flow_control;
1099 event->param.conn.rnr_retry_count = rep_data->rnr_retry_count;
1100 event->param.conn.srq = rep_data->srq;
1101 event->param.conn.qp_num = rep_data->remote_qpn;
1102}
1103
/*
 * IB CM event handler for connected ids: translate ib_cm events into
 * rdma_cm events and dispatch them to the user's event handler.
 *
 * Entered with no locks; cma_disable_callback() takes handler_mutex and
 * verifies the id is still in the expected state (CONNECT, or DISCONNECT
 * for TIMEWAIT_EXIT).  A non-zero return from the user's handler
 * destroys the id; in that case handler_mutex is released before
 * rdma_destroy_id() since destroy waits on it.
 */
static int cma_ib_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
{
	struct rdma_id_private *id_priv = cm_id->context;
	struct rdma_cm_event event;
	int ret = 0;

	if ((ib_event->event != IB_CM_TIMEWAIT_EXIT &&
		cma_disable_callback(id_priv, RDMA_CM_CONNECT)) ||
	    (ib_event->event == IB_CM_TIMEWAIT_EXIT &&
		cma_disable_callback(id_priv, RDMA_CM_DISCONNECT)))
		return 0;

	memset(&event, 0, sizeof event);
	switch (ib_event->event) {
	case IB_CM_REQ_ERROR:
	case IB_CM_REP_ERROR:
		event.event = RDMA_CM_EVENT_UNREACHABLE;
		event.status = -ETIMEDOUT;
		break;
	case IB_CM_REP_RECEIVED:
		/* With a bound QP, finish RTR/RTS/RTU here; otherwise let
		 * the user complete the handshake (CONNECT_RESPONSE). */
		if (id_priv->id.qp) {
			event.status = cma_rep_recv(id_priv);
			event.event = event.status ? RDMA_CM_EVENT_CONNECT_ERROR :
						     RDMA_CM_EVENT_ESTABLISHED;
		} else {
			event.event = RDMA_CM_EVENT_CONNECT_RESPONSE;
		}
		cma_set_rep_event_data(&event, &ib_event->param.rep_rcvd,
				       ib_event->private_data);
		break;
	case IB_CM_RTU_RECEIVED:
	case IB_CM_USER_ESTABLISHED:
		event.event = RDMA_CM_EVENT_ESTABLISHED;
		break;
	case IB_CM_DREQ_ERROR:
		event.status = -ETIMEDOUT; /* fall through */
	case IB_CM_DREQ_RECEIVED:
	case IB_CM_DREP_RECEIVED:
		if (!cma_comp_exch(id_priv, RDMA_CM_CONNECT,
				   RDMA_CM_DISCONNECT))
			goto out;
		event.event = RDMA_CM_EVENT_DISCONNECTED;
		break;
	case IB_CM_TIMEWAIT_EXIT:
		event.event = RDMA_CM_EVENT_TIMEWAIT_EXIT;
		break;
	case IB_CM_MRA_RECEIVED:
		/* ignore event */
		goto out;
	case IB_CM_REJ_RECEIVED:
		cma_modify_qp_err(id_priv);
		event.status = ib_event->param.rej_rcvd.reason;
		event.event = RDMA_CM_EVENT_REJECTED;
		event.param.conn.private_data = ib_event->private_data;
		event.param.conn.private_data_len = IB_CM_REJ_PRIVATE_DATA_SIZE;
		break;
	default:
		printk(KERN_ERR "RDMA CMA: unexpected IB CM event: %d\n",
		       ib_event->event);
		goto out;
	}

	ret = id_priv->id.event_handler(&id_priv->id, &event);
	if (ret) {
		/* Destroy the CM ID by returning a non-zero value. */
		id_priv->cm_id.ib = NULL;
		cma_exch(id_priv, RDMA_CM_DESTROYING);
		mutex_unlock(&id_priv->handler_mutex);
		rdma_destroy_id(&id_priv->id);
		return ret;
	}
out:
	mutex_unlock(&id_priv->handler_mutex);
	return ret;
}
1179
/*
 * Create a connection id for an incoming connected-mode (REQ) request,
 * inheriting the listener's handler/context/port-space.  Copies the
 * primary (and optional alternate) path record and resolves the local
 * device address: a wildcard listener binds to the REQ's SGID directly,
 * otherwise the listener's source address is translated.
 *
 * Returns the new id in RDMA_CM_CONNECT state, or NULL on failure.
 */
static struct rdma_id_private *cma_new_conn_id(struct rdma_cm_id *listen_id,
					       struct ib_cm_event *ib_event)
{
	struct rdma_id_private *id_priv;
	struct rdma_cm_id *id;
	struct rdma_route *rt;
	int ret;

	id = rdma_create_id(listen_id->event_handler, listen_id->context,
			    listen_id->ps, ib_event->param.req_rcvd.qp_type);
	if (IS_ERR(id))
		return NULL;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (cma_save_net_info(id, listen_id, ib_event))
		goto err;

	rt = &id->route;
	rt->num_paths = ib_event->param.req_rcvd.alternate_path ? 2 : 1;
	rt->path_rec = kmalloc(sizeof *rt->path_rec * rt->num_paths,
			       GFP_KERNEL);
	if (!rt->path_rec)
		goto err;

	rt->path_rec[0] = *ib_event->param.req_rcvd.primary_path;
	if (rt->num_paths == 2)
		rt->path_rec[1] = *ib_event->param.req_rcvd.alternate_path;

	if (cma_any_addr(cma_src_addr(id_priv))) {
		rt->addr.dev_addr.dev_type = ARPHRD_INFINIBAND;
		rdma_addr_set_sgid(&rt->addr.dev_addr, &rt->path_rec[0].sgid);
		ib_addr_set_pkey(&rt->addr.dev_addr, be16_to_cpu(rt->path_rec[0].pkey));
	} else {
		ret = cma_translate_addr(cma_src_addr(id_priv), &rt->addr.dev_addr);
		if (ret)
			goto err;
	}
	rdma_addr_set_dgid(&rt->addr.dev_addr, &rt->path_rec[0].dgid);

	id_priv->state = RDMA_CM_CONNECT;
	return id_priv;

err:
	rdma_destroy_id(id);
	return NULL;
}
1226
Sean Hefty628e5f62006-11-30 16:44:16 -08001227static struct rdma_id_private *cma_new_udp_id(struct rdma_cm_id *listen_id,
1228 struct ib_cm_event *ib_event)
1229{
1230 struct rdma_id_private *id_priv;
1231 struct rdma_cm_id *id;
Sean Hefty628e5f62006-11-30 16:44:16 -08001232 int ret;
1233
1234 id = rdma_create_id(listen_id->event_handler, listen_id->context,
Sean Heftyb26f9b92010-04-01 17:08:41 +00001235 listen_id->ps, IB_QPT_UD);
Sean Hefty628e5f62006-11-30 16:44:16 -08001236 if (IS_ERR(id))
1237 return NULL;
1238
Sean Heftyf4753832013-05-29 10:09:14 -07001239 id_priv = container_of(id, struct rdma_id_private, id);
Sean Heftyfbaa1a62013-05-29 10:09:21 -07001240 if (cma_save_net_info(id, listen_id, ib_event))
Sean Hefty628e5f62006-11-30 16:44:16 -08001241 goto err;
1242
Sean Hefty6f8372b2009-11-19 13:26:06 -08001243 if (!cma_any_addr((struct sockaddr *) &id->route.addr.src_addr)) {
Sean Heftyf4753832013-05-29 10:09:14 -07001244 ret = cma_translate_addr(cma_src_addr(id_priv), &id->route.addr.dev_addr);
Sean Hefty6f8372b2009-11-19 13:26:06 -08001245 if (ret)
1246 goto err;
1247 }
Sean Hefty628e5f62006-11-30 16:44:16 -08001248
Nir Muchtar550e5ca2011-05-20 11:46:11 -07001249 id_priv->state = RDMA_CM_CONNECT;
Sean Hefty628e5f62006-11-30 16:44:16 -08001250 return id_priv;
1251err:
1252 rdma_destroy_id(id);
1253 return NULL;
1254}
1255
Sean Heftya1b1b612006-11-30 16:33:14 -08001256static void cma_set_req_event_data(struct rdma_cm_event *event,
1257 struct ib_cm_req_event_param *req_data,
1258 void *private_data, int offset)
1259{
1260 event->param.conn.private_data = private_data + offset;
1261 event->param.conn.private_data_len = IB_CM_REQ_PRIVATE_DATA_SIZE - offset;
1262 event->param.conn.responder_resources = req_data->responder_resources;
1263 event->param.conn.initiator_depth = req_data->initiator_depth;
1264 event->param.conn.flow_control = req_data->flow_control;
1265 event->param.conn.retry_count = req_data->retry_count;
1266 event->param.conn.rnr_retry_count = req_data->rnr_retry_count;
1267 event->param.conn.srq = req_data->srq;
1268 event->param.conn.qp_num = req_data->remote_qpn;
1269}
1270
Hefty, Sean95954802011-10-06 09:32:33 -07001271static int cma_check_req_qp_type(struct rdma_cm_id *id, struct ib_cm_event *ib_event)
1272{
Sean Hefty4dd81e82012-06-14 20:49:09 +00001273 return (((ib_event->event == IB_CM_REQ_RECEIVED) &&
Hefty, Sean95954802011-10-06 09:32:33 -07001274 (ib_event->param.req_rcvd.qp_type == id->qp_type)) ||
1275 ((ib_event->event == IB_CM_SIDR_REQ_RECEIVED) &&
1276 (id->qp_type == IB_QPT_UD)) ||
1277 (!id->qp_type));
1278}
1279
/*
 * IB CM handler for incoming connection requests on a listening id.
 * Creates a new connection id (UD or connected mode), binds it to a
 * device, and delivers CONNECT_REQUEST to the user.  On RoCE (IBoE),
 * source MACs for the primary/alternate paths are resolved and pushed
 * into the CM address vectors before sending the MRA.
 *
 * Locking: listen_id->handler_mutex is held by cma_disable_callback();
 * conn_id->handler_mutex is taken nested.  The global lock guards the
 * cm_id against a concurrent rdma_destroy_id() while the MRA is sent.
 * Error unwinding runs backwards through err3/err2/err1.
 */
static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
{
	struct rdma_id_private *listen_id, *conn_id;
	struct rdma_cm_event event;
	int offset, ret;
	u8 smac[ETH_ALEN];
	u8 alt_smac[ETH_ALEN];
	u8 *psmac = smac;
	u8 *palt_smac = alt_smac;
	int is_iboe = ((rdma_node_get_transport(cm_id->device->node_type) ==
			RDMA_TRANSPORT_IB) &&
		       (rdma_port_get_link_layer(cm_id->device,
			ib_event->param.req_rcvd.port) ==
			IB_LINK_LAYER_ETHERNET));

	listen_id = cm_id->context;
	if (!cma_check_req_qp_type(&listen_id->id, ib_event))
		return -EINVAL;

	if (cma_disable_callback(listen_id, RDMA_CM_LISTEN))
		return -ECONNABORTED;

	memset(&event, 0, sizeof event);
	offset = cma_user_data_offset(listen_id);
	event.event = RDMA_CM_EVENT_CONNECT_REQUEST;
	if (ib_event->event == IB_CM_SIDR_REQ_RECEIVED) {
		conn_id = cma_new_udp_id(&listen_id->id, ib_event);
		event.param.ud.private_data = ib_event->private_data + offset;
		event.param.ud.private_data_len =
			IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE - offset;
	} else {
		conn_id = cma_new_conn_id(&listen_id->id, ib_event);
		cma_set_req_event_data(&event, &ib_event->param.req_rcvd,
				       ib_event->private_data, offset);
	}
	if (!conn_id) {
		ret = -ENOMEM;
		goto err1;
	}

	mutex_lock_nested(&conn_id->handler_mutex, SINGLE_DEPTH_NESTING);
	ret = cma_acquire_dev(conn_id, listen_id);
	if (ret)
		goto err2;

	conn_id->cm_id.ib = cm_id;
	cm_id->context = conn_id;
	cm_id->cm_handler = cma_ib_handler;

	/*
	 * Protect against the user destroying conn_id from another thread
	 * until we're done accessing it.
	 */
	atomic_inc(&conn_id->refcount);
	ret = conn_id->id.event_handler(&conn_id->id, &event);
	if (ret)
		goto err3;

	/* Resolve source MACs for RoCE before updating the CM AVs. */
	if (is_iboe) {
		if (ib_event->param.req_rcvd.primary_path != NULL)
			rdma_addr_find_smac_by_sgid(
				&ib_event->param.req_rcvd.primary_path->sgid,
				psmac, NULL);
		else
			psmac = NULL;
		if (ib_event->param.req_rcvd.alternate_path != NULL)
			rdma_addr_find_smac_by_sgid(
				&ib_event->param.req_rcvd.alternate_path->sgid,
				palt_smac, NULL);
		else
			palt_smac = NULL;
	}
	/*
	 * Acquire mutex to prevent user executing rdma_destroy_id()
	 * while we're accessing the cm_id.
	 */
	mutex_lock(&lock);
	if (is_iboe)
		ib_update_cm_av(cm_id, psmac, palt_smac);
	if (cma_comp(conn_id, RDMA_CM_CONNECT) &&
	    (conn_id->id.qp_type != IB_QPT_UD))
		ib_send_cm_mra(cm_id, CMA_CM_MRA_SETTING, NULL, 0);
	mutex_unlock(&lock);
	mutex_unlock(&conn_id->handler_mutex);
	mutex_unlock(&listen_id->handler_mutex);
	cma_deref_id(conn_id);
	return 0;

err3:
	cma_deref_id(conn_id);
	/* Destroy the CM ID by returning a non-zero value. */
	conn_id->cm_id.ib = NULL;
err2:
	cma_exch(conn_id, RDMA_CM_DESTROYING);
	mutex_unlock(&conn_id->handler_mutex);
err1:
	mutex_unlock(&listen_id->handler_mutex);
	if (conn_id)
		rdma_destroy_id(&conn_id->id);
	return ret;
}
1381
/*
 * rdma_get_service_id - return the IB service id for @id / @addr.
 *
 * AF_IB addresses carry the service id directly; for IP addresses it is
 * synthesized from the id's port space and the address's port number.
 */
__be64 rdma_get_service_id(struct rdma_cm_id *id, struct sockaddr *addr)
{
	if (addr->sa_family == AF_IB)
		return ((struct sockaddr_ib *) addr)->sib_sid;

	return cpu_to_be64(((u64)id->ps << 16) + be16_to_cpu(cma_port(addr)));
}
EXPORT_SYMBOL(rdma_get_service_id);
Sean Heftye51060f2006-06-17 20:37:29 -07001390
/*
 * Build the private-data compare data/mask used to demultiplex incoming
 * REQs among listeners sharing a service id.  The IP-version nibble is
 * always matched; the destination address is matched too unless the
 * listener is bound to a wildcard address.
 */
static void cma_set_compare_data(enum rdma_port_space ps, struct sockaddr *addr,
				 struct ib_cm_compare_data *compare)
{
	struct cma_hdr *cma_data, *cma_mask;
	__be32 ip4_addr;
	struct in6_addr ip6_addr;

	memset(compare, 0, sizeof *compare);
	cma_data = (void *) compare->data;
	cma_mask = (void *) compare->mask;

	switch (addr->sa_family) {
	case AF_INET:
		ip4_addr = ((struct sockaddr_in *) addr)->sin_addr.s_addr;
		cma_set_ip_ver(cma_data, 4);
		cma_set_ip_ver(cma_mask, 0xF);	/* match the full version nibble */
		if (!cma_any_addr(addr)) {
			cma_data->dst_addr.ip4.addr = ip4_addr;
			cma_mask->dst_addr.ip4.addr = htonl(~0);
		}
		break;
	case AF_INET6:
		ip6_addr = ((struct sockaddr_in6 *) addr)->sin6_addr;
		cma_set_ip_ver(cma_data, 6);
		cma_set_ip_ver(cma_mask, 0xF);
		if (!cma_any_addr(addr)) {
			cma_data->dst_addr.ip6 = ip6_addr;
			memset(&cma_mask->dst_addr.ip6, 0xFF,
			       sizeof cma_mask->dst_addr.ip6);
		}
		break;
	default:
		break;
	}
}
1426
/*
 * iWarp CM event handler for connected ids: translate iw_cm events into
 * rdma_cm events.  CONNECT_REPLY updates the id's resolved local/remote
 * addresses and maps the iWarp status to ESTABLISHED / REJECTED /
 * UNREACHABLE / CONNECT_ERROR.  As in cma_ib_handler(), a non-zero
 * return from the user's handler destroys the id, dropping
 * handler_mutex first.
 */
static int cma_iw_handler(struct iw_cm_id *iw_id, struct iw_cm_event *iw_event)
{
	struct rdma_id_private *id_priv = iw_id->context;
	struct rdma_cm_event event;
	int ret = 0;
	struct sockaddr *laddr = (struct sockaddr *)&iw_event->local_addr;
	struct sockaddr *raddr = (struct sockaddr *)&iw_event->remote_addr;

	if (cma_disable_callback(id_priv, RDMA_CM_CONNECT))
		return 0;

	memset(&event, 0, sizeof event);
	switch (iw_event->event) {
	case IW_CM_EVENT_CLOSE:
		event.event = RDMA_CM_EVENT_DISCONNECTED;
		break;
	case IW_CM_EVENT_CONNECT_REPLY:
		memcpy(cma_src_addr(id_priv), laddr,
		       rdma_addr_size(laddr));
		memcpy(cma_dst_addr(id_priv), raddr,
		       rdma_addr_size(raddr));
		switch (iw_event->status) {
		case 0:
			event.event = RDMA_CM_EVENT_ESTABLISHED;
			event.param.conn.initiator_depth = iw_event->ird;
			event.param.conn.responder_resources = iw_event->ord;
			break;
		case -ECONNRESET:
		case -ECONNREFUSED:
			event.event = RDMA_CM_EVENT_REJECTED;
			break;
		case -ETIMEDOUT:
			event.event = RDMA_CM_EVENT_UNREACHABLE;
			break;
		default:
			event.event = RDMA_CM_EVENT_CONNECT_ERROR;
			break;
		}
		break;
	case IW_CM_EVENT_ESTABLISHED:
		event.event = RDMA_CM_EVENT_ESTABLISHED;
		event.param.conn.initiator_depth = iw_event->ird;
		event.param.conn.responder_resources = iw_event->ord;
		break;
	default:
		BUG_ON(1);	/* unexpected iw_cm event type */
	}

	event.status = iw_event->status;
	event.param.conn.private_data = iw_event->private_data;
	event.param.conn.private_data_len = iw_event->private_data_len;
	ret = id_priv->id.event_handler(&id_priv->id, &event);
	if (ret) {
		/* Destroy the CM ID by returning a non-zero value. */
		id_priv->cm_id.iw = NULL;
		cma_exch(id_priv, RDMA_CM_DESTROYING);
		mutex_unlock(&id_priv->handler_mutex);
		rdma_destroy_id(&id_priv->id);
		return ret;
	}

	mutex_unlock(&id_priv->handler_mutex);
	return ret;
}
1491
1492static int iw_conn_req_handler(struct iw_cm_id *cm_id,
1493 struct iw_cm_event *iw_event)
1494{
1495 struct rdma_cm_id *new_cm_id;
1496 struct rdma_id_private *listen_id, *conn_id;
Sean Heftya1b1b612006-11-30 16:33:14 -08001497 struct rdma_cm_event event;
Tom Tucker07ebafb2006-08-03 16:02:42 -05001498 int ret;
Steve Wise8d8293c2007-10-29 11:34:05 -05001499 struct ib_device_attr attr;
Steve Wise24d44a32013-07-04 16:10:44 +05301500 struct sockaddr *laddr = (struct sockaddr *)&iw_event->local_addr;
1501 struct sockaddr *raddr = (struct sockaddr *)&iw_event->remote_addr;
Tom Tucker07ebafb2006-08-03 16:02:42 -05001502
1503 listen_id = cm_id->context;
Nir Muchtar550e5ca2011-05-20 11:46:11 -07001504 if (cma_disable_callback(listen_id, RDMA_CM_LISTEN))
Sean Hefty8aa08602007-05-07 11:49:00 -07001505 return -ECONNABORTED;
Tom Tucker07ebafb2006-08-03 16:02:42 -05001506
1507 /* Create a new RDMA id for the new IW CM ID */
1508 new_cm_id = rdma_create_id(listen_id->id.event_handler,
1509 listen_id->id.context,
Sean Heftyb26f9b92010-04-01 17:08:41 +00001510 RDMA_PS_TCP, IB_QPT_RC);
Julia Lawall10f32062008-04-16 21:09:25 -07001511 if (IS_ERR(new_cm_id)) {
Tom Tucker07ebafb2006-08-03 16:02:42 -05001512 ret = -ENOMEM;
1513 goto out;
1514 }
1515 conn_id = container_of(new_cm_id, struct rdma_id_private, id);
Or Gerlitzde910bd2008-07-14 23:48:53 -07001516 mutex_lock_nested(&conn_id->handler_mutex, SINGLE_DEPTH_NESTING);
Nir Muchtar550e5ca2011-05-20 11:46:11 -07001517 conn_id->state = RDMA_CM_CONNECT;
Tom Tucker07ebafb2006-08-03 16:02:42 -05001518
Matan Barakdd5f03b2013-12-12 18:03:11 +02001519 ret = rdma_translate_ip(laddr, &conn_id->id.route.addr.dev_addr, NULL);
Tom Tucker07ebafb2006-08-03 16:02:42 -05001520 if (ret) {
Or Gerlitzde910bd2008-07-14 23:48:53 -07001521 mutex_unlock(&conn_id->handler_mutex);
Tom Tucker07ebafb2006-08-03 16:02:42 -05001522 rdma_destroy_id(new_cm_id);
1523 goto out;
1524 }
1525
Doug Ledfordbe9130c2013-09-24 17:16:28 -04001526 ret = cma_acquire_dev(conn_id, listen_id);
Tom Tucker07ebafb2006-08-03 16:02:42 -05001527 if (ret) {
Or Gerlitzde910bd2008-07-14 23:48:53 -07001528 mutex_unlock(&conn_id->handler_mutex);
Tom Tucker07ebafb2006-08-03 16:02:42 -05001529 rdma_destroy_id(new_cm_id);
1530 goto out;
1531 }
1532
1533 conn_id->cm_id.iw = cm_id;
1534 cm_id->context = conn_id;
1535 cm_id->cm_handler = cma_iw_handler;
1536
Steve Wise24d44a32013-07-04 16:10:44 +05301537 memcpy(cma_src_addr(conn_id), laddr, rdma_addr_size(laddr));
1538 memcpy(cma_dst_addr(conn_id), raddr, rdma_addr_size(raddr));
Tom Tucker07ebafb2006-08-03 16:02:42 -05001539
Steve Wise8d8293c2007-10-29 11:34:05 -05001540 ret = ib_query_device(conn_id->id.device, &attr);
1541 if (ret) {
Or Gerlitzde910bd2008-07-14 23:48:53 -07001542 mutex_unlock(&conn_id->handler_mutex);
Steve Wise8d8293c2007-10-29 11:34:05 -05001543 rdma_destroy_id(new_cm_id);
1544 goto out;
1545 }
1546
Sean Heftya1b1b612006-11-30 16:33:14 -08001547 memset(&event, 0, sizeof event);
1548 event.event = RDMA_CM_EVENT_CONNECT_REQUEST;
1549 event.param.conn.private_data = iw_event->private_data;
1550 event.param.conn.private_data_len = iw_event->private_data_len;
Kumar Sanghvi3ebeebc2011-09-25 20:17:43 +05301551 event.param.conn.initiator_depth = iw_event->ird;
1552 event.param.conn.responder_resources = iw_event->ord;
Sean Hefty25ae21a2011-02-23 08:11:32 -08001553
1554 /*
1555 * Protect against the user destroying conn_id from another thread
1556 * until we're done accessing it.
1557 */
1558 atomic_inc(&conn_id->refcount);
Sean Heftya1b1b612006-11-30 16:33:14 -08001559 ret = conn_id->id.event_handler(&conn_id->id, &event);
Tom Tucker07ebafb2006-08-03 16:02:42 -05001560 if (ret) {
1561 /* User wants to destroy the CM ID */
1562 conn_id->cm_id.iw = NULL;
Nir Muchtar550e5ca2011-05-20 11:46:11 -07001563 cma_exch(conn_id, RDMA_CM_DESTROYING);
Or Gerlitzde910bd2008-07-14 23:48:53 -07001564 mutex_unlock(&conn_id->handler_mutex);
Sean Hefty25ae21a2011-02-23 08:11:32 -08001565 cma_deref_id(conn_id);
Tom Tucker07ebafb2006-08-03 16:02:42 -05001566 rdma_destroy_id(&conn_id->id);
Or Gerlitzde910bd2008-07-14 23:48:53 -07001567 goto out;
Tom Tucker07ebafb2006-08-03 16:02:42 -05001568 }
1569
Or Gerlitzde910bd2008-07-14 23:48:53 -07001570 mutex_unlock(&conn_id->handler_mutex);
Sean Hefty25ae21a2011-02-23 08:11:32 -08001571 cma_deref_id(conn_id);
Or Gerlitzde910bd2008-07-14 23:48:53 -07001572
Tom Tucker07ebafb2006-08-03 16:02:42 -05001573out:
Or Gerlitzde910bd2008-07-14 23:48:53 -07001574 mutex_unlock(&listen_id->handler_mutex);
Tom Tucker07ebafb2006-08-03 16:02:42 -05001575 return ret;
1576}
1577
Sean Heftye51060f2006-06-17 20:37:29 -07001578static int cma_ib_listen(struct rdma_id_private *id_priv)
1579{
1580 struct ib_cm_compare_data compare_data;
1581 struct sockaddr *addr;
Jack Morgenstein0c9361f2011-07-17 10:46:47 +00001582 struct ib_cm_id *id;
Sean Heftye51060f2006-06-17 20:37:29 -07001583 __be64 svc_id;
1584 int ret;
1585
Jack Morgenstein0c9361f2011-07-17 10:46:47 +00001586 id = ib_create_cm_id(id_priv->id.device, cma_req_handler, id_priv);
1587 if (IS_ERR(id))
1588 return PTR_ERR(id);
1589
1590 id_priv->cm_id.ib = id;
Sean Heftye51060f2006-06-17 20:37:29 -07001591
Sean Heftyf4753832013-05-29 10:09:14 -07001592 addr = cma_src_addr(id_priv);
Sean Heftycf539362013-05-29 10:09:28 -07001593 svc_id = rdma_get_service_id(&id_priv->id, addr);
Sean Hefty406b6a22012-06-14 20:31:39 +00001594 if (cma_any_addr(addr) && !id_priv->afonly)
Sean Heftye51060f2006-06-17 20:37:29 -07001595 ret = ib_cm_listen(id_priv->cm_id.ib, svc_id, 0, NULL);
1596 else {
1597 cma_set_compare_data(id_priv->id.ps, addr, &compare_data);
1598 ret = ib_cm_listen(id_priv->cm_id.ib, svc_id, 0, &compare_data);
1599 }
1600
1601 if (ret) {
1602 ib_destroy_cm_id(id_priv->cm_id.ib);
1603 id_priv->cm_id.ib = NULL;
1604 }
1605
1606 return ret;
1607}
1608
Tom Tucker07ebafb2006-08-03 16:02:42 -05001609static int cma_iw_listen(struct rdma_id_private *id_priv, int backlog)
1610{
1611 int ret;
Jack Morgenstein0c9361f2011-07-17 10:46:47 +00001612 struct iw_cm_id *id;
Tom Tucker07ebafb2006-08-03 16:02:42 -05001613
Jack Morgenstein0c9361f2011-07-17 10:46:47 +00001614 id = iw_create_cm_id(id_priv->id.device,
1615 iw_conn_req_handler,
1616 id_priv);
1617 if (IS_ERR(id))
1618 return PTR_ERR(id);
1619
1620 id_priv->cm_id.iw = id;
Tom Tucker07ebafb2006-08-03 16:02:42 -05001621
Steve Wise24d44a32013-07-04 16:10:44 +05301622 memcpy(&id_priv->cm_id.iw->local_addr, cma_src_addr(id_priv),
1623 rdma_addr_size(cma_src_addr(id_priv)));
Tom Tucker07ebafb2006-08-03 16:02:42 -05001624
1625 ret = iw_cm_listen(id_priv->cm_id.iw, backlog);
1626
1627 if (ret) {
1628 iw_destroy_cm_id(id_priv->cm_id.iw);
1629 id_priv->cm_id.iw = NULL;
1630 }
1631
1632 return ret;
1633}
1634
Sean Heftye51060f2006-06-17 20:37:29 -07001635static int cma_listen_handler(struct rdma_cm_id *id,
1636 struct rdma_cm_event *event)
1637{
1638 struct rdma_id_private *id_priv = id->context;
1639
1640 id->context = id_priv->id.context;
1641 id->event_handler = id_priv->id.event_handler;
1642 return id_priv->id.event_handler(id, event);
1643}
1644
/*
 * Create an internal listening id on @cma_dev that mirrors @id_priv.
 * Used when the user listens without a specific device so every device
 * gets a listener.  Called with the global 'lock' mutex held (see
 * cma_listen_on_all()).  Errors are logged but not propagated.
 */
static void cma_listen_on_dev(struct rdma_id_private *id_priv,
			      struct cma_device *cma_dev)
{
	struct rdma_id_private *dev_id_priv;
	struct rdma_cm_id *id;
	int ret;

	/* AF_IB ids can only listen on IB transport devices. */
	if (cma_family(id_priv) == AF_IB &&
	    rdma_node_get_transport(cma_dev->device->node_type) != RDMA_TRANSPORT_IB)
		return;

	id = rdma_create_id(cma_listen_handler, id_priv, id_priv->id.ps,
			    id_priv->id.qp_type);
	if (IS_ERR(id))
		return;

	dev_id_priv = container_of(id, struct rdma_id_private, id);

	dev_id_priv->state = RDMA_CM_ADDR_BOUND;
	/* The internal id listens on the same source address as the user's. */
	memcpy(cma_src_addr(dev_id_priv), cma_src_addr(id_priv),
	       rdma_addr_size(cma_src_addr(id_priv)));

	cma_attach_to_dev(dev_id_priv, cma_dev);
	list_add_tail(&dev_id_priv->listen_list, &id_priv->listen_list);
	/* The internal id holds a reference on the user's id. */
	atomic_inc(&id_priv->refcount);
	dev_id_priv->internal_id = 1;
	dev_id_priv->afonly = id_priv->afonly;

	ret = rdma_listen(id, id_priv->backlog);
	if (ret)
		printk(KERN_WARNING "RDMA CMA: cma_listen_on_dev, error %d, "
		       "listening on device %s\n", ret, cma_dev->device->name);
}
1678
1679static void cma_listen_on_all(struct rdma_id_private *id_priv)
1680{
1681 struct cma_device *cma_dev;
1682
1683 mutex_lock(&lock);
1684 list_add_tail(&id_priv->list, &listen_any_list);
1685 list_for_each_entry(cma_dev, &dev_list, list)
1686 cma_listen_on_dev(id_priv, cma_dev);
1687 mutex_unlock(&lock);
1688}
1689
Sean Heftya81c9942007-08-08 15:51:06 -07001690void rdma_set_service_type(struct rdma_cm_id *id, int tos)
1691{
1692 struct rdma_id_private *id_priv;
1693
1694 id_priv = container_of(id, struct rdma_id_private, id);
1695 id_priv->tos = (u8) tos;
1696}
1697EXPORT_SYMBOL(rdma_set_service_type);
1698
Sean Heftye51060f2006-06-17 20:37:29 -07001699static void cma_query_handler(int status, struct ib_sa_path_rec *path_rec,
1700 void *context)
1701{
1702 struct cma_work *work = context;
1703 struct rdma_route *route;
1704
1705 route = &work->id->id.route;
1706
1707 if (!status) {
1708 route->num_paths = 1;
1709 *route->path_rec = *path_rec;
1710 } else {
Nir Muchtar550e5ca2011-05-20 11:46:11 -07001711 work->old_state = RDMA_CM_ROUTE_QUERY;
1712 work->new_state = RDMA_CM_ADDR_RESOLVED;
Sean Heftye51060f2006-06-17 20:37:29 -07001713 work->event.event = RDMA_CM_EVENT_ROUTE_ERROR;
Sean Hefty8f0472d2006-09-29 11:57:09 -07001714 work->event.status = status;
Sean Heftye51060f2006-06-17 20:37:29 -07001715 }
1716
1717 queue_work(cma_wq, &work->work);
1718}
1719
/*
 * Issue an asynchronous IB SA path record query for this id's route.
 * The path record is built from the resolved device address plus
 * per-address-family QoS fields; cma_query_handler() runs on completion
 * with @work.  Returns 0 on successful submission, negative on error.
 */
static int cma_query_ib_route(struct rdma_id_private *id_priv, int timeout_ms,
			      struct cma_work *work)
{
	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
	struct ib_sa_path_rec path_rec;
	ib_sa_comp_mask comp_mask;
	struct sockaddr_in6 *sin6;
	struct sockaddr_ib *sib;

	memset(&path_rec, 0, sizeof path_rec);
	rdma_addr_get_sgid(dev_addr, &path_rec.sgid);
	rdma_addr_get_dgid(dev_addr, &path_rec.dgid);
	path_rec.pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr));
	path_rec.numb_path = 1;
	path_rec.reversible = 1;
	path_rec.service_id = rdma_get_service_id(&id_priv->id, cma_dst_addr(id_priv));

	comp_mask = IB_SA_PATH_REC_DGID | IB_SA_PATH_REC_SGID |
		    IB_SA_PATH_REC_PKEY | IB_SA_PATH_REC_NUMB_PATH |
		    IB_SA_PATH_REC_REVERSIBLE | IB_SA_PATH_REC_SERVICE_ID;

	/* QoS information is carried differently per address family. */
	switch (cma_family(id_priv)) {
	case AF_INET:
		path_rec.qos_class = cpu_to_be16((u16) id_priv->tos);
		comp_mask |= IB_SA_PATH_REC_QOS_CLASS;
		break;
	case AF_INET6:
		sin6 = (struct sockaddr_in6 *) cma_src_addr(id_priv);
		/* Derive the traffic class from the IPv6 flow info word. */
		path_rec.traffic_class = (u8) (be32_to_cpu(sin6->sin6_flowinfo) >> 20);
		comp_mask |= IB_SA_PATH_REC_TRAFFIC_CLASS;
		break;
	case AF_IB:
		sib = (struct sockaddr_ib *) cma_src_addr(id_priv);
		path_rec.traffic_class = (u8) (be32_to_cpu(sib->sib_flowinfo) >> 20);
		comp_mask |= IB_SA_PATH_REC_TRAFFIC_CLASS;
		break;
	}

	id_priv->query_id = ib_sa_path_rec_get(&sa_client, id_priv->id.device,
					       id_priv->id.port_num, &path_rec,
					       comp_mask, timeout_ms,
					       GFP_KERNEL, cma_query_handler,
					       work, &id_priv->query);

	return (id_priv->query_id < 0) ? id_priv->query_id : 0;
}
1766
/*
 * Deferred event delivery for cma_work items.  Attempts the state
 * transition recorded in the work item and, only if it succeeds,
 * reports work->event to the user's handler.  A non-zero handler
 * return destroys the id.  Always drops a reference on the id and
 * frees the work item.
 */
static void cma_work_handler(struct work_struct *_work)
{
	struct cma_work *work = container_of(_work, struct cma_work, work);
	struct rdma_id_private *id_priv = work->id;
	int destroy = 0;

	mutex_lock(&id_priv->handler_mutex);
	/* If the id already left old_state, silently drop the event. */
	if (!cma_comp_exch(id_priv, work->old_state, work->new_state))
		goto out;

	if (id_priv->id.event_handler(&id_priv->id, &work->event)) {
		cma_exch(id_priv, RDMA_CM_DESTROYING);
		destroy = 1;
	}
out:
	/* Unlock before destroying: rdma_destroy_id takes handler_mutex. */
	mutex_unlock(&id_priv->handler_mutex);
	cma_deref_id(id_priv);
	if (destroy)
		rdma_destroy_id(&id_priv->id);
	kfree(work);
}
1788
/*
 * Deferred event delivery for cma_ndev_work items.  Unlike
 * cma_work_handler() no state transition is performed; the event is
 * simply dropped if the id is already being destroyed or removed.
 */
static void cma_ndev_work_handler(struct work_struct *_work)
{
	struct cma_ndev_work *work = container_of(_work, struct cma_ndev_work, work);
	struct rdma_id_private *id_priv = work->id;
	int destroy = 0;

	mutex_lock(&id_priv->handler_mutex);
	if (id_priv->state == RDMA_CM_DESTROYING ||
	    id_priv->state == RDMA_CM_DEVICE_REMOVAL)
		goto out;

	if (id_priv->id.event_handler(&id_priv->id, &work->event)) {
		/* Non-zero handler return requests destruction of the id. */
		cma_exch(id_priv, RDMA_CM_DESTROYING);
		destroy = 1;
	}

out:
	mutex_unlock(&id_priv->handler_mutex);
	cma_deref_id(id_priv);
	if (destroy)
		rdma_destroy_id(&id_priv->id);
	kfree(work);
}
1812
Sean Heftye51060f2006-06-17 20:37:29 -07001813static int cma_resolve_ib_route(struct rdma_id_private *id_priv, int timeout_ms)
1814{
1815 struct rdma_route *route = &id_priv->id.route;
1816 struct cma_work *work;
1817 int ret;
1818
1819 work = kzalloc(sizeof *work, GFP_KERNEL);
1820 if (!work)
1821 return -ENOMEM;
1822
1823 work->id = id_priv;
David Howellsc4028952006-11-22 14:57:56 +00001824 INIT_WORK(&work->work, cma_work_handler);
Nir Muchtar550e5ca2011-05-20 11:46:11 -07001825 work->old_state = RDMA_CM_ROUTE_QUERY;
1826 work->new_state = RDMA_CM_ROUTE_RESOLVED;
Sean Heftye51060f2006-06-17 20:37:29 -07001827 work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
1828
1829 route->path_rec = kmalloc(sizeof *route->path_rec, GFP_KERNEL);
1830 if (!route->path_rec) {
1831 ret = -ENOMEM;
1832 goto err1;
1833 }
1834
1835 ret = cma_query_ib_route(id_priv, timeout_ms, work);
1836 if (ret)
1837 goto err2;
1838
1839 return 0;
1840err2:
1841 kfree(route->path_rec);
1842 route->path_rec = NULL;
1843err1:
1844 kfree(work);
1845 return ret;
1846}
1847
/*
 * Supply pre-resolved IB path records for a connection, moving the id
 * from ADDR_RESOLVED straight to ROUTE_RESOLVED with no SA query.  The
 * records are copied; the caller keeps ownership of @path_rec.
 *
 * NOTE(review): num_paths is not validated here, so the kmemdup() size
 * is caller-controlled — confirm all callers bound it.
 */
int rdma_set_ib_paths(struct rdma_cm_id *id,
		      struct ib_sa_path_rec *path_rec, int num_paths)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED,
			   RDMA_CM_ROUTE_RESOLVED))
		return -EINVAL;

	id->route.path_rec = kmemdup(path_rec, sizeof *path_rec * num_paths,
				     GFP_KERNEL);
	if (!id->route.path_rec) {
		ret = -ENOMEM;
		goto err;
	}

	id->route.num_paths = num_paths;
	return 0;
err:
	/* Roll the state back so the caller can retry or resolve a route. */
	cma_comp_exch(id_priv, RDMA_CM_ROUTE_RESOLVED, RDMA_CM_ADDR_RESOLVED);
	return ret;
}
EXPORT_SYMBOL(rdma_set_ib_paths);
1873
Tom Tucker07ebafb2006-08-03 16:02:42 -05001874static int cma_resolve_iw_route(struct rdma_id_private *id_priv, int timeout_ms)
1875{
1876 struct cma_work *work;
1877
1878 work = kzalloc(sizeof *work, GFP_KERNEL);
1879 if (!work)
1880 return -ENOMEM;
1881
1882 work->id = id_priv;
David Howellsc4028952006-11-22 14:57:56 +00001883 INIT_WORK(&work->work, cma_work_handler);
Nir Muchtar550e5ca2011-05-20 11:46:11 -07001884 work->old_state = RDMA_CM_ROUTE_QUERY;
1885 work->new_state = RDMA_CM_ROUTE_RESOLVED;
Tom Tucker07ebafb2006-08-03 16:02:42 -05001886 work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
1887 queue_work(cma_wq, &work->work);
1888 return 0;
1889}
1890
Eyal Perryeb072c42013-11-06 15:37:24 +02001891static int iboe_tos_to_sl(struct net_device *ndev, int tos)
1892{
1893 int prio;
1894 struct net_device *dev;
1895
1896 prio = rt_tos2priority(tos);
1897 dev = ndev->priv_flags & IFF_802_1Q_VLAN ?
1898 vlan_dev_real_dev(ndev) : ndev;
1899
1900 if (dev->num_tc)
1901 return netdev_get_prio_tc_map(dev, prio);
1902
1903#if IS_ENABLED(CONFIG_VLAN_8021Q)
1904 if (ndev->priv_flags & IFF_802_1Q_VLAN)
1905 return (vlan_dev_get_egress_qos_mask(ndev, prio) &
1906 VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
1907#endif
1908 return 0;
1909}
1910
/*
 * Resolve the route for an RoCE (IBoE) connection.  No SA query is
 * needed: the path record is synthesized from the bound netdev's
 * MAC/VLAN state and a ROUTE_RESOLVED event is queued to cma_wq.
 */
static int cma_resolve_iboe_route(struct rdma_id_private *id_priv)
{
	struct rdma_route *route = &id_priv->id.route;
	struct rdma_addr *addr = &route->addr;
	struct cma_work *work;
	int ret;
	struct net_device *ndev = NULL;


	work = kzalloc(sizeof *work, GFP_KERNEL);
	if (!work)
		return -ENOMEM;

	work->id = id_priv;
	INIT_WORK(&work->work, cma_work_handler);

	route->path_rec = kzalloc(sizeof *route->path_rec, GFP_KERNEL);
	if (!route->path_rec) {
		ret = -ENOMEM;
		goto err1;
	}

	route->num_paths = 1;

	/* Route resolution requires the id to be bound to a netdev. */
	if (addr->dev_addr.bound_dev_if)
		ndev = dev_get_by_index(&init_net, addr->dev_addr.bound_dev_if);
	if (!ndev) {
		ret = -ENODEV;
		goto err2;
	}

	route->path_rec->vlan_id = rdma_vlan_dev_vlan_id(ndev);
	memcpy(route->path_rec->dmac, addr->dev_addr.dst_dev_addr, ETH_ALEN);
	memcpy(route->path_rec->smac, ndev->dev_addr, ndev->addr_len);

	/* GIDs are derived from the MAC addresses plus the VLAN id. */
	iboe_mac_vlan_to_ll(&route->path_rec->sgid, addr->dev_addr.src_dev_addr,
			    route->path_rec->vlan_id);
	iboe_mac_vlan_to_ll(&route->path_rec->dgid, addr->dev_addr.dst_dev_addr,
			    route->path_rec->vlan_id);

	route->path_rec->hop_limit = 1;
	route->path_rec->reversible = 1;
	route->path_rec->pkey = cpu_to_be16(0xffff);
	route->path_rec->mtu_selector = IB_SA_EQ;
	route->path_rec->sl = iboe_tos_to_sl(ndev, id_priv->tos);
	route->path_rec->mtu = iboe_get_mtu(ndev->mtu);
	route->path_rec->rate_selector = IB_SA_EQ;
	route->path_rec->rate = iboe_get_rate(ndev);
	/* Done with the netdev; drop the reference from dev_get_by_index. */
	dev_put(ndev);
	route->path_rec->packet_life_time_selector = IB_SA_EQ;
	route->path_rec->packet_life_time = CMA_IBOE_PACKET_LIFETIME;
	if (!route->path_rec->mtu) {
		ret = -EINVAL;
		goto err2;
	}

	work->old_state = RDMA_CM_ROUTE_QUERY;
	work->new_state = RDMA_CM_ROUTE_RESOLVED;
	work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
	work->event.status = 0;

	queue_work(cma_wq, &work->work);

	return 0;

err2:
	kfree(route->path_rec);
	route->path_rec = NULL;
err1:
	kfree(work);
	return ret;
}
1983
/*
 * rdma_resolve_route - resolve the route to the destination address,
 * dispatching on transport (IB path query, RoCE synthesis, or the
 * iWARP no-op).  Moves the id ADDR_RESOLVED -> ROUTE_QUERY and holds a
 * reference that the deferred work path releases; both are rolled back
 * on immediate failure.
 */
int rdma_resolve_route(struct rdma_cm_id *id, int timeout_ms)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED, RDMA_CM_ROUTE_QUERY))
		return -EINVAL;

	atomic_inc(&id_priv->refcount);
	switch (rdma_node_get_transport(id->device->node_type)) {
	case RDMA_TRANSPORT_IB:
		/* IB vs. RoCE is decided by the port's link layer. */
		switch (rdma_port_get_link_layer(id->device, id->port_num)) {
		case IB_LINK_LAYER_INFINIBAND:
			ret = cma_resolve_ib_route(id_priv, timeout_ms);
			break;
		case IB_LINK_LAYER_ETHERNET:
			ret = cma_resolve_iboe_route(id_priv);
			break;
		default:
			ret = -ENOSYS;
		}
		break;
	case RDMA_TRANSPORT_IWARP:
		ret = cma_resolve_iw_route(id_priv, timeout_ms);
		break;
	default:
		ret = -ENOSYS;
		break;
	}
	if (ret)
		goto err;

	return 0;
err:
	/* Undo the state change and reference taken above. */
	cma_comp_exch(id_priv, RDMA_CM_ROUTE_QUERY, RDMA_CM_ADDR_RESOLVED);
	cma_deref_id(id_priv);
	return ret;
}
EXPORT_SYMBOL(rdma_resolve_route);
2024
Sean Hefty6a3e3622013-05-29 10:09:13 -07002025static void cma_set_loopback(struct sockaddr *addr)
2026{
2027 switch (addr->sa_family) {
2028 case AF_INET:
2029 ((struct sockaddr_in *) addr)->sin_addr.s_addr = htonl(INADDR_LOOPBACK);
2030 break;
2031 case AF_INET6:
2032 ipv6_addr_set(&((struct sockaddr_in6 *) addr)->sin6_addr,
2033 0, 0, 0, htonl(1));
2034 break;
2035 default:
2036 ib_addr_set(&((struct sockaddr_ib *) addr)->sib_addr,
2037 0, 0, 0, htonl(1));
2038 break;
2039 }
2040}
2041
/*
 * Bind the id to a local device for loopback use: prefer the first
 * device with an active port, falling back to port 1 of the first
 * suitable device.  Programs the chosen port's gid/pkey into the id
 * and sets the source address to the family's loopback address.
 * Takes the global 'lock' mutex.
 */
static int cma_bind_loopback(struct rdma_id_private *id_priv)
{
	struct cma_device *cma_dev, *cur_dev;
	struct ib_port_attr port_attr;
	union ib_gid gid;
	u16 pkey;
	int ret;
	u8 p;

	cma_dev = NULL;
	mutex_lock(&lock);
	list_for_each_entry(cur_dev, &dev_list, list) {
		/* AF_IB ids require an IB transport device. */
		if (cma_family(id_priv) == AF_IB &&
		    rdma_node_get_transport(cur_dev->device->node_type) != RDMA_TRANSPORT_IB)
			continue;

		/* Remember the first acceptable device as a fallback. */
		if (!cma_dev)
			cma_dev = cur_dev;

		for (p = 1; p <= cur_dev->device->phys_port_cnt; ++p) {
			if (!ib_query_port(cur_dev->device, p, &port_attr) &&
			    port_attr.state == IB_PORT_ACTIVE) {
				cma_dev = cur_dev;
				goto port_found;
			}
		}
	}

	if (!cma_dev) {
		ret = -ENODEV;
		goto out;
	}

	/* No active port found anywhere: use port 1 of the fallback device. */
	p = 1;

port_found:
	ret = ib_get_cached_gid(cma_dev->device, p, 0, &gid);
	if (ret)
		goto out;

	ret = ib_get_cached_pkey(cma_dev->device, p, 0, &pkey);
	if (ret)
		goto out;

	id_priv->id.route.addr.dev_addr.dev_type =
		(rdma_port_get_link_layer(cma_dev->device, p) == IB_LINK_LAYER_INFINIBAND) ?
		ARPHRD_INFINIBAND : ARPHRD_ETHER;

	rdma_addr_set_sgid(&id_priv->id.route.addr.dev_addr, &gid);
	ib_addr_set_pkey(&id_priv->id.route.addr.dev_addr, pkey);
	id_priv->id.port_num = p;
	cma_attach_to_dev(id_priv, cma_dev);
	cma_set_loopback(cma_src_addr(id_priv));
out:
	mutex_unlock(&lock);
	return ret;
}
2099
/*
 * Completion callback for rdma_resolve_ip() (see rdma_resolve_addr()).
 * Acquires a device for the id on success and reports ADDR_RESOLVED or
 * ADDR_ERROR to the user's handler; destroys the id if the handler
 * returns non-zero.
 */
static void addr_handler(int status, struct sockaddr *src_addr,
			 struct rdma_dev_addr *dev_addr, void *context)
{
	struct rdma_id_private *id_priv = context;
	struct rdma_cm_event event;

	memset(&event, 0, sizeof event);
	mutex_lock(&id_priv->handler_mutex);
	/* Drop the callback if the id already left ADDR_QUERY. */
	if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_QUERY,
			   RDMA_CM_ADDR_RESOLVED))
		goto out;

	if (!status && !id_priv->cma_dev)
		status = cma_acquire_dev(id_priv, NULL);

	if (status) {
		/* Failure: fall back to ADDR_BOUND and report ADDR_ERROR. */
		if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED,
				   RDMA_CM_ADDR_BOUND))
			goto out;
		event.event = RDMA_CM_EVENT_ADDR_ERROR;
		event.status = status;
	} else {
		memcpy(cma_src_addr(id_priv), src_addr, rdma_addr_size(src_addr));
		event.event = RDMA_CM_EVENT_ADDR_RESOLVED;
	}

	if (id_priv->id.event_handler(&id_priv->id, &event)) {
		/* Handler requested destruction; unlock before destroying. */
		cma_exch(id_priv, RDMA_CM_DESTROYING);
		mutex_unlock(&id_priv->handler_mutex);
		cma_deref_id(id_priv);
		rdma_destroy_id(&id_priv->id);
		return;
	}
out:
	mutex_unlock(&id_priv->handler_mutex);
	cma_deref_id(id_priv);
}
2137
2138static int cma_resolve_loopback(struct rdma_id_private *id_priv)
2139{
2140 struct cma_work *work;
Michael S. Tsirkinf0ee3402006-07-14 00:23:52 -07002141 union ib_gid gid;
Sean Heftye51060f2006-06-17 20:37:29 -07002142 int ret;
2143
2144 work = kzalloc(sizeof *work, GFP_KERNEL);
2145 if (!work)
2146 return -ENOMEM;
2147
2148 if (!id_priv->cma_dev) {
2149 ret = cma_bind_loopback(id_priv);
2150 if (ret)
2151 goto err;
2152 }
2153
Sean Hefty6f8372b2009-11-19 13:26:06 -08002154 rdma_addr_get_sgid(&id_priv->id.route.addr.dev_addr, &gid);
2155 rdma_addr_set_dgid(&id_priv->id.route.addr.dev_addr, &gid);
Sean Heftye51060f2006-06-17 20:37:29 -07002156
Sean Heftye51060f2006-06-17 20:37:29 -07002157 work->id = id_priv;
David Howellsc4028952006-11-22 14:57:56 +00002158 INIT_WORK(&work->work, cma_work_handler);
Nir Muchtar550e5ca2011-05-20 11:46:11 -07002159 work->old_state = RDMA_CM_ADDR_QUERY;
2160 work->new_state = RDMA_CM_ADDR_RESOLVED;
Sean Heftye51060f2006-06-17 20:37:29 -07002161 work->event.event = RDMA_CM_EVENT_ADDR_RESOLVED;
2162 queue_work(cma_wq, &work->work);
2163 return 0;
2164err:
2165 kfree(work);
2166 return ret;
2167}
2168
Sean Heftyf17df3b2013-05-29 10:09:17 -07002169static int cma_resolve_ib_addr(struct rdma_id_private *id_priv)
2170{
2171 struct cma_work *work;
2172 int ret;
2173
2174 work = kzalloc(sizeof *work, GFP_KERNEL);
2175 if (!work)
2176 return -ENOMEM;
2177
2178 if (!id_priv->cma_dev) {
2179 ret = cma_resolve_ib_dev(id_priv);
2180 if (ret)
2181 goto err;
2182 }
2183
2184 rdma_addr_set_dgid(&id_priv->id.route.addr.dev_addr, (union ib_gid *)
2185 &(((struct sockaddr_ib *) &id_priv->id.route.addr.dst_addr)->sib_addr));
2186
2187 work->id = id_priv;
2188 INIT_WORK(&work->work, cma_work_handler);
2189 work->old_state = RDMA_CM_ADDR_QUERY;
2190 work->new_state = RDMA_CM_ADDR_RESOLVED;
2191 work->event.event = RDMA_CM_EVENT_ADDR_RESOLVED;
2192 queue_work(cma_wq, &work->work);
2193 return 0;
2194err:
2195 kfree(work);
2196 return ret;
2197}
2198
Sean Heftye51060f2006-06-17 20:37:29 -07002199static int cma_bind_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
2200 struct sockaddr *dst_addr)
2201{
Sean Heftyd14714d2009-11-19 16:46:25 -08002202 if (!src_addr || !src_addr->sa_family) {
2203 src_addr = (struct sockaddr *) &id->route.addr.src_addr;
Sean Heftyf17df3b2013-05-29 10:09:17 -07002204 src_addr->sa_family = dst_addr->sa_family;
2205 if (dst_addr->sa_family == AF_INET6) {
Sean Heftyd14714d2009-11-19 16:46:25 -08002206 ((struct sockaddr_in6 *) src_addr)->sin6_scope_id =
2207 ((struct sockaddr_in6 *) dst_addr)->sin6_scope_id;
Sean Heftyf17df3b2013-05-29 10:09:17 -07002208 } else if (dst_addr->sa_family == AF_IB) {
2209 ((struct sockaddr_ib *) src_addr)->sib_pkey =
2210 ((struct sockaddr_ib *) dst_addr)->sib_pkey;
Sean Heftyd14714d2009-11-19 16:46:25 -08002211 }
2212 }
2213 return rdma_bind_addr(id, src_addr);
Sean Heftye51060f2006-06-17 20:37:29 -07002214}
2215
/*
 * rdma_resolve_addr - resolve destination (and optional source)
 * addresses into device addresses.  Binds the id first if still IDLE.
 * Wildcard destinations resolve over loopback and AF_IB destinations
 * internally; everything else goes through rdma_resolve_ip() with
 * addr_handler() as the completion callback.
 */
int rdma_resolve_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
		      struct sockaddr *dst_addr, int timeout_ms)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (id_priv->state == RDMA_CM_IDLE) {
		ret = cma_bind_addr(id, src_addr, dst_addr);
		if (ret)
			return ret;
	}

	/* Source and destination address families must match. */
	if (cma_family(id_priv) != dst_addr->sa_family)
		return -EINVAL;

	if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_ADDR_QUERY))
		return -EINVAL;

	/* Reference is released by the completion path or on error below. */
	atomic_inc(&id_priv->refcount);
	memcpy(cma_dst_addr(id_priv), dst_addr, rdma_addr_size(dst_addr));
	if (cma_any_addr(dst_addr)) {
		ret = cma_resolve_loopback(id_priv);
	} else {
		if (dst_addr->sa_family == AF_IB) {
			ret = cma_resolve_ib_addr(id_priv);
		} else {
			ret = rdma_resolve_ip(&addr_client, cma_src_addr(id_priv),
					      dst_addr, &id->route.addr.dev_addr,
					      timeout_ms, addr_handler, id_priv);
		}
	}
	if (ret)
		goto err;

	return 0;
err:
	/* Undo the state change and the reference taken above. */
	cma_comp_exch(id_priv, RDMA_CM_ADDR_QUERY, RDMA_CM_ADDR_BOUND);
	cma_deref_id(id_priv);
	return ret;
}
EXPORT_SYMBOL(rdma_resolve_addr);
2258
Hefty, Seana9bb7912011-05-09 22:06:10 -07002259int rdma_set_reuseaddr(struct rdma_cm_id *id, int reuse)
2260{
2261 struct rdma_id_private *id_priv;
2262 unsigned long flags;
2263 int ret;
2264
2265 id_priv = container_of(id, struct rdma_id_private, id);
2266 spin_lock_irqsave(&id_priv->lock, flags);
Sean Heftyc8dea2f92013-05-29 10:09:08 -07002267 if (reuse || id_priv->state == RDMA_CM_IDLE) {
Hefty, Seana9bb7912011-05-09 22:06:10 -07002268 id_priv->reuseaddr = reuse;
2269 ret = 0;
2270 } else {
2271 ret = -EINVAL;
2272 }
2273 spin_unlock_irqrestore(&id_priv->lock, flags);
2274 return ret;
2275}
2276EXPORT_SYMBOL(rdma_set_reuseaddr);
2277
Sean Hefty68602122012-06-14 20:31:39 +00002278int rdma_set_afonly(struct rdma_cm_id *id, int afonly)
2279{
2280 struct rdma_id_private *id_priv;
2281 unsigned long flags;
2282 int ret;
2283
2284 id_priv = container_of(id, struct rdma_id_private, id);
2285 spin_lock_irqsave(&id_priv->lock, flags);
2286 if (id_priv->state == RDMA_CM_IDLE || id_priv->state == RDMA_CM_ADDR_BOUND) {
2287 id_priv->options |= (1 << CMA_OPTION_AFONLY);
2288 id_priv->afonly = afonly;
2289 ret = 0;
2290 } else {
2291 ret = -EINVAL;
2292 }
2293 spin_unlock_irqrestore(&id_priv->lock, flags);
2294 return ret;
2295}
2296EXPORT_SYMBOL(rdma_set_afonly);
2297
/*
 * Attach @id_priv to @bind_list and stamp the list's port number into the
 * id's source address (family-specific field).
 */
static void cma_bind_port(struct rdma_bind_list *bind_list,
			  struct rdma_id_private *id_priv)
{
	struct sockaddr *addr;
	struct sockaddr_ib *sib;
	u64 sid, mask;
	__be16 port;

	addr = cma_src_addr(id_priv);
	port = htons(bind_list->port);

	switch (addr->sa_family) {
	case AF_INET:
		((struct sockaddr_in *) addr)->sin_port = port;
		break;
	case AF_INET6:
		((struct sockaddr_in6 *) addr)->sin6_port = port;
		break;
	case AF_IB:
		/* For AF_IB the port occupies the low bits of the service
		 * ID: merge it in under the caller's SID mask, then mark
		 * the whole SID as fixed. */
		sib = (struct sockaddr_ib *) addr;
		sid = be64_to_cpu(sib->sib_sid);
		mask = be64_to_cpu(sib->sib_sid_mask);
		sib->sib_sid = cpu_to_be64((sid & mask) | (u64) ntohs(port));
		sib->sib_sid_mask = cpu_to_be64(~0ULL);
		break;
	}
	id_priv->bind_list = bind_list;
	hlist_add_head(&id_priv->node, &bind_list->owners);
}
2327
/*
 * Create a new bind_list for port @snum in port space @ps and attach
 * @id_priv to it.  Returns -EADDRNOTAVAIL if the port is already taken,
 * -ENOMEM on allocation failure.
 */
static int cma_alloc_port(struct idr *ps, struct rdma_id_private *id_priv,
			  unsigned short snum)
{
	struct rdma_bind_list *bind_list;
	int ret;

	bind_list = kzalloc(sizeof *bind_list, GFP_KERNEL);
	if (!bind_list)
		return -ENOMEM;

	/* idr_alloc with range [snum, snum + 1) returns exactly snum on
	 * success, or a negative errno. */
	ret = idr_alloc(ps, bind_list, snum, snum + 1, GFP_KERNEL);
	if (ret < 0)
		goto err;

	bind_list->ps = ps;
	bind_list->port = (unsigned short)ret;
	cma_bind_port(bind_list, id_priv);
	return 0;
err:
	kfree(bind_list);
	/* -ENOSPC from idr_alloc means the id exists: port in use. */
	return ret == -ENOSPC ? -EADDRNOTAVAIL : ret;
}
2350
/*
 * Claim an ephemeral port: start from a random rover in the local port
 * range and probe each candidate once, skipping the most recently freed
 * port.  Returns -EADDRNOTAVAIL if the whole range is exhausted.
 */
static int cma_alloc_any_port(struct idr *ps, struct rdma_id_private *id_priv)
{
	static unsigned int last_used_port;
	int low, high, remaining;
	unsigned int rover;

	inet_get_local_port_range(&init_net, &low, &high);
	remaining = (high - low) + 1;
	rover = net_random() % remaining + low;
retry:
	if (last_used_port != rover &&
	    !idr_find(ps, (unsigned short) rover)) {
		int ret = cma_alloc_port(ps, id_priv, rover);
		/*
		 * Remember previously used port number in order to avoid
		 * re-using same port immediately after it is closed.
		 */
		if (!ret)
			last_used_port = rover;
		/* Any error other than "port taken" is fatal. */
		if (ret != -EADDRNOTAVAIL)
			return ret;
	}
	if (--remaining) {
		rover++;
		if ((rover < low) || (rover > high))
			rover = low;	/* wrap around within the range */
		goto retry;
	}
	return -EADDRNOTAVAIL;
}
2381
/*
 * Check that the requested port is available. This is called when trying to
 * bind to a specific port, or when trying to listen on a bound port. In
 * the latter case, the provided id_priv may already be on the bind_list, but
 * we still need to check that it's okay to start listening.
 */
static int cma_check_port(struct rdma_bind_list *bind_list,
			  struct rdma_id_private *id_priv, uint8_t reuseaddr)
{
	struct rdma_id_private *cur_id;
	struct sockaddr *addr, *cur_addr;

	addr = cma_src_addr(id_priv);
	hlist_for_each_entry(cur_id, &bind_list->owners, node) {
		if (id_priv == cur_id)
			continue;

		/* Sharing is allowed when both sides opted into reuseaddr
		 * and the existing owner is not already listening. */
		if ((cur_id->state != RDMA_CM_LISTEN) && reuseaddr &&
		    cur_id->reuseaddr)
			continue;

		cur_addr = cma_src_addr(cur_id);
		/* Ids restricted to different address families may coexist
		 * on the same port. */
		if (id_priv->afonly && cur_id->afonly &&
		    (addr->sa_family != cur_addr->sa_family))
			continue;

		/* A wildcard on either side conflicts with everything. */
		if (cma_any_addr(addr) || cma_any_addr(cur_addr))
			return -EADDRNOTAVAIL;

		if (!cma_addr_cmp(addr, cur_addr))
			return -EADDRINUSE;
	}
	return 0;
}
2416
/*
 * Bind @id_priv to the specific port already present in its source
 * address.  Privileged ports require CAP_NET_BIND_SERVICE.
 */
static int cma_use_port(struct idr *ps, struct rdma_id_private *id_priv)
{
	struct rdma_bind_list *bind_list;
	unsigned short snum;
	int ret;

	snum = ntohs(cma_port(cma_src_addr(id_priv)));
	if (snum < PROT_SOCK && !capable(CAP_NET_BIND_SERVICE))
		return -EACCES;

	bind_list = idr_find(ps, snum);
	if (!bind_list) {
		/* Port is free: create a fresh bind_list for it. */
		ret = cma_alloc_port(ps, id_priv, snum);
	} else {
		/* Port exists: join it only if sharing rules allow. */
		ret = cma_check_port(bind_list, id_priv, id_priv->reuseaddr);
		if (!ret)
			cma_bind_port(bind_list, id_priv);
	}
	return ret;
}
Sean Heftye51060f2006-06-17 20:37:29 -07002437
/*
 * Re-validate a shared port before listening on it.  The check is only
 * needed when the bind_list has more than one owner (first->next non-NULL
 * means at least two entries).
 */
static int cma_bind_listen(struct rdma_id_private *id_priv)
{
	struct rdma_bind_list *bind_list = id_priv->bind_list;
	int ret = 0;

	mutex_lock(&lock);
	if (bind_list->owners.first->next)
		ret = cma_check_port(bind_list, id_priv, 0);
	mutex_unlock(&lock);
	return ret;
}
2449
Sean Hefty58afdcb2013-05-29 10:09:11 -07002450static struct idr *cma_select_inet_ps(struct rdma_id_private *id_priv)
2451{
2452 switch (id_priv->id.ps) {
Sean Hefty58afdcb2013-05-29 10:09:11 -07002453 case RDMA_PS_TCP:
2454 return &tcp_ps;
2455 case RDMA_PS_UDP:
2456 return &udp_ps;
2457 case RDMA_PS_IPOIB:
2458 return &ipoib_ps;
2459 case RDMA_PS_IB:
2460 return &ib_ps;
2461 default:
2462 return NULL;
2463 }
2464}
2465
/*
 * For AF_IB, derive the port space from the user-requested service ID.
 * If the SID (under its mask) matches one of the RDMA IP port-space
 * prefixes compatible with id.ps, select that table and rewrite the
 * SID/mask to the fully-resolved form that includes the port number.
 * Returns NULL when no compatible port space matches.
 */
static struct idr *cma_select_ib_ps(struct rdma_id_private *id_priv)
{
	struct idr *ps = NULL;
	struct sockaddr_ib *sib;
	u64 sid_ps, mask, sid;

	sib = (struct sockaddr_ib *) cma_src_addr(id_priv);
	mask = be64_to_cpu(sib->sib_sid_mask) & RDMA_IB_IP_PS_MASK;
	sid = be64_to_cpu(sib->sib_sid) & mask;

	if ((id_priv->id.ps == RDMA_PS_IB) && (sid == (RDMA_IB_IP_PS_IB & mask))) {
		sid_ps = RDMA_IB_IP_PS_IB;
		ps = &ib_ps;
	} else if (((id_priv->id.ps == RDMA_PS_IB) || (id_priv->id.ps == RDMA_PS_TCP)) &&
		   (sid == (RDMA_IB_IP_PS_TCP & mask))) {
		sid_ps = RDMA_IB_IP_PS_TCP;
		ps = &tcp_ps;
	} else if (((id_priv->id.ps == RDMA_PS_IB) || (id_priv->id.ps == RDMA_PS_UDP)) &&
		   (sid == (RDMA_IB_IP_PS_UDP & mask))) {
		sid_ps = RDMA_IB_IP_PS_UDP;
		ps = &udp_ps;
	}

	if (ps) {
		/* Fix the SID to prefix | port and widen the mask to cover
		 * the port-space prefix bits. */
		sib->sib_sid = cpu_to_be64(sid_ps | ntohs(cma_port((struct sockaddr *) sib)));
		sib->sib_sid_mask = cpu_to_be64(RDMA_IB_IP_PS_MASK |
						be64_to_cpu(sib->sib_sid_mask));
	}
	return ps;
}
2496
/*
 * Select the port space for the id's address family and claim a port:
 * ephemeral when the source address carries no port, else the specific
 * one requested.  Serialized by the global 'lock'.
 */
static int cma_get_port(struct rdma_id_private *id_priv)
{
	struct idr *ps;
	int ret;

	if (cma_family(id_priv) != AF_IB)
		ps = cma_select_inet_ps(id_priv);
	else
		ps = cma_select_ib_ps(id_priv);
	if (!ps)
		return -EPROTONOSUPPORT;

	mutex_lock(&lock);
	if (cma_any_port(cma_src_addr(id_priv)))
		ret = cma_alloc_any_port(ps, id_priv);
	else
		ret = cma_use_port(ps, id_priv);
	mutex_unlock(&lock);

	return ret;
}
2518
Sean Heftyd14714d2009-11-19 16:46:25 -08002519static int cma_check_linklocal(struct rdma_dev_addr *dev_addr,
2520 struct sockaddr *addr)
2521{
Roland Dreierd90f9b32012-07-05 22:39:34 -07002522#if IS_ENABLED(CONFIG_IPV6)
Sean Heftyd14714d2009-11-19 16:46:25 -08002523 struct sockaddr_in6 *sin6;
2524
2525 if (addr->sa_family != AF_INET6)
2526 return 0;
2527
2528 sin6 = (struct sockaddr_in6 *) addr;
2529 if ((ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LINKLOCAL) &&
2530 !sin6->sin6_scope_id)
2531 return -EINVAL;
2532
2533 dev_addr->bound_dev_if = sin6->sin6_scope_id;
2534#endif
2535 return 0;
2536}
2537
/*
 * Start listening for connection requests on @id.  An idle id is first
 * implicitly bound to a wildcard IPv4 address.  When the id is bound to a
 * device, listening is set up on that device's transport; otherwise the
 * id listens across all RDMA devices.
 */
int rdma_listen(struct rdma_cm_id *id, int backlog)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (id_priv->state == RDMA_CM_IDLE) {
		/* Implicit bind: wildcard IPv4 source. */
		id->route.addr.src_addr.ss_family = AF_INET;
		ret = rdma_bind_addr(id, cma_src_addr(id_priv));
		if (ret)
			return ret;
	}

	if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_LISTEN))
		return -EINVAL;

	/* A port shared via reuseaddr must be re-validated for listening. */
	if (id_priv->reuseaddr) {
		ret = cma_bind_listen(id_priv);
		if (ret)
			goto err;
	}

	id_priv->backlog = backlog;
	if (id->device) {
		switch (rdma_node_get_transport(id->device->node_type)) {
		case RDMA_TRANSPORT_IB:
			ret = cma_ib_listen(id_priv);
			if (ret)
				goto err;
			break;
		case RDMA_TRANSPORT_IWARP:
			ret = cma_iw_listen(id_priv, backlog);
			if (ret)
				goto err;
			break;
		default:
			ret = -ENOSYS;
			goto err;
		}
	} else
		cma_listen_on_all(id_priv);

	return 0;
err:
	/* Undo: clear the backlog and return to the bound state. */
	id_priv->backlog = 0;
	cma_comp_exch(id_priv, RDMA_CM_LISTEN, RDMA_CM_ADDR_BOUND);
	return ret;
}
EXPORT_SYMBOL(rdma_listen);
2587
/*
 * Bind @id to the local address @addr.  A specific (non-wildcard) address
 * is first translated to, and acquires, its owning RDMA device; in all
 * cases a port is claimed in the appropriate port space.
 */
int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr)
{
	struct rdma_id_private *id_priv;
	int ret;

	if (addr->sa_family != AF_INET && addr->sa_family != AF_INET6 &&
	    addr->sa_family != AF_IB)
		return -EAFNOSUPPORT;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_comp_exch(id_priv, RDMA_CM_IDLE, RDMA_CM_ADDR_BOUND))
		return -EINVAL;

	/* IPv6 link-local addresses must carry a scope id, which pins the
	 * bound device. */
	ret = cma_check_linklocal(&id->route.addr.dev_addr, addr);
	if (ret)
		goto err1;

	if (!cma_any_addr(addr)) {
		ret = cma_translate_addr(addr, &id->route.addr.dev_addr);
		if (ret)
			goto err1;

		ret = cma_acquire_dev(id_priv, NULL);
		if (ret)
			goto err1;
	}

	memcpy(cma_src_addr(id_priv), addr, rdma_addr_size(addr));
	if (!(id_priv->options & (1 << CMA_OPTION_AFONLY))) {
		/* User did not set afonly explicitly: IPv4 binds are always
		 * family-exclusive, IPv6 follows the bindv6only sysctl. */
		if (addr->sa_family == AF_INET)
			id_priv->afonly = 1;
#if IS_ENABLED(CONFIG_IPV6)
		else if (addr->sa_family == AF_INET6)
			id_priv->afonly = init_net.ipv6.sysctl.bindv6only;
#endif
	}
	ret = cma_get_port(id_priv);
	if (ret)
		goto err2;

	return 0;
err2:
	if (id_priv->cma_dev)
		cma_release_dev(id_priv);
err1:
	cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_IDLE);
	return ret;
}
EXPORT_SYMBOL(rdma_bind_addr);
2637
Sean Heftyf4753832013-05-29 10:09:14 -07002638static int cma_format_hdr(void *hdr, struct rdma_id_private *id_priv)
Sean Heftye51060f2006-06-17 20:37:29 -07002639{
Sean Heftye51060f2006-06-17 20:37:29 -07002640 struct cma_hdr *cma_hdr;
Sean Heftye51060f2006-06-17 20:37:29 -07002641
Sean Hefty01602f12013-05-29 10:09:20 -07002642 cma_hdr = hdr;
2643 cma_hdr->cma_version = CMA_VERSION;
Sean Heftyf4753832013-05-29 10:09:14 -07002644 if (cma_family(id_priv) == AF_INET) {
Aleksey Senin1f5175a2008-12-24 10:16:45 -08002645 struct sockaddr_in *src4, *dst4;
Sean Heftye51060f2006-06-17 20:37:29 -07002646
Sean Heftyf4753832013-05-29 10:09:14 -07002647 src4 = (struct sockaddr_in *) cma_src_addr(id_priv);
2648 dst4 = (struct sockaddr_in *) cma_dst_addr(id_priv);
Aleksey Senin1f5175a2008-12-24 10:16:45 -08002649
Sean Hefty01602f12013-05-29 10:09:20 -07002650 cma_set_ip_ver(cma_hdr, 4);
2651 cma_hdr->src_addr.ip4.addr = src4->sin_addr.s_addr;
2652 cma_hdr->dst_addr.ip4.addr = dst4->sin_addr.s_addr;
2653 cma_hdr->port = src4->sin_port;
Sean Heftye8160e12013-05-29 10:09:22 -07002654 } else if (cma_family(id_priv) == AF_INET6) {
Aleksey Senin1f5175a2008-12-24 10:16:45 -08002655 struct sockaddr_in6 *src6, *dst6;
2656
Sean Heftyf4753832013-05-29 10:09:14 -07002657 src6 = (struct sockaddr_in6 *) cma_src_addr(id_priv);
2658 dst6 = (struct sockaddr_in6 *) cma_dst_addr(id_priv);
Aleksey Senin1f5175a2008-12-24 10:16:45 -08002659
Sean Hefty01602f12013-05-29 10:09:20 -07002660 cma_set_ip_ver(cma_hdr, 6);
2661 cma_hdr->src_addr.ip6 = src6->sin6_addr;
2662 cma_hdr->dst_addr.ip6 = dst6->sin6_addr;
2663 cma_hdr->port = src6->sin6_port;
Sean Heftye51060f2006-06-17 20:37:29 -07002664 }
2665 return 0;
2666}
2667
/*
 * IB CM callback for the active side of a SIDR (UD) exchange: translate
 * the CM event into an RDMA CM event and deliver it to the user's event
 * handler.  A non-zero return from the user destroys the id.
 */
static int cma_sidr_rep_handler(struct ib_cm_id *cm_id,
				struct ib_cm_event *ib_event)
{
	struct rdma_id_private *id_priv = cm_id->context;
	struct rdma_cm_event event;
	struct ib_cm_sidr_rep_event_param *rep = &ib_event->param.sidr_rep_rcvd;
	int ret = 0;

	/* Ignore the event if the id left the CONNECT state; on success
	 * this also leaves handler_mutex held (released below). */
	if (cma_disable_callback(id_priv, RDMA_CM_CONNECT))
		return 0;

	memset(&event, 0, sizeof event);
	switch (ib_event->event) {
	case IB_CM_SIDR_REQ_ERROR:
		event.event = RDMA_CM_EVENT_UNREACHABLE;
		event.status = -ETIMEDOUT;
		break;
	case IB_CM_SIDR_REP_RECEIVED:
		event.param.ud.private_data = ib_event->private_data;
		event.param.ud.private_data_len = IB_CM_SIDR_REP_PRIVATE_DATA_SIZE;
		if (rep->status != IB_SIDR_SUCCESS) {
			event.event = RDMA_CM_EVENT_UNREACHABLE;
			event.status = ib_event->param.sidr_rep_rcvd.status;
			break;
		}
		ret = cma_set_qkey(id_priv, rep->qkey);
		if (ret) {
			event.event = RDMA_CM_EVENT_ADDR_ERROR;
			event.status = ret;
			break;
		}
		/* Success: build the AH attributes for the remote UD QP. */
		ib_init_ah_from_path(id_priv->id.device, id_priv->id.port_num,
				     id_priv->id.route.path_rec,
				     &event.param.ud.ah_attr);
		event.param.ud.qp_num = rep->qpn;
		event.param.ud.qkey = rep->qkey;
		event.event = RDMA_CM_EVENT_ESTABLISHED;
		event.status = 0;
		break;
	default:
		printk(KERN_ERR "RDMA CMA: unexpected IB CM event: %d\n",
		       ib_event->event);
		goto out;
	}

	ret = id_priv->id.event_handler(&id_priv->id, &event);
	if (ret) {
		/* Destroy the CM ID by returning a non-zero value. */
		id_priv->cm_id.ib = NULL;
		cma_exch(id_priv, RDMA_CM_DESTROYING);
		mutex_unlock(&id_priv->handler_mutex);
		rdma_destroy_id(&id_priv->id);
		return ret;
	}
out:
	mutex_unlock(&id_priv->handler_mutex);
	return ret;
}
2726
/*
 * Initiate a SIDR request to resolve the remote UD QP.  The user's
 * private data is copied after a CMA header whose size is given by
 * cma_user_data_offset().
 */
static int cma_resolve_ib_udp(struct rdma_id_private *id_priv,
			      struct rdma_conn_param *conn_param)
{
	struct ib_cm_sidr_req_param req;
	struct ib_cm_id *id;
	void *private_data;
	int offset, ret;

	memset(&req, 0, sizeof req);
	offset = cma_user_data_offset(id_priv);
	req.private_data_len = offset + conn_param->private_data_len;
	/* Detect wrap-around of the narrow private_data_len field. */
	if (req.private_data_len < conn_param->private_data_len)
		return -EINVAL;

	if (req.private_data_len) {
		private_data = kzalloc(req.private_data_len, GFP_ATOMIC);
		if (!private_data)
			return -ENOMEM;
	} else {
		private_data = NULL;
	}

	if (conn_param->private_data && conn_param->private_data_len)
		memcpy(private_data + offset, conn_param->private_data,
		       conn_param->private_data_len);

	if (private_data) {
		ret = cma_format_hdr(private_data, id_priv);
		if (ret)
			goto out;
		req.private_data = private_data;
	}

	id = ib_create_cm_id(id_priv->id.device, cma_sidr_rep_handler,
			     id_priv);
	if (IS_ERR(id)) {
		ret = PTR_ERR(id);
		goto out;
	}
	id_priv->cm_id.ib = id;

	req.path = id_priv->id.route.path_rec;
	req.service_id = rdma_get_service_id(&id_priv->id, cma_dst_addr(id_priv));
	req.timeout_ms = 1 << (CMA_CM_RESPONSE_TIMEOUT - 8);
	req.max_cm_retries = CMA_MAX_CM_RETRIES;

	ret = ib_send_cm_sidr_req(id_priv->cm_id.ib, &req);
	if (ret) {
		ib_destroy_cm_id(id_priv->cm_id.ib);
		id_priv->cm_id.ib = NULL;
	}
out:
	/* kfree(NULL) is a no-op, so this covers the no-data case too. */
	kfree(private_data);
	return ret;
}
2782
/*
 * Initiate an IB connection: allocate an ib_cm id, prefix the user's
 * private data with the CMA addressing header, and send the CM REQ over
 * the resolved path(s).
 */
static int cma_connect_ib(struct rdma_id_private *id_priv,
			  struct rdma_conn_param *conn_param)
{
	struct ib_cm_req_param req;
	struct rdma_route *route;
	void *private_data;
	struct ib_cm_id *id;
	int offset, ret;

	memset(&req, 0, sizeof req);
	offset = cma_user_data_offset(id_priv);
	req.private_data_len = offset + conn_param->private_data_len;
	/* Detect wrap-around of the narrow private_data_len field. */
	if (req.private_data_len < conn_param->private_data_len)
		return -EINVAL;

	if (req.private_data_len) {
		private_data = kzalloc(req.private_data_len, GFP_ATOMIC);
		if (!private_data)
			return -ENOMEM;
	} else {
		private_data = NULL;
	}

	if (conn_param->private_data && conn_param->private_data_len)
		memcpy(private_data + offset, conn_param->private_data,
		       conn_param->private_data_len);

	id = ib_create_cm_id(id_priv->id.device, cma_ib_handler, id_priv);
	if (IS_ERR(id)) {
		ret = PTR_ERR(id);
		goto out;
	}
	id_priv->cm_id.ib = id;

	route = &id_priv->id.route;
	if (private_data) {
		ret = cma_format_hdr(private_data, id_priv);
		if (ret)
			goto out;
		req.private_data = private_data;
	}

	req.primary_path = &route->path_rec[0];
	if (route->num_paths == 2)
		req.alternate_path = &route->path_rec[1];

	req.service_id = rdma_get_service_id(&id_priv->id, cma_dst_addr(id_priv));
	req.qp_num = id_priv->qp_num;
	req.qp_type = id_priv->id.qp_type;
	req.starting_psn = id_priv->seq_num;
	req.responder_resources = conn_param->responder_resources;
	req.initiator_depth = conn_param->initiator_depth;
	req.flow_control = conn_param->flow_control;
	/* Clamp retry counts to 7 (maximum the CM wire format carries). */
	req.retry_count = min_t(u8, 7, conn_param->retry_count);
	req.rnr_retry_count = min_t(u8, 7, conn_param->rnr_retry_count);
	req.remote_cm_response_timeout = CMA_CM_RESPONSE_TIMEOUT;
	req.local_cm_response_timeout = CMA_CM_RESPONSE_TIMEOUT;
	req.max_cm_retries = CMA_MAX_CM_RETRIES;
	req.srq = id_priv->srq ? 1 : 0;

	ret = ib_send_cm_req(id_priv->cm_id.ib, &req);
out:
	/* Tear down the cm_id only if it was actually created. */
	if (ret && !IS_ERR(id)) {
		ib_destroy_cm_id(id);
		id_priv->cm_id.ib = NULL;
	}

	kfree(private_data);
	return ret;
}
2853
/*
 * Initiate an iWarp connection: create an iw_cm id, copy over the
 * resolved addresses, move the QP to RTR, and issue the connect with the
 * user's parameters (or defaults when conn_param is NULL).
 */
static int cma_connect_iw(struct rdma_id_private *id_priv,
			  struct rdma_conn_param *conn_param)
{
	struct iw_cm_id *cm_id;
	int ret;
	struct iw_cm_conn_param iw_param;

	cm_id = iw_create_cm_id(id_priv->id.device, cma_iw_handler, id_priv);
	if (IS_ERR(cm_id))
		return PTR_ERR(cm_id);

	id_priv->cm_id.iw = cm_id;

	memcpy(&cm_id->local_addr, cma_src_addr(id_priv),
	       rdma_addr_size(cma_src_addr(id_priv)));
	memcpy(&cm_id->remote_addr, cma_dst_addr(id_priv),
	       rdma_addr_size(cma_dst_addr(id_priv)));

	ret = cma_modify_qp_rtr(id_priv, conn_param);
	if (ret)
		goto out;

	if (conn_param) {
		iw_param.ord = conn_param->initiator_depth;
		iw_param.ird = conn_param->responder_resources;
		iw_param.private_data = conn_param->private_data;
		iw_param.private_data_len = conn_param->private_data_len;
		/* Prefer the QP owned by the id; fall back to the user's. */
		iw_param.qpn = id_priv->id.qp ? id_priv->qp_num : conn_param->qp_num;
	} else {
		memset(&iw_param, 0, sizeof iw_param);
		iw_param.qpn = id_priv->qp_num;
	}
	ret = iw_cm_connect(cm_id, &iw_param);
out:
	if (ret) {
		iw_destroy_cm_id(cm_id);
		id_priv->cm_id.iw = NULL;
	}
	return ret;
}
2894
/*
 * Connect @id to the destination whose route was resolved.  UD QPs on IB
 * use a SIDR exchange instead of the full CM handshake.
 */
int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_comp_exch(id_priv, RDMA_CM_ROUTE_RESOLVED, RDMA_CM_CONNECT))
		return -EINVAL;

	/* User-managed QP: take its number and SRQ flag from conn_param. */
	if (!id->qp) {
		id_priv->qp_num = conn_param->qp_num;
		id_priv->srq = conn_param->srq;
	}

	switch (rdma_node_get_transport(id->device->node_type)) {
	case RDMA_TRANSPORT_IB:
		if (id->qp_type == IB_QPT_UD)
			ret = cma_resolve_ib_udp(id_priv, conn_param);
		else
			ret = cma_connect_ib(id_priv, conn_param);
		break;
	case RDMA_TRANSPORT_IWARP:
		ret = cma_connect_iw(id_priv, conn_param);
		break;
	default:
		ret = -ENOSYS;
		break;
	}
	if (ret)
		goto err;

	return 0;
err:
	/* Roll back to the route-resolved state on failure. */
	cma_comp_exch(id_priv, RDMA_CM_CONNECT, RDMA_CM_ROUTE_RESOLVED);
	return ret;
}
EXPORT_SYMBOL(rdma_connect);
2932
/*
 * Accept an incoming IB connection: transition the QP through RTR and
 * RTS, then send the CM REP with the negotiated parameters.
 */
static int cma_accept_ib(struct rdma_id_private *id_priv,
			 struct rdma_conn_param *conn_param)
{
	struct ib_cm_rep_param rep;
	int ret;

	ret = cma_modify_qp_rtr(id_priv, conn_param);
	if (ret)
		goto out;

	ret = cma_modify_qp_rts(id_priv, conn_param);
	if (ret)
		goto out;

	memset(&rep, 0, sizeof rep);
	rep.qp_num = id_priv->qp_num;
	rep.starting_psn = id_priv->seq_num;
	rep.private_data = conn_param->private_data;
	rep.private_data_len = conn_param->private_data_len;
	rep.responder_resources = conn_param->responder_resources;
	rep.initiator_depth = conn_param->initiator_depth;
	rep.failover_accepted = 0;
	rep.flow_control = conn_param->flow_control;
	/* Clamp to 7: maximum value of the CM RNR retry field. */
	rep.rnr_retry_count = min_t(u8, 7, conn_param->rnr_retry_count);
	rep.srq = id_priv->srq ? 1 : 0;

	ret = ib_send_cm_rep(id_priv->cm_id.ib, &rep);
out:
	return ret;
}
2963
Tom Tucker07ebafb2006-08-03 16:02:42 -05002964static int cma_accept_iw(struct rdma_id_private *id_priv,
2965 struct rdma_conn_param *conn_param)
2966{
2967 struct iw_cm_conn_param iw_param;
2968 int ret;
2969
Sean Hefty5851bb82008-01-04 10:47:12 -08002970 ret = cma_modify_qp_rtr(id_priv, conn_param);
Tom Tucker07ebafb2006-08-03 16:02:42 -05002971 if (ret)
2972 return ret;
2973
2974 iw_param.ord = conn_param->initiator_depth;
2975 iw_param.ird = conn_param->responder_resources;
2976 iw_param.private_data = conn_param->private_data;
2977 iw_param.private_data_len = conn_param->private_data_len;
2978 if (id_priv->id.qp) {
2979 iw_param.qpn = id_priv->qp_num;
2980 } else
2981 iw_param.qpn = conn_param->qp_num;
2982
2983 return iw_cm_accept(id_priv->cm_id.iw, &iw_param);
2984}
2985
/*
 * Send a SIDR reply.  On success the local QPN and qkey are included so
 * the remote UD peer can address this QP.
 */
static int cma_send_sidr_rep(struct rdma_id_private *id_priv,
			     enum ib_cm_sidr_status status, u32 qkey,
			     const void *private_data, int private_data_len)
{
	struct ib_cm_sidr_rep_param rep;
	int ret;

	memset(&rep, 0, sizeof rep);
	rep.status = status;
	if (status == IB_SIDR_SUCCESS) {
		ret = cma_set_qkey(id_priv, qkey);
		if (ret)
			return ret;
		rep.qp_num = id_priv->qp_num;
		rep.qkey = id_priv->qkey;
	}
	rep.private_data = private_data;
	rep.private_data_len = private_data_len;

	return ib_send_cm_sidr_rep(id_priv->cm_id.ib, &rep);
}
3007
/*
 * rdma_accept() - accept a pending connection (or SIDR) request.
 *
 * @id: the rdma_cm id the request arrived on; must be in RDMA_CM_CONNECT.
 * @conn_param: optional connection parameters; NULL means "accept with
 *	the parameters already established" (e.g. reply already queued).
 *
 * Dispatches by transport: IB UD ids answer with a SIDR reply, IB RC ids
 * send a CM REP (or just process the received REP), iWarp ids go through
 * the iWarp CM.  On any failure the connection is actively rejected and
 * the QP moved to the error state.  Returns 0 or a negative errno.
 */
int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);

	/* Record the accepting process for netlink id-stats reporting. */
	id_priv->owner = task_pid_nr(current);

	if (!cma_comp(id_priv, RDMA_CM_CONNECT))
		return -EINVAL;

	/* Without an attached QP, take QPN/SRQ settings from the caller. */
	if (!id->qp && conn_param) {
		id_priv->qp_num = conn_param->qp_num;
		id_priv->srq = conn_param->srq;
	}

	switch (rdma_node_get_transport(id->device->node_type)) {
	case RDMA_TRANSPORT_IB:
		if (id->qp_type == IB_QPT_UD) {
			if (conn_param)
				ret = cma_send_sidr_rep(id_priv, IB_SIDR_SUCCESS,
							conn_param->qkey,
							conn_param->private_data,
							conn_param->private_data_len);
			else
				ret = cma_send_sidr_rep(id_priv, IB_SIDR_SUCCESS,
							0, NULL, 0);
		} else {
			if (conn_param)
				ret = cma_accept_ib(id_priv, conn_param);
			else
				ret = cma_rep_recv(id_priv);
		}
		break;
	case RDMA_TRANSPORT_IWARP:
		ret = cma_accept_iw(id_priv, conn_param);
		break;
	default:
		ret = -ENOSYS;
		break;
	}

	if (ret)
		goto reject;

	return 0;
reject:
	/* Accept failed: error out the QP and actively reject the peer. */
	cma_modify_qp_err(id_priv);
	rdma_reject(id, NULL, 0);
	return ret;
}
EXPORT_SYMBOL(rdma_accept);
3061
Sean Hefty0fe313b2006-11-30 16:37:15 -08003062int rdma_notify(struct rdma_cm_id *id, enum ib_event_type event)
3063{
3064 struct rdma_id_private *id_priv;
3065 int ret;
3066
3067 id_priv = container_of(id, struct rdma_id_private, id);
Jack Morgenstein0c9361f2011-07-17 10:46:47 +00003068 if (!id_priv->cm_id.ib)
Sean Hefty0fe313b2006-11-30 16:37:15 -08003069 return -EINVAL;
3070
3071 switch (id->device->node_type) {
3072 case RDMA_NODE_IB_CA:
3073 ret = ib_cm_notify(id_priv->cm_id.ib, event);
3074 break;
3075 default:
3076 ret = 0;
3077 break;
3078 }
3079 return ret;
3080}
3081EXPORT_SYMBOL(rdma_notify);
3082
/*
 * rdma_reject() - reject a pending connection or SIDR request.
 *
 * @private_data/@private_data_len: optional rejection payload delivered
 *	to the remote peer.
 *
 * UD ids answer with a SIDR reject status; connected IB ids send a
 * consumer-defined CM REJ; iWarp ids reject through the iWarp CM.
 * Returns 0 or a negative errno.
 */
int rdma_reject(struct rdma_cm_id *id, const void *private_data,
		u8 private_data_len)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	/* No CM id means there is no outstanding request to reject. */
	if (!id_priv->cm_id.ib)
		return -EINVAL;

	switch (rdma_node_get_transport(id->device->node_type)) {
	case RDMA_TRANSPORT_IB:
		if (id->qp_type == IB_QPT_UD)
			ret = cma_send_sidr_rep(id_priv, IB_SIDR_REJECT, 0,
						private_data, private_data_len);
		else
			ret = ib_send_cm_rej(id_priv->cm_id.ib,
					     IB_CM_REJ_CONSUMER_DEFINED, NULL,
					     0, private_data, private_data_len);
		break;
	case RDMA_TRANSPORT_IWARP:
		ret = iw_cm_reject(id_priv->cm_id.iw,
				   private_data, private_data_len);
		break;
	default:
		ret = -ENOSYS;
		break;
	}
	return ret;
}
EXPORT_SYMBOL(rdma_reject);
3114
/*
 * rdma_disconnect() - tear down an established connection.
 *
 * For IB the QP is first moved to the error state, then a DREQ is sent;
 * if sending the DREQ fails (e.g. the peer already initiated disconnect)
 * a DREP is sent instead.  iWarp ids disconnect through the iWarp CM.
 * Returns 0 or a negative errno.
 */
int rdma_disconnect(struct rdma_cm_id *id)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!id_priv->cm_id.ib)
		return -EINVAL;

	switch (rdma_node_get_transport(id->device->node_type)) {
	case RDMA_TRANSPORT_IB:
		ret = cma_modify_qp_err(id_priv);
		if (ret)
			goto out;
		/* Initiate or respond to a disconnect. */
		if (ib_send_cm_dreq(id_priv->cm_id.ib, NULL, 0))
			ib_send_cm_drep(id_priv->cm_id.ib, NULL, 0);
		break;
	case RDMA_TRANSPORT_IWARP:
		ret = iw_cm_disconnect(id_priv->cm_id.iw, 0);
		break;
	default:
		ret = -EINVAL;
		break;
	}
out:
	return ret;
}
EXPORT_SYMBOL(rdma_disconnect);
3144
/*
 * Completion handler for an IB SA multicast join.
 *
 * On success, sets the id's QKey from the group record, attaches any
 * bound QP to the group, and reports RDMA_CM_EVENT_MULTICAST_JOIN with
 * an AH template for the group; on failure reports MULTICAST_ERROR.
 * If the user's event handler returns nonzero, the id is destroyed.
 * Always returns 0 to the SA layer.
 */
static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast)
{
	struct rdma_id_private *id_priv;
	struct cma_multicast *mc = multicast->context;
	struct rdma_cm_event event;
	int ret;

	id_priv = mc->id_priv;
	/* Bail out (holding no handler lock) if the id left the
	 * bound/resolved states, e.g. it is being destroyed. */
	if (cma_disable_callback(id_priv, RDMA_CM_ADDR_BOUND) &&
	    cma_disable_callback(id_priv, RDMA_CM_ADDR_RESOLVED))
		return 0;

	if (!status)
		status = cma_set_qkey(id_priv, be32_to_cpu(multicast->rec.qkey));
	/* qp_mutex serializes attach against QP changes on this id. */
	mutex_lock(&id_priv->qp_mutex);
	if (!status && id_priv->id.qp)
		status = ib_attach_mcast(id_priv->id.qp, &multicast->rec.mgid,
					 be16_to_cpu(multicast->rec.mlid));
	mutex_unlock(&id_priv->qp_mutex);

	memset(&event, 0, sizeof event);
	event.status = status;
	event.param.ud.private_data = mc->context;
	if (!status) {
		event.event = RDMA_CM_EVENT_MULTICAST_JOIN;
		ib_init_ah_from_mcmember(id_priv->id.device,
					 id_priv->id.port_num, &multicast->rec,
					 &event.param.ud.ah_attr);
		/* Multicast sends always target QPN 0xFFFFFF. */
		event.param.ud.qp_num = 0xFFFFFF;
		event.param.ud.qkey = be32_to_cpu(multicast->rec.qkey);
	} else
		event.event = RDMA_CM_EVENT_MULTICAST_ERROR;

	ret = id_priv->id.event_handler(&id_priv->id, &event);
	if (ret) {
		/* Handler requested destruction: release the handler lock
		 * taken by cma_disable_callback() before destroying. */
		cma_exch(id_priv, RDMA_CM_DESTROYING);
		mutex_unlock(&id_priv->handler_mutex);
		rdma_destroy_id(&id_priv->id);
		return 0;
	}

	mutex_unlock(&id_priv->handler_mutex);
	return 0;
}
3189
/*
 * Translate the user-supplied multicast address into the MGID to join.
 * Wildcard -> zero MGID; SA-assigned IPv6 MGIDs and AF_IB addresses are
 * used verbatim; other IPv4/IPv6 addresses are mapped through the
 * IPoIB broadcast-domain multicast mapping.
 */
static void cma_set_mgid(struct rdma_id_private *id_priv,
			 struct sockaddr *addr, union ib_gid *mgid)
{
	unsigned char mc_map[MAX_ADDR_LEN];
	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
	struct sockaddr_in *sin = (struct sockaddr_in *) addr;
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) addr;

	if (cma_any_addr(addr)) {
		memset(mgid, 0, sizeof *mgid);
	} else if ((addr->sa_family == AF_INET6) &&
		   ((be32_to_cpu(sin6->sin6_addr.s6_addr32[0]) & 0xFFF0FFFF) ==
								 0xFF10A01B)) {
		/* IPv6 address is an SA assigned MGID. */
		memcpy(mgid, &sin6->sin6_addr, sizeof *mgid);
	} else if (addr->sa_family == AF_IB) {
		/* AF_IB carries the MGID directly in sib_addr. */
		memcpy(mgid, &((struct sockaddr_ib *) addr)->sib_addr, sizeof *mgid);
	} else if ((addr->sa_family == AF_INET6)) {
		ipv6_ib_mc_map(&sin6->sin6_addr, dev_addr->broadcast, mc_map);
		if (id_priv->id.ps == RDMA_PS_UDP)
			mc_map[7] = 0x01;	/* Use RDMA CM signature */
		*mgid = *(union ib_gid *) (mc_map + 4);
	} else {
		ip_ib_mc_map(sin->sin_addr.s_addr, dev_addr->broadcast, mc_map);
		if (id_priv->id.ps == RDMA_PS_UDP)
			mc_map[7] = 0x01;	/* Use RDMA CM signature */
		*mgid = *(union ib_gid *) (mc_map + 4);
	}
}
3219
/*
 * Join an InfiniBand multicast group through the SA.
 * Builds an MCMember record from the device's broadcast group defaults,
 * overrides MGID/QKey/SGID/PKey for this join, and issues an async
 * ib_sa_join_multicast() whose completion runs cma_ib_mc_handler().
 * Returns 0 or a negative errno.
 */
static int cma_join_ib_multicast(struct rdma_id_private *id_priv,
				 struct cma_multicast *mc)
{
	struct ib_sa_mcmember_rec rec;
	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
	ib_sa_comp_mask comp_mask;
	int ret;

	/* Seed the record from the port's broadcast group parameters. */
	ib_addr_get_mgid(dev_addr, &rec.mgid);
	ret = ib_sa_get_mcmember_rec(id_priv->id.device, id_priv->id.port_num,
				     &rec.mgid, &rec);
	if (ret)
		return ret;

	ret = cma_set_qkey(id_priv, 0);
	if (ret)
		return ret;

	cma_set_mgid(id_priv, (struct sockaddr *) &mc->addr, &rec.mgid);
	rec.qkey = cpu_to_be32(id_priv->qkey);
	rdma_addr_get_sgid(dev_addr, &rec.port_gid);
	rec.pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr));
	rec.join_state = 1;		/* full member */

	comp_mask = IB_SA_MCMEMBER_REC_MGID | IB_SA_MCMEMBER_REC_PORT_GID |
		    IB_SA_MCMEMBER_REC_PKEY | IB_SA_MCMEMBER_REC_JOIN_STATE |
		    IB_SA_MCMEMBER_REC_QKEY | IB_SA_MCMEMBER_REC_SL |
		    IB_SA_MCMEMBER_REC_FLOW_LABEL |
		    IB_SA_MCMEMBER_REC_TRAFFIC_CLASS;

	/* IPoIB joins must also match rate/MTU/hop-limit selectors. */
	if (id_priv->id.ps == RDMA_PS_IPOIB)
		comp_mask |= IB_SA_MCMEMBER_REC_RATE |
			     IB_SA_MCMEMBER_REC_RATE_SELECTOR |
			     IB_SA_MCMEMBER_REC_MTU_SELECTOR |
			     IB_SA_MCMEMBER_REC_MTU |
			     IB_SA_MCMEMBER_REC_HOP_LIMIT;

	mc->multicast.ib = ib_sa_join_multicast(&sa_client, id_priv->id.device,
						id_priv->id.port_num, &rec,
						comp_mask, GFP_KERNEL,
						cma_ib_mc_handler, mc);
	return PTR_ERR_OR_ZERO(mc->multicast.ib);
}
3263
Eli Cohen3c86aa72010-10-13 21:26:51 +02003264static void iboe_mcast_work_handler(struct work_struct *work)
3265{
3266 struct iboe_mcast_work *mw = container_of(work, struct iboe_mcast_work, work);
3267 struct cma_multicast *mc = mw->mc;
3268 struct ib_sa_multicast *m = mc->multicast.ib;
3269
3270 mc->multicast.ib->context = mc;
3271 cma_ib_mc_handler(0, m);
3272 kref_put(&mc->mcref, release_mc);
3273 kfree(mw);
3274}
3275
3276static void cma_iboe_set_mgid(struct sockaddr *addr, union ib_gid *mgid)
3277{
3278 struct sockaddr_in *sin = (struct sockaddr_in *)addr;
3279 struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)addr;
3280
3281 if (cma_any_addr(addr)) {
3282 memset(mgid, 0, sizeof *mgid);
3283 } else if (addr->sa_family == AF_INET6) {
3284 memcpy(mgid, &sin6->sin6_addr, sizeof *mgid);
3285 } else {
3286 mgid->raw[0] = 0xff;
3287 mgid->raw[1] = 0x0e;
3288 mgid->raw[2] = 0;
3289 mgid->raw[3] = 0;
3290 mgid->raw[4] = 0;
3291 mgid->raw[5] = 0;
3292 mgid->raw[6] = 0;
3293 mgid->raw[7] = 0;
3294 mgid->raw[8] = 0;
3295 mgid->raw[9] = 0;
3296 mgid->raw[10] = 0xff;
3297 mgid->raw[11] = 0xff;
3298 *(__be32 *)(&mgid->raw[12]) = sin->sin_addr.s_addr;
3299 }
3300}
3301
/*
 * "Join" a multicast group on an IBoE (RoCE) port.
 * There is no SA on Ethernet, so the mcmember record is synthesized
 * locally (rate/MTU from the bound netdev) and completion is reported
 * via a queued work item (iboe_mcast_work_handler).
 * Returns 0 or a negative errno; on error all allocations are undone.
 */
static int cma_iboe_join_multicast(struct rdma_id_private *id_priv,
				   struct cma_multicast *mc)
{
	struct iboe_mcast_work *work;
	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
	int err;
	struct sockaddr *addr = (struct sockaddr *)&mc->addr;
	struct net_device *ndev = NULL;

	if (cma_zero_addr((struct sockaddr *)&mc->addr))
		return -EINVAL;

	work = kzalloc(sizeof *work, GFP_KERNEL);
	if (!work)
		return -ENOMEM;

	/* No SA join object exists on Ethernet; fake one locally. */
	mc->multicast.ib = kzalloc(sizeof(struct ib_sa_multicast), GFP_KERNEL);
	if (!mc->multicast.ib) {
		err = -ENOMEM;
		goto out1;
	}

	cma_iboe_set_mgid(addr, &mc->multicast.ib->rec.mgid);

	mc->multicast.ib->rec.pkey = cpu_to_be16(0xffff);
	if (id_priv->id.ps == RDMA_PS_UDP)
		mc->multicast.ib->rec.qkey = cpu_to_be32(RDMA_UDP_QKEY);

	/* Derive rate and MTU from the netdev the id is bound to. */
	if (dev_addr->bound_dev_if)
		ndev = dev_get_by_index(&init_net, dev_addr->bound_dev_if);
	if (!ndev) {
		err = -ENODEV;
		goto out2;
	}
	mc->multicast.ib->rec.rate = iboe_get_rate(ndev);
	mc->multicast.ib->rec.hop_limit = 1;
	mc->multicast.ib->rec.mtu = iboe_get_mtu(ndev->mtu);
	dev_put(ndev);
	if (!mc->multicast.ib->rec.mtu) {
		err = -EINVAL;
		goto out2;
	}
	iboe_addr_get_sgid(dev_addr, &mc->multicast.ib->rec.port_gid);
	work->id = id_priv;
	work->mc = mc;
	INIT_WORK(&work->work, iboe_mcast_work_handler);
	/* Reference for the queued work; dropped by the work handler. */
	kref_get(&mc->mcref);
	queue_work(cma_wq, &work->work);

	return 0;

out2:
	kfree(mc->multicast.ib);
out1:
	kfree(work);
	return err;
}
3359
/*
 * rdma_join_multicast() - join the multicast group named by @addr.
 *
 * @context: opaque user pointer reported back in join events.
 *
 * Requires the id to have a bound or resolved address.  The join record
 * is linked on the id before the transport join is issued so events can
 * find it.  Returns 0 or a negative errno; on error the record is
 * unlinked and freed.
 */
int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr,
			void *context)
{
	struct rdma_id_private *id_priv;
	struct cma_multicast *mc;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_comp(id_priv, RDMA_CM_ADDR_BOUND) &&
	    !cma_comp(id_priv, RDMA_CM_ADDR_RESOLVED))
		return -EINVAL;

	mc = kmalloc(sizeof *mc, GFP_KERNEL);
	if (!mc)
		return -ENOMEM;

	memcpy(&mc->addr, addr, rdma_addr_size(addr));
	mc->context = context;
	mc->id_priv = id_priv;

	/* NOTE(review): the list is added under plain spin_lock() here but
	 * removed under spin_lock_irq() below and in rdma_leave_multicast;
	 * confirm id_priv->lock is never taken from irq context. */
	spin_lock(&id_priv->lock);
	list_add(&mc->list, &id_priv->mc_list);
	spin_unlock(&id_priv->lock);

	switch (rdma_node_get_transport(id->device->node_type)) {
	case RDMA_TRANSPORT_IB:
		/* IB joins go through the SA; IBoE joins are synthesized. */
		switch (rdma_port_get_link_layer(id->device, id->port_num)) {
		case IB_LINK_LAYER_INFINIBAND:
			ret = cma_join_ib_multicast(id_priv, mc);
			break;
		case IB_LINK_LAYER_ETHERNET:
			kref_init(&mc->mcref);
			ret = cma_iboe_join_multicast(id_priv, mc);
			break;
		default:
			ret = -EINVAL;
		}
		break;
	default:
		ret = -ENOSYS;
		break;
	}

	if (ret) {
		spin_lock_irq(&id_priv->lock);
		list_del(&mc->list);
		spin_unlock_irq(&id_priv->lock);
		kfree(mc);
	}
	return ret;
}
EXPORT_SYMBOL(rdma_join_multicast);
3412
/*
 * rdma_leave_multicast() - leave a group previously joined on @id.
 * Finds the join record matching @addr, detaches any bound QP from the
 * group, then releases the join: SA-managed for InfiniBand links,
 * refcount-managed for IBoE (Ethernet) links.  Unknown addresses are
 * silently ignored.
 */
void rdma_leave_multicast(struct rdma_cm_id *id, struct sockaddr *addr)
{
	struct rdma_id_private *id_priv;
	struct cma_multicast *mc;

	id_priv = container_of(id, struct rdma_id_private, id);
	spin_lock_irq(&id_priv->lock);
	list_for_each_entry(mc, &id_priv->mc_list, list) {
		if (!memcmp(&mc->addr, addr, rdma_addr_size(addr))) {
			/* Unlink under the lock, then release it before the
			 * potentially sleeping teardown calls below. */
			list_del(&mc->list);
			spin_unlock_irq(&id_priv->lock);

			if (id->qp)
				ib_detach_mcast(id->qp,
						&mc->multicast.ib->rec.mgid,
						be16_to_cpu(mc->multicast.ib->rec.mlid));
			if (rdma_node_get_transport(id_priv->cma_dev->device->node_type) == RDMA_TRANSPORT_IB) {
				switch (rdma_port_get_link_layer(id->device, id->port_num)) {
				case IB_LINK_LAYER_INFINIBAND:
					ib_sa_free_multicast(mc->multicast.ib);
					kfree(mc);
					break;
				case IB_LINK_LAYER_ETHERNET:
					/* Last ref frees mc via release_mc. */
					kref_put(&mc->mcref, release_mc);
					break;
				default:
					break;
				}
			}
			return;
		}
	}
	spin_unlock_irq(&id_priv->lock);
}
EXPORT_SYMBOL(rdma_leave_multicast);
3448
Or Gerlitzdd5bdff2008-07-22 14:14:22 -07003449static int cma_netdev_change(struct net_device *ndev, struct rdma_id_private *id_priv)
3450{
3451 struct rdma_dev_addr *dev_addr;
3452 struct cma_ndev_work *work;
3453
3454 dev_addr = &id_priv->id.route.addr.dev_addr;
3455
Sean Hefty6266ed62009-11-19 12:55:22 -08003456 if ((dev_addr->bound_dev_if == ndev->ifindex) &&
Or Gerlitzdd5bdff2008-07-22 14:14:22 -07003457 memcmp(dev_addr->src_dev_addr, ndev->dev_addr, ndev->addr_len)) {
3458 printk(KERN_INFO "RDMA CM addr change for ndev %s used by id %p\n",
3459 ndev->name, &id_priv->id);
3460 work = kzalloc(sizeof *work, GFP_KERNEL);
3461 if (!work)
3462 return -ENOMEM;
3463
3464 INIT_WORK(&work->work, cma_ndev_work_handler);
3465 work->id = id_priv;
3466 work->event.event = RDMA_CM_EVENT_ADDR_CHANGE;
3467 atomic_inc(&id_priv->refcount);
3468 queue_work(cma_wq, &work->work);
3469 }
3470
3471 return 0;
3472}
3473
/*
 * Netdevice notifier: on a bonding failover of a bonding master in the
 * initial network namespace, check every tracked id and queue address
 * change events as needed.  Returns NOTIFY_DONE or an error from
 * cma_netdev_change().
 */
static int cma_netdev_callback(struct notifier_block *self, unsigned long event,
			       void *ptr)
{
	struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
	struct cma_device *cma_dev;
	struct rdma_id_private *id_priv;
	int ret = NOTIFY_DONE;

	if (dev_net(ndev) != &init_net)
		return NOTIFY_DONE;

	if (event != NETDEV_BONDING_FAILOVER)
		return NOTIFY_DONE;

	/* Only bonding master devices are of interest. */
	if (!(ndev->flags & IFF_MASTER) || !(ndev->priv_flags & IFF_BONDING))
		return NOTIFY_DONE;

	mutex_lock(&lock);
	list_for_each_entry(cma_dev, &dev_list, list)
		list_for_each_entry(id_priv, &cma_dev->id_list, list) {
			ret = cma_netdev_change(ndev, id_priv);
			if (ret)
				goto out;
		}

out:
	mutex_unlock(&lock);
	return ret;
}
3503
/* Notifier block used to watch for bonding failover on netdevices. */
static struct notifier_block cma_nb = {
	.notifier_call = cma_netdev_callback
};
3507
/*
 * IB client "add" callback: start tracking a new RDMA device and extend
 * every wildcard listener onto it.  Allocation failure is silently
 * ignored (the device is simply not tracked).
 */
static void cma_add_one(struct ib_device *device)
{
	struct cma_device *cma_dev;
	struct rdma_id_private *id_priv;

	cma_dev = kmalloc(sizeof *cma_dev, GFP_KERNEL);
	if (!cma_dev)
		return;

	cma_dev->device = device;

	/* comp is signalled when the last reference is dropped. */
	init_completion(&cma_dev->comp);
	atomic_set(&cma_dev->refcount, 1);
	INIT_LIST_HEAD(&cma_dev->id_list);
	ib_set_client_data(device, &cma_client, cma_dev);

	mutex_lock(&lock);
	list_add_tail(&cma_dev->list, &dev_list);
	/* Replicate all wildcard listens onto the new device. */
	list_for_each_entry(id_priv, &listen_any_list, list)
		cma_listen_on_dev(id_priv, cma_dev);
	mutex_unlock(&lock);
}
3530
/*
 * Notify one id that its underlying device is being removed.
 * Moves the id to DEVICE_REMOVAL, cancels in-flight work, and delivers
 * RDMA_CM_EVENT_DEVICE_REMOVAL.  Returns nonzero when the caller should
 * destroy the id (per the user handler's return value).
 */
static int cma_remove_id_dev(struct rdma_id_private *id_priv)
{
	struct rdma_cm_event event;
	enum rdma_cm_state state;
	int ret = 0;

	/* Record that we want to remove the device */
	state = cma_exch(id_priv, RDMA_CM_DEVICE_REMOVAL);
	if (state == RDMA_CM_DESTROYING)
		return 0;

	cma_cancel_operation(id_priv, state);
	mutex_lock(&id_priv->handler_mutex);

	/* Check for destruction from another callback. */
	if (!cma_comp(id_priv, RDMA_CM_DEVICE_REMOVAL))
		goto out;

	memset(&event, 0, sizeof event);
	event.event = RDMA_CM_EVENT_DEVICE_REMOVAL;
	ret = id_priv->id.event_handler(&id_priv->id, &event);
out:
	mutex_unlock(&id_priv->handler_mutex);
	return ret;
}
3556
/*
 * Detach every id from a departing cma_device.  Each id is unlinked
 * under the global lock, notified (or marked for destruction if it is
 * an internal listener) with the lock dropped, and destroyed when its
 * handler requests it.  Finally waits for all device references to go.
 */
static void cma_process_remove(struct cma_device *cma_dev)
{
	struct rdma_id_private *id_priv;
	int ret;

	mutex_lock(&lock);
	while (!list_empty(&cma_dev->id_list)) {
		id_priv = list_entry(cma_dev->id_list.next,
				     struct rdma_id_private, list);

		list_del(&id_priv->listen_list);
		list_del_init(&id_priv->list);
		/* Hold a reference across the unlocked handler call. */
		atomic_inc(&id_priv->refcount);
		mutex_unlock(&lock);

		ret = id_priv->internal_id ? 1 : cma_remove_id_dev(id_priv);
		cma_deref_id(id_priv);
		if (ret)
			rdma_destroy_id(&id_priv->id);

		mutex_lock(&lock);
	}
	mutex_unlock(&lock);

	/* Drop our reference and wait for all others to be released. */
	cma_deref_dev(cma_dev);
	wait_for_completion(&cma_dev->comp);
}
3584
3585static void cma_remove_one(struct ib_device *device)
3586{
3587 struct cma_device *cma_dev;
3588
3589 cma_dev = ib_get_client_data(device, &cma_client);
3590 if (!cma_dev)
3591 return;
3592
3593 mutex_lock(&lock);
3594 list_del(&cma_dev->list);
3595 mutex_unlock(&lock);
3596
3597 cma_process_remove(cma_dev);
3598 kfree(cma_dev);
3599}
3600
Nir Muchtar753f6182011-01-03 15:33:53 +00003601static int cma_get_id_stats(struct sk_buff *skb, struct netlink_callback *cb)
3602{
3603 struct nlmsghdr *nlh;
3604 struct rdma_cm_id_stats *id_stats;
3605 struct rdma_id_private *id_priv;
3606 struct rdma_cm_id *id = NULL;
3607 struct cma_device *cma_dev;
3608 int i_dev = 0, i_id = 0;
3609
3610 /*
3611 * We export all of the IDs as a sequence of messages. Each
3612 * ID gets its own netlink message.
3613 */
3614 mutex_lock(&lock);
3615
3616 list_for_each_entry(cma_dev, &dev_list, list) {
3617 if (i_dev < cb->args[0]) {
3618 i_dev++;
3619 continue;
3620 }
3621
3622 i_id = 0;
3623 list_for_each_entry(id_priv, &cma_dev->id_list, list) {
3624 if (i_id < cb->args[1]) {
3625 i_id++;
3626 continue;
3627 }
3628
3629 id_stats = ibnl_put_msg(skb, &nlh, cb->nlh->nlmsg_seq,
3630 sizeof *id_stats, RDMA_NL_RDMA_CM,
3631 RDMA_NL_RDMA_CM_ID_STATS);
3632 if (!id_stats)
3633 goto out;
3634
3635 memset(id_stats, 0, sizeof *id_stats);
3636 id = &id_priv->id;
3637 id_stats->node_type = id->route.addr.dev_addr.dev_type;
3638 id_stats->port_num = id->port_num;
3639 id_stats->bound_dev_if =
3640 id->route.addr.dev_addr.bound_dev_if;
3641
Sean Heftyce117ff2013-05-29 10:09:34 -07003642 if (ibnl_put_attr(skb, nlh,
3643 rdma_addr_size(cma_src_addr(id_priv)),
3644 cma_src_addr(id_priv),
3645 RDMA_NL_RDMA_CM_ATTR_SRC_ADDR))
3646 goto out;
3647 if (ibnl_put_attr(skb, nlh,
3648 rdma_addr_size(cma_src_addr(id_priv)),
3649 cma_dst_addr(id_priv),
3650 RDMA_NL_RDMA_CM_ATTR_DST_ADDR))
3651 goto out;
Nir Muchtar753f6182011-01-03 15:33:53 +00003652
Nir Muchtar83e95022011-01-13 13:56:04 +00003653 id_stats->pid = id_priv->owner;
Nir Muchtar753f6182011-01-03 15:33:53 +00003654 id_stats->port_space = id->ps;
3655 id_stats->cm_state = id_priv->state;
3656 id_stats->qp_num = id_priv->qp_num;
3657 id_stats->qp_type = id->qp_type;
3658
3659 i_id++;
3660 }
3661
3662 cb->args[1] = 0;
3663 i_dev++;
3664 }
3665
3666out:
3667 mutex_unlock(&lock);
3668 cb->args[0] = i_dev;
3669 cb->args[1] = i_id;
3670
3671 return skb->len;
3672}
3673
/* Netlink dump callbacks registered with the RDMA netlink core. */
static const struct ibnl_client_cbs cma_cb_table[] = {
	[RDMA_NL_RDMA_CM_ID_STATS] = { .dump = cma_get_id_stats,
				       .module = THIS_MODULE },
};
3678
/*
 * Module init: create the work queue, register with the SA, address
 * resolution, netdevice-notifier and IB-client frameworks, then hook up
 * the netlink stats interface.  Netlink registration failure is only
 * warned about; everything else unwinds on error.
 */
static int __init cma_init(void)
{
	int ret;

	cma_wq = create_singlethread_workqueue("rdma_cm");
	if (!cma_wq)
		return -ENOMEM;

	ib_sa_register_client(&sa_client);
	rdma_addr_register_client(&addr_client);
	register_netdevice_notifier(&cma_nb);

	ret = ib_register_client(&cma_client);
	if (ret)
		goto err;

	/* Netlink support is optional; a failure here is non-fatal. */
	if (ibnl_add_client(RDMA_NL_RDMA_CM, RDMA_NL_RDMA_CM_NUM_OPS, cma_cb_table))
		printk(KERN_WARNING "RDMA CMA: failed to add netlink callback\n");

	return 0;

err:
	/* Unwind in reverse registration order. */
	unregister_netdevice_notifier(&cma_nb);
	rdma_addr_unregister_client(&addr_client);
	ib_sa_unregister_client(&sa_client);
	destroy_workqueue(cma_wq);
	return ret;
}
3707
/*
 * Module exit: unwind cma_init() in reverse order, then release the
 * per-port-space idrs used for port reservation.
 */
static void __exit cma_cleanup(void)
{
	ibnl_remove_client(RDMA_NL_RDMA_CM);
	ib_unregister_client(&cma_client);
	unregister_netdevice_notifier(&cma_nb);
	rdma_addr_unregister_client(&addr_client);
	ib_sa_unregister_client(&sa_client);
	destroy_workqueue(cma_wq);
	idr_destroy(&tcp_ps);
	idr_destroy(&udp_ps);
	idr_destroy(&ipoib_ps);
	idr_destroy(&ib_ps);
}
3721
/* Standard module entry/exit hooks. */
module_init(cma_init);
module_exit(cma_cleanup);