/*
 * Copyright (c) 2004, 2005 Intel Corporation.  All rights reserved.
 * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
 * Copyright (c) 2004, 2005 Voltaire Corporation.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc.  All rights reserved.
 * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
 * Copyright (c) 2005 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/completion.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/sysctl.h>

#include <rdma/iw_cm.h>
#include <rdma/ib_addr.h>
#include <rdma/iw_portmap.h>
#include <rdma/rdma_netlink.h>

#include "iwcm.h"

MODULE_AUTHOR("Tom Tucker");
MODULE_DESCRIPTION("iWARP CM");
MODULE_LICENSE("Dual BSD/GPL");

static struct ibnl_client_cbs iwcm_nl_cb_table[] = {
	[RDMA_NL_IWPM_REG_PID] = {.dump = iwpm_register_pid_cb},
	[RDMA_NL_IWPM_ADD_MAPPING] = {.dump = iwpm_add_mapping_cb},
	[RDMA_NL_IWPM_QUERY_MAPPING] = {.dump = iwpm_add_and_query_mapping_cb},
	[RDMA_NL_IWPM_REMOTE_INFO] = {.dump = iwpm_remote_info_cb},
	[RDMA_NL_IWPM_HANDLE_ERR] = {.dump = iwpm_mapping_error_cb},
	[RDMA_NL_IWPM_MAPINFO] = {.dump = iwpm_mapping_info_cb},
	[RDMA_NL_IWPM_MAPINFO_NUM] = {.dump = iwpm_ack_mapping_info_cb}
};

static struct workqueue_struct *iwcm_wq;
struct iwcm_work {
	struct work_struct work;
	struct iwcm_id_private *cm_id;
	struct list_head list;
	struct iw_cm_event event;
	struct list_head free_list;
};

static unsigned int default_backlog = 256;

static struct ctl_table_header *iwcm_ctl_table_hdr;
static struct ctl_table iwcm_ctl_table[] = {
	{
		.procname	= "default_backlog",
		.data		= &default_backlog,
		.maxlen		= sizeof(default_backlog),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{ }
};

/*
 * The following services provide a mechanism for pre-allocating iwcm_work
 * elements.  The design pre-allocates them based on the cm_id type:
 *	LISTENING IDS:	Get enough elements preallocated to handle the
 *			listen backlog.
 *	ACTIVE IDS:	4: CONNECT_REPLY, ESTABLISHED, DISCONNECT, CLOSE
 *	PASSIVE IDS:	3: ESTABLISHED, DISCONNECT, CLOSE
 *
 * Allocating them in connect and listen avoids having to deal
 * with allocation failures on the event upcall from the provider (which
 * is called in interrupt context).
 *
 * One exception is when creating the cm_id for incoming connection requests.
 * There are two cases:
 * 1) in the event upcall, cm_event_handler(), for a listening cm_id.  If
 *    the backlog is exceeded, then no more connection request events will
 *    be processed.  cm_event_handler() returns -ENOMEM in this case.  It's
 *    up to the provider to reject the connection request.
 * 2) in the connection request workqueue handler, cm_conn_req_handler().
 *    If work elements cannot be allocated for the new connect request cm_id,
 *    then IWCM will call the provider reject method.  This is ok since
 *    cm_conn_req_handler() runs in the workqueue thread context.
 */

static struct iwcm_work *get_work(struct iwcm_id_private *cm_id_priv)
{
	struct iwcm_work *work;

	if (list_empty(&cm_id_priv->work_free_list))
		return NULL;
	work = list_entry(cm_id_priv->work_free_list.next, struct iwcm_work,
			  free_list);
	list_del_init(&work->free_list);
	return work;
}

static void put_work(struct iwcm_work *work)
{
	list_add(&work->free_list, &work->cm_id->work_free_list);
}

static void dealloc_work_entries(struct iwcm_id_private *cm_id_priv)
{
	struct list_head *e, *tmp;

	list_for_each_safe(e, tmp, &cm_id_priv->work_free_list) {
		list_del(e);
		kfree(list_entry(e, struct iwcm_work, free_list));
	}
}

static int alloc_work_entries(struct iwcm_id_private *cm_id_priv, int count)
{
	struct iwcm_work *work;

	BUG_ON(!list_empty(&cm_id_priv->work_free_list));
	while (count--) {
		work = kmalloc(sizeof(struct iwcm_work), GFP_KERNEL);
		if (!work) {
			dealloc_work_entries(cm_id_priv);
			return -ENOMEM;
		}
		work->cm_id = cm_id_priv;
		INIT_LIST_HEAD(&work->list);
		put_work(work);
	}
	return 0;
}

/*
 * Save private data from incoming connection requests to
 * iw_cm_event, so the low-level driver doesn't have to.  Adjust
 * the event ptr to point to the local copy.
 */
static int copy_private_data(struct iw_cm_event *event)
{
	void *p;

	p = kmemdup(event->private_data, event->private_data_len, GFP_ATOMIC);
	if (!p)
		return -ENOMEM;
	event->private_data = p;
	return 0;
}

static void free_cm_id(struct iwcm_id_private *cm_id_priv)
{
	dealloc_work_entries(cm_id_priv);
	kfree(cm_id_priv);
}

/*
 * Release a reference on cm_id. If the last reference is being
 * released, free the cm_id and return 1.
 */
static int iwcm_deref_id(struct iwcm_id_private *cm_id_priv)
{
	BUG_ON(atomic_read(&cm_id_priv->refcount) == 0);
	if (atomic_dec_and_test(&cm_id_priv->refcount)) {
		BUG_ON(!list_empty(&cm_id_priv->work_list));
		free_cm_id(cm_id_priv);
		return 1;
	}

	return 0;
}

static void add_ref(struct iw_cm_id *cm_id)
{
	struct iwcm_id_private *cm_id_priv;

	cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
	atomic_inc(&cm_id_priv->refcount);
}

static void rem_ref(struct iw_cm_id *cm_id)
{
	struct iwcm_id_private *cm_id_priv;

	cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);

	(void)iwcm_deref_id(cm_id_priv);
}

static int cm_event_handler(struct iw_cm_id *cm_id, struct iw_cm_event *event);

struct iw_cm_id *iw_create_cm_id(struct ib_device *device,
				 iw_cm_handler cm_handler,
				 void *context)
{
	struct iwcm_id_private *cm_id_priv;

	cm_id_priv = kzalloc(sizeof(*cm_id_priv), GFP_KERNEL);
	if (!cm_id_priv)
		return ERR_PTR(-ENOMEM);

	cm_id_priv->state = IW_CM_STATE_IDLE;
	cm_id_priv->id.device = device;
	cm_id_priv->id.cm_handler = cm_handler;
	cm_id_priv->id.context = context;
	cm_id_priv->id.event_handler = cm_event_handler;
	cm_id_priv->id.add_ref = add_ref;
	cm_id_priv->id.rem_ref = rem_ref;
	spin_lock_init(&cm_id_priv->lock);
	atomic_set(&cm_id_priv->refcount, 1);
	init_waitqueue_head(&cm_id_priv->connect_wait);
	init_completion(&cm_id_priv->destroy_comp);
	INIT_LIST_HEAD(&cm_id_priv->work_list);
	INIT_LIST_HEAD(&cm_id_priv->work_free_list);

	return &cm_id_priv->id;
}
EXPORT_SYMBOL(iw_create_cm_id);
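
/*
 * Illustrative only (compiled out): a minimal sketch of how a kernel ULP
 * might pair iw_create_cm_id() with iw_destroy_cm_id().  The handler and
 * function names below are hypothetical, not part of this file's API.
 */
#if 0
static int example_cm_handler(struct iw_cm_id *cm_id,
			      struct iw_cm_event *event)
{
	/* Returning nonzero from the handler destroys the cm_id. */
	return 0;
}

static void example_create_destroy(struct ib_device *device)
{
	struct iw_cm_id *cm_id;

	cm_id = iw_create_cm_id(device, example_cm_handler, NULL);
	if (IS_ERR(cm_id))
		return;

	/* ... listen or connect with the cm_id here ... */

	iw_destroy_cm_id(cm_id);	/* drops the initial reference */
}
#endif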

static int iwcm_modify_qp_err(struct ib_qp *qp)
{
	struct ib_qp_attr qp_attr;

	if (!qp)
		return -EINVAL;

	qp_attr.qp_state = IB_QPS_ERR;
	return ib_modify_qp(qp, &qp_attr, IB_QP_STATE);
}

/*
 * This is really the RDMAC CLOSING state. It is most similar to the
 * IB SQD QP state.
 */
static int iwcm_modify_qp_sqd(struct ib_qp *qp)
{
	struct ib_qp_attr qp_attr;

	BUG_ON(qp == NULL);
	qp_attr.qp_state = IB_QPS_SQD;
	return ib_modify_qp(qp, &qp_attr, IB_QP_STATE);
}

/*
 * CM_ID <-- CLOSING
 *
 * Block if a passive or active connection is currently being processed. Then
 * process the event as follows:
 * - If we are ESTABLISHED, move to CLOSING and modify the QP state
 *   based on the abrupt flag
 * - If the connection is already in the CLOSING or IDLE state, the peer is
 *   disconnecting concurrently with us and we've already seen the
 *   DISCONNECT event -- ignore the request and return 0
 * - Disconnect on a listening endpoint returns -EINVAL
 */
int iw_cm_disconnect(struct iw_cm_id *cm_id, int abrupt)
{
	struct iwcm_id_private *cm_id_priv;
	unsigned long flags;
	int ret = 0;
	struct ib_qp *qp = NULL;

	cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
	/* Wait if we're currently in a connect or accept downcall */
	wait_event(cm_id_priv->connect_wait,
		   !test_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags));

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->state) {
	case IW_CM_STATE_ESTABLISHED:
		cm_id_priv->state = IW_CM_STATE_CLOSING;

		/* QP could be NULL for a user-mode client */
		if (cm_id_priv->qp)
			qp = cm_id_priv->qp;
		else
			ret = -EINVAL;
		break;
	case IW_CM_STATE_LISTEN:
		ret = -EINVAL;
		break;
	case IW_CM_STATE_CLOSING:
		/* remote peer closed first */
	case IW_CM_STATE_IDLE:
		/* accept or connect returned !0 */
		break;
	case IW_CM_STATE_CONN_RECV:
		/*
		 * App called disconnect before/without calling accept after
		 * connect_request event delivered.
		 */
		break;
	case IW_CM_STATE_CONN_SENT:
		/* Can only get here if wait above fails */
	default:
		BUG();
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	if (qp) {
		if (abrupt)
			ret = iwcm_modify_qp_err(qp);
		else
			ret = iwcm_modify_qp_sqd(qp);

		/*
		 * If both sides are disconnecting the QP could
		 * already be in ERR or SQD states
		 */
		ret = 0;
	}

	return ret;
}
EXPORT_SYMBOL(iw_cm_disconnect);
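
/*
 * Illustrative only (compiled out): the abrupt flag selects between the two
 * QP transitions above.  A hypothetical caller might wrap the choice like
 * this; "graceful" is an assumption about caller policy, not CM API.
 */
#if 0
static int example_close(struct iw_cm_id *cm_id, bool graceful)
{
	/* graceful => SQD-like CLOSING; otherwise move the QP to ERR */
	return iw_cm_disconnect(cm_id, graceful ? 0 : 1);
}
#endif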

/*
 * CM_ID <-- DESTROYING
 *
 * Clean up all resources associated with the connection and release
 * the initial reference taken by iw_create_cm_id.
 */
static void destroy_cm_id(struct iw_cm_id *cm_id)
{
	struct iwcm_id_private *cm_id_priv;
	unsigned long flags;

	cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
	/*
	 * Wait if we're currently in a connect or accept downcall. A
	 * listening endpoint should never block here.
	 */
	wait_event(cm_id_priv->connect_wait,
		   !test_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags));

	/*
	 * Since we're deleting the cm_id, drop any events that
	 * might arrive before the last dereference.
	 */
	set_bit(IWCM_F_DROP_EVENTS, &cm_id_priv->flags);

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->state) {
	case IW_CM_STATE_LISTEN:
		cm_id_priv->state = IW_CM_STATE_DESTROYING;
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		/* destroy the listening endpoint */
		cm_id->device->iwcm->destroy_listen(cm_id);
		spin_lock_irqsave(&cm_id_priv->lock, flags);
		break;
	case IW_CM_STATE_ESTABLISHED:
		cm_id_priv->state = IW_CM_STATE_DESTROYING;
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		/* Abrupt close of the connection */
		(void)iwcm_modify_qp_err(cm_id_priv->qp);
		spin_lock_irqsave(&cm_id_priv->lock, flags);
		break;
	case IW_CM_STATE_IDLE:
	case IW_CM_STATE_CLOSING:
		cm_id_priv->state = IW_CM_STATE_DESTROYING;
		break;
	case IW_CM_STATE_CONN_RECV:
		/*
		 * App called destroy before/without calling accept after
		 * receiving connection request event notification or
		 * returned nonzero from the event callback function.
		 * In either case, must tell the provider to reject.
		 */
		cm_id_priv->state = IW_CM_STATE_DESTROYING;
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_id->device->iwcm->reject(cm_id, NULL, 0);
		spin_lock_irqsave(&cm_id_priv->lock, flags);
		break;
	case IW_CM_STATE_CONN_SENT:
	case IW_CM_STATE_DESTROYING:
	default:
		BUG();
		break;
	}
	if (cm_id_priv->qp) {
		cm_id_priv->id.device->iwcm->rem_ref(cm_id_priv->qp);
		cm_id_priv->qp = NULL;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	if (cm_id->mapped) {
		iwpm_remove_mapinfo(&cm_id->local_addr, &cm_id->m_local_addr);
		iwpm_remove_mapping(&cm_id->local_addr, RDMA_NL_IWCM);
	}

	(void)iwcm_deref_id(cm_id_priv);
}

/*
 * This function is only called by the application thread and cannot
 * be called by the event thread. The function will wait for all
 * references to be released on the cm_id and then kfree the cm_id
 * object.
 */
void iw_destroy_cm_id(struct iw_cm_id *cm_id)
{
	struct iwcm_id_private *cm_id_priv;

	cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
	destroy_cm_id(cm_id);
}
EXPORT_SYMBOL(iw_destroy_cm_id);

/**
 * iw_cm_check_wildcard - If the IP address is a wildcard, use the actual one
 * @pm_addr: sockaddr containing the IP to check for wildcard
 * @cm_addr: sockaddr containing the actual IP address
 * @cm_outaddr: sockaddr in which to set the IP address, leaving the port as-is
 *
 * Checks pm_addr for a wildcard and, if one is found, sets cm_outaddr's
 * IP to the actual address (cm_addr).
 */
static void iw_cm_check_wildcard(struct sockaddr_storage *pm_addr,
				 struct sockaddr_storage *cm_addr,
				 struct sockaddr_storage *cm_outaddr)
{
	if (pm_addr->ss_family == AF_INET) {
		struct sockaddr_in *pm4_addr = (struct sockaddr_in *)pm_addr;

		if (pm4_addr->sin_addr.s_addr == htonl(INADDR_ANY)) {
			struct sockaddr_in *cm4_addr =
				(struct sockaddr_in *)cm_addr;
			struct sockaddr_in *cm4_outaddr =
				(struct sockaddr_in *)cm_outaddr;

			cm4_outaddr->sin_addr = cm4_addr->sin_addr;
		}
	} else {
		struct sockaddr_in6 *pm6_addr = (struct sockaddr_in6 *)pm_addr;

		if (ipv6_addr_type(&pm6_addr->sin6_addr) == IPV6_ADDR_ANY) {
			struct sockaddr_in6 *cm6_addr =
				(struct sockaddr_in6 *)cm_addr;
			struct sockaddr_in6 *cm6_outaddr =
				(struct sockaddr_in6 *)cm_outaddr;

			cm6_outaddr->sin6_addr = cm6_addr->sin6_addr;
		}
	}
}

/**
 * iw_cm_map - Use the portmapper to map the ports
 * @cm_id: connection manager pointer
 * @active: Indicates the active side when true
 *
 * Returns nonzero for error only if iwpm_create_mapinfo() fails.
 *
 * Tries to add a mapping for a port using the Portmapper. If
 * successful in mapping the IP/Port it will check the remote
 * mapped IP address for a wildcard IP address and replace the
 * zero IP address with the remote_addr.
 */
static int iw_cm_map(struct iw_cm_id *cm_id, bool active)
{
	struct iwpm_dev_data pm_reg_msg;
	struct iwpm_sa_data pm_msg;
	int status;

	cm_id->m_local_addr = cm_id->local_addr;
	cm_id->m_remote_addr = cm_id->remote_addr;

	memcpy(pm_reg_msg.dev_name, cm_id->device->name,
	       sizeof(pm_reg_msg.dev_name));
	memcpy(pm_reg_msg.if_name, cm_id->device->iwcm->ifname,
	       sizeof(pm_reg_msg.if_name));

	if (iwpm_register_pid(&pm_reg_msg, RDMA_NL_IWCM) ||
	    !iwpm_valid_pid())
		return 0;

	cm_id->mapped = true;
	pm_msg.loc_addr = cm_id->local_addr;
	pm_msg.rem_addr = cm_id->remote_addr;
	if (active)
		status = iwpm_add_and_query_mapping(&pm_msg,
						    RDMA_NL_IWCM);
	else
		status = iwpm_add_mapping(&pm_msg, RDMA_NL_IWCM);

	if (!status) {
		cm_id->m_local_addr = pm_msg.mapped_loc_addr;
		if (active) {
			cm_id->m_remote_addr = pm_msg.mapped_rem_addr;
			iw_cm_check_wildcard(&pm_msg.mapped_rem_addr,
					     &cm_id->remote_addr,
					     &cm_id->m_remote_addr);
		}
	}

	return iwpm_create_mapinfo(&cm_id->local_addr,
				   &cm_id->m_local_addr,
				   RDMA_NL_IWCM);
}

/*
 * CM_ID <-- LISTEN
 *
 * Start listening for connect requests. Generates one CONNECT_REQUEST
 * event for each inbound connect request.
 */
int iw_cm_listen(struct iw_cm_id *cm_id, int backlog)
{
	struct iwcm_id_private *cm_id_priv;
	unsigned long flags;
	int ret;

	cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);

	if (!backlog)
		backlog = default_backlog;

	ret = alloc_work_entries(cm_id_priv, backlog);
	if (ret)
		return ret;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->state) {
	case IW_CM_STATE_IDLE:
		cm_id_priv->state = IW_CM_STATE_LISTEN;
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		ret = iw_cm_map(cm_id, false);
		if (!ret)
			ret = cm_id->device->iwcm->create_listen(cm_id, backlog);
		if (ret)
			cm_id_priv->state = IW_CM_STATE_IDLE;
		spin_lock_irqsave(&cm_id_priv->lock, flags);
		break;
	default:
		ret = -EINVAL;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	return ret;
}
EXPORT_SYMBOL(iw_cm_listen);
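
/*
 * Illustrative only (compiled out): a hedged sketch of passive-side setup.
 * The IPv4 address, port number and handler are illustrative assumptions;
 * real consumers typically reach this path through the RDMA CM rather than
 * calling iw_cm_listen() directly.
 */
#if 0
static struct iw_cm_id *example_listen(struct ib_device *device,
				       iw_cm_handler handler)
{
	struct iw_cm_id *cm_id;
	struct sockaddr_in *sin;
	int ret;

	cm_id = iw_create_cm_id(device, handler, NULL);
	if (IS_ERR(cm_id))
		return cm_id;

	sin = (struct sockaddr_in *)&cm_id->local_addr;
	sin->sin_family = AF_INET;
	sin->sin_addr.s_addr = htonl(INADDR_ANY);
	sin->sin_port = htons(5000);		/* illustrative port */

	ret = iw_cm_listen(cm_id, 0);		/* 0 => default_backlog */
	if (ret) {
		iw_destroy_cm_id(cm_id);
		return ERR_PTR(ret);
	}
	return cm_id;
}
#endif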

/*
 * CM_ID <-- IDLE
 *
 * Rejects an inbound connection request. No events are generated.
 */
int iw_cm_reject(struct iw_cm_id *cm_id,
		 const void *private_data,
		 u8 private_data_len)
{
	struct iwcm_id_private *cm_id_priv;
	unsigned long flags;
	int ret;

	cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
	set_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id_priv->state != IW_CM_STATE_CONN_RECV) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
		wake_up_all(&cm_id_priv->connect_wait);
		return -EINVAL;
	}
	cm_id_priv->state = IW_CM_STATE_IDLE;
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	ret = cm_id->device->iwcm->reject(cm_id, private_data,
					  private_data_len);

	clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
	wake_up_all(&cm_id_priv->connect_wait);

	return ret;
}
EXPORT_SYMBOL(iw_cm_reject);

/*
 * CM_ID <-- ESTABLISHED
 *
 * Accepts an inbound connection request and generates an ESTABLISHED
 * event. Callers of iw_cm_disconnect and iw_destroy_cm_id will block
 * until the ESTABLISHED event is received from the provider.
 */
int iw_cm_accept(struct iw_cm_id *cm_id,
		 struct iw_cm_conn_param *iw_param)
{
	struct iwcm_id_private *cm_id_priv;
	struct ib_qp *qp;
	unsigned long flags;
	int ret;

	cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
	set_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id_priv->state != IW_CM_STATE_CONN_RECV) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
		wake_up_all(&cm_id_priv->connect_wait);
		return -EINVAL;
	}
	/* Get the ib_qp given the QPN */
	qp = cm_id->device->iwcm->get_qp(cm_id->device, iw_param->qpn);
	if (!qp) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
		wake_up_all(&cm_id_priv->connect_wait);
		return -EINVAL;
	}
	cm_id->device->iwcm->add_ref(qp);
	cm_id_priv->qp = qp;
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	ret = cm_id->device->iwcm->accept(cm_id, iw_param);
	if (ret) {
		/* An error on accept precludes provider events */
		BUG_ON(cm_id_priv->state != IW_CM_STATE_CONN_RECV);
		cm_id_priv->state = IW_CM_STATE_IDLE;
		spin_lock_irqsave(&cm_id_priv->lock, flags);
		if (cm_id_priv->qp) {
			cm_id->device->iwcm->rem_ref(qp);
			cm_id_priv->qp = NULL;
		}
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
		wake_up_all(&cm_id_priv->connect_wait);
	}

	return ret;
}
EXPORT_SYMBOL(iw_cm_accept);
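
/*
 * Illustrative only (compiled out): accepting from inside the client
 * cm_handler once IW_CM_EVENT_CONNECT_REQUEST arrives.  example_qpn is a
 * hypothetical stand-in for the QP number the caller obtained when it
 * created its QP.
 */
#if 0
static u32 example_qpn;

static int example_conn_req_handler(struct iw_cm_id *cm_id,
				    struct iw_cm_event *event)
{
	struct iw_cm_conn_param param = {
		.ord = event->ord,
		.ird = event->ird,
		.qpn = example_qpn,
	};

	if (event->event != IW_CM_EVENT_CONNECT_REQUEST)
		return 0;

	/* Success is reported later via IW_CM_EVENT_ESTABLISHED. */
	return iw_cm_accept(cm_id, &param);
}
#endif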

/*
 * Active Side: CM_ID <-- CONN_SENT
 *
 * If successful, results in the generation of a CONNECT_REPLY
 * event. iw_cm_disconnect and iw_destroy_cm_id will block until the
 * CONNECT_REPLY event is received from the provider.
 */
int iw_cm_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *iw_param)
{
	struct iwcm_id_private *cm_id_priv;
	int ret;
	unsigned long flags;
	struct ib_qp *qp;

	cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);

	ret = alloc_work_entries(cm_id_priv, 4);
	if (ret)
		return ret;

	set_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
	spin_lock_irqsave(&cm_id_priv->lock, flags);

	if (cm_id_priv->state != IW_CM_STATE_IDLE) {
		ret = -EINVAL;
		goto err;
	}

	/* Get the ib_qp given the QPN */
	qp = cm_id->device->iwcm->get_qp(cm_id->device, iw_param->qpn);
	if (!qp) {
		ret = -EINVAL;
		goto err;
	}
	cm_id->device->iwcm->add_ref(qp);
	cm_id_priv->qp = qp;
	cm_id_priv->state = IW_CM_STATE_CONN_SENT;
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	ret = iw_cm_map(cm_id, true);
	if (!ret)
		ret = cm_id->device->iwcm->connect(cm_id, iw_param);
	if (!ret)
		return 0;	/* success */

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id_priv->qp) {
		cm_id->device->iwcm->rem_ref(qp);
		cm_id_priv->qp = NULL;
	}
	cm_id_priv->state = IW_CM_STATE_IDLE;
err:
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
	wake_up_all(&cm_id_priv->connect_wait);
	return ret;
}
EXPORT_SYMBOL(iw_cm_connect);
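
/*
 * Illustrative only (compiled out): an active-side connect.  The address,
 * port, ORD/IRD depths and qpn argument are illustrative assumptions; the
 * outcome is delivered later via IW_CM_EVENT_CONNECT_REPLY, and until then
 * iw_cm_disconnect()/iw_destroy_cm_id() will block.
 */
#if 0
static int example_connect(struct iw_cm_id *cm_id, u32 qpn)
{
	struct iw_cm_conn_param param = {
		.ord = 16,
		.ird = 16,
		.qpn = qpn,
	};
	struct sockaddr_in *sin = (struct sockaddr_in *)&cm_id->remote_addr;

	sin->sin_family = AF_INET;
	sin->sin_addr.s_addr = htonl(INADDR_LOOPBACK);	/* illustrative */
	sin->sin_port = htons(5000);			/* illustrative */

	return iw_cm_connect(cm_id, &param);
}
#endif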

/*
 * Passive Side: new CM_ID <-- CONN_RECV
 *
 * Handles an inbound connect request. The function creates a new
 * iw_cm_id to represent the new connection and inherits the client
 * callback function and other attributes from the listening parent.
 *
 * The work item contains a pointer to the listen_cm_id and the event. The
 * listen_cm_id contains the client cm_handler, context and
 * device. These are copied when the cm_id is cloned. The event
 * contains the new four-tuple.
 *
 * An error on the child should not affect the parent, so this
 * function does not return a value.
 */
static void cm_conn_req_handler(struct iwcm_id_private *listen_id_priv,
				struct iw_cm_event *iw_event)
{
	unsigned long flags;
	struct iw_cm_id *cm_id;
	struct iwcm_id_private *cm_id_priv;
	int ret;

	/*
	 * The provider should never generate a connection request
	 * event with a bad status.
	 */
	BUG_ON(iw_event->status);

	cm_id = iw_create_cm_id(listen_id_priv->id.device,
				listen_id_priv->id.cm_handler,
				listen_id_priv->id.context);
	/* If the cm_id could not be created, ignore the request */
	if (IS_ERR(cm_id))
		goto out;

	cm_id->provider_data = iw_event->provider_data;
	cm_id->m_local_addr = iw_event->local_addr;
	cm_id->m_remote_addr = iw_event->remote_addr;
	cm_id->local_addr = listen_id_priv->id.local_addr;

	ret = iwpm_get_remote_info(&listen_id_priv->id.m_local_addr,
				   &iw_event->remote_addr,
				   &cm_id->remote_addr,
				   RDMA_NL_IWCM);
	if (ret) {
		cm_id->remote_addr = iw_event->remote_addr;
	} else {
		iw_cm_check_wildcard(&listen_id_priv->id.m_local_addr,
				     &iw_event->local_addr,
				     &cm_id->local_addr);
		iw_event->local_addr = cm_id->local_addr;
		iw_event->remote_addr = cm_id->remote_addr;
	}

	cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
	cm_id_priv->state = IW_CM_STATE_CONN_RECV;

	/*
	 * We could be destroying the listening id. If so, ignore this
	 * upcall.
	 */
	spin_lock_irqsave(&listen_id_priv->lock, flags);
	if (listen_id_priv->state != IW_CM_STATE_LISTEN) {
		spin_unlock_irqrestore(&listen_id_priv->lock, flags);
		iw_cm_reject(cm_id, NULL, 0);
		iw_destroy_cm_id(cm_id);
		goto out;
	}
	spin_unlock_irqrestore(&listen_id_priv->lock, flags);

	ret = alloc_work_entries(cm_id_priv, 3);
	if (ret) {
		iw_cm_reject(cm_id, NULL, 0);
		iw_destroy_cm_id(cm_id);
		goto out;
	}

	/* Call the client CM handler */
	ret = cm_id->cm_handler(cm_id, iw_event);
	if (ret) {
		iw_cm_reject(cm_id, NULL, 0);
		iw_destroy_cm_id(cm_id);
	}

out:
	if (iw_event->private_data_len)
		kfree(iw_event->private_data);
}

/*
 * Passive Side: CM_ID <-- ESTABLISHED
 *
 * The provider generated an ESTABLISHED event which means that
 * the MPA negotiation has completed successfully and we are now in MPA
 * FPDU mode.
 *
 * This event can only be received in the CONN_RECV state. If the
 * remote peer closed, the ESTABLISHED event would be received followed
 * by the CLOSE event. If the app closes, it will block until we wake
 * it up after processing this event.
 */
static int cm_conn_est_handler(struct iwcm_id_private *cm_id_priv,
			       struct iw_cm_event *iw_event)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cm_id_priv->lock, flags);

	/*
	 * We clear the CONNECT_WAIT bit here to allow the callback
	 * function to call iw_cm_disconnect. Calling iw_destroy_cm_id
	 * from a callback handler is not allowed.
	 */
	clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
	BUG_ON(cm_id_priv->state != IW_CM_STATE_CONN_RECV);
	cm_id_priv->state = IW_CM_STATE_ESTABLISHED;
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, iw_event);
	wake_up_all(&cm_id_priv->connect_wait);

	return ret;
}

/*
 * Active Side: CM_ID <-- ESTABLISHED
 *
 * The app has called connect and is waiting for the established event to
 * post its requests to the server. This event will wake up anyone
 * blocked in iw_cm_disconnect or iw_destroy_cm_id.
 */
static int cm_conn_rep_handler(struct iwcm_id_private *cm_id_priv,
			       struct iw_cm_event *iw_event)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	/*
	 * Clear the connect wait bit so a callback function calling
	 * iw_cm_disconnect will not wait and deadlock this thread
	 */
	clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
	BUG_ON(cm_id_priv->state != IW_CM_STATE_CONN_SENT);
	if (iw_event->status == 0) {
		cm_id_priv->id.m_local_addr = iw_event->local_addr;
		cm_id_priv->id.m_remote_addr = iw_event->remote_addr;
		iw_event->local_addr = cm_id_priv->id.local_addr;
		iw_event->remote_addr = cm_id_priv->id.remote_addr;
		cm_id_priv->state = IW_CM_STATE_ESTABLISHED;
	} else {
		/* REJECTED or RESET */
		cm_id_priv->id.device->iwcm->rem_ref(cm_id_priv->qp);
		cm_id_priv->qp = NULL;
		cm_id_priv->state = IW_CM_STATE_IDLE;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, iw_event);

	if (iw_event->private_data_len)
		kfree(iw_event->private_data);

	/* Wake up waiters on connect complete */
	wake_up_all(&cm_id_priv->connect_wait);

	return ret;
}

/*
 * CM_ID <-- CLOSING
 *
 * If in the ESTABLISHED state, move to CLOSING.
 */
static void cm_disconnect_handler(struct iwcm_id_private *cm_id_priv,
				  struct iw_cm_event *iw_event)
{
	unsigned long flags;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id_priv->state == IW_CM_STATE_ESTABLISHED)
		cm_id_priv->state = IW_CM_STATE_CLOSING;
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
}

/*
 * CM_ID <-- IDLE
 *
 * If in the ESTABLISHED or CLOSING states, the QP will have been
 * moved by the provider to the ERR state. Disassociate the CM_ID from
 * the QP, move to IDLE, and remove the 'connected' reference.
 *
 * If in some other state, the cm_id was destroyed asynchronously.
 * This is the last reference that will result in waking up
 * the app thread blocked in iw_destroy_cm_id.
 */
static int cm_close_handler(struct iwcm_id_private *cm_id_priv,
			    struct iw_cm_event *iw_event)
{
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&cm_id_priv->lock, flags);

	if (cm_id_priv->qp) {
		cm_id_priv->id.device->iwcm->rem_ref(cm_id_priv->qp);
		cm_id_priv->qp = NULL;
	}
	switch (cm_id_priv->state) {
	case IW_CM_STATE_ESTABLISHED:
	case IW_CM_STATE_CLOSING:
		cm_id_priv->state = IW_CM_STATE_IDLE;
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, iw_event);
		spin_lock_irqsave(&cm_id_priv->lock, flags);
		break;
	case IW_CM_STATE_DESTROYING:
		break;
	default:
		BUG();
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	return ret;
}

static int process_event(struct iwcm_id_private *cm_id_priv,
			 struct iw_cm_event *iw_event)
{
	int ret = 0;

	switch (iw_event->event) {
	case IW_CM_EVENT_CONNECT_REQUEST:
		cm_conn_req_handler(cm_id_priv, iw_event);
		break;
	case IW_CM_EVENT_CONNECT_REPLY:
		ret = cm_conn_rep_handler(cm_id_priv, iw_event);
		break;
	case IW_CM_EVENT_ESTABLISHED:
		ret = cm_conn_est_handler(cm_id_priv, iw_event);
		break;
	case IW_CM_EVENT_DISCONNECT:
		cm_disconnect_handler(cm_id_priv, iw_event);
		break;
	case IW_CM_EVENT_CLOSE:
		ret = cm_close_handler(cm_id_priv, iw_event);
		break;
	default:
		BUG();
	}

	return ret;
}

/*
 * Process events on the work_list for the cm_id. If the callback
 * function requests that the cm_id be deleted, a flag is set in the
 * cm_id flags to indicate that when the last reference is
 * removed, the cm_id is to be destroyed. This is necessary to
 * distinguish between an object that will be destroyed by the app
 * thread asleep on the destroy_comp list vs. an object destroyed
 * here synchronously when the last reference is removed.
 */
static void cm_work_handler(struct work_struct *_work)
{
	struct iwcm_work *work = container_of(_work, struct iwcm_work, work);
	struct iw_cm_event levent;
	struct iwcm_id_private *cm_id_priv = work->cm_id;
	unsigned long flags;
	int empty;
	int ret = 0;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	empty = list_empty(&cm_id_priv->work_list);
	while (!empty) {
		work = list_entry(cm_id_priv->work_list.next,
				  struct iwcm_work, list);
		list_del_init(&work->list);
		empty = list_empty(&cm_id_priv->work_list);
		levent = work->event;
		put_work(work);
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);

		if (!test_bit(IWCM_F_DROP_EVENTS, &cm_id_priv->flags)) {
			ret = process_event(cm_id_priv, &levent);
			if (ret)
				destroy_cm_id(&cm_id_priv->id);
		} else
			pr_debug("dropping event %d\n", levent.event);
		if (iwcm_deref_id(cm_id_priv))
			return;
		if (empty)
			return;
		spin_lock_irqsave(&cm_id_priv->lock, flags);
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
}

/*
 * This function is called in interrupt context. Schedule events on
 * the iwcm_wq thread to allow callback functions to downcall into
 * the CM and/or block. Events are queued to a per-CM_ID
 * work_list. If this is the first event on the work_list, the work
 * element is also queued on the iwcm_wq thread.
 *
 * Each event holds a reference on the cm_id. Until the last posted
 * event has been delivered and processed, the cm_id cannot be
 * deleted.
 *
 * Returns:
 *	0	- the event was handled.
 *	-ENOMEM	- the event was not handled due to lack of resources.
 */
static int cm_event_handler(struct iw_cm_id *cm_id,
			    struct iw_cm_event *iw_event)
{
	struct iwcm_work *work;
	struct iwcm_id_private *cm_id_priv;
	unsigned long flags;
	int ret = 0;

	cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	work = get_work(cm_id_priv);
	if (!work) {
		ret = -ENOMEM;
		goto out;
	}

	INIT_WORK(&work->work, cm_work_handler);
	work->cm_id = cm_id_priv;
	work->event = *iw_event;

	if ((work->event.event == IW_CM_EVENT_CONNECT_REQUEST ||
	     work->event.event == IW_CM_EVENT_CONNECT_REPLY) &&
	    work->event.private_data_len) {
		ret = copy_private_data(&work->event);
		if (ret) {
			put_work(work);
			goto out;
		}
	}

	atomic_inc(&cm_id_priv->refcount);
	if (list_empty(&cm_id_priv->work_list)) {
		list_add_tail(&work->list, &cm_id_priv->work_list);
		queue_work(iwcm_wq, &work->work);
	} else
		list_add_tail(&work->list, &cm_id_priv->work_list);
out:
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
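
/*
 * Illustrative only (compiled out): how a provider driver might deliver an
 * event through the upcall installed by iw_create_cm_id().  The event type
 * chosen here is arbitrary; the call must tolerate interrupt context, which
 * is why cm_event_handler() never blocks.
 */
#if 0
static void example_provider_upcall(struct iw_cm_id *cm_id)
{
	struct iw_cm_event event = {};

	event.event = IW_CM_EVENT_DISCONNECT;
	event.status = 0;

	/* -ENOMEM means no pre-allocated work element was available. */
	if (cm_id->event_handler(cm_id, &event))
		pr_debug("iw_cm: event %d dropped\n", event.event);
}
#endif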

static int iwcm_init_qp_init_attr(struct iwcm_id_private *cm_id_priv,
				  struct ib_qp_attr *qp_attr,
				  int *qp_attr_mask)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->state) {
	case IW_CM_STATE_IDLE:
	case IW_CM_STATE_CONN_SENT:
	case IW_CM_STATE_CONN_RECV:
	case IW_CM_STATE_ESTABLISHED:
		*qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS;
		qp_attr->qp_access_flags = IB_ACCESS_REMOTE_WRITE |
					   IB_ACCESS_REMOTE_READ;
		ret = 0;
		break;
	default:
		ret = -EINVAL;
		break;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}

static int iwcm_init_qp_rts_attr(struct iwcm_id_private *cm_id_priv,
				 struct ib_qp_attr *qp_attr,
				 int *qp_attr_mask)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->state) {
	case IW_CM_STATE_IDLE:
	case IW_CM_STATE_CONN_SENT:
	case IW_CM_STATE_CONN_RECV:
	case IW_CM_STATE_ESTABLISHED:
		*qp_attr_mask = 0;
		ret = 0;
		break;
	default:
		ret = -EINVAL;
		break;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}

int iw_cm_init_qp_attr(struct iw_cm_id *cm_id,
		       struct ib_qp_attr *qp_attr,
		       int *qp_attr_mask)
{
	struct iwcm_id_private *cm_id_priv;
	int ret;

	cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
	switch (qp_attr->qp_state) {
	case IB_QPS_INIT:
	case IB_QPS_RTR:
		ret = iwcm_init_qp_init_attr(cm_id_priv,
					     qp_attr, qp_attr_mask);
		break;
	case IB_QPS_RTS:
		ret = iwcm_init_qp_rts_attr(cm_id_priv,
					    qp_attr, qp_attr_mask);
		break;
	default:
		ret = -EINVAL;
		break;
	}
	return ret;
}
EXPORT_SYMBOL(iw_cm_init_qp_attr);
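
/*
 * Illustrative only (compiled out): a caller transitioning its QP with
 * attributes filled in by iw_cm_init_qp_attr().  The helper name is
 * hypothetical.
 */
#if 0
static int example_modify_qp(struct iw_cm_id *cm_id, struct ib_qp *qp,
			     enum ib_qp_state state)
{
	struct ib_qp_attr qp_attr;
	int qp_attr_mask;
	int ret;

	qp_attr.qp_state = state;
	ret = iw_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
	if (ret)
		return ret;

	return ib_modify_qp(qp, &qp_attr, qp_attr_mask);
}
#endif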

static int __init iw_cm_init(void)
{
	int ret;

	ret = iwpm_init(RDMA_NL_IWCM);
	if (ret)
		pr_err("iw_cm: couldn't init iwpm\n");

	ret = ibnl_add_client(RDMA_NL_IWCM, ARRAY_SIZE(iwcm_nl_cb_table),
			      iwcm_nl_cb_table);
	if (ret)
		pr_err("iw_cm: couldn't register netlink callbacks\n");

	iwcm_wq = alloc_ordered_workqueue("iw_cm_wq", WQ_MEM_RECLAIM);
	if (!iwcm_wq)
		return -ENOMEM;

	iwcm_ctl_table_hdr = register_net_sysctl(&init_net, "net/iw_cm",
						 iwcm_ctl_table);
	if (!iwcm_ctl_table_hdr) {
		pr_err("iw_cm: couldn't register sysctl paths\n");
		destroy_workqueue(iwcm_wq);
		return -ENOMEM;
	}

	return 0;
}

static void __exit iw_cm_cleanup(void)
{
	unregister_net_sysctl_table(iwcm_ctl_table_hdr);
	destroy_workqueue(iwcm_wq);
	ibnl_remove_client(RDMA_NL_IWCM);
	iwpm_exit(RDMA_NL_IWCM);
}

module_init(iw_cm_init);
module_exit(iw_cm_cleanup);