/*
 * Copyright (c) 2005-2006 Intel Corporation.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/completion.h>
#include <linux/file.h>
#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/idr.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/miscdevice.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/module.h>
#include <linux/nsproxy.h>

#include <rdma/rdma_user_cm.h>
#include <rdma/ib_marshall.h>
#include <rdma/rdma_cm.h>
#include <rdma/rdma_cm_ib.h>
#include <rdma/ib_addr.h>
#include <rdma/ib.h>

MODULE_AUTHOR("Sean Hefty");
MODULE_DESCRIPTION("RDMA Userspace Connection Manager Access");
MODULE_LICENSE("Dual BSD/GPL");

static unsigned int max_backlog = 1024;

static struct ctl_table_header *ucma_ctl_table_hdr;
static struct ctl_table ucma_ctl_table[] = {
	{
		.procname	= "max_backlog",
		.data		= &max_backlog,
		.maxlen		= sizeof max_backlog,
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{ }
};

struct ucma_file {
	struct mutex		mut;
	struct file		*filp;
	struct list_head	ctx_list;
	struct list_head	event_list;
	wait_queue_head_t	poll_wait;
	struct workqueue_struct	*close_wq;
};

struct ucma_context {
	int			id;
	struct completion	comp;
	atomic_t		ref;
	int			events_reported;
	int			backlog;

	struct ucma_file	*file;
	struct rdma_cm_id	*cm_id;
	u64			uid;

	struct list_head	list;
	struct list_head	mc_list;
	/* set while the device is tearing down this context's underlying
	 * HW resources; protected by the global mut
	 */
	int			closing;
	/* syncs the removal event with id destruction; protected by file->mut */
	int			destroying;
	struct work_struct	close_work;
};

struct ucma_multicast {
	struct ucma_context	*ctx;
	int			id;
	int			events_reported;

	u64			uid;
	u8			join_state;
	struct list_head	list;
	struct sockaddr_storage	addr;
};

struct ucma_event {
	struct ucma_context	*ctx;
	struct ucma_multicast	*mc;
	struct list_head	list;
	struct rdma_cm_id	*cm_id;
	struct rdma_ucm_event_resp resp;
	struct work_struct	close_work;
};

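/*
 * Locking overview, as implemented below: the global "mut" protects
 * both IDRs and the ctx->closing flag, while each file's "mut"
 * protects that file's ctx_list and event_list.  Context lifetime is
 * tracked by ctx->ref; the final put fires ctx->comp so teardown
 * paths can wait out in-flight users.
 */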
static DEFINE_MUTEX(mut);
static DEFINE_IDR(ctx_idr);
static DEFINE_IDR(multicast_idr);

static inline struct ucma_context *_ucma_find_context(int id,
						      struct ucma_file *file)
{
	struct ucma_context *ctx;

	ctx = idr_find(&ctx_idr, id);
	if (!ctx)
		ctx = ERR_PTR(-ENOENT);
	else if (ctx->file != file || !ctx->cm_id)
		ctx = ERR_PTR(-EINVAL);
	return ctx;
}

static struct ucma_context *ucma_get_ctx(struct ucma_file *file, int id)
{
	struct ucma_context *ctx;

	mutex_lock(&mut);
	ctx = _ucma_find_context(id, file);
	if (!IS_ERR(ctx)) {
		if (ctx->closing)
			ctx = ERR_PTR(-EIO);
		else
			atomic_inc(&ctx->ref);
	}
	mutex_unlock(&mut);
	return ctx;
}

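/*
 * Drop a reference taken by ucma_get_ctx().  Each context starts life
 * with one reference, so the completion only fires after a teardown
 * path has surrendered that initial reference and every concurrent
 * user has finished.
 */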
static void ucma_put_ctx(struct ucma_context *ctx)
{
	if (atomic_dec_and_test(&ctx->ref))
		complete(&ctx->comp);
}

/*
 * Same as ucma_get_ctx but requires that ->cm_id->device is valid, i.e. that
 * the CM_ID is bound.
 */
static struct ucma_context *ucma_get_ctx_dev(struct ucma_file *file, int id)
{
	struct ucma_context *ctx = ucma_get_ctx(file, id);

	if (IS_ERR(ctx))
		return ctx;
	if (!ctx->cm_id->device) {
		ucma_put_ctx(ctx);
		return ERR_PTR(-EINVAL);
	}
	return ctx;
}

static void ucma_close_event_id(struct work_struct *work)
{
	struct ucma_event *uevent_close = container_of(work, struct ucma_event, close_work);

	rdma_destroy_id(uevent_close->cm_id);
	kfree(uevent_close);
}

static void ucma_close_id(struct work_struct *work)
{
	struct ucma_context *ctx = container_of(work, struct ucma_context, close_work);

	/* once all inflight tasks are finished, we close all underlying
	 * resources. The context stays alive until it is explicitly destroyed
	 * by its creator.
	 */
	ucma_put_ctx(ctx);
	wait_for_completion(&ctx->comp);
	/* No new events will be generated after destroying the id. */
	rdma_destroy_id(ctx->cm_id);
}

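/*
 * Allocate a context with one reference held for the caller, publish it
 * in ctx_idr under the global mutex, and link it onto the file's
 * ctx_list.  Both call sites hold file->mut across this, which is what
 * keeps the list insertion safe.
 */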
static struct ucma_context *ucma_alloc_ctx(struct ucma_file *file)
{
	struct ucma_context *ctx;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return NULL;

	INIT_WORK(&ctx->close_work, ucma_close_id);
	atomic_set(&ctx->ref, 1);
	init_completion(&ctx->comp);
	INIT_LIST_HEAD(&ctx->mc_list);
	ctx->file = file;

	mutex_lock(&mut);
	ctx->id = idr_alloc(&ctx_idr, ctx, 0, 0, GFP_KERNEL);
	mutex_unlock(&mut);
	if (ctx->id < 0)
		goto error;

	list_add_tail(&ctx->list, &file->ctx_list);
	return ctx;

error:
	kfree(ctx);
	return NULL;
}

static struct ucma_multicast *ucma_alloc_multicast(struct ucma_context *ctx)
{
	struct ucma_multicast *mc;

	mc = kzalloc(sizeof(*mc), GFP_KERNEL);
	if (!mc)
		return NULL;

	mutex_lock(&mut);
	mc->id = idr_alloc(&multicast_idr, mc, 0, 0, GFP_KERNEL);
	mutex_unlock(&mut);
	if (mc->id < 0)
		goto error;

	mc->ctx = ctx;
	list_add_tail(&mc->list, &ctx->mc_list);
	return mc;

error:
	kfree(mc);
	return NULL;
}

static void ucma_copy_conn_event(struct rdma_ucm_conn_param *dst,
				 struct rdma_conn_param *src)
{
	if (src->private_data_len)
		memcpy(dst->private_data, src->private_data,
		       src->private_data_len);
	dst->private_data_len = src->private_data_len;
	dst->responder_resources = src->responder_resources;
	dst->initiator_depth = src->initiator_depth;
	dst->flow_control = src->flow_control;
	dst->retry_count = src->retry_count;
	dst->rnr_retry_count = src->rnr_retry_count;
	dst->srq = src->srq;
	dst->qp_num = src->qp_num;
}

static void ucma_copy_ud_event(struct ib_device *device,
			       struct rdma_ucm_ud_param *dst,
			       struct rdma_ud_param *src)
{
	if (src->private_data_len)
		memcpy(dst->private_data, src->private_data,
		       src->private_data_len);
	dst->private_data_len = src->private_data_len;
	ib_copy_ah_attr_to_user(device, &dst->ah_attr, &src->ah_attr);
	dst->qp_num = src->qp_num;
	dst->qkey = src->qkey;
}

static void ucma_set_event_context(struct ucma_context *ctx,
				   struct rdma_cm_event *event,
				   struct ucma_event *uevent)
{
	uevent->ctx = ctx;
	switch (event->event) {
	case RDMA_CM_EVENT_MULTICAST_JOIN:
	case RDMA_CM_EVENT_MULTICAST_ERROR:
		uevent->mc = (struct ucma_multicast *)
			     event->param.ud.private_data;
		uevent->resp.uid = uevent->mc->uid;
		uevent->resp.id = uevent->mc->id;
		break;
	default:
		uevent->resp.uid = ctx->uid;
		uevent->resp.id = ctx->id;
		break;
	}
}

/* Called with file->mut locked for the relevant context. */
static void ucma_removal_event_handler(struct rdma_cm_id *cm_id)
{
	struct ucma_context *ctx = cm_id->context;
	struct ucma_event *con_req_eve;
	int event_found = 0;

	if (ctx->destroying)
		return;

	/* Only if the context owns this cm_id can the cm_id be queued for
	 * closing.  Otherwise the cm_id is an inflight one, sitting on the
	 * context's event list and waiting to be detached and reattached to
	 * its new context by ucma_get_event(); that case is handled
	 * separately below.
	 */
	if (ctx->cm_id == cm_id) {
		mutex_lock(&mut);
		ctx->closing = 1;
		mutex_unlock(&mut);
		queue_work(ctx->file->close_wq, &ctx->close_work);
		return;
	}

	list_for_each_entry(con_req_eve, &ctx->file->event_list, list) {
		if (con_req_eve->cm_id == cm_id &&
		    con_req_eve->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST) {
			list_del(&con_req_eve->list);
			INIT_WORK(&con_req_eve->close_work, ucma_close_event_id);
			queue_work(ctx->file->close_wq, &con_req_eve->close_work);
			event_found = 1;
			break;
		}
	}
	if (!event_found)
		pr_err("ucma_removal_event_handler: warning: connect request event wasn't found\n");
}

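/*
 * rdma_cm event callback: package the event as a ucma_event, queue it
 * on the owning file's event list and wake any pollers.  A nonzero
 * return (taken when a connect request cannot be queued) asks the
 * rdma_cm core to tear down the passively created id.
 */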
static int ucma_event_handler(struct rdma_cm_id *cm_id,
			      struct rdma_cm_event *event)
{
	struct ucma_event *uevent;
	struct ucma_context *ctx = cm_id->context;
	int ret = 0;

	uevent = kzalloc(sizeof(*uevent), GFP_KERNEL);
	if (!uevent)
		return event->event == RDMA_CM_EVENT_CONNECT_REQUEST;

	mutex_lock(&ctx->file->mut);
	uevent->cm_id = cm_id;
	ucma_set_event_context(ctx, event, uevent);
	uevent->resp.event = event->event;
	uevent->resp.status = event->status;
	if (cm_id->qp_type == IB_QPT_UD)
		ucma_copy_ud_event(cm_id->device, &uevent->resp.param.ud,
				   &event->param.ud);
	else
		ucma_copy_conn_event(&uevent->resp.param.conn,
				     &event->param.conn);

	if (event->event == RDMA_CM_EVENT_CONNECT_REQUEST) {
		if (!ctx->backlog) {
			ret = -ENOMEM;
			kfree(uevent);
			goto out;
		}
		ctx->backlog--;
	} else if (!ctx->uid || ctx->cm_id != cm_id) {
		/*
		 * We ignore events for new connections until userspace has set
		 * their context.  This can only happen if an error occurs on a
		 * new connection before the user accepts it.  This is okay,
		 * since the accept will just fail later. However, we do need
		 * to release the underlying HW resources in case of a device
		 * removal event.
		 */
		if (event->event == RDMA_CM_EVENT_DEVICE_REMOVAL)
			ucma_removal_event_handler(cm_id);

		kfree(uevent);
		goto out;
	}

	list_add_tail(&uevent->list, &ctx->file->event_list);
	wake_up_interruptible(&ctx->file->poll_wait);
	if (event->event == RDMA_CM_EVENT_DEVICE_REMOVAL)
		ucma_removal_event_handler(cm_id);
out:
	mutex_unlock(&ctx->file->mut);
	return ret;
}

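/*
 * Hand the oldest queued event to userspace, blocking unless the fd is
 * non-blocking.  A connect request is given a context of its own here,
 * before it is reported, so userspace can refer to the new id
 * immediately.
 */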
static ssize_t ucma_get_event(struct ucma_file *file, const char __user *inbuf,
			      int in_len, int out_len)
{
	struct ucma_context *ctx;
	struct rdma_ucm_get_event cmd;
	struct ucma_event *uevent;
	int ret = 0;

	/*
	 * Old 32 bit user space does not send the 4 byte padding in the
	 * reserved field. We don't care, allow it to keep working.
	 */
	if (out_len < sizeof(uevent->resp) - sizeof(uevent->resp.reserved))
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	mutex_lock(&file->mut);
	while (list_empty(&file->event_list)) {
		mutex_unlock(&file->mut);

		if (file->filp->f_flags & O_NONBLOCK)
			return -EAGAIN;

		if (wait_event_interruptible(file->poll_wait,
					     !list_empty(&file->event_list)))
			return -ERESTARTSYS;

		mutex_lock(&file->mut);
	}

	uevent = list_entry(file->event_list.next, struct ucma_event, list);

	if (uevent->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST) {
		ctx = ucma_alloc_ctx(file);
		if (!ctx) {
			ret = -ENOMEM;
			goto done;
		}
		uevent->ctx->backlog++;
		ctx->cm_id = uevent->cm_id;
		ctx->cm_id->context = ctx;
		uevent->resp.id = ctx->id;
	}

	if (copy_to_user(u64_to_user_ptr(cmd.response),
			 &uevent->resp,
			 min_t(size_t, out_len, sizeof(uevent->resp)))) {
		ret = -EFAULT;
		goto done;
	}

	list_del(&uevent->list);
	uevent->ctx->events_reported++;
	if (uevent->mc)
		uevent->mc->events_reported++;
	kfree(uevent);
done:
	mutex_unlock(&file->mut);
	return ret;
}

static int ucma_get_qp_type(struct rdma_ucm_create_id *cmd, enum ib_qp_type *qp_type)
{
	switch (cmd->ps) {
	case RDMA_PS_TCP:
		*qp_type = IB_QPT_RC;
		return 0;
	case RDMA_PS_UDP:
	case RDMA_PS_IPOIB:
		*qp_type = IB_QPT_UD;
		return 0;
	case RDMA_PS_IB:
		*qp_type = cmd->qp_type;
		return 0;
	default:
		return -EINVAL;
	}
}

static ssize_t ucma_create_id(struct ucma_file *file, const char __user *inbuf,
			      int in_len, int out_len)
{
	struct rdma_ucm_create_id cmd;
	struct rdma_ucm_create_id_resp resp;
	struct ucma_context *ctx;
	struct rdma_cm_id *cm_id;
	enum ib_qp_type qp_type;
	int ret;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ret = ucma_get_qp_type(&cmd, &qp_type);
	if (ret)
		return ret;

	mutex_lock(&file->mut);
	ctx = ucma_alloc_ctx(file);
	mutex_unlock(&file->mut);
	if (!ctx)
		return -ENOMEM;

	ctx->uid = cmd.uid;
	cm_id = __rdma_create_id(current->nsproxy->net_ns,
				 ucma_event_handler, ctx, cmd.ps, qp_type, NULL);
	if (IS_ERR(cm_id)) {
		ret = PTR_ERR(cm_id);
		goto err1;
	}

	resp.id = ctx->id;
	if (copy_to_user(u64_to_user_ptr(cmd.response),
			 &resp, sizeof(resp))) {
		ret = -EFAULT;
		goto err2;
	}

	ctx->cm_id = cm_id;
	return 0;

err2:
	rdma_destroy_id(cm_id);
err1:
	mutex_lock(&mut);
	idr_remove(&ctx_idr, ctx->id);
	mutex_unlock(&mut);
	mutex_lock(&file->mut);
	list_del(&ctx->list);
	mutex_unlock(&file->mut);
	kfree(ctx);
	return ret;
}

static void ucma_cleanup_multicast(struct ucma_context *ctx)
{
	struct ucma_multicast *mc, *tmp;

	mutex_lock(&mut);
	list_for_each_entry_safe(mc, tmp, &ctx->mc_list, list) {
		list_del(&mc->list);
		idr_remove(&multicast_idr, mc->id);
		kfree(mc);
	}
	mutex_unlock(&mut);
}

static void ucma_cleanup_mc_events(struct ucma_multicast *mc)
{
	struct ucma_event *uevent, *tmp;

	list_for_each_entry_safe(uevent, tmp, &mc->ctx->file->event_list, list) {
		if (uevent->mc != mc)
			continue;

		list_del(&uevent->list);
		kfree(uevent);
	}
}

/*
 * ucma_free_ctx is called after the underlying rdma CM-ID is destroyed. At
 * this point, no new events will be reported from the hardware. However, we
 * still need to clean up the UCMA context for this ID. Specifically, there
 * might be events that have not yet been consumed by the user space software.
 * These might include pending connect requests which we have not completed
 * processing. We cannot call rdma_destroy_id while holding the lock of the
 * context (file->mut), as it might cause a deadlock. We therefore extract all
 * relevant events from the context's pending event list while holding the
 * mutex. After that we release them as needed.
 */
static int ucma_free_ctx(struct ucma_context *ctx)
{
	int events_reported;
	struct ucma_event *uevent, *tmp;
	LIST_HEAD(list);

	ucma_cleanup_multicast(ctx);

	/* Cleanup events not yet reported to the user. */
	mutex_lock(&ctx->file->mut);
	list_for_each_entry_safe(uevent, tmp, &ctx->file->event_list, list) {
		if (uevent->ctx == ctx)
			list_move_tail(&uevent->list, &list);
	}
	list_del(&ctx->list);
	mutex_unlock(&ctx->file->mut);

	list_for_each_entry_safe(uevent, tmp, &list, list) {
		list_del(&uevent->list);
		if (uevent->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST)
			rdma_destroy_id(uevent->cm_id);
		kfree(uevent);
	}

	events_reported = ctx->events_reported;
	kfree(ctx);
	return events_reported;
}

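/*
 * Destroy sequence: drop the id from ctx_idr so no new lookups can find
 * it, mark the context as destroying, then flush close_wq so any queued
 * close work has finished.  If no removal event already claimed the
 * cm_id (ctx->closing still clear), destroy it here once every other
 * reference has drained.
 */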
static ssize_t ucma_destroy_id(struct ucma_file *file, const char __user *inbuf,
			       int in_len, int out_len)
{
	struct rdma_ucm_destroy_id cmd;
	struct rdma_ucm_destroy_id_resp resp;
	struct ucma_context *ctx;
	int ret = 0;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	mutex_lock(&mut);
	ctx = _ucma_find_context(cmd.id, file);
	if (!IS_ERR(ctx))
		idr_remove(&ctx_idr, ctx->id);
	mutex_unlock(&mut);

	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	mutex_lock(&ctx->file->mut);
	ctx->destroying = 1;
	mutex_unlock(&ctx->file->mut);

	flush_workqueue(ctx->file->close_wq);
	/* At this point it's guaranteed that there is no inflight
	 * closing task.
	 */
	mutex_lock(&mut);
	if (!ctx->closing) {
		mutex_unlock(&mut);
		ucma_put_ctx(ctx);
		wait_for_completion(&ctx->comp);
		rdma_destroy_id(ctx->cm_id);
	} else {
		mutex_unlock(&mut);
	}

	resp.events_reported = ucma_free_ctx(ctx);
	if (copy_to_user(u64_to_user_ptr(cmd.response),
			 &resp, sizeof(resp)))
		ret = -EFAULT;

	return ret;
}

static ssize_t ucma_bind_ip(struct ucma_file *file, const char __user *inbuf,
			    int in_len, int out_len)
{
	struct rdma_ucm_bind_ip cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	if (!rdma_addr_size_in6(&cmd.addr))
		return -EINVAL;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_bind_addr(ctx->cm_id, (struct sockaddr *) &cmd.addr);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_bind(struct ucma_file *file, const char __user *inbuf,
			 int in_len, int out_len)
{
	struct rdma_ucm_bind cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	if (cmd.reserved || !cmd.addr_size ||
	    cmd.addr_size != rdma_addr_size_kss(&cmd.addr))
		return -EINVAL;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_bind_addr(ctx->cm_id, (struct sockaddr *) &cmd.addr);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_resolve_ip(struct ucma_file *file,
			       const char __user *inbuf,
			       int in_len, int out_len)
{
	struct rdma_ucm_resolve_ip cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	if ((cmd.src_addr.sin6_family && !rdma_addr_size_in6(&cmd.src_addr)) ||
	    !rdma_addr_size_in6(&cmd.dst_addr))
		return -EINVAL;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_resolve_addr(ctx->cm_id, (struct sockaddr *) &cmd.src_addr,
				(struct sockaddr *) &cmd.dst_addr, cmd.timeout_ms);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_resolve_addr(struct ucma_file *file,
				 const char __user *inbuf,
				 int in_len, int out_len)
{
	struct rdma_ucm_resolve_addr cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	if (cmd.reserved ||
	    (cmd.src_size && (cmd.src_size != rdma_addr_size_kss(&cmd.src_addr))) ||
	    !cmd.dst_size || (cmd.dst_size != rdma_addr_size_kss(&cmd.dst_addr)))
		return -EINVAL;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_resolve_addr(ctx->cm_id, (struct sockaddr *) &cmd.src_addr,
				(struct sockaddr *) &cmd.dst_addr, cmd.timeout_ms);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_resolve_route(struct ucma_file *file,
				  const char __user *inbuf,
				  int in_len, int out_len)
{
	struct rdma_ucm_resolve_route cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx_dev(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_resolve_route(ctx->cm_id, cmd.timeout_ms);
	ucma_put_ctx(ctx);
	return ret;
}

static void ucma_copy_ib_route(struct rdma_ucm_query_route_resp *resp,
			       struct rdma_route *route)
{
	struct rdma_dev_addr *dev_addr;

	resp->num_paths = route->num_paths;
	switch (route->num_paths) {
	case 0:
		dev_addr = &route->addr.dev_addr;
		rdma_addr_get_dgid(dev_addr,
				   (union ib_gid *) &resp->ib_route[0].dgid);
		rdma_addr_get_sgid(dev_addr,
				   (union ib_gid *) &resp->ib_route[0].sgid);
		resp->ib_route[0].pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr));
		break;
	case 2:
		ib_copy_path_rec_to_user(&resp->ib_route[1],
					 &route->path_rec[1]);
		/* fall through */
	case 1:
		ib_copy_path_rec_to_user(&resp->ib_route[0],
					 &route->path_rec[0]);
		break;
	default:
		break;
	}
}

static void ucma_copy_iboe_route(struct rdma_ucm_query_route_resp *resp,
				 struct rdma_route *route)
{
	resp->num_paths = route->num_paths;
	switch (route->num_paths) {
	case 0:
		rdma_ip2gid((struct sockaddr *)&route->addr.dst_addr,
			    (union ib_gid *)&resp->ib_route[0].dgid);
		rdma_ip2gid((struct sockaddr *)&route->addr.src_addr,
			    (union ib_gid *)&resp->ib_route[0].sgid);
		resp->ib_route[0].pkey = cpu_to_be16(0xffff);
		break;
	case 2:
		ib_copy_path_rec_to_user(&resp->ib_route[1],
					 &route->path_rec[1]);
		/* fall through */
	case 1:
		ib_copy_path_rec_to_user(&resp->ib_route[0],
					 &route->path_rec[0]);
		break;
	default:
		break;
	}
}

static void ucma_copy_iw_route(struct rdma_ucm_query_route_resp *resp,
			       struct rdma_route *route)
{
	struct rdma_dev_addr *dev_addr;

	dev_addr = &route->addr.dev_addr;
	rdma_addr_get_dgid(dev_addr, (union ib_gid *) &resp->ib_route[0].dgid);
	rdma_addr_get_sgid(dev_addr, (union ib_gid *) &resp->ib_route[0].sgid);
}

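/*
 * Report the bound source/destination addresses plus, when a device is
 * attached, route data in the port's native form: SA path records for
 * IB, records whose GIDs are derived from the IP addresses for RoCE,
 * and only the local/remote GIDs for iWarp.
 */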
static ssize_t ucma_query_route(struct ucma_file *file,
				const char __user *inbuf,
				int in_len, int out_len)
{
	struct rdma_ucm_query cmd;
	struct rdma_ucm_query_route_resp resp;
	struct ucma_context *ctx;
	struct sockaddr *addr;
	int ret = 0;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	memset(&resp, 0, sizeof resp);
	addr = (struct sockaddr *) &ctx->cm_id->route.addr.src_addr;
	memcpy(&resp.src_addr, addr, addr->sa_family == AF_INET ?
				     sizeof(struct sockaddr_in) :
				     sizeof(struct sockaddr_in6));
	addr = (struct sockaddr *) &ctx->cm_id->route.addr.dst_addr;
	memcpy(&resp.dst_addr, addr, addr->sa_family == AF_INET ?
				     sizeof(struct sockaddr_in) :
				     sizeof(struct sockaddr_in6));
	if (!ctx->cm_id->device)
		goto out;

	resp.node_guid = (__force __u64) ctx->cm_id->device->node_guid;
	resp.port_num = ctx->cm_id->port_num;

	if (rdma_cap_ib_sa(ctx->cm_id->device, ctx->cm_id->port_num))
		ucma_copy_ib_route(&resp, &ctx->cm_id->route);
	else if (rdma_protocol_roce(ctx->cm_id->device, ctx->cm_id->port_num))
		ucma_copy_iboe_route(&resp, &ctx->cm_id->route);
	else if (rdma_protocol_iwarp(ctx->cm_id->device, ctx->cm_id->port_num))
		ucma_copy_iw_route(&resp, &ctx->cm_id->route);

out:
	if (copy_to_user(u64_to_user_ptr(cmd.response),
			 &resp, sizeof(resp)))
		ret = -EFAULT;

	ucma_put_ctx(ctx);
	return ret;
}

static void ucma_query_device_addr(struct rdma_cm_id *cm_id,
				   struct rdma_ucm_query_addr_resp *resp)
{
	if (!cm_id->device)
		return;

	resp->node_guid = (__force __u64) cm_id->device->node_guid;
	resp->port_num = cm_id->port_num;
	resp->pkey = (__force __u16) cpu_to_be16(
		     ib_addr_get_pkey(&cm_id->route.addr.dev_addr));
}

static ssize_t ucma_query_addr(struct ucma_context *ctx,
			       void __user *response, int out_len)
{
	struct rdma_ucm_query_addr_resp resp;
	struct sockaddr *addr;
	int ret = 0;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	memset(&resp, 0, sizeof resp);

	addr = (struct sockaddr *) &ctx->cm_id->route.addr.src_addr;
	resp.src_size = rdma_addr_size(addr);
	memcpy(&resp.src_addr, addr, resp.src_size);

	addr = (struct sockaddr *) &ctx->cm_id->route.addr.dst_addr;
	resp.dst_size = rdma_addr_size(addr);
	memcpy(&resp.dst_addr, addr, resp.dst_size);

	ucma_query_device_addr(ctx->cm_id, &resp);

	if (copy_to_user(response, &resp, sizeof(resp)))
		ret = -EFAULT;

	return ret;
}

static ssize_t ucma_query_path(struct ucma_context *ctx,
			       void __user *response, int out_len)
{
	struct rdma_ucm_query_path_resp *resp;
	int i, ret = 0;

	if (out_len < sizeof(*resp))
		return -ENOSPC;

	resp = kzalloc(out_len, GFP_KERNEL);
	if (!resp)
		return -ENOMEM;

	resp->num_paths = ctx->cm_id->route.num_paths;
	for (i = 0, out_len -= sizeof(*resp);
	     i < resp->num_paths && out_len > sizeof(struct ib_path_rec_data);
	     i++, out_len -= sizeof(struct ib_path_rec_data)) {
		struct sa_path_rec *rec = &ctx->cm_id->route.path_rec[i];

		resp->path_data[i].flags = IB_PATH_GMP | IB_PATH_PRIMARY |
					   IB_PATH_BIDIRECTIONAL;
		if (rec->rec_type == SA_PATH_REC_TYPE_OPA) {
			struct sa_path_rec ib;

			sa_convert_path_opa_to_ib(&ib, rec);
			ib_sa_pack_path(&ib, &resp->path_data[i].path_rec);

		} else {
			ib_sa_pack_path(rec, &resp->path_data[i].path_rec);
		}
	}

	if (copy_to_user(response, resp,
			 sizeof(*resp) + (i * sizeof(struct ib_path_rec_data))))
		ret = -EFAULT;

	kfree(resp);
	return ret;
}

static ssize_t ucma_query_gid(struct ucma_context *ctx,
			      void __user *response, int out_len)
{
	struct rdma_ucm_query_addr_resp resp;
	struct sockaddr_ib *addr;
	int ret = 0;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	memset(&resp, 0, sizeof resp);

	ucma_query_device_addr(ctx->cm_id, &resp);

	addr = (struct sockaddr_ib *) &resp.src_addr;
	resp.src_size = sizeof(*addr);
	if (ctx->cm_id->route.addr.src_addr.ss_family == AF_IB) {
		memcpy(addr, &ctx->cm_id->route.addr.src_addr, resp.src_size);
	} else {
		addr->sib_family = AF_IB;
		addr->sib_pkey = (__force __be16) resp.pkey;
		rdma_read_gids(ctx->cm_id, (union ib_gid *)&addr->sib_addr,
			       NULL);
		addr->sib_sid = rdma_get_service_id(ctx->cm_id, (struct sockaddr *)
						    &ctx->cm_id->route.addr.src_addr);
	}

	addr = (struct sockaddr_ib *) &resp.dst_addr;
	resp.dst_size = sizeof(*addr);
	if (ctx->cm_id->route.addr.dst_addr.ss_family == AF_IB) {
		memcpy(addr, &ctx->cm_id->route.addr.dst_addr, resp.dst_size);
	} else {
		addr->sib_family = AF_IB;
		addr->sib_pkey = (__force __be16) resp.pkey;
		rdma_read_gids(ctx->cm_id, NULL,
			       (union ib_gid *)&addr->sib_addr);
		addr->sib_sid = rdma_get_service_id(ctx->cm_id, (struct sockaddr *)
						    &ctx->cm_id->route.addr.dst_addr);
	}

	if (copy_to_user(response, &resp, sizeof(resp)))
		ret = -EFAULT;

	return ret;
}

static ssize_t ucma_query(struct ucma_file *file,
			  const char __user *inbuf,
			  int in_len, int out_len)
{
	struct rdma_ucm_query cmd;
	struct ucma_context *ctx;
	void __user *response;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	response = u64_to_user_ptr(cmd.response);
	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	switch (cmd.option) {
	case RDMA_USER_CM_QUERY_ADDR:
		ret = ucma_query_addr(ctx, response, out_len);
		break;
	case RDMA_USER_CM_QUERY_PATH:
		ret = ucma_query_path(ctx, response, out_len);
		break;
	case RDMA_USER_CM_QUERY_GID:
		ret = ucma_query_gid(ctx, response, out_len);
		break;
	default:
		ret = -ENOSYS;
		break;
	}

	ucma_put_ctx(ctx);
	return ret;
}

static void ucma_copy_conn_param(struct rdma_cm_id *id,
				 struct rdma_conn_param *dst,
				 struct rdma_ucm_conn_param *src)
{
	dst->private_data = src->private_data;
	dst->private_data_len = src->private_data_len;
	dst->responder_resources = src->responder_resources;
	dst->initiator_depth = src->initiator_depth;
	dst->flow_control = src->flow_control;
	dst->retry_count = src->retry_count;
	dst->rnr_retry_count = src->rnr_retry_count;
	dst->srq = src->srq;
	dst->qp_num = src->qp_num;
	dst->qkey = (id->route.addr.src_addr.ss_family == AF_IB) ? src->qkey : 0;
}

static ssize_t ucma_connect(struct ucma_file *file, const char __user *inbuf,
			    int in_len, int out_len)
{
	struct rdma_ucm_connect cmd;
	struct rdma_conn_param conn_param;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	if (!cmd.conn_param.valid)
		return -EINVAL;

	ctx = ucma_get_ctx_dev(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ucma_copy_conn_param(ctx->cm_id, &conn_param, &cmd.conn_param);
	ret = rdma_connect(ctx->cm_id, &conn_param);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_listen(struct ucma_file *file, const char __user *inbuf,
			   int in_len, int out_len)
{
	struct rdma_ucm_listen cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ctx->backlog = cmd.backlog > 0 && cmd.backlog < max_backlog ?
		       cmd.backlog : max_backlog;
	ret = rdma_listen(ctx->cm_id, ctx->backlog);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_accept(struct ucma_file *file, const char __user *inbuf,
			   int in_len, int out_len)
{
	struct rdma_ucm_accept cmd;
	struct rdma_conn_param conn_param;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx_dev(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	if (cmd.conn_param.valid) {
		ucma_copy_conn_param(ctx->cm_id, &conn_param, &cmd.conn_param);
		mutex_lock(&file->mut);
		ret = __rdma_accept(ctx->cm_id, &conn_param, NULL);
		if (!ret)
			ctx->uid = cmd.uid;
		mutex_unlock(&file->mut);
	} else
		ret = __rdma_accept(ctx->cm_id, NULL, NULL);

	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_reject(struct ucma_file *file, const char __user *inbuf,
			   int in_len, int out_len)
{
	struct rdma_ucm_reject cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx_dev(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_reject(ctx->cm_id, cmd.private_data, cmd.private_data_len);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_disconnect(struct ucma_file *file, const char __user *inbuf,
			       int in_len, int out_len)
{
	struct rdma_ucm_disconnect cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx_dev(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_disconnect(ctx->cm_id);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_init_qp_attr(struct ucma_file *file,
				 const char __user *inbuf,
				 int in_len, int out_len)
{
	struct rdma_ucm_init_qp_attr cmd;
	struct ib_uverbs_qp_attr resp;
	struct ucma_context *ctx;
	struct ib_qp_attr qp_attr;
	int ret;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	if (cmd.qp_state > IB_QPS_ERR)
		return -EINVAL;

	ctx = ucma_get_ctx_dev(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	resp.qp_attr_mask = 0;
	memset(&qp_attr, 0, sizeof qp_attr);
	qp_attr.qp_state = cmd.qp_state;
	ret = rdma_init_qp_attr(ctx->cm_id, &qp_attr, &resp.qp_attr_mask);
	if (ret)
		goto out;

	ib_copy_qp_attr_to_user(ctx->cm_id->device, &resp, &qp_attr);
	if (copy_to_user(u64_to_user_ptr(cmd.response),
			 &resp, sizeof(resp)))
		ret = -EFAULT;

out:
	ucma_put_ctx(ctx);
	return ret;
}

static int ucma_set_option_id(struct ucma_context *ctx, int optname,
			      void *optval, size_t optlen)
{
	int ret = 0;

	switch (optname) {
	case RDMA_OPTION_ID_TOS:
		if (optlen != sizeof(u8)) {
			ret = -EINVAL;
			break;
		}
		rdma_set_service_type(ctx->cm_id, *((u8 *) optval));
		break;
	case RDMA_OPTION_ID_REUSEADDR:
		if (optlen != sizeof(int)) {
			ret = -EINVAL;
			break;
		}
		ret = rdma_set_reuseaddr(ctx->cm_id, *((int *) optval) ? 1 : 0);
		break;
	case RDMA_OPTION_ID_AFONLY:
		if (optlen != sizeof(int)) {
			ret = -EINVAL;
			break;
		}
		ret = rdma_set_afonly(ctx->cm_id, *((int *) optval) ? 1 : 0);
		break;
	default:
		ret = -ENOSYS;
	}

	return ret;
}

static int ucma_set_ib_path(struct ucma_context *ctx,
			    struct ib_path_rec_data *path_data, size_t optlen)
{
	struct sa_path_rec sa_path;
	struct rdma_cm_event event;
	int ret;

	if (optlen % sizeof(*path_data))
		return -EINVAL;

	for (; optlen; optlen -= sizeof(*path_data), path_data++) {
		if (path_data->flags == (IB_PATH_GMP | IB_PATH_PRIMARY |
					 IB_PATH_BIDIRECTIONAL))
			break;
	}

	if (!optlen)
		return -EINVAL;

	if (!ctx->cm_id->device)
		return -EINVAL;

	memset(&sa_path, 0, sizeof(sa_path));

	sa_path.rec_type = SA_PATH_REC_TYPE_IB;
	ib_sa_unpack_path(path_data->path_rec, &sa_path);

	if (rdma_cap_opa_ah(ctx->cm_id->device, ctx->cm_id->port_num)) {
		struct sa_path_rec opa;

		sa_convert_path_ib_to_opa(&opa, &sa_path);
		ret = rdma_set_ib_path(ctx->cm_id, &opa);
	} else {
		ret = rdma_set_ib_path(ctx->cm_id, &sa_path);
	}
	if (ret)
		return ret;

	memset(&event, 0, sizeof event);
	event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
	return ucma_event_handler(ctx->cm_id, &event);
}

static int ucma_set_option_ib(struct ucma_context *ctx, int optname,
			      void *optval, size_t optlen)
{
	int ret;

	switch (optname) {
	case RDMA_OPTION_IB_PATH:
		ret = ucma_set_ib_path(ctx, optval, optlen);
		break;
	default:
		ret = -ENOSYS;
	}

	return ret;
}

static int ucma_set_option_level(struct ucma_context *ctx, int level,
				 int optname, void *optval, size_t optlen)
{
	int ret;

	switch (level) {
	case RDMA_OPTION_ID:
		ret = ucma_set_option_id(ctx, optname, optval, optlen);
		break;
	case RDMA_OPTION_IB:
		ret = ucma_set_option_ib(ctx, optname, optval, optlen);
		break;
	default:
		ret = -ENOSYS;
	}

	return ret;
}

static ssize_t ucma_set_option(struct ucma_file *file, const char __user *inbuf,
			       int in_len, int out_len)
{
	struct rdma_ucm_set_option cmd;
	struct ucma_context *ctx;
	void *optval;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	if (unlikely(cmd.optlen > KMALLOC_MAX_SIZE))
		return -EINVAL;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	optval = memdup_user(u64_to_user_ptr(cmd.optval),
			     cmd.optlen);
	if (IS_ERR(optval)) {
		ret = PTR_ERR(optval);
		goto out;
	}

	ret = ucma_set_option_level(ctx, cmd.level, cmd.optname, optval,
				    cmd.optlen);
	kfree(optval);

out:
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_notify(struct ucma_file *file, const char __user *inbuf,
			   int in_len, int out_len)
{
	struct rdma_ucm_notify cmd;
	struct ucma_context *ctx;
	int ret = -EINVAL;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	if (ctx->cm_id->device)
		ret = rdma_notify(ctx->cm_id, (enum ib_event_type)cmd.event);

	ucma_put_ctx(ctx);
	return ret;
}

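/*
 * Common tail of both multicast join ABIs: map the requested join flags
 * onto an rdma_cm join state, allocate the ucma_multicast bookkeeping
 * entry under file->mut (which also links it onto ctx->mc_list), then
 * join.  On failure the entry and any events it already generated are
 * unwound before returning.
 */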
static ssize_t ucma_process_join(struct ucma_file *file,
				 struct rdma_ucm_join_mcast *cmd, int out_len)
{
	struct rdma_ucm_create_id_resp resp;
	struct ucma_context *ctx;
	struct ucma_multicast *mc;
	struct sockaddr *addr;
	int ret;
	u8 join_state;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	addr = (struct sockaddr *) &cmd->addr;
	if (cmd->addr_size != rdma_addr_size(addr))
		return -EINVAL;

	if (cmd->join_flags == RDMA_MC_JOIN_FLAG_FULLMEMBER)
		join_state = BIT(FULLMEMBER_JOIN);
	else if (cmd->join_flags == RDMA_MC_JOIN_FLAG_SENDONLY_FULLMEMBER)
		join_state = BIT(SENDONLY_FULLMEMBER_JOIN);
	else
		return -EINVAL;

	ctx = ucma_get_ctx_dev(file, cmd->id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	mutex_lock(&file->mut);
	mc = ucma_alloc_multicast(ctx);
	if (!mc) {
		ret = -ENOMEM;
		goto err1;
	}
	mc->join_state = join_state;
	mc->uid = cmd->uid;
	memcpy(&mc->addr, addr, cmd->addr_size);
	ret = rdma_join_multicast(ctx->cm_id, (struct sockaddr *)&mc->addr,
				  join_state, mc);
	if (ret)
		goto err2;

	resp.id = mc->id;
	if (copy_to_user(u64_to_user_ptr(cmd->response),
			 &resp, sizeof(resp))) {
		ret = -EFAULT;
		goto err3;
	}

	mutex_unlock(&file->mut);
	ucma_put_ctx(ctx);
	return 0;

err3:
	rdma_leave_multicast(ctx->cm_id, (struct sockaddr *) &mc->addr);
	ucma_cleanup_mc_events(mc);
err2:
	mutex_lock(&mut);
	idr_remove(&multicast_idr, mc->id);
	mutex_unlock(&mut);
	list_del(&mc->list);
	kfree(mc);
err1:
	mutex_unlock(&file->mut);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_join_ip_multicast(struct ucma_file *file,
				      const char __user *inbuf,
				      int in_len, int out_len)
{
	struct rdma_ucm_join_ip_mcast cmd;
	struct rdma_ucm_join_mcast join_cmd;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	join_cmd.response = cmd.response;
	join_cmd.uid = cmd.uid;
	join_cmd.id = cmd.id;
	join_cmd.addr_size = rdma_addr_size_in6(&cmd.addr);
	if (!join_cmd.addr_size)
		return -EINVAL;

	join_cmd.join_flags = RDMA_MC_JOIN_FLAG_FULLMEMBER;
	memcpy(&join_cmd.addr, &cmd.addr, join_cmd.addr_size);

	return ucma_process_join(file, &join_cmd, out_len);
}

static ssize_t ucma_join_multicast(struct ucma_file *file,
				   const char __user *inbuf,
				   int in_len, int out_len)
{
	struct rdma_ucm_join_mcast cmd;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	if (!rdma_addr_size_kss(&cmd.addr))
		return -EINVAL;

	return ucma_process_join(file, &cmd, out_len);
}

static ssize_t ucma_leave_multicast(struct ucma_file *file,
				    const char __user *inbuf,
				    int in_len, int out_len)
{
	struct rdma_ucm_destroy_id cmd;
	struct rdma_ucm_destroy_id_resp resp;
	struct ucma_multicast *mc;
	int ret = 0;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	mutex_lock(&mut);
	mc = idr_find(&multicast_idr, cmd.id);
	if (!mc)
		mc = ERR_PTR(-ENOENT);
	else if (mc->ctx->file != file)
		mc = ERR_PTR(-EINVAL);
	else if (!atomic_inc_not_zero(&mc->ctx->ref))
		mc = ERR_PTR(-ENXIO);
	else
		idr_remove(&multicast_idr, mc->id);
	mutex_unlock(&mut);

	if (IS_ERR(mc)) {
		ret = PTR_ERR(mc);
		goto out;
	}

	rdma_leave_multicast(mc->ctx->cm_id, (struct sockaddr *) &mc->addr);
	mutex_lock(&mc->ctx->file->mut);
	ucma_cleanup_mc_events(mc);
	list_del(&mc->list);
	mutex_unlock(&mc->ctx->file->mut);

	ucma_put_ctx(mc->ctx);
	resp.events_reported = mc->events_reported;
	kfree(mc);

	if (copy_to_user(u64_to_user_ptr(cmd.response),
			 &resp, sizeof(resp)))
		ret = -EFAULT;
out:
	return ret;
}

static void ucma_lock_files(struct ucma_file *file1, struct ucma_file *file2)
{
	/* Acquire mutexes based on pointer comparison to prevent deadlock. */
	if (file1 < file2) {
		mutex_lock(&file1->mut);
		mutex_lock_nested(&file2->mut, SINGLE_DEPTH_NESTING);
	} else {
		mutex_lock(&file2->mut);
		mutex_lock_nested(&file1->mut, SINGLE_DEPTH_NESTING);
	}
}

static void ucma_unlock_files(struct ucma_file *file1, struct ucma_file *file2)
{
	if (file1 < file2) {
		mutex_unlock(&file2->mut);
		mutex_unlock(&file1->mut);
	} else {
		mutex_unlock(&file1->mut);
		mutex_unlock(&file2->mut);
	}
}

static void ucma_move_events(struct ucma_context *ctx, struct ucma_file *file)
{
	struct ucma_event *uevent, *tmp;

	list_for_each_entry_safe(uevent, tmp, &ctx->file->event_list, list)
		if (uevent->ctx == ctx)
			list_move_tail(&uevent->list, &file->event_list);
}

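/*
 * Move a cm_id, along with any events it has already queued, from the
 * file it was created on to the calling file.  Both files' mutexes are
 * taken in pointer order (see ucma_lock_files) so concurrent
 * migrations in opposite directions cannot deadlock.
 */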
static ssize_t ucma_migrate_id(struct ucma_file *new_file,
			       const char __user *inbuf,
			       int in_len, int out_len)
{
	struct rdma_ucm_migrate_id cmd;
	struct rdma_ucm_migrate_resp resp;
	struct ucma_context *ctx;
	struct fd f;
	struct ucma_file *cur_file;
	int ret = 0;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	/* Get current fd to protect against it being closed */
	f = fdget(cmd.fd);
	if (!f.file)
		return -ENOENT;

	/* Validate current fd and prevent destruction of id. */
	ctx = ucma_get_ctx(f.file->private_data, cmd.id);
	if (IS_ERR(ctx)) {
		ret = PTR_ERR(ctx);
		goto file_put;
	}

	cur_file = ctx->file;
	if (cur_file == new_file) {
		resp.events_reported = ctx->events_reported;
		goto response;
	}

	/*
	 * Migrate events between fds, maintaining order and preventing
	 * new events from being queued ahead of existing ones.
	 */
	ucma_lock_files(cur_file, new_file);
	mutex_lock(&mut);

	list_move_tail(&ctx->list, &new_file->ctx_list);
	ucma_move_events(ctx, new_file);
	ctx->file = new_file;
	resp.events_reported = ctx->events_reported;

	mutex_unlock(&mut);
	ucma_unlock_files(cur_file, new_file);

response:
	if (copy_to_user(u64_to_user_ptr(cmd.response),
			 &resp, sizeof(resp)))
		ret = -EFAULT;

	ucma_put_ctx(ctx);
file_put:
	fdput(f);
	return ret;
}

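/*
 * Dispatch table indexed by the hdr.cmd value supplied from userspace; a
 * NULL slot (GET_OPTION) makes ucma_write() return -ENOSYS for that command.
 */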
static ssize_t (*ucma_cmd_table[])(struct ucma_file *file,
				   const char __user *inbuf,
				   int in_len, int out_len) = {
	[RDMA_USER_CM_CMD_CREATE_ID] 	 = ucma_create_id,
	[RDMA_USER_CM_CMD_DESTROY_ID] 	 = ucma_destroy_id,
	[RDMA_USER_CM_CMD_BIND_IP]	 = ucma_bind_ip,
	[RDMA_USER_CM_CMD_RESOLVE_IP]	 = ucma_resolve_ip,
	[RDMA_USER_CM_CMD_RESOLVE_ROUTE] = ucma_resolve_route,
	[RDMA_USER_CM_CMD_QUERY_ROUTE]	 = ucma_query_route,
	[RDMA_USER_CM_CMD_CONNECT]	 = ucma_connect,
	[RDMA_USER_CM_CMD_LISTEN]	 = ucma_listen,
	[RDMA_USER_CM_CMD_ACCEPT]	 = ucma_accept,
	[RDMA_USER_CM_CMD_REJECT]	 = ucma_reject,
	[RDMA_USER_CM_CMD_DISCONNECT]	 = ucma_disconnect,
	[RDMA_USER_CM_CMD_INIT_QP_ATTR]	 = ucma_init_qp_attr,
	[RDMA_USER_CM_CMD_GET_EVENT]	 = ucma_get_event,
	[RDMA_USER_CM_CMD_GET_OPTION]	 = NULL,
	[RDMA_USER_CM_CMD_SET_OPTION]	 = ucma_set_option,
	[RDMA_USER_CM_CMD_NOTIFY]	 = ucma_notify,
	[RDMA_USER_CM_CMD_JOIN_IP_MCAST] = ucma_join_ip_multicast,
	[RDMA_USER_CM_CMD_LEAVE_MCAST]	 = ucma_leave_multicast,
	[RDMA_USER_CM_CMD_MIGRATE_ID]	 = ucma_migrate_id,
	[RDMA_USER_CM_CMD_QUERY]	 = ucma_query,
	[RDMA_USER_CM_CMD_BIND]		 = ucma_bind,
	[RDMA_USER_CM_CMD_RESOLVE_ADDR]	 = ucma_resolve_addr,
	[RDMA_USER_CM_CMD_JOIN_MCAST]	 = ucma_join_multicast
};

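/*
 * Every request arrives as a write(2) of a struct rdma_ucm_cmd_hdr followed
 * by hdr.in bytes of command payload. Illustrative userspace sketch
 * (hypothetical variables, error handling omitted):
 *
 *	struct rdma_ucm_cmd_hdr hdr = {
 *		.cmd = RDMA_USER_CM_CMD_CREATE_ID,
 *		.in  = sizeof(struct rdma_ucm_create_id),
 *		.out = sizeof(struct rdma_ucm_create_id_resp),
 *	};
 *	memcpy(buf, &hdr, sizeof(hdr));
 *	memcpy(buf + sizeof(hdr), &cmd, hdr.in);
 *	write(rdma_cm_fd, buf, sizeof(hdr) + hdr.in);
 */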
static ssize_t ucma_write(struct file *filp, const char __user *buf,
			  size_t len, loff_t *pos)
{
	struct ucma_file *file = filp->private_data;
	struct rdma_ucm_cmd_hdr hdr;
	ssize_t ret;

	if (!ib_safe_file_access(filp)) {
		pr_err_once("ucma_write: process %d (%s) changed security contexts after opening file descriptor, this is not allowed.\n",
			    task_tgid_vnr(current), current->comm);
		return -EACCES;
	}

	if (len < sizeof(hdr))
		return -EINVAL;

	if (copy_from_user(&hdr, buf, sizeof(hdr)))
		return -EFAULT;

	if (hdr.cmd >= ARRAY_SIZE(ucma_cmd_table))
		return -EINVAL;

	if (hdr.in + sizeof(hdr) > len)
		return -EINVAL;

	if (!ucma_cmd_table[hdr.cmd])
		return -ENOSYS;

	ret = ucma_cmd_table[hdr.cmd](file, buf + sizeof(hdr), hdr.in, hdr.out);
	if (!ret)
		ret = len;

	return ret;
}

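/*
 * Report the file as readable whenever its event list is non-empty; the
 * rdma_cm event handlers wake poll_wait when they queue a new event.
 */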
static __poll_t ucma_poll(struct file *filp, struct poll_table_struct *wait)
{
	struct ucma_file *file = filp->private_data;
	__poll_t mask = 0;

	poll_wait(filp, &file->poll_wait, wait);

	if (!list_empty(&file->event_list))
		mask = EPOLLIN | EPOLLRDNORM;

	return mask;
}

/*
 * ucma_open() does not need the BKL:
 *
 *  - no global state is referred to;
 *  - there is no ioctl method to race against;
 *  - no further module initialization is required for open to work
 *    after the device is registered.
 */
static int ucma_open(struct inode *inode, struct file *filp)
{
	struct ucma_file *file;

	file = kmalloc(sizeof *file, GFP_KERNEL);
	if (!file)
		return -ENOMEM;

	file->close_wq = alloc_ordered_workqueue("ucma_close_id",
						 WQ_MEM_RECLAIM);
	if (!file->close_wq) {
		kfree(file);
		return -ENOMEM;
	}

	INIT_LIST_HEAD(&file->event_list);
	INIT_LIST_HEAD(&file->ctx_list);
	init_waitqueue_head(&file->poll_wait);
	mutex_init(&file->mut);

	filp->private_data = file;
	file->filp = filp;

	return nonseekable_open(inode, filp);
}

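/*
 * Tear down every context still owned by the file: drop each id from the
 * idr, flush the close workqueue so no handler can race with us, destroy
 * the cm_id unless a close work item already owns it, then free the ctx.
 */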
static int ucma_close(struct inode *inode, struct file *filp)
{
	struct ucma_file *file = filp->private_data;
	struct ucma_context *ctx, *tmp;

	mutex_lock(&file->mut);
	list_for_each_entry_safe(ctx, tmp, &file->ctx_list, list) {
		ctx->destroying = 1;
		mutex_unlock(&file->mut);

		mutex_lock(&mut);
		idr_remove(&ctx_idr, ctx->id);
		mutex_unlock(&mut);

		flush_workqueue(file->close_wq);
		/* Once the ctx is marked as destroying and the workqueue
		 * has been flushed, we are safe from any in-flight handlers
		 * that might queue another close work item.
		 */
		mutex_lock(&mut);
		if (!ctx->closing) {
			mutex_unlock(&mut);
			/* rdma_destroy_id ensures that no event handlers are
			 * in flight for that id before releasing it.
			 */
			rdma_destroy_id(ctx->cm_id);
		} else {
			mutex_unlock(&mut);
		}

		ucma_free_ctx(ctx);
		mutex_lock(&file->mut);
	}
	mutex_unlock(&file->mut);
	destroy_workqueue(file->close_wq);
	kfree(file);
	return 0;
}

static const struct file_operations ucma_fops = {
	.owner 	 = THIS_MODULE,
	.open 	 = ucma_open,
	.release = ucma_close,
	.write	 = ucma_write,
	.poll    = ucma_poll,
	.llseek	 = no_llseek,
};

static struct miscdevice ucma_misc = {
	.minor		= MISC_DYNAMIC_MINOR,
	.name		= "rdma_cm",
	.nodename	= "infiniband/rdma_cm",
	.mode		= 0666,
	.fops		= &ucma_fops,
};

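/*
 * Expose the ABI version so userspace (e.g. librdmacm) can check
 * compatibility before issuing commands. Readable from sysfs, for
 * instance (path assumed from the miscdevice name above):
 *
 *	$ cat /sys/class/misc/rdma_cm/abi_version
 */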
static ssize_t show_abi_version(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	return sprintf(buf, "%d\n", RDMA_USER_CM_ABI_VERSION);
}
static DEVICE_ATTR(abi_version, S_IRUGO, show_abi_version, NULL);

static int __init ucma_init(void)
{
	int ret;

	ret = misc_register(&ucma_misc);
	if (ret)
		return ret;

	ret = device_create_file(ucma_misc.this_device, &dev_attr_abi_version);
	if (ret) {
		pr_err("rdma_ucm: couldn't create abi_version attr\n");
		goto err1;
	}

	ucma_ctl_table_hdr = register_net_sysctl(&init_net, "net/rdma_ucm", ucma_ctl_table);
	if (!ucma_ctl_table_hdr) {
		pr_err("rdma_ucm: couldn't register sysctl paths\n");
		ret = -ENOMEM;
		goto err2;
	}
	return 0;
err2:
	device_remove_file(ucma_misc.this_device, &dev_attr_abi_version);
err1:
	misc_deregister(&ucma_misc);
	return ret;
}

static void __exit ucma_cleanup(void)
{
	unregister_net_sysctl_table(ucma_ctl_table_hdr);
	device_remove_file(ucma_misc.this_device, &dev_attr_abi_version);
	misc_deregister(&ucma_misc);
	idr_destroy(&ctx_idr);
	idr_destroy(&multicast_idr);
}

module_init(ucma_init);
module_exit(ucma_cleanup);