/*
 * Copyright (c) 2007, 2017 Oracle and/or its affiliates. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/dma-mapping.h> /* for DMA_*_DEVICE */

#include "rds.h"

/*
 * XXX
 *  - build with sparse
 *  - should we detect duplicate keys on a socket?  hmm.
 *  - an rdma is an mlock, apply rlimit?
 */

/*
 * get the number of pages by looking at the page indices that the start and
 * end addresses fall in.
 *
 * Returns 0 if the vec is invalid.  It is invalid if the number of bytes
 * causes the address to wrap or overflows an unsigned int.  This comes
 * from being stored in the 'length' member of 'struct scatterlist'.
 */
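/*
 * Illustrative example (not part of the original source): with 4K pages,
 * a vec with addr = 0x1ff0 and bytes = 0x20 straddles two pages, so
 * ((0x2010 + 0xfff) >> 12) - (0x1ff0 >> 12) = 3 - 1 = 2 is returned.
 */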
static unsigned int rds_pages_in_vec(struct rds_iovec *vec)
{
	if ((vec->addr + vec->bytes <= vec->addr) ||
	    (vec->bytes > (u64)UINT_MAX))
		return 0;

	return ((vec->addr + vec->bytes + PAGE_SIZE - 1) >> PAGE_SHIFT) -
		(vec->addr >> PAGE_SHIFT);
}

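/*
 * Look up an MR by its R_Key in the per-socket rbtree.  If the key is
 * already present the existing MR is returned.  Otherwise, when 'insert'
 * is non-NULL, it is linked into the tree and an extra reference is
 * taken; NULL is returned in that case.
 */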
65static struct rds_mr *rds_mr_tree_walk(struct rb_root *root, u64 key,
66 struct rds_mr *insert)
67{
68 struct rb_node **p = &root->rb_node;
69 struct rb_node *parent = NULL;
70 struct rds_mr *mr;
71
72 while (*p) {
73 parent = *p;
74 mr = rb_entry(parent, struct rds_mr, r_rb_node);
75
76 if (key < mr->r_key)
77 p = &(*p)->rb_left;
78 else if (key > mr->r_key)
79 p = &(*p)->rb_right;
80 else
81 return mr;
82 }
83
84 if (insert) {
85 rb_link_node(&insert->r_rb_node, parent, p);
86 rb_insert_color(&insert->r_rb_node, root);
Reshetova, Elena803ea852017-07-04 15:53:17 +030087 refcount_inc(&insert->r_refcount);
Andy Grovereff5f532009-02-24 15:30:29 +000088 }
89 return NULL;
90}
91
92/*
93 * Destroy the transport-specific part of a MR.
94 */
95static void rds_destroy_mr(struct rds_mr *mr)
96{
97 struct rds_sock *rs = mr->r_sock;
98 void *trans_private = NULL;
99 unsigned long flags;
100
101 rdsdebug("RDS: destroy mr key is %x refcnt %u\n",
Reshetova, Elena803ea852017-07-04 15:53:17 +0300102 mr->r_key, refcount_read(&mr->r_refcount));
Andy Grovereff5f532009-02-24 15:30:29 +0000103
104 if (test_and_set_bit(RDS_MR_DEAD, &mr->r_state))
105 return;
106
107 spin_lock_irqsave(&rs->rs_rdma_lock, flags);
108 if (!RB_EMPTY_NODE(&mr->r_rb_node))
109 rb_erase(&mr->r_rb_node, &rs->rs_rdma_keys);
110 trans_private = mr->r_trans_private;
111 mr->r_trans_private = NULL;
112 spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);
113
114 if (trans_private)
115 mr->r_trans->free_mr(trans_private, mr->r_invalidate);
116}
117
118void __rds_put_mr_final(struct rds_mr *mr)
119{
120 rds_destroy_mr(mr);
121 kfree(mr);
122}
123
124/*
125 * By the time this is called we can't have any more ioctls called on
126 * the socket so we don't need to worry about racing with others.
127 */
128void rds_rdma_drop_keys(struct rds_sock *rs)
129{
130 struct rds_mr *mr;
131 struct rb_node *node;
Tina Yang35b52c72010-04-01 14:09:00 -0700132 unsigned long flags;
Andy Grovereff5f532009-02-24 15:30:29 +0000133
134 /* Release any MRs associated with this socket */
Tina Yang35b52c72010-04-01 14:09:00 -0700135 spin_lock_irqsave(&rs->rs_rdma_lock, flags);
Andy Grovereff5f532009-02-24 15:30:29 +0000136 while ((node = rb_first(&rs->rs_rdma_keys))) {
Geliang Tanga763f782016-12-20 22:02:18 +0800137 mr = rb_entry(node, struct rds_mr, r_rb_node);
Andy Grovereff5f532009-02-24 15:30:29 +0000138 if (mr->r_trans == rs->rs_transport)
139 mr->r_invalidate = 0;
Tina Yang35b52c72010-04-01 14:09:00 -0700140 rb_erase(&mr->r_rb_node, &rs->rs_rdma_keys);
141 RB_CLEAR_NODE(&mr->r_rb_node);
142 spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);
143 rds_destroy_mr(mr);
Andy Grovereff5f532009-02-24 15:30:29 +0000144 rds_mr_put(mr);
Tina Yang35b52c72010-04-01 14:09:00 -0700145 spin_lock_irqsave(&rs->rs_rdma_lock, flags);
Andy Grovereff5f532009-02-24 15:30:29 +0000146 }
Tina Yang35b52c72010-04-01 14:09:00 -0700147 spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);
Andy Grovereff5f532009-02-24 15:30:29 +0000148
149 if (rs->rs_transport && rs->rs_transport->flush_mrs)
150 rs->rs_transport->flush_mrs();
151}
152
153/*
154 * Helper function to pin user pages.
155 */
156static int rds_pin_pages(unsigned long user_addr, unsigned int nr_pages,
157 struct page **pages, int write)
158{
159 int ret;
160
Andy Grover830eb7d2009-04-09 14:09:42 +0000161 ret = get_user_pages_fast(user_addr, nr_pages, write, pages);
Andy Grovereff5f532009-02-24 15:30:29 +0000162
Andy Grover7acd4a72009-04-09 14:09:40 +0000163 if (ret >= 0 && ret < nr_pages) {
Andy Grovereff5f532009-02-24 15:30:29 +0000164 while (ret--)
165 put_page(pages[ret]);
166 ret = -EFAULT;
167 }
168
169 return ret;
170}
171
static int __rds_rdma_map(struct rds_sock *rs, struct rds_get_mr_args *args,
			  u64 *cookie_ret, struct rds_mr **mr_ret,
			  struct rds_conn_path *cp)
{
	struct rds_mr *mr = NULL, *found;
	unsigned int nr_pages;
	struct page **pages = NULL;
	struct scatterlist *sg;
	void *trans_private;
	unsigned long flags;
	rds_rdma_cookie_t cookie;
	unsigned int nents;
	long i;
	int ret;

	if (ipv6_addr_any(&rs->rs_bound_addr) || !rs->rs_transport) {
		ret = -ENOTCONN; /* XXX not a great errno */
		goto out;
	}

	if (!rs->rs_transport->get_mr) {
		ret = -EOPNOTSUPP;
		goto out;
	}

	nr_pages = rds_pages_in_vec(&args->vec);
	if (nr_pages == 0) {
		ret = -EINVAL;
		goto out;
	}

	/* Restrict the size of the MR, irrespective of the underlying
	 * transport.  To account for unaligned MR regions, subtract one
	 * from nr_pages.
	 */
	if ((nr_pages - 1) > (RDS_MAX_MSG_SIZE >> PAGE_SHIFT)) {
		ret = -EMSGSIZE;
		goto out;
	}

	rdsdebug("RDS: get_mr addr %llx len %llu nr_pages %u\n",
		 args->vec.addr, args->vec.bytes, nr_pages);

	/* XXX clamp nr_pages to limit the size of this alloc? */
	pages = kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL);
	if (!pages) {
		ret = -ENOMEM;
		goto out;
	}

	mr = kzalloc(sizeof(struct rds_mr), GFP_KERNEL);
	if (!mr) {
		ret = -ENOMEM;
		goto out;
	}

	refcount_set(&mr->r_refcount, 1);
	RB_CLEAR_NODE(&mr->r_rb_node);
	mr->r_trans = rs->rs_transport;
	mr->r_sock = rs;

	if (args->flags & RDS_RDMA_USE_ONCE)
		mr->r_use_once = 1;
	if (args->flags & RDS_RDMA_INVALIDATE)
		mr->r_invalidate = 1;
	if (args->flags & RDS_RDMA_READWRITE)
		mr->r_write = 1;

	/*
	 * Pin the pages that make up the user buffer and transfer the page
	 * pointers to the mr's sg array.  We check to see if we've mapped
	 * the whole region after transferring the partial page references
	 * to the sg array so that we can have one page ref cleanup path.
	 *
	 * For now we have no flag that tells us whether the mapping is
	 * r/o or r/w. We need to assume r/w, or we'll do a lot of RDMA to
	 * the zero page.
	 */
	ret = rds_pin_pages(args->vec.addr, nr_pages, pages, 1);
	if (ret < 0)
		goto out;

	nents = ret;
	sg = kcalloc(nents, sizeof(*sg), GFP_KERNEL);
	if (!sg) {
		ret = -ENOMEM;
		goto out;
	}
	WARN_ON(!nents);
	sg_init_table(sg, nents);

	/* Stick all pages into the scatterlist */
	for (i = 0 ; i < nents; i++)
		sg_set_page(&sg[i], pages[i], PAGE_SIZE, 0);

	rdsdebug("RDS: trans_private nents is %u\n", nents);

	/* Obtain a transport specific MR. If this succeeds, the
	 * s/g list is now owned by the MR.
	 * Note that dma_map() implies that pending writes are
	 * flushed to RAM, so no dma_sync is needed here. */
	trans_private = rs->rs_transport->get_mr(sg, nents, rs,
						 &mr->r_key,
						 cp ? cp->cp_conn : NULL);

	if (IS_ERR(trans_private)) {
		for (i = 0 ; i < nents; i++)
			put_page(sg_page(&sg[i]));
		kfree(sg);
		ret = PTR_ERR(trans_private);
		goto out;
	}

	mr->r_trans_private = trans_private;

	rdsdebug("RDS: get_mr put_user key is %x cookie_addr %p\n",
		 mr->r_key, (void *)(unsigned long) args->cookie_addr);

	/* The user may pass us an unaligned address, but we can only
	 * map page aligned regions. So we keep the offset, and build
	 * a 64bit cookie containing <R_Key, offset> and pass that
	 * around. */
	cookie = rds_rdma_make_cookie(mr->r_key, args->vec.addr & ~PAGE_MASK);
	if (cookie_ret)
		*cookie_ret = cookie;

	if (args->cookie_addr && put_user(cookie, (u64 __user *)(unsigned long) args->cookie_addr)) {
		ret = -EFAULT;
		goto out;
	}

	/* Inserting the new MR into the rbtree bumps its
	 * reference count. */
	spin_lock_irqsave(&rs->rs_rdma_lock, flags);
	found = rds_mr_tree_walk(&rs->rs_rdma_keys, mr->r_key, mr);
	spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);

	BUG_ON(found && found != mr);

	rdsdebug("RDS: get_mr key is %x\n", mr->r_key);
	if (mr_ret) {
		refcount_inc(&mr->r_refcount);
		*mr_ret = mr;
	}

	ret = 0;
out:
	kfree(pages);
	if (mr)
		rds_mr_put(mr);
	return ret;
}

int rds_get_mr(struct rds_sock *rs, char __user *optval, int optlen)
{
	struct rds_get_mr_args args;

	if (optlen != sizeof(struct rds_get_mr_args))
		return -EINVAL;

	if (copy_from_user(&args, (struct rds_get_mr_args __user *)optval,
			   sizeof(struct rds_get_mr_args)))
		return -EFAULT;

	return __rds_rdma_map(rs, &args, NULL, NULL, NULL);
}
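
/*
 * Userspace counterpart (an illustrative sketch, not part of this file):
 * assuming a bound RDS socket 'fd' and a buffer 'buf' of 'len' bytes,
 * registration and cookie retrieval would look roughly like
 *
 *	uint64_t cookie;
 *	struct rds_get_mr_args args = {
 *		.vec         = { .addr = (uint64_t)buf, .bytes = len },
 *		.cookie_addr = (uint64_t)&cookie,
 *		.flags       = RDS_RDMA_USE_ONCE,
 *	};
 *
 *	if (setsockopt(fd, SOL_RDS, RDS_GET_MR, &args, sizeof(args)))
 *		perror("RDS_GET_MR");
 *
 * with the cookie then carried to the peer in an application message.
 */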

int rds_get_mr_for_dest(struct rds_sock *rs, char __user *optval, int optlen)
{
	struct rds_get_mr_for_dest_args args;
	struct rds_get_mr_args new_args;

	if (optlen != sizeof(struct rds_get_mr_for_dest_args))
		return -EINVAL;

	if (copy_from_user(&args, (struct rds_get_mr_for_dest_args __user *)optval,
			   sizeof(struct rds_get_mr_for_dest_args)))
		return -EFAULT;

	/*
	 * Initially, just behave like get_mr().
	 * TODO: Implement get_mr as wrapper around this
	 *	 and deprecate it.
	 */
	new_args.vec = args.vec;
	new_args.cookie_addr = args.cookie_addr;
	new_args.flags = args.flags;

	return __rds_rdma_map(rs, &new_args, NULL, NULL, NULL);
}

/*
 * Free the MR indicated by the given R_Key
 */
int rds_free_mr(struct rds_sock *rs, char __user *optval, int optlen)
{
	struct rds_free_mr_args args;
	struct rds_mr *mr;
	unsigned long flags;

	if (optlen != sizeof(struct rds_free_mr_args))
		return -EINVAL;

	if (copy_from_user(&args, (struct rds_free_mr_args __user *)optval,
			   sizeof(struct rds_free_mr_args)))
		return -EFAULT;

	/* Special case - a null cookie means flush all unused MRs */
	if (args.cookie == 0) {
		if (!rs->rs_transport || !rs->rs_transport->flush_mrs)
			return -EINVAL;
		rs->rs_transport->flush_mrs();
		return 0;
	}

	/* Look up the MR given its R_key and remove it from the rbtree
	 * so nobody else finds it.
	 * This should also prevent races with rds_rdma_unuse.
	 */
	spin_lock_irqsave(&rs->rs_rdma_lock, flags);
	mr = rds_mr_tree_walk(&rs->rs_rdma_keys, rds_rdma_cookie_key(args.cookie), NULL);
	if (mr) {
		rb_erase(&mr->r_rb_node, &rs->rs_rdma_keys);
		RB_CLEAR_NODE(&mr->r_rb_node);
		if (args.flags & RDS_RDMA_INVALIDATE)
			mr->r_invalidate = 1;
	}
	spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);

	if (!mr)
		return -EINVAL;

	/*
	 * call rds_destroy_mr() ourselves so that we're sure it's done by the time
	 * we return.  If we let rds_mr_put() do it, it might not happen until
	 * someone else drops their ref.
	 */
	rds_destroy_mr(mr);
	rds_mr_put(mr);
	return 0;
}
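
/*
 * Userspace counterpart (an illustrative sketch, not part of this file):
 * a registration obtained through RDS_GET_MR is torn down by passing its
 * cookie back, e.g.
 *
 *	struct rds_free_mr_args fargs = {
 *		.cookie = cookie,
 *		.flags  = RDS_RDMA_INVALIDATE,
 *	};
 *	setsockopt(fd, SOL_RDS, RDS_FREE_MR, &fargs, sizeof(fargs));
 *
 * while a zero cookie simply asks the transport to flush all unused MRs,
 * as handled by the special case above.
 */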

/*
 * This is called when we receive an extension header that
 * tells us this MR was used. It allows us to implement
 * use_once semantics
 */
void rds_rdma_unuse(struct rds_sock *rs, u32 r_key, int force)
{
	struct rds_mr *mr;
	unsigned long flags;
	int zot_me = 0;

	spin_lock_irqsave(&rs->rs_rdma_lock, flags);
	mr = rds_mr_tree_walk(&rs->rs_rdma_keys, r_key, NULL);
	if (!mr) {
		pr_debug("rds: trying to unuse MR with unknown r_key %u!\n",
			 r_key);
		spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);
		return;
	}

	if (mr->r_use_once || force) {
		rb_erase(&mr->r_rb_node, &rs->rs_rdma_keys);
		RB_CLEAR_NODE(&mr->r_rb_node);
		zot_me = 1;
	}
	spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);

	/* May have to issue a dma_sync on this memory region.
	 * Note we could avoid this if the operation was a RDMA READ,
	 * but at this point we can't tell. */
	if (mr->r_trans->sync_mr)
		mr->r_trans->sync_mr(mr->r_trans_private, DMA_FROM_DEVICE);

	/* If the MR was marked as invalidate, this will
	 * trigger an async flush. */
	if (zot_me) {
		rds_destroy_mr(mr);
		rds_mr_put(mr);
	}
}

void rds_rdma_free_op(struct rm_rdma_op *ro)
{
	unsigned int i;

	for (i = 0; i < ro->op_nents; i++) {
		struct page *page = sg_page(&ro->op_sg[i]);

		/* Mark page dirty if it was possibly modified, which
		 * is the case for a RDMA_READ which copies from remote
		 * to local memory */
		if (!ro->op_write) {
			WARN_ON(!page->mapping && irqs_disabled());
			set_page_dirty(page);
		}
		put_page(page);
	}

	kfree(ro->op_notifier);
	ro->op_notifier = NULL;
	ro->op_active = 0;
}

void rds_atomic_free_op(struct rm_atomic_op *ao)
{
	struct page *page = sg_page(ao->op_sg);

	/* Mark page dirty if it was possibly modified, which
	 * is the case for a RDMA_READ which copies from remote
	 * to local memory */
	set_page_dirty(page);
	put_page(page);

	kfree(ao->op_notifier);
	ao->op_notifier = NULL;
	ao->op_active = 0;
}


/*
 * Count the number of pages needed to describe an incoming iovec array.
 */
static int rds_rdma_pages(struct rds_iovec iov[], int nr_iovecs)
{
	int tot_pages = 0;
	unsigned int nr_pages;
	unsigned int i;

	/* figure out the number of pages in the vector */
	for (i = 0; i < nr_iovecs; i++) {
		nr_pages = rds_pages_in_vec(&iov[i]);
		if (nr_pages == 0)
			return -EINVAL;

		tot_pages += nr_pages;

		/*
		 * nr_pages for one entry is limited to (UINT_MAX>>PAGE_SHIFT)+1,
		 * so tot_pages cannot overflow without first going negative.
		 */
		if (tot_pages < 0)
			return -EINVAL;
	}

	return tot_pages;
}

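/*
 * Copy the user's local iovec array into 'iov' and return the number of
 * bytes of scatterlist space (one entry per page) the RDMA op will need,
 * or a negative errno if the vector is empty, invalid or cannot be copied.
 */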
int rds_rdma_extra_size(struct rds_rdma_args *args,
			struct rds_iov_vector *iov)
{
	struct rds_iovec *vec;
	struct rds_iovec __user *local_vec;
	int tot_pages = 0;
	unsigned int nr_pages;
	unsigned int i;

	local_vec = (struct rds_iovec __user *)(unsigned long) args->local_vec_addr;

	if (args->nr_local == 0)
		return -EINVAL;

	iov->iov = kcalloc(args->nr_local,
			   sizeof(struct rds_iovec),
			   GFP_KERNEL);
	if (!iov->iov)
		return -ENOMEM;

	vec = &iov->iov[0];

	if (copy_from_user(vec, local_vec, args->nr_local *
			   sizeof(struct rds_iovec)))
		return -EFAULT;
	iov->len = args->nr_local;

	/* figure out the number of pages in the vector */
	for (i = 0; i < args->nr_local; i++, vec++) {
		nr_pages = rds_pages_in_vec(vec);
		if (nr_pages == 0)
			return -EINVAL;

		tot_pages += nr_pages;

		/*
		 * nr_pages for one entry is limited to (UINT_MAX>>PAGE_SHIFT)+1,
		 * so tot_pages cannot overflow without first going negative.
		 */
		if (tot_pages < 0)
			return -EINVAL;
	}

	return tot_pages * sizeof(struct scatterlist);
}

/*
 * The application asks for an RDMA transfer.
 * Extract all arguments and set up the rdma_op.
 */
int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
		       struct cmsghdr *cmsg,
		       struct rds_iov_vector *vec)
{
	struct rds_rdma_args *args;
	struct rm_rdma_op *op = &rm->rdma;
	int nr_pages;
	unsigned int nr_bytes;
	struct page **pages = NULL;
	struct rds_iovec *iovs;
	unsigned int i, j;
	int ret = 0;

	if (cmsg->cmsg_len < CMSG_LEN(sizeof(struct rds_rdma_args))
	    || rm->rdma.op_active)
		return -EINVAL;

	args = CMSG_DATA(cmsg);

	if (ipv6_addr_any(&rs->rs_bound_addr)) {
		ret = -ENOTCONN; /* XXX not a great errno */
		goto out_ret;
	}

	if (args->nr_local > UIO_MAXIOV) {
		ret = -EMSGSIZE;
		goto out_ret;
	}

	if (vec->len != args->nr_local) {
		ret = -EINVAL;
		goto out_ret;
	}

	iovs = vec->iov;

	nr_pages = rds_rdma_pages(iovs, args->nr_local);
	if (nr_pages < 0) {
		ret = -EINVAL;
		goto out_ret;
	}

	pages = kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL);
	if (!pages) {
		ret = -ENOMEM;
		goto out_ret;
	}

	op->op_write = !!(args->flags & RDS_RDMA_READWRITE);
	op->op_fence = !!(args->flags & RDS_RDMA_FENCE);
	op->op_notify = !!(args->flags & RDS_RDMA_NOTIFY_ME);
	op->op_silent = !!(args->flags & RDS_RDMA_SILENT);
	op->op_active = 1;
	op->op_recverr = rs->rs_recverr;
	WARN_ON(!nr_pages);
	op->op_sg = rds_message_alloc_sgs(rm, nr_pages, &ret);
	if (!op->op_sg)
		goto out_pages;

	if (op->op_notify || op->op_recverr) {
		/* We allocate an uninitialized notifier here, because
		 * we don't want to do that in the completion handler. We
		 * would have to use GFP_ATOMIC there, and don't want to deal
		 * with failed allocations.
		 */
		op->op_notifier = kmalloc(sizeof(struct rds_notifier), GFP_KERNEL);
		if (!op->op_notifier) {
			ret = -ENOMEM;
			goto out_pages;
		}
		op->op_notifier->n_user_token = args->user_token;
		op->op_notifier->n_status = RDS_RDMA_SUCCESS;

		/* Enable RDMA notification on the data operation for composite
		 * RDS messages, and make sure notification is enabled only on
		 * the data operation that follows it, so that the application
		 * is notified only after the full message has been delivered.
		 */
		if (rm->data.op_sg) {
			rm->rdma.op_notify = 0;
			rm->data.op_notify = !!(args->flags & RDS_RDMA_NOTIFY_ME);
		}
	}

	/* The cookie contains the R_Key of the remote memory region, and
	 * optionally an offset into it. This is how we implement RDMA into
	 * unaligned memory.
	 * When setting up the RDMA, we need to add that offset to the
	 * destination address (which is really an offset into the MR)
	 * FIXME: We may want to move this into ib_rdma.c
	 */
	op->op_rkey = rds_rdma_cookie_key(args->cookie);
	op->op_remote_addr = args->remote_vec.addr + rds_rdma_cookie_offset(args->cookie);

	nr_bytes = 0;

	rdsdebug("RDS: rdma prepare nr_local %llu rva %llx rkey %x\n",
		 (unsigned long long)args->nr_local,
		 (unsigned long long)args->remote_vec.addr,
		 op->op_rkey);

	for (i = 0; i < args->nr_local; i++) {
		struct rds_iovec *iov = &iovs[i];
		/* don't need to check, rds_rdma_pages() verified nr will be +nonzero */
		unsigned int nr = rds_pages_in_vec(iov);

		rs->rs_user_addr = iov->addr;
		rs->rs_user_bytes = iov->bytes;

		/* If it's a WRITE operation, we want to pin the pages for reading.
		 * If it's a READ operation, we need to pin the pages for writing.
		 */
		ret = rds_pin_pages(iov->addr, nr, pages, !op->op_write);
		if (ret < 0)
			goto out_pages;
		else
			ret = 0;

		rdsdebug("RDS: nr_bytes %u nr %u iov->bytes %llu iov->addr %llx\n",
			 nr_bytes, nr, iov->bytes, iov->addr);

		nr_bytes += iov->bytes;

		for (j = 0; j < nr; j++) {
			unsigned int offset = iov->addr & ~PAGE_MASK;
			struct scatterlist *sg;

			sg = &op->op_sg[op->op_nents + j];
			sg_set_page(sg, pages[j],
				    min_t(unsigned int, iov->bytes, PAGE_SIZE - offset),
				    offset);

			rdsdebug("RDS: sg->offset %x sg->len %x iov->addr %llx iov->bytes %llu\n",
				 sg->offset, sg->length, iov->addr, iov->bytes);

			iov->addr += sg->length;
			iov->bytes -= sg->length;
		}

		op->op_nents += nr;
	}

	if (nr_bytes > args->remote_vec.bytes) {
		rdsdebug("RDS nr_bytes %u remote_bytes %u do not match\n",
			 nr_bytes,
			 (unsigned int) args->remote_vec.bytes);
		ret = -EINVAL;
		goto out_pages;
	}
	op->op_bytes = nr_bytes;

out_pages:
	kfree(pages);
out_ret:
	if (ret)
		rds_rdma_free_op(op);
	else
		rds_stats_inc(s_send_rdma);

	return ret;
}
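
/*
 * Userspace counterpart (an illustrative sketch, not part of this file):
 * the arguments parsed by rds_cmsg_rdma_args() above arrive as ancillary
 * data on sendmsg().  Assuming 'remote_cookie' was received from the peer
 * and 'local_iovs' describes 'nr_iovs' local buffers, the application
 * would do roughly
 *
 *	struct rds_rdma_args rargs = {
 *		.cookie         = remote_cookie,
 *		.remote_vec     = { .addr = 0, .bytes = len },
 *		.local_vec_addr = (uint64_t)local_iovs,
 *		.nr_local       = nr_iovs,
 *		.flags          = RDS_RDMA_READWRITE | RDS_RDMA_NOTIFY_ME,
 *		.user_token     = token,
 *	};
 *	struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);
 *
 *	cmsg->cmsg_level = SOL_RDS;
 *	cmsg->cmsg_type  = RDS_CMSG_RDMA_ARGS;
 *	cmsg->cmsg_len   = CMSG_LEN(sizeof(rargs));
 *	memcpy(CMSG_DATA(cmsg), &rargs, sizeof(rargs));
 */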

/*
 * The application wants us to pass an RDMA destination (aka MR)
 * to the remote
 */
int rds_cmsg_rdma_dest(struct rds_sock *rs, struct rds_message *rm,
		       struct cmsghdr *cmsg)
{
	unsigned long flags;
	struct rds_mr *mr;
	u32 r_key;
	int err = 0;

	if (cmsg->cmsg_len < CMSG_LEN(sizeof(rds_rdma_cookie_t)) ||
	    rm->m_rdma_cookie != 0)
		return -EINVAL;

	memcpy(&rm->m_rdma_cookie, CMSG_DATA(cmsg), sizeof(rm->m_rdma_cookie));

	/* We are reusing a previously mapped MR here. Most likely, the
	 * application has written to the buffer, so we need to explicitly
	 * flush those writes to RAM. Otherwise the HCA may not see them
	 * when doing a DMA from that buffer.
	 */
	r_key = rds_rdma_cookie_key(rm->m_rdma_cookie);

	spin_lock_irqsave(&rs->rs_rdma_lock, flags);
	mr = rds_mr_tree_walk(&rs->rs_rdma_keys, r_key, NULL);
	if (!mr)
		err = -EINVAL;	/* invalid r_key */
	else
		refcount_inc(&mr->r_refcount);
	spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);

	if (mr) {
		mr->r_trans->sync_mr(mr->r_trans_private, DMA_TO_DEVICE);
		rm->rdma.op_rdma_mr = mr;
	}
	return err;
}

/*
 * The application passes us an address range it wants to enable RDMA
 * to/from. We map the area, and save the <R_Key,offset> pair
 * in rm->m_rdma_cookie. This causes it to be sent along to the peer
 * in an extension header.
 */
int rds_cmsg_rdma_map(struct rds_sock *rs, struct rds_message *rm,
		      struct cmsghdr *cmsg)
{
	if (cmsg->cmsg_len < CMSG_LEN(sizeof(struct rds_get_mr_args)) ||
	    rm->m_rdma_cookie != 0)
		return -EINVAL;

	return __rds_rdma_map(rs, CMSG_DATA(cmsg), &rm->m_rdma_cookie,
			      &rm->rdma.op_rdma_mr, rm->m_conn_path);
}

/*
 * Fill in rds_message for an atomic request.
 */
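/*
 * Userspace counterpart (an illustrative sketch, not part of this file):
 * a masked compare-and-swap on an 8-byte word in the peer's MR would be
 * requested with a control message of cmsg_level SOL_RDS and cmsg_type
 * RDS_CMSG_MASKED_ATOMIC_CSWP carrying
 *
 *	struct rds_atomic_args aargs = {
 *		.cookie      = remote_cookie,
 *		.m_cswp      = { .compare = old_val, .swap = new_val,
 *				 .compare_mask = ~0ULL, .swap_mask = ~0ULL },
 *		.flags       = RDS_RDMA_NOTIFY_ME,
 *		.local_addr  = (uint64_t)&result,
 *		.remote_addr = remote_offset,
 *		.user_token  = token,
 *	};
 *
 * where 'result' is an 8-byte-aligned local word that receives the prior
 * remote value; 'remote_cookie', 'remote_offset' and 'token' are supplied
 * by the application.
 */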
int rds_cmsg_atomic(struct rds_sock *rs, struct rds_message *rm,
		    struct cmsghdr *cmsg)
{
	struct page *page = NULL;
	struct rds_atomic_args *args;
	int ret = 0;

	if (cmsg->cmsg_len < CMSG_LEN(sizeof(struct rds_atomic_args))
	    || rm->atomic.op_active)
		return -EINVAL;

	args = CMSG_DATA(cmsg);

	/* Nonmasked & masked cmsg ops converted to masked hw ops */
	switch (cmsg->cmsg_type) {
	case RDS_CMSG_ATOMIC_FADD:
		rm->atomic.op_type = RDS_ATOMIC_TYPE_FADD;
		rm->atomic.op_m_fadd.add = args->fadd.add;
		rm->atomic.op_m_fadd.nocarry_mask = 0;
		break;
	case RDS_CMSG_MASKED_ATOMIC_FADD:
		rm->atomic.op_type = RDS_ATOMIC_TYPE_FADD;
		rm->atomic.op_m_fadd.add = args->m_fadd.add;
		rm->atomic.op_m_fadd.nocarry_mask = args->m_fadd.nocarry_mask;
		break;
	case RDS_CMSG_ATOMIC_CSWP:
		rm->atomic.op_type = RDS_ATOMIC_TYPE_CSWP;
		rm->atomic.op_m_cswp.compare = args->cswp.compare;
		rm->atomic.op_m_cswp.swap = args->cswp.swap;
		rm->atomic.op_m_cswp.compare_mask = ~0;
		rm->atomic.op_m_cswp.swap_mask = ~0;
		break;
	case RDS_CMSG_MASKED_ATOMIC_CSWP:
		rm->atomic.op_type = RDS_ATOMIC_TYPE_CSWP;
		rm->atomic.op_m_cswp.compare = args->m_cswp.compare;
		rm->atomic.op_m_cswp.swap = args->m_cswp.swap;
		rm->atomic.op_m_cswp.compare_mask = args->m_cswp.compare_mask;
		rm->atomic.op_m_cswp.swap_mask = args->m_cswp.swap_mask;
		break;
	default:
		BUG(); /* should never happen */
	}

	rm->atomic.op_notify = !!(args->flags & RDS_RDMA_NOTIFY_ME);
	rm->atomic.op_silent = !!(args->flags & RDS_RDMA_SILENT);
	rm->atomic.op_active = 1;
	rm->atomic.op_recverr = rs->rs_recverr;
	rm->atomic.op_sg = rds_message_alloc_sgs(rm, 1, &ret);
	if (!rm->atomic.op_sg)
		goto err;

	/* verify 8 byte-aligned */
	if (args->local_addr & 0x7) {
		ret = -EFAULT;
		goto err;
	}

	ret = rds_pin_pages(args->local_addr, 1, &page, 1);
	if (ret != 1)
		goto err;
	ret = 0;

	sg_set_page(rm->atomic.op_sg, page, 8, offset_in_page(args->local_addr));

	if (rm->atomic.op_notify || rm->atomic.op_recverr) {
		/* We allocate an uninitialized notifier here, because
		 * we don't want to do that in the completion handler. We
		 * would have to use GFP_ATOMIC there, and don't want to deal
		 * with failed allocations.
		 */
		rm->atomic.op_notifier = kmalloc(sizeof(*rm->atomic.op_notifier), GFP_KERNEL);
		if (!rm->atomic.op_notifier) {
			ret = -ENOMEM;
			goto err;
		}

		rm->atomic.op_notifier->n_user_token = args->user_token;
		rm->atomic.op_notifier->n_status = RDS_RDMA_SUCCESS;
	}

	rm->atomic.op_rkey = rds_rdma_cookie_key(args->cookie);
	rm->atomic.op_remote_addr = args->remote_addr + rds_rdma_cookie_offset(args->cookie);

	return ret;
err:
	if (page)
		put_page(page);
	rm->atomic.op_active = 0;
	kfree(rm->atomic.op_notifier);

	return ret;
}