/*
 * Copyright (c) 2007 Oracle. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/dma-mapping.h> /* for DMA_*_DEVICE */

#include "rds.h"

/*
 * XXX
 *  - build with sparse
 *  - should we limit the size of a mr region? let transport return failure?
 *  - should we detect duplicate keys on a socket? hmm.
 *  - an rdma is an mlock, apply rlimit?
 */

/*
 * get the number of pages by looking at the page indices that the start and
 * end addresses fall in.
 *
 * Returns 0 if the vec is invalid. It is invalid if the number of bytes
 * causes the address to wrap or overflows an unsigned int. This comes
 * from being stored in the 'length' member of 'struct scatterlist'.
 */
static unsigned int rds_pages_in_vec(struct rds_iovec *vec)
{
	if ((vec->addr + vec->bytes <= vec->addr) ||
	    (vec->bytes > (u64)UINT_MAX))
		return 0;

	return ((vec->addr + vec->bytes + PAGE_SIZE - 1) >> PAGE_SHIFT) -
		(vec->addr >> PAGE_SHIFT);
}

static struct rds_mr *rds_mr_tree_walk(struct rb_root *root, u64 key,
				struct rds_mr *insert)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct rds_mr *mr;

	while (*p) {
		parent = *p;
		mr = rb_entry(parent, struct rds_mr, r_rb_node);

		if (key < mr->r_key)
			p = &(*p)->rb_left;
		else if (key > mr->r_key)
			p = &(*p)->rb_right;
		else
			return mr;
	}

	if (insert) {
		rb_link_node(&insert->r_rb_node, parent, p);
		rb_insert_color(&insert->r_rb_node, root);
		atomic_inc(&insert->r_refcount);
	}
	return NULL;
}

/*
 * Destroy the transport-specific part of an MR.
 */
static void rds_destroy_mr(struct rds_mr *mr)
{
	struct rds_sock *rs = mr->r_sock;
	void *trans_private = NULL;
	unsigned long flags;

	rdsdebug("RDS: destroy mr key is %x refcnt %u\n",
		 mr->r_key, atomic_read(&mr->r_refcount));

	if (test_and_set_bit(RDS_MR_DEAD, &mr->r_state))
		return;

	spin_lock_irqsave(&rs->rs_rdma_lock, flags);
	if (!RB_EMPTY_NODE(&mr->r_rb_node))
		rb_erase(&mr->r_rb_node, &rs->rs_rdma_keys);
	trans_private = mr->r_trans_private;
	mr->r_trans_private = NULL;
	spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);

	if (trans_private)
		mr->r_trans->free_mr(trans_private, mr->r_invalidate);
}

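/*
 * Tear down an MR once its last reference has been dropped: destroy the
 * transport-specific part and free the structure itself.
 */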
void __rds_put_mr_final(struct rds_mr *mr)
{
	rds_destroy_mr(mr);
	kfree(mr);
}

/*
 * By the time this is called we can't have any more ioctls called on
 * the socket so we don't need to worry about racing with others.
 */
void rds_rdma_drop_keys(struct rds_sock *rs)
{
	struct rds_mr *mr;
	struct rb_node *node;
	unsigned long flags;

	/* Release any MRs associated with this socket */
	spin_lock_irqsave(&rs->rs_rdma_lock, flags);
	while ((node = rb_first(&rs->rs_rdma_keys))) {
		mr = container_of(node, struct rds_mr, r_rb_node);
		if (mr->r_trans == rs->rs_transport)
			mr->r_invalidate = 0;
		rb_erase(&mr->r_rb_node, &rs->rs_rdma_keys);
		RB_CLEAR_NODE(&mr->r_rb_node);
		spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);
		rds_destroy_mr(mr);
		rds_mr_put(mr);
		spin_lock_irqsave(&rs->rs_rdma_lock, flags);
	}
	spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);

	if (rs->rs_transport && rs->rs_transport->flush_mrs)
		rs->rs_transport->flush_mrs();
}

/*
 * Helper function to pin user pages.
 */
static int rds_pin_pages(unsigned long user_addr, unsigned int nr_pages,
			 struct page **pages, int write)
{
	int ret;

	ret = get_user_pages_fast(user_addr, nr_pages, write, pages);

	if (ret >= 0 && ret < nr_pages) {
		while (ret--)
			put_page(pages[ret]);
		ret = -EFAULT;
	}

	return ret;
}

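/*
 * Core of the memory registration path: pin the user buffer described by
 * args->vec, hand the resulting scatterlist to the transport to obtain an
 * R_Key, and insert the new MR into the socket's rbtree. The <R_Key, offset>
 * cookie is returned through *cookie_ret and/or copied out to userspace at
 * args->cookie_addr.
 */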
static int __rds_rdma_map(struct rds_sock *rs, struct rds_get_mr_args *args,
				u64 *cookie_ret, struct rds_mr **mr_ret)
{
	struct rds_mr *mr = NULL, *found;
	unsigned int nr_pages;
	struct page **pages = NULL;
	struct scatterlist *sg;
	void *trans_private;
	unsigned long flags;
	rds_rdma_cookie_t cookie;
	unsigned int nents;
	long i;
	int ret;

	if (rs->rs_bound_addr == 0) {
		ret = -ENOTCONN; /* XXX not a great errno */
		goto out;
	}

	if (!rs->rs_transport->get_mr) {
		ret = -EOPNOTSUPP;
		goto out;
	}

	nr_pages = rds_pages_in_vec(&args->vec);
	if (nr_pages == 0) {
		ret = -EINVAL;
		goto out;
	}

	rdsdebug("RDS: get_mr addr %llx len %llu nr_pages %u\n",
		 args->vec.addr, args->vec.bytes, nr_pages);

	/* XXX clamp nr_pages to limit the size of this alloc? */
	pages = kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL);
	if (!pages) {
		ret = -ENOMEM;
		goto out;
	}

	mr = kzalloc(sizeof(struct rds_mr), GFP_KERNEL);
	if (!mr) {
		ret = -ENOMEM;
		goto out;
	}

	atomic_set(&mr->r_refcount, 1);
	RB_CLEAR_NODE(&mr->r_rb_node);
	mr->r_trans = rs->rs_transport;
	mr->r_sock = rs;

	if (args->flags & RDS_RDMA_USE_ONCE)
		mr->r_use_once = 1;
	if (args->flags & RDS_RDMA_INVALIDATE)
		mr->r_invalidate = 1;
	if (args->flags & RDS_RDMA_READWRITE)
		mr->r_write = 1;

	/*
	 * Pin the pages that make up the user buffer and transfer the page
	 * pointers to the mr's sg array. We check to see if we've mapped
	 * the whole region after transferring the partial page references
	 * to the sg array so that we can have one page ref cleanup path.
	 *
	 * For now we have no flag that tells us whether the mapping is
	 * r/o or r/w. We need to assume r/w, or we'll do a lot of RDMA to
	 * the zero page.
	 */
	ret = rds_pin_pages(args->vec.addr, nr_pages, pages, 1);
	if (ret < 0)
		goto out;

	nents = ret;
	sg = kcalloc(nents, sizeof(*sg), GFP_KERNEL);
	if (!sg) {
		ret = -ENOMEM;
		goto out;
	}
	WARN_ON(!nents);
	sg_init_table(sg, nents);

	/* Stick all pages into the scatterlist */
	for (i = 0 ; i < nents; i++)
		sg_set_page(&sg[i], pages[i], PAGE_SIZE, 0);

	rdsdebug("RDS: trans_private nents is %u\n", nents);

	/* Obtain a transport specific MR. If this succeeds, the
	 * s/g list is now owned by the MR.
	 * Note that dma_map() implies that pending writes are
	 * flushed to RAM, so no dma_sync is needed here. */
	trans_private = rs->rs_transport->get_mr(sg, nents, rs,
						 &mr->r_key);

	if (IS_ERR(trans_private)) {
		for (i = 0 ; i < nents; i++)
			put_page(sg_page(&sg[i]));
		kfree(sg);
		ret = PTR_ERR(trans_private);
		goto out;
	}

	mr->r_trans_private = trans_private;

	rdsdebug("RDS: get_mr put_user key is %x cookie_addr %p\n",
		 mr->r_key, (void *)(unsigned long) args->cookie_addr);

	/* The user may pass us an unaligned address, but we can only
	 * map page aligned regions. So we keep the offset, and build
	 * a 64bit cookie containing <R_Key, offset> and pass that
	 * around. */
	cookie = rds_rdma_make_cookie(mr->r_key, args->vec.addr & ~PAGE_MASK);
	if (cookie_ret)
		*cookie_ret = cookie;

	if (args->cookie_addr && put_user(cookie, (u64 __user *)(unsigned long) args->cookie_addr)) {
		ret = -EFAULT;
		goto out;
	}

	/* Inserting the new MR into the rbtree bumps its
	 * reference count. */
	spin_lock_irqsave(&rs->rs_rdma_lock, flags);
	found = rds_mr_tree_walk(&rs->rs_rdma_keys, mr->r_key, mr);
	spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);

	BUG_ON(found && found != mr);

	rdsdebug("RDS: get_mr key is %x\n", mr->r_key);
	if (mr_ret) {
		atomic_inc(&mr->r_refcount);
		*mr_ret = mr;
	}

	ret = 0;
out:
	kfree(pages);
	if (mr)
		rds_mr_put(mr);
	return ret;
}

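/*
 * Userspace entry point for registering an MR: validate and copy the
 * rds_get_mr_args structure from the option value, then defer to
 * __rds_rdma_map().
 */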
int rds_get_mr(struct rds_sock *rs, char __user *optval, int optlen)
{
	struct rds_get_mr_args args;

	if (optlen != sizeof(struct rds_get_mr_args))
		return -EINVAL;

	if (copy_from_user(&args, (struct rds_get_mr_args __user *)optval,
			   sizeof(struct rds_get_mr_args)))
		return -EFAULT;

	return __rds_rdma_map(rs, &args, NULL, NULL);
}

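/*
 * Variant of rds_get_mr() that takes rds_get_mr_for_dest_args; for now only
 * the fields it shares with rds_get_mr_args are used.
 */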
int rds_get_mr_for_dest(struct rds_sock *rs, char __user *optval, int optlen)
{
	struct rds_get_mr_for_dest_args args;
	struct rds_get_mr_args new_args;

	if (optlen != sizeof(struct rds_get_mr_for_dest_args))
		return -EINVAL;

	if (copy_from_user(&args, (struct rds_get_mr_for_dest_args __user *)optval,
			   sizeof(struct rds_get_mr_for_dest_args)))
		return -EFAULT;

	/*
	 * Initially, just behave like get_mr().
	 * TODO: Implement get_mr as wrapper around this
	 *       and deprecate it.
	 */
	new_args.vec = args.vec;
	new_args.cookie_addr = args.cookie_addr;
	new_args.flags = args.flags;

	return __rds_rdma_map(rs, &new_args, NULL, NULL);
}

/*
 * Free the MR indicated by the given R_Key
 */
int rds_free_mr(struct rds_sock *rs, char __user *optval, int optlen)
{
	struct rds_free_mr_args args;
	struct rds_mr *mr;
	unsigned long flags;

	if (optlen != sizeof(struct rds_free_mr_args))
		return -EINVAL;

	if (copy_from_user(&args, (struct rds_free_mr_args __user *)optval,
			   sizeof(struct rds_free_mr_args)))
		return -EFAULT;

	/* Special case - a null cookie means flush all unused MRs */
	if (args.cookie == 0) {
		if (!rs->rs_transport || !rs->rs_transport->flush_mrs)
			return -EINVAL;
		rs->rs_transport->flush_mrs();
		return 0;
	}

	/* Look up the MR given its R_key and remove it from the rbtree
	 * so nobody else finds it.
	 * This should also prevent races with rds_rdma_unuse.
	 */
	spin_lock_irqsave(&rs->rs_rdma_lock, flags);
	mr = rds_mr_tree_walk(&rs->rs_rdma_keys, rds_rdma_cookie_key(args.cookie), NULL);
	if (mr) {
		rb_erase(&mr->r_rb_node, &rs->rs_rdma_keys);
		RB_CLEAR_NODE(&mr->r_rb_node);
		if (args.flags & RDS_RDMA_INVALIDATE)
			mr->r_invalidate = 1;
	}
	spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);

	if (!mr)
		return -EINVAL;

	/*
	 * Call rds_destroy_mr() ourselves so that we're sure it's done by the time
	 * we return. If we let rds_mr_put() do it, it might not happen until
	 * someone else drops their ref.
	 */
	rds_destroy_mr(mr);
	rds_mr_put(mr);
	return 0;
}

/*
 * This is called when we receive an extension header that
 * tells us this MR was used. It allows us to implement
 * use_once semantics.
 */
void rds_rdma_unuse(struct rds_sock *rs, u32 r_key, int force)
{
	struct rds_mr *mr;
	unsigned long flags;
	int zot_me = 0;

	spin_lock_irqsave(&rs->rs_rdma_lock, flags);
	mr = rds_mr_tree_walk(&rs->rs_rdma_keys, r_key, NULL);
	if (!mr) {
		printk(KERN_ERR "rds: trying to unuse MR with unknown r_key %u!\n", r_key);
		spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);
		return;
	}

	if (mr->r_use_once || force) {
		rb_erase(&mr->r_rb_node, &rs->rs_rdma_keys);
		RB_CLEAR_NODE(&mr->r_rb_node);
		zot_me = 1;
	}
	spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);

	/* May have to issue a dma_sync on this memory region.
	 * Note we could avoid this if the operation was an RDMA READ,
	 * but at this point we can't tell. */
	if (mr->r_trans->sync_mr)
		mr->r_trans->sync_mr(mr->r_trans_private, DMA_FROM_DEVICE);

	/* If the MR was marked as invalidate, this will
	 * trigger an async flush. */
	if (zot_me)
		rds_destroy_mr(mr);
	rds_mr_put(mr);
}

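/*
 * Release the resources held by an rdma op: unpin its pages (marking them
 * dirty if they may have been written by an RDMA READ) and free the notifier.
 */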
void rds_rdma_free_op(struct rm_rdma_op *ro)
{
	unsigned int i;

	for (i = 0; i < ro->op_nents; i++) {
		struct page *page = sg_page(&ro->op_sg[i]);

		/* Mark page dirty if it was possibly modified, which
		 * is the case for an RDMA_READ which copies from remote
		 * to local memory */
		if (!ro->op_write) {
			BUG_ON(irqs_disabled());
			set_page_dirty(page);
		}
		put_page(page);
	}

	kfree(ro->op_notifier);
	ro->op_notifier = NULL;
	ro->op_active = 0;
}

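/*
 * Release the single pinned page and the notifier held by an atomic op.
 */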
void rds_atomic_free_op(struct rm_atomic_op *ao)
{
	struct page *page = sg_page(ao->op_sg);

	/* Mark page dirty if it was possibly modified, which
	 * is the case for an RDMA_READ which copies from remote
	 * to local memory */
	set_page_dirty(page);
	put_page(page);

	kfree(ao->op_notifier);
	ao->op_notifier = NULL;
	ao->op_active = 0;
}


/*
 * Count the number of pages needed to describe an incoming iovec array.
 */
static int rds_rdma_pages(struct rds_iovec iov[], int nr_iovecs)
{
	int tot_pages = 0;
	unsigned int nr_pages;
	unsigned int i;

	/* figure out the number of pages in the vector */
	for (i = 0; i < nr_iovecs; i++) {
		nr_pages = rds_pages_in_vec(&iov[i]);
		if (nr_pages == 0)
			return -EINVAL;

		tot_pages += nr_pages;

		/*
		 * nr_pages for one entry is limited to (UINT_MAX>>PAGE_SHIFT)+1,
		 * so tot_pages cannot overflow without first going negative.
		 */
		if (tot_pages < 0)
			return -EINVAL;
	}

	return tot_pages;
}

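/*
 * Compute how much extra scatterlist space an RDMA request will need, based
 * on the iovec array that is still in userspace at this point.
 */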
int rds_rdma_extra_size(struct rds_rdma_args *args)
{
	struct rds_iovec vec;
	struct rds_iovec __user *local_vec;
	int tot_pages = 0;
	unsigned int nr_pages;
	unsigned int i;

	local_vec = (struct rds_iovec __user *)(unsigned long) args->local_vec_addr;

	/* figure out the number of pages in the vector */
	for (i = 0; i < args->nr_local; i++) {
		if (copy_from_user(&vec, &local_vec[i],
				   sizeof(struct rds_iovec)))
			return -EFAULT;

		nr_pages = rds_pages_in_vec(&vec);
		if (nr_pages == 0)
			return -EINVAL;

		tot_pages += nr_pages;

		/*
		 * nr_pages for one entry is limited to (UINT_MAX>>PAGE_SHIFT)+1,
		 * so tot_pages cannot overflow without first going negative.
		 */
		if (tot_pages < 0)
			return -EINVAL;
	}

	return tot_pages * sizeof(struct scatterlist);
}

/*
 * The application asks for an RDMA transfer.
 * Extract all arguments and set up the rdma_op
 */
int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
		       struct cmsghdr *cmsg)
{
	struct rds_rdma_args *args;
	struct rm_rdma_op *op = &rm->rdma;
	int nr_pages;
	unsigned int nr_bytes;
	struct page **pages = NULL;
	struct rds_iovec iovstack[UIO_FASTIOV], *iovs = iovstack;
	int iov_size;
	unsigned int i, j;
	int ret = 0;

	if (cmsg->cmsg_len < CMSG_LEN(sizeof(struct rds_rdma_args))
	    || rm->rdma.op_active)
		return -EINVAL;

	args = CMSG_DATA(cmsg);

	if (rs->rs_bound_addr == 0) {
		ret = -ENOTCONN; /* XXX not a great errno */
		goto out_ret;
	}

	if (args->nr_local > UIO_MAXIOV) {
		ret = -EMSGSIZE;
		goto out_ret;
	}

	/* Check whether to allocate the iovec area */
	iov_size = args->nr_local * sizeof(struct rds_iovec);
	if (args->nr_local > UIO_FASTIOV) {
		iovs = sock_kmalloc(rds_rs_to_sk(rs), iov_size, GFP_KERNEL);
		if (!iovs) {
			ret = -ENOMEM;
			goto out_ret;
		}
	}

	if (copy_from_user(iovs, (struct rds_iovec __user *)(unsigned long) args->local_vec_addr, iov_size)) {
		ret = -EFAULT;
		goto out;
	}

	nr_pages = rds_rdma_pages(iovs, args->nr_local);
	if (nr_pages < 0) {
		ret = -EINVAL;
		goto out;
	}

	pages = kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL);
	if (!pages) {
		ret = -ENOMEM;
		goto out;
	}

	op->op_write = !!(args->flags & RDS_RDMA_READWRITE);
	op->op_fence = !!(args->flags & RDS_RDMA_FENCE);
	op->op_notify = !!(args->flags & RDS_RDMA_NOTIFY_ME);
	op->op_silent = !!(args->flags & RDS_RDMA_SILENT);
	op->op_active = 1;
	op->op_recverr = rs->rs_recverr;
	WARN_ON(!nr_pages);
	op->op_sg = rds_message_alloc_sgs(rm, nr_pages);
	if (!op->op_sg) {
		ret = -ENOMEM;
		goto out;
	}

	if (op->op_notify || op->op_recverr) {
		/* We allocate an uninitialized notifier here, because
		 * we don't want to do that in the completion handler. We
		 * would have to use GFP_ATOMIC there, and don't want to deal
		 * with failed allocations.
		 */
		op->op_notifier = kmalloc(sizeof(struct rds_notifier), GFP_KERNEL);
		if (!op->op_notifier) {
			ret = -ENOMEM;
			goto out;
		}
		op->op_notifier->n_user_token = args->user_token;
		op->op_notifier->n_status = RDS_RDMA_SUCCESS;
	}

	/* The cookie contains the R_Key of the remote memory region, and
	 * optionally an offset into it. This is how we implement RDMA into
	 * unaligned memory.
	 * When setting up the RDMA, we need to add that offset to the
	 * destination address (which is really an offset into the MR)
	 * FIXME: We may want to move this into ib_rdma.c
	 */
	op->op_rkey = rds_rdma_cookie_key(args->cookie);
	op->op_remote_addr = args->remote_vec.addr + rds_rdma_cookie_offset(args->cookie);

	nr_bytes = 0;

	rdsdebug("RDS: rdma prepare nr_local %llu rva %llx rkey %x\n",
		 (unsigned long long)args->nr_local,
		 (unsigned long long)args->remote_vec.addr,
		 op->op_rkey);

	for (i = 0; i < args->nr_local; i++) {
		struct rds_iovec *iov = &iovs[i];
		/* don't need to check, rds_rdma_pages() verified nr will be +nonzero */
		unsigned int nr = rds_pages_in_vec(iov);

		rs->rs_user_addr = iov->addr;
		rs->rs_user_bytes = iov->bytes;

		/* If it's a WRITE operation, we want to pin the pages for reading.
		 * If it's a READ operation, we need to pin the pages for writing.
		 */
		ret = rds_pin_pages(iov->addr, nr, pages, !op->op_write);
		if (ret < 0)
			goto out;

		rdsdebug("RDS: nr_bytes %u nr %u iov->bytes %llu iov->addr %llx\n",
			 nr_bytes, nr, iov->bytes, iov->addr);

		nr_bytes += iov->bytes;

		for (j = 0; j < nr; j++) {
			unsigned int offset = iov->addr & ~PAGE_MASK;
			struct scatterlist *sg;

			sg = &op->op_sg[op->op_nents + j];
			sg_set_page(sg, pages[j],
				    min_t(unsigned int, iov->bytes, PAGE_SIZE - offset),
				    offset);

			rdsdebug("RDS: sg->offset %x sg->len %x iov->addr %llx iov->bytes %llu\n",
				 sg->offset, sg->length, iov->addr, iov->bytes);

			iov->addr += sg->length;
			iov->bytes -= sg->length;
		}

		op->op_nents += nr;
	}

	if (nr_bytes > args->remote_vec.bytes) {
		rdsdebug("RDS nr_bytes %u remote_bytes %u do not match\n",
			 nr_bytes,
			 (unsigned int) args->remote_vec.bytes);
		ret = -EINVAL;
		goto out;
	}
	op->op_bytes = nr_bytes;

out:
	if (iovs != iovstack)
		sock_kfree_s(rds_rs_to_sk(rs), iovs, iov_size);
	kfree(pages);
out_ret:
	if (ret)
		rds_rdma_free_op(op);
	else
		rds_stats_inc(s_send_rdma);

	return ret;
}

/*
 * The application wants us to pass an RDMA destination (aka MR)
 * to the remote
 */
int rds_cmsg_rdma_dest(struct rds_sock *rs, struct rds_message *rm,
		       struct cmsghdr *cmsg)
{
	unsigned long flags;
	struct rds_mr *mr;
	u32 r_key;
	int err = 0;

	if (cmsg->cmsg_len < CMSG_LEN(sizeof(rds_rdma_cookie_t)) ||
	    rm->m_rdma_cookie != 0)
		return -EINVAL;

	memcpy(&rm->m_rdma_cookie, CMSG_DATA(cmsg), sizeof(rm->m_rdma_cookie));

	/* We are reusing a previously mapped MR here. Most likely, the
	 * application has written to the buffer, so we need to explicitly
	 * flush those writes to RAM. Otherwise the HCA may not see them
	 * when doing a DMA from that buffer.
	 */
	r_key = rds_rdma_cookie_key(rm->m_rdma_cookie);

	spin_lock_irqsave(&rs->rs_rdma_lock, flags);
	mr = rds_mr_tree_walk(&rs->rs_rdma_keys, r_key, NULL);
	if (!mr)
		err = -EINVAL;	/* invalid r_key */
	else
		atomic_inc(&mr->r_refcount);
	spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);

	if (mr) {
		mr->r_trans->sync_mr(mr->r_trans_private, DMA_TO_DEVICE);
		rm->rdma.op_rdma_mr = mr;
	}
	return err;
}

/*
 * The application passes us an address range it wants to enable RDMA
 * to/from. We map the area, and save the <R_Key,offset> pair
 * in rm->m_rdma_cookie. This causes it to be sent along to the peer
 * in an extension header.
 */
int rds_cmsg_rdma_map(struct rds_sock *rs, struct rds_message *rm,
		      struct cmsghdr *cmsg)
{
	if (cmsg->cmsg_len < CMSG_LEN(sizeof(struct rds_get_mr_args)) ||
	    rm->m_rdma_cookie != 0)
		return -EINVAL;

	return __rds_rdma_map(rs, CMSG_DATA(cmsg), &rm->m_rdma_cookie, &rm->rdma.op_rdma_mr);
}

/*
 * Fill in rds_message for an atomic request.
 */
int rds_cmsg_atomic(struct rds_sock *rs, struct rds_message *rm,
		    struct cmsghdr *cmsg)
{
	struct page *page = NULL;
	struct rds_atomic_args *args;
	int ret = 0;

	if (cmsg->cmsg_len < CMSG_LEN(sizeof(struct rds_atomic_args))
	    || rm->atomic.op_active)
		return -EINVAL;

	args = CMSG_DATA(cmsg);

	/* Nonmasked & masked cmsg ops converted to masked hw ops */
	switch (cmsg->cmsg_type) {
	case RDS_CMSG_ATOMIC_FADD:
		rm->atomic.op_type = RDS_ATOMIC_TYPE_FADD;
		rm->atomic.op_m_fadd.add = args->fadd.add;
		rm->atomic.op_m_fadd.nocarry_mask = 0;
		break;
	case RDS_CMSG_MASKED_ATOMIC_FADD:
		rm->atomic.op_type = RDS_ATOMIC_TYPE_FADD;
		rm->atomic.op_m_fadd.add = args->m_fadd.add;
		rm->atomic.op_m_fadd.nocarry_mask = args->m_fadd.nocarry_mask;
		break;
	case RDS_CMSG_ATOMIC_CSWP:
		rm->atomic.op_type = RDS_ATOMIC_TYPE_CSWP;
		rm->atomic.op_m_cswp.compare = args->cswp.compare;
		rm->atomic.op_m_cswp.swap = args->cswp.swap;
		rm->atomic.op_m_cswp.compare_mask = ~0;
		rm->atomic.op_m_cswp.swap_mask = ~0;
		break;
	case RDS_CMSG_MASKED_ATOMIC_CSWP:
		rm->atomic.op_type = RDS_ATOMIC_TYPE_CSWP;
		rm->atomic.op_m_cswp.compare = args->m_cswp.compare;
		rm->atomic.op_m_cswp.swap = args->m_cswp.swap;
		rm->atomic.op_m_cswp.compare_mask = args->m_cswp.compare_mask;
		rm->atomic.op_m_cswp.swap_mask = args->m_cswp.swap_mask;
		break;
	default:
		BUG(); /* should never happen */
	}

	rm->atomic.op_notify = !!(args->flags & RDS_RDMA_NOTIFY_ME);
	rm->atomic.op_silent = !!(args->flags & RDS_RDMA_SILENT);
	rm->atomic.op_active = 1;
	rm->atomic.op_recverr = rs->rs_recverr;
	rm->atomic.op_sg = rds_message_alloc_sgs(rm, 1);
	if (!rm->atomic.op_sg) {
		ret = -ENOMEM;
		goto err;
	}

	/* verify 8 byte-aligned */
	if (args->local_addr & 0x7) {
		ret = -EFAULT;
		goto err;
	}

	ret = rds_pin_pages(args->local_addr, 1, &page, 1);
	if (ret != 1)
		goto err;
	ret = 0;

	sg_set_page(rm->atomic.op_sg, page, 8, offset_in_page(args->local_addr));

	if (rm->atomic.op_notify || rm->atomic.op_recverr) {
		/* We allocate an uninitialized notifier here, because
		 * we don't want to do that in the completion handler. We
		 * would have to use GFP_ATOMIC there, and don't want to deal
		 * with failed allocations.
		 */
		rm->atomic.op_notifier = kmalloc(sizeof(*rm->atomic.op_notifier), GFP_KERNEL);
		if (!rm->atomic.op_notifier) {
			ret = -ENOMEM;
			goto err;
		}

		rm->atomic.op_notifier->n_user_token = args->user_token;
		rm->atomic.op_notifier->n_status = RDS_RDMA_SUCCESS;
	}

	rm->atomic.op_rkey = rds_rdma_cookie_key(args->cookie);
	rm->atomic.op_remote_addr = args->remote_addr + rds_rdma_cookie_offset(args->cookie);

	return ret;
err:
	if (page)
		put_page(page);
	kfree(rm->atomic.op_notifier);

	return ret;
}