/*
 * Copyright (c) 2007 Oracle. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/dma-mapping.h> /* for DMA_*_DEVICE */

#include "rds.h"

/*
 * XXX
 *  - build with sparse
 *  - should we limit the size of an MR region? let transport return failure?
 *  - should we detect duplicate keys on a socket? hmm.
 *  - an rdma is an mlock, apply rlimit?
 */

/*
 * Get the number of pages spanned by a vec by looking at the page
 * indices that the start and end addresses fall in.
 *
 * Returns 0 if the vec is invalid. It is invalid if the number of bytes
 * causes the address to wrap or overflows an unsigned int. This limit
 * comes from the byte count being stored in the 'length' member of
 * 'struct scatterlist', which is an unsigned int.
 */
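/*
 * For illustration (assuming 4 KiB pages): a vec with addr 0x1ff8 and
 * bytes 16 ends at 0x2008, so this works out to
 * ((0x2008 + 0xfff) >> 12) - (0x1ff8 >> 12) = 3 - 1 = 2 pages.
 */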
static unsigned int rds_pages_in_vec(struct rds_iovec *vec)
{
	if ((vec->addr + vec->bytes <= vec->addr) ||
	    (vec->bytes > (u64)UINT_MAX))
		return 0;

	return ((vec->addr + vec->bytes + PAGE_SIZE - 1) >> PAGE_SHIFT) -
		(vec->addr >> PAGE_SHIFT);
}

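/*
 * Look up the MR with the given R_Key in the socket's rbtree. If 'insert'
 * is non-NULL and no entry matches the key, link it into the tree and
 * take a reference on it. Returns the matching MR if one was already
 * present, NULL otherwise.
 */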
static struct rds_mr *rds_mr_tree_walk(struct rb_root *root, u64 key,
				struct rds_mr *insert)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct rds_mr *mr;

	while (*p) {
		parent = *p;
		mr = rb_entry(parent, struct rds_mr, r_rb_node);

		if (key < mr->r_key)
			p = &(*p)->rb_left;
		else if (key > mr->r_key)
			p = &(*p)->rb_right;
		else
			return mr;
	}

	if (insert) {
		rb_link_node(&insert->r_rb_node, parent, p);
		rb_insert_color(&insert->r_rb_node, root);
		atomic_inc(&insert->r_refcount);
	}
	return NULL;
}

/*
 * Destroy the transport-specific part of an MR.
 */
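/*
 * Safe to call more than once: the RDS_MR_DEAD bit makes later calls
 * no-ops. Note that this acquires rs_rdma_lock itself, so callers must
 * not already hold it.
 */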
static void rds_destroy_mr(struct rds_mr *mr)
{
	struct rds_sock *rs = mr->r_sock;
	void *trans_private = NULL;
	unsigned long flags;

	rdsdebug("RDS: destroy mr key is %x refcnt %u\n",
		 mr->r_key, atomic_read(&mr->r_refcount));

	if (test_and_set_bit(RDS_MR_DEAD, &mr->r_state))
		return;

	spin_lock_irqsave(&rs->rs_rdma_lock, flags);
	if (!RB_EMPTY_NODE(&mr->r_rb_node))
		rb_erase(&mr->r_rb_node, &rs->rs_rdma_keys);
	trans_private = mr->r_trans_private;
	mr->r_trans_private = NULL;
	spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);

	if (trans_private)
		mr->r_trans->free_mr(trans_private, mr->r_invalidate);
}

void __rds_put_mr_final(struct rds_mr *mr)
{
	rds_destroy_mr(mr);
	kfree(mr);
}

/*
 * By the time this is called we can't have any more ioctls called on
 * the socket so we don't need to worry about racing with others.
 */
void rds_rdma_drop_keys(struct rds_sock *rs)
{
	struct rds_mr *mr;
	struct rb_node *node;
	unsigned long flags;

	/* Release any MRs associated with this socket */
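	/* rds_destroy_mr() acquires rs_rdma_lock itself, so the lock is
	 * dropped around the call below; each MR is unlinked from the
	 * tree first, so nothing else can find it while the lock is
	 * released. */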
	spin_lock_irqsave(&rs->rs_rdma_lock, flags);
	while ((node = rb_first(&rs->rs_rdma_keys))) {
		mr = container_of(node, struct rds_mr, r_rb_node);
		if (mr->r_trans == rs->rs_transport)
			mr->r_invalidate = 0;
		rb_erase(&mr->r_rb_node, &rs->rs_rdma_keys);
		RB_CLEAR_NODE(&mr->r_rb_node);
		spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);
		rds_destroy_mr(mr);
		rds_mr_put(mr);
		spin_lock_irqsave(&rs->rs_rdma_lock, flags);
	}
	spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);

	if (rs->rs_transport && rs->rs_transport->flush_mrs)
		rs->rs_transport->flush_mrs();
}

/*
 * Helper function to pin user pages.
 */
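/*
 * All or nothing: if get_user_pages_fast() pins fewer than nr_pages
 * pages, the partial pins are released and -EFAULT is returned, so a
 * non-negative return always means the whole range is pinned.
 */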
static int rds_pin_pages(unsigned long user_addr, unsigned int nr_pages,
			struct page **pages, int write)
{
	int ret;

	ret = get_user_pages_fast(user_addr, nr_pages, write, pages);

	if (ret >= 0 && ret < nr_pages) {
		while (ret--)
			put_page(pages[ret]);
		ret = -EFAULT;
	}

	return ret;
}

static int __rds_rdma_map(struct rds_sock *rs, struct rds_get_mr_args *args,
				u64 *cookie_ret, struct rds_mr **mr_ret)
{
	struct rds_mr *mr = NULL, *found;
	unsigned int nr_pages;
	struct page **pages = NULL;
	struct scatterlist *sg;
	void *trans_private;
	unsigned long flags;
	rds_rdma_cookie_t cookie;
	unsigned int nents;
	long i;
	int ret;

	if (rs->rs_bound_addr == 0) {
		ret = -ENOTCONN; /* XXX not a great errno */
		goto out;
	}

	if (!rs->rs_transport->get_mr) {
		ret = -EOPNOTSUPP;
		goto out;
	}

	nr_pages = rds_pages_in_vec(&args->vec);
	if (nr_pages == 0) {
		ret = -EINVAL;
		goto out;
	}

	rdsdebug("RDS: get_mr addr %llx len %llu nr_pages %u\n",
		args->vec.addr, args->vec.bytes, nr_pages);

	/* XXX clamp nr_pages to limit the size of this alloc? */
	pages = kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL);
	if (!pages) {
		ret = -ENOMEM;
		goto out;
	}

	mr = kzalloc(sizeof(struct rds_mr), GFP_KERNEL);
	if (!mr) {
		ret = -ENOMEM;
		goto out;
	}

	atomic_set(&mr->r_refcount, 1);
	RB_CLEAR_NODE(&mr->r_rb_node);
	mr->r_trans = rs->rs_transport;
	mr->r_sock = rs;

	if (args->flags & RDS_RDMA_USE_ONCE)
		mr->r_use_once = 1;
	if (args->flags & RDS_RDMA_INVALIDATE)
		mr->r_invalidate = 1;
	if (args->flags & RDS_RDMA_READWRITE)
		mr->r_write = 1;

	/*
	 * Pin the pages that make up the user buffer and transfer the page
	 * pointers to the mr's sg array. We check to see if we've mapped
	 * the whole region after transferring the partial page references
	 * to the sg array so that we can have one page ref cleanup path.
	 *
	 * For now we have no flag that tells us whether the mapping is
	 * r/o or r/w. We need to assume r/w, or we'll do a lot of RDMA to
	 * the zero page.
	 */
	ret = rds_pin_pages(args->vec.addr, nr_pages, pages, 1);
	if (ret < 0)
		goto out;

	nents = ret;
	sg = kcalloc(nents, sizeof(*sg), GFP_KERNEL);
	if (!sg) {
		ret = -ENOMEM;
		goto out;
	}
	WARN_ON(!nents);
	sg_init_table(sg, nents);

	/* Stick all pages into the scatterlist */
	for (i = 0 ; i < nents; i++)
		sg_set_page(&sg[i], pages[i], PAGE_SIZE, 0);

	rdsdebug("RDS: trans_private nents is %u\n", nents);

	/* Obtain a transport specific MR. If this succeeds, the
	 * s/g list is now owned by the MR.
	 * Note that dma_map() implies that pending writes are
	 * flushed to RAM, so no dma_sync is needed here. */
	trans_private = rs->rs_transport->get_mr(sg, nents, rs,
						 &mr->r_key);

	if (IS_ERR(trans_private)) {
		for (i = 0 ; i < nents; i++)
			put_page(sg_page(&sg[i]));
		kfree(sg);
		ret = PTR_ERR(trans_private);
		goto out;
	}

	mr->r_trans_private = trans_private;

	rdsdebug("RDS: get_mr put_user key is %x cookie_addr %p\n",
	       mr->r_key, (void *)(unsigned long) args->cookie_addr);

	/* The user may pass us an unaligned address, but we can only
	 * map page aligned regions. So we keep the offset, and build
	 * a 64bit cookie containing <R_Key, offset> and pass that
	 * around. */
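	/* For illustration, with 4 KiB pages: a user address of 0x401234
	 * maps the page at 0x401000, and the cookie carries <r_key, 0x234>
	 * so the peer can still address the original byte. */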
	cookie = rds_rdma_make_cookie(mr->r_key, args->vec.addr & ~PAGE_MASK);
	if (cookie_ret)
		*cookie_ret = cookie;

	if (args->cookie_addr && put_user(cookie, (u64 __user *)(unsigned long) args->cookie_addr)) {
		ret = -EFAULT;
		goto out;
	}

	/* Inserting the new MR into the rbtree bumps its
	 * reference count. */
	spin_lock_irqsave(&rs->rs_rdma_lock, flags);
	found = rds_mr_tree_walk(&rs->rs_rdma_keys, mr->r_key, mr);
	spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);

	BUG_ON(found && found != mr);

	rdsdebug("RDS: get_mr key is %x\n", mr->r_key);
	if (mr_ret) {
		atomic_inc(&mr->r_refcount);
		*mr_ret = mr;
	}

	ret = 0;
out:
	kfree(pages);
	if (mr)
		rds_mr_put(mr);
	return ret;
}

int rds_get_mr(struct rds_sock *rs, char __user *optval, int optlen)
{
	struct rds_get_mr_args args;

	if (optlen != sizeof(struct rds_get_mr_args))
		return -EINVAL;

	if (copy_from_user(&args, (struct rds_get_mr_args __user *)optval,
			   sizeof(struct rds_get_mr_args)))
		return -EFAULT;

	return __rds_rdma_map(rs, &args, NULL, NULL);
}
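
/*
 * A rough sketch of the userspace side, for illustration only (error
 * handling omitted; assumes a bound RDS socket 'fd'):
 *
 *	uint64_t cookie;
 *	struct rds_get_mr_args args = {
 *		.vec         = { .addr = (uint64_t) buf, .bytes = len },
 *		.cookie_addr = (uint64_t) &cookie,
 *		.flags       = RDS_RDMA_USE_ONCE,
 *	};
 *	setsockopt(fd, SOL_RDS, RDS_GET_MR, &args, sizeof(args));
 *
 * The cookie is then sent to the peer, which uses it as the target of
 * an RDMA transfer.
 */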

int rds_get_mr_for_dest(struct rds_sock *rs, char __user *optval, int optlen)
{
	struct rds_get_mr_for_dest_args args;
	struct rds_get_mr_args new_args;

	if (optlen != sizeof(struct rds_get_mr_for_dest_args))
		return -EINVAL;

	if (copy_from_user(&args, (struct rds_get_mr_for_dest_args __user *)optval,
			   sizeof(struct rds_get_mr_for_dest_args)))
		return -EFAULT;

	/*
	 * Initially, just behave like get_mr().
	 * TODO: Implement get_mr as wrapper around this
	 * and deprecate it.
	 */
	new_args.vec = args.vec;
	new_args.cookie_addr = args.cookie_addr;
	new_args.flags = args.flags;

	return __rds_rdma_map(rs, &new_args, NULL, NULL);
}

/*
 * Free the MR indicated by the given R_Key
 */
int rds_free_mr(struct rds_sock *rs, char __user *optval, int optlen)
{
	struct rds_free_mr_args args;
	struct rds_mr *mr;
	unsigned long flags;

	if (optlen != sizeof(struct rds_free_mr_args))
		return -EINVAL;

	if (copy_from_user(&args, (struct rds_free_mr_args __user *)optval,
			   sizeof(struct rds_free_mr_args)))
		return -EFAULT;

	/* Special case - a null cookie means flush all unused MRs */
	if (args.cookie == 0) {
		if (!rs->rs_transport || !rs->rs_transport->flush_mrs)
			return -EINVAL;
		rs->rs_transport->flush_mrs();
		return 0;
	}

	/* Look up the MR given its R_key and remove it from the rbtree
	 * so nobody else finds it.
	 * This should also prevent races with rds_rdma_unuse.
	 */
	spin_lock_irqsave(&rs->rs_rdma_lock, flags);
	mr = rds_mr_tree_walk(&rs->rs_rdma_keys, rds_rdma_cookie_key(args.cookie), NULL);
	if (mr) {
		rb_erase(&mr->r_rb_node, &rs->rs_rdma_keys);
		RB_CLEAR_NODE(&mr->r_rb_node);
		if (args.flags & RDS_RDMA_INVALIDATE)
			mr->r_invalidate = 1;
	}
	spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);

	if (!mr)
		return -EINVAL;

	/*
	 * Call rds_destroy_mr() ourselves so that we're sure it's done by
	 * the time we return. If we left it to rds_mr_put(), it might not
	 * happen until someone else drops their ref.
	 */
	rds_destroy_mr(mr);
	rds_mr_put(mr);
	return 0;
}

/*
 * This is called when we receive an extension header that
 * tells us this MR was used. It allows us to implement
 * use_once semantics.
 */
void rds_rdma_unuse(struct rds_sock *rs, u32 r_key, int force)
{
	struct rds_mr *mr;
	unsigned long flags;
	int zot_me = 0;

	spin_lock_irqsave(&rs->rs_rdma_lock, flags);
	mr = rds_mr_tree_walk(&rs->rs_rdma_keys, r_key, NULL);
	if (!mr) {
		printk(KERN_ERR "rds: trying to unuse MR with unknown r_key %u!\n", r_key);
		spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);
		return;
	}

	if (mr->r_use_once || force) {
		rb_erase(&mr->r_rb_node, &rs->rs_rdma_keys);
		RB_CLEAR_NODE(&mr->r_rb_node);
		zot_me = 1;
	}
	spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);

	/* May have to issue a dma_sync on this memory region.
	 * Note we could avoid this if the operation was an RDMA READ,
	 * but at this point we can't tell. */
	if (mr->r_trans->sync_mr)
		mr->r_trans->sync_mr(mr->r_trans_private, DMA_FROM_DEVICE);

	/* If the MR was marked as invalidate, this will
	 * trigger an async flush. */
	if (zot_me) {
		rds_destroy_mr(mr);
		rds_mr_put(mr);
	}
}

void rds_rdma_free_op(struct rm_rdma_op *ro)
{
	unsigned int i;

	for (i = 0; i < ro->op_nents; i++) {
		struct page *page = sg_page(&ro->op_sg[i]);

		/* Mark page dirty if it was possibly modified, which
		 * is the case for an RDMA_READ, which copies from remote
		 * to local memory. */
		if (!ro->op_write) {
			WARN_ON(!page->mapping && irqs_disabled());
			set_page_dirty(page);
		}
		put_page(page);
	}

	kfree(ro->op_notifier);
	ro->op_notifier = NULL;
	ro->op_active = 0;
}

void rds_atomic_free_op(struct rm_atomic_op *ao)
{
	struct page *page = sg_page(ao->op_sg);

	/* Mark page dirty if it was possibly modified, which
	 * is the case for an RDMA_READ, which copies from remote
	 * to local memory. */
	set_page_dirty(page);
	put_page(page);

	kfree(ao->op_notifier);
	ao->op_notifier = NULL;
	ao->op_active = 0;
}

/*
 * Count the number of pages needed to describe an incoming iovec array.
 */
static int rds_rdma_pages(struct rds_iovec iov[], int nr_iovecs)
{
	int tot_pages = 0;
	unsigned int nr_pages;
	unsigned int i;

	/* figure out the number of pages in the vector */
	for (i = 0; i < nr_iovecs; i++) {
		nr_pages = rds_pages_in_vec(&iov[i]);
		if (nr_pages == 0)
			return -EINVAL;

		tot_pages += nr_pages;

		/*
		 * nr_pages for one entry is limited to (UINT_MAX>>PAGE_SHIFT)+1,
		 * so tot_pages cannot overflow without first going negative.
		 */
		if (tot_pages < 0)
			return -EINVAL;
	}

	return tot_pages;
}

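/*
 * Like rds_rdma_pages(), but the iovec array still lives in userspace,
 * so each entry is copied in before being counted. Returns the number of
 * bytes of scatterlist space the RDMA op will need, or a negative errno.
 */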
int rds_rdma_extra_size(struct rds_rdma_args *args)
{
	struct rds_iovec vec;
	struct rds_iovec __user *local_vec;
	int tot_pages = 0;
	unsigned int nr_pages;
	unsigned int i;

	local_vec = (struct rds_iovec __user *)(unsigned long) args->local_vec_addr;

	/* figure out the number of pages in the vector */
	for (i = 0; i < args->nr_local; i++) {
		if (copy_from_user(&vec, &local_vec[i],
				   sizeof(struct rds_iovec)))
			return -EFAULT;

		nr_pages = rds_pages_in_vec(&vec);
		if (nr_pages == 0)
			return -EINVAL;

		tot_pages += nr_pages;

		/*
		 * nr_pages for one entry is limited to (UINT_MAX>>PAGE_SHIFT)+1,
		 * so tot_pages cannot overflow without first going negative.
		 */
		if (tot_pages < 0)
			return -EINVAL;
	}

	return tot_pages * sizeof(struct scatterlist);
}

/*
 * The application asks for an RDMA transfer.
 * Extract all arguments and set up the rdma_op.
 */
int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
		       struct cmsghdr *cmsg)
{
	struct rds_rdma_args *args;
	struct rm_rdma_op *op = &rm->rdma;
	int nr_pages;
	unsigned int nr_bytes;
	struct page **pages = NULL;
	struct rds_iovec iovstack[UIO_FASTIOV], *iovs = iovstack;
	int iov_size;
	unsigned int i, j;
	int ret = 0;

	if (cmsg->cmsg_len < CMSG_LEN(sizeof(struct rds_rdma_args))
	    || rm->rdma.op_active)
		return -EINVAL;

	args = CMSG_DATA(cmsg);

	if (rs->rs_bound_addr == 0) {
		ret = -ENOTCONN; /* XXX not a great errno */
		goto out_ret;
	}

	if (args->nr_local > UIO_MAXIOV) {
		ret = -EMSGSIZE;
		goto out_ret;
	}

	/* Check whether to allocate the iovec area */
	iov_size = args->nr_local * sizeof(struct rds_iovec);
	if (args->nr_local > UIO_FASTIOV) {
		iovs = sock_kmalloc(rds_rs_to_sk(rs), iov_size, GFP_KERNEL);
		if (!iovs) {
			ret = -ENOMEM;
			goto out_ret;
		}
	}

	if (copy_from_user(iovs, (struct rds_iovec __user *)(unsigned long) args->local_vec_addr, iov_size)) {
		ret = -EFAULT;
		goto out;
	}

	nr_pages = rds_rdma_pages(iovs, args->nr_local);
	if (nr_pages < 0) {
		ret = -EINVAL;
		goto out;
	}

	pages = kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL);
	if (!pages) {
		ret = -ENOMEM;
		goto out;
	}

	op->op_write = !!(args->flags & RDS_RDMA_READWRITE);
	op->op_fence = !!(args->flags & RDS_RDMA_FENCE);
	op->op_notify = !!(args->flags & RDS_RDMA_NOTIFY_ME);
	op->op_silent = !!(args->flags & RDS_RDMA_SILENT);
	op->op_active = 1;
	op->op_recverr = rs->rs_recverr;
	WARN_ON(!nr_pages);
	op->op_sg = rds_message_alloc_sgs(rm, nr_pages);
	if (!op->op_sg) {
		ret = -ENOMEM;
		goto out;
	}

	if (op->op_notify || op->op_recverr) {
		/* We allocate an uninitialized notifier here, because
		 * we don't want to do that in the completion handler. We
		 * would have to use GFP_ATOMIC there, and don't want to deal
		 * with failed allocations.
		 */
		op->op_notifier = kmalloc(sizeof(struct rds_notifier), GFP_KERNEL);
		if (!op->op_notifier) {
			ret = -ENOMEM;
			goto out;
		}
		op->op_notifier->n_user_token = args->user_token;
		op->op_notifier->n_status = RDS_RDMA_SUCCESS;
	}

	/* The cookie contains the R_Key of the remote memory region, and
	 * optionally an offset into it. This is how we implement RDMA into
	 * unaligned memory.
	 * When setting up the RDMA, we need to add that offset to the
	 * destination address (which is really an offset into the MR)
	 * FIXME: We may want to move this into ib_rdma.c
	 */
	op->op_rkey = rds_rdma_cookie_key(args->cookie);
	op->op_remote_addr = args->remote_vec.addr + rds_rdma_cookie_offset(args->cookie);

	nr_bytes = 0;

	rdsdebug("RDS: rdma prepare nr_local %llu rva %llx rkey %x\n",
	       (unsigned long long)args->nr_local,
	       (unsigned long long)args->remote_vec.addr,
	       op->op_rkey);

	for (i = 0; i < args->nr_local; i++) {
		struct rds_iovec *iov = &iovs[i];
		/* no need to check: rds_rdma_pages() verified nr will be nonzero */
		unsigned int nr = rds_pages_in_vec(iov);

		rs->rs_user_addr = iov->addr;
		rs->rs_user_bytes = iov->bytes;

		/* If it's a WRITE operation, we want to pin the pages for reading.
		 * If it's a READ operation, we need to pin the pages for writing.
		 */
		ret = rds_pin_pages(iov->addr, nr, pages, !op->op_write);
		if (ret < 0)
			goto out;
		else
			ret = 0;

		rdsdebug("RDS: nr_bytes %u nr %u iov->bytes %llu iov->addr %llx\n",
			 nr_bytes, nr, iov->bytes, iov->addr);

		nr_bytes += iov->bytes;

		for (j = 0; j < nr; j++) {
			unsigned int offset = iov->addr & ~PAGE_MASK;
			struct scatterlist *sg;

			sg = &op->op_sg[op->op_nents + j];
			sg_set_page(sg, pages[j],
					min_t(unsigned int, iov->bytes, PAGE_SIZE - offset),
					offset);

			rdsdebug("RDS: sg->offset %x sg->len %x iov->addr %llx iov->bytes %llu\n",
			       sg->offset, sg->length, iov->addr, iov->bytes);

			iov->addr += sg->length;
			iov->bytes -= sg->length;
		}

		op->op_nents += nr;
	}

	if (nr_bytes > args->remote_vec.bytes) {
		rdsdebug("RDS nr_bytes %u remote_bytes %u do not match\n",
				nr_bytes,
				(unsigned int) args->remote_vec.bytes);
		ret = -EINVAL;
		goto out;
	}
	op->op_bytes = nr_bytes;

out:
	if (iovs != iovstack)
		sock_kfree_s(rds_rs_to_sk(rs), iovs, iov_size);
	kfree(pages);
out_ret:
	if (ret)
		rds_rdma_free_op(op);
	else
		rds_stats_inc(s_send_rdma);

	return ret;
}
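
/*
 * A rough sketch of the userspace side, for illustration only (error
 * handling omitted; 'cookie' is an R_Key cookie previously received
 * from the peer):
 *
 *	struct rds_iovec iov = { .addr = (uint64_t) buf, .bytes = len };
 *	struct rds_rdma_args rargs = {
 *		.cookie         = cookie,
 *		.remote_vec     = { .addr = 0, .bytes = len },
 *		.local_vec_addr = (uint64_t) &iov,
 *		.nr_local       = 1,
 *		.flags          = RDS_RDMA_READWRITE | RDS_RDMA_NOTIFY_ME,
 *		.user_token     = token,
 *	};
 *
 * rargs is then attached to a sendmsg() call as a cmsg of level SOL_RDS
 * and type RDS_CMSG_RDMA_ARGS. remote_vec.addr is an offset into the
 * peer's MR; the R_Key and page offset come from the cookie.
 */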

/*
 * The application wants us to pass an RDMA destination (aka MR)
 * to the remote.
 */
int rds_cmsg_rdma_dest(struct rds_sock *rs, struct rds_message *rm,
			  struct cmsghdr *cmsg)
{
	unsigned long flags;
	struct rds_mr *mr;
	u32 r_key;
	int err = 0;

	if (cmsg->cmsg_len < CMSG_LEN(sizeof(rds_rdma_cookie_t)) ||
	    rm->m_rdma_cookie != 0)
		return -EINVAL;

	memcpy(&rm->m_rdma_cookie, CMSG_DATA(cmsg), sizeof(rm->m_rdma_cookie));

	/* We are reusing a previously mapped MR here. Most likely, the
	 * application has written to the buffer, so we need to explicitly
	 * flush those writes to RAM. Otherwise the HCA may not see them
	 * when doing a DMA from that buffer.
	 */
	r_key = rds_rdma_cookie_key(rm->m_rdma_cookie);

	spin_lock_irqsave(&rs->rs_rdma_lock, flags);
	mr = rds_mr_tree_walk(&rs->rs_rdma_keys, r_key, NULL);
	if (!mr)
		err = -EINVAL;	/* invalid r_key */
	else
		atomic_inc(&mr->r_refcount);
	spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);

	if (mr) {
		mr->r_trans->sync_mr(mr->r_trans_private, DMA_TO_DEVICE);
		rm->rdma.op_rdma_mr = mr;
	}
	return err;
}

/*
 * The application passes us an address range it wants to enable RDMA
 * to/from. We map the area, and save the <R_Key,offset> pair
 * in rm->m_rdma_cookie. This causes it to be sent along to the peer
 * in an extension header.
 */
int rds_cmsg_rdma_map(struct rds_sock *rs, struct rds_message *rm,
			  struct cmsghdr *cmsg)
{
	if (cmsg->cmsg_len < CMSG_LEN(sizeof(struct rds_get_mr_args)) ||
	    rm->m_rdma_cookie != 0)
		return -EINVAL;

	return __rds_rdma_map(rs, CMSG_DATA(cmsg), &rm->m_rdma_cookie, &rm->rdma.op_rdma_mr);
}

/*
 * Fill in rds_message for an atomic request.
 */
int rds_cmsg_atomic(struct rds_sock *rs, struct rds_message *rm,
		    struct cmsghdr *cmsg)
{
	struct page *page = NULL;
	struct rds_atomic_args *args;
	int ret = 0;

	if (cmsg->cmsg_len < CMSG_LEN(sizeof(struct rds_atomic_args))
	    || rm->atomic.op_active)
		return -EINVAL;

	args = CMSG_DATA(cmsg);

	/* Nonmasked & masked cmsg ops converted to masked hw ops */
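	/* For example, a plain fetch-and-add becomes a masked one whose
	 * nocarry_mask is 0 (carries propagate across all 64 bits), and a
	 * plain compare-and-swap gets all-ones compare and swap masks. */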
	switch (cmsg->cmsg_type) {
	case RDS_CMSG_ATOMIC_FADD:
		rm->atomic.op_type = RDS_ATOMIC_TYPE_FADD;
		rm->atomic.op_m_fadd.add = args->fadd.add;
		rm->atomic.op_m_fadd.nocarry_mask = 0;
		break;
	case RDS_CMSG_MASKED_ATOMIC_FADD:
		rm->atomic.op_type = RDS_ATOMIC_TYPE_FADD;
		rm->atomic.op_m_fadd.add = args->m_fadd.add;
		rm->atomic.op_m_fadd.nocarry_mask = args->m_fadd.nocarry_mask;
		break;
	case RDS_CMSG_ATOMIC_CSWP:
		rm->atomic.op_type = RDS_ATOMIC_TYPE_CSWP;
		rm->atomic.op_m_cswp.compare = args->cswp.compare;
		rm->atomic.op_m_cswp.swap = args->cswp.swap;
		rm->atomic.op_m_cswp.compare_mask = ~0;
		rm->atomic.op_m_cswp.swap_mask = ~0;
		break;
	case RDS_CMSG_MASKED_ATOMIC_CSWP:
		rm->atomic.op_type = RDS_ATOMIC_TYPE_CSWP;
		rm->atomic.op_m_cswp.compare = args->m_cswp.compare;
		rm->atomic.op_m_cswp.swap = args->m_cswp.swap;
		rm->atomic.op_m_cswp.compare_mask = args->m_cswp.compare_mask;
		rm->atomic.op_m_cswp.swap_mask = args->m_cswp.swap_mask;
		break;
	default:
		BUG(); /* should never happen */
	}

	rm->atomic.op_notify = !!(args->flags & RDS_RDMA_NOTIFY_ME);
	rm->atomic.op_silent = !!(args->flags & RDS_RDMA_SILENT);
	rm->atomic.op_active = 1;
	rm->atomic.op_recverr = rs->rs_recverr;
	rm->atomic.op_sg = rds_message_alloc_sgs(rm, 1);
	if (!rm->atomic.op_sg) {
		ret = -ENOMEM;
		goto err;
	}

	/* verify 8 byte-aligned */
	if (args->local_addr & 0x7) {
		ret = -EFAULT;
		goto err;
	}

	ret = rds_pin_pages(args->local_addr, 1, &page, 1);
	if (ret != 1)
		goto err;
	ret = 0;

	sg_set_page(rm->atomic.op_sg, page, 8, offset_in_page(args->local_addr));

	if (rm->atomic.op_notify || rm->atomic.op_recverr) {
		/* We allocate an uninitialized notifier here, because
		 * we don't want to do that in the completion handler. We
		 * would have to use GFP_ATOMIC there, and don't want to deal
		 * with failed allocations.
		 */
		rm->atomic.op_notifier = kmalloc(sizeof(*rm->atomic.op_notifier), GFP_KERNEL);
		if (!rm->atomic.op_notifier) {
			ret = -ENOMEM;
			goto err;
		}

		rm->atomic.op_notifier->n_user_token = args->user_token;
		rm->atomic.op_notifier->n_status = RDS_RDMA_SUCCESS;
	}

	rm->atomic.op_rkey = rds_rdma_cookie_key(args->cookie);
	rm->atomic.op_remote_addr = args->remote_addr + rds_rdma_cookie_offset(args->cookie);

	return ret;
err:
	if (page)
		put_page(page);
	kfree(rm->atomic.op_notifier);

	return ret;
}