/*
 * Copyright (c) 2006 Oracle. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 *  - Redistributions of source code must retain the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer.
 *
 *  - Redistributions in binary form must reproduce the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer in the documentation and/or other materials
 *    provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/export.h>

#include "rds.h"

static unsigned int rds_exthdr_size[__RDS_EXTHDR_MAX] = {
[RDS_EXTHDR_NONE]	= 0,
[RDS_EXTHDR_VERSION]	= sizeof(struct rds_ext_header_version),
[RDS_EXTHDR_RDMA]	= sizeof(struct rds_ext_header_rdma),
[RDS_EXTHDR_RDMA_DEST]	= sizeof(struct rds_ext_header_rdma_dest),
};


void rds_message_addref(struct rds_message *rm)
{
	rdsdebug("addref rm %p ref %d\n", rm, atomic_read(&rm->m_refcount));
	atomic_inc(&rm->m_refcount);
}
EXPORT_SYMBOL_GPL(rds_message_addref);

/*
 * This relies on dma_map_sg() not touching sg[].page during merging.
 */
static void rds_message_purge(struct rds_message *rm)
{
	unsigned long i;

	if (unlikely(test_bit(RDS_MSG_PAGEVEC, &rm->m_flags)))
		return;

	for (i = 0; i < rm->data.op_nents; i++) {
		rdsdebug("putting data page %p\n", (void *)sg_page(&rm->data.op_sg[i]));
		/* XXX will have to put_page for page refs */
		__free_page(sg_page(&rm->data.op_sg[i]));
	}
	rm->data.op_nents = 0;

	if (rm->rdma.op_active)
		rds_rdma_free_op(&rm->rdma);
	if (rm->rdma.op_rdma_mr)
		rds_mr_put(rm->rdma.op_rdma_mr);

	if (rm->atomic.op_active)
		rds_atomic_free_op(&rm->atomic);
	if (rm->atomic.op_rdma_mr)
		rds_mr_put(rm->atomic.op_rdma_mr);
}

void rds_message_put(struct rds_message *rm)
{
	rdsdebug("put rm %p ref %d\n", rm, atomic_read(&rm->m_refcount));
	WARN(!atomic_read(&rm->m_refcount), "danger refcount zero on %p\n", rm);
	if (atomic_dec_and_test(&rm->m_refcount)) {
		BUG_ON(!list_empty(&rm->m_sock_item));
		BUG_ON(!list_empty(&rm->m_conn_item));
		rds_message_purge(rm);

		kfree(rm);
	}
}
EXPORT_SYMBOL_GPL(rds_message_put);
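
/*
 * A sketch of the reference counting, based only on the code in this file:
 * rds_message_alloc() returns a message with m_refcount set to 1, callers
 * that keep the message on a queue are expected to hold their own reference
 * via rds_message_addref(), and the final rds_message_put() purges the
 * payload and frees the rds_message itself (the BUG_ONs above insist it has
 * already left the socket and connection lists by then).
 */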

void rds_message_populate_header(struct rds_header *hdr, __be16 sport,
				 __be16 dport, u64 seq)
{
	hdr->h_flags = 0;
	hdr->h_sport = sport;
	hdr->h_dport = dport;
	hdr->h_sequence = cpu_to_be64(seq);
	hdr->h_exthdr[0] = RDS_EXTHDR_NONE;
}
EXPORT_SYMBOL_GPL(rds_message_populate_header);

int rds_message_add_extension(struct rds_header *hdr, unsigned int type,
			      const void *data, unsigned int len)
{
	unsigned int ext_len = sizeof(u8) + len;
	unsigned char *dst;

	/* For now, refuse to add more than one extension header */
	if (hdr->h_exthdr[0] != RDS_EXTHDR_NONE)
		return 0;

	if (type >= __RDS_EXTHDR_MAX || len != rds_exthdr_size[type])
		return 0;

	if (ext_len >= RDS_HEADER_EXT_SPACE)
		return 0;
	dst = hdr->h_exthdr;

	*dst++ = type;
	memcpy(dst, data, len);

	dst[len] = RDS_EXTHDR_NONE;
	return 1;
}
EXPORT_SYMBOL_GPL(rds_message_add_extension);
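
/*
 * Usage sketch (illustrative; rkey and offset stand in for the caller's
 * values): rds_message_add_extension() returns 1 on success and 0 when the
 * type is unknown or the extension does not fit, so a sender can simply fall
 * back to a plain header on failure, e.g.
 *
 *	if (!rds_message_add_rdma_dest_extension(&rm->m_inc.i_hdr,
 *						 rkey, offset))
 *		... send without the extension ...
 *
 * rds_message_add_rdma_dest_extension() below is a thin wrapper that builds
 * the RDS_EXTHDR_RDMA_DEST payload and calls this function.
 */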
Andy Grover | 7875e18 | 2009-02-24 15:30:26 +0000 | [diff] [blame] | 131 | |
| 132 | /* |
| 133 | * If a message has extension headers, retrieve them here. |
| 134 | * Call like this: |
| 135 | * |
| 136 | * unsigned int pos = 0; |
| 137 | * |
| 138 | * while (1) { |
| 139 | * buflen = sizeof(buffer); |
| 140 | * type = rds_message_next_extension(hdr, &pos, buffer, &buflen); |
| 141 | * if (type == RDS_EXTHDR_NONE) |
| 142 | * break; |
| 143 | * ... |
| 144 | * } |
| 145 | */ |
| 146 | int rds_message_next_extension(struct rds_header *hdr, |
| 147 | unsigned int *pos, void *buf, unsigned int *buflen) |
| 148 | { |
| 149 | unsigned int offset, ext_type, ext_len; |
| 150 | u8 *src = hdr->h_exthdr; |
| 151 | |
| 152 | offset = *pos; |
| 153 | if (offset >= RDS_HEADER_EXT_SPACE) |
| 154 | goto none; |
| 155 | |
| 156 | /* Get the extension type and length. For now, the |
| 157 | * length is implied by the extension type. */ |
| 158 | ext_type = src[offset++]; |
| 159 | |
| 160 | if (ext_type == RDS_EXTHDR_NONE || ext_type >= __RDS_EXTHDR_MAX) |
| 161 | goto none; |
| 162 | ext_len = rds_exthdr_size[ext_type]; |
| 163 | if (offset + ext_len > RDS_HEADER_EXT_SPACE) |
| 164 | goto none; |
| 165 | |
| 166 | *pos = offset + ext_len; |
| 167 | if (ext_len < *buflen) |
| 168 | *buflen = ext_len; |
| 169 | memcpy(buf, src + offset, *buflen); |
| 170 | return ext_type; |
| 171 | |
| 172 | none: |
| 173 | *pos = RDS_HEADER_EXT_SPACE; |
| 174 | *buflen = 0; |
| 175 | return RDS_EXTHDR_NONE; |
| 176 | } |
| 177 | |
Andy Grover | 7875e18 | 2009-02-24 15:30:26 +0000 | [diff] [blame] | 178 | int rds_message_add_rdma_dest_extension(struct rds_header *hdr, u32 r_key, u32 offset) |
| 179 | { |
| 180 | struct rds_ext_header_rdma_dest ext_hdr; |
| 181 | |
| 182 | ext_hdr.h_rdma_rkey = cpu_to_be32(r_key); |
| 183 | ext_hdr.h_rdma_offset = cpu_to_be32(offset); |
| 184 | return rds_message_add_extension(hdr, RDS_EXTHDR_RDMA_DEST, &ext_hdr, sizeof(ext_hdr)); |
| 185 | } |
Andy Grover | 616b757 | 2009-08-21 12:28:32 +0000 | [diff] [blame] | 186 | EXPORT_SYMBOL_GPL(rds_message_add_rdma_dest_extension); |
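
/*
 * Receive-side sketch (illustrative; the real parsing happens in the receive
 * path, not in this file): a peer recovers the rkey and offset by walking the
 * extension area with rds_message_next_extension() above, e.g.
 *
 *	struct rds_ext_header_rdma_dest dest;
 *	unsigned int pos = 0, buflen = sizeof(dest);
 *
 *	if (rds_message_next_extension(hdr, &pos, &dest, &buflen) ==
 *	    RDS_EXTHDR_RDMA_DEST)
 *		rkey = be32_to_cpu(dest.h_rdma_rkey);
 */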

/*
 * Each rds_message is allocated with extra space for the scatterlist entries
 * rds ops will need.  This keeps the number of memory allocations to a
 * minimum: the message and its scatterlist pool come from a single
 * allocation, and each rds op grabs SGs from that pool when initializing its
 * part of the rds_message.
 */
struct rds_message *rds_message_alloc(unsigned int extra_len, gfp_t gfp)
{
	struct rds_message *rm;

	if (extra_len > KMALLOC_MAX_SIZE - sizeof(struct rds_message))
		return NULL;

	rm = kzalloc(sizeof(struct rds_message) + extra_len, gfp);
	if (!rm)
		goto out;

	rm->m_used_sgs = 0;
	rm->m_total_sgs = extra_len / sizeof(struct scatterlist);

	atomic_set(&rm->m_refcount, 1);
	INIT_LIST_HEAD(&rm->m_sock_item);
	INIT_LIST_HEAD(&rm->m_conn_item);
	spin_lock_init(&rm->m_rs_lock);
	init_waitqueue_head(&rm->m_flush_wait);

out:
	return rm;
}

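/*
 * Sizing note: extra_len is the room reserved for scatterlist entries placed
 * directly behind the struct rds_message, so a caller that needs num_sgs
 * entries passes
 *
 *	extra_len = num_sgs * sizeof(struct scatterlist);
 *
 * and rds_message_alloc() records that count in rm->m_total_sgs.
 * rds_message_map_pages() below sizes its allocation exactly this way.
 */
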
/*
 * RDS ops use this to grab SG entries from the rm's sg pool.
 */
struct scatterlist *rds_message_alloc_sgs(struct rds_message *rm, int nents)
{
	struct scatterlist *sg_first = (struct scatterlist *) &rm[1];
	struct scatterlist *sg_ret;

	WARN_ON(rm->m_used_sgs + nents > rm->m_total_sgs);
	WARN_ON(!nents);

	if (rm->m_used_sgs + nents > rm->m_total_sgs)
		return NULL;

	sg_ret = &sg_first[rm->m_used_sgs];
	sg_init_table(sg_ret, nents);
	rm->m_used_sgs += nents;

	return sg_ret;
}
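
/*
 * Typical call pattern (mirroring rds_message_map_pages() below): grab the
 * entries and treat a NULL return as "the message was allocated with too
 * little extra space":
 *
 *	rm->data.op_sg = rds_message_alloc_sgs(rm, num_sgs);
 *	if (!rm->data.op_sg) {
 *		rds_message_put(rm);
 *		return ERR_PTR(-ENOMEM);
 *	}
 */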

struct rds_message *rds_message_map_pages(unsigned long *page_addrs, unsigned int total_len)
{
	struct rds_message *rm;
	unsigned int i;
	int num_sgs = ceil(total_len, PAGE_SIZE);
	int extra_bytes = num_sgs * sizeof(struct scatterlist);

	rm = rds_message_alloc(extra_bytes, GFP_NOWAIT);
	if (!rm)
		return ERR_PTR(-ENOMEM);

	set_bit(RDS_MSG_PAGEVEC, &rm->m_flags);
	rm->m_inc.i_hdr.h_len = cpu_to_be32(total_len);
	rm->data.op_nents = ceil(total_len, PAGE_SIZE);
	rm->data.op_sg = rds_message_alloc_sgs(rm, num_sgs);
	if (!rm->data.op_sg) {
		rds_message_put(rm);
		return ERR_PTR(-ENOMEM);
	}

	for (i = 0; i < rm->data.op_nents; ++i) {
		sg_set_page(&rm->data.op_sg[i],
			    virt_to_page(page_addrs[i]),
			    PAGE_SIZE, 0);
	}

	return rm;
}

int rds_message_copy_from_user(struct rds_message *rm, struct iov_iter *from)
{
	unsigned long to_copy, nbytes;
	unsigned long sg_off;
	struct scatterlist *sg;
	int ret = 0;

	rm->m_inc.i_hdr.h_len = cpu_to_be32(iov_iter_count(from));

	/*
	 * now allocate and copy in the data payload.
	 */
	sg = rm->data.op_sg;
	sg_off = 0; /* Dear gcc, sg->page will be null from kzalloc. */

	while (iov_iter_count(from)) {
		if (!sg_page(sg)) {
			ret = rds_page_remainder_alloc(sg, iov_iter_count(from),
						       GFP_HIGHUSER);
			if (ret)
				return ret;
			rm->data.op_nents++;
			sg_off = 0;
		}

		to_copy = min_t(unsigned long, iov_iter_count(from),
				sg->length - sg_off);

		rds_stats_add(s_copy_from_user, to_copy);
		nbytes = copy_page_from_iter(sg_page(sg), sg->offset + sg_off,
					     to_copy, from);
		if (nbytes != to_copy)
			return -EFAULT;

		sg_off += to_copy;

		if (sg_off == sg->length)
			sg++;
	}

	return ret;
}
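
/*
 * Note on the copy loop above: pages behind rm->data.op_sg are allocated
 * lazily with rds_page_remainder_alloc() as the iterator drains, and
 * data.op_nents is bumped for every entry that receives a page, so even a
 * partially filled message can be torn down cleanly by rds_message_purge().
 */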

int rds_message_inc_copy_to_user(struct rds_incoming *inc, struct iov_iter *to)
{
	struct rds_message *rm;
	struct scatterlist *sg;
	unsigned long to_copy;
	unsigned long vec_off;
	int copied;
	int ret;
	u32 len;

	rm = container_of(inc, struct rds_message, m_inc);
	len = be32_to_cpu(rm->m_inc.i_hdr.h_len);

	sg = rm->data.op_sg;
	vec_off = 0;
	copied = 0;

	while (iov_iter_count(to) && copied < len) {
		to_copy = min_t(unsigned long, iov_iter_count(to),
				sg->length - vec_off);
		to_copy = min_t(unsigned long, to_copy, len - copied);

		rds_stats_add(s_copy_to_user, to_copy);
		ret = copy_page_to_iter(sg_page(sg), sg->offset + vec_off,
					to_copy, to);
		if (ret != to_copy)
			return -EFAULT;

		vec_off += to_copy;
		copied += to_copy;

		if (vec_off == sg->length) {
			vec_off = 0;
			sg++;
		}
	}

	return copied;
}

/*
 * If the message is still on the send queue, wait until the transport
 * is done with it. This is particularly important for RDMA operations.
 */
void rds_message_wait(struct rds_message *rm)
{
	wait_event_interruptible(rm->m_flush_wait,
				 !test_bit(RDS_MSG_MAPPED, &rm->m_flags));
}

void rds_message_unmapped(struct rds_message *rm)
{
	clear_bit(RDS_MSG_MAPPED, &rm->m_flags);
	wake_up_interruptible(&rm->m_flush_wait);
}
EXPORT_SYMBOL_GPL(rds_message_unmapped);
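
/*
 * Pairing sketch (an assumption about the transports, which live outside this
 * file): a transport keeps RDS_MSG_MAPPED set while the message is mapped for
 * DMA and calls rds_message_unmapped() from its completion path; that wake-up
 * is what releases a sender sleeping in rds_message_wait() above.
 */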