/*
 * Copyright (c) 2005-2006 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Author: Tom Tucker <tom@opengridcomputing.com>
 */

#include <linux/sunrpc/debug.h>
#include <linux/sunrpc/rpc_rdma.h>
#include <linux/spinlock.h>
#include <asm/unaligned.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include <linux/sunrpc/svc_rdma.h>

#define RPCDBG_FACILITY RPCDBG_SVCXPRT
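/*
 * Receive path for the server-side RPC/RDMA transport: build the rq_arg
 * XDR buffer from RDMA_RECV completions and, when a request carries a
 * read list, pull the remaining data from the client with RDMA_READ
 * before handing the RPC to the server threads.
 */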

/*
 * Replace the pages in the rq_argpages array with the pages from the SGE in
 * the RDMA_RECV completion. The SGL should contain full pages up until the
 * last one.
 */
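/*
 * Note that the receive pages are adopted rather than copied: each page
 * that arrived in the RDMA_RECV is swapped into rqstp->rq_pages and the
 * page it displaces is released, so the payload is never memcpy'd.
 */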
static void rdma_build_arg_xdr(struct svc_rqst *rqstp,
                               struct svc_rdma_op_ctxt *ctxt,
                               u32 byte_count)
{
        struct page *page;
        u32 bc;
        int sge_no;

        /* Swap the page in the SGE with the page in argpages */
        page = ctxt->pages[0];
        put_page(rqstp->rq_pages[0]);
        rqstp->rq_pages[0] = page;

        /* Set up the XDR head */
        rqstp->rq_arg.head[0].iov_base = page_address(page);
        rqstp->rq_arg.head[0].iov_len = min(byte_count, ctxt->sge[0].length);
        rqstp->rq_arg.len = byte_count;
        rqstp->rq_arg.buflen = byte_count;

        /* Compute bytes past head in the SGL */
        bc = byte_count - rqstp->rq_arg.head[0].iov_len;

        /* If data remains, store it in the pagelist */
        rqstp->rq_arg.page_len = bc;
        rqstp->rq_arg.page_base = 0;
        rqstp->rq_arg.pages = &rqstp->rq_pages[1];
        sge_no = 1;
        while (bc && sge_no < ctxt->count) {
                page = ctxt->pages[sge_no];
                put_page(rqstp->rq_pages[sge_no]);
                rqstp->rq_pages[sge_no] = page;
                bc -= min(bc, ctxt->sge[sge_no].length);
                rqstp->rq_arg.buflen += ctxt->sge[sge_no].length;
                sge_no++;
        }
        rqstp->rq_respages = &rqstp->rq_pages[sge_no];

        /* We should never run out of SGE because the limit is defined to
         * support the max allowed RPC data length
         */
        BUG_ON(bc && (sge_no == ctxt->count));
        BUG_ON((rqstp->rq_arg.head[0].iov_len + rqstp->rq_arg.page_len)
               != byte_count);
        BUG_ON(rqstp->rq_arg.len != byte_count);

        /* If not all pages were used from the SGL, free the remaining ones */
        bc = sge_no;    /* reuse bc to remember how many SGEs were consumed */
        while (sge_no < ctxt->count) {
                page = ctxt->pages[sge_no++];
                put_page(page);
        }
        ctxt->count = bc;

        /* Set up tail */
        rqstp->rq_arg.tail[0].iov_base = NULL;
        rqstp->rq_arg.tail[0].iov_len = 0;
}

struct chunk_sge {
        int start;              /* sge no for this chunk */
        int count;              /* sge count for this chunk */
};

/* Encode a read-chunk-list as an array of IB SGE
 *
 * Assumptions:
 * - chunk[0]->position points to pages[0] at an offset of 0
 * - pages[] is not physically or virtually contiguous and consists of
 *   PAGE_SIZE elements.
 *
 * Output:
 * - sge array pointing into pages[] array.
 * - chunk_sge array specifying sge index and count for each
 *   chunk in the read list
 */
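/*
 * For illustration only (not part of the original source), assuming a
 * 4096-byte PAGE_SIZE: a read list containing a single 10000-byte chunk
 * maps to three SGEs -- pages[0] (4096 bytes), pages[1] (4096 bytes) and
 * pages[2] (1808 bytes) -- and produces
 * ch_sge_ary[0] = { .start = 0, .count = 3 }.
 */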
static int rdma_rcl_to_sge(struct svcxprt_rdma *xprt,
                           struct svc_rqst *rqstp,
                           struct svc_rdma_op_ctxt *head,
                           struct rpcrdma_msg *rmsgp,
                           struct ib_sge *sge,
                           struct chunk_sge *ch_sge_ary,
                           int ch_count,
                           int byte_count)
{
        int sge_no;
        int sge_bytes;
        int page_off;
        int page_no;
        int ch_bytes;
        int ch_no;
        struct rpcrdma_read_chunk *ch;

        sge_no = 0;
        page_no = 0;
        page_off = 0;
        ch = (struct rpcrdma_read_chunk *)&rmsgp->rm_body.rm_chunks[0];
        ch_no = 0;
        ch_bytes = ch->rc_target.rs_length;
        head->arg.head[0] = rqstp->rq_arg.head[0];
        head->arg.tail[0] = rqstp->rq_arg.tail[0];
        head->arg.pages = &head->pages[head->count];
        head->sge[0].length = head->count; /* save count of hdr pages */
        head->arg.page_base = 0;
        head->arg.page_len = ch_bytes;
        head->arg.len = rqstp->rq_arg.len + ch_bytes;
        head->arg.buflen = rqstp->rq_arg.buflen + ch_bytes;
        head->count++;
        ch_sge_ary[0].start = 0;
        while (byte_count) {
                sge_bytes = min_t(int, PAGE_SIZE - page_off, ch_bytes);
                sge[sge_no].addr =
                        ib_dma_map_page(xprt->sc_cm_id->device,
                                        rqstp->rq_arg.pages[page_no],
                                        page_off, sge_bytes,
                                        DMA_FROM_DEVICE);
                sge[sge_no].length = sge_bytes;
                sge[sge_no].lkey = xprt->sc_phys_mr->lkey;
                /*
                 * Don't bump head->count here because the same page
                 * may be used by multiple SGE.
                 */
                head->arg.pages[page_no] = rqstp->rq_arg.pages[page_no];
                rqstp->rq_respages = &rqstp->rq_arg.pages[page_no + 1];

                byte_count -= sge_bytes;
                ch_bytes -= sge_bytes;
                sge_no++;
                /*
                 * If all bytes for this chunk have been mapped to an
                 * SGE, move to the next SGE
                 */
                if (ch_bytes == 0) {
                        ch_sge_ary[ch_no].count =
                                sge_no - ch_sge_ary[ch_no].start;
                        ch_no++;
                        ch++;
                        ch_sge_ary[ch_no].start = sge_no;
                        ch_bytes = ch->rc_target.rs_length;
                        /* If bytes remain, account for the next chunk */
                        if (byte_count) {
                                head->arg.page_len += ch_bytes;
                                head->arg.len += ch_bytes;
                                head->arg.buflen += ch_bytes;
                        }
                }
                /*
                 * If this SGE consumed all of the page, move to the
                 * next page
                 */
                if ((sge_bytes + page_off) == PAGE_SIZE) {
                        page_no++;
                        page_off = 0;
                        /*
                         * If there are still bytes left to map, bump
                         * the page count
                         */
                        if (byte_count)
                                head->count++;
                } else
                        page_off += sge_bytes;
        }
        BUG_ON(byte_count != 0);
        return sge_no;
}

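/*
 * Record the SGEs just posted in the WR context and advance *sgl_offset
 * by the bytes they cover, so a follow-up RDMA_READ for the same chunk
 * resumes at the correct offset in the client's memory region.
 */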
static void rdma_set_ctxt_sge(struct svc_rdma_op_ctxt *ctxt,
                              struct ib_sge *sge,
                              u64 *sgl_offset,
                              int count)
{
        int i;

        ctxt->count = count;
        for (i = 0; i < count; i++) {
                ctxt->sge[i].addr = sge[i].addr;
                ctxt->sge[i].length = sge[i].length;
                *sgl_offset = *sgl_offset + sge[i].length;
        }
}

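/*
 * Limit the number of SGEs used by a single RDMA_READ: iWARP transports
 * are restricted here to one SGE per read WR; everything else may use up
 * to the device's sc_max_sge.
 */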
static int rdma_read_max_sge(struct svcxprt_rdma *xprt, int sge_count)
{
        if ((RDMA_TRANSPORT_IWARP ==
             rdma_node_get_transport(xprt->sc_cm_id->
                                     device->node_type))
            && sge_count > 1)
                return 1;
        else
                return min_t(int, sge_count, xprt->sc_max_sge);
}

/*
 * Use RDMA_READ to read data from the advertised client buffer into the
 * XDR stream starting at rq_arg.head[0].iov_base. Each chunk in the
 * array contains the following fields:
 * discrim      - '1'; this is not used for data placement
 * position     - the XDR stream offset (the same for every chunk)
 * handle       - RMR for the client memory region
 * length       - data transfer length
 * offset       - 64-bit tagged offset in the remote memory region
 *
 * On our side, we need to read into a pagelist. The first page immediately
 * follows the RPC header.
 *
 * This function returns:
 * 0 - No error and no read-list found.
 *
 * 1 - Successful read-list processing. The data is not yet in
 * the pagelist and therefore the RPC request must be deferred. The
 * I/O completion will enqueue the transport again and
 * svc_rdma_recvfrom will complete the request.
 *
 * <0 - Error processing/posting read-list.
 *
 * NOTE: The ctxt must not be touched after the last WR has been posted
 * because the I/O completion processing may occur on another
 * processor and free / modify the context. Do not touch it!
 */
static int rdma_read_xdr(struct svcxprt_rdma *xprt,
                         struct rpcrdma_msg *rmsgp,
                         struct svc_rqst *rqstp,
                         struct svc_rdma_op_ctxt *hdr_ctxt)
{
        struct ib_send_wr read_wr;
        int err = 0;
        int ch_no;
        struct ib_sge *sge;
        int ch_count;
        int byte_count;
        int sge_count;
        u64 sgl_offset;
        struct rpcrdma_read_chunk *ch;
        struct svc_rdma_op_ctxt *ctxt = NULL;
        struct svc_rdma_op_ctxt *tmp_sge_ctxt;
        struct svc_rdma_op_ctxt *tmp_ch_ctxt;
        struct chunk_sge *ch_sge_ary;

        /* If no read list is present, return 0 */
        ch = svc_rdma_get_read_chunk(rmsgp);
        if (!ch)
                return 0;

        /* Allocate temporary contexts to keep SGE */
        BUG_ON(sizeof(struct ib_sge) < sizeof(struct chunk_sge));
        tmp_sge_ctxt = svc_rdma_get_context(xprt);
        sge = tmp_sge_ctxt->sge;
        tmp_ch_ctxt = svc_rdma_get_context(xprt);
        ch_sge_ary = (struct chunk_sge *)tmp_ch_ctxt->sge;
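        /*
         * The two contexts above serve only as scratch space: one lends
         * its sge array for the ib_sge list, and the other's is recast as
         * the chunk_sge array. The BUG_ON above guarantees that a
         * chunk_sge fits inside an ib_sge slot.
         */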

        svc_rdma_rcl_chunk_counts(ch, &ch_count, &byte_count);
        sge_count = rdma_rcl_to_sge(xprt, rqstp, hdr_ctxt, rmsgp,
                                    sge, ch_sge_ary,
                                    ch_count, byte_count);
        sgl_offset = 0;
        ch_no = 0;

        for (ch = (struct rpcrdma_read_chunk *)&rmsgp->rm_body.rm_chunks[0];
             ch->rc_discrim != 0; ch++, ch_no++) {
next_sge:
                ctxt = svc_rdma_get_context(xprt);
                ctxt->direction = DMA_FROM_DEVICE;
                clear_bit(RDMACTXT_F_LAST_CTXT, &ctxt->flags);

                /* Prepare READ WR */
                memset(&read_wr, 0, sizeof read_wr);
                ctxt->wr_op = IB_WR_RDMA_READ;
                read_wr.wr_id = (unsigned long)ctxt;
                read_wr.opcode = IB_WR_RDMA_READ;
                read_wr.send_flags = IB_SEND_SIGNALED;
                read_wr.wr.rdma.rkey = ch->rc_target.rs_handle;
                read_wr.wr.rdma.remote_addr =
                        get_unaligned(&(ch->rc_target.rs_offset)) +
                        sgl_offset;
                read_wr.sg_list = &sge[ch_sge_ary[ch_no].start];
                read_wr.num_sge =
                        rdma_read_max_sge(xprt, ch_sge_ary[ch_no].count);
                rdma_set_ctxt_sge(ctxt, &sge[ch_sge_ary[ch_no].start],
                                  &sgl_offset,
                                  read_wr.num_sge);
                if (((ch+1)->rc_discrim == 0) &&
                    (read_wr.num_sge == ch_sge_ary[ch_no].count)) {
                        /*
                         * Mark the last RDMA_READ with a bit to
                         * indicate all RPC data has been fetched from
                         * the client and the RPC needs to be enqueued.
                         */
                        set_bit(RDMACTXT_F_LAST_CTXT, &ctxt->flags);
                        ctxt->read_hdr = hdr_ctxt;
                }
                /* Post the read */
                err = svc_rdma_send(xprt, &read_wr);
                if (err) {
                        printk(KERN_ERR "svcrdma: Error %d posting RDMA_READ\n",
                               err);
                        set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags);
                        svc_rdma_put_context(ctxt, 0);
                        goto out;
                }
                atomic_inc(&rdma_stat_read);

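                /*
                 * If the device could not take every SGE for this chunk
                 * in one WR, slide the window past the SGEs just posted
                 * and post another RDMA_READ for the remainder.
                 */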
                if (read_wr.num_sge < ch_sge_ary[ch_no].count) {
                        ch_sge_ary[ch_no].count -= read_wr.num_sge;
                        ch_sge_ary[ch_no].start += read_wr.num_sge;
                        goto next_sge;
                }
                sgl_offset = 0;
                err = 1;
        }

out:
        svc_rdma_put_context(tmp_sge_ctxt, 0);
        svc_rdma_put_context(tmp_ch_ctxt, 0);

        /* Detach arg pages. svc_recv will replenish them */
        for (ch_no = 0; &rqstp->rq_pages[ch_no] < rqstp->rq_respages; ch_no++)
                rqstp->rq_pages[ch_no] = NULL;
        /*
         * Detach res pages. svc_release must see an rq_resused count of
         * zero or it will attempt to put them.
         */
        while (rqstp->rq_resused)
                rqstp->rq_respages[--rqstp->rq_resused] = NULL;

        return err;
}

static int rdma_read_complete(struct svc_rqst *rqstp,
                              struct svc_rdma_op_ctxt *head)
{
        int page_no;
        int ret;

        BUG_ON(!head);

        /* Copy RPC pages */
        for (page_no = 0; page_no < head->count; page_no++) {
                put_page(rqstp->rq_pages[page_no]);
                rqstp->rq_pages[page_no] = head->pages[page_no];
        }
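        /*
         * head->sge[0].length was overloaded in rdma_rcl_to_sge to record
         * how many pages the RPC header consumed; use it to locate the
         * first page of read-list data.
         */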
        /* Point rq_arg.pages past header */
        rqstp->rq_arg.pages = &rqstp->rq_pages[head->sge[0].length];
        rqstp->rq_arg.page_len = head->arg.page_len;
        rqstp->rq_arg.page_base = head->arg.page_base;

        /* rq_respages starts after the last arg page */
        rqstp->rq_respages = &rqstp->rq_arg.pages[page_no];
        rqstp->rq_resused = 0;

        /* Rebuild rq_arg head and tail. */
        rqstp->rq_arg.head[0] = head->arg.head[0];
        rqstp->rq_arg.tail[0] = head->arg.tail[0];
        rqstp->rq_arg.len = head->arg.len;
        rqstp->rq_arg.buflen = head->arg.buflen;

        /* Free the context */
        svc_rdma_put_context(head, 0);

        /* XXX: What should this be? */
        rqstp->rq_prot = IPPROTO_MAX;
        svc_xprt_copy_addrs(rqstp, rqstp->rq_xprt);

        ret = rqstp->rq_arg.head[0].iov_len
                + rqstp->rq_arg.page_len
                + rqstp->rq_arg.tail[0].iov_len;
        dprintk("svcrdma: deferred read ret=%d, rq_arg.len=%d, "
                "rq_arg.head[0].iov_base=%p, rq_arg.head[0].iov_len=%zd\n",
                ret, rqstp->rq_arg.len, rqstp->rq_arg.head[0].iov_base,
                rqstp->rq_arg.head[0].iov_len);

        svc_xprt_received(rqstp->rq_xprt);
        return ret;
}

/*
 * Set up the rqstp thread context to point to the RQ buffer. If
 * necessary, pull additional data from the client with an RDMA_READ
 * request.
 */
int svc_rdma_recvfrom(struct svc_rqst *rqstp)
{
        struct svc_xprt *xprt = rqstp->rq_xprt;
        struct svcxprt_rdma *rdma_xprt =
                container_of(xprt, struct svcxprt_rdma, sc_xprt);
        struct svc_rdma_op_ctxt *ctxt = NULL;
        struct rpcrdma_msg *rmsgp;
        int ret = 0;
        int len;

        dprintk("svcrdma: rqstp=%p\n", rqstp);

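        /*
         * A request whose RDMA_READs have all completed takes priority:
         * it was deferred by rdma_read_xdr and is ready to be finished.
         */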
        spin_lock_bh(&rdma_xprt->sc_read_complete_lock);
        if (!list_empty(&rdma_xprt->sc_read_complete_q)) {
                ctxt = list_entry(rdma_xprt->sc_read_complete_q.next,
                                  struct svc_rdma_op_ctxt,
                                  dto_q);
                list_del_init(&ctxt->dto_q);
        }
        spin_unlock_bh(&rdma_xprt->sc_read_complete_lock);
        if (ctxt)
                return rdma_read_complete(rqstp, ctxt);

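        /*
         * No deferred read is pending; pull the next RDMA_RECV completion
         * off the dto queue, if any.
         */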
        spin_lock_bh(&rdma_xprt->sc_rq_dto_lock);
        if (!list_empty(&rdma_xprt->sc_rq_dto_q)) {
                ctxt = list_entry(rdma_xprt->sc_rq_dto_q.next,
                                  struct svc_rdma_op_ctxt,
                                  dto_q);
                list_del_init(&ctxt->dto_q);
        } else {
                atomic_inc(&rdma_stat_rq_starve);
                clear_bit(XPT_DATA, &xprt->xpt_flags);
                ctxt = NULL;
        }
        spin_unlock_bh(&rdma_xprt->sc_rq_dto_lock);
        if (!ctxt) {
                /* This is the EAGAIN path. The svc_recv routine will
                 * return -EAGAIN, the nfsd thread will call into
                 * svc_recv again, and we shouldn't be on the active
                 * transport list.
                 */
                if (test_bit(XPT_CLOSE, &xprt->xpt_flags))
                        goto close_out;

                BUG_ON(ret);
                goto out;
        }
        dprintk("svcrdma: processing ctxt=%p on xprt=%p, rqstp=%p, status=%d\n",
                ctxt, rdma_xprt, rqstp, ctxt->wc_status);
        BUG_ON(ctxt->wc_status != IB_WC_SUCCESS);
        atomic_inc(&rdma_stat_recv);

        /* Build up the XDR from the receive buffers. */
        rdma_build_arg_xdr(rqstp, ctxt, ctxt->byte_len);

        /* Decode the RDMA header. */
        len = svc_rdma_xdr_decode_req(&rmsgp, rqstp);
        rqstp->rq_xprt_hlen = len;

        /* If the request is invalid, reply with an error */
        if (len < 0) {
                if (len == -ENOSYS)
                        (void)svc_rdma_send_error(rdma_xprt, rmsgp, ERR_VERS);
                goto close_out;
        }

        /* Read read-list data. */
        ret = rdma_read_xdr(rdma_xprt, rmsgp, rqstp, ctxt);
        if (ret > 0) {
                /* read-list posted, defer until data received from client. */
                svc_xprt_received(xprt);
                return 0;
        }
        if (ret < 0) {
                /* Post of read-list failed, free context. */
                svc_rdma_put_context(ctxt, 1);
                return 0;
        }

        ret = rqstp->rq_arg.head[0].iov_len
                + rqstp->rq_arg.page_len
                + rqstp->rq_arg.tail[0].iov_len;
        svc_rdma_put_context(ctxt, 0);
out:
        dprintk("svcrdma: ret=%d, rq_arg.len=%d, "
                "rq_arg.head[0].iov_base=%p, rq_arg.head[0].iov_len=%zd\n",
                ret, rqstp->rq_arg.len,
                rqstp->rq_arg.head[0].iov_base,
                rqstp->rq_arg.head[0].iov_len);
        rqstp->rq_prot = IPPROTO_MAX;
        svc_xprt_copy_addrs(rqstp, xprt);
        svc_xprt_received(xprt);
        return ret;

close_out:
        if (ctxt)
                svc_rdma_put_context(ctxt, 1);
        dprintk("svcrdma: transport %p is closing\n", xprt);
        /*
         * Set the close bit and enqueue it. svc_recv will see the
         * close bit and call svc_xprt_delete
         */
        set_bit(XPT_CLOSE, &xprt->xpt_flags);
        svc_xprt_received(xprt);
        return 0;
}