/*
 * Copyright (c) 2004, 2005, 2006 Voltaire, Inc. All rights reserved.
 * Copyright (c) 2013-2014 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/scatterlist.h>

#include "iscsi_iser.h"

#define ISER_KMALLOC_THRESHOLD 0x20000 /* 128K - kmalloc limit */

/**
 * iser_start_rdma_unaligned_sg - copy an RDMA-unaligned scatterlist into a
 * single contiguous bounce buffer and DMA-map that buffer for RDMA.
 */
static int iser_start_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
					struct iser_data_buf *data,
					struct iser_data_buf *data_copy,
					enum iser_data_dir cmd_dir)
{
	struct ib_device *dev = iser_task->ib_conn->device->ib_device;
	struct scatterlist *sgl = (struct scatterlist *)data->buf;
	struct scatterlist *sg;
	char *mem = NULL;
	unsigned long cmd_data_len = 0;
	int dma_nents, i;

	for_each_sg(sgl, sg, data->size, i)
		cmd_data_len += ib_sg_dma_len(dev, sg);

	if (cmd_data_len > ISER_KMALLOC_THRESHOLD)
		mem = (void *)__get_free_pages(GFP_ATOMIC,
		      ilog2(roundup_pow_of_two(cmd_data_len)) - PAGE_SHIFT);
	else
		mem = kmalloc(cmd_data_len, GFP_ATOMIC);

	if (mem == NULL) {
		iser_err("Failed to allocate mem size %d %d for copying sglist\n",
			 data->size, (int)cmd_data_len);
		return -ENOMEM;
	}

	if (cmd_dir == ISER_DIR_OUT) {
		/* copy the unaligned sg into the buffer which is used for RDMA */
		int i;
		char *p, *from;

		sgl = (struct scatterlist *)data->buf;
		p = mem;
		for_each_sg(sgl, sg, data->size, i) {
			from = kmap_atomic(sg_page(sg));
			memcpy(p,
			       from + sg->offset,
			       sg->length);
			kunmap_atomic(from);
			p += sg->length;
		}
	}

	sg_init_one(&data_copy->sg_single, mem, cmd_data_len);
	data_copy->buf = &data_copy->sg_single;
	data_copy->size = 1;
	data_copy->copy_buf = mem;

	dma_nents = ib_dma_map_sg(dev, &data_copy->sg_single, 1,
				  (cmd_dir == ISER_DIR_OUT) ?
				  DMA_TO_DEVICE : DMA_FROM_DEVICE);
	BUG_ON(dma_nents == 0);

	data_copy->dma_nents = dma_nents;
	data_copy->data_len = cmd_data_len;

	return 0;
}

/**
 * iser_finalize_rdma_unaligned_sg - DMA-unmap the bounce buffer, copy it back
 * to the original unaligned scatterlist on reads, and free it.
 */
void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
				     struct iser_data_buf *data,
				     struct iser_data_buf *data_copy,
				     enum iser_data_dir cmd_dir)
{
	struct ib_device *dev;
	unsigned long cmd_data_len;

	dev = iser_task->ib_conn->device->ib_device;

	ib_dma_unmap_sg(dev, &data_copy->sg_single, 1,
			(cmd_dir == ISER_DIR_OUT) ?
			DMA_TO_DEVICE : DMA_FROM_DEVICE);

	if (cmd_dir == ISER_DIR_IN) {
		char *mem;
		struct scatterlist *sgl, *sg;
		unsigned char *p, *to;
		unsigned int sg_size;
		int i;

		/* copy back read RDMA to unaligned sg */
		mem = data_copy->copy_buf;

		sgl = (struct scatterlist *)data->buf;
		sg_size = data->size;

		p = mem;
		for_each_sg(sgl, sg, sg_size, i) {
			to = kmap_atomic(sg_page(sg));
			memcpy(to + sg->offset,
			       p,
			       sg->length);
			kunmap_atomic(to);
			p += sg->length;
		}
	}

	cmd_data_len = data->data_len;

	if (cmd_data_len > ISER_KMALLOC_THRESHOLD)
		free_pages((unsigned long)data_copy->copy_buf,
			   ilog2(roundup_pow_of_two(cmd_data_len)) - PAGE_SHIFT);
	else
		kfree(data_copy->copy_buf);

	data_copy->copy_buf = NULL;
}

#define IS_4K_ALIGNED(addr) ((((unsigned long)addr) & ~MASK_4K) == 0)

/**
 * iser_sg_to_page_vec - Translates scatterlist entries to physical addresses
 * and returns the length of the resulting physical address array (may be less
 * than the original due to possible compaction).
 *
 * We build a "page vec" under the assumption that the SG meets the RDMA
 * alignment requirements. Other than the first and last SG elements, all
 * the "internal" elements can be compacted into a list whose elements are
 * dma addresses of physical pages. The code also supports the unusual case
 * where several fragments of the same page are present in the SG as
 * consecutive elements. A single-entry SG is handled as well.
 */
static int iser_sg_to_page_vec(struct iser_data_buf *data,
			       struct ib_device *ibdev, u64 *pages,
			       int *offset, int *data_size)
{
	struct scatterlist *sg, *sgl = (struct scatterlist *)data->buf;
	u64 start_addr, end_addr, page, chunk_start = 0;
	unsigned long total_sz = 0;
	unsigned int dma_len;
	int i, new_chunk, cur_page, last_ent = data->dma_nents - 1;

	/* compute the offset of first element */
	*offset = (u64) sgl[0].offset & ~MASK_4K;

	new_chunk = 1;
	cur_page = 0;
	for_each_sg(sgl, sg, data->dma_nents, i) {
		start_addr = ib_sg_dma_address(ibdev, sg);
		if (new_chunk)
			chunk_start = start_addr;
		dma_len = ib_sg_dma_len(ibdev, sg);
		end_addr = start_addr + dma_len;
		total_sz += dma_len;

		/* collect page fragments until aligned or end of SG list */
		if (!IS_4K_ALIGNED(end_addr) && i < last_ent) {
			new_chunk = 0;
			continue;
		}
		new_chunk = 1;

		/* address of the first page in the contiguous chunk;
		   masking relevant for the very first SG entry,
		   which might be unaligned */
		page = chunk_start & MASK_4K;
		do {
			pages[cur_page++] = page;
			page += SIZE_4K;
		} while (page < end_addr);
	}

	*data_size = total_sz;
	iser_dbg("page_vec->data_size:%d cur_page %d\n",
		 *data_size, cur_page);
	return cur_page;
}

/**
 * iser_data_buf_aligned_len - Determines the maximal sub-list of a
 * scatter-gather list of memory buffers that is correctly aligned for RDMA,
 * and returns the number of entries in it. Supports the case where
 * consecutive SG elements are actually fragments of the same physical page.
 */
static int iser_data_buf_aligned_len(struct iser_data_buf *data,
				     struct ib_device *ibdev)
{
	struct scatterlist *sgl, *sg, *next_sg = NULL;
	u64 start_addr, end_addr;
	int i, ret_len, start_check = 0;

	if (data->dma_nents == 1)
		return 1;

	sgl = (struct scatterlist *)data->buf;
	start_addr = ib_sg_dma_address(ibdev, sgl);

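	/*
	 * Walk the mapped SG list and stop at the first boundary that breaks
	 * 4K alignment: an element may end unaligned only if the next element
	 * starts at that exact address (i.e. it is a fragment of the same
	 * page), otherwise it terminates the aligned prefix.
	 */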
	for_each_sg(sgl, sg, data->dma_nents, i) {
		if (start_check && !IS_4K_ALIGNED(start_addr))
			break;

		next_sg = sg_next(sg);
		if (!next_sg)
			break;

		end_addr = start_addr + ib_sg_dma_len(ibdev, sg);
		start_addr = ib_sg_dma_address(ibdev, next_sg);

		if (end_addr == start_addr) {
			start_check = 0;
			continue;
		} else
			start_check = 1;

		if (!IS_4K_ALIGNED(end_addr))
			break;
	}
	ret_len = (next_sg) ? i : i+1;
	iser_dbg("Found %d aligned entries out of %d in sg:0x%p\n",
		 ret_len, data->dma_nents, data);
	return ret_len;
}

static void iser_data_buf_dump(struct iser_data_buf *data,
			       struct ib_device *ibdev)
{
	struct scatterlist *sgl = (struct scatterlist *)data->buf;
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, data->dma_nents, i)
		iser_dbg("sg[%d] dma_addr:0x%lX page:0x%p "
			 "off:0x%x sz:0x%x dma_len:0x%x\n",
			 i, (unsigned long)ib_sg_dma_address(ibdev, sg),
			 sg_page(sg), sg->offset,
			 sg->length, ib_sg_dma_len(ibdev, sg));
}

static void iser_dump_page_vec(struct iser_page_vec *page_vec)
{
	int i;

	iser_err("page vec length %d data size %d\n",
		 page_vec->length, page_vec->data_size);
	for (i = 0; i < page_vec->length; i++)
		iser_err("%d %lx\n", i, (unsigned long)page_vec->pages[i]);
}

static void iser_page_vec_build(struct iser_data_buf *data,
				struct iser_page_vec *page_vec,
				struct ib_device *ibdev)
{
	int page_vec_len = 0;

	page_vec->length = 0;
	page_vec->offset = 0;

	iser_dbg("Translating sg sz: %d\n", data->dma_nents);
	page_vec_len = iser_sg_to_page_vec(data, ibdev, page_vec->pages,
					   &page_vec->offset,
					   &page_vec->data_size);
	iser_dbg("sg len %d page_vec_len %d\n", data->dma_nents, page_vec_len);

	page_vec->length = page_vec_len;

	if (page_vec_len * SIZE_4K < page_vec->data_size) {
		iser_err("page_vec too short to hold this SG\n");
		iser_data_buf_dump(data, ibdev);
		iser_dump_page_vec(page_vec);
		BUG();
	}
}

int iser_dma_map_task_data(struct iscsi_iser_task *iser_task,
			   struct iser_data_buf *data,
			   enum iser_data_dir iser_dir,
			   enum dma_data_direction dma_dir)
{
	struct ib_device *dev;

	iser_task->dir[iser_dir] = 1;
	dev = iser_task->ib_conn->device->ib_device;

	data->dma_nents = ib_dma_map_sg(dev, data->buf, data->size, dma_dir);
	if (data->dma_nents == 0) {
		iser_err("dma_map_sg failed!!!\n");
		return -EINVAL;
	}
	return 0;
}

void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task,
			      struct iser_data_buf *data)
{
	struct ib_device *dev;

	dev = iser_task->ib_conn->device->ib_device;
	ib_dma_unmap_sg(dev, data->buf, data->size, DMA_FROM_DEVICE);
}

static int fall_to_bounce_buf(struct iscsi_iser_task *iser_task,
			      struct ib_device *ibdev,
			      struct iser_data_buf *mem,
			      struct iser_data_buf *mem_copy,
			      enum iser_data_dir cmd_dir,
			      int aligned_len)
{
	struct iscsi_conn *iscsi_conn = iser_task->ib_conn->iscsi_conn;

	iscsi_conn->fmr_unalign_cnt++;
	iser_warn("rdma alignment violation (%d/%d aligned) or FMR not supported\n",
		  aligned_len, mem->size);

	if (iser_debug_level > 0)
		iser_data_buf_dump(mem, ibdev);

	/* unmap the command data before accessing it */
	iser_dma_unmap_task_data(iser_task, mem);

	/*
	 * allocate the copy buffer; if we are writing, also copy the
	 * unaligned scatterlist into it, then dma map the copy
	 */
	if (iser_start_rdma_unaligned_sg(iser_task, mem, mem_copy, cmd_dir) != 0)
		return -ENOMEM;

	return 0;
}

/**
 * iser_reg_rdma_mem_fmr - Registers memory intended for RDMA,
 * using FMR (if possible), obtaining an rkey and va
 *
 * returns 0 on success, errno code on failure
 */
int iser_reg_rdma_mem_fmr(struct iscsi_iser_task *iser_task,
			  enum iser_data_dir cmd_dir)
{
	struct iser_conn     *ib_conn = iser_task->ib_conn;
	struct iser_device   *device = ib_conn->device;
	struct ib_device     *ibdev = device->ib_device;
	struct iser_data_buf *mem = &iser_task->data[cmd_dir];
	struct iser_regd_buf *regd_buf;
	int aligned_len;
	int err;
	int i;
	struct scatterlist *sg;

	regd_buf = &iser_task->rdma_regd[cmd_dir];

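	/*
	 * if the buffer is not fully 4K aligned for RDMA, fall back to a
	 * contiguous bounce buffer and register that instead
	 */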
	aligned_len = iser_data_buf_aligned_len(mem, ibdev);
	if (aligned_len != mem->dma_nents) {
		err = fall_to_bounce_buf(iser_task, ibdev, mem,
					 &iser_task->data_copy[cmd_dir],
					 cmd_dir, aligned_len);
		if (err) {
			iser_err("failed to allocate bounce buffer\n");
			return err;
		}
		mem = &iser_task->data_copy[cmd_dir];
	}

	/* if there is a single dma entry, FMR is not needed */
	if (mem->dma_nents == 1) {
		sg = (struct scatterlist *)mem->buf;

		regd_buf->reg.lkey = device->mr->lkey;
		regd_buf->reg.rkey = device->mr->rkey;
		regd_buf->reg.len  = ib_sg_dma_len(ibdev, &sg[0]);
		regd_buf->reg.va   = ib_sg_dma_address(ibdev, &sg[0]);
		regd_buf->reg.is_mr = 0;

		iser_dbg("PHYSICAL Mem.register: lkey: 0x%08X rkey: 0x%08X "
			 "va: 0x%08lX sz: %ld]\n",
			 (unsigned int)regd_buf->reg.lkey,
			 (unsigned int)regd_buf->reg.rkey,
			 (unsigned long)regd_buf->reg.va,
			 (unsigned long)regd_buf->reg.len);
	} else { /* use FMR for multiple dma entries */
		iser_page_vec_build(mem, ib_conn->fmr.page_vec, ibdev);
		err = iser_reg_page_vec(ib_conn, ib_conn->fmr.page_vec,
					&regd_buf->reg);
		if (err && err != -EAGAIN) {
			iser_data_buf_dump(mem, ibdev);
			iser_err("mem->dma_nents = %d (dlength = 0x%x)\n",
				 mem->dma_nents,
				 ntoh24(iser_task->desc.iscsi_header.dlength));
			iser_err("page_vec: data_size = 0x%x, length = %d, offset = 0x%x\n",
				 ib_conn->fmr.page_vec->data_size,
				 ib_conn->fmr.page_vec->length,
				 ib_conn->fmr.page_vec->offset);
			for (i = 0; i < ib_conn->fmr.page_vec->length; i++)
				iser_err("page_vec[%d] = 0x%llx\n", i,
					 (unsigned long long)ib_conn->fmr.page_vec->pages[i]);
		}
		if (err)
			return err;
	}
	return 0;
}

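/* map a SCSI DIF protection type to the corresponding IB T10-DIF type */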
static inline enum ib_t10_dif_type
scsi2ib_prot_type(unsigned char prot_type)
{
	switch (prot_type) {
	case SCSI_PROT_DIF_TYPE0:
		return IB_T10DIF_NONE;
	case SCSI_PROT_DIF_TYPE1:
		return IB_T10DIF_TYPE1;
	case SCSI_PROT_DIF_TYPE2:
		return IB_T10DIF_TYPE2;
	case SCSI_PROT_DIF_TYPE3:
		return IB_T10DIF_TYPE3;
	default:
		return IB_T10DIF_NONE;
	}
}

static int
iser_set_sig_attrs(struct scsi_cmnd *sc, struct ib_sig_attrs *sig_attrs)
{
	unsigned char scsi_ptype = scsi_get_prot_type(sc);

	sig_attrs->mem.sig_type = IB_SIG_TYPE_T10_DIF;
	sig_attrs->wire.sig_type = IB_SIG_TYPE_T10_DIF;
	sig_attrs->mem.sig.dif.pi_interval = sc->device->sector_size;
	sig_attrs->wire.sig.dif.pi_interval = sc->device->sector_size;

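	/*
	 * mem describes the protection layout of the local buffer and wire
	 * the layout on the network: for INSERT/STRIP operations only one of
	 * the two domains carries DIF, for PASS operations both do
	 */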
	switch (scsi_get_prot_op(sc)) {
	case SCSI_PROT_WRITE_INSERT:
	case SCSI_PROT_READ_STRIP:
		sig_attrs->mem.sig.dif.type = IB_T10DIF_NONE;
		sig_attrs->wire.sig.dif.type = scsi2ib_prot_type(scsi_ptype);
		sig_attrs->wire.sig.dif.bg_type = IB_T10DIF_CRC;
		sig_attrs->wire.sig.dif.ref_tag = scsi_get_lba(sc) &
						  0xffffffff;
		break;
	case SCSI_PROT_READ_INSERT:
	case SCSI_PROT_WRITE_STRIP:
		sig_attrs->mem.sig.dif.type = scsi2ib_prot_type(scsi_ptype);
		sig_attrs->mem.sig.dif.bg_type = IB_T10DIF_CRC;
		sig_attrs->mem.sig.dif.ref_tag = scsi_get_lba(sc) &
						 0xffffffff;
		sig_attrs->wire.sig.dif.type = IB_T10DIF_NONE;
		break;
	case SCSI_PROT_READ_PASS:
	case SCSI_PROT_WRITE_PASS:
		sig_attrs->mem.sig.dif.type = scsi2ib_prot_type(scsi_ptype);
		sig_attrs->mem.sig.dif.bg_type = IB_T10DIF_CRC;
		sig_attrs->mem.sig.dif.ref_tag = scsi_get_lba(sc) &
						 0xffffffff;
		sig_attrs->wire.sig.dif.type = scsi2ib_prot_type(scsi_ptype);
		sig_attrs->wire.sig.dif.bg_type = IB_T10DIF_CRC;
		sig_attrs->wire.sig.dif.ref_tag = scsi_get_lba(sc) &
						  0xffffffff;
		break;
	default:
		iser_err("Unsupported PI operation %d\n",
			 scsi_get_prot_op(sc));
		return -EINVAL;
	}
	return 0;
}

static int
iser_set_prot_checks(struct scsi_cmnd *sc, u8 *mask)
{
	switch (scsi_get_prot_type(sc)) {
	case SCSI_PROT_DIF_TYPE0:
		*mask = 0x0;
		break;
	case SCSI_PROT_DIF_TYPE1:
	case SCSI_PROT_DIF_TYPE2:
		*mask = ISER_CHECK_GUARD | ISER_CHECK_REFTAG;
		break;
	case SCSI_PROT_DIF_TYPE3:
		*mask = ISER_CHECK_GUARD;
		break;
	default:
		iser_err("Unsupported protection type %d\n",
			 scsi_get_prot_type(sc));
		return -EINVAL;
	}

	return 0;
}

static int
iser_reg_sig_mr(struct iscsi_iser_task *iser_task,
		struct fast_reg_descriptor *desc, struct ib_sge *data_sge,
		struct ib_sge *prot_sge, struct ib_sge *sig_sge)
{
	struct iser_conn *ib_conn = iser_task->ib_conn;
	struct iser_pi_context *pi_ctx = desc->pi_ctx;
	struct ib_send_wr sig_wr, inv_wr;
	struct ib_send_wr *bad_wr, *wr = NULL;
	struct ib_sig_attrs sig_attrs;
	int ret;
	u32 key;

	memset(&sig_attrs, 0, sizeof(sig_attrs));
	ret = iser_set_sig_attrs(iser_task->sc, &sig_attrs);
	if (ret)
		goto err;

	ret = iser_set_prot_checks(iser_task->sc, &sig_attrs.check_mask);
	if (ret)
		goto err;

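	/*
	 * if the signature MR was consumed by a previous registration,
	 * post a local invalidate first and move to the next key value
	 */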
	if (!(desc->reg_indicators & ISER_SIG_KEY_VALID)) {
		memset(&inv_wr, 0, sizeof(inv_wr));
		inv_wr.opcode = IB_WR_LOCAL_INV;
		inv_wr.wr_id = ISER_FASTREG_LI_WRID;
		inv_wr.ex.invalidate_rkey = pi_ctx->sig_mr->rkey;
		wr = &inv_wr;
		/* Bump the key */
		key = (u8)(pi_ctx->sig_mr->rkey & 0x000000FF);
		ib_update_fast_reg_key(pi_ctx->sig_mr, ++key);
	}

	memset(&sig_wr, 0, sizeof(sig_wr));
	sig_wr.opcode = IB_WR_REG_SIG_MR;
	sig_wr.wr_id = ISER_FASTREG_LI_WRID;
	sig_wr.sg_list = data_sge;
	sig_wr.num_sge = 1;
	sig_wr.wr.sig_handover.sig_attrs = &sig_attrs;
	sig_wr.wr.sig_handover.sig_mr = pi_ctx->sig_mr;
	if (scsi_prot_sg_count(iser_task->sc))
		sig_wr.wr.sig_handover.prot = prot_sge;
	sig_wr.wr.sig_handover.access_flags = IB_ACCESS_LOCAL_WRITE |
					      IB_ACCESS_REMOTE_READ |
					      IB_ACCESS_REMOTE_WRITE;

	if (!wr)
		wr = &sig_wr;
	else
		wr->next = &sig_wr;

	ret = ib_post_send(ib_conn->qp, wr, &bad_wr);
	if (ret) {
		iser_err("reg_sig_mr failed, ret:%d\n", ret);
		goto err;
	}
	desc->reg_indicators &= ~ISER_SIG_KEY_VALID;

	sig_sge->lkey = pi_ctx->sig_mr->lkey;
	sig_sge->addr = 0;
	sig_sge->length = data_sge->length + prot_sge->length;
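	/*
	 * for INSERT/STRIP there is no prot SGE to account for, yet the wire
	 * domain carries an extra 8 bytes of DIF per sector of data
	 */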
	if (scsi_get_prot_op(iser_task->sc) == SCSI_PROT_WRITE_INSERT ||
	    scsi_get_prot_op(iser_task->sc) == SCSI_PROT_READ_STRIP) {
		sig_sge->length += (data_sge->length /
				   iser_task->sc->device->sector_size) * 8;
	}

	iser_dbg("sig_sge: addr: 0x%llx length: %u lkey: 0x%x\n",
		 sig_sge->addr, sig_sge->length,
		 sig_sge->lkey);
err:
	return ret;
}

static int iser_fast_reg_mr(struct iscsi_iser_task *iser_task,
			    struct iser_regd_buf *regd_buf,
			    struct iser_data_buf *mem,
			    enum iser_reg_indicator ind,
			    struct ib_sge *sge)
{
	struct fast_reg_descriptor *desc = regd_buf->reg.mem_h;
	struct iser_conn *ib_conn = iser_task->ib_conn;
	struct iser_device *device = ib_conn->device;
	struct ib_device *ibdev = device->ib_device;
	struct ib_mr *mr;
	struct ib_fast_reg_page_list *frpl;
	struct ib_send_wr fastreg_wr, inv_wr;
	struct ib_send_wr *bad_wr, *wr = NULL;
	u8 key;
	int ret, offset, size, plen;

	/* if there is a single dma entry, the dma mr suffices */
	if (mem->dma_nents == 1) {
		struct scatterlist *sg = (struct scatterlist *)mem->buf;

		sge->lkey = device->mr->lkey;
		sge->addr = ib_sg_dma_address(ibdev, &sg[0]);
		sge->length = ib_sg_dma_len(ibdev, &sg[0]);

		iser_dbg("Single DMA entry: lkey=0x%x, addr=0x%llx, length=0x%x\n",
			 sge->lkey, sge->addr, sge->length);
		return 0;
	}

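	/* register either the data or the protection buffer, as requested */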
	if (ind == ISER_DATA_KEY_VALID) {
		mr = desc->data_mr;
		frpl = desc->data_frpl;
	} else {
		mr = desc->pi_ctx->prot_mr;
		frpl = desc->pi_ctx->prot_frpl;
	}

	plen = iser_sg_to_page_vec(mem, device->ib_device, frpl->page_list,
				   &offset, &size);
	if (plen * SIZE_4K < size) {
		iser_err("fast reg page_list too short to hold this SG\n");
		return -EINVAL;
	}

	if (!(desc->reg_indicators & ind)) {
		memset(&inv_wr, 0, sizeof(inv_wr));
		inv_wr.wr_id = ISER_FASTREG_LI_WRID;
		inv_wr.opcode = IB_WR_LOCAL_INV;
		inv_wr.ex.invalidate_rkey = mr->rkey;
		wr = &inv_wr;
		/* Bump the key */
		key = (u8)(mr->rkey & 0x000000FF);
		ib_update_fast_reg_key(mr, ++key);
	}

	/* Prepare FASTREG WR */
	memset(&fastreg_wr, 0, sizeof(fastreg_wr));
	fastreg_wr.wr_id = ISER_FASTREG_LI_WRID;
	fastreg_wr.opcode = IB_WR_FAST_REG_MR;
	fastreg_wr.wr.fast_reg.iova_start = frpl->page_list[0] + offset;
	fastreg_wr.wr.fast_reg.page_list = frpl;
	fastreg_wr.wr.fast_reg.page_list_len = plen;
	fastreg_wr.wr.fast_reg.page_shift = SHIFT_4K;
	fastreg_wr.wr.fast_reg.length = size;
	fastreg_wr.wr.fast_reg.rkey = mr->rkey;
	fastreg_wr.wr.fast_reg.access_flags = (IB_ACCESS_LOCAL_WRITE  |
					       IB_ACCESS_REMOTE_WRITE |
					       IB_ACCESS_REMOTE_READ);

	if (!wr)
		wr = &fastreg_wr;
	else
		wr->next = &fastreg_wr;

	ret = ib_post_send(ib_conn->qp, wr, &bad_wr);
	if (ret) {
		iser_err("fast registration failed, ret:%d\n", ret);
		return ret;
	}
	desc->reg_indicators &= ~ind;

	sge->lkey = mr->lkey;
	sge->addr = frpl->page_list[0] + offset;
	sge->length = size;

	return ret;
}

/**
 * iser_reg_rdma_mem_fastreg - Registers memory intended for RDMA,
 * using a Fast Registration WR (if possible), obtaining an rkey and va
 *
 * returns 0 on success, errno code on failure
 */
int iser_reg_rdma_mem_fastreg(struct iscsi_iser_task *iser_task,
			      enum iser_data_dir cmd_dir)
{
	struct iser_conn *ib_conn = iser_task->ib_conn;
	struct iser_device *device = ib_conn->device;
	struct ib_device *ibdev = device->ib_device;
	struct iser_data_buf *mem = &iser_task->data[cmd_dir];
	struct iser_regd_buf *regd_buf = &iser_task->rdma_regd[cmd_dir];
	struct fast_reg_descriptor *desc = NULL;
	struct ib_sge data_sge;
	int err, aligned_len;
	unsigned long flags;

	aligned_len = iser_data_buf_aligned_len(mem, ibdev);
	if (aligned_len != mem->dma_nents) {
		err = fall_to_bounce_buf(iser_task, ibdev, mem,
					 &iser_task->data_copy[cmd_dir],
					 cmd_dir, aligned_len);
		if (err) {
			iser_err("failed to allocate bounce buffer\n");
			return err;
		}
		mem = &iser_task->data_copy[cmd_dir];
	}

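	/*
	 * a fastreg descriptor is needed whenever the device dma mr cannot
	 * describe the buffer on its own (more than one dma entry) or when
	 * protection information is involved
	 */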
	if (mem->dma_nents != 1 ||
	    scsi_get_prot_op(iser_task->sc) != SCSI_PROT_NORMAL) {
		spin_lock_irqsave(&ib_conn->lock, flags);
		desc = list_first_entry(&ib_conn->fastreg.pool,
					struct fast_reg_descriptor, list);
		list_del(&desc->list);
		spin_unlock_irqrestore(&ib_conn->lock, flags);
		regd_buf->reg.mem_h = desc;
	}

	err = iser_fast_reg_mr(iser_task, regd_buf, mem,
			       ISER_DATA_KEY_VALID, &data_sge);
	if (err)
		goto err_reg;

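	/*
	 * for protected commands, register the protection buffer (if any)
	 * and then bind data and protection under the signature MR
	 */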
	if (scsi_get_prot_op(iser_task->sc) != SCSI_PROT_NORMAL) {
		struct ib_sge prot_sge, sig_sge;

		memset(&prot_sge, 0, sizeof(prot_sge));
		if (scsi_prot_sg_count(iser_task->sc)) {
			mem = &iser_task->prot[cmd_dir];
			aligned_len = iser_data_buf_aligned_len(mem, ibdev);
			if (aligned_len != mem->dma_nents) {
				err = fall_to_bounce_buf(iser_task, ibdev, mem,
							 &iser_task->prot_copy[cmd_dir],
							 cmd_dir, aligned_len);
				if (err) {
					iser_err("failed to allocate bounce buffer\n");
					return err;
				}
				mem = &iser_task->prot_copy[cmd_dir];
			}

			err = iser_fast_reg_mr(iser_task, regd_buf, mem,
					       ISER_PROT_KEY_VALID, &prot_sge);
			if (err)
				goto err_reg;
		}

		err = iser_reg_sig_mr(iser_task, desc, &data_sge,
				      &prot_sge, &sig_sge);
		if (err) {
			iser_err("Failed to register signature mr\n");
			return err;
		}
		desc->reg_indicators |= ISER_FASTREG_PROTECTED;

		regd_buf->reg.lkey = sig_sge.lkey;
		regd_buf->reg.rkey = desc->pi_ctx->sig_mr->rkey;
		regd_buf->reg.va = sig_sge.addr;
		regd_buf->reg.len = sig_sge.length;
		regd_buf->reg.is_mr = 1;
	} else {
		if (desc) {
			regd_buf->reg.rkey = desc->data_mr->rkey;
			regd_buf->reg.is_mr = 1;
		} else {
			regd_buf->reg.rkey = device->mr->rkey;
			regd_buf->reg.is_mr = 0;
		}

		regd_buf->reg.lkey = data_sge.lkey;
		regd_buf->reg.va = data_sge.addr;
		regd_buf->reg.len = data_sge.length;
	}

	return 0;
err_reg:
	if (desc) {
		spin_lock_irqsave(&ib_conn->lock, flags);
		list_add_tail(&desc->list, &ib_conn->fastreg.pool);
		spin_unlock_irqrestore(&ib_conn->lock, flags);
	}

	return err;
}