/*
 * Copyright (c) 2004, 2005, 2006 Voltaire, Inc. All rights reserved.
 * Copyright (c) 2013 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *	- Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *	- Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/scatterlist.h>

#include "iscsi_iser.h"

#define ISER_KMALLOC_THRESHOLD 0x20000 /* 128K - kmalloc limit */

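/*
 * Illustrative note (not from the original source): above the threshold
 * the bounce buffer comes from __get_free_pages(), which takes a page
 * order rather than a byte count. For example, a 200 KiB command rounds
 * up to 256 KiB = 2^18, so ilog2(roundup_pow_of_two(len)) - PAGE_SHIFT
 * yields order 18 - 12 = 6 on 4 KiB pages, i.e. 64 contiguous pages.
 */
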
/**
 * iser_start_rdma_unaligned_sg - allocate a bounce buffer for a
 * scatterlist that violates the RDMA alignment requirements and, for
 * WRITE commands, copy the scatterlist data into it
 */
static int iser_start_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
					enum iser_data_dir cmd_dir)
{
	int dma_nents;
	struct ib_device *dev;
	char *mem = NULL;
	struct iser_data_buf *data = &iser_task->data[cmd_dir];
	unsigned long cmd_data_len = data->data_len;

	if (cmd_data_len > ISER_KMALLOC_THRESHOLD)
		mem = (void *)__get_free_pages(GFP_ATOMIC,
		      ilog2(roundup_pow_of_two(cmd_data_len)) - PAGE_SHIFT);
	else
		mem = kmalloc(cmd_data_len, GFP_ATOMIC);

	if (mem == NULL) {
		iser_err("Failed to allocate mem size %d %d for copying sglist\n",
			 data->size, (int)cmd_data_len);
		return -ENOMEM;
	}

	if (cmd_dir == ISER_DIR_OUT) {
		/* copy the unaligned sg into the buffer which is used for RDMA */
		struct scatterlist *sgl = (struct scatterlist *)data->buf;
		struct scatterlist *sg;
		int i;
		char *p, *from;

		p = mem;
		for_each_sg(sgl, sg, data->size, i) {
			from = kmap_atomic(sg_page(sg));
			memcpy(p,
			       from + sg->offset,
			       sg->length);
			kunmap_atomic(from);
			p += sg->length;
		}
	}

	sg_init_one(&iser_task->data_copy[cmd_dir].sg_single, mem, cmd_data_len);
	iser_task->data_copy[cmd_dir].buf =
		&iser_task->data_copy[cmd_dir].sg_single;
	iser_task->data_copy[cmd_dir].size = 1;

	iser_task->data_copy[cmd_dir].copy_buf = mem;

	dev = iser_task->iser_conn->ib_conn->device->ib_device;
	dma_nents = ib_dma_map_sg(dev,
				  &iser_task->data_copy[cmd_dir].sg_single,
				  1,
				  (cmd_dir == ISER_DIR_OUT) ?
				  DMA_TO_DEVICE : DMA_FROM_DEVICE);
	BUG_ON(dma_nents == 0);

	iser_task->data_copy[cmd_dir].dma_nents = dma_nents;
	return 0;
}

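/*
 * A successful call above is paired with iser_finalize_rdma_unaligned_sg()
 * below, which unmaps sg_single and, for ISER_DIR_IN, copies the RDMA'd
 * data back into the original unaligned scatterlist before freeing
 * copy_buf.
 */
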
/**
 * iser_finalize_rdma_unaligned_sg - unmap the bounce buffer and, for
 * READ commands, copy the received data back into the original
 * unaligned scatterlist; then free the bounce buffer
 */
void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
				     enum iser_data_dir cmd_dir)
{
	struct ib_device *dev;
	struct iser_data_buf *mem_copy;
	unsigned long cmd_data_len;

	dev = iser_task->iser_conn->ib_conn->device->ib_device;
	mem_copy = &iser_task->data_copy[cmd_dir];

	ib_dma_unmap_sg(dev, &mem_copy->sg_single, 1,
			(cmd_dir == ISER_DIR_OUT) ?
			DMA_TO_DEVICE : DMA_FROM_DEVICE);

	if (cmd_dir == ISER_DIR_IN) {
		char *mem;
		struct scatterlist *sgl, *sg;
		unsigned char *p, *to;
		unsigned int sg_size;
		int i;

		/* copy back read RDMA to unaligned sg */
		mem = mem_copy->copy_buf;

		sgl = (struct scatterlist *)iser_task->data[ISER_DIR_IN].buf;
		sg_size = iser_task->data[ISER_DIR_IN].size;

		p = mem;
		for_each_sg(sgl, sg, sg_size, i) {
			to = kmap_atomic(sg_page(sg));
			memcpy(to + sg->offset,
			       p,
			       sg->length);
			kunmap_atomic(to);
			p += sg->length;
		}
	}

	cmd_data_len = iser_task->data[cmd_dir].data_len;

	if (cmd_data_len > ISER_KMALLOC_THRESHOLD)
		free_pages((unsigned long)mem_copy->copy_buf,
			   ilog2(roundup_pow_of_two(cmd_data_len)) - PAGE_SHIFT);
	else
		kfree(mem_copy->copy_buf);

	mem_copy->copy_buf = NULL;
}

#define IS_4K_ALIGNED(addr) ((((unsigned long)addr) & ~MASK_4K) == 0)

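/*
 * Illustration (assumes MASK_4K == ~(SIZE_4K - 1) with SIZE_4K = 4096,
 * as in iscsi_iser.h): ~MASK_4K keeps only the low 12 bits, so
 * IS_4K_ALIGNED(0x12000) is true while IS_4K_ALIGNED(0x12010) is false
 * (0x12010 & 0xfff == 0x10).
 */
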
/**
 * iser_sg_to_page_vec - Translates scatterlist entries to physical addresses
 * and returns the length of the resulting physical address array (may be less
 * than the original due to possible compaction).
 *
 * We build a "page vec" under the assumption that the SG meets the RDMA
 * alignment requirements. Other than the first and last SG elements, all
 * the "internal" elements can be compacted into a list whose elements are
 * dma addresses of physical pages. The code also supports the weird case
 * where several fragments of the same page are present in the SG as
 * consecutive elements, and it handles single-entry SG lists.
 */

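/*
 * Worked example (illustration only): a mapped SG of two contiguous
 * entries [0x10000, len 0x3000] and [0x13000, len 0x1000] yields the
 * page vec {0x10000, 0x11000, 0x12000, 0x13000}: four 4K pages covering
 * one 16K chunk, regardless of how the SG entries happen to split it.
 */
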
static int iser_sg_to_page_vec(struct iser_data_buf *data,
			       struct iser_page_vec *page_vec,
			       struct ib_device *ibdev)
{
	struct scatterlist *sg, *sgl = (struct scatterlist *)data->buf;
	u64 start_addr, end_addr, page, chunk_start = 0;
	unsigned long total_sz = 0;
	unsigned int dma_len;
	int i, new_chunk, cur_page, last_ent = data->dma_nents - 1;

	/* compute the offset of first element */
	page_vec->offset = (u64) sgl[0].offset & ~MASK_4K;

	new_chunk = 1;
	cur_page = 0;
	for_each_sg(sgl, sg, data->dma_nents, i) {
		start_addr = ib_sg_dma_address(ibdev, sg);
		if (new_chunk)
			chunk_start = start_addr;
		dma_len = ib_sg_dma_len(ibdev, sg);
		end_addr = start_addr + dma_len;
		total_sz += dma_len;

		/* collect page fragments until aligned or end of SG list */
		if (!IS_4K_ALIGNED(end_addr) && i < last_ent) {
			new_chunk = 0;
			continue;
		}
		new_chunk = 1;

		/* address of the first page in the contiguous chunk;
		   masking relevant for the very first SG entry,
		   which might be unaligned */
		page = chunk_start & MASK_4K;
		do {
			page_vec->pages[cur_page++] = page;
			page += SIZE_4K;
		} while (page < end_addr);
	}

	page_vec->data_size = total_sz;
	iser_dbg("page_vec->data_size:%d cur_page %d\n", page_vec->data_size, cur_page);
	return cur_page;
}

/**
 * iser_data_buf_aligned_len - Tries to determine the maximal sub-list of a
 * scatter-gather list of memory buffers that is correctly aligned for RDMA,
 * and returns the number of entries which are aligned correctly. Supports
 * the case where consecutive SG elements are actually fragments of the same
 * physical page.
 */
static int iser_data_buf_aligned_len(struct iser_data_buf *data,
				     struct ib_device *ibdev)
{
	struct scatterlist *sgl, *sg, *next_sg = NULL;
	u64 start_addr, end_addr;
	int i, ret_len, start_check = 0;

	if (data->dma_nents == 1)
		return 1;

	sgl = (struct scatterlist *)data->buf;
	start_addr = ib_sg_dma_address(ibdev, sgl);

	for_each_sg(sgl, sg, data->dma_nents, i) {
		if (start_check && !IS_4K_ALIGNED(start_addr))
			break;

		next_sg = sg_next(sg);
		if (!next_sg)
			break;

		end_addr = start_addr + ib_sg_dma_len(ibdev, sg);
		start_addr = ib_sg_dma_address(ibdev, next_sg);

		if (end_addr == start_addr) {
			start_check = 0;
			continue;
		} else
			start_check = 1;

		if (!IS_4K_ALIGNED(end_addr))
			break;
	}
	ret_len = (next_sg) ? i : i + 1;
	iser_dbg("Found %d aligned entries out of %d in sg:0x%p\n",
		 ret_len, data->dma_nents, data);
	return ret_len;
}

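/*
 * Illustration (not from the original source): two mapped entries
 * [0x10000, len 0x1000] and [0x20000, len 0x1000] both count as aligned
 * even though they are discontiguous, because every boundary falls on a
 * 4K multiple; an entry ending mid-page, e.g. [0x10000, len 0x800]
 * followed by a non-adjacent entry, terminates the aligned run at once.
 */
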
static void iser_data_buf_dump(struct iser_data_buf *data,
			       struct ib_device *ibdev)
{
	struct scatterlist *sgl = (struct scatterlist *)data->buf;
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, data->dma_nents, i)
		iser_dbg("sg[%d] dma_addr:0x%lX page:0x%p "
			 "off:0x%x sz:0x%x dma_len:0x%x\n",
			 i, (unsigned long)ib_sg_dma_address(ibdev, sg),
			 sg_page(sg), sg->offset,
			 sg->length, ib_sg_dma_len(ibdev, sg));
}

static void iser_dump_page_vec(struct iser_page_vec *page_vec)
{
	int i;

	iser_err("page vec length %d data size %d\n",
		 page_vec->length, page_vec->data_size);
	for (i = 0; i < page_vec->length; i++)
		iser_err("%d %lx\n", i, (unsigned long)page_vec->pages[i]);
}

static void iser_page_vec_build(struct iser_data_buf *data,
				struct iser_page_vec *page_vec,
				struct ib_device *ibdev)
{
	int page_vec_len = 0;

	page_vec->length = 0;
	page_vec->offset = 0;

	iser_dbg("Translating sg sz: %d\n", data->dma_nents);
	page_vec_len = iser_sg_to_page_vec(data, page_vec, ibdev);
	iser_dbg("sg len %d page_vec_len %d\n", data->dma_nents, page_vec_len);

	page_vec->length = page_vec_len;

	if (page_vec_len * SIZE_4K < page_vec->data_size) {
		iser_err("page_vec too short to hold this SG\n");
		iser_data_buf_dump(data, ibdev);
		iser_dump_page_vec(page_vec);
		BUG();
	}
}

int iser_dma_map_task_data(struct iscsi_iser_task *iser_task,
			   struct iser_data_buf *data,
			   enum iser_data_dir iser_dir,
			   enum dma_data_direction dma_dir)
{
	struct ib_device *dev;

	iser_task->dir[iser_dir] = 1;
	dev = iser_task->iser_conn->ib_conn->device->ib_device;

	data->dma_nents = ib_dma_map_sg(dev, data->buf, data->size, dma_dir);
	if (data->dma_nents == 0) {
		iser_err("dma_map_sg failed!!!\n");
		return -EINVAL;
	}
	return 0;
}

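/*
 * Usage sketch (assumed caller shape, modeled on the command-setup path
 * in iser_initiator.c): a WRITE maps its payload with DMA_TO_DEVICE and
 * a READ with DMA_FROM_DEVICE, e.g.
 *
 *	err = iser_dma_map_task_data(iser_task, buf_out,
 *				     ISER_DIR_OUT, DMA_TO_DEVICE);
 *	if (err)
 *		return err;
 */
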
void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task)
{
	struct ib_device *dev;
	struct iser_data_buf *data;

	dev = iser_task->iser_conn->ib_conn->device->ib_device;

	if (iser_task->dir[ISER_DIR_IN]) {
		data = &iser_task->data[ISER_DIR_IN];
		ib_dma_unmap_sg(dev, data->buf, data->size, DMA_FROM_DEVICE);
	}

	if (iser_task->dir[ISER_DIR_OUT]) {
		data = &iser_task->data[ISER_DIR_OUT];
		ib_dma_unmap_sg(dev, data->buf, data->size, DMA_TO_DEVICE);
	}
}

/**
 * iser_reg_rdma_mem - Registers memory intended for RDMA,
 * obtaining rkey and va
 *
 * returns 0 on success, errno code on failure
 */
int iser_reg_rdma_mem(struct iscsi_iser_task *iser_task,
		      enum iser_data_dir cmd_dir)
{
	struct iscsi_conn *iscsi_conn = iser_task->iser_conn->iscsi_conn;
	struct iser_conn *ib_conn = iser_task->iser_conn->ib_conn;
	struct iser_device *device = ib_conn->device;
	struct ib_device *ibdev = device->ib_device;
	struct iser_data_buf *mem = &iser_task->data[cmd_dir];
	struct iser_regd_buf *regd_buf;
	int aligned_len;
	int err;
	int i;
	struct scatterlist *sg;

	regd_buf = &iser_task->rdma_regd[cmd_dir];

	aligned_len = iser_data_buf_aligned_len(mem, ibdev);
	if (aligned_len != mem->dma_nents ||
	    (!ib_conn->fmr_pool && mem->dma_nents > 1)) {
		iscsi_conn->fmr_unalign_cnt++;
		iser_dbg("rdma alignment violation (%d/%d aligned) or FMR not supported\n",
			 aligned_len, mem->size);

		if (iser_debug_level > 0)
			iser_data_buf_dump(mem, ibdev);

		/* unmap the command data before accessing it */
		iser_dma_unmap_task_data(iser_task);

		/* allocate a copy buffer; if we are writing, copy the
		 * unaligned scatterlist into it, then dma map the copy */
		if (iser_start_rdma_unaligned_sg(iser_task, cmd_dir) != 0)
			return -ENOMEM;
		mem = &iser_task->data_copy[cmd_dir];
	}

	/* if there is a single dma entry, FMR is not needed */
	if (mem->dma_nents == 1) {
		sg = (struct scatterlist *)mem->buf;

		regd_buf->reg.lkey = device->mr->lkey;
		regd_buf->reg.rkey = device->mr->rkey;
		regd_buf->reg.len = ib_sg_dma_len(ibdev, &sg[0]);
		regd_buf->reg.va = ib_sg_dma_address(ibdev, &sg[0]);
		regd_buf->reg.is_fmr = 0;

		iser_dbg("PHYSICAL Mem.register: lkey: 0x%08X rkey: 0x%08X "
			 "va: 0x%08lX sz: %ld\n",
			 (unsigned int)regd_buf->reg.lkey,
			 (unsigned int)regd_buf->reg.rkey,
			 (unsigned long)regd_buf->reg.va,
			 (unsigned long)regd_buf->reg.len);
	} else { /* use FMR for multiple dma entries */
		iser_page_vec_build(mem, ib_conn->page_vec, ibdev);
		err = iser_reg_page_vec(ib_conn, ib_conn->page_vec, &regd_buf->reg);
		if (err && err != -EAGAIN) {
			iser_data_buf_dump(mem, ibdev);
			iser_err("mem->dma_nents = %d (dlength = 0x%x)\n",
				 mem->dma_nents,
				 ntoh24(iser_task->desc.iscsi_header.dlength));
			iser_err("page_vec: data_size = 0x%x, length = %d, offset = 0x%x\n",
				 ib_conn->page_vec->data_size, ib_conn->page_vec->length,
				 ib_conn->page_vec->offset);
			for (i = 0; i < ib_conn->page_vec->length; i++)
				iser_err("page_vec[%d] = 0x%llx\n", i,
					 (unsigned long long)ib_conn->page_vec->pages[i]);
		}
		if (err)
			return err;
	}
	return 0;
}