/*
 * Copyright (c) 2004, 2005, 2006 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *	- Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *	- Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/scatterlist.h>

#include "iscsi_iser.h"

#define ISER_KMALLOC_THRESHOLD 0x20000 /* 128K - kmalloc limit */
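
/*
 * Sizing note for the copy ("bounce") buffer allocated further below (a
 * worked example with assumed numbers, not extra logic): for a 200KB
 * command, roundup_pow_of_two() gives 256KB, ilog2() of that is 18, and
 * with 4K pages (PAGE_SHIFT == 12) __get_free_pages() is asked for an
 * order-6, i.e. 64-page/256KB, block.  Requests at or below the 128K
 * threshold are served by plain kmalloc() instead.
 */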

/**
 * iser_regd_buff_release - Decrements the reference count of the
 * registered buffer and releases it when the count reaches zero.
 *
 * returns 0 if released, 1 if deferred
 */
int iser_regd_buff_release(struct iser_regd_buf *regd_buf)
{
	struct ib_device *dev;

	if ((atomic_read(&regd_buf->ref_count) == 0) ||
	    atomic_dec_and_test(&regd_buf->ref_count)) {
		/* if we used the dma mr, unreg is just NOP */
		if (regd_buf->reg.is_fmr)
			iser_unreg_mem(&regd_buf->reg);

		if (regd_buf->dma_addr) {
			dev = regd_buf->device->ib_device;
			ib_dma_unmap_single(dev,
					    regd_buf->dma_addr,
					    regd_buf->data_size,
					    regd_buf->direction);
		}
		/* else this regd buf is associated with a task for which */
		/* we dma_unmap_single/sg later                           */
		return 0;
	} else {
		iser_dbg("Release deferred, regd.buff: 0x%p\n", regd_buf);
		return 1;
	}
}

/**
 * iser_reg_single - fills registered buffer descriptor with
 *		     registration information
 */
void iser_reg_single(struct iser_device *device,
		     struct iser_regd_buf *regd_buf,
		     enum dma_data_direction direction)
{
	u64 dma_addr;

	dma_addr = ib_dma_map_single(device->ib_device,
				     regd_buf->virt_addr,
				     regd_buf->data_size, direction);
	BUG_ON(ib_dma_mapping_error(device->ib_device, dma_addr));

	regd_buf->reg.lkey = device->mr->lkey;
	regd_buf->reg.len  = regd_buf->data_size;
	regd_buf->reg.va   = dma_addr;
	regd_buf->reg.is_fmr = 0;

	regd_buf->dma_addr  = dma_addr;
	regd_buf->direction = direction;
}

/**
 * iser_start_rdma_unaligned_sg - allocates a contiguous copy ("bounce")
 * buffer for a scatterlist that violates the RDMA alignment rules, copies
 * WRITE data into it, and DMA-maps it as a single entry.
 */
static int iser_start_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
					enum iser_data_dir cmd_dir)
{
	int dma_nents;
	struct ib_device *dev;
	char *mem = NULL;
	struct iser_data_buf *data = &iser_task->data[cmd_dir];
	unsigned long cmd_data_len = data->data_len;

	if (cmd_data_len > ISER_KMALLOC_THRESHOLD)
		mem = (void *)__get_free_pages(GFP_NOIO,
		      ilog2(roundup_pow_of_two(cmd_data_len)) - PAGE_SHIFT);
	else
		mem = kmalloc(cmd_data_len, GFP_NOIO);

	if (mem == NULL) {
		iser_err("Failed to allocate mem size %d %d for copying sglist\n",
			 data->size, (int)cmd_data_len);
		return -ENOMEM;
	}

	if (cmd_dir == ISER_DIR_OUT) {
		/* copy the unaligned sg into the buffer which is used for RDMA */
		struct scatterlist *sgl = (struct scatterlist *)data->buf;
		struct scatterlist *sg;
		int i;
		char *p, *from;

		p = mem;
		for_each_sg(sgl, sg, data->size, i) {
			from = kmap_atomic(sg_page(sg), KM_USER0);
			memcpy(p,
			       from + sg->offset,
			       sg->length);
			kunmap_atomic(from, KM_USER0);
			p += sg->length;
		}
	}

	sg_init_one(&iser_task->data_copy[cmd_dir].sg_single, mem, cmd_data_len);
	iser_task->data_copy[cmd_dir].buf  =
		&iser_task->data_copy[cmd_dir].sg_single;
	iser_task->data_copy[cmd_dir].size = 1;

	iser_task->data_copy[cmd_dir].copy_buf = mem;

	dev = iser_task->iser_conn->ib_conn->device->ib_device;
	dma_nents = ib_dma_map_sg(dev,
				  &iser_task->data_copy[cmd_dir].sg_single,
				  1,
				  (cmd_dir == ISER_DIR_OUT) ?
				  DMA_TO_DEVICE : DMA_FROM_DEVICE);
	BUG_ON(dma_nents == 0);

	iser_task->data_copy[cmd_dir].dma_nents = dma_nents;
	return 0;
}
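
/*
 * Bounce-buffer lifecycle, in short (a summary of the two functions, not
 * additional behavior): iser_start_rdma_unaligned_sg() above stages WRITE
 * payloads into the single contiguous copy buffer before the transfer,
 * while iser_finalize_rdma_unaligned_sg() below copies READ payloads back
 * out to the original scatterlist and releases the copy buffer.
 */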

/**
 * iser_finalize_rdma_unaligned_sg - for a READ, copies the data back from
 * the contiguous copy buffer to the original unaligned scatterlist, then
 * unmaps and frees the copy buffer.
 */
void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
				     enum iser_data_dir cmd_dir)
{
	struct ib_device *dev;
	struct iser_data_buf *mem_copy;
	unsigned long cmd_data_len;

	dev = iser_task->iser_conn->ib_conn->device->ib_device;
	mem_copy = &iser_task->data_copy[cmd_dir];

	ib_dma_unmap_sg(dev, &mem_copy->sg_single, 1,
			(cmd_dir == ISER_DIR_OUT) ?
			DMA_TO_DEVICE : DMA_FROM_DEVICE);

	if (cmd_dir == ISER_DIR_IN) {
		char *mem;
		struct scatterlist *sgl, *sg;
		unsigned char *p, *to;
		unsigned int sg_size;
		int i;

		/* copy back read RDMA to unaligned sg */
		mem = mem_copy->copy_buf;

		sgl = (struct scatterlist *)iser_task->data[ISER_DIR_IN].buf;
		sg_size = iser_task->data[ISER_DIR_IN].size;

		p = mem;
		for_each_sg(sgl, sg, sg_size, i) {
			to = kmap_atomic(sg_page(sg), KM_SOFTIRQ0);
			memcpy(to + sg->offset,
			       p,
			       sg->length);
			kunmap_atomic(to, KM_SOFTIRQ0);
			p += sg->length;
		}
	}

	cmd_data_len = iser_task->data[cmd_dir].data_len;

	if (cmd_data_len > ISER_KMALLOC_THRESHOLD)
		free_pages((unsigned long)mem_copy->copy_buf,
			   ilog2(roundup_pow_of_two(cmd_data_len)) - PAGE_SHIFT);
	else
		kfree(mem_copy->copy_buf);

	mem_copy->copy_buf = NULL;
}

/**
 * iser_sg_to_page_vec - Translates scatterlist entries to physical addresses
 * and returns the length of the resulting physical address array (may be
 * less than the original due to possible compaction).
 *
 * We build a "page vec" under the assumption that the SG meets the RDMA
 * alignment requirements.  Other than the first and last SG elements, all
 * the "internal" elements can be compacted into a list whose elements are
 * dma addresses of physical pages.  The code also supports the odd case
 * where several fragments of the same page are present in the SG as
 * consecutive elements, and it handles a single-entry SG as well.
 */
static int iser_sg_to_page_vec(struct iser_data_buf *data,
			       struct iser_page_vec *page_vec,
			       struct ib_device *ibdev)
{
	struct scatterlist *sgl = (struct scatterlist *)data->buf;
	struct scatterlist *sg;
	u64 first_addr, last_addr, page;
	int end_aligned;
	unsigned int cur_page = 0;
	unsigned long total_sz = 0;
	int i;

	/* compute the offset of first element */
	page_vec->offset = (u64) sgl[0].offset & ~MASK_4K;

	for_each_sg(sgl, sg, data->dma_nents, i) {
		unsigned int dma_len = ib_sg_dma_len(ibdev, sg);

		total_sz += dma_len;

		first_addr = ib_sg_dma_address(ibdev, sg);
		last_addr  = first_addr + dma_len;

		end_aligned = !(last_addr & ~MASK_4K);

		/* continue to collect page fragments till aligned or SG ends */
		while (!end_aligned && (i + 1 < data->dma_nents)) {
			sg = sg_next(sg);
			i++;
			dma_len = ib_sg_dma_len(ibdev, sg);
			total_sz += dma_len;
			last_addr = ib_sg_dma_address(ibdev, sg) + dma_len;
			end_aligned = !(last_addr & ~MASK_4K);
		}

		/* handle the 1st page in the 1st DMA element */
		if (cur_page == 0) {
			page = first_addr & MASK_4K;
			page_vec->pages[cur_page] = page;
			cur_page++;
			page += SIZE_4K;
		} else
			page = first_addr;

		for (; page < last_addr; page += SIZE_4K) {
			page_vec->pages[cur_page] = page;
			cur_page++;
		}
	}
	page_vec->data_size = total_sz;
	iser_dbg("page_vec->data_size:%d cur_page %d\n",
		 page_vec->data_size, cur_page);
	return cur_page;
}
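
/*
 * Worked example for iser_sg_to_page_vec() (assumed addresses, with
 * SIZE_4K == 4096 and the first element starting at offset 0): a mapped
 * SG of two elements, 8192 bytes at dma address 0x10200000 followed by
 * 4096 bytes at 0x10202000, yields page_vec->offset = 0, data_size =
 * 12288 and pages[] = {0x10200000, 0x10201000, 0x10202000} - one entry
 * per 4K page covered by the data.
 */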

#define IS_4K_ALIGNED(addr)	((((unsigned long)addr) & ~MASK_4K) == 0)
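/* e.g. with MASK_4K clearing the low 12 bits, IS_4K_ALIGNED(0x10201000)
 * holds while IS_4K_ALIGNED(0x10201800) does not (assumed addresses) */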

/**
 * iser_data_buf_aligned_len - Tries to determine the maximal sub-list of a
 * scatter-gather list of memory buffers that is correctly aligned for RDMA,
 * and returns the number of entries which are aligned correctly.  Supports
 * the case where consecutive SG elements are actually fragments of the same
 * physical page.
 */
static unsigned int iser_data_buf_aligned_len(struct iser_data_buf *data,
					      struct ib_device *ibdev)
{
	struct scatterlist *sgl, *sg;
	u64 end_addr, next_addr;
	int i, cnt;
	unsigned int ret_len = 0;

	sgl = (struct scatterlist *)data->buf;

	cnt = 0;
	for_each_sg(sgl, sg, data->dma_nents, i) {
		/* iser_dbg("Checking sg iobuf [%d]: phys=0x%08lX "
		   "offset: %ld sz: %ld\n", i,
		   (unsigned long)sg_phys(sg),
		   (unsigned long)sg->offset,
		   (unsigned long)sg->length); */
		end_addr = ib_sg_dma_address(ibdev, sg) +
			   ib_sg_dma_len(ibdev, sg);
		/* iser_dbg("Checking sg iobuf end address "
		   "0x%08lX\n", end_addr); */
		if (i + 1 < data->dma_nents) {
			next_addr = ib_sg_dma_address(ibdev, sg_next(sg));
			/* are i, i+1 fragments of the same page? */
			if (end_addr == next_addr) {
				cnt++;
				continue;
			} else if (!IS_4K_ALIGNED(end_addr)) {
				ret_len = cnt + 1;
				break;
			}
		}
		cnt++;
	}
	if (i == data->dma_nents)
		ret_len = cnt;	/* loop ended */
	iser_dbg("Found %d aligned entries out of %d in sg:0x%p\n",
		 ret_len, data->dma_nents, data);
	return ret_len;
}
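
/*
 * The alignment rule the function above enforces, in short: an element
 * that ends off a 4K boundary terminates the aligned run, unless the next
 * element continues at the very same dma address (fragments of one page);
 * the offending element itself may still be counted as the run's last
 * entry, which is why ret_len is cnt + 1 at the break.
 */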

static void iser_data_buf_dump(struct iser_data_buf *data,
			       struct ib_device *ibdev)
{
	struct scatterlist *sgl = (struct scatterlist *)data->buf;
	struct scatterlist *sg;
	int i;

	if (iser_debug_level == 0)
		return;

	for_each_sg(sgl, sg, data->dma_nents, i)
		iser_warn("sg[%d] dma_addr:0x%lX page:0x%p "
			  "off:0x%x sz:0x%x dma_len:0x%x\n",
			  i, (unsigned long)ib_sg_dma_address(ibdev, sg),
			  sg_page(sg), sg->offset,
			  sg->length, ib_sg_dma_len(ibdev, sg));
}

static void iser_dump_page_vec(struct iser_page_vec *page_vec)
{
	int i;

	iser_err("page vec length %d data size %d\n",
		 page_vec->length, page_vec->data_size);
	for (i = 0; i < page_vec->length; i++)
		iser_err("%d %lx\n", i, (unsigned long)page_vec->pages[i]);
}

static void iser_page_vec_build(struct iser_data_buf *data,
				struct iser_page_vec *page_vec,
				struct ib_device *ibdev)
{
	int page_vec_len = 0;

	page_vec->length = 0;
	page_vec->offset = 0;

	iser_dbg("Translating sg sz: %d\n", data->dma_nents);
	page_vec_len = iser_sg_to_page_vec(data, page_vec, ibdev);
	iser_dbg("sg len %d page_vec_len %d\n", data->dma_nents, page_vec_len);

	page_vec->length = page_vec_len;

	if (page_vec_len * SIZE_4K < page_vec->data_size) {
		iser_err("page_vec too short to hold this SG\n");
		iser_data_buf_dump(data, ibdev);
		iser_dump_page_vec(page_vec);
		BUG();
	}
}

int iser_dma_map_task_data(struct iscsi_iser_task *iser_task,
			   struct iser_data_buf *data,
			   enum iser_data_dir iser_dir,
			   enum dma_data_direction dma_dir)
{
	struct ib_device *dev;

	iser_task->dir[iser_dir] = 1;
	dev = iser_task->iser_conn->ib_conn->device->ib_device;

	data->dma_nents = ib_dma_map_sg(dev, data->buf, data->size, dma_dir);
	if (data->dma_nents == 0) {
		iser_err("dma_map_sg failed!!!\n");
		return -EINVAL;
	}
	return 0;
}

void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task)
{
	struct ib_device *dev;
	struct iser_data_buf *data;

	dev = iser_task->iser_conn->ib_conn->device->ib_device;

	if (iser_task->dir[ISER_DIR_IN]) {
		data = &iser_task->data[ISER_DIR_IN];
		ib_dma_unmap_sg(dev, data->buf, data->size, DMA_FROM_DEVICE);
	}

	if (iser_task->dir[ISER_DIR_OUT]) {
		data = &iser_task->data[ISER_DIR_OUT];
		ib_dma_unmap_sg(dev, data->buf, data->size, DMA_TO_DEVICE);
	}
}

/**
 * iser_reg_rdma_mem - Registers memory intended for RDMA,
 * obtaining rkey and va
 *
 * returns 0 on success, errno code on failure
 */
int iser_reg_rdma_mem(struct iscsi_iser_task *iser_task,
		      enum iser_data_dir cmd_dir)
{
	struct iscsi_conn    *iscsi_conn = iser_task->iser_conn->iscsi_conn;
	struct iser_conn     *ib_conn = iser_task->iser_conn->ib_conn;
	struct iser_device   *device = ib_conn->device;
	struct ib_device     *ibdev = device->ib_device;
	struct iser_data_buf *mem = &iser_task->data[cmd_dir];
	struct iser_regd_buf *regd_buf;
	int aligned_len;
	int err;
	int i;
	struct scatterlist *sg;

	regd_buf = &iser_task->rdma_regd[cmd_dir];

	aligned_len = iser_data_buf_aligned_len(mem, ibdev);
	if (aligned_len != mem->dma_nents) {
		iscsi_conn->fmr_unalign_cnt++;
		iser_warn("rdma alignment violation %d/%d aligned\n",
			  aligned_len, mem->size);
		iser_data_buf_dump(mem, ibdev);

		/* unmap the command data before accessing it */
		iser_dma_unmap_task_data(iser_task);

		/* allocate copy buf, if we are writing, copy the */
		/* unaligned scatterlist, dma map the copy        */
		if (iser_start_rdma_unaligned_sg(iser_task, cmd_dir) != 0)
			return -ENOMEM;
		mem = &iser_task->data_copy[cmd_dir];
	}

	/* if there is a single dma entry, FMR is not needed */
	if (mem->dma_nents == 1) {
		sg = (struct scatterlist *)mem->buf;

		regd_buf->reg.lkey = device->mr->lkey;
		regd_buf->reg.rkey = device->mr->rkey;
		regd_buf->reg.len  = ib_sg_dma_len(ibdev, &sg[0]);
		regd_buf->reg.va   = ib_sg_dma_address(ibdev, &sg[0]);
		regd_buf->reg.is_fmr = 0;

		iser_dbg("PHYSICAL Mem.register: lkey: 0x%08X rkey: 0x%08X "
			 "va: 0x%08lX sz: %ld\n",
			 (unsigned int)regd_buf->reg.lkey,
			 (unsigned int)regd_buf->reg.rkey,
			 (unsigned long)regd_buf->reg.va,
			 (unsigned long)regd_buf->reg.len);
	} else { /* use FMR for multiple dma entries */
		iser_page_vec_build(mem, ib_conn->page_vec, ibdev);
		err = iser_reg_page_vec(ib_conn, ib_conn->page_vec, &regd_buf->reg);
		if (err) {
			iser_data_buf_dump(mem, ibdev);
			iser_err("mem->dma_nents = %d (dlength = 0x%x)\n",
				 mem->dma_nents,
				 ntoh24(iser_task->desc.iscsi_header.dlength));
			iser_err("page_vec: data_size = 0x%x, length = %d, offset = 0x%x\n",
				 ib_conn->page_vec->data_size,
				 ib_conn->page_vec->length,
				 ib_conn->page_vec->offset);
			for (i = 0; i < ib_conn->page_vec->length; i++)
				iser_err("page_vec[%d] = 0x%llx\n", i,
					 (unsigned long long)ib_conn->page_vec->pages[i]);
			return err;
		}
	}

	/* take a reference on this regd buf such that it will not be released
	 * (eg in send dto completion) before we get the scsi response */
	atomic_inc(&regd_buf->ref_count);
	return 0;
}