/*
 * Copyright (c) 2004, 2005, 2006 Voltaire, Inc. All rights reserved.
 * Copyright (c) 2013 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *	- Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *	- Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/scatterlist.h>

#include "iscsi_iser.h"

#define ISER_KMALLOC_THRESHOLD 0x20000	/* 128K - kmalloc limit */
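
/*
 * Note on the allocation-order arithmetic in iser_start_rdma_unaligned_sg()
 * below (a worked example, assuming 4K pages so PAGE_SHIFT == 12): for a
 * 200K command, roundup_pow_of_two() yields 256K and ilog2(256K) == 18, so
 * __get_free_pages() is asked for order 18 - 12 = 6, i.e. 2^6 == 64
 * physically contiguous pages (256K).
 */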

/**
 * iser_start_rdma_unaligned_sg - allocate a contiguous bounce buffer,
 * copy the unaligned SG data into it for ISER_DIR_OUT, and DMA-map it
 * as a single-entry scatterlist
 */
static int iser_start_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
					enum iser_data_dir cmd_dir)
{
	int dma_nents;
	struct ib_device *dev;
	char *mem = NULL;
	struct iser_data_buf *data = &iser_task->data[cmd_dir];
	unsigned long cmd_data_len = data->data_len;

	if (cmd_data_len > ISER_KMALLOC_THRESHOLD)
		mem = (void *)__get_free_pages(GFP_ATOMIC,
		      ilog2(roundup_pow_of_two(cmd_data_len)) - PAGE_SHIFT);
	else
		mem = kmalloc(cmd_data_len, GFP_ATOMIC);

	if (mem == NULL) {
		iser_err("Failed to allocate mem size %d %d for copying sglist\n",
			 data->size, (int)cmd_data_len);
		return -ENOMEM;
	}

	if (cmd_dir == ISER_DIR_OUT) {
		/* copy the unaligned sg into the buffer which is used for RDMA */
		struct scatterlist *sgl = (struct scatterlist *)data->buf;
		struct scatterlist *sg;
		int i;
		char *p, *from;

		p = mem;
		for_each_sg(sgl, sg, data->size, i) {
			from = kmap_atomic(sg_page(sg));
			memcpy(p,
			       from + sg->offset,
			       sg->length);
			kunmap_atomic(from);
			p += sg->length;
		}
	}

	sg_init_one(&iser_task->data_copy[cmd_dir].sg_single, mem, cmd_data_len);
	iser_task->data_copy[cmd_dir].buf =
		&iser_task->data_copy[cmd_dir].sg_single;
	iser_task->data_copy[cmd_dir].size = 1;

	iser_task->data_copy[cmd_dir].copy_buf = mem;

	dev = iser_task->iser_conn->ib_conn->device->ib_device;
	dma_nents = ib_dma_map_sg(dev,
				  &iser_task->data_copy[cmd_dir].sg_single,
				  1,
				  (cmd_dir == ISER_DIR_OUT) ?
				  DMA_TO_DEVICE : DMA_FROM_DEVICE);
	BUG_ON(dma_nents == 0);

	iser_task->data_copy[cmd_dir].dma_nents = dma_nents;
	return 0;
}
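
/*
 * On success, data_copy[cmd_dir] describes the bounce buffer as a
 * single-entry, DMA-mapped SG, and copy_buf holds the raw allocation so
 * that iser_finalize_rdma_unaligned_sg() can copy the data back for
 * reads and free it.
 */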

/**
 * iser_finalize_rdma_unaligned_sg - DMA-unmap the bounce buffer, copy it
 * back to the original scatterlist for ISER_DIR_IN, and free it
 */
void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
				     enum iser_data_dir cmd_dir)
{
	struct ib_device *dev;
	struct iser_data_buf *mem_copy;
	unsigned long cmd_data_len;

	dev = iser_task->iser_conn->ib_conn->device->ib_device;
	mem_copy = &iser_task->data_copy[cmd_dir];

	ib_dma_unmap_sg(dev, &mem_copy->sg_single, 1,
			(cmd_dir == ISER_DIR_OUT) ?
			DMA_TO_DEVICE : DMA_FROM_DEVICE);

	if (cmd_dir == ISER_DIR_IN) {
		char *mem;
		struct scatterlist *sgl, *sg;
		unsigned char *p, *to;
		unsigned int sg_size;
		int i;

		/* copy back read RDMA to unaligned sg */
		mem = mem_copy->copy_buf;

		sgl = (struct scatterlist *)iser_task->data[ISER_DIR_IN].buf;
		sg_size = iser_task->data[ISER_DIR_IN].size;

		p = mem;
		for_each_sg(sgl, sg, sg_size, i) {
			to = kmap_atomic(sg_page(sg));
			memcpy(to + sg->offset,
			       p,
			       sg->length);
			kunmap_atomic(to);
			p += sg->length;
		}
	}

	cmd_data_len = iser_task->data[cmd_dir].data_len;

	if (cmd_data_len > ISER_KMALLOC_THRESHOLD)
		free_pages((unsigned long)mem_copy->copy_buf,
			   ilog2(roundup_pow_of_two(cmd_data_len)) - PAGE_SHIFT);
	else
		kfree(mem_copy->copy_buf);

	mem_copy->copy_buf = NULL;
}

#define IS_4K_ALIGNED(addr)	((((unsigned long)addr) & ~MASK_4K) == 0)
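/*
 * MASK_4K is defined in iscsi_iser.h as ~(SIZE_4K - 1), so this checks the
 * low 12 address bits: e.g. IS_4K_ALIGNED(0x12000) holds, while
 * IS_4K_ALIGNED(0x12010) does not.
 */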

/**
 * iser_sg_to_page_vec - Translates scatterlist entries to physical addresses
 * and returns the length of the resulting physical address array (may be less
 * than the original due to possible compaction).
 *
 * We build a "page vec" under the assumption that the SG meets the RDMA
 * alignment requirements. Other than the first and last SG elements, all
 * the "internal" elements can be compacted into a list whose elements are
 * dma addresses of physical pages. The code also supports the weird case
 * where a few fragments of the same page are present in the SG as
 * consecutive elements, and it handles a single-entry SG.
 */

static int iser_sg_to_page_vec(struct iser_data_buf *data,
			       struct iser_page_vec *page_vec,
			       struct ib_device *ibdev)
{
	struct scatterlist *sg, *sgl = (struct scatterlist *)data->buf;
	u64 start_addr, end_addr, page, chunk_start = 0;
	unsigned long total_sz = 0;
	unsigned int dma_len;
	int i, new_chunk, cur_page, last_ent = data->dma_nents - 1;

	/* compute the offset of first element */
	page_vec->offset = (u64) sgl[0].offset & ~MASK_4K;

	new_chunk = 1;
	cur_page = 0;
	for_each_sg(sgl, sg, data->dma_nents, i) {
		start_addr = ib_sg_dma_address(ibdev, sg);
		if (new_chunk)
			chunk_start = start_addr;
		dma_len = ib_sg_dma_len(ibdev, sg);
		end_addr = start_addr + dma_len;
		total_sz += dma_len;

		/* collect page fragments until aligned or end of SG list */
		if (!IS_4K_ALIGNED(end_addr) && i < last_ent) {
			new_chunk = 0;
			continue;
		}
		new_chunk = 1;

		/* address of the first page in the contiguous chunk;
		   masking relevant for the very first SG entry,
		   which might be unaligned */
		page = chunk_start & MASK_4K;
		do {
			page_vec->pages[cur_page++] = page;
			page += SIZE_4K;
		} while (page < end_addr);
	}

	page_vec->data_size = total_sz;
	iser_dbg("page_vec->data_size:%d cur_page %d\n",
		 page_vec->data_size, cur_page);
	return cur_page;
}
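
/*
 * A worked example of the compaction above (hypothetical DMA addresses,
 * 4K pages): entries [0x10800, len 0x400], [0x10c00, len 0x400] and
 * [0x11000, len 0x1000]. The first entry ends unaligned at 0x10c00, so
 * the chunk stays open; the second ends 4K-aligned at 0x11000 and closes
 * it, emitting the single page 0x10000 (chunk_start 0x10800 masked by
 * MASK_4K). The third entry emits page 0x11000, so two fragments of the
 * same page collapse into one page_vec entry: {0x10000, 0x11000}.
 */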

/**
 * iser_data_buf_aligned_len - Tries to determine the maximal sub-list of a
 * scatter-gather list of memory buffers that is correctly aligned for RDMA,
 * and returns the number of entries which are aligned correctly. Supports
 * the case where consecutive SG elements are actually fragments of the same
 * physical page.
 */
static int iser_data_buf_aligned_len(struct iser_data_buf *data,
				     struct ib_device *ibdev)
{
	struct scatterlist *sgl, *sg, *next_sg = NULL;
	u64 start_addr, end_addr;
	int i, ret_len, start_check = 0;

	if (data->dma_nents == 1)
		return 1;

	sgl = (struct scatterlist *)data->buf;
	start_addr = ib_sg_dma_address(ibdev, sgl);

	for_each_sg(sgl, sg, data->dma_nents, i) {
		if (start_check && !IS_4K_ALIGNED(start_addr))
			break;

		next_sg = sg_next(sg);
		if (!next_sg)
			break;

		end_addr = start_addr + ib_sg_dma_len(ibdev, sg);
		start_addr = ib_sg_dma_address(ibdev, next_sg);

		if (end_addr == start_addr) {
			start_check = 0;
			continue;
		} else
			start_check = 1;

		if (!IS_4K_ALIGNED(end_addr))
			break;
	}
	ret_len = (next_sg) ? i : i+1;
	iser_dbg("Found %d aligned entries out of %d in sg:0x%p\n",
		 ret_len, data->dma_nents, data);
	return ret_len;
}
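
/*
 * Example (hypothetical addresses, 4K pages): two entries
 * [0x10000, len 0x1000] and [0x20000, len 0x1000] are both counted as
 * aligned (returns 2), but shortening the first to 0x800 bytes makes its
 * end fall mid-page at 0x10800 with a gap before 0x20000, so the loop
 * breaks on the first iteration and the caller falls back to the bounce
 * buffer in iser_start_rdma_unaligned_sg().
 */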

static void iser_data_buf_dump(struct iser_data_buf *data,
			       struct ib_device *ibdev)
{
	struct scatterlist *sgl = (struct scatterlist *)data->buf;
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, data->dma_nents, i)
		iser_dbg("sg[%d] dma_addr:0x%lX page:0x%p "
			 "off:0x%x sz:0x%x dma_len:0x%x\n",
			 i, (unsigned long)ib_sg_dma_address(ibdev, sg),
			 sg_page(sg), sg->offset,
			 sg->length, ib_sg_dma_len(ibdev, sg));
}

static void iser_dump_page_vec(struct iser_page_vec *page_vec)
{
	int i;

	iser_err("page vec length %d data size %d\n",
		 page_vec->length, page_vec->data_size);
	for (i = 0; i < page_vec->length; i++)
		iser_err("%d %lx\n", i, (unsigned long)page_vec->pages[i]);
}

static void iser_page_vec_build(struct iser_data_buf *data,
				struct iser_page_vec *page_vec,
				struct ib_device *ibdev)
{
	int page_vec_len = 0;

	page_vec->length = 0;
	page_vec->offset = 0;

	iser_dbg("Translating sg sz: %d\n", data->dma_nents);
	page_vec_len = iser_sg_to_page_vec(data, page_vec, ibdev);
	iser_dbg("sg len %d page_vec_len %d\n", data->dma_nents, page_vec_len);

	page_vec->length = page_vec_len;

	if (page_vec_len * SIZE_4K < page_vec->data_size) {
		iser_err("page_vec too short to hold this SG\n");
		iser_data_buf_dump(data, ibdev);
		iser_dump_page_vec(page_vec);
		BUG();
	}
}

int iser_dma_map_task_data(struct iscsi_iser_task *iser_task,
			   struct iser_data_buf *data,
			   enum iser_data_dir iser_dir,
			   enum dma_data_direction dma_dir)
{
	struct ib_device *dev;

	iser_task->dir[iser_dir] = 1;
	dev = iser_task->iser_conn->ib_conn->device->ib_device;

	data->dma_nents = ib_dma_map_sg(dev, data->buf, data->size, dma_dir);
	if (data->dma_nents == 0) {
		iser_err("dma_map_sg failed!!!\n");
		return -EINVAL;
	}
	return 0;
}

void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task)
{
	struct ib_device *dev;
	struct iser_data_buf *data;

	dev = iser_task->iser_conn->ib_conn->device->ib_device;

	if (iser_task->dir[ISER_DIR_IN]) {
		data = &iser_task->data[ISER_DIR_IN];
		ib_dma_unmap_sg(dev, data->buf, data->size, DMA_FROM_DEVICE);
	}

	if (iser_task->dir[ISER_DIR_OUT]) {
		data = &iser_task->data[ISER_DIR_OUT];
		ib_dma_unmap_sg(dev, data->buf, data->size, DMA_TO_DEVICE);
	}
}

/**
 * iser_reg_rdma_mem - Registers memory intended for RDMA,
 * obtaining rkey and va
 *
 * If the task's SG list is not RDMA-aligned (or the device lacks an FMR
 * pool while the mapping has more than one entry), the data is first
 * copied into an aligned bounce buffer. A single-entry mapping is then
 * registered directly through the device MR; multi-entry mappings go
 * through the FMR pool.
 *
 * returns 0 on success, errno code on failure
 */
int iser_reg_rdma_mem(struct iscsi_iser_task *iser_task,
		      enum iser_data_dir cmd_dir)
{
	struct iscsi_conn *iscsi_conn = iser_task->iser_conn->iscsi_conn;
	struct iser_conn *ib_conn = iser_task->iser_conn->ib_conn;
	struct iser_device *device = ib_conn->device;
	struct ib_device *ibdev = device->ib_device;
	struct iser_data_buf *mem = &iser_task->data[cmd_dir];
	struct iser_regd_buf *regd_buf;
	int aligned_len;
	int err;
	int i;
	struct scatterlist *sg;

	regd_buf = &iser_task->rdma_regd[cmd_dir];

	aligned_len = iser_data_buf_aligned_len(mem, ibdev);
	if (aligned_len != mem->dma_nents ||
	    (!ib_conn->fmr_pool && mem->dma_nents > 1)) {
		iscsi_conn->fmr_unalign_cnt++;
		iser_dbg("rdma alignment violation (%d/%d aligned) or FMR not supported\n",
			 aligned_len, mem->size);

		if (iser_debug_level > 0)
			iser_data_buf_dump(mem, ibdev);

		/* unmap the command data before accessing it */
		iser_dma_unmap_task_data(iser_task);

		/* allocate copy buf, if we are writing, copy the */
		/* unaligned scatterlist, dma map the copy        */
		if (iser_start_rdma_unaligned_sg(iser_task, cmd_dir) != 0)
			return -ENOMEM;
		mem = &iser_task->data_copy[cmd_dir];
	}

	/* if there is a single dma entry, FMR is not needed */
	if (mem->dma_nents == 1) {
		sg = (struct scatterlist *)mem->buf;

		regd_buf->reg.lkey = device->mr->lkey;
		regd_buf->reg.rkey = device->mr->rkey;
		regd_buf->reg.len  = ib_sg_dma_len(ibdev, &sg[0]);
		regd_buf->reg.va   = ib_sg_dma_address(ibdev, &sg[0]);
		regd_buf->reg.is_fmr = 0;

		iser_dbg("PHYSICAL Mem.register: lkey: 0x%08X rkey: 0x%08X "
			 "va: 0x%08lX sz: %ld\n",
			 (unsigned int)regd_buf->reg.lkey,
			 (unsigned int)regd_buf->reg.rkey,
			 (unsigned long)regd_buf->reg.va,
			 (unsigned long)regd_buf->reg.len);
	} else { /* use FMR for multiple dma entries */
		iser_page_vec_build(mem, ib_conn->page_vec, ibdev);
		err = iser_reg_page_vec(ib_conn, ib_conn->page_vec,
					&regd_buf->reg);
		if (err && err != -EAGAIN) {
			iser_data_buf_dump(mem, ibdev);
			iser_err("mem->dma_nents = %d (dlength = 0x%x)\n",
				 mem->dma_nents,
				 ntoh24(iser_task->desc.iscsi_header.dlength));
			iser_err("page_vec: data_size = 0x%x, length = %d, offset = 0x%x\n",
				 ib_conn->page_vec->data_size,
				 ib_conn->page_vec->length,
				 ib_conn->page_vec->offset);
			for (i = 0; i < ib_conn->page_vec->length; i++)
				iser_err("page_vec[%d] = 0x%llx\n", i,
					 (unsigned long long)ib_conn->page_vec->pages[i]);
		}
		if (err)
			return err;
	}
	return 0;
}