/*
 * Copyright (c) 2004, 2005, 2006 Voltaire, Inc. All rights reserved.
 * Copyright (c) 2013-2014 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *	- Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *	- Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/scatterlist.h>

#include "iscsi_iser.h"

static struct iser_reg_ops fastreg_ops = {
	.alloc_reg_res = iser_alloc_fastreg_pool,
	.free_reg_res = iser_free_fastreg_pool,
	.reg_rdma_mem = iser_reg_rdma_mem_fastreg,
	.unreg_rdma_mem = iser_unreg_mem_fastreg,
};

static struct iser_reg_ops fmr_ops = {
	.alloc_reg_res = iser_alloc_fmr_pool,
	.free_reg_res = iser_free_fmr_pool,
	.reg_rdma_mem = iser_reg_rdma_mem_fmr,
	.unreg_rdma_mem = iser_unreg_mem_fmr,
};

int iser_assign_reg_ops(struct iser_device *device)
{
	struct ib_device_attr *dev_attr = &device->dev_attr;

	/* Assign function handles - based on FMR support */
	if (device->ib_device->alloc_fmr && device->ib_device->dealloc_fmr &&
	    device->ib_device->map_phys_fmr && device->ib_device->unmap_fmr) {
		iser_info("FMR supported, using FMR for registration\n");
		device->reg_ops = &fmr_ops;
	} else if (dev_attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS) {
		iser_info("FastReg supported, using FastReg for registration\n");
		device->reg_ops = &fastreg_ops;
	} else {
		iser_err("IB device supports neither FMRs nor FastRegs, can't register memory\n");
		return -1;
	}

	return 0;
}
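
/*
 * Usage sketch (illustrative; there is no such call site in this file):
 * once the ops are assigned, callers pick up the registration scheme
 * transparently, e.g.
 *
 *	device->reg_ops->reg_rdma_mem(iser_task, ISER_DIR_OUT);
 *	...
 *	device->reg_ops->unreg_rdma_mem(iser_task, ISER_DIR_OUT);
 *
 * The reg_rdma_mem/unreg_rdma_mem prototypes match the handlers defined
 * later in this file; the pool alloc/free prototypes live in iscsi_iser.h.
 */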

static void
iser_free_bounce_sg(struct iser_data_buf *data)
{
	struct scatterlist *sg;
	int count;

	for_each_sg(data->sg, sg, data->size, count)
		__free_page(sg_page(sg));

	kfree(data->sg);

	data->sg = data->orig_sg;
	data->size = data->orig_size;
	data->orig_sg = NULL;
	data->orig_size = 0;
}

static int
iser_alloc_bounce_sg(struct iser_data_buf *data)
{
	struct scatterlist *sg;
	struct page *page;
	unsigned long length = data->data_len;
	int i = 0, nents = DIV_ROUND_UP(length, PAGE_SIZE);

	sg = kcalloc(nents, sizeof(*sg), GFP_ATOMIC);
	if (!sg)
		goto err;

	sg_init_table(sg, nents);
	while (length) {
		u32 page_len = min_t(u32, length, PAGE_SIZE);

		page = alloc_page(GFP_ATOMIC);
		if (!page)
			goto err;

		sg_set_page(&sg[i], page, page_len, 0);
		length -= page_len;
		i++;
	}

	data->orig_sg = data->sg;
	data->orig_size = data->size;
	data->sg = sg;
	data->size = nents;

	return 0;

err:
	for (; i > 0; i--)
		__free_page(sg_page(&sg[i - 1]));
	kfree(sg);

	return -ENOMEM;
}

static void
iser_copy_bounce(struct iser_data_buf *data, bool to_buffer)
{
	struct scatterlist *osg, *bsg = data->sg;
	void *oaddr, *baddr;
	unsigned int left = data->data_len;
	unsigned int bsg_off = 0;
	int i;

	for_each_sg(data->orig_sg, osg, data->orig_size, i) {
		unsigned int copy_len, osg_off = 0;

		oaddr = kmap_atomic(sg_page(osg)) + osg->offset;
		copy_len = min(left, osg->length);
		while (copy_len) {
			unsigned int len = min(copy_len, bsg->length - bsg_off);

			baddr = kmap_atomic(sg_page(bsg)) + bsg->offset;
			if (to_buffer)
				memcpy(baddr + bsg_off, oaddr + osg_off, len);
			else
				memcpy(oaddr + osg_off, baddr + bsg_off, len);

			kunmap_atomic(baddr - bsg->offset);
			osg_off += len;
			bsg_off += len;
			copy_len -= len;

			if (bsg_off >= bsg->length) {
				bsg = sg_next(bsg);
				bsg_off = 0;
			}
		}
		kunmap_atomic(oaddr - osg->offset);
		left -= osg_off;
	}
}

static inline void
iser_copy_from_bounce(struct iser_data_buf *data)
{
	iser_copy_bounce(data, false);
}

static inline void
iser_copy_to_bounce(struct iser_data_buf *data)
{
	iser_copy_bounce(data, true);
}
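
/*
 * Bounce-buffer lifecycle, as used by the unaligned-SG handlers below
 * (a sketch only; error handling elided):
 *
 *	iser_alloc_bounce_sg(data)          - swap a freshly allocated
 *	                                      bounce SG in place of data->sg
 *	iser_copy_to_bounce(data)           - ISER_DIR_OUT: stage the payload
 *	ib_dma_map_sg(dev, data->sg, ...)   - map the bounce pages
 *	    ... RDMA executes against the bounce buffer ...
 *	ib_dma_unmap_sg(dev, data->sg, ...)
 *	iser_copy_from_bounce(data)         - ISER_DIR_IN: copy back to the
 *	                                      caller's pages
 *	iser_free_bounce_sg(data)           - free pages, restore the orig SG
 */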

struct iser_fr_desc *
iser_reg_desc_get(struct ib_conn *ib_conn)
{
	struct iser_fr_desc *desc;
	unsigned long flags;

	spin_lock_irqsave(&ib_conn->lock, flags);
	desc = list_first_entry(&ib_conn->fastreg.pool,
				struct iser_fr_desc, list);
	list_del(&desc->list);
	spin_unlock_irqrestore(&ib_conn->lock, flags);

	return desc;
}

void
iser_reg_desc_put(struct ib_conn *ib_conn,
		  struct iser_fr_desc *desc)
{
	unsigned long flags;

	spin_lock_irqsave(&ib_conn->lock, flags);
	list_add(&desc->list, &ib_conn->fastreg.pool);
	spin_unlock_irqrestore(&ib_conn->lock, flags);
}

/**
 * iser_start_rdma_unaligned_sg - bounce an RDMA-unaligned scatterlist:
 * allocate a bounce buffer, stage write payloads into it and DMA map it.
 */
static int iser_start_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
					struct iser_data_buf *data,
					enum iser_data_dir cmd_dir)
{
	struct ib_device *dev = iser_task->iser_conn->ib_conn.device->ib_device;
	int rc;

	rc = iser_alloc_bounce_sg(data);
	if (rc) {
		iser_err("Failed to allocate bounce for data len %lu\n",
			 data->data_len);
		return rc;
	}

	if (cmd_dir == ISER_DIR_OUT)
		iser_copy_to_bounce(data);

	data->dma_nents = ib_dma_map_sg(dev, data->sg, data->size,
					(cmd_dir == ISER_DIR_OUT) ?
					DMA_TO_DEVICE : DMA_FROM_DEVICE);
	if (!data->dma_nents) {
		iser_err("Got dma_nents %d, something went wrong...\n",
			 data->dma_nents);
		rc = -ENOMEM;
		goto err;
	}

	return 0;
err:
	iser_free_bounce_sg(data);
	return rc;
}

/**
 * iser_finalize_rdma_unaligned_sg - DMA unmap the bounce buffer, copy
 * read payloads back to the original scatterlist and free the bounce.
 */
void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
				     struct iser_data_buf *data,
				     enum iser_data_dir cmd_dir)
{
	struct ib_device *dev = iser_task->iser_conn->ib_conn.device->ib_device;

	ib_dma_unmap_sg(dev, data->sg, data->size,
			(cmd_dir == ISER_DIR_OUT) ?
			DMA_TO_DEVICE : DMA_FROM_DEVICE);

	if (cmd_dir == ISER_DIR_IN)
		iser_copy_from_bounce(data);

	iser_free_bounce_sg(data);
}

#define IS_4K_ALIGNED(addr)	((((unsigned long)addr) & ~MASK_4K) == 0)

/**
 * iser_sg_to_page_vec - Translates scatterlist entries to physical addresses
 * and returns the length of the resulting physical address array (may be
 * less than the original due to possible compaction).
 *
 * We build a "page vec" under the assumption that the SG meets the RDMA
 * alignment requirements. Other than the first and last SG elements, all
 * the "internal" elements can be compacted into a list whose elements are
 * dma addresses of physical pages. The code also supports the weird case
 * where --few fragments of the same page-- are present in the SG as
 * consecutive elements. Also, it handles one entry SG.
 */
static int iser_sg_to_page_vec(struct iser_data_buf *data,
			       struct ib_device *ibdev, u64 *pages,
			       int *offset, int *data_size)
{
	struct scatterlist *sg, *sgl = data->sg;
	u64 start_addr, end_addr, page, chunk_start = 0;
	unsigned long total_sz = 0;
	unsigned int dma_len;
	int i, new_chunk, cur_page, last_ent = data->dma_nents - 1;

	/* compute the offset of first element */
	*offset = (u64) sgl[0].offset & ~MASK_4K;

	new_chunk = 1;
	cur_page = 0;
	for_each_sg(sgl, sg, data->dma_nents, i) {
		start_addr = ib_sg_dma_address(ibdev, sg);
		if (new_chunk)
			chunk_start = start_addr;
		dma_len = ib_sg_dma_len(ibdev, sg);
		end_addr = start_addr + dma_len;
		total_sz += dma_len;

		/* collect page fragments until aligned or end of SG list */
		if (!IS_4K_ALIGNED(end_addr) && i < last_ent) {
			new_chunk = 0;
			continue;
		}
		new_chunk = 1;

		/* address of the first page in the contiguous chunk;
		   masking relevant for the very first SG entry,
		   which might be unaligned */
		page = chunk_start & MASK_4K;
		do {
			pages[cur_page++] = page;
			page += SIZE_4K;
		} while (page < end_addr);
	}

	*data_size = total_sz;
	iser_dbg("page_vec->data_size:%d cur_page %d\n",
		 *data_size, cur_page);
	return cur_page;
}
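
/*
 * Worked example (hypothetical addresses, 4K pages): the mapped SG list
 *
 *	sg[0]: dma 0x10000000, len 0x0800   (ends mid-page, not 4K aligned)
 *	sg[1]: dma 0x10000800, len 0x0800   (same page, ends 4K aligned)
 *	sg[2]: dma 0x10001000, len 0x1000
 *
 * compacts into pages[] = { 0x10000000, 0x10001000 } with *offset = 0 and
 * *data_size = 0x2000: the two fragments of the first page form a single
 * chunk and contribute only one page vector entry.
 */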

/**
 * iser_data_buf_aligned_len - Tries to determine the maximal correctly
 * aligned sub-list for RDMA of a scatter-gather list of memory buffers,
 * and returns the number of entries which are aligned correctly. Supports
 * the case where consecutive SG elements are actually fragments of the
 * same physical page.
 */
static int iser_data_buf_aligned_len(struct iser_data_buf *data,
				     struct ib_device *ibdev)
{
	struct scatterlist *sg, *sgl, *next_sg = NULL;
	u64 start_addr, end_addr;
	int i, ret_len, start_check = 0;

	if (data->dma_nents == 1)
		return 1;

	sgl = data->sg;
	start_addr = ib_sg_dma_address(ibdev, sgl);

	for_each_sg(sgl, sg, data->dma_nents, i) {
		if (start_check && !IS_4K_ALIGNED(start_addr))
			break;

		next_sg = sg_next(sg);
		if (!next_sg)
			break;

		end_addr = start_addr + ib_sg_dma_len(ibdev, sg);
		start_addr = ib_sg_dma_address(ibdev, next_sg);

		if (end_addr == start_addr) {
			start_check = 0;
			continue;
		} else
			start_check = 1;

		if (!IS_4K_ALIGNED(end_addr))
			break;
	}
	ret_len = (next_sg) ? i : i+1;

	if (unlikely(ret_len != data->dma_nents))
		iser_warn("rdma alignment violation (%d/%d aligned)\n",
			  ret_len, data->dma_nents);

	return ret_len;
}
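
/*
 * Example (hypothetical layout, 4K pages): for the mapped SG list
 *
 *	sg[0]: dma 0x20000000, len 0x1000   (ends 4K aligned)
 *	sg[1]: dma 0x30000800, len 0x0800   (discontiguous, unaligned start)
 *
 * the routine returns 1: sg[1] begins at a non-4K-aligned address after a
 * discontinuity, so only the first entry qualifies. Since 1 != dma_nents,
 * the callers below fall back to bounce buffering via fall_to_bounce_buf().
 */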

static void iser_data_buf_dump(struct iser_data_buf *data,
			       struct ib_device *ibdev)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(data->sg, sg, data->dma_nents, i)
		iser_dbg("sg[%d] dma_addr:0x%lX page:0x%p "
			 "off:0x%x sz:0x%x dma_len:0x%x\n",
			 i, (unsigned long)ib_sg_dma_address(ibdev, sg),
			 sg_page(sg), sg->offset,
			 sg->length, ib_sg_dma_len(ibdev, sg));
}

static void iser_dump_page_vec(struct iser_page_vec *page_vec)
{
	int i;

	iser_err("page vec length %d data size %d\n",
		 page_vec->length, page_vec->data_size);
	for (i = 0; i < page_vec->length; i++)
		iser_err("%d %lx\n", i, (unsigned long)page_vec->pages[i]);
}

int iser_dma_map_task_data(struct iscsi_iser_task *iser_task,
			   struct iser_data_buf *data,
			   enum iser_data_dir iser_dir,
			   enum dma_data_direction dma_dir)
{
	struct ib_device *dev;

	iser_task->dir[iser_dir] = 1;
	dev = iser_task->iser_conn->ib_conn.device->ib_device;

	data->dma_nents = ib_dma_map_sg(dev, data->sg, data->size, dma_dir);
	if (data->dma_nents == 0) {
		iser_err("dma_map_sg failed!!!\n");
		return -EINVAL;
	}
	return 0;
}

void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task,
			      struct iser_data_buf *data,
			      enum dma_data_direction dir)
{
	struct ib_device *dev;

	dev = iser_task->iser_conn->ib_conn.device->ib_device;
	ib_dma_unmap_sg(dev, data->sg, data->size, dir);
}

static int
iser_reg_dma(struct iser_device *device, struct iser_data_buf *mem,
	     struct iser_mem_reg *reg)
{
	struct scatterlist *sg = mem->sg;

	reg->sge.lkey = device->mr->lkey;
	reg->rkey = device->mr->rkey;
	reg->sge.addr = ib_sg_dma_address(device->ib_device, &sg[0]);
	reg->sge.length = ib_sg_dma_len(device->ib_device, &sg[0]);

	iser_dbg("Single DMA entry: lkey=0x%x, rkey=0x%x, addr=0x%llx,"
		 " length=0x%x\n", reg->sge.lkey, reg->rkey,
		 reg->sge.addr, reg->sge.length);

	return 0;
}

static int fall_to_bounce_buf(struct iscsi_iser_task *iser_task,
			      struct iser_data_buf *mem,
			      enum iser_data_dir cmd_dir)
{
	struct iscsi_conn *iscsi_conn = iser_task->iser_conn->iscsi_conn;
	struct iser_device *device = iser_task->iser_conn->ib_conn.device;

	iscsi_conn->fmr_unalign_cnt++;

	if (iser_debug_level > 0)
		iser_data_buf_dump(mem, device->ib_device);

	/* unmap the command data before accessing it */
	iser_dma_unmap_task_data(iser_task, mem,
				 (cmd_dir == ISER_DIR_OUT) ?
				 DMA_TO_DEVICE : DMA_FROM_DEVICE);

	/*
	 * allocate a copy buffer; if we are writing, copy the unaligned
	 * scatterlist into it, then dma map the copy
	 */
	if (iser_start_rdma_unaligned_sg(iser_task, mem, cmd_dir) != 0)
		return -ENOMEM;

	return 0;
}

/**
 * iser_reg_page_vec - Register physical memory
 *
 * returns: 0 on success, errno code on failure
 */
static
int iser_reg_page_vec(struct iscsi_iser_task *iser_task,
		      struct iser_data_buf *mem,
		      struct iser_page_vec *page_vec,
		      struct iser_mem_reg *mem_reg)
{
	struct ib_conn *ib_conn = &iser_task->iser_conn->ib_conn;
	struct iser_device *device = ib_conn->device;
	struct ib_pool_fmr *fmr;
	int ret, plen;

	plen = iser_sg_to_page_vec(mem, device->ib_device,
				   page_vec->pages,
				   &page_vec->offset,
				   &page_vec->data_size);
	page_vec->length = plen;
	if (plen * SIZE_4K < page_vec->data_size) {
		iser_err("page vec too short to hold this SG\n");
		iser_data_buf_dump(mem, device->ib_device);
		iser_dump_page_vec(page_vec);
		return -EINVAL;
	}

	fmr = ib_fmr_pool_map_phys(ib_conn->fmr.pool,
				   page_vec->pages,
				   page_vec->length,
				   page_vec->pages[0]);
	if (IS_ERR(fmr)) {
		ret = PTR_ERR(fmr);
		iser_err("ib_fmr_pool_map_phys failed: %d\n", ret);
		return ret;
	}

	mem_reg->sge.lkey = fmr->fmr->lkey;
	mem_reg->rkey = fmr->fmr->rkey;
	mem_reg->sge.addr = page_vec->pages[0] + page_vec->offset;
	mem_reg->sge.length = page_vec->data_size;
	mem_reg->mem_h = fmr;

	return 0;
}

/**
 * Unregister (previously registered using FMR) memory.
 * If the memory was not registered with FMR, does nothing.
 */
void iser_unreg_mem_fmr(struct iscsi_iser_task *iser_task,
			enum iser_data_dir cmd_dir)
{
	struct iser_mem_reg *reg = &iser_task->rdma_reg[cmd_dir];
	int ret;

	if (!reg->mem_h)
		return;

	iser_dbg("PHYSICAL Mem.Unregister mem_h %p\n", reg->mem_h);

	ret = ib_fmr_pool_unmap((struct ib_pool_fmr *)reg->mem_h);
	if (ret)
		iser_err("ib_fmr_pool_unmap failed %d\n", ret);

	reg->mem_h = NULL;
}

void iser_unreg_mem_fastreg(struct iscsi_iser_task *iser_task,
			    enum iser_data_dir cmd_dir)
{
	struct iser_mem_reg *reg = &iser_task->rdma_reg[cmd_dir];

	if (!reg->mem_h)
		return;

	iser_reg_desc_put(&iser_task->iser_conn->ib_conn,
			  reg->mem_h);
	reg->mem_h = NULL;
}

/**
 * iser_reg_rdma_mem_fmr - Registers memory intended for RDMA,
 * using FMR (if possible), obtaining rkey and va
 *
 * returns 0 on success, errno code on failure
 */
int iser_reg_rdma_mem_fmr(struct iscsi_iser_task *iser_task,
			  enum iser_data_dir cmd_dir)
{
	struct ib_conn *ib_conn = &iser_task->iser_conn->ib_conn;
	struct iser_device *device = ib_conn->device;
	struct ib_device *ibdev = device->ib_device;
	struct iser_data_buf *mem = &iser_task->data[cmd_dir];
	struct iser_mem_reg *mem_reg;
	int aligned_len;
	int err;
	int i;

	mem_reg = &iser_task->rdma_reg[cmd_dir];

	aligned_len = iser_data_buf_aligned_len(mem, ibdev);
	if (aligned_len != mem->dma_nents) {
		err = fall_to_bounce_buf(iser_task, mem, cmd_dir);
		if (err) {
			iser_err("failed to allocate bounce buffer\n");
			return err;
		}
	}

	/* if there is a single dma entry, FMR is not needed */
	if (mem->dma_nents == 1) {
		return iser_reg_dma(device, mem, mem_reg);
	} else { /* use FMR for multiple dma entries */
		err = iser_reg_page_vec(iser_task, mem, ib_conn->fmr.page_vec,
					mem_reg);
		if (err && err != -EAGAIN) {
			iser_data_buf_dump(mem, ibdev);
			iser_err("mem->dma_nents = %d (dlength = 0x%x)\n",
				 mem->dma_nents,
				 ntoh24(iser_task->desc.iscsi_header.dlength));
			iser_err("page_vec: data_size = 0x%x, length = %d, offset = 0x%x\n",
				 ib_conn->fmr.page_vec->data_size,
				 ib_conn->fmr.page_vec->length,
				 ib_conn->fmr.page_vec->offset);
			for (i = 0; i < ib_conn->fmr.page_vec->length; i++)
				iser_err("page_vec[%d] = 0x%llx\n", i,
					 (unsigned long long)ib_conn->fmr.page_vec->pages[i]);
		}
		if (err)
			return err;
	}
	return 0;
}

static void
iser_set_dif_domain(struct scsi_cmnd *sc, struct ib_sig_attrs *sig_attrs,
		    struct ib_sig_domain *domain)
{
	domain->sig_type = IB_SIG_TYPE_T10_DIF;
	domain->sig.dif.pi_interval = scsi_prot_interval(sc);
	domain->sig.dif.ref_tag = scsi_prot_ref_tag(sc);
	/*
	 * At the moment we hard code those, but in the future
	 * we will take them from sc.
	 */
	domain->sig.dif.apptag_check_mask = 0xffff;
	domain->sig.dif.app_escape = true;
	domain->sig.dif.ref_escape = true;
	if (sc->prot_flags & SCSI_PROT_REF_INCREMENT)
		domain->sig.dif.ref_remap = true;
}

static int
iser_set_sig_attrs(struct scsi_cmnd *sc, struct ib_sig_attrs *sig_attrs)
{
	switch (scsi_get_prot_op(sc)) {
	case SCSI_PROT_WRITE_INSERT:
	case SCSI_PROT_READ_STRIP:
		sig_attrs->mem.sig_type = IB_SIG_TYPE_NONE;
		iser_set_dif_domain(sc, sig_attrs, &sig_attrs->wire);
		sig_attrs->wire.sig.dif.bg_type = IB_T10DIF_CRC;
		break;
	case SCSI_PROT_READ_INSERT:
	case SCSI_PROT_WRITE_STRIP:
		sig_attrs->wire.sig_type = IB_SIG_TYPE_NONE;
		iser_set_dif_domain(sc, sig_attrs, &sig_attrs->mem);
		sig_attrs->mem.sig.dif.bg_type = sc->prot_flags & SCSI_PROT_IP_CHECKSUM ?
						 IB_T10DIF_CSUM : IB_T10DIF_CRC;
		break;
	case SCSI_PROT_READ_PASS:
	case SCSI_PROT_WRITE_PASS:
		iser_set_dif_domain(sc, sig_attrs, &sig_attrs->wire);
		sig_attrs->wire.sig.dif.bg_type = IB_T10DIF_CRC;
		iser_set_dif_domain(sc, sig_attrs, &sig_attrs->mem);
		sig_attrs->mem.sig.dif.bg_type = sc->prot_flags & SCSI_PROT_IP_CHECKSUM ?
						 IB_T10DIF_CSUM : IB_T10DIF_CRC;
		break;
	default:
		iser_err("Unsupported PI operation %d\n",
			 scsi_get_prot_op(sc));
		return -EINVAL;
	}

	return 0;
}
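
/*
 * Summary of the mapping above ("wire" is the domain seen on the network,
 * "mem" is the domain of the local data buffer):
 *
 *	WRITE_INSERT / READ_STRIP: no PI in memory, CRC T10-DIF on the wire
 *	READ_INSERT / WRITE_STRIP: no PI on the wire, CRC or IP checksum
 *	                           (per SCSI_PROT_IP_CHECKSUM) in memory
 *	READ_PASS / WRITE_PASS:    PI in both domains, with the HCA
 *	                           converting block-guard types as needed
 */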

static inline void
iser_set_prot_checks(struct scsi_cmnd *sc, u8 *mask)
{
	*mask = 0;
	if (sc->prot_flags & SCSI_PROT_REF_CHECK)
		*mask |= ISER_CHECK_REFTAG;
	if (sc->prot_flags & SCSI_PROT_GUARD_CHECK)
		*mask |= ISER_CHECK_GUARD;
}

static void
iser_inv_rkey(struct ib_send_wr *inv_wr, struct ib_mr *mr)
{
	u32 rkey;

	memset(inv_wr, 0, sizeof(*inv_wr));
	inv_wr->opcode = IB_WR_LOCAL_INV;
	inv_wr->wr_id = ISER_FASTREG_LI_WRID;
	inv_wr->ex.invalidate_rkey = mr->rkey;

	rkey = ib_inc_rkey(mr->rkey);
	ib_update_fast_reg_key(mr, rkey);
}
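
/*
 * Example: ib_inc_rkey() bumps only the low "key" byte of the rkey and
 * leaves the MR index bits intact, e.g. 0x012345ff -> 0x01234500. Posting
 * IB_WR_LOCAL_INV for the old rkey and re-registering under the bumped key
 * means any stale remote access using the previous rkey is rejected by
 * the HCA.
 */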

static int
iser_reg_sig_mr(struct iscsi_iser_task *iser_task,
		struct iser_pi_context *pi_ctx,
		struct iser_mem_reg *data_reg,
		struct iser_mem_reg *prot_reg,
		struct iser_mem_reg *sig_reg)
{
	struct ib_conn *ib_conn = &iser_task->iser_conn->ib_conn;
	struct ib_send_wr sig_wr, inv_wr;
	struct ib_send_wr *bad_wr, *wr = NULL;
	struct ib_sig_attrs sig_attrs;
	int ret;

	memset(&sig_attrs, 0, sizeof(sig_attrs));
	ret = iser_set_sig_attrs(iser_task->sc, &sig_attrs);
	if (ret)
		goto err;

	iser_set_prot_checks(iser_task->sc, &sig_attrs.check_mask);

	if (!pi_ctx->sig_mr_valid) {
		iser_inv_rkey(&inv_wr, pi_ctx->sig_mr);
		wr = &inv_wr;
	}

	memset(&sig_wr, 0, sizeof(sig_wr));
	sig_wr.opcode = IB_WR_REG_SIG_MR;
	sig_wr.wr_id = ISER_FASTREG_LI_WRID;
	sig_wr.sg_list = &data_reg->sge;
	sig_wr.num_sge = 1;
	sig_wr.wr.sig_handover.sig_attrs = &sig_attrs;
	sig_wr.wr.sig_handover.sig_mr = pi_ctx->sig_mr;
	if (scsi_prot_sg_count(iser_task->sc))
		sig_wr.wr.sig_handover.prot = &prot_reg->sge;
	sig_wr.wr.sig_handover.access_flags = IB_ACCESS_LOCAL_WRITE |
					      IB_ACCESS_REMOTE_READ |
					      IB_ACCESS_REMOTE_WRITE;

	if (!wr)
		wr = &sig_wr;
	else
		wr->next = &sig_wr;

	ret = ib_post_send(ib_conn->qp, wr, &bad_wr);
	if (ret) {
		iser_err("reg_sig_mr failed, ret:%d\n", ret);
		goto err;
	}
	pi_ctx->sig_mr_valid = 0;

	sig_reg->sge.lkey = pi_ctx->sig_mr->lkey;
	sig_reg->rkey = pi_ctx->sig_mr->rkey;
	sig_reg->sge.addr = 0;
	sig_reg->sge.length = scsi_transfer_length(iser_task->sc);

	iser_dbg("sig_sge: lkey: 0x%x, rkey: 0x%x, addr: 0x%llx, length: %u\n",
		 sig_reg->sge.lkey, sig_reg->rkey, sig_reg->sge.addr,
		 sig_reg->sge.length);
err:
	return ret;
}
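
/*
 * Note: IB_WR_REG_SIG_MR hands the data SGE (and, when PI is carried
 * separately, the protection SGE) over to the signature MR. From then on
 * the single sig_reg SGE describes the whole protected transfer, which is
 * why its length is scsi_transfer_length() - data plus PI as applicable -
 * rather than the raw data length.
 */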

static int iser_fast_reg_mr(struct iscsi_iser_task *iser_task,
			    struct iser_data_buf *mem,
			    struct iser_reg_resources *rsc,
			    struct iser_mem_reg *reg)
{
	struct ib_conn *ib_conn = &iser_task->iser_conn->ib_conn;
	struct iser_device *device = ib_conn->device;
	struct ib_mr *mr;
	struct ib_fast_reg_page_list *frpl;
	struct ib_send_wr fastreg_wr, inv_wr;
	struct ib_send_wr *bad_wr, *wr = NULL;
	int ret, offset, size, plen;

	/* if there is a single dma entry, the dma mr suffices */
	if (mem->dma_nents == 1)
		return iser_reg_dma(device, mem, reg);

	mr = rsc->mr;
	frpl = rsc->frpl;

	plen = iser_sg_to_page_vec(mem, device->ib_device, frpl->page_list,
				   &offset, &size);
	if (plen * SIZE_4K < size) {
		iser_err("fast reg page_list too short to hold this SG\n");
		return -EINVAL;
	}

	if (!rsc->mr_valid) {
		iser_inv_rkey(&inv_wr, mr);
		wr = &inv_wr;
	}

	/* Prepare FASTREG WR */
	memset(&fastreg_wr, 0, sizeof(fastreg_wr));
	fastreg_wr.wr_id = ISER_FASTREG_LI_WRID;
	fastreg_wr.opcode = IB_WR_FAST_REG_MR;
	fastreg_wr.wr.fast_reg.iova_start = frpl->page_list[0] + offset;
	fastreg_wr.wr.fast_reg.page_list = frpl;
	fastreg_wr.wr.fast_reg.page_list_len = plen;
	fastreg_wr.wr.fast_reg.page_shift = SHIFT_4K;
	fastreg_wr.wr.fast_reg.length = size;
	fastreg_wr.wr.fast_reg.rkey = mr->rkey;
	fastreg_wr.wr.fast_reg.access_flags = (IB_ACCESS_LOCAL_WRITE |
					       IB_ACCESS_REMOTE_WRITE |
					       IB_ACCESS_REMOTE_READ);

	if (!wr)
		wr = &fastreg_wr;
	else
		wr->next = &fastreg_wr;

	ret = ib_post_send(ib_conn->qp, wr, &bad_wr);
	if (ret) {
		iser_err("fast registration failed, ret:%d\n", ret);
		return ret;
	}
	rsc->mr_valid = 0;

	reg->sge.lkey = mr->lkey;
	reg->rkey = mr->rkey;
	reg->sge.addr = frpl->page_list[0] + offset;
	reg->sge.length = size;

	return ret;
}
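
/*
 * The WR chain posted above is at most (sketch):
 *
 *	inv_wr (IB_WR_LOCAL_INV, old rkey) --> fastreg_wr (IB_WR_FAST_REG_MR)
 *
 * Both carry ISER_FASTREG_LI_WRID and go out unsignaled in a single
 * ib_post_send() call; the send queue executes them in order, so the
 * invalidate is guaranteed to precede the re-registration without an
 * intervening completion wait.
 */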

/**
 * iser_reg_rdma_mem_fastreg - Registers memory intended for RDMA,
 * using Fast Registration WR (if possible), obtaining rkey and va
 *
 * returns 0 on success, errno code on failure
 */
int iser_reg_rdma_mem_fastreg(struct iscsi_iser_task *iser_task,
			      enum iser_data_dir cmd_dir)
{
	struct ib_conn *ib_conn = &iser_task->iser_conn->ib_conn;
	struct iser_device *device = ib_conn->device;
	struct ib_device *ibdev = device->ib_device;
	struct iser_data_buf *mem = &iser_task->data[cmd_dir];
	struct iser_mem_reg *mem_reg = &iser_task->rdma_reg[cmd_dir];
	struct iser_fr_desc *desc = NULL;
	int err, aligned_len;

	aligned_len = iser_data_buf_aligned_len(mem, ibdev);
	if (aligned_len != mem->dma_nents) {
		err = fall_to_bounce_buf(iser_task, mem, cmd_dir);
		if (err) {
			iser_err("failed to allocate bounce buffer\n");
			return err;
		}
	}

	if (mem->dma_nents != 1 ||
	    scsi_get_prot_op(iser_task->sc) != SCSI_PROT_NORMAL) {
		desc = iser_reg_desc_get(ib_conn);
		mem_reg->mem_h = desc;
	}

	err = iser_fast_reg_mr(iser_task, mem,
			       desc ? &desc->rsc : NULL, mem_reg);
	if (err)
		goto err_reg;

	if (scsi_get_prot_op(iser_task->sc) != SCSI_PROT_NORMAL) {
		struct iser_mem_reg prot_reg;

		memset(&prot_reg, 0, sizeof(prot_reg));
		if (scsi_prot_sg_count(iser_task->sc)) {
			mem = &iser_task->prot[cmd_dir];
			aligned_len = iser_data_buf_aligned_len(mem, ibdev);
			if (aligned_len != mem->dma_nents) {
				err = fall_to_bounce_buf(iser_task, mem,
							 cmd_dir);
				if (err) {
					iser_err("failed to allocate bounce buffer\n");
					return err;
				}
			}

			err = iser_fast_reg_mr(iser_task, mem,
					       &desc->pi_ctx->rsc, &prot_reg);
			if (err)
				goto err_reg;
		}

		err = iser_reg_sig_mr(iser_task, desc->pi_ctx, mem_reg,
				      &prot_reg, mem_reg);
		if (err) {
			iser_err("Failed to register signature mr\n");
			return err;
		}
		desc->pi_ctx->sig_protected = 1;
	}

	return 0;
err_reg:
	if (desc)
		iser_reg_desc_put(ib_conn, desc);

	return err;
}