/*
 * Copyright (c) 2004, 2005, 2006 Voltaire, Inc. All rights reserved.
 * Copyright (c) 2013-2014 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *	- Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *	- Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/scatterlist.h>

#include "iscsi_iser.h"

#define ISER_KMALLOC_THRESHOLD 0x20000 /* 128K - kmalloc limit */

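/*
 * Fast registration descriptors live on a per-connection free list
 * (ib_conn->fastreg.pool) protected by ib_conn->lock. A registration
 * path brackets its use of a descriptor roughly like this (sketch,
 * error handling omitted):
 *
 *	desc = iser_reg_desc_get(ib_conn);
 *	... post registration WRs using desc->data_mr ...
 *	iser_reg_desc_put(ib_conn, desc);
 *
 * Note that iser_reg_desc_get() does not check for an empty list; the
 * pool is presumably sized so that a descriptor is always available
 * when a task needs one.
 */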
struct fast_reg_descriptor *
iser_reg_desc_get(struct ib_conn *ib_conn)
{
	struct fast_reg_descriptor *desc;
	unsigned long flags;

	spin_lock_irqsave(&ib_conn->lock, flags);
	desc = list_first_entry(&ib_conn->fastreg.pool,
				struct fast_reg_descriptor, list);
	list_del(&desc->list);
	spin_unlock_irqrestore(&ib_conn->lock, flags);

	return desc;
}

void
iser_reg_desc_put(struct ib_conn *ib_conn,
		  struct fast_reg_descriptor *desc)
{
	unsigned long flags;

	spin_lock_irqsave(&ib_conn->lock, flags);
	list_add(&desc->list, &ib_conn->fastreg.pool);
	spin_unlock_irqrestore(&ib_conn->lock, flags);
}

/**
 * iser_start_rdma_unaligned_sg - allocate a bounce buffer for an
 * RDMA-unaligned scatterlist and, for a write, copy the data into it
 * @iser_task: iser task descriptor
 * @data:      data buffer whose scatterlist violates the alignment rules
 * @cmd_dir:   command data direction (ISER_DIR_OUT for writes)
 */
static int iser_start_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
					struct iser_data_buf *data,
					enum iser_data_dir cmd_dir)
{
	struct ib_device *dev = iser_task->iser_conn->ib_conn.device->ib_device;
	struct scatterlist *sgl = data->sg;
	struct scatterlist *sg;
	char *mem = NULL;
	unsigned long cmd_data_len = data->data_len;
	int dma_nents, i;

	if (cmd_data_len > ISER_KMALLOC_THRESHOLD)
		mem = (void *)__get_free_pages(GFP_ATOMIC,
		      ilog2(roundup_pow_of_two(cmd_data_len)) - PAGE_SHIFT);
	else
		mem = kmalloc(cmd_data_len, GFP_ATOMIC);

	if (mem == NULL) {
		iser_err("Failed to allocate mem size %d %d for copying sglist\n",
			 data->size, (int)cmd_data_len);
		return -ENOMEM;
	}

	if (cmd_dir == ISER_DIR_OUT) {
		/* copy the unaligned sg into the buffer which is used for RDMA */
		char *p, *from;

		sgl = data->sg;
		p = mem;
		for_each_sg(sgl, sg, data->size, i) {
			from = kmap_atomic(sg_page(sg));
			memcpy(p,
			       from + sg->offset,
			       sg->length);
			kunmap_atomic(from);
			p += sg->length;
		}
	}

	sg_init_one(&data->sg_single, mem, cmd_data_len);
	data->orig_sg = data->sg;
	data->sg = &data->sg_single;
	data->copy_buf = mem;
	dma_nents = ib_dma_map_sg(dev, data->sg, 1,
				  (cmd_dir == ISER_DIR_OUT) ?
				  DMA_TO_DEVICE : DMA_FROM_DEVICE);
	BUG_ON(dma_nents == 0);

	data->dma_nents = dma_nents;

	return 0;
}

/**
 * iser_finalize_rdma_unaligned_sg - unmap the bounce buffer and, for a
 * read, copy the received data back to the original scatterlist
 * @iser_task: iser task descriptor
 * @data:      data buffer that was redirected to the bounce buffer
 * @cmd_dir:   command data direction
 */
void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
				     struct iser_data_buf *data,
				     enum iser_data_dir cmd_dir)
{
	struct ib_device *dev;
	unsigned long cmd_data_len;

	dev = iser_task->iser_conn->ib_conn.device->ib_device;

	ib_dma_unmap_sg(dev, data->sg, 1,
			(cmd_dir == ISER_DIR_OUT) ?
			DMA_TO_DEVICE : DMA_FROM_DEVICE);

	if (cmd_dir == ISER_DIR_IN) {
		char *mem;
		struct scatterlist *sgl, *sg;
		unsigned char *p, *to;
		unsigned int sg_size;
		int i;

		/* copy back the data read via RDMA to the unaligned sg */
		mem = data->copy_buf;

		sgl = data->sg;
		sg_size = data->size;

		p = mem;
		for_each_sg(sgl, sg, sg_size, i) {
			to = kmap_atomic(sg_page(sg));
			memcpy(to + sg->offset,
			       p,
			       sg->length);
			kunmap_atomic(to);
			p += sg->length;
		}
	}

	cmd_data_len = data->data_len;

	if (cmd_data_len > ISER_KMALLOC_THRESHOLD)
		free_pages((unsigned long)data->copy_buf,
			   ilog2(roundup_pow_of_two(cmd_data_len)) - PAGE_SHIFT);
	else
		kfree(data->copy_buf);

	data->copy_buf = NULL;
}

#define IS_4K_ALIGNED(addr)	((((unsigned long)addr) & ~MASK_4K) == 0)

/**
 * iser_sg_to_page_vec - Translate scatterlist entries to a vector of 4K-page
 * DMA addresses and return the length of the resulting vector (which may be
 * shorter than the original scatterlist due to compaction).
 *
 * The "page vec" is built under the assumption that the SG meets the RDMA
 * alignment requirements. Other than the first and last SG elements, all
 * the "internal" elements can be compacted into a list whose elements are
 * dma addresses of physical pages. The code also supports the odd case
 * where several fragments of the same page appear in the SG as consecutive
 * elements, and it handles a single-entry SG.
 */

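/*
 * Illustration with hypothetical addresses: a dma-mapped SG of three
 * entries {addr 0x10200, len 0xe00}, {0x11000, 0x1000}, {0x12000, 0x600}
 * yields the page vector {0x10000, 0x11000, 0x12000} with
 * *offset = 0x200 and *data_size = 0x2400, and a return value of 3.
 */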
static int iser_sg_to_page_vec(struct iser_data_buf *data,
			       struct ib_device *ibdev, u64 *pages,
			       int *offset, int *data_size)
{
	struct scatterlist *sg, *sgl = data->sg;
	u64 start_addr, end_addr, page, chunk_start = 0;
	unsigned long total_sz = 0;
	unsigned int dma_len;
	int i, new_chunk, cur_page, last_ent = data->dma_nents - 1;

	/* compute the offset of first element */
	*offset = (u64) sgl[0].offset & ~MASK_4K;

	new_chunk = 1;
	cur_page = 0;
	for_each_sg(sgl, sg, data->dma_nents, i) {
		start_addr = ib_sg_dma_address(ibdev, sg);
		if (new_chunk)
			chunk_start = start_addr;
		dma_len = ib_sg_dma_len(ibdev, sg);
		end_addr = start_addr + dma_len;
		total_sz += dma_len;

		/* collect page fragments until aligned or end of SG list */
		if (!IS_4K_ALIGNED(end_addr) && i < last_ent) {
			new_chunk = 0;
			continue;
		}
		new_chunk = 1;

		/* address of the first page in the contiguous chunk;
		   masking relevant for the very first SG entry,
		   which might be unaligned */
		page = chunk_start & MASK_4K;
		do {
			pages[cur_page++] = page;
			page += SIZE_4K;
		} while (page < end_addr);
	}

	*data_size = total_sz;
	iser_dbg("page_vec->data_size:%d cur_page %d\n",
		 *data_size, cur_page);
	return cur_page;
}

/**
 * iser_data_buf_aligned_len - Find the maximal prefix of a scatter-gather
 * list that is correctly aligned for RDMA, and return the number of entries
 * in that prefix. Supports the case where consecutive SG elements are
 * actually fragments of the same physical page.
 */
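/*
 * For example (hypothetical addresses), the dma-mapped SG
 * {addr 0x10000, len 0x1000}, {0x11000, 0x1000}, {0x12800, 0x800}
 * yields 2: the first boundary (0x11000) is 4K aligned and contiguous,
 * but the third entry starts at an address that is neither contiguous
 * with the second entry nor 4K aligned, so the walk stops there.
 */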
static int iser_data_buf_aligned_len(struct iser_data_buf *data,
				     struct ib_device *ibdev)
{
	struct scatterlist *sg, *sgl, *next_sg = NULL;
	u64 start_addr, end_addr;
	int i, ret_len, start_check = 0;

	if (data->dma_nents == 1)
		return 1;

	sgl = data->sg;
	start_addr = ib_sg_dma_address(ibdev, sgl);

	for_each_sg(sgl, sg, data->dma_nents, i) {
		if (start_check && !IS_4K_ALIGNED(start_addr))
			break;

		next_sg = sg_next(sg);
		if (!next_sg)
			break;

		end_addr = start_addr + ib_sg_dma_len(ibdev, sg);
		start_addr = ib_sg_dma_address(ibdev, next_sg);

		if (end_addr == start_addr) {
			start_check = 0;
			continue;
		} else
			start_check = 1;

		if (!IS_4K_ALIGNED(end_addr))
			break;
	}
	ret_len = (next_sg) ? i : i+1;
	iser_dbg("Found %d aligned entries out of %d in sg:0x%p\n",
		 ret_len, data->dma_nents, data);
	return ret_len;
}

static void iser_data_buf_dump(struct iser_data_buf *data,
			       struct ib_device *ibdev)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(data->sg, sg, data->dma_nents, i)
		iser_dbg("sg[%d] dma_addr:0x%lX page:0x%p "
			 "off:0x%x sz:0x%x dma_len:0x%x\n",
			 i, (unsigned long)ib_sg_dma_address(ibdev, sg),
			 sg_page(sg), sg->offset,
			 sg->length, ib_sg_dma_len(ibdev, sg));
}

static void iser_dump_page_vec(struct iser_page_vec *page_vec)
{
	int i;

	iser_err("page vec length %d data size %d\n",
		 page_vec->length, page_vec->data_size);
	for (i = 0; i < page_vec->length; i++)
		iser_err("%d %lx\n", i, (unsigned long)page_vec->pages[i]);
}

int iser_dma_map_task_data(struct iscsi_iser_task *iser_task,
			   struct iser_data_buf *data,
			   enum iser_data_dir iser_dir,
			   enum dma_data_direction dma_dir)
{
	struct ib_device *dev;

	iser_task->dir[iser_dir] = 1;
	dev = iser_task->iser_conn->ib_conn.device->ib_device;

	data->dma_nents = ib_dma_map_sg(dev, data->sg, data->size, dma_dir);
	if (data->dma_nents == 0) {
		iser_err("dma_map_sg failed!!!\n");
		return -EINVAL;
	}
	return 0;
}

void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task,
			      struct iser_data_buf *data,
			      enum dma_data_direction dir)
{
	struct ib_device *dev;

	dev = iser_task->iser_conn->ib_conn.device->ib_device;
	ib_dma_unmap_sg(dev, data->sg, data->size, dir);
}

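/*
 * fall_to_bounce_buf - slow path taken when the command's scatterlist
 * violates the device's 4K alignment requirements (only @aligned_len of
 * the entries were usable). The original dma mapping is torn down and
 * the data is redirected through a single contiguous bounce buffer,
 * which is then dma-mapped in its place.
 */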
static int fall_to_bounce_buf(struct iscsi_iser_task *iser_task,
			      struct iser_data_buf *mem,
			      enum iser_data_dir cmd_dir,
			      int aligned_len)
{
	struct iscsi_conn *iscsi_conn = iser_task->iser_conn->iscsi_conn;
	struct iser_device *device = iser_task->iser_conn->ib_conn.device;

	iscsi_conn->fmr_unalign_cnt++;
	iser_warn("rdma alignment violation (%d/%d aligned) or FMR not supported\n",
		  aligned_len, mem->size);

	if (iser_debug_level > 0)
		iser_data_buf_dump(mem, device->ib_device);

	/* unmap the command data before accessing it */
	iser_dma_unmap_task_data(iser_task, mem,
				 (cmd_dir == ISER_DIR_OUT) ?
				 DMA_TO_DEVICE : DMA_FROM_DEVICE);

	/*
	 * allocate a copy buffer; if we are writing, also copy the
	 * unaligned scatterlist into it, then dma map the copy
	 */
	if (iser_start_rdma_unaligned_sg(iser_task, mem, cmd_dir) != 0)
		return -ENOMEM;

	return 0;
}

/**
 * iser_reg_page_vec - Register the physical pages of a memory buffer
 * with the connection's FMR pool, filling in @mem_reg for the task
 *
 * returns: 0 on success, errno code on failure
 */
static
int iser_reg_page_vec(struct iscsi_iser_task *iser_task,
		      struct iser_data_buf *mem,
		      struct iser_page_vec *page_vec,
		      struct iser_mem_reg *mem_reg)
{
	struct ib_conn *ib_conn = &iser_task->iser_conn->ib_conn;
	struct iser_device *device = ib_conn->device;
	struct ib_pool_fmr *fmr;
	int ret, plen;

	plen = iser_sg_to_page_vec(mem, device->ib_device,
				   page_vec->pages,
				   &page_vec->offset,
				   &page_vec->data_size);
	page_vec->length = plen;
	if (plen * SIZE_4K < page_vec->data_size) {
		iser_err("page vec too short to hold this SG\n");
		iser_data_buf_dump(mem, device->ib_device);
		iser_dump_page_vec(page_vec);
		return -EINVAL;
	}

	fmr = ib_fmr_pool_map_phys(ib_conn->fmr.pool,
				   page_vec->pages,
				   page_vec->length,
				   page_vec->pages[0]);
	if (IS_ERR(fmr)) {
		ret = PTR_ERR(fmr);
		iser_err("ib_fmr_pool_map_phys failed: %d\n", ret);
		return ret;
	}

	mem_reg->sge.lkey = fmr->fmr->lkey;
	mem_reg->rkey = fmr->fmr->rkey;
	mem_reg->sge.addr = page_vec->pages[0] + page_vec->offset;
	mem_reg->sge.length = page_vec->data_size;
	mem_reg->mem_h = fmr;

	return 0;
}

/**
 * Unregister memory (previously registered using FMR).
 * If the memory was not FMR-registered, this does nothing.
 */
void iser_unreg_mem_fmr(struct iscsi_iser_task *iser_task,
			enum iser_data_dir cmd_dir)
{
	struct iser_mem_reg *reg = &iser_task->rdma_reg[cmd_dir];
	int ret;

	if (!reg->mem_h)
		return;

	iser_dbg("PHYSICAL Mem.Unregister mem_h %p\n", reg->mem_h);

	ret = ib_fmr_pool_unmap((struct ib_pool_fmr *)reg->mem_h);
	if (ret)
		iser_err("ib_fmr_pool_unmap failed %d\n", ret);

	reg->mem_h = NULL;
}

void iser_unreg_mem_fastreg(struct iscsi_iser_task *iser_task,
			    enum iser_data_dir cmd_dir)
{
	struct iser_mem_reg *reg = &iser_task->rdma_reg[cmd_dir];

	if (!reg->mem_h)
		return;

	iser_reg_desc_put(&iser_task->iser_conn->ib_conn,
			  reg->mem_h);
	reg->mem_h = NULL;
}

/**
 * iser_reg_rdma_mem_fmr - Registers memory intended for RDMA,
 * using FMR (if possible), obtaining the rkey and va
 *
 * returns 0 on success, errno code on failure
 */
int iser_reg_rdma_mem_fmr(struct iscsi_iser_task *iser_task,
			  enum iser_data_dir cmd_dir)
{
	struct ib_conn *ib_conn = &iser_task->iser_conn->ib_conn;
	struct iser_device *device = ib_conn->device;
	struct ib_device *ibdev = device->ib_device;
	struct iser_data_buf *mem = &iser_task->data[cmd_dir];
	struct iser_mem_reg *mem_reg;
	int aligned_len;
	int err;
	int i;
	struct scatterlist *sg;

	mem_reg = &iser_task->rdma_reg[cmd_dir];

	aligned_len = iser_data_buf_aligned_len(mem, ibdev);
	if (aligned_len != mem->dma_nents) {
		err = fall_to_bounce_buf(iser_task, mem,
					 cmd_dir, aligned_len);
		if (err) {
			iser_err("failed to allocate bounce buffer\n");
			return err;
		}
	}

	/* if there is a single dma entry, FMR is not needed */
	if (mem->dma_nents == 1) {
		sg = mem->sg;

		mem_reg->sge.lkey = device->mr->lkey;
		mem_reg->rkey = device->mr->rkey;
		mem_reg->sge.length = ib_sg_dma_len(ibdev, &sg[0]);
		mem_reg->sge.addr = ib_sg_dma_address(ibdev, &sg[0]);

		iser_dbg("PHYSICAL Mem.register: lkey: 0x%08X rkey: 0x%08X "
			 "va: 0x%08lX sz: %ld\n",
			 (unsigned int)mem_reg->sge.lkey,
			 (unsigned int)mem_reg->rkey,
			 (unsigned long)mem_reg->sge.addr,
			 (unsigned long)mem_reg->sge.length);
	} else { /* use FMR for multiple dma entries */
		err = iser_reg_page_vec(iser_task, mem, ib_conn->fmr.page_vec,
					mem_reg);
		if (err && err != -EAGAIN) {
			iser_data_buf_dump(mem, ibdev);
			iser_err("mem->dma_nents = %d (dlength = 0x%x)\n",
				 mem->dma_nents,
				 ntoh24(iser_task->desc.iscsi_header.dlength));
			iser_err("page_vec: data_size = 0x%x, length = %d, offset = 0x%x\n",
				 ib_conn->fmr.page_vec->data_size,
				 ib_conn->fmr.page_vec->length,
				 ib_conn->fmr.page_vec->offset);
			for (i = 0; i < ib_conn->fmr.page_vec->length; i++)
				iser_err("page_vec[%d] = 0x%llx\n", i,
					 (unsigned long long)ib_conn->fmr.page_vec->pages[i]);
		}
		if (err)
			return err;
	}
	return 0;
}

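/*
 * T10-PI (DIF) offload: each protected command describes two signature
 * domains - "mem" (how protection information appears in local memory)
 * and "wire" (how it appears on the network). iser_set_dif_domain()
 * fills in a single domain from the scsi_cmnd, and iser_set_sig_attrs()
 * selects which domains are active for the given SCSI protection
 * operation; e.g. for WRITE_INSERT there is no PI in memory
 * (IB_SIG_TYPE_NONE) and the HCA generates CRC-type DIF on the wire.
 */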
static void
iser_set_dif_domain(struct scsi_cmnd *sc, struct ib_sig_attrs *sig_attrs,
		    struct ib_sig_domain *domain)
{
	domain->sig_type = IB_SIG_TYPE_T10_DIF;
	domain->sig.dif.pi_interval = scsi_prot_interval(sc);
	domain->sig.dif.ref_tag = scsi_prot_ref_tag(sc);
	/*
	 * At the moment we hard code those, but in the future
	 * we will take them from sc.
	 */
	domain->sig.dif.apptag_check_mask = 0xffff;
	domain->sig.dif.app_escape = true;
	domain->sig.dif.ref_escape = true;
	if (sc->prot_flags & SCSI_PROT_REF_INCREMENT)
		domain->sig.dif.ref_remap = true;
}

static int
iser_set_sig_attrs(struct scsi_cmnd *sc, struct ib_sig_attrs *sig_attrs)
{
	switch (scsi_get_prot_op(sc)) {
	case SCSI_PROT_WRITE_INSERT:
	case SCSI_PROT_READ_STRIP:
		sig_attrs->mem.sig_type = IB_SIG_TYPE_NONE;
		iser_set_dif_domain(sc, sig_attrs, &sig_attrs->wire);
		sig_attrs->wire.sig.dif.bg_type = IB_T10DIF_CRC;
		break;
	case SCSI_PROT_READ_INSERT:
	case SCSI_PROT_WRITE_STRIP:
		sig_attrs->wire.sig_type = IB_SIG_TYPE_NONE;
		iser_set_dif_domain(sc, sig_attrs, &sig_attrs->mem);
		sig_attrs->mem.sig.dif.bg_type = sc->prot_flags & SCSI_PROT_IP_CHECKSUM ?
						 IB_T10DIF_CSUM : IB_T10DIF_CRC;
		break;
	case SCSI_PROT_READ_PASS:
	case SCSI_PROT_WRITE_PASS:
		iser_set_dif_domain(sc, sig_attrs, &sig_attrs->wire);
		sig_attrs->wire.sig.dif.bg_type = IB_T10DIF_CRC;
		iser_set_dif_domain(sc, sig_attrs, &sig_attrs->mem);
		sig_attrs->mem.sig.dif.bg_type = sc->prot_flags & SCSI_PROT_IP_CHECKSUM ?
						 IB_T10DIF_CSUM : IB_T10DIF_CRC;
		break;
	default:
		iser_err("Unsupported PI operation %d\n",
			 scsi_get_prot_op(sc));
		return -EINVAL;
	}

	return 0;
}

static inline void
iser_set_prot_checks(struct scsi_cmnd *sc, u8 *mask)
{
	*mask = 0;
	if (sc->prot_flags & SCSI_PROT_REF_CHECK)
		*mask |= ISER_CHECK_REFTAG;
	if (sc->prot_flags & SCSI_PROT_GUARD_CHECK)
		*mask |= ISER_CHECK_GUARD;
}

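/*
 * Before a memory region whose rkey may still be cached by the remote
 * side is reused, a local invalidate is posted and the low 8 key bits
 * of the rkey are bumped, so the next registration hands out a fresh
 * key.
 */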
static void
iser_inv_rkey(struct ib_send_wr *inv_wr, struct ib_mr *mr)
{
	u32 rkey;

	memset(inv_wr, 0, sizeof(*inv_wr));
	inv_wr->opcode = IB_WR_LOCAL_INV;
	inv_wr->wr_id = ISER_FASTREG_LI_WRID;
	inv_wr->ex.invalidate_rkey = mr->rkey;

	rkey = ib_inc_rkey(mr->rkey);
	ib_update_fast_reg_key(mr, rkey);
}

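/*
 * iser_reg_sig_mr - bind the already-registered data (and, when
 * present, protection) regions to a signature-enabled MR. A single
 * IB_WR_REG_SIG_MR work request hands both regions to the HCA along
 * with the DIF attributes, so the protected transfer is exposed
 * through one lkey/rkey pair in @sig_reg.
 */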
static int
iser_reg_sig_mr(struct iscsi_iser_task *iser_task,
		struct fast_reg_descriptor *desc,
		struct iser_mem_reg *data_reg,
		struct iser_mem_reg *prot_reg,
		struct iser_mem_reg *sig_reg)
{
	struct ib_conn *ib_conn = &iser_task->iser_conn->ib_conn;
	struct iser_pi_context *pi_ctx = desc->pi_ctx;
	struct ib_send_wr sig_wr, inv_wr;
	struct ib_send_wr *bad_wr, *wr = NULL;
	struct ib_sig_attrs sig_attrs;
	int ret;

	memset(&sig_attrs, 0, sizeof(sig_attrs));
	ret = iser_set_sig_attrs(iser_task->sc, &sig_attrs);
	if (ret)
		goto err;

	iser_set_prot_checks(iser_task->sc, &sig_attrs.check_mask);

	if (!(desc->reg_indicators & ISER_SIG_KEY_VALID)) {
		iser_inv_rkey(&inv_wr, pi_ctx->sig_mr);
		wr = &inv_wr;
	}

	memset(&sig_wr, 0, sizeof(sig_wr));
	sig_wr.opcode = IB_WR_REG_SIG_MR;
	sig_wr.wr_id = ISER_FASTREG_LI_WRID;
	sig_wr.sg_list = &data_reg->sge;
	sig_wr.num_sge = 1;
	sig_wr.wr.sig_handover.sig_attrs = &sig_attrs;
	sig_wr.wr.sig_handover.sig_mr = pi_ctx->sig_mr;
	if (scsi_prot_sg_count(iser_task->sc))
		sig_wr.wr.sig_handover.prot = &prot_reg->sge;
	sig_wr.wr.sig_handover.access_flags = IB_ACCESS_LOCAL_WRITE |
					      IB_ACCESS_REMOTE_READ |
					      IB_ACCESS_REMOTE_WRITE;

	if (!wr)
		wr = &sig_wr;
	else
		wr->next = &sig_wr;

	ret = ib_post_send(ib_conn->qp, wr, &bad_wr);
	if (ret) {
		iser_err("reg_sig_mr failed, ret:%d\n", ret);
		goto err;
	}
	desc->reg_indicators &= ~ISER_SIG_KEY_VALID;

	sig_reg->sge.lkey = pi_ctx->sig_mr->lkey;
	sig_reg->rkey = pi_ctx->sig_mr->rkey;
	sig_reg->sge.addr = 0;
	sig_reg->sge.length = scsi_transfer_length(iser_task->sc);

	iser_dbg("sig_sge: lkey: 0x%x, rkey: 0x%x, addr: 0x%llx, length: %u\n",
		 sig_reg->sge.lkey, sig_reg->rkey, sig_reg->sge.addr,
		 sig_reg->sge.length);
err:
	return ret;
}

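/*
 * iser_fast_reg_mr - register one iser_data_buf through a fast
 * registration work request. A single-entry SG is served by the
 * device's global DMA MR and no WR is posted; otherwise the SG is
 * flattened into the descriptor's page list and an IB_WR_FAST_REG_MR
 * is posted, chained after a local invalidate when the descriptor's
 * key has already been handed out. @ind selects between the data and
 * the protection MR of @desc.
 */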
static int iser_fast_reg_mr(struct iscsi_iser_task *iser_task,
			    struct iser_data_buf *mem,
			    struct fast_reg_descriptor *desc,
			    enum iser_reg_indicator ind,
			    struct iser_mem_reg *reg)
{
	struct ib_conn *ib_conn = &iser_task->iser_conn->ib_conn;
	struct iser_device *device = ib_conn->device;
	struct ib_device *ibdev = device->ib_device;
	struct ib_mr *mr;
	struct ib_fast_reg_page_list *frpl;
	struct ib_send_wr fastreg_wr, inv_wr;
	struct ib_send_wr *bad_wr, *wr = NULL;
	int ret, offset, size, plen;

	/* if there is a single dma entry, the dma mr suffices */
	if (mem->dma_nents == 1) {
		struct scatterlist *sg = mem->sg;

		reg->sge.lkey = device->mr->lkey;
		reg->rkey = device->mr->rkey;
		reg->sge.addr = ib_sg_dma_address(ibdev, &sg[0]);
		reg->sge.length = ib_sg_dma_len(ibdev, &sg[0]);

		iser_dbg("Single DMA entry: lkey=0x%x, addr=0x%llx, length=0x%x\n",
			 reg->sge.lkey, reg->sge.addr, reg->sge.length);
		return 0;
	}

	if (ind == ISER_DATA_KEY_VALID) {
		mr = desc->data_mr;
		frpl = desc->data_frpl;
	} else {
		mr = desc->pi_ctx->prot_mr;
		frpl = desc->pi_ctx->prot_frpl;
	}

	plen = iser_sg_to_page_vec(mem, device->ib_device, frpl->page_list,
				   &offset, &size);
	if (plen * SIZE_4K < size) {
		iser_err("fast reg page_list too short to hold this SG\n");
		return -EINVAL;
	}

	if (!(desc->reg_indicators & ind)) {
		iser_inv_rkey(&inv_wr, mr);
		wr = &inv_wr;
	}

	/* Prepare FASTREG WR */
	memset(&fastreg_wr, 0, sizeof(fastreg_wr));
	fastreg_wr.wr_id = ISER_FASTREG_LI_WRID;
	fastreg_wr.opcode = IB_WR_FAST_REG_MR;
	fastreg_wr.wr.fast_reg.iova_start = frpl->page_list[0] + offset;
	fastreg_wr.wr.fast_reg.page_list = frpl;
	fastreg_wr.wr.fast_reg.page_list_len = plen;
	fastreg_wr.wr.fast_reg.page_shift = SHIFT_4K;
	fastreg_wr.wr.fast_reg.length = size;
	fastreg_wr.wr.fast_reg.rkey = mr->rkey;
	fastreg_wr.wr.fast_reg.access_flags = (IB_ACCESS_LOCAL_WRITE |
					       IB_ACCESS_REMOTE_WRITE |
					       IB_ACCESS_REMOTE_READ);

	if (!wr)
		wr = &fastreg_wr;
	else
		wr->next = &fastreg_wr;

	ret = ib_post_send(ib_conn->qp, wr, &bad_wr);
	if (ret) {
		iser_err("fast registration failed, ret:%d\n", ret);
		return ret;
	}
	desc->reg_indicators &= ~ind;

	reg->sge.lkey = mr->lkey;
	reg->rkey = mr->rkey;
	reg->sge.addr = frpl->page_list[0] + offset;
	reg->sge.length = size;

	return ret;
}

/**
 * iser_reg_rdma_mem_fastreg - Registers memory intended for RDMA,
 * using a Fast Registration WR (if possible), obtaining the rkey and va
 *
 * returns 0 on success, errno code on failure
 */
int iser_reg_rdma_mem_fastreg(struct iscsi_iser_task *iser_task,
			      enum iser_data_dir cmd_dir)
{
	struct ib_conn *ib_conn = &iser_task->iser_conn->ib_conn;
	struct iser_device *device = ib_conn->device;
	struct ib_device *ibdev = device->ib_device;
	struct iser_data_buf *mem = &iser_task->data[cmd_dir];
	struct iser_mem_reg *mem_reg = &iser_task->rdma_reg[cmd_dir];
	struct fast_reg_descriptor *desc = NULL;
	int err, aligned_len;

	aligned_len = iser_data_buf_aligned_len(mem, ibdev);
	if (aligned_len != mem->dma_nents) {
		err = fall_to_bounce_buf(iser_task, mem,
					 cmd_dir, aligned_len);
		if (err) {
			iser_err("failed to allocate bounce buffer\n");
			return err;
		}
	}

	if (mem->dma_nents != 1 ||
	    scsi_get_prot_op(iser_task->sc) != SCSI_PROT_NORMAL) {
		desc = iser_reg_desc_get(ib_conn);
		mem_reg->mem_h = desc;
	}

	err = iser_fast_reg_mr(iser_task, mem, desc,
			       ISER_DATA_KEY_VALID, mem_reg);
	if (err)
		goto err_reg;

	if (scsi_get_prot_op(iser_task->sc) != SCSI_PROT_NORMAL) {
		struct iser_mem_reg prot_reg;

		memset(&prot_reg, 0, sizeof(prot_reg));
		if (scsi_prot_sg_count(iser_task->sc)) {
			mem = &iser_task->prot[cmd_dir];
			aligned_len = iser_data_buf_aligned_len(mem, ibdev);
			if (aligned_len != mem->dma_nents) {
				err = fall_to_bounce_buf(iser_task, mem,
							 cmd_dir, aligned_len);
				if (err) {
					iser_err("failed to allocate bounce buffer\n");
					return err;
				}
			}

			err = iser_fast_reg_mr(iser_task, mem, desc,
					       ISER_PROT_KEY_VALID, &prot_reg);
			if (err)
				goto err_reg;
		}

		err = iser_reg_sig_mr(iser_task, desc, mem_reg,
				      &prot_reg, mem_reg);
		if (err) {
			iser_err("Failed to register signature mr\n");
			return err;
		}
		desc->reg_indicators |= ISER_FASTREG_PROTECTED;
	}

	return 0;
err_reg:
	if (desc)
		iser_reg_desc_put(ib_conn, desc);

	return err;
}