/*
 * Copyright (c) 2005 Cisco Systems.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/string.h>
#include <linux/parser.h>
#include <linux/random.h>
#include <linux/jiffies.h>
#include <rdma/ib_cache.h>

#include <linux/atomic.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_tcq.h>
#include <scsi/srp.h>
#include <scsi/scsi_transport_srp.h>

#include "ib_srp.h"

#define DRV_NAME	"ib_srp"
#define PFX		DRV_NAME ": "
#define DRV_VERSION	"2.0"
#define DRV_RELDATE	"July 26, 2015"

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("InfiniBand SCSI RDMA Protocol initiator");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_INFO(release_date, DRV_RELDATE);

static unsigned int srp_sg_tablesize;
static unsigned int cmd_sg_entries;
static unsigned int indirect_sg_entries;
static bool allow_ext_sg;
static bool prefer_fr = true;
static bool register_always = true;
static int topspin_workarounds = 1;

module_param(srp_sg_tablesize, uint, 0444);
MODULE_PARM_DESC(srp_sg_tablesize, "Deprecated name for cmd_sg_entries");

module_param(cmd_sg_entries, uint, 0444);
MODULE_PARM_DESC(cmd_sg_entries,
		 "Default number of gather/scatter entries in the SRP command (default is 12, max 255)");

module_param(indirect_sg_entries, uint, 0444);
MODULE_PARM_DESC(indirect_sg_entries,
		 "Default max number of gather/scatter entries (default is 12, max is " __stringify(SCSI_MAX_SG_CHAIN_SEGMENTS) ")");

module_param(allow_ext_sg, bool, 0444);
MODULE_PARM_DESC(allow_ext_sg,
		 "Default behavior when there are more than cmd_sg_entries S/G entries after mapping; fails the request when false (default false)");

module_param(topspin_workarounds, int, 0444);
MODULE_PARM_DESC(topspin_workarounds,
		 "Enable workarounds for Topspin/Cisco SRP target bugs if != 0");

module_param(prefer_fr, bool, 0444);
MODULE_PARM_DESC(prefer_fr,
		 "Whether to use fast registration if both FMR and fast registration are supported");

module_param(register_always, bool, 0444);
MODULE_PARM_DESC(register_always,
		 "Use memory registration even for contiguous memory regions");

static const struct kernel_param_ops srp_tmo_ops;

static int srp_reconnect_delay = 10;
module_param_cb(reconnect_delay, &srp_tmo_ops, &srp_reconnect_delay,
		S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(reconnect_delay, "Time between successive reconnect attempts");

static int srp_fast_io_fail_tmo = 15;
module_param_cb(fast_io_fail_tmo, &srp_tmo_ops, &srp_fast_io_fail_tmo,
		S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fast_io_fail_tmo,
		 "Number of seconds between the observation of a transport"
		 " layer error and failing all I/O. \"off\" means that this"
		 " functionality is disabled.");

static int srp_dev_loss_tmo = 600;
module_param_cb(dev_loss_tmo, &srp_tmo_ops, &srp_dev_loss_tmo,
		S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dev_loss_tmo,
		 "Maximum number of seconds that the SRP transport should"
		 " insulate transport layer errors. After this time has been"
		 " exceeded the SCSI host is removed. Should be"
		 " between 1 and " __stringify(SCSI_DEVICE_BLOCK_MAX_TIMEOUT)
		 " if fast_io_fail_tmo has not been set. \"off\" means that"
		 " this functionality is disabled.");

static unsigned ch_count;
module_param(ch_count, uint, 0444);
MODULE_PARM_DESC(ch_count,
		 "Number of RDMA channels to use for communication with an SRP target. Using more than one channel improves performance if the HCA supports multiple completion vectors. The default value is the minimum of four times the number of online CPU sockets and the number of completion vectors supported by the HCA.");

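/*
 * Example (with made-up values): all of the parameters above can be set
 * when the module is loaded, e.g.:
 *
 *   modprobe ib_srp cmd_sg_entries=32 ch_count=4
 *
 * The 0444 parameters are read-only once the module is loaded; the
 * timeout parameters declared with module_param_cb() above can also be
 * updated at runtime through /sys/module/ib_srp/parameters/.
 */
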
static void srp_add_one(struct ib_device *device);
static void srp_remove_one(struct ib_device *device, void *client_data);
static void srp_recv_done(struct ib_cq *cq, struct ib_wc *wc);
static void srp_handle_qp_err(struct ib_cq *cq, struct ib_wc *wc,
		const char *opname);
static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event);

static struct scsi_transport_template *ib_srp_transport_template;
static struct workqueue_struct *srp_remove_wq;

static struct ib_client srp_client = {
	.name   = "srp",
	.add    = srp_add_one,
	.remove = srp_remove_one
};

static struct ib_sa_client srp_sa_client;

static int srp_tmo_get(char *buffer, const struct kernel_param *kp)
{
	int tmo = *(int *)kp->arg;

	if (tmo >= 0)
		return sprintf(buffer, "%d", tmo);
	else
		return sprintf(buffer, "off");
}

static int srp_tmo_set(const char *val, const struct kernel_param *kp)
{
	int tmo, res;

	res = srp_parse_tmo(&tmo, val);
	if (res)
		goto out;

	if (kp->arg == &srp_reconnect_delay)
		res = srp_tmo_valid(tmo, srp_fast_io_fail_tmo,
				    srp_dev_loss_tmo);
	else if (kp->arg == &srp_fast_io_fail_tmo)
		res = srp_tmo_valid(srp_reconnect_delay, tmo, srp_dev_loss_tmo);
	else
		res = srp_tmo_valid(srp_reconnect_delay, srp_fast_io_fail_tmo,
				    tmo);
	if (res)
		goto out;
	*(int *)kp->arg = tmo;

out:
	return res;
}

static const struct kernel_param_ops srp_tmo_ops = {
	.get = srp_tmo_get,
	.set = srp_tmo_set,
};
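
/*
 * Example (hypothetical values): srp_tmo_set() keeps the three timeouts
 * mutually consistent, so runtime updates such as
 *
 *   echo 20  > /sys/module/ib_srp/parameters/fast_io_fail_tmo
 *   echo off > /sys/module/ib_srp/parameters/dev_loss_tmo
 *
 * are validated via srp_tmo_valid() before being applied.
 */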

static inline struct srp_target_port *host_to_target(struct Scsi_Host *host)
{
	return (struct srp_target_port *) host->hostdata;
}

static const char *srp_target_info(struct Scsi_Host *host)
{
	return host_to_target(host)->target_name;
}

static int srp_target_is_topspin(struct srp_target_port *target)
{
	static const u8 topspin_oui[3] = { 0x00, 0x05, 0xad };
	static const u8 cisco_oui[3]   = { 0x00, 0x1b, 0x0d };

	return topspin_workarounds &&
		(!memcmp(&target->ioc_guid, topspin_oui, sizeof topspin_oui) ||
		 !memcmp(&target->ioc_guid, cisco_oui, sizeof cisco_oui));
}

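/*
 * Allocate an information unit (IU) of @size bytes, allocate its data
 * buffer and map that buffer for DMA in @direction. Returns NULL on
 * allocation or mapping failure.
 */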
static struct srp_iu *srp_alloc_iu(struct srp_host *host, size_t size,
				   gfp_t gfp_mask,
				   enum dma_data_direction direction)
{
	struct srp_iu *iu;

	iu = kmalloc(sizeof *iu, gfp_mask);
	if (!iu)
		goto out;

	iu->buf = kzalloc(size, gfp_mask);
	if (!iu->buf)
		goto out_free_iu;

	iu->dma = ib_dma_map_single(host->srp_dev->dev, iu->buf, size,
				    direction);
	if (ib_dma_mapping_error(host->srp_dev->dev, iu->dma))
		goto out_free_buf;

	iu->size      = size;
	iu->direction = direction;

	return iu;

out_free_buf:
	kfree(iu->buf);
out_free_iu:
	kfree(iu);
out:
	return NULL;
}

static void srp_free_iu(struct srp_host *host, struct srp_iu *iu)
{
	if (!iu)
		return;

	ib_dma_unmap_single(host->srp_dev->dev, iu->dma, iu->size,
			    iu->direction);
	kfree(iu->buf);
	kfree(iu);
}

static void srp_qp_event(struct ib_event *event, void *context)
{
	pr_debug("QP event %s (%d)\n",
		 ib_event_msg(event->event), event->event);
}

static int srp_init_qp(struct srp_target_port *target,
		       struct ib_qp *qp)
{
	struct ib_qp_attr *attr;
	int ret;

	attr = kmalloc(sizeof *attr, GFP_KERNEL);
	if (!attr)
		return -ENOMEM;

	ret = ib_find_cached_pkey(target->srp_host->srp_dev->dev,
				  target->srp_host->port,
				  be16_to_cpu(target->pkey),
				  &attr->pkey_index);
	if (ret)
		goto out;

	attr->qp_state        = IB_QPS_INIT;
	attr->qp_access_flags = (IB_ACCESS_REMOTE_READ |
				 IB_ACCESS_REMOTE_WRITE);
	attr->port_num        = target->srp_host->port;

	ret = ib_modify_qp(qp, attr,
			   IB_QP_STATE		|
			   IB_QP_PKEY_INDEX	|
			   IB_QP_ACCESS_FLAGS	|
			   IB_QP_PORT);

out:
	kfree(attr);
	return ret;
}

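/*
 * Create a new IB CM ID for @ch and reinitialize the channel's path
 * parameters from the target port. Any CM ID previously owned by the
 * channel is destroyed first.
 */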
static int srp_new_cm_id(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	struct ib_cm_id *new_cm_id;

	new_cm_id = ib_create_cm_id(target->srp_host->srp_dev->dev,
				    srp_cm_handler, ch);
	if (IS_ERR(new_cm_id))
		return PTR_ERR(new_cm_id);

	if (ch->cm_id)
		ib_destroy_cm_id(ch->cm_id);
	ch->cm_id = new_cm_id;
	ch->path.sgid = target->sgid;
	ch->path.dgid = target->orig_dgid;
	ch->path.pkey = target->pkey;
	ch->path.service_id = target->service_id;

	return 0;
}

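/*
 * Allocate an FMR pool for @target, sized according to
 * target->mr_pool_size and with a dirty watermark of one quarter of the
 * pool size.
 */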
static struct ib_fmr_pool *srp_alloc_fmr_pool(struct srp_target_port *target)
{
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_fmr_pool_param fmr_param;

	memset(&fmr_param, 0, sizeof(fmr_param));
	fmr_param.pool_size	    = target->mr_pool_size;
	fmr_param.dirty_watermark   = fmr_param.pool_size / 4;
	fmr_param.cache		    = 1;
	fmr_param.max_pages_per_fmr = dev->max_pages_per_mr;
	fmr_param.page_shift	    = ilog2(dev->mr_page_size);
	fmr_param.access	    = (IB_ACCESS_LOCAL_WRITE |
				       IB_ACCESS_REMOTE_WRITE |
				       IB_ACCESS_REMOTE_READ);

	return ib_create_fmr_pool(dev->pd, &fmr_param);
}

/**
 * srp_destroy_fr_pool() - free the resources owned by a pool
 * @pool: Fast registration pool to be destroyed.
 */
static void srp_destroy_fr_pool(struct srp_fr_pool *pool)
{
	int i;
	struct srp_fr_desc *d;

	if (!pool)
		return;

	for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
		if (d->mr)
			ib_dereg_mr(d->mr);
	}
	kfree(pool);
}

/**
 * srp_create_fr_pool() - allocate and initialize a pool for fast registration
 * @device:            IB device to allocate fast registration descriptors for.
 * @pd:                Protection domain associated with the FR descriptors.
 * @pool_size:         Number of descriptors to allocate.
 * @max_page_list_len: Maximum fast registration work request page list length.
 */
static struct srp_fr_pool *srp_create_fr_pool(struct ib_device *device,
					      struct ib_pd *pd, int pool_size,
					      int max_page_list_len)
{
	struct srp_fr_pool *pool;
	struct srp_fr_desc *d;
	struct ib_mr *mr;
	int i, ret = -EINVAL;

	if (pool_size <= 0)
		goto err;
	ret = -ENOMEM;
	pool = kzalloc(sizeof(struct srp_fr_pool) +
		       pool_size * sizeof(struct srp_fr_desc), GFP_KERNEL);
	if (!pool)
		goto err;
	pool->size = pool_size;
	pool->max_page_list_len = max_page_list_len;
	spin_lock_init(&pool->lock);
	INIT_LIST_HEAD(&pool->free_list);

	for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
		mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG,
				 max_page_list_len);
		if (IS_ERR(mr)) {
			ret = PTR_ERR(mr);
			goto destroy_pool;
		}
		d->mr = mr;
		list_add_tail(&d->entry, &pool->free_list);
	}

out:
	return pool;

destroy_pool:
	srp_destroy_fr_pool(pool);

err:
	pool = ERR_PTR(ret);
	goto out;
}

/**
 * srp_fr_pool_get() - obtain a descriptor suitable for fast registration
 * @pool: Pool to obtain descriptor from.
 */
static struct srp_fr_desc *srp_fr_pool_get(struct srp_fr_pool *pool)
{
	struct srp_fr_desc *d = NULL;
	unsigned long flags;

	spin_lock_irqsave(&pool->lock, flags);
	if (!list_empty(&pool->free_list)) {
		d = list_first_entry(&pool->free_list, typeof(*d), entry);
		list_del(&d->entry);
	}
	spin_unlock_irqrestore(&pool->lock, flags);

	return d;
}

/**
 * srp_fr_pool_put() - put an FR descriptor back in the free list
 * @pool: Pool the descriptor was allocated from.
 * @desc: Pointer to an array of fast registration descriptor pointers.
 * @n:    Number of descriptors to put back.
 *
 * Note: The caller must already have queued an invalidation request for
 * desc->mr->rkey before calling this function.
 */
static void srp_fr_pool_put(struct srp_fr_pool *pool, struct srp_fr_desc **desc,
			    int n)
{
	unsigned long flags;
	int i;

	spin_lock_irqsave(&pool->lock, flags);
	for (i = 0; i < n; i++)
		list_add(&desc[i]->entry, &pool->free_list);
	spin_unlock_irqrestore(&pool->lock, flags);
}

static struct srp_fr_pool *srp_alloc_fr_pool(struct srp_target_port *target)
{
	struct srp_device *dev = target->srp_host->srp_dev;

	return srp_create_fr_pool(dev->dev, dev->pd, target->mr_pool_size,
				  dev->max_pages_per_mr);
}

/**
 * srp_destroy_qp() - destroy an RDMA queue pair
 * @qp: RDMA queue pair.
 *
 * Drain the qp before destroying it. This prevents the receive
 * completion handler from accessing the queue pair while it is
 * being destroyed.
 */
static void srp_destroy_qp(struct ib_qp *qp)
{
	ib_drain_rq(qp);
	ib_destroy_qp(qp);
}

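/*
 * Allocate the completion queues, the queue pair and the memory
 * registration pool (FR or FMR) for an RDMA channel. On success any
 * resources previously owned by the channel are freed and replaced by
 * the newly allocated ones.
 */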
static int srp_create_ch_ib(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_qp_init_attr *init_attr;
	struct ib_cq *recv_cq, *send_cq;
	struct ib_qp *qp;
	struct ib_fmr_pool *fmr_pool = NULL;
	struct srp_fr_pool *fr_pool = NULL;
	const int m = 1 + dev->use_fast_reg * target->mr_per_cmd * 2;
	int ret;

	init_attr = kzalloc(sizeof *init_attr, GFP_KERNEL);
	if (!init_attr)
		return -ENOMEM;

	/* queue_size + 1 for ib_drain_rq() */
	recv_cq = ib_alloc_cq(dev->dev, ch, target->queue_size + 1,
				ch->comp_vector, IB_POLL_SOFTIRQ);
	if (IS_ERR(recv_cq)) {
		ret = PTR_ERR(recv_cq);
		goto err;
	}

	send_cq = ib_alloc_cq(dev->dev, ch, m * target->queue_size,
				ch->comp_vector, IB_POLL_DIRECT);
	if (IS_ERR(send_cq)) {
		ret = PTR_ERR(send_cq);
		goto err_recv_cq;
	}

	init_attr->event_handler       = srp_qp_event;
	init_attr->cap.max_send_wr     = m * target->queue_size;
	init_attr->cap.max_recv_wr     = target->queue_size + 1;
	init_attr->cap.max_recv_sge    = 1;
	init_attr->cap.max_send_sge    = 1;
	init_attr->sq_sig_type         = IB_SIGNAL_REQ_WR;
	init_attr->qp_type             = IB_QPT_RC;
	init_attr->send_cq             = send_cq;
	init_attr->recv_cq             = recv_cq;

	qp = ib_create_qp(dev->pd, init_attr);
	if (IS_ERR(qp)) {
		ret = PTR_ERR(qp);
		goto err_send_cq;
	}

	ret = srp_init_qp(target, qp);
	if (ret)
		goto err_qp;

	if (dev->use_fast_reg) {
		fr_pool = srp_alloc_fr_pool(target);
		if (IS_ERR(fr_pool)) {
			ret = PTR_ERR(fr_pool);
			shost_printk(KERN_WARNING, target->scsi_host, PFX
				     "FR pool allocation failed (%d)\n", ret);
			goto err_qp;
		}
	} else if (dev->use_fmr) {
		fmr_pool = srp_alloc_fmr_pool(target);
		if (IS_ERR(fmr_pool)) {
			ret = PTR_ERR(fmr_pool);
			shost_printk(KERN_WARNING, target->scsi_host, PFX
				     "FMR pool allocation failed (%d)\n", ret);
			goto err_qp;
		}
	}

	if (ch->qp)
		srp_destroy_qp(ch->qp);
	if (ch->recv_cq)
		ib_free_cq(ch->recv_cq);
	if (ch->send_cq)
		ib_free_cq(ch->send_cq);

	ch->qp = qp;
	ch->recv_cq = recv_cq;
	ch->send_cq = send_cq;

	if (dev->use_fast_reg) {
		if (ch->fr_pool)
			srp_destroy_fr_pool(ch->fr_pool);
		ch->fr_pool = fr_pool;
	} else if (dev->use_fmr) {
		if (ch->fmr_pool)
			ib_destroy_fmr_pool(ch->fmr_pool);
		ch->fmr_pool = fmr_pool;
	}

	kfree(init_attr);
	return 0;

err_qp:
	srp_destroy_qp(qp);

err_send_cq:
	ib_free_cq(send_cq);

err_recv_cq:
	ib_free_cq(recv_cq);

err:
	kfree(init_attr);
	return ret;
}

/*
 * Note: this function may be called without srp_alloc_iu_bufs() having been
 * invoked. Hence the ch->[rt]x_ring checks.
 */
static void srp_free_ch_ib(struct srp_target_port *target,
			   struct srp_rdma_ch *ch)
{
	struct srp_device *dev = target->srp_host->srp_dev;
	int i;

	if (!ch->target)
		return;

	if (ch->cm_id) {
		ib_destroy_cm_id(ch->cm_id);
		ch->cm_id = NULL;
	}

	/* If srp_new_cm_id() succeeded but srp_create_ch_ib() did not, return. */
	if (!ch->qp)
		return;

	if (dev->use_fast_reg) {
		if (ch->fr_pool)
			srp_destroy_fr_pool(ch->fr_pool);
	} else if (dev->use_fmr) {
		if (ch->fmr_pool)
			ib_destroy_fmr_pool(ch->fmr_pool);
	}

	srp_destroy_qp(ch->qp);
	ib_free_cq(ch->send_cq);
	ib_free_cq(ch->recv_cq);

	/*
	 * Prevent the SCSI error handler from using this channel after it
	 * has been freed: the error handler may continue to perform
	 * recovery actions after scsi_remove_host() has returned.
	 */
	ch->target = NULL;

	ch->qp = NULL;
	ch->send_cq = ch->recv_cq = NULL;

	if (ch->rx_ring) {
		for (i = 0; i < target->queue_size; ++i)
			srp_free_iu(target->srp_host, ch->rx_ring[i]);
		kfree(ch->rx_ring);
		ch->rx_ring = NULL;
	}
	if (ch->tx_ring) {
		for (i = 0; i < target->queue_size; ++i)
			srp_free_iu(target->srp_host, ch->tx_ring[i]);
		kfree(ch->tx_ring);
		ch->tx_ring = NULL;
	}
}

static void srp_path_rec_completion(int status,
				    struct ib_sa_path_rec *pathrec,
				    void *ch_ptr)
{
	struct srp_rdma_ch *ch = ch_ptr;
	struct srp_target_port *target = ch->target;

	ch->status = status;
	if (status)
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "Got failed path rec status %d\n", status);
	else
		ch->path = *pathrec;
	complete(&ch->done);
}

static int srp_lookup_path(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	int ret;

	ch->path.numb_path = 1;

	init_completion(&ch->done);

	ch->path_query_id = ib_sa_path_rec_get(&srp_sa_client,
					       target->srp_host->srp_dev->dev,
					       target->srp_host->port,
					       &ch->path,
					       IB_SA_PATH_REC_SERVICE_ID |
					       IB_SA_PATH_REC_DGID	 |
					       IB_SA_PATH_REC_SGID	 |
					       IB_SA_PATH_REC_NUMB_PATH	 |
					       IB_SA_PATH_REC_PKEY,
					       SRP_PATH_REC_TIMEOUT_MS,
					       GFP_KERNEL,
					       srp_path_rec_completion,
					       ch, &ch->path_query);
	if (ch->path_query_id < 0)
		return ch->path_query_id;

	ret = wait_for_completion_interruptible(&ch->done);
	if (ret < 0)
		return ret;

	if (ch->status < 0)
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Path record query failed\n");

	return ch->status;
}

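/*
 * Build an SRP_LOGIN_REQ and send it through the channel's CM ID.
 * @multich selects SRP_MULTICHAN_MULTI in the request flags for every
 * channel after the first one.
 */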
static int srp_send_req(struct srp_rdma_ch *ch, bool multich)
{
	struct srp_target_port *target = ch->target;
	struct {
		struct ib_cm_req_param param;
		struct srp_login_req   priv;
	} *req = NULL;
	int status;

	req = kzalloc(sizeof *req, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	req->param.primary_path		      = &ch->path;
	req->param.alternate_path	      = NULL;
	req->param.service_id		      = target->service_id;
	req->param.qp_num		      = ch->qp->qp_num;
	req->param.qp_type		      = ch->qp->qp_type;
	req->param.private_data		      = &req->priv;
	req->param.private_data_len	      = sizeof req->priv;
	req->param.flow_control		      = 1;

	get_random_bytes(&req->param.starting_psn, 4);
	req->param.starting_psn		     &= 0xffffff;

	/*
	 * Pick some arbitrary defaults here; we could make these
	 * module parameters if anyone cared about setting them.
	 */
	req->param.responder_resources	      = 4;
	req->param.remote_cm_response_timeout = 20;
	req->param.local_cm_response_timeout  = 20;
	req->param.retry_count		      = target->tl_retry_count;
	req->param.rnr_retry_count	      = 7;
	req->param.max_cm_retries	      = 15;

	req->priv.opcode	= SRP_LOGIN_REQ;
	req->priv.tag		= 0;
	req->priv.req_it_iu_len = cpu_to_be32(target->max_iu_len);
	req->priv.req_buf_fmt	= cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
					      SRP_BUF_FORMAT_INDIRECT);
	req->priv.req_flags	= (multich ? SRP_MULTICHAN_MULTI :
				   SRP_MULTICHAN_SINGLE);
	/*
	 * In the published SRP specification (draft rev. 16a), the
	 * port identifier format is 8 bytes of ID extension followed
	 * by 8 bytes of GUID.  Older drafts put the two halves in the
	 * opposite order, so that the GUID comes first.
	 *
	 * Targets conforming to these obsolete drafts can be
	 * recognized by the I/O Class they report.
	 */
	if (target->io_class == SRP_REV10_IB_IO_CLASS) {
		memcpy(req->priv.initiator_port_id,
		       &target->sgid.global.interface_id, 8);
		memcpy(req->priv.initiator_port_id + 8,
		       &target->initiator_ext, 8);
		memcpy(req->priv.target_port_id,     &target->ioc_guid, 8);
		memcpy(req->priv.target_port_id + 8, &target->id_ext, 8);
	} else {
		memcpy(req->priv.initiator_port_id,
		       &target->initiator_ext, 8);
		memcpy(req->priv.initiator_port_id + 8,
		       &target->sgid.global.interface_id, 8);
		memcpy(req->priv.target_port_id,     &target->id_ext, 8);
		memcpy(req->priv.target_port_id + 8, &target->ioc_guid, 8);
	}

	/*
	 * Topspin/Cisco SRP targets will reject our login unless we
	 * zero out the first 8 bytes of our initiator port ID and set
	 * the second 8 bytes to the local node GUID.
	 */
	if (srp_target_is_topspin(target)) {
		shost_printk(KERN_DEBUG, target->scsi_host,
			     PFX "Topspin/Cisco initiator port ID workaround "
			     "activated for target GUID %016llx\n",
			     be64_to_cpu(target->ioc_guid));
		memset(req->priv.initiator_port_id, 0, 8);
		memcpy(req->priv.initiator_port_id + 8,
		       &target->srp_host->srp_dev->dev->node_guid, 8);
	}

	status = ib_send_cm_req(ch->cm_id, &req->param);

	kfree(req);

	return status;
}

static bool srp_queue_remove_work(struct srp_target_port *target)
{
	bool changed = false;

	spin_lock_irq(&target->lock);
	if (target->state != SRP_TARGET_REMOVED) {
		target->state = SRP_TARGET_REMOVED;
		changed = true;
	}
	spin_unlock_irq(&target->lock);

	if (changed)
		queue_work(srp_remove_wq, &target->remove_work);

	return changed;
}

static void srp_disconnect_target(struct srp_target_port *target)
{
	struct srp_rdma_ch *ch;
	int i;

	/* XXX should send SRP_I_LOGOUT request */

	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		ch->connected = false;
		if (ch->cm_id && ib_send_cm_dreq(ch->cm_id, NULL, 0)) {
			shost_printk(KERN_DEBUG, target->scsi_host,
				     PFX "Sending CM DREQ failed\n");
		}
	}
}

static void srp_free_req_data(struct srp_target_port *target,
			      struct srp_rdma_ch *ch)
{
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_device *ibdev = dev->dev;
	struct srp_request *req;
	int i;

	if (!ch->req_ring)
		return;

	for (i = 0; i < target->req_ring_size; ++i) {
		req = &ch->req_ring[i];
		if (dev->use_fast_reg) {
			kfree(req->fr_list);
		} else {
			kfree(req->fmr_list);
			kfree(req->map_page);
		}
		if (req->indirect_dma_addr) {
			ib_dma_unmap_single(ibdev, req->indirect_dma_addr,
					    target->indirect_size,
					    DMA_TO_DEVICE);
		}
		kfree(req->indirect_desc);
	}

	kfree(ch->req_ring);
	ch->req_ring = NULL;
}

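/*
 * Allocate the request ring of a channel together with the per-request
 * memory registration lists, the indirect descriptor buffers and their
 * DMA mappings.
 */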
static int srp_alloc_req_data(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	struct srp_device *srp_dev = target->srp_host->srp_dev;
	struct ib_device *ibdev = srp_dev->dev;
	struct srp_request *req;
	void *mr_list;
	dma_addr_t dma_addr;
	int i, ret = -ENOMEM;

	ch->req_ring = kcalloc(target->req_ring_size, sizeof(*ch->req_ring),
			       GFP_KERNEL);
	if (!ch->req_ring)
		goto out;

	for (i = 0; i < target->req_ring_size; ++i) {
		req = &ch->req_ring[i];
		mr_list = kmalloc(target->mr_per_cmd * sizeof(void *),
				  GFP_KERNEL);
		if (!mr_list)
			goto out;
		if (srp_dev->use_fast_reg) {
			req->fr_list = mr_list;
		} else {
			req->fmr_list = mr_list;
			req->map_page = kmalloc(srp_dev->max_pages_per_mr *
						sizeof(void *), GFP_KERNEL);
			if (!req->map_page)
				goto out;
		}
		req->indirect_desc = kmalloc(target->indirect_size, GFP_KERNEL);
		if (!req->indirect_desc)
			goto out;

		dma_addr = ib_dma_map_single(ibdev, req->indirect_desc,
					     target->indirect_size,
					     DMA_TO_DEVICE);
		if (ib_dma_mapping_error(ibdev, dma_addr))
			goto out;

		req->indirect_dma_addr = dma_addr;
	}
	ret = 0;

out:
	return ret;
}

/**
 * srp_del_scsi_host_attr() - Remove attributes defined in the host template.
 * @shost: SCSI host whose attributes to remove from sysfs.
 *
 * Note: Any attributes defined in the host template and that did not exist
 * before invocation of this function will be ignored.
 */
static void srp_del_scsi_host_attr(struct Scsi_Host *shost)
{
	struct device_attribute **attr;

	for (attr = shost->hostt->shost_attrs; attr && *attr; ++attr)
		device_remove_file(&shost->shost_dev, *attr);
}

static void srp_remove_target(struct srp_target_port *target)
{
	struct srp_rdma_ch *ch;
	int i;

	WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);

	srp_del_scsi_host_attr(target->scsi_host);
	srp_rport_get(target->rport);
	srp_remove_host(target->scsi_host);
	scsi_remove_host(target->scsi_host);
	srp_stop_rport_timers(target->rport);
	srp_disconnect_target(target);
	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		srp_free_ch_ib(target, ch);
	}
	cancel_work_sync(&target->tl_err_work);
	srp_rport_put(target->rport);
	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		srp_free_req_data(target, ch);
	}
	kfree(target->ch);
	target->ch = NULL;

	spin_lock(&target->srp_host->target_lock);
	list_del(&target->list);
	spin_unlock(&target->srp_host->target_lock);

	scsi_host_put(target->scsi_host);
}

static void srp_remove_work(struct work_struct *work)
{
	struct srp_target_port *target =
		container_of(work, struct srp_target_port, remove_work);

	WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);

	srp_remove_target(target);
}

static void srp_rport_delete(struct srp_rport *rport)
{
	struct srp_target_port *target = rport->lld_data;

	srp_queue_remove_work(target);
}

/**
 * srp_connected_ch() - number of connected channels
 * @target: SRP target port.
 */
static int srp_connected_ch(struct srp_target_port *target)
{
	int i, c = 0;

	for (i = 0; i < target->ch_count; i++)
		c += target->ch[i].connected;

	return c;
}

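/*
 * Look up a path record and perform the SRP login for one RDMA channel,
 * retrying when the target replies with a port or LID/QP redirect REJ.
 */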
static int srp_connect_ch(struct srp_rdma_ch *ch, bool multich)
{
	struct srp_target_port *target = ch->target;
	int ret;

	WARN_ON_ONCE(!multich && srp_connected_ch(target) > 0);

	ret = srp_lookup_path(ch);
	if (ret)
		goto out;

	while (1) {
		init_completion(&ch->done);
		ret = srp_send_req(ch, multich);
		if (ret)
			goto out;
		ret = wait_for_completion_interruptible(&ch->done);
		if (ret < 0)
			goto out;

		/*
		 * The CM event handling code will set status to
		 * SRP_PORT_REDIRECT if we get a port redirect REJ
		 * back, or SRP_DLID_REDIRECT if we get a lid/qp
		 * redirect REJ back.
		 */
		ret = ch->status;
		switch (ret) {
		case 0:
			ch->connected = true;
			goto out;

		case SRP_PORT_REDIRECT:
			ret = srp_lookup_path(ch);
			if (ret)
				goto out;
			break;

		case SRP_DLID_REDIRECT:
			break;

		case SRP_STALE_CONN:
			shost_printk(KERN_ERR, target->scsi_host, PFX
				     "giving up on stale connection\n");
			ret = -ECONNRESET;
			goto out;

		default:
			goto out;
		}
	}

out:
	return ret <= 0 ? ret : -ENODEV;
}

static void srp_inv_rkey_err_done(struct ib_cq *cq, struct ib_wc *wc)
{
	srp_handle_qp_err(cq, wc, "INV RKEY");
}

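/*
 * Post an IB_WR_LOCAL_INV work request on the queue pair of @ch to
 * invalidate @rkey. Completion errors are reported via
 * srp_inv_rkey_err_done().
 */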
1023static int srp_inv_rkey(struct srp_request *req, struct srp_rdma_ch *ch,
1024 u32 rkey)
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001025{
1026 struct ib_send_wr *bad_wr;
1027 struct ib_send_wr wr = {
1028 .opcode = IB_WR_LOCAL_INV,
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001029 .next = NULL,
1030 .num_sge = 0,
1031 .send_flags = 0,
1032 .ex.invalidate_rkey = rkey,
1033 };
1034
Christoph Hellwig1dc7b1f2015-11-13 12:57:13 +01001035 wr.wr_cqe = &req->reg_cqe;
1036 req->reg_cqe.done = srp_inv_rkey_err_done;
Bart Van Assche509c07b2014-10-30 14:48:30 +01001037 return ib_post_send(ch->qp, &wr, &bad_wr);
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001038}
1039
Roland Dreierd945e1d2006-05-09 10:50:28 -07001040static void srp_unmap_data(struct scsi_cmnd *scmnd,
Bart Van Assche509c07b2014-10-30 14:48:30 +01001041 struct srp_rdma_ch *ch,
Roland Dreierd945e1d2006-05-09 10:50:28 -07001042 struct srp_request *req)
1043{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001044 struct srp_target_port *target = ch->target;
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001045 struct srp_device *dev = target->srp_host->srp_dev;
1046 struct ib_device *ibdev = dev->dev;
1047 int i, res;
David Dillow8f26c9f2011-01-14 19:45:50 -05001048
FUJITA Tomonoribb350d12007-05-26 02:28:25 +09001049 if (!scsi_sglist(scmnd) ||
Roland Dreierd945e1d2006-05-09 10:50:28 -07001050 (scmnd->sc_data_direction != DMA_TO_DEVICE &&
1051 scmnd->sc_data_direction != DMA_FROM_DEVICE))
1052 return;
1053
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001054 if (dev->use_fast_reg) {
1055 struct srp_fr_desc **pfr;
1056
1057 for (i = req->nmdesc, pfr = req->fr_list; i > 0; i--, pfr++) {
Christoph Hellwig1dc7b1f2015-11-13 12:57:13 +01001058 res = srp_inv_rkey(req, ch, (*pfr)->mr->rkey);
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001059 if (res < 0) {
1060 shost_printk(KERN_ERR, target->scsi_host, PFX
1061 "Queueing INV WR for rkey %#x failed (%d)\n",
1062 (*pfr)->mr->rkey, res);
1063 queue_work(system_long_wq,
1064 &target->tl_err_work);
1065 }
1066 }
1067 if (req->nmdesc)
Bart Van Assche509c07b2014-10-30 14:48:30 +01001068 srp_fr_pool_put(ch->fr_pool, req->fr_list,
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001069 req->nmdesc);
Bart Van Assche002f1562015-08-10 17:08:44 -07001070 } else if (dev->use_fmr) {
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001071 struct ib_pool_fmr **pfmr;
1072
1073 for (i = req->nmdesc, pfmr = req->fmr_list; i > 0; i--, pfmr++)
1074 ib_fmr_pool_unmap(*pfmr);
1075 }
Roland Dreierf5358a12006-06-17 20:37:29 -07001076
David Dillow8f26c9f2011-01-14 19:45:50 -05001077 ib_dma_unmap_sg(ibdev, scsi_sglist(scmnd), scsi_sg_count(scmnd),
1078 scmnd->sc_data_direction);
Roland Dreierd945e1d2006-05-09 10:50:28 -07001079}
1080
Bart Van Assche22032992012-08-14 13:18:53 +00001081/**
1082 * srp_claim_req - Take ownership of the scmnd associated with a request.
Bart Van Assche509c07b2014-10-30 14:48:30 +01001083 * @ch: SRP RDMA channel.
Bart Van Assche22032992012-08-14 13:18:53 +00001084 * @req: SRP request.
Bart Van Asscheb3fe6282014-03-14 13:54:11 +01001085 * @sdev: If not NULL, only take ownership for this SCSI device.
Bart Van Assche22032992012-08-14 13:18:53 +00001086 * @scmnd: If NULL, take ownership of @req->scmnd. If not NULL, only take
1087 * ownership of @req->scmnd if it equals @scmnd.
1088 *
1089 * Return value:
1090 * Either NULL or a pointer to the SCSI command the caller became owner of.
1091 */
Bart Van Assche509c07b2014-10-30 14:48:30 +01001092static struct scsi_cmnd *srp_claim_req(struct srp_rdma_ch *ch,
Bart Van Assche22032992012-08-14 13:18:53 +00001093 struct srp_request *req,
Bart Van Asscheb3fe6282014-03-14 13:54:11 +01001094 struct scsi_device *sdev,
Bart Van Assche22032992012-08-14 13:18:53 +00001095 struct scsi_cmnd *scmnd)
Ishai Rabinovitz526b4ca2006-06-17 20:37:38 -07001096{
Bart Van Assche94a91742010-11-26 14:50:09 -05001097 unsigned long flags;
1098
Bart Van Assche509c07b2014-10-30 14:48:30 +01001099 spin_lock_irqsave(&ch->lock, flags);
Bart Van Asscheb3fe6282014-03-14 13:54:11 +01001100 if (req->scmnd &&
1101 (!sdev || req->scmnd->device == sdev) &&
1102 (!scmnd || req->scmnd == scmnd)) {
Bart Van Assche22032992012-08-14 13:18:53 +00001103 scmnd = req->scmnd;
1104 req->scmnd = NULL;
Bart Van Assche22032992012-08-14 13:18:53 +00001105 } else {
1106 scmnd = NULL;
1107 }
Bart Van Assche509c07b2014-10-30 14:48:30 +01001108 spin_unlock_irqrestore(&ch->lock, flags);
Bart Van Assche22032992012-08-14 13:18:53 +00001109
1110 return scmnd;
1111}
1112
1113/**
Bart Van Assche6ec2ba02016-04-22 14:12:47 -07001114 * srp_free_req() - Unmap data and adjust ch->req_lim.
Bart Van Assche509c07b2014-10-30 14:48:30 +01001115 * @ch: SRP RDMA channel.
Bart Van Asscheaf246632014-05-20 15:04:21 +02001116 * @req: Request to be freed.
1117 * @scmnd: SCSI command associated with @req.
1118 * @req_lim_delta: Amount to be added to @target->req_lim.
Bart Van Assche22032992012-08-14 13:18:53 +00001119 */
Bart Van Assche509c07b2014-10-30 14:48:30 +01001120static void srp_free_req(struct srp_rdma_ch *ch, struct srp_request *req,
1121 struct scsi_cmnd *scmnd, s32 req_lim_delta)
Bart Van Assche22032992012-08-14 13:18:53 +00001122{
1123 unsigned long flags;
1124
Bart Van Assche509c07b2014-10-30 14:48:30 +01001125 srp_unmap_data(scmnd, ch, req);
Bart Van Assche22032992012-08-14 13:18:53 +00001126
Bart Van Assche509c07b2014-10-30 14:48:30 +01001127 spin_lock_irqsave(&ch->lock, flags);
1128 ch->req_lim += req_lim_delta;
Bart Van Assche509c07b2014-10-30 14:48:30 +01001129 spin_unlock_irqrestore(&ch->lock, flags);
Ishai Rabinovitz526b4ca2006-06-17 20:37:38 -07001130}
1131
Bart Van Assche509c07b2014-10-30 14:48:30 +01001132static void srp_finish_req(struct srp_rdma_ch *ch, struct srp_request *req,
1133 struct scsi_device *sdev, int result)
Ishai Rabinovitz526b4ca2006-06-17 20:37:38 -07001134{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001135 struct scsi_cmnd *scmnd = srp_claim_req(ch, req, sdev, NULL);
Bart Van Assche22032992012-08-14 13:18:53 +00001136
1137 if (scmnd) {
Bart Van Assche509c07b2014-10-30 14:48:30 +01001138 srp_free_req(ch, req, scmnd, 0);
Bart Van Asscheed9b2262013-10-26 14:34:27 +02001139 scmnd->result = result;
Bart Van Assche22032992012-08-14 13:18:53 +00001140 scmnd->scsi_done(scmnd);
Bart Van Assche22032992012-08-14 13:18:53 +00001141 }
Ishai Rabinovitz526b4ca2006-06-17 20:37:38 -07001142}
1143
Bart Van Asscheed9b2262013-10-26 14:34:27 +02001144static void srp_terminate_io(struct srp_rport *rport)
Roland Dreieraef9ec32005-11-02 14:07:13 -08001145{
Bart Van Asscheed9b2262013-10-26 14:34:27 +02001146 struct srp_target_port *target = rport->lld_data;
Bart Van Assched92c0da2014-10-06 17:14:36 +02001147 struct srp_rdma_ch *ch;
Bart Van Asscheb3fe6282014-03-14 13:54:11 +01001148 struct Scsi_Host *shost = target->scsi_host;
1149 struct scsi_device *sdev;
Bart Van Assched92c0da2014-10-06 17:14:36 +02001150 int i, j;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001151
Bart Van Asscheb3fe6282014-03-14 13:54:11 +01001152 /*
1153 * Invoking srp_terminate_io() while srp_queuecommand() is running
1154 * is not safe. Hence the warning statement below.
1155 */
1156 shost_for_each_device(sdev, shost)
1157 WARN_ON_ONCE(sdev->request_queue->request_fn_active);
1158
Bart Van Assched92c0da2014-10-06 17:14:36 +02001159 for (i = 0; i < target->ch_count; i++) {
1160 ch = &target->ch[i];
Bart Van Assche509c07b2014-10-30 14:48:30 +01001161
Bart Van Assched92c0da2014-10-06 17:14:36 +02001162 for (j = 0; j < target->req_ring_size; ++j) {
1163 struct srp_request *req = &ch->req_ring[j];
1164
1165 srp_finish_req(ch, req, NULL,
1166 DID_TRANSPORT_FAILFAST << 16);
1167 }
Bart Van Asscheed9b2262013-10-26 14:34:27 +02001168 }
1169}
1170
1171/*
1172 * It is up to the caller to ensure that srp_rport_reconnect() calls are
1173 * serialized and that no concurrent srp_queuecommand(), srp_abort(),
1174 * srp_reset_device() or srp_reset_host() calls will occur while this function
1175 * is in progress. One way to realize that is not to call this function
1176 * directly but to call srp_reconnect_rport() instead since that last function
1177 * serializes calls of this function via rport->mutex and also blocks
1178 * srp_queuecommand() calls before invoking this function.
1179 */
1180static int srp_rport_reconnect(struct srp_rport *rport)
1181{
1182 struct srp_target_port *target = rport->lld_data;
Bart Van Assched92c0da2014-10-06 17:14:36 +02001183 struct srp_rdma_ch *ch;
1184 int i, j, ret = 0;
1185 bool multich = false;
Bart Van Assche09be70a2012-03-17 17:18:54 +00001186
Roland Dreieraef9ec32005-11-02 14:07:13 -08001187 srp_disconnect_target(target);
Bart Van Assche34aa6542014-10-30 14:47:22 +01001188
1189 if (target->state == SRP_TARGET_SCANNING)
1190 return -ENODEV;
1191
Roland Dreieraef9ec32005-11-02 14:07:13 -08001192 /*
Bart Van Asschec7c4e7f2013-02-21 17:19:04 +00001193 * Now get a new local CM ID so that we avoid confusing the target in
1194 * case things are really fouled up. Doing so also ensures that all CM
1195 * callbacks will have finished before a new QP is allocated.
Roland Dreieraef9ec32005-11-02 14:07:13 -08001196 */
Bart Van Assched92c0da2014-10-06 17:14:36 +02001197 for (i = 0; i < target->ch_count; i++) {
1198 ch = &target->ch[i];
Bart Van Assched92c0da2014-10-06 17:14:36 +02001199 ret += srp_new_cm_id(ch);
Bart Van Assche536ae142010-11-26 13:58:27 -05001200 }
Bart Van Assched92c0da2014-10-06 17:14:36 +02001201 for (i = 0; i < target->ch_count; i++) {
1202 ch = &target->ch[i];
Bart Van Assched92c0da2014-10-06 17:14:36 +02001203 for (j = 0; j < target->req_ring_size; ++j) {
1204 struct srp_request *req = &ch->req_ring[j];
Roland Dreieraef9ec32005-11-02 14:07:13 -08001205
Bart Van Assched92c0da2014-10-06 17:14:36 +02001206 srp_finish_req(ch, req, NULL, DID_RESET << 16);
1207 }
1208 }
1209 for (i = 0; i < target->ch_count; i++) {
1210 ch = &target->ch[i];
Bart Van Assched92c0da2014-10-06 17:14:36 +02001211 /*
1212 * Whether or not creating a new CM ID succeeded, create a new
1213 * QP. This guarantees that all completion callback function
1214 * invocations have finished before request resetting starts.
1215 */
1216 ret += srp_create_ch_ib(ch);
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001217
Bart Van Assched92c0da2014-10-06 17:14:36 +02001218 INIT_LIST_HEAD(&ch->free_tx);
1219 for (j = 0; j < target->queue_size; ++j)
1220 list_add(&ch->tx_ring[j]->list, &ch->free_tx);
1221 }
Bart Van Assche8de9fe32015-05-18 13:23:36 +02001222
1223 target->qp_in_error = false;
1224
Bart Van Assched92c0da2014-10-06 17:14:36 +02001225 for (i = 0; i < target->ch_count; i++) {
1226 ch = &target->ch[i];
Bart Van Asschebbac5cc2015-05-18 13:26:17 +02001227 if (ret)
Bart Van Assched92c0da2014-10-06 17:14:36 +02001228 break;
Bart Van Assched92c0da2014-10-06 17:14:36 +02001229 ret = srp_connect_ch(ch, multich);
1230 multich = true;
1231 }
Bart Van Assche09be70a2012-03-17 17:18:54 +00001232
Bart Van Asscheed9b2262013-10-26 14:34:27 +02001233 if (ret == 0)
1234 shost_printk(KERN_INFO, target->scsi_host,
1235 PFX "reconnect succeeded\n");
Roland Dreieraef9ec32005-11-02 14:07:13 -08001236
1237 return ret;
1238}
1239
David Dillow8f26c9f2011-01-14 19:45:50 -05001240static void srp_map_desc(struct srp_map_state *state, dma_addr_t dma_addr,
1241 unsigned int dma_len, u32 rkey)
Roland Dreierf5358a12006-06-17 20:37:29 -07001242{
David Dillow8f26c9f2011-01-14 19:45:50 -05001243 struct srp_direct_buf *desc = state->desc;
1244
Bart Van Assche3ae95da2015-08-10 17:08:18 -07001245 WARN_ON_ONCE(!dma_len);
1246
David Dillow8f26c9f2011-01-14 19:45:50 -05001247 desc->va = cpu_to_be64(dma_addr);
1248 desc->key = cpu_to_be32(rkey);
1249 desc->len = cpu_to_be32(dma_len);
1250
1251 state->total_len += dma_len;
1252 state->desc++;
1253 state->ndesc++;
1254}
1255
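/*
 * Register the pages that have been accumulated in @state through the FMR
 * pool and add a memory descriptor for the registered region. A single
 * page is described directly via the global MR, when one is available, so
 * no registration is needed in that case.
 */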
1256static int srp_map_finish_fmr(struct srp_map_state *state,
Bart Van Assche509c07b2014-10-30 14:48:30 +01001257 struct srp_rdma_ch *ch)
David Dillow8f26c9f2011-01-14 19:45:50 -05001258{
Bart Van Assche186fbc62015-08-10 17:06:29 -07001259 struct srp_target_port *target = ch->target;
1260 struct srp_device *dev = target->srp_host->srp_dev;
David Dillow8f26c9f2011-01-14 19:45:50 -05001261 struct ib_pool_fmr *fmr;
Roland Dreierf5358a12006-06-17 20:37:29 -07001262 u64 io_addr = 0;
David Dillow8f26c9f2011-01-14 19:45:50 -05001263
Bart Van Asschef731ed62015-08-10 17:07:27 -07001264 if (state->fmr.next >= state->fmr.end)
1265 return -ENOMEM;
1266
Sagi Grimberg26630e82015-10-13 19:11:38 +03001267 WARN_ON_ONCE(!dev->use_fmr);
1268
1269 if (state->npages == 0)
1270 return 0;
1271
1272 if (state->npages == 1 && target->global_mr) {
1273 srp_map_desc(state, state->base_dma_addr, state->dma_len,
1274 target->global_mr->rkey);
1275 goto reset_state;
1276 }
1277
Bart Van Assche509c07b2014-10-30 14:48:30 +01001278 fmr = ib_fmr_pool_map_phys(ch->fmr_pool, state->pages,
David Dillow8f26c9f2011-01-14 19:45:50 -05001279 state->npages, io_addr);
1280 if (IS_ERR(fmr))
1281 return PTR_ERR(fmr);
1282
Bart Van Asschef731ed62015-08-10 17:07:27 -07001283 *state->fmr.next++ = fmr;
Bart Van Assche52ede082014-05-20 15:07:45 +02001284 state->nmdesc++;
David Dillow8f26c9f2011-01-14 19:45:50 -05001285
Bart Van Assche186fbc62015-08-10 17:06:29 -07001286 srp_map_desc(state, state->base_dma_addr & ~dev->mr_page_mask,
1287 state->dma_len, fmr->fmr->rkey);
Bart Van Assche539dde62014-05-20 15:05:46 +02001288
Sagi Grimberg26630e82015-10-13 19:11:38 +03001289reset_state:
1290 state->npages = 0;
1291 state->dma_len = 0;
1292
David Dillow8f26c9f2011-01-14 19:45:50 -05001293 return 0;
1294}
1295
Christoph Hellwig1dc7b1f2015-11-13 12:57:13 +01001296static void srp_reg_mr_err_done(struct ib_cq *cq, struct ib_wc *wc)
1297{
1298 srp_handle_qp_err(cq, wc, "FAST REG");
1299}
1300
Bart Van Assche509c5f32016-05-12 10:50:35 -07001301/*
 * Map up to sg_nents elements of state->sg, where *sg_offset_p is the offset
 * at which to start in the first element. If sg_offset_p != NULL then
1304 * *sg_offset_p is updated to the offset in state->sg[retval] of the first
1305 * byte that has not yet been mapped.
1306 */
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001307static int srp_map_finish_fr(struct srp_map_state *state,
Christoph Hellwig1dc7b1f2015-11-13 12:57:13 +01001308 struct srp_request *req,
Bart Van Assche509c5f32016-05-12 10:50:35 -07001309 struct srp_rdma_ch *ch, int sg_nents,
1310 unsigned int *sg_offset_p)
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001311{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001312 struct srp_target_port *target = ch->target;
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001313 struct srp_device *dev = target->srp_host->srp_dev;
1314 struct ib_send_wr *bad_wr;
Sagi Grimbergf7f7aab2015-10-13 19:11:39 +03001315 struct ib_reg_wr wr;
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001316 struct srp_fr_desc *desc;
1317 u32 rkey;
Sagi Grimbergf7f7aab2015-10-13 19:11:39 +03001318 int n, err;
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001319
Bart Van Asschef731ed62015-08-10 17:07:27 -07001320 if (state->fr.next >= state->fr.end)
1321 return -ENOMEM;
1322
Sagi Grimberg26630e82015-10-13 19:11:38 +03001323 WARN_ON_ONCE(!dev->use_fast_reg);
1324
Bart Van Assche57b0be92015-12-01 10:19:38 -08001325 if (sg_nents == 1 && target->global_mr) {
Bart Van Assche509c5f32016-05-12 10:50:35 -07001326 unsigned int sg_offset = sg_offset_p ? *sg_offset_p : 0;
1327
1328 srp_map_desc(state, sg_dma_address(state->sg) + sg_offset,
1329 sg_dma_len(state->sg) - sg_offset,
Sagi Grimberg26630e82015-10-13 19:11:38 +03001330 target->global_mr->rkey);
Bart Van Assche509c5f32016-05-12 10:50:35 -07001331 if (sg_offset_p)
1332 *sg_offset_p = 0;
Sagi Grimbergf7f7aab2015-10-13 19:11:39 +03001333 return 1;
Sagi Grimberg26630e82015-10-13 19:11:38 +03001334 }
1335
Bart Van Assche509c07b2014-10-30 14:48:30 +01001336 desc = srp_fr_pool_get(ch->fr_pool);
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001337 if (!desc)
1338 return -ENOMEM;
1339
1340 rkey = ib_inc_rkey(desc->mr->rkey);
1341 ib_update_fast_reg_key(desc->mr, rkey);
1342
Bart Van Assche509c5f32016-05-12 10:50:35 -07001343 n = ib_map_mr_sg(desc->mr, state->sg, sg_nents, sg_offset_p,
1344 dev->mr_page_size);
Bart Van Assche9d8e7d02016-05-12 10:48:13 -07001345 if (unlikely(n < 0)) {
1346 srp_fr_pool_put(ch->fr_pool, &desc, 1);
Bart Van Assche509c5f32016-05-12 10:50:35 -07001347 pr_debug("%s: ib_map_mr_sg(%d, %d) returned %d.\n",
Bart Van Assche9d8e7d02016-05-12 10:48:13 -07001348 dev_name(&req->scmnd->device->sdev_gendev), sg_nents,
Bart Van Assche509c5f32016-05-12 10:50:35 -07001349 sg_offset_p ? *sg_offset_p : -1, n);
Sagi Grimbergf7f7aab2015-10-13 19:11:39 +03001350 return n;
Bart Van Assche9d8e7d02016-05-12 10:48:13 -07001351 }
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001352
Bart Van Assche509c5f32016-05-12 10:50:35 -07001353 WARN_ON_ONCE(desc->mr->length == 0);
1354
Christoph Hellwig1dc7b1f2015-11-13 12:57:13 +01001355 req->reg_cqe.done = srp_reg_mr_err_done;
1356
Sagi Grimbergf7f7aab2015-10-13 19:11:39 +03001357 wr.wr.next = NULL;
1358 wr.wr.opcode = IB_WR_REG_MR;
Christoph Hellwig1dc7b1f2015-11-13 12:57:13 +01001359 wr.wr.wr_cqe = &req->reg_cqe;
Sagi Grimbergf7f7aab2015-10-13 19:11:39 +03001360 wr.wr.num_sge = 0;
1361 wr.wr.send_flags = 0;
1362 wr.mr = desc->mr;
1363 wr.key = desc->mr->rkey;
1364 wr.access = (IB_ACCESS_LOCAL_WRITE |
1365 IB_ACCESS_REMOTE_READ |
1366 IB_ACCESS_REMOTE_WRITE);
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001367
Bart Van Asschef731ed62015-08-10 17:07:27 -07001368 *state->fr.next++ = desc;
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001369 state->nmdesc++;
1370
Sagi Grimbergf7f7aab2015-10-13 19:11:39 +03001371 srp_map_desc(state, desc->mr->iova,
1372 desc->mr->length, desc->mr->rkey);
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001373
Sagi Grimberg26630e82015-10-13 19:11:38 +03001374 err = ib_post_send(ch->qp, &wr.wr, &bad_wr);
Bart Van Assche509c5f32016-05-12 10:50:35 -07001375 if (unlikely(err)) {
1376 WARN_ON_ONCE(err == -ENOMEM);
Sagi Grimberg26630e82015-10-13 19:11:38 +03001377 return err;
Bart Van Assche509c5f32016-05-12 10:50:35 -07001378 }
Sagi Grimberg26630e82015-10-13 19:11:38 +03001379
Sagi Grimbergf7f7aab2015-10-13 19:11:39 +03001380 return n;
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001381}
1382
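/*
 * Add one scatterlist entry to @state in mr_page_size chunks. The FMR
 * mapping under construction is closed whenever it is full
 * (max_pages_per_mr pages) or whenever the next chunk does not start on a
 * page boundary, since an FMR can only describe page-aligned ranges.
 */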
David Dillow8f26c9f2011-01-14 19:45:50 -05001383static int srp_map_sg_entry(struct srp_map_state *state,
Bart Van Assche509c07b2014-10-30 14:48:30 +01001384 struct srp_rdma_ch *ch,
Bart Van Assche3ae95da2015-08-10 17:08:18 -07001385 struct scatterlist *sg, int sg_index)
David Dillow8f26c9f2011-01-14 19:45:50 -05001386{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001387 struct srp_target_port *target = ch->target;
Greg Kroah-Hartman05321932008-03-06 00:13:36 +01001388 struct srp_device *dev = target->srp_host->srp_dev;
Ralph Campbell85507bc2006-12-12 14:30:55 -08001389 struct ib_device *ibdev = dev->dev;
David Dillow8f26c9f2011-01-14 19:45:50 -05001390 dma_addr_t dma_addr = ib_sg_dma_address(ibdev, sg);
1391 unsigned int dma_len = ib_sg_dma_len(ibdev, sg);
Bart Van Assche3ae95da2015-08-10 17:08:18 -07001392 unsigned int len = 0;
David Dillow8f26c9f2011-01-14 19:45:50 -05001393 int ret;
Roland Dreierf5358a12006-06-17 20:37:29 -07001394
Bart Van Assche3ae95da2015-08-10 17:08:18 -07001395 WARN_ON_ONCE(!dma_len);
Ishai Rabinovitz559ce8f2006-08-03 10:35:43 -07001396
David Dillow8f26c9f2011-01-14 19:45:50 -05001397 while (dma_len) {
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001398 unsigned offset = dma_addr & ~dev->mr_page_mask;
1399 if (state->npages == dev->max_pages_per_mr || offset != 0) {
Sagi Grimbergf7f7aab2015-10-13 19:11:39 +03001400 ret = srp_map_finish_fmr(state, ch);
David Dillow8f26c9f2011-01-14 19:45:50 -05001401 if (ret)
1402 return ret;
Roland Dreierf5358a12006-06-17 20:37:29 -07001403 }
1404
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001405 len = min_t(unsigned int, dma_len, dev->mr_page_size - offset);
David Dillow8f26c9f2011-01-14 19:45:50 -05001406
1407 if (!state->npages)
1408 state->base_dma_addr = dma_addr;
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001409 state->pages[state->npages++] = dma_addr & dev->mr_page_mask;
Bart Van Assche52ede082014-05-20 15:07:45 +02001410 state->dma_len += len;
David Dillow8f26c9f2011-01-14 19:45:50 -05001411 dma_addr += len;
1412 dma_len -= len;
Roland Dreierf5358a12006-06-17 20:37:29 -07001413 }
1414
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001415 /*
1416 * If the last entry of the MR wasn't a full page, then we need to
David Dillow8f26c9f2011-01-14 19:45:50 -05001417 * close it out and start a new one -- we can only merge at page
Bart Van Assche1d3d98c2016-04-22 14:12:10 -07001418 * boundaries.
David Dillow8f26c9f2011-01-14 19:45:50 -05001419 */
Roland Dreierf5358a12006-06-17 20:37:29 -07001420 ret = 0;
Bart Van Assche0e0d3a42015-08-10 17:07:46 -07001421 if (len != dev->mr_page_size)
Sagi Grimbergf7f7aab2015-10-13 19:11:39 +03001422 ret = srp_map_finish_fmr(state, ch);
Roland Dreierf5358a12006-06-17 20:37:29 -07001423 return ret;
1424}
1425
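/*
 * Map an S/G list through the FMR pool: feed every scatterlist entry to
 * srp_map_sg_entry() and flush the final, possibly partial, mapping.
 */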
Sagi Grimberg26630e82015-10-13 19:11:38 +03001426static int srp_map_sg_fmr(struct srp_map_state *state, struct srp_rdma_ch *ch,
1427 struct srp_request *req, struct scatterlist *scat,
1428 int count)
1429{
1430 struct scatterlist *sg;
1431 int i, ret;
1432
	/* As in srp_map_sg_fr(), descriptors go into req->indirect_desc. */
	state->desc = req->indirect_desc;
	state->pages = req->map_page;
1434 state->fmr.next = req->fmr_list;
Bart Van Assche509c5f32016-05-12 10:50:35 -07001435 state->fmr.end = req->fmr_list + ch->target->mr_per_cmd;
Sagi Grimberg26630e82015-10-13 19:11:38 +03001436
1437 for_each_sg(scat, sg, count, i) {
1438 ret = srp_map_sg_entry(state, ch, sg, i);
1439 if (ret)
1440 return ret;
1441 }
1442
Sagi Grimbergf7f7aab2015-10-13 19:11:39 +03001443 ret = srp_map_finish_fmr(state, ch);
Sagi Grimberg26630e82015-10-13 19:11:38 +03001444 if (ret)
1445 return ret;
1446
Sagi Grimberg26630e82015-10-13 19:11:38 +03001447 return 0;
1448}
1449
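/*
 * Map an S/G list using fast registration. Every srp_map_finish_fr() call
 * consumes up to @count scatterlist entries, returns how many it mapped
 * and leaves the offset of the first unmapped byte in *sg_offset_p.
 */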
1450static int srp_map_sg_fr(struct srp_map_state *state, struct srp_rdma_ch *ch,
1451 struct srp_request *req, struct scatterlist *scat,
1452 int count)
1453{
Bart Van Assche509c5f32016-05-12 10:50:35 -07001454 unsigned int sg_offset = 0;
1455
Sagi Grimberg26630e82015-10-13 19:11:38 +03001456 state->desc = req->indirect_desc;
Sagi Grimbergf7f7aab2015-10-13 19:11:39 +03001457 state->fr.next = req->fr_list;
Bart Van Assche509c5f32016-05-12 10:50:35 -07001458 state->fr.end = req->fr_list + ch->target->mr_per_cmd;
Sagi Grimbergf7f7aab2015-10-13 19:11:39 +03001459 state->sg = scat;
Sagi Grimberg26630e82015-10-13 19:11:38 +03001460
Bart Van Assche3b59b7a62016-04-22 14:14:43 -07001461 if (count == 0)
1462 return 0;
1463
Bart Van Assche57b0be92015-12-01 10:19:38 -08001464 while (count) {
Sagi Grimbergf7f7aab2015-10-13 19:11:39 +03001465 int i, n;
1466
Bart Van Assche509c5f32016-05-12 10:50:35 -07001467 n = srp_map_finish_fr(state, req, ch, count, &sg_offset);
Sagi Grimbergf7f7aab2015-10-13 19:11:39 +03001468 if (unlikely(n < 0))
1469 return n;
1470
Bart Van Assche57b0be92015-12-01 10:19:38 -08001471 count -= n;
Sagi Grimbergf7f7aab2015-10-13 19:11:39 +03001472 for (i = 0; i < n; i++)
1473 state->sg = sg_next(state->sg);
Sagi Grimberg26630e82015-10-13 19:11:38 +03001474 }
1475
Sagi Grimberg26630e82015-10-13 19:11:38 +03001476 return 0;
1477}
1478
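/*
 * Map an S/G list without memory registration: emit one direct descriptor
 * per DMA-mapped scatterlist entry, all of them using the global rkey.
 */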
1479static int srp_map_sg_dma(struct srp_map_state *state, struct srp_rdma_ch *ch,
1480 struct srp_request *req, struct scatterlist *scat,
1481 int count)
Bart Van Assche76bc1e12014-05-20 15:05:21 +02001482{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001483 struct srp_target_port *target = ch->target;
Bart Van Assche76bc1e12014-05-20 15:05:21 +02001484 struct srp_device *dev = target->srp_host->srp_dev;
Bart Van Assche76bc1e12014-05-20 15:05:21 +02001485 struct scatterlist *sg;
Sagi Grimberg26630e82015-10-13 19:11:38 +03001486 int i;
Bart Van Assche76bc1e12014-05-20 15:05:21 +02001487
Sagi Grimberg26630e82015-10-13 19:11:38 +03001488 state->desc = req->indirect_desc;
1489 for_each_sg(scat, sg, count, i) {
1490 srp_map_desc(state, ib_sg_dma_address(dev->dev, sg),
1491 ib_sg_dma_len(dev->dev, sg),
1492 target->global_mr->rkey);
Bart Van Assche0e0d3a42015-08-10 17:07:46 -07001493 }
Bart Van Assche76bc1e12014-05-20 15:05:21 +02001494
Sagi Grimberg26630e82015-10-13 19:11:38 +03001495 return 0;
Bart Van Assche76bc1e12014-05-20 15:05:21 +02001496}
1497
Bart Van Assche330179f2015-08-10 17:09:05 -07001498/*
1499 * Register the indirect data buffer descriptor with the HCA.
1500 *
1501 * Note: since the indirect data buffer descriptor has been allocated with
1502 * kmalloc() it is guaranteed that this buffer is a physically contiguous
1503 * memory buffer.
1504 */
1505static int srp_map_idb(struct srp_rdma_ch *ch, struct srp_request *req,
1506 void **next_mr, void **end_mr, u32 idb_len,
1507 __be32 *idb_rkey)
1508{
1509 struct srp_target_port *target = ch->target;
1510 struct srp_device *dev = target->srp_host->srp_dev;
1511 struct srp_map_state state;
1512 struct srp_direct_buf idb_desc;
1513 u64 idb_pages[1];
Sagi Grimbergf7f7aab2015-10-13 19:11:39 +03001514 struct scatterlist idb_sg[1];
Bart Van Assche330179f2015-08-10 17:09:05 -07001515 int ret;
1516
1517 memset(&state, 0, sizeof(state));
1518 memset(&idb_desc, 0, sizeof(idb_desc));
1519 state.gen.next = next_mr;
1520 state.gen.end = end_mr;
1521 state.desc = &idb_desc;
Bart Van Assche330179f2015-08-10 17:09:05 -07001522 state.base_dma_addr = req->indirect_dma_addr;
1523 state.dma_len = idb_len;
Sagi Grimbergf7f7aab2015-10-13 19:11:39 +03001524
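	/*
	 * For fast registration, build a one-entry S/G list that points at
	 * the already DMA-mapped indirect descriptor table. Assigning
	 * dma_address directly bypasses the DMA mapping API, hence the
	 * "hack" remarks below.
	 */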
1525 if (dev->use_fast_reg) {
1526 state.sg = idb_sg;
Sagi Grimbergf7f7aab2015-10-13 19:11:39 +03001527 sg_set_buf(idb_sg, req->indirect_desc, idb_len);
1528 idb_sg->dma_address = req->indirect_dma_addr; /* hack! */
Christoph Hellwigfc925512015-12-01 10:18:30 -08001529#ifdef CONFIG_NEED_SG_DMA_LENGTH
1530 idb_sg->dma_length = idb_sg->length; /* hack^2 */
1531#endif
Bart Van Assche509c5f32016-05-12 10:50:35 -07001532 ret = srp_map_finish_fr(&state, req, ch, 1, NULL);
Sagi Grimbergf7f7aab2015-10-13 19:11:39 +03001533 if (ret < 0)
1534 return ret;
Bart Van Assche509c5f32016-05-12 10:50:35 -07001535 WARN_ON_ONCE(ret < 1);
Sagi Grimbergf7f7aab2015-10-13 19:11:39 +03001536 } else if (dev->use_fmr) {
1537 state.pages = idb_pages;
1538 state.pages[0] = (req->indirect_dma_addr &
1539 dev->mr_page_mask);
1540 state.npages = 1;
1541 ret = srp_map_finish_fmr(&state, ch);
1542 if (ret < 0)
1543 return ret;
1544 } else {
1545 return -EINVAL;
1546 }
Bart Van Assche330179f2015-08-10 17:09:05 -07001547
1548 *idb_rkey = idb_desc.key;
1549
Sagi Grimbergf7f7aab2015-10-13 19:11:39 +03001550 return 0;
Bart Van Assche330179f2015-08-10 17:09:05 -07001551}
1552
/* Keep this guard in sync with the call site in srp_map_data() below. */
#if defined(CONFIG_DYNAMIC_DEBUG)
1554static void srp_check_mapping(struct srp_map_state *state,
1555 struct srp_rdma_ch *ch, struct srp_request *req,
1556 struct scatterlist *scat, int count)
1557{
1558 struct srp_device *dev = ch->target->srp_host->srp_dev;
1559 struct srp_fr_desc **pfr;
1560 u64 desc_len = 0, mr_len = 0;
1561 int i;
1562
1563 for (i = 0; i < state->ndesc; i++)
1564 desc_len += be32_to_cpu(req->indirect_desc[i].len);
1565 if (dev->use_fast_reg)
1566 for (i = 0, pfr = req->fr_list; i < state->nmdesc; i++, pfr++)
1567 mr_len += (*pfr)->mr->length;
1568 else if (dev->use_fmr)
1569 for (i = 0; i < state->nmdesc; i++)
1570 mr_len += be32_to_cpu(req->indirect_desc[i].len);
1571 if (desc_len != scsi_bufflen(req->scmnd) ||
1572 mr_len > scsi_bufflen(req->scmnd))
1573 pr_err("Inconsistent: scsi len %d <> desc len %lld <> mr len %lld; ndesc %d; nmdesc = %d\n",
1574 scsi_bufflen(req->scmnd), desc_len, mr_len,
1575 state->ndesc, state->nmdesc);
1576}
1577#endif
1578
Bart Van Assche77269cd2016-04-22 14:13:09 -07001579/**
1580 * srp_map_data() - map SCSI data buffer onto an SRP request
1581 * @scmnd: SCSI command to map
1582 * @ch: SRP RDMA channel
1583 * @req: SRP request
1584 *
1585 * Returns the length in bytes of the SRP_CMD IU or a negative value if
1586 * mapping failed.
1587 */
Bart Van Assche509c07b2014-10-30 14:48:30 +01001588static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch,
Roland Dreieraef9ec32005-11-02 14:07:13 -08001589 struct srp_request *req)
1590{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001591 struct srp_target_port *target = ch->target;
Bart Van Assche76bc1e12014-05-20 15:05:21 +02001592 struct scatterlist *scat;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001593 struct srp_cmd *cmd = req->cmd->buf;
Bart Van Assche330179f2015-08-10 17:09:05 -07001594 int len, nents, count, ret;
Ralph Campbell85507bc2006-12-12 14:30:55 -08001595 struct srp_device *dev;
1596 struct ib_device *ibdev;
David Dillow8f26c9f2011-01-14 19:45:50 -05001597 struct srp_map_state state;
1598 struct srp_indirect_buf *indirect_hdr;
Bart Van Assche330179f2015-08-10 17:09:05 -07001599 u32 idb_len, table_len;
1600 __be32 idb_rkey;
David Dillow8f26c9f2011-01-14 19:45:50 -05001601 u8 fmt;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001602
FUJITA Tomonoribb350d12007-05-26 02:28:25 +09001603 if (!scsi_sglist(scmnd) || scmnd->sc_data_direction == DMA_NONE)
Roland Dreieraef9ec32005-11-02 14:07:13 -08001604 return sizeof (struct srp_cmd);
1605
1606 if (scmnd->sc_data_direction != DMA_FROM_DEVICE &&
1607 scmnd->sc_data_direction != DMA_TO_DEVICE) {
David Dillow7aa54bd2008-01-07 18:23:41 -05001608 shost_printk(KERN_WARNING, target->scsi_host,
1609 PFX "Unhandled data direction %d\n",
1610 scmnd->sc_data_direction);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001611 return -EINVAL;
1612 }
1613
FUJITA Tomonoribb350d12007-05-26 02:28:25 +09001614 nents = scsi_sg_count(scmnd);
1615 scat = scsi_sglist(scmnd);
Roland Dreiercf368712006-03-24 15:47:26 -08001616
Greg Kroah-Hartman05321932008-03-06 00:13:36 +01001617 dev = target->srp_host->srp_dev;
Ralph Campbell85507bc2006-12-12 14:30:55 -08001618 ibdev = dev->dev;
1619
1620 count = ib_dma_map_sg(ibdev, scat, nents, scmnd->sc_data_direction);
David Dillow8f26c9f2011-01-14 19:45:50 -05001621 if (unlikely(count == 0))
1622 return -EIO;
Roland Dreierf5358a12006-06-17 20:37:29 -07001623
1624 fmt = SRP_DATA_DESC_DIRECT;
1625 len = sizeof (struct srp_cmd) + sizeof (struct srp_direct_buf);
Roland Dreiercf368712006-03-24 15:47:26 -08001626
Bart Van Assche03f6fb92015-08-10 17:09:36 -07001627 if (count == 1 && target->global_mr) {
Roland Dreierf5358a12006-06-17 20:37:29 -07001628 /*
1629 * The midlayer only generated a single gather/scatter
1630 * entry, or DMA mapping coalesced everything to a
1631 * single entry. So a direct descriptor along with
1632 * the DMA MR suffices.
1633 */
Roland Dreieraef9ec32005-11-02 14:07:13 -08001634 struct srp_direct_buf *buf = (void *) cmd->add_data;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001635
Ralph Campbell85507bc2006-12-12 14:30:55 -08001636 buf->va = cpu_to_be64(ib_sg_dma_address(ibdev, scat));
Bart Van Assche03f6fb92015-08-10 17:09:36 -07001637 buf->key = cpu_to_be32(target->global_mr->rkey);
Ralph Campbell85507bc2006-12-12 14:30:55 -08001638 buf->len = cpu_to_be32(ib_sg_dma_len(ibdev, scat));
Roland Dreiercf368712006-03-24 15:47:26 -08001639
Bart Van Assche52ede082014-05-20 15:07:45 +02001640 req->nmdesc = 0;
David Dillow8f26c9f2011-01-14 19:45:50 -05001641 goto map_complete;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001642 }
1643
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001644 /*
1645 * We have more than one scatter/gather entry, so build our indirect
1646 * descriptor table, trying to merge as many entries as we can.
David Dillow8f26c9f2011-01-14 19:45:50 -05001647 */
1648 indirect_hdr = (void *) cmd->add_data;
1649
David Dillowc07d4242011-01-16 13:57:10 -05001650 ib_dma_sync_single_for_cpu(ibdev, req->indirect_dma_addr,
1651 target->indirect_size, DMA_TO_DEVICE);
1652
David Dillow8f26c9f2011-01-14 19:45:50 -05001653 memset(&state, 0, sizeof(state));
Sagi Grimberg26630e82015-10-13 19:11:38 +03001654 if (dev->use_fast_reg)
Bart Van Asschee012f362016-04-22 14:13:35 -07001655 ret = srp_map_sg_fr(&state, ch, req, scat, count);
Sagi Grimberg26630e82015-10-13 19:11:38 +03001656 else if (dev->use_fmr)
Bart Van Asschee012f362016-04-22 14:13:35 -07001657 ret = srp_map_sg_fmr(&state, ch, req, scat, count);
Sagi Grimberg26630e82015-10-13 19:11:38 +03001658 else
Bart Van Asschee012f362016-04-22 14:13:35 -07001659 ret = srp_map_sg_dma(&state, ch, req, scat, count);
1660 req->nmdesc = state.nmdesc;
1661 if (ret < 0)
1662 goto unmap;
David Dillow8f26c9f2011-01-14 19:45:50 -05001663
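	/*
	 * srp_check_mapping() only runs when the dynamic debug statement
	 * "Memory mapping consistency check" has been enabled at run time,
	 * e.g. through <debugfs>/dynamic_debug/control.
	 */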
#if defined(CONFIG_DYNAMIC_DEBUG)
1665 {
1666 DEFINE_DYNAMIC_DEBUG_METADATA(ddm,
1667 "Memory mapping consistency check");
1668 if (unlikely(ddm.flags & _DPRINTK_FLAGS_PRINT))
1669 srp_check_mapping(&state, ch, req, scat, count);
1670 }
1671#endif
1672
David Dillowc07d4242011-01-16 13:57:10 -05001673 /* We've mapped the request, now pull as much of the indirect
1674 * descriptor table as we can into the command buffer. If this
1675 * target is not using an external indirect table, we are
1676 * guaranteed to fit into the command, as the SCSI layer won't
1677 * give us more S/G entries than we allow.
David Dillow8f26c9f2011-01-14 19:45:50 -05001678 */
David Dillow8f26c9f2011-01-14 19:45:50 -05001679 if (state.ndesc == 1) {
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001680 /*
1681 * Memory registration collapsed the sg-list into one entry,
David Dillow8f26c9f2011-01-14 19:45:50 -05001682 * so use a direct descriptor.
1683 */
1684 struct srp_direct_buf *buf = (void *) cmd->add_data;
1685
David Dillowc07d4242011-01-16 13:57:10 -05001686 *buf = req->indirect_desc[0];
David Dillow8f26c9f2011-01-14 19:45:50 -05001687 goto map_complete;
1688 }
1689
David Dillowc07d4242011-01-16 13:57:10 -05001690 if (unlikely(target->cmd_sg_cnt < state.ndesc &&
1691 !target->allow_ext_sg)) {
1692 shost_printk(KERN_ERR, target->scsi_host,
1693 "Could not fit S/G list into SRP_CMD\n");
Bart Van Asschee012f362016-04-22 14:13:35 -07001694 ret = -EIO;
1695 goto unmap;
David Dillowc07d4242011-01-16 13:57:10 -05001696 }
1697
1698 count = min(state.ndesc, target->cmd_sg_cnt);
David Dillow8f26c9f2011-01-14 19:45:50 -05001699 table_len = state.ndesc * sizeof (struct srp_direct_buf);
Bart Van Assche330179f2015-08-10 17:09:05 -07001700 idb_len = sizeof(struct srp_indirect_buf) + table_len;
David Dillow8f26c9f2011-01-14 19:45:50 -05001701
1702 fmt = SRP_DATA_DESC_INDIRECT;
1703 len = sizeof(struct srp_cmd) + sizeof (struct srp_indirect_buf);
David Dillowc07d4242011-01-16 13:57:10 -05001704 len += count * sizeof (struct srp_direct_buf);
David Dillow8f26c9f2011-01-14 19:45:50 -05001705
David Dillowc07d4242011-01-16 13:57:10 -05001706 memcpy(indirect_hdr->desc_list, req->indirect_desc,
1707 count * sizeof (struct srp_direct_buf));
David Dillow8f26c9f2011-01-14 19:45:50 -05001708
Bart Van Assche03f6fb92015-08-10 17:09:36 -07001709 if (!target->global_mr) {
Bart Van Assche330179f2015-08-10 17:09:05 -07001710 ret = srp_map_idb(ch, req, state.gen.next, state.gen.end,
1711 idb_len, &idb_rkey);
1712 if (ret < 0)
Bart Van Asschee012f362016-04-22 14:13:35 -07001713 goto unmap;
Bart Van Assche330179f2015-08-10 17:09:05 -07001714 req->nmdesc++;
1715 } else {
Bart Van Asschea745f4f42015-12-01 10:18:47 -08001716 idb_rkey = cpu_to_be32(target->global_mr->rkey);
Bart Van Assche330179f2015-08-10 17:09:05 -07001717 }
1718
David Dillowc07d4242011-01-16 13:57:10 -05001719 indirect_hdr->table_desc.va = cpu_to_be64(req->indirect_dma_addr);
Bart Van Assche330179f2015-08-10 17:09:05 -07001720 indirect_hdr->table_desc.key = idb_rkey;
David Dillow8f26c9f2011-01-14 19:45:50 -05001721 indirect_hdr->table_desc.len = cpu_to_be32(table_len);
1722 indirect_hdr->len = cpu_to_be32(state.total_len);
1723
1724 if (scmnd->sc_data_direction == DMA_TO_DEVICE)
David Dillowc07d4242011-01-16 13:57:10 -05001725 cmd->data_out_desc_cnt = count;
David Dillow8f26c9f2011-01-14 19:45:50 -05001726 else
David Dillowc07d4242011-01-16 13:57:10 -05001727 cmd->data_in_desc_cnt = count;
1728
1729 ib_dma_sync_single_for_device(ibdev, req->indirect_dma_addr, table_len,
1730 DMA_TO_DEVICE);
David Dillow8f26c9f2011-01-14 19:45:50 -05001731
1732map_complete:
Roland Dreieraef9ec32005-11-02 14:07:13 -08001733 if (scmnd->sc_data_direction == DMA_TO_DEVICE)
1734 cmd->buf_fmt = fmt << 4;
1735 else
1736 cmd->buf_fmt = fmt;
1737
Roland Dreieraef9ec32005-11-02 14:07:13 -08001738 return len;
Bart Van Asschee012f362016-04-22 14:13:35 -07001739
1740unmap:
1741 srp_unmap_data(scmnd, ch, req);
Bart Van Asscheffc548b2016-04-22 14:14:15 -07001742 if (ret == -ENOMEM && req->nmdesc >= target->mr_pool_size)
1743 ret = -E2BIG;
Bart Van Asschee012f362016-04-22 14:13:35 -07001744 return ret;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001745}
1746
David Dillow05a1d752010-10-08 14:48:14 -04001747/*
 * Return an IU and possibly a credit to the free pool
1749 */
Bart Van Assche509c07b2014-10-30 14:48:30 +01001750static void srp_put_tx_iu(struct srp_rdma_ch *ch, struct srp_iu *iu,
Bart Van Assche76c75b22010-11-26 14:37:47 -05001751 enum srp_iu_type iu_type)
1752{
1753 unsigned long flags;
1754
Bart Van Assche509c07b2014-10-30 14:48:30 +01001755 spin_lock_irqsave(&ch->lock, flags);
1756 list_add(&iu->list, &ch->free_tx);
Bart Van Assche76c75b22010-11-26 14:37:47 -05001757 if (iu_type != SRP_IU_RSP)
Bart Van Assche509c07b2014-10-30 14:48:30 +01001758 ++ch->req_lim;
1759 spin_unlock_irqrestore(&ch->lock, flags);
Bart Van Assche76c75b22010-11-26 14:37:47 -05001760}
1761
1762/*
Bart Van Assche509c07b2014-10-30 14:48:30 +01001763 * Must be called with ch->lock held to protect req_lim and free_tx.
Bart Van Asschee9684672010-11-26 15:08:38 -05001764 * If IU is not sent, it must be returned using srp_put_tx_iu().
David Dillow05a1d752010-10-08 14:48:14 -04001765 *
1766 * Note:
1767 * An upper limit for the number of allocated information units for each
1768 * request type is:
1769 * - SRP_IU_CMD: SRP_CMD_SQ_SIZE, since the SCSI mid-layer never queues
1770 * more than Scsi_Host.can_queue requests.
1771 * - SRP_IU_TSK_MGMT: SRP_TSK_MGMT_SQ_SIZE.
1772 * - SRP_IU_RSP: 1, since a conforming SRP target never sends more than
1773 * one unanswered SRP request to an initiator.
1774 */
Bart Van Assche509c07b2014-10-30 14:48:30 +01001775static struct srp_iu *__srp_get_tx_iu(struct srp_rdma_ch *ch,
David Dillow05a1d752010-10-08 14:48:14 -04001776 enum srp_iu_type iu_type)
1777{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001778 struct srp_target_port *target = ch->target;
David Dillow05a1d752010-10-08 14:48:14 -04001779 s32 rsv = (iu_type == SRP_IU_TSK_MGMT) ? 0 : SRP_TSK_MGMT_SQ_SIZE;
1780 struct srp_iu *iu;
1781
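	/* Reap send completions so that finished tx IUs return to free_tx. */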
Christoph Hellwig1dc7b1f2015-11-13 12:57:13 +01001782 ib_process_cq_direct(ch->send_cq, -1);
David Dillow05a1d752010-10-08 14:48:14 -04001783
Bart Van Assche509c07b2014-10-30 14:48:30 +01001784 if (list_empty(&ch->free_tx))
David Dillow05a1d752010-10-08 14:48:14 -04001785 return NULL;
1786
1787 /* Initiator responses to target requests do not consume credits */
Bart Van Assche76c75b22010-11-26 14:37:47 -05001788 if (iu_type != SRP_IU_RSP) {
Bart Van Assche509c07b2014-10-30 14:48:30 +01001789 if (ch->req_lim <= rsv) {
Bart Van Assche76c75b22010-11-26 14:37:47 -05001790 ++target->zero_req_lim;
1791 return NULL;
1792 }
1793
Bart Van Assche509c07b2014-10-30 14:48:30 +01001794 --ch->req_lim;
David Dillow05a1d752010-10-08 14:48:14 -04001795 }
1796
Bart Van Assche509c07b2014-10-30 14:48:30 +01001797 iu = list_first_entry(&ch->free_tx, struct srp_iu, list);
Bart Van Assche76c75b22010-11-26 14:37:47 -05001798 list_del(&iu->list);
David Dillow05a1d752010-10-08 14:48:14 -04001799 return iu;
1800}
1801
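/*
 * Note: send completions are reaped via ib_process_cq_direct() in
 * __srp_get_tx_iu(), i.e. with ch->lock held; that is what makes the
 * lock-free list_add() below safe.
 */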
Christoph Hellwig1dc7b1f2015-11-13 12:57:13 +01001802static void srp_send_done(struct ib_cq *cq, struct ib_wc *wc)
1803{
1804 struct srp_iu *iu = container_of(wc->wr_cqe, struct srp_iu, cqe);
1805 struct srp_rdma_ch *ch = cq->cq_context;
1806
1807 if (unlikely(wc->status != IB_WC_SUCCESS)) {
1808 srp_handle_qp_err(cq, wc, "SEND");
1809 return;
1810 }
1811
1812 list_add(&iu->list, &ch->free_tx);
1813}
1814
Bart Van Assche509c07b2014-10-30 14:48:30 +01001815static int srp_post_send(struct srp_rdma_ch *ch, struct srp_iu *iu, int len)
David Dillow05a1d752010-10-08 14:48:14 -04001816{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001817 struct srp_target_port *target = ch->target;
David Dillow05a1d752010-10-08 14:48:14 -04001818 struct ib_sge list;
1819 struct ib_send_wr wr, *bad_wr;
David Dillow05a1d752010-10-08 14:48:14 -04001820
1821 list.addr = iu->dma;
1822 list.length = len;
David Dillow9af76272010-11-26 15:34:46 -05001823 list.lkey = target->lkey;
David Dillow05a1d752010-10-08 14:48:14 -04001824
Christoph Hellwig1dc7b1f2015-11-13 12:57:13 +01001825 iu->cqe.done = srp_send_done;
1826
David Dillow05a1d752010-10-08 14:48:14 -04001827 wr.next = NULL;
Christoph Hellwig1dc7b1f2015-11-13 12:57:13 +01001828 wr.wr_cqe = &iu->cqe;
David Dillow05a1d752010-10-08 14:48:14 -04001829 wr.sg_list = &list;
1830 wr.num_sge = 1;
1831 wr.opcode = IB_WR_SEND;
1832 wr.send_flags = IB_SEND_SIGNALED;
1833
Bart Van Assche509c07b2014-10-30 14:48:30 +01001834 return ib_post_send(ch->qp, &wr, &bad_wr);
David Dillow05a1d752010-10-08 14:48:14 -04001835}
1836
Bart Van Assche509c07b2014-10-30 14:48:30 +01001837static int srp_post_recv(struct srp_rdma_ch *ch, struct srp_iu *iu)
Bart Van Asschec996bb42010-07-30 10:59:05 +00001838{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001839 struct srp_target_port *target = ch->target;
Bart Van Asschec996bb42010-07-30 10:59:05 +00001840 struct ib_recv_wr wr, *bad_wr;
Bart Van Asschedcb4cb82010-11-26 13:22:48 -05001841 struct ib_sge list;
Bart Van Asschec996bb42010-07-30 10:59:05 +00001842
1843 list.addr = iu->dma;
1844 list.length = iu->size;
David Dillow9af76272010-11-26 15:34:46 -05001845 list.lkey = target->lkey;
Bart Van Asschec996bb42010-07-30 10:59:05 +00001846
Christoph Hellwig1dc7b1f2015-11-13 12:57:13 +01001847 iu->cqe.done = srp_recv_done;
1848
Bart Van Asschec996bb42010-07-30 10:59:05 +00001849 wr.next = NULL;
Christoph Hellwig1dc7b1f2015-11-13 12:57:13 +01001850 wr.wr_cqe = &iu->cqe;
Bart Van Asschec996bb42010-07-30 10:59:05 +00001851 wr.sg_list = &list;
1852 wr.num_sge = 1;
1853
Bart Van Assche509c07b2014-10-30 14:48:30 +01001854 return ib_post_recv(ch->qp, &wr, &bad_wr);
Bart Van Asschec996bb42010-07-30 10:59:05 +00001855}
1856
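/*
 * Handle a received SRP_RSP IU. Task management responses complete
 * ch->tsk_mgmt_done; command responses look up the SCSI command by tag,
 * copy back status, sense and residual counts, and complete the command.
 */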
Bart Van Assche509c07b2014-10-30 14:48:30 +01001857static void srp_process_rsp(struct srp_rdma_ch *ch, struct srp_rsp *rsp)
Roland Dreieraef9ec32005-11-02 14:07:13 -08001858{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001859 struct srp_target_port *target = ch->target;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001860 struct srp_request *req;
1861 struct scsi_cmnd *scmnd;
1862 unsigned long flags;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001863
Roland Dreieraef9ec32005-11-02 14:07:13 -08001864 if (unlikely(rsp->tag & SRP_TAG_TSK_MGMT)) {
Bart Van Assche509c07b2014-10-30 14:48:30 +01001865 spin_lock_irqsave(&ch->lock, flags);
1866 ch->req_lim += be32_to_cpu(rsp->req_lim_delta);
1867 spin_unlock_irqrestore(&ch->lock, flags);
Bart Van Assche94a91742010-11-26 14:50:09 -05001868
Bart Van Assche509c07b2014-10-30 14:48:30 +01001869 ch->tsk_mgmt_status = -1;
David Dillowf8b6e312010-11-26 13:02:21 -05001870 if (be32_to_cpu(rsp->resp_data_len) >= 4)
Bart Van Assche509c07b2014-10-30 14:48:30 +01001871 ch->tsk_mgmt_status = rsp->data[3];
1872 complete(&ch->tsk_mgmt_done);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001873 } else {
Bart Van Assche77f2c1a2014-10-02 15:29:25 +02001874 scmnd = scsi_host_find_tag(target->scsi_host, rsp->tag);
1875 if (scmnd) {
1876 req = (void *)scmnd->host_scribble;
1877 scmnd = srp_claim_req(ch, req, NULL, scmnd);
1878 }
Bart Van Assche22032992012-08-14 13:18:53 +00001879 if (!scmnd) {
David Dillow7aa54bd2008-01-07 18:23:41 -05001880 shost_printk(KERN_ERR, target->scsi_host,
Bart Van Assched92c0da2014-10-06 17:14:36 +02001881 "Null scmnd for RSP w/tag %#016llx received on ch %td / QP %#x\n",
1882 rsp->tag, ch - target->ch, ch->qp->qp_num);
Bart Van Assche22032992012-08-14 13:18:53 +00001883
Bart Van Assche509c07b2014-10-30 14:48:30 +01001884 spin_lock_irqsave(&ch->lock, flags);
1885 ch->req_lim += be32_to_cpu(rsp->req_lim_delta);
1886 spin_unlock_irqrestore(&ch->lock, flags);
Bart Van Assche22032992012-08-14 13:18:53 +00001887
1888 return;
1889 }
Roland Dreieraef9ec32005-11-02 14:07:13 -08001890 scmnd->result = rsp->status;
1891
1892 if (rsp->flags & SRP_RSP_FLAG_SNSVALID) {
1893 memcpy(scmnd->sense_buffer, rsp->data +
1894 be32_to_cpu(rsp->resp_data_len),
1895 min_t(int, be32_to_cpu(rsp->sense_data_len),
1896 SCSI_SENSE_BUFFERSIZE));
1897 }
1898
Bart Van Asschee7145312014-07-09 15:57:51 +02001899 if (unlikely(rsp->flags & SRP_RSP_FLAG_DIUNDER))
FUJITA Tomonoribb350d12007-05-26 02:28:25 +09001900 scsi_set_resid(scmnd, be32_to_cpu(rsp->data_in_res_cnt));
Bart Van Asschee7145312014-07-09 15:57:51 +02001901 else if (unlikely(rsp->flags & SRP_RSP_FLAG_DIOVER))
1902 scsi_set_resid(scmnd, -be32_to_cpu(rsp->data_in_res_cnt));
1903 else if (unlikely(rsp->flags & SRP_RSP_FLAG_DOUNDER))
1904 scsi_set_resid(scmnd, be32_to_cpu(rsp->data_out_res_cnt));
1905 else if (unlikely(rsp->flags & SRP_RSP_FLAG_DOOVER))
1906 scsi_set_resid(scmnd, -be32_to_cpu(rsp->data_out_res_cnt));
Roland Dreieraef9ec32005-11-02 14:07:13 -08001907
Bart Van Assche509c07b2014-10-30 14:48:30 +01001908 srp_free_req(ch, req, scmnd,
Bart Van Assche22032992012-08-14 13:18:53 +00001909 be32_to_cpu(rsp->req_lim_delta));
1910
David Dillowf8b6e312010-11-26 13:02:21 -05001911 scmnd->host_scribble = NULL;
1912 scmnd->scsi_done(scmnd);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001913 }
Roland Dreieraef9ec32005-11-02 14:07:13 -08001914}
1915
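/*
 * Send a response IU to the target. SRP_IU_RSP allocations do not consume
 * a request-limit credit, so a response can be sent even when the
 * initiator has run out of credits.
 */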
Bart Van Assche509c07b2014-10-30 14:48:30 +01001916static int srp_response_common(struct srp_rdma_ch *ch, s32 req_delta,
David Dillowbb125882010-10-08 14:40:47 -04001917 void *rsp, int len)
1918{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001919 struct srp_target_port *target = ch->target;
Bart Van Assche76c75b22010-11-26 14:37:47 -05001920 struct ib_device *dev = target->srp_host->srp_dev->dev;
David Dillowbb125882010-10-08 14:40:47 -04001921 unsigned long flags;
1922 struct srp_iu *iu;
Bart Van Assche76c75b22010-11-26 14:37:47 -05001923 int err;
David Dillowbb125882010-10-08 14:40:47 -04001924
Bart Van Assche509c07b2014-10-30 14:48:30 +01001925 spin_lock_irqsave(&ch->lock, flags);
1926 ch->req_lim += req_delta;
1927 iu = __srp_get_tx_iu(ch, SRP_IU_RSP);
1928 spin_unlock_irqrestore(&ch->lock, flags);
Bart Van Assche76c75b22010-11-26 14:37:47 -05001929
David Dillowbb125882010-10-08 14:40:47 -04001930 if (!iu) {
1931 shost_printk(KERN_ERR, target->scsi_host, PFX
1932 "no IU available to send response\n");
Bart Van Assche76c75b22010-11-26 14:37:47 -05001933 return 1;
David Dillowbb125882010-10-08 14:40:47 -04001934 }
1935
1936 ib_dma_sync_single_for_cpu(dev, iu->dma, len, DMA_TO_DEVICE);
1937 memcpy(iu->buf, rsp, len);
1938 ib_dma_sync_single_for_device(dev, iu->dma, len, DMA_TO_DEVICE);
1939
Bart Van Assche509c07b2014-10-30 14:48:30 +01001940 err = srp_post_send(ch, iu, len);
Bart Van Assche76c75b22010-11-26 14:37:47 -05001941 if (err) {
David Dillowbb125882010-10-08 14:40:47 -04001942 shost_printk(KERN_ERR, target->scsi_host, PFX
1943 "unable to post response: %d\n", err);
Bart Van Assche509c07b2014-10-30 14:48:30 +01001944 srp_put_tx_iu(ch, iu, SRP_IU_RSP);
Bart Van Assche76c75b22010-11-26 14:37:47 -05001945 }
David Dillowbb125882010-10-08 14:40:47 -04001946
David Dillowbb125882010-10-08 14:40:47 -04001947 return err;
1948}
1949
Bart Van Assche509c07b2014-10-30 14:48:30 +01001950static void srp_process_cred_req(struct srp_rdma_ch *ch,
David Dillowbb125882010-10-08 14:40:47 -04001951 struct srp_cred_req *req)
1952{
1953 struct srp_cred_rsp rsp = {
1954 .opcode = SRP_CRED_RSP,
1955 .tag = req->tag,
1956 };
1957 s32 delta = be32_to_cpu(req->req_lim_delta);
1958
Bart Van Assche509c07b2014-10-30 14:48:30 +01001959 if (srp_response_common(ch, delta, &rsp, sizeof(rsp)))
1960 shost_printk(KERN_ERR, ch->target->scsi_host, PFX
David Dillowbb125882010-10-08 14:40:47 -04001961 "problems processing SRP_CRED_REQ\n");
1962}
1963
Bart Van Assche509c07b2014-10-30 14:48:30 +01001964static void srp_process_aer_req(struct srp_rdma_ch *ch,
David Dillowbb125882010-10-08 14:40:47 -04001965 struct srp_aer_req *req)
1966{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001967 struct srp_target_port *target = ch->target;
David Dillowbb125882010-10-08 14:40:47 -04001968 struct srp_aer_rsp rsp = {
1969 .opcode = SRP_AER_RSP,
1970 .tag = req->tag,
1971 };
1972 s32 delta = be32_to_cpu(req->req_lim_delta);
1973
1974 shost_printk(KERN_ERR, target->scsi_host, PFX
Bart Van Assche985aa492015-05-18 13:27:14 +02001975 "ignoring AER for LUN %llu\n", scsilun_to_int(&req->lun));
David Dillowbb125882010-10-08 14:40:47 -04001976
Bart Van Assche509c07b2014-10-30 14:48:30 +01001977 if (srp_response_common(ch, delta, &rsp, sizeof(rsp)))
David Dillowbb125882010-10-08 14:40:47 -04001978 shost_printk(KERN_ERR, target->scsi_host, PFX
1979 "problems processing SRP_AER_REQ\n");
1980}
1981
Christoph Hellwig1dc7b1f2015-11-13 12:57:13 +01001982static void srp_recv_done(struct ib_cq *cq, struct ib_wc *wc)
Roland Dreieraef9ec32005-11-02 14:07:13 -08001983{
Christoph Hellwig1dc7b1f2015-11-13 12:57:13 +01001984 struct srp_iu *iu = container_of(wc->wr_cqe, struct srp_iu, cqe);
1985 struct srp_rdma_ch *ch = cq->cq_context;
Bart Van Assche509c07b2014-10-30 14:48:30 +01001986 struct srp_target_port *target = ch->target;
Bart Van Asschedcb4cb82010-11-26 13:22:48 -05001987 struct ib_device *dev = target->srp_host->srp_dev->dev;
Bart Van Asschec996bb42010-07-30 10:59:05 +00001988 int res;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001989 u8 opcode;
1990
Christoph Hellwig1dc7b1f2015-11-13 12:57:13 +01001991 if (unlikely(wc->status != IB_WC_SUCCESS)) {
1992 srp_handle_qp_err(cq, wc, "RECV");
1993 return;
1994 }
1995
Bart Van Assche509c07b2014-10-30 14:48:30 +01001996 ib_dma_sync_single_for_cpu(dev, iu->dma, ch->max_ti_iu_len,
Ralph Campbell85507bc2006-12-12 14:30:55 -08001997 DMA_FROM_DEVICE);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001998
1999 opcode = *(u8 *) iu->buf;
2000
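	/* Change "if (0)" into "if (1)" to hex-dump every received IU. */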
2001 if (0) {
David Dillow7aa54bd2008-01-07 18:23:41 -05002002 shost_printk(KERN_ERR, target->scsi_host,
2003 PFX "recv completion, opcode 0x%02x\n", opcode);
Bart Van Assche7a700812010-07-29 15:56:37 +00002004 print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 8, 1,
2005 iu->buf, wc->byte_len, true);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002006 }
2007
2008 switch (opcode) {
2009 case SRP_RSP:
Bart Van Assche509c07b2014-10-30 14:48:30 +01002010 srp_process_rsp(ch, iu->buf);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002011 break;
2012
David Dillowbb125882010-10-08 14:40:47 -04002013 case SRP_CRED_REQ:
Bart Van Assche509c07b2014-10-30 14:48:30 +01002014 srp_process_cred_req(ch, iu->buf);
David Dillowbb125882010-10-08 14:40:47 -04002015 break;
2016
2017 case SRP_AER_REQ:
Bart Van Assche509c07b2014-10-30 14:48:30 +01002018 srp_process_aer_req(ch, iu->buf);
David Dillowbb125882010-10-08 14:40:47 -04002019 break;
2020
Roland Dreieraef9ec32005-11-02 14:07:13 -08002021 case SRP_T_LOGOUT:
2022 /* XXX Handle target logout */
David Dillow7aa54bd2008-01-07 18:23:41 -05002023 shost_printk(KERN_WARNING, target->scsi_host,
2024 PFX "Got target logout request\n");
Roland Dreieraef9ec32005-11-02 14:07:13 -08002025 break;
2026
2027 default:
David Dillow7aa54bd2008-01-07 18:23:41 -05002028 shost_printk(KERN_WARNING, target->scsi_host,
2029 PFX "Unhandled SRP opcode 0x%02x\n", opcode);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002030 break;
2031 }
2032
Bart Van Assche509c07b2014-10-30 14:48:30 +01002033 ib_dma_sync_single_for_device(dev, iu->dma, ch->max_ti_iu_len,
Ralph Campbell85507bc2006-12-12 14:30:55 -08002034 DMA_FROM_DEVICE);
Bart Van Asschec996bb42010-07-30 10:59:05 +00002035
Bart Van Assche509c07b2014-10-30 14:48:30 +01002036 res = srp_post_recv(ch, iu);
Bart Van Asschec996bb42010-07-30 10:59:05 +00002037 if (res != 0)
2038 shost_printk(KERN_ERR, target->scsi_host,
2039 PFX "Recv failed with error code %d\n", res);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002040}
2041
Bart Van Asschec1120f82013-10-26 14:35:08 +02002042/**
2043 * srp_tl_err_work() - handle a transport layer error
Bart Van Asscheaf246632014-05-20 15:04:21 +02002044 * @work: Work structure embedded in an SRP target port.
Bart Van Asschec1120f82013-10-26 14:35:08 +02002045 *
2046 * Note: This function may get invoked before the rport has been created,
2047 * hence the target->rport test.
2048 */
2049static void srp_tl_err_work(struct work_struct *work)
2050{
2051 struct srp_target_port *target;
2052
2053 target = container_of(work, struct srp_target_port, tl_err_work);
2054 if (target->rport)
2055 srp_start_tl_fail_timers(target->rport);
2056}
2057
Christoph Hellwig1dc7b1f2015-11-13 12:57:13 +01002058static void srp_handle_qp_err(struct ib_cq *cq, struct ib_wc *wc,
2059 const char *opname)
Bart Van Assche948d1e82011-09-03 09:25:42 +02002060{
Christoph Hellwig1dc7b1f2015-11-13 12:57:13 +01002061 struct srp_rdma_ch *ch = cq->cq_context;
Bart Van Assche7dad6b22014-10-21 18:00:35 +02002062 struct srp_target_port *target = ch->target;
2063
Bart Van Asschec014c8c2015-05-18 13:23:57 +02002064 if (ch->connected && !target->qp_in_error) {
Christoph Hellwig1dc7b1f2015-11-13 12:57:13 +01002065 shost_printk(KERN_ERR, target->scsi_host,
2066 PFX "failed %s status %s (%d) for CQE %p\n",
2067 opname, ib_wc_status_msg(wc->status), wc->status,
2068 wc->wr_cqe);
Bart Van Asschec1120f82013-10-26 14:35:08 +02002069 queue_work(system_long_wq, &target->tl_err_work);
Bart Van Assche4f0af692012-11-26 11:16:40 +01002070 }
Bart Van Assche948d1e82011-09-03 09:25:42 +02002071 target->qp_in_error = true;
2072}
2073
Bart Van Assche76c75b22010-11-26 14:37:47 -05002074static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
Roland Dreieraef9ec32005-11-02 14:07:13 -08002075{
Bart Van Assche76c75b22010-11-26 14:37:47 -05002076 struct srp_target_port *target = host_to_target(shost);
Bart Van Asschea95cadb2013-10-26 14:37:17 +02002077 struct srp_rport *rport = target->rport;
Bart Van Assche509c07b2014-10-30 14:48:30 +01002078 struct srp_rdma_ch *ch;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002079 struct srp_request *req;
2080 struct srp_iu *iu;
2081 struct srp_cmd *cmd;
Ralph Campbell85507bc2006-12-12 14:30:55 -08002082 struct ib_device *dev;
Bart Van Assche76c75b22010-11-26 14:37:47 -05002083 unsigned long flags;
Bart Van Assche77f2c1a2014-10-02 15:29:25 +02002084 u32 tag;
2085 u16 idx;
Bart Van Assched1b42892014-05-20 15:07:20 +02002086 int len, ret;
Bart Van Asschea95cadb2013-10-26 14:37:17 +02002087 const bool in_scsi_eh = !in_interrupt() && current == shost->ehandler;
2088
2089 /*
2090 * The SCSI EH thread is the only context from which srp_queuecommand()
2091 * can get invoked for blocked devices (SDEV_BLOCK /
2092 * SDEV_CREATED_BLOCK). Avoid racing with srp_reconnect_rport() by
2093 * locking the rport mutex if invoked from inside the SCSI EH.
2094 */
2095 if (in_scsi_eh)
2096 mutex_lock(&rport->mutex);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002097
Bart Van Assched1b42892014-05-20 15:07:20 +02002098 scmnd->result = srp_chkready(target->rport);
2099 if (unlikely(scmnd->result))
2100 goto err;
Bart Van Assche2ce19e72013-02-21 17:20:00 +00002101
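	/*
	 * The blk-mq unique tag encodes both the hardware queue number and
	 * the per-queue tag: the former selects the RDMA channel, the latter
	 * indexes that channel's request ring.
	 */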
Bart Van Assche77f2c1a2014-10-02 15:29:25 +02002102 WARN_ON_ONCE(scmnd->request->tag < 0);
2103 tag = blk_mq_unique_tag(scmnd->request);
Bart Van Assched92c0da2014-10-06 17:14:36 +02002104 ch = &target->ch[blk_mq_unique_tag_to_hwq(tag)];
Bart Van Assche77f2c1a2014-10-02 15:29:25 +02002105 idx = blk_mq_unique_tag_to_tag(tag);
2106 WARN_ONCE(idx >= target->req_ring_size, "%s: tag %#x: idx %d >= %d\n",
2107 dev_name(&shost->shost_gendev), tag, idx,
2108 target->req_ring_size);
Bart Van Assche509c07b2014-10-30 14:48:30 +01002109
2110 spin_lock_irqsave(&ch->lock, flags);
2111 iu = __srp_get_tx_iu(ch, SRP_IU_CMD);
Bart Van Assche509c07b2014-10-30 14:48:30 +01002112 spin_unlock_irqrestore(&ch->lock, flags);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002113
Bart Van Assche77f2c1a2014-10-02 15:29:25 +02002114 if (!iu)
2115 goto err;
2116
2117 req = &ch->req_ring[idx];
Greg Kroah-Hartman05321932008-03-06 00:13:36 +01002118 dev = target->srp_host->srp_dev->dev;
David Dillow49248642011-01-14 18:23:24 -05002119 ib_dma_sync_single_for_cpu(dev, iu->dma, target->max_iu_len,
Ralph Campbell85507bc2006-12-12 14:30:55 -08002120 DMA_TO_DEVICE);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002121
David Dillowf8b6e312010-11-26 13:02:21 -05002122 scmnd->host_scribble = (void *) req;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002123
2124 cmd = iu->buf;
2125 memset(cmd, 0, sizeof *cmd);
2126
2127 cmd->opcode = SRP_CMD;
Bart Van Assche985aa492015-05-18 13:27:14 +02002128 int_to_scsilun(scmnd->device->lun, &cmd->lun);
Bart Van Assche77f2c1a2014-10-02 15:29:25 +02002129 cmd->tag = tag;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002130 memcpy(cmd->cdb, scmnd->cmnd, scmnd->cmd_len);
2131
Roland Dreieraef9ec32005-11-02 14:07:13 -08002132 req->scmnd = scmnd;
2133 req->cmd = iu;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002134
Bart Van Assche509c07b2014-10-30 14:48:30 +01002135 len = srp_map_data(scmnd, ch, req);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002136 if (len < 0) {
David Dillow7aa54bd2008-01-07 18:23:41 -05002137 shost_printk(KERN_ERR, target->scsi_host,
Bart Van Assched1b42892014-05-20 15:07:20 +02002138 PFX "Failed to map data (%d)\n", len);
2139 /*
2140 * If we ran out of memory descriptors (-ENOMEM) because an
2141 * application is queuing many requests with more than
Bart Van Assche52ede082014-05-20 15:07:45 +02002142 * max_pages_per_mr sg-list elements, tell the SCSI mid-layer
Bart Van Assched1b42892014-05-20 15:07:20 +02002143 * to reduce queue depth temporarily.
2144 */
2145 scmnd->result = len == -ENOMEM ?
2146 DID_OK << 16 | QUEUE_FULL << 1 : DID_ERROR << 16;
Bart Van Assche76c75b22010-11-26 14:37:47 -05002147 goto err_iu;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002148 }
2149
David Dillow49248642011-01-14 18:23:24 -05002150 ib_dma_sync_single_for_device(dev, iu->dma, target->max_iu_len,
Ralph Campbell85507bc2006-12-12 14:30:55 -08002151 DMA_TO_DEVICE);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002152
Bart Van Assche509c07b2014-10-30 14:48:30 +01002153 if (srp_post_send(ch, iu, len)) {
David Dillow7aa54bd2008-01-07 18:23:41 -05002154 shost_printk(KERN_ERR, target->scsi_host, PFX "Send failed\n");
Roland Dreieraef9ec32005-11-02 14:07:13 -08002155 goto err_unmap;
2156 }
2157
Bart Van Assched1b42892014-05-20 15:07:20 +02002158 ret = 0;
2159
Bart Van Asschea95cadb2013-10-26 14:37:17 +02002160unlock_rport:
2161 if (in_scsi_eh)
2162 mutex_unlock(&rport->mutex);
2163
Bart Van Assched1b42892014-05-20 15:07:20 +02002164 return ret;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002165
2166err_unmap:
Bart Van Assche509c07b2014-10-30 14:48:30 +01002167 srp_unmap_data(scmnd, ch, req);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002168
Bart Van Assche76c75b22010-11-26 14:37:47 -05002169err_iu:
Bart Van Assche509c07b2014-10-30 14:48:30 +01002170 srp_put_tx_iu(ch, iu, SRP_IU_CMD);
Bart Van Assche76c75b22010-11-26 14:37:47 -05002171
Bart Van Assche024ca902014-05-20 15:03:49 +02002172 /*
	 * Ensure that the loops that iterate over the request ring cannot
	 * encounter a dangling SCSI command pointer.
2175 */
2176 req->scmnd = NULL;
2177
Bart Van Assched1b42892014-05-20 15:07:20 +02002178err:
2179 if (scmnd->result) {
2180 scmnd->scsi_done(scmnd);
2181 ret = 0;
2182 } else {
2183 ret = SCSI_MLQUEUE_HOST_BUSY;
2184 }
Bart Van Asschea95cadb2013-10-26 14:37:17 +02002185
Bart Van Assched1b42892014-05-20 15:07:20 +02002186 goto unlock_rport;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002187}
2188
Bart Van Assche4d73f952013-10-26 14:40:37 +02002189/*
2190 * Note: the resources allocated in this function are freed in
Bart Van Assche509c07b2014-10-30 14:48:30 +01002191 * srp_free_ch_ib().
Bart Van Assche4d73f952013-10-26 14:40:37 +02002192 */
Bart Van Assche509c07b2014-10-30 14:48:30 +01002193static int srp_alloc_iu_bufs(struct srp_rdma_ch *ch)
Roland Dreieraef9ec32005-11-02 14:07:13 -08002194{
Bart Van Assche509c07b2014-10-30 14:48:30 +01002195 struct srp_target_port *target = ch->target;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002196 int i;
2197
Bart Van Assche509c07b2014-10-30 14:48:30 +01002198 ch->rx_ring = kcalloc(target->queue_size, sizeof(*ch->rx_ring),
2199 GFP_KERNEL);
2200 if (!ch->rx_ring)
Bart Van Assche4d73f952013-10-26 14:40:37 +02002201 goto err_no_ring;
Bart Van Assche509c07b2014-10-30 14:48:30 +01002202 ch->tx_ring = kcalloc(target->queue_size, sizeof(*ch->tx_ring),
2203 GFP_KERNEL);
2204 if (!ch->tx_ring)
Bart Van Assche4d73f952013-10-26 14:40:37 +02002205 goto err_no_ring;
2206
2207 for (i = 0; i < target->queue_size; ++i) {
Bart Van Assche509c07b2014-10-30 14:48:30 +01002208 ch->rx_ring[i] = srp_alloc_iu(target->srp_host,
2209 ch->max_ti_iu_len,
2210 GFP_KERNEL, DMA_FROM_DEVICE);
2211 if (!ch->rx_ring[i])
Roland Dreieraef9ec32005-11-02 14:07:13 -08002212 goto err;
2213 }
2214
Bart Van Assche4d73f952013-10-26 14:40:37 +02002215 for (i = 0; i < target->queue_size; ++i) {
Bart Van Assche509c07b2014-10-30 14:48:30 +01002216 ch->tx_ring[i] = srp_alloc_iu(target->srp_host,
2217 target->max_iu_len,
2218 GFP_KERNEL, DMA_TO_DEVICE);
2219 if (!ch->tx_ring[i])
Roland Dreieraef9ec32005-11-02 14:07:13 -08002220 goto err;
Bart Van Asschedcb4cb82010-11-26 13:22:48 -05002221
Bart Van Assche509c07b2014-10-30 14:48:30 +01002222 list_add(&ch->tx_ring[i]->list, &ch->free_tx);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002223 }
2224
2225 return 0;
2226
2227err:
Bart Van Assche4d73f952013-10-26 14:40:37 +02002228 for (i = 0; i < target->queue_size; ++i) {
Bart Van Assche509c07b2014-10-30 14:48:30 +01002229 srp_free_iu(target->srp_host, ch->rx_ring[i]);
2230 srp_free_iu(target->srp_host, ch->tx_ring[i]);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002231 }
2232
Bart Van Assche4d73f952013-10-26 14:40:37 +02002233
2234err_no_ring:
Bart Van Assche509c07b2014-10-30 14:48:30 +01002235 kfree(ch->tx_ring);
2236 ch->tx_ring = NULL;
2237 kfree(ch->rx_ring);
2238 ch->rx_ring = NULL;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002239
2240 return -ENOMEM;
2241}
2242
Bart Van Asschec9b03c12011-09-03 09:34:48 +02002243static uint32_t srp_compute_rq_tmo(struct ib_qp_attr *qp_attr, int attr_mask)
2244{
2245 uint64_t T_tr_ns, max_compl_time_ms;
2246 uint32_t rq_tmo_jiffies;
2247
2248 /*
2249 * According to section 11.2.4.2 in the IBTA spec (Modify Queue Pair,
2250 * table 91), both the QP timeout and the retry count have to be set
 * for RC QPs during the RTR to RTS transition.
2252 */
2253 WARN_ON_ONCE((attr_mask & (IB_QP_TIMEOUT | IB_QP_RETRY_CNT)) !=
2254 (IB_QP_TIMEOUT | IB_QP_RETRY_CNT));
2255
2256 /*
2257 * Set target->rq_tmo_jiffies to one second more than the largest time
2258 * it can take before an error completion is generated. See also
2259 * C9-140..142 in the IBTA spec for more information about how to
2260 * convert the QP Local ACK Timeout value to nanoseconds.
2261 */
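	/*
	 * Worked example (illustrative values): timeout = 14 and
	 * retry_cnt = 7 give T_tr = 4096 * 2^14 ns ~= 67 ms, hence
	 * max_compl_time ~= 7 * 4 * 67 ms ~= 1.9 s and a resulting
	 * timeout of roughly 2.9 seconds.
	 */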
2262 T_tr_ns = 4096 * (1ULL << qp_attr->timeout);
2263 max_compl_time_ms = qp_attr->retry_cnt * 4 * T_tr_ns;
2264 do_div(max_compl_time_ms, NSEC_PER_MSEC);
2265 rq_tmo_jiffies = msecs_to_jiffies(max_compl_time_ms + 1000);
2266
2267 return rq_tmo_jiffies;
2268}
2269
David Dillow961e0be2011-01-14 17:32:07 -05002270static void srp_cm_rep_handler(struct ib_cm_id *cm_id,
Bart Van Asschee6300cb2015-07-31 14:12:48 -07002271 const struct srp_login_rsp *lrsp,
Bart Van Assche509c07b2014-10-30 14:48:30 +01002272 struct srp_rdma_ch *ch)
David Dillow961e0be2011-01-14 17:32:07 -05002273{
Bart Van Assche509c07b2014-10-30 14:48:30 +01002274 struct srp_target_port *target = ch->target;
David Dillow961e0be2011-01-14 17:32:07 -05002275 struct ib_qp_attr *qp_attr = NULL;
2276 int attr_mask = 0;
2277 int ret;
2278 int i;
2279
2280 if (lrsp->opcode == SRP_LOGIN_RSP) {
Bart Van Assche509c07b2014-10-30 14:48:30 +01002281 ch->max_ti_iu_len = be32_to_cpu(lrsp->max_ti_iu_len);
2282 ch->req_lim = be32_to_cpu(lrsp->req_lim_delta);
David Dillow961e0be2011-01-14 17:32:07 -05002283
2284 /*
2285 * Reserve credits for task management so we don't
2286 * bounce requests back to the SCSI mid-layer.
2287 */
2288 target->scsi_host->can_queue
Bart Van Assche509c07b2014-10-30 14:48:30 +01002289 = min(ch->req_lim - SRP_TSK_MGMT_SQ_SIZE,
David Dillow961e0be2011-01-14 17:32:07 -05002290 target->scsi_host->can_queue);
Bart Van Assche4d73f952013-10-26 14:40:37 +02002291 target->scsi_host->cmd_per_lun
2292 = min_t(int, target->scsi_host->can_queue,
2293 target->scsi_host->cmd_per_lun);
David Dillow961e0be2011-01-14 17:32:07 -05002294 } else {
2295 shost_printk(KERN_WARNING, target->scsi_host,
2296 PFX "Unhandled RSP opcode %#x\n", lrsp->opcode);
2297 ret = -ECONNRESET;
2298 goto error;
2299 }
2300
Bart Van Assche509c07b2014-10-30 14:48:30 +01002301 if (!ch->rx_ring) {
2302 ret = srp_alloc_iu_bufs(ch);
David Dillow961e0be2011-01-14 17:32:07 -05002303 if (ret)
2304 goto error;
2305 }
2306
2307 ret = -ENOMEM;
2308 qp_attr = kmalloc(sizeof *qp_attr, GFP_KERNEL);
2309 if (!qp_attr)
2310 goto error;
2311
2312 qp_attr->qp_state = IB_QPS_RTR;
2313 ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
2314 if (ret)
2315 goto error_free;
2316
Bart Van Assche509c07b2014-10-30 14:48:30 +01002317 ret = ib_modify_qp(ch->qp, qp_attr, attr_mask);
David Dillow961e0be2011-01-14 17:32:07 -05002318 if (ret)
2319 goto error_free;
2320
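	/*
	 * Post all receive buffers while the QP is still in the RTR state,
	 * so that a receive buffer is available for every IU the target may
	 * send once the connection is established.
	 */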
Bart Van Assche4d73f952013-10-26 14:40:37 +02002321 for (i = 0; i < target->queue_size; i++) {
Bart Van Assche509c07b2014-10-30 14:48:30 +01002322 struct srp_iu *iu = ch->rx_ring[i];
2323
2324 ret = srp_post_recv(ch, iu);
David Dillow961e0be2011-01-14 17:32:07 -05002325 if (ret)
2326 goto error_free;
2327 }
2328
2329 qp_attr->qp_state = IB_QPS_RTS;
2330 ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
2331 if (ret)
2332 goto error_free;
2333
Bart Van Asschec9b03c12011-09-03 09:34:48 +02002334 target->rq_tmo_jiffies = srp_compute_rq_tmo(qp_attr, attr_mask);
2335
Bart Van Assche509c07b2014-10-30 14:48:30 +01002336 ret = ib_modify_qp(ch->qp, qp_attr, attr_mask);
David Dillow961e0be2011-01-14 17:32:07 -05002337 if (ret)
2338 goto error_free;
2339
2340 ret = ib_send_cm_rtu(cm_id, NULL, 0);
2341
2342error_free:
2343 kfree(qp_attr);
2344
2345error:
Bart Van Assche509c07b2014-10-30 14:48:30 +01002346 ch->status = ret;
David Dillow961e0be2011-01-14 17:32:07 -05002347}
2348
Roland Dreieraef9ec32005-11-02 14:07:13 -08002349static void srp_cm_rej_handler(struct ib_cm_id *cm_id,
2350 struct ib_cm_event *event,
Bart Van Assche509c07b2014-10-30 14:48:30 +01002351 struct srp_rdma_ch *ch)
Roland Dreieraef9ec32005-11-02 14:07:13 -08002352{
Bart Van Assche509c07b2014-10-30 14:48:30 +01002353 struct srp_target_port *target = ch->target;
David Dillow7aa54bd2008-01-07 18:23:41 -05002354 struct Scsi_Host *shost = target->scsi_host;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002355 struct ib_class_port_info *cpi;
2356 int opcode;
2357
2358 switch (event->param.rej_rcvd.reason) {
2359 case IB_CM_REJ_PORT_CM_REDIRECT:
2360 cpi = event->param.rej_rcvd.ari;
Bart Van Assche509c07b2014-10-30 14:48:30 +01002361 ch->path.dlid = cpi->redirect_lid;
2362 ch->path.pkey = cpi->redirect_pkey;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002363 cm_id->remote_cm_qpn = be32_to_cpu(cpi->redirect_qp) & 0x00ffffff;
Bart Van Assche509c07b2014-10-30 14:48:30 +01002364 memcpy(ch->path.dgid.raw, cpi->redirect_gid, 16);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002365
Bart Van Assche509c07b2014-10-30 14:48:30 +01002366 ch->status = ch->path.dlid ?
Roland Dreieraef9ec32005-11-02 14:07:13 -08002367 SRP_DLID_REDIRECT : SRP_PORT_REDIRECT;
2368 break;
2369
2370 case IB_CM_REJ_PORT_REDIRECT:
Roland Dreier5d7cbfd2007-08-03 10:45:18 -07002371 if (srp_target_is_topspin(target)) {
Roland Dreieraef9ec32005-11-02 14:07:13 -08002372 /*
2373 * Topspin/Cisco SRP gateways incorrectly send
2374 * reject reason code 25 when they mean 24
2375 * (port redirect).
2376 */
Bart Van Assche509c07b2014-10-30 14:48:30 +01002377 memcpy(ch->path.dgid.raw,
Roland Dreieraef9ec32005-11-02 14:07:13 -08002378 event->param.rej_rcvd.ari, 16);
2379
David Dillow7aa54bd2008-01-07 18:23:41 -05002380 shost_printk(KERN_DEBUG, shost,
2381 PFX "Topspin/Cisco redirect to target port GID %016llx%016llx\n",
Bart Van Assche509c07b2014-10-30 14:48:30 +01002382 be64_to_cpu(ch->path.dgid.global.subnet_prefix),
2383 be64_to_cpu(ch->path.dgid.global.interface_id));
Roland Dreieraef9ec32005-11-02 14:07:13 -08002384
Bart Van Assche509c07b2014-10-30 14:48:30 +01002385 ch->status = SRP_PORT_REDIRECT;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002386 } else {
David Dillow7aa54bd2008-01-07 18:23:41 -05002387 shost_printk(KERN_WARNING, shost,
2388 " REJ reason: IB_CM_REJ_PORT_REDIRECT\n");
Bart Van Assche509c07b2014-10-30 14:48:30 +01002389 ch->status = -ECONNRESET;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002390 }
2391 break;
2392
2393 case IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID:
David Dillow7aa54bd2008-01-07 18:23:41 -05002394 shost_printk(KERN_WARNING, shost,
2395 " REJ reason: IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID\n");
Bart Van Assche509c07b2014-10-30 14:48:30 +01002396 ch->status = -ECONNRESET;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002397 break;
2398
2399 case IB_CM_REJ_CONSUMER_DEFINED:
2400 opcode = *(u8 *) event->private_data;
2401 if (opcode == SRP_LOGIN_REJ) {
2402 struct srp_login_rej *rej = event->private_data;
2403 u32 reason = be32_to_cpu(rej->reason);
2404
2405 if (reason == SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE)
David Dillow7aa54bd2008-01-07 18:23:41 -05002406 shost_printk(KERN_WARNING, shost,
2407 PFX "SRP_LOGIN_REJ: requested max_it_iu_len too large\n");
Roland Dreieraef9ec32005-11-02 14:07:13 -08002408 else
Bart Van Asschee7ffde02014-03-14 13:52:21 +01002409 shost_printk(KERN_WARNING, shost, PFX
2410 "SRP LOGIN from %pI6 to %pI6 REJECTED, reason 0x%08x\n",
Bart Van Assche747fe002014-10-30 14:48:05 +01002411 target->sgid.raw,
2412 target->orig_dgid.raw, reason);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002413 } else
David Dillow7aa54bd2008-01-07 18:23:41 -05002414 shost_printk(KERN_WARNING, shost,
2415 " REJ reason: IB_CM_REJ_CONSUMER_DEFINED,"
2416 " opcode 0x%02x\n", opcode);
Bart Van Assche509c07b2014-10-30 14:48:30 +01002417 ch->status = -ECONNRESET;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002418 break;
2419
David Dillow9fe4bcf2008-01-08 17:08:52 -05002420 case IB_CM_REJ_STALE_CONN:
2421 shost_printk(KERN_WARNING, shost, " REJ reason: stale connection\n");
Bart Van Assche509c07b2014-10-30 14:48:30 +01002422 ch->status = SRP_STALE_CONN;
David Dillow9fe4bcf2008-01-08 17:08:52 -05002423 break;
2424
Roland Dreieraef9ec32005-11-02 14:07:13 -08002425 default:
David Dillow7aa54bd2008-01-07 18:23:41 -05002426 shost_printk(KERN_WARNING, shost, " REJ reason 0x%x\n",
2427 event->param.rej_rcvd.reason);
Bart Van Assche509c07b2014-10-30 14:48:30 +01002428 ch->status = -ECONNRESET;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002429 }
2430}
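/*
 * How the reject reasons above feed back into the login logic: the
 * positive statuses (SRP_DLID_REDIRECT, SRP_PORT_REDIRECT and
 * SRP_STALE_CONN) tell the connect path to retry with updated path
 * information or a fresh CM ID, while every other reason is mapped to
 * -ECONNRESET and aborts the login attempt.
 */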
2431
2432static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
2433{
Bart Van Assche509c07b2014-10-30 14:48:30 +01002434 struct srp_rdma_ch *ch = cm_id->context;
2435 struct srp_target_port *target = ch->target;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002436 int comp = 0;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002437
2438 switch (event->event) {
2439 case IB_CM_REQ_ERROR:
David Dillow7aa54bd2008-01-07 18:23:41 -05002440 shost_printk(KERN_DEBUG, target->scsi_host,
2441 PFX "Sending CM REQ failed\n");
Roland Dreieraef9ec32005-11-02 14:07:13 -08002442 comp = 1;
Bart Van Assche509c07b2014-10-30 14:48:30 +01002443 ch->status = -ECONNRESET;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002444 break;
2445
2446 case IB_CM_REP_RECEIVED:
2447 comp = 1;
Bart Van Assche509c07b2014-10-30 14:48:30 +01002448 srp_cm_rep_handler(cm_id, event->private_data, ch);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002449 break;
2450
2451 case IB_CM_REJ_RECEIVED:
David Dillow7aa54bd2008-01-07 18:23:41 -05002452 shost_printk(KERN_DEBUG, target->scsi_host, PFX "REJ received\n");
Roland Dreieraef9ec32005-11-02 14:07:13 -08002453 comp = 1;
2454
Bart Van Assche509c07b2014-10-30 14:48:30 +01002455 srp_cm_rej_handler(cm_id, event, ch);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002456 break;
2457
Ishai Rabinovitzb7ac4ab2006-06-17 20:37:32 -07002458 case IB_CM_DREQ_RECEIVED:
David Dillow7aa54bd2008-01-07 18:23:41 -05002459 shost_printk(KERN_WARNING, target->scsi_host,
2460 PFX "DREQ received - connection closed\n");
Bart Van Asschec014c8c2015-05-18 13:23:57 +02002461 ch->connected = false;
Ishai Rabinovitzb7ac4ab2006-06-17 20:37:32 -07002462 if (ib_send_cm_drep(cm_id, NULL, 0))
David Dillow7aa54bd2008-01-07 18:23:41 -05002463 shost_printk(KERN_ERR, target->scsi_host,
2464 PFX "Sending CM DREP failed\n");
Bart Van Asschec1120f82013-10-26 14:35:08 +02002465 queue_work(system_long_wq, &target->tl_err_work);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002466 break;
2467
2468 case IB_CM_TIMEWAIT_EXIT:
David Dillow7aa54bd2008-01-07 18:23:41 -05002469 shost_printk(KERN_ERR, target->scsi_host,
2470 PFX "connection closed\n");
Bart Van Asscheac72d762014-03-14 13:53:40 +01002471 comp = 1;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002472
Bart Van Assche509c07b2014-10-30 14:48:30 +01002473 ch->status = 0;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002474 break;
2475
Ishai Rabinovitzb7ac4ab2006-06-17 20:37:32 -07002476 case IB_CM_MRA_RECEIVED:
2477 case IB_CM_DREQ_ERROR:
2478 case IB_CM_DREP_RECEIVED:
2479 break;
2480
Roland Dreieraef9ec32005-11-02 14:07:13 -08002481 default:
David Dillow7aa54bd2008-01-07 18:23:41 -05002482 shost_printk(KERN_WARNING, target->scsi_host,
2483 PFX "Unhandled CM event %d\n", event->event);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002484 break;
2485 }
2486
2487 if (comp)
Bart Van Assche509c07b2014-10-30 14:48:30 +01002488 complete(&ch->done);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002489
Roland Dreieraef9ec32005-11-02 14:07:13 -08002490 return 0;
2491}
2492
Jack Wang71444b92013-11-07 11:37:37 +01002493/**
Jack Wang71444b92013-11-07 11:37:37 +01002494 * srp_change_queue_depth - set device queue depth
2495 * @sdev: scsi device struct
2496 * @qdepth: requested queue depth
Jack Wang71444b92013-11-07 11:37:37 +01002497 *
2498 * Returns queue depth.
2499 */
2500static int
Christoph Hellwigdb5ed4d2014-11-13 15:08:42 +01002501srp_change_queue_depth(struct scsi_device *sdev, int qdepth)
Jack Wang71444b92013-11-07 11:37:37 +01002502{
Christoph Hellwigc40ecc12014-11-13 14:25:11 +01002503 if (!sdev->tagged_supported)
Christoph Hellwig1e6f2412014-11-13 14:27:41 +01002504 qdepth = 1;
Christoph Hellwigdb5ed4d2014-11-13 15:08:42 +01002505 return scsi_change_queue_depth(sdev, qdepth);
Jack Wang71444b92013-11-07 11:37:37 +01002506}
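/*
 * This callback is invoked by the SCSI midlayer, for instance when a new
 * value is written to a SCSI device's sysfs queue_depth attribute.
 * Devices without tagged command queuing support are pinned to a queue
 * depth of one.
 */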
2507
Bart Van Assche985aa492015-05-18 13:27:14 +02002508static int srp_send_tsk_mgmt(struct srp_rdma_ch *ch, u64 req_tag, u64 lun,
2509 u8 func)
Roland Dreieraef9ec32005-11-02 14:07:13 -08002510{
Bart Van Assche509c07b2014-10-30 14:48:30 +01002511 struct srp_target_port *target = ch->target;
Bart Van Asschea95cadb2013-10-26 14:37:17 +02002512 struct srp_rport *rport = target->rport;
David Dillow19081f32010-10-18 08:54:49 -04002513 struct ib_device *dev = target->srp_host->srp_dev->dev;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002514 struct srp_iu *iu;
2515 struct srp_tsk_mgmt *tsk_mgmt;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002516
Bart Van Asschec014c8c2015-05-18 13:23:57 +02002517 if (!ch->connected || target->qp_in_error)
Bart Van Assche3780d1f2013-02-21 17:18:00 +00002518 return -1;
2519
Bart Van Assche509c07b2014-10-30 14:48:30 +01002520 init_completion(&ch->tsk_mgmt_done);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002521
Bart Van Asschea95cadb2013-10-26 14:37:17 +02002522 /*
Bart Van Assche509c07b2014-10-30 14:48:30 +01002523	 * Lock the rport mutex to prevent srp_create_ch_ib() from being
Bart Van Asschea95cadb2013-10-26 14:37:17 +02002524	 * invoked while a task management function is being sent.
2525 */
2526 mutex_lock(&rport->mutex);
Bart Van Assche509c07b2014-10-30 14:48:30 +01002527 spin_lock_irq(&ch->lock);
2528 iu = __srp_get_tx_iu(ch, SRP_IU_TSK_MGMT);
2529 spin_unlock_irq(&ch->lock);
Bart Van Assche76c75b22010-11-26 14:37:47 -05002530
Bart Van Asschea95cadb2013-10-26 14:37:17 +02002531 if (!iu) {
2532 mutex_unlock(&rport->mutex);
2533
Bart Van Assche76c75b22010-11-26 14:37:47 -05002534 return -1;
Bart Van Asschea95cadb2013-10-26 14:37:17 +02002535 }
Roland Dreieraef9ec32005-11-02 14:07:13 -08002536
David Dillow19081f32010-10-18 08:54:49 -04002537 ib_dma_sync_single_for_cpu(dev, iu->dma, sizeof *tsk_mgmt,
2538 DMA_TO_DEVICE);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002539 tsk_mgmt = iu->buf;
2540 memset(tsk_mgmt, 0, sizeof *tsk_mgmt);
2541
2542 tsk_mgmt->opcode = SRP_TSK_MGMT;
Bart Van Assche985aa492015-05-18 13:27:14 +02002543 int_to_scsilun(lun, &tsk_mgmt->lun);
David Dillowf8b6e312010-11-26 13:02:21 -05002544 tsk_mgmt->tag = req_tag | SRP_TAG_TSK_MGMT;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002545 tsk_mgmt->tsk_mgmt_func = func;
David Dillowf8b6e312010-11-26 13:02:21 -05002546 tsk_mgmt->task_tag = req_tag;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002547
David Dillow19081f32010-10-18 08:54:49 -04002548 ib_dma_sync_single_for_device(dev, iu->dma, sizeof *tsk_mgmt,
2549 DMA_TO_DEVICE);
Bart Van Assche509c07b2014-10-30 14:48:30 +01002550 if (srp_post_send(ch, iu, sizeof(*tsk_mgmt))) {
2551 srp_put_tx_iu(ch, iu, SRP_IU_TSK_MGMT);
Bart Van Asschea95cadb2013-10-26 14:37:17 +02002552 mutex_unlock(&rport->mutex);
2553
Bart Van Assche76c75b22010-11-26 14:37:47 -05002554 return -1;
2555 }
Bart Van Asschea95cadb2013-10-26 14:37:17 +02002556 mutex_unlock(&rport->mutex);
Roland Dreierd945e1d2006-05-09 10:50:28 -07002557
Bart Van Assche509c07b2014-10-30 14:48:30 +01002558 if (!wait_for_completion_timeout(&ch->tsk_mgmt_done,
Roland Dreieraef9ec32005-11-02 14:07:13 -08002559 msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS)))
Roland Dreierd945e1d2006-05-09 10:50:28 -07002560 return -1;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002561
Roland Dreierd945e1d2006-05-09 10:50:28 -07002562 return 0;
Roland Dreierd945e1d2006-05-09 10:50:28 -07002563}
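/*
 * Example invocation, as used by srp_abort() below, to abort the command
 * with block layer tag "tag" on LUN "lun" of channel "ch":
 *
 *	srp_send_tsk_mgmt(ch, tag, lun, SRP_TSK_ABORT_TASK);
 *
 * The call returns 0 only if the target answered the task management IU
 * within SRP_ABORT_TIMEOUT_MS.
 */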
2564
Roland Dreieraef9ec32005-11-02 14:07:13 -08002565static int srp_abort(struct scsi_cmnd *scmnd)
2566{
Roland Dreierd945e1d2006-05-09 10:50:28 -07002567 struct srp_target_port *target = host_to_target(scmnd->device->host);
David Dillowf8b6e312010-11-26 13:02:21 -05002568 struct srp_request *req = (struct srp_request *) scmnd->host_scribble;
Bart Van Assche77f2c1a2014-10-02 15:29:25 +02002569 u32 tag;
Bart Van Assched92c0da2014-10-06 17:14:36 +02002570 u16 ch_idx;
Bart Van Assche509c07b2014-10-30 14:48:30 +01002571 struct srp_rdma_ch *ch;
Bart Van Assche086f44f2013-06-12 15:23:04 +02002572 int ret;
Roland Dreierd945e1d2006-05-09 10:50:28 -07002573
David Dillow7aa54bd2008-01-07 18:23:41 -05002574 shost_printk(KERN_ERR, target->scsi_host, "SRP abort called\n");
Roland Dreieraef9ec32005-11-02 14:07:13 -08002575
Bart Van Assched92c0da2014-10-06 17:14:36 +02002576 if (!req)
Bart Van Assche99b66972013-10-10 13:52:33 +02002577 return SUCCESS;
Bart Van Assche77f2c1a2014-10-02 15:29:25 +02002578 tag = blk_mq_unique_tag(scmnd->request);
Bart Van Assched92c0da2014-10-06 17:14:36 +02002579 ch_idx = blk_mq_unique_tag_to_hwq(tag);
2580 if (WARN_ON_ONCE(ch_idx >= target->ch_count))
2581 return SUCCESS;
2582 ch = &target->ch[ch_idx];
2583 if (!srp_claim_req(ch, req, NULL, scmnd))
2584 return SUCCESS;
2585 shost_printk(KERN_ERR, target->scsi_host,
2586 "Sending SRP abort for tag %#x\n", tag);
Bart Van Assche77f2c1a2014-10-02 15:29:25 +02002587 if (srp_send_tsk_mgmt(ch, tag, scmnd->device->lun,
Bart Van Assche80d5e8a2013-07-10 17:36:35 +02002588 SRP_TSK_ABORT_TASK) == 0)
Bart Van Assche086f44f2013-06-12 15:23:04 +02002589 ret = SUCCESS;
Bart Van Asscheed9b2262013-10-26 14:34:27 +02002590 else if (target->rport->state == SRP_RPORT_LOST)
Bart Van Assche99e1c132013-06-28 14:49:58 +02002591 ret = FAST_IO_FAIL;
Bart Van Assche086f44f2013-06-12 15:23:04 +02002592 else
2593 ret = FAILED;
Bart Van Assche509c07b2014-10-30 14:48:30 +01002594 srp_free_req(ch, req, scmnd, 0);
Bart Van Assche22032992012-08-14 13:18:53 +00002595 scmnd->result = DID_ABORT << 16;
Bart Van Assched8536672012-08-24 10:29:11 +00002596 scmnd->scsi_done(scmnd);
Roland Dreierd945e1d2006-05-09 10:50:28 -07002597
Bart Van Assche086f44f2013-06-12 15:23:04 +02002598 return ret;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002599}
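/*
 * Note on the tag decoding in srp_abort(): blk_mq_unique_tag() returns
 * (hwq << 16) | per-queue tag, so blk_mq_unique_tag_to_hwq() recovers the
 * hardware queue index, which this driver uses directly as the RDMA
 * channel index.
 */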
2600
2601static int srp_reset_device(struct scsi_cmnd *scmnd)
2602{
Roland Dreierd945e1d2006-05-09 10:50:28 -07002603 struct srp_target_port *target = host_to_target(scmnd->device->host);
Bart Van Assched92c0da2014-10-06 17:14:36 +02002604 struct srp_rdma_ch *ch;
Bart Van Assche536ae142010-11-26 13:58:27 -05002605	int i, j;
Roland Dreierd945e1d2006-05-09 10:50:28 -07002606
David Dillow7aa54bd2008-01-07 18:23:41 -05002607 shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n");
Roland Dreieraef9ec32005-11-02 14:07:13 -08002608
Bart Van Assched92c0da2014-10-06 17:14:36 +02002609 ch = &target->ch[0];
Bart Van Assche509c07b2014-10-30 14:48:30 +01002610 if (srp_send_tsk_mgmt(ch, SRP_TAG_NO_REQ, scmnd->device->lun,
David Dillowf8b6e312010-11-26 13:02:21 -05002611 SRP_TSK_LUN_RESET))
Roland Dreierd945e1d2006-05-09 10:50:28 -07002612 return FAILED;
Bart Van Assche509c07b2014-10-30 14:48:30 +01002613 if (ch->tsk_mgmt_status)
Roland Dreierd945e1d2006-05-09 10:50:28 -07002614 return FAILED;
2615
Bart Van Assched92c0da2014-10-06 17:14:36 +02002616 for (i = 0; i < target->ch_count; i++) {
2617 ch = &target->ch[i];
2618		for (j = 0; j < target->req_ring_size; ++j) {
2619			struct srp_request *req = &ch->req_ring[j];
Bart Van Assche509c07b2014-10-30 14:48:30 +01002620
Bart Van Assched92c0da2014-10-06 17:14:36 +02002621 srp_finish_req(ch, req, scmnd->device, DID_RESET << 16);
2622 }
Bart Van Assche536ae142010-11-26 13:58:27 -05002623 }
Roland Dreierd945e1d2006-05-09 10:50:28 -07002624
Roland Dreierd945e1d2006-05-09 10:50:28 -07002625 return SUCCESS;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002626}
2627
2628static int srp_reset_host(struct scsi_cmnd *scmnd)
2629{
2630 struct srp_target_port *target = host_to_target(scmnd->device->host);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002631
David Dillow7aa54bd2008-01-07 18:23:41 -05002632 shost_printk(KERN_ERR, target->scsi_host, PFX "SRP reset_host called\n");
Roland Dreieraef9ec32005-11-02 14:07:13 -08002633
Bart Van Asscheed9b2262013-10-26 14:34:27 +02002634 return srp_reconnect_rport(target->rport) == 0 ? SUCCESS : FAILED;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002635}
2636
Bart Van Assche509c5f32016-05-12 10:50:35 -07002637static int srp_slave_alloc(struct scsi_device *sdev)
2638{
2639 struct Scsi_Host *shost = sdev->host;
2640 struct srp_target_port *target = host_to_target(shost);
2641 struct srp_device *srp_dev = target->srp_host->srp_dev;
2642 struct ib_device *ibdev = srp_dev->dev;
2643
2644 if (!(ibdev->attrs.device_cap_flags & IB_DEVICE_SG_GAPS_REG))
2645 blk_queue_virt_boundary(sdev->request_queue,
2646 ~srp_dev->mr_page_mask);
2647
2648 return 0;
2649}
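/*
 * The virt_boundary mask set above makes the block layer split any
 * request whose scatter/gather segments would leave a gap in the middle
 * of an HCA page, matching the FMR/FR mapping limitation; devices that
 * advertise IB_DEVICE_SG_GAPS_REG can register such buffers and need no
 * restriction.
 */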
2650
Bart Van Asschec9b03c12011-09-03 09:34:48 +02002651static int srp_slave_configure(struct scsi_device *sdev)
2652{
2653 struct Scsi_Host *shost = sdev->host;
2654 struct srp_target_port *target = host_to_target(shost);
2655 struct request_queue *q = sdev->request_queue;
2656 unsigned long timeout;
2657
2658 if (sdev->type == TYPE_DISK) {
2659 timeout = max_t(unsigned, 30 * HZ, target->rq_tmo_jiffies);
2660 blk_queue_rq_timeout(q, timeout);
2661 }
2662
2663 return 0;
2664}
2665
Tony Jonesee959b02008-02-22 00:13:36 +01002666static ssize_t show_id_ext(struct device *dev, struct device_attribute *attr,
2667 char *buf)
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002668{
Tony Jonesee959b02008-02-22 00:13:36 +01002669 struct srp_target_port *target = host_to_target(class_to_shost(dev));
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002670
Bart Van Assche45c37ca2015-05-18 13:25:10 +02002671 return sprintf(buf, "0x%016llx\n", be64_to_cpu(target->id_ext));
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002672}
2673
Tony Jonesee959b02008-02-22 00:13:36 +01002674static ssize_t show_ioc_guid(struct device *dev, struct device_attribute *attr,
2675 char *buf)
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002676{
Tony Jonesee959b02008-02-22 00:13:36 +01002677 struct srp_target_port *target = host_to_target(class_to_shost(dev));
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002678
Bart Van Assche45c37ca2015-05-18 13:25:10 +02002679 return sprintf(buf, "0x%016llx\n", be64_to_cpu(target->ioc_guid));
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002680}
2681
Tony Jonesee959b02008-02-22 00:13:36 +01002682static ssize_t show_service_id(struct device *dev,
2683 struct device_attribute *attr, char *buf)
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002684{
Tony Jonesee959b02008-02-22 00:13:36 +01002685 struct srp_target_port *target = host_to_target(class_to_shost(dev));
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002686
Bart Van Assche45c37ca2015-05-18 13:25:10 +02002687 return sprintf(buf, "0x%016llx\n", be64_to_cpu(target->service_id));
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002688}
2689
Tony Jonesee959b02008-02-22 00:13:36 +01002690static ssize_t show_pkey(struct device *dev, struct device_attribute *attr,
2691 char *buf)
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002692{
Tony Jonesee959b02008-02-22 00:13:36 +01002693 struct srp_target_port *target = host_to_target(class_to_shost(dev));
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002694
Bart Van Assche747fe002014-10-30 14:48:05 +01002695 return sprintf(buf, "0x%04x\n", be16_to_cpu(target->pkey));
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002696}
2697
Bart Van Assche848b3082013-10-26 14:38:12 +02002698static ssize_t show_sgid(struct device *dev, struct device_attribute *attr,
2699 char *buf)
2700{
2701 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2702
Bart Van Assche747fe002014-10-30 14:48:05 +01002703 return sprintf(buf, "%pI6\n", target->sgid.raw);
Bart Van Assche848b3082013-10-26 14:38:12 +02002704}
2705
Tony Jonesee959b02008-02-22 00:13:36 +01002706static ssize_t show_dgid(struct device *dev, struct device_attribute *attr,
2707 char *buf)
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002708{
Tony Jonesee959b02008-02-22 00:13:36 +01002709 struct srp_target_port *target = host_to_target(class_to_shost(dev));
Bart Van Assched92c0da2014-10-06 17:14:36 +02002710 struct srp_rdma_ch *ch = &target->ch[0];
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002711
Bart Van Assche509c07b2014-10-30 14:48:30 +01002712 return sprintf(buf, "%pI6\n", ch->path.dgid.raw);
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002713}
2714
Tony Jonesee959b02008-02-22 00:13:36 +01002715static ssize_t show_orig_dgid(struct device *dev,
2716 struct device_attribute *attr, char *buf)
Ishai Rabinovitz3633b3d2007-05-06 21:18:11 -07002717{
Tony Jonesee959b02008-02-22 00:13:36 +01002718 struct srp_target_port *target = host_to_target(class_to_shost(dev));
Ishai Rabinovitz3633b3d2007-05-06 21:18:11 -07002719
Bart Van Assche747fe002014-10-30 14:48:05 +01002720 return sprintf(buf, "%pI6\n", target->orig_dgid.raw);
Ishai Rabinovitz3633b3d2007-05-06 21:18:11 -07002721}
2722
Bart Van Assche89de7482010-08-03 14:08:45 +00002723static ssize_t show_req_lim(struct device *dev,
2724 struct device_attribute *attr, char *buf)
2725{
2726 struct srp_target_port *target = host_to_target(class_to_shost(dev));
Bart Van Assched92c0da2014-10-06 17:14:36 +02002727 struct srp_rdma_ch *ch;
2728 int i, req_lim = INT_MAX;
Bart Van Assche89de7482010-08-03 14:08:45 +00002729
Bart Van Assched92c0da2014-10-06 17:14:36 +02002730 for (i = 0; i < target->ch_count; i++) {
2731 ch = &target->ch[i];
2732 req_lim = min(req_lim, ch->req_lim);
2733 }
2734 return sprintf(buf, "%d\n", req_lim);
Bart Van Assche89de7482010-08-03 14:08:45 +00002735}
2736
Tony Jonesee959b02008-02-22 00:13:36 +01002737static ssize_t show_zero_req_lim(struct device *dev,
2738 struct device_attribute *attr, char *buf)
Roland Dreier6bfa24f2006-06-17 20:37:33 -07002739{
Tony Jonesee959b02008-02-22 00:13:36 +01002740 struct srp_target_port *target = host_to_target(class_to_shost(dev));
Roland Dreier6bfa24f2006-06-17 20:37:33 -07002741
Roland Dreier6bfa24f2006-06-17 20:37:33 -07002742 return sprintf(buf, "%d\n", target->zero_req_lim);
2743}
2744
Tony Jonesee959b02008-02-22 00:13:36 +01002745static ssize_t show_local_ib_port(struct device *dev,
2746 struct device_attribute *attr, char *buf)
Ishai Rabinovitzded7f1a2006-08-15 17:34:52 +03002747{
Tony Jonesee959b02008-02-22 00:13:36 +01002748 struct srp_target_port *target = host_to_target(class_to_shost(dev));
Ishai Rabinovitzded7f1a2006-08-15 17:34:52 +03002749
2750 return sprintf(buf, "%d\n", target->srp_host->port);
2751}
2752
Tony Jonesee959b02008-02-22 00:13:36 +01002753static ssize_t show_local_ib_device(struct device *dev,
2754 struct device_attribute *attr, char *buf)
Ishai Rabinovitzded7f1a2006-08-15 17:34:52 +03002755{
Tony Jonesee959b02008-02-22 00:13:36 +01002756 struct srp_target_port *target = host_to_target(class_to_shost(dev));
Ishai Rabinovitzded7f1a2006-08-15 17:34:52 +03002757
Greg Kroah-Hartman05321932008-03-06 00:13:36 +01002758 return sprintf(buf, "%s\n", target->srp_host->srp_dev->dev->name);
Ishai Rabinovitzded7f1a2006-08-15 17:34:52 +03002759}
2760
Bart Van Assched92c0da2014-10-06 17:14:36 +02002761static ssize_t show_ch_count(struct device *dev, struct device_attribute *attr,
2762 char *buf)
2763{
2764 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2765
2766 return sprintf(buf, "%d\n", target->ch_count);
2767}
2768
Bart Van Assche4b5e5f42013-06-28 14:57:42 +02002769static ssize_t show_comp_vector(struct device *dev,
2770 struct device_attribute *attr, char *buf)
2771{
2772 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2773
2774 return sprintf(buf, "%d\n", target->comp_vector);
2775}
2776
Vu Pham7bb312e2013-10-26 14:31:27 +02002777static ssize_t show_tl_retry_count(struct device *dev,
2778 struct device_attribute *attr, char *buf)
2779{
2780 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2781
2782 return sprintf(buf, "%d\n", target->tl_retry_count);
2783}
2784
David Dillow49248642011-01-14 18:23:24 -05002785static ssize_t show_cmd_sg_entries(struct device *dev,
2786 struct device_attribute *attr, char *buf)
2787{
2788 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2789
2790 return sprintf(buf, "%u\n", target->cmd_sg_cnt);
2791}
2792
David Dillowc07d4242011-01-16 13:57:10 -05002793static ssize_t show_allow_ext_sg(struct device *dev,
2794 struct device_attribute *attr, char *buf)
2795{
2796 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2797
2798 return sprintf(buf, "%s\n", target->allow_ext_sg ? "true" : "false");
2799}
2800
Tony Jonesee959b02008-02-22 00:13:36 +01002801static DEVICE_ATTR(id_ext, S_IRUGO, show_id_ext, NULL);
2802static DEVICE_ATTR(ioc_guid, S_IRUGO, show_ioc_guid, NULL);
2803static DEVICE_ATTR(service_id, S_IRUGO, show_service_id, NULL);
2804static DEVICE_ATTR(pkey, S_IRUGO, show_pkey, NULL);
Bart Van Assche848b3082013-10-26 14:38:12 +02002805static DEVICE_ATTR(sgid, S_IRUGO, show_sgid, NULL);
Tony Jonesee959b02008-02-22 00:13:36 +01002806static DEVICE_ATTR(dgid, S_IRUGO, show_dgid, NULL);
2807static DEVICE_ATTR(orig_dgid, S_IRUGO, show_orig_dgid, NULL);
Bart Van Assche89de7482010-08-03 14:08:45 +00002808static DEVICE_ATTR(req_lim, S_IRUGO, show_req_lim, NULL);
Tony Jonesee959b02008-02-22 00:13:36 +01002809static DEVICE_ATTR(zero_req_lim, S_IRUGO, show_zero_req_lim, NULL);
2810static DEVICE_ATTR(local_ib_port, S_IRUGO, show_local_ib_port, NULL);
2811static DEVICE_ATTR(local_ib_device, S_IRUGO, show_local_ib_device, NULL);
Bart Van Assched92c0da2014-10-06 17:14:36 +02002812static DEVICE_ATTR(ch_count, S_IRUGO, show_ch_count, NULL);
Bart Van Assche4b5e5f42013-06-28 14:57:42 +02002813static DEVICE_ATTR(comp_vector, S_IRUGO, show_comp_vector, NULL);
Vu Pham7bb312e2013-10-26 14:31:27 +02002814static DEVICE_ATTR(tl_retry_count, S_IRUGO, show_tl_retry_count, NULL);
David Dillow49248642011-01-14 18:23:24 -05002815static DEVICE_ATTR(cmd_sg_entries, S_IRUGO, show_cmd_sg_entries, NULL);
David Dillowc07d4242011-01-16 13:57:10 -05002816static DEVICE_ATTR(allow_ext_sg, S_IRUGO, show_allow_ext_sg, NULL);
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002817
Tony Jonesee959b02008-02-22 00:13:36 +01002818static struct device_attribute *srp_host_attrs[] = {
2819 &dev_attr_id_ext,
2820 &dev_attr_ioc_guid,
2821 &dev_attr_service_id,
2822 &dev_attr_pkey,
Bart Van Assche848b3082013-10-26 14:38:12 +02002823 &dev_attr_sgid,
Tony Jonesee959b02008-02-22 00:13:36 +01002824 &dev_attr_dgid,
2825 &dev_attr_orig_dgid,
Bart Van Assche89de7482010-08-03 14:08:45 +00002826 &dev_attr_req_lim,
Tony Jonesee959b02008-02-22 00:13:36 +01002827 &dev_attr_zero_req_lim,
2828 &dev_attr_local_ib_port,
2829 &dev_attr_local_ib_device,
Bart Van Assched92c0da2014-10-06 17:14:36 +02002830 &dev_attr_ch_count,
Bart Van Assche4b5e5f42013-06-28 14:57:42 +02002831 &dev_attr_comp_vector,
Vu Pham7bb312e2013-10-26 14:31:27 +02002832 &dev_attr_tl_retry_count,
David Dillow49248642011-01-14 18:23:24 -05002833 &dev_attr_cmd_sg_entries,
David Dillowc07d4242011-01-16 13:57:10 -05002834 &dev_attr_allow_ext_sg,
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002835 NULL
2836};
2837
Roland Dreieraef9ec32005-11-02 14:07:13 -08002838static struct scsi_host_template srp_template = {
2839 .module = THIS_MODULE,
Roland Dreierb7f008f2007-05-06 21:18:11 -07002840 .name = "InfiniBand SRP initiator",
2841 .proc_name = DRV_NAME,
Bart Van Assche509c5f32016-05-12 10:50:35 -07002842 .slave_alloc = srp_slave_alloc,
Bart Van Asschec9b03c12011-09-03 09:34:48 +02002843 .slave_configure = srp_slave_configure,
Roland Dreieraef9ec32005-11-02 14:07:13 -08002844 .info = srp_target_info,
2845 .queuecommand = srp_queuecommand,
Jack Wang71444b92013-11-07 11:37:37 +01002846 .change_queue_depth = srp_change_queue_depth,
Roland Dreieraef9ec32005-11-02 14:07:13 -08002847 .eh_abort_handler = srp_abort,
2848 .eh_device_reset_handler = srp_reset_device,
2849 .eh_host_reset_handler = srp_reset_host,
Bart Van Assche2742c1d2013-06-12 15:24:25 +02002850 .skip_settle_delay = true,
David Dillow49248642011-01-14 18:23:24 -05002851 .sg_tablesize = SRP_DEF_SG_TABLESIZE,
Bart Van Assche4d73f952013-10-26 14:40:37 +02002852 .can_queue = SRP_DEFAULT_CMD_SQ_SIZE,
Roland Dreieraef9ec32005-11-02 14:07:13 -08002853 .this_id = -1,
Bart Van Assche4d73f952013-10-26 14:40:37 +02002854 .cmd_per_lun = SRP_DEFAULT_CMD_SQ_SIZE,
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002855 .use_clustering = ENABLE_CLUSTERING,
Bart Van Assche77f2c1a2014-10-02 15:29:25 +02002856 .shost_attrs = srp_host_attrs,
Christoph Hellwigc40ecc12014-11-13 14:25:11 +01002857 .track_queue_depth = 1,
Roland Dreieraef9ec32005-11-02 14:07:13 -08002858};
2859
Bart Van Assche34aa6542014-10-30 14:47:22 +01002860static int srp_sdev_count(struct Scsi_Host *host)
2861{
2862 struct scsi_device *sdev;
2863 int c = 0;
2864
2865 shost_for_each_device(sdev, host)
2866 c++;
2867
2868 return c;
2869}
2870
Bart Van Asschebc44bd12015-08-14 11:01:09 -07002871/*
2872 * Return values:
2873 * < 0 upon failure. Caller is responsible for SRP target port cleanup.
2874 * 0 and target->state == SRP_TARGET_REMOVED if asynchronous target port
2875 * removal has been scheduled.
2876 * 0 and target->state != SRP_TARGET_REMOVED upon success.
2877 */
Roland Dreieraef9ec32005-11-02 14:07:13 -08002878static int srp_add_target(struct srp_host *host, struct srp_target_port *target)
2879{
FUJITA Tomonori32368222007-06-27 16:33:12 +09002880 struct srp_rport_identifiers ids;
2881 struct srp_rport *rport;
2882
Bart Van Assche34aa6542014-10-30 14:47:22 +01002883 target->state = SRP_TARGET_SCANNING;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002884 sprintf(target->target_name, "SRP.T10:%016llX",
Bart Van Assche45c37ca2015-05-18 13:25:10 +02002885 be64_to_cpu(target->id_ext));
Roland Dreieraef9ec32005-11-02 14:07:13 -08002886
Greg Kroah-Hartman05321932008-03-06 00:13:36 +01002887 if (scsi_add_host(target->scsi_host, host->srp_dev->dev->dma_device))
Roland Dreieraef9ec32005-11-02 14:07:13 -08002888 return -ENODEV;
2889
FUJITA Tomonori32368222007-06-27 16:33:12 +09002890 memcpy(ids.port_id, &target->id_ext, 8);
2891 memcpy(ids.port_id + 8, &target->ioc_guid, 8);
FUJITA Tomonoriaebd5e42007-07-11 15:08:15 +09002892 ids.roles = SRP_RPORT_ROLE_TARGET;
FUJITA Tomonori32368222007-06-27 16:33:12 +09002893 rport = srp_rport_add(target->scsi_host, &ids);
2894 if (IS_ERR(rport)) {
2895 scsi_remove_host(target->scsi_host);
2896 return PTR_ERR(rport);
2897 }
2898
Bart Van Asschedc1bdbd2011-09-16 20:41:13 +02002899 rport->lld_data = target;
Bart Van Assche9dd69a62013-10-26 14:32:30 +02002900 target->rport = rport;
Bart Van Asschedc1bdbd2011-09-16 20:41:13 +02002901
Matthew Wilcoxb3589fd2006-06-17 20:37:30 -07002902 spin_lock(&host->target_lock);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002903 list_add_tail(&target->list, &host->target_list);
Matthew Wilcoxb3589fd2006-06-17 20:37:30 -07002904 spin_unlock(&host->target_lock);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002905
Roland Dreieraef9ec32005-11-02 14:07:13 -08002906 scsi_scan_target(&target->scsi_host->shost_gendev,
Matthew Wilcox1962a4a2006-06-17 20:37:30 -07002907 0, target->scsi_id, SCAN_WILD_CARD, 0);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002908
Bart Van Asschec014c8c2015-05-18 13:23:57 +02002909 if (srp_connected_ch(target) < target->ch_count ||
2910 target->qp_in_error) {
Bart Van Assche34aa6542014-10-30 14:47:22 +01002911 shost_printk(KERN_INFO, target->scsi_host,
2912 PFX "SCSI scan failed - removing SCSI host\n");
2913 srp_queue_remove_work(target);
2914 goto out;
2915 }
2916
Bart Van Asschecf1acab2016-05-12 10:47:38 -07002917 pr_debug("%s: SCSI scan succeeded - detected %d LUNs\n",
Bart Van Assche34aa6542014-10-30 14:47:22 +01002918 dev_name(&target->scsi_host->shost_gendev),
2919 srp_sdev_count(target->scsi_host));
2920
2921 spin_lock_irq(&target->lock);
2922 if (target->state == SRP_TARGET_SCANNING)
2923 target->state = SRP_TARGET_LIVE;
2924 spin_unlock_irq(&target->lock);
2925
2926out:
Roland Dreieraef9ec32005-11-02 14:07:13 -08002927 return 0;
2928}
2929
Tony Jonesee959b02008-02-22 00:13:36 +01002930static void srp_release_dev(struct device *dev)
Roland Dreieraef9ec32005-11-02 14:07:13 -08002931{
2932 struct srp_host *host =
Tony Jonesee959b02008-02-22 00:13:36 +01002933 container_of(dev, struct srp_host, dev);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002934
2935 complete(&host->released);
2936}
2937
2938static struct class srp_class = {
2939 .name = "infiniband_srp",
Tony Jonesee959b02008-02-22 00:13:36 +01002940 .dev_release = srp_release_dev
Roland Dreieraef9ec32005-11-02 14:07:13 -08002941};
2942
Bart Van Assche96fc2482013-06-28 14:51:26 +02002943/**
2944 * srp_conn_unique() - check whether the connection to a target is unique
Bart Van Asscheaf246632014-05-20 15:04:21 +02002945 * @host: SRP host.
2946 * @target: SRP target port.
Bart Van Assche96fc2482013-06-28 14:51:26 +02002947 */
2948static bool srp_conn_unique(struct srp_host *host,
2949 struct srp_target_port *target)
2950{
2951 struct srp_target_port *t;
2952 bool ret = false;
2953
2954 if (target->state == SRP_TARGET_REMOVED)
2955 goto out;
2956
2957 ret = true;
2958
2959 spin_lock(&host->target_lock);
2960 list_for_each_entry(t, &host->target_list, list) {
2961 if (t != target &&
2962 target->id_ext == t->id_ext &&
2963 target->ioc_guid == t->ioc_guid &&
2964 target->initiator_ext == t->initiator_ext) {
2965 ret = false;
2966 break;
2967 }
2968 }
2969 spin_unlock(&host->target_lock);
2970
2971out:
2972 return ret;
2973}
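/*
 * Example: two connections to the same target port (identical id_ext and
 * ioc_guid) are still considered unique when they were established with
 * different initiator_ext values, since all three identifiers must match
 * in the loop above.
 */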
2974
Roland Dreieraef9ec32005-11-02 14:07:13 -08002975/*
2976 * Target ports are added by writing
2977 *
2978 * id_ext=<SRP ID ext>,ioc_guid=<SRP IOC GUID>,dgid=<dest GID>,
2979 * pkey=<P_Key>,service_id=<service ID>
2980 *
2981 * to the add_target sysfs attribute.
2982 */
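/*
 * Example (identifiers assumed, not taken from a real fabric); the
 * parameter string is written on a single line:
 *
 * echo id_ext=200100e08b000000,ioc_guid=0002c90200402bd4,dgid=fe800000000000000002c90200402bd5,pkey=ffff,service_id=0002c90200402bd4 > /sys/class/infiniband_srp/srp-mlx4_0-1/add_target
 */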
2983enum {
2984 SRP_OPT_ERR = 0,
2985 SRP_OPT_ID_EXT = 1 << 0,
2986 SRP_OPT_IOC_GUID = 1 << 1,
2987 SRP_OPT_DGID = 1 << 2,
2988 SRP_OPT_PKEY = 1 << 3,
2989 SRP_OPT_SERVICE_ID = 1 << 4,
2990 SRP_OPT_MAX_SECT = 1 << 5,
Vu Pham52fb2b502006-06-17 20:37:31 -07002991 SRP_OPT_MAX_CMD_PER_LUN = 1 << 6,
Ramachandra K0c0450db2006-06-17 20:37:38 -07002992 SRP_OPT_IO_CLASS = 1 << 7,
Ishai Rabinovitz01cb9bc2006-10-04 15:28:56 +02002993 SRP_OPT_INITIATOR_EXT = 1 << 8,
David Dillow49248642011-01-14 18:23:24 -05002994 SRP_OPT_CMD_SG_ENTRIES = 1 << 9,
David Dillowc07d4242011-01-16 13:57:10 -05002995 SRP_OPT_ALLOW_EXT_SG = 1 << 10,
2996 SRP_OPT_SG_TABLESIZE = 1 << 11,
Bart Van Assche4b5e5f42013-06-28 14:57:42 +02002997 SRP_OPT_COMP_VECTOR = 1 << 12,
Vu Pham7bb312e2013-10-26 14:31:27 +02002998 SRP_OPT_TL_RETRY_COUNT = 1 << 13,
Bart Van Assche4d73f952013-10-26 14:40:37 +02002999 SRP_OPT_QUEUE_SIZE = 1 << 14,
Roland Dreieraef9ec32005-11-02 14:07:13 -08003000 SRP_OPT_ALL = (SRP_OPT_ID_EXT |
3001 SRP_OPT_IOC_GUID |
3002 SRP_OPT_DGID |
3003 SRP_OPT_PKEY |
3004 SRP_OPT_SERVICE_ID),
3005};
3006
Steven Whitehousea447c092008-10-13 10:46:57 +01003007static const match_table_t srp_opt_tokens = {
Vu Pham52fb2b502006-06-17 20:37:31 -07003008 { SRP_OPT_ID_EXT, "id_ext=%s" },
3009 { SRP_OPT_IOC_GUID, "ioc_guid=%s" },
3010 { SRP_OPT_DGID, "dgid=%s" },
3011 { SRP_OPT_PKEY, "pkey=%x" },
3012 { SRP_OPT_SERVICE_ID, "service_id=%s" },
3013 { SRP_OPT_MAX_SECT, "max_sect=%d" },
3014 { SRP_OPT_MAX_CMD_PER_LUN, "max_cmd_per_lun=%d" },
Ramachandra K0c0450db2006-06-17 20:37:38 -07003015 { SRP_OPT_IO_CLASS, "io_class=%x" },
Ishai Rabinovitz01cb9bc2006-10-04 15:28:56 +02003016 { SRP_OPT_INITIATOR_EXT, "initiator_ext=%s" },
David Dillow49248642011-01-14 18:23:24 -05003017 { SRP_OPT_CMD_SG_ENTRIES, "cmd_sg_entries=%u" },
David Dillowc07d4242011-01-16 13:57:10 -05003018 { SRP_OPT_ALLOW_EXT_SG, "allow_ext_sg=%u" },
3019 { SRP_OPT_SG_TABLESIZE, "sg_tablesize=%u" },
Bart Van Assche4b5e5f42013-06-28 14:57:42 +02003020 { SRP_OPT_COMP_VECTOR, "comp_vector=%u" },
Vu Pham7bb312e2013-10-26 14:31:27 +02003021 { SRP_OPT_TL_RETRY_COUNT, "tl_retry_count=%u" },
Bart Van Assche4d73f952013-10-26 14:40:37 +02003022 { SRP_OPT_QUEUE_SIZE, "queue_size=%d" },
Vu Pham52fb2b502006-06-17 20:37:31 -07003023 { SRP_OPT_ERR, NULL }
Roland Dreieraef9ec32005-11-02 14:07:13 -08003024};
3025
3026static int srp_parse_options(const char *buf, struct srp_target_port *target)
3027{
3028 char *options, *sep_opt;
3029 char *p;
3030 char dgid[3];
3031 substring_t args[MAX_OPT_ARGS];
3032 int opt_mask = 0;
3033 int token;
3034 int ret = -EINVAL;
3035 int i;
3036
3037 options = kstrdup(buf, GFP_KERNEL);
3038 if (!options)
3039 return -ENOMEM;
3040
3041 sep_opt = options;
Sagi Grimberg7dcf9c12014-10-19 18:19:02 +03003042 while ((p = strsep(&sep_opt, ",\n")) != NULL) {
Roland Dreieraef9ec32005-11-02 14:07:13 -08003043 if (!*p)
3044 continue;
3045
3046 token = match_token(p, srp_opt_tokens, args);
3047 opt_mask |= token;
3048
3049 switch (token) {
3050 case SRP_OPT_ID_EXT:
3051 p = match_strdup(args);
Ishai Rabinovitza20f3a62007-01-16 17:20:25 +02003052 if (!p) {
3053 ret = -ENOMEM;
3054 goto out;
3055 }
Roland Dreieraef9ec32005-11-02 14:07:13 -08003056 target->id_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
3057 kfree(p);
3058 break;
3059
3060 case SRP_OPT_IOC_GUID:
3061 p = match_strdup(args);
Ishai Rabinovitza20f3a62007-01-16 17:20:25 +02003062 if (!p) {
3063 ret = -ENOMEM;
3064 goto out;
3065 }
Roland Dreieraef9ec32005-11-02 14:07:13 -08003066 target->ioc_guid = cpu_to_be64(simple_strtoull(p, NULL, 16));
3067 kfree(p);
3068 break;
3069
3070 case SRP_OPT_DGID:
3071 p = match_strdup(args);
Ishai Rabinovitza20f3a62007-01-16 17:20:25 +02003072 if (!p) {
3073 ret = -ENOMEM;
3074 goto out;
3075 }
Roland Dreieraef9ec32005-11-02 14:07:13 -08003076 if (strlen(p) != 32) {
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00003077 pr_warn("bad dest GID parameter '%s'\n", p);
Roland Dreierce1823f2006-04-03 09:31:04 -07003078 kfree(p);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003079 goto out;
3080 }
3081
3082 for (i = 0; i < 16; ++i) {
Bart Van Assche747fe002014-10-30 14:48:05 +01003083 strlcpy(dgid, p + i * 2, sizeof(dgid));
3084 if (sscanf(dgid, "%hhx",
3085 &target->orig_dgid.raw[i]) < 1) {
3086 ret = -EINVAL;
3087 kfree(p);
3088 goto out;
3089 }
Roland Dreieraef9ec32005-11-02 14:07:13 -08003090 }
Roland Dreierbf17c1c2006-03-20 10:08:25 -08003091 kfree(p);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003092 break;
3093
3094 case SRP_OPT_PKEY:
3095 if (match_hex(args, &token)) {
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00003096 pr_warn("bad P_Key parameter '%s'\n", p);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003097 goto out;
3098 }
Bart Van Assche747fe002014-10-30 14:48:05 +01003099 target->pkey = cpu_to_be16(token);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003100 break;
3101
3102 case SRP_OPT_SERVICE_ID:
3103 p = match_strdup(args);
Ishai Rabinovitza20f3a62007-01-16 17:20:25 +02003104 if (!p) {
3105 ret = -ENOMEM;
3106 goto out;
3107 }
Roland Dreieraef9ec32005-11-02 14:07:13 -08003108 target->service_id = cpu_to_be64(simple_strtoull(p, NULL, 16));
3109 kfree(p);
3110 break;
3111
3112 case SRP_OPT_MAX_SECT:
3113 if (match_int(args, &token)) {
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00003114 pr_warn("bad max sect parameter '%s'\n", p);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003115 goto out;
3116 }
3117 target->scsi_host->max_sectors = token;
3118 break;
3119
Bart Van Assche4d73f952013-10-26 14:40:37 +02003120 case SRP_OPT_QUEUE_SIZE:
3121 if (match_int(args, &token) || token < 1) {
3122 pr_warn("bad queue_size parameter '%s'\n", p);
3123 goto out;
3124 }
3125 target->scsi_host->can_queue = token;
3126 target->queue_size = token + SRP_RSP_SQ_SIZE +
3127 SRP_TSK_MGMT_SQ_SIZE;
3128 if (!(opt_mask & SRP_OPT_MAX_CMD_PER_LUN))
3129 target->scsi_host->cmd_per_lun = token;
3130 break;
3131
Vu Pham52fb2b502006-06-17 20:37:31 -07003132 case SRP_OPT_MAX_CMD_PER_LUN:
Bart Van Assche4d73f952013-10-26 14:40:37 +02003133 if (match_int(args, &token) || token < 1) {
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00003134 pr_warn("bad max cmd_per_lun parameter '%s'\n",
3135 p);
Vu Pham52fb2b502006-06-17 20:37:31 -07003136 goto out;
3137 }
Bart Van Assche4d73f952013-10-26 14:40:37 +02003138 target->scsi_host->cmd_per_lun = token;
Vu Pham52fb2b502006-06-17 20:37:31 -07003139 break;
3140
Ramachandra K0c0450db2006-06-17 20:37:38 -07003141 case SRP_OPT_IO_CLASS:
3142 if (match_hex(args, &token)) {
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00003143 pr_warn("bad IO class parameter '%s'\n", p);
Ramachandra K0c0450db2006-06-17 20:37:38 -07003144 goto out;
3145 }
3146 if (token != SRP_REV10_IB_IO_CLASS &&
3147 token != SRP_REV16A_IB_IO_CLASS) {
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00003148 pr_warn("unknown IO class parameter value %x specified (use %x or %x).\n",
3149 token, SRP_REV10_IB_IO_CLASS,
3150 SRP_REV16A_IB_IO_CLASS);
Ramachandra K0c0450db2006-06-17 20:37:38 -07003151 goto out;
3152 }
3153 target->io_class = token;
3154 break;
3155
Ishai Rabinovitz01cb9bc2006-10-04 15:28:56 +02003156 case SRP_OPT_INITIATOR_EXT:
3157 p = match_strdup(args);
Ishai Rabinovitza20f3a62007-01-16 17:20:25 +02003158 if (!p) {
3159 ret = -ENOMEM;
3160 goto out;
3161 }
Ishai Rabinovitz01cb9bc2006-10-04 15:28:56 +02003162 target->initiator_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
3163 kfree(p);
3164 break;
3165
David Dillow49248642011-01-14 18:23:24 -05003166 case SRP_OPT_CMD_SG_ENTRIES:
3167 if (match_int(args, &token) || token < 1 || token > 255) {
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00003168 pr_warn("bad max cmd_sg_entries parameter '%s'\n",
3169 p);
David Dillow49248642011-01-14 18:23:24 -05003170 goto out;
3171 }
3172 target->cmd_sg_cnt = token;
3173 break;
3174
David Dillowc07d4242011-01-16 13:57:10 -05003175 case SRP_OPT_ALLOW_EXT_SG:
3176 if (match_int(args, &token)) {
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00003177 pr_warn("bad allow_ext_sg parameter '%s'\n", p);
David Dillowc07d4242011-01-16 13:57:10 -05003178 goto out;
3179 }
3180 target->allow_ext_sg = !!token;
3181 break;
3182
3183 case SRP_OPT_SG_TABLESIZE:
3184 if (match_int(args, &token) || token < 1 ||
3185 token > SCSI_MAX_SG_CHAIN_SEGMENTS) {
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00003186 pr_warn("bad max sg_tablesize parameter '%s'\n",
3187 p);
David Dillowc07d4242011-01-16 13:57:10 -05003188 goto out;
3189 }
3190 target->sg_tablesize = token;
3191 break;
3192
Bart Van Assche4b5e5f42013-06-28 14:57:42 +02003193 case SRP_OPT_COMP_VECTOR:
3194 if (match_int(args, &token) || token < 0) {
3195 pr_warn("bad comp_vector parameter '%s'\n", p);
3196 goto out;
3197 }
3198 target->comp_vector = token;
3199 break;
3200
Vu Pham7bb312e2013-10-26 14:31:27 +02003201 case SRP_OPT_TL_RETRY_COUNT:
3202 if (match_int(args, &token) || token < 2 || token > 7) {
3203 pr_warn("bad tl_retry_count parameter '%s' (must be a number between 2 and 7)\n",
3204 p);
3205 goto out;
3206 }
3207 target->tl_retry_count = token;
3208 break;
3209
Roland Dreieraef9ec32005-11-02 14:07:13 -08003210 default:
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00003211 pr_warn("unknown parameter or missing value '%s' in target creation request\n",
3212 p);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003213 goto out;
3214 }
3215 }
3216
3217 if ((opt_mask & SRP_OPT_ALL) == SRP_OPT_ALL)
3218 ret = 0;
3219 else
3220 for (i = 0; i < ARRAY_SIZE(srp_opt_tokens); ++i)
3221 if ((srp_opt_tokens[i].token & SRP_OPT_ALL) &&
3222 !(srp_opt_tokens[i].token & opt_mask))
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00003223 pr_warn("target creation request is missing parameter '%s'\n",
3224 srp_opt_tokens[i].pattern);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003225
Bart Van Assche4d73f952013-10-26 14:40:37 +02003226 if (target->scsi_host->cmd_per_lun > target->scsi_host->can_queue
3227 && (opt_mask & SRP_OPT_MAX_CMD_PER_LUN))
3228 pr_warn("cmd_per_lun = %d > queue_size = %d\n",
3229 target->scsi_host->cmd_per_lun,
3230 target->scsi_host->can_queue);
3231
Roland Dreieraef9ec32005-11-02 14:07:13 -08003232out:
3233 kfree(options);
3234 return ret;
3235}
3236
Tony Jonesee959b02008-02-22 00:13:36 +01003237static ssize_t srp_create_target(struct device *dev,
3238 struct device_attribute *attr,
Roland Dreieraef9ec32005-11-02 14:07:13 -08003239 const char *buf, size_t count)
3240{
3241 struct srp_host *host =
Tony Jonesee959b02008-02-22 00:13:36 +01003242 container_of(dev, struct srp_host, dev);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003243 struct Scsi_Host *target_host;
3244 struct srp_target_port *target;
Bart Van Assche509c07b2014-10-30 14:48:30 +01003245 struct srp_rdma_ch *ch;
Bart Van Assched1b42892014-05-20 15:07:20 +02003246 struct srp_device *srp_dev = host->srp_dev;
3247 struct ib_device *ibdev = srp_dev->dev;
Bart Van Assched92c0da2014-10-06 17:14:36 +02003248 int ret, node_idx, node, cpu, i;
Bart Van Assche509c5f32016-05-12 10:50:35 -07003249 unsigned int max_sectors_per_mr, mr_per_cmd = 0;
Bart Van Assched92c0da2014-10-06 17:14:36 +02003250 bool multich = false;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003251
3252 target_host = scsi_host_alloc(&srp_template,
3253 sizeof (struct srp_target_port));
3254 if (!target_host)
3255 return -ENOMEM;
3256
David Dillow49248642011-01-14 18:23:24 -05003257 target_host->transportt = ib_srp_transport_template;
Bart Van Asschefd1b6c42011-07-13 09:19:16 -07003258 target_host->max_channel = 0;
3259 target_host->max_id = 1;
Bart Van Assche985aa492015-05-18 13:27:14 +02003260 target_host->max_lun = -1LL;
Arne Redlich3c8edf02006-11-15 12:43:00 +01003261 target_host->max_cmd_len = sizeof ((struct srp_cmd *) (void *) 0L)->cdb;
Roland Dreier5f068992005-11-11 14:06:01 -08003262
Roland Dreieraef9ec32005-11-02 14:07:13 -08003263 target = host_to_target(target_host);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003264
David Dillow49248642011-01-14 18:23:24 -05003265 target->io_class = SRP_REV16A_IB_IO_CLASS;
3266 target->scsi_host = target_host;
3267 target->srp_host = host;
Jason Gunthorpee6bf5f482015-07-30 17:22:22 -06003268 target->lkey = host->srp_dev->pd->local_dma_lkey;
Bart Van Assche03f6fb92015-08-10 17:09:36 -07003269 target->global_mr = host->srp_dev->global_mr;
David Dillow49248642011-01-14 18:23:24 -05003270 target->cmd_sg_cnt = cmd_sg_entries;
David Dillowc07d4242011-01-16 13:57:10 -05003271 target->sg_tablesize = indirect_sg_entries ? : cmd_sg_entries;
3272 target->allow_ext_sg = allow_ext_sg;
Vu Pham7bb312e2013-10-26 14:31:27 +02003273 target->tl_retry_count = 7;
Bart Van Assche4d73f952013-10-26 14:40:37 +02003274 target->queue_size = SRP_DEFAULT_QUEUE_SIZE;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003275
Bart Van Assche34aa6542014-10-30 14:47:22 +01003276 /*
3277	 * Prevent the SCSI host from being removed by srp_remove_target()
3278	 * before this function returns.
3279 */
3280 scsi_host_get(target->scsi_host);
3281
Bart Van Assche2d7091b2014-03-14 13:52:45 +01003282 mutex_lock(&host->add_target_mutex);
3283
Roland Dreieraef9ec32005-11-02 14:07:13 -08003284 ret = srp_parse_options(buf, target);
3285 if (ret)
Bart Van Asschefb49c8b2015-05-18 13:23:14 +02003286 goto out;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003287
Bart Van Assche4d73f952013-10-26 14:40:37 +02003288 target->req_ring_size = target->queue_size - SRP_TSK_MGMT_SQ_SIZE;
3289
Bart Van Assche96fc2482013-06-28 14:51:26 +02003290 if (!srp_conn_unique(target->srp_host, target)) {
3291 shost_printk(KERN_INFO, target->scsi_host,
3292 PFX "Already connected to target port with id_ext=%016llx;ioc_guid=%016llx;initiator_ext=%016llx\n",
3293 be64_to_cpu(target->id_ext),
3294 be64_to_cpu(target->ioc_guid),
3295 be64_to_cpu(target->initiator_ext));
3296 ret = -EEXIST;
Bart Van Asschefb49c8b2015-05-18 13:23:14 +02003297 goto out;
Bart Van Assche96fc2482013-06-28 14:51:26 +02003298 }
3299
Bart Van Assche5cfb1782014-05-20 15:08:34 +02003300 if (!srp_dev->has_fmr && !srp_dev->has_fr && !target->allow_ext_sg &&
Bart Van Assched1b42892014-05-20 15:07:20 +02003301 target->cmd_sg_cnt < target->sg_tablesize) {
Bart Van Assche5cfb1782014-05-20 15:08:34 +02003302 pr_warn("No MR pool and no external indirect descriptors, limiting sg_tablesize to cmd_sg_cnt\n");
David Dillowc07d4242011-01-16 13:57:10 -05003303 target->sg_tablesize = target->cmd_sg_cnt;
3304 }
3305
Bart Van Assche509c5f32016-05-12 10:50:35 -07003306 if (srp_dev->use_fast_reg || srp_dev->use_fmr) {
3307 /*
3308 * FR and FMR can only map one HCA page per entry. If the
3309 * start address is not aligned on a HCA page boundary two
3310 * entries will be used for the head and the tail although
3311 * these two entries combined contain at most one HCA page of
3312 * data. Hence the "+ 1" in the calculation below.
3313 *
3314 * The indirect data buffer descriptor is contiguous so the
3315 * memory for that buffer will only be registered if
3316 * register_always is true. Hence add one to mr_per_cmd if
3317 * register_always has been set.
3318 */
3319 max_sectors_per_mr = srp_dev->max_pages_per_mr <<
3320 (ilog2(srp_dev->mr_page_size) - 9);
3321 mr_per_cmd = register_always +
3322 (target->scsi_host->max_sectors + 1 +
3323 max_sectors_per_mr - 1) / max_sectors_per_mr;
3324 pr_debug("max_sectors = %u; max_pages_per_mr = %u; mr_page_size = %u; max_sectors_per_mr = %u; mr_per_cmd = %u\n",
3325 target->scsi_host->max_sectors,
3326 srp_dev->max_pages_per_mr, srp_dev->mr_page_size,
3327 max_sectors_per_mr, mr_per_cmd);
3328 }
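/*
 * Worked example for the calculation above (values assumed): with a 4 KB
 * mr_page_size and max_pages_per_mr = 512, max_sectors_per_mr =
 * 512 << (12 - 9) = 4096. For max_sectors = 1024 and register_always
 * enabled this gives mr_per_cmd = 1 + (1024 + 1 + 4095) / 4096 = 2.
 */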
3329
David Dillowc07d4242011-01-16 13:57:10 -05003330 target_host->sg_tablesize = target->sg_tablesize;
Bart Van Assche509c5f32016-05-12 10:50:35 -07003331 target->mr_pool_size = target->scsi_host->can_queue * mr_per_cmd;
3332 target->mr_per_cmd = mr_per_cmd;
David Dillowc07d4242011-01-16 13:57:10 -05003333 target->indirect_size = target->sg_tablesize *
3334 sizeof (struct srp_direct_buf);
David Dillow49248642011-01-14 18:23:24 -05003335 target->max_iu_len = sizeof (struct srp_cmd) +
3336 sizeof (struct srp_indirect_buf) +
3337 target->cmd_sg_cnt * sizeof (struct srp_direct_buf);
3338
Bart Van Asschec1120f82013-10-26 14:35:08 +02003339 INIT_WORK(&target->tl_err_work, srp_tl_err_work);
Bart Van Asscheef6c49d2011-12-26 16:49:18 +00003340 INIT_WORK(&target->remove_work, srp_remove_work);
David Dillow8f26c9f2011-01-14 19:45:50 -05003341 spin_lock_init(&target->lock);
Matan Barak55ee3ab2015-10-15 18:38:45 +03003342 ret = ib_query_gid(ibdev, host->port, 0, &target->sgid, NULL);
Sagi Grimberg2088ca62014-03-14 13:51:58 +01003343 if (ret)
Bart Van Asschefb49c8b2015-05-18 13:23:14 +02003344 goto out;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003345
Bart Van Assched92c0da2014-10-06 17:14:36 +02003346 ret = -ENOMEM;
3347 target->ch_count = max_t(unsigned, num_online_nodes(),
3348 min(ch_count ? :
3349 min(4 * num_online_nodes(),
3350 ibdev->num_comp_vectors),
3351 num_online_cpus()));
3352 target->ch = kcalloc(target->ch_count, sizeof(*target->ch),
3353 GFP_KERNEL);
3354 if (!target->ch)
Bart Van Asschefb49c8b2015-05-18 13:23:14 +02003355 goto out;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003356
Bart Van Assched92c0da2014-10-06 17:14:36 +02003357 node_idx = 0;
3358 for_each_online_node(node) {
3359 const int ch_start = (node_idx * target->ch_count /
3360 num_online_nodes());
3361 const int ch_end = ((node_idx + 1) * target->ch_count /
3362 num_online_nodes());
3363 const int cv_start = (node_idx * ibdev->num_comp_vectors /
3364 num_online_nodes() + target->comp_vector)
3365 % ibdev->num_comp_vectors;
3366 const int cv_end = ((node_idx + 1) * ibdev->num_comp_vectors /
3367 num_online_nodes() + target->comp_vector)
3368 % ibdev->num_comp_vectors;
3369 int cpu_idx = 0;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003370
Bart Van Assched92c0da2014-10-06 17:14:36 +02003371 for_each_online_cpu(cpu) {
3372 if (cpu_to_node(cpu) != node)
3373 continue;
3374 if (ch_start + cpu_idx >= ch_end)
3375 continue;
3376 ch = &target->ch[ch_start + cpu_idx];
3377 ch->target = target;
3378 ch->comp_vector = cv_start == cv_end ? cv_start :
3379 cv_start + cpu_idx % (cv_end - cv_start);
3380 spin_lock_init(&ch->lock);
3381 INIT_LIST_HEAD(&ch->free_tx);
3382 ret = srp_new_cm_id(ch);
3383 if (ret)
3384 goto err_disconnect;
3385
3386 ret = srp_create_ch_ib(ch);
3387 if (ret)
3388 goto err_disconnect;
3389
3390 ret = srp_alloc_req_data(ch);
3391 if (ret)
3392 goto err_disconnect;
3393
3394 ret = srp_connect_ch(ch, multich);
3395 if (ret) {
3396 shost_printk(KERN_ERR, target->scsi_host,
3397 PFX "Connection %d/%d failed\n",
3398 ch_start + cpu_idx,
3399 target->ch_count);
3400 if (node_idx == 0 && cpu_idx == 0) {
3401 goto err_disconnect;
3402 } else {
3403 srp_free_ch_ib(target, ch);
3404 srp_free_req_data(target, ch);
3405 target->ch_count = ch - target->ch;
Bart Van Asschec257ea62015-07-31 14:13:22 -07003406 goto connected;
Bart Van Assched92c0da2014-10-06 17:14:36 +02003407 }
3408 }
3409
3410 multich = true;
3411 cpu_idx++;
3412 }
3413 node_idx++;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003414 }
3415
Bart Van Asschec257ea62015-07-31 14:13:22 -07003416connected:
Bart Van Assched92c0da2014-10-06 17:14:36 +02003417 target->scsi_host->nr_hw_queues = target->ch_count;
3418
Roland Dreieraef9ec32005-11-02 14:07:13 -08003419 ret = srp_add_target(host, target);
3420 if (ret)
3421 goto err_disconnect;
3422
Bart Van Assche34aa6542014-10-30 14:47:22 +01003423 if (target->state != SRP_TARGET_REMOVED) {
3424 shost_printk(KERN_DEBUG, target->scsi_host, PFX
3425 "new target: id_ext %016llx ioc_guid %016llx pkey %04x service_id %016llx sgid %pI6 dgid %pI6\n",
3426 be64_to_cpu(target->id_ext),
3427 be64_to_cpu(target->ioc_guid),
Bart Van Assche747fe002014-10-30 14:48:05 +01003428 be16_to_cpu(target->pkey),
Bart Van Assche34aa6542014-10-30 14:47:22 +01003429 be64_to_cpu(target->service_id),
Bart Van Assche747fe002014-10-30 14:48:05 +01003430 target->sgid.raw, target->orig_dgid.raw);
Bart Van Assche34aa6542014-10-30 14:47:22 +01003431 }
Bart Van Asschee7ffde02014-03-14 13:52:21 +01003432
Bart Van Assche2d7091b2014-03-14 13:52:45 +01003433 ret = count;
3434
3435out:
3436 mutex_unlock(&host->add_target_mutex);
Bart Van Assche34aa6542014-10-30 14:47:22 +01003437
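	/*
	 * The unconditional put below balances the scsi_host_get() near
	 * the start of this function; upon failure the second put also
	 * drops the reference taken by scsi_host_alloc() so that the
	 * SCSI host gets freed.
	 */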
3438 scsi_host_put(target->scsi_host);
Bart Van Asschebc44bd12015-08-14 11:01:09 -07003439 if (ret < 0)
3440 scsi_host_put(target->scsi_host);
Bart Van Assche34aa6542014-10-30 14:47:22 +01003441
Bart Van Assche2d7091b2014-03-14 13:52:45 +01003442 return ret;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003443
3444err_disconnect:
3445 srp_disconnect_target(target);
3446
Bart Van Assched92c0da2014-10-06 17:14:36 +02003447 for (i = 0; i < target->ch_count; i++) {
3448 ch = &target->ch[i];
3449 srp_free_ch_ib(target, ch);
3450 srp_free_req_data(target, ch);
3451 }
Roland Dreieraef9ec32005-11-02 14:07:13 -08003452
Bart Van Assched92c0da2014-10-06 17:14:36 +02003453 kfree(target->ch);
Bart Van Assche2d7091b2014-03-14 13:52:45 +01003454 goto out;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003455}
3456
Tony Jonesee959b02008-02-22 00:13:36 +01003457static DEVICE_ATTR(add_target, S_IWUSR, NULL, srp_create_target);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003458
Tony Jonesee959b02008-02-22 00:13:36 +01003459static ssize_t show_ibdev(struct device *dev, struct device_attribute *attr,
3460 char *buf)
Roland Dreieraef9ec32005-11-02 14:07:13 -08003461{
Tony Jonesee959b02008-02-22 00:13:36 +01003462 struct srp_host *host = container_of(dev, struct srp_host, dev);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003463
Greg Kroah-Hartman05321932008-03-06 00:13:36 +01003464 return sprintf(buf, "%s\n", host->srp_dev->dev->name);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003465}
3466
Tony Jonesee959b02008-02-22 00:13:36 +01003467static DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003468
Tony Jonesee959b02008-02-22 00:13:36 +01003469static ssize_t show_port(struct device *dev, struct device_attribute *attr,
3470 char *buf)
Roland Dreieraef9ec32005-11-02 14:07:13 -08003471{
Tony Jonesee959b02008-02-22 00:13:36 +01003472 struct srp_host *host = container_of(dev, struct srp_host, dev);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003473
3474 return sprintf(buf, "%d\n", host->port);
3475}
3476
Tony Jonesee959b02008-02-22 00:13:36 +01003477static DEVICE_ATTR(port, S_IRUGO, show_port, NULL);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003478
static struct srp_host *srp_add_port(struct srp_device *device, u8 port)
{
	struct srp_host *host;

	host = kzalloc(sizeof *host, GFP_KERNEL);
	if (!host)
		return NULL;

	INIT_LIST_HEAD(&host->target_list);
	spin_lock_init(&host->target_lock);
	init_completion(&host->released);
	mutex_init(&host->add_target_mutex);
	host->srp_dev = device;
	host->port = port;

	host->dev.class = &srp_class;
	host->dev.parent = device->dev->dma_device;
	dev_set_name(&host->dev, "srp-%s-%d", device->dev->name, port);

	if (device_register(&host->dev))
		goto free_host;
	if (device_create_file(&host->dev, &dev_attr_add_target))
		goto err_class;
	if (device_create_file(&host->dev, &dev_attr_ibdev))
		goto err_class;
	if (device_create_file(&host->dev, &dev_attr_port))
		goto err_class;

	return host;

err_class:
	device_unregister(&host->dev);

free_host:
	kfree(host);

	return NULL;
}

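/*
 * srp_add_one() - IB client callback, invoked once for each HCA.
 *
 * Derives the memory-registration limits from the device attributes,
 * picks FMR or fast registration (FR) as the registration scheme,
 * allocates the protection domain shared by all targets on this HCA,
 * and registers one srp_host per port.
 */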
static void srp_add_one(struct ib_device *device)
{
	struct srp_device *srp_dev;
	struct srp_host *host;
	int mr_page_shift, p;
	u64 max_pages_per_mr;

	srp_dev = kmalloc(sizeof *srp_dev, GFP_KERNEL);
	if (!srp_dev)
		return;

	/*
	 * Use the smallest page size supported by the HCA, down to a
	 * minimum of 4096 bytes. We're unlikely to build large sglists
	 * out of smaller entries.
	 */
	mr_page_shift		= max(12, ffs(device->attrs.page_size_cap) - 1);
	srp_dev->mr_page_size	= 1 << mr_page_shift;
	srp_dev->mr_page_mask	= ~((u64) srp_dev->mr_page_size - 1);
	max_pages_per_mr	= device->attrs.max_mr_size;
	do_div(max_pages_per_mr, srp_dev->mr_page_size);
	pr_debug("%s: %llu / %u = %llu <> %u\n", __func__,
		 device->attrs.max_mr_size, srp_dev->mr_page_size,
		 max_pages_per_mr, SRP_MAX_PAGES_PER_MR);
	srp_dev->max_pages_per_mr = min_t(u64, SRP_MAX_PAGES_PER_MR,
					  max_pages_per_mr);

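	/*
	 * Prefer fast registration (FR) whenever the HCA supports it and
	 * the "prefer_fr" module parameter is left at its default; fall
	 * back to FMR otherwise. At least one of the two schemes is
	 * needed for per-command memory registration.
	 */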
	srp_dev->has_fmr = (device->alloc_fmr && device->dealloc_fmr &&
			    device->map_phys_fmr && device->unmap_fmr);
	srp_dev->has_fr = (device->attrs.device_cap_flags &
			   IB_DEVICE_MEM_MGT_EXTENSIONS);
	if (!srp_dev->has_fmr && !srp_dev->has_fr) {
		dev_warn(&device->dev, "neither FMR nor FR is supported\n");
	} else if (device->attrs.max_mr_size >= 2 * srp_dev->mr_page_size) {
		srp_dev->use_fast_reg = (srp_dev->has_fr &&
					 (!srp_dev->has_fmr || prefer_fr));
		srp_dev->use_fmr = !srp_dev->use_fast_reg && srp_dev->has_fmr;
	}

	if (srp_dev->use_fast_reg) {
		srp_dev->max_pages_per_mr =
			min_t(u32, srp_dev->max_pages_per_mr,
			      device->attrs.max_fast_reg_page_list_len);
	}
	srp_dev->mr_max_size	= srp_dev->mr_page_size *
				  srp_dev->max_pages_per_mr;
	pr_debug("%s: mr_page_shift = %d, device->max_mr_size = %#llx, device->max_fast_reg_page_list_len = %u, max_pages_per_mr = %d, mr_max_size = %#x\n",
		 device->name, mr_page_shift, device->attrs.max_mr_size,
		 device->attrs.max_fast_reg_page_list_len,
		 srp_dev->max_pages_per_mr, srp_dev->mr_max_size);

	INIT_LIST_HEAD(&srp_dev->dev_list);

	srp_dev->dev = device;
	srp_dev->pd  = ib_alloc_pd(device);
	if (IS_ERR(srp_dev->pd))
		goto free_dev;

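	/*
	 * Fall back to a single global DMA MR that covers all of memory
	 * when register_always is not set, or when neither FMR nor FR is
	 * available; otherwise every command registers its own memory.
	 */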
	if (!register_always || (!srp_dev->has_fmr && !srp_dev->has_fr)) {
		srp_dev->global_mr = ib_get_dma_mr(srp_dev->pd,
						   IB_ACCESS_LOCAL_WRITE |
						   IB_ACCESS_REMOTE_READ |
						   IB_ACCESS_REMOTE_WRITE);
		if (IS_ERR(srp_dev->global_mr))
			goto err_pd;
	} else {
		srp_dev->global_mr = NULL;
	}

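	/* Register an srp_host on every port of the HCA. */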
	for (p = rdma_start_port(device); p <= rdma_end_port(device); ++p) {
		host = srp_add_port(srp_dev, p);
		if (host)
			list_add_tail(&host->list, &srp_dev->dev_list);
	}

	ib_set_client_data(device, &srp_client, srp_dev);
	return;

err_pd:
	ib_dealloc_pd(srp_dev->pd);

free_dev:
	kfree(srp_dev);
}

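/*
 * srp_remove_one() - IB client callback, invoked when an HCA goes away.
 *
 * Unregisters every srp_host on the device so that no new targets can
 * be added, queues removal of all existing target ports, waits for the
 * removal work to finish, and then releases the global MR (if any) and
 * the protection domain.
 */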
static void srp_remove_one(struct ib_device *device, void *client_data)
{
	struct srp_device *srp_dev;
	struct srp_host *host, *tmp_host;
	struct srp_target_port *target;

	srp_dev = client_data;
	if (!srp_dev)
		return;

	list_for_each_entry_safe(host, tmp_host, &srp_dev->dev_list, list) {
		device_unregister(&host->dev);
		/*
		 * Wait for the sysfs entry to go away, so that no new
		 * target ports can be created.
		 */
		wait_for_completion(&host->released);

		/*
		 * Remove all target ports.
		 */
		spin_lock(&host->target_lock);
		list_for_each_entry(target, &host->target_list, list)
			srp_queue_remove_work(target);
		spin_unlock(&host->target_lock);

		/*
		 * Wait for tl_err and target port removal tasks.
		 */
		flush_workqueue(system_long_wq);
		flush_workqueue(srp_remove_wq);

		kfree(host);
	}

	if (srp_dev->global_mr)
		ib_dereg_mr(srp_dev->global_mr);
	ib_dealloc_pd(srp_dev->pd);

	kfree(srp_dev);
}

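/*
 * Callbacks through which the SCSI SRP transport layer drives rport
 * state changes: reconnect, fast_io_fail and dev_loss handling.
 */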
static struct srp_function_template ib_srp_transport_functions = {
	.has_rport_state	 = true,
	.reset_timer_if_blocked	 = true,
	.reconnect_delay	 = &srp_reconnect_delay,
	.fast_io_fail_tmo	 = &srp_fast_io_fail_tmo,
	.dev_loss_tmo		 = &srp_dev_loss_tmo,
	.reconnect		 = srp_rport_reconnect,
	.rport_delete		 = srp_rport_delete,
	.terminate_rport_io	 = srp_terminate_io,
};

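/*
 * Module initialization: sanity-check the scatter/gather module
 * parameters, create the removal workqueue, and register with the SRP
 * transport layer, the SA client and the IB client framework, unwinding
 * in reverse order on failure.
 */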
static int __init srp_init_module(void)
{
	int ret;

	if (srp_sg_tablesize) {
		pr_warn("srp_sg_tablesize is deprecated, please use cmd_sg_entries\n");
		if (!cmd_sg_entries)
			cmd_sg_entries = srp_sg_tablesize;
	}

	if (!cmd_sg_entries)
		cmd_sg_entries = SRP_DEF_SG_TABLESIZE;

	if (cmd_sg_entries > 255) {
		pr_warn("Clamping cmd_sg_entries to 255\n");
		cmd_sg_entries = 255;
	}

	if (!indirect_sg_entries)
		indirect_sg_entries = cmd_sg_entries;
	else if (indirect_sg_entries < cmd_sg_entries) {
		pr_warn("Bumping up indirect_sg_entries to match cmd_sg_entries (%u)\n",
			cmd_sg_entries);
		indirect_sg_entries = cmd_sg_entries;
	}

	srp_remove_wq = create_workqueue("srp_remove");
	if (!srp_remove_wq) {
		ret = -ENOMEM;
		goto out;
	}

	ret = -ENOMEM;
	ib_srp_transport_template =
		srp_attach_transport(&ib_srp_transport_functions);
	if (!ib_srp_transport_template)
		goto destroy_wq;

	ret = class_register(&srp_class);
	if (ret) {
		pr_err("couldn't register class infiniband_srp\n");
		goto release_tr;
	}

	ib_sa_register_client(&srp_sa_client);

	ret = ib_register_client(&srp_client);
	if (ret) {
		pr_err("couldn't register IB client\n");
		goto unreg_sa;
	}

out:
	return ret;

unreg_sa:
	ib_sa_unregister_client(&srp_sa_client);
	class_unregister(&srp_class);

release_tr:
	srp_release_transport(ib_srp_transport_template);

destroy_wq:
	destroy_workqueue(srp_remove_wq);
	goto out;
}

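/*
 * Module unload: tear everything down in the reverse order of
 * srp_init_module().
 */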
static void __exit srp_cleanup_module(void)
{
	ib_unregister_client(&srp_client);
	ib_sa_unregister_client(&srp_sa_client);
	class_unregister(&srp_class);
	srp_release_transport(ib_srp_transport_template);
	destroy_workqueue(srp_remove_wq);
}

module_init(srp_init_module);
module_exit(srp_cleanup_module);