/*
 * Copyright (c) 2005 Cisco Systems.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/string.h>
#include <linux/parser.h>
#include <linux/random.h>
#include <linux/jiffies.h>
#include <rdma/ib_cache.h>

#include <linux/atomic.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_tcq.h>
#include <scsi/srp.h>
#include <scsi/scsi_transport_srp.h>

#include "ib_srp.h"

#define DRV_NAME	"ib_srp"
#define PFX		DRV_NAME ": "
#define DRV_VERSION	"2.0"
#define DRV_RELDATE	"July 26, 2015"

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("InfiniBand SCSI RDMA Protocol initiator");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_INFO(release_date, DRV_RELDATE);

static unsigned int srp_sg_tablesize;
static unsigned int cmd_sg_entries;
static unsigned int indirect_sg_entries;
static bool allow_ext_sg;
static bool prefer_fr = true;
static bool register_always = true;
static int topspin_workarounds = 1;

module_param(srp_sg_tablesize, uint, 0444);
MODULE_PARM_DESC(srp_sg_tablesize, "Deprecated name for cmd_sg_entries");

module_param(cmd_sg_entries, uint, 0444);
MODULE_PARM_DESC(cmd_sg_entries,
		 "Default number of gather/scatter entries in the SRP command (default is 12, max 255)");

module_param(indirect_sg_entries, uint, 0444);
MODULE_PARM_DESC(indirect_sg_entries,
		 "Default max number of gather/scatter entries (default is 12, max is " __stringify(SCSI_MAX_SG_CHAIN_SEGMENTS) ")");

module_param(allow_ext_sg, bool, 0444);
MODULE_PARM_DESC(allow_ext_sg,
		 "Default behavior when there are more than cmd_sg_entries S/G entries after mapping; fails the request when false (default false)");

module_param(topspin_workarounds, int, 0444);
MODULE_PARM_DESC(topspin_workarounds,
		 "Enable workarounds for Topspin/Cisco SRP target bugs if != 0");

module_param(prefer_fr, bool, 0444);
MODULE_PARM_DESC(prefer_fr,
"Whether to use fast registration if both FMR and fast registration are supported");

module_param(register_always, bool, 0444);
MODULE_PARM_DESC(register_always,
		 "Use memory registration even for contiguous memory regions");

static const struct kernel_param_ops srp_tmo_ops;

static int srp_reconnect_delay = 10;
module_param_cb(reconnect_delay, &srp_tmo_ops, &srp_reconnect_delay,
		S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(reconnect_delay, "Time between successive reconnect attempts");

static int srp_fast_io_fail_tmo = 15;
module_param_cb(fast_io_fail_tmo, &srp_tmo_ops, &srp_fast_io_fail_tmo,
		S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fast_io_fail_tmo,
		 "Number of seconds between the observation of a transport"
		 " layer error and failing all I/O. \"off\" means that this"
		 " functionality is disabled.");

static int srp_dev_loss_tmo = 600;
module_param_cb(dev_loss_tmo, &srp_tmo_ops, &srp_dev_loss_tmo,
		S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dev_loss_tmo,
		 "Maximum number of seconds that the SRP transport should"
		 " insulate transport layer errors. After this time has been"
		 " exceeded the SCSI host is removed. Should be"
		 " between 1 and " __stringify(SCSI_DEVICE_BLOCK_MAX_TIMEOUT)
		 " if fast_io_fail_tmo has not been set. \"off\" means that"
		 " this functionality is disabled.");

static unsigned ch_count;
module_param(ch_count, uint, 0444);
MODULE_PARM_DESC(ch_count,
		 "Number of RDMA channels to use for communication with an SRP target. Using more than one channel improves performance if the HCA supports multiple completion vectors. The default value is the minimum of four times the number of online CPU sockets and the number of completion vectors supported by the HCA.");

static void srp_add_one(struct ib_device *device);
static void srp_remove_one(struct ib_device *device, void *client_data);
static void srp_recv_done(struct ib_cq *cq, struct ib_wc *wc);
static void srp_handle_qp_err(struct ib_cq *cq, struct ib_wc *wc,
		const char *opname);
static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event);

static struct scsi_transport_template *ib_srp_transport_template;
static struct workqueue_struct *srp_remove_wq;

static struct ib_client srp_client = {
	.name   = "srp",
	.add    = srp_add_one,
	.remove = srp_remove_one
};

static struct ib_sa_client srp_sa_client;

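/*
 * srp_tmo_get() / srp_tmo_set() implement the kernel_param_ops used for the
 * reconnect_delay, fast_io_fail_tmo and dev_loss_tmo module parameters.
 * A negative timeout is shown as "off"; srp_tmo_set() parses the new value
 * and only accepts it if srp_tmo_valid() considers it consistent with the
 * other two timeout parameters.
 */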
static int srp_tmo_get(char *buffer, const struct kernel_param *kp)
{
	int tmo = *(int *)kp->arg;

	if (tmo >= 0)
		return sprintf(buffer, "%d", tmo);
	else
		return sprintf(buffer, "off");
}

static int srp_tmo_set(const char *val, const struct kernel_param *kp)
{
	int tmo, res;

	res = srp_parse_tmo(&tmo, val);
	if (res)
		goto out;

	if (kp->arg == &srp_reconnect_delay)
		res = srp_tmo_valid(tmo, srp_fast_io_fail_tmo,
				    srp_dev_loss_tmo);
	else if (kp->arg == &srp_fast_io_fail_tmo)
		res = srp_tmo_valid(srp_reconnect_delay, tmo, srp_dev_loss_tmo);
	else
		res = srp_tmo_valid(srp_reconnect_delay, srp_fast_io_fail_tmo,
				    tmo);
	if (res)
		goto out;
	*(int *)kp->arg = tmo;

out:
	return res;
}

static const struct kernel_param_ops srp_tmo_ops = {
	.get = srp_tmo_get,
	.set = srp_tmo_set,
};

static inline struct srp_target_port *host_to_target(struct Scsi_Host *host)
{
	return (struct srp_target_port *) host->hostdata;
}

static const char *srp_target_info(struct Scsi_Host *host)
{
	return host_to_target(host)->target_name;
}

static int srp_target_is_topspin(struct srp_target_port *target)
{
	static const u8 topspin_oui[3] = { 0x00, 0x05, 0xad };
	static const u8 cisco_oui[3]   = { 0x00, 0x1b, 0x0d };

	return topspin_workarounds &&
		(!memcmp(&target->ioc_guid, topspin_oui, sizeof topspin_oui) ||
		 !memcmp(&target->ioc_guid, cisco_oui, sizeof cisco_oui));
}

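/*
 * Allocate an information unit: a kernel buffer of @size bytes plus the DMA
 * mapping used to post it to the HCA. Returns NULL on any failure.
 */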
static struct srp_iu *srp_alloc_iu(struct srp_host *host, size_t size,
				   gfp_t gfp_mask,
				   enum dma_data_direction direction)
{
	struct srp_iu *iu;

	iu = kmalloc(sizeof *iu, gfp_mask);
	if (!iu)
		goto out;

	iu->buf = kzalloc(size, gfp_mask);
	if (!iu->buf)
		goto out_free_iu;

	iu->dma = ib_dma_map_single(host->srp_dev->dev, iu->buf, size,
				    direction);
	if (ib_dma_mapping_error(host->srp_dev->dev, iu->dma))
		goto out_free_buf;

	iu->size      = size;
	iu->direction = direction;

	return iu;

out_free_buf:
	kfree(iu->buf);
out_free_iu:
	kfree(iu);
out:
	return NULL;
}

static void srp_free_iu(struct srp_host *host, struct srp_iu *iu)
{
	if (!iu)
		return;

	ib_dma_unmap_single(host->srp_dev->dev, iu->dma, iu->size,
			    iu->direction);
	kfree(iu->buf);
	kfree(iu);
}

static void srp_qp_event(struct ib_event *event, void *context)
{
	pr_debug("QP event %s (%d)\n",
		 ib_event_msg(event->event), event->event);
}

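/*
 * Move a freshly created queue pair to the INIT state: look up the P_Key
 * index matching the target's P_Key and set the port number and the access
 * flags needed for RDMA reads and writes issued by the target.
 */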
static int srp_init_qp(struct srp_target_port *target,
		       struct ib_qp *qp)
{
	struct ib_qp_attr *attr;
	int ret;

	attr = kmalloc(sizeof *attr, GFP_KERNEL);
	if (!attr)
		return -ENOMEM;

	ret = ib_find_cached_pkey(target->srp_host->srp_dev->dev,
				  target->srp_host->port,
				  be16_to_cpu(target->pkey),
				  &attr->pkey_index);
	if (ret)
		goto out;

	attr->qp_state        = IB_QPS_INIT;
	attr->qp_access_flags = (IB_ACCESS_REMOTE_READ |
				 IB_ACCESS_REMOTE_WRITE);
	attr->port_num        = target->srp_host->port;

	ret = ib_modify_qp(qp, attr,
			   IB_QP_STATE		|
			   IB_QP_PKEY_INDEX	|
			   IB_QP_ACCESS_FLAGS	|
			   IB_QP_PORT);

out:
	kfree(attr);
	return ret;
}

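/*
 * Allocate a new CM ID for @ch, replacing and destroying any previous one,
 * and reinitialize the path record from the target port parameters.
 */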
static int srp_new_cm_id(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	struct ib_cm_id *new_cm_id;

	new_cm_id = ib_create_cm_id(target->srp_host->srp_dev->dev,
				    srp_cm_handler, ch);
	if (IS_ERR(new_cm_id))
		return PTR_ERR(new_cm_id);

	if (ch->cm_id)
		ib_destroy_cm_id(ch->cm_id);
	ch->cm_id = new_cm_id;
	ch->path.sgid = target->sgid;
	ch->path.dgid = target->orig_dgid;
	ch->path.pkey = target->pkey;
	ch->path.service_id = target->service_id;

	return 0;
}

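/*
 * Allocate the pool of fast memory regions (FMRs) that a target port uses
 * for memory registration. The dirty watermark of a quarter of the pool
 * size controls when the pool's unmapped regions get flushed.
 */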
static struct ib_fmr_pool *srp_alloc_fmr_pool(struct srp_target_port *target)
{
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_fmr_pool_param fmr_param;

	memset(&fmr_param, 0, sizeof(fmr_param));
	fmr_param.pool_size	    = target->mr_pool_size;
	fmr_param.dirty_watermark   = fmr_param.pool_size / 4;
	fmr_param.cache		    = 1;
	fmr_param.max_pages_per_fmr = dev->max_pages_per_mr;
	fmr_param.page_shift	    = ilog2(dev->mr_page_size);
	fmr_param.access	    = (IB_ACCESS_LOCAL_WRITE |
				       IB_ACCESS_REMOTE_WRITE |
				       IB_ACCESS_REMOTE_READ);

	return ib_create_fmr_pool(dev->pd, &fmr_param);
}

/**
 * srp_destroy_fr_pool() - free the resources owned by a pool
 * @pool: Fast registration pool to be destroyed.
 */
static void srp_destroy_fr_pool(struct srp_fr_pool *pool)
{
	int i;
	struct srp_fr_desc *d;

	if (!pool)
		return;

	for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
		if (d->mr)
			ib_dereg_mr(d->mr);
	}
	kfree(pool);
}

/**
 * srp_create_fr_pool() - allocate and initialize a pool for fast registration
 * @device:            IB device to allocate fast registration descriptors for.
 * @pd:                Protection domain associated with the FR descriptors.
 * @pool_size:         Number of descriptors to allocate.
 * @max_page_list_len: Maximum fast registration work request page list length.
 */
static struct srp_fr_pool *srp_create_fr_pool(struct ib_device *device,
					      struct ib_pd *pd, int pool_size,
					      int max_page_list_len)
{
	struct srp_fr_pool *pool;
	struct srp_fr_desc *d;
	struct ib_mr *mr;
	int i, ret = -EINVAL;

	if (pool_size <= 0)
		goto err;
	ret = -ENOMEM;
	pool = kzalloc(sizeof(struct srp_fr_pool) +
		       pool_size * sizeof(struct srp_fr_desc), GFP_KERNEL);
	if (!pool)
		goto err;
	pool->size = pool_size;
	pool->max_page_list_len = max_page_list_len;
	spin_lock_init(&pool->lock);
	INIT_LIST_HEAD(&pool->free_list);

	for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
		mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG,
				 max_page_list_len);
		if (IS_ERR(mr)) {
			ret = PTR_ERR(mr);
			goto destroy_pool;
		}
		d->mr = mr;
		list_add_tail(&d->entry, &pool->free_list);
	}

out:
	return pool;

destroy_pool:
	srp_destroy_fr_pool(pool);

err:
	pool = ERR_PTR(ret);
	goto out;
}

/**
 * srp_fr_pool_get() - obtain a descriptor suitable for fast registration
 * @pool: Pool to obtain descriptor from.
 */
static struct srp_fr_desc *srp_fr_pool_get(struct srp_fr_pool *pool)
{
	struct srp_fr_desc *d = NULL;
	unsigned long flags;

	spin_lock_irqsave(&pool->lock, flags);
	if (!list_empty(&pool->free_list)) {
		d = list_first_entry(&pool->free_list, typeof(*d), entry);
		list_del(&d->entry);
	}
	spin_unlock_irqrestore(&pool->lock, flags);

	return d;
}

/**
 * srp_fr_pool_put() - put an FR descriptor back in the free list
 * @pool: Pool the descriptor was allocated from.
 * @desc: Pointer to an array of fast registration descriptor pointers.
 * @n:    Number of descriptors to put back.
 *
 * Note: The caller must already have queued an invalidation request for
 * desc->mr->rkey before calling this function.
 */
static void srp_fr_pool_put(struct srp_fr_pool *pool, struct srp_fr_desc **desc,
			    int n)
{
	unsigned long flags;
	int i;

	spin_lock_irqsave(&pool->lock, flags);
	for (i = 0; i < n; i++)
		list_add(&desc[i]->entry, &pool->free_list);
	spin_unlock_irqrestore(&pool->lock, flags);
}

static struct srp_fr_pool *srp_alloc_fr_pool(struct srp_target_port *target)
{
	struct srp_device *dev = target->srp_host->srp_dev;

	return srp_create_fr_pool(dev->dev, dev->pd, target->mr_pool_size,
				  dev->max_pages_per_mr);
}

/**
 * srp_destroy_qp() - destroy an RDMA queue pair
 * @qp: RDMA queue pair.
 *
 * Drain the qp before destroying it. This prevents the receive completion
 * handler from accessing the queue pair while it is being destroyed.
 */
static void srp_destroy_qp(struct ib_qp *qp)
{
	ib_drain_rq(qp);
	ib_destroy_qp(qp);
}

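/*
 * Create (or re-create) the completion queues, the queue pair and the memory
 * registration pool of an RDMA channel. On success any previously existing
 * QP, CQs and FMR/FR pool of @ch are destroyed and replaced; on failure the
 * channel is left untouched.
 */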
static int srp_create_ch_ib(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_qp_init_attr *init_attr;
	struct ib_cq *recv_cq, *send_cq;
	struct ib_qp *qp;
	struct ib_fmr_pool *fmr_pool = NULL;
	struct srp_fr_pool *fr_pool = NULL;
	const int m = dev->use_fast_reg ? 3 : 1;
	int ret;

	init_attr = kzalloc(sizeof *init_attr, GFP_KERNEL);
	if (!init_attr)
		return -ENOMEM;

	/* queue_size + 1 for ib_drain_rq() */
	recv_cq = ib_alloc_cq(dev->dev, ch, target->queue_size + 1,
				ch->comp_vector, IB_POLL_SOFTIRQ);
	if (IS_ERR(recv_cq)) {
		ret = PTR_ERR(recv_cq);
		goto err;
	}

	send_cq = ib_alloc_cq(dev->dev, ch, m * target->queue_size,
				ch->comp_vector, IB_POLL_DIRECT);
	if (IS_ERR(send_cq)) {
		ret = PTR_ERR(send_cq);
		goto err_recv_cq;
	}

	init_attr->event_handler       = srp_qp_event;
	init_attr->cap.max_send_wr     = m * target->queue_size;
	init_attr->cap.max_recv_wr     = target->queue_size + 1;
	init_attr->cap.max_recv_sge    = 1;
	init_attr->cap.max_send_sge    = 1;
	init_attr->sq_sig_type         = IB_SIGNAL_REQ_WR;
	init_attr->qp_type             = IB_QPT_RC;
	init_attr->send_cq             = send_cq;
	init_attr->recv_cq             = recv_cq;

	qp = ib_create_qp(dev->pd, init_attr);
	if (IS_ERR(qp)) {
		ret = PTR_ERR(qp);
		goto err_send_cq;
	}

	ret = srp_init_qp(target, qp);
	if (ret)
		goto err_qp;

	if (dev->use_fast_reg) {
		fr_pool = srp_alloc_fr_pool(target);
		if (IS_ERR(fr_pool)) {
			ret = PTR_ERR(fr_pool);
			shost_printk(KERN_WARNING, target->scsi_host, PFX
				     "FR pool allocation failed (%d)\n", ret);
			goto err_qp;
		}
	} else if (dev->use_fmr) {
		fmr_pool = srp_alloc_fmr_pool(target);
		if (IS_ERR(fmr_pool)) {
			ret = PTR_ERR(fmr_pool);
			shost_printk(KERN_WARNING, target->scsi_host, PFX
				     "FMR pool allocation failed (%d)\n", ret);
			goto err_qp;
		}
	}

	if (ch->qp)
		srp_destroy_qp(ch->qp);
	if (ch->recv_cq)
		ib_free_cq(ch->recv_cq);
	if (ch->send_cq)
		ib_free_cq(ch->send_cq);

	ch->qp = qp;
	ch->recv_cq = recv_cq;
	ch->send_cq = send_cq;

	if (dev->use_fast_reg) {
		if (ch->fr_pool)
			srp_destroy_fr_pool(ch->fr_pool);
		ch->fr_pool = fr_pool;
	} else if (dev->use_fmr) {
		if (ch->fmr_pool)
			ib_destroy_fmr_pool(ch->fmr_pool);
		ch->fmr_pool = fmr_pool;
	}

	kfree(init_attr);
	return 0;

err_qp:
	srp_destroy_qp(qp);

err_send_cq:
	ib_free_cq(send_cq);

err_recv_cq:
	ib_free_cq(recv_cq);

err:
	kfree(init_attr);
	return ret;
}

/*
 * Note: this function may be called without srp_alloc_iu_bufs() having been
 * invoked. Hence the ch->[rt]x_ring checks.
 */
static void srp_free_ch_ib(struct srp_target_port *target,
			   struct srp_rdma_ch *ch)
{
	struct srp_device *dev = target->srp_host->srp_dev;
	int i;

	if (!ch->target)
		return;

	if (ch->cm_id) {
		ib_destroy_cm_id(ch->cm_id);
		ch->cm_id = NULL;
	}

	/* If srp_new_cm_id() succeeded but srp_create_ch_ib() did not, return. */
	if (!ch->qp)
		return;

	if (dev->use_fast_reg) {
		if (ch->fr_pool)
			srp_destroy_fr_pool(ch->fr_pool);
	} else if (dev->use_fmr) {
		if (ch->fmr_pool)
			ib_destroy_fmr_pool(ch->fmr_pool);
	}

	srp_destroy_qp(ch->qp);
	ib_free_cq(ch->send_cq);
	ib_free_cq(ch->recv_cq);

	/*
	 * Prevent the SCSI error handler from using this channel after it
	 * has been freed; the SCSI error handler may continue trying to
	 * perform recovery actions after scsi_remove_host() has returned.
	 */
	ch->target = NULL;

	ch->qp = NULL;
	ch->send_cq = ch->recv_cq = NULL;

	if (ch->rx_ring) {
		for (i = 0; i < target->queue_size; ++i)
			srp_free_iu(target->srp_host, ch->rx_ring[i]);
		kfree(ch->rx_ring);
		ch->rx_ring = NULL;
	}
	if (ch->tx_ring) {
		for (i = 0; i < target->queue_size; ++i)
			srp_free_iu(target->srp_host, ch->tx_ring[i]);
		kfree(ch->tx_ring);
		ch->tx_ring = NULL;
	}
}

629 struct ib_sa_path_rec *pathrec,
Bart Van Assche509c07b2014-10-30 14:48:30 +0100630 void *ch_ptr)
Roland Dreieraef9ec32005-11-02 14:07:13 -0800631{
Bart Van Assche509c07b2014-10-30 14:48:30 +0100632 struct srp_rdma_ch *ch = ch_ptr;
633 struct srp_target_port *target = ch->target;
Roland Dreieraef9ec32005-11-02 14:07:13 -0800634
Bart Van Assche509c07b2014-10-30 14:48:30 +0100635 ch->status = status;
Roland Dreieraef9ec32005-11-02 14:07:13 -0800636 if (status)
David Dillow7aa54bd2008-01-07 18:23:41 -0500637 shost_printk(KERN_ERR, target->scsi_host,
638 PFX "Got failed path rec status %d\n", status);
Roland Dreieraef9ec32005-11-02 14:07:13 -0800639 else
Bart Van Assche509c07b2014-10-30 14:48:30 +0100640 ch->path = *pathrec;
641 complete(&ch->done);
Roland Dreieraef9ec32005-11-02 14:07:13 -0800642}
643
Bart Van Assche509c07b2014-10-30 14:48:30 +0100644static int srp_lookup_path(struct srp_rdma_ch *ch)
Roland Dreieraef9ec32005-11-02 14:07:13 -0800645{
Bart Van Assche509c07b2014-10-30 14:48:30 +0100646 struct srp_target_port *target = ch->target;
Bart Van Asschea702adc2014-03-14 13:53:10 +0100647 int ret;
648
Bart Van Assche509c07b2014-10-30 14:48:30 +0100649 ch->path.numb_path = 1;
Roland Dreieraef9ec32005-11-02 14:07:13 -0800650
Bart Van Assche509c07b2014-10-30 14:48:30 +0100651 init_completion(&ch->done);
Roland Dreieraef9ec32005-11-02 14:07:13 -0800652
Bart Van Assche509c07b2014-10-30 14:48:30 +0100653 ch->path_query_id = ib_sa_path_rec_get(&srp_sa_client,
654 target->srp_host->srp_dev->dev,
655 target->srp_host->port,
656 &ch->path,
657 IB_SA_PATH_REC_SERVICE_ID |
658 IB_SA_PATH_REC_DGID |
659 IB_SA_PATH_REC_SGID |
660 IB_SA_PATH_REC_NUMB_PATH |
661 IB_SA_PATH_REC_PKEY,
662 SRP_PATH_REC_TIMEOUT_MS,
663 GFP_KERNEL,
664 srp_path_rec_completion,
665 ch, &ch->path_query);
666 if (ch->path_query_id < 0)
667 return ch->path_query_id;
Roland Dreieraef9ec32005-11-02 14:07:13 -0800668
Bart Van Assche509c07b2014-10-30 14:48:30 +0100669 ret = wait_for_completion_interruptible(&ch->done);
Bart Van Asschea702adc2014-03-14 13:53:10 +0100670 if (ret < 0)
671 return ret;
Roland Dreieraef9ec32005-11-02 14:07:13 -0800672
Bart Van Assche509c07b2014-10-30 14:48:30 +0100673 if (ch->status < 0)
David Dillow7aa54bd2008-01-07 18:23:41 -0500674 shost_printk(KERN_WARNING, target->scsi_host,
675 PFX "Path record query failed\n");
Roland Dreieraef9ec32005-11-02 14:07:13 -0800676
Bart Van Assche509c07b2014-10-30 14:48:30 +0100677 return ch->status;
Roland Dreieraef9ec32005-11-02 14:07:13 -0800678}
679
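/*
 * Build and send an SRP_LOGIN_REQ through the IB CM. @multich is true for
 * every channel after the first one; this sets SRP_MULTICHAN_MULTI in
 * req_flags, which asks the target not to terminate existing connections
 * for this initiator/target port pair.
 */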
static int srp_send_req(struct srp_rdma_ch *ch, bool multich)
{
	struct srp_target_port *target = ch->target;
	struct {
		struct ib_cm_req_param param;
		struct srp_login_req   priv;
	} *req = NULL;
	int status;

	req = kzalloc(sizeof *req, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	req->param.primary_path	      = &ch->path;
	req->param.alternate_path     = NULL;
	req->param.service_id	      = target->service_id;
	req->param.qp_num	      = ch->qp->qp_num;
	req->param.qp_type	      = ch->qp->qp_type;
	req->param.private_data	      = &req->priv;
	req->param.private_data_len   = sizeof req->priv;
	req->param.flow_control	      = 1;

	get_random_bytes(&req->param.starting_psn, 4);
	req->param.starting_psn      &= 0xffffff;

	/*
	 * Pick some arbitrary defaults here; we could make these
	 * module parameters if anyone cared about setting them.
	 */
	req->param.responder_resources	      = 4;
	req->param.remote_cm_response_timeout = 20;
	req->param.local_cm_response_timeout  = 20;
	req->param.retry_count                = target->tl_retry_count;
	req->param.rnr_retry_count	      = 7;
	req->param.max_cm_retries	      = 15;

	req->priv.opcode	= SRP_LOGIN_REQ;
	req->priv.tag		= 0;
	req->priv.req_it_iu_len = cpu_to_be32(target->max_iu_len);
	req->priv.req_buf_fmt	= cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
					      SRP_BUF_FORMAT_INDIRECT);
	req->priv.req_flags	= (multich ? SRP_MULTICHAN_MULTI :
				   SRP_MULTICHAN_SINGLE);
	/*
	 * In the published SRP specification (draft rev. 16a), the
	 * port identifier format is 8 bytes of ID extension followed
	 * by 8 bytes of GUID.  Older drafts put the two halves in the
	 * opposite order, so that the GUID comes first.
	 *
	 * Targets conforming to these obsolete drafts can be
	 * recognized by the I/O Class they report.
	 */
	if (target->io_class == SRP_REV10_IB_IO_CLASS) {
		memcpy(req->priv.initiator_port_id,
		       &target->sgid.global.interface_id, 8);
		memcpy(req->priv.initiator_port_id + 8,
		       &target->initiator_ext, 8);
		memcpy(req->priv.target_port_id,     &target->ioc_guid, 8);
		memcpy(req->priv.target_port_id + 8, &target->id_ext, 8);
	} else {
		memcpy(req->priv.initiator_port_id,
		       &target->initiator_ext, 8);
		memcpy(req->priv.initiator_port_id + 8,
		       &target->sgid.global.interface_id, 8);
		memcpy(req->priv.target_port_id,     &target->id_ext, 8);
		memcpy(req->priv.target_port_id + 8, &target->ioc_guid, 8);
	}

	/*
	 * Topspin/Cisco SRP targets will reject our login unless we
	 * zero out the first 8 bytes of our initiator port ID and set
	 * the second 8 bytes to the local node GUID.
	 */
	if (srp_target_is_topspin(target)) {
		shost_printk(KERN_DEBUG, target->scsi_host,
			     PFX "Topspin/Cisco initiator port ID workaround "
			     "activated for target GUID %016llx\n",
			     be64_to_cpu(target->ioc_guid));
		memset(req->priv.initiator_port_id, 0, 8);
		memcpy(req->priv.initiator_port_id + 8,
		       &target->srp_host->srp_dev->dev->node_guid, 8);
	}

	status = ib_send_cm_req(ch->cm_id, &req->param);

	kfree(req);

	return status;
}

static bool srp_queue_remove_work(struct srp_target_port *target)
{
	bool changed = false;

	spin_lock_irq(&target->lock);
	if (target->state != SRP_TARGET_REMOVED) {
		target->state = SRP_TARGET_REMOVED;
		changed = true;
	}
	spin_unlock_irq(&target->lock);

	if (changed)
		queue_work(srp_remove_wq, &target->remove_work);

	return changed;
}

static void srp_disconnect_target(struct srp_target_port *target)
{
	struct srp_rdma_ch *ch;
	int i;

	/* XXX should send SRP_I_LOGOUT request */

	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		ch->connected = false;
		if (ch->cm_id && ib_send_cm_dreq(ch->cm_id, NULL, 0)) {
			shost_printk(KERN_DEBUG, target->scsi_host,
				     PFX "Sending CM DREQ failed\n");
		}
	}
}

static void srp_free_req_data(struct srp_target_port *target,
			      struct srp_rdma_ch *ch)
{
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_device *ibdev = dev->dev;
	struct srp_request *req;
	int i;

	if (!ch->req_ring)
		return;

	for (i = 0; i < target->req_ring_size; ++i) {
		req = &ch->req_ring[i];
		if (dev->use_fast_reg) {
			kfree(req->fr_list);
		} else {
			kfree(req->fmr_list);
			kfree(req->map_page);
		}
		if (req->indirect_dma_addr) {
			ib_dma_unmap_single(ibdev, req->indirect_dma_addr,
					    target->indirect_size,
					    DMA_TO_DEVICE);
		}
		kfree(req->indirect_desc);
	}

	kfree(ch->req_ring);
	ch->req_ring = NULL;
}

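/*
 * Allocate the request ring of @ch together with the per-request memory
 * registration descriptor lists and the DMA-mapped indirect descriptor
 * buffers. Partially allocated rings are released by the caller through
 * srp_free_req_data().
 */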
static int srp_alloc_req_data(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	struct srp_device *srp_dev = target->srp_host->srp_dev;
	struct ib_device *ibdev = srp_dev->dev;
	struct srp_request *req;
	void *mr_list;
	dma_addr_t dma_addr;
	int i, ret = -ENOMEM;

	ch->req_ring = kcalloc(target->req_ring_size, sizeof(*ch->req_ring),
			       GFP_KERNEL);
	if (!ch->req_ring)
		goto out;

	for (i = 0; i < target->req_ring_size; ++i) {
		req = &ch->req_ring[i];
		mr_list = kmalloc(target->cmd_sg_cnt * sizeof(void *),
				  GFP_KERNEL);
		if (!mr_list)
			goto out;
		if (srp_dev->use_fast_reg) {
			req->fr_list = mr_list;
		} else {
			req->fmr_list = mr_list;
			req->map_page = kmalloc(srp_dev->max_pages_per_mr *
						sizeof(void *), GFP_KERNEL);
			if (!req->map_page)
				goto out;
		}
		req->indirect_desc = kmalloc(target->indirect_size, GFP_KERNEL);
		if (!req->indirect_desc)
			goto out;

		dma_addr = ib_dma_map_single(ibdev, req->indirect_desc,
					     target->indirect_size,
					     DMA_TO_DEVICE);
		if (ib_dma_mapping_error(ibdev, dma_addr))
			goto out;

		req->indirect_dma_addr = dma_addr;
	}
	ret = 0;

out:
	return ret;
}

/**
 * srp_del_scsi_host_attr() - Remove attributes defined in the host template.
 * @shost: SCSI host whose attributes to remove from sysfs.
 *
 * Note: Any attributes defined in the host template and that did not exist
 * before invocation of this function will be ignored.
 */
static void srp_del_scsi_host_attr(struct Scsi_Host *shost)
{
	struct device_attribute **attr;

	for (attr = shost->hostt->shost_attrs; attr && *attr; ++attr)
		device_remove_file(&shost->shost_dev, *attr);
}

static void srp_remove_target(struct srp_target_port *target)
{
	struct srp_rdma_ch *ch;
	int i;

	WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);

	srp_del_scsi_host_attr(target->scsi_host);
	srp_rport_get(target->rport);
	srp_remove_host(target->scsi_host);
	scsi_remove_host(target->scsi_host);
	srp_stop_rport_timers(target->rport);
	srp_disconnect_target(target);
	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		srp_free_ch_ib(target, ch);
	}
	cancel_work_sync(&target->tl_err_work);
	srp_rport_put(target->rport);
	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		srp_free_req_data(target, ch);
	}
	kfree(target->ch);
	target->ch = NULL;

	spin_lock(&target->srp_host->target_lock);
	list_del(&target->list);
	spin_unlock(&target->srp_host->target_lock);

	scsi_host_put(target->scsi_host);
}

static void srp_remove_work(struct work_struct *work)
{
	struct srp_target_port *target =
		container_of(work, struct srp_target_port, remove_work);

	WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);

	srp_remove_target(target);
}

static void srp_rport_delete(struct srp_rport *rport)
{
	struct srp_target_port *target = rport->lld_data;

	srp_queue_remove_work(target);
}

/**
 * srp_connected_ch() - number of connected channels
 * @target: SRP target port.
 */
static int srp_connected_ch(struct srp_target_port *target)
{
	int i, c = 0;

	for (i = 0; i < target->ch_count; i++)
		c += target->ch[i].connected;

	return c;
}

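/*
 * Connect a single RDMA channel: look up a path, send the login request and
 * keep retrying while the target answers with port or LID/QP redirects.
 * Returns 0 on success and a negative error code otherwise.
 */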
static int srp_connect_ch(struct srp_rdma_ch *ch, bool multich)
{
	struct srp_target_port *target = ch->target;
	int ret;

	WARN_ON_ONCE(!multich && srp_connected_ch(target) > 0);

	ret = srp_lookup_path(ch);
	if (ret)
		goto out;

	while (1) {
		init_completion(&ch->done);
		ret = srp_send_req(ch, multich);
		if (ret)
			goto out;
		ret = wait_for_completion_interruptible(&ch->done);
		if (ret < 0)
			goto out;

		/*
		 * The CM event handling code will set status to
		 * SRP_PORT_REDIRECT if we get a port redirect REJ
		 * back, or SRP_DLID_REDIRECT if we get a lid/qp
		 * redirect REJ back.
		 */
		ret = ch->status;
		switch (ret) {
		case 0:
			ch->connected = true;
			goto out;

		case SRP_PORT_REDIRECT:
			ret = srp_lookup_path(ch);
			if (ret)
				goto out;
			break;

		case SRP_DLID_REDIRECT:
			break;

		case SRP_STALE_CONN:
			shost_printk(KERN_ERR, target->scsi_host, PFX
				     "giving up on stale connection\n");
			ret = -ECONNRESET;
			goto out;

		default:
			goto out;
		}
	}

out:
	return ret <= 0 ? ret : -ENODEV;
}

static void srp_inv_rkey_err_done(struct ib_cq *cq, struct ib_wc *wc)
{
	srp_handle_qp_err(cq, wc, "INV RKEY");
}

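/*
 * Post a local invalidate work request for @rkey on the queue pair of @ch.
 * Completion errors are reported through srp_inv_rkey_err_done().
 */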
static int srp_inv_rkey(struct srp_request *req, struct srp_rdma_ch *ch,
		u32 rkey)
{
	struct ib_send_wr *bad_wr;
	struct ib_send_wr wr = {
		.opcode		    = IB_WR_LOCAL_INV,
		.next		    = NULL,
		.num_sge	    = 0,
		.send_flags	    = 0,
		.ex.invalidate_rkey = rkey,
	};

	wr.wr_cqe = &req->reg_cqe;
	req->reg_cqe.done = srp_inv_rkey_err_done;
	return ib_post_send(ch->qp, &wr, &bad_wr);
}

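/*
 * Undo the memory registrations performed for @scmnd: queue invalidation of
 * fast registration descriptors or unmap pool FMRs, then unmap the
 * scatterlist of the command.
 */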
static void srp_unmap_data(struct scsi_cmnd *scmnd,
			   struct srp_rdma_ch *ch,
			   struct srp_request *req)
{
	struct srp_target_port *target = ch->target;
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_device *ibdev = dev->dev;
	int i, res;

	if (!scsi_sglist(scmnd) ||
	    (scmnd->sc_data_direction != DMA_TO_DEVICE &&
	     scmnd->sc_data_direction != DMA_FROM_DEVICE))
		return;

	if (dev->use_fast_reg) {
		struct srp_fr_desc **pfr;

		for (i = req->nmdesc, pfr = req->fr_list; i > 0; i--, pfr++) {
			res = srp_inv_rkey(req, ch, (*pfr)->mr->rkey);
			if (res < 0) {
				shost_printk(KERN_ERR, target->scsi_host, PFX
					     "Queueing INV WR for rkey %#x failed (%d)\n",
					     (*pfr)->mr->rkey, res);
				queue_work(system_long_wq,
					   &target->tl_err_work);
			}
		}
		if (req->nmdesc)
			srp_fr_pool_put(ch->fr_pool, req->fr_list,
					req->nmdesc);
	} else if (dev->use_fmr) {
		struct ib_pool_fmr **pfmr;

		for (i = req->nmdesc, pfmr = req->fmr_list; i > 0; i--, pfmr++)
			ib_fmr_pool_unmap(*pfmr);
	}

	ib_dma_unmap_sg(ibdev, scsi_sglist(scmnd), scsi_sg_count(scmnd),
			scmnd->sc_data_direction);
}

/**
 * srp_claim_req - Take ownership of the scmnd associated with a request.
 * @ch: SRP RDMA channel.
 * @req: SRP request.
 * @sdev: If not NULL, only take ownership for this SCSI device.
 * @scmnd: If NULL, take ownership of @req->scmnd. If not NULL, only take
 *         ownership of @req->scmnd if it equals @scmnd.
 *
 * Return value:
 * Either NULL or a pointer to the SCSI command the caller became owner of.
 */
static struct scsi_cmnd *srp_claim_req(struct srp_rdma_ch *ch,
				       struct srp_request *req,
				       struct scsi_device *sdev,
				       struct scsi_cmnd *scmnd)
{
	unsigned long flags;

	spin_lock_irqsave(&ch->lock, flags);
	if (req->scmnd &&
	    (!sdev || req->scmnd->device == sdev) &&
	    (!scmnd || req->scmnd == scmnd)) {
		scmnd = req->scmnd;
		req->scmnd = NULL;
	} else {
		scmnd = NULL;
	}
	spin_unlock_irqrestore(&ch->lock, flags);

	return scmnd;
}

/**
 * srp_free_req() - Unmap data and adjust ch->req_lim.
 * @ch:     SRP RDMA channel.
 * @req:    Request to be freed.
 * @scmnd:  SCSI command associated with @req.
 * @req_lim_delta: Amount to be added to @ch->req_lim.
 */
static void srp_free_req(struct srp_rdma_ch *ch, struct srp_request *req,
			 struct scsi_cmnd *scmnd, s32 req_lim_delta)
{
	unsigned long flags;

	srp_unmap_data(scmnd, ch, req);

	spin_lock_irqsave(&ch->lock, flags);
	ch->req_lim += req_lim_delta;
	spin_unlock_irqrestore(&ch->lock, flags);
}

static void srp_finish_req(struct srp_rdma_ch *ch, struct srp_request *req,
			   struct scsi_device *sdev, int result)
{
	struct scsi_cmnd *scmnd = srp_claim_req(ch, req, sdev, NULL);

	if (scmnd) {
		srp_free_req(ch, req, scmnd, 0);
		scmnd->result = result;
		scmnd->scsi_done(scmnd);
	}
}

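/*
 * Finish all outstanding requests of every channel with
 * DID_TRANSPORT_FAILFAST. This function is wired up as the transport
 * layer's terminate_rport_io callback later in this file and runs when
 * I/O has to be failed fast after a transport layer error.
 */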
static void srp_terminate_io(struct srp_rport *rport)
{
	struct srp_target_port *target = rport->lld_data;
	struct srp_rdma_ch *ch;
	struct Scsi_Host *shost = target->scsi_host;
	struct scsi_device *sdev;
	int i, j;

	/*
	 * Invoking srp_terminate_io() while srp_queuecommand() is running
	 * is not safe. Hence the warning statement below.
	 */
	shost_for_each_device(sdev, shost)
		WARN_ON_ONCE(sdev->request_queue->request_fn_active);

	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];

		for (j = 0; j < target->req_ring_size; ++j) {
			struct srp_request *req = &ch->req_ring[j];

			srp_finish_req(ch, req, NULL,
				       DID_TRANSPORT_FAILFAST << 16);
		}
	}
}

1171/*
1172 * It is up to the caller to ensure that srp_rport_reconnect() calls are
1173 * serialized and that no concurrent srp_queuecommand(), srp_abort(),
1174 * srp_reset_device() or srp_reset_host() calls will occur while this function
1175 * is in progress. One way to realize that is not to call this function
1176 * directly but to call srp_reconnect_rport() instead since that last function
1177 * serializes calls of this function via rport->mutex and also blocks
1178 * srp_queuecommand() calls before invoking this function.
1179 */
1180static int srp_rport_reconnect(struct srp_rport *rport)
1181{
1182 struct srp_target_port *target = rport->lld_data;
Bart Van Assched92c0da2014-10-06 17:14:36 +02001183 struct srp_rdma_ch *ch;
1184 int i, j, ret = 0;
1185 bool multich = false;
Bart Van Assche09be70a2012-03-17 17:18:54 +00001186
Roland Dreieraef9ec32005-11-02 14:07:13 -08001187 srp_disconnect_target(target);
Bart Van Assche34aa6542014-10-30 14:47:22 +01001188
1189 if (target->state == SRP_TARGET_SCANNING)
1190 return -ENODEV;
1191
Roland Dreieraef9ec32005-11-02 14:07:13 -08001192 /*
Bart Van Asschec7c4e7f2013-02-21 17:19:04 +00001193 * Now get a new local CM ID so that we avoid confusing the target in
1194 * case things are really fouled up. Doing so also ensures that all CM
1195 * callbacks will have finished before a new QP is allocated.
Roland Dreieraef9ec32005-11-02 14:07:13 -08001196 */
Bart Van Assched92c0da2014-10-06 17:14:36 +02001197 for (i = 0; i < target->ch_count; i++) {
1198 ch = &target->ch[i];
Bart Van Assched92c0da2014-10-06 17:14:36 +02001199 ret += srp_new_cm_id(ch);
Bart Van Assche536ae142010-11-26 13:58:27 -05001200 }
Bart Van Assched92c0da2014-10-06 17:14:36 +02001201 for (i = 0; i < target->ch_count; i++) {
1202 ch = &target->ch[i];
Bart Van Assched92c0da2014-10-06 17:14:36 +02001203 for (j = 0; j < target->req_ring_size; ++j) {
1204 struct srp_request *req = &ch->req_ring[j];
Roland Dreieraef9ec32005-11-02 14:07:13 -08001205
Bart Van Assched92c0da2014-10-06 17:14:36 +02001206 srp_finish_req(ch, req, NULL, DID_RESET << 16);
1207 }
1208 }
1209 for (i = 0; i < target->ch_count; i++) {
1210 ch = &target->ch[i];
Bart Van Assched92c0da2014-10-06 17:14:36 +02001211 /*
1212 * Whether or not creating a new CM ID succeeded, create a new
1213 * QP. This guarantees that all completion callback function
1214 * invocations have finished before request resetting starts.
1215 */
1216 ret += srp_create_ch_ib(ch);
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001217
Bart Van Assched92c0da2014-10-06 17:14:36 +02001218 INIT_LIST_HEAD(&ch->free_tx);
1219 for (j = 0; j < target->queue_size; ++j)
1220 list_add(&ch->tx_ring[j]->list, &ch->free_tx);
1221 }
Bart Van Assche8de9fe32015-05-18 13:23:36 +02001222
1223 target->qp_in_error = false;
1224
Bart Van Assched92c0da2014-10-06 17:14:36 +02001225 for (i = 0; i < target->ch_count; i++) {
1226 ch = &target->ch[i];
Bart Van Asschebbac5cc2015-05-18 13:26:17 +02001227 if (ret)
Bart Van Assched92c0da2014-10-06 17:14:36 +02001228 break;
Bart Van Assched92c0da2014-10-06 17:14:36 +02001229 ret = srp_connect_ch(ch, multich);
1230 multich = true;
1231 }
Bart Van Assche09be70a2012-03-17 17:18:54 +00001232
Bart Van Asscheed9b2262013-10-26 14:34:27 +02001233 if (ret == 0)
1234 shost_printk(KERN_INFO, target->scsi_host,
1235 PFX "reconnect succeeded\n");
Roland Dreieraef9ec32005-11-02 14:07:13 -08001236
1237 return ret;
1238}
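/*
 * Illustrative sketch (editor's addition, not from the original source):
 * the serialization contract described above is normally met by going
 * through the SRP transport class instead of calling srp_rport_reconnect()
 * directly. A hypothetical recovery path, example_recover_rport(), would
 * look like this:
 */
#if 0
static void example_recover_rport(struct srp_rport *rport)
{
	/*
	 * srp_reconnect_rport() takes rport->mutex, blocks
	 * srp_queuecommand() and only then invokes the .reconnect
	 * callback (srp_rport_reconnect() above) that this driver
	 * registers in its srp_function_template.
	 */
	if (srp_reconnect_rport(rport))
		pr_err(PFX "example: reconnect failed\n");
}
#endif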
1239
David Dillow8f26c9f2011-01-14 19:45:50 -05001240static void srp_map_desc(struct srp_map_state *state, dma_addr_t dma_addr,
1241 unsigned int dma_len, u32 rkey)
Roland Dreierf5358a12006-06-17 20:37:29 -07001242{
David Dillow8f26c9f2011-01-14 19:45:50 -05001243 struct srp_direct_buf *desc = state->desc;
1244
Bart Van Assche3ae95da2015-08-10 17:08:18 -07001245 WARN_ON_ONCE(!dma_len);
1246
David Dillow8f26c9f2011-01-14 19:45:50 -05001247 desc->va = cpu_to_be64(dma_addr);
1248 desc->key = cpu_to_be32(rkey);
1249 desc->len = cpu_to_be32(dma_len);
1250
1251 state->total_len += dma_len;
1252 state->desc++;
1253 state->ndesc++;
1254}
1255
1256static int srp_map_finish_fmr(struct srp_map_state *state,
Bart Van Assche509c07b2014-10-30 14:48:30 +01001257 struct srp_rdma_ch *ch)
David Dillow8f26c9f2011-01-14 19:45:50 -05001258{
Bart Van Assche186fbc62015-08-10 17:06:29 -07001259 struct srp_target_port *target = ch->target;
1260 struct srp_device *dev = target->srp_host->srp_dev;
David Dillow8f26c9f2011-01-14 19:45:50 -05001261 struct ib_pool_fmr *fmr;
Roland Dreierf5358a12006-06-17 20:37:29 -07001262 u64 io_addr = 0;
David Dillow8f26c9f2011-01-14 19:45:50 -05001263
Bart Van Asschef731ed62015-08-10 17:07:27 -07001264 if (state->fmr.next >= state->fmr.end)
1265 return -ENOMEM;
1266
Sagi Grimberg26630e82015-10-13 19:11:38 +03001267 WARN_ON_ONCE(!dev->use_fmr);
1268
1269 if (state->npages == 0)
1270 return 0;
1271
1272 if (state->npages == 1 && target->global_mr) {
1273 srp_map_desc(state, state->base_dma_addr, state->dma_len,
1274 target->global_mr->rkey);
1275 goto reset_state;
1276 }
1277
Bart Van Assche509c07b2014-10-30 14:48:30 +01001278 fmr = ib_fmr_pool_map_phys(ch->fmr_pool, state->pages,
David Dillow8f26c9f2011-01-14 19:45:50 -05001279 state->npages, io_addr);
1280 if (IS_ERR(fmr))
1281 return PTR_ERR(fmr);
1282
Bart Van Asschef731ed62015-08-10 17:07:27 -07001283 *state->fmr.next++ = fmr;
Bart Van Assche52ede082014-05-20 15:07:45 +02001284 state->nmdesc++;
David Dillow8f26c9f2011-01-14 19:45:50 -05001285
Bart Van Assche186fbc62015-08-10 17:06:29 -07001286 srp_map_desc(state, state->base_dma_addr & ~dev->mr_page_mask,
1287 state->dma_len, fmr->fmr->rkey);
Bart Van Assche539dde62014-05-20 15:05:46 +02001288
Sagi Grimberg26630e82015-10-13 19:11:38 +03001289reset_state:
1290 state->npages = 0;
1291 state->dma_len = 0;
1292
David Dillow8f26c9f2011-01-14 19:45:50 -05001293 return 0;
1294}
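/*
 * Worked example (editor's addition, not from the original source), using
 * a hypothetical buffer: assume mr_page_size = 4096, i.e. ~mr_page_mask ==
 * 0xfff. A two-page buffer at DMA address 0x12345678 is handed to
 * ib_fmr_pool_map_phys() as the page-aligned list { 0x12345000,
 * 0x12346000 } with io_addr 0, and srp_map_desc() rebases the SRP
 * descriptor at the intra-page offset:
 *
 *	0x12345678 & ~dev->mr_page_mask == 0x678
 *
 * so the target addresses the data as offset 0x678 under the FMR's rkey.
 */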
1295
Christoph Hellwig1dc7b1f2015-11-13 12:57:13 +01001296static void srp_reg_mr_err_done(struct ib_cq *cq, struct ib_wc *wc)
1297{
1298 srp_handle_qp_err(cq, wc, "FAST REG");
1299}
1300
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001301static int srp_map_finish_fr(struct srp_map_state *state,
Christoph Hellwig1dc7b1f2015-11-13 12:57:13 +01001302 struct srp_request *req,
Bart Van Assche57b0be92015-12-01 10:19:38 -08001303 struct srp_rdma_ch *ch, int sg_nents)
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001304{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001305 struct srp_target_port *target = ch->target;
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001306 struct srp_device *dev = target->srp_host->srp_dev;
1307 struct ib_send_wr *bad_wr;
Sagi Grimbergf7f7aab2015-10-13 19:11:39 +03001308 struct ib_reg_wr wr;
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001309 struct srp_fr_desc *desc;
1310 u32 rkey;
Sagi Grimbergf7f7aab2015-10-13 19:11:39 +03001311 int n, err;
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001312
Bart Van Asschef731ed62015-08-10 17:07:27 -07001313 if (state->fr.next >= state->fr.end)
1314 return -ENOMEM;
1315
Sagi Grimberg26630e82015-10-13 19:11:38 +03001316 WARN_ON_ONCE(!dev->use_fast_reg);
1317
Bart Van Assche57b0be92015-12-01 10:19:38 -08001318 if (sg_nents == 1 && target->global_mr) {
Sagi Grimbergf7f7aab2015-10-13 19:11:39 +03001319 srp_map_desc(state, sg_dma_address(state->sg),
1320 sg_dma_len(state->sg),
Sagi Grimberg26630e82015-10-13 19:11:38 +03001321 target->global_mr->rkey);
Sagi Grimbergf7f7aab2015-10-13 19:11:39 +03001322 return 1;
Sagi Grimberg26630e82015-10-13 19:11:38 +03001323 }
1324
Bart Van Assche509c07b2014-10-30 14:48:30 +01001325 desc = srp_fr_pool_get(ch->fr_pool);
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001326 if (!desc)
1327 return -ENOMEM;
1328
1329 rkey = ib_inc_rkey(desc->mr->rkey);
1330 ib_update_fast_reg_key(desc->mr, rkey);
1331
Bart Van Assche9aa8b322016-05-12 10:49:15 -07001332 n = ib_map_mr_sg(desc->mr, state->sg, sg_nents, NULL, dev->mr_page_size);
Bart Van Assche9d8e7d02016-05-12 10:48:13 -07001333 if (unlikely(n < 0)) {
1334 srp_fr_pool_put(ch->fr_pool, &desc, 1);
1335 pr_debug("%s: ib_map_mr_sg(%d) returned %d.\n",
1336 dev_name(&req->scmnd->device->sdev_gendev), sg_nents,
1337 n);
Sagi Grimbergf7f7aab2015-10-13 19:11:39 +03001338 return n;
Bart Van Assche9d8e7d02016-05-12 10:48:13 -07001339 }
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001340
Christoph Hellwig1dc7b1f2015-11-13 12:57:13 +01001341 req->reg_cqe.done = srp_reg_mr_err_done;
1342
Sagi Grimbergf7f7aab2015-10-13 19:11:39 +03001343 wr.wr.next = NULL;
1344 wr.wr.opcode = IB_WR_REG_MR;
Christoph Hellwig1dc7b1f2015-11-13 12:57:13 +01001345 wr.wr.wr_cqe = &req->reg_cqe;
Sagi Grimbergf7f7aab2015-10-13 19:11:39 +03001346 wr.wr.num_sge = 0;
1347 wr.wr.send_flags = 0;
1348 wr.mr = desc->mr;
1349 wr.key = desc->mr->rkey;
1350 wr.access = (IB_ACCESS_LOCAL_WRITE |
1351 IB_ACCESS_REMOTE_READ |
1352 IB_ACCESS_REMOTE_WRITE);
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001353
Bart Van Asschef731ed62015-08-10 17:07:27 -07001354 *state->fr.next++ = desc;
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001355 state->nmdesc++;
1356
Sagi Grimbergf7f7aab2015-10-13 19:11:39 +03001357 srp_map_desc(state, desc->mr->iova,
1358 desc->mr->length, desc->mr->rkey);
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001359
Sagi Grimberg26630e82015-10-13 19:11:38 +03001360 err = ib_post_send(ch->qp, &wr.wr, &bad_wr);
Sagi Grimbergf7f7aab2015-10-13 19:11:39 +03001361 if (unlikely(err))
Sagi Grimberg26630e82015-10-13 19:11:38 +03001362 return err;
1363
Sagi Grimbergf7f7aab2015-10-13 19:11:39 +03001364 return n;
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001365}
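/*
 * Illustrative sketch (editor's addition, not from the original source):
 * stripped of the SRP bookkeeping, the fast-registration recipe used above
 * is the sequence below. example_fast_reg() is hypothetical and assumes
 * the ib_map_mr_sg() signature used elsewhere in this file.
 */
#if 0
static int example_fast_reg(struct ib_qp *qp, struct ib_mr *mr,
			    struct scatterlist *sg, int sg_nents,
			    unsigned int page_size)
{
	struct ib_send_wr *bad_wr;
	struct ib_reg_wr wr = {};
	int n, err;

	/* Rotate the rkey so stale references to a prior mapping fault. */
	ib_update_fast_reg_key(mr, ib_inc_rkey(mr->rkey));

	/* Translate the S/G list into the MR's page list. */
	n = ib_map_mr_sg(mr, sg, sg_nents, NULL, page_size);
	if (n < 0)
		return n;

	wr.wr.opcode = IB_WR_REG_MR;	/* a real caller also sets wr.wr.wr_cqe */
	wr.mr = mr;
	wr.key = mr->rkey;
	wr.access = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ |
		    IB_ACCESS_REMOTE_WRITE;

	err = ib_post_send(qp, &wr.wr, &bad_wr);
	return err ? err : n;	/* n = S/G entries consumed by this MR */
}
#endif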
1366
David Dillow8f26c9f2011-01-14 19:45:50 -05001367static int srp_map_sg_entry(struct srp_map_state *state,
Bart Van Assche509c07b2014-10-30 14:48:30 +01001368 struct srp_rdma_ch *ch,
Bart Van Assche3ae95da2015-08-10 17:08:18 -07001369 struct scatterlist *sg, int sg_index)
David Dillow8f26c9f2011-01-14 19:45:50 -05001370{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001371 struct srp_target_port *target = ch->target;
Greg Kroah-Hartman05321932008-03-06 00:13:36 +01001372 struct srp_device *dev = target->srp_host->srp_dev;
Ralph Campbell85507bc2006-12-12 14:30:55 -08001373 struct ib_device *ibdev = dev->dev;
David Dillow8f26c9f2011-01-14 19:45:50 -05001374 dma_addr_t dma_addr = ib_sg_dma_address(ibdev, sg);
1375 unsigned int dma_len = ib_sg_dma_len(ibdev, sg);
Bart Van Assche3ae95da2015-08-10 17:08:18 -07001376 unsigned int len = 0;
David Dillow8f26c9f2011-01-14 19:45:50 -05001377 int ret;
Roland Dreierf5358a12006-06-17 20:37:29 -07001378
Bart Van Assche3ae95da2015-08-10 17:08:18 -07001379 WARN_ON_ONCE(!dma_len);
Ishai Rabinovitz559ce8f2006-08-03 10:35:43 -07001380
David Dillow8f26c9f2011-01-14 19:45:50 -05001381 while (dma_len) {
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001382 unsigned offset = dma_addr & ~dev->mr_page_mask;
1383 if (state->npages == dev->max_pages_per_mr || offset != 0) {
Sagi Grimbergf7f7aab2015-10-13 19:11:39 +03001384 ret = srp_map_finish_fmr(state, ch);
David Dillow8f26c9f2011-01-14 19:45:50 -05001385 if (ret)
1386 return ret;
Roland Dreierf5358a12006-06-17 20:37:29 -07001387 }
1388
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001389 len = min_t(unsigned int, dma_len, dev->mr_page_size - offset);
David Dillow8f26c9f2011-01-14 19:45:50 -05001390
1391 if (!state->npages)
1392 state->base_dma_addr = dma_addr;
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001393 state->pages[state->npages++] = dma_addr & dev->mr_page_mask;
Bart Van Assche52ede082014-05-20 15:07:45 +02001394 state->dma_len += len;
David Dillow8f26c9f2011-01-14 19:45:50 -05001395 dma_addr += len;
1396 dma_len -= len;
Roland Dreierf5358a12006-06-17 20:37:29 -07001397 }
1398
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001399 /*
1400 * If the last entry of the MR wasn't a full page, then we need to
David Dillow8f26c9f2011-01-14 19:45:50 -05001401 * close it out and start a new one -- we can only merge at page
Bart Van Assche1d3d98c2016-04-22 14:12:10 -07001402 * boundaries.
David Dillow8f26c9f2011-01-14 19:45:50 -05001403 */
Roland Dreierf5358a12006-06-17 20:37:29 -07001404 ret = 0;
Bart Van Assche0e0d3a42015-08-10 17:07:46 -07001405 if (len != dev->mr_page_size)
Sagi Grimbergf7f7aab2015-10-13 19:11:39 +03001406 ret = srp_map_finish_fmr(state, ch);
Roland Dreierf5358a12006-06-17 20:37:29 -07001407 return ret;
1408}
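/*
 * Worked example (editor's addition, not from the original source), with
 * hypothetical numbers: for mr_page_size = 4096, a 10000-byte S/G entry at
 * DMA address 0x10000 is consumed by the loop above in three steps:
 *
 *	page 0x10000, len 4096
 *	page 0x11000, len 4096
 *	page 0x12000, len 1808	(partial page)
 *
 * Because the final chunk is shorter than mr_page_size, the mapping is
 * closed via srp_map_finish_fmr() and the next S/G entry starts a new
 * memory region, honoring the merge-at-page-boundaries rule.
 */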
1409
Sagi Grimberg26630e82015-10-13 19:11:38 +03001410static int srp_map_sg_fmr(struct srp_map_state *state, struct srp_rdma_ch *ch,
1411 struct srp_request *req, struct scatterlist *scat,
1412 int count)
1413{
1414 struct scatterlist *sg;
1415 int i, ret;
1416
	state->desc = req->indirect_desc;	/* editor's fix: missing in this listing; the FR and DMA variants below set it */
Sagi Grimberg26630e82015-10-13 19:11:38 +03001417	state->pages = req->map_page;
1418 state->fmr.next = req->fmr_list;
1419 state->fmr.end = req->fmr_list + ch->target->cmd_sg_cnt;
1420
1421 for_each_sg(scat, sg, count, i) {
1422 ret = srp_map_sg_entry(state, ch, sg, i);
1423 if (ret)
1424 return ret;
1425 }
1426
Sagi Grimbergf7f7aab2015-10-13 19:11:39 +03001427 ret = srp_map_finish_fmr(state, ch);
Sagi Grimberg26630e82015-10-13 19:11:38 +03001428 if (ret)
1429 return ret;
1430
Sagi Grimberg26630e82015-10-13 19:11:38 +03001431 return 0;
1432}
1433
1434static int srp_map_sg_fr(struct srp_map_state *state, struct srp_rdma_ch *ch,
1435 struct srp_request *req, struct scatterlist *scat,
1436 int count)
1437{
Sagi Grimberg26630e82015-10-13 19:11:38 +03001438 state->desc = req->indirect_desc;
Sagi Grimbergf7f7aab2015-10-13 19:11:39 +03001439 state->fr.next = req->fr_list;
1440 state->fr.end = req->fr_list + ch->target->cmd_sg_cnt;
1441 state->sg = scat;
Sagi Grimberg26630e82015-10-13 19:11:38 +03001442
Bart Van Assche3b59b7a62016-04-22 14:14:43 -07001443 if (count == 0)
1444 return 0;
1445
Bart Van Assche57b0be92015-12-01 10:19:38 -08001446 while (count) {
Sagi Grimbergf7f7aab2015-10-13 19:11:39 +03001447 int i, n;
1448
Doug Ledfordc6333f92015-12-15 14:10:44 -05001449 n = srp_map_finish_fr(state, req, ch, count);
Sagi Grimbergf7f7aab2015-10-13 19:11:39 +03001450 if (unlikely(n < 0))
1451 return n;
1452
Bart Van Assche57b0be92015-12-01 10:19:38 -08001453 count -= n;
Sagi Grimbergf7f7aab2015-10-13 19:11:39 +03001454 for (i = 0; i < n; i++)
1455 state->sg = sg_next(state->sg);
Sagi Grimberg26630e82015-10-13 19:11:38 +03001456 }
1457
Sagi Grimberg26630e82015-10-13 19:11:38 +03001458 return 0;
1459}
1460
1461static int srp_map_sg_dma(struct srp_map_state *state, struct srp_rdma_ch *ch,
1462 struct srp_request *req, struct scatterlist *scat,
1463 int count)
Bart Van Assche76bc1e12014-05-20 15:05:21 +02001464{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001465 struct srp_target_port *target = ch->target;
Bart Van Assche76bc1e12014-05-20 15:05:21 +02001466 struct srp_device *dev = target->srp_host->srp_dev;
Bart Van Assche76bc1e12014-05-20 15:05:21 +02001467 struct scatterlist *sg;
Sagi Grimberg26630e82015-10-13 19:11:38 +03001468 int i;
Bart Van Assche76bc1e12014-05-20 15:05:21 +02001469
Sagi Grimberg26630e82015-10-13 19:11:38 +03001470 state->desc = req->indirect_desc;
1471 for_each_sg(scat, sg, count, i) {
1472 srp_map_desc(state, ib_sg_dma_address(dev->dev, sg),
1473 ib_sg_dma_len(dev->dev, sg),
1474 target->global_mr->rkey);
Bart Van Assche0e0d3a42015-08-10 17:07:46 -07001475 }
Bart Van Assche76bc1e12014-05-20 15:05:21 +02001476
Sagi Grimberg26630e82015-10-13 19:11:38 +03001477 return 0;
Bart Van Assche76bc1e12014-05-20 15:05:21 +02001478}
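/*
 * Editor's summary (not from the original source): the three helpers above
 * expose one interface over the driver's three registration strategies.
 * srp_map_sg_fr() posts IB_WR_REG_MR work requests (fast registration),
 * srp_map_sg_fmr() maps page lists through an FMR pool, and
 * srp_map_sg_dma() skips registration entirely and emits one descriptor
 * per S/G entry under the global rkey. srp_map_data() below selects among
 * them based on dev->use_fast_reg and dev->use_fmr.
 */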
1479
Bart Van Assche330179f2015-08-10 17:09:05 -07001480/*
1481 * Register the indirect data buffer descriptor with the HCA.
1482 *
1483 * Note: since the indirect data buffer descriptor has been allocated with
1484 * kmalloc() it is guaranteed that this buffer is a physically contiguous
1485 * memory buffer.
1486 */
1487static int srp_map_idb(struct srp_rdma_ch *ch, struct srp_request *req,
1488 void **next_mr, void **end_mr, u32 idb_len,
1489 __be32 *idb_rkey)
1490{
1491 struct srp_target_port *target = ch->target;
1492 struct srp_device *dev = target->srp_host->srp_dev;
1493 struct srp_map_state state;
1494 struct srp_direct_buf idb_desc;
1495 u64 idb_pages[1];
Sagi Grimbergf7f7aab2015-10-13 19:11:39 +03001496 struct scatterlist idb_sg[1];
Bart Van Assche330179f2015-08-10 17:09:05 -07001497 int ret;
1498
1499 memset(&state, 0, sizeof(state));
1500 memset(&idb_desc, 0, sizeof(idb_desc));
1501 state.gen.next = next_mr;
1502 state.gen.end = end_mr;
1503 state.desc = &idb_desc;
Bart Van Assche330179f2015-08-10 17:09:05 -07001504 state.base_dma_addr = req->indirect_dma_addr;
1505 state.dma_len = idb_len;
Sagi Grimbergf7f7aab2015-10-13 19:11:39 +03001506
1507 if (dev->use_fast_reg) {
1508 state.sg = idb_sg;
Sagi Grimbergf7f7aab2015-10-13 19:11:39 +03001509 sg_set_buf(idb_sg, req->indirect_desc, idb_len);
1510 idb_sg->dma_address = req->indirect_dma_addr; /* hack! */
Christoph Hellwigfc925512015-12-01 10:18:30 -08001511#ifdef CONFIG_NEED_SG_DMA_LENGTH
1512 idb_sg->dma_length = idb_sg->length; /* hack^2 */
1513#endif
Doug Ledfordc6333f92015-12-15 14:10:44 -05001514 ret = srp_map_finish_fr(&state, req, ch, 1);
Sagi Grimbergf7f7aab2015-10-13 19:11:39 +03001515 if (ret < 0)
1516 return ret;
1517 } else if (dev->use_fmr) {
1518 state.pages = idb_pages;
1519 state.pages[0] = (req->indirect_dma_addr &
1520 dev->mr_page_mask);
1521 state.npages = 1;
1522 ret = srp_map_finish_fmr(&state, ch);
1523 if (ret < 0)
1524 return ret;
1525 } else {
1526 return -EINVAL;
1527 }
Bart Van Assche330179f2015-08-10 17:09:05 -07001528
1529 *idb_rkey = idb_desc.key;
1530
Sagi Grimbergf7f7aab2015-10-13 19:11:39 +03001531 return 0;
Bart Van Assche330179f2015-08-10 17:09:05 -07001532}
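/*
 * Editor's note (not from the original source): schematically, the memory
 * registered above is the physically contiguous descriptor table that
 * srp_map_data() built earlier:
 *
 *	req->indirect_desc --> [ srp_direct_buf 0 ]
 *	                       [ srp_direct_buf 1 ]  idb_len bytes in total
 *	                       [ ...              ]
 *
 * The single-entry scatterlist with its hand-patched dma_address (the
 * "hack" comments) is only needed because ib_map_mr_sg() consumes a
 * scatterlist while this buffer was mapped separately in the request setup
 * path. *idb_rkey is the key the target uses to RDMA-read the table.
 */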
1533
Bart Van Assche77269cd2016-04-22 14:13:09 -07001534/**
1535 * srp_map_data() - map SCSI data buffer onto an SRP request
1536 * @scmnd: SCSI command to map
1537 * @ch: SRP RDMA channel
1538 * @req: SRP request
1539 *
1540 * Returns the length in bytes of the SRP_CMD IU or a negative value if
1541 * mapping failed.
1542 */
Bart Van Assche509c07b2014-10-30 14:48:30 +01001543static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch,
Roland Dreieraef9ec32005-11-02 14:07:13 -08001544 struct srp_request *req)
1545{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001546 struct srp_target_port *target = ch->target;
Bart Van Assche76bc1e12014-05-20 15:05:21 +02001547 struct scatterlist *scat;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001548 struct srp_cmd *cmd = req->cmd->buf;
Bart Van Assche330179f2015-08-10 17:09:05 -07001549 int len, nents, count, ret;
Ralph Campbell85507bc2006-12-12 14:30:55 -08001550 struct srp_device *dev;
1551 struct ib_device *ibdev;
David Dillow8f26c9f2011-01-14 19:45:50 -05001552 struct srp_map_state state;
1553 struct srp_indirect_buf *indirect_hdr;
Bart Van Assche330179f2015-08-10 17:09:05 -07001554 u32 idb_len, table_len;
1555 __be32 idb_rkey;
David Dillow8f26c9f2011-01-14 19:45:50 -05001556 u8 fmt;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001557
FUJITA Tomonoribb350d12007-05-26 02:28:25 +09001558 if (!scsi_sglist(scmnd) || scmnd->sc_data_direction == DMA_NONE)
Roland Dreieraef9ec32005-11-02 14:07:13 -08001559 return sizeof (struct srp_cmd);
1560
1561 if (scmnd->sc_data_direction != DMA_FROM_DEVICE &&
1562 scmnd->sc_data_direction != DMA_TO_DEVICE) {
David Dillow7aa54bd2008-01-07 18:23:41 -05001563 shost_printk(KERN_WARNING, target->scsi_host,
1564 PFX "Unhandled data direction %d\n",
1565 scmnd->sc_data_direction);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001566 return -EINVAL;
1567 }
1568
FUJITA Tomonoribb350d12007-05-26 02:28:25 +09001569 nents = scsi_sg_count(scmnd);
1570 scat = scsi_sglist(scmnd);
Roland Dreiercf368712006-03-24 15:47:26 -08001571
Greg Kroah-Hartman05321932008-03-06 00:13:36 +01001572 dev = target->srp_host->srp_dev;
Ralph Campbell85507bc2006-12-12 14:30:55 -08001573 ibdev = dev->dev;
1574
1575 count = ib_dma_map_sg(ibdev, scat, nents, scmnd->sc_data_direction);
David Dillow8f26c9f2011-01-14 19:45:50 -05001576 if (unlikely(count == 0))
1577 return -EIO;
Roland Dreierf5358a12006-06-17 20:37:29 -07001578
1579 fmt = SRP_DATA_DESC_DIRECT;
1580 len = sizeof (struct srp_cmd) + sizeof (struct srp_direct_buf);
Roland Dreiercf368712006-03-24 15:47:26 -08001581
Bart Van Assche03f6fb92015-08-10 17:09:36 -07001582 if (count == 1 && target->global_mr) {
Roland Dreierf5358a12006-06-17 20:37:29 -07001583 /*
1584 * The midlayer only generated a single gather/scatter
1585 * entry, or DMA mapping coalesced everything to a
1586 * single entry. So a direct descriptor along with
1587 * the DMA MR suffices.
1588 */
Roland Dreieraef9ec32005-11-02 14:07:13 -08001589 struct srp_direct_buf *buf = (void *) cmd->add_data;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001590
Ralph Campbell85507bc2006-12-12 14:30:55 -08001591 buf->va = cpu_to_be64(ib_sg_dma_address(ibdev, scat));
Bart Van Assche03f6fb92015-08-10 17:09:36 -07001592 buf->key = cpu_to_be32(target->global_mr->rkey);
Ralph Campbell85507bc2006-12-12 14:30:55 -08001593 buf->len = cpu_to_be32(ib_sg_dma_len(ibdev, scat));
Roland Dreiercf368712006-03-24 15:47:26 -08001594
Bart Van Assche52ede082014-05-20 15:07:45 +02001595 req->nmdesc = 0;
David Dillow8f26c9f2011-01-14 19:45:50 -05001596 goto map_complete;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001597 }
1598
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001599 /*
1600 * We have more than one scatter/gather entry, so build our indirect
1601 * descriptor table, trying to merge as many entries as we can.
David Dillow8f26c9f2011-01-14 19:45:50 -05001602 */
1603 indirect_hdr = (void *) cmd->add_data;
1604
David Dillowc07d4242011-01-16 13:57:10 -05001605 ib_dma_sync_single_for_cpu(ibdev, req->indirect_dma_addr,
1606 target->indirect_size, DMA_TO_DEVICE);
1607
David Dillow8f26c9f2011-01-14 19:45:50 -05001608 memset(&state, 0, sizeof(state));
Sagi Grimberg26630e82015-10-13 19:11:38 +03001609 if (dev->use_fast_reg)
Bart Van Asschee012f362016-04-22 14:13:35 -07001610 ret = srp_map_sg_fr(&state, ch, req, scat, count);
Sagi Grimberg26630e82015-10-13 19:11:38 +03001611 else if (dev->use_fmr)
Bart Van Asschee012f362016-04-22 14:13:35 -07001612 ret = srp_map_sg_fmr(&state, ch, req, scat, count);
Sagi Grimberg26630e82015-10-13 19:11:38 +03001613 else
Bart Van Asschee012f362016-04-22 14:13:35 -07001614 ret = srp_map_sg_dma(&state, ch, req, scat, count);
1615 req->nmdesc = state.nmdesc;
1616 if (ret < 0)
1617 goto unmap;
David Dillow8f26c9f2011-01-14 19:45:50 -05001618
David Dillowc07d4242011-01-16 13:57:10 -05001619 /* We've mapped the request, now pull as much of the indirect
1620 * descriptor table as we can into the command buffer. If this
1621 * target is not using an external indirect table, we are
1622 * guaranteed to fit into the command, as the SCSI layer won't
1623 * give us more S/G entries than we allow.
David Dillow8f26c9f2011-01-14 19:45:50 -05001624 */
David Dillow8f26c9f2011-01-14 19:45:50 -05001625 if (state.ndesc == 1) {
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001626 /*
1627 * Memory registration collapsed the sg-list into one entry,
David Dillow8f26c9f2011-01-14 19:45:50 -05001628 * so use a direct descriptor.
1629 */
1630 struct srp_direct_buf *buf = (void *) cmd->add_data;
1631
David Dillowc07d4242011-01-16 13:57:10 -05001632 *buf = req->indirect_desc[0];
David Dillow8f26c9f2011-01-14 19:45:50 -05001633 goto map_complete;
1634 }
1635
David Dillowc07d4242011-01-16 13:57:10 -05001636 if (unlikely(target->cmd_sg_cnt < state.ndesc &&
1637 !target->allow_ext_sg)) {
1638 shost_printk(KERN_ERR, target->scsi_host,
1639 "Could not fit S/G list into SRP_CMD\n");
Bart Van Asschee012f362016-04-22 14:13:35 -07001640 ret = -EIO;
1641 goto unmap;
David Dillowc07d4242011-01-16 13:57:10 -05001642 }
1643
1644 count = min(state.ndesc, target->cmd_sg_cnt);
David Dillow8f26c9f2011-01-14 19:45:50 -05001645 table_len = state.ndesc * sizeof (struct srp_direct_buf);
Bart Van Assche330179f2015-08-10 17:09:05 -07001646 idb_len = sizeof(struct srp_indirect_buf) + table_len;
David Dillow8f26c9f2011-01-14 19:45:50 -05001647
1648 fmt = SRP_DATA_DESC_INDIRECT;
1649 len = sizeof(struct srp_cmd) + sizeof (struct srp_indirect_buf);
David Dillowc07d4242011-01-16 13:57:10 -05001650 len += count * sizeof (struct srp_direct_buf);
David Dillow8f26c9f2011-01-14 19:45:50 -05001651
David Dillowc07d4242011-01-16 13:57:10 -05001652 memcpy(indirect_hdr->desc_list, req->indirect_desc,
1653 count * sizeof (struct srp_direct_buf));
David Dillow8f26c9f2011-01-14 19:45:50 -05001654
Bart Van Assche03f6fb92015-08-10 17:09:36 -07001655 if (!target->global_mr) {
Bart Van Assche330179f2015-08-10 17:09:05 -07001656 ret = srp_map_idb(ch, req, state.gen.next, state.gen.end,
1657 idb_len, &idb_rkey);
1658 if (ret < 0)
Bart Van Asschee012f362016-04-22 14:13:35 -07001659 goto unmap;
Bart Van Assche330179f2015-08-10 17:09:05 -07001660 req->nmdesc++;
1661 } else {
Bart Van Asschea745f4f42015-12-01 10:18:47 -08001662 idb_rkey = cpu_to_be32(target->global_mr->rkey);
Bart Van Assche330179f2015-08-10 17:09:05 -07001663 }
1664
David Dillowc07d4242011-01-16 13:57:10 -05001665 indirect_hdr->table_desc.va = cpu_to_be64(req->indirect_dma_addr);
Bart Van Assche330179f2015-08-10 17:09:05 -07001666 indirect_hdr->table_desc.key = idb_rkey;
David Dillow8f26c9f2011-01-14 19:45:50 -05001667 indirect_hdr->table_desc.len = cpu_to_be32(table_len);
1668 indirect_hdr->len = cpu_to_be32(state.total_len);
1669
1670 if (scmnd->sc_data_direction == DMA_TO_DEVICE)
David Dillowc07d4242011-01-16 13:57:10 -05001671 cmd->data_out_desc_cnt = count;
David Dillow8f26c9f2011-01-14 19:45:50 -05001672 else
David Dillowc07d4242011-01-16 13:57:10 -05001673 cmd->data_in_desc_cnt = count;
1674
1675 ib_dma_sync_single_for_device(ibdev, req->indirect_dma_addr, table_len,
1676 DMA_TO_DEVICE);
David Dillow8f26c9f2011-01-14 19:45:50 -05001677
1678map_complete:
Roland Dreieraef9ec32005-11-02 14:07:13 -08001679 if (scmnd->sc_data_direction == DMA_TO_DEVICE)
1680 cmd->buf_fmt = fmt << 4;
1681 else
1682 cmd->buf_fmt = fmt;
1683
Roland Dreieraef9ec32005-11-02 14:07:13 -08001684 return len;
Bart Van Asschee012f362016-04-22 14:13:35 -07001685
1686unmap:
1687 srp_unmap_data(scmnd, ch, req);
Bart Van Asscheffc548b2016-04-22 14:14:15 -07001688 if (ret == -ENOMEM && req->nmdesc >= target->mr_pool_size)
1689 ret = -E2BIG;
Bart Van Asschee012f362016-04-22 14:13:35 -07001690 return ret;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001691}
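/*
 * Worked example (editor's addition, not from the original source):
 * suppose memory registration left state.ndesc = 3 descriptors and
 * cmd_sg_cnt >= 3, so all of them fit inline. With the usual SRP wire
 * sizes (srp_cmd 48 bytes, srp_indirect_buf header 20 bytes,
 * srp_direct_buf 16 bytes), the IU length computed above is
 *
 *	len = 48 + 20 + 3 * 16 = 116 bytes
 *
 * while table_desc still describes the full table in host memory, so the
 * target may fetch it by RDMA if it prefers.
 */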
1692
David Dillow05a1d752010-10-08 14:48:14 -04001693/*
Bart Van Assche76c75b22010-11-26 14:37:47 -05001694 * Return an IU to the free pool and, unless it is a response IU, also
 * give back the request-limit credit that was consumed along with it.
1695 */
Bart Van Assche509c07b2014-10-30 14:48:30 +01001696static void srp_put_tx_iu(struct srp_rdma_ch *ch, struct srp_iu *iu,
Bart Van Assche76c75b22010-11-26 14:37:47 -05001697 enum srp_iu_type iu_type)
1698{
1699 unsigned long flags;
1700
Bart Van Assche509c07b2014-10-30 14:48:30 +01001701 spin_lock_irqsave(&ch->lock, flags);
1702 list_add(&iu->list, &ch->free_tx);
Bart Van Assche76c75b22010-11-26 14:37:47 -05001703 if (iu_type != SRP_IU_RSP)
Bart Van Assche509c07b2014-10-30 14:48:30 +01001704 ++ch->req_lim;
1705 spin_unlock_irqrestore(&ch->lock, flags);
Bart Van Assche76c75b22010-11-26 14:37:47 -05001706}
1707
1708/*
Bart Van Assche509c07b2014-10-30 14:48:30 +01001709 * Must be called with ch->lock held to protect req_lim and free_tx.
Bart Van Asschee9684672010-11-26 15:08:38 -05001710 * If IU is not sent, it must be returned using srp_put_tx_iu().
David Dillow05a1d752010-10-08 14:48:14 -04001711 *
1712 * Note:
1713 * An upper limit for the number of allocated information units for each
1714 * request type is:
1715 * - SRP_IU_CMD: SRP_CMD_SQ_SIZE, since the SCSI mid-layer never queues
1716 * more than Scsi_Host.can_queue requests.
1717 * - SRP_IU_TSK_MGMT: SRP_TSK_MGMT_SQ_SIZE.
1718 * - SRP_IU_RSP: 1, since a conforming SRP target never sends more than
1719 * one unanswered SRP request to an initiator.
1720 */
Bart Van Assche509c07b2014-10-30 14:48:30 +01001721static struct srp_iu *__srp_get_tx_iu(struct srp_rdma_ch *ch,
David Dillow05a1d752010-10-08 14:48:14 -04001722 enum srp_iu_type iu_type)
1723{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001724 struct srp_target_port *target = ch->target;
David Dillow05a1d752010-10-08 14:48:14 -04001725 s32 rsv = (iu_type == SRP_IU_TSK_MGMT) ? 0 : SRP_TSK_MGMT_SQ_SIZE;
1726 struct srp_iu *iu;
1727
Christoph Hellwig1dc7b1f2015-11-13 12:57:13 +01001728 ib_process_cq_direct(ch->send_cq, -1);
David Dillow05a1d752010-10-08 14:48:14 -04001729
Bart Van Assche509c07b2014-10-30 14:48:30 +01001730 if (list_empty(&ch->free_tx))
David Dillow05a1d752010-10-08 14:48:14 -04001731 return NULL;
1732
1733 /* Initiator responses to target requests do not consume credits */
Bart Van Assche76c75b22010-11-26 14:37:47 -05001734 if (iu_type != SRP_IU_RSP) {
Bart Van Assche509c07b2014-10-30 14:48:30 +01001735 if (ch->req_lim <= rsv) {
Bart Van Assche76c75b22010-11-26 14:37:47 -05001736 ++target->zero_req_lim;
1737 return NULL;
1738 }
1739
Bart Van Assche509c07b2014-10-30 14:48:30 +01001740 --ch->req_lim;
David Dillow05a1d752010-10-08 14:48:14 -04001741 }
1742
Bart Van Assche509c07b2014-10-30 14:48:30 +01001743 iu = list_first_entry(&ch->free_tx, struct srp_iu, list);
Bart Van Assche76c75b22010-11-26 14:37:47 -05001744 list_del(&iu->list);
David Dillow05a1d752010-10-08 14:48:14 -04001745 return iu;
1746}
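/*
 * Editor's note (not from the original source): SRP flow control in brief.
 * Every SRP_IU_CMD consumes one request credit (ch->req_lim), and
 * SRP_TSK_MGMT_SQ_SIZE credits are held in reserve so ordinary commands
 * cannot starve task management; credits only come back through the
 * req_lim_delta field of target responses (see srp_process_rsp()). The
 * ib_process_cq_direct() call above polls the send CQ inline so that
 * already-completed sends are returned to ch->free_tx by srp_send_done()
 * before the list_empty() check runs.
 */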
1747
Christoph Hellwig1dc7b1f2015-11-13 12:57:13 +01001748static void srp_send_done(struct ib_cq *cq, struct ib_wc *wc)
1749{
1750 struct srp_iu *iu = container_of(wc->wr_cqe, struct srp_iu, cqe);
1751 struct srp_rdma_ch *ch = cq->cq_context;
1752
1753 if (unlikely(wc->status != IB_WC_SUCCESS)) {
1754 srp_handle_qp_err(cq, wc, "SEND");
1755 return;
1756 }
1757
1758 list_add(&iu->list, &ch->free_tx);
1759}
1760
Bart Van Assche509c07b2014-10-30 14:48:30 +01001761static int srp_post_send(struct srp_rdma_ch *ch, struct srp_iu *iu, int len)
David Dillow05a1d752010-10-08 14:48:14 -04001762{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001763 struct srp_target_port *target = ch->target;
David Dillow05a1d752010-10-08 14:48:14 -04001764 struct ib_sge list;
1765 struct ib_send_wr wr, *bad_wr;
David Dillow05a1d752010-10-08 14:48:14 -04001766
1767 list.addr = iu->dma;
1768 list.length = len;
David Dillow9af76272010-11-26 15:34:46 -05001769 list.lkey = target->lkey;
David Dillow05a1d752010-10-08 14:48:14 -04001770
Christoph Hellwig1dc7b1f2015-11-13 12:57:13 +01001771 iu->cqe.done = srp_send_done;
1772
David Dillow05a1d752010-10-08 14:48:14 -04001773 wr.next = NULL;
Christoph Hellwig1dc7b1f2015-11-13 12:57:13 +01001774 wr.wr_cqe = &iu->cqe;
David Dillow05a1d752010-10-08 14:48:14 -04001775 wr.sg_list = &list;
1776 wr.num_sge = 1;
1777 wr.opcode = IB_WR_SEND;
1778 wr.send_flags = IB_SEND_SIGNALED;
1779
Bart Van Assche509c07b2014-10-30 14:48:30 +01001780 return ib_post_send(ch->qp, &wr, &bad_wr);
David Dillow05a1d752010-10-08 14:48:14 -04001781}
1782
Bart Van Assche509c07b2014-10-30 14:48:30 +01001783static int srp_post_recv(struct srp_rdma_ch *ch, struct srp_iu *iu)
Bart Van Asschec996bb42010-07-30 10:59:05 +00001784{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001785 struct srp_target_port *target = ch->target;
Bart Van Asschec996bb42010-07-30 10:59:05 +00001786 struct ib_recv_wr wr, *bad_wr;
Bart Van Asschedcb4cb82010-11-26 13:22:48 -05001787 struct ib_sge list;
Bart Van Asschec996bb42010-07-30 10:59:05 +00001788
1789 list.addr = iu->dma;
1790 list.length = iu->size;
David Dillow9af76272010-11-26 15:34:46 -05001791 list.lkey = target->lkey;
Bart Van Asschec996bb42010-07-30 10:59:05 +00001792
Christoph Hellwig1dc7b1f2015-11-13 12:57:13 +01001793 iu->cqe.done = srp_recv_done;
1794
Bart Van Asschec996bb42010-07-30 10:59:05 +00001795 wr.next = NULL;
Christoph Hellwig1dc7b1f2015-11-13 12:57:13 +01001796 wr.wr_cqe = &iu->cqe;
Bart Van Asschec996bb42010-07-30 10:59:05 +00001797 wr.sg_list = &list;
1798 wr.num_sge = 1;
1799
Bart Van Assche509c07b2014-10-30 14:48:30 +01001800 return ib_post_recv(ch->qp, &wr, &bad_wr);
Bart Van Asschec996bb42010-07-30 10:59:05 +00001801}
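/*
 * Editor's note (not from the original source): the two post helpers above
 * differ only in direction. Each describes its IU with a single ib_sge
 * under target->lkey (local access; the rkeys used in the mapping code are
 * for memory the target touches remotely) and hangs the completion off
 * iu->cqe, which lets srp_send_done() and srp_recv_done() recover the IU
 * with container_of().
 */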
1802
Bart Van Assche509c07b2014-10-30 14:48:30 +01001803static void srp_process_rsp(struct srp_rdma_ch *ch, struct srp_rsp *rsp)
Roland Dreieraef9ec32005-11-02 14:07:13 -08001804{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001805 struct srp_target_port *target = ch->target;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001806 struct srp_request *req;
1807 struct scsi_cmnd *scmnd;
1808 unsigned long flags;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001809
Roland Dreieraef9ec32005-11-02 14:07:13 -08001810 if (unlikely(rsp->tag & SRP_TAG_TSK_MGMT)) {
Bart Van Assche509c07b2014-10-30 14:48:30 +01001811 spin_lock_irqsave(&ch->lock, flags);
1812 ch->req_lim += be32_to_cpu(rsp->req_lim_delta);
1813 spin_unlock_irqrestore(&ch->lock, flags);
Bart Van Assche94a91742010-11-26 14:50:09 -05001814
Bart Van Assche509c07b2014-10-30 14:48:30 +01001815 ch->tsk_mgmt_status = -1;
David Dillowf8b6e312010-11-26 13:02:21 -05001816 if (be32_to_cpu(rsp->resp_data_len) >= 4)
Bart Van Assche509c07b2014-10-30 14:48:30 +01001817 ch->tsk_mgmt_status = rsp->data[3];
1818 complete(&ch->tsk_mgmt_done);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001819 } else {
Bart Van Assche77f2c1a2014-10-02 15:29:25 +02001820 scmnd = scsi_host_find_tag(target->scsi_host, rsp->tag);
1821 if (scmnd) {
1822 req = (void *)scmnd->host_scribble;
1823 scmnd = srp_claim_req(ch, req, NULL, scmnd);
1824 }
Bart Van Assche22032992012-08-14 13:18:53 +00001825 if (!scmnd) {
David Dillow7aa54bd2008-01-07 18:23:41 -05001826 shost_printk(KERN_ERR, target->scsi_host,
Bart Van Assched92c0da2014-10-06 17:14:36 +02001827 "Null scmnd for RSP w/tag %#016llx received on ch %td / QP %#x\n",
1828 rsp->tag, ch - target->ch, ch->qp->qp_num);
Bart Van Assche22032992012-08-14 13:18:53 +00001829
Bart Van Assche509c07b2014-10-30 14:48:30 +01001830 spin_lock_irqsave(&ch->lock, flags);
1831 ch->req_lim += be32_to_cpu(rsp->req_lim_delta);
1832 spin_unlock_irqrestore(&ch->lock, flags);
Bart Van Assche22032992012-08-14 13:18:53 +00001833
1834 return;
1835 }
Roland Dreieraef9ec32005-11-02 14:07:13 -08001836 scmnd->result = rsp->status;
1837
1838 if (rsp->flags & SRP_RSP_FLAG_SNSVALID) {
1839 memcpy(scmnd->sense_buffer, rsp->data +
1840 be32_to_cpu(rsp->resp_data_len),
1841 min_t(int, be32_to_cpu(rsp->sense_data_len),
1842 SCSI_SENSE_BUFFERSIZE));
1843 }
1844
Bart Van Asschee7145312014-07-09 15:57:51 +02001845 if (unlikely(rsp->flags & SRP_RSP_FLAG_DIUNDER))
FUJITA Tomonoribb350d12007-05-26 02:28:25 +09001846 scsi_set_resid(scmnd, be32_to_cpu(rsp->data_in_res_cnt));
Bart Van Asschee7145312014-07-09 15:57:51 +02001847 else if (unlikely(rsp->flags & SRP_RSP_FLAG_DIOVER))
1848 scsi_set_resid(scmnd, -be32_to_cpu(rsp->data_in_res_cnt));
1849 else if (unlikely(rsp->flags & SRP_RSP_FLAG_DOUNDER))
1850 scsi_set_resid(scmnd, be32_to_cpu(rsp->data_out_res_cnt));
1851 else if (unlikely(rsp->flags & SRP_RSP_FLAG_DOOVER))
1852 scsi_set_resid(scmnd, -be32_to_cpu(rsp->data_out_res_cnt));
Roland Dreieraef9ec32005-11-02 14:07:13 -08001853
Bart Van Assche509c07b2014-10-30 14:48:30 +01001854 srp_free_req(ch, req, scmnd,
Bart Van Assche22032992012-08-14 13:18:53 +00001855 be32_to_cpu(rsp->req_lim_delta));
1856
David Dillowf8b6e312010-11-26 13:02:21 -05001857 scmnd->host_scribble = NULL;
1858 scmnd->scsi_done(scmnd);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001859 }
Roland Dreieraef9ec32005-11-02 14:07:13 -08001860}
1861
Bart Van Assche509c07b2014-10-30 14:48:30 +01001862static int srp_response_common(struct srp_rdma_ch *ch, s32 req_delta,
David Dillowbb125882010-10-08 14:40:47 -04001863 void *rsp, int len)
1864{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001865 struct srp_target_port *target = ch->target;
Bart Van Assche76c75b22010-11-26 14:37:47 -05001866 struct ib_device *dev = target->srp_host->srp_dev->dev;
David Dillowbb125882010-10-08 14:40:47 -04001867 unsigned long flags;
1868 struct srp_iu *iu;
Bart Van Assche76c75b22010-11-26 14:37:47 -05001869 int err;
David Dillowbb125882010-10-08 14:40:47 -04001870
Bart Van Assche509c07b2014-10-30 14:48:30 +01001871 spin_lock_irqsave(&ch->lock, flags);
1872 ch->req_lim += req_delta;
1873 iu = __srp_get_tx_iu(ch, SRP_IU_RSP);
1874 spin_unlock_irqrestore(&ch->lock, flags);
Bart Van Assche76c75b22010-11-26 14:37:47 -05001875
David Dillowbb125882010-10-08 14:40:47 -04001876 if (!iu) {
1877 shost_printk(KERN_ERR, target->scsi_host, PFX
1878 "no IU available to send response\n");
Bart Van Assche76c75b22010-11-26 14:37:47 -05001879 return 1;
David Dillowbb125882010-10-08 14:40:47 -04001880 }
1881
1882 ib_dma_sync_single_for_cpu(dev, iu->dma, len, DMA_TO_DEVICE);
1883 memcpy(iu->buf, rsp, len);
1884 ib_dma_sync_single_for_device(dev, iu->dma, len, DMA_TO_DEVICE);
1885
Bart Van Assche509c07b2014-10-30 14:48:30 +01001886 err = srp_post_send(ch, iu, len);
Bart Van Assche76c75b22010-11-26 14:37:47 -05001887 if (err) {
David Dillowbb125882010-10-08 14:40:47 -04001888 shost_printk(KERN_ERR, target->scsi_host, PFX
1889 "unable to post response: %d\n", err);
Bart Van Assche509c07b2014-10-30 14:48:30 +01001890 srp_put_tx_iu(ch, iu, SRP_IU_RSP);
Bart Van Assche76c75b22010-11-26 14:37:47 -05001891 }
David Dillowbb125882010-10-08 14:40:47 -04001892
David Dillowbb125882010-10-08 14:40:47 -04001893 return err;
1894}
1895
Bart Van Assche509c07b2014-10-30 14:48:30 +01001896static void srp_process_cred_req(struct srp_rdma_ch *ch,
David Dillowbb125882010-10-08 14:40:47 -04001897 struct srp_cred_req *req)
1898{
1899 struct srp_cred_rsp rsp = {
1900 .opcode = SRP_CRED_RSP,
1901 .tag = req->tag,
1902 };
1903 s32 delta = be32_to_cpu(req->req_lim_delta);
1904
Bart Van Assche509c07b2014-10-30 14:48:30 +01001905 if (srp_response_common(ch, delta, &rsp, sizeof(rsp)))
1906 shost_printk(KERN_ERR, ch->target->scsi_host, PFX
David Dillowbb125882010-10-08 14:40:47 -04001907 "problems processing SRP_CRED_REQ\n");
1908}
1909
Bart Van Assche509c07b2014-10-30 14:48:30 +01001910static void srp_process_aer_req(struct srp_rdma_ch *ch,
David Dillowbb125882010-10-08 14:40:47 -04001911 struct srp_aer_req *req)
1912{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001913 struct srp_target_port *target = ch->target;
David Dillowbb125882010-10-08 14:40:47 -04001914 struct srp_aer_rsp rsp = {
1915 .opcode = SRP_AER_RSP,
1916 .tag = req->tag,
1917 };
1918 s32 delta = be32_to_cpu(req->req_lim_delta);
1919
1920 shost_printk(KERN_ERR, target->scsi_host, PFX
Bart Van Assche985aa492015-05-18 13:27:14 +02001921 "ignoring AER for LUN %llu\n", scsilun_to_int(&req->lun));
David Dillowbb125882010-10-08 14:40:47 -04001922
Bart Van Assche509c07b2014-10-30 14:48:30 +01001923 if (srp_response_common(ch, delta, &rsp, sizeof(rsp)))
David Dillowbb125882010-10-08 14:40:47 -04001924 shost_printk(KERN_ERR, target->scsi_host, PFX
1925 "problems processing SRP_AER_REQ\n");
1926}
1927
Christoph Hellwig1dc7b1f2015-11-13 12:57:13 +01001928static void srp_recv_done(struct ib_cq *cq, struct ib_wc *wc)
Roland Dreieraef9ec32005-11-02 14:07:13 -08001929{
Christoph Hellwig1dc7b1f2015-11-13 12:57:13 +01001930 struct srp_iu *iu = container_of(wc->wr_cqe, struct srp_iu, cqe);
1931 struct srp_rdma_ch *ch = cq->cq_context;
Bart Van Assche509c07b2014-10-30 14:48:30 +01001932 struct srp_target_port *target = ch->target;
Bart Van Asschedcb4cb82010-11-26 13:22:48 -05001933 struct ib_device *dev = target->srp_host->srp_dev->dev;
Bart Van Asschec996bb42010-07-30 10:59:05 +00001934 int res;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001935 u8 opcode;
1936
Christoph Hellwig1dc7b1f2015-11-13 12:57:13 +01001937 if (unlikely(wc->status != IB_WC_SUCCESS)) {
1938 srp_handle_qp_err(cq, wc, "RECV");
1939 return;
1940 }
1941
Bart Van Assche509c07b2014-10-30 14:48:30 +01001942 ib_dma_sync_single_for_cpu(dev, iu->dma, ch->max_ti_iu_len,
Ralph Campbell85507bc2006-12-12 14:30:55 -08001943 DMA_FROM_DEVICE);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001944
1945 opcode = *(u8 *) iu->buf;
1946
1947 if (0) {
David Dillow7aa54bd2008-01-07 18:23:41 -05001948 shost_printk(KERN_ERR, target->scsi_host,
1949 PFX "recv completion, opcode 0x%02x\n", opcode);
Bart Van Assche7a700812010-07-29 15:56:37 +00001950 print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 8, 1,
1951 iu->buf, wc->byte_len, true);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001952 }
1953
1954 switch (opcode) {
1955 case SRP_RSP:
Bart Van Assche509c07b2014-10-30 14:48:30 +01001956 srp_process_rsp(ch, iu->buf);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001957 break;
1958
David Dillowbb125882010-10-08 14:40:47 -04001959 case SRP_CRED_REQ:
Bart Van Assche509c07b2014-10-30 14:48:30 +01001960 srp_process_cred_req(ch, iu->buf);
David Dillowbb125882010-10-08 14:40:47 -04001961 break;
1962
1963 case SRP_AER_REQ:
Bart Van Assche509c07b2014-10-30 14:48:30 +01001964 srp_process_aer_req(ch, iu->buf);
David Dillowbb125882010-10-08 14:40:47 -04001965 break;
1966
Roland Dreieraef9ec32005-11-02 14:07:13 -08001967 case SRP_T_LOGOUT:
1968 /* XXX Handle target logout */
David Dillow7aa54bd2008-01-07 18:23:41 -05001969 shost_printk(KERN_WARNING, target->scsi_host,
1970 PFX "Got target logout request\n");
Roland Dreieraef9ec32005-11-02 14:07:13 -08001971 break;
1972
1973 default:
David Dillow7aa54bd2008-01-07 18:23:41 -05001974 shost_printk(KERN_WARNING, target->scsi_host,
1975 PFX "Unhandled SRP opcode 0x%02x\n", opcode);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001976 break;
1977 }
1978
Bart Van Assche509c07b2014-10-30 14:48:30 +01001979 ib_dma_sync_single_for_device(dev, iu->dma, ch->max_ti_iu_len,
Ralph Campbell85507bc2006-12-12 14:30:55 -08001980 DMA_FROM_DEVICE);
Bart Van Asschec996bb42010-07-30 10:59:05 +00001981
Bart Van Assche509c07b2014-10-30 14:48:30 +01001982 res = srp_post_recv(ch, iu);
Bart Van Asschec996bb42010-07-30 10:59:05 +00001983 if (res != 0)
1984 shost_printk(KERN_ERR, target->scsi_host,
1985 PFX "Recv failed with error code %d\n", res);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001986}
1987
Bart Van Asschec1120f82013-10-26 14:35:08 +02001988/**
1989 * srp_tl_err_work() - handle a transport layer error
Bart Van Asscheaf246632014-05-20 15:04:21 +02001990 * @work: Work structure embedded in an SRP target port.
Bart Van Asschec1120f82013-10-26 14:35:08 +02001991 *
1992 * Note: This function may get invoked before the rport has been created,
1993 * hence the target->rport test.
1994 */
1995static void srp_tl_err_work(struct work_struct *work)
1996{
1997 struct srp_target_port *target;
1998
1999 target = container_of(work, struct srp_target_port, tl_err_work);
2000 if (target->rport)
2001 srp_start_tl_fail_timers(target->rport);
2002}
2003
Christoph Hellwig1dc7b1f2015-11-13 12:57:13 +01002004static void srp_handle_qp_err(struct ib_cq *cq, struct ib_wc *wc,
2005 const char *opname)
Bart Van Assche948d1e82011-09-03 09:25:42 +02002006{
Christoph Hellwig1dc7b1f2015-11-13 12:57:13 +01002007 struct srp_rdma_ch *ch = cq->cq_context;
Bart Van Assche7dad6b22014-10-21 18:00:35 +02002008 struct srp_target_port *target = ch->target;
2009
Bart Van Asschec014c8c2015-05-18 13:23:57 +02002010 if (ch->connected && !target->qp_in_error) {
Christoph Hellwig1dc7b1f2015-11-13 12:57:13 +01002011 shost_printk(KERN_ERR, target->scsi_host,
2012 PFX "failed %s status %s (%d) for CQE %p\n",
2013 opname, ib_wc_status_msg(wc->status), wc->status,
2014 wc->wr_cqe);
Bart Van Asschec1120f82013-10-26 14:35:08 +02002015 queue_work(system_long_wq, &target->tl_err_work);
Bart Van Assche4f0af692012-11-26 11:16:40 +01002016 }
Bart Van Assche948d1e82011-09-03 09:25:42 +02002017 target->qp_in_error = true;
2018}
2019
Bart Van Assche76c75b22010-11-26 14:37:47 -05002020static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
Roland Dreieraef9ec32005-11-02 14:07:13 -08002021{
Bart Van Assche76c75b22010-11-26 14:37:47 -05002022 struct srp_target_port *target = host_to_target(shost);
Bart Van Asschea95cadb2013-10-26 14:37:17 +02002023 struct srp_rport *rport = target->rport;
Bart Van Assche509c07b2014-10-30 14:48:30 +01002024 struct srp_rdma_ch *ch;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002025 struct srp_request *req;
2026 struct srp_iu *iu;
2027 struct srp_cmd *cmd;
Ralph Campbell85507bc2006-12-12 14:30:55 -08002028 struct ib_device *dev;
Bart Van Assche76c75b22010-11-26 14:37:47 -05002029 unsigned long flags;
Bart Van Assche77f2c1a2014-10-02 15:29:25 +02002030 u32 tag;
2031 u16 idx;
Bart Van Assched1b42892014-05-20 15:07:20 +02002032 int len, ret;
Bart Van Asschea95cadb2013-10-26 14:37:17 +02002033 const bool in_scsi_eh = !in_interrupt() && current == shost->ehandler;
2034
2035 /*
2036 * The SCSI EH thread is the only context from which srp_queuecommand()
2037 * can get invoked for blocked devices (SDEV_BLOCK /
2038 * SDEV_CREATED_BLOCK). Avoid racing with srp_reconnect_rport() by
2039 * locking the rport mutex if invoked from inside the SCSI EH.
2040 */
2041 if (in_scsi_eh)
2042 mutex_lock(&rport->mutex);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002043
Bart Van Assched1b42892014-05-20 15:07:20 +02002044 scmnd->result = srp_chkready(target->rport);
2045 if (unlikely(scmnd->result))
2046 goto err;
Bart Van Assche2ce19e72013-02-21 17:20:00 +00002047
Bart Van Assche77f2c1a2014-10-02 15:29:25 +02002048 WARN_ON_ONCE(scmnd->request->tag < 0);
2049 tag = blk_mq_unique_tag(scmnd->request);
Bart Van Assched92c0da2014-10-06 17:14:36 +02002050 ch = &target->ch[blk_mq_unique_tag_to_hwq(tag)];
Bart Van Assche77f2c1a2014-10-02 15:29:25 +02002051 idx = blk_mq_unique_tag_to_tag(tag);
2052 WARN_ONCE(idx >= target->req_ring_size, "%s: tag %#x: idx %d >= %d\n",
2053 dev_name(&shost->shost_gendev), tag, idx,
2054 target->req_ring_size);
Bart Van Assche509c07b2014-10-30 14:48:30 +01002055
2056 spin_lock_irqsave(&ch->lock, flags);
2057 iu = __srp_get_tx_iu(ch, SRP_IU_CMD);
Bart Van Assche509c07b2014-10-30 14:48:30 +01002058 spin_unlock_irqrestore(&ch->lock, flags);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002059
Bart Van Assche77f2c1a2014-10-02 15:29:25 +02002060 if (!iu)
2061 goto err;
2062
2063 req = &ch->req_ring[idx];
Greg Kroah-Hartman05321932008-03-06 00:13:36 +01002064 dev = target->srp_host->srp_dev->dev;
David Dillow49248642011-01-14 18:23:24 -05002065 ib_dma_sync_single_for_cpu(dev, iu->dma, target->max_iu_len,
Ralph Campbell85507bc2006-12-12 14:30:55 -08002066 DMA_TO_DEVICE);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002067
David Dillowf8b6e312010-11-26 13:02:21 -05002068 scmnd->host_scribble = (void *) req;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002069
2070 cmd = iu->buf;
2071 memset(cmd, 0, sizeof *cmd);
2072
2073 cmd->opcode = SRP_CMD;
Bart Van Assche985aa492015-05-18 13:27:14 +02002074 int_to_scsilun(scmnd->device->lun, &cmd->lun);
Bart Van Assche77f2c1a2014-10-02 15:29:25 +02002075 cmd->tag = tag;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002076 memcpy(cmd->cdb, scmnd->cmnd, scmnd->cmd_len);
2077
Roland Dreieraef9ec32005-11-02 14:07:13 -08002078 req->scmnd = scmnd;
2079 req->cmd = iu;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002080
Bart Van Assche509c07b2014-10-30 14:48:30 +01002081 len = srp_map_data(scmnd, ch, req);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002082 if (len < 0) {
David Dillow7aa54bd2008-01-07 18:23:41 -05002083 shost_printk(KERN_ERR, target->scsi_host,
Bart Van Assched1b42892014-05-20 15:07:20 +02002084 PFX "Failed to map data (%d)\n", len);
2085 /*
2086 * If we ran out of memory descriptors (-ENOMEM) because an
2087 * application is queuing many requests with more than
Bart Van Assche52ede082014-05-20 15:07:45 +02002088 * max_pages_per_mr sg-list elements, tell the SCSI mid-layer
Bart Van Assched1b42892014-05-20 15:07:20 +02002089 * to reduce queue depth temporarily.
2090 */
2091 scmnd->result = len == -ENOMEM ?
2092 DID_OK << 16 | QUEUE_FULL << 1 : DID_ERROR << 16;
Bart Van Assche76c75b22010-11-26 14:37:47 -05002093 goto err_iu;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002094 }
2095
David Dillow49248642011-01-14 18:23:24 -05002096 ib_dma_sync_single_for_device(dev, iu->dma, target->max_iu_len,
Ralph Campbell85507bc2006-12-12 14:30:55 -08002097 DMA_TO_DEVICE);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002098
Bart Van Assche509c07b2014-10-30 14:48:30 +01002099 if (srp_post_send(ch, iu, len)) {
David Dillow7aa54bd2008-01-07 18:23:41 -05002100 shost_printk(KERN_ERR, target->scsi_host, PFX "Send failed\n");
Roland Dreieraef9ec32005-11-02 14:07:13 -08002101 goto err_unmap;
2102 }
2103
Bart Van Assched1b42892014-05-20 15:07:20 +02002104 ret = 0;
2105
Bart Van Asschea95cadb2013-10-26 14:37:17 +02002106unlock_rport:
2107 if (in_scsi_eh)
2108 mutex_unlock(&rport->mutex);
2109
Bart Van Assched1b42892014-05-20 15:07:20 +02002110 return ret;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002111
2112err_unmap:
Bart Van Assche509c07b2014-10-30 14:48:30 +01002113 srp_unmap_data(scmnd, ch, req);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002114
Bart Van Assche76c75b22010-11-26 14:37:47 -05002115err_iu:
Bart Van Assche509c07b2014-10-30 14:48:30 +01002116 srp_put_tx_iu(ch, iu, SRP_IU_CMD);
Bart Van Assche76c75b22010-11-26 14:37:47 -05002117
Bart Van Assche024ca902014-05-20 15:03:49 +02002118 /*
2119	 * Clear req->scmnd so that the loops that iterate over the request
2120	 * ring cannot encounter a dangling SCSI command pointer.
2121 */
2122 req->scmnd = NULL;
2123
Bart Van Assched1b42892014-05-20 15:07:20 +02002124err:
2125 if (scmnd->result) {
2126 scmnd->scsi_done(scmnd);
2127 ret = 0;
2128 } else {
2129 ret = SCSI_MLQUEUE_HOST_BUSY;
2130 }
Bart Van Asschea95cadb2013-10-26 14:37:17 +02002131
Bart Van Assched1b42892014-05-20 15:07:20 +02002132 goto unlock_rport;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002133}
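/*
 * Illustrative sketch (editor's addition, not from the original source):
 * the tag decomposition above relies on blk-mq packing the hardware queue
 * index into the upper 16 bits of the unique tag:
 */
#if 0
	u32 unique = blk_mq_unique_tag(scmnd->request);
	u16 hwq = blk_mq_unique_tag_to_hwq(unique);	/* unique >> 16 */
	u16 idx = blk_mq_unique_tag_to_tag(unique);	/* unique & 0xffff */
	struct srp_rdma_ch *ch = &target->ch[hwq];	/* one channel per hw queue */
	struct srp_request *req = &ch->req_ring[idx];	/* one slot per tag */
#endif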
2134
Bart Van Assche4d73f952013-10-26 14:40:37 +02002135/*
2136 * Note: the resources allocated in this function are freed in
Bart Van Assche509c07b2014-10-30 14:48:30 +01002137 * srp_free_ch_ib().
Bart Van Assche4d73f952013-10-26 14:40:37 +02002138 */
Bart Van Assche509c07b2014-10-30 14:48:30 +01002139static int srp_alloc_iu_bufs(struct srp_rdma_ch *ch)
Roland Dreieraef9ec32005-11-02 14:07:13 -08002140{
Bart Van Assche509c07b2014-10-30 14:48:30 +01002141 struct srp_target_port *target = ch->target;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002142 int i;
2143
Bart Van Assche509c07b2014-10-30 14:48:30 +01002144 ch->rx_ring = kcalloc(target->queue_size, sizeof(*ch->rx_ring),
2145 GFP_KERNEL);
2146 if (!ch->rx_ring)
Bart Van Assche4d73f952013-10-26 14:40:37 +02002147 goto err_no_ring;
Bart Van Assche509c07b2014-10-30 14:48:30 +01002148 ch->tx_ring = kcalloc(target->queue_size, sizeof(*ch->tx_ring),
2149 GFP_KERNEL);
2150 if (!ch->tx_ring)
Bart Van Assche4d73f952013-10-26 14:40:37 +02002151 goto err_no_ring;
2152
2153 for (i = 0; i < target->queue_size; ++i) {
Bart Van Assche509c07b2014-10-30 14:48:30 +01002154 ch->rx_ring[i] = srp_alloc_iu(target->srp_host,
2155 ch->max_ti_iu_len,
2156 GFP_KERNEL, DMA_FROM_DEVICE);
2157 if (!ch->rx_ring[i])
Roland Dreieraef9ec32005-11-02 14:07:13 -08002158 goto err;
2159 }
2160
Bart Van Assche4d73f952013-10-26 14:40:37 +02002161 for (i = 0; i < target->queue_size; ++i) {
Bart Van Assche509c07b2014-10-30 14:48:30 +01002162 ch->tx_ring[i] = srp_alloc_iu(target->srp_host,
2163 target->max_iu_len,
2164 GFP_KERNEL, DMA_TO_DEVICE);
2165 if (!ch->tx_ring[i])
Roland Dreieraef9ec32005-11-02 14:07:13 -08002166 goto err;
Bart Van Asschedcb4cb82010-11-26 13:22:48 -05002167
Bart Van Assche509c07b2014-10-30 14:48:30 +01002168 list_add(&ch->tx_ring[i]->list, &ch->free_tx);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002169 }
2170
2171 return 0;
2172
2173err:
Bart Van Assche4d73f952013-10-26 14:40:37 +02002174 for (i = 0; i < target->queue_size; ++i) {
Bart Van Assche509c07b2014-10-30 14:48:30 +01002175 srp_free_iu(target->srp_host, ch->rx_ring[i]);
2176 srp_free_iu(target->srp_host, ch->tx_ring[i]);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002177 }
2178
Bart Van Assche4d73f952013-10-26 14:40:37 +02002179
2180err_no_ring:
Bart Van Assche509c07b2014-10-30 14:48:30 +01002181 kfree(ch->tx_ring);
2182 ch->tx_ring = NULL;
2183 kfree(ch->rx_ring);
2184 ch->rx_ring = NULL;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002185
2186 return -ENOMEM;
2187}
2188
Bart Van Asschec9b03c12011-09-03 09:34:48 +02002189static uint32_t srp_compute_rq_tmo(struct ib_qp_attr *qp_attr, int attr_mask)
2190{
2191 uint64_t T_tr_ns, max_compl_time_ms;
2192 uint32_t rq_tmo_jiffies;
2193
2194 /*
2195 * According to section 11.2.4.2 in the IBTA spec (Modify Queue Pair,
2196 * table 91), both the QP timeout and the retry count have to be set
2197 * for RC QP's during the RTR to RTS transition.
2198 */
2199 WARN_ON_ONCE((attr_mask & (IB_QP_TIMEOUT | IB_QP_RETRY_CNT)) !=
2200 (IB_QP_TIMEOUT | IB_QP_RETRY_CNT));
2201
2202 /*
2203 * Set target->rq_tmo_jiffies to one second more than the largest time
2204 * it can take before an error completion is generated. See also
2205 * C9-140..142 in the IBTA spec for more information about how to
2206 * convert the QP Local ACK Timeout value to nanoseconds.
2207 */
2208 T_tr_ns = 4096 * (1ULL << qp_attr->timeout);
2209 max_compl_time_ms = qp_attr->retry_cnt * 4 * T_tr_ns;
2210 do_div(max_compl_time_ms, NSEC_PER_MSEC);
2211 rq_tmo_jiffies = msecs_to_jiffies(max_compl_time_ms + 1000);
2212
2213 return rq_tmo_jiffies;
2214}
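/*
 * Worked example (editor's addition, not from the original source), with
 * typical rather than file-provided values: for a local ACK timeout of 19
 * and a retry_cnt of 7,
 *
 *	T_tr           = 4096 ns * 2^19     ~= 2.147 s
 *	max_compl_time = 7 * 4 * T_tr       ~= 60.1 s
 *	rq_tmo_jiffies = msecs_to_jiffies(60129 + 1000)
 *
 * i.e. the transport layer waits about one second longer than the worst
 * case in which the HCA can still generate an error completion.
 */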
2215
David Dillow961e0be2011-01-14 17:32:07 -05002216static void srp_cm_rep_handler(struct ib_cm_id *cm_id,
Bart Van Asschee6300cb2015-07-31 14:12:48 -07002217 const struct srp_login_rsp *lrsp,
Bart Van Assche509c07b2014-10-30 14:48:30 +01002218 struct srp_rdma_ch *ch)
David Dillow961e0be2011-01-14 17:32:07 -05002219{
Bart Van Assche509c07b2014-10-30 14:48:30 +01002220 struct srp_target_port *target = ch->target;
David Dillow961e0be2011-01-14 17:32:07 -05002221 struct ib_qp_attr *qp_attr = NULL;
2222 int attr_mask = 0;
2223 int ret;
2224 int i;
2225
2226 if (lrsp->opcode == SRP_LOGIN_RSP) {
Bart Van Assche509c07b2014-10-30 14:48:30 +01002227 ch->max_ti_iu_len = be32_to_cpu(lrsp->max_ti_iu_len);
2228 ch->req_lim = be32_to_cpu(lrsp->req_lim_delta);
David Dillow961e0be2011-01-14 17:32:07 -05002229
2230 /*
2231 * Reserve credits for task management so we don't
2232 * bounce requests back to the SCSI mid-layer.
2233 */
2234 target->scsi_host->can_queue
Bart Van Assche509c07b2014-10-30 14:48:30 +01002235 = min(ch->req_lim - SRP_TSK_MGMT_SQ_SIZE,
David Dillow961e0be2011-01-14 17:32:07 -05002236 target->scsi_host->can_queue);
Bart Van Assche4d73f952013-10-26 14:40:37 +02002237 target->scsi_host->cmd_per_lun
2238 = min_t(int, target->scsi_host->can_queue,
2239 target->scsi_host->cmd_per_lun);
David Dillow961e0be2011-01-14 17:32:07 -05002240 } else {
2241 shost_printk(KERN_WARNING, target->scsi_host,
2242 PFX "Unhandled RSP opcode %#x\n", lrsp->opcode);
2243 ret = -ECONNRESET;
2244 goto error;
2245 }
2246
Bart Van Assche509c07b2014-10-30 14:48:30 +01002247 if (!ch->rx_ring) {
2248 ret = srp_alloc_iu_bufs(ch);
David Dillow961e0be2011-01-14 17:32:07 -05002249 if (ret)
2250 goto error;
2251 }
2252
2253 ret = -ENOMEM;
2254 qp_attr = kmalloc(sizeof *qp_attr, GFP_KERNEL);
2255 if (!qp_attr)
2256 goto error;
2257
2258 qp_attr->qp_state = IB_QPS_RTR;
2259 ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
2260 if (ret)
2261 goto error_free;
2262
Bart Van Assche509c07b2014-10-30 14:48:30 +01002263 ret = ib_modify_qp(ch->qp, qp_attr, attr_mask);
David Dillow961e0be2011-01-14 17:32:07 -05002264 if (ret)
2265 goto error_free;
2266
Bart Van Assche4d73f952013-10-26 14:40:37 +02002267 for (i = 0; i < target->queue_size; i++) {
Bart Van Assche509c07b2014-10-30 14:48:30 +01002268 struct srp_iu *iu = ch->rx_ring[i];
2269
2270 ret = srp_post_recv(ch, iu);
David Dillow961e0be2011-01-14 17:32:07 -05002271 if (ret)
2272 goto error_free;
2273 }
2274
2275 qp_attr->qp_state = IB_QPS_RTS;
2276 ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
2277 if (ret)
2278 goto error_free;
2279
Bart Van Asschec9b03c12011-09-03 09:34:48 +02002280 target->rq_tmo_jiffies = srp_compute_rq_tmo(qp_attr, attr_mask);
2281
Bart Van Assche509c07b2014-10-30 14:48:30 +01002282 ret = ib_modify_qp(ch->qp, qp_attr, attr_mask);
David Dillow961e0be2011-01-14 17:32:07 -05002283 if (ret)
2284 goto error_free;
2285
2286 ret = ib_send_cm_rtu(cm_id, NULL, 0);
2287
2288error_free:
2289 kfree(qp_attr);
2290
2291error:
Bart Van Assche509c07b2014-10-30 14:48:30 +01002292 ch->status = ret;
David Dillow961e0be2011-01-14 17:32:07 -05002293}
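/*
 * Illustrative sketch (editor's addition, not from the original source):
 * with the error handling elided, the handler above drives the standard
 * CM-assisted queue pair transition:
 */
#if 0
	qp_attr->qp_state = IB_QPS_RTR;		/* ready to receive */
	ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
	ib_modify_qp(ch->qp, qp_attr, attr_mask);
	/* ... post all receive buffers while in RTR ... */
	qp_attr->qp_state = IB_QPS_RTS;		/* ready to send */
	ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
	ib_modify_qp(ch->qp, qp_attr, attr_mask);
	ib_send_cm_rtu(cm_id, NULL, 0);		/* complete the handshake */
#endif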
2294
Roland Dreieraef9ec32005-11-02 14:07:13 -08002295static void srp_cm_rej_handler(struct ib_cm_id *cm_id,
2296 struct ib_cm_event *event,
Bart Van Assche509c07b2014-10-30 14:48:30 +01002297 struct srp_rdma_ch *ch)
Roland Dreieraef9ec32005-11-02 14:07:13 -08002298{
Bart Van Assche509c07b2014-10-30 14:48:30 +01002299 struct srp_target_port *target = ch->target;
David Dillow7aa54bd2008-01-07 18:23:41 -05002300 struct Scsi_Host *shost = target->scsi_host;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002301 struct ib_class_port_info *cpi;
2302 int opcode;
2303
2304 switch (event->param.rej_rcvd.reason) {
2305 case IB_CM_REJ_PORT_CM_REDIRECT:
2306 cpi = event->param.rej_rcvd.ari;
Bart Van Assche509c07b2014-10-30 14:48:30 +01002307 ch->path.dlid = cpi->redirect_lid;
2308 ch->path.pkey = cpi->redirect_pkey;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002309 cm_id->remote_cm_qpn = be32_to_cpu(cpi->redirect_qp) & 0x00ffffff;
Bart Van Assche509c07b2014-10-30 14:48:30 +01002310 memcpy(ch->path.dgid.raw, cpi->redirect_gid, 16);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002311
Bart Van Assche509c07b2014-10-30 14:48:30 +01002312 ch->status = ch->path.dlid ?
Roland Dreieraef9ec32005-11-02 14:07:13 -08002313 SRP_DLID_REDIRECT : SRP_PORT_REDIRECT;
2314 break;
2315
2316 case IB_CM_REJ_PORT_REDIRECT:
Roland Dreier5d7cbfd2007-08-03 10:45:18 -07002317 if (srp_target_is_topspin(target)) {
Roland Dreieraef9ec32005-11-02 14:07:13 -08002318 /*
2319 * Topspin/Cisco SRP gateways incorrectly send
2320 * reject reason code 25 when they mean 24
2321 * (port redirect).
2322 */
Bart Van Assche509c07b2014-10-30 14:48:30 +01002323 memcpy(ch->path.dgid.raw,
Roland Dreieraef9ec32005-11-02 14:07:13 -08002324 event->param.rej_rcvd.ari, 16);
2325
David Dillow7aa54bd2008-01-07 18:23:41 -05002326 shost_printk(KERN_DEBUG, shost,
2327 PFX "Topspin/Cisco redirect to target port GID %016llx%016llx\n",
Bart Van Assche509c07b2014-10-30 14:48:30 +01002328 be64_to_cpu(ch->path.dgid.global.subnet_prefix),
2329 be64_to_cpu(ch->path.dgid.global.interface_id));
Roland Dreieraef9ec32005-11-02 14:07:13 -08002330
Bart Van Assche509c07b2014-10-30 14:48:30 +01002331 ch->status = SRP_PORT_REDIRECT;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002332 } else {
David Dillow7aa54bd2008-01-07 18:23:41 -05002333 shost_printk(KERN_WARNING, shost,
2334 " REJ reason: IB_CM_REJ_PORT_REDIRECT\n");
Bart Van Assche509c07b2014-10-30 14:48:30 +01002335 ch->status = -ECONNRESET;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002336 }
2337 break;
2338
2339 case IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID:
David Dillow7aa54bd2008-01-07 18:23:41 -05002340 shost_printk(KERN_WARNING, shost,
2341 " REJ reason: IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID\n");
Bart Van Assche509c07b2014-10-30 14:48:30 +01002342 ch->status = -ECONNRESET;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002343 break;
2344
2345 case IB_CM_REJ_CONSUMER_DEFINED:
2346 opcode = *(u8 *) event->private_data;
2347 if (opcode == SRP_LOGIN_REJ) {
2348 struct srp_login_rej *rej = event->private_data;
2349 u32 reason = be32_to_cpu(rej->reason);
2350
2351 if (reason == SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE)
David Dillow7aa54bd2008-01-07 18:23:41 -05002352 shost_printk(KERN_WARNING, shost,
2353 PFX "SRP_LOGIN_REJ: requested max_it_iu_len too large\n");
Roland Dreieraef9ec32005-11-02 14:07:13 -08002354 else
Bart Van Asschee7ffde02014-03-14 13:52:21 +01002355 shost_printk(KERN_WARNING, shost, PFX
2356 "SRP LOGIN from %pI6 to %pI6 REJECTED, reason 0x%08x\n",
Bart Van Assche747fe002014-10-30 14:48:05 +01002357 target->sgid.raw,
2358 target->orig_dgid.raw, reason);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002359 } else
David Dillow7aa54bd2008-01-07 18:23:41 -05002360 shost_printk(KERN_WARNING, shost,
2361 " REJ reason: IB_CM_REJ_CONSUMER_DEFINED,"
2362 " opcode 0x%02x\n", opcode);
Bart Van Assche509c07b2014-10-30 14:48:30 +01002363 ch->status = -ECONNRESET;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002364 break;
2365
David Dillow9fe4bcf2008-01-08 17:08:52 -05002366 case IB_CM_REJ_STALE_CONN:
2367 shost_printk(KERN_WARNING, shost, " REJ reason: stale connection\n");
Bart Van Assche509c07b2014-10-30 14:48:30 +01002368 ch->status = SRP_STALE_CONN;
David Dillow9fe4bcf2008-01-08 17:08:52 -05002369 break;
2370
Roland Dreieraef9ec32005-11-02 14:07:13 -08002371 default:
David Dillow7aa54bd2008-01-07 18:23:41 -05002372 shost_printk(KERN_WARNING, shost, " REJ reason 0x%x\n",
2373 event->param.rej_rcvd.reason);
Bart Van Assche509c07b2014-10-30 14:48:30 +01002374 ch->status = -ECONNRESET;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002375 }
2376}
2377
2378static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
2379{
Bart Van Assche509c07b2014-10-30 14:48:30 +01002380 struct srp_rdma_ch *ch = cm_id->context;
2381 struct srp_target_port *target = ch->target;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002382 int comp = 0;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002383
2384 switch (event->event) {
2385 case IB_CM_REQ_ERROR:
David Dillow7aa54bd2008-01-07 18:23:41 -05002386 shost_printk(KERN_DEBUG, target->scsi_host,
2387 PFX "Sending CM REQ failed\n");
Roland Dreieraef9ec32005-11-02 14:07:13 -08002388 comp = 1;
Bart Van Assche509c07b2014-10-30 14:48:30 +01002389 ch->status = -ECONNRESET;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002390 break;
2391
2392 case IB_CM_REP_RECEIVED:
2393 comp = 1;
Bart Van Assche509c07b2014-10-30 14:48:30 +01002394 srp_cm_rep_handler(cm_id, event->private_data, ch);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002395 break;
2396
2397 case IB_CM_REJ_RECEIVED:
David Dillow7aa54bd2008-01-07 18:23:41 -05002398 shost_printk(KERN_DEBUG, target->scsi_host, PFX "REJ received\n");
Roland Dreieraef9ec32005-11-02 14:07:13 -08002399 comp = 1;
2400
Bart Van Assche509c07b2014-10-30 14:48:30 +01002401 srp_cm_rej_handler(cm_id, event, ch);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002402 break;
2403
Ishai Rabinovitzb7ac4ab2006-06-17 20:37:32 -07002404 case IB_CM_DREQ_RECEIVED:
David Dillow7aa54bd2008-01-07 18:23:41 -05002405 shost_printk(KERN_WARNING, target->scsi_host,
2406 PFX "DREQ received - connection closed\n");
Bart Van Asschec014c8c2015-05-18 13:23:57 +02002407 ch->connected = false;
Ishai Rabinovitzb7ac4ab2006-06-17 20:37:32 -07002408 if (ib_send_cm_drep(cm_id, NULL, 0))
David Dillow7aa54bd2008-01-07 18:23:41 -05002409 shost_printk(KERN_ERR, target->scsi_host,
2410 PFX "Sending CM DREP failed\n");
Bart Van Asschec1120f82013-10-26 14:35:08 +02002411 queue_work(system_long_wq, &target->tl_err_work);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002412 break;
2413
2414 case IB_CM_TIMEWAIT_EXIT:
David Dillow7aa54bd2008-01-07 18:23:41 -05002415 shost_printk(KERN_ERR, target->scsi_host,
2416 PFX "connection closed\n");
Bart Van Asscheac72d762014-03-14 13:53:40 +01002417 comp = 1;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002418
Bart Van Assche509c07b2014-10-30 14:48:30 +01002419 ch->status = 0;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002420 break;
2421
Ishai Rabinovitzb7ac4ab2006-06-17 20:37:32 -07002422 case IB_CM_MRA_RECEIVED:
2423 case IB_CM_DREQ_ERROR:
2424 case IB_CM_DREP_RECEIVED:
2425 break;
2426
Roland Dreieraef9ec32005-11-02 14:07:13 -08002427 default:
David Dillow7aa54bd2008-01-07 18:23:41 -05002428 shost_printk(KERN_WARNING, target->scsi_host,
2429 PFX "Unhandled CM event %d\n", event->event);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002430 break;
2431 }
2432
2433 if (comp)
Bart Van Assche509c07b2014-10-30 14:48:30 +01002434 complete(&ch->done);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002435
Roland Dreieraef9ec32005-11-02 14:07:13 -08002436 return 0;
2437}
2438
Jack Wang71444b92013-11-07 11:37:37 +01002439/**
Jack Wang71444b92013-11-07 11:37:37 +01002440 * srp_change_queue_depth - set the device queue depth
2441 * @sdev: scsi device struct
2442 * @qdepth: requested queue depth
Jack Wang71444b92013-11-07 11:37:37 +01002443 *
2444 * Returns the new queue depth.
2445 */
2446static int
Christoph Hellwigdb5ed4d2014-11-13 15:08:42 +01002447srp_change_queue_depth(struct scsi_device *sdev, int qdepth)
Jack Wang71444b92013-11-07 11:37:37 +01002448{
Christoph Hellwigc40ecc12014-11-13 14:25:11 +01002449 if (!sdev->tagged_supported)
Christoph Hellwig1e6f2412014-11-13 14:27:41 +01002450 qdepth = 1;
Christoph Hellwigdb5ed4d2014-11-13 15:08:42 +01002451 return scsi_change_queue_depth(sdev, qdepth);
Jack Wang71444b92013-11-07 11:37:37 +01002452}
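/*
 * Illustrative note (not part of the original source): the SCSI midlayer
 * invokes .change_queue_depth when user space writes the per-device sysfs
 * attribute, so the queue depth of an SRP LUN can typically be tuned with
 * something like (device address is an example):
 *
 *   echo 64 > /sys/class/scsi_device/<h:c:t:l>/device/queue_depth
 */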
2453
Bart Van Assche985aa492015-05-18 13:27:14 +02002454static int srp_send_tsk_mgmt(struct srp_rdma_ch *ch, u64 req_tag, u64 lun,
2455 u8 func)
Roland Dreieraef9ec32005-11-02 14:07:13 -08002456{
Bart Van Assche509c07b2014-10-30 14:48:30 +01002457 struct srp_target_port *target = ch->target;
Bart Van Asschea95cadb2013-10-26 14:37:17 +02002458 struct srp_rport *rport = target->rport;
David Dillow19081f32010-10-18 08:54:49 -04002459 struct ib_device *dev = target->srp_host->srp_dev->dev;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002460 struct srp_iu *iu;
2461 struct srp_tsk_mgmt *tsk_mgmt;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002462
Bart Van Asschec014c8c2015-05-18 13:23:57 +02002463 if (!ch->connected || target->qp_in_error)
Bart Van Assche3780d1f2013-02-21 17:18:00 +00002464 return -1;
2465
Bart Van Assche509c07b2014-10-30 14:48:30 +01002466 init_completion(&ch->tsk_mgmt_done);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002467
Bart Van Asschea95cadb2013-10-26 14:37:17 +02002468 /*
Bart Van Assche509c07b2014-10-30 14:48:30 +01002469 * Lock the rport mutex to prevent srp_create_ch_ib() from being
Bart Van Asschea95cadb2013-10-26 14:37:17 +02002470 * invoked while a task management function is being sent.
2471 */
2472 mutex_lock(&rport->mutex);
Bart Van Assche509c07b2014-10-30 14:48:30 +01002473 spin_lock_irq(&ch->lock);
2474 iu = __srp_get_tx_iu(ch, SRP_IU_TSK_MGMT);
2475 spin_unlock_irq(&ch->lock);
Bart Van Assche76c75b22010-11-26 14:37:47 -05002476
Bart Van Asschea95cadb2013-10-26 14:37:17 +02002477 if (!iu) {
2478 mutex_unlock(&rport->mutex);
2479
Bart Van Assche76c75b22010-11-26 14:37:47 -05002480 return -1;
Bart Van Asschea95cadb2013-10-26 14:37:17 +02002481 }
Roland Dreieraef9ec32005-11-02 14:07:13 -08002482
David Dillow19081f32010-10-18 08:54:49 -04002483 ib_dma_sync_single_for_cpu(dev, iu->dma, sizeof *tsk_mgmt,
2484 DMA_TO_DEVICE);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002485 tsk_mgmt = iu->buf;
2486 memset(tsk_mgmt, 0, sizeof *tsk_mgmt);
2487
2488 tsk_mgmt->opcode = SRP_TSK_MGMT;
Bart Van Assche985aa492015-05-18 13:27:14 +02002489 int_to_scsilun(lun, &tsk_mgmt->lun);
David Dillowf8b6e312010-11-26 13:02:21 -05002490 tsk_mgmt->tag = req_tag | SRP_TAG_TSK_MGMT;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002491 tsk_mgmt->tsk_mgmt_func = func;
David Dillowf8b6e312010-11-26 13:02:21 -05002492 tsk_mgmt->task_tag = req_tag;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002493
David Dillow19081f32010-10-18 08:54:49 -04002494 ib_dma_sync_single_for_device(dev, iu->dma, sizeof *tsk_mgmt,
2495 DMA_TO_DEVICE);
Bart Van Assche509c07b2014-10-30 14:48:30 +01002496 if (srp_post_send(ch, iu, sizeof(*tsk_mgmt))) {
2497 srp_put_tx_iu(ch, iu, SRP_IU_TSK_MGMT);
Bart Van Asschea95cadb2013-10-26 14:37:17 +02002498 mutex_unlock(&rport->mutex);
2499
Bart Van Assche76c75b22010-11-26 14:37:47 -05002500 return -1;
2501 }
Bart Van Asschea95cadb2013-10-26 14:37:17 +02002502 mutex_unlock(&rport->mutex);
Roland Dreierd945e1d2006-05-09 10:50:28 -07002503
Bart Van Assche509c07b2014-10-30 14:48:30 +01002504 if (!wait_for_completion_timeout(&ch->tsk_mgmt_done,
Roland Dreieraef9ec32005-11-02 14:07:13 -08002505 msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS)))
Roland Dreierd945e1d2006-05-09 10:50:28 -07002506 return -1;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002507
Roland Dreierd945e1d2006-05-09 10:50:28 -07002508 return 0;
Roland Dreierd945e1d2006-05-09 10:50:28 -07002509}
2510
Roland Dreieraef9ec32005-11-02 14:07:13 -08002511static int srp_abort(struct scsi_cmnd *scmnd)
2512{
Roland Dreierd945e1d2006-05-09 10:50:28 -07002513 struct srp_target_port *target = host_to_target(scmnd->device->host);
David Dillowf8b6e312010-11-26 13:02:21 -05002514 struct srp_request *req = (struct srp_request *) scmnd->host_scribble;
Bart Van Assche77f2c1a2014-10-02 15:29:25 +02002515 u32 tag;
Bart Van Assched92c0da2014-10-06 17:14:36 +02002516 u16 ch_idx;
Bart Van Assche509c07b2014-10-30 14:48:30 +01002517 struct srp_rdma_ch *ch;
Bart Van Assche086f44f2013-06-12 15:23:04 +02002518 int ret;
Roland Dreierd945e1d2006-05-09 10:50:28 -07002519
David Dillow7aa54bd2008-01-07 18:23:41 -05002520 shost_printk(KERN_ERR, target->scsi_host, "SRP abort called\n");
Roland Dreieraef9ec32005-11-02 14:07:13 -08002521
Bart Van Assched92c0da2014-10-06 17:14:36 +02002522 if (!req)
Bart Van Assche99b66972013-10-10 13:52:33 +02002523 return SUCCESS;
Bart Van Assche77f2c1a2014-10-02 15:29:25 +02002524 tag = blk_mq_unique_tag(scmnd->request);
Bart Van Assched92c0da2014-10-06 17:14:36 +02002525 ch_idx = blk_mq_unique_tag_to_hwq(tag);
2526 if (WARN_ON_ONCE(ch_idx >= target->ch_count))
2527 return SUCCESS;
2528 ch = &target->ch[ch_idx];
2529 if (!srp_claim_req(ch, req, NULL, scmnd))
2530 return SUCCESS;
2531 shost_printk(KERN_ERR, target->scsi_host,
2532 "Sending SRP abort for tag %#x\n", tag);
Bart Van Assche77f2c1a2014-10-02 15:29:25 +02002533 if (srp_send_tsk_mgmt(ch, tag, scmnd->device->lun,
Bart Van Assche80d5e8a2013-07-10 17:36:35 +02002534 SRP_TSK_ABORT_TASK) == 0)
Bart Van Assche086f44f2013-06-12 15:23:04 +02002535 ret = SUCCESS;
Bart Van Asscheed9b2262013-10-26 14:34:27 +02002536 else if (target->rport->state == SRP_RPORT_LOST)
Bart Van Assche99e1c132013-06-28 14:49:58 +02002537 ret = FAST_IO_FAIL;
Bart Van Assche086f44f2013-06-12 15:23:04 +02002538 else
2539 ret = FAILED;
Bart Van Assche509c07b2014-10-30 14:48:30 +01002540 srp_free_req(ch, req, scmnd, 0);
Bart Van Assche22032992012-08-14 13:18:53 +00002541 scmnd->result = DID_ABORT << 16;
Bart Van Assched8536672012-08-24 10:29:11 +00002542 scmnd->scsi_done(scmnd);
Roland Dreierd945e1d2006-05-09 10:50:28 -07002543
Bart Van Assche086f44f2013-06-12 15:23:04 +02002544 return ret;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002545}
2546
2547static int srp_reset_device(struct scsi_cmnd *scmnd)
2548{
Roland Dreierd945e1d2006-05-09 10:50:28 -07002549 struct srp_target_port *target = host_to_target(scmnd->device->host);
Bart Van Assched92c0da2014-10-06 17:14:36 +02002550 struct srp_rdma_ch *ch;
Bart Van Assche536ae142010-11-26 13:58:27 -05002551 int i, j;
Roland Dreierd945e1d2006-05-09 10:50:28 -07002552
David Dillow7aa54bd2008-01-07 18:23:41 -05002553 shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n");
Roland Dreieraef9ec32005-11-02 14:07:13 -08002554
Bart Van Assched92c0da2014-10-06 17:14:36 +02002555 ch = &target->ch[0];
Bart Van Assche509c07b2014-10-30 14:48:30 +01002556 if (srp_send_tsk_mgmt(ch, SRP_TAG_NO_REQ, scmnd->device->lun,
David Dillowf8b6e312010-11-26 13:02:21 -05002557 SRP_TSK_LUN_RESET))
Roland Dreierd945e1d2006-05-09 10:50:28 -07002558 return FAILED;
Bart Van Assche509c07b2014-10-30 14:48:30 +01002559 if (ch->tsk_mgmt_status)
Roland Dreierd945e1d2006-05-09 10:50:28 -07002560 return FAILED;
2561
Bart Van Assched92c0da2014-10-06 17:14:36 +02002562 for (i = 0; i < target->ch_count; i++) {
2563 ch = &target->ch[i];
2564 for (j = 0; j < target->req_ring_size; ++j) {
2565 struct srp_request *req = &ch->req_ring[j];
Bart Van Assche509c07b2014-10-30 14:48:30 +01002566
Bart Van Assched92c0da2014-10-06 17:14:36 +02002567 srp_finish_req(ch, req, scmnd->device, DID_RESET << 16);
2568 }
Bart Van Assche536ae142010-11-26 13:58:27 -05002569 }
Roland Dreierd945e1d2006-05-09 10:50:28 -07002570
Roland Dreierd945e1d2006-05-09 10:50:28 -07002571 return SUCCESS;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002572}
2573
2574static int srp_reset_host(struct scsi_cmnd *scmnd)
2575{
2576 struct srp_target_port *target = host_to_target(scmnd->device->host);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002577
David Dillow7aa54bd2008-01-07 18:23:41 -05002578 shost_printk(KERN_ERR, target->scsi_host, PFX "SRP reset_host called\n");
Roland Dreieraef9ec32005-11-02 14:07:13 -08002579
Bart Van Asscheed9b2262013-10-26 14:34:27 +02002580 return srp_reconnect_rport(target->rport) == 0 ? SUCCESS : FAILED;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002581}
2582
Bart Van Asschec9b03c12011-09-03 09:34:48 +02002583static int srp_slave_configure(struct scsi_device *sdev)
2584{
2585 struct Scsi_Host *shost = sdev->host;
2586 struct srp_target_port *target = host_to_target(shost);
2587 struct request_queue *q = sdev->request_queue;
2588 unsigned long timeout;
2589
2590 if (sdev->type == TYPE_DISK) {
2591 timeout = max_t(unsigned, 30 * HZ, target->rq_tmo_jiffies);
2592 blk_queue_rq_timeout(q, timeout);
2593 }
2594
2595 return 0;
2596}
2597
Tony Jonesee959b02008-02-22 00:13:36 +01002598static ssize_t show_id_ext(struct device *dev, struct device_attribute *attr,
2599 char *buf)
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002600{
Tony Jonesee959b02008-02-22 00:13:36 +01002601 struct srp_target_port *target = host_to_target(class_to_shost(dev));
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002602
Bart Van Assche45c37ca2015-05-18 13:25:10 +02002603 return sprintf(buf, "0x%016llx\n", be64_to_cpu(target->id_ext));
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002604}
2605
Tony Jonesee959b02008-02-22 00:13:36 +01002606static ssize_t show_ioc_guid(struct device *dev, struct device_attribute *attr,
2607 char *buf)
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002608{
Tony Jonesee959b02008-02-22 00:13:36 +01002609 struct srp_target_port *target = host_to_target(class_to_shost(dev));
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002610
Bart Van Assche45c37ca2015-05-18 13:25:10 +02002611 return sprintf(buf, "0x%016llx\n", be64_to_cpu(target->ioc_guid));
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002612}
2613
Tony Jonesee959b02008-02-22 00:13:36 +01002614static ssize_t show_service_id(struct device *dev,
2615 struct device_attribute *attr, char *buf)
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002616{
Tony Jonesee959b02008-02-22 00:13:36 +01002617 struct srp_target_port *target = host_to_target(class_to_shost(dev));
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002618
Bart Van Assche45c37ca2015-05-18 13:25:10 +02002619 return sprintf(buf, "0x%016llx\n", be64_to_cpu(target->service_id));
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002620}
2621
Tony Jonesee959b02008-02-22 00:13:36 +01002622static ssize_t show_pkey(struct device *dev, struct device_attribute *attr,
2623 char *buf)
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002624{
Tony Jonesee959b02008-02-22 00:13:36 +01002625 struct srp_target_port *target = host_to_target(class_to_shost(dev));
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002626
Bart Van Assche747fe002014-10-30 14:48:05 +01002627 return sprintf(buf, "0x%04x\n", be16_to_cpu(target->pkey));
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002628}
2629
Bart Van Assche848b3082013-10-26 14:38:12 +02002630static ssize_t show_sgid(struct device *dev, struct device_attribute *attr,
2631 char *buf)
2632{
2633 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2634
Bart Van Assche747fe002014-10-30 14:48:05 +01002635 return sprintf(buf, "%pI6\n", target->sgid.raw);
Bart Van Assche848b3082013-10-26 14:38:12 +02002636}
2637
Tony Jonesee959b02008-02-22 00:13:36 +01002638static ssize_t show_dgid(struct device *dev, struct device_attribute *attr,
2639 char *buf)
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002640{
Tony Jonesee959b02008-02-22 00:13:36 +01002641 struct srp_target_port *target = host_to_target(class_to_shost(dev));
Bart Van Assched92c0da2014-10-06 17:14:36 +02002642 struct srp_rdma_ch *ch = &target->ch[0];
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002643
Bart Van Assche509c07b2014-10-30 14:48:30 +01002644 return sprintf(buf, "%pI6\n", ch->path.dgid.raw);
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002645}
2646
Tony Jonesee959b02008-02-22 00:13:36 +01002647static ssize_t show_orig_dgid(struct device *dev,
2648 struct device_attribute *attr, char *buf)
Ishai Rabinovitz3633b3d2007-05-06 21:18:11 -07002649{
Tony Jonesee959b02008-02-22 00:13:36 +01002650 struct srp_target_port *target = host_to_target(class_to_shost(dev));
Ishai Rabinovitz3633b3d2007-05-06 21:18:11 -07002651
Bart Van Assche747fe002014-10-30 14:48:05 +01002652 return sprintf(buf, "%pI6\n", target->orig_dgid.raw);
Ishai Rabinovitz3633b3d2007-05-06 21:18:11 -07002653}
2654
Bart Van Assche89de7482010-08-03 14:08:45 +00002655static ssize_t show_req_lim(struct device *dev,
2656 struct device_attribute *attr, char *buf)
2657{
2658 struct srp_target_port *target = host_to_target(class_to_shost(dev));
Bart Van Assched92c0da2014-10-06 17:14:36 +02002659 struct srp_rdma_ch *ch;
2660 int i, req_lim = INT_MAX;
Bart Van Assche89de7482010-08-03 14:08:45 +00002661
Bart Van Assched92c0da2014-10-06 17:14:36 +02002662 for (i = 0; i < target->ch_count; i++) {
2663 ch = &target->ch[i];
2664 req_lim = min(req_lim, ch->req_lim);
2665 }
2666 return sprintf(buf, "%d\n", req_lim);
Bart Van Assche89de7482010-08-03 14:08:45 +00002667}
2668
Tony Jonesee959b02008-02-22 00:13:36 +01002669static ssize_t show_zero_req_lim(struct device *dev,
2670 struct device_attribute *attr, char *buf)
Roland Dreier6bfa24f2006-06-17 20:37:33 -07002671{
Tony Jonesee959b02008-02-22 00:13:36 +01002672 struct srp_target_port *target = host_to_target(class_to_shost(dev));
Roland Dreier6bfa24f2006-06-17 20:37:33 -07002673
Roland Dreier6bfa24f2006-06-17 20:37:33 -07002674 return sprintf(buf, "%d\n", target->zero_req_lim);
2675}
2676
Tony Jonesee959b02008-02-22 00:13:36 +01002677static ssize_t show_local_ib_port(struct device *dev,
2678 struct device_attribute *attr, char *buf)
Ishai Rabinovitzded7f1a2006-08-15 17:34:52 +03002679{
Tony Jonesee959b02008-02-22 00:13:36 +01002680 struct srp_target_port *target = host_to_target(class_to_shost(dev));
Ishai Rabinovitzded7f1a2006-08-15 17:34:52 +03002681
2682 return sprintf(buf, "%d\n", target->srp_host->port);
2683}
2684
Tony Jonesee959b02008-02-22 00:13:36 +01002685static ssize_t show_local_ib_device(struct device *dev,
2686 struct device_attribute *attr, char *buf)
Ishai Rabinovitzded7f1a2006-08-15 17:34:52 +03002687{
Tony Jonesee959b02008-02-22 00:13:36 +01002688 struct srp_target_port *target = host_to_target(class_to_shost(dev));
Ishai Rabinovitzded7f1a2006-08-15 17:34:52 +03002689
Greg Kroah-Hartman05321932008-03-06 00:13:36 +01002690 return sprintf(buf, "%s\n", target->srp_host->srp_dev->dev->name);
Ishai Rabinovitzded7f1a2006-08-15 17:34:52 +03002691}
2692
Bart Van Assched92c0da2014-10-06 17:14:36 +02002693static ssize_t show_ch_count(struct device *dev, struct device_attribute *attr,
2694 char *buf)
2695{
2696 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2697
2698 return sprintf(buf, "%d\n", target->ch_count);
2699}
2700
Bart Van Assche4b5e5f42013-06-28 14:57:42 +02002701static ssize_t show_comp_vector(struct device *dev,
2702 struct device_attribute *attr, char *buf)
2703{
2704 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2705
2706 return sprintf(buf, "%d\n", target->comp_vector);
2707}
2708
Vu Pham7bb312e2013-10-26 14:31:27 +02002709static ssize_t show_tl_retry_count(struct device *dev,
2710 struct device_attribute *attr, char *buf)
2711{
2712 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2713
2714 return sprintf(buf, "%d\n", target->tl_retry_count);
2715}
2716
David Dillow49248642011-01-14 18:23:24 -05002717static ssize_t show_cmd_sg_entries(struct device *dev,
2718 struct device_attribute *attr, char *buf)
2719{
2720 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2721
2722 return sprintf(buf, "%u\n", target->cmd_sg_cnt);
2723}
2724
David Dillowc07d4242011-01-16 13:57:10 -05002725static ssize_t show_allow_ext_sg(struct device *dev,
2726 struct device_attribute *attr, char *buf)
2727{
2728 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2729
2730 return sprintf(buf, "%s\n", target->allow_ext_sg ? "true" : "false");
2731}
2732
Tony Jonesee959b02008-02-22 00:13:36 +01002733static DEVICE_ATTR(id_ext, S_IRUGO, show_id_ext, NULL);
2734static DEVICE_ATTR(ioc_guid, S_IRUGO, show_ioc_guid, NULL);
2735static DEVICE_ATTR(service_id, S_IRUGO, show_service_id, NULL);
2736static DEVICE_ATTR(pkey, S_IRUGO, show_pkey, NULL);
Bart Van Assche848b3082013-10-26 14:38:12 +02002737static DEVICE_ATTR(sgid, S_IRUGO, show_sgid, NULL);
Tony Jonesee959b02008-02-22 00:13:36 +01002738static DEVICE_ATTR(dgid, S_IRUGO, show_dgid, NULL);
2739static DEVICE_ATTR(orig_dgid, S_IRUGO, show_orig_dgid, NULL);
Bart Van Assche89de7482010-08-03 14:08:45 +00002740static DEVICE_ATTR(req_lim, S_IRUGO, show_req_lim, NULL);
Tony Jonesee959b02008-02-22 00:13:36 +01002741static DEVICE_ATTR(zero_req_lim, S_IRUGO, show_zero_req_lim, NULL);
2742static DEVICE_ATTR(local_ib_port, S_IRUGO, show_local_ib_port, NULL);
2743static DEVICE_ATTR(local_ib_device, S_IRUGO, show_local_ib_device, NULL);
Bart Van Assched92c0da2014-10-06 17:14:36 +02002744static DEVICE_ATTR(ch_count, S_IRUGO, show_ch_count, NULL);
Bart Van Assche4b5e5f42013-06-28 14:57:42 +02002745static DEVICE_ATTR(comp_vector, S_IRUGO, show_comp_vector, NULL);
Vu Pham7bb312e2013-10-26 14:31:27 +02002746static DEVICE_ATTR(tl_retry_count, S_IRUGO, show_tl_retry_count, NULL);
David Dillow49248642011-01-14 18:23:24 -05002747static DEVICE_ATTR(cmd_sg_entries, S_IRUGO, show_cmd_sg_entries, NULL);
David Dillowc07d4242011-01-16 13:57:10 -05002748static DEVICE_ATTR(allow_ext_sg, S_IRUGO, show_allow_ext_sg, NULL);
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002749
Tony Jonesee959b02008-02-22 00:13:36 +01002750static struct device_attribute *srp_host_attrs[] = {
2751 &dev_attr_id_ext,
2752 &dev_attr_ioc_guid,
2753 &dev_attr_service_id,
2754 &dev_attr_pkey,
Bart Van Assche848b3082013-10-26 14:38:12 +02002755 &dev_attr_sgid,
Tony Jonesee959b02008-02-22 00:13:36 +01002756 &dev_attr_dgid,
2757 &dev_attr_orig_dgid,
Bart Van Assche89de7482010-08-03 14:08:45 +00002758 &dev_attr_req_lim,
Tony Jonesee959b02008-02-22 00:13:36 +01002759 &dev_attr_zero_req_lim,
2760 &dev_attr_local_ib_port,
2761 &dev_attr_local_ib_device,
Bart Van Assched92c0da2014-10-06 17:14:36 +02002762 &dev_attr_ch_count,
Bart Van Assche4b5e5f42013-06-28 14:57:42 +02002763 &dev_attr_comp_vector,
Vu Pham7bb312e2013-10-26 14:31:27 +02002764 &dev_attr_tl_retry_count,
David Dillow49248642011-01-14 18:23:24 -05002765 &dev_attr_cmd_sg_entries,
David Dillowc07d4242011-01-16 13:57:10 -05002766 &dev_attr_allow_ext_sg,
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002767 NULL
2768};
2769
Roland Dreieraef9ec32005-11-02 14:07:13 -08002770static struct scsi_host_template srp_template = {
2771 .module = THIS_MODULE,
Roland Dreierb7f008f2007-05-06 21:18:11 -07002772 .name = "InfiniBand SRP initiator",
2773 .proc_name = DRV_NAME,
Bart Van Asschec9b03c12011-09-03 09:34:48 +02002774 .slave_configure = srp_slave_configure,
Roland Dreieraef9ec32005-11-02 14:07:13 -08002775 .info = srp_target_info,
2776 .queuecommand = srp_queuecommand,
Jack Wang71444b92013-11-07 11:37:37 +01002777 .change_queue_depth = srp_change_queue_depth,
Roland Dreieraef9ec32005-11-02 14:07:13 -08002778 .eh_abort_handler = srp_abort,
2779 .eh_device_reset_handler = srp_reset_device,
2780 .eh_host_reset_handler = srp_reset_host,
Bart Van Assche2742c1d2013-06-12 15:24:25 +02002781 .skip_settle_delay = true,
David Dillow49248642011-01-14 18:23:24 -05002782 .sg_tablesize = SRP_DEF_SG_TABLESIZE,
Bart Van Assche4d73f952013-10-26 14:40:37 +02002783 .can_queue = SRP_DEFAULT_CMD_SQ_SIZE,
Roland Dreieraef9ec32005-11-02 14:07:13 -08002784 .this_id = -1,
Bart Van Assche4d73f952013-10-26 14:40:37 +02002785 .cmd_per_lun = SRP_DEFAULT_CMD_SQ_SIZE,
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002786 .use_clustering = ENABLE_CLUSTERING,
Bart Van Assche77f2c1a2014-10-02 15:29:25 +02002787 .shost_attrs = srp_host_attrs,
Christoph Hellwigc40ecc12014-11-13 14:25:11 +01002788 .track_queue_depth = 1,
Roland Dreieraef9ec32005-11-02 14:07:13 -08002789};
2790
Bart Van Assche34aa6542014-10-30 14:47:22 +01002791static int srp_sdev_count(struct Scsi_Host *host)
2792{
2793 struct scsi_device *sdev;
2794 int c = 0;
2795
2796 shost_for_each_device(sdev, host)
2797 c++;
2798
2799 return c;
2800}
2801
Bart Van Asschebc44bd12015-08-14 11:01:09 -07002802/*
2803 * Return values:
2804 * < 0 upon failure. Caller is responsible for SRP target port cleanup.
2805 * 0 and target->state == SRP_TARGET_REMOVED if asynchronous target port
2806 * removal has been scheduled.
2807 * 0 and target->state != SRP_TARGET_REMOVED upon success.
2808 */
Roland Dreieraef9ec32005-11-02 14:07:13 -08002809static int srp_add_target(struct srp_host *host, struct srp_target_port *target)
2810{
FUJITA Tomonori32368222007-06-27 16:33:12 +09002811 struct srp_rport_identifiers ids;
2812 struct srp_rport *rport;
2813
Bart Van Assche34aa6542014-10-30 14:47:22 +01002814 target->state = SRP_TARGET_SCANNING;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002815 sprintf(target->target_name, "SRP.T10:%016llX",
Bart Van Assche45c37ca2015-05-18 13:25:10 +02002816 be64_to_cpu(target->id_ext));
Roland Dreieraef9ec32005-11-02 14:07:13 -08002817
Greg Kroah-Hartman05321932008-03-06 00:13:36 +01002818 if (scsi_add_host(target->scsi_host, host->srp_dev->dev->dma_device))
Roland Dreieraef9ec32005-11-02 14:07:13 -08002819 return -ENODEV;
2820
FUJITA Tomonori32368222007-06-27 16:33:12 +09002821 memcpy(ids.port_id, &target->id_ext, 8);
2822 memcpy(ids.port_id + 8, &target->ioc_guid, 8);
FUJITA Tomonoriaebd5e42007-07-11 15:08:15 +09002823 ids.roles = SRP_RPORT_ROLE_TARGET;
FUJITA Tomonori32368222007-06-27 16:33:12 +09002824 rport = srp_rport_add(target->scsi_host, &ids);
2825 if (IS_ERR(rport)) {
2826 scsi_remove_host(target->scsi_host);
2827 return PTR_ERR(rport);
2828 }
2829
Bart Van Asschedc1bdbd2011-09-16 20:41:13 +02002830 rport->lld_data = target;
Bart Van Assche9dd69a62013-10-26 14:32:30 +02002831 target->rport = rport;
Bart Van Asschedc1bdbd2011-09-16 20:41:13 +02002832
Matthew Wilcoxb3589fd2006-06-17 20:37:30 -07002833 spin_lock(&host->target_lock);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002834 list_add_tail(&target->list, &host->target_list);
Matthew Wilcoxb3589fd2006-06-17 20:37:30 -07002835 spin_unlock(&host->target_lock);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002836
Roland Dreieraef9ec32005-11-02 14:07:13 -08002837 scsi_scan_target(&target->scsi_host->shost_gendev,
Matthew Wilcox1962a4a2006-06-17 20:37:30 -07002838 0, target->scsi_id, SCAN_WILD_CARD, 0);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002839
Bart Van Asschec014c8c2015-05-18 13:23:57 +02002840 if (srp_connected_ch(target) < target->ch_count ||
2841 target->qp_in_error) {
Bart Van Assche34aa6542014-10-30 14:47:22 +01002842 shost_printk(KERN_INFO, target->scsi_host,
2843 PFX "SCSI scan failed - removing SCSI host\n");
2844 srp_queue_remove_work(target);
2845 goto out;
2846 }
2847
Bart Van Asschecf1acab2016-05-12 10:47:38 -07002848 pr_debug("%s: SCSI scan succeeded - detected %d LUNs\n",
Bart Van Assche34aa6542014-10-30 14:47:22 +01002849 dev_name(&target->scsi_host->shost_gendev),
2850 srp_sdev_count(target->scsi_host));
2851
2852 spin_lock_irq(&target->lock);
2853 if (target->state == SRP_TARGET_SCANNING)
2854 target->state = SRP_TARGET_LIVE;
2855 spin_unlock_irq(&target->lock);
2856
2857out:
Roland Dreieraef9ec32005-11-02 14:07:13 -08002858 return 0;
2859}
2860
Tony Jonesee959b02008-02-22 00:13:36 +01002861static void srp_release_dev(struct device *dev)
Roland Dreieraef9ec32005-11-02 14:07:13 -08002862{
2863 struct srp_host *host =
Tony Jonesee959b02008-02-22 00:13:36 +01002864 container_of(dev, struct srp_host, dev);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002865
2866 complete(&host->released);
2867}
2868
2869static struct class srp_class = {
2870 .name = "infiniband_srp",
Tony Jonesee959b02008-02-22 00:13:36 +01002871 .dev_release = srp_release_dev
Roland Dreieraef9ec32005-11-02 14:07:13 -08002872};
2873
Bart Van Assche96fc2482013-06-28 14:51:26 +02002874/**
2875 * srp_conn_unique() - check whether the connection to a target is unique
Bart Van Asscheaf246632014-05-20 15:04:21 +02002876 * @host: SRP host.
2877 * @target: SRP target port.
Bart Van Assche96fc2482013-06-28 14:51:26 +02002878 */
2879static bool srp_conn_unique(struct srp_host *host,
2880 struct srp_target_port *target)
2881{
2882 struct srp_target_port *t;
2883 bool ret = false;
2884
2885 if (target->state == SRP_TARGET_REMOVED)
2886 goto out;
2887
2888 ret = true;
2889
2890 spin_lock(&host->target_lock);
2891 list_for_each_entry(t, &host->target_list, list) {
2892 if (t != target &&
2893 target->id_ext == t->id_ext &&
2894 target->ioc_guid == t->ioc_guid &&
2895 target->initiator_ext == t->initiator_ext) {
2896 ret = false;
2897 break;
2898 }
2899 }
2900 spin_unlock(&host->target_lock);
2901
2902out:
2903 return ret;
2904}
2905
Roland Dreieraef9ec32005-11-02 14:07:13 -08002906/*
2907 * Target ports are added by writing
2908 *
2909 * id_ext=<SRP ID ext>,ioc_guid=<SRP IOC GUID>,dgid=<dest GID>,
2910 * pkey=<P_Key>,service_id=<service ID>
2911 *
2912 * to the add_target sysfs attribute.
2913 */
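/*
 * For example (hypothetical HCA name and target identifiers; substitute
 * the values for your fabric):
 *
 *   echo "id_ext=200500A0B81146A1,ioc_guid=0002c90200402bd4,\
 *         dgid=fe800000000000000002c90200402bd5,pkey=ffff,\
 *         service_id=0002c90200402bd4" \
 *       > /sys/class/infiniband_srp/srp-mlx4_0-1/add_target
 */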
2914enum {
2915 SRP_OPT_ERR = 0,
2916 SRP_OPT_ID_EXT = 1 << 0,
2917 SRP_OPT_IOC_GUID = 1 << 1,
2918 SRP_OPT_DGID = 1 << 2,
2919 SRP_OPT_PKEY = 1 << 3,
2920 SRP_OPT_SERVICE_ID = 1 << 4,
2921 SRP_OPT_MAX_SECT = 1 << 5,
Vu Pham52fb2b502006-06-17 20:37:31 -07002922 SRP_OPT_MAX_CMD_PER_LUN = 1 << 6,
Ramachandra K0c0450db2006-06-17 20:37:38 -07002923 SRP_OPT_IO_CLASS = 1 << 7,
Ishai Rabinovitz01cb9bc2006-10-04 15:28:56 +02002924 SRP_OPT_INITIATOR_EXT = 1 << 8,
David Dillow49248642011-01-14 18:23:24 -05002925 SRP_OPT_CMD_SG_ENTRIES = 1 << 9,
David Dillowc07d4242011-01-16 13:57:10 -05002926 SRP_OPT_ALLOW_EXT_SG = 1 << 10,
2927 SRP_OPT_SG_TABLESIZE = 1 << 11,
Bart Van Assche4b5e5f42013-06-28 14:57:42 +02002928 SRP_OPT_COMP_VECTOR = 1 << 12,
Vu Pham7bb312e2013-10-26 14:31:27 +02002929 SRP_OPT_TL_RETRY_COUNT = 1 << 13,
Bart Van Assche4d73f952013-10-26 14:40:37 +02002930 SRP_OPT_QUEUE_SIZE = 1 << 14,
Roland Dreieraef9ec32005-11-02 14:07:13 -08002931 SRP_OPT_ALL = (SRP_OPT_ID_EXT |
2932 SRP_OPT_IOC_GUID |
2933 SRP_OPT_DGID |
2934 SRP_OPT_PKEY |
2935 SRP_OPT_SERVICE_ID),
2936};
2937
Steven Whitehousea447c092008-10-13 10:46:57 +01002938static const match_table_t srp_opt_tokens = {
Vu Pham52fb2b502006-06-17 20:37:31 -07002939 { SRP_OPT_ID_EXT, "id_ext=%s" },
2940 { SRP_OPT_IOC_GUID, "ioc_guid=%s" },
2941 { SRP_OPT_DGID, "dgid=%s" },
2942 { SRP_OPT_PKEY, "pkey=%x" },
2943 { SRP_OPT_SERVICE_ID, "service_id=%s" },
2944 { SRP_OPT_MAX_SECT, "max_sect=%d" },
2945 { SRP_OPT_MAX_CMD_PER_LUN, "max_cmd_per_lun=%d" },
Ramachandra K0c0450db2006-06-17 20:37:38 -07002946 { SRP_OPT_IO_CLASS, "io_class=%x" },
Ishai Rabinovitz01cb9bc2006-10-04 15:28:56 +02002947 { SRP_OPT_INITIATOR_EXT, "initiator_ext=%s" },
David Dillow49248642011-01-14 18:23:24 -05002948 { SRP_OPT_CMD_SG_ENTRIES, "cmd_sg_entries=%u" },
David Dillowc07d4242011-01-16 13:57:10 -05002949 { SRP_OPT_ALLOW_EXT_SG, "allow_ext_sg=%u" },
2950 { SRP_OPT_SG_TABLESIZE, "sg_tablesize=%u" },
Bart Van Assche4b5e5f42013-06-28 14:57:42 +02002951 { SRP_OPT_COMP_VECTOR, "comp_vector=%u" },
Vu Pham7bb312e2013-10-26 14:31:27 +02002952 { SRP_OPT_TL_RETRY_COUNT, "tl_retry_count=%u" },
Bart Van Assche4d73f952013-10-26 14:40:37 +02002953 { SRP_OPT_QUEUE_SIZE, "queue_size=%d" },
Vu Pham52fb2b502006-06-17 20:37:31 -07002954 { SRP_OPT_ERR, NULL }
Roland Dreieraef9ec32005-11-02 14:07:13 -08002955};
2956
2957static int srp_parse_options(const char *buf, struct srp_target_port *target)
2958{
2959 char *options, *sep_opt;
2960 char *p;
2961 char dgid[3];
2962 substring_t args[MAX_OPT_ARGS];
2963 int opt_mask = 0;
2964 int token;
2965 int ret = -EINVAL;
2966 int i;
2967
2968 options = kstrdup(buf, GFP_KERNEL);
2969 if (!options)
2970 return -ENOMEM;
2971
2972 sep_opt = options;
Sagi Grimberg7dcf9c12014-10-19 18:19:02 +03002973 while ((p = strsep(&sep_opt, ",\n")) != NULL) {
Roland Dreieraef9ec32005-11-02 14:07:13 -08002974 if (!*p)
2975 continue;
2976
2977 token = match_token(p, srp_opt_tokens, args);
2978 opt_mask |= token;
2979
2980 switch (token) {
2981 case SRP_OPT_ID_EXT:
2982 p = match_strdup(args);
Ishai Rabinovitza20f3a62007-01-16 17:20:25 +02002983 if (!p) {
2984 ret = -ENOMEM;
2985 goto out;
2986 }
Roland Dreieraef9ec32005-11-02 14:07:13 -08002987 target->id_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
2988 kfree(p);
2989 break;
2990
2991 case SRP_OPT_IOC_GUID:
2992 p = match_strdup(args);
Ishai Rabinovitza20f3a62007-01-16 17:20:25 +02002993 if (!p) {
2994 ret = -ENOMEM;
2995 goto out;
2996 }
Roland Dreieraef9ec32005-11-02 14:07:13 -08002997 target->ioc_guid = cpu_to_be64(simple_strtoull(p, NULL, 16));
2998 kfree(p);
2999 break;
3000
3001 case SRP_OPT_DGID:
3002 p = match_strdup(args);
Ishai Rabinovitza20f3a62007-01-16 17:20:25 +02003003 if (!p) {
3004 ret = -ENOMEM;
3005 goto out;
3006 }
Roland Dreieraef9ec32005-11-02 14:07:13 -08003007 if (strlen(p) != 32) {
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00003008 pr_warn("bad dest GID parameter '%s'\n", p);
Roland Dreierce1823f2006-04-03 09:31:04 -07003009 kfree(p);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003010 goto out;
3011 }
3012
3013 for (i = 0; i < 16; ++i) {
Bart Van Assche747fe002014-10-30 14:48:05 +01003014 strlcpy(dgid, p + i * 2, sizeof(dgid));
3015 if (sscanf(dgid, "%hhx",
3016 &target->orig_dgid.raw[i]) < 1) {
3017 ret = -EINVAL;
3018 kfree(p);
3019 goto out;
3020 }
Roland Dreieraef9ec32005-11-02 14:07:13 -08003021 }
Roland Dreierbf17c1c2006-03-20 10:08:25 -08003022 kfree(p);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003023 break;
3024
3025 case SRP_OPT_PKEY:
3026 if (match_hex(args, &token)) {
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00003027 pr_warn("bad P_Key parameter '%s'\n", p);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003028 goto out;
3029 }
Bart Van Assche747fe002014-10-30 14:48:05 +01003030 target->pkey = cpu_to_be16(token);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003031 break;
3032
3033 case SRP_OPT_SERVICE_ID:
3034 p = match_strdup(args);
Ishai Rabinovitza20f3a62007-01-16 17:20:25 +02003035 if (!p) {
3036 ret = -ENOMEM;
3037 goto out;
3038 }
Roland Dreieraef9ec32005-11-02 14:07:13 -08003039 target->service_id = cpu_to_be64(simple_strtoull(p, NULL, 16));
3040 kfree(p);
3041 break;
3042
3043 case SRP_OPT_MAX_SECT:
3044 if (match_int(args, &token)) {
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00003045 pr_warn("bad max sect parameter '%s'\n", p);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003046 goto out;
3047 }
3048 target->scsi_host->max_sectors = token;
3049 break;
3050
Bart Van Assche4d73f952013-10-26 14:40:37 +02003051 case SRP_OPT_QUEUE_SIZE:
3052 if (match_int(args, &token) || token < 1) {
3053 pr_warn("bad queue_size parameter '%s'\n", p);
3054 goto out;
3055 }
3056 target->scsi_host->can_queue = token;
3057 target->queue_size = token + SRP_RSP_SQ_SIZE +
3058 SRP_TSK_MGMT_SQ_SIZE;
3059 if (!(opt_mask & SRP_OPT_MAX_CMD_PER_LUN))
3060 target->scsi_host->cmd_per_lun = token;
3061 break;
3062
Vu Pham52fb2b502006-06-17 20:37:31 -07003063 case SRP_OPT_MAX_CMD_PER_LUN:
Bart Van Assche4d73f952013-10-26 14:40:37 +02003064 if (match_int(args, &token) || token < 1) {
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00003065 pr_warn("bad max cmd_per_lun parameter '%s'\n",
3066 p);
Vu Pham52fb2b502006-06-17 20:37:31 -07003067 goto out;
3068 }
Bart Van Assche4d73f952013-10-26 14:40:37 +02003069 target->scsi_host->cmd_per_lun = token;
Vu Pham52fb2b502006-06-17 20:37:31 -07003070 break;
3071
Ramachandra K0c0450db2006-06-17 20:37:38 -07003072 case SRP_OPT_IO_CLASS:
3073 if (match_hex(args, &token)) {
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00003074 pr_warn("bad IO class parameter '%s'\n", p);
Ramachandra K0c0450db2006-06-17 20:37:38 -07003075 goto out;
3076 }
3077 if (token != SRP_REV10_IB_IO_CLASS &&
3078 token != SRP_REV16A_IB_IO_CLASS) {
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00003079 pr_warn("unknown IO class parameter value %x specified (use %x or %x).\n",
3080 token, SRP_REV10_IB_IO_CLASS,
3081 SRP_REV16A_IB_IO_CLASS);
Ramachandra K0c0450db2006-06-17 20:37:38 -07003082 goto out;
3083 }
3084 target->io_class = token;
3085 break;
3086
Ishai Rabinovitz01cb9bc2006-10-04 15:28:56 +02003087 case SRP_OPT_INITIATOR_EXT:
3088 p = match_strdup(args);
Ishai Rabinovitza20f3a62007-01-16 17:20:25 +02003089 if (!p) {
3090 ret = -ENOMEM;
3091 goto out;
3092 }
Ishai Rabinovitz01cb9bc2006-10-04 15:28:56 +02003093 target->initiator_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
3094 kfree(p);
3095 break;
3096
David Dillow49248642011-01-14 18:23:24 -05003097 case SRP_OPT_CMD_SG_ENTRIES:
3098 if (match_int(args, &token) || token < 1 || token > 255) {
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00003099 pr_warn("bad max cmd_sg_entries parameter '%s'\n",
3100 p);
David Dillow49248642011-01-14 18:23:24 -05003101 goto out;
3102 }
3103 target->cmd_sg_cnt = token;
3104 break;
3105
David Dillowc07d4242011-01-16 13:57:10 -05003106 case SRP_OPT_ALLOW_EXT_SG:
3107 if (match_int(args, &token)) {
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00003108 pr_warn("bad allow_ext_sg parameter '%s'\n", p);
David Dillowc07d4242011-01-16 13:57:10 -05003109 goto out;
3110 }
3111 target->allow_ext_sg = !!token;
3112 break;
3113
3114 case SRP_OPT_SG_TABLESIZE:
3115 if (match_int(args, &token) || token < 1 ||
3116 token > SCSI_MAX_SG_CHAIN_SEGMENTS) {
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00003117 pr_warn("bad max sg_tablesize parameter '%s'\n",
3118 p);
David Dillowc07d4242011-01-16 13:57:10 -05003119 goto out;
3120 }
3121 target->sg_tablesize = token;
3122 break;
3123
Bart Van Assche4b5e5f42013-06-28 14:57:42 +02003124 case SRP_OPT_COMP_VECTOR:
3125 if (match_int(args, &token) || token < 0) {
3126 pr_warn("bad comp_vector parameter '%s'\n", p);
3127 goto out;
3128 }
3129 target->comp_vector = token;
3130 break;
3131
Vu Pham7bb312e2013-10-26 14:31:27 +02003132 case SRP_OPT_TL_RETRY_COUNT:
3133 if (match_int(args, &token) || token < 2 || token > 7) {
3134 pr_warn("bad tl_retry_count parameter '%s' (must be a number between 2 and 7)\n",
3135 p);
3136 goto out;
3137 }
3138 target->tl_retry_count = token;
3139 break;
3140
Roland Dreieraef9ec32005-11-02 14:07:13 -08003141 default:
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00003142 pr_warn("unknown parameter or missing value '%s' in target creation request\n",
3143 p);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003144 goto out;
3145 }
3146 }
3147
3148 if ((opt_mask & SRP_OPT_ALL) == SRP_OPT_ALL)
3149 ret = 0;
3150 else
3151 for (i = 0; i < ARRAY_SIZE(srp_opt_tokens); ++i)
3152 if ((srp_opt_tokens[i].token & SRP_OPT_ALL) &&
3153 !(srp_opt_tokens[i].token & opt_mask))
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00003154 pr_warn("target creation request is missing parameter '%s'\n",
3155 srp_opt_tokens[i].pattern);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003156
Bart Van Assche4d73f952013-10-26 14:40:37 +02003157 if (target->scsi_host->cmd_per_lun > target->scsi_host->can_queue
3158 && (opt_mask & SRP_OPT_MAX_CMD_PER_LUN))
3159 pr_warn("cmd_per_lun = %d > queue_size = %d\n",
3160 target->scsi_host->cmd_per_lun,
3161 target->scsi_host->can_queue);
3162
Roland Dreieraef9ec32005-11-02 14:07:13 -08003163out:
3164 kfree(options);
3165 return ret;
3166}
3167
Tony Jonesee959b02008-02-22 00:13:36 +01003168static ssize_t srp_create_target(struct device *dev,
3169 struct device_attribute *attr,
Roland Dreieraef9ec32005-11-02 14:07:13 -08003170 const char *buf, size_t count)
3171{
3172 struct srp_host *host =
Tony Jonesee959b02008-02-22 00:13:36 +01003173 container_of(dev, struct srp_host, dev);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003174 struct Scsi_Host *target_host;
3175 struct srp_target_port *target;
Bart Van Assche509c07b2014-10-30 14:48:30 +01003176 struct srp_rdma_ch *ch;
Bart Van Assched1b42892014-05-20 15:07:20 +02003177 struct srp_device *srp_dev = host->srp_dev;
3178 struct ib_device *ibdev = srp_dev->dev;
Bart Van Assched92c0da2014-10-06 17:14:36 +02003179 int ret, node_idx, node, cpu, i;
3180 bool multich = false;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003181
3182 target_host = scsi_host_alloc(&srp_template,
3183 sizeof (struct srp_target_port));
3184 if (!target_host)
3185 return -ENOMEM;
3186
David Dillow49248642011-01-14 18:23:24 -05003187 target_host->transportt = ib_srp_transport_template;
Bart Van Asschefd1b6c42011-07-13 09:19:16 -07003188 target_host->max_channel = 0;
3189 target_host->max_id = 1;
Bart Van Assche985aa492015-05-18 13:27:14 +02003190 target_host->max_lun = -1LL;
Arne Redlich3c8edf02006-11-15 12:43:00 +01003191 target_host->max_cmd_len = sizeof ((struct srp_cmd *) (void *) 0L)->cdb;
Roland Dreier5f068992005-11-11 14:06:01 -08003192
Roland Dreieraef9ec32005-11-02 14:07:13 -08003193 target = host_to_target(target_host);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003194
David Dillow49248642011-01-14 18:23:24 -05003195 target->io_class = SRP_REV16A_IB_IO_CLASS;
3196 target->scsi_host = target_host;
3197 target->srp_host = host;
Jason Gunthorpee6bf5f42015-07-30 17:22:22 -06003198 target->lkey = host->srp_dev->pd->local_dma_lkey;
Bart Van Assche03f6fb92015-08-10 17:09:36 -07003199 target->global_mr = host->srp_dev->global_mr;
David Dillow49248642011-01-14 18:23:24 -05003200 target->cmd_sg_cnt = cmd_sg_entries;
David Dillowc07d4242011-01-16 13:57:10 -05003201 target->sg_tablesize = indirect_sg_entries ? : cmd_sg_entries;
3202 target->allow_ext_sg = allow_ext_sg;
Vu Pham7bb312e2013-10-26 14:31:27 +02003203 target->tl_retry_count = 7;
Bart Van Assche4d73f952013-10-26 14:40:37 +02003204 target->queue_size = SRP_DEFAULT_QUEUE_SIZE;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003205
Bart Van Assche34aa6542014-10-30 14:47:22 +01003206 /*
3207 * Prevent the SCSI host from being removed by srp_remove_target()
3208 * before this function returns.
3209 */
3210 scsi_host_get(target->scsi_host);
3211
Bart Van Assche2d7091b2014-03-14 13:52:45 +01003212 mutex_lock(&host->add_target_mutex);
3213
Roland Dreieraef9ec32005-11-02 14:07:13 -08003214 ret = srp_parse_options(buf, target);
3215 if (ret)
Bart Van Asschefb49c8b2015-05-18 13:23:14 +02003216 goto out;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003217
Bart Van Assche4d73f952013-10-26 14:40:37 +02003218 target->req_ring_size = target->queue_size - SRP_TSK_MGMT_SQ_SIZE;
3219
Bart Van Assche96fc2482013-06-28 14:51:26 +02003220 if (!srp_conn_unique(target->srp_host, target)) {
3221 shost_printk(KERN_INFO, target->scsi_host,
3222 PFX "Already connected to target port with id_ext=%016llx;ioc_guid=%016llx;initiator_ext=%016llx\n",
3223 be64_to_cpu(target->id_ext),
3224 be64_to_cpu(target->ioc_guid),
3225 be64_to_cpu(target->initiator_ext));
3226 ret = -EEXIST;
Bart Van Asschefb49c8b2015-05-18 13:23:14 +02003227 goto out;
Bart Van Assche96fc2482013-06-28 14:51:26 +02003228 }
3229
Bart Van Assche5cfb1782014-05-20 15:08:34 +02003230 if (!srp_dev->has_fmr && !srp_dev->has_fr && !target->allow_ext_sg &&
Bart Van Assched1b42892014-05-20 15:07:20 +02003231 target->cmd_sg_cnt < target->sg_tablesize) {
Bart Van Assche5cfb1782014-05-20 15:08:34 +02003232 pr_warn("No MR pool and no external indirect descriptors, limiting sg_tablesize to cmd_sg_cnt\n");
David Dillowc07d4242011-01-16 13:57:10 -05003233 target->sg_tablesize = target->cmd_sg_cnt;
3234 }
3235
3236 target_host->sg_tablesize = target->sg_tablesize;
Bart Van Asschefa9863f2016-04-22 14:13:57 -07003237 target->mr_pool_size = target->scsi_host->can_queue;
David Dillowc07d4242011-01-16 13:57:10 -05003238 target->indirect_size = target->sg_tablesize *
3239 sizeof (struct srp_direct_buf);
David Dillow49248642011-01-14 18:23:24 -05003240 target->max_iu_len = sizeof (struct srp_cmd) +
3241 sizeof (struct srp_indirect_buf) +
3242 target->cmd_sg_cnt * sizeof (struct srp_direct_buf);
3243
Bart Van Asschec1120f82013-10-26 14:35:08 +02003244 INIT_WORK(&target->tl_err_work, srp_tl_err_work);
Bart Van Asscheef6c49d2011-12-26 16:49:18 +00003245 INIT_WORK(&target->remove_work, srp_remove_work);
David Dillow8f26c9f2011-01-14 19:45:50 -05003246 spin_lock_init(&target->lock);
Matan Barak55ee3ab2015-10-15 18:38:45 +03003247 ret = ib_query_gid(ibdev, host->port, 0, &target->sgid, NULL);
Sagi Grimberg2088ca62014-03-14 13:51:58 +01003248 if (ret)
Bart Van Asschefb49c8b2015-05-18 13:23:14 +02003249 goto out;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003250
Bart Van Assched92c0da2014-10-06 17:14:36 +02003251 ret = -ENOMEM;
3252 target->ch_count = max_t(unsigned, num_online_nodes(),
3253 min(ch_count ? :
3254 min(4 * num_online_nodes(),
3255 ibdev->num_comp_vectors),
3256 num_online_cpus()));
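	/*
	 * Worked example with illustrative numbers: on a 2-node system
	 * with 16 online CPUs, 8 completion vectors and the ch_count
	 * module parameter left at 0, this evaluates to
	 * max(2, min(min(4 * 2, 8), 16)) = 8 channels.
	 */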
3257 target->ch = kcalloc(target->ch_count, sizeof(*target->ch),
3258 GFP_KERNEL);
3259 if (!target->ch)
Bart Van Asschefb49c8b2015-05-18 13:23:14 +02003260 goto out;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003261
Bart Van Assched92c0da2014-10-06 17:14:36 +02003262 node_idx = 0;
3263 for_each_online_node(node) {
3264 const int ch_start = (node_idx * target->ch_count /
3265 num_online_nodes());
3266 const int ch_end = ((node_idx + 1) * target->ch_count /
3267 num_online_nodes());
3268 const int cv_start = (node_idx * ibdev->num_comp_vectors /
3269 num_online_nodes() + target->comp_vector)
3270 % ibdev->num_comp_vectors;
3271 const int cv_end = ((node_idx + 1) * ibdev->num_comp_vectors /
3272 num_online_nodes() + target->comp_vector)
3273 % ibdev->num_comp_vectors;
3274 int cpu_idx = 0;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003275
Bart Van Assched92c0da2014-10-06 17:14:36 +02003276 for_each_online_cpu(cpu) {
3277 if (cpu_to_node(cpu) != node)
3278 continue;
3279 if (ch_start + cpu_idx >= ch_end)
3280 continue;
3281 ch = &target->ch[ch_start + cpu_idx];
3282 ch->target = target;
3283 ch->comp_vector = cv_start == cv_end ? cv_start :
3284 cv_start + cpu_idx % (cv_end - cv_start);
3285 spin_lock_init(&ch->lock);
3286 INIT_LIST_HEAD(&ch->free_tx);
3287 ret = srp_new_cm_id(ch);
3288 if (ret)
3289 goto err_disconnect;
3290
3291 ret = srp_create_ch_ib(ch);
3292 if (ret)
3293 goto err_disconnect;
3294
3295 ret = srp_alloc_req_data(ch);
3296 if (ret)
3297 goto err_disconnect;
3298
3299 ret = srp_connect_ch(ch, multich);
3300 if (ret) {
3301 shost_printk(KERN_ERR, target->scsi_host,
3302 PFX "Connection %d/%d failed\n",
3303 ch_start + cpu_idx,
3304 target->ch_count);
3305 if (node_idx == 0 && cpu_idx == 0) {
3306 goto err_disconnect;
3307 } else {
3308 srp_free_ch_ib(target, ch);
3309 srp_free_req_data(target, ch);
3310 target->ch_count = ch - target->ch;
Bart Van Asschec257ea62015-07-31 14:13:22 -07003311 goto connected;
Bart Van Assched92c0da2014-10-06 17:14:36 +02003312 }
3313 }
3314
3315 multich = true;
3316 cpu_idx++;
3317 }
3318 node_idx++;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003319 }
3320
Bart Van Asschec257ea62015-07-31 14:13:22 -07003321connected:
Bart Van Assched92c0da2014-10-06 17:14:36 +02003322 target->scsi_host->nr_hw_queues = target->ch_count;
3323
Roland Dreieraef9ec32005-11-02 14:07:13 -08003324 ret = srp_add_target(host, target);
3325 if (ret)
3326 goto err_disconnect;
3327
Bart Van Assche34aa6542014-10-30 14:47:22 +01003328 if (target->state != SRP_TARGET_REMOVED) {
3329 shost_printk(KERN_DEBUG, target->scsi_host, PFX
3330 "new target: id_ext %016llx ioc_guid %016llx pkey %04x service_id %016llx sgid %pI6 dgid %pI6\n",
3331 be64_to_cpu(target->id_ext),
3332 be64_to_cpu(target->ioc_guid),
Bart Van Assche747fe002014-10-30 14:48:05 +01003333 be16_to_cpu(target->pkey),
Bart Van Assche34aa6542014-10-30 14:47:22 +01003334 be64_to_cpu(target->service_id),
Bart Van Assche747fe002014-10-30 14:48:05 +01003335 target->sgid.raw, target->orig_dgid.raw);
Bart Van Assche34aa6542014-10-30 14:47:22 +01003336 }
Bart Van Asschee7ffde02014-03-14 13:52:21 +01003337
Bart Van Assche2d7091b2014-03-14 13:52:45 +01003338 ret = count;
3339
3340out:
3341 mutex_unlock(&host->add_target_mutex);
Bart Van Assche34aa6542014-10-30 14:47:22 +01003342
3343 scsi_host_put(target->scsi_host); /* pairs with scsi_host_get() above */
Bart Van Asschebc44bd12015-08-14 11:01:09 -07003344 if (ret < 0)
3345 scsi_host_put(target->scsi_host); /* drop the scsi_host_alloc() reference */
Bart Van Assche34aa6542014-10-30 14:47:22 +01003346
Bart Van Assche2d7091b2014-03-14 13:52:45 +01003347 return ret;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003348
3349err_disconnect:
3350 srp_disconnect_target(target);
3351
Bart Van Assched92c0da2014-10-06 17:14:36 +02003352 for (i = 0; i < target->ch_count; i++) {
3353 ch = &target->ch[i];
3354 srp_free_ch_ib(target, ch);
3355 srp_free_req_data(target, ch);
3356 }
Roland Dreieraef9ec32005-11-02 14:07:13 -08003357
Bart Van Assched92c0da2014-10-06 17:14:36 +02003358 kfree(target->ch);
Bart Van Assche2d7091b2014-03-14 13:52:45 +01003359 goto out;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003360}
3361
Tony Jonesee959b02008-02-22 00:13:36 +01003362static DEVICE_ATTR(add_target, S_IWUSR, NULL, srp_create_target);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003363
Tony Jonesee959b02008-02-22 00:13:36 +01003364static ssize_t show_ibdev(struct device *dev, struct device_attribute *attr,
3365 char *buf)
Roland Dreieraef9ec32005-11-02 14:07:13 -08003366{
Tony Jonesee959b02008-02-22 00:13:36 +01003367 struct srp_host *host = container_of(dev, struct srp_host, dev);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003368
Greg Kroah-Hartman05321932008-03-06 00:13:36 +01003369 return sprintf(buf, "%s\n", host->srp_dev->dev->name);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003370}
3371
Tony Jonesee959b02008-02-22 00:13:36 +01003372static DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003373
Tony Jonesee959b02008-02-22 00:13:36 +01003374static ssize_t show_port(struct device *dev, struct device_attribute *attr,
3375 char *buf)
Roland Dreieraef9ec32005-11-02 14:07:13 -08003376{
Tony Jonesee959b02008-02-22 00:13:36 +01003377 struct srp_host *host = container_of(dev, struct srp_host, dev);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003378
3379 return sprintf(buf, "%d\n", host->port);
3380}
3381
Tony Jonesee959b02008-02-22 00:13:36 +01003382static DEVICE_ATTR(port, S_IRUGO, show_port, NULL);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003383
Roland Dreierf5358a12006-06-17 20:37:29 -07003384static struct srp_host *srp_add_port(struct srp_device *device, u8 port)
Roland Dreieraef9ec32005-11-02 14:07:13 -08003385{
3386 struct srp_host *host;
3387
3388 host = kzalloc(sizeof *host, GFP_KERNEL);
3389 if (!host)
3390 return NULL;
3391
3392 INIT_LIST_HEAD(&host->target_list);
Matthew Wilcoxb3589fd2006-06-17 20:37:30 -07003393 spin_lock_init(&host->target_lock);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003394 init_completion(&host->released);
Bart Van Assche2d7091b2014-03-14 13:52:45 +01003395 mutex_init(&host->add_target_mutex);
Greg Kroah-Hartman05321932008-03-06 00:13:36 +01003396 host->srp_dev = device;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003397 host->port = port;
3398
Tony Jonesee959b02008-02-22 00:13:36 +01003399 host->dev.class = &srp_class;
3400 host->dev.parent = device->dev->dma_device;
Kay Sieversd927e382009-01-06 10:44:39 -08003401 dev_set_name(&host->dev, "srp-%s-%d", device->dev->name, port);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003402
Tony Jonesee959b02008-02-22 00:13:36 +01003403 if (device_register(&host->dev))
Roland Dreierf5358a12006-06-17 20:37:29 -07003404 goto free_host;
Tony Jonesee959b02008-02-22 00:13:36 +01003405 if (device_create_file(&host->dev, &dev_attr_add_target))
Roland Dreieraef9ec32005-11-02 14:07:13 -08003406 goto err_class;
Tony Jonesee959b02008-02-22 00:13:36 +01003407 if (device_create_file(&host->dev, &dev_attr_ibdev))
Roland Dreieraef9ec32005-11-02 14:07:13 -08003408 goto err_class;
Tony Jonesee959b02008-02-22 00:13:36 +01003409 if (device_create_file(&host->dev, &dev_attr_port))
Roland Dreieraef9ec32005-11-02 14:07:13 -08003410 goto err_class;
3411
3412 return host;
3413
3414err_class:
Tony Jonesee959b02008-02-22 00:13:36 +01003415 device_unregister(&host->dev);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003416
Roland Dreierf5358a12006-06-17 20:37:29 -07003417free_host:
Roland Dreieraef9ec32005-11-02 14:07:13 -08003418 kfree(host);
3419
3420 return NULL;
3421}
3422
3423static void srp_add_one(struct ib_device *device)
3424{
Roland Dreierf5358a12006-06-17 20:37:29 -07003425 struct srp_device *srp_dev;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003426 struct srp_host *host;
Hal Rosenstock41390322015-06-29 09:57:00 -04003427 int mr_page_shift, p;
Bart Van Assche52ede082014-05-20 15:07:45 +02003428 u64 max_pages_per_mr;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003429
Roland Dreierf5358a12006-06-17 20:37:29 -07003430 srp_dev = kmalloc(sizeof *srp_dev, GFP_KERNEL);
3431 if (!srp_dev)
Or Gerlitz4a061b22015-12-18 10:59:46 +02003432 return;
Roland Dreierf5358a12006-06-17 20:37:29 -07003433
3434 /*
3435 * Use the smallest page size supported by the HCA, down to a
David Dillow8f26c9f2011-01-14 19:45:50 -05003436 * minimum of 4096 bytes. We're unlikely to build large sglists
3437 * out of smaller entries.
Roland Dreierf5358a12006-06-17 20:37:29 -07003438 */
Or Gerlitz4a061b22015-12-18 10:59:46 +02003439 mr_page_shift = max(12, ffs(device->attrs.page_size_cap) - 1);
Bart Van Assche52ede082014-05-20 15:07:45 +02003440 srp_dev->mr_page_size = 1 << mr_page_shift;
3441 srp_dev->mr_page_mask = ~((u64) srp_dev->mr_page_size - 1);
Or Gerlitz4a061b22015-12-18 10:59:46 +02003442 max_pages_per_mr = device->attrs.max_mr_size;
Bart Van Assche52ede082014-05-20 15:07:45 +02003443 do_div(max_pages_per_mr, srp_dev->mr_page_size);
3444 srp_dev->max_pages_per_mr = min_t(u64, SRP_MAX_PAGES_PER_MR,
3445 max_pages_per_mr);
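	/*
	 * Example with illustrative numbers: an HCA whose smallest
	 * supported page size is 4 KB and whose max_mr_size is 4 GB
	 * yields mr_page_shift = 12, mr_page_size = 4096 and
	 * max_pages_per_mr = min(SRP_MAX_PAGES_PER_MR, 2^32 / 2^12).
	 */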
Bart Van Assche835ee622016-05-12 10:49:39 -07003446
3447 srp_dev->has_fmr = (device->alloc_fmr && device->dealloc_fmr &&
3448 device->map_phys_fmr && device->unmap_fmr);
3449 srp_dev->has_fr = (device->attrs.device_cap_flags &
3450 IB_DEVICE_MEM_MGT_EXTENSIONS);
3451 if (!srp_dev->has_fmr && !srp_dev->has_fr)
3452 dev_warn(&device->dev, "neither FMR nor FR is supported\n");
3453
3454 srp_dev->use_fast_reg = (srp_dev->has_fr &&
3455 (!srp_dev->has_fmr || prefer_fr));
3456 srp_dev->use_fmr = !srp_dev->use_fast_reg && srp_dev->has_fmr;
3457
Bart Van Assche5cfb1782014-05-20 15:08:34 +02003458 if (srp_dev->use_fast_reg) {
3459 srp_dev->max_pages_per_mr =
3460 min_t(u32, srp_dev->max_pages_per_mr,
Or Gerlitz4a061b22015-12-18 10:59:46 +02003461 device->attrs.max_fast_reg_page_list_len);
Bart Van Assche5cfb1782014-05-20 15:08:34 +02003462 }
Bart Van Assche52ede082014-05-20 15:07:45 +02003463 srp_dev->mr_max_size = srp_dev->mr_page_size *
3464 srp_dev->max_pages_per_mr;
Or Gerlitz4a061b22015-12-18 10:59:46 +02003465 pr_debug("%s: mr_page_shift = %d, device->max_mr_size = %#llx, device->max_fast_reg_page_list_len = %u, max_pages_per_mr = %d, mr_max_size = %#x\n",
3466 device->name, mr_page_shift, device->attrs.max_mr_size,
3467 device->attrs.max_fast_reg_page_list_len,
Bart Van Assche52ede082014-05-20 15:07:45 +02003468 srp_dev->max_pages_per_mr, srp_dev->mr_max_size);
Roland Dreierf5358a12006-06-17 20:37:29 -07003469
3470 INIT_LIST_HEAD(&srp_dev->dev_list);
3471
3472 srp_dev->dev = device;
3473 srp_dev->pd = ib_alloc_pd(device);
3474 if (IS_ERR(srp_dev->pd))
3475 goto free_dev;
3476
Bart Van Assche03f6fb92015-08-10 17:09:36 -07003477 if (!register_always || (!srp_dev->has_fmr && !srp_dev->has_fr)) {
3478 srp_dev->global_mr = ib_get_dma_mr(srp_dev->pd,
3479 IB_ACCESS_LOCAL_WRITE |
3480 IB_ACCESS_REMOTE_READ |
3481 IB_ACCESS_REMOTE_WRITE);
3482 if (IS_ERR(srp_dev->global_mr))
3483 goto err_pd;
3484 } else {
3485 srp_dev->global_mr = NULL;
3486 }
Roland Dreierf5358a12006-06-17 20:37:29 -07003487
Hal Rosenstock41390322015-06-29 09:57:00 -04003488 for (p = rdma_start_port(device); p <= rdma_end_port(device); ++p) {
Roland Dreierf5358a12006-06-17 20:37:29 -07003489 host = srp_add_port(srp_dev, p);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003490 if (host)
Roland Dreierf5358a12006-06-17 20:37:29 -07003491 list_add_tail(&host->list, &srp_dev->dev_list);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003492 }
3493
Roland Dreierf5358a12006-06-17 20:37:29 -07003494 ib_set_client_data(device, &srp_client, srp_dev);
Or Gerlitz4a061b22015-12-18 10:59:46 +02003495 return;
Roland Dreierf5358a12006-06-17 20:37:29 -07003496
3497err_pd:
3498 ib_dealloc_pd(srp_dev->pd);
3499
3500free_dev:
3501 kfree(srp_dev);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003502}
3503
static void srp_remove_one(struct ib_device *device, void *client_data)
{
	struct srp_device *srp_dev;
	struct srp_host *host, *tmp_host;
	struct srp_target_port *target;

	srp_dev = client_data;
	if (!srp_dev)
		return;

	list_for_each_entry_safe(host, tmp_host, &srp_dev->dev_list, list) {
		device_unregister(&host->dev);
		/*
		 * Wait for the sysfs entry to go away, so that no new
		 * target ports can be created.
		 */
		wait_for_completion(&host->released);

		/* Remove all target ports. */
		spin_lock(&host->target_lock);
		list_for_each_entry(target, &host->target_list, list)
			srp_queue_remove_work(target);
		spin_unlock(&host->target_lock);

		/* Wait for tl_err and target port removal tasks. */
		flush_workqueue(system_long_wq);
		flush_workqueue(srp_remove_wq);

		kfree(host);
	}

	if (srp_dev->global_mr)
		ib_dereg_mr(srp_dev->global_mr);
	ib_dealloc_pd(srp_dev->pd);

	kfree(srp_dev);
}

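/*
 * Hooks into the SCSI SRP transport layer (scsi_transport_srp), which
 * drives reconnect, fast_io_fail and dev_loss timeout handling for every
 * rport created by this driver.
 */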
static struct srp_function_template ib_srp_transport_functions = {
	.has_rport_state	 = true,
	.reset_timer_if_blocked	 = true,
	.reconnect_delay	 = &srp_reconnect_delay,
	.fast_io_fail_tmo	 = &srp_fast_io_fail_tmo,
	.dev_loss_tmo		 = &srp_dev_loss_tmo,
	.reconnect		 = srp_rport_reconnect,
	.rport_delete		 = srp_rport_delete,
	.terminate_rport_io	 = srp_terminate_io,
};

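/*
 * Validate the scatter/gather module parameters, then register with the
 * SRP transport layer, the IB SA client and finally the IB core. On
 * failure each step is undone in reverse order.
 */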
static int __init srp_init_module(void)
{
	int ret;

	if (srp_sg_tablesize) {
		pr_warn("srp_sg_tablesize is deprecated, please use cmd_sg_entries\n");
		if (!cmd_sg_entries)
			cmd_sg_entries = srp_sg_tablesize;
	}

	if (!cmd_sg_entries)
		cmd_sg_entries = SRP_DEF_SG_TABLESIZE;

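	/*
	 * The data-out/data-in buffer descriptor count fields of an SRP_CMD
	 * request are only eight bits wide, so a single command can never
	 * carry more than 255 S/G entries.
	 */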
	if (cmd_sg_entries > 255) {
		pr_warn("Clamping cmd_sg_entries to 255\n");
		cmd_sg_entries = 255;
	}

	if (!indirect_sg_entries)
		indirect_sg_entries = cmd_sg_entries;
	else if (indirect_sg_entries < cmd_sg_entries) {
		pr_warn("Bumping up indirect_sg_entries to match cmd_sg_entries (%u)\n",
			cmd_sg_entries);
		indirect_sg_entries = cmd_sg_entries;
	}

	srp_remove_wq = create_workqueue("srp_remove");
	if (!srp_remove_wq) {
		ret = -ENOMEM;
		goto out;
	}

	ret = -ENOMEM;
	ib_srp_transport_template =
		srp_attach_transport(&ib_srp_transport_functions);
	if (!ib_srp_transport_template)
		goto destroy_wq;

	ret = class_register(&srp_class);
	if (ret) {
		pr_err("couldn't register class infiniband_srp\n");
		goto release_tr;
	}

	ib_sa_register_client(&srp_sa_client);

	ret = ib_register_client(&srp_client);
	if (ret) {
		pr_err("couldn't register IB client\n");
		goto unreg_sa;
	}

out:
	return ret;

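	/* Error unwinding: undo the registrations in reverse order. */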
unreg_sa:
	ib_sa_unregister_client(&srp_sa_client);
	class_unregister(&srp_class);

release_tr:
	srp_release_transport(ib_srp_transport_template);

destroy_wq:
	destroy_workqueue(srp_remove_wq);
	goto out;
}

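/* Unregister everything that srp_init_module() set up, in reverse order. */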
static void __exit srp_cleanup_module(void)
{
	ib_unregister_client(&srp_client);
	ib_sa_unregister_client(&srp_sa_client);
	class_unregister(&srp_class);
	srp_release_transport(ib_srp_transport_template);
	destroy_workqueue(srp_remove_wq);
}

module_init(srp_init_module);
module_exit(srp_cleanup_module);