/*
 * Copyright (c) 2005 Cisco Systems.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/string.h>
#include <linux/parser.h>
#include <linux/random.h>
#include <linux/jiffies.h>
#include <rdma/ib_cache.h>

#include <linux/atomic.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_tcq.h>
#include <scsi/srp.h>
#include <scsi/scsi_transport_srp.h>

#include "ib_srp.h"

#define DRV_NAME        "ib_srp"
#define PFX             DRV_NAME ": "
#define DRV_VERSION     "2.0"
#define DRV_RELDATE     "July 26, 2015"

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("InfiniBand SCSI RDMA Protocol initiator");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_INFO(release_date, DRV_RELDATE);

static unsigned int srp_sg_tablesize;
static unsigned int cmd_sg_entries;
static unsigned int indirect_sg_entries;
static bool allow_ext_sg;
static bool prefer_fr = true;
static bool register_always = true;
static int topspin_workarounds = 1;

module_param(srp_sg_tablesize, uint, 0444);
MODULE_PARM_DESC(srp_sg_tablesize, "Deprecated name for cmd_sg_entries");

module_param(cmd_sg_entries, uint, 0444);
MODULE_PARM_DESC(cmd_sg_entries,
                 "Default number of gather/scatter entries in the SRP command (default is 12, max 255)");

module_param(indirect_sg_entries, uint, 0444);
MODULE_PARM_DESC(indirect_sg_entries,
                 "Default max number of gather/scatter entries (default is 12, max is " __stringify(SCSI_MAX_SG_CHAIN_SEGMENTS) ")");

module_param(allow_ext_sg, bool, 0444);
MODULE_PARM_DESC(allow_ext_sg,
                 "Default behavior when there are more than cmd_sg_entries S/G entries after mapping; fails the request when false (default false)");

module_param(topspin_workarounds, int, 0444);
MODULE_PARM_DESC(topspin_workarounds,
                 "Enable workarounds for Topspin/Cisco SRP target bugs if != 0");

module_param(prefer_fr, bool, 0444);
MODULE_PARM_DESC(prefer_fr,
                 "Whether to use fast registration if both FMR and fast registration are supported");

module_param(register_always, bool, 0444);
MODULE_PARM_DESC(register_always,
                 "Use memory registration even for contiguous memory regions");

static const struct kernel_param_ops srp_tmo_ops;

static int srp_reconnect_delay = 10;
module_param_cb(reconnect_delay, &srp_tmo_ops, &srp_reconnect_delay,
                S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(reconnect_delay, "Time between successive reconnect attempts");

static int srp_fast_io_fail_tmo = 15;
module_param_cb(fast_io_fail_tmo, &srp_tmo_ops, &srp_fast_io_fail_tmo,
                S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fast_io_fail_tmo,
                 "Number of seconds between the observation of a transport"
                 " layer error and failing all I/O. \"off\" means that this"
                 " functionality is disabled.");

static int srp_dev_loss_tmo = 600;
module_param_cb(dev_loss_tmo, &srp_tmo_ops, &srp_dev_loss_tmo,
                S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dev_loss_tmo,
                 "Maximum number of seconds that the SRP transport should"
                 " insulate transport layer errors. After this time has been"
                 " exceeded the SCSI host is removed. Should be"
                 " between 1 and " __stringify(SCSI_DEVICE_BLOCK_MAX_TIMEOUT)
                 " if fast_io_fail_tmo has not been set. \"off\" means that"
                 " this functionality is disabled.");

static unsigned ch_count;
module_param(ch_count, uint, 0444);
MODULE_PARM_DESC(ch_count,
                 "Number of RDMA channels to use for communication with an SRP target. Using more than one channel improves performance if the HCA supports multiple completion vectors. The default value is the minimum of four times the number of online CPU sockets and the number of completion vectors supported by the HCA.");

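/*
 * Example (hypothetical values, not part of the driver): all of the
 * parameters above appear under /sys/module/ib_srp/parameters/. The
 * read-only ones (mode 0444) can only be set at load time, while the
 * timeout parameters registered with module_param_cb() are also
 * writable at runtime:
 *
 *   modprobe ib_srp cmd_sg_entries=64 ch_count=4
 *   echo 5 > /sys/module/ib_srp/parameters/reconnect_delay
 */
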
static void srp_add_one(struct ib_device *device);
static void srp_remove_one(struct ib_device *device, void *client_data);
static void srp_recv_done(struct ib_cq *cq, struct ib_wc *wc);
static void srp_handle_qp_err(struct ib_cq *cq, struct ib_wc *wc,
                const char *opname);
static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event);

static struct scsi_transport_template *ib_srp_transport_template;
static struct workqueue_struct *srp_remove_wq;

static struct ib_client srp_client = {
        .name   = "srp",
        .add    = srp_add_one,
        .remove = srp_remove_one
};

static struct ib_sa_client srp_sa_client;

static int srp_tmo_get(char *buffer, const struct kernel_param *kp)
{
        int tmo = *(int *)kp->arg;

        if (tmo >= 0)
                return sprintf(buffer, "%d", tmo);
        else
                return sprintf(buffer, "off");
}

static int srp_tmo_set(const char *val, const struct kernel_param *kp)
{
        int tmo, res;

        res = srp_parse_tmo(&tmo, val);
        if (res)
                goto out;

        if (kp->arg == &srp_reconnect_delay)
                res = srp_tmo_valid(tmo, srp_fast_io_fail_tmo,
                                    srp_dev_loss_tmo);
        else if (kp->arg == &srp_fast_io_fail_tmo)
                res = srp_tmo_valid(srp_reconnect_delay, tmo, srp_dev_loss_tmo);
        else
                res = srp_tmo_valid(srp_reconnect_delay, srp_fast_io_fail_tmo,
                                    tmo);
        if (res)
                goto out;
        *(int *)kp->arg = tmo;

out:
        return res;
}

static const struct kernel_param_ops srp_tmo_ops = {
        .get = srp_tmo_get,
        .set = srp_tmo_set,
};
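
/*
 * Illustration (not driver code) of the srp_tmo_ops behavior defined
 * above: a negative stored timeout reads back as the string "off", and
 * a write of "off" is parsed by srp_parse_tmo() into a negative value
 * after cross-validation against the other two timeouts:
 *
 *   $ cat /sys/module/ib_srp/parameters/fast_io_fail_tmo
 *   15
 *   $ echo off > /sys/module/ib_srp/parameters/fast_io_fail_tmo
 */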

static inline struct srp_target_port *host_to_target(struct Scsi_Host *host)
{
        return (struct srp_target_port *) host->hostdata;
}

static const char *srp_target_info(struct Scsi_Host *host)
{
        return host_to_target(host)->target_name;
}

static int srp_target_is_topspin(struct srp_target_port *target)
{
        static const u8 topspin_oui[3] = { 0x00, 0x05, 0xad };
        static const u8 cisco_oui[3]   = { 0x00, 0x1b, 0x0d };

        return topspin_workarounds &&
                (!memcmp(&target->ioc_guid, topspin_oui, sizeof topspin_oui) ||
                 !memcmp(&target->ioc_guid, cisco_oui, sizeof cisco_oui));
}

static struct srp_iu *srp_alloc_iu(struct srp_host *host, size_t size,
                                   gfp_t gfp_mask,
                                   enum dma_data_direction direction)
{
        struct srp_iu *iu;

        iu = kmalloc(sizeof *iu, gfp_mask);
        if (!iu)
                goto out;

        iu->buf = kzalloc(size, gfp_mask);
        if (!iu->buf)
                goto out_free_iu;

        iu->dma = ib_dma_map_single(host->srp_dev->dev, iu->buf, size,
                                    direction);
        if (ib_dma_mapping_error(host->srp_dev->dev, iu->dma))
                goto out_free_buf;

        iu->size      = size;
        iu->direction = direction;

        return iu;

out_free_buf:
        kfree(iu->buf);
out_free_iu:
        kfree(iu);
out:
        return NULL;
}

static void srp_free_iu(struct srp_host *host, struct srp_iu *iu)
{
        if (!iu)
                return;

        ib_dma_unmap_single(host->srp_dev->dev, iu->dma, iu->size,
                            iu->direction);
        kfree(iu->buf);
        kfree(iu);
}

static void srp_qp_event(struct ib_event *event, void *context)
{
        pr_debug("QP event %s (%d)\n",
                 ib_event_msg(event->event), event->event);
}

static int srp_init_qp(struct srp_target_port *target,
                       struct ib_qp *qp)
{
        struct ib_qp_attr *attr;
        int ret;

        attr = kmalloc(sizeof *attr, GFP_KERNEL);
        if (!attr)
                return -ENOMEM;

        ret = ib_find_cached_pkey(target->srp_host->srp_dev->dev,
                                  target->srp_host->port,
                                  be16_to_cpu(target->pkey),
                                  &attr->pkey_index);
        if (ret)
                goto out;

        attr->qp_state        = IB_QPS_INIT;
        attr->qp_access_flags = (IB_ACCESS_REMOTE_READ |
                                 IB_ACCESS_REMOTE_WRITE);
        attr->port_num        = target->srp_host->port;

        ret = ib_modify_qp(qp, attr,
                           IB_QP_STATE          |
                           IB_QP_PKEY_INDEX     |
                           IB_QP_ACCESS_FLAGS   |
                           IB_QP_PORT);

out:
        kfree(attr);
        return ret;
}

static int srp_new_cm_id(struct srp_rdma_ch *ch)
{
        struct srp_target_port *target = ch->target;
        struct ib_cm_id *new_cm_id;

        new_cm_id = ib_create_cm_id(target->srp_host->srp_dev->dev,
                                    srp_cm_handler, ch);
        if (IS_ERR(new_cm_id))
                return PTR_ERR(new_cm_id);

        if (ch->cm_id)
                ib_destroy_cm_id(ch->cm_id);
        ch->cm_id = new_cm_id;
        ch->path.sgid = target->sgid;
        ch->path.dgid = target->orig_dgid;
        ch->path.pkey = target->pkey;
        ch->path.service_id = target->service_id;

        return 0;
}

static struct ib_fmr_pool *srp_alloc_fmr_pool(struct srp_target_port *target)
{
        struct srp_device *dev = target->srp_host->srp_dev;
        struct ib_fmr_pool_param fmr_param;

        memset(&fmr_param, 0, sizeof(fmr_param));
        fmr_param.pool_size         = target->scsi_host->can_queue;
        fmr_param.dirty_watermark   = fmr_param.pool_size / 4;
        fmr_param.cache             = 1;
        fmr_param.max_pages_per_fmr = dev->max_pages_per_mr;
        fmr_param.page_shift        = ilog2(dev->mr_page_size);
        fmr_param.access            = (IB_ACCESS_LOCAL_WRITE |
                                       IB_ACCESS_REMOTE_WRITE |
                                       IB_ACCESS_REMOTE_READ);

        return ib_create_fmr_pool(dev->pd, &fmr_param);
}
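
/*
 * Worked example of the sizing above (hypothetical numbers): with
 * scsi_host->can_queue == 64, the pool is created with 64 FMRs and a
 * dirty watermark of 64 / 4 == 16, i.e. unmapped-but-cached FMRs are
 * flushed once a quarter of the pool is dirty.
 */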

/**
 * srp_destroy_fr_pool() - free the resources owned by a pool
 * @pool: Fast registration pool to be destroyed.
 */
static void srp_destroy_fr_pool(struct srp_fr_pool *pool)
{
        int i;
        struct srp_fr_desc *d;

        if (!pool)
                return;

        for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
                if (d->mr)
                        ib_dereg_mr(d->mr);
        }
        kfree(pool);
}

/**
 * srp_create_fr_pool() - allocate and initialize a pool for fast registration
 * @device:            IB device to allocate fast registration descriptors for.
 * @pd:                Protection domain associated with the FR descriptors.
 * @pool_size:         Number of descriptors to allocate.
 * @max_page_list_len: Maximum fast registration work request page list length.
 */
static struct srp_fr_pool *srp_create_fr_pool(struct ib_device *device,
                                              struct ib_pd *pd, int pool_size,
                                              int max_page_list_len)
{
        struct srp_fr_pool *pool;
        struct srp_fr_desc *d;
        struct ib_mr *mr;
        int i, ret = -EINVAL;

        if (pool_size <= 0)
                goto err;
        ret = -ENOMEM;
        pool = kzalloc(sizeof(struct srp_fr_pool) +
                       pool_size * sizeof(struct srp_fr_desc), GFP_KERNEL);
        if (!pool)
                goto err;
        pool->size = pool_size;
        pool->max_page_list_len = max_page_list_len;
        spin_lock_init(&pool->lock);
        INIT_LIST_HEAD(&pool->free_list);

        for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
                mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG,
                                 max_page_list_len);
                if (IS_ERR(mr)) {
                        ret = PTR_ERR(mr);
                        goto destroy_pool;
                }
                d->mr = mr;
                list_add_tail(&d->entry, &pool->free_list);
        }

out:
        return pool;

destroy_pool:
        srp_destroy_fr_pool(pool);

err:
        pool = ERR_PTR(ret);
        goto out;
}

/**
 * srp_fr_pool_get() - obtain a descriptor suitable for fast registration
 * @pool: Pool to obtain descriptor from.
 */
static struct srp_fr_desc *srp_fr_pool_get(struct srp_fr_pool *pool)
{
        struct srp_fr_desc *d = NULL;
        unsigned long flags;

        spin_lock_irqsave(&pool->lock, flags);
        if (!list_empty(&pool->free_list)) {
                d = list_first_entry(&pool->free_list, typeof(*d), entry);
                list_del(&d->entry);
        }
        spin_unlock_irqrestore(&pool->lock, flags);

        return d;
}

/**
 * srp_fr_pool_put() - put an FR descriptor back in the free list
 * @pool: Pool the descriptor was allocated from.
 * @desc: Pointer to an array of fast registration descriptor pointers.
 * @n:    Number of descriptors to put back.
 *
 * Note: The caller must already have queued an invalidation request for
 * desc->mr->rkey before calling this function.
 */
static void srp_fr_pool_put(struct srp_fr_pool *pool, struct srp_fr_desc **desc,
                            int n)
{
        unsigned long flags;
        int i;

        spin_lock_irqsave(&pool->lock, flags);
        for (i = 0; i < n; i++)
                list_add(&desc[i]->entry, &pool->free_list);
        spin_unlock_irqrestore(&pool->lock, flags);
}
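
/*
 * Typical descriptor life cycle (sketch; the real call sites are
 * srp_map_finish_fr() and srp_unmap_data() further down):
 *
 *   desc = srp_fr_pool_get(pool);      // take a registration descriptor
 *   ...post an IB_WR_REG_MR work request that uses desc->mr...
 *   ...post an IB_WR_LOCAL_INV for desc->mr->rkey once the I/O is done...
 *   srp_fr_pool_put(pool, &desc, 1);   // return it to the free list
 */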

static struct srp_fr_pool *srp_alloc_fr_pool(struct srp_target_port *target)
{
        struct srp_device *dev = target->srp_host->srp_dev;

        return srp_create_fr_pool(dev->dev, dev->pd,
                                  target->scsi_host->can_queue,
                                  dev->max_pages_per_mr);
}

/**
 * srp_destroy_qp() - destroy an RDMA queue pair
 * @ch: SRP RDMA channel.
 *
 * Drain the qp before destroying it. This prevents the receive
 * completion handler from accessing the queue pair while it is
 * being destroyed.
 */
static void srp_destroy_qp(struct srp_rdma_ch *ch)
{
        ib_drain_rq(ch->qp);
        ib_destroy_qp(ch->qp);
}
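
/*
 * Rationale sketch: ib_drain_rq() moves the QP into the error state and
 * blocks until all outstanding receive work requests have been flushed,
 * so by the time ib_destroy_qp() runs no receive completion can still
 * reference this channel.
 */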

static int srp_create_ch_ib(struct srp_rdma_ch *ch)
{
        struct srp_target_port *target = ch->target;
        struct srp_device *dev = target->srp_host->srp_dev;
        struct ib_qp_init_attr *init_attr;
        struct ib_cq *recv_cq, *send_cq;
        struct ib_qp *qp;
        struct ib_fmr_pool *fmr_pool = NULL;
        struct srp_fr_pool *fr_pool = NULL;
        const int m = dev->use_fast_reg ? 3 : 1;
        int ret;

        init_attr = kzalloc(sizeof *init_attr, GFP_KERNEL);
        if (!init_attr)
                return -ENOMEM;

        /* queue_size + 1 for ib_drain_rq() */
        recv_cq = ib_alloc_cq(dev->dev, ch, target->queue_size + 1,
                              ch->comp_vector, IB_POLL_SOFTIRQ);
        if (IS_ERR(recv_cq)) {
                ret = PTR_ERR(recv_cq);
                goto err;
        }

        send_cq = ib_alloc_cq(dev->dev, ch, m * target->queue_size,
                              ch->comp_vector, IB_POLL_DIRECT);
        if (IS_ERR(send_cq)) {
                ret = PTR_ERR(send_cq);
                goto err_recv_cq;
        }

        init_attr->event_handler       = srp_qp_event;
        init_attr->cap.max_send_wr     = m * target->queue_size;
        init_attr->cap.max_recv_wr     = target->queue_size + 1;
        init_attr->cap.max_recv_sge    = 1;
        init_attr->cap.max_send_sge    = 1;
        init_attr->sq_sig_type         = IB_SIGNAL_REQ_WR;
        init_attr->qp_type             = IB_QPT_RC;
        init_attr->send_cq             = send_cq;
        init_attr->recv_cq             = recv_cq;

        qp = ib_create_qp(dev->pd, init_attr);
        if (IS_ERR(qp)) {
                ret = PTR_ERR(qp);
                goto err_send_cq;
        }

        ret = srp_init_qp(target, qp);
        if (ret)
                goto err_qp;

        if (dev->use_fast_reg) {
                fr_pool = srp_alloc_fr_pool(target);
                if (IS_ERR(fr_pool)) {
                        ret = PTR_ERR(fr_pool);
                        shost_printk(KERN_WARNING, target->scsi_host, PFX
                                     "FR pool allocation failed (%d)\n", ret);
                        goto err_qp;
                }
        } else if (dev->use_fmr) {
                fmr_pool = srp_alloc_fmr_pool(target);
                if (IS_ERR(fmr_pool)) {
                        ret = PTR_ERR(fmr_pool);
                        shost_printk(KERN_WARNING, target->scsi_host, PFX
                                     "FMR pool allocation failed (%d)\n", ret);
                        goto err_qp;
                }
        }

        if (ch->qp)
                srp_destroy_qp(ch);
        if (ch->recv_cq)
                ib_free_cq(ch->recv_cq);
        if (ch->send_cq)
                ib_free_cq(ch->send_cq);

        ch->qp = qp;
        ch->recv_cq = recv_cq;
        ch->send_cq = send_cq;

        if (dev->use_fast_reg) {
                if (ch->fr_pool)
                        srp_destroy_fr_pool(ch->fr_pool);
                ch->fr_pool = fr_pool;
        } else if (dev->use_fmr) {
                if (ch->fmr_pool)
                        ib_destroy_fmr_pool(ch->fmr_pool);
                ch->fmr_pool = fmr_pool;
        }

        kfree(init_attr);
        return 0;

err_qp:
        srp_destroy_qp(ch);

err_send_cq:
        ib_free_cq(send_cq);

err_recv_cq:
        ib_free_cq(recv_cq);

err:
        kfree(init_attr);
        return ret;
}

/*
 * Note: this function may be called without srp_alloc_iu_bufs() having been
 * invoked. Hence the ch->[rt]x_ring checks.
 */
static void srp_free_ch_ib(struct srp_target_port *target,
                           struct srp_rdma_ch *ch)
{
        struct srp_device *dev = target->srp_host->srp_dev;
        int i;

        if (!ch->target)
                return;

        if (ch->cm_id) {
                ib_destroy_cm_id(ch->cm_id);
                ch->cm_id = NULL;
        }

        /* If srp_new_cm_id() succeeded but srp_create_ch_ib() did not, return. */
        if (!ch->qp)
                return;

        if (dev->use_fast_reg) {
                if (ch->fr_pool)
                        srp_destroy_fr_pool(ch->fr_pool);
        } else if (dev->use_fmr) {
                if (ch->fmr_pool)
                        ib_destroy_fmr_pool(ch->fmr_pool);
        }

        srp_destroy_qp(ch);
        ib_free_cq(ch->send_cq);
        ib_free_cq(ch->recv_cq);

        /*
         * Clear ch->target to prevent the SCSI error handler from using
         * this channel after it has been freed: the error handler may keep
         * trying to perform recovery actions even after scsi_remove_host()
         * has returned.
         */
        ch->target = NULL;

        ch->qp = NULL;
        ch->send_cq = ch->recv_cq = NULL;

        if (ch->rx_ring) {
                for (i = 0; i < target->queue_size; ++i)
                        srp_free_iu(target->srp_host, ch->rx_ring[i]);
                kfree(ch->rx_ring);
                ch->rx_ring = NULL;
        }
        if (ch->tx_ring) {
                for (i = 0; i < target->queue_size; ++i)
                        srp_free_iu(target->srp_host, ch->tx_ring[i]);
                kfree(ch->tx_ring);
                ch->tx_ring = NULL;
        }
}

static void srp_path_rec_completion(int status,
                                    struct ib_sa_path_rec *pathrec,
                                    void *ch_ptr)
{
        struct srp_rdma_ch *ch = ch_ptr;
        struct srp_target_port *target = ch->target;

        ch->status = status;
        if (status)
                shost_printk(KERN_ERR, target->scsi_host,
                             PFX "Got failed path rec status %d\n", status);
        else
                ch->path = *pathrec;
        complete(&ch->done);
}

static int srp_lookup_path(struct srp_rdma_ch *ch)
{
        struct srp_target_port *target = ch->target;
        int ret;

        ch->path.numb_path = 1;

        init_completion(&ch->done);

        ch->path_query_id = ib_sa_path_rec_get(&srp_sa_client,
                                               target->srp_host->srp_dev->dev,
                                               target->srp_host->port,
                                               &ch->path,
                                               IB_SA_PATH_REC_SERVICE_ID |
                                               IB_SA_PATH_REC_DGID       |
                                               IB_SA_PATH_REC_SGID       |
                                               IB_SA_PATH_REC_NUMB_PATH  |
                                               IB_SA_PATH_REC_PKEY,
                                               SRP_PATH_REC_TIMEOUT_MS,
                                               GFP_KERNEL,
                                               srp_path_rec_completion,
                                               ch, &ch->path_query);
        if (ch->path_query_id < 0)
                return ch->path_query_id;

        ret = wait_for_completion_interruptible(&ch->done);
        if (ret < 0)
                return ret;

        if (ch->status < 0)
                shost_printk(KERN_WARNING, target->scsi_host,
                             PFX "Path record query failed\n");

        return ch->status;
}
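
/*
 * The lookup above follows the usual asynchronous ib_sa pattern
 * (simplified sketch of the code above, not additional driver logic):
 *
 *   init_completion(&ch->done);
 *   ib_sa_path_rec_get(..., srp_path_rec_completion, ch, &ch->path_query);
 *   wait_for_completion_interruptible(&ch->done);
 *   // srp_path_rec_completion() has stored the path record in ch->path
 *   // and the query result in ch->status
 */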

static int srp_send_req(struct srp_rdma_ch *ch, bool multich)
{
        struct srp_target_port *target = ch->target;
        struct {
                struct ib_cm_req_param param;
                struct srp_login_req   priv;
        } *req = NULL;
        int status;

        req = kzalloc(sizeof *req, GFP_KERNEL);
        if (!req)
                return -ENOMEM;

        req->param.primary_path       = &ch->path;
        req->param.alternate_path     = NULL;
        req->param.service_id         = target->service_id;
        req->param.qp_num             = ch->qp->qp_num;
        req->param.qp_type            = ch->qp->qp_type;
        req->param.private_data       = &req->priv;
        req->param.private_data_len   = sizeof req->priv;
        req->param.flow_control       = 1;

        get_random_bytes(&req->param.starting_psn, 4);
        req->param.starting_psn      &= 0xffffff;

        /*
         * Pick some arbitrary defaults here; we could make these
         * module parameters if anyone cared about setting them.
         */
        req->param.responder_resources        = 4;
        req->param.remote_cm_response_timeout = 20;
        req->param.local_cm_response_timeout  = 20;
        req->param.retry_count                = target->tl_retry_count;
        req->param.rnr_retry_count            = 7;
        req->param.max_cm_retries             = 15;

        req->priv.opcode        = SRP_LOGIN_REQ;
        req->priv.tag           = 0;
        req->priv.req_it_iu_len = cpu_to_be32(target->max_iu_len);
        req->priv.req_buf_fmt   = cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
                                              SRP_BUF_FORMAT_INDIRECT);
        req->priv.req_flags     = (multich ? SRP_MULTICHAN_MULTI :
                                   SRP_MULTICHAN_SINGLE);
        /*
         * In the published SRP specification (draft rev. 16a), the
         * port identifier format is 8 bytes of ID extension followed
         * by 8 bytes of GUID.  Older drafts put the two halves in the
         * opposite order, so that the GUID comes first.
         *
         * Targets conforming to these obsolete drafts can be
         * recognized by the I/O Class they report.
         */
        if (target->io_class == SRP_REV10_IB_IO_CLASS) {
                memcpy(req->priv.initiator_port_id,
                       &target->sgid.global.interface_id, 8);
                memcpy(req->priv.initiator_port_id + 8,
                       &target->initiator_ext, 8);
                memcpy(req->priv.target_port_id,     &target->ioc_guid, 8);
                memcpy(req->priv.target_port_id + 8, &target->id_ext, 8);
        } else {
                memcpy(req->priv.initiator_port_id,
                       &target->initiator_ext, 8);
                memcpy(req->priv.initiator_port_id + 8,
                       &target->sgid.global.interface_id, 8);
                memcpy(req->priv.target_port_id,     &target->id_ext, 8);
                memcpy(req->priv.target_port_id + 8, &target->ioc_guid, 8);
        }

        /*
         * Topspin/Cisco SRP targets will reject our login unless we
         * zero out the first 8 bytes of our initiator port ID and set
         * the second 8 bytes to the local node GUID.
         */
        if (srp_target_is_topspin(target)) {
                shost_printk(KERN_DEBUG, target->scsi_host,
                             PFX "Topspin/Cisco initiator port ID workaround "
                             "activated for target GUID %016llx\n",
                             be64_to_cpu(target->ioc_guid));
                memset(req->priv.initiator_port_id, 0, 8);
                memcpy(req->priv.initiator_port_id + 8,
                       &target->srp_host->srp_dev->dev->node_guid, 8);
        }

        status = ib_send_cm_req(ch->cm_id, &req->param);

        kfree(req);

        return status;
}
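
/*
 * Worked layout of the 16-byte port identifiers built above (per the
 * draft rev. 16a comment in srp_send_req()):
 *
 *   current format:   initiator_port_id = initiator_ext | port GUID
 *                     target_port_id    = id_ext        | ioc_guid
 *   SRP_REV10_IB_IO_CLASS (obsolete drafts): both identifiers have
 *                     their two 8-byte halves swapped, GUID first.
 */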

static bool srp_queue_remove_work(struct srp_target_port *target)
{
        bool changed = false;

        spin_lock_irq(&target->lock);
        if (target->state != SRP_TARGET_REMOVED) {
                target->state = SRP_TARGET_REMOVED;
                changed = true;
        }
        spin_unlock_irq(&target->lock);

        if (changed)
                queue_work(srp_remove_wq, &target->remove_work);

        return changed;
}

static void srp_disconnect_target(struct srp_target_port *target)
{
        struct srp_rdma_ch *ch;
        int i;

        /* XXX should send SRP_I_LOGOUT request */

        for (i = 0; i < target->ch_count; i++) {
                ch = &target->ch[i];
                ch->connected = false;
                if (ch->cm_id && ib_send_cm_dreq(ch->cm_id, NULL, 0)) {
                        shost_printk(KERN_DEBUG, target->scsi_host,
                                     PFX "Sending CM DREQ failed\n");
                }
        }
}

static void srp_free_req_data(struct srp_target_port *target,
                              struct srp_rdma_ch *ch)
{
        struct srp_device *dev = target->srp_host->srp_dev;
        struct ib_device *ibdev = dev->dev;
        struct srp_request *req;
        int i;

        if (!ch->req_ring)
                return;

        for (i = 0; i < target->req_ring_size; ++i) {
                req = &ch->req_ring[i];
                if (dev->use_fast_reg) {
                        kfree(req->fr_list);
                } else {
                        kfree(req->fmr_list);
                        kfree(req->map_page);
                }
                if (req->indirect_dma_addr) {
                        ib_dma_unmap_single(ibdev, req->indirect_dma_addr,
                                            target->indirect_size,
                                            DMA_TO_DEVICE);
                }
                kfree(req->indirect_desc);
        }

        kfree(ch->req_ring);
        ch->req_ring = NULL;
}

static int srp_alloc_req_data(struct srp_rdma_ch *ch)
{
        struct srp_target_port *target = ch->target;
        struct srp_device *srp_dev = target->srp_host->srp_dev;
        struct ib_device *ibdev = srp_dev->dev;
        struct srp_request *req;
        void *mr_list;
        dma_addr_t dma_addr;
        int i, ret = -ENOMEM;

        ch->req_ring = kcalloc(target->req_ring_size, sizeof(*ch->req_ring),
                               GFP_KERNEL);
        if (!ch->req_ring)
                goto out;

        for (i = 0; i < target->req_ring_size; ++i) {
                req = &ch->req_ring[i];
                mr_list = kmalloc(target->cmd_sg_cnt * sizeof(void *),
                                  GFP_KERNEL);
                if (!mr_list)
                        goto out;
                if (srp_dev->use_fast_reg) {
                        req->fr_list = mr_list;
                } else {
                        req->fmr_list = mr_list;
                        req->map_page = kmalloc(srp_dev->max_pages_per_mr *
                                                sizeof(void *), GFP_KERNEL);
                        if (!req->map_page)
                                goto out;
                }
                req->indirect_desc = kmalloc(target->indirect_size, GFP_KERNEL);
                if (!req->indirect_desc)
                        goto out;

                dma_addr = ib_dma_map_single(ibdev, req->indirect_desc,
                                             target->indirect_size,
                                             DMA_TO_DEVICE);
                if (ib_dma_mapping_error(ibdev, dma_addr))
                        goto out;

                req->indirect_dma_addr = dma_addr;
        }
        ret = 0;

out:
        return ret;
}

/**
 * srp_del_scsi_host_attr() - Remove attributes defined in the host template.
 * @shost: SCSI host whose attributes to remove from sysfs.
 *
 * Note: Any attributes defined in the host template and that did not exist
 * before invocation of this function will be ignored.
 */
static void srp_del_scsi_host_attr(struct Scsi_Host *shost)
{
        struct device_attribute **attr;

        for (attr = shost->hostt->shost_attrs; attr && *attr; ++attr)
                device_remove_file(&shost->shost_dev, *attr);
}

static void srp_remove_target(struct srp_target_port *target)
{
        struct srp_rdma_ch *ch;
        int i;

        WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);

        srp_del_scsi_host_attr(target->scsi_host);
        srp_rport_get(target->rport);
        srp_remove_host(target->scsi_host);
        scsi_remove_host(target->scsi_host);
        srp_stop_rport_timers(target->rport);
        srp_disconnect_target(target);
        for (i = 0; i < target->ch_count; i++) {
                ch = &target->ch[i];
                srp_free_ch_ib(target, ch);
        }
        cancel_work_sync(&target->tl_err_work);
        srp_rport_put(target->rport);
        for (i = 0; i < target->ch_count; i++) {
                ch = &target->ch[i];
                srp_free_req_data(target, ch);
        }
        kfree(target->ch);
        target->ch = NULL;

        spin_lock(&target->srp_host->target_lock);
        list_del(&target->list);
        spin_unlock(&target->srp_host->target_lock);

        scsi_host_put(target->scsi_host);
}

static void srp_remove_work(struct work_struct *work)
{
        struct srp_target_port *target =
                container_of(work, struct srp_target_port, remove_work);

        WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);

        srp_remove_target(target);
}

static void srp_rport_delete(struct srp_rport *rport)
{
        struct srp_target_port *target = rport->lld_data;

        srp_queue_remove_work(target);
}

/**
 * srp_connected_ch() - number of connected channels
 * @target: SRP target port.
 */
static int srp_connected_ch(struct srp_target_port *target)
{
        int i, c = 0;

        for (i = 0; i < target->ch_count; i++)
                c += target->ch[i].connected;

        return c;
}

static int srp_connect_ch(struct srp_rdma_ch *ch, bool multich)
{
        struct srp_target_port *target = ch->target;
        int ret;

        WARN_ON_ONCE(!multich && srp_connected_ch(target) > 0);

        ret = srp_lookup_path(ch);
        if (ret)
                goto out;

        while (1) {
                init_completion(&ch->done);
                ret = srp_send_req(ch, multich);
                if (ret)
                        goto out;
                ret = wait_for_completion_interruptible(&ch->done);
                if (ret < 0)
                        goto out;

                /*
                 * The CM event handling code will set status to
                 * SRP_PORT_REDIRECT if we get a port redirect REJ
                 * back, or SRP_DLID_REDIRECT if we get a lid/qp
                 * redirect REJ back.
                 */
                ret = ch->status;
                switch (ret) {
                case 0:
                        ch->connected = true;
                        goto out;

                case SRP_PORT_REDIRECT:
                        ret = srp_lookup_path(ch);
                        if (ret)
                                goto out;
                        break;

                case SRP_DLID_REDIRECT:
                        break;

                case SRP_STALE_CONN:
                        shost_printk(KERN_ERR, target->scsi_host, PFX
                                     "giving up on stale connection\n");
                        ret = -ECONNRESET;
                        goto out;

                default:
                        goto out;
                }
        }

out:
        return ret <= 0 ? ret : -ENODEV;
}
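
/*
 * Summary of the login retry loop above: srp_cm_handler() translates CM
 * rejects into ch->status, and the switch statement reacts as follows:
 *
 *   0                 -> connected; done
 *   SRP_PORT_REDIRECT -> redo the path lookup, then log in again
 *   SRP_DLID_REDIRECT -> log in again (the CM code already updated the path)
 *   SRP_STALE_CONN    -> give up and return -ECONNRESET
 *   anything else     -> return the error to the caller
 */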

static void srp_inv_rkey_err_done(struct ib_cq *cq, struct ib_wc *wc)
{
        srp_handle_qp_err(cq, wc, "INV RKEY");
}

static int srp_inv_rkey(struct srp_request *req, struct srp_rdma_ch *ch,
                u32 rkey)
{
        struct ib_send_wr *bad_wr;
        struct ib_send_wr wr = {
                .opcode             = IB_WR_LOCAL_INV,
                .next               = NULL,
                .num_sge            = 0,
                .send_flags         = 0,
                .ex.invalidate_rkey = rkey,
        };

        wr.wr_cqe = &req->reg_cqe;
        req->reg_cqe.done = srp_inv_rkey_err_done;
        return ib_post_send(ch->qp, &wr, &bad_wr);
}

static void srp_unmap_data(struct scsi_cmnd *scmnd,
                           struct srp_rdma_ch *ch,
                           struct srp_request *req)
{
        struct srp_target_port *target = ch->target;
        struct srp_device *dev = target->srp_host->srp_dev;
        struct ib_device *ibdev = dev->dev;
        int i, res;

        if (!scsi_sglist(scmnd) ||
            (scmnd->sc_data_direction != DMA_TO_DEVICE &&
             scmnd->sc_data_direction != DMA_FROM_DEVICE))
                return;

        if (dev->use_fast_reg) {
                struct srp_fr_desc **pfr;

                for (i = req->nmdesc, pfr = req->fr_list; i > 0; i--, pfr++) {
                        res = srp_inv_rkey(req, ch, (*pfr)->mr->rkey);
                        if (res < 0) {
                                shost_printk(KERN_ERR, target->scsi_host, PFX
                                             "Queueing INV WR for rkey %#x failed (%d)\n",
                                             (*pfr)->mr->rkey, res);
                                queue_work(system_long_wq,
                                           &target->tl_err_work);
                        }
                }
                if (req->nmdesc)
                        srp_fr_pool_put(ch->fr_pool, req->fr_list,
                                        req->nmdesc);
        } else if (dev->use_fmr) {
                struct ib_pool_fmr **pfmr;

                for (i = req->nmdesc, pfmr = req->fmr_list; i > 0; i--, pfmr++)
                        ib_fmr_pool_unmap(*pfmr);
        }

        ib_dma_unmap_sg(ibdev, scsi_sglist(scmnd), scsi_sg_count(scmnd),
                        scmnd->sc_data_direction);
}

/**
 * srp_claim_req - Take ownership of the scmnd associated with a request.
 * @ch: SRP RDMA channel.
 * @req: SRP request.
 * @sdev: If not NULL, only take ownership for this SCSI device.
 * @scmnd: If NULL, take ownership of @req->scmnd. If not NULL, only take
 *         ownership of @req->scmnd if it equals @scmnd.
 *
 * Return value:
 * Either NULL or a pointer to the SCSI command the caller became owner of.
 */
static struct scsi_cmnd *srp_claim_req(struct srp_rdma_ch *ch,
                                       struct srp_request *req,
                                       struct scsi_device *sdev,
                                       struct scsi_cmnd *scmnd)
{
        unsigned long flags;

        spin_lock_irqsave(&ch->lock, flags);
        if (req->scmnd &&
            (!sdev || req->scmnd->device == sdev) &&
            (!scmnd || req->scmnd == scmnd)) {
                scmnd = req->scmnd;
                req->scmnd = NULL;
        } else {
                scmnd = NULL;
        }
        spin_unlock_irqrestore(&ch->lock, flags);

        return scmnd;
}
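
/*
 * Usage sketch (cf. srp_finish_req() below): completion and error paths
 * first claim the command and only finish it when the claim succeeds,
 * which prevents a normal completion and an abort from both completing
 * the same scmnd:
 *
 *   scmnd = srp_claim_req(ch, req, NULL, NULL);
 *   if (scmnd)
 *           scmnd->scsi_done(scmnd);
 */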

/**
 * srp_free_req() - Unmap data and add request to the free request list.
 * @ch:     SRP RDMA channel.
 * @req:    Request to be freed.
 * @scmnd:  SCSI command associated with @req.
 * @req_lim_delta: Amount to be added to @target->req_lim.
 */
static void srp_free_req(struct srp_rdma_ch *ch, struct srp_request *req,
                         struct scsi_cmnd *scmnd, s32 req_lim_delta)
{
        unsigned long flags;

        srp_unmap_data(scmnd, ch, req);

        spin_lock_irqsave(&ch->lock, flags);
        ch->req_lim += req_lim_delta;
        spin_unlock_irqrestore(&ch->lock, flags);
}

static void srp_finish_req(struct srp_rdma_ch *ch, struct srp_request *req,
                           struct scsi_device *sdev, int result)
{
        struct scsi_cmnd *scmnd = srp_claim_req(ch, req, sdev, NULL);

        if (scmnd) {
                srp_free_req(ch, req, scmnd, 0);
                scmnd->result = result;
                scmnd->scsi_done(scmnd);
        }
}

static void srp_terminate_io(struct srp_rport *rport)
{
        struct srp_target_port *target = rport->lld_data;
        struct srp_rdma_ch *ch;
        struct Scsi_Host *shost = target->scsi_host;
        struct scsi_device *sdev;
        int i, j;

        /*
         * Invoking srp_terminate_io() while srp_queuecommand() is running
         * is not safe. Hence the warning statement below.
         */
        shost_for_each_device(sdev, shost)
                WARN_ON_ONCE(sdev->request_queue->request_fn_active);

        for (i = 0; i < target->ch_count; i++) {
                ch = &target->ch[i];

                for (j = 0; j < target->req_ring_size; ++j) {
                        struct srp_request *req = &ch->req_ring[j];

                        srp_finish_req(ch, req, NULL,
                                       DID_TRANSPORT_FAILFAST << 16);
                }
        }
}

/*
 * It is up to the caller to ensure that srp_rport_reconnect() calls are
 * serialized and that no concurrent srp_queuecommand(), srp_abort(),
 * srp_reset_device() or srp_reset_host() calls will occur while this function
 * is in progress. One way to ensure that is not to call this function
 * directly but to call srp_reconnect_rport() instead, since that function
 * serializes calls of this function via rport->mutex and also blocks
 * srp_queuecommand() calls before invoking this function.
 */
static int srp_rport_reconnect(struct srp_rport *rport)
{
        struct srp_target_port *target = rport->lld_data;
        struct srp_rdma_ch *ch;
        int i, j, ret = 0;
        bool multich = false;

        srp_disconnect_target(target);

        if (target->state == SRP_TARGET_SCANNING)
                return -ENODEV;

        /*
         * Now get a new local CM ID so that we avoid confusing the target in
         * case things are really fouled up. Doing so also ensures that all CM
         * callbacks will have finished before a new QP is allocated.
         */
        for (i = 0; i < target->ch_count; i++) {
                ch = &target->ch[i];
                ret += srp_new_cm_id(ch);
        }
        for (i = 0; i < target->ch_count; i++) {
                ch = &target->ch[i];
                for (j = 0; j < target->req_ring_size; ++j) {
                        struct srp_request *req = &ch->req_ring[j];

                        srp_finish_req(ch, req, NULL, DID_RESET << 16);
                }
        }
        for (i = 0; i < target->ch_count; i++) {
                ch = &target->ch[i];
                /*
                 * Whether or not creating a new CM ID succeeded, create a new
                 * QP. This guarantees that all completion callback function
                 * invocations have finished before request resetting starts.
                 */
                ret += srp_create_ch_ib(ch);

                INIT_LIST_HEAD(&ch->free_tx);
                for (j = 0; j < target->queue_size; ++j)
                        list_add(&ch->tx_ring[j]->list, &ch->free_tx);
        }

        target->qp_in_error = false;

        for (i = 0; i < target->ch_count; i++) {
                ch = &target->ch[i];
                if (ret)
                        break;
                ret = srp_connect_ch(ch, multich);
                multich = true;
        }

        if (ret == 0)
                shost_printk(KERN_INFO, target->scsi_host,
                             PFX "reconnect succeeded\n");

        return ret;
}

static void srp_map_desc(struct srp_map_state *state, dma_addr_t dma_addr,
                         unsigned int dma_len, u32 rkey)
{
        struct srp_direct_buf *desc = state->desc;

        WARN_ON_ONCE(!dma_len);

        desc->va = cpu_to_be64(dma_addr);
        desc->key = cpu_to_be32(rkey);
        desc->len = cpu_to_be32(dma_len);

        state->total_len += dma_len;
        state->desc++;
        state->ndesc++;
}
1256
1257static int srp_map_finish_fmr(struct srp_map_state *state,
Bart Van Assche509c07b2014-10-30 14:48:30 +01001258 struct srp_rdma_ch *ch)
David Dillow8f26c9f2011-01-14 19:45:50 -05001259{
Bart Van Assche186fbc62015-08-10 17:06:29 -07001260 struct srp_target_port *target = ch->target;
1261 struct srp_device *dev = target->srp_host->srp_dev;
David Dillow8f26c9f2011-01-14 19:45:50 -05001262 struct ib_pool_fmr *fmr;
Roland Dreierf5358a12006-06-17 20:37:29 -07001263 u64 io_addr = 0;
David Dillow8f26c9f2011-01-14 19:45:50 -05001264
Bart Van Asschef731ed62015-08-10 17:07:27 -07001265 if (state->fmr.next >= state->fmr.end)
1266 return -ENOMEM;
1267
Sagi Grimberg26630e82015-10-13 19:11:38 +03001268 WARN_ON_ONCE(!dev->use_fmr);
1269
1270 if (state->npages == 0)
1271 return 0;
1272
1273 if (state->npages == 1 && target->global_mr) {
1274 srp_map_desc(state, state->base_dma_addr, state->dma_len,
1275 target->global_mr->rkey);
1276 goto reset_state;
1277 }
1278
Bart Van Assche509c07b2014-10-30 14:48:30 +01001279 fmr = ib_fmr_pool_map_phys(ch->fmr_pool, state->pages,
David Dillow8f26c9f2011-01-14 19:45:50 -05001280 state->npages, io_addr);
1281 if (IS_ERR(fmr))
1282 return PTR_ERR(fmr);
1283
Bart Van Asschef731ed62015-08-10 17:07:27 -07001284 *state->fmr.next++ = fmr;
Bart Van Assche52ede082014-05-20 15:07:45 +02001285 state->nmdesc++;
David Dillow8f26c9f2011-01-14 19:45:50 -05001286
Bart Van Assche186fbc62015-08-10 17:06:29 -07001287 srp_map_desc(state, state->base_dma_addr & ~dev->mr_page_mask,
1288 state->dma_len, fmr->fmr->rkey);
Bart Van Assche539dde62014-05-20 15:05:46 +02001289
Sagi Grimberg26630e82015-10-13 19:11:38 +03001290reset_state:
1291 state->npages = 0;
1292 state->dma_len = 0;
1293
David Dillow8f26c9f2011-01-14 19:45:50 -05001294 return 0;
1295}
1296
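/*
 * Worked example (illustrative): with a 4 KiB mr_page_size, for
 * base_dma_addr 0x12345678 the pages[] array holds page-aligned
 * addresses (0x12345000, ...) that are mapped at io_addr 0, so the
 * descriptor created above carries the in-page offset 0x678 as its
 * virtual address together with the FMR's rkey.
 */
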
Christoph Hellwig1dc7b1f2015-11-13 12:57:13 +01001297static void srp_reg_mr_err_done(struct ib_cq *cq, struct ib_wc *wc)
1298{
1299 srp_handle_qp_err(cq, wc, "FAST REG");
1300}
1301
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001302static int srp_map_finish_fr(struct srp_map_state *state,
Christoph Hellwig1dc7b1f2015-11-13 12:57:13 +01001303 struct srp_request *req,
Bart Van Assche57b0be92015-12-01 10:19:38 -08001304 struct srp_rdma_ch *ch, int sg_nents)
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001305{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001306 struct srp_target_port *target = ch->target;
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001307 struct srp_device *dev = target->srp_host->srp_dev;
1308 struct ib_send_wr *bad_wr;
Sagi Grimbergf7f7aab2015-10-13 19:11:39 +03001309 struct ib_reg_wr wr;
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001310 struct srp_fr_desc *desc;
1311 u32 rkey;
Sagi Grimbergf7f7aab2015-10-13 19:11:39 +03001312 int n, err;
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001313
Bart Van Asschef731ed62015-08-10 17:07:27 -07001314 if (state->fr.next >= state->fr.end)
1315 return -ENOMEM;
1316
Sagi Grimberg26630e82015-10-13 19:11:38 +03001317 WARN_ON_ONCE(!dev->use_fast_reg);
1318
Bart Van Assche57b0be92015-12-01 10:19:38 -08001319 if (sg_nents == 0)
Sagi Grimberg26630e82015-10-13 19:11:38 +03001320 return 0;
1321
Bart Van Assche57b0be92015-12-01 10:19:38 -08001322 if (sg_nents == 1 && target->global_mr) {
Sagi Grimbergf7f7aab2015-10-13 19:11:39 +03001323 srp_map_desc(state, sg_dma_address(state->sg),
1324 sg_dma_len(state->sg),
Sagi Grimberg26630e82015-10-13 19:11:38 +03001325 target->global_mr->rkey);
Sagi Grimbergf7f7aab2015-10-13 19:11:39 +03001326 return 1;
Sagi Grimberg26630e82015-10-13 19:11:38 +03001327 }
1328
Bart Van Assche509c07b2014-10-30 14:48:30 +01001329 desc = srp_fr_pool_get(ch->fr_pool);
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001330 if (!desc)
1331 return -ENOMEM;
1332
1333 rkey = ib_inc_rkey(desc->mr->rkey);
1334 ib_update_fast_reg_key(desc->mr, rkey);
1335
Bart Van Assche57b0be92015-12-01 10:19:38 -08001336 n = ib_map_mr_sg(desc->mr, state->sg, sg_nents, dev->mr_page_size);
Sagi Grimbergf7f7aab2015-10-13 19:11:39 +03001337 if (unlikely(n < 0))
1338 return n;
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001339
Christoph Hellwig1dc7b1f2015-11-13 12:57:13 +01001340 req->reg_cqe.done = srp_reg_mr_err_done;
1341
Sagi Grimbergf7f7aab2015-10-13 19:11:39 +03001342 wr.wr.next = NULL;
1343 wr.wr.opcode = IB_WR_REG_MR;
Christoph Hellwig1dc7b1f2015-11-13 12:57:13 +01001344 wr.wr.wr_cqe = &req->reg_cqe;
Sagi Grimbergf7f7aab2015-10-13 19:11:39 +03001345 wr.wr.num_sge = 0;
1346 wr.wr.send_flags = 0;
1347 wr.mr = desc->mr;
1348 wr.key = desc->mr->rkey;
1349 wr.access = (IB_ACCESS_LOCAL_WRITE |
1350 IB_ACCESS_REMOTE_READ |
1351 IB_ACCESS_REMOTE_WRITE);
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001352
Bart Van Asschef731ed62015-08-10 17:07:27 -07001353 *state->fr.next++ = desc;
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001354 state->nmdesc++;
1355
Sagi Grimbergf7f7aab2015-10-13 19:11:39 +03001356 srp_map_desc(state, desc->mr->iova,
1357 desc->mr->length, desc->mr->rkey);
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001358
Sagi Grimberg26630e82015-10-13 19:11:38 +03001359 err = ib_post_send(ch->qp, &wr.wr, &bad_wr);
Sagi Grimbergf7f7aab2015-10-13 19:11:39 +03001360 if (unlikely(err))
Sagi Grimberg26630e82015-10-13 19:11:38 +03001361 return err;
1362
Sagi Grimbergf7f7aab2015-10-13 19:11:39 +03001363 return n;
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001364}
1365
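/*
 * Note on the rkey update above: ib_inc_rkey() advances only the low
 * eight consumer-owned bits of the rkey, e.g. 0x123456ff becomes
 * 0x12345600, so a stale request that still carries the previous rkey
 * is rejected by the HCA instead of touching re-registered memory.
 */
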
David Dillow8f26c9f2011-01-14 19:45:50 -05001366static int srp_map_sg_entry(struct srp_map_state *state,
Bart Van Assche509c07b2014-10-30 14:48:30 +01001367 struct srp_rdma_ch *ch,
Bart Van Assche3ae95da2015-08-10 17:08:18 -07001368 struct scatterlist *sg, int sg_index)
David Dillow8f26c9f2011-01-14 19:45:50 -05001369{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001370 struct srp_target_port *target = ch->target;
Greg Kroah-Hartman05321932008-03-06 00:13:36 +01001371 struct srp_device *dev = target->srp_host->srp_dev;
Ralph Campbell85507bc2006-12-12 14:30:55 -08001372 struct ib_device *ibdev = dev->dev;
David Dillow8f26c9f2011-01-14 19:45:50 -05001373 dma_addr_t dma_addr = ib_sg_dma_address(ibdev, sg);
1374 unsigned int dma_len = ib_sg_dma_len(ibdev, sg);
Bart Van Assche3ae95da2015-08-10 17:08:18 -07001375 unsigned int len = 0;
David Dillow8f26c9f2011-01-14 19:45:50 -05001376 int ret;
Roland Dreierf5358a12006-06-17 20:37:29 -07001377
Bart Van Assche3ae95da2015-08-10 17:08:18 -07001378 WARN_ON_ONCE(!dma_len);
Ishai Rabinovitz559ce8f2006-08-03 10:35:43 -07001379
David Dillow8f26c9f2011-01-14 19:45:50 -05001380 while (dma_len) {
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001381 unsigned offset = dma_addr & ~dev->mr_page_mask;
1382 if (state->npages == dev->max_pages_per_mr || offset != 0) {
Sagi Grimbergf7f7aab2015-10-13 19:11:39 +03001383 ret = srp_map_finish_fmr(state, ch);
David Dillow8f26c9f2011-01-14 19:45:50 -05001384 if (ret)
1385 return ret;
Roland Dreierf5358a12006-06-17 20:37:29 -07001386 }
1387
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001388 len = min_t(unsigned int, dma_len, dev->mr_page_size - offset);
David Dillow8f26c9f2011-01-14 19:45:50 -05001389
1390 if (!state->npages)
1391 state->base_dma_addr = dma_addr;
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001392 state->pages[state->npages++] = dma_addr & dev->mr_page_mask;
Bart Van Assche52ede082014-05-20 15:07:45 +02001393 state->dma_len += len;
David Dillow8f26c9f2011-01-14 19:45:50 -05001394 dma_addr += len;
1395 dma_len -= len;
Roland Dreierf5358a12006-06-17 20:37:29 -07001396 }
1397
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001398 /*
1399 * If the last entry of the MR wasn't a full page, then we need to
David Dillow8f26c9f2011-01-14 19:45:50 -05001400 * close it out and start a new one -- we can only merge at page
1401	 * boundaries.
1402 */
Roland Dreierf5358a12006-06-17 20:37:29 -07001403 ret = 0;
Bart Van Assche0e0d3a42015-08-10 17:07:46 -07001404 if (len != dev->mr_page_size)
Sagi Grimbergf7f7aab2015-10-13 19:11:39 +03001405 ret = srp_map_finish_fmr(state, ch);
Roland Dreierf5358a12006-06-17 20:37:29 -07001406 return ret;
1407}
1408
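/*
 * Worked example (illustrative, 4 KiB mr_page_size): an S/G entry with
 * dma_addr 0x10000200 and dma_len 0x1e00 first flushes any accumulated
 * state because its offset (0x200) is not page aligned, then records two
 * pages: 0x10000000 covering the 0xe00 bytes up to the page boundary and
 * 0x10001000 covering the remaining 0x1000 bytes. Because the final
 * chunk ends exactly on a page boundary, no closing flush is needed.
 */
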
Sagi Grimberg26630e82015-10-13 19:11:38 +03001409static int srp_map_sg_fmr(struct srp_map_state *state, struct srp_rdma_ch *ch,
1410 struct srp_request *req, struct scatterlist *scat,
1411 int count)
1412{
1413 struct scatterlist *sg;
1414 int i, ret;
1415
1416 state->desc = req->indirect_desc;
1417 state->pages = req->map_page;
1418 state->fmr.next = req->fmr_list;
1419 state->fmr.end = req->fmr_list + ch->target->cmd_sg_cnt;
1420
1421 for_each_sg(scat, sg, count, i) {
1422 ret = srp_map_sg_entry(state, ch, sg, i);
1423 if (ret)
1424 return ret;
1425 }
1426
Sagi Grimbergf7f7aab2015-10-13 19:11:39 +03001427 ret = srp_map_finish_fmr(state, ch);
Sagi Grimberg26630e82015-10-13 19:11:38 +03001428 if (ret)
1429 return ret;
1430
1431 req->nmdesc = state->nmdesc;
1432
1433 return 0;
1434}
1435
1436static int srp_map_sg_fr(struct srp_map_state *state, struct srp_rdma_ch *ch,
1437 struct srp_request *req, struct scatterlist *scat,
1438 int count)
1439{
Sagi Grimberg26630e82015-10-13 19:11:38 +03001440 state->desc = req->indirect_desc;
Sagi Grimbergf7f7aab2015-10-13 19:11:39 +03001441 state->fr.next = req->fr_list;
1442 state->fr.end = req->fr_list + ch->target->cmd_sg_cnt;
1443 state->sg = scat;
Sagi Grimberg26630e82015-10-13 19:11:38 +03001444
Bart Van Assche57b0be92015-12-01 10:19:38 -08001445 while (count) {
Sagi Grimbergf7f7aab2015-10-13 19:11:39 +03001446 int i, n;
1447
Doug Ledfordc6333f92015-12-15 14:10:44 -05001448 n = srp_map_finish_fr(state, req, ch, count);
Sagi Grimbergf7f7aab2015-10-13 19:11:39 +03001449 if (unlikely(n < 0))
1450 return n;
1451
Bart Van Assche57b0be92015-12-01 10:19:38 -08001452 count -= n;
Sagi Grimbergf7f7aab2015-10-13 19:11:39 +03001453 for (i = 0; i < n; i++)
1454 state->sg = sg_next(state->sg);
Sagi Grimberg26630e82015-10-13 19:11:38 +03001455 }
1456
Sagi Grimberg26630e82015-10-13 19:11:38 +03001457 req->nmdesc = state->nmdesc;
1458
1459 return 0;
1460}
1461
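/*
 * Worked example (illustrative): for count == 5, the first
 * srp_map_finish_fr() call might register n == 3 entries in a single
 * MR; state->sg is then advanced three times and the next call
 * registers the remaining two entries, so the loop produces one memory
 * descriptor per fast registration until the S/G list is consumed.
 */
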
1462static int srp_map_sg_dma(struct srp_map_state *state, struct srp_rdma_ch *ch,
1463 struct srp_request *req, struct scatterlist *scat,
1464 int count)
Bart Van Assche76bc1e12014-05-20 15:05:21 +02001465{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001466 struct srp_target_port *target = ch->target;
Bart Van Assche76bc1e12014-05-20 15:05:21 +02001467 struct srp_device *dev = target->srp_host->srp_dev;
Bart Van Assche76bc1e12014-05-20 15:05:21 +02001468 struct scatterlist *sg;
Sagi Grimberg26630e82015-10-13 19:11:38 +03001469 int i;
Bart Van Assche76bc1e12014-05-20 15:05:21 +02001470
Sagi Grimberg26630e82015-10-13 19:11:38 +03001471 state->desc = req->indirect_desc;
1472 for_each_sg(scat, sg, count, i) {
1473 srp_map_desc(state, ib_sg_dma_address(dev->dev, sg),
1474 ib_sg_dma_len(dev->dev, sg),
1475 target->global_mr->rkey);
Bart Van Assche0e0d3a42015-08-10 17:07:46 -07001476 }
Bart Van Assche76bc1e12014-05-20 15:05:21 +02001477
Bart Van Assche52ede082014-05-20 15:07:45 +02001478 req->nmdesc = state->nmdesc;
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001479
Sagi Grimberg26630e82015-10-13 19:11:38 +03001480 return 0;
Bart Van Assche76bc1e12014-05-20 15:05:21 +02001481}
1482
Bart Van Assche330179f2015-08-10 17:09:05 -07001483/*
1484 * Register the indirect data buffer descriptor with the HCA.
1485 *
1486 * Note: since the indirect data buffer descriptor has been allocated with
1487 * kmalloc() it is guaranteed that this buffer is a physically contiguous
1488 * memory buffer.
1489 */
1490static int srp_map_idb(struct srp_rdma_ch *ch, struct srp_request *req,
1491 void **next_mr, void **end_mr, u32 idb_len,
1492 __be32 *idb_rkey)
1493{
1494 struct srp_target_port *target = ch->target;
1495 struct srp_device *dev = target->srp_host->srp_dev;
1496 struct srp_map_state state;
1497 struct srp_direct_buf idb_desc;
1498 u64 idb_pages[1];
Sagi Grimbergf7f7aab2015-10-13 19:11:39 +03001499 struct scatterlist idb_sg[1];
Bart Van Assche330179f2015-08-10 17:09:05 -07001500 int ret;
1501
1502 memset(&state, 0, sizeof(state));
1503 memset(&idb_desc, 0, sizeof(idb_desc));
1504 state.gen.next = next_mr;
1505 state.gen.end = end_mr;
1506 state.desc = &idb_desc;
Bart Van Assche330179f2015-08-10 17:09:05 -07001507 state.base_dma_addr = req->indirect_dma_addr;
1508 state.dma_len = idb_len;
Sagi Grimbergf7f7aab2015-10-13 19:11:39 +03001509
1510 if (dev->use_fast_reg) {
1511 state.sg = idb_sg;
Sagi Grimbergf7f7aab2015-10-13 19:11:39 +03001512 sg_set_buf(idb_sg, req->indirect_desc, idb_len);
1513 idb_sg->dma_address = req->indirect_dma_addr; /* hack! */
Christoph Hellwigfc925512015-12-01 10:18:30 -08001514#ifdef CONFIG_NEED_SG_DMA_LENGTH
1515 idb_sg->dma_length = idb_sg->length; /* hack^2 */
1516#endif
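		/*
		 * Clarifying note: ib_map_mr_sg() reads sg_dma_address() and
		 * sg_dma_len(), which ib_dma_map_sg() would normally fill in.
		 * The indirect descriptor buffer was mapped with
		 * ib_dma_map_single() instead, hence the two "hack"
		 * assignments above that set those fields by hand.
		 */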
Doug Ledfordc6333f92015-12-15 14:10:44 -05001517 ret = srp_map_finish_fr(&state, req, ch, 1);
Sagi Grimbergf7f7aab2015-10-13 19:11:39 +03001518 if (ret < 0)
1519 return ret;
1520 } else if (dev->use_fmr) {
1521 state.pages = idb_pages;
1522 state.pages[0] = (req->indirect_dma_addr &
1523 dev->mr_page_mask);
1524 state.npages = 1;
1525 ret = srp_map_finish_fmr(&state, ch);
1526 if (ret < 0)
1527 return ret;
1528 } else {
1529 return -EINVAL;
1530 }
Bart Van Assche330179f2015-08-10 17:09:05 -07001531
1532 *idb_rkey = idb_desc.key;
1533
Sagi Grimbergf7f7aab2015-10-13 19:11:39 +03001534 return 0;
Bart Van Assche330179f2015-08-10 17:09:05 -07001535}
1536
Bart Van Assche509c07b2014-10-30 14:48:30 +01001537static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch,
Roland Dreieraef9ec32005-11-02 14:07:13 -08001538 struct srp_request *req)
1539{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001540 struct srp_target_port *target = ch->target;
Bart Van Assche76bc1e12014-05-20 15:05:21 +02001541 struct scatterlist *scat;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001542 struct srp_cmd *cmd = req->cmd->buf;
Bart Van Assche330179f2015-08-10 17:09:05 -07001543 int len, nents, count, ret;
Ralph Campbell85507bc2006-12-12 14:30:55 -08001544 struct srp_device *dev;
1545 struct ib_device *ibdev;
David Dillow8f26c9f2011-01-14 19:45:50 -05001546 struct srp_map_state state;
1547 struct srp_indirect_buf *indirect_hdr;
Bart Van Assche330179f2015-08-10 17:09:05 -07001548 u32 idb_len, table_len;
1549 __be32 idb_rkey;
David Dillow8f26c9f2011-01-14 19:45:50 -05001550 u8 fmt;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001551
FUJITA Tomonoribb350d12007-05-26 02:28:25 +09001552 if (!scsi_sglist(scmnd) || scmnd->sc_data_direction == DMA_NONE)
Roland Dreieraef9ec32005-11-02 14:07:13 -08001553 return sizeof (struct srp_cmd);
1554
1555 if (scmnd->sc_data_direction != DMA_FROM_DEVICE &&
1556 scmnd->sc_data_direction != DMA_TO_DEVICE) {
David Dillow7aa54bd2008-01-07 18:23:41 -05001557 shost_printk(KERN_WARNING, target->scsi_host,
1558 PFX "Unhandled data direction %d\n",
1559 scmnd->sc_data_direction);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001560 return -EINVAL;
1561 }
1562
FUJITA Tomonoribb350d12007-05-26 02:28:25 +09001563 nents = scsi_sg_count(scmnd);
1564 scat = scsi_sglist(scmnd);
Roland Dreiercf368712006-03-24 15:47:26 -08001565
Greg Kroah-Hartman05321932008-03-06 00:13:36 +01001566 dev = target->srp_host->srp_dev;
Ralph Campbell85507bc2006-12-12 14:30:55 -08001567 ibdev = dev->dev;
1568
1569 count = ib_dma_map_sg(ibdev, scat, nents, scmnd->sc_data_direction);
David Dillow8f26c9f2011-01-14 19:45:50 -05001570 if (unlikely(count == 0))
1571 return -EIO;
Roland Dreierf5358a12006-06-17 20:37:29 -07001572
1573 fmt = SRP_DATA_DESC_DIRECT;
1574 len = sizeof (struct srp_cmd) + sizeof (struct srp_direct_buf);
Roland Dreiercf368712006-03-24 15:47:26 -08001575
Bart Van Assche03f6fb92015-08-10 17:09:36 -07001576 if (count == 1 && target->global_mr) {
Roland Dreierf5358a12006-06-17 20:37:29 -07001577 /*
1578 * The midlayer only generated a single gather/scatter
1579 * entry, or DMA mapping coalesced everything to a
1580 * single entry. So a direct descriptor along with
1581 * the DMA MR suffices.
1582 */
Roland Dreieraef9ec32005-11-02 14:07:13 -08001583 struct srp_direct_buf *buf = (void *) cmd->add_data;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001584
Ralph Campbell85507bc2006-12-12 14:30:55 -08001585 buf->va = cpu_to_be64(ib_sg_dma_address(ibdev, scat));
Bart Van Assche03f6fb92015-08-10 17:09:36 -07001586 buf->key = cpu_to_be32(target->global_mr->rkey);
Ralph Campbell85507bc2006-12-12 14:30:55 -08001587 buf->len = cpu_to_be32(ib_sg_dma_len(ibdev, scat));
Roland Dreiercf368712006-03-24 15:47:26 -08001588
Bart Van Assche52ede082014-05-20 15:07:45 +02001589 req->nmdesc = 0;
David Dillow8f26c9f2011-01-14 19:45:50 -05001590 goto map_complete;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001591 }
1592
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001593 /*
1594 * We have more than one scatter/gather entry, so build our indirect
1595 * descriptor table, trying to merge as many entries as we can.
David Dillow8f26c9f2011-01-14 19:45:50 -05001596 */
1597 indirect_hdr = (void *) cmd->add_data;
1598
David Dillowc07d4242011-01-16 13:57:10 -05001599 ib_dma_sync_single_for_cpu(ibdev, req->indirect_dma_addr,
1600 target->indirect_size, DMA_TO_DEVICE);
1601
David Dillow8f26c9f2011-01-14 19:45:50 -05001602 memset(&state, 0, sizeof(state));
Sagi Grimberg26630e82015-10-13 19:11:38 +03001603 if (dev->use_fast_reg)
1604 srp_map_sg_fr(&state, ch, req, scat, count);
1605 else if (dev->use_fmr)
1606 srp_map_sg_fmr(&state, ch, req, scat, count);
1607 else
1608 srp_map_sg_dma(&state, ch, req, scat, count);
David Dillow8f26c9f2011-01-14 19:45:50 -05001609
David Dillowc07d4242011-01-16 13:57:10 -05001610	/*
	 * We've mapped the request, now pull as much of the indirect
1611 * descriptor table as we can into the command buffer. If this
1612 * target is not using an external indirect table, we are
1613 * guaranteed to fit into the command, as the SCSI layer won't
1614 * give us more S/G entries than we allow.
David Dillow8f26c9f2011-01-14 19:45:50 -05001615 */
David Dillow8f26c9f2011-01-14 19:45:50 -05001616 if (state.ndesc == 1) {
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001617 /*
1618 * Memory registration collapsed the sg-list into one entry,
David Dillow8f26c9f2011-01-14 19:45:50 -05001619 * so use a direct descriptor.
1620 */
1621 struct srp_direct_buf *buf = (void *) cmd->add_data;
1622
David Dillowc07d4242011-01-16 13:57:10 -05001623 *buf = req->indirect_desc[0];
David Dillow8f26c9f2011-01-14 19:45:50 -05001624 goto map_complete;
1625 }
1626
David Dillowc07d4242011-01-16 13:57:10 -05001627 if (unlikely(target->cmd_sg_cnt < state.ndesc &&
1628 !target->allow_ext_sg)) {
1629 shost_printk(KERN_ERR, target->scsi_host,
1630 "Could not fit S/G list into SRP_CMD\n");
1631 return -EIO;
1632 }
1633
1634 count = min(state.ndesc, target->cmd_sg_cnt);
David Dillow8f26c9f2011-01-14 19:45:50 -05001635 table_len = state.ndesc * sizeof (struct srp_direct_buf);
Bart Van Assche330179f2015-08-10 17:09:05 -07001636 idb_len = sizeof(struct srp_indirect_buf) + table_len;
David Dillow8f26c9f2011-01-14 19:45:50 -05001637
1638 fmt = SRP_DATA_DESC_INDIRECT;
1639 len = sizeof(struct srp_cmd) + sizeof (struct srp_indirect_buf);
David Dillowc07d4242011-01-16 13:57:10 -05001640 len += count * sizeof (struct srp_direct_buf);
David Dillow8f26c9f2011-01-14 19:45:50 -05001641
David Dillowc07d4242011-01-16 13:57:10 -05001642 memcpy(indirect_hdr->desc_list, req->indirect_desc,
1643 count * sizeof (struct srp_direct_buf));
David Dillow8f26c9f2011-01-14 19:45:50 -05001644
Bart Van Assche03f6fb92015-08-10 17:09:36 -07001645 if (!target->global_mr) {
Bart Van Assche330179f2015-08-10 17:09:05 -07001646 ret = srp_map_idb(ch, req, state.gen.next, state.gen.end,
1647 idb_len, &idb_rkey);
1648 if (ret < 0)
1649 return ret;
1650 req->nmdesc++;
1651 } else {
Bart Van Asschea745f4f42015-12-01 10:18:47 -08001652 idb_rkey = cpu_to_be32(target->global_mr->rkey);
Bart Van Assche330179f2015-08-10 17:09:05 -07001653 }
1654
David Dillowc07d4242011-01-16 13:57:10 -05001655 indirect_hdr->table_desc.va = cpu_to_be64(req->indirect_dma_addr);
Bart Van Assche330179f2015-08-10 17:09:05 -07001656 indirect_hdr->table_desc.key = idb_rkey;
David Dillow8f26c9f2011-01-14 19:45:50 -05001657 indirect_hdr->table_desc.len = cpu_to_be32(table_len);
1658 indirect_hdr->len = cpu_to_be32(state.total_len);
1659
1660 if (scmnd->sc_data_direction == DMA_TO_DEVICE)
David Dillowc07d4242011-01-16 13:57:10 -05001661 cmd->data_out_desc_cnt = count;
David Dillow8f26c9f2011-01-14 19:45:50 -05001662 else
David Dillowc07d4242011-01-16 13:57:10 -05001663 cmd->data_in_desc_cnt = count;
1664
1665 ib_dma_sync_single_for_device(ibdev, req->indirect_dma_addr, table_len,
1666 DMA_TO_DEVICE);
David Dillow8f26c9f2011-01-14 19:45:50 -05001667
1668map_complete:
Roland Dreieraef9ec32005-11-02 14:07:13 -08001669 if (scmnd->sc_data_direction == DMA_TO_DEVICE)
1670 cmd->buf_fmt = fmt << 4;
1671 else
1672 cmd->buf_fmt = fmt;
1673
Roland Dreieraef9ec32005-11-02 14:07:13 -08001674 return len;
1675}
1676
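/*
 * Worked example (illustrative, assuming the usual SRP wire sizes of a
 * 48-byte srp_cmd, a 16-byte srp_direct_buf and a 20-byte
 * srp_indirect_buf header): if registration leaves state.ndesc == 3
 * descriptors that all fit in the command, srp_map_data() returns
 * len = 48 + 20 + 3 * 16 = 116, and idb_len = 20 + 3 * 16 = 68 is the
 * size of the region that srp_map_idb() registers when no global MR is
 * available.
 */
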
David Dillow05a1d752010-10-08 14:48:14 -04001677/*
Bart Van Assche76c75b22010-11-26 14:37:47 -05001678 * Return an IU and possibly a credit to the free pool
1679 */
Bart Van Assche509c07b2014-10-30 14:48:30 +01001680static void srp_put_tx_iu(struct srp_rdma_ch *ch, struct srp_iu *iu,
Bart Van Assche76c75b22010-11-26 14:37:47 -05001681 enum srp_iu_type iu_type)
1682{
1683 unsigned long flags;
1684
Bart Van Assche509c07b2014-10-30 14:48:30 +01001685 spin_lock_irqsave(&ch->lock, flags);
1686 list_add(&iu->list, &ch->free_tx);
Bart Van Assche76c75b22010-11-26 14:37:47 -05001687 if (iu_type != SRP_IU_RSP)
Bart Van Assche509c07b2014-10-30 14:48:30 +01001688 ++ch->req_lim;
1689 spin_unlock_irqrestore(&ch->lock, flags);
Bart Van Assche76c75b22010-11-26 14:37:47 -05001690}
1691
1692/*
Bart Van Assche509c07b2014-10-30 14:48:30 +01001693 * Must be called with ch->lock held to protect req_lim and free_tx.
Bart Van Asschee9684672010-11-26 15:08:38 -05001694 * If IU is not sent, it must be returned using srp_put_tx_iu().
David Dillow05a1d752010-10-08 14:48:14 -04001695 *
1696 * Note:
1697 * An upper limit for the number of allocated information units for each
1698 * request type is:
1699 * - SRP_IU_CMD: SRP_CMD_SQ_SIZE, since the SCSI mid-layer never queues
1700 * more than Scsi_Host.can_queue requests.
1701 * - SRP_IU_TSK_MGMT: SRP_TSK_MGMT_SQ_SIZE.
1702 * - SRP_IU_RSP: 1, since a conforming SRP target never sends more than
1703 * one unanswered SRP request to an initiator.
1704 */
Bart Van Assche509c07b2014-10-30 14:48:30 +01001705static struct srp_iu *__srp_get_tx_iu(struct srp_rdma_ch *ch,
David Dillow05a1d752010-10-08 14:48:14 -04001706 enum srp_iu_type iu_type)
1707{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001708 struct srp_target_port *target = ch->target;
David Dillow05a1d752010-10-08 14:48:14 -04001709 s32 rsv = (iu_type == SRP_IU_TSK_MGMT) ? 0 : SRP_TSK_MGMT_SQ_SIZE;
1710 struct srp_iu *iu;
1711
Christoph Hellwig1dc7b1f2015-11-13 12:57:13 +01001712 ib_process_cq_direct(ch->send_cq, -1);
David Dillow05a1d752010-10-08 14:48:14 -04001713
Bart Van Assche509c07b2014-10-30 14:48:30 +01001714 if (list_empty(&ch->free_tx))
David Dillow05a1d752010-10-08 14:48:14 -04001715 return NULL;
1716
1717 /* Initiator responses to target requests do not consume credits */
Bart Van Assche76c75b22010-11-26 14:37:47 -05001718 if (iu_type != SRP_IU_RSP) {
Bart Van Assche509c07b2014-10-30 14:48:30 +01001719 if (ch->req_lim <= rsv) {
Bart Van Assche76c75b22010-11-26 14:37:47 -05001720 ++target->zero_req_lim;
1721 return NULL;
1722 }
1723
Bart Van Assche509c07b2014-10-30 14:48:30 +01001724 --ch->req_lim;
David Dillow05a1d752010-10-08 14:48:14 -04001725 }
1726
Bart Van Assche509c07b2014-10-30 14:48:30 +01001727 iu = list_first_entry(&ch->free_tx, struct srp_iu, list);
Bart Van Assche76c75b22010-11-26 14:37:47 -05001728 list_del(&iu->list);
David Dillow05a1d752010-10-08 14:48:14 -04001729 return iu;
1730}
1731
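/*
 * Worked example (illustrative, assuming SRP_TSK_MGMT_SQ_SIZE == 1):
 * with ch->req_lim == 1, a SRP_IU_CMD allocation fails the
 * "ch->req_lim <= rsv" test and returns NULL, while a SRP_IU_TSK_MGMT
 * allocation (rsv == 0) still succeeds. Task management requests can
 * therefore not be starved of credits by regular SCSI commands.
 */
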
Christoph Hellwig1dc7b1f2015-11-13 12:57:13 +01001732static void srp_send_done(struct ib_cq *cq, struct ib_wc *wc)
1733{
1734 struct srp_iu *iu = container_of(wc->wr_cqe, struct srp_iu, cqe);
1735 struct srp_rdma_ch *ch = cq->cq_context;
1736
1737 if (unlikely(wc->status != IB_WC_SUCCESS)) {
1738 srp_handle_qp_err(cq, wc, "SEND");
1739 return;
1740 }
1741
1742 list_add(&iu->list, &ch->free_tx);
1743}
1744
Bart Van Assche509c07b2014-10-30 14:48:30 +01001745static int srp_post_send(struct srp_rdma_ch *ch, struct srp_iu *iu, int len)
David Dillow05a1d752010-10-08 14:48:14 -04001746{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001747 struct srp_target_port *target = ch->target;
David Dillow05a1d752010-10-08 14:48:14 -04001748 struct ib_sge list;
1749 struct ib_send_wr wr, *bad_wr;
David Dillow05a1d752010-10-08 14:48:14 -04001750
1751 list.addr = iu->dma;
1752 list.length = len;
David Dillow9af76272010-11-26 15:34:46 -05001753 list.lkey = target->lkey;
David Dillow05a1d752010-10-08 14:48:14 -04001754
Christoph Hellwig1dc7b1f2015-11-13 12:57:13 +01001755 iu->cqe.done = srp_send_done;
1756
David Dillow05a1d752010-10-08 14:48:14 -04001757 wr.next = NULL;
Christoph Hellwig1dc7b1f2015-11-13 12:57:13 +01001758 wr.wr_cqe = &iu->cqe;
David Dillow05a1d752010-10-08 14:48:14 -04001759 wr.sg_list = &list;
1760 wr.num_sge = 1;
1761 wr.opcode = IB_WR_SEND;
1762 wr.send_flags = IB_SEND_SIGNALED;
1763
Bart Van Assche509c07b2014-10-30 14:48:30 +01001764 return ib_post_send(ch->qp, &wr, &bad_wr);
David Dillow05a1d752010-10-08 14:48:14 -04001765}
1766
Bart Van Assche509c07b2014-10-30 14:48:30 +01001767static int srp_post_recv(struct srp_rdma_ch *ch, struct srp_iu *iu)
Bart Van Asschec996bb42010-07-30 10:59:05 +00001768{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001769 struct srp_target_port *target = ch->target;
Bart Van Asschec996bb42010-07-30 10:59:05 +00001770 struct ib_recv_wr wr, *bad_wr;
Bart Van Asschedcb4cb82010-11-26 13:22:48 -05001771 struct ib_sge list;
Bart Van Asschec996bb42010-07-30 10:59:05 +00001772
1773 list.addr = iu->dma;
1774 list.length = iu->size;
David Dillow9af76272010-11-26 15:34:46 -05001775 list.lkey = target->lkey;
Bart Van Asschec996bb42010-07-30 10:59:05 +00001776
Christoph Hellwig1dc7b1f2015-11-13 12:57:13 +01001777 iu->cqe.done = srp_recv_done;
1778
Bart Van Asschec996bb42010-07-30 10:59:05 +00001779 wr.next = NULL;
Christoph Hellwig1dc7b1f2015-11-13 12:57:13 +01001780 wr.wr_cqe = &iu->cqe;
Bart Van Asschec996bb42010-07-30 10:59:05 +00001781 wr.sg_list = &list;
1782 wr.num_sge = 1;
1783
Bart Van Assche509c07b2014-10-30 14:48:30 +01001784 return ib_post_recv(ch->qp, &wr, &bad_wr);
Bart Van Asschec996bb42010-07-30 10:59:05 +00001785}
1786
Bart Van Assche509c07b2014-10-30 14:48:30 +01001787static void srp_process_rsp(struct srp_rdma_ch *ch, struct srp_rsp *rsp)
Roland Dreieraef9ec32005-11-02 14:07:13 -08001788{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001789 struct srp_target_port *target = ch->target;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001790 struct srp_request *req;
1791 struct scsi_cmnd *scmnd;
1792 unsigned long flags;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001793
Roland Dreieraef9ec32005-11-02 14:07:13 -08001794 if (unlikely(rsp->tag & SRP_TAG_TSK_MGMT)) {
Bart Van Assche509c07b2014-10-30 14:48:30 +01001795 spin_lock_irqsave(&ch->lock, flags);
1796 ch->req_lim += be32_to_cpu(rsp->req_lim_delta);
1797 spin_unlock_irqrestore(&ch->lock, flags);
Bart Van Assche94a91742010-11-26 14:50:09 -05001798
Bart Van Assche509c07b2014-10-30 14:48:30 +01001799 ch->tsk_mgmt_status = -1;
David Dillowf8b6e312010-11-26 13:02:21 -05001800 if (be32_to_cpu(rsp->resp_data_len) >= 4)
Bart Van Assche509c07b2014-10-30 14:48:30 +01001801 ch->tsk_mgmt_status = rsp->data[3];
1802 complete(&ch->tsk_mgmt_done);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001803 } else {
Bart Van Assche77f2c1a2014-10-02 15:29:25 +02001804 scmnd = scsi_host_find_tag(target->scsi_host, rsp->tag);
1805 if (scmnd) {
1806 req = (void *)scmnd->host_scribble;
1807 scmnd = srp_claim_req(ch, req, NULL, scmnd);
1808 }
Bart Van Assche22032992012-08-14 13:18:53 +00001809 if (!scmnd) {
David Dillow7aa54bd2008-01-07 18:23:41 -05001810 shost_printk(KERN_ERR, target->scsi_host,
Bart Van Assched92c0da2014-10-06 17:14:36 +02001811 "Null scmnd for RSP w/tag %#016llx received on ch %td / QP %#x\n",
1812 rsp->tag, ch - target->ch, ch->qp->qp_num);
Bart Van Assche22032992012-08-14 13:18:53 +00001813
Bart Van Assche509c07b2014-10-30 14:48:30 +01001814 spin_lock_irqsave(&ch->lock, flags);
1815 ch->req_lim += be32_to_cpu(rsp->req_lim_delta);
1816 spin_unlock_irqrestore(&ch->lock, flags);
Bart Van Assche22032992012-08-14 13:18:53 +00001817
1818 return;
1819 }
Roland Dreieraef9ec32005-11-02 14:07:13 -08001820 scmnd->result = rsp->status;
1821
1822 if (rsp->flags & SRP_RSP_FLAG_SNSVALID) {
1823 memcpy(scmnd->sense_buffer, rsp->data +
1824 be32_to_cpu(rsp->resp_data_len),
1825 min_t(int, be32_to_cpu(rsp->sense_data_len),
1826 SCSI_SENSE_BUFFERSIZE));
1827 }
1828
Bart Van Asschee7145312014-07-09 15:57:51 +02001829 if (unlikely(rsp->flags & SRP_RSP_FLAG_DIUNDER))
FUJITA Tomonoribb350d12007-05-26 02:28:25 +09001830 scsi_set_resid(scmnd, be32_to_cpu(rsp->data_in_res_cnt));
Bart Van Asschee7145312014-07-09 15:57:51 +02001831 else if (unlikely(rsp->flags & SRP_RSP_FLAG_DIOVER))
1832 scsi_set_resid(scmnd, -be32_to_cpu(rsp->data_in_res_cnt));
1833 else if (unlikely(rsp->flags & SRP_RSP_FLAG_DOUNDER))
1834 scsi_set_resid(scmnd, be32_to_cpu(rsp->data_out_res_cnt));
1835 else if (unlikely(rsp->flags & SRP_RSP_FLAG_DOOVER))
1836 scsi_set_resid(scmnd, -be32_to_cpu(rsp->data_out_res_cnt));
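		/*
		 * Example (illustrative): a DIUNDER response with
		 * data_in_res_cnt == 512 reports 512 bytes of underflow as a
		 * positive residual, while the *OVER flags report overflow
		 * as a negative residual.
		 */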
Roland Dreieraef9ec32005-11-02 14:07:13 -08001837
Bart Van Assche509c07b2014-10-30 14:48:30 +01001838 srp_free_req(ch, req, scmnd,
Bart Van Assche22032992012-08-14 13:18:53 +00001839 be32_to_cpu(rsp->req_lim_delta));
1840
David Dillowf8b6e312010-11-26 13:02:21 -05001841 scmnd->host_scribble = NULL;
1842 scmnd->scsi_done(scmnd);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001843 }
Roland Dreieraef9ec32005-11-02 14:07:13 -08001844}
1845
Bart Van Assche509c07b2014-10-30 14:48:30 +01001846static int srp_response_common(struct srp_rdma_ch *ch, s32 req_delta,
David Dillowbb125882010-10-08 14:40:47 -04001847 void *rsp, int len)
1848{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001849 struct srp_target_port *target = ch->target;
Bart Van Assche76c75b22010-11-26 14:37:47 -05001850 struct ib_device *dev = target->srp_host->srp_dev->dev;
David Dillowbb125882010-10-08 14:40:47 -04001851 unsigned long flags;
1852 struct srp_iu *iu;
Bart Van Assche76c75b22010-11-26 14:37:47 -05001853 int err;
David Dillowbb125882010-10-08 14:40:47 -04001854
Bart Van Assche509c07b2014-10-30 14:48:30 +01001855 spin_lock_irqsave(&ch->lock, flags);
1856 ch->req_lim += req_delta;
1857 iu = __srp_get_tx_iu(ch, SRP_IU_RSP);
1858 spin_unlock_irqrestore(&ch->lock, flags);
Bart Van Assche76c75b22010-11-26 14:37:47 -05001859
David Dillowbb125882010-10-08 14:40:47 -04001860 if (!iu) {
1861 shost_printk(KERN_ERR, target->scsi_host, PFX
1862 "no IU available to send response\n");
Bart Van Assche76c75b22010-11-26 14:37:47 -05001863 return 1;
David Dillowbb125882010-10-08 14:40:47 -04001864 }
1865
1866 ib_dma_sync_single_for_cpu(dev, iu->dma, len, DMA_TO_DEVICE);
1867 memcpy(iu->buf, rsp, len);
1868 ib_dma_sync_single_for_device(dev, iu->dma, len, DMA_TO_DEVICE);
1869
Bart Van Assche509c07b2014-10-30 14:48:30 +01001870 err = srp_post_send(ch, iu, len);
Bart Van Assche76c75b22010-11-26 14:37:47 -05001871 if (err) {
David Dillowbb125882010-10-08 14:40:47 -04001872 shost_printk(KERN_ERR, target->scsi_host, PFX
1873 "unable to post response: %d\n", err);
Bart Van Assche509c07b2014-10-30 14:48:30 +01001874 srp_put_tx_iu(ch, iu, SRP_IU_RSP);
Bart Van Assche76c75b22010-11-26 14:37:47 -05001875 }
David Dillowbb125882010-10-08 14:40:47 -04001876
David Dillowbb125882010-10-08 14:40:47 -04001877 return err;
1878}
1879
Bart Van Assche509c07b2014-10-30 14:48:30 +01001880static void srp_process_cred_req(struct srp_rdma_ch *ch,
David Dillowbb125882010-10-08 14:40:47 -04001881 struct srp_cred_req *req)
1882{
1883 struct srp_cred_rsp rsp = {
1884 .opcode = SRP_CRED_RSP,
1885 .tag = req->tag,
1886 };
1887 s32 delta = be32_to_cpu(req->req_lim_delta);
1888
Bart Van Assche509c07b2014-10-30 14:48:30 +01001889 if (srp_response_common(ch, delta, &rsp, sizeof(rsp)))
1890 shost_printk(KERN_ERR, ch->target->scsi_host, PFX
David Dillowbb125882010-10-08 14:40:47 -04001891 "problems processing SRP_CRED_REQ\n");
1892}
1893
Bart Van Assche509c07b2014-10-30 14:48:30 +01001894static void srp_process_aer_req(struct srp_rdma_ch *ch,
David Dillowbb125882010-10-08 14:40:47 -04001895 struct srp_aer_req *req)
1896{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001897 struct srp_target_port *target = ch->target;
David Dillowbb125882010-10-08 14:40:47 -04001898 struct srp_aer_rsp rsp = {
1899 .opcode = SRP_AER_RSP,
1900 .tag = req->tag,
1901 };
1902 s32 delta = be32_to_cpu(req->req_lim_delta);
1903
1904 shost_printk(KERN_ERR, target->scsi_host, PFX
Bart Van Assche985aa492015-05-18 13:27:14 +02001905 "ignoring AER for LUN %llu\n", scsilun_to_int(&req->lun));
David Dillowbb125882010-10-08 14:40:47 -04001906
Bart Van Assche509c07b2014-10-30 14:48:30 +01001907 if (srp_response_common(ch, delta, &rsp, sizeof(rsp)))
David Dillowbb125882010-10-08 14:40:47 -04001908 shost_printk(KERN_ERR, target->scsi_host, PFX
1909 "problems processing SRP_AER_REQ\n");
1910}
1911
Christoph Hellwig1dc7b1f2015-11-13 12:57:13 +01001912static void srp_recv_done(struct ib_cq *cq, struct ib_wc *wc)
Roland Dreieraef9ec32005-11-02 14:07:13 -08001913{
Christoph Hellwig1dc7b1f2015-11-13 12:57:13 +01001914 struct srp_iu *iu = container_of(wc->wr_cqe, struct srp_iu, cqe);
1915 struct srp_rdma_ch *ch = cq->cq_context;
Bart Van Assche509c07b2014-10-30 14:48:30 +01001916 struct srp_target_port *target = ch->target;
Bart Van Asschedcb4cb82010-11-26 13:22:48 -05001917 struct ib_device *dev = target->srp_host->srp_dev->dev;
Bart Van Asschec996bb42010-07-30 10:59:05 +00001918 int res;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001919 u8 opcode;
1920
Christoph Hellwig1dc7b1f2015-11-13 12:57:13 +01001921 if (unlikely(wc->status != IB_WC_SUCCESS)) {
1922 srp_handle_qp_err(cq, wc, "RECV");
1923 return;
1924 }
1925
Bart Van Assche509c07b2014-10-30 14:48:30 +01001926 ib_dma_sync_single_for_cpu(dev, iu->dma, ch->max_ti_iu_len,
Ralph Campbell85507bc2006-12-12 14:30:55 -08001927 DMA_FROM_DEVICE);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001928
1929 opcode = *(u8 *) iu->buf;
1930
1931 if (0) {
David Dillow7aa54bd2008-01-07 18:23:41 -05001932 shost_printk(KERN_ERR, target->scsi_host,
1933 PFX "recv completion, opcode 0x%02x\n", opcode);
Bart Van Assche7a700812010-07-29 15:56:37 +00001934 print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 8, 1,
1935 iu->buf, wc->byte_len, true);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001936 }
1937
1938 switch (opcode) {
1939 case SRP_RSP:
Bart Van Assche509c07b2014-10-30 14:48:30 +01001940 srp_process_rsp(ch, iu->buf);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001941 break;
1942
David Dillowbb125882010-10-08 14:40:47 -04001943 case SRP_CRED_REQ:
Bart Van Assche509c07b2014-10-30 14:48:30 +01001944 srp_process_cred_req(ch, iu->buf);
David Dillowbb125882010-10-08 14:40:47 -04001945 break;
1946
1947 case SRP_AER_REQ:
Bart Van Assche509c07b2014-10-30 14:48:30 +01001948 srp_process_aer_req(ch, iu->buf);
David Dillowbb125882010-10-08 14:40:47 -04001949 break;
1950
Roland Dreieraef9ec32005-11-02 14:07:13 -08001951 case SRP_T_LOGOUT:
1952 /* XXX Handle target logout */
David Dillow7aa54bd2008-01-07 18:23:41 -05001953 shost_printk(KERN_WARNING, target->scsi_host,
1954 PFX "Got target logout request\n");
Roland Dreieraef9ec32005-11-02 14:07:13 -08001955 break;
1956
1957 default:
David Dillow7aa54bd2008-01-07 18:23:41 -05001958 shost_printk(KERN_WARNING, target->scsi_host,
1959 PFX "Unhandled SRP opcode 0x%02x\n", opcode);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001960 break;
1961 }
1962
Bart Van Assche509c07b2014-10-30 14:48:30 +01001963 ib_dma_sync_single_for_device(dev, iu->dma, ch->max_ti_iu_len,
Ralph Campbell85507bc2006-12-12 14:30:55 -08001964 DMA_FROM_DEVICE);
Bart Van Asschec996bb42010-07-30 10:59:05 +00001965
Bart Van Assche509c07b2014-10-30 14:48:30 +01001966 res = srp_post_recv(ch, iu);
Bart Van Asschec996bb42010-07-30 10:59:05 +00001967 if (res != 0)
1968 shost_printk(KERN_ERR, target->scsi_host,
1969 PFX "Recv failed with error code %d\n", res);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001970}
1971
Bart Van Asschec1120f82013-10-26 14:35:08 +02001972/**
1973 * srp_tl_err_work() - handle a transport layer error
Bart Van Asscheaf246632014-05-20 15:04:21 +02001974 * @work: Work structure embedded in an SRP target port.
Bart Van Asschec1120f82013-10-26 14:35:08 +02001975 *
1976 * Note: This function may get invoked before the rport has been created,
1977 * hence the target->rport test.
1978 */
1979static void srp_tl_err_work(struct work_struct *work)
1980{
1981 struct srp_target_port *target;
1982
1983 target = container_of(work, struct srp_target_port, tl_err_work);
1984 if (target->rport)
1985 srp_start_tl_fail_timers(target->rport);
1986}
1987
Christoph Hellwig1dc7b1f2015-11-13 12:57:13 +01001988static void srp_handle_qp_err(struct ib_cq *cq, struct ib_wc *wc,
1989 const char *opname)
Bart Van Assche948d1e82011-09-03 09:25:42 +02001990{
Christoph Hellwig1dc7b1f2015-11-13 12:57:13 +01001991 struct srp_rdma_ch *ch = cq->cq_context;
Bart Van Assche7dad6b22014-10-21 18:00:35 +02001992 struct srp_target_port *target = ch->target;
1993
Bart Van Asschec014c8c2015-05-18 13:23:57 +02001994 if (ch->connected && !target->qp_in_error) {
Christoph Hellwig1dc7b1f2015-11-13 12:57:13 +01001995 shost_printk(KERN_ERR, target->scsi_host,
1996 PFX "failed %s status %s (%d) for CQE %p\n",
1997 opname, ib_wc_status_msg(wc->status), wc->status,
1998 wc->wr_cqe);
Bart Van Asschec1120f82013-10-26 14:35:08 +02001999 queue_work(system_long_wq, &target->tl_err_work);
Bart Van Assche4f0af692012-11-26 11:16:40 +01002000 }
Bart Van Assche948d1e82011-09-03 09:25:42 +02002001 target->qp_in_error = true;
2002}
2003
Bart Van Assche76c75b22010-11-26 14:37:47 -05002004static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
Roland Dreieraef9ec32005-11-02 14:07:13 -08002005{
Bart Van Assche76c75b22010-11-26 14:37:47 -05002006 struct srp_target_port *target = host_to_target(shost);
Bart Van Asschea95cadb2013-10-26 14:37:17 +02002007 struct srp_rport *rport = target->rport;
Bart Van Assche509c07b2014-10-30 14:48:30 +01002008 struct srp_rdma_ch *ch;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002009 struct srp_request *req;
2010 struct srp_iu *iu;
2011 struct srp_cmd *cmd;
Ralph Campbell85507bc2006-12-12 14:30:55 -08002012 struct ib_device *dev;
Bart Van Assche76c75b22010-11-26 14:37:47 -05002013 unsigned long flags;
Bart Van Assche77f2c1a2014-10-02 15:29:25 +02002014 u32 tag;
2015 u16 idx;
Bart Van Assched1b42892014-05-20 15:07:20 +02002016 int len, ret;
Bart Van Asschea95cadb2013-10-26 14:37:17 +02002017 const bool in_scsi_eh = !in_interrupt() && current == shost->ehandler;
2018
2019 /*
2020 * The SCSI EH thread is the only context from which srp_queuecommand()
2021 * can get invoked for blocked devices (SDEV_BLOCK /
2022 * SDEV_CREATED_BLOCK). Avoid racing with srp_reconnect_rport() by
2023 * locking the rport mutex if invoked from inside the SCSI EH.
2024 */
2025 if (in_scsi_eh)
2026 mutex_lock(&rport->mutex);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002027
Bart Van Assched1b42892014-05-20 15:07:20 +02002028 scmnd->result = srp_chkready(target->rport);
2029 if (unlikely(scmnd->result))
2030 goto err;
Bart Van Assche2ce19e72013-02-21 17:20:00 +00002031
Bart Van Assche77f2c1a2014-10-02 15:29:25 +02002032 WARN_ON_ONCE(scmnd->request->tag < 0);
2033 tag = blk_mq_unique_tag(scmnd->request);
Bart Van Assched92c0da2014-10-06 17:14:36 +02002034 ch = &target->ch[blk_mq_unique_tag_to_hwq(tag)];
Bart Van Assche77f2c1a2014-10-02 15:29:25 +02002035 idx = blk_mq_unique_tag_to_tag(tag);
2036 WARN_ONCE(idx >= target->req_ring_size, "%s: tag %#x: idx %d >= %d\n",
2037 dev_name(&shost->shost_gendev), tag, idx,
2038 target->req_ring_size);
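	/*
	 * Example (illustrative): blk_mq_unique_tag() encodes the hardware
	 * queue number in the upper 16 bits and the per-queue tag in the
	 * lower 16 bits, so unique tag 0x00030005 selects RDMA channel
	 * target->ch[3] and request slot idx == 5.
	 */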
Bart Van Assche509c07b2014-10-30 14:48:30 +01002039
2040 spin_lock_irqsave(&ch->lock, flags);
2041 iu = __srp_get_tx_iu(ch, SRP_IU_CMD);
Bart Van Assche509c07b2014-10-30 14:48:30 +01002042 spin_unlock_irqrestore(&ch->lock, flags);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002043
Bart Van Assche77f2c1a2014-10-02 15:29:25 +02002044 if (!iu)
2045 goto err;
2046
2047 req = &ch->req_ring[idx];
Greg Kroah-Hartman05321932008-03-06 00:13:36 +01002048 dev = target->srp_host->srp_dev->dev;
David Dillow49248642011-01-14 18:23:24 -05002049 ib_dma_sync_single_for_cpu(dev, iu->dma, target->max_iu_len,
Ralph Campbell85507bc2006-12-12 14:30:55 -08002050 DMA_TO_DEVICE);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002051
David Dillowf8b6e312010-11-26 13:02:21 -05002052 scmnd->host_scribble = (void *) req;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002053
2054 cmd = iu->buf;
2055 memset(cmd, 0, sizeof *cmd);
2056
2057 cmd->opcode = SRP_CMD;
Bart Van Assche985aa492015-05-18 13:27:14 +02002058 int_to_scsilun(scmnd->device->lun, &cmd->lun);
Bart Van Assche77f2c1a2014-10-02 15:29:25 +02002059 cmd->tag = tag;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002060 memcpy(cmd->cdb, scmnd->cmnd, scmnd->cmd_len);
2061
Roland Dreieraef9ec32005-11-02 14:07:13 -08002062 req->scmnd = scmnd;
2063 req->cmd = iu;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002064
Bart Van Assche509c07b2014-10-30 14:48:30 +01002065 len = srp_map_data(scmnd, ch, req);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002066 if (len < 0) {
David Dillow7aa54bd2008-01-07 18:23:41 -05002067 shost_printk(KERN_ERR, target->scsi_host,
Bart Van Assched1b42892014-05-20 15:07:20 +02002068 PFX "Failed to map data (%d)\n", len);
2069 /*
2070 * If we ran out of memory descriptors (-ENOMEM) because an
2071 * application is queuing many requests with more than
Bart Van Assche52ede082014-05-20 15:07:45 +02002072 * max_pages_per_mr sg-list elements, tell the SCSI mid-layer
Bart Van Assched1b42892014-05-20 15:07:20 +02002073 * to reduce queue depth temporarily.
2074 */
2075 scmnd->result = len == -ENOMEM ?
2076 DID_OK << 16 | QUEUE_FULL << 1 : DID_ERROR << 16;
Bart Van Assche76c75b22010-11-26 14:37:47 -05002077 goto err_iu;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002078 }
2079
David Dillow49248642011-01-14 18:23:24 -05002080 ib_dma_sync_single_for_device(dev, iu->dma, target->max_iu_len,
Ralph Campbell85507bc2006-12-12 14:30:55 -08002081 DMA_TO_DEVICE);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002082
Bart Van Assche509c07b2014-10-30 14:48:30 +01002083 if (srp_post_send(ch, iu, len)) {
David Dillow7aa54bd2008-01-07 18:23:41 -05002084 shost_printk(KERN_ERR, target->scsi_host, PFX "Send failed\n");
Roland Dreieraef9ec32005-11-02 14:07:13 -08002085 goto err_unmap;
2086 }
2087
Bart Van Assched1b42892014-05-20 15:07:20 +02002088 ret = 0;
2089
Bart Van Asschea95cadb2013-10-26 14:37:17 +02002090unlock_rport:
2091 if (in_scsi_eh)
2092 mutex_unlock(&rport->mutex);
2093
Bart Van Assched1b42892014-05-20 15:07:20 +02002094 return ret;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002095
2096err_unmap:
Bart Van Assche509c07b2014-10-30 14:48:30 +01002097 srp_unmap_data(scmnd, ch, req);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002098
Bart Van Assche76c75b22010-11-26 14:37:47 -05002099err_iu:
Bart Van Assche509c07b2014-10-30 14:48:30 +01002100 srp_put_tx_iu(ch, iu, SRP_IU_CMD);
Bart Van Assche76c75b22010-11-26 14:37:47 -05002101
Bart Van Assche024ca902014-05-20 15:03:49 +02002102 /*
2103	 * Ensure that the loops that iterate over the request ring cannot
2104	 * encounter a dangling SCSI command pointer.
2105 */
2106 req->scmnd = NULL;
2107
Bart Van Assched1b42892014-05-20 15:07:20 +02002108err:
2109 if (scmnd->result) {
2110 scmnd->scsi_done(scmnd);
2111 ret = 0;
2112 } else {
2113 ret = SCSI_MLQUEUE_HOST_BUSY;
2114 }
Bart Van Asschea95cadb2013-10-26 14:37:17 +02002115
Bart Van Assched1b42892014-05-20 15:07:20 +02002116 goto unlock_rport;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002117}
2118
Bart Van Assche4d73f952013-10-26 14:40:37 +02002119/*
2120 * Note: the resources allocated in this function are freed in
Bart Van Assche509c07b2014-10-30 14:48:30 +01002121 * srp_free_ch_ib().
Bart Van Assche4d73f952013-10-26 14:40:37 +02002122 */
Bart Van Assche509c07b2014-10-30 14:48:30 +01002123static int srp_alloc_iu_bufs(struct srp_rdma_ch *ch)
Roland Dreieraef9ec32005-11-02 14:07:13 -08002124{
Bart Van Assche509c07b2014-10-30 14:48:30 +01002125 struct srp_target_port *target = ch->target;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002126 int i;
2127
Bart Van Assche509c07b2014-10-30 14:48:30 +01002128 ch->rx_ring = kcalloc(target->queue_size, sizeof(*ch->rx_ring),
2129 GFP_KERNEL);
2130 if (!ch->rx_ring)
Bart Van Assche4d73f952013-10-26 14:40:37 +02002131 goto err_no_ring;
Bart Van Assche509c07b2014-10-30 14:48:30 +01002132 ch->tx_ring = kcalloc(target->queue_size, sizeof(*ch->tx_ring),
2133 GFP_KERNEL);
2134 if (!ch->tx_ring)
Bart Van Assche4d73f952013-10-26 14:40:37 +02002135 goto err_no_ring;
2136
2137 for (i = 0; i < target->queue_size; ++i) {
Bart Van Assche509c07b2014-10-30 14:48:30 +01002138 ch->rx_ring[i] = srp_alloc_iu(target->srp_host,
2139 ch->max_ti_iu_len,
2140 GFP_KERNEL, DMA_FROM_DEVICE);
2141 if (!ch->rx_ring[i])
Roland Dreieraef9ec32005-11-02 14:07:13 -08002142 goto err;
2143 }
2144
Bart Van Assche4d73f952013-10-26 14:40:37 +02002145 for (i = 0; i < target->queue_size; ++i) {
Bart Van Assche509c07b2014-10-30 14:48:30 +01002146 ch->tx_ring[i] = srp_alloc_iu(target->srp_host,
2147 target->max_iu_len,
2148 GFP_KERNEL, DMA_TO_DEVICE);
2149 if (!ch->tx_ring[i])
Roland Dreieraef9ec32005-11-02 14:07:13 -08002150 goto err;
Bart Van Asschedcb4cb82010-11-26 13:22:48 -05002151
Bart Van Assche509c07b2014-10-30 14:48:30 +01002152 list_add(&ch->tx_ring[i]->list, &ch->free_tx);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002153 }
2154
2155 return 0;
2156
2157err:
Bart Van Assche4d73f952013-10-26 14:40:37 +02002158 for (i = 0; i < target->queue_size; ++i) {
Bart Van Assche509c07b2014-10-30 14:48:30 +01002159 srp_free_iu(target->srp_host, ch->rx_ring[i]);
2160 srp_free_iu(target->srp_host, ch->tx_ring[i]);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002161 }
2162
Bart Van Assche4d73f952013-10-26 14:40:37 +02002163
2164err_no_ring:
Bart Van Assche509c07b2014-10-30 14:48:30 +01002165 kfree(ch->tx_ring);
2166 ch->tx_ring = NULL;
2167 kfree(ch->rx_ring);
2168 ch->rx_ring = NULL;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002169
2170 return -ENOMEM;
2171}
2172
Bart Van Asschec9b03c12011-09-03 09:34:48 +02002173static uint32_t srp_compute_rq_tmo(struct ib_qp_attr *qp_attr, int attr_mask)
2174{
2175 uint64_t T_tr_ns, max_compl_time_ms;
2176 uint32_t rq_tmo_jiffies;
2177
2178 /*
2179 * According to section 11.2.4.2 in the IBTA spec (Modify Queue Pair,
2180 * table 91), both the QP timeout and the retry count have to be set
2181 * for RC QP's during the RTR to RTS transition.
2182 */
2183 WARN_ON_ONCE((attr_mask & (IB_QP_TIMEOUT | IB_QP_RETRY_CNT)) !=
2184 (IB_QP_TIMEOUT | IB_QP_RETRY_CNT));
2185
2186 /*
2187 * Set target->rq_tmo_jiffies to one second more than the largest time
2188 * it can take before an error completion is generated. See also
2189 * C9-140..142 in the IBTA spec for more information about how to
2190 * convert the QP Local ACK Timeout value to nanoseconds.
2191 */
2192 T_tr_ns = 4096 * (1ULL << qp_attr->timeout);
2193 max_compl_time_ms = qp_attr->retry_cnt * 4 * T_tr_ns;
2194 do_div(max_compl_time_ms, NSEC_PER_MSEC);
2195 rq_tmo_jiffies = msecs_to_jiffies(max_compl_time_ms + 1000);
2196
2197 return rq_tmo_jiffies;
2198}
2199
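/*
 * Worked example (illustrative): for qp_attr->timeout == 19 and
 * qp_attr->retry_cnt == 7, T_tr_ns = 4096 * 2^19 ns ~= 2.15 s, so the
 * largest completion time is 7 * 4 * 2.15 s ~= 60.1 s and the resulting
 * queue timeout is approximately 61 seconds, converted to jiffies.
 */
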
David Dillow961e0be2011-01-14 17:32:07 -05002200static void srp_cm_rep_handler(struct ib_cm_id *cm_id,
Bart Van Asschee6300cb2015-07-31 14:12:48 -07002201 const struct srp_login_rsp *lrsp,
Bart Van Assche509c07b2014-10-30 14:48:30 +01002202 struct srp_rdma_ch *ch)
David Dillow961e0be2011-01-14 17:32:07 -05002203{
Bart Van Assche509c07b2014-10-30 14:48:30 +01002204 struct srp_target_port *target = ch->target;
David Dillow961e0be2011-01-14 17:32:07 -05002205 struct ib_qp_attr *qp_attr = NULL;
2206 int attr_mask = 0;
2207 int ret;
2208 int i;
2209
2210 if (lrsp->opcode == SRP_LOGIN_RSP) {
Bart Van Assche509c07b2014-10-30 14:48:30 +01002211 ch->max_ti_iu_len = be32_to_cpu(lrsp->max_ti_iu_len);
2212 ch->req_lim = be32_to_cpu(lrsp->req_lim_delta);
David Dillow961e0be2011-01-14 17:32:07 -05002213
2214 /*
2215 * Reserve credits for task management so we don't
2216 * bounce requests back to the SCSI mid-layer.
2217 */
2218 target->scsi_host->can_queue
Bart Van Assche509c07b2014-10-30 14:48:30 +01002219 = min(ch->req_lim - SRP_TSK_MGMT_SQ_SIZE,
David Dillow961e0be2011-01-14 17:32:07 -05002220 target->scsi_host->can_queue);
Bart Van Assche4d73f952013-10-26 14:40:37 +02002221 target->scsi_host->cmd_per_lun
2222 = min_t(int, target->scsi_host->can_queue,
2223 target->scsi_host->cmd_per_lun);
David Dillow961e0be2011-01-14 17:32:07 -05002224 } else {
2225 shost_printk(KERN_WARNING, target->scsi_host,
2226 PFX "Unhandled RSP opcode %#x\n", lrsp->opcode);
2227 ret = -ECONNRESET;
2228 goto error;
2229 }
2230
Bart Van Assche509c07b2014-10-30 14:48:30 +01002231 if (!ch->rx_ring) {
2232 ret = srp_alloc_iu_bufs(ch);
David Dillow961e0be2011-01-14 17:32:07 -05002233 if (ret)
2234 goto error;
2235 }
2236
2237 ret = -ENOMEM;
2238 qp_attr = kmalloc(sizeof *qp_attr, GFP_KERNEL);
2239 if (!qp_attr)
2240 goto error;
2241
2242 qp_attr->qp_state = IB_QPS_RTR;
2243 ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
2244 if (ret)
2245 goto error_free;
2246
Bart Van Assche509c07b2014-10-30 14:48:30 +01002247 ret = ib_modify_qp(ch->qp, qp_attr, attr_mask);
David Dillow961e0be2011-01-14 17:32:07 -05002248 if (ret)
2249 goto error_free;
2250
Bart Van Assche4d73f952013-10-26 14:40:37 +02002251 for (i = 0; i < target->queue_size; i++) {
Bart Van Assche509c07b2014-10-30 14:48:30 +01002252 struct srp_iu *iu = ch->rx_ring[i];
2253
2254 ret = srp_post_recv(ch, iu);
David Dillow961e0be2011-01-14 17:32:07 -05002255 if (ret)
2256 goto error_free;
2257 }
2258
2259 qp_attr->qp_state = IB_QPS_RTS;
2260 ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
2261 if (ret)
2262 goto error_free;
2263
Bart Van Asschec9b03c12011-09-03 09:34:48 +02002264 target->rq_tmo_jiffies = srp_compute_rq_tmo(qp_attr, attr_mask);
2265
Bart Van Assche509c07b2014-10-30 14:48:30 +01002266 ret = ib_modify_qp(ch->qp, qp_attr, attr_mask);
David Dillow961e0be2011-01-14 17:32:07 -05002267 if (ret)
2268 goto error_free;
2269
2270 ret = ib_send_cm_rtu(cm_id, NULL, 0);
2271
2272error_free:
2273 kfree(qp_attr);
2274
2275error:
Bart Van Assche509c07b2014-10-30 14:48:30 +01002276 ch->status = ret;
David Dillow961e0be2011-01-14 17:32:07 -05002277}
2278
Roland Dreieraef9ec32005-11-02 14:07:13 -08002279static void srp_cm_rej_handler(struct ib_cm_id *cm_id,
2280 struct ib_cm_event *event,
Bart Van Assche509c07b2014-10-30 14:48:30 +01002281 struct srp_rdma_ch *ch)
Roland Dreieraef9ec32005-11-02 14:07:13 -08002282{
Bart Van Assche509c07b2014-10-30 14:48:30 +01002283 struct srp_target_port *target = ch->target;
David Dillow7aa54bd2008-01-07 18:23:41 -05002284 struct Scsi_Host *shost = target->scsi_host;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002285 struct ib_class_port_info *cpi;
2286 int opcode;
2287
2288 switch (event->param.rej_rcvd.reason) {
2289 case IB_CM_REJ_PORT_CM_REDIRECT:
2290 cpi = event->param.rej_rcvd.ari;
Bart Van Assche509c07b2014-10-30 14:48:30 +01002291 ch->path.dlid = cpi->redirect_lid;
2292 ch->path.pkey = cpi->redirect_pkey;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002293 cm_id->remote_cm_qpn = be32_to_cpu(cpi->redirect_qp) & 0x00ffffff;
Bart Van Assche509c07b2014-10-30 14:48:30 +01002294 memcpy(ch->path.dgid.raw, cpi->redirect_gid, 16);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002295
Bart Van Assche509c07b2014-10-30 14:48:30 +01002296 ch->status = ch->path.dlid ?
Roland Dreieraef9ec32005-11-02 14:07:13 -08002297 SRP_DLID_REDIRECT : SRP_PORT_REDIRECT;
2298 break;
2299
2300 case IB_CM_REJ_PORT_REDIRECT:
Roland Dreier5d7cbfd2007-08-03 10:45:18 -07002301 if (srp_target_is_topspin(target)) {
Roland Dreieraef9ec32005-11-02 14:07:13 -08002302 /*
2303 * Topspin/Cisco SRP gateways incorrectly send
2304 * reject reason code 25 when they mean 24
2305 * (port redirect).
2306 */
Bart Van Assche509c07b2014-10-30 14:48:30 +01002307 memcpy(ch->path.dgid.raw,
Roland Dreieraef9ec32005-11-02 14:07:13 -08002308 event->param.rej_rcvd.ari, 16);
2309
David Dillow7aa54bd2008-01-07 18:23:41 -05002310 shost_printk(KERN_DEBUG, shost,
2311 PFX "Topspin/Cisco redirect to target port GID %016llx%016llx\n",
Bart Van Assche509c07b2014-10-30 14:48:30 +01002312 be64_to_cpu(ch->path.dgid.global.subnet_prefix),
2313 be64_to_cpu(ch->path.dgid.global.interface_id));
Roland Dreieraef9ec32005-11-02 14:07:13 -08002314
Bart Van Assche509c07b2014-10-30 14:48:30 +01002315 ch->status = SRP_PORT_REDIRECT;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002316 } else {
David Dillow7aa54bd2008-01-07 18:23:41 -05002317 shost_printk(KERN_WARNING, shost,
2318 " REJ reason: IB_CM_REJ_PORT_REDIRECT\n");
Bart Van Assche509c07b2014-10-30 14:48:30 +01002319 ch->status = -ECONNRESET;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002320 }
2321 break;
2322
2323 case IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID:
David Dillow7aa54bd2008-01-07 18:23:41 -05002324 shost_printk(KERN_WARNING, shost,
2325 " REJ reason: IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID\n");
Bart Van Assche509c07b2014-10-30 14:48:30 +01002326 ch->status = -ECONNRESET;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002327 break;
2328
2329 case IB_CM_REJ_CONSUMER_DEFINED:
2330 opcode = *(u8 *) event->private_data;
2331 if (opcode == SRP_LOGIN_REJ) {
2332 struct srp_login_rej *rej = event->private_data;
2333 u32 reason = be32_to_cpu(rej->reason);
2334
2335 if (reason == SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE)
David Dillow7aa54bd2008-01-07 18:23:41 -05002336 shost_printk(KERN_WARNING, shost,
2337 PFX "SRP_LOGIN_REJ: requested max_it_iu_len too large\n");
Roland Dreieraef9ec32005-11-02 14:07:13 -08002338 else
Bart Van Asschee7ffde02014-03-14 13:52:21 +01002339 shost_printk(KERN_WARNING, shost, PFX
2340 "SRP LOGIN from %pI6 to %pI6 REJECTED, reason 0x%08x\n",
Bart Van Assche747fe002014-10-30 14:48:05 +01002341 target->sgid.raw,
2342 target->orig_dgid.raw, reason);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002343 } else
David Dillow7aa54bd2008-01-07 18:23:41 -05002344 shost_printk(KERN_WARNING, shost,
2345 " REJ reason: IB_CM_REJ_CONSUMER_DEFINED,"
2346 " opcode 0x%02x\n", opcode);
Bart Van Assche509c07b2014-10-30 14:48:30 +01002347 ch->status = -ECONNRESET;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002348 break;
2349
David Dillow9fe4bcf2008-01-08 17:08:52 -05002350 case IB_CM_REJ_STALE_CONN:
2351 shost_printk(KERN_WARNING, shost, " REJ reason: stale connection\n");
Bart Van Assche509c07b2014-10-30 14:48:30 +01002352 ch->status = SRP_STALE_CONN;
David Dillow9fe4bcf2008-01-08 17:08:52 -05002353 break;
2354
Roland Dreieraef9ec32005-11-02 14:07:13 -08002355 default:
David Dillow7aa54bd2008-01-07 18:23:41 -05002356 shost_printk(KERN_WARNING, shost, " REJ reason 0x%x\n",
2357 event->param.rej_rcvd.reason);
Bart Van Assche509c07b2014-10-30 14:48:30 +01002358 ch->status = -ECONNRESET;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002359 }
2360}
2361
2362static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
2363{
Bart Van Assche509c07b2014-10-30 14:48:30 +01002364 struct srp_rdma_ch *ch = cm_id->context;
2365 struct srp_target_port *target = ch->target;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002366 int comp = 0;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002367
2368 switch (event->event) {
2369 case IB_CM_REQ_ERROR:
David Dillow7aa54bd2008-01-07 18:23:41 -05002370 shost_printk(KERN_DEBUG, target->scsi_host,
2371 PFX "Sending CM REQ failed\n");
Roland Dreieraef9ec32005-11-02 14:07:13 -08002372 comp = 1;
Bart Van Assche509c07b2014-10-30 14:48:30 +01002373 ch->status = -ECONNRESET;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002374 break;
2375
2376 case IB_CM_REP_RECEIVED:
2377 comp = 1;
Bart Van Assche509c07b2014-10-30 14:48:30 +01002378 srp_cm_rep_handler(cm_id, event->private_data, ch);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002379 break;
2380
2381 case IB_CM_REJ_RECEIVED:
David Dillow7aa54bd2008-01-07 18:23:41 -05002382 shost_printk(KERN_DEBUG, target->scsi_host, PFX "REJ received\n");
Roland Dreieraef9ec32005-11-02 14:07:13 -08002383 comp = 1;
2384
Bart Van Assche509c07b2014-10-30 14:48:30 +01002385 srp_cm_rej_handler(cm_id, event, ch);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002386 break;
2387
Ishai Rabinovitzb7ac4ab2006-06-17 20:37:32 -07002388 case IB_CM_DREQ_RECEIVED:
David Dillow7aa54bd2008-01-07 18:23:41 -05002389 shost_printk(KERN_WARNING, target->scsi_host,
2390 PFX "DREQ received - connection closed\n");
Bart Van Asschec014c8c2015-05-18 13:23:57 +02002391 ch->connected = false;
Ishai Rabinovitzb7ac4ab2006-06-17 20:37:32 -07002392 if (ib_send_cm_drep(cm_id, NULL, 0))
David Dillow7aa54bd2008-01-07 18:23:41 -05002393 shost_printk(KERN_ERR, target->scsi_host,
2394 PFX "Sending CM DREP failed\n");
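		/* Handle the transport-layer error from workqueue context
		 * (srp_tl_err_work(), bound via INIT_WORK() below). */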
Bart Van Asschec1120f82013-10-26 14:35:08 +02002395 queue_work(system_long_wq, &target->tl_err_work);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002396 break;
2397
2398 case IB_CM_TIMEWAIT_EXIT:
David Dillow7aa54bd2008-01-07 18:23:41 -05002399 shost_printk(KERN_ERR, target->scsi_host,
2400 PFX "connection closed\n");
Bart Van Asscheac72d762014-03-14 13:53:40 +01002401 comp = 1;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002402
Bart Van Assche509c07b2014-10-30 14:48:30 +01002403 ch->status = 0;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002404 break;
2405
Ishai Rabinovitzb7ac4ab2006-06-17 20:37:32 -07002406 case IB_CM_MRA_RECEIVED:
2407 case IB_CM_DREQ_ERROR:
2408 case IB_CM_DREP_RECEIVED:
2409 break;
2410
Roland Dreieraef9ec32005-11-02 14:07:13 -08002411 default:
David Dillow7aa54bd2008-01-07 18:23:41 -05002412 shost_printk(KERN_WARNING, target->scsi_host,
2413 PFX "Unhandled CM event %d\n", event->event);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002414 break;
2415 }
2416
2417 if (comp)
Bart Van Assche509c07b2014-10-30 14:48:30 +01002418 complete(&ch->done);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002419
Roland Dreieraef9ec32005-11-02 14:07:13 -08002420 return 0;
2421}
2422
Jack Wang71444b92013-11-07 11:37:37 +01002423/**
Jack Wang71444b92013-11-07 11:37:37 +01002424 * srp_change_queue_depth - set device queue depth
2425 * @sdev: scsi device struct
2426 * @qdepth: requested queue depth
Jack Wang71444b92013-11-07 11:37:37 +01002427 *
2428 * Returns queue depth.
2429 */
2430static int
Christoph Hellwigdb5ed4d2014-11-13 15:08:42 +01002431srp_change_queue_depth(struct scsi_device *sdev, int qdepth)
Jack Wang71444b92013-11-07 11:37:37 +01002432{
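	/* Devices without tagged queuing handle one command at a time. */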
Christoph Hellwigc40ecc12014-11-13 14:25:11 +01002433 if (!sdev->tagged_supported)
Christoph Hellwig1e6f2412014-11-13 14:27:41 +01002434 qdepth = 1;
Christoph Hellwigdb5ed4d2014-11-13 15:08:42 +01002435 return scsi_change_queue_depth(sdev, qdepth);
Jack Wang71444b92013-11-07 11:37:37 +01002436}
2437
Bart Van Assche985aa492015-05-18 13:27:14 +02002438static int srp_send_tsk_mgmt(struct srp_rdma_ch *ch, u64 req_tag, u64 lun,
2439 u8 func)
Roland Dreieraef9ec32005-11-02 14:07:13 -08002440{
Bart Van Assche509c07b2014-10-30 14:48:30 +01002441 struct srp_target_port *target = ch->target;
Bart Van Asschea95cadb2013-10-26 14:37:17 +02002442 struct srp_rport *rport = target->rport;
David Dillow19081f32010-10-18 08:54:49 -04002443 struct ib_device *dev = target->srp_host->srp_dev->dev;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002444 struct srp_iu *iu;
2445 struct srp_tsk_mgmt *tsk_mgmt;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002446
Bart Van Asschec014c8c2015-05-18 13:23:57 +02002447 if (!ch->connected || target->qp_in_error)
Bart Van Assche3780d1f2013-02-21 17:18:00 +00002448 return -1;
2449
Bart Van Assche509c07b2014-10-30 14:48:30 +01002450 init_completion(&ch->tsk_mgmt_done);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002451
Bart Van Asschea95cadb2013-10-26 14:37:17 +02002452 /*
Bart Van Assche509c07b2014-10-30 14:48:30 +01002453	 * Lock the rport mutex to prevent srp_create_ch_ib() from being
Bart Van Asschea95cadb2013-10-26 14:37:17 +02002454	 * invoked while a task management function is being sent.
2455 */
2456 mutex_lock(&rport->mutex);
Bart Van Assche509c07b2014-10-30 14:48:30 +01002457 spin_lock_irq(&ch->lock);
2458 iu = __srp_get_tx_iu(ch, SRP_IU_TSK_MGMT);
2459 spin_unlock_irq(&ch->lock);
Bart Van Assche76c75b22010-11-26 14:37:47 -05002460
Bart Van Asschea95cadb2013-10-26 14:37:17 +02002461 if (!iu) {
2462 mutex_unlock(&rport->mutex);
2463
Bart Van Assche76c75b22010-11-26 14:37:47 -05002464 return -1;
Bart Van Asschea95cadb2013-10-26 14:37:17 +02002465 }
Roland Dreieraef9ec32005-11-02 14:07:13 -08002466
David Dillow19081f32010-10-18 08:54:49 -04002467 ib_dma_sync_single_for_cpu(dev, iu->dma, sizeof *tsk_mgmt,
2468 DMA_TO_DEVICE);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002469 tsk_mgmt = iu->buf;
2470 memset(tsk_mgmt, 0, sizeof *tsk_mgmt);
2471
2472 tsk_mgmt->opcode = SRP_TSK_MGMT;
Bart Van Assche985aa492015-05-18 13:27:14 +02002473 int_to_scsilun(lun, &tsk_mgmt->lun);
David Dillowf8b6e312010-11-26 13:02:21 -05002474 tsk_mgmt->tag = req_tag | SRP_TAG_TSK_MGMT;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002475 tsk_mgmt->tsk_mgmt_func = func;
David Dillowf8b6e312010-11-26 13:02:21 -05002476 tsk_mgmt->task_tag = req_tag;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002477
David Dillow19081f32010-10-18 08:54:49 -04002478 ib_dma_sync_single_for_device(dev, iu->dma, sizeof *tsk_mgmt,
2479 DMA_TO_DEVICE);
Bart Van Assche509c07b2014-10-30 14:48:30 +01002480 if (srp_post_send(ch, iu, sizeof(*tsk_mgmt))) {
2481 srp_put_tx_iu(ch, iu, SRP_IU_TSK_MGMT);
Bart Van Asschea95cadb2013-10-26 14:37:17 +02002482 mutex_unlock(&rport->mutex);
2483
Bart Van Assche76c75b22010-11-26 14:37:47 -05002484 return -1;
2485 }
Bart Van Asschea95cadb2013-10-26 14:37:17 +02002486 mutex_unlock(&rport->mutex);
Roland Dreierd945e1d2006-05-09 10:50:28 -07002487
Bart Van Assche509c07b2014-10-30 14:48:30 +01002488 if (!wait_for_completion_timeout(&ch->tsk_mgmt_done,
Roland Dreieraef9ec32005-11-02 14:07:13 -08002489 msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS)))
Roland Dreierd945e1d2006-05-09 10:50:28 -07002490 return -1;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002491
Roland Dreierd945e1d2006-05-09 10:50:28 -07002492 return 0;
Roland Dreierd945e1d2006-05-09 10:50:28 -07002493}
2494
Roland Dreieraef9ec32005-11-02 14:07:13 -08002495static int srp_abort(struct scsi_cmnd *scmnd)
2496{
Roland Dreierd945e1d2006-05-09 10:50:28 -07002497 struct srp_target_port *target = host_to_target(scmnd->device->host);
David Dillowf8b6e312010-11-26 13:02:21 -05002498 struct srp_request *req = (struct srp_request *) scmnd->host_scribble;
Bart Van Assche77f2c1a2014-10-02 15:29:25 +02002499 u32 tag;
Bart Van Assched92c0da2014-10-06 17:14:36 +02002500 u16 ch_idx;
Bart Van Assche509c07b2014-10-30 14:48:30 +01002501 struct srp_rdma_ch *ch;
Bart Van Assche086f44f2013-06-12 15:23:04 +02002502 int ret;
Roland Dreierd945e1d2006-05-09 10:50:28 -07002503
David Dillow7aa54bd2008-01-07 18:23:41 -05002504 shost_printk(KERN_ERR, target->scsi_host, "SRP abort called\n");
Roland Dreieraef9ec32005-11-02 14:07:13 -08002505
Bart Van Assched92c0da2014-10-06 17:14:36 +02002506 if (!req)
Bart Van Assche99b66972013-10-10 13:52:33 +02002507 return SUCCESS;
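	/*
	 * The block-layer tag encodes both the hardware queue (the RDMA
	 * channel) and the per-channel request index; unpack it below.
	 */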
Bart Van Assche77f2c1a2014-10-02 15:29:25 +02002508 tag = blk_mq_unique_tag(scmnd->request);
Bart Van Assched92c0da2014-10-06 17:14:36 +02002509 ch_idx = blk_mq_unique_tag_to_hwq(tag);
2510 if (WARN_ON_ONCE(ch_idx >= target->ch_count))
2511 return SUCCESS;
2512 ch = &target->ch[ch_idx];
2513 if (!srp_claim_req(ch, req, NULL, scmnd))
2514 return SUCCESS;
2515 shost_printk(KERN_ERR, target->scsi_host,
2516 "Sending SRP abort for tag %#x\n", tag);
Bart Van Assche77f2c1a2014-10-02 15:29:25 +02002517 if (srp_send_tsk_mgmt(ch, tag, scmnd->device->lun,
Bart Van Assche80d5e8a2013-07-10 17:36:35 +02002518 SRP_TSK_ABORT_TASK) == 0)
Bart Van Assche086f44f2013-06-12 15:23:04 +02002519 ret = SUCCESS;
Bart Van Asscheed9b2262013-10-26 14:34:27 +02002520 else if (target->rport->state == SRP_RPORT_LOST)
Bart Van Assche99e1c132013-06-28 14:49:58 +02002521 ret = FAST_IO_FAIL;
Bart Van Assche086f44f2013-06-12 15:23:04 +02002522 else
2523 ret = FAILED;
Bart Van Assche509c07b2014-10-30 14:48:30 +01002524 srp_free_req(ch, req, scmnd, 0);
Bart Van Assche22032992012-08-14 13:18:53 +00002525 scmnd->result = DID_ABORT << 16;
Bart Van Assched8536672012-08-24 10:29:11 +00002526 scmnd->scsi_done(scmnd);
Roland Dreierd945e1d2006-05-09 10:50:28 -07002527
Bart Van Assche086f44f2013-06-12 15:23:04 +02002528 return ret;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002529}
2530
2531static int srp_reset_device(struct scsi_cmnd *scmnd)
2532{
Roland Dreierd945e1d2006-05-09 10:50:28 -07002533 struct srp_target_port *target = host_to_target(scmnd->device->host);
Bart Van Assched92c0da2014-10-06 17:14:36 +02002534 struct srp_rdma_ch *ch;
Bart Van Assche536ae142010-11-26 13:58:27 -05002535	int i, j;
Roland Dreierd945e1d2006-05-09 10:50:28 -07002536
David Dillow7aa54bd2008-01-07 18:23:41 -05002537 shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n");
Roland Dreieraef9ec32005-11-02 14:07:13 -08002538
Bart Van Assched92c0da2014-10-06 17:14:36 +02002539 ch = &target->ch[0];
Bart Van Assche509c07b2014-10-30 14:48:30 +01002540 if (srp_send_tsk_mgmt(ch, SRP_TAG_NO_REQ, scmnd->device->lun,
David Dillowf8b6e312010-11-26 13:02:21 -05002541 SRP_TSK_LUN_RESET))
Roland Dreierd945e1d2006-05-09 10:50:28 -07002542 return FAILED;
Bart Van Assche509c07b2014-10-30 14:48:30 +01002543 if (ch->tsk_mgmt_status)
Roland Dreierd945e1d2006-05-09 10:50:28 -07002544 return FAILED;
2545
Bart Van Assched92c0da2014-10-06 17:14:36 +02002546 for (i = 0; i < target->ch_count; i++) {
2547 ch = &target->ch[i];
2548		for (j = 0; j < target->req_ring_size; ++j) {
2549			struct srp_request *req = &ch->req_ring[j];
Bart Van Assche509c07b2014-10-30 14:48:30 +01002550
Bart Van Assched92c0da2014-10-06 17:14:36 +02002551 srp_finish_req(ch, req, scmnd->device, DID_RESET << 16);
2552 }
Bart Van Assche536ae142010-11-26 13:58:27 -05002553 }
Roland Dreierd945e1d2006-05-09 10:50:28 -07002554
Roland Dreierd945e1d2006-05-09 10:50:28 -07002555 return SUCCESS;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002556}
2557
2558static int srp_reset_host(struct scsi_cmnd *scmnd)
2559{
2560 struct srp_target_port *target = host_to_target(scmnd->device->host);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002561
David Dillow7aa54bd2008-01-07 18:23:41 -05002562 shost_printk(KERN_ERR, target->scsi_host, PFX "SRP reset_host called\n");
Roland Dreieraef9ec32005-11-02 14:07:13 -08002563
Bart Van Asscheed9b2262013-10-26 14:34:27 +02002564 return srp_reconnect_rport(target->rport) == 0 ? SUCCESS : FAILED;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002565}
2566
Bart Van Asschec9b03c12011-09-03 09:34:48 +02002567static int srp_slave_configure(struct scsi_device *sdev)
2568{
2569 struct Scsi_Host *shost = sdev->host;
2570 struct srp_target_port *target = host_to_target(shost);
2571 struct request_queue *q = sdev->request_queue;
2572 unsigned long timeout;
2573
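	/*
	 * For disks, make the request timeout at least 30 seconds and no
	 * shorter than the rport timeout derived in rq_tmo_jiffies.
	 */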
2574 if (sdev->type == TYPE_DISK) {
2575 timeout = max_t(unsigned, 30 * HZ, target->rq_tmo_jiffies);
2576 blk_queue_rq_timeout(q, timeout);
2577 }
2578
2579 return 0;
2580}
2581
Tony Jonesee959b02008-02-22 00:13:36 +01002582static ssize_t show_id_ext(struct device *dev, struct device_attribute *attr,
2583 char *buf)
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002584{
Tony Jonesee959b02008-02-22 00:13:36 +01002585 struct srp_target_port *target = host_to_target(class_to_shost(dev));
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002586
Bart Van Assche45c37ca2015-05-18 13:25:10 +02002587 return sprintf(buf, "0x%016llx\n", be64_to_cpu(target->id_ext));
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002588}
2589
Tony Jonesee959b02008-02-22 00:13:36 +01002590static ssize_t show_ioc_guid(struct device *dev, struct device_attribute *attr,
2591 char *buf)
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002592{
Tony Jonesee959b02008-02-22 00:13:36 +01002593 struct srp_target_port *target = host_to_target(class_to_shost(dev));
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002594
Bart Van Assche45c37ca2015-05-18 13:25:10 +02002595 return sprintf(buf, "0x%016llx\n", be64_to_cpu(target->ioc_guid));
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002596}
2597
Tony Jonesee959b02008-02-22 00:13:36 +01002598static ssize_t show_service_id(struct device *dev,
2599 struct device_attribute *attr, char *buf)
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002600{
Tony Jonesee959b02008-02-22 00:13:36 +01002601 struct srp_target_port *target = host_to_target(class_to_shost(dev));
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002602
Bart Van Assche45c37ca2015-05-18 13:25:10 +02002603 return sprintf(buf, "0x%016llx\n", be64_to_cpu(target->service_id));
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002604}
2605
Tony Jonesee959b02008-02-22 00:13:36 +01002606static ssize_t show_pkey(struct device *dev, struct device_attribute *attr,
2607 char *buf)
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002608{
Tony Jonesee959b02008-02-22 00:13:36 +01002609 struct srp_target_port *target = host_to_target(class_to_shost(dev));
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002610
Bart Van Assche747fe002014-10-30 14:48:05 +01002611 return sprintf(buf, "0x%04x\n", be16_to_cpu(target->pkey));
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002612}
2613
Bart Van Assche848b3082013-10-26 14:38:12 +02002614static ssize_t show_sgid(struct device *dev, struct device_attribute *attr,
2615 char *buf)
2616{
2617 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2618
Bart Van Assche747fe002014-10-30 14:48:05 +01002619 return sprintf(buf, "%pI6\n", target->sgid.raw);
Bart Van Assche848b3082013-10-26 14:38:12 +02002620}
2621
Tony Jonesee959b02008-02-22 00:13:36 +01002622static ssize_t show_dgid(struct device *dev, struct device_attribute *attr,
2623 char *buf)
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002624{
Tony Jonesee959b02008-02-22 00:13:36 +01002625 struct srp_target_port *target = host_to_target(class_to_shost(dev));
Bart Van Assched92c0da2014-10-06 17:14:36 +02002626 struct srp_rdma_ch *ch = &target->ch[0];
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002627
Bart Van Assche509c07b2014-10-30 14:48:30 +01002628 return sprintf(buf, "%pI6\n", ch->path.dgid.raw);
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002629}
2630
Tony Jonesee959b02008-02-22 00:13:36 +01002631static ssize_t show_orig_dgid(struct device *dev,
2632 struct device_attribute *attr, char *buf)
Ishai Rabinovitz3633b3d2007-05-06 21:18:11 -07002633{
Tony Jonesee959b02008-02-22 00:13:36 +01002634 struct srp_target_port *target = host_to_target(class_to_shost(dev));
Ishai Rabinovitz3633b3d2007-05-06 21:18:11 -07002635
Bart Van Assche747fe002014-10-30 14:48:05 +01002636 return sprintf(buf, "%pI6\n", target->orig_dgid.raw);
Ishai Rabinovitz3633b3d2007-05-06 21:18:11 -07002637}
2638
Bart Van Assche89de7482010-08-03 14:08:45 +00002639static ssize_t show_req_lim(struct device *dev,
2640 struct device_attribute *attr, char *buf)
2641{
2642 struct srp_target_port *target = host_to_target(class_to_shost(dev));
Bart Van Assched92c0da2014-10-06 17:14:36 +02002643 struct srp_rdma_ch *ch;
2644 int i, req_lim = INT_MAX;
Bart Van Assche89de7482010-08-03 14:08:45 +00002645
Bart Van Assched92c0da2014-10-06 17:14:36 +02002646 for (i = 0; i < target->ch_count; i++) {
2647 ch = &target->ch[i];
2648 req_lim = min(req_lim, ch->req_lim);
2649 }
2650 return sprintf(buf, "%d\n", req_lim);
Bart Van Assche89de7482010-08-03 14:08:45 +00002651}
2652
Tony Jonesee959b02008-02-22 00:13:36 +01002653static ssize_t show_zero_req_lim(struct device *dev,
2654 struct device_attribute *attr, char *buf)
Roland Dreier6bfa24f2006-06-17 20:37:33 -07002655{
Tony Jonesee959b02008-02-22 00:13:36 +01002656 struct srp_target_port *target = host_to_target(class_to_shost(dev));
Roland Dreier6bfa24f2006-06-17 20:37:33 -07002657
Roland Dreier6bfa24f2006-06-17 20:37:33 -07002658 return sprintf(buf, "%d\n", target->zero_req_lim);
2659}
2660
Tony Jonesee959b02008-02-22 00:13:36 +01002661static ssize_t show_local_ib_port(struct device *dev,
2662 struct device_attribute *attr, char *buf)
Ishai Rabinovitzded7f1a2006-08-15 17:34:52 +03002663{
Tony Jonesee959b02008-02-22 00:13:36 +01002664 struct srp_target_port *target = host_to_target(class_to_shost(dev));
Ishai Rabinovitzded7f1a2006-08-15 17:34:52 +03002665
2666 return sprintf(buf, "%d\n", target->srp_host->port);
2667}
2668
Tony Jonesee959b02008-02-22 00:13:36 +01002669static ssize_t show_local_ib_device(struct device *dev,
2670 struct device_attribute *attr, char *buf)
Ishai Rabinovitzded7f1a2006-08-15 17:34:52 +03002671{
Tony Jonesee959b02008-02-22 00:13:36 +01002672 struct srp_target_port *target = host_to_target(class_to_shost(dev));
Ishai Rabinovitzded7f1a2006-08-15 17:34:52 +03002673
Greg Kroah-Hartman05321932008-03-06 00:13:36 +01002674 return sprintf(buf, "%s\n", target->srp_host->srp_dev->dev->name);
Ishai Rabinovitzded7f1a2006-08-15 17:34:52 +03002675}
2676
Bart Van Assched92c0da2014-10-06 17:14:36 +02002677static ssize_t show_ch_count(struct device *dev, struct device_attribute *attr,
2678 char *buf)
2679{
2680 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2681
2682 return sprintf(buf, "%d\n", target->ch_count);
2683}
2684
Bart Van Assche4b5e5f42013-06-28 14:57:42 +02002685static ssize_t show_comp_vector(struct device *dev,
2686 struct device_attribute *attr, char *buf)
2687{
2688 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2689
2690 return sprintf(buf, "%d\n", target->comp_vector);
2691}
2692
Vu Pham7bb312e2013-10-26 14:31:27 +02002693static ssize_t show_tl_retry_count(struct device *dev,
2694 struct device_attribute *attr, char *buf)
2695{
2696 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2697
2698 return sprintf(buf, "%d\n", target->tl_retry_count);
2699}
2700
David Dillow49248642011-01-14 18:23:24 -05002701static ssize_t show_cmd_sg_entries(struct device *dev,
2702 struct device_attribute *attr, char *buf)
2703{
2704 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2705
2706 return sprintf(buf, "%u\n", target->cmd_sg_cnt);
2707}
2708
David Dillowc07d4242011-01-16 13:57:10 -05002709static ssize_t show_allow_ext_sg(struct device *dev,
2710 struct device_attribute *attr, char *buf)
2711{
2712 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2713
2714 return sprintf(buf, "%s\n", target->allow_ext_sg ? "true" : "false");
2715}
2716
Tony Jonesee959b02008-02-22 00:13:36 +01002717static DEVICE_ATTR(id_ext, S_IRUGO, show_id_ext, NULL);
2718static DEVICE_ATTR(ioc_guid, S_IRUGO, show_ioc_guid, NULL);
2719static DEVICE_ATTR(service_id, S_IRUGO, show_service_id, NULL);
2720static DEVICE_ATTR(pkey, S_IRUGO, show_pkey, NULL);
Bart Van Assche848b3082013-10-26 14:38:12 +02002721static DEVICE_ATTR(sgid, S_IRUGO, show_sgid, NULL);
Tony Jonesee959b02008-02-22 00:13:36 +01002722static DEVICE_ATTR(dgid, S_IRUGO, show_dgid, NULL);
2723static DEVICE_ATTR(orig_dgid, S_IRUGO, show_orig_dgid, NULL);
Bart Van Assche89de7482010-08-03 14:08:45 +00002724static DEVICE_ATTR(req_lim, S_IRUGO, show_req_lim, NULL);
Tony Jonesee959b02008-02-22 00:13:36 +01002725static DEVICE_ATTR(zero_req_lim, S_IRUGO, show_zero_req_lim, NULL);
2726static DEVICE_ATTR(local_ib_port, S_IRUGO, show_local_ib_port, NULL);
2727static DEVICE_ATTR(local_ib_device, S_IRUGO, show_local_ib_device, NULL);
Bart Van Assched92c0da2014-10-06 17:14:36 +02002728static DEVICE_ATTR(ch_count, S_IRUGO, show_ch_count, NULL);
Bart Van Assche4b5e5f42013-06-28 14:57:42 +02002729static DEVICE_ATTR(comp_vector, S_IRUGO, show_comp_vector, NULL);
Vu Pham7bb312e2013-10-26 14:31:27 +02002730static DEVICE_ATTR(tl_retry_count, S_IRUGO, show_tl_retry_count, NULL);
David Dillow49248642011-01-14 18:23:24 -05002731static DEVICE_ATTR(cmd_sg_entries, S_IRUGO, show_cmd_sg_entries, NULL);
David Dillowc07d4242011-01-16 13:57:10 -05002732static DEVICE_ATTR(allow_ext_sg, S_IRUGO, show_allow_ext_sg, NULL);
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002733
Tony Jonesee959b02008-02-22 00:13:36 +01002734static struct device_attribute *srp_host_attrs[] = {
2735 &dev_attr_id_ext,
2736 &dev_attr_ioc_guid,
2737 &dev_attr_service_id,
2738 &dev_attr_pkey,
Bart Van Assche848b3082013-10-26 14:38:12 +02002739 &dev_attr_sgid,
Tony Jonesee959b02008-02-22 00:13:36 +01002740 &dev_attr_dgid,
2741 &dev_attr_orig_dgid,
Bart Van Assche89de7482010-08-03 14:08:45 +00002742 &dev_attr_req_lim,
Tony Jonesee959b02008-02-22 00:13:36 +01002743 &dev_attr_zero_req_lim,
2744 &dev_attr_local_ib_port,
2745 &dev_attr_local_ib_device,
Bart Van Assched92c0da2014-10-06 17:14:36 +02002746 &dev_attr_ch_count,
Bart Van Assche4b5e5f42013-06-28 14:57:42 +02002747 &dev_attr_comp_vector,
Vu Pham7bb312e2013-10-26 14:31:27 +02002748 &dev_attr_tl_retry_count,
David Dillow49248642011-01-14 18:23:24 -05002749 &dev_attr_cmd_sg_entries,
David Dillowc07d4242011-01-16 13:57:10 -05002750 &dev_attr_allow_ext_sg,
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002751 NULL
2752};
2753
Roland Dreieraef9ec32005-11-02 14:07:13 -08002754static struct scsi_host_template srp_template = {
2755 .module = THIS_MODULE,
Roland Dreierb7f008f2007-05-06 21:18:11 -07002756 .name = "InfiniBand SRP initiator",
2757 .proc_name = DRV_NAME,
Bart Van Asschec9b03c12011-09-03 09:34:48 +02002758 .slave_configure = srp_slave_configure,
Roland Dreieraef9ec32005-11-02 14:07:13 -08002759 .info = srp_target_info,
2760 .queuecommand = srp_queuecommand,
Jack Wang71444b92013-11-07 11:37:37 +01002761 .change_queue_depth = srp_change_queue_depth,
Roland Dreieraef9ec32005-11-02 14:07:13 -08002762 .eh_abort_handler = srp_abort,
2763 .eh_device_reset_handler = srp_reset_device,
2764 .eh_host_reset_handler = srp_reset_host,
Bart Van Assche2742c1d2013-06-12 15:24:25 +02002765 .skip_settle_delay = true,
David Dillow49248642011-01-14 18:23:24 -05002766 .sg_tablesize = SRP_DEF_SG_TABLESIZE,
Bart Van Assche4d73f952013-10-26 14:40:37 +02002767 .can_queue = SRP_DEFAULT_CMD_SQ_SIZE,
Roland Dreieraef9ec32005-11-02 14:07:13 -08002768 .this_id = -1,
Bart Van Assche4d73f952013-10-26 14:40:37 +02002769 .cmd_per_lun = SRP_DEFAULT_CMD_SQ_SIZE,
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002770 .use_clustering = ENABLE_CLUSTERING,
Bart Van Assche77f2c1a2014-10-02 15:29:25 +02002771 .shost_attrs = srp_host_attrs,
Christoph Hellwigc40ecc12014-11-13 14:25:11 +01002772 .track_queue_depth = 1,
Roland Dreieraef9ec32005-11-02 14:07:13 -08002773};
2774
Bart Van Assche34aa6542014-10-30 14:47:22 +01002775static int srp_sdev_count(struct Scsi_Host *host)
2776{
2777 struct scsi_device *sdev;
2778 int c = 0;
2779
2780 shost_for_each_device(sdev, host)
2781 c++;
2782
2783 return c;
2784}
2785
Bart Van Asschebc44bd12015-08-14 11:01:09 -07002786/*
2787 * Return values:
2788 * < 0 upon failure. Caller is responsible for SRP target port cleanup.
2789 * 0 and target->state == SRP_TARGET_REMOVED if asynchronous target port
2790 * removal has been scheduled.
2791 * 0 and target->state != SRP_TARGET_REMOVED upon success.
2792 */
Roland Dreieraef9ec32005-11-02 14:07:13 -08002793static int srp_add_target(struct srp_host *host, struct srp_target_port *target)
2794{
FUJITA Tomonori32368222007-06-27 16:33:12 +09002795 struct srp_rport_identifiers ids;
2796 struct srp_rport *rport;
2797
Bart Van Assche34aa6542014-10-30 14:47:22 +01002798 target->state = SRP_TARGET_SCANNING;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002799 sprintf(target->target_name, "SRP.T10:%016llX",
Bart Van Assche45c37ca2015-05-18 13:25:10 +02002800 be64_to_cpu(target->id_ext));
Roland Dreieraef9ec32005-11-02 14:07:13 -08002801
Greg Kroah-Hartman05321932008-03-06 00:13:36 +01002802 if (scsi_add_host(target->scsi_host, host->srp_dev->dev->dma_device))
Roland Dreieraef9ec32005-11-02 14:07:13 -08002803 return -ENODEV;
2804
FUJITA Tomonori32368222007-06-27 16:33:12 +09002805 memcpy(ids.port_id, &target->id_ext, 8);
2806 memcpy(ids.port_id + 8, &target->ioc_guid, 8);
FUJITA Tomonoriaebd5e42007-07-11 15:08:15 +09002807 ids.roles = SRP_RPORT_ROLE_TARGET;
FUJITA Tomonori32368222007-06-27 16:33:12 +09002808 rport = srp_rport_add(target->scsi_host, &ids);
2809 if (IS_ERR(rport)) {
2810 scsi_remove_host(target->scsi_host);
2811 return PTR_ERR(rport);
2812 }
2813
Bart Van Asschedc1bdbd2011-09-16 20:41:13 +02002814 rport->lld_data = target;
Bart Van Assche9dd69a62013-10-26 14:32:30 +02002815 target->rport = rport;
Bart Van Asschedc1bdbd2011-09-16 20:41:13 +02002816
Matthew Wilcoxb3589fd2006-06-17 20:37:30 -07002817 spin_lock(&host->target_lock);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002818 list_add_tail(&target->list, &host->target_list);
Matthew Wilcoxb3589fd2006-06-17 20:37:30 -07002819 spin_unlock(&host->target_lock);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002820
Roland Dreieraef9ec32005-11-02 14:07:13 -08002821 scsi_scan_target(&target->scsi_host->shost_gendev,
Hannes Reinecke1d645082016-03-17 08:39:45 +01002822 0, target->scsi_id, SCAN_WILD_CARD, SCSI_SCAN_INITIAL);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002823
Bart Van Asschec014c8c2015-05-18 13:23:57 +02002824 if (srp_connected_ch(target) < target->ch_count ||
2825 target->qp_in_error) {
Bart Van Assche34aa6542014-10-30 14:47:22 +01002826 shost_printk(KERN_INFO, target->scsi_host,
2827 PFX "SCSI scan failed - removing SCSI host\n");
2828 srp_queue_remove_work(target);
2829 goto out;
2830 }
2831
2832 pr_debug(PFX "%s: SCSI scan succeeded - detected %d LUNs\n",
2833 dev_name(&target->scsi_host->shost_gendev),
2834 srp_sdev_count(target->scsi_host));
2835
2836 spin_lock_irq(&target->lock);
2837 if (target->state == SRP_TARGET_SCANNING)
2838 target->state = SRP_TARGET_LIVE;
2839 spin_unlock_irq(&target->lock);
2840
2841out:
Roland Dreieraef9ec32005-11-02 14:07:13 -08002842 return 0;
2843}
2844
Tony Jonesee959b02008-02-22 00:13:36 +01002845static void srp_release_dev(struct device *dev)
Roland Dreieraef9ec32005-11-02 14:07:13 -08002846{
2847 struct srp_host *host =
Tony Jonesee959b02008-02-22 00:13:36 +01002848 container_of(dev, struct srp_host, dev);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002849
2850 complete(&host->released);
2851}
2852
2853static struct class srp_class = {
2854 .name = "infiniband_srp",
Tony Jonesee959b02008-02-22 00:13:36 +01002855 .dev_release = srp_release_dev
Roland Dreieraef9ec32005-11-02 14:07:13 -08002856};
2857
Bart Van Assche96fc2482013-06-28 14:51:26 +02002858/**
2859 * srp_conn_unique() - check whether the connection to a target is unique
Bart Van Asscheaf246632014-05-20 15:04:21 +02002860 * @host: SRP host.
2861 * @target: SRP target port.
Bart Van Assche96fc2482013-06-28 14:51:26 +02002862 */
2863static bool srp_conn_unique(struct srp_host *host,
2864 struct srp_target_port *target)
2865{
2866 struct srp_target_port *t;
2867 bool ret = false;
2868
2869 if (target->state == SRP_TARGET_REMOVED)
2870 goto out;
2871
2872 ret = true;
2873
2874 spin_lock(&host->target_lock);
2875 list_for_each_entry(t, &host->target_list, list) {
2876 if (t != target &&
2877 target->id_ext == t->id_ext &&
2878 target->ioc_guid == t->ioc_guid &&
2879 target->initiator_ext == t->initiator_ext) {
2880 ret = false;
2881 break;
2882 }
2883 }
2884 spin_unlock(&host->target_lock);
2885
2886out:
2887 return ret;
2888}
2889
Roland Dreieraef9ec32005-11-02 14:07:13 -08002890/*
2891 * Target ports are added by writing
2892 *
2893 * id_ext=<SRP ID ext>,ioc_guid=<SRP IOC GUID>,dgid=<dest GID>,
2894 * pkey=<P_Key>,service_id=<service ID>
2895 *
2896 * to the add_target sysfs attribute.
2897 */
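/*
 * Example (hypothetical identifier values; the sysfs path follows the
 * srp-<device>-<port> naming used by srp_add_port() below):
 *
 * echo "id_ext=200400a0b81146f1,ioc_guid=0002c90200402bec,\
 * dgid=fe8000000000000000021940000105f2,pkey=ffff,service_id=0002c90200402bec" \
 * > /sys/class/infiniband_srp/srp-mlx4_0-1/add_target
 */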
2898enum {
2899 SRP_OPT_ERR = 0,
2900 SRP_OPT_ID_EXT = 1 << 0,
2901 SRP_OPT_IOC_GUID = 1 << 1,
2902 SRP_OPT_DGID = 1 << 2,
2903 SRP_OPT_PKEY = 1 << 3,
2904 SRP_OPT_SERVICE_ID = 1 << 4,
2905 SRP_OPT_MAX_SECT = 1 << 5,
Vu Pham52fb2b502006-06-17 20:37:31 -07002906 SRP_OPT_MAX_CMD_PER_LUN = 1 << 6,
Ramachandra K0c0450db2006-06-17 20:37:38 -07002907 SRP_OPT_IO_CLASS = 1 << 7,
Ishai Rabinovitz01cb9bc2006-10-04 15:28:56 +02002908 SRP_OPT_INITIATOR_EXT = 1 << 8,
David Dillow49248642011-01-14 18:23:24 -05002909 SRP_OPT_CMD_SG_ENTRIES = 1 << 9,
David Dillowc07d4242011-01-16 13:57:10 -05002910 SRP_OPT_ALLOW_EXT_SG = 1 << 10,
2911 SRP_OPT_SG_TABLESIZE = 1 << 11,
Bart Van Assche4b5e5f42013-06-28 14:57:42 +02002912 SRP_OPT_COMP_VECTOR = 1 << 12,
Vu Pham7bb312e2013-10-26 14:31:27 +02002913 SRP_OPT_TL_RETRY_COUNT = 1 << 13,
Bart Van Assche4d73f952013-10-26 14:40:37 +02002914 SRP_OPT_QUEUE_SIZE = 1 << 14,
Roland Dreieraef9ec32005-11-02 14:07:13 -08002915 SRP_OPT_ALL = (SRP_OPT_ID_EXT |
2916 SRP_OPT_IOC_GUID |
2917 SRP_OPT_DGID |
2918 SRP_OPT_PKEY |
2919 SRP_OPT_SERVICE_ID),
2920};
2921
Steven Whitehousea447c092008-10-13 10:46:57 +01002922static const match_table_t srp_opt_tokens = {
Vu Pham52fb2b502006-06-17 20:37:31 -07002923 { SRP_OPT_ID_EXT, "id_ext=%s" },
2924 { SRP_OPT_IOC_GUID, "ioc_guid=%s" },
2925 { SRP_OPT_DGID, "dgid=%s" },
2926 { SRP_OPT_PKEY, "pkey=%x" },
2927 { SRP_OPT_SERVICE_ID, "service_id=%s" },
2928 { SRP_OPT_MAX_SECT, "max_sect=%d" },
2929 { SRP_OPT_MAX_CMD_PER_LUN, "max_cmd_per_lun=%d" },
Ramachandra K0c0450db2006-06-17 20:37:38 -07002930 { SRP_OPT_IO_CLASS, "io_class=%x" },
Ishai Rabinovitz01cb9bc2006-10-04 15:28:56 +02002931 { SRP_OPT_INITIATOR_EXT, "initiator_ext=%s" },
David Dillow49248642011-01-14 18:23:24 -05002932 { SRP_OPT_CMD_SG_ENTRIES, "cmd_sg_entries=%u" },
David Dillowc07d4242011-01-16 13:57:10 -05002933 { SRP_OPT_ALLOW_EXT_SG, "allow_ext_sg=%u" },
2934 { SRP_OPT_SG_TABLESIZE, "sg_tablesize=%u" },
Bart Van Assche4b5e5f42013-06-28 14:57:42 +02002935 { SRP_OPT_COMP_VECTOR, "comp_vector=%u" },
Vu Pham7bb312e2013-10-26 14:31:27 +02002936 { SRP_OPT_TL_RETRY_COUNT, "tl_retry_count=%u" },
Bart Van Assche4d73f952013-10-26 14:40:37 +02002937 { SRP_OPT_QUEUE_SIZE, "queue_size=%d" },
Vu Pham52fb2b502006-06-17 20:37:31 -07002938 { SRP_OPT_ERR, NULL }
Roland Dreieraef9ec32005-11-02 14:07:13 -08002939};
2940
2941static int srp_parse_options(const char *buf, struct srp_target_port *target)
2942{
2943 char *options, *sep_opt;
2944 char *p;
2945 char dgid[3];
2946 substring_t args[MAX_OPT_ARGS];
2947 int opt_mask = 0;
2948 int token;
2949 int ret = -EINVAL;
2950 int i;
2951
2952 options = kstrdup(buf, GFP_KERNEL);
2953 if (!options)
2954 return -ENOMEM;
2955
2956 sep_opt = options;
Sagi Grimberg7dcf9c12014-10-19 18:19:02 +03002957 while ((p = strsep(&sep_opt, ",\n")) != NULL) {
Roland Dreieraef9ec32005-11-02 14:07:13 -08002958 if (!*p)
2959 continue;
2960
2961 token = match_token(p, srp_opt_tokens, args);
2962 opt_mask |= token;
2963
2964 switch (token) {
2965 case SRP_OPT_ID_EXT:
2966 p = match_strdup(args);
Ishai Rabinovitza20f3a62007-01-16 17:20:25 +02002967 if (!p) {
2968 ret = -ENOMEM;
2969 goto out;
2970 }
Roland Dreieraef9ec32005-11-02 14:07:13 -08002971 target->id_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
2972 kfree(p);
2973 break;
2974
2975 case SRP_OPT_IOC_GUID:
2976 p = match_strdup(args);
Ishai Rabinovitza20f3a62007-01-16 17:20:25 +02002977 if (!p) {
2978 ret = -ENOMEM;
2979 goto out;
2980 }
Roland Dreieraef9ec32005-11-02 14:07:13 -08002981 target->ioc_guid = cpu_to_be64(simple_strtoull(p, NULL, 16));
2982 kfree(p);
2983 break;
2984
2985 case SRP_OPT_DGID:
2986 p = match_strdup(args);
Ishai Rabinovitza20f3a62007-01-16 17:20:25 +02002987 if (!p) {
2988 ret = -ENOMEM;
2989 goto out;
2990 }
Roland Dreieraef9ec32005-11-02 14:07:13 -08002991 if (strlen(p) != 32) {
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00002992 pr_warn("bad dest GID parameter '%s'\n", p);
Roland Dreierce1823f2006-04-03 09:31:04 -07002993 kfree(p);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002994 goto out;
2995 }
2996
2997 for (i = 0; i < 16; ++i) {
Bart Van Assche747fe002014-10-30 14:48:05 +01002998 strlcpy(dgid, p + i * 2, sizeof(dgid));
2999 if (sscanf(dgid, "%hhx",
3000 &target->orig_dgid.raw[i]) < 1) {
3001 ret = -EINVAL;
3002 kfree(p);
3003 goto out;
3004 }
Roland Dreieraef9ec32005-11-02 14:07:13 -08003005 }
Roland Dreierbf17c1c2006-03-20 10:08:25 -08003006 kfree(p);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003007 break;
3008
3009 case SRP_OPT_PKEY:
3010 if (match_hex(args, &token)) {
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00003011 pr_warn("bad P_Key parameter '%s'\n", p);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003012 goto out;
3013 }
Bart Van Assche747fe002014-10-30 14:48:05 +01003014 target->pkey = cpu_to_be16(token);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003015 break;
3016
3017 case SRP_OPT_SERVICE_ID:
3018 p = match_strdup(args);
Ishai Rabinovitza20f3a62007-01-16 17:20:25 +02003019 if (!p) {
3020 ret = -ENOMEM;
3021 goto out;
3022 }
Roland Dreieraef9ec32005-11-02 14:07:13 -08003023 target->service_id = cpu_to_be64(simple_strtoull(p, NULL, 16));
3024 kfree(p);
3025 break;
3026
3027 case SRP_OPT_MAX_SECT:
3028 if (match_int(args, &token)) {
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00003029 pr_warn("bad max sect parameter '%s'\n", p);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003030 goto out;
3031 }
3032 target->scsi_host->max_sectors = token;
3033 break;
3034
Bart Van Assche4d73f952013-10-26 14:40:37 +02003035 case SRP_OPT_QUEUE_SIZE:
3036 if (match_int(args, &token) || token < 1) {
3037 pr_warn("bad queue_size parameter '%s'\n", p);
3038 goto out;
3039 }
3040 target->scsi_host->can_queue = token;
3041 target->queue_size = token + SRP_RSP_SQ_SIZE +
3042 SRP_TSK_MGMT_SQ_SIZE;
3043 if (!(opt_mask & SRP_OPT_MAX_CMD_PER_LUN))
3044 target->scsi_host->cmd_per_lun = token;
3045 break;
3046
Vu Pham52fb2b502006-06-17 20:37:31 -07003047 case SRP_OPT_MAX_CMD_PER_LUN:
Bart Van Assche4d73f952013-10-26 14:40:37 +02003048 if (match_int(args, &token) || token < 1) {
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00003049 pr_warn("bad max cmd_per_lun parameter '%s'\n",
3050 p);
Vu Pham52fb2b502006-06-17 20:37:31 -07003051 goto out;
3052 }
Bart Van Assche4d73f952013-10-26 14:40:37 +02003053 target->scsi_host->cmd_per_lun = token;
Vu Pham52fb2b502006-06-17 20:37:31 -07003054 break;
3055
Ramachandra K0c0450db2006-06-17 20:37:38 -07003056 case SRP_OPT_IO_CLASS:
3057 if (match_hex(args, &token)) {
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00003058 pr_warn("bad IO class parameter '%s'\n", p);
Ramachandra K0c0450db2006-06-17 20:37:38 -07003059 goto out;
3060 }
3061 if (token != SRP_REV10_IB_IO_CLASS &&
3062 token != SRP_REV16A_IB_IO_CLASS) {
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00003063 pr_warn("unknown IO class parameter value %x specified (use %x or %x).\n",
3064 token, SRP_REV10_IB_IO_CLASS,
3065 SRP_REV16A_IB_IO_CLASS);
Ramachandra K0c0450db2006-06-17 20:37:38 -07003066 goto out;
3067 }
3068 target->io_class = token;
3069 break;
3070
Ishai Rabinovitz01cb9bc2006-10-04 15:28:56 +02003071 case SRP_OPT_INITIATOR_EXT:
3072 p = match_strdup(args);
Ishai Rabinovitza20f3a62007-01-16 17:20:25 +02003073 if (!p) {
3074 ret = -ENOMEM;
3075 goto out;
3076 }
Ishai Rabinovitz01cb9bc2006-10-04 15:28:56 +02003077 target->initiator_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
3078 kfree(p);
3079 break;
3080
David Dillow49248642011-01-14 18:23:24 -05003081 case SRP_OPT_CMD_SG_ENTRIES:
3082 if (match_int(args, &token) || token < 1 || token > 255) {
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00003083 pr_warn("bad max cmd_sg_entries parameter '%s'\n",
3084 p);
David Dillow49248642011-01-14 18:23:24 -05003085 goto out;
3086 }
3087 target->cmd_sg_cnt = token;
3088 break;
3089
David Dillowc07d4242011-01-16 13:57:10 -05003090 case SRP_OPT_ALLOW_EXT_SG:
3091 if (match_int(args, &token)) {
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00003092 pr_warn("bad allow_ext_sg parameter '%s'\n", p);
David Dillowc07d4242011-01-16 13:57:10 -05003093 goto out;
3094 }
3095 target->allow_ext_sg = !!token;
3096 break;
3097
3098 case SRP_OPT_SG_TABLESIZE:
3099 if (match_int(args, &token) || token < 1 ||
3100 token > SCSI_MAX_SG_CHAIN_SEGMENTS) {
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00003101 pr_warn("bad max sg_tablesize parameter '%s'\n",
3102 p);
David Dillowc07d4242011-01-16 13:57:10 -05003103 goto out;
3104 }
3105 target->sg_tablesize = token;
3106 break;
3107
Bart Van Assche4b5e5f42013-06-28 14:57:42 +02003108 case SRP_OPT_COMP_VECTOR:
3109 if (match_int(args, &token) || token < 0) {
3110 pr_warn("bad comp_vector parameter '%s'\n", p);
3111 goto out;
3112 }
3113 target->comp_vector = token;
3114 break;
3115
Vu Pham7bb312e2013-10-26 14:31:27 +02003116 case SRP_OPT_TL_RETRY_COUNT:
3117 if (match_int(args, &token) || token < 2 || token > 7) {
3118 pr_warn("bad tl_retry_count parameter '%s' (must be a number between 2 and 7)\n",
3119 p);
3120 goto out;
3121 }
3122 target->tl_retry_count = token;
3123 break;
3124
Roland Dreieraef9ec32005-11-02 14:07:13 -08003125 default:
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00003126 pr_warn("unknown parameter or missing value '%s' in target creation request\n",
3127 p);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003128 goto out;
3129 }
3130 }
3131
3132 if ((opt_mask & SRP_OPT_ALL) == SRP_OPT_ALL)
3133 ret = 0;
3134 else
3135 for (i = 0; i < ARRAY_SIZE(srp_opt_tokens); ++i)
3136 if ((srp_opt_tokens[i].token & SRP_OPT_ALL) &&
3137 !(srp_opt_tokens[i].token & opt_mask))
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00003138 pr_warn("target creation request is missing parameter '%s'\n",
3139 srp_opt_tokens[i].pattern);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003140
Bart Van Assche4d73f952013-10-26 14:40:37 +02003141 if (target->scsi_host->cmd_per_lun > target->scsi_host->can_queue
3142 && (opt_mask & SRP_OPT_MAX_CMD_PER_LUN))
3143 pr_warn("cmd_per_lun = %d > queue_size = %d\n",
3144 target->scsi_host->cmd_per_lun,
3145 target->scsi_host->can_queue);
3146
Roland Dreieraef9ec32005-11-02 14:07:13 -08003147out:
3148 kfree(options);
3149 return ret;
3150}
3151
Tony Jonesee959b02008-02-22 00:13:36 +01003152static ssize_t srp_create_target(struct device *dev,
3153 struct device_attribute *attr,
Roland Dreieraef9ec32005-11-02 14:07:13 -08003154 const char *buf, size_t count)
3155{
3156 struct srp_host *host =
Tony Jonesee959b02008-02-22 00:13:36 +01003157 container_of(dev, struct srp_host, dev);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003158 struct Scsi_Host *target_host;
3159 struct srp_target_port *target;
Bart Van Assche509c07b2014-10-30 14:48:30 +01003160 struct srp_rdma_ch *ch;
Bart Van Assched1b42892014-05-20 15:07:20 +02003161 struct srp_device *srp_dev = host->srp_dev;
3162 struct ib_device *ibdev = srp_dev->dev;
Bart Van Assched92c0da2014-10-06 17:14:36 +02003163 int ret, node_idx, node, cpu, i;
3164 bool multich = false;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003165
3166 target_host = scsi_host_alloc(&srp_template,
3167 sizeof (struct srp_target_port));
3168 if (!target_host)
3169 return -ENOMEM;
3170
David Dillow49248642011-01-14 18:23:24 -05003171 target_host->transportt = ib_srp_transport_template;
Bart Van Asschefd1b6c42011-07-13 09:19:16 -07003172 target_host->max_channel = 0;
3173 target_host->max_id = 1;
Bart Van Assche985aa492015-05-18 13:27:14 +02003174 target_host->max_lun = -1LL;
Arne Redlich3c8edf02006-11-15 12:43:00 +01003175 target_host->max_cmd_len = sizeof ((struct srp_cmd *) (void *) 0L)->cdb;
Roland Dreier5f068992005-11-11 14:06:01 -08003176
Roland Dreieraef9ec32005-11-02 14:07:13 -08003177 target = host_to_target(target_host);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003178
David Dillow49248642011-01-14 18:23:24 -05003179 target->io_class = SRP_REV16A_IB_IO_CLASS;
3180 target->scsi_host = target_host;
3181 target->srp_host = host;
Jason Gunthorpee6bf5f42015-07-30 17:22:22 -06003182 target->lkey = host->srp_dev->pd->local_dma_lkey;
Bart Van Assche03f6fb92015-08-10 17:09:36 -07003183 target->global_mr = host->srp_dev->global_mr;
David Dillow49248642011-01-14 18:23:24 -05003184 target->cmd_sg_cnt = cmd_sg_entries;
David Dillowc07d4242011-01-16 13:57:10 -05003185 target->sg_tablesize = indirect_sg_entries ? : cmd_sg_entries;
3186 target->allow_ext_sg = allow_ext_sg;
Vu Pham7bb312e2013-10-26 14:31:27 +02003187 target->tl_retry_count = 7;
Bart Van Assche4d73f952013-10-26 14:40:37 +02003188 target->queue_size = SRP_DEFAULT_QUEUE_SIZE;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003189
Bart Van Assche34aa6542014-10-30 14:47:22 +01003190 /*
3191	 * Prevent the SCSI host from being removed by srp_remove_target()
3192	 * before this function returns.
3193 */
3194 scsi_host_get(target->scsi_host);
3195
Bart Van Assche2d7091b2014-03-14 13:52:45 +01003196 mutex_lock(&host->add_target_mutex);
3197
Roland Dreieraef9ec32005-11-02 14:07:13 -08003198 ret = srp_parse_options(buf, target);
3199 if (ret)
Bart Van Asschefb49c8b2015-05-18 13:23:14 +02003200 goto out;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003201
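	/* SRP_TSK_MGMT_SQ_SIZE slots are reserved for task management. */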
Bart Van Assche4d73f952013-10-26 14:40:37 +02003202 target->req_ring_size = target->queue_size - SRP_TSK_MGMT_SQ_SIZE;
3203
Bart Van Assche96fc2482013-06-28 14:51:26 +02003204 if (!srp_conn_unique(target->srp_host, target)) {
3205 shost_printk(KERN_INFO, target->scsi_host,
3206 PFX "Already connected to target port with id_ext=%016llx;ioc_guid=%016llx;initiator_ext=%016llx\n",
3207 be64_to_cpu(target->id_ext),
3208 be64_to_cpu(target->ioc_guid),
3209 be64_to_cpu(target->initiator_ext));
3210 ret = -EEXIST;
Bart Van Asschefb49c8b2015-05-18 13:23:14 +02003211 goto out;
Bart Van Assche96fc2482013-06-28 14:51:26 +02003212 }
3213
Bart Van Assche5cfb1782014-05-20 15:08:34 +02003214 if (!srp_dev->has_fmr && !srp_dev->has_fr && !target->allow_ext_sg &&
Bart Van Assched1b42892014-05-20 15:07:20 +02003215 target->cmd_sg_cnt < target->sg_tablesize) {
Bart Van Assche5cfb1782014-05-20 15:08:34 +02003216 pr_warn("No MR pool and no external indirect descriptors, limiting sg_tablesize to cmd_sg_cnt\n");
David Dillowc07d4242011-01-16 13:57:10 -05003217 target->sg_tablesize = target->cmd_sg_cnt;
3218 }
3219
3220 target_host->sg_tablesize = target->sg_tablesize;
3221 target->indirect_size = target->sg_tablesize *
3222 sizeof (struct srp_direct_buf);
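	/*
	 * Worst-case IU length: an srp_cmd, an srp_indirect_buf and one
	 * srp_direct_buf per scatter/gather entry.
	 */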
David Dillow49248642011-01-14 18:23:24 -05003223 target->max_iu_len = sizeof (struct srp_cmd) +
3224 sizeof (struct srp_indirect_buf) +
3225 target->cmd_sg_cnt * sizeof (struct srp_direct_buf);
3226
Bart Van Asschec1120f82013-10-26 14:35:08 +02003227 INIT_WORK(&target->tl_err_work, srp_tl_err_work);
Bart Van Asscheef6c49d2011-12-26 16:49:18 +00003228 INIT_WORK(&target->remove_work, srp_remove_work);
David Dillow8f26c9f2011-01-14 19:45:50 -05003229 spin_lock_init(&target->lock);
Matan Barak55ee3ab2015-10-15 18:38:45 +03003230 ret = ib_query_gid(ibdev, host->port, 0, &target->sgid, NULL);
Sagi Grimberg2088ca62014-03-14 13:51:58 +01003231 if (ret)
Bart Van Asschefb49c8b2015-05-18 13:23:14 +02003232 goto out;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003233
Bart Van Assched92c0da2014-10-06 17:14:36 +02003234 ret = -ENOMEM;
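	/*
	 * Use at least one channel per NUMA node and at most the number of
	 * online CPUs: within those bounds, honor the ch_count module
	 * parameter if set, else use up to four channels per node, capped
	 * by the number of completion vectors.
	 */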
3235 target->ch_count = max_t(unsigned, num_online_nodes(),
3236 min(ch_count ? :
3237 min(4 * num_online_nodes(),
3238 ibdev->num_comp_vectors),
3239 num_online_cpus()));
3240 target->ch = kcalloc(target->ch_count, sizeof(*target->ch),
3241 GFP_KERNEL);
3242 if (!target->ch)
Bart Van Asschefb49c8b2015-05-18 13:23:14 +02003243 goto out;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003244
Bart Van Assched92c0da2014-10-06 17:14:36 +02003245 node_idx = 0;
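	/*
	 * Distribute the channels and completion vectors evenly over the
	 * online NUMA nodes, rotating the vectors by target->comp_vector.
	 */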
3246 for_each_online_node(node) {
3247 const int ch_start = (node_idx * target->ch_count /
3248 num_online_nodes());
3249 const int ch_end = ((node_idx + 1) * target->ch_count /
3250 num_online_nodes());
3251 const int cv_start = (node_idx * ibdev->num_comp_vectors /
3252 num_online_nodes() + target->comp_vector)
3253 % ibdev->num_comp_vectors;
3254 const int cv_end = ((node_idx + 1) * ibdev->num_comp_vectors /
3255 num_online_nodes() + target->comp_vector)
3256 % ibdev->num_comp_vectors;
3257 int cpu_idx = 0;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003258
Bart Van Assched92c0da2014-10-06 17:14:36 +02003259 for_each_online_cpu(cpu) {
3260 if (cpu_to_node(cpu) != node)
3261 continue;
3262 if (ch_start + cpu_idx >= ch_end)
3263 continue;
3264 ch = &target->ch[ch_start + cpu_idx];
3265 ch->target = target;
3266 ch->comp_vector = cv_start == cv_end ? cv_start :
3267 cv_start + cpu_idx % (cv_end - cv_start);
3268 spin_lock_init(&ch->lock);
3269 INIT_LIST_HEAD(&ch->free_tx);
3270 ret = srp_new_cm_id(ch);
3271 if (ret)
3272 goto err_disconnect;
3273
3274 ret = srp_create_ch_ib(ch);
3275 if (ret)
3276 goto err_disconnect;
3277
3278 ret = srp_alloc_req_data(ch);
3279 if (ret)
3280 goto err_disconnect;
3281
3282 ret = srp_connect_ch(ch, multich);
3283 if (ret) {
3284 shost_printk(KERN_ERR, target->scsi_host,
3285 PFX "Connection %d/%d failed\n",
3286 ch_start + cpu_idx,
3287 target->ch_count);
3288 if (node_idx == 0 && cpu_idx == 0) {
3289 goto err_disconnect;
3290 } else {
3291 srp_free_ch_ib(target, ch);
3292 srp_free_req_data(target, ch);
3293 target->ch_count = ch - target->ch;
Bart Van Asschec257ea62015-07-31 14:13:22 -07003294 goto connected;
Bart Van Assched92c0da2014-10-06 17:14:36 +02003295 }
3296 }
3297
3298 multich = true;
3299 cpu_idx++;
3300 }
3301 node_idx++;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003302 }
3303
Bart Van Asschec257ea62015-07-31 14:13:22 -07003304connected:
Bart Van Assched92c0da2014-10-06 17:14:36 +02003305 target->scsi_host->nr_hw_queues = target->ch_count;
3306
Roland Dreieraef9ec32005-11-02 14:07:13 -08003307 ret = srp_add_target(host, target);
3308 if (ret)
3309 goto err_disconnect;
3310
Bart Van Assche34aa6542014-10-30 14:47:22 +01003311 if (target->state != SRP_TARGET_REMOVED) {
3312 shost_printk(KERN_DEBUG, target->scsi_host, PFX
3313 "new target: id_ext %016llx ioc_guid %016llx pkey %04x service_id %016llx sgid %pI6 dgid %pI6\n",
3314 be64_to_cpu(target->id_ext),
3315 be64_to_cpu(target->ioc_guid),
Bart Van Assche747fe002014-10-30 14:48:05 +01003316 be16_to_cpu(target->pkey),
Bart Van Assche34aa6542014-10-30 14:47:22 +01003317 be64_to_cpu(target->service_id),
Bart Van Assche747fe002014-10-30 14:48:05 +01003318 target->sgid.raw, target->orig_dgid.raw);
Bart Van Assche34aa6542014-10-30 14:47:22 +01003319 }
Bart Van Asschee7ffde02014-03-14 13:52:21 +01003320
Bart Van Assche2d7091b2014-03-14 13:52:45 +01003321 ret = count;
3322
3323out:
3324 mutex_unlock(&host->add_target_mutex);
Bart Van Assche34aa6542014-10-30 14:47:22 +01003325
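	/*
	 * The first put balances the scsi_host_get() above. On failure the
	 * scsi_host_alloc() reference is dropped here as well, since
	 * srp_remove_target() will never run for this target.
	 */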
3326 scsi_host_put(target->scsi_host);
Bart Van Asschebc44bd12015-08-14 11:01:09 -07003327 if (ret < 0)
3328 scsi_host_put(target->scsi_host);
Bart Van Assche34aa6542014-10-30 14:47:22 +01003329
Bart Van Assche2d7091b2014-03-14 13:52:45 +01003330 return ret;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003331
3332err_disconnect:
3333 srp_disconnect_target(target);
3334
Bart Van Assched92c0da2014-10-06 17:14:36 +02003335 for (i = 0; i < target->ch_count; i++) {
3336 ch = &target->ch[i];
3337 srp_free_ch_ib(target, ch);
3338 srp_free_req_data(target, ch);
3339 }
Roland Dreieraef9ec32005-11-02 14:07:13 -08003340
Bart Van Assched92c0da2014-10-06 17:14:36 +02003341 kfree(target->ch);
Bart Van Assche2d7091b2014-03-14 13:52:45 +01003342 goto out;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003343}
3344
Tony Jonesee959b02008-02-22 00:13:36 +01003345static DEVICE_ATTR(add_target, S_IWUSR, NULL, srp_create_target);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003346
Tony Jonesee959b02008-02-22 00:13:36 +01003347static ssize_t show_ibdev(struct device *dev, struct device_attribute *attr,
3348 char *buf)
Roland Dreieraef9ec32005-11-02 14:07:13 -08003349{
Tony Jonesee959b02008-02-22 00:13:36 +01003350 struct srp_host *host = container_of(dev, struct srp_host, dev);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003351
Greg Kroah-Hartman05321932008-03-06 00:13:36 +01003352 return sprintf(buf, "%s\n", host->srp_dev->dev->name);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003353}
3354
Tony Jonesee959b02008-02-22 00:13:36 +01003355static DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003356
Tony Jonesee959b02008-02-22 00:13:36 +01003357static ssize_t show_port(struct device *dev, struct device_attribute *attr,
3358 char *buf)
Roland Dreieraef9ec32005-11-02 14:07:13 -08003359{
Tony Jonesee959b02008-02-22 00:13:36 +01003360 struct srp_host *host = container_of(dev, struct srp_host, dev);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003361
3362 return sprintf(buf, "%d\n", host->port);
3363}
3364
Tony Jonesee959b02008-02-22 00:13:36 +01003365static DEVICE_ATTR(port, S_IRUGO, show_port, NULL);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003366
Roland Dreierf5358a12006-06-17 20:37:29 -07003367static struct srp_host *srp_add_port(struct srp_device *device, u8 port)
Roland Dreieraef9ec32005-11-02 14:07:13 -08003368{
3369 struct srp_host *host;
3370
3371 host = kzalloc(sizeof *host, GFP_KERNEL);
3372 if (!host)
3373 return NULL;
3374
3375 INIT_LIST_HEAD(&host->target_list);
Matthew Wilcoxb3589fd2006-06-17 20:37:30 -07003376 spin_lock_init(&host->target_lock);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003377 init_completion(&host->released);
Bart Van Assche2d7091b2014-03-14 13:52:45 +01003378 mutex_init(&host->add_target_mutex);
Greg Kroah-Hartman05321932008-03-06 00:13:36 +01003379 host->srp_dev = device;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003380 host->port = port;
3381
Tony Jonesee959b02008-02-22 00:13:36 +01003382 host->dev.class = &srp_class;
3383 host->dev.parent = device->dev->dma_device;
Kay Sieversd927e382009-01-06 10:44:39 -08003384 dev_set_name(&host->dev, "srp-%s-%d", device->dev->name, port);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003385
Tony Jonesee959b02008-02-22 00:13:36 +01003386 if (device_register(&host->dev))
Roland Dreierf5358a12006-06-17 20:37:29 -07003387 goto free_host;
Tony Jonesee959b02008-02-22 00:13:36 +01003388 if (device_create_file(&host->dev, &dev_attr_add_target))
Roland Dreieraef9ec32005-11-02 14:07:13 -08003389 goto err_class;
Tony Jonesee959b02008-02-22 00:13:36 +01003390 if (device_create_file(&host->dev, &dev_attr_ibdev))
Roland Dreieraef9ec32005-11-02 14:07:13 -08003391 goto err_class;
Tony Jonesee959b02008-02-22 00:13:36 +01003392 if (device_create_file(&host->dev, &dev_attr_port))
Roland Dreieraef9ec32005-11-02 14:07:13 -08003393 goto err_class;
3394
3395 return host;
3396
3397err_class:
Tony Jonesee959b02008-02-22 00:13:36 +01003398 device_unregister(&host->dev);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003399
Roland Dreierf5358a12006-06-17 20:37:29 -07003400free_host:
Roland Dreieraef9ec32005-11-02 14:07:13 -08003401 kfree(host);
3402
3403 return NULL;
3404}
3405
3406static void srp_add_one(struct ib_device *device)
3407{
Roland Dreierf5358a12006-06-17 20:37:29 -07003408 struct srp_device *srp_dev;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003409 struct srp_host *host;
Hal Rosenstock41390322015-06-29 09:57:00 -04003410 int mr_page_shift, p;
Bart Van Assche52ede082014-05-20 15:07:45 +02003411 u64 max_pages_per_mr;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003412
Roland Dreierf5358a12006-06-17 20:37:29 -07003413 srp_dev = kmalloc(sizeof *srp_dev, GFP_KERNEL);
3414 if (!srp_dev)
Or Gerlitz4a061b22015-12-18 10:59:46 +02003415 return;
Roland Dreierf5358a12006-06-17 20:37:29 -07003416
Bart Van Assched1b42892014-05-20 15:07:20 +02003417 srp_dev->has_fmr = (device->alloc_fmr && device->dealloc_fmr &&
3418 device->map_phys_fmr && device->unmap_fmr);
Or Gerlitz4a061b22015-12-18 10:59:46 +02003419 srp_dev->has_fr = (device->attrs.device_cap_flags &
Bart Van Assche5cfb1782014-05-20 15:08:34 +02003420 IB_DEVICE_MEM_MGT_EXTENSIONS);
3421 if (!srp_dev->has_fmr && !srp_dev->has_fr)
3422 dev_warn(&device->dev, "neither FMR nor FR is supported\n");
3423
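	/* Prefer fast registration over FMR unless prefer_fr is cleared. */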
3424 srp_dev->use_fast_reg = (srp_dev->has_fr &&
3425 (!srp_dev->has_fmr || prefer_fr));
Bart Van Assche002f1562015-08-10 17:08:44 -07003426 srp_dev->use_fmr = !srp_dev->use_fast_reg && srp_dev->has_fmr;
Bart Van Assched1b42892014-05-20 15:07:20 +02003427
Roland Dreierf5358a12006-06-17 20:37:29 -07003428 /*
3429 * Use the smallest page size supported by the HCA, down to a
David Dillow8f26c9f2011-01-14 19:45:50 -05003430 * minimum of 4096 bytes. We're unlikely to build large sglists
3431 * out of smaller entries.
Roland Dreierf5358a12006-06-17 20:37:29 -07003432 */
Or Gerlitz4a061b22015-12-18 10:59:46 +02003433 mr_page_shift = max(12, ffs(device->attrs.page_size_cap) - 1);
Bart Van Assche52ede082014-05-20 15:07:45 +02003434 srp_dev->mr_page_size = 1 << mr_page_shift;
3435 srp_dev->mr_page_mask = ~((u64) srp_dev->mr_page_size - 1);
Or Gerlitz4a061b22015-12-18 10:59:46 +02003436 max_pages_per_mr = device->attrs.max_mr_size;
Bart Van Assche52ede082014-05-20 15:07:45 +02003437 do_div(max_pages_per_mr, srp_dev->mr_page_size);
3438 srp_dev->max_pages_per_mr = min_t(u64, SRP_MAX_PAGES_PER_MR,
3439 max_pages_per_mr);
Bart Van Assche5cfb1782014-05-20 15:08:34 +02003440 if (srp_dev->use_fast_reg) {
3441 srp_dev->max_pages_per_mr =
3442 min_t(u32, srp_dev->max_pages_per_mr,
Or Gerlitz4a061b22015-12-18 10:59:46 +02003443 device->attrs.max_fast_reg_page_list_len);
Bart Van Assche5cfb1782014-05-20 15:08:34 +02003444 }
Bart Van Assche52ede082014-05-20 15:07:45 +02003445 srp_dev->mr_max_size = srp_dev->mr_page_size *
3446 srp_dev->max_pages_per_mr;
Or Gerlitz4a061b22015-12-18 10:59:46 +02003447 pr_debug("%s: mr_page_shift = %d, device->max_mr_size = %#llx, device->max_fast_reg_page_list_len = %u, max_pages_per_mr = %d, mr_max_size = %#x\n",
3448 device->name, mr_page_shift, device->attrs.max_mr_size,
3449 device->attrs.max_fast_reg_page_list_len,
Bart Van Assche52ede082014-05-20 15:07:45 +02003450 srp_dev->max_pages_per_mr, srp_dev->mr_max_size);
Roland Dreierf5358a12006-06-17 20:37:29 -07003451
3452 INIT_LIST_HEAD(&srp_dev->dev_list);
3453
3454 srp_dev->dev = device;
3455 srp_dev->pd = ib_alloc_pd(device);
3456 if (IS_ERR(srp_dev->pd))
3457 goto free_dev;
3458
Bart Van Assche03f6fb92015-08-10 17:09:36 -07003459 if (!register_always || (!srp_dev->has_fmr && !srp_dev->has_fr)) {
3460 srp_dev->global_mr = ib_get_dma_mr(srp_dev->pd,
3461 IB_ACCESS_LOCAL_WRITE |
3462 IB_ACCESS_REMOTE_READ |
3463 IB_ACCESS_REMOTE_WRITE);
3464 if (IS_ERR(srp_dev->global_mr))
3465 goto err_pd;
3466 } else {
3467 srp_dev->global_mr = NULL;
3468 }
Roland Dreierf5358a12006-06-17 20:37:29 -07003469
Hal Rosenstock41390322015-06-29 09:57:00 -04003470 for (p = rdma_start_port(device); p <= rdma_end_port(device); ++p) {
Roland Dreierf5358a12006-06-17 20:37:29 -07003471 host = srp_add_port(srp_dev, p);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003472 if (host)
Roland Dreierf5358a12006-06-17 20:37:29 -07003473 list_add_tail(&host->list, &srp_dev->dev_list);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003474 }
3475
Roland Dreierf5358a12006-06-17 20:37:29 -07003476 ib_set_client_data(device, &srp_client, srp_dev);
Or Gerlitz4a061b22015-12-18 10:59:46 +02003477 return;
Roland Dreierf5358a12006-06-17 20:37:29 -07003478
3479err_pd:
3480 ib_dealloc_pd(srp_dev->pd);
3481
3482free_dev:
3483 kfree(srp_dev);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003484}
3485
static void srp_remove_one(struct ib_device *device, void *client_data)
{
	struct srp_device *srp_dev;
	struct srp_host *host, *tmp_host;
	struct srp_target_port *target;

	srp_dev = client_data;
	if (!srp_dev)
		return;

	list_for_each_entry_safe(host, tmp_host, &srp_dev->dev_list, list) {
		device_unregister(&host->dev);
		/*
		 * Wait for the sysfs entry to go away, so that no new
		 * target ports can be created.
		 */
		wait_for_completion(&host->released);

		/*
		 * Remove all target ports.
		 */
		spin_lock(&host->target_lock);
		list_for_each_entry(target, &host->target_list, list)
			srp_queue_remove_work(target);
		spin_unlock(&host->target_lock);

		/*
		 * Wait for tl_err and target port removal tasks.
		 */
		flush_workqueue(system_long_wq);
		flush_workqueue(srp_remove_wq);

		kfree(host);
	}

	if (srp_dev->global_mr)
		ib_dereg_mr(srp_dev->global_mr);
	ib_dealloc_pd(srp_dev->pd);

	kfree(srp_dev);
}

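/*
 * Callbacks that connect ib_srp to the SCSI SRP transport class
 * (scsi_transport_srp).  The timeout tunables referenced below are exposed
 * through sysfs by the transport class; e.g. (sketch, assuming the usual
 * srp_remote_ports naming, which is not shown in this file):
 *
 *   echo 30 > /sys/class/srp_remote_ports/port-2:1/fast_io_fail_tmo
 */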
static struct srp_function_template ib_srp_transport_functions = {
	.has_rport_state	 = true,
	.reset_timer_if_blocked	 = true,
	.reconnect_delay	 = &srp_reconnect_delay,
	.fast_io_fail_tmo	 = &srp_fast_io_fail_tmo,
	.dev_loss_tmo		 = &srp_dev_loss_tmo,
	.reconnect		 = srp_rport_reconnect,
	.rport_delete		 = srp_rport_delete,
	.terminate_rport_io	 = srp_terminate_io,
};

static int __init srp_init_module(void)
{
	int ret;

	if (srp_sg_tablesize) {
		pr_warn("srp_sg_tablesize is deprecated, please use cmd_sg_entries\n");
		if (!cmd_sg_entries)
			cmd_sg_entries = srp_sg_tablesize;
	}

	if (!cmd_sg_entries)
		cmd_sg_entries = SRP_DEF_SG_TABLESIZE;

	if (cmd_sg_entries > 255) {
		pr_warn("Clamping cmd_sg_entries to 255\n");
		cmd_sg_entries = 255;
	}
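	/*
	 * The 255 cap above matches the one-byte data buffer descriptor
	 * count fields of the SRP_CMD information unit (an assumption based
	 * on the SRP specification; only the limit itself appears in this
	 * file).
	 */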

	if (!indirect_sg_entries)
		indirect_sg_entries = cmd_sg_entries;
	else if (indirect_sg_entries < cmd_sg_entries) {
		pr_warn("Bumping up indirect_sg_entries to match cmd_sg_entries (%u)\n",
			cmd_sg_entries);
		indirect_sg_entries = cmd_sg_entries;
	}
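	/*
	 * Worked example (sketch, not from the original source): loading
	 * with "modprobe ib_srp cmd_sg_entries=64" and indirect_sg_entries
	 * left unset leaves both limits at 64 after the checks above.
	 */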

	srp_remove_wq = create_workqueue("srp_remove");
	if (!srp_remove_wq) {
		ret = -ENOMEM;
		goto out;
	}

	ret = -ENOMEM;
	ib_srp_transport_template =
		srp_attach_transport(&ib_srp_transport_functions);
	if (!ib_srp_transport_template)
		goto destroy_wq;

	ret = class_register(&srp_class);
	if (ret) {
		pr_err("couldn't register class infiniband_srp\n");
		goto release_tr;
	}

	ib_sa_register_client(&srp_sa_client);

	ret = ib_register_client(&srp_client);
	if (ret) {
		pr_err("couldn't register IB client\n");
		goto unreg_sa;
	}

out:
	return ret;

unreg_sa:
	ib_sa_unregister_client(&srp_sa_client);
	class_unregister(&srp_class);

release_tr:
	srp_release_transport(ib_srp_transport_template);

destroy_wq:
	destroy_workqueue(srp_remove_wq);
	goto out;
}

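/*
 * Module unload: tear everything down in the reverse order of
 * srp_init_module().  (Descriptive comment added for clarity; not in the
 * original source.)
 */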
static void __exit srp_cleanup_module(void)
{
	ib_unregister_client(&srp_client);
	ib_sa_unregister_client(&srp_sa_client);
	class_unregister(&srp_class);
	srp_release_transport(ib_srp_transport_template);
	destroy_workqueue(srp_remove_wq);
}

module_init(srp_init_module);
module_exit(srp_cleanup_module);
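/*
 * Usage sketch (added commentary, not in the original source; the sysfs
 * path and target parameter string are assumptions based on the
 * "infiniband_srp" class registered above, and the device name
 * "srp-mlx4_0-1" is purely illustrative):
 *
 *   modprobe ib_srp
 *   echo "id_ext=...,ioc_guid=...,dgid=...,pkey=ffff,service_id=..." > \
 *       /sys/class/infiniband_srp/srp-mlx4_0-1/add_target
 *   rmmod ib_srp     # unload once all target ports have been removed
 */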