/*
 * Copyright (c) 2005 Cisco Systems.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/string.h>
#include <linux/parser.h>
#include <linux/random.h>
#include <linux/jiffies.h>
#include <rdma/ib_cache.h>

#include <linux/atomic.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_tcq.h>
#include <scsi/srp.h>
#include <scsi/scsi_transport_srp.h>

#include "ib_srp.h"

#define DRV_NAME	"ib_srp"
#define PFX		DRV_NAME ": "
#define DRV_VERSION	"2.0"
#define DRV_RELDATE	"July 26, 2015"

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("InfiniBand SCSI RDMA Protocol initiator");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_INFO(release_date, DRV_RELDATE);

static unsigned int srp_sg_tablesize;
static unsigned int cmd_sg_entries;
static unsigned int indirect_sg_entries;
static bool allow_ext_sg;
static bool prefer_fr;
static bool register_always;
static int topspin_workarounds = 1;

module_param(srp_sg_tablesize, uint, 0444);
MODULE_PARM_DESC(srp_sg_tablesize, "Deprecated name for cmd_sg_entries");

module_param(cmd_sg_entries, uint, 0444);
MODULE_PARM_DESC(cmd_sg_entries,
		 "Default number of gather/scatter entries in the SRP command (default is 12, max 255)");

module_param(indirect_sg_entries, uint, 0444);
MODULE_PARM_DESC(indirect_sg_entries,
		 "Default max number of gather/scatter entries (default is 12, max is " __stringify(SCSI_MAX_SG_CHAIN_SEGMENTS) ")");

module_param(allow_ext_sg, bool, 0444);
MODULE_PARM_DESC(allow_ext_sg,
		 "Default behavior when there are more than cmd_sg_entries S/G entries after mapping; fails the request when false (default false)");

module_param(topspin_workarounds, int, 0444);
MODULE_PARM_DESC(topspin_workarounds,
		 "Enable workarounds for Topspin/Cisco SRP target bugs if != 0");

module_param(prefer_fr, bool, 0444);
MODULE_PARM_DESC(prefer_fr,
"Whether to use fast registration if both FMR and fast registration are supported");

module_param(register_always, bool, 0444);
MODULE_PARM_DESC(register_always,
		 "Use memory registration even for contiguous memory regions");

static const struct kernel_param_ops srp_tmo_ops;

static int srp_reconnect_delay = 10;
module_param_cb(reconnect_delay, &srp_tmo_ops, &srp_reconnect_delay,
		S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(reconnect_delay, "Time between successive reconnect attempts");

static int srp_fast_io_fail_tmo = 15;
module_param_cb(fast_io_fail_tmo, &srp_tmo_ops, &srp_fast_io_fail_tmo,
		S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fast_io_fail_tmo,
		 "Number of seconds between the observation of a transport"
		 " layer error and failing all I/O. \"off\" means that this"
		 " functionality is disabled.");

static int srp_dev_loss_tmo = 600;
module_param_cb(dev_loss_tmo, &srp_tmo_ops, &srp_dev_loss_tmo,
		S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dev_loss_tmo,
		 "Maximum number of seconds that the SRP transport should"
		 " insulate transport layer errors. After this time has been"
		 " exceeded the SCSI host is removed. Should be"
		 " between 1 and " __stringify(SCSI_DEVICE_BLOCK_MAX_TIMEOUT)
		 " if fast_io_fail_tmo has not been set. \"off\" means that"
		 " this functionality is disabled.");

static unsigned ch_count;
module_param(ch_count, uint, 0444);
MODULE_PARM_DESC(ch_count,
		 "Number of RDMA channels to use for communication with an SRP target. Using more than one channel improves performance if the HCA supports multiple completion vectors. The default value is the minimum of four times the number of online CPU sockets and the number of completion vectors supported by the HCA.");

static void srp_add_one(struct ib_device *device);
static void srp_remove_one(struct ib_device *device, void *client_data);
static void srp_recv_completion(struct ib_cq *cq, void *ch_ptr);
static void srp_send_completion(struct ib_cq *cq, void *ch_ptr);
static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event);

static struct scsi_transport_template *ib_srp_transport_template;
static struct workqueue_struct *srp_remove_wq;

static struct ib_client srp_client = {
	.name   = "srp",
	.add    = srp_add_one,
	.remove = srp_remove_one
};

static struct ib_sa_client srp_sa_client;

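/*
 * srp_tmo_get() formats one of the timeout module parameters, printing
 * "off" for negative values; srp_tmo_set() parses a new value and
 * cross-validates it against the other two timeouts before storing it.
 */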
static int srp_tmo_get(char *buffer, const struct kernel_param *kp)
{
	int tmo = *(int *)kp->arg;

	if (tmo >= 0)
		return sprintf(buffer, "%d", tmo);
	else
		return sprintf(buffer, "off");
}

static int srp_tmo_set(const char *val, const struct kernel_param *kp)
{
	int tmo, res;

	res = srp_parse_tmo(&tmo, val);
	if (res)
		goto out;

	if (kp->arg == &srp_reconnect_delay)
		res = srp_tmo_valid(tmo, srp_fast_io_fail_tmo,
				    srp_dev_loss_tmo);
	else if (kp->arg == &srp_fast_io_fail_tmo)
		res = srp_tmo_valid(srp_reconnect_delay, tmo, srp_dev_loss_tmo);
	else
		res = srp_tmo_valid(srp_reconnect_delay, srp_fast_io_fail_tmo,
				    tmo);
	if (res)
		goto out;
	*(int *)kp->arg = tmo;

out:
	return res;
}

static const struct kernel_param_ops srp_tmo_ops = {
	.get = srp_tmo_get,
	.set = srp_tmo_set,
};

static inline struct srp_target_port *host_to_target(struct Scsi_Host *host)
{
	return (struct srp_target_port *) host->hostdata;
}

static const char *srp_target_info(struct Scsi_Host *host)
{
	return host_to_target(host)->target_name;
}

static int srp_target_is_topspin(struct srp_target_port *target)
{
	static const u8 topspin_oui[3] = { 0x00, 0x05, 0xad };
	static const u8 cisco_oui[3]   = { 0x00, 0x1b, 0x0d };

	return topspin_workarounds &&
		(!memcmp(&target->ioc_guid, topspin_oui, sizeof topspin_oui) ||
		 !memcmp(&target->ioc_guid, cisco_oui, sizeof cisco_oui));
}

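/*
 * Allocate an information unit (IU): a buffer of @size bytes plus the DMA
 * mapping required to post it to a work queue. Returns NULL on failure,
 * after undoing any partial initialization.
 */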
static struct srp_iu *srp_alloc_iu(struct srp_host *host, size_t size,
				   gfp_t gfp_mask,
				   enum dma_data_direction direction)
{
	struct srp_iu *iu;

	iu = kmalloc(sizeof *iu, gfp_mask);
	if (!iu)
		goto out;

	iu->buf = kzalloc(size, gfp_mask);
	if (!iu->buf)
		goto out_free_iu;

	iu->dma = ib_dma_map_single(host->srp_dev->dev, iu->buf, size,
				    direction);
	if (ib_dma_mapping_error(host->srp_dev->dev, iu->dma))
		goto out_free_buf;

	iu->size      = size;
	iu->direction = direction;

	return iu;

out_free_buf:
	kfree(iu->buf);
out_free_iu:
	kfree(iu);
out:
	return NULL;
}

static void srp_free_iu(struct srp_host *host, struct srp_iu *iu)
{
	if (!iu)
		return;

	ib_dma_unmap_single(host->srp_dev->dev, iu->dma, iu->size,
			    iu->direction);
	kfree(iu->buf);
	kfree(iu);
}

static void srp_qp_event(struct ib_event *event, void *context)
{
	pr_debug("QP event %s (%d)\n",
		 ib_event_msg(event->event), event->event);
}

static int srp_init_qp(struct srp_target_port *target,
		       struct ib_qp *qp)
{
	struct ib_qp_attr *attr;
	int ret;

	attr = kmalloc(sizeof *attr, GFP_KERNEL);
	if (!attr)
		return -ENOMEM;

	ret = ib_find_cached_pkey(target->srp_host->srp_dev->dev,
				  target->srp_host->port,
				  be16_to_cpu(target->pkey),
				  &attr->pkey_index);
	if (ret)
		goto out;

	attr->qp_state        = IB_QPS_INIT;
	attr->qp_access_flags = (IB_ACCESS_REMOTE_READ |
				 IB_ACCESS_REMOTE_WRITE);
	attr->port_num        = target->srp_host->port;

	ret = ib_modify_qp(qp, attr,
			   IB_QP_STATE		|
			   IB_QP_PKEY_INDEX	|
			   IB_QP_ACCESS_FLAGS	|
			   IB_QP_PORT);

out:
	kfree(attr);
	return ret;
}

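/*
 * srp_new_cm_id() allocates a fresh IB CM identifier for @ch, destroys any
 * previous one, and (re)initializes the channel's path record fields from
 * the target port parameters.
 */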
static int srp_new_cm_id(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	struct ib_cm_id *new_cm_id;

	new_cm_id = ib_create_cm_id(target->srp_host->srp_dev->dev,
				    srp_cm_handler, ch);
	if (IS_ERR(new_cm_id))
		return PTR_ERR(new_cm_id);

	if (ch->cm_id)
		ib_destroy_cm_id(ch->cm_id);
	ch->cm_id = new_cm_id;
	ch->path.sgid = target->sgid;
	ch->path.dgid = target->orig_dgid;
	ch->path.pkey = target->pkey;
	ch->path.service_id = target->service_id;

	return 0;
}

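/*
 * Create an FMR pool for @target, sized to the SCSI host's can_queue value
 * with a dirty watermark of a quarter of the pool, and with page geometry
 * taken from the SRP device.
 */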
static struct ib_fmr_pool *srp_alloc_fmr_pool(struct srp_target_port *target)
{
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_fmr_pool_param fmr_param;

	memset(&fmr_param, 0, sizeof(fmr_param));
	fmr_param.pool_size	    = target->scsi_host->can_queue;
	fmr_param.dirty_watermark   = fmr_param.pool_size / 4;
	fmr_param.cache		    = 1;
	fmr_param.max_pages_per_fmr = dev->max_pages_per_mr;
	fmr_param.page_shift	    = ilog2(dev->mr_page_size);
	fmr_param.access	    = (IB_ACCESS_LOCAL_WRITE |
				       IB_ACCESS_REMOTE_WRITE |
				       IB_ACCESS_REMOTE_READ);

	return ib_create_fmr_pool(dev->pd, &fmr_param);
}

/**
 * srp_destroy_fr_pool() - free the resources owned by a pool
 * @pool: Fast registration pool to be destroyed.
 */
static void srp_destroy_fr_pool(struct srp_fr_pool *pool)
{
	int i;
	struct srp_fr_desc *d;

	if (!pool)
		return;

	for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
		if (d->frpl)
			ib_free_fast_reg_page_list(d->frpl);
		if (d->mr)
			ib_dereg_mr(d->mr);
	}
	kfree(pool);
}

/**
 * srp_create_fr_pool() - allocate and initialize a pool for fast registration
 * @device:            IB device to allocate fast registration descriptors for.
 * @pd:                Protection domain associated with the FR descriptors.
 * @pool_size:         Number of descriptors to allocate.
 * @max_page_list_len: Maximum fast registration work request page list length.
 */
static struct srp_fr_pool *srp_create_fr_pool(struct ib_device *device,
					      struct ib_pd *pd, int pool_size,
					      int max_page_list_len)
{
	struct srp_fr_pool *pool;
	struct srp_fr_desc *d;
	struct ib_mr *mr;
	struct ib_fast_reg_page_list *frpl;
	int i, ret = -EINVAL;

	if (pool_size <= 0)
		goto err;
	ret = -ENOMEM;
	pool = kzalloc(sizeof(struct srp_fr_pool) +
		       pool_size * sizeof(struct srp_fr_desc), GFP_KERNEL);
	if (!pool)
		goto err;
	pool->size = pool_size;
	pool->max_page_list_len = max_page_list_len;
	spin_lock_init(&pool->lock);
	INIT_LIST_HEAD(&pool->free_list);

	for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
		mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG,
				 max_page_list_len);
		if (IS_ERR(mr)) {
			ret = PTR_ERR(mr);
			goto destroy_pool;
		}
		d->mr = mr;
		frpl = ib_alloc_fast_reg_page_list(device, max_page_list_len);
		if (IS_ERR(frpl)) {
			ret = PTR_ERR(frpl);
			goto destroy_pool;
		}
		d->frpl = frpl;
		list_add_tail(&d->entry, &pool->free_list);
	}

out:
	return pool;

destroy_pool:
	srp_destroy_fr_pool(pool);

err:
	pool = ERR_PTR(ret);
	goto out;
}

/**
 * srp_fr_pool_get() - obtain a descriptor suitable for fast registration
 * @pool: Pool to obtain descriptor from.
 */
static struct srp_fr_desc *srp_fr_pool_get(struct srp_fr_pool *pool)
{
	struct srp_fr_desc *d = NULL;
	unsigned long flags;

	spin_lock_irqsave(&pool->lock, flags);
	if (!list_empty(&pool->free_list)) {
		d = list_first_entry(&pool->free_list, typeof(*d), entry);
		list_del(&d->entry);
	}
	spin_unlock_irqrestore(&pool->lock, flags);

	return d;
}

/**
 * srp_fr_pool_put() - put an FR descriptor back in the free list
 * @pool: Pool the descriptor was allocated from.
 * @desc: Pointer to an array of fast registration descriptor pointers.
 * @n:    Number of descriptors to put back.
 *
 * Note: The caller must already have queued an invalidation request for
 * desc->mr->rkey before calling this function.
 */
static void srp_fr_pool_put(struct srp_fr_pool *pool, struct srp_fr_desc **desc,
			    int n)
{
	unsigned long flags;
	int i;

	spin_lock_irqsave(&pool->lock, flags);
	for (i = 0; i < n; i++)
		list_add(&desc[i]->entry, &pool->free_list);
	spin_unlock_irqrestore(&pool->lock, flags);
}

static struct srp_fr_pool *srp_alloc_fr_pool(struct srp_target_port *target)
{
	struct srp_device *dev = target->srp_host->srp_dev;

	return srp_create_fr_pool(dev->dev, dev->pd,
				  target->scsi_host->can_queue,
				  dev->max_pages_per_mr);
}

/**
 * srp_destroy_qp() - destroy an RDMA queue pair
 * @ch: SRP RDMA channel.
 *
 * Change a queue pair into the error state and wait until all receive
 * completions have been processed before destroying it. This prevents
 * the receive completion handler from accessing the queue pair while it
 * is being destroyed.
 */
static void srp_destroy_qp(struct srp_rdma_ch *ch)
{
	static struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
	static struct ib_recv_wr wr = { .wr_id = SRP_LAST_WR_ID };
	struct ib_recv_wr *bad_wr;
	int ret;

	/* Destroying a QP and reusing ch->done is only safe if not connected */
	WARN_ON_ONCE(ch->connected);

	ret = ib_modify_qp(ch->qp, &attr, IB_QP_STATE);
	WARN_ONCE(ret, "ib_modify_qp() returned %d\n", ret);
	if (ret)
		goto out;

	init_completion(&ch->done);
	ret = ib_post_recv(ch->qp, &wr, &bad_wr);
	WARN_ONCE(ret, "ib_post_recv() returned %d\n", ret);
	if (ret == 0)
		wait_for_completion(&ch->done);

out:
	ib_destroy_qp(ch->qp);
}

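/*
 * srp_create_ch_ib() allocates the verbs resources for one RDMA channel:
 * separate send and receive completion queues, a reliable-connected QP and,
 * depending on what the device supports, an FR or FMR pool. Newly created
 * objects replace any existing ones, which are destroyed only after their
 * replacements have been created successfully.
 */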
static int srp_create_ch_ib(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_qp_init_attr *init_attr;
	struct ib_cq *recv_cq, *send_cq;
	struct ib_qp *qp;
	struct ib_fmr_pool *fmr_pool = NULL;
	struct srp_fr_pool *fr_pool = NULL;
	const int m = 1 + dev->use_fast_reg;
	struct ib_cq_init_attr cq_attr = {};
	int ret;

	init_attr = kzalloc(sizeof *init_attr, GFP_KERNEL);
	if (!init_attr)
		return -ENOMEM;

	/* + 1 for SRP_LAST_WR_ID */
	cq_attr.cqe = target->queue_size + 1;
	cq_attr.comp_vector = ch->comp_vector;
	recv_cq = ib_create_cq(dev->dev, srp_recv_completion, NULL, ch,
			       &cq_attr);
	if (IS_ERR(recv_cq)) {
		ret = PTR_ERR(recv_cq);
		goto err;
	}

	cq_attr.cqe = m * target->queue_size;
	cq_attr.comp_vector = ch->comp_vector;
	send_cq = ib_create_cq(dev->dev, srp_send_completion, NULL, ch,
			       &cq_attr);
	if (IS_ERR(send_cq)) {
		ret = PTR_ERR(send_cq);
		goto err_recv_cq;
	}

	ib_req_notify_cq(recv_cq, IB_CQ_NEXT_COMP);

	init_attr->event_handler       = srp_qp_event;
	init_attr->cap.max_send_wr     = m * target->queue_size;
	init_attr->cap.max_recv_wr     = target->queue_size + 1;
	init_attr->cap.max_recv_sge    = 1;
	init_attr->cap.max_send_sge    = 1;
	init_attr->sq_sig_type         = IB_SIGNAL_REQ_WR;
	init_attr->qp_type             = IB_QPT_RC;
	init_attr->send_cq             = send_cq;
	init_attr->recv_cq             = recv_cq;

	qp = ib_create_qp(dev->pd, init_attr);
	if (IS_ERR(qp)) {
		ret = PTR_ERR(qp);
		goto err_send_cq;
	}

	ret = srp_init_qp(target, qp);
	if (ret)
		goto err_qp;

	if (dev->use_fast_reg && dev->has_fr) {
		fr_pool = srp_alloc_fr_pool(target);
		if (IS_ERR(fr_pool)) {
			ret = PTR_ERR(fr_pool);
			shost_printk(KERN_WARNING, target->scsi_host, PFX
				     "FR pool allocation failed (%d)\n", ret);
			goto err_qp;
		}
		if (ch->fr_pool)
			srp_destroy_fr_pool(ch->fr_pool);
		ch->fr_pool = fr_pool;
	} else if (!dev->use_fast_reg && dev->has_fmr) {
		fmr_pool = srp_alloc_fmr_pool(target);
		if (IS_ERR(fmr_pool)) {
			ret = PTR_ERR(fmr_pool);
			shost_printk(KERN_WARNING, target->scsi_host, PFX
				     "FMR pool allocation failed (%d)\n", ret);
			goto err_qp;
		}
		if (ch->fmr_pool)
			ib_destroy_fmr_pool(ch->fmr_pool);
		ch->fmr_pool = fmr_pool;
	}

	if (ch->qp)
		srp_destroy_qp(ch);
	if (ch->recv_cq)
		ib_destroy_cq(ch->recv_cq);
	if (ch->send_cq)
		ib_destroy_cq(ch->send_cq);

	ch->qp = qp;
	ch->recv_cq = recv_cq;
	ch->send_cq = send_cq;

	kfree(init_attr);
	return 0;

err_qp:
	ib_destroy_qp(qp);

err_send_cq:
	ib_destroy_cq(send_cq);

err_recv_cq:
	ib_destroy_cq(recv_cq);

err:
	kfree(init_attr);
	return ret;
}

/*
 * Note: this function may be called without srp_alloc_iu_bufs() having been
 * invoked. Hence the ch->[rt]x_ring checks.
 */
static void srp_free_ch_ib(struct srp_target_port *target,
			   struct srp_rdma_ch *ch)
{
	struct srp_device *dev = target->srp_host->srp_dev;
	int i;

	if (!ch->target)
		return;

	if (ch->cm_id) {
		ib_destroy_cm_id(ch->cm_id);
		ch->cm_id = NULL;
	}

	/* If srp_new_cm_id() succeeded but srp_create_ch_ib() did not, return. */
	if (!ch->qp)
		return;

	if (dev->use_fast_reg) {
		if (ch->fr_pool)
			srp_destroy_fr_pool(ch->fr_pool);
	} else {
		if (ch->fmr_pool)
			ib_destroy_fmr_pool(ch->fmr_pool);
	}
	srp_destroy_qp(ch);
	ib_destroy_cq(ch->send_cq);
	ib_destroy_cq(ch->recv_cq);

	/*
	 * Prevent the SCSI error handler from using this channel after it
	 * has been freed. The SCSI error handler may continue trying to
	 * perform recovery actions after scsi_remove_host() has returned.
	 */
	ch->target = NULL;

	ch->qp = NULL;
	ch->send_cq = ch->recv_cq = NULL;

	if (ch->rx_ring) {
		for (i = 0; i < target->queue_size; ++i)
			srp_free_iu(target->srp_host, ch->rx_ring[i]);
		kfree(ch->rx_ring);
		ch->rx_ring = NULL;
	}
	if (ch->tx_ring) {
		for (i = 0; i < target->queue_size; ++i)
			srp_free_iu(target->srp_host, ch->tx_ring[i]);
		kfree(ch->tx_ring);
		ch->tx_ring = NULL;
	}
}

static void srp_path_rec_completion(int status,
				    struct ib_sa_path_rec *pathrec,
				    void *ch_ptr)
{
	struct srp_rdma_ch *ch = ch_ptr;
	struct srp_target_port *target = ch->target;

	ch->status = status;
	if (status)
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "Got failed path rec status %d\n", status);
	else
		ch->path = *pathrec;
	complete(&ch->done);
}

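/*
 * srp_lookup_path() issues an SA path record query for @ch and sleeps until
 * srp_path_rec_completion() signals ch->done. The wait is interruptible so
 * that a signal can abort the connect attempt.
 */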
static int srp_lookup_path(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	int ret;

	ch->path.numb_path = 1;

	init_completion(&ch->done);

	ch->path_query_id = ib_sa_path_rec_get(&srp_sa_client,
					       target->srp_host->srp_dev->dev,
					       target->srp_host->port,
					       &ch->path,
					       IB_SA_PATH_REC_SERVICE_ID |
					       IB_SA_PATH_REC_DGID	 |
					       IB_SA_PATH_REC_SGID	 |
					       IB_SA_PATH_REC_NUMB_PATH	 |
					       IB_SA_PATH_REC_PKEY,
					       SRP_PATH_REC_TIMEOUT_MS,
					       GFP_KERNEL,
					       srp_path_rec_completion,
					       ch, &ch->path_query);
	if (ch->path_query_id < 0)
		return ch->path_query_id;

	ret = wait_for_completion_interruptible(&ch->done);
	if (ret < 0)
		return ret;

	if (ch->status < 0)
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Path record query failed\n");

	return ch->status;
}

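/*
 * srp_send_req() builds an IB CM REQ whose private data carries the
 * SRP_LOGIN_REQ information unit, fills in both sets of parameters and
 * sends the request on the channel's CM ID.
 */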
static int srp_send_req(struct srp_rdma_ch *ch, bool multich)
{
	struct srp_target_port *target = ch->target;
	struct {
		struct ib_cm_req_param param;
		struct srp_login_req   priv;
	} *req = NULL;
	int status;

	req = kzalloc(sizeof *req, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	req->param.primary_path		      = &ch->path;
	req->param.alternate_path	      = NULL;
	req->param.service_id		      = target->service_id;
	req->param.qp_num		      = ch->qp->qp_num;
	req->param.qp_type		      = ch->qp->qp_type;
	req->param.private_data		      = &req->priv;
	req->param.private_data_len	      = sizeof req->priv;
	req->param.flow_control		      = 1;

	get_random_bytes(&req->param.starting_psn, 4);
	req->param.starting_psn		     &= 0xffffff;

	/*
	 * Pick some arbitrary defaults here; we could make these
	 * module parameters if anyone cared about setting them.
	 */
	req->param.responder_resources	      = 4;
	req->param.remote_cm_response_timeout = 20;
	req->param.local_cm_response_timeout  = 20;
	req->param.retry_count                = target->tl_retry_count;
	req->param.rnr_retry_count	      = 7;
	req->param.max_cm_retries	      = 15;

	req->priv.opcode	= SRP_LOGIN_REQ;
	req->priv.tag		= 0;
	req->priv.req_it_iu_len = cpu_to_be32(target->max_iu_len);
	req->priv.req_buf_fmt	= cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
					      SRP_BUF_FORMAT_INDIRECT);
	req->priv.req_flags	= (multich ? SRP_MULTICHAN_MULTI :
				   SRP_MULTICHAN_SINGLE);
	/*
	 * In the published SRP specification (draft rev. 16a), the
	 * port identifier format is 8 bytes of ID extension followed
	 * by 8 bytes of GUID.  Older drafts put the two halves in the
	 * opposite order, so that the GUID comes first.
	 *
	 * Targets conforming to these obsolete drafts can be
	 * recognized by the I/O Class they report.
	 */
	if (target->io_class == SRP_REV10_IB_IO_CLASS) {
		memcpy(req->priv.initiator_port_id,
		       &target->sgid.global.interface_id, 8);
		memcpy(req->priv.initiator_port_id + 8,
		       &target->initiator_ext, 8);
		memcpy(req->priv.target_port_id,     &target->ioc_guid, 8);
		memcpy(req->priv.target_port_id + 8, &target->id_ext, 8);
	} else {
		memcpy(req->priv.initiator_port_id,
		       &target->initiator_ext, 8);
		memcpy(req->priv.initiator_port_id + 8,
		       &target->sgid.global.interface_id, 8);
		memcpy(req->priv.target_port_id,     &target->id_ext, 8);
		memcpy(req->priv.target_port_id + 8, &target->ioc_guid, 8);
	}

	/*
	 * Topspin/Cisco SRP targets will reject our login unless we
	 * zero out the first 8 bytes of our initiator port ID and set
	 * the second 8 bytes to the local node GUID.
	 */
	if (srp_target_is_topspin(target)) {
		shost_printk(KERN_DEBUG, target->scsi_host,
			     PFX "Topspin/Cisco initiator port ID workaround "
			     "activated for target GUID %016llx\n",
			     be64_to_cpu(target->ioc_guid));
		memset(req->priv.initiator_port_id, 0, 8);
		memcpy(req->priv.initiator_port_id + 8,
		       &target->srp_host->srp_dev->dev->node_guid, 8);
	}

	status = ib_send_cm_req(ch->cm_id, &req->param);

	kfree(req);

	return status;
}

static bool srp_queue_remove_work(struct srp_target_port *target)
{
	bool changed = false;

	spin_lock_irq(&target->lock);
	if (target->state != SRP_TARGET_REMOVED) {
		target->state = SRP_TARGET_REMOVED;
		changed = true;
	}
	spin_unlock_irq(&target->lock);

	if (changed)
		queue_work(srp_remove_wq, &target->remove_work);

	return changed;
}

static void srp_disconnect_target(struct srp_target_port *target)
{
	struct srp_rdma_ch *ch;
	int i;

	/* XXX should send SRP_I_LOGOUT request */

	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		ch->connected = false;
		if (ch->cm_id && ib_send_cm_dreq(ch->cm_id, NULL, 0)) {
			shost_printk(KERN_DEBUG, target->scsi_host,
				     PFX "Sending CM DREQ failed\n");
		}
	}
}

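/*
 * srp_free_req_data() releases everything srp_alloc_req_data() set up for
 * one channel: the per-request FR/FMR descriptor lists, page arrays and
 * DMA-mapped indirect descriptor buffers, followed by the ring itself.
 */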
static void srp_free_req_data(struct srp_target_port *target,
			      struct srp_rdma_ch *ch)
{
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_device *ibdev = dev->dev;
	struct srp_request *req;
	int i;

	if (!ch->req_ring)
		return;

	for (i = 0; i < target->req_ring_size; ++i) {
		req = &ch->req_ring[i];
		if (dev->use_fast_reg)
			kfree(req->fr_list);
		else
			kfree(req->fmr_list);
		kfree(req->map_page);
		if (req->indirect_dma_addr) {
			ib_dma_unmap_single(ibdev, req->indirect_dma_addr,
					    target->indirect_size,
					    DMA_TO_DEVICE);
		}
		kfree(req->indirect_desc);
	}

	kfree(ch->req_ring);
	ch->req_ring = NULL;
}

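/*
 * Allocate the per-channel request ring. Each of the req_ring_size slots
 * gets a descriptor list sized for cmd_sg_cnt registrations, a page-address
 * array of max_pages_per_mr entries, and an indirect descriptor buffer that
 * is DMA-mapped once up front rather than on every I/O.
 */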
static int srp_alloc_req_data(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	struct srp_device *srp_dev = target->srp_host->srp_dev;
	struct ib_device *ibdev = srp_dev->dev;
	struct srp_request *req;
	void *mr_list;
	dma_addr_t dma_addr;
	int i, ret = -ENOMEM;

	ch->req_ring = kcalloc(target->req_ring_size, sizeof(*ch->req_ring),
			       GFP_KERNEL);
	if (!ch->req_ring)
		goto out;

	for (i = 0; i < target->req_ring_size; ++i) {
		req = &ch->req_ring[i];
		mr_list = kmalloc(target->cmd_sg_cnt * sizeof(void *),
				  GFP_KERNEL);
		if (!mr_list)
			goto out;
		if (srp_dev->use_fast_reg)
			req->fr_list = mr_list;
		else
			req->fmr_list = mr_list;
		req->map_page = kmalloc(srp_dev->max_pages_per_mr *
					sizeof(void *), GFP_KERNEL);
		if (!req->map_page)
			goto out;
		req->indirect_desc = kmalloc(target->indirect_size, GFP_KERNEL);
		if (!req->indirect_desc)
			goto out;

		dma_addr = ib_dma_map_single(ibdev, req->indirect_desc,
					     target->indirect_size,
					     DMA_TO_DEVICE);
		if (ib_dma_mapping_error(ibdev, dma_addr))
			goto out;

		req->indirect_dma_addr = dma_addr;
	}
	ret = 0;

out:
	return ret;
}

/**
 * srp_del_scsi_host_attr() - Remove attributes defined in the host template.
 * @shost: SCSI host whose attributes to remove from sysfs.
 *
 * Note: Any attributes defined in the host template and that did not exist
 * before invocation of this function will be ignored.
 */
static void srp_del_scsi_host_attr(struct Scsi_Host *shost)
{
	struct device_attribute **attr;

	for (attr = shost->hostt->shost_attrs; attr && *attr; ++attr)
		device_remove_file(&shost->shost_dev, *attr);
}

static void srp_remove_target(struct srp_target_port *target)
{
	struct srp_rdma_ch *ch;
	int i;

	WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);

	srp_del_scsi_host_attr(target->scsi_host);
	srp_rport_get(target->rport);
	srp_remove_host(target->scsi_host);
	scsi_remove_host(target->scsi_host);
	srp_stop_rport_timers(target->rport);
	srp_disconnect_target(target);
	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		srp_free_ch_ib(target, ch);
	}
	cancel_work_sync(&target->tl_err_work);
	srp_rport_put(target->rport);
	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		srp_free_req_data(target, ch);
	}
	kfree(target->ch);
	target->ch = NULL;

	spin_lock(&target->srp_host->target_lock);
	list_del(&target->list);
	spin_unlock(&target->srp_host->target_lock);

	scsi_host_put(target->scsi_host);
}

static void srp_remove_work(struct work_struct *work)
{
	struct srp_target_port *target =
		container_of(work, struct srp_target_port, remove_work);

	WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);

	srp_remove_target(target);
}

static void srp_rport_delete(struct srp_rport *rport)
{
	struct srp_target_port *target = rport->lld_data;

	srp_queue_remove_work(target);
}

/**
 * srp_connected_ch() - number of connected channels
 * @target: SRP target port.
 */
static int srp_connected_ch(struct srp_target_port *target)
{
	int i, c = 0;

	for (i = 0; i < target->ch_count; i++)
		c += target->ch[i].connected;

	return c;
}

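/*
 * srp_connect_ch() performs the login handshake for one channel, retrying
 * in a loop for as long as the CM handler reports a port or LID/QP
 * redirect. A stale connection reject ends the loop with -ECONNRESET.
 */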
static int srp_connect_ch(struct srp_rdma_ch *ch, bool multich)
{
	struct srp_target_port *target = ch->target;
	int ret;

	WARN_ON_ONCE(!multich && srp_connected_ch(target) > 0);

	ret = srp_lookup_path(ch);
	if (ret)
		return ret;

	while (1) {
		init_completion(&ch->done);
		ret = srp_send_req(ch, multich);
		if (ret)
			return ret;
		ret = wait_for_completion_interruptible(&ch->done);
		if (ret < 0)
			return ret;

		/*
		 * The CM event handling code will set status to
		 * SRP_PORT_REDIRECT if we get a port redirect REJ
		 * back, or SRP_DLID_REDIRECT if we get a lid/qp
		 * redirect REJ back.
		 */
		switch (ch->status) {
		case 0:
			ch->connected = true;
			return 0;

		case SRP_PORT_REDIRECT:
			ret = srp_lookup_path(ch);
			if (ret)
				return ret;
			break;

		case SRP_DLID_REDIRECT:
			break;

		case SRP_STALE_CONN:
			shost_printk(KERN_ERR, target->scsi_host, PFX
				     "giving up on stale connection\n");
			ch->status = -ECONNRESET;
			return ch->status;

		default:
			return ch->status;
		}
	}
}

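/*
 * Post a local-invalidate work request for @rkey on the queue pair of @ch.
 * The request is unsignaled; its wr_id carries LOCAL_INV_WR_ID_MASK so that
 * error completions can be attributed to an invalidate request.
 */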
static int srp_inv_rkey(struct srp_rdma_ch *ch, u32 rkey)
{
	struct ib_send_wr *bad_wr;
	struct ib_send_wr wr = {
		.opcode		    = IB_WR_LOCAL_INV,
		.wr_id		    = LOCAL_INV_WR_ID_MASK,
		.next		    = NULL,
		.num_sge	    = 0,
		.send_flags	    = 0,
		.ex.invalidate_rkey = rkey,
	};

	return ib_post_send(ch->qp, &wr, &bad_wr);
}

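/*
 * srp_unmap_data() undoes the memory registrations performed for @req: it
 * invalidates FR rkeys or unmaps pool FMRs, returns the descriptors to
 * their pool, and finally unmaps the scatterlist of @scmnd.
 */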
static void srp_unmap_data(struct scsi_cmnd *scmnd,
			   struct srp_rdma_ch *ch,
			   struct srp_request *req)
{
	struct srp_target_port *target = ch->target;
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_device *ibdev = dev->dev;
	int i, res;

	if (!scsi_sglist(scmnd) ||
	    (scmnd->sc_data_direction != DMA_TO_DEVICE &&
	     scmnd->sc_data_direction != DMA_FROM_DEVICE))
		return;

	if (dev->use_fast_reg) {
		struct srp_fr_desc **pfr;

		for (i = req->nmdesc, pfr = req->fr_list; i > 0; i--, pfr++) {
			res = srp_inv_rkey(ch, (*pfr)->mr->rkey);
			if (res < 0) {
				shost_printk(KERN_ERR, target->scsi_host, PFX
					     "Queueing INV WR for rkey %#x failed (%d)\n",
					     (*pfr)->mr->rkey, res);
				queue_work(system_long_wq,
					   &target->tl_err_work);
			}
		}
		if (req->nmdesc)
			srp_fr_pool_put(ch->fr_pool, req->fr_list,
					req->nmdesc);
	} else {
		struct ib_pool_fmr **pfmr;

		for (i = req->nmdesc, pfmr = req->fmr_list; i > 0; i--, pfmr++)
			ib_fmr_pool_unmap(*pfmr);
	}

	ib_dma_unmap_sg(ibdev, scsi_sglist(scmnd), scsi_sg_count(scmnd),
			scmnd->sc_data_direction);
}

/**
 * srp_claim_req - Take ownership of the scmnd associated with a request.
 * @ch: SRP RDMA channel.
 * @req: SRP request.
 * @sdev: If not NULL, only take ownership for this SCSI device.
 * @scmnd: If NULL, take ownership of @req->scmnd. If not NULL, only take
 *         ownership of @req->scmnd if it equals @scmnd.
 *
 * Return value:
 * Either NULL or a pointer to the SCSI command the caller became owner of.
 */
static struct scsi_cmnd *srp_claim_req(struct srp_rdma_ch *ch,
				       struct srp_request *req,
				       struct scsi_device *sdev,
				       struct scsi_cmnd *scmnd)
{
	unsigned long flags;

	spin_lock_irqsave(&ch->lock, flags);
	if (req->scmnd &&
	    (!sdev || req->scmnd->device == sdev) &&
	    (!scmnd || req->scmnd == scmnd)) {
		scmnd = req->scmnd;
		req->scmnd = NULL;
	} else {
		scmnd = NULL;
	}
	spin_unlock_irqrestore(&ch->lock, flags);

	return scmnd;
}

/**
 * srp_free_req() - Unmap data and add request to the free request list.
 * @ch: SRP RDMA channel.
 * @req: Request to be freed.
 * @scmnd: SCSI command associated with @req.
 * @req_lim_delta: Amount to be added to @target->req_lim.
 */
static void srp_free_req(struct srp_rdma_ch *ch, struct srp_request *req,
			 struct scsi_cmnd *scmnd, s32 req_lim_delta)
{
	unsigned long flags;

	srp_unmap_data(scmnd, ch, req);

	spin_lock_irqsave(&ch->lock, flags);
	ch->req_lim += req_lim_delta;
	spin_unlock_irqrestore(&ch->lock, flags);
}

static void srp_finish_req(struct srp_rdma_ch *ch, struct srp_request *req,
			   struct scsi_device *sdev, int result)
{
	struct scsi_cmnd *scmnd = srp_claim_req(ch, req, sdev, NULL);

	if (scmnd) {
		srp_free_req(ch, req, scmnd, 0);
		scmnd->result = result;
		scmnd->scsi_done(scmnd);
	}
}

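/*
 * srp_terminate_io() finishes every outstanding request on every channel
 * with DID_TRANSPORT_FAILFAST. It is registered elsewhere in this file as
 * the SRP transport's terminate_rport_io callback.
 */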
static void srp_terminate_io(struct srp_rport *rport)
{
	struct srp_target_port *target = rport->lld_data;
	struct srp_rdma_ch *ch;
	struct Scsi_Host *shost = target->scsi_host;
	struct scsi_device *sdev;
	int i, j;

	/*
	 * Invoking srp_terminate_io() while srp_queuecommand() is running
	 * is not safe. Hence the warning statement below.
	 */
	shost_for_each_device(sdev, shost)
		WARN_ON_ONCE(sdev->request_queue->request_fn_active);

	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];

		for (j = 0; j < target->req_ring_size; ++j) {
			struct srp_request *req = &ch->req_ring[j];

			srp_finish_req(ch, req, NULL,
				       DID_TRANSPORT_FAILFAST << 16);
		}
	}
}

/*
 * It is up to the caller to ensure that srp_rport_reconnect() calls are
 * serialized and that no concurrent srp_queuecommand(), srp_abort(),
 * srp_reset_device() or srp_reset_host() calls will occur while this function
 * is in progress. One way to ensure this is not to call this function
 * directly but to call srp_reconnect_rport() instead, since that function
 * serializes calls of this function via rport->mutex and also blocks
 * srp_queuecommand() calls before invoking this function.
 */
static int srp_rport_reconnect(struct srp_rport *rport)
{
	struct srp_target_port *target = rport->lld_data;
	struct srp_rdma_ch *ch;
	int i, j, ret = 0;
	bool multich = false;

	srp_disconnect_target(target);

	if (target->state == SRP_TARGET_SCANNING)
		return -ENODEV;

	/*
	 * Now get a new local CM ID so that we avoid confusing the target in
	 * case things are really fouled up. Doing so also ensures that all CM
	 * callbacks will have finished before a new QP is allocated.
	 */
	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		ret += srp_new_cm_id(ch);
	}
	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		for (j = 0; j < target->req_ring_size; ++j) {
			struct srp_request *req = &ch->req_ring[j];

			srp_finish_req(ch, req, NULL, DID_RESET << 16);
		}
	}
	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		/*
		 * Whether or not creating a new CM ID succeeded, create a new
		 * QP. This guarantees that all completion callback function
		 * invocations have finished before request resetting starts.
		 */
		ret += srp_create_ch_ib(ch);

		INIT_LIST_HEAD(&ch->free_tx);
		for (j = 0; j < target->queue_size; ++j)
			list_add(&ch->tx_ring[j]->list, &ch->free_tx);
	}

	target->qp_in_error = false;

	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		if (ret)
			break;
		ret = srp_connect_ch(ch, multich);
		multich = true;
	}

	if (ret == 0)
		shost_printk(KERN_INFO, target->scsi_host,
			     PFX "reconnect succeeded\n");

	return ret;
}

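/*
 * srp_map_desc() appends one SRP direct buffer descriptor (address, rkey,
 * length) to the mapping state and updates the running totals.
 */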
static void srp_map_desc(struct srp_map_state *state, dma_addr_t dma_addr,
			 unsigned int dma_len, u32 rkey)
{
	struct srp_direct_buf *desc = state->desc;

	desc->va = cpu_to_be64(dma_addr);
	desc->key = cpu_to_be32(rkey);
	desc->len = cpu_to_be32(dma_len);

	state->total_len += dma_len;
	state->desc++;
	state->ndesc++;
}

1272static int srp_map_finish_fmr(struct srp_map_state *state,
Bart Van Assche509c07b2014-10-30 14:48:30 +01001273 struct srp_rdma_ch *ch)
David Dillow8f26c9f2011-01-14 19:45:50 -05001274{
Bart Van Assche186fbc62015-08-10 17:06:29 -07001275 struct srp_target_port *target = ch->target;
1276 struct srp_device *dev = target->srp_host->srp_dev;
David Dillow8f26c9f2011-01-14 19:45:50 -05001277 struct ib_pool_fmr *fmr;
Roland Dreierf5358a12006-06-17 20:37:29 -07001278 u64 io_addr = 0;
David Dillow8f26c9f2011-01-14 19:45:50 -05001279
Bart Van Asschef731ed62015-08-10 17:07:27 -07001280 if (state->fmr.next >= state->fmr.end)
1281 return -ENOMEM;
1282
Bart Van Assche509c07b2014-10-30 14:48:30 +01001283 fmr = ib_fmr_pool_map_phys(ch->fmr_pool, state->pages,
David Dillow8f26c9f2011-01-14 19:45:50 -05001284 state->npages, io_addr);
1285 if (IS_ERR(fmr))
1286 return PTR_ERR(fmr);
1287
Bart Van Asschef731ed62015-08-10 17:07:27 -07001288 *state->fmr.next++ = fmr;
Bart Van Assche52ede082014-05-20 15:07:45 +02001289 state->nmdesc++;
David Dillow8f26c9f2011-01-14 19:45:50 -05001290
Bart Van Assche186fbc62015-08-10 17:06:29 -07001291 srp_map_desc(state, state->base_dma_addr & ~dev->mr_page_mask,
1292 state->dma_len, fmr->fmr->rkey);
Bart Van Assche539dde62014-05-20 15:05:46 +02001293
David Dillow8f26c9f2011-01-14 19:45:50 -05001294 return 0;
1295}
1296
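/*
 * srp_map_finish_fr() registers the accumulated pages through a fast
 * registration work request instead of an FMR pool. Bumping the key portion
 * of the rkey via ib_inc_rkey() before each reuse ensures that stale
 * references to a previous registration of the same MR no longer match.
 */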
static int srp_map_finish_fr(struct srp_map_state *state,
			     struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_send_wr *bad_wr;
	struct ib_send_wr wr;
	struct srp_fr_desc *desc;
	u32 rkey;

	if (state->fr.next >= state->fr.end)
		return -ENOMEM;

	desc = srp_fr_pool_get(ch->fr_pool);
	if (!desc)
		return -ENOMEM;

	rkey = ib_inc_rkey(desc->mr->rkey);
	ib_update_fast_reg_key(desc->mr, rkey);

	memcpy(desc->frpl->page_list, state->pages,
	       sizeof(state->pages[0]) * state->npages);

	memset(&wr, 0, sizeof(wr));
	wr.opcode = IB_WR_FAST_REG_MR;
	wr.wr_id = FAST_REG_WR_ID_MASK;
	wr.wr.fast_reg.iova_start = state->base_dma_addr;
	wr.wr.fast_reg.page_list = desc->frpl;
	wr.wr.fast_reg.page_list_len = state->npages;
	wr.wr.fast_reg.page_shift = ilog2(dev->mr_page_size);
	wr.wr.fast_reg.length = state->dma_len;
	wr.wr.fast_reg.access_flags = (IB_ACCESS_LOCAL_WRITE |
				       IB_ACCESS_REMOTE_READ |
				       IB_ACCESS_REMOTE_WRITE);
	wr.wr.fast_reg.rkey = desc->mr->lkey;

	*state->fr.next++ = desc;
	state->nmdesc++;

	srp_map_desc(state, state->base_dma_addr, state->dma_len,
		     desc->mr->rkey);

	return ib_post_send(ch->qp, &wr, &bad_wr);
}

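/*
 * srp_finish_mapping() closes out the descriptor currently being built up
 * in @state: a single page may be referenced directly through the global
 * rkey when register_always is not set; anything else goes through fast
 * registration or FMR, depending on what the device supports.
 */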
static int srp_finish_mapping(struct srp_map_state *state,
			      struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	int ret = 0;

	if (state->npages == 0)
		return 0;

	if (state->npages == 1 && !register_always)
		srp_map_desc(state, state->base_dma_addr, state->dma_len,
			     target->rkey);
	else
		ret = target->srp_host->srp_dev->use_fast_reg ?
			srp_map_finish_fr(state, ch) :
			srp_map_finish_fmr(state, ch);

	if (ret == 0) {
		state->npages = 0;
		state->dma_len = 0;
	}

	return ret;
}

static void srp_map_update_start(struct srp_map_state *state,
				 struct scatterlist *sg, int sg_index,
				 dma_addr_t dma_addr)
{
	state->unmapped_sg = sg;
	state->unmapped_index = sg_index;
	state->unmapped_addr = dma_addr;
}

static int srp_map_sg_entry(struct srp_map_state *state,
			    struct srp_rdma_ch *ch,
			    struct scatterlist *sg, int sg_index,
			    bool use_mr)
{
	struct srp_target_port *target = ch->target;
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_device *ibdev = dev->dev;
	dma_addr_t dma_addr = ib_sg_dma_address(ibdev, sg);
	unsigned int dma_len = ib_sg_dma_len(ibdev, sg);
	unsigned int len;
	int ret;

	if (!dma_len)
		return 0;

	if (!use_mr) {
		/*
		 * Once we're in direct map mode for a request, we don't
		 * go back to FMR or FR mode, so no need to update anything
		 * other than the descriptor.
		 */
		srp_map_desc(state, dma_addr, dma_len, target->rkey);
		return 0;
	}

	/*
	 * If this is the first sg that will be mapped via FMR or via FR, save
	 * our position. We need to know the first unmapped entry, its index,
	 * and the first unmapped address within that entry to be able to
	 * restart mapping after an error.
	 */
	if (!state->unmapped_sg)
		srp_map_update_start(state, sg, sg_index, dma_addr);

	while (dma_len) {
		unsigned offset = dma_addr & ~dev->mr_page_mask;
		if (state->npages == dev->max_pages_per_mr || offset != 0) {
			ret = srp_finish_mapping(state, ch);
			if (ret)
				return ret;

			srp_map_update_start(state, sg, sg_index, dma_addr);
		}

		len = min_t(unsigned int, dma_len, dev->mr_page_size - offset);

		if (!state->npages)
			state->base_dma_addr = dma_addr;
		state->pages[state->npages++] = dma_addr & dev->mr_page_mask;
		state->dma_len += len;
		dma_addr += len;
		dma_len -= len;
	}

	/*
	 * If the last entry of the MR wasn't a full page, then we need to
	 * close it out and start a new one -- we can only merge at page
	 * boundaries.
	 */
	ret = 0;
	if (len != dev->mr_page_size) {
		ret = srp_finish_mapping(state, ch);
		if (!ret)
			srp_map_update_start(state, NULL, 0, 0);
	}
	return ret;
}

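/*
 * Note on the error handling in srp_map_sg() below: if registering a memory
 * region fails partway through the scatterlist, the code jumps back to the
 * position saved by srp_map_update_start() and maps the rest with direct
 * descriptors only (use_mr == false), trading descriptor merging for
 * guaranteed forward progress.
 */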
static int srp_map_sg(struct srp_map_state *state, struct srp_rdma_ch *ch,
		      struct srp_request *req, struct scatterlist *scat,
		      int count)
{
	struct srp_target_port *target = ch->target;
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_device *ibdev = dev->dev;
	struct scatterlist *sg;
	int i;
	bool use_mr;

	state->desc = req->indirect_desc;
	state->pages = req->map_page;
	if (dev->use_fast_reg) {
		state->fr.next = req->fr_list;
		state->fr.end = req->fr_list + target->cmd_sg_cnt;
		use_mr = !!ch->fr_pool;
	} else {
		state->fmr.next = req->fmr_list;
		state->fmr.end = req->fmr_list + target->cmd_sg_cnt;
		use_mr = !!ch->fmr_pool;
	}

	for_each_sg(scat, sg, count, i) {
		if (srp_map_sg_entry(state, ch, sg, i, use_mr)) {
			/*
			 * Memory registration failed, so backtrack to the
			 * first unmapped entry and continue on without using
			 * memory registration.
			 */
			dma_addr_t dma_addr;
			unsigned int dma_len;

backtrack:
			sg = state->unmapped_sg;
			i = state->unmapped_index;

			dma_addr = ib_sg_dma_address(ibdev, sg);
			dma_len = ib_sg_dma_len(ibdev, sg);
			dma_len -= (state->unmapped_addr - dma_addr);
			dma_addr = state->unmapped_addr;
			use_mr = false;
			srp_map_desc(state, dma_addr, dma_len, target->rkey);
		}
	}

	if (use_mr && srp_finish_mapping(state, ch))
		goto backtrack;

	req->nmdesc = state->nmdesc;

	return 0;
}

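/*
 * srp_map_data() produces one of three layouts in the SRP_CMD IU: a single
 * direct descriptor when the S/G list collapses to one entry, an indirect
 * descriptor table carried inside the command, or an externally referenced
 * indirect table for large S/G lists on targets that allow it
 * (allow_ext_sg).
 */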
static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch,
			struct srp_request *req)
{
	struct srp_target_port *target = ch->target;
	struct scatterlist *scat;
	struct srp_cmd *cmd = req->cmd->buf;
	int len, nents, count;
	struct srp_device *dev;
	struct ib_device *ibdev;
	struct srp_map_state state;
	struct srp_indirect_buf *indirect_hdr;
	u32 table_len;
	u8 fmt;

	if (!scsi_sglist(scmnd) || scmnd->sc_data_direction == DMA_NONE)
		return sizeof (struct srp_cmd);

	if (scmnd->sc_data_direction != DMA_FROM_DEVICE &&
	    scmnd->sc_data_direction != DMA_TO_DEVICE) {
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Unhandled data direction %d\n",
			     scmnd->sc_data_direction);
		return -EINVAL;
	}

	nents = scsi_sg_count(scmnd);
	scat = scsi_sglist(scmnd);

	dev = target->srp_host->srp_dev;
	ibdev = dev->dev;

	count = ib_dma_map_sg(ibdev, scat, nents, scmnd->sc_data_direction);
	if (unlikely(count == 0))
		return -EIO;

	fmt = SRP_DATA_DESC_DIRECT;
	len = sizeof (struct srp_cmd) + sizeof (struct srp_direct_buf);

	if (count == 1 && !register_always) {
		/*
		 * The midlayer only generated a single gather/scatter
		 * entry, or DMA mapping coalesced everything to a
		 * single entry. So a direct descriptor along with
		 * the DMA MR suffices.
		 */
		struct srp_direct_buf *buf = (void *) cmd->add_data;

		buf->va  = cpu_to_be64(ib_sg_dma_address(ibdev, scat));
		buf->key = cpu_to_be32(target->rkey);
		buf->len = cpu_to_be32(ib_sg_dma_len(ibdev, scat));

		req->nmdesc = 0;
		goto map_complete;
	}

	/*
	 * We have more than one scatter/gather entry, so build our indirect
	 * descriptor table, trying to merge as many entries as we can.
	 */
	indirect_hdr = (void *) cmd->add_data;

	ib_dma_sync_single_for_cpu(ibdev, req->indirect_dma_addr,
				   target->indirect_size, DMA_TO_DEVICE);

	memset(&state, 0, sizeof(state));
	srp_map_sg(&state, ch, req, scat, count);

	/* We've mapped the request, now pull as much of the indirect
	 * descriptor table as we can into the command buffer. If this
	 * target is not using an external indirect table, we are
	 * guaranteed to fit into the command, as the SCSI layer won't
	 * give us more S/G entries than we allow.
	 */
	if (state.ndesc == 1) {
		/*
		 * Memory registration collapsed the sg-list into one entry,
		 * so use a direct descriptor.
		 */
		struct srp_direct_buf *buf = (void *) cmd->add_data;

		*buf = req->indirect_desc[0];
		goto map_complete;
	}

	if (unlikely(target->cmd_sg_cnt < state.ndesc &&
		     !target->allow_ext_sg)) {
		shost_printk(KERN_ERR, target->scsi_host,
			     "Could not fit S/G list into SRP_CMD\n");
		return -EIO;
	}

	count = min(state.ndesc, target->cmd_sg_cnt);
	table_len = state.ndesc * sizeof (struct srp_direct_buf);

	fmt = SRP_DATA_DESC_INDIRECT;
	len = sizeof(struct srp_cmd) + sizeof (struct srp_indirect_buf);
	len += count * sizeof (struct srp_direct_buf);

	memcpy(indirect_hdr->desc_list, req->indirect_desc,
	       count * sizeof (struct srp_direct_buf));

	indirect_hdr->table_desc.va = cpu_to_be64(req->indirect_dma_addr);
	indirect_hdr->table_desc.key = cpu_to_be32(target->rkey);
	indirect_hdr->table_desc.len = cpu_to_be32(table_len);
	indirect_hdr->len = cpu_to_be32(state.total_len);

	if (scmnd->sc_data_direction == DMA_TO_DEVICE)
		cmd->data_out_desc_cnt = count;
	else
		cmd->data_in_desc_cnt = count;

	ib_dma_sync_single_for_device(ibdev, req->indirect_dma_addr, table_len,
				      DMA_TO_DEVICE);

map_complete:
	if (scmnd->sc_data_direction == DMA_TO_DEVICE)
		cmd->buf_fmt = fmt << 4;
	else
		cmd->buf_fmt = fmt;

	return len;
}

/*
 * Return an IU and possibly a credit to the free pool
 */
static void srp_put_tx_iu(struct srp_rdma_ch *ch, struct srp_iu *iu,
			  enum srp_iu_type iu_type)
{
	unsigned long flags;

	spin_lock_irqsave(&ch->lock, flags);
	list_add(&iu->list, &ch->free_tx);
	if (iu_type != SRP_IU_RSP)
		++ch->req_lim;
	spin_unlock_irqrestore(&ch->lock, flags);
}

/*
 * Must be called with ch->lock held to protect req_lim and free_tx.
 * If IU is not sent, it must be returned using srp_put_tx_iu().
 *
 * Note:
 * An upper limit for the number of allocated information units for each
 * request type is:
 * - SRP_IU_CMD: SRP_CMD_SQ_SIZE, since the SCSI mid-layer never queues
 *   more than Scsi_Host.can_queue requests.
 * - SRP_IU_TSK_MGMT: SRP_TSK_MGMT_SQ_SIZE.
 * - SRP_IU_RSP: 1, since a conforming SRP target never sends more than
 *   one unanswered SRP request to an initiator.
 */
static struct srp_iu *__srp_get_tx_iu(struct srp_rdma_ch *ch,
				      enum srp_iu_type iu_type)
{
	struct srp_target_port *target = ch->target;
	s32 rsv = (iu_type == SRP_IU_TSK_MGMT) ? 0 : SRP_TSK_MGMT_SQ_SIZE;
	struct srp_iu *iu;

	srp_send_completion(ch->send_cq, ch);

	if (list_empty(&ch->free_tx))
		return NULL;

	/* Initiator responses to target requests do not consume credits */
	if (iu_type != SRP_IU_RSP) {
		if (ch->req_lim <= rsv) {
			++target->zero_req_lim;
			return NULL;
		}

		--ch->req_lim;
	}

	iu = list_first_entry(&ch->free_tx, struct srp_iu, list);
	list_del(&iu->list);
	return iu;
}

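/*
 * A worked example of the credit accounting above, assuming the target
 * granted req_lim = 64 at login and SRP_TSK_MGMT_SQ_SIZE == 1:
 * __srp_get_tx_iu(ch, SRP_IU_CMD) only succeeds while ch->req_lim > 1, so
 * at most 63 commands can be in flight and one credit stays reserved for a
 * task management request. Credits are returned through the req_lim_delta
 * field of target responses (see srp_process_rsp()).
 */
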
static int srp_post_send(struct srp_rdma_ch *ch, struct srp_iu *iu, int len)
{
	struct srp_target_port *target = ch->target;
	struct ib_sge list;
	struct ib_send_wr wr, *bad_wr;

	list.addr   = iu->dma;
	list.length = len;
	list.lkey   = target->lkey;

	wr.next       = NULL;
	wr.wr_id      = (uintptr_t) iu;
	wr.sg_list    = &list;
	wr.num_sge    = 1;
	wr.opcode     = IB_WR_SEND;
	wr.send_flags = IB_SEND_SIGNALED;

	return ib_post_send(ch->qp, &wr, &bad_wr);
}

static int srp_post_recv(struct srp_rdma_ch *ch, struct srp_iu *iu)
{
	struct srp_target_port *target = ch->target;
	struct ib_recv_wr wr, *bad_wr;
	struct ib_sge list;

	list.addr   = iu->dma;
	list.length = iu->size;
	list.lkey   = target->lkey;

	wr.next     = NULL;
	wr.wr_id    = (uintptr_t) iu;
	wr.sg_list  = &list;
	wr.num_sge  = 1;

	return ib_post_recv(ch->qp, &wr, &bad_wr);
}

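/*
 * The SRP_TAG_TSK_MGMT bit in the response tag distinguishes task
 * management responses, which complete ch->tsk_mgmt_done, from SCSI command
 * responses, which are matched back to their struct scsi_cmnd via
 * scsi_host_find_tag().
 */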
static void srp_process_rsp(struct srp_rdma_ch *ch, struct srp_rsp *rsp)
{
	struct srp_target_port *target = ch->target;
	struct srp_request *req;
	struct scsi_cmnd *scmnd;
	unsigned long flags;

	if (unlikely(rsp->tag & SRP_TAG_TSK_MGMT)) {
		spin_lock_irqsave(&ch->lock, flags);
		ch->req_lim += be32_to_cpu(rsp->req_lim_delta);
		spin_unlock_irqrestore(&ch->lock, flags);

		ch->tsk_mgmt_status = -1;
		if (be32_to_cpu(rsp->resp_data_len) >= 4)
			ch->tsk_mgmt_status = rsp->data[3];
		complete(&ch->tsk_mgmt_done);
	} else {
		scmnd = scsi_host_find_tag(target->scsi_host, rsp->tag);
		if (scmnd) {
			req = (void *)scmnd->host_scribble;
			scmnd = srp_claim_req(ch, req, NULL, scmnd);
		}
		if (!scmnd) {
			shost_printk(KERN_ERR, target->scsi_host,
				     "Null scmnd for RSP w/tag %#016llx received on ch %td / QP %#x\n",
				     rsp->tag, ch - target->ch, ch->qp->qp_num);

			spin_lock_irqsave(&ch->lock, flags);
			ch->req_lim += be32_to_cpu(rsp->req_lim_delta);
			spin_unlock_irqrestore(&ch->lock, flags);

			return;
		}
		scmnd->result = rsp->status;

		if (rsp->flags & SRP_RSP_FLAG_SNSVALID) {
			memcpy(scmnd->sense_buffer, rsp->data +
			       be32_to_cpu(rsp->resp_data_len),
			       min_t(int, be32_to_cpu(rsp->sense_data_len),
				     SCSI_SENSE_BUFFERSIZE));
		}

		if (unlikely(rsp->flags & SRP_RSP_FLAG_DIUNDER))
			scsi_set_resid(scmnd, be32_to_cpu(rsp->data_in_res_cnt));
		else if (unlikely(rsp->flags & SRP_RSP_FLAG_DIOVER))
			scsi_set_resid(scmnd, -be32_to_cpu(rsp->data_in_res_cnt));
		else if (unlikely(rsp->flags & SRP_RSP_FLAG_DOUNDER))
			scsi_set_resid(scmnd, be32_to_cpu(rsp->data_out_res_cnt));
		else if (unlikely(rsp->flags & SRP_RSP_FLAG_DOOVER))
			scsi_set_resid(scmnd, -be32_to_cpu(rsp->data_out_res_cnt));

		srp_free_req(ch, req, scmnd,
			     be32_to_cpu(rsp->req_lim_delta));

		scmnd->host_scribble = NULL;
		scmnd->scsi_done(scmnd);
	}
}

static int srp_response_common(struct srp_rdma_ch *ch, s32 req_delta,
			       void *rsp, int len)
{
	struct srp_target_port *target = ch->target;
	struct ib_device *dev = target->srp_host->srp_dev->dev;
	unsigned long flags;
	struct srp_iu *iu;
	int err;

	spin_lock_irqsave(&ch->lock, flags);
	ch->req_lim += req_delta;
	iu = __srp_get_tx_iu(ch, SRP_IU_RSP);
	spin_unlock_irqrestore(&ch->lock, flags);

	if (!iu) {
		shost_printk(KERN_ERR, target->scsi_host, PFX
			     "no IU available to send response\n");
		return 1;
	}

	ib_dma_sync_single_for_cpu(dev, iu->dma, len, DMA_TO_DEVICE);
	memcpy(iu->buf, rsp, len);
	ib_dma_sync_single_for_device(dev, iu->dma, len, DMA_TO_DEVICE);

	err = srp_post_send(ch, iu, len);
	if (err) {
		shost_printk(KERN_ERR, target->scsi_host, PFX
			     "unable to post response: %d\n", err);
		srp_put_tx_iu(ch, iu, SRP_IU_RSP);
	}

	return err;
}

static void srp_process_cred_req(struct srp_rdma_ch *ch,
				 struct srp_cred_req *req)
{
	struct srp_cred_rsp rsp = {
		.opcode = SRP_CRED_RSP,
		.tag = req->tag,
	};
	s32 delta = be32_to_cpu(req->req_lim_delta);

	if (srp_response_common(ch, delta, &rsp, sizeof(rsp)))
		shost_printk(KERN_ERR, ch->target->scsi_host, PFX
			     "problems processing SRP_CRED_REQ\n");
}

static void srp_process_aer_req(struct srp_rdma_ch *ch,
				struct srp_aer_req *req)
{
	struct srp_target_port *target = ch->target;
	struct srp_aer_rsp rsp = {
		.opcode = SRP_AER_RSP,
		.tag = req->tag,
	};
	s32 delta = be32_to_cpu(req->req_lim_delta);

	shost_printk(KERN_ERR, target->scsi_host, PFX
		     "ignoring AER for LUN %llu\n", scsilun_to_int(&req->lun));

	if (srp_response_common(ch, delta, &rsp, sizeof(rsp)))
		shost_printk(KERN_ERR, target->scsi_host, PFX
			     "problems processing SRP_AER_REQ\n");
}

static void srp_handle_recv(struct srp_rdma_ch *ch, struct ib_wc *wc)
{
	struct srp_target_port *target = ch->target;
	struct ib_device *dev = target->srp_host->srp_dev->dev;
	struct srp_iu *iu = (struct srp_iu *) (uintptr_t) wc->wr_id;
	int res;
	u8 opcode;

	ib_dma_sync_single_for_cpu(dev, iu->dma, ch->max_ti_iu_len,
				   DMA_FROM_DEVICE);

	opcode = *(u8 *) iu->buf;

	if (0) {
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "recv completion, opcode 0x%02x\n", opcode);
		print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 8, 1,
			       iu->buf, wc->byte_len, true);
	}

	switch (opcode) {
	case SRP_RSP:
		srp_process_rsp(ch, iu->buf);
		break;

	case SRP_CRED_REQ:
		srp_process_cred_req(ch, iu->buf);
		break;

	case SRP_AER_REQ:
		srp_process_aer_req(ch, iu->buf);
		break;

	case SRP_T_LOGOUT:
		/* XXX Handle target logout */
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Got target logout request\n");
		break;

	default:
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Unhandled SRP opcode 0x%02x\n", opcode);
		break;
	}

	ib_dma_sync_single_for_device(dev, iu->dma, ch->max_ti_iu_len,
				      DMA_FROM_DEVICE);

	res = srp_post_recv(ch, iu);
	if (res != 0)
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "Recv failed with error code %d\n", res);
}

/**
 * srp_tl_err_work() - handle a transport layer error
 * @work: Work structure embedded in an SRP target port.
 *
 * Note: This function may get invoked before the rport has been created,
 * hence the target->rport test.
 */
static void srp_tl_err_work(struct work_struct *work)
{
	struct srp_target_port *target;

	target = container_of(work, struct srp_target_port, tl_err_work);
	if (target->rport)
		srp_start_tl_fail_timers(target->rport);
}

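/*
 * Note on the wr_id encoding used below: send and receive work requests
 * carry a struct srp_iu pointer in wr_id, the LOCAL_INV_WR_ID_MASK and
 * FAST_REG_WR_ID_MASK bits mark memory registration work requests, and
 * SRP_LAST_WR_ID marks the work request posted while draining a queue
 * pair, whose error completion only has to wake up the waiter.
 */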
static void srp_handle_qp_err(u64 wr_id, enum ib_wc_status wc_status,
			      bool send_err, struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;

	if (wr_id == SRP_LAST_WR_ID) {
		complete(&ch->done);
		return;
	}

	if (ch->connected && !target->qp_in_error) {
		if (wr_id & LOCAL_INV_WR_ID_MASK) {
			shost_printk(KERN_ERR, target->scsi_host, PFX
				     "LOCAL_INV failed with status %s (%d)\n",
				     ib_wc_status_msg(wc_status), wc_status);
		} else if (wr_id & FAST_REG_WR_ID_MASK) {
			shost_printk(KERN_ERR, target->scsi_host, PFX
				     "FAST_REG_MR failed status %s (%d)\n",
				     ib_wc_status_msg(wc_status), wc_status);
		} else {
			shost_printk(KERN_ERR, target->scsi_host,
				     PFX "failed %s status %s (%d) for iu %p\n",
				     send_err ? "send" : "receive",
				     ib_wc_status_msg(wc_status), wc_status,
				     (void *)(uintptr_t)wr_id);
		}
		queue_work(system_long_wq, &target->tl_err_work);
	}
	target->qp_in_error = true;
}

static void srp_recv_completion(struct ib_cq *cq, void *ch_ptr)
{
	struct srp_rdma_ch *ch = ch_ptr;
	struct ib_wc wc;

	ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
	while (ib_poll_cq(cq, 1, &wc) > 0) {
		if (likely(wc.status == IB_WC_SUCCESS)) {
			srp_handle_recv(ch, &wc);
		} else {
			srp_handle_qp_err(wc.wr_id, wc.status, false, ch);
		}
	}
}

static void srp_send_completion(struct ib_cq *cq, void *ch_ptr)
{
	struct srp_rdma_ch *ch = ch_ptr;
	struct ib_wc wc;
	struct srp_iu *iu;

	while (ib_poll_cq(cq, 1, &wc) > 0) {
		if (likely(wc.status == IB_WC_SUCCESS)) {
			iu = (struct srp_iu *) (uintptr_t) wc.wr_id;
			list_add(&iu->list, &ch->free_tx);
		} else {
			srp_handle_qp_err(wc.wr_id, wc.status, true, ch);
		}
	}
}

static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
{
	struct srp_target_port *target = host_to_target(shost);
	struct srp_rport *rport = target->rport;
	struct srp_rdma_ch *ch;
	struct srp_request *req;
	struct srp_iu *iu;
	struct srp_cmd *cmd;
	struct ib_device *dev;
	unsigned long flags;
	u32 tag;
	u16 idx;
	int len, ret;
	const bool in_scsi_eh = !in_interrupt() && current == shost->ehandler;

	/*
	 * The SCSI EH thread is the only context from which srp_queuecommand()
	 * can get invoked for blocked devices (SDEV_BLOCK /
	 * SDEV_CREATED_BLOCK). Avoid racing with srp_reconnect_rport() by
	 * locking the rport mutex if invoked from inside the SCSI EH.
	 */
	if (in_scsi_eh)
		mutex_lock(&rport->mutex);

	scmnd->result = srp_chkready(target->rport);
	if (unlikely(scmnd->result))
		goto err;

	WARN_ON_ONCE(scmnd->request->tag < 0);
	tag = blk_mq_unique_tag(scmnd->request);
	ch = &target->ch[blk_mq_unique_tag_to_hwq(tag)];
	idx = blk_mq_unique_tag_to_tag(tag);
	WARN_ONCE(idx >= target->req_ring_size, "%s: tag %#x: idx %d >= %d\n",
		  dev_name(&shost->shost_gendev), tag, idx,
		  target->req_ring_size);

	spin_lock_irqsave(&ch->lock, flags);
	iu = __srp_get_tx_iu(ch, SRP_IU_CMD);
	spin_unlock_irqrestore(&ch->lock, flags);

	if (!iu)
		goto err;

	req = &ch->req_ring[idx];
	dev = target->srp_host->srp_dev->dev;
	ib_dma_sync_single_for_cpu(dev, iu->dma, target->max_iu_len,
				   DMA_TO_DEVICE);

	scmnd->host_scribble = (void *) req;

	cmd = iu->buf;
	memset(cmd, 0, sizeof *cmd);

	cmd->opcode = SRP_CMD;
	int_to_scsilun(scmnd->device->lun, &cmd->lun);
	cmd->tag = tag;
	memcpy(cmd->cdb, scmnd->cmnd, scmnd->cmd_len);

	req->scmnd = scmnd;
	req->cmd = iu;

	len = srp_map_data(scmnd, ch, req);
	if (len < 0) {
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "Failed to map data (%d)\n", len);
		/*
		 * If we ran out of memory descriptors (-ENOMEM) because an
		 * application is queuing many requests with more than
		 * max_pages_per_mr sg-list elements, tell the SCSI mid-layer
		 * to reduce the queue depth temporarily.
		 */
		scmnd->result = len == -ENOMEM ?
			DID_OK << 16 | QUEUE_FULL << 1 : DID_ERROR << 16;
		goto err_iu;
	}

	ib_dma_sync_single_for_device(dev, iu->dma, target->max_iu_len,
				      DMA_TO_DEVICE);

	if (srp_post_send(ch, iu, len)) {
		shost_printk(KERN_ERR, target->scsi_host, PFX "Send failed\n");
		goto err_unmap;
	}

	ret = 0;

unlock_rport:
	if (in_scsi_eh)
		mutex_unlock(&rport->mutex);

	return ret;

err_unmap:
	srp_unmap_data(scmnd, ch, req);

err_iu:
	srp_put_tx_iu(ch, iu, SRP_IU_CMD);

	/*
	 * Ensure that the loops that iterate over the request ring cannot
	 * encounter a dangling SCSI command pointer.
	 */
	req->scmnd = NULL;

err:
	if (scmnd->result) {
		scmnd->scsi_done(scmnd);
		ret = 0;
	} else {
		ret = SCSI_MLQUEUE_HOST_BUSY;
	}

	goto unlock_rport;
}

/*
 * Note: the resources allocated in this function are freed in
 * srp_free_ch_ib().
 */
static int srp_alloc_iu_bufs(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	int i;

	ch->rx_ring = kcalloc(target->queue_size, sizeof(*ch->rx_ring),
			      GFP_KERNEL);
	if (!ch->rx_ring)
		goto err_no_ring;
	ch->tx_ring = kcalloc(target->queue_size, sizeof(*ch->tx_ring),
			      GFP_KERNEL);
	if (!ch->tx_ring)
		goto err_no_ring;

	for (i = 0; i < target->queue_size; ++i) {
		ch->rx_ring[i] = srp_alloc_iu(target->srp_host,
					      ch->max_ti_iu_len,
					      GFP_KERNEL, DMA_FROM_DEVICE);
		if (!ch->rx_ring[i])
			goto err;
	}

	for (i = 0; i < target->queue_size; ++i) {
		ch->tx_ring[i] = srp_alloc_iu(target->srp_host,
					      target->max_iu_len,
					      GFP_KERNEL, DMA_TO_DEVICE);
		if (!ch->tx_ring[i])
			goto err;

		list_add(&ch->tx_ring[i]->list, &ch->free_tx);
	}

	return 0;

err:
	for (i = 0; i < target->queue_size; ++i) {
		srp_free_iu(target->srp_host, ch->rx_ring[i]);
		srp_free_iu(target->srp_host, ch->tx_ring[i]);
	}

err_no_ring:
	kfree(ch->tx_ring);
	ch->tx_ring = NULL;
	kfree(ch->rx_ring);
	ch->rx_ring = NULL;

	return -ENOMEM;
}

static uint32_t srp_compute_rq_tmo(struct ib_qp_attr *qp_attr, int attr_mask)
{
	uint64_t T_tr_ns, max_compl_time_ms;
	uint32_t rq_tmo_jiffies;

	/*
	 * According to section 11.2.4.2 in the IBTA spec (Modify Queue Pair,
	 * table 91), both the QP timeout and the retry count have to be set
	 * for RC QP's during the RTR to RTS transition.
	 */
	WARN_ON_ONCE((attr_mask & (IB_QP_TIMEOUT | IB_QP_RETRY_CNT)) !=
		     (IB_QP_TIMEOUT | IB_QP_RETRY_CNT));

	/*
	 * Set target->rq_tmo_jiffies to one second more than the largest time
	 * it can take before an error completion is generated. See also
	 * C9-140..142 in the IBTA spec for more information about how to
	 * convert the QP Local ACK Timeout value to nanoseconds.
	 */
	T_tr_ns = 4096 * (1ULL << qp_attr->timeout);
	max_compl_time_ms = qp_attr->retry_cnt * 4 * T_tr_ns;
	do_div(max_compl_time_ms, NSEC_PER_MSEC);
	rq_tmo_jiffies = msecs_to_jiffies(max_compl_time_ms + 1000);

	return rq_tmo_jiffies;
}

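/*
 * A worked example of the srp_compute_rq_tmo() calculation above, assuming
 * qp_attr->timeout == 19 and qp_attr->retry_cnt == 7:
 *
 *	T_tr_ns           = 4096 * 2^19 ns  (about 2.15 s)
 *	max_compl_time_ms = 7 * 4 * T_tr_ns (about 60.1 s)
 *	rq_tmo_jiffies    = msecs_to_jiffies(60130 + 1000)
 *
 * i.e. the reconnect timeout ends up one second longer than the worst case
 * time the HCA may need before reporting an error completion.
 */
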
static void srp_cm_rep_handler(struct ib_cm_id *cm_id,
			       const struct srp_login_rsp *lrsp,
			       struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	struct ib_qp_attr *qp_attr = NULL;
	int attr_mask = 0;
	int ret;
	int i;

	if (lrsp->opcode == SRP_LOGIN_RSP) {
		ch->max_ti_iu_len = be32_to_cpu(lrsp->max_ti_iu_len);
		ch->req_lim       = be32_to_cpu(lrsp->req_lim_delta);

		/*
		 * Reserve credits for task management so we don't
		 * bounce requests back to the SCSI mid-layer.
		 */
		target->scsi_host->can_queue
			= min(ch->req_lim - SRP_TSK_MGMT_SQ_SIZE,
			      target->scsi_host->can_queue);
		target->scsi_host->cmd_per_lun
			= min_t(int, target->scsi_host->can_queue,
				target->scsi_host->cmd_per_lun);
	} else {
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Unhandled RSP opcode %#x\n", lrsp->opcode);
		ret = -ECONNRESET;
		goto error;
	}

	if (!ch->rx_ring) {
		ret = srp_alloc_iu_bufs(ch);
		if (ret)
			goto error;
	}

	ret = -ENOMEM;
	qp_attr = kmalloc(sizeof *qp_attr, GFP_KERNEL);
	if (!qp_attr)
		goto error;

	qp_attr->qp_state = IB_QPS_RTR;
	ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
	if (ret)
		goto error_free;

	ret = ib_modify_qp(ch->qp, qp_attr, attr_mask);
	if (ret)
		goto error_free;

	for (i = 0; i < target->queue_size; i++) {
		struct srp_iu *iu = ch->rx_ring[i];

		ret = srp_post_recv(ch, iu);
		if (ret)
			goto error_free;
	}

	qp_attr->qp_state = IB_QPS_RTS;
	ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
	if (ret)
		goto error_free;

	target->rq_tmo_jiffies = srp_compute_rq_tmo(qp_attr, attr_mask);

	ret = ib_modify_qp(ch->qp, qp_attr, attr_mask);
	if (ret)
		goto error_free;

	ret = ib_send_cm_rtu(cm_id, NULL, 0);

error_free:
	kfree(qp_attr);

error:
	ch->status = ret;
}

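/*
 * srp_cm_rej_handler() maps connection manager reject reasons onto
 * ch->status: CM and port redirects are reported as SRP_DLID_REDIRECT or
 * SRP_PORT_REDIRECT so that the login can be retried with the updated path,
 * a stale connection is reported as SRP_STALE_CONN, and everything else
 * surfaces as -ECONNRESET.
 */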
static void srp_cm_rej_handler(struct ib_cm_id *cm_id,
			       struct ib_cm_event *event,
			       struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	struct Scsi_Host *shost = target->scsi_host;
	struct ib_class_port_info *cpi;
	int opcode;

	switch (event->param.rej_rcvd.reason) {
	case IB_CM_REJ_PORT_CM_REDIRECT:
		cpi = event->param.rej_rcvd.ari;
		ch->path.dlid = cpi->redirect_lid;
		ch->path.pkey = cpi->redirect_pkey;
		cm_id->remote_cm_qpn = be32_to_cpu(cpi->redirect_qp) & 0x00ffffff;
		memcpy(ch->path.dgid.raw, cpi->redirect_gid, 16);

		ch->status = ch->path.dlid ?
			SRP_DLID_REDIRECT : SRP_PORT_REDIRECT;
		break;

	case IB_CM_REJ_PORT_REDIRECT:
		if (srp_target_is_topspin(target)) {
			/*
			 * Topspin/Cisco SRP gateways incorrectly send
			 * reject reason code 25 when they mean 24
			 * (port redirect).
			 */
			memcpy(ch->path.dgid.raw,
			       event->param.rej_rcvd.ari, 16);

			shost_printk(KERN_DEBUG, shost,
				     PFX "Topspin/Cisco redirect to target port GID %016llx%016llx\n",
				     be64_to_cpu(ch->path.dgid.global.subnet_prefix),
				     be64_to_cpu(ch->path.dgid.global.interface_id));

			ch->status = SRP_PORT_REDIRECT;
		} else {
			shost_printk(KERN_WARNING, shost,
				     " REJ reason: IB_CM_REJ_PORT_REDIRECT\n");
			ch->status = -ECONNRESET;
		}
		break;

	case IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID:
		shost_printk(KERN_WARNING, shost,
			     " REJ reason: IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID\n");
		ch->status = -ECONNRESET;
		break;

	case IB_CM_REJ_CONSUMER_DEFINED:
		opcode = *(u8 *) event->private_data;
		if (opcode == SRP_LOGIN_REJ) {
			struct srp_login_rej *rej = event->private_data;
			u32 reason = be32_to_cpu(rej->reason);

			if (reason == SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE)
				shost_printk(KERN_WARNING, shost,
					     PFX "SRP_LOGIN_REJ: requested max_it_iu_len too large\n");
			else
				shost_printk(KERN_WARNING, shost, PFX
					     "SRP LOGIN from %pI6 to %pI6 REJECTED, reason 0x%08x\n",
					     target->sgid.raw,
					     target->orig_dgid.raw, reason);
		} else
			shost_printk(KERN_WARNING, shost,
				     " REJ reason: IB_CM_REJ_CONSUMER_DEFINED,"
				     " opcode 0x%02x\n", opcode);
		ch->status = -ECONNRESET;
		break;

	case IB_CM_REJ_STALE_CONN:
		shost_printk(KERN_WARNING, shost, " REJ reason: stale connection\n");
		ch->status = SRP_STALE_CONN;
		break;

	default:
		shost_printk(KERN_WARNING, shost, " REJ reason 0x%x\n",
			     event->param.rej_rcvd.reason);
		ch->status = -ECONNRESET;
	}
}

static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
{
	struct srp_rdma_ch *ch = cm_id->context;
	struct srp_target_port *target = ch->target;
	int comp = 0;

	switch (event->event) {
	case IB_CM_REQ_ERROR:
		shost_printk(KERN_DEBUG, target->scsi_host,
			     PFX "Sending CM REQ failed\n");
		comp = 1;
		ch->status = -ECONNRESET;
		break;

	case IB_CM_REP_RECEIVED:
		comp = 1;
		srp_cm_rep_handler(cm_id, event->private_data, ch);
		break;

	case IB_CM_REJ_RECEIVED:
		shost_printk(KERN_DEBUG, target->scsi_host, PFX "REJ received\n");
		comp = 1;

		srp_cm_rej_handler(cm_id, event, ch);
		break;

	case IB_CM_DREQ_RECEIVED:
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "DREQ received - connection closed\n");
		ch->connected = false;
		if (ib_send_cm_drep(cm_id, NULL, 0))
			shost_printk(KERN_ERR, target->scsi_host,
				     PFX "Sending CM DREP failed\n");
		queue_work(system_long_wq, &target->tl_err_work);
		break;

	case IB_CM_TIMEWAIT_EXIT:
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "connection closed\n");
		comp = 1;

		ch->status = 0;
		break;

	case IB_CM_MRA_RECEIVED:
	case IB_CM_DREQ_ERROR:
	case IB_CM_DREP_RECEIVED:
		break;

	default:
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Unhandled CM event %d\n", event->event);
		break;
	}

	if (comp)
		complete(&ch->done);

	return 0;
}

Jack Wang71444b92013-11-07 11:37:37 +01002391/**
 * srp_change_queue_depth - set the queue depth of a SCSI device
 * @sdev: SCSI device.
 * @qdepth: Requested queue depth.
 *
 * Returns the new queue depth.
2397 */
2398static int
Christoph Hellwigdb5ed4d2014-11-13 15:08:42 +01002399srp_change_queue_depth(struct scsi_device *sdev, int qdepth)
Jack Wang71444b92013-11-07 11:37:37 +01002400{
Christoph Hellwigc40ecc12014-11-13 14:25:11 +01002401 if (!sdev->tagged_supported)
Christoph Hellwig1e6f2412014-11-13 14:27:41 +01002402 qdepth = 1;
Christoph Hellwigdb5ed4d2014-11-13 15:08:42 +01002403 return scsi_change_queue_depth(sdev, qdepth);
Jack Wang71444b92013-11-07 11:37:37 +01002404}
2405
Bart Van Assche985aa492015-05-18 13:27:14 +02002406static int srp_send_tsk_mgmt(struct srp_rdma_ch *ch, u64 req_tag, u64 lun,
2407 u8 func)
Roland Dreieraef9ec32005-11-02 14:07:13 -08002408{
Bart Van Assche509c07b2014-10-30 14:48:30 +01002409 struct srp_target_port *target = ch->target;
Bart Van Asschea95cadb2013-10-26 14:37:17 +02002410 struct srp_rport *rport = target->rport;
David Dillow19081f32010-10-18 08:54:49 -04002411 struct ib_device *dev = target->srp_host->srp_dev->dev;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002412 struct srp_iu *iu;
2413 struct srp_tsk_mgmt *tsk_mgmt;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002414
Bart Van Asschec014c8c2015-05-18 13:23:57 +02002415 if (!ch->connected || target->qp_in_error)
Bart Van Assche3780d1f2013-02-21 17:18:00 +00002416 return -1;
2417
Bart Van Assche509c07b2014-10-30 14:48:30 +01002418 init_completion(&ch->tsk_mgmt_done);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002419
Bart Van Asschea95cadb2013-10-26 14:37:17 +02002420 /*
	 * Lock the rport mutex to prevent srp_create_ch_ib() from being
	 * invoked while a task management function is being sent.
2423 */
2424 mutex_lock(&rport->mutex);
Bart Van Assche509c07b2014-10-30 14:48:30 +01002425 spin_lock_irq(&ch->lock);
2426 iu = __srp_get_tx_iu(ch, SRP_IU_TSK_MGMT);
2427 spin_unlock_irq(&ch->lock);
Bart Van Assche76c75b22010-11-26 14:37:47 -05002428
Bart Van Asschea95cadb2013-10-26 14:37:17 +02002429 if (!iu) {
2430 mutex_unlock(&rport->mutex);
2431
Bart Van Assche76c75b22010-11-26 14:37:47 -05002432 return -1;
Bart Van Asschea95cadb2013-10-26 14:37:17 +02002433 }
Roland Dreieraef9ec32005-11-02 14:07:13 -08002434
David Dillow19081f32010-10-18 08:54:49 -04002435 ib_dma_sync_single_for_cpu(dev, iu->dma, sizeof *tsk_mgmt,
2436 DMA_TO_DEVICE);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002437 tsk_mgmt = iu->buf;
2438 memset(tsk_mgmt, 0, sizeof *tsk_mgmt);
2439
2440 tsk_mgmt->opcode = SRP_TSK_MGMT;
Bart Van Assche985aa492015-05-18 13:27:14 +02002441 int_to_scsilun(lun, &tsk_mgmt->lun);
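	/*
	 * The SRP_TAG_TSK_MGMT bit lets the response handler distinguish
	 * task management responses from SCSI command responses.
	 */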
David Dillowf8b6e312010-11-26 13:02:21 -05002442 tsk_mgmt->tag = req_tag | SRP_TAG_TSK_MGMT;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002443 tsk_mgmt->tsk_mgmt_func = func;
David Dillowf8b6e312010-11-26 13:02:21 -05002444 tsk_mgmt->task_tag = req_tag;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002445
David Dillow19081f32010-10-18 08:54:49 -04002446 ib_dma_sync_single_for_device(dev, iu->dma, sizeof *tsk_mgmt,
2447 DMA_TO_DEVICE);
Bart Van Assche509c07b2014-10-30 14:48:30 +01002448 if (srp_post_send(ch, iu, sizeof(*tsk_mgmt))) {
2449 srp_put_tx_iu(ch, iu, SRP_IU_TSK_MGMT);
Bart Van Asschea95cadb2013-10-26 14:37:17 +02002450 mutex_unlock(&rport->mutex);
2451
Bart Van Assche76c75b22010-11-26 14:37:47 -05002452 return -1;
2453 }
Bart Van Asschea95cadb2013-10-26 14:37:17 +02002454 mutex_unlock(&rport->mutex);
Roland Dreierd945e1d2006-05-09 10:50:28 -07002455
Bart Van Assche509c07b2014-10-30 14:48:30 +01002456 if (!wait_for_completion_timeout(&ch->tsk_mgmt_done,
Roland Dreieraef9ec32005-11-02 14:07:13 -08002457 msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS)))
Roland Dreierd945e1d2006-05-09 10:50:28 -07002458 return -1;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002459
Roland Dreierd945e1d2006-05-09 10:50:28 -07002460 return 0;
Roland Dreierd945e1d2006-05-09 10:50:28 -07002461}
2462
Roland Dreieraef9ec32005-11-02 14:07:13 -08002463static int srp_abort(struct scsi_cmnd *scmnd)
2464{
Roland Dreierd945e1d2006-05-09 10:50:28 -07002465 struct srp_target_port *target = host_to_target(scmnd->device->host);
David Dillowf8b6e312010-11-26 13:02:21 -05002466 struct srp_request *req = (struct srp_request *) scmnd->host_scribble;
Bart Van Assche77f2c1a2014-10-02 15:29:25 +02002467 u32 tag;
Bart Van Assched92c0da2014-10-06 17:14:36 +02002468 u16 ch_idx;
Bart Van Assche509c07b2014-10-30 14:48:30 +01002469 struct srp_rdma_ch *ch;
Bart Van Assche086f44f2013-06-12 15:23:04 +02002470 int ret;
Roland Dreierd945e1d2006-05-09 10:50:28 -07002471
David Dillow7aa54bd2008-01-07 18:23:41 -05002472 shost_printk(KERN_ERR, target->scsi_host, "SRP abort called\n");
Roland Dreieraef9ec32005-11-02 14:07:13 -08002473
Bart Van Assched92c0da2014-10-06 17:14:36 +02002474 if (!req)
Bart Van Assche99b66972013-10-10 13:52:33 +02002475 return SUCCESS;
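	/*
	 * blk_mq_unique_tag() combines the hardware queue number and the
	 * per-queue tag into one value, so the tag below identifies both the
	 * RDMA channel and the request within that channel.
	 */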
Bart Van Assche77f2c1a2014-10-02 15:29:25 +02002476 tag = blk_mq_unique_tag(scmnd->request);
Bart Van Assched92c0da2014-10-06 17:14:36 +02002477 ch_idx = blk_mq_unique_tag_to_hwq(tag);
2478 if (WARN_ON_ONCE(ch_idx >= target->ch_count))
2479 return SUCCESS;
2480 ch = &target->ch[ch_idx];
2481 if (!srp_claim_req(ch, req, NULL, scmnd))
2482 return SUCCESS;
2483 shost_printk(KERN_ERR, target->scsi_host,
2484 "Sending SRP abort for tag %#x\n", tag);
Bart Van Assche77f2c1a2014-10-02 15:29:25 +02002485 if (srp_send_tsk_mgmt(ch, tag, scmnd->device->lun,
Bart Van Assche80d5e8a2013-07-10 17:36:35 +02002486 SRP_TSK_ABORT_TASK) == 0)
Bart Van Assche086f44f2013-06-12 15:23:04 +02002487 ret = SUCCESS;
Bart Van Asscheed9b2262013-10-26 14:34:27 +02002488 else if (target->rport->state == SRP_RPORT_LOST)
Bart Van Assche99e1c132013-06-28 14:49:58 +02002489 ret = FAST_IO_FAIL;
Bart Van Assche086f44f2013-06-12 15:23:04 +02002490 else
2491 ret = FAILED;
Bart Van Assche509c07b2014-10-30 14:48:30 +01002492 srp_free_req(ch, req, scmnd, 0);
Bart Van Assche22032992012-08-14 13:18:53 +00002493 scmnd->result = DID_ABORT << 16;
Bart Van Assched8536672012-08-24 10:29:11 +00002494 scmnd->scsi_done(scmnd);
Roland Dreierd945e1d2006-05-09 10:50:28 -07002495
Bart Van Assche086f44f2013-06-12 15:23:04 +02002496 return ret;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002497}
2498
2499static int srp_reset_device(struct scsi_cmnd *scmnd)
2500{
Roland Dreierd945e1d2006-05-09 10:50:28 -07002501 struct srp_target_port *target = host_to_target(scmnd->device->host);
Bart Van Assched92c0da2014-10-06 17:14:36 +02002502 struct srp_rdma_ch *ch;
	int i, j;
Roland Dreierd945e1d2006-05-09 10:50:28 -07002504
David Dillow7aa54bd2008-01-07 18:23:41 -05002505 shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n");
Roland Dreieraef9ec32005-11-02 14:07:13 -08002506
Bart Van Assched92c0da2014-10-06 17:14:36 +02002507 ch = &target->ch[0];
Bart Van Assche509c07b2014-10-30 14:48:30 +01002508 if (srp_send_tsk_mgmt(ch, SRP_TAG_NO_REQ, scmnd->device->lun,
David Dillowf8b6e312010-11-26 13:02:21 -05002509 SRP_TSK_LUN_RESET))
Roland Dreierd945e1d2006-05-09 10:50:28 -07002510 return FAILED;
Bart Van Assche509c07b2014-10-30 14:48:30 +01002511 if (ch->tsk_mgmt_status)
Roland Dreierd945e1d2006-05-09 10:50:28 -07002512 return FAILED;
2513
Bart Van Assched92c0da2014-10-06 17:14:36 +02002514 for (i = 0; i < target->ch_count; i++) {
2515 ch = &target->ch[i];
		for (j = 0; j < target->req_ring_size; ++j) {
			struct srp_request *req = &ch->req_ring[j];
Bart Van Assche509c07b2014-10-30 14:48:30 +01002518
Bart Van Assched92c0da2014-10-06 17:14:36 +02002519 srp_finish_req(ch, req, scmnd->device, DID_RESET << 16);
2520 }
Bart Van Assche536ae142010-11-26 13:58:27 -05002521 }
Roland Dreierd945e1d2006-05-09 10:50:28 -07002522
Roland Dreierd945e1d2006-05-09 10:50:28 -07002523 return SUCCESS;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002524}
2525
2526static int srp_reset_host(struct scsi_cmnd *scmnd)
2527{
2528 struct srp_target_port *target = host_to_target(scmnd->device->host);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002529
David Dillow7aa54bd2008-01-07 18:23:41 -05002530 shost_printk(KERN_ERR, target->scsi_host, PFX "SRP reset_host called\n");
Roland Dreieraef9ec32005-11-02 14:07:13 -08002531
Bart Van Asscheed9b2262013-10-26 14:34:27 +02002532 return srp_reconnect_rport(target->rport) == 0 ? SUCCESS : FAILED;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002533}
2534
Bart Van Asschec9b03c12011-09-03 09:34:48 +02002535static int srp_slave_configure(struct scsi_device *sdev)
2536{
2537 struct Scsi_Host *shost = sdev->host;
2538 struct srp_target_port *target = host_to_target(shost);
2539 struct request_queue *q = sdev->request_queue;
2540 unsigned long timeout;
2541
2542 if (sdev->type == TYPE_DISK) {
2543 timeout = max_t(unsigned, 30 * HZ, target->rq_tmo_jiffies);
2544 blk_queue_rq_timeout(q, timeout);
2545 }
2546
2547 return 0;
2548}
2549
Tony Jonesee959b02008-02-22 00:13:36 +01002550static ssize_t show_id_ext(struct device *dev, struct device_attribute *attr,
2551 char *buf)
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002552{
Tony Jonesee959b02008-02-22 00:13:36 +01002553 struct srp_target_port *target = host_to_target(class_to_shost(dev));
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002554
Bart Van Assche45c37ca2015-05-18 13:25:10 +02002555 return sprintf(buf, "0x%016llx\n", be64_to_cpu(target->id_ext));
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002556}
2557
Tony Jonesee959b02008-02-22 00:13:36 +01002558static ssize_t show_ioc_guid(struct device *dev, struct device_attribute *attr,
2559 char *buf)
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002560{
Tony Jonesee959b02008-02-22 00:13:36 +01002561 struct srp_target_port *target = host_to_target(class_to_shost(dev));
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002562
Bart Van Assche45c37ca2015-05-18 13:25:10 +02002563 return sprintf(buf, "0x%016llx\n", be64_to_cpu(target->ioc_guid));
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002564}
2565
Tony Jonesee959b02008-02-22 00:13:36 +01002566static ssize_t show_service_id(struct device *dev,
2567 struct device_attribute *attr, char *buf)
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002568{
Tony Jonesee959b02008-02-22 00:13:36 +01002569 struct srp_target_port *target = host_to_target(class_to_shost(dev));
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002570
Bart Van Assche45c37ca2015-05-18 13:25:10 +02002571 return sprintf(buf, "0x%016llx\n", be64_to_cpu(target->service_id));
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002572}
2573
Tony Jonesee959b02008-02-22 00:13:36 +01002574static ssize_t show_pkey(struct device *dev, struct device_attribute *attr,
2575 char *buf)
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002576{
Tony Jonesee959b02008-02-22 00:13:36 +01002577 struct srp_target_port *target = host_to_target(class_to_shost(dev));
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002578
Bart Van Assche747fe002014-10-30 14:48:05 +01002579 return sprintf(buf, "0x%04x\n", be16_to_cpu(target->pkey));
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002580}
2581
Bart Van Assche848b3082013-10-26 14:38:12 +02002582static ssize_t show_sgid(struct device *dev, struct device_attribute *attr,
2583 char *buf)
2584{
2585 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2586
Bart Van Assche747fe002014-10-30 14:48:05 +01002587 return sprintf(buf, "%pI6\n", target->sgid.raw);
Bart Van Assche848b3082013-10-26 14:38:12 +02002588}
2589
Tony Jonesee959b02008-02-22 00:13:36 +01002590static ssize_t show_dgid(struct device *dev, struct device_attribute *attr,
2591 char *buf)
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002592{
Tony Jonesee959b02008-02-22 00:13:36 +01002593 struct srp_target_port *target = host_to_target(class_to_shost(dev));
Bart Van Assched92c0da2014-10-06 17:14:36 +02002594 struct srp_rdma_ch *ch = &target->ch[0];
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002595
Bart Van Assche509c07b2014-10-30 14:48:30 +01002596 return sprintf(buf, "%pI6\n", ch->path.dgid.raw);
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002597}
2598
Tony Jonesee959b02008-02-22 00:13:36 +01002599static ssize_t show_orig_dgid(struct device *dev,
2600 struct device_attribute *attr, char *buf)
Ishai Rabinovitz3633b3d2007-05-06 21:18:11 -07002601{
Tony Jonesee959b02008-02-22 00:13:36 +01002602 struct srp_target_port *target = host_to_target(class_to_shost(dev));
Ishai Rabinovitz3633b3d2007-05-06 21:18:11 -07002603
Bart Van Assche747fe002014-10-30 14:48:05 +01002604 return sprintf(buf, "%pI6\n", target->orig_dgid.raw);
Ishai Rabinovitz3633b3d2007-05-06 21:18:11 -07002605}
2606
Bart Van Assche89de7482010-08-03 14:08:45 +00002607static ssize_t show_req_lim(struct device *dev,
2608 struct device_attribute *attr, char *buf)
2609{
2610 struct srp_target_port *target = host_to_target(class_to_shost(dev));
Bart Van Assched92c0da2014-10-06 17:14:36 +02002611 struct srp_rdma_ch *ch;
2612 int i, req_lim = INT_MAX;
Bart Van Assche89de7482010-08-03 14:08:45 +00002613
Bart Van Assched92c0da2014-10-06 17:14:36 +02002614 for (i = 0; i < target->ch_count; i++) {
2615 ch = &target->ch[i];
2616 req_lim = min(req_lim, ch->req_lim);
2617 }
2618 return sprintf(buf, "%d\n", req_lim);
Bart Van Assche89de7482010-08-03 14:08:45 +00002619}
2620
Tony Jonesee959b02008-02-22 00:13:36 +01002621static ssize_t show_zero_req_lim(struct device *dev,
2622 struct device_attribute *attr, char *buf)
Roland Dreier6bfa24f2006-06-17 20:37:33 -07002623{
Tony Jonesee959b02008-02-22 00:13:36 +01002624 struct srp_target_port *target = host_to_target(class_to_shost(dev));
Roland Dreier6bfa24f2006-06-17 20:37:33 -07002625
Roland Dreier6bfa24f2006-06-17 20:37:33 -07002626 return sprintf(buf, "%d\n", target->zero_req_lim);
2627}
2628
Tony Jonesee959b02008-02-22 00:13:36 +01002629static ssize_t show_local_ib_port(struct device *dev,
2630 struct device_attribute *attr, char *buf)
Ishai Rabinovitzded7f1a2006-08-15 17:34:52 +03002631{
Tony Jonesee959b02008-02-22 00:13:36 +01002632 struct srp_target_port *target = host_to_target(class_to_shost(dev));
Ishai Rabinovitzded7f1a2006-08-15 17:34:52 +03002633
2634 return sprintf(buf, "%d\n", target->srp_host->port);
2635}
2636
Tony Jonesee959b02008-02-22 00:13:36 +01002637static ssize_t show_local_ib_device(struct device *dev,
2638 struct device_attribute *attr, char *buf)
Ishai Rabinovitzded7f1a2006-08-15 17:34:52 +03002639{
Tony Jonesee959b02008-02-22 00:13:36 +01002640 struct srp_target_port *target = host_to_target(class_to_shost(dev));
Ishai Rabinovitzded7f1a2006-08-15 17:34:52 +03002641
Greg Kroah-Hartman05321932008-03-06 00:13:36 +01002642 return sprintf(buf, "%s\n", target->srp_host->srp_dev->dev->name);
Ishai Rabinovitzded7f1a2006-08-15 17:34:52 +03002643}
2644
Bart Van Assched92c0da2014-10-06 17:14:36 +02002645static ssize_t show_ch_count(struct device *dev, struct device_attribute *attr,
2646 char *buf)
2647{
2648 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2649
2650 return sprintf(buf, "%d\n", target->ch_count);
2651}
2652
Bart Van Assche4b5e5f42013-06-28 14:57:42 +02002653static ssize_t show_comp_vector(struct device *dev,
2654 struct device_attribute *attr, char *buf)
2655{
2656 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2657
2658 return sprintf(buf, "%d\n", target->comp_vector);
2659}
2660
Vu Pham7bb312e2013-10-26 14:31:27 +02002661static ssize_t show_tl_retry_count(struct device *dev,
2662 struct device_attribute *attr, char *buf)
2663{
2664 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2665
2666 return sprintf(buf, "%d\n", target->tl_retry_count);
2667}
2668
David Dillow49248642011-01-14 18:23:24 -05002669static ssize_t show_cmd_sg_entries(struct device *dev,
2670 struct device_attribute *attr, char *buf)
2671{
2672 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2673
2674 return sprintf(buf, "%u\n", target->cmd_sg_cnt);
2675}
2676
David Dillowc07d4242011-01-16 13:57:10 -05002677static ssize_t show_allow_ext_sg(struct device *dev,
2678 struct device_attribute *attr, char *buf)
2679{
2680 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2681
2682 return sprintf(buf, "%s\n", target->allow_ext_sg ? "true" : "false");
2683}
2684
Tony Jonesee959b02008-02-22 00:13:36 +01002685static DEVICE_ATTR(id_ext, S_IRUGO, show_id_ext, NULL);
2686static DEVICE_ATTR(ioc_guid, S_IRUGO, show_ioc_guid, NULL);
2687static DEVICE_ATTR(service_id, S_IRUGO, show_service_id, NULL);
2688static DEVICE_ATTR(pkey, S_IRUGO, show_pkey, NULL);
Bart Van Assche848b3082013-10-26 14:38:12 +02002689static DEVICE_ATTR(sgid, S_IRUGO, show_sgid, NULL);
Tony Jonesee959b02008-02-22 00:13:36 +01002690static DEVICE_ATTR(dgid, S_IRUGO, show_dgid, NULL);
2691static DEVICE_ATTR(orig_dgid, S_IRUGO, show_orig_dgid, NULL);
Bart Van Assche89de7482010-08-03 14:08:45 +00002692static DEVICE_ATTR(req_lim, S_IRUGO, show_req_lim, NULL);
Tony Jonesee959b02008-02-22 00:13:36 +01002693static DEVICE_ATTR(zero_req_lim, S_IRUGO, show_zero_req_lim, NULL);
2694static DEVICE_ATTR(local_ib_port, S_IRUGO, show_local_ib_port, NULL);
2695static DEVICE_ATTR(local_ib_device, S_IRUGO, show_local_ib_device, NULL);
Bart Van Assched92c0da2014-10-06 17:14:36 +02002696static DEVICE_ATTR(ch_count, S_IRUGO, show_ch_count, NULL);
Bart Van Assche4b5e5f42013-06-28 14:57:42 +02002697static DEVICE_ATTR(comp_vector, S_IRUGO, show_comp_vector, NULL);
Vu Pham7bb312e2013-10-26 14:31:27 +02002698static DEVICE_ATTR(tl_retry_count, S_IRUGO, show_tl_retry_count, NULL);
David Dillow49248642011-01-14 18:23:24 -05002699static DEVICE_ATTR(cmd_sg_entries, S_IRUGO, show_cmd_sg_entries, NULL);
David Dillowc07d4242011-01-16 13:57:10 -05002700static DEVICE_ATTR(allow_ext_sg, S_IRUGO, show_allow_ext_sg, NULL);
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002701
Tony Jonesee959b02008-02-22 00:13:36 +01002702static struct device_attribute *srp_host_attrs[] = {
2703 &dev_attr_id_ext,
2704 &dev_attr_ioc_guid,
2705 &dev_attr_service_id,
2706 &dev_attr_pkey,
Bart Van Assche848b3082013-10-26 14:38:12 +02002707 &dev_attr_sgid,
Tony Jonesee959b02008-02-22 00:13:36 +01002708 &dev_attr_dgid,
2709 &dev_attr_orig_dgid,
Bart Van Assche89de7482010-08-03 14:08:45 +00002710 &dev_attr_req_lim,
Tony Jonesee959b02008-02-22 00:13:36 +01002711 &dev_attr_zero_req_lim,
2712 &dev_attr_local_ib_port,
2713 &dev_attr_local_ib_device,
Bart Van Assched92c0da2014-10-06 17:14:36 +02002714 &dev_attr_ch_count,
Bart Van Assche4b5e5f42013-06-28 14:57:42 +02002715 &dev_attr_comp_vector,
Vu Pham7bb312e2013-10-26 14:31:27 +02002716 &dev_attr_tl_retry_count,
David Dillow49248642011-01-14 18:23:24 -05002717 &dev_attr_cmd_sg_entries,
David Dillowc07d4242011-01-16 13:57:10 -05002718 &dev_attr_allow_ext_sg,
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002719 NULL
2720};
2721
Roland Dreieraef9ec32005-11-02 14:07:13 -08002722static struct scsi_host_template srp_template = {
2723 .module = THIS_MODULE,
Roland Dreierb7f008f2007-05-06 21:18:11 -07002724 .name = "InfiniBand SRP initiator",
2725 .proc_name = DRV_NAME,
Bart Van Asschec9b03c12011-09-03 09:34:48 +02002726 .slave_configure = srp_slave_configure,
Roland Dreieraef9ec32005-11-02 14:07:13 -08002727 .info = srp_target_info,
2728 .queuecommand = srp_queuecommand,
Jack Wang71444b92013-11-07 11:37:37 +01002729 .change_queue_depth = srp_change_queue_depth,
Roland Dreieraef9ec32005-11-02 14:07:13 -08002730 .eh_abort_handler = srp_abort,
2731 .eh_device_reset_handler = srp_reset_device,
2732 .eh_host_reset_handler = srp_reset_host,
Bart Van Assche2742c1d2013-06-12 15:24:25 +02002733 .skip_settle_delay = true,
David Dillow49248642011-01-14 18:23:24 -05002734 .sg_tablesize = SRP_DEF_SG_TABLESIZE,
Bart Van Assche4d73f952013-10-26 14:40:37 +02002735 .can_queue = SRP_DEFAULT_CMD_SQ_SIZE,
Roland Dreieraef9ec32005-11-02 14:07:13 -08002736 .this_id = -1,
Bart Van Assche4d73f952013-10-26 14:40:37 +02002737 .cmd_per_lun = SRP_DEFAULT_CMD_SQ_SIZE,
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002738 .use_clustering = ENABLE_CLUSTERING,
Bart Van Assche77f2c1a2014-10-02 15:29:25 +02002739 .shost_attrs = srp_host_attrs,
2740 .use_blk_tags = 1,
Christoph Hellwigc40ecc12014-11-13 14:25:11 +01002741 .track_queue_depth = 1,
Roland Dreieraef9ec32005-11-02 14:07:13 -08002742};
2743
Bart Van Assche34aa6542014-10-30 14:47:22 +01002744static int srp_sdev_count(struct Scsi_Host *host)
2745{
2746 struct scsi_device *sdev;
2747 int c = 0;
2748
2749 shost_for_each_device(sdev, host)
2750 c++;
2751
2752 return c;
2753}
2754
Bart Van Asschebc44bd12015-08-14 11:01:09 -07002755/*
2756 * Return values:
2757 * < 0 upon failure. Caller is responsible for SRP target port cleanup.
2758 * 0 and target->state == SRP_TARGET_REMOVED if asynchronous target port
2759 * removal has been scheduled.
2760 * 0 and target->state != SRP_TARGET_REMOVED upon success.
2761 */
Roland Dreieraef9ec32005-11-02 14:07:13 -08002762static int srp_add_target(struct srp_host *host, struct srp_target_port *target)
2763{
FUJITA Tomonori32368222007-06-27 16:33:12 +09002764 struct srp_rport_identifiers ids;
2765 struct srp_rport *rport;
2766
Bart Van Assche34aa6542014-10-30 14:47:22 +01002767 target->state = SRP_TARGET_SCANNING;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002768 sprintf(target->target_name, "SRP.T10:%016llX",
Bart Van Assche45c37ca2015-05-18 13:25:10 +02002769 be64_to_cpu(target->id_ext));
Roland Dreieraef9ec32005-11-02 14:07:13 -08002770
Greg Kroah-Hartman05321932008-03-06 00:13:36 +01002771 if (scsi_add_host(target->scsi_host, host->srp_dev->dev->dma_device))
Roland Dreieraef9ec32005-11-02 14:07:13 -08002772 return -ENODEV;
2773
FUJITA Tomonori32368222007-06-27 16:33:12 +09002774 memcpy(ids.port_id, &target->id_ext, 8);
2775 memcpy(ids.port_id + 8, &target->ioc_guid, 8);
FUJITA Tomonoriaebd5e42007-07-11 15:08:15 +09002776 ids.roles = SRP_RPORT_ROLE_TARGET;
FUJITA Tomonori32368222007-06-27 16:33:12 +09002777 rport = srp_rport_add(target->scsi_host, &ids);
2778 if (IS_ERR(rport)) {
2779 scsi_remove_host(target->scsi_host);
2780 return PTR_ERR(rport);
2781 }
2782
Bart Van Asschedc1bdbd2011-09-16 20:41:13 +02002783 rport->lld_data = target;
Bart Van Assche9dd69a62013-10-26 14:32:30 +02002784 target->rport = rport;
Bart Van Asschedc1bdbd2011-09-16 20:41:13 +02002785
Matthew Wilcoxb3589fd2006-06-17 20:37:30 -07002786 spin_lock(&host->target_lock);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002787 list_add_tail(&target->list, &host->target_list);
Matthew Wilcoxb3589fd2006-06-17 20:37:30 -07002788 spin_unlock(&host->target_lock);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002789
Roland Dreieraef9ec32005-11-02 14:07:13 -08002790 scsi_scan_target(&target->scsi_host->shost_gendev,
Matthew Wilcox1962a4a2006-06-17 20:37:30 -07002791 0, target->scsi_id, SCAN_WILD_CARD, 0);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002792
Bart Van Asschec014c8c2015-05-18 13:23:57 +02002793 if (srp_connected_ch(target) < target->ch_count ||
2794 target->qp_in_error) {
Bart Van Assche34aa6542014-10-30 14:47:22 +01002795 shost_printk(KERN_INFO, target->scsi_host,
2796 PFX "SCSI scan failed - removing SCSI host\n");
2797 srp_queue_remove_work(target);
2798 goto out;
2799 }
2800
2801 pr_debug(PFX "%s: SCSI scan succeeded - detected %d LUNs\n",
2802 dev_name(&target->scsi_host->shost_gendev),
2803 srp_sdev_count(target->scsi_host));
2804
2805 spin_lock_irq(&target->lock);
2806 if (target->state == SRP_TARGET_SCANNING)
2807 target->state = SRP_TARGET_LIVE;
2808 spin_unlock_irq(&target->lock);
2809
2810out:
Roland Dreieraef9ec32005-11-02 14:07:13 -08002811 return 0;
2812}
2813
Tony Jonesee959b02008-02-22 00:13:36 +01002814static void srp_release_dev(struct device *dev)
Roland Dreieraef9ec32005-11-02 14:07:13 -08002815{
2816 struct srp_host *host =
Tony Jonesee959b02008-02-22 00:13:36 +01002817 container_of(dev, struct srp_host, dev);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002818
2819 complete(&host->released);
2820}
2821
2822static struct class srp_class = {
2823 .name = "infiniband_srp",
Tony Jonesee959b02008-02-22 00:13:36 +01002824 .dev_release = srp_release_dev
Roland Dreieraef9ec32005-11-02 14:07:13 -08002825};
2826
Bart Van Assche96fc2482013-06-28 14:51:26 +02002827/**
2828 * srp_conn_unique() - check whether the connection to a target is unique
Bart Van Asscheaf246632014-05-20 15:04:21 +02002829 * @host: SRP host.
2830 * @target: SRP target port.
Bart Van Assche96fc2482013-06-28 14:51:26 +02002831 */
2832static bool srp_conn_unique(struct srp_host *host,
2833 struct srp_target_port *target)
2834{
2835 struct srp_target_port *t;
2836 bool ret = false;
2837
2838 if (target->state == SRP_TARGET_REMOVED)
2839 goto out;
2840
2841 ret = true;
2842
2843 spin_lock(&host->target_lock);
2844 list_for_each_entry(t, &host->target_list, list) {
2845 if (t != target &&
2846 target->id_ext == t->id_ext &&
2847 target->ioc_guid == t->ioc_guid &&
2848 target->initiator_ext == t->initiator_ext) {
2849 ret = false;
2850 break;
2851 }
2852 }
2853 spin_unlock(&host->target_lock);
2854
2855out:
2856 return ret;
2857}
2858
Roland Dreieraef9ec32005-11-02 14:07:13 -08002859/*
2860 * Target ports are added by writing
2861 *
2862 * id_ext=<SRP ID ext>,ioc_guid=<SRP IOC GUID>,dgid=<dest GID>,
2863 * pkey=<P_Key>,service_id=<service ID>
2864 *
2865 * to the add_target sysfs attribute.
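 *
 * Example, with illustrative identifier values (the per-port host
 * directory is named "srp-<device>-<port>" by srp_add_port() below):
 *
 *   echo id_ext=0002c90300a0e554,ioc_guid=0002c90300a0e554,dgid=fe800000000000000002c90300a0e555,pkey=ffff,service_id=0002c90300a0e554 > /sys/class/infiniband_srp/srp-mlx4_0-1/add_target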
2866 */
2867enum {
2868 SRP_OPT_ERR = 0,
2869 SRP_OPT_ID_EXT = 1 << 0,
2870 SRP_OPT_IOC_GUID = 1 << 1,
2871 SRP_OPT_DGID = 1 << 2,
2872 SRP_OPT_PKEY = 1 << 3,
2873 SRP_OPT_SERVICE_ID = 1 << 4,
2874 SRP_OPT_MAX_SECT = 1 << 5,
Vu Pham52fb2b502006-06-17 20:37:31 -07002875 SRP_OPT_MAX_CMD_PER_LUN = 1 << 6,
Ramachandra K0c0450db2006-06-17 20:37:38 -07002876 SRP_OPT_IO_CLASS = 1 << 7,
Ishai Rabinovitz01cb9bc2006-10-04 15:28:56 +02002877 SRP_OPT_INITIATOR_EXT = 1 << 8,
David Dillow49248642011-01-14 18:23:24 -05002878 SRP_OPT_CMD_SG_ENTRIES = 1 << 9,
David Dillowc07d4242011-01-16 13:57:10 -05002879 SRP_OPT_ALLOW_EXT_SG = 1 << 10,
2880 SRP_OPT_SG_TABLESIZE = 1 << 11,
Bart Van Assche4b5e5f42013-06-28 14:57:42 +02002881 SRP_OPT_COMP_VECTOR = 1 << 12,
Vu Pham7bb312e2013-10-26 14:31:27 +02002882 SRP_OPT_TL_RETRY_COUNT = 1 << 13,
Bart Van Assche4d73f952013-10-26 14:40:37 +02002883 SRP_OPT_QUEUE_SIZE = 1 << 14,
Roland Dreieraef9ec32005-11-02 14:07:13 -08002884 SRP_OPT_ALL = (SRP_OPT_ID_EXT |
2885 SRP_OPT_IOC_GUID |
2886 SRP_OPT_DGID |
2887 SRP_OPT_PKEY |
2888 SRP_OPT_SERVICE_ID),
2889};
2890
Steven Whitehousea447c092008-10-13 10:46:57 +01002891static const match_table_t srp_opt_tokens = {
Vu Pham52fb2b502006-06-17 20:37:31 -07002892 { SRP_OPT_ID_EXT, "id_ext=%s" },
2893 { SRP_OPT_IOC_GUID, "ioc_guid=%s" },
2894 { SRP_OPT_DGID, "dgid=%s" },
2895 { SRP_OPT_PKEY, "pkey=%x" },
2896 { SRP_OPT_SERVICE_ID, "service_id=%s" },
2897 { SRP_OPT_MAX_SECT, "max_sect=%d" },
2898 { SRP_OPT_MAX_CMD_PER_LUN, "max_cmd_per_lun=%d" },
Ramachandra K0c0450db2006-06-17 20:37:38 -07002899 { SRP_OPT_IO_CLASS, "io_class=%x" },
Ishai Rabinovitz01cb9bc2006-10-04 15:28:56 +02002900 { SRP_OPT_INITIATOR_EXT, "initiator_ext=%s" },
David Dillow49248642011-01-14 18:23:24 -05002901 { SRP_OPT_CMD_SG_ENTRIES, "cmd_sg_entries=%u" },
David Dillowc07d4242011-01-16 13:57:10 -05002902 { SRP_OPT_ALLOW_EXT_SG, "allow_ext_sg=%u" },
2903 { SRP_OPT_SG_TABLESIZE, "sg_tablesize=%u" },
Bart Van Assche4b5e5f42013-06-28 14:57:42 +02002904 { SRP_OPT_COMP_VECTOR, "comp_vector=%u" },
Vu Pham7bb312e2013-10-26 14:31:27 +02002905 { SRP_OPT_TL_RETRY_COUNT, "tl_retry_count=%u" },
Bart Van Assche4d73f952013-10-26 14:40:37 +02002906 { SRP_OPT_QUEUE_SIZE, "queue_size=%d" },
Vu Pham52fb2b502006-06-17 20:37:31 -07002907 { SRP_OPT_ERR, NULL }
Roland Dreieraef9ec32005-11-02 14:07:13 -08002908};
2909
2910static int srp_parse_options(const char *buf, struct srp_target_port *target)
2911{
2912 char *options, *sep_opt;
2913 char *p;
2914 char dgid[3];
2915 substring_t args[MAX_OPT_ARGS];
2916 int opt_mask = 0;
2917 int token;
2918 int ret = -EINVAL;
2919 int i;
2920
2921 options = kstrdup(buf, GFP_KERNEL);
2922 if (!options)
2923 return -ENOMEM;
2924
2925 sep_opt = options;
Sagi Grimberg7dcf9c12014-10-19 18:19:02 +03002926 while ((p = strsep(&sep_opt, ",\n")) != NULL) {
Roland Dreieraef9ec32005-11-02 14:07:13 -08002927 if (!*p)
2928 continue;
2929
2930 token = match_token(p, srp_opt_tokens, args);
2931 opt_mask |= token;
2932
2933 switch (token) {
2934 case SRP_OPT_ID_EXT:
2935 p = match_strdup(args);
Ishai Rabinovitza20f3a62007-01-16 17:20:25 +02002936 if (!p) {
2937 ret = -ENOMEM;
2938 goto out;
2939 }
Roland Dreieraef9ec32005-11-02 14:07:13 -08002940 target->id_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
2941 kfree(p);
2942 break;
2943
2944 case SRP_OPT_IOC_GUID:
2945 p = match_strdup(args);
Ishai Rabinovitza20f3a62007-01-16 17:20:25 +02002946 if (!p) {
2947 ret = -ENOMEM;
2948 goto out;
2949 }
Roland Dreieraef9ec32005-11-02 14:07:13 -08002950 target->ioc_guid = cpu_to_be64(simple_strtoull(p, NULL, 16));
2951 kfree(p);
2952 break;
2953
2954 case SRP_OPT_DGID:
2955 p = match_strdup(args);
Ishai Rabinovitza20f3a62007-01-16 17:20:25 +02002956 if (!p) {
2957 ret = -ENOMEM;
2958 goto out;
2959 }
Roland Dreieraef9ec32005-11-02 14:07:13 -08002960 if (strlen(p) != 32) {
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00002961 pr_warn("bad dest GID parameter '%s'\n", p);
Roland Dreierce1823f2006-04-03 09:31:04 -07002962 kfree(p);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002963 goto out;
2964 }
2965
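			/*
			 * The destination GID consists of 32 hex digits, two
			 * per raw GID byte, e.g.
			 * "fe800000000000000002c90300a0e555" (illustrative).
			 */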
2966 for (i = 0; i < 16; ++i) {
Bart Van Assche747fe002014-10-30 14:48:05 +01002967 strlcpy(dgid, p + i * 2, sizeof(dgid));
2968 if (sscanf(dgid, "%hhx",
2969 &target->orig_dgid.raw[i]) < 1) {
2970 ret = -EINVAL;
2971 kfree(p);
2972 goto out;
2973 }
Roland Dreieraef9ec32005-11-02 14:07:13 -08002974 }
Roland Dreierbf17c1c2006-03-20 10:08:25 -08002975 kfree(p);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002976 break;
2977
2978 case SRP_OPT_PKEY:
2979 if (match_hex(args, &token)) {
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00002980 pr_warn("bad P_Key parameter '%s'\n", p);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002981 goto out;
2982 }
Bart Van Assche747fe002014-10-30 14:48:05 +01002983 target->pkey = cpu_to_be16(token);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002984 break;
2985
2986 case SRP_OPT_SERVICE_ID:
2987 p = match_strdup(args);
Ishai Rabinovitza20f3a62007-01-16 17:20:25 +02002988 if (!p) {
2989 ret = -ENOMEM;
2990 goto out;
2991 }
Roland Dreieraef9ec32005-11-02 14:07:13 -08002992 target->service_id = cpu_to_be64(simple_strtoull(p, NULL, 16));
2993 kfree(p);
2994 break;
2995
2996 case SRP_OPT_MAX_SECT:
2997 if (match_int(args, &token)) {
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00002998 pr_warn("bad max sect parameter '%s'\n", p);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002999 goto out;
3000 }
3001 target->scsi_host->max_sectors = token;
3002 break;
3003
Bart Van Assche4d73f952013-10-26 14:40:37 +02003004 case SRP_OPT_QUEUE_SIZE:
3005 if (match_int(args, &token) || token < 1) {
3006 pr_warn("bad queue_size parameter '%s'\n", p);
3007 goto out;
3008 }
3009 target->scsi_host->can_queue = token;
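			/*
			 * The RDMA queue size is the requested SCSI command
			 * queue depth plus extra slots reserved for response
			 * and task management IUs.
			 */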
3010 target->queue_size = token + SRP_RSP_SQ_SIZE +
3011 SRP_TSK_MGMT_SQ_SIZE;
3012 if (!(opt_mask & SRP_OPT_MAX_CMD_PER_LUN))
3013 target->scsi_host->cmd_per_lun = token;
3014 break;
3015
Vu Pham52fb2b502006-06-17 20:37:31 -07003016 case SRP_OPT_MAX_CMD_PER_LUN:
Bart Van Assche4d73f952013-10-26 14:40:37 +02003017 if (match_int(args, &token) || token < 1) {
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00003018 pr_warn("bad max cmd_per_lun parameter '%s'\n",
3019 p);
Vu Pham52fb2b502006-06-17 20:37:31 -07003020 goto out;
3021 }
Bart Van Assche4d73f952013-10-26 14:40:37 +02003022 target->scsi_host->cmd_per_lun = token;
Vu Pham52fb2b502006-06-17 20:37:31 -07003023 break;
3024
Ramachandra K0c0450db2006-06-17 20:37:38 -07003025 case SRP_OPT_IO_CLASS:
3026 if (match_hex(args, &token)) {
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00003027 pr_warn("bad IO class parameter '%s'\n", p);
Ramachandra K0c0450db2006-06-17 20:37:38 -07003028 goto out;
3029 }
3030 if (token != SRP_REV10_IB_IO_CLASS &&
3031 token != SRP_REV16A_IB_IO_CLASS) {
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00003032 pr_warn("unknown IO class parameter value %x specified (use %x or %x).\n",
3033 token, SRP_REV10_IB_IO_CLASS,
3034 SRP_REV16A_IB_IO_CLASS);
Ramachandra K0c0450db2006-06-17 20:37:38 -07003035 goto out;
3036 }
3037 target->io_class = token;
3038 break;
3039
Ishai Rabinovitz01cb9bc2006-10-04 15:28:56 +02003040 case SRP_OPT_INITIATOR_EXT:
3041 p = match_strdup(args);
Ishai Rabinovitza20f3a62007-01-16 17:20:25 +02003042 if (!p) {
3043 ret = -ENOMEM;
3044 goto out;
3045 }
Ishai Rabinovitz01cb9bc2006-10-04 15:28:56 +02003046 target->initiator_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
3047 kfree(p);
3048 break;
3049
David Dillow49248642011-01-14 18:23:24 -05003050 case SRP_OPT_CMD_SG_ENTRIES:
3051 if (match_int(args, &token) || token < 1 || token > 255) {
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00003052 pr_warn("bad max cmd_sg_entries parameter '%s'\n",
3053 p);
David Dillow49248642011-01-14 18:23:24 -05003054 goto out;
3055 }
3056 target->cmd_sg_cnt = token;
3057 break;
3058
David Dillowc07d4242011-01-16 13:57:10 -05003059 case SRP_OPT_ALLOW_EXT_SG:
3060 if (match_int(args, &token)) {
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00003061 pr_warn("bad allow_ext_sg parameter '%s'\n", p);
David Dillowc07d4242011-01-16 13:57:10 -05003062 goto out;
3063 }
3064 target->allow_ext_sg = !!token;
3065 break;
3066
3067 case SRP_OPT_SG_TABLESIZE:
3068 if (match_int(args, &token) || token < 1 ||
3069 token > SCSI_MAX_SG_CHAIN_SEGMENTS) {
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00003070 pr_warn("bad max sg_tablesize parameter '%s'\n",
3071 p);
David Dillowc07d4242011-01-16 13:57:10 -05003072 goto out;
3073 }
3074 target->sg_tablesize = token;
3075 break;
3076
Bart Van Assche4b5e5f42013-06-28 14:57:42 +02003077 case SRP_OPT_COMP_VECTOR:
3078 if (match_int(args, &token) || token < 0) {
3079 pr_warn("bad comp_vector parameter '%s'\n", p);
3080 goto out;
3081 }
3082 target->comp_vector = token;
3083 break;
3084
Vu Pham7bb312e2013-10-26 14:31:27 +02003085 case SRP_OPT_TL_RETRY_COUNT:
3086 if (match_int(args, &token) || token < 2 || token > 7) {
3087 pr_warn("bad tl_retry_count parameter '%s' (must be a number between 2 and 7)\n",
3088 p);
3089 goto out;
3090 }
3091 target->tl_retry_count = token;
3092 break;
3093
Roland Dreieraef9ec32005-11-02 14:07:13 -08003094 default:
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00003095 pr_warn("unknown parameter or missing value '%s' in target creation request\n",
3096 p);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003097 goto out;
3098 }
3099 }
3100
3101 if ((opt_mask & SRP_OPT_ALL) == SRP_OPT_ALL)
3102 ret = 0;
3103 else
3104 for (i = 0; i < ARRAY_SIZE(srp_opt_tokens); ++i)
3105 if ((srp_opt_tokens[i].token & SRP_OPT_ALL) &&
3106 !(srp_opt_tokens[i].token & opt_mask))
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00003107 pr_warn("target creation request is missing parameter '%s'\n",
3108 srp_opt_tokens[i].pattern);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003109
Bart Van Assche4d73f952013-10-26 14:40:37 +02003110 if (target->scsi_host->cmd_per_lun > target->scsi_host->can_queue
3111 && (opt_mask & SRP_OPT_MAX_CMD_PER_LUN))
3112 pr_warn("cmd_per_lun = %d > queue_size = %d\n",
3113 target->scsi_host->cmd_per_lun,
3114 target->scsi_host->can_queue);
3115
Roland Dreieraef9ec32005-11-02 14:07:13 -08003116out:
3117 kfree(options);
3118 return ret;
3119}
3120
Tony Jonesee959b02008-02-22 00:13:36 +01003121static ssize_t srp_create_target(struct device *dev,
3122 struct device_attribute *attr,
Roland Dreieraef9ec32005-11-02 14:07:13 -08003123 const char *buf, size_t count)
3124{
3125 struct srp_host *host =
Tony Jonesee959b02008-02-22 00:13:36 +01003126 container_of(dev, struct srp_host, dev);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003127 struct Scsi_Host *target_host;
3128 struct srp_target_port *target;
Bart Van Assche509c07b2014-10-30 14:48:30 +01003129 struct srp_rdma_ch *ch;
Bart Van Assched1b42892014-05-20 15:07:20 +02003130 struct srp_device *srp_dev = host->srp_dev;
3131 struct ib_device *ibdev = srp_dev->dev;
Bart Van Assched92c0da2014-10-06 17:14:36 +02003132 int ret, node_idx, node, cpu, i;
3133 bool multich = false;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003134
3135 target_host = scsi_host_alloc(&srp_template,
3136 sizeof (struct srp_target_port));
3137 if (!target_host)
3138 return -ENOMEM;
3139
David Dillow49248642011-01-14 18:23:24 -05003140 target_host->transportt = ib_srp_transport_template;
Bart Van Asschefd1b6c42011-07-13 09:19:16 -07003141 target_host->max_channel = 0;
3142 target_host->max_id = 1;
Bart Van Assche985aa492015-05-18 13:27:14 +02003143 target_host->max_lun = -1LL;
Arne Redlich3c8edf02006-11-15 12:43:00 +01003144 target_host->max_cmd_len = sizeof ((struct srp_cmd *) (void *) 0L)->cdb;
Roland Dreier5f068992005-11-11 14:06:01 -08003145
Roland Dreieraef9ec32005-11-02 14:07:13 -08003146 target = host_to_target(target_host);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003147
David Dillow49248642011-01-14 18:23:24 -05003148 target->io_class = SRP_REV16A_IB_IO_CLASS;
3149 target->scsi_host = target_host;
3150 target->srp_host = host;
Jason Gunthorpee6bf5f482015-07-30 17:22:22 -06003151 target->lkey = host->srp_dev->pd->local_dma_lkey;
David Dillow49248642011-01-14 18:23:24 -05003152 target->rkey = host->srp_dev->mr->rkey;
3153 target->cmd_sg_cnt = cmd_sg_entries;
David Dillowc07d4242011-01-16 13:57:10 -05003154 target->sg_tablesize = indirect_sg_entries ? : cmd_sg_entries;
3155 target->allow_ext_sg = allow_ext_sg;
Vu Pham7bb312e2013-10-26 14:31:27 +02003156 target->tl_retry_count = 7;
Bart Van Assche4d73f952013-10-26 14:40:37 +02003157 target->queue_size = SRP_DEFAULT_QUEUE_SIZE;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003158
Bart Van Assche34aa6542014-10-30 14:47:22 +01003159 /*
	 * Prevent the SCSI host from being removed by srp_remove_target()
	 * before this function returns.
3162 */
3163 scsi_host_get(target->scsi_host);
3164
Bart Van Assche2d7091b2014-03-14 13:52:45 +01003165 mutex_lock(&host->add_target_mutex);
3166
Roland Dreieraef9ec32005-11-02 14:07:13 -08003167 ret = srp_parse_options(buf, target);
3168 if (ret)
Bart Van Asschefb49c8b2015-05-18 13:23:14 +02003169 goto out;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003170
Bart Van Assche77f2c1a2014-10-02 15:29:25 +02003171 ret = scsi_init_shared_tag_map(target_host, target_host->can_queue);
3172 if (ret)
Bart Van Asschefb49c8b2015-05-18 13:23:14 +02003173 goto out;
Bart Van Assche77f2c1a2014-10-02 15:29:25 +02003174
Bart Van Assche4d73f952013-10-26 14:40:37 +02003175 target->req_ring_size = target->queue_size - SRP_TSK_MGMT_SQ_SIZE;
3176
Bart Van Assche96fc2482013-06-28 14:51:26 +02003177 if (!srp_conn_unique(target->srp_host, target)) {
3178 shost_printk(KERN_INFO, target->scsi_host,
3179 PFX "Already connected to target port with id_ext=%016llx;ioc_guid=%016llx;initiator_ext=%016llx\n",
3180 be64_to_cpu(target->id_ext),
3181 be64_to_cpu(target->ioc_guid),
3182 be64_to_cpu(target->initiator_ext));
3183 ret = -EEXIST;
Bart Van Asschefb49c8b2015-05-18 13:23:14 +02003184 goto out;
Bart Van Assche96fc2482013-06-28 14:51:26 +02003185 }
3186
Bart Van Assche5cfb1782014-05-20 15:08:34 +02003187 if (!srp_dev->has_fmr && !srp_dev->has_fr && !target->allow_ext_sg &&
Bart Van Assched1b42892014-05-20 15:07:20 +02003188 target->cmd_sg_cnt < target->sg_tablesize) {
Bart Van Assche5cfb1782014-05-20 15:08:34 +02003189 pr_warn("No MR pool and no external indirect descriptors, limiting sg_tablesize to cmd_sg_cnt\n");
David Dillowc07d4242011-01-16 13:57:10 -05003190 target->sg_tablesize = target->cmd_sg_cnt;
3191 }
3192
3193 target_host->sg_tablesize = target->sg_tablesize;
3194 target->indirect_size = target->sg_tablesize *
3195 sizeof (struct srp_direct_buf);
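	/*
	 * Largest information unit we will send: an SRP_CMD header plus an
	 * indirect descriptor table with one direct descriptor per
	 * scatter/gather entry embedded in the command.
	 */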
David Dillow49248642011-01-14 18:23:24 -05003196 target->max_iu_len = sizeof (struct srp_cmd) +
3197 sizeof (struct srp_indirect_buf) +
3198 target->cmd_sg_cnt * sizeof (struct srp_direct_buf);
3199
Bart Van Asschec1120f82013-10-26 14:35:08 +02003200 INIT_WORK(&target->tl_err_work, srp_tl_err_work);
Bart Van Asscheef6c49d2011-12-26 16:49:18 +00003201 INIT_WORK(&target->remove_work, srp_remove_work);
David Dillow8f26c9f2011-01-14 19:45:50 -05003202 spin_lock_init(&target->lock);
Bart Van Assche747fe002014-10-30 14:48:05 +01003203 ret = ib_query_gid(ibdev, host->port, 0, &target->sgid);
Sagi Grimberg2088ca62014-03-14 13:51:58 +01003204 if (ret)
Bart Van Asschefb49c8b2015-05-18 13:23:14 +02003205 goto out;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003206
Bart Van Assched92c0da2014-10-06 17:14:36 +02003207 ret = -ENOMEM;
3208 target->ch_count = max_t(unsigned, num_online_nodes(),
3209 min(ch_count ? :
3210 min(4 * num_online_nodes(),
3211 ibdev->num_comp_vectors),
3212 num_online_cpus()));
3213 target->ch = kcalloc(target->ch_count, sizeof(*target->ch),
3214 GFP_KERNEL);
3215 if (!target->ch)
Bart Van Asschefb49c8b2015-05-18 13:23:14 +02003216 goto out;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003217
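	/*
	 * Spread the channels evenly over the online NUMA nodes and give each
	 * node's channels a contiguous slice of the device's completion
	 * vectors. Illustrative example: with two online nodes, eight
	 * channels and four completion vectors (comp_vector = 0), node 0 gets
	 * channels 0..3 on completion vectors 0..1 and node 1 gets channels
	 * 4..7 on completion vectors 2..3.
	 */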
Bart Van Assched92c0da2014-10-06 17:14:36 +02003218 node_idx = 0;
3219 for_each_online_node(node) {
3220 const int ch_start = (node_idx * target->ch_count /
3221 num_online_nodes());
3222 const int ch_end = ((node_idx + 1) * target->ch_count /
3223 num_online_nodes());
3224 const int cv_start = (node_idx * ibdev->num_comp_vectors /
3225 num_online_nodes() + target->comp_vector)
3226 % ibdev->num_comp_vectors;
3227 const int cv_end = ((node_idx + 1) * ibdev->num_comp_vectors /
3228 num_online_nodes() + target->comp_vector)
3229 % ibdev->num_comp_vectors;
3230 int cpu_idx = 0;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003231
Bart Van Assched92c0da2014-10-06 17:14:36 +02003232 for_each_online_cpu(cpu) {
3233 if (cpu_to_node(cpu) != node)
3234 continue;
3235 if (ch_start + cpu_idx >= ch_end)
3236 continue;
3237 ch = &target->ch[ch_start + cpu_idx];
3238 ch->target = target;
3239 ch->comp_vector = cv_start == cv_end ? cv_start :
3240 cv_start + cpu_idx % (cv_end - cv_start);
3241 spin_lock_init(&ch->lock);
3242 INIT_LIST_HEAD(&ch->free_tx);
3243 ret = srp_new_cm_id(ch);
3244 if (ret)
3245 goto err_disconnect;
3246
3247 ret = srp_create_ch_ib(ch);
3248 if (ret)
3249 goto err_disconnect;
3250
3251 ret = srp_alloc_req_data(ch);
3252 if (ret)
3253 goto err_disconnect;
3254
3255 ret = srp_connect_ch(ch, multich);
3256 if (ret) {
3257 shost_printk(KERN_ERR, target->scsi_host,
3258 PFX "Connection %d/%d failed\n",
3259 ch_start + cpu_idx,
3260 target->ch_count);
3261 if (node_idx == 0 && cpu_idx == 0) {
3262 goto err_disconnect;
3263 } else {
3264 srp_free_ch_ib(target, ch);
3265 srp_free_req_data(target, ch);
3266 target->ch_count = ch - target->ch;
Bart Van Asschec257ea62015-07-31 14:13:22 -07003267 goto connected;
Bart Van Assched92c0da2014-10-06 17:14:36 +02003268 }
3269 }
3270
3271 multich = true;
3272 cpu_idx++;
3273 }
3274 node_idx++;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003275 }
3276
Bart Van Asschec257ea62015-07-31 14:13:22 -07003277connected:
Bart Van Assched92c0da2014-10-06 17:14:36 +02003278 target->scsi_host->nr_hw_queues = target->ch_count;
3279
Roland Dreieraef9ec32005-11-02 14:07:13 -08003280 ret = srp_add_target(host, target);
3281 if (ret)
3282 goto err_disconnect;
3283
Bart Van Assche34aa6542014-10-30 14:47:22 +01003284 if (target->state != SRP_TARGET_REMOVED) {
3285 shost_printk(KERN_DEBUG, target->scsi_host, PFX
3286 "new target: id_ext %016llx ioc_guid %016llx pkey %04x service_id %016llx sgid %pI6 dgid %pI6\n",
3287 be64_to_cpu(target->id_ext),
3288 be64_to_cpu(target->ioc_guid),
Bart Van Assche747fe002014-10-30 14:48:05 +01003289 be16_to_cpu(target->pkey),
Bart Van Assche34aa6542014-10-30 14:47:22 +01003290 be64_to_cpu(target->service_id),
Bart Van Assche747fe002014-10-30 14:48:05 +01003291 target->sgid.raw, target->orig_dgid.raw);
Bart Van Assche34aa6542014-10-30 14:47:22 +01003292 }
Bart Van Asschee7ffde02014-03-14 13:52:21 +01003293
Bart Van Assche2d7091b2014-03-14 13:52:45 +01003294 ret = count;
3295
3296out:
3297 mutex_unlock(&host->add_target_mutex);
Bart Van Assche34aa6542014-10-30 14:47:22 +01003298
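	/*
	 * Drop the reference obtained via scsi_host_get() above. If target
	 * port creation failed, also drop the reference taken by
	 * scsi_host_alloc() so that the SCSI host gets freed.
	 */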
3299 scsi_host_put(target->scsi_host);
Bart Van Asschebc44bd12015-08-14 11:01:09 -07003300 if (ret < 0)
3301 scsi_host_put(target->scsi_host);
Bart Van Assche34aa6542014-10-30 14:47:22 +01003302
Bart Van Assche2d7091b2014-03-14 13:52:45 +01003303 return ret;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003304
3305err_disconnect:
3306 srp_disconnect_target(target);
3307
Bart Van Assched92c0da2014-10-06 17:14:36 +02003308 for (i = 0; i < target->ch_count; i++) {
3309 ch = &target->ch[i];
3310 srp_free_ch_ib(target, ch);
3311 srp_free_req_data(target, ch);
3312 }
Roland Dreieraef9ec32005-11-02 14:07:13 -08003313
Bart Van Assched92c0da2014-10-06 17:14:36 +02003314 kfree(target->ch);
Bart Van Assche2d7091b2014-03-14 13:52:45 +01003315 goto out;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003316}
3317
Tony Jonesee959b02008-02-22 00:13:36 +01003318static DEVICE_ATTR(add_target, S_IWUSR, NULL, srp_create_target);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003319
Tony Jonesee959b02008-02-22 00:13:36 +01003320static ssize_t show_ibdev(struct device *dev, struct device_attribute *attr,
3321 char *buf)
Roland Dreieraef9ec32005-11-02 14:07:13 -08003322{
Tony Jonesee959b02008-02-22 00:13:36 +01003323 struct srp_host *host = container_of(dev, struct srp_host, dev);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003324
Greg Kroah-Hartman05321932008-03-06 00:13:36 +01003325 return sprintf(buf, "%s\n", host->srp_dev->dev->name);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003326}
3327
Tony Jonesee959b02008-02-22 00:13:36 +01003328static DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003329
Tony Jonesee959b02008-02-22 00:13:36 +01003330static ssize_t show_port(struct device *dev, struct device_attribute *attr,
3331 char *buf)
Roland Dreieraef9ec32005-11-02 14:07:13 -08003332{
Tony Jonesee959b02008-02-22 00:13:36 +01003333 struct srp_host *host = container_of(dev, struct srp_host, dev);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003334
3335 return sprintf(buf, "%d\n", host->port);
3336}
3337
Tony Jonesee959b02008-02-22 00:13:36 +01003338static DEVICE_ATTR(port, S_IRUGO, show_port, NULL);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003339
Roland Dreierf5358a12006-06-17 20:37:29 -07003340static struct srp_host *srp_add_port(struct srp_device *device, u8 port)
Roland Dreieraef9ec32005-11-02 14:07:13 -08003341{
3342 struct srp_host *host;
3343
3344 host = kzalloc(sizeof *host, GFP_KERNEL);
3345 if (!host)
3346 return NULL;
3347
3348 INIT_LIST_HEAD(&host->target_list);
Matthew Wilcoxb3589fd2006-06-17 20:37:30 -07003349 spin_lock_init(&host->target_lock);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003350 init_completion(&host->released);
Bart Van Assche2d7091b2014-03-14 13:52:45 +01003351 mutex_init(&host->add_target_mutex);
Greg Kroah-Hartman05321932008-03-06 00:13:36 +01003352 host->srp_dev = device;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003353 host->port = port;
3354
Tony Jonesee959b02008-02-22 00:13:36 +01003355 host->dev.class = &srp_class;
3356 host->dev.parent = device->dev->dma_device;
Kay Sieversd927e382009-01-06 10:44:39 -08003357 dev_set_name(&host->dev, "srp-%s-%d", device->dev->name, port);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003358
Tony Jonesee959b02008-02-22 00:13:36 +01003359 if (device_register(&host->dev))
Roland Dreierf5358a12006-06-17 20:37:29 -07003360 goto free_host;
Tony Jonesee959b02008-02-22 00:13:36 +01003361 if (device_create_file(&host->dev, &dev_attr_add_target))
Roland Dreieraef9ec32005-11-02 14:07:13 -08003362 goto err_class;
Tony Jonesee959b02008-02-22 00:13:36 +01003363 if (device_create_file(&host->dev, &dev_attr_ibdev))
Roland Dreieraef9ec32005-11-02 14:07:13 -08003364 goto err_class;
Tony Jonesee959b02008-02-22 00:13:36 +01003365 if (device_create_file(&host->dev, &dev_attr_port))
Roland Dreieraef9ec32005-11-02 14:07:13 -08003366 goto err_class;
3367
3368 return host;
3369
3370err_class:
Tony Jonesee959b02008-02-22 00:13:36 +01003371 device_unregister(&host->dev);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003372
Roland Dreierf5358a12006-06-17 20:37:29 -07003373free_host:
Roland Dreieraef9ec32005-11-02 14:07:13 -08003374 kfree(host);
3375
3376 return NULL;
3377}
3378
3379static void srp_add_one(struct ib_device *device)
3380{
Roland Dreierf5358a12006-06-17 20:37:29 -07003381 struct srp_device *srp_dev;
3382 struct ib_device_attr *dev_attr;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003383 struct srp_host *host;
Hal Rosenstock41390322015-06-29 09:57:00 -04003384 int mr_page_shift, p;
Bart Van Assche52ede082014-05-20 15:07:45 +02003385 u64 max_pages_per_mr;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003386
Roland Dreierf5358a12006-06-17 20:37:29 -07003387 dev_attr = kmalloc(sizeof *dev_attr, GFP_KERNEL);
3388 if (!dev_attr)
Sean Heftycf311cd2006-01-10 07:39:34 -08003389 return;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003390
Roland Dreierf5358a12006-06-17 20:37:29 -07003391 if (ib_query_device(device, dev_attr)) {
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00003392 pr_warn("Query device failed for %s\n", device->name);
Roland Dreierf5358a12006-06-17 20:37:29 -07003393 goto free_attr;
3394 }
3395
3396 srp_dev = kmalloc(sizeof *srp_dev, GFP_KERNEL);
3397 if (!srp_dev)
3398 goto free_attr;
3399
Bart Van Assched1b42892014-05-20 15:07:20 +02003400 srp_dev->has_fmr = (device->alloc_fmr && device->dealloc_fmr &&
3401 device->map_phys_fmr && device->unmap_fmr);
Bart Van Assche5cfb1782014-05-20 15:08:34 +02003402 srp_dev->has_fr = (dev_attr->device_cap_flags &
3403 IB_DEVICE_MEM_MGT_EXTENSIONS);
3404 if (!srp_dev->has_fmr && !srp_dev->has_fr)
3405 dev_warn(&device->dev, "neither FMR nor FR is supported\n");
3406
3407 srp_dev->use_fast_reg = (srp_dev->has_fr &&
3408 (!srp_dev->has_fmr || prefer_fr));
Bart Van Assched1b42892014-05-20 15:07:20 +02003409
Roland Dreierf5358a12006-06-17 20:37:29 -07003410 /*
3411 * Use the smallest page size supported by the HCA, down to a
David Dillow8f26c9f2011-01-14 19:45:50 -05003412 * minimum of 4096 bytes. We're unlikely to build large sglists
3413 * out of smaller entries.
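	 * For example, if the smallest page size advertised in
	 * dev_attr->page_size_cap is 4 KB, ffs() - 1 yields 12 and
	 * mr_page_size becomes 4096.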
Roland Dreierf5358a12006-06-17 20:37:29 -07003414 */
Bart Van Assche52ede082014-05-20 15:07:45 +02003415 mr_page_shift = max(12, ffs(dev_attr->page_size_cap) - 1);
3416 srp_dev->mr_page_size = 1 << mr_page_shift;
3417 srp_dev->mr_page_mask = ~((u64) srp_dev->mr_page_size - 1);
3418 max_pages_per_mr = dev_attr->max_mr_size;
3419 do_div(max_pages_per_mr, srp_dev->mr_page_size);
3420 srp_dev->max_pages_per_mr = min_t(u64, SRP_MAX_PAGES_PER_MR,
3421 max_pages_per_mr);
Bart Van Assche5cfb1782014-05-20 15:08:34 +02003422 if (srp_dev->use_fast_reg) {
3423 srp_dev->max_pages_per_mr =
3424 min_t(u32, srp_dev->max_pages_per_mr,
3425 dev_attr->max_fast_reg_page_list_len);
3426 }
Bart Van Assche52ede082014-05-20 15:07:45 +02003427 srp_dev->mr_max_size = srp_dev->mr_page_size *
3428 srp_dev->max_pages_per_mr;
Bart Van Assche5cfb1782014-05-20 15:08:34 +02003429 pr_debug("%s: mr_page_shift = %d, dev_attr->max_mr_size = %#llx, dev_attr->max_fast_reg_page_list_len = %u, max_pages_per_mr = %d, mr_max_size = %#x\n",
Bart Van Assche52ede082014-05-20 15:07:45 +02003430 device->name, mr_page_shift, dev_attr->max_mr_size,
Bart Van Assche5cfb1782014-05-20 15:08:34 +02003431 dev_attr->max_fast_reg_page_list_len,
Bart Van Assche52ede082014-05-20 15:07:45 +02003432 srp_dev->max_pages_per_mr, srp_dev->mr_max_size);
Roland Dreierf5358a12006-06-17 20:37:29 -07003433
3434 INIT_LIST_HEAD(&srp_dev->dev_list);
3435
3436 srp_dev->dev = device;
3437 srp_dev->pd = ib_alloc_pd(device);
3438 if (IS_ERR(srp_dev->pd))
3439 goto free_dev;
3440
3441 srp_dev->mr = ib_get_dma_mr(srp_dev->pd,
3442 IB_ACCESS_LOCAL_WRITE |
3443 IB_ACCESS_REMOTE_READ |
3444 IB_ACCESS_REMOTE_WRITE);
3445 if (IS_ERR(srp_dev->mr))
3446 goto err_pd;
3447
Hal Rosenstock41390322015-06-29 09:57:00 -04003448 for (p = rdma_start_port(device); p <= rdma_end_port(device); ++p) {
Roland Dreierf5358a12006-06-17 20:37:29 -07003449 host = srp_add_port(srp_dev, p);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003450 if (host)
Roland Dreierf5358a12006-06-17 20:37:29 -07003451 list_add_tail(&host->list, &srp_dev->dev_list);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003452 }
3453
Roland Dreierf5358a12006-06-17 20:37:29 -07003454 ib_set_client_data(device, &srp_client, srp_dev);
3455
3456 goto free_attr;
3457
3458err_pd:
3459 ib_dealloc_pd(srp_dev->pd);
3460
3461free_dev:
3462 kfree(srp_dev);
3463
3464free_attr:
3465 kfree(dev_attr);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003466}
3467
Haggai Eran7c1eb452015-07-30 17:50:14 +03003468static void srp_remove_one(struct ib_device *device, void *client_data)
Roland Dreieraef9ec32005-11-02 14:07:13 -08003469{
Roland Dreierf5358a12006-06-17 20:37:29 -07003470 struct srp_device *srp_dev;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003471 struct srp_host *host, *tmp_host;
Bart Van Asscheef6c49d2011-12-26 16:49:18 +00003472 struct srp_target_port *target;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003473
Haggai Eran7c1eb452015-07-30 17:50:14 +03003474 srp_dev = client_data;
Dotan Barak1fe0cb82013-06-12 15:20:36 +02003475 if (!srp_dev)
3476 return;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003477
Roland Dreierf5358a12006-06-17 20:37:29 -07003478 list_for_each_entry_safe(host, tmp_host, &srp_dev->dev_list, list) {
Tony Jonesee959b02008-02-22 00:13:36 +01003479 device_unregister(&host->dev);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003480 /*
3481 * Wait for the sysfs entry to go away, so that no new
3482 * target ports can be created.
3483 */
3484 wait_for_completion(&host->released);
3485
3486 /*
Bart Van Asscheef6c49d2011-12-26 16:49:18 +00003487 * Remove all target ports.
Roland Dreieraef9ec32005-11-02 14:07:13 -08003488 */
Matthew Wilcoxb3589fd2006-06-17 20:37:30 -07003489 spin_lock(&host->target_lock);
Bart Van Asscheef6c49d2011-12-26 16:49:18 +00003490 list_for_each_entry(target, &host->target_list, list)
3491 srp_queue_remove_work(target);
Matthew Wilcoxb3589fd2006-06-17 20:37:30 -07003492 spin_unlock(&host->target_lock);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003493
3494 /*
Bart Van Asschebcc05912014-07-09 15:57:26 +02003495 * Wait for tl_err and target port removal tasks.
Roland Dreieraef9ec32005-11-02 14:07:13 -08003496 */
Bart Van Asscheef6c49d2011-12-26 16:49:18 +00003497 flush_workqueue(system_long_wq);
Bart Van Asschebcc05912014-07-09 15:57:26 +02003498 flush_workqueue(srp_remove_wq);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003499
Roland Dreieraef9ec32005-11-02 14:07:13 -08003500 kfree(host);
3501 }
3502
Roland Dreierf5358a12006-06-17 20:37:29 -07003503 ib_dereg_mr(srp_dev->mr);
3504 ib_dealloc_pd(srp_dev->pd);
3505
3506 kfree(srp_dev);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003507}
3508
FUJITA Tomonori32368222007-06-27 16:33:12 +09003509static struct srp_function_template ib_srp_transport_functions = {
Bart Van Asscheed9b2262013-10-26 14:34:27 +02003510 .has_rport_state = true,
3511 .reset_timer_if_blocked = true,
Bart Van Asschea95cadb2013-10-26 14:37:17 +02003512 .reconnect_delay = &srp_reconnect_delay,
Bart Van Asscheed9b2262013-10-26 14:34:27 +02003513 .fast_io_fail_tmo = &srp_fast_io_fail_tmo,
3514 .dev_loss_tmo = &srp_dev_loss_tmo,
3515 .reconnect = srp_rport_reconnect,
Bart Van Asschedc1bdbd2011-09-16 20:41:13 +02003516 .rport_delete = srp_rport_delete,
Bart Van Asscheed9b2262013-10-26 14:34:27 +02003517 .terminate_rport_io = srp_terminate_io,
FUJITA Tomonori32368222007-06-27 16:33:12 +09003518};
3519
Roland Dreieraef9ec32005-11-02 14:07:13 -08003520static int __init srp_init_module(void)
3521{
3522 int ret;
3523
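	/*
	 * The driver stores struct srp_iu pointers in the wr_id field of its
	 * work requests, so a pointer must fit in that field.
	 */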
Bart Van Asschedcb4cb82010-11-26 13:22:48 -05003524 BUILD_BUG_ON(FIELD_SIZEOF(struct ib_wc, wr_id) < sizeof(void *));
Bart Van Asschedd5e6e32010-08-30 19:27:20 +00003525
David Dillow49248642011-01-14 18:23:24 -05003526 if (srp_sg_tablesize) {
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00003527 pr_warn("srp_sg_tablesize is deprecated, please use cmd_sg_entries\n");
David Dillow49248642011-01-14 18:23:24 -05003528 if (!cmd_sg_entries)
3529 cmd_sg_entries = srp_sg_tablesize;
3530 }
3531
3532 if (!cmd_sg_entries)
3533 cmd_sg_entries = SRP_DEF_SG_TABLESIZE;
3534
3535 if (cmd_sg_entries > 255) {
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00003536 pr_warn("Clamping cmd_sg_entries to 255\n");
David Dillow49248642011-01-14 18:23:24 -05003537 cmd_sg_entries = 255;
David Dillow1e89a192008-04-16 21:01:12 -07003538 }
3539
David Dillowc07d4242011-01-16 13:57:10 -05003540 if (!indirect_sg_entries)
3541 indirect_sg_entries = cmd_sg_entries;
3542 else if (indirect_sg_entries < cmd_sg_entries) {
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00003543 pr_warn("Bumping up indirect_sg_entries to match cmd_sg_entries (%u)\n",
3544 cmd_sg_entries);
David Dillowc07d4242011-01-16 13:57:10 -05003545 indirect_sg_entries = cmd_sg_entries;
3546 }
3547
Bart Van Asschebcc05912014-07-09 15:57:26 +02003548 srp_remove_wq = create_workqueue("srp_remove");
Wei Yongjunda05be22014-08-14 08:56:22 +08003549 if (!srp_remove_wq) {
3550 ret = -ENOMEM;
Bart Van Asschebcc05912014-07-09 15:57:26 +02003551 goto out;
3552 }
3553
3554 ret = -ENOMEM;
FUJITA Tomonori32368222007-06-27 16:33:12 +09003555 ib_srp_transport_template =
3556 srp_attach_transport(&ib_srp_transport_functions);
3557 if (!ib_srp_transport_template)
Bart Van Asschebcc05912014-07-09 15:57:26 +02003558 goto destroy_wq;
FUJITA Tomonori32368222007-06-27 16:33:12 +09003559
Roland Dreieraef9ec32005-11-02 14:07:13 -08003560 ret = class_register(&srp_class);
3561 if (ret) {
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00003562 pr_err("couldn't register class infiniband_srp\n");
Bart Van Asschebcc05912014-07-09 15:57:26 +02003563 goto release_tr;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003564 }
3565
Michael S. Tsirkinc1a0b232006-08-21 16:40:12 -07003566 ib_sa_register_client(&srp_sa_client);
3567
Roland Dreieraef9ec32005-11-02 14:07:13 -08003568 ret = ib_register_client(&srp_client);
3569 if (ret) {
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00003570 pr_err("couldn't register IB client\n");
Bart Van Asschebcc05912014-07-09 15:57:26 +02003571 goto unreg_sa;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003572 }
3573
Bart Van Asschebcc05912014-07-09 15:57:26 +02003574out:
3575 return ret;
3576
3577unreg_sa:
3578 ib_sa_unregister_client(&srp_sa_client);
3579 class_unregister(&srp_class);
3580
3581release_tr:
3582 srp_release_transport(ib_srp_transport_template);
3583
3584destroy_wq:
3585 destroy_workqueue(srp_remove_wq);
3586 goto out;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003587}
3588
3589static void __exit srp_cleanup_module(void)
3590{
3591 ib_unregister_client(&srp_client);
Michael S. Tsirkinc1a0b232006-08-21 16:40:12 -07003592 ib_sa_unregister_client(&srp_sa_client);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003593 class_unregister(&srp_class);
FUJITA Tomonori32368222007-06-27 16:33:12 +09003594 srp_release_transport(ib_srp_transport_template);
Bart Van Asschebcc05912014-07-09 15:57:26 +02003595 destroy_workqueue(srp_remove_wq);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003596}
3597
3598module_init(srp_init_module);
3599module_exit(srp_cleanup_module);