/*
 * Copyright (c) 2005 Cisco Systems.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/string.h>
#include <linux/parser.h>
#include <linux/random.h>
#include <linux/jiffies.h>
#include <rdma/ib_cache.h>

#include <linux/atomic.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_tcq.h>
#include <scsi/srp.h>
#include <scsi/scsi_transport_srp.h>

#include "ib_srp.h"

#define DRV_NAME	"ib_srp"
#define PFX		DRV_NAME ": "
#define DRV_VERSION	"1.0"
#define DRV_RELDATE	"July 1, 2013"

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("InfiniBand SCSI RDMA Protocol initiator");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_INFO(release_date, DRV_RELDATE);

static unsigned int srp_sg_tablesize;
static unsigned int cmd_sg_entries;
static unsigned int indirect_sg_entries;
static bool allow_ext_sg;
static bool prefer_fr;
static bool register_always;
static int topspin_workarounds = 1;

module_param(srp_sg_tablesize, uint, 0444);
MODULE_PARM_DESC(srp_sg_tablesize, "Deprecated name for cmd_sg_entries");

module_param(cmd_sg_entries, uint, 0444);
MODULE_PARM_DESC(cmd_sg_entries,
		 "Default number of gather/scatter entries in the SRP command (default is 12, max 255)");

module_param(indirect_sg_entries, uint, 0444);
MODULE_PARM_DESC(indirect_sg_entries,
		 "Default max number of gather/scatter entries (default is 12, max is " __stringify(SCSI_MAX_SG_CHAIN_SEGMENTS) ")");

module_param(allow_ext_sg, bool, 0444);
MODULE_PARM_DESC(allow_ext_sg,
		 "Default behavior when there are more than cmd_sg_entries S/G entries after mapping; fails the request when false (default false)");

module_param(topspin_workarounds, int, 0444);
MODULE_PARM_DESC(topspin_workarounds,
		 "Enable workarounds for Topspin/Cisco SRP target bugs if != 0");

module_param(prefer_fr, bool, 0444);
MODULE_PARM_DESC(prefer_fr,
		 "Whether to use fast registration if both FMR and fast registration are supported");

module_param(register_always, bool, 0444);
MODULE_PARM_DESC(register_always,
		 "Use memory registration even for contiguous memory regions");

static const struct kernel_param_ops srp_tmo_ops;

static int srp_reconnect_delay = 10;
module_param_cb(reconnect_delay, &srp_tmo_ops, &srp_reconnect_delay,
		S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(reconnect_delay, "Time between successive reconnect attempts");

static int srp_fast_io_fail_tmo = 15;
module_param_cb(fast_io_fail_tmo, &srp_tmo_ops, &srp_fast_io_fail_tmo,
		S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fast_io_fail_tmo,
		 "Number of seconds between the observation of a transport"
		 " layer error and failing all I/O. \"off\" means that this"
		 " functionality is disabled.");

static int srp_dev_loss_tmo = 600;
module_param_cb(dev_loss_tmo, &srp_tmo_ops, &srp_dev_loss_tmo,
		S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dev_loss_tmo,
		 "Maximum number of seconds that the SRP transport should"
		 " insulate transport layer errors. After this time has been"
		 " exceeded the SCSI host is removed. Should be"
		 " between 1 and " __stringify(SCSI_DEVICE_BLOCK_MAX_TIMEOUT)
		 " if fast_io_fail_tmo has not been set. \"off\" means that"
		 " this functionality is disabled.");

static unsigned ch_count;
module_param(ch_count, uint, 0444);
MODULE_PARM_DESC(ch_count,
		 "Number of RDMA channels to use for communication with an SRP target. Using more than one channel improves performance if the HCA supports multiple completion vectors. The default value is the minimum of four times the number of online CPU sockets and the number of completion vectors supported by the HCA.");

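/*
 * Example module load (hypothetical values): most of the parameters above
 * are read-only after load (mode 0444), while reconnect_delay,
 * fast_io_fail_tmo and dev_loss_tmo are additionally writable at runtime
 * via /sys/module/ib_srp/parameters/.
 *
 *   modprobe ib_srp cmd_sg_entries=32 prefer_fr=1 ch_count=2
 */
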
static void srp_add_one(struct ib_device *device);
static void srp_remove_one(struct ib_device *device);
static void srp_recv_completion(struct ib_cq *cq, void *ch_ptr);
static void srp_send_completion(struct ib_cq *cq, void *ch_ptr);
static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event);

static struct scsi_transport_template *ib_srp_transport_template;
static struct workqueue_struct *srp_remove_wq;

static struct ib_client srp_client = {
	.name   = "srp",
	.add    = srp_add_one,
	.remove = srp_remove_one
};

static struct ib_sa_client srp_sa_client;

static int srp_tmo_get(char *buffer, const struct kernel_param *kp)
{
	int tmo = *(int *)kp->arg;

	if (tmo >= 0)
		return sprintf(buffer, "%d", tmo);
	else
		return sprintf(buffer, "off");
}

static int srp_tmo_set(const char *val, const struct kernel_param *kp)
{
	int tmo, res;

	res = srp_parse_tmo(&tmo, val);
	if (res)
		goto out;

	if (kp->arg == &srp_reconnect_delay)
		res = srp_tmo_valid(tmo, srp_fast_io_fail_tmo,
				    srp_dev_loss_tmo);
	else if (kp->arg == &srp_fast_io_fail_tmo)
		res = srp_tmo_valid(srp_reconnect_delay, tmo, srp_dev_loss_tmo);
	else
		res = srp_tmo_valid(srp_reconnect_delay, srp_fast_io_fail_tmo,
				    tmo);
	if (res)
		goto out;
	*(int *)kp->arg = tmo;

out:
	return res;
}

static const struct kernel_param_ops srp_tmo_ops = {
	.get = srp_tmo_get,
	.set = srp_tmo_set,
};

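/*
 * The three timeouts are validated as a group: srp_tmo_set() passes the
 * value being updated to srp_tmo_valid() together with the current values
 * of the other two, so an update that would make the combination
 * inconsistent is rejected with the error returned by srp_tmo_valid().
 * Example runtime update (hypothetical value):
 *
 *   echo 20 > /sys/module/ib_srp/parameters/fast_io_fail_tmo
 */
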
static inline struct srp_target_port *host_to_target(struct Scsi_Host *host)
{
	return (struct srp_target_port *) host->hostdata;
}

static const char *srp_target_info(struct Scsi_Host *host)
{
	return host_to_target(host)->target_name;
}

static int srp_target_is_topspin(struct srp_target_port *target)
{
	static const u8 topspin_oui[3] = { 0x00, 0x05, 0xad };
	static const u8 cisco_oui[3]   = { 0x00, 0x1b, 0x0d };

	return topspin_workarounds &&
		(!memcmp(&target->ioc_guid, topspin_oui, sizeof topspin_oui) ||
		 !memcmp(&target->ioc_guid, cisco_oui, sizeof cisco_oui));
}

static struct srp_iu *srp_alloc_iu(struct srp_host *host, size_t size,
				   gfp_t gfp_mask,
				   enum dma_data_direction direction)
{
	struct srp_iu *iu;

	iu = kmalloc(sizeof *iu, gfp_mask);
	if (!iu)
		goto out;

	iu->buf = kzalloc(size, gfp_mask);
	if (!iu->buf)
		goto out_free_iu;

	iu->dma = ib_dma_map_single(host->srp_dev->dev, iu->buf, size,
				    direction);
	if (ib_dma_mapping_error(host->srp_dev->dev, iu->dma))
		goto out_free_buf;

	iu->size      = size;
	iu->direction = direction;

	return iu;

out_free_buf:
	kfree(iu->buf);
out_free_iu:
	kfree(iu);
out:
	return NULL;
}

static void srp_free_iu(struct srp_host *host, struct srp_iu *iu)
{
	if (!iu)
		return;

	ib_dma_unmap_single(host->srp_dev->dev, iu->dma, iu->size,
			    iu->direction);
	kfree(iu->buf);
	kfree(iu);
}

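/*
 * An information unit (IU) is a DMA-mapped buffer that carries a single
 * SRP request or response. IUs are allocated up front and kept in the
 * per-channel rx/tx rings (see srp_free_ch_ib() below), so the I/O hot
 * path does not have to allocate memory or set up DMA mappings.
 */
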
static void srp_qp_event(struct ib_event *event, void *context)
{
	pr_debug("QP event %s (%d)\n",
		 ib_event_msg(event->event), event->event);
}

static int srp_init_qp(struct srp_target_port *target,
		       struct ib_qp *qp)
{
	struct ib_qp_attr *attr;
	int ret;

	attr = kmalloc(sizeof *attr, GFP_KERNEL);
	if (!attr)
		return -ENOMEM;

	ret = ib_find_cached_pkey(target->srp_host->srp_dev->dev,
				  target->srp_host->port,
				  be16_to_cpu(target->pkey),
				  &attr->pkey_index);
	if (ret)
		goto out;

	attr->qp_state        = IB_QPS_INIT;
	attr->qp_access_flags = (IB_ACCESS_REMOTE_READ |
				 IB_ACCESS_REMOTE_WRITE);
	attr->port_num        = target->srp_host->port;

	ret = ib_modify_qp(qp, attr,
			   IB_QP_STATE		|
			   IB_QP_PKEY_INDEX	|
			   IB_QP_ACCESS_FLAGS	|
			   IB_QP_PORT);

out:
	kfree(attr);
	return ret;
}

static int srp_new_cm_id(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	struct ib_cm_id *new_cm_id;

	new_cm_id = ib_create_cm_id(target->srp_host->srp_dev->dev,
				    srp_cm_handler, ch);
	if (IS_ERR(new_cm_id))
		return PTR_ERR(new_cm_id);

	if (ch->cm_id)
		ib_destroy_cm_id(ch->cm_id);
	ch->cm_id = new_cm_id;
	ch->path.sgid = target->sgid;
	ch->path.dgid = target->orig_dgid;
	ch->path.pkey = target->pkey;
	ch->path.service_id = target->service_id;

	return 0;
}

static struct ib_fmr_pool *srp_alloc_fmr_pool(struct srp_target_port *target)
{
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_fmr_pool_param fmr_param;

	memset(&fmr_param, 0, sizeof(fmr_param));
	fmr_param.pool_size	    = target->scsi_host->can_queue;
	fmr_param.dirty_watermark   = fmr_param.pool_size / 4;
	fmr_param.cache		    = 1;
	fmr_param.max_pages_per_fmr = dev->max_pages_per_mr;
	fmr_param.page_shift	    = ilog2(dev->mr_page_size);
	fmr_param.access	    = (IB_ACCESS_LOCAL_WRITE |
				       IB_ACCESS_REMOTE_WRITE |
				       IB_ACCESS_REMOTE_READ);

	return ib_create_fmr_pool(dev->pd, &fmr_param);
}

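/*
 * The FMR pool above is sized to the SCSI host queue depth (can_queue) so
 * that every outstanding command can hold a mapping; the dirty watermark
 * makes the pool flush stale, cached mappings once a quarter of the pool
 * is waiting for an unmap.
 */
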
/**
 * srp_destroy_fr_pool() - free the resources owned by a pool
 * @pool: Fast registration pool to be destroyed.
 */
static void srp_destroy_fr_pool(struct srp_fr_pool *pool)
{
	int i;
	struct srp_fr_desc *d;

	if (!pool)
		return;

	for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
		if (d->frpl)
			ib_free_fast_reg_page_list(d->frpl);
		if (d->mr)
			ib_dereg_mr(d->mr);
	}
	kfree(pool);
}

/**
 * srp_create_fr_pool() - allocate and initialize a pool for fast registration
 * @device:            IB device to allocate fast registration descriptors for.
 * @pd:                Protection domain associated with the FR descriptors.
 * @pool_size:         Number of descriptors to allocate.
 * @max_page_list_len: Maximum fast registration work request page list length.
 */
static struct srp_fr_pool *srp_create_fr_pool(struct ib_device *device,
					      struct ib_pd *pd, int pool_size,
					      int max_page_list_len)
{
	struct srp_fr_pool *pool;
	struct srp_fr_desc *d;
	struct ib_mr *mr;
	struct ib_fast_reg_page_list *frpl;
	int i, ret = -EINVAL;

	if (pool_size <= 0)
		goto err;
	ret = -ENOMEM;
	pool = kzalloc(sizeof(struct srp_fr_pool) +
		       pool_size * sizeof(struct srp_fr_desc), GFP_KERNEL);
	if (!pool)
		goto err;
	pool->size = pool_size;
	pool->max_page_list_len = max_page_list_len;
	spin_lock_init(&pool->lock);
	INIT_LIST_HEAD(&pool->free_list);

	for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
		mr = ib_alloc_fast_reg_mr(pd, max_page_list_len);
		if (IS_ERR(mr)) {
			ret = PTR_ERR(mr);
			goto destroy_pool;
		}
		d->mr = mr;
		frpl = ib_alloc_fast_reg_page_list(device, max_page_list_len);
		if (IS_ERR(frpl)) {
			ret = PTR_ERR(frpl);
			goto destroy_pool;
		}
		d->frpl = frpl;
		list_add_tail(&d->entry, &pool->free_list);
	}

out:
	return pool;

destroy_pool:
	srp_destroy_fr_pool(pool);

err:
	pool = ERR_PTR(ret);
	goto out;
}

/**
 * srp_fr_pool_get() - obtain a descriptor suitable for fast registration
 * @pool: Pool to obtain descriptor from.
 */
static struct srp_fr_desc *srp_fr_pool_get(struct srp_fr_pool *pool)
{
	struct srp_fr_desc *d = NULL;
	unsigned long flags;

	spin_lock_irqsave(&pool->lock, flags);
	if (!list_empty(&pool->free_list)) {
		d = list_first_entry(&pool->free_list, typeof(*d), entry);
		list_del(&d->entry);
	}
	spin_unlock_irqrestore(&pool->lock, flags);

	return d;
}

/**
 * srp_fr_pool_put() - put an FR descriptor back in the free list
 * @pool: Pool the descriptor was allocated from.
 * @desc: Pointer to an array of fast registration descriptor pointers.
 * @n:    Number of descriptors to put back.
 *
 * Note: The caller must already have queued an invalidation request for
 * desc->mr->rkey before calling this function.
 */
static void srp_fr_pool_put(struct srp_fr_pool *pool, struct srp_fr_desc **desc,
			    int n)
{
	unsigned long flags;
	int i;

	spin_lock_irqsave(&pool->lock, flags);
	for (i = 0; i < n; i++)
		list_add(&desc[i]->entry, &pool->free_list);
	spin_unlock_irqrestore(&pool->lock, flags);
}

static struct srp_fr_pool *srp_alloc_fr_pool(struct srp_target_port *target)
{
	struct srp_device *dev = target->srp_host->srp_dev;

	return srp_create_fr_pool(dev->dev, dev->pd,
				  target->scsi_host->can_queue,
				  dev->max_pages_per_mr);
}

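/*
 * Lifecycle of an FR descriptor: srp_fr_pool_get() hands one out when a
 * request is mapped, the mapping becomes valid once an IB_WR_FAST_REG_MR
 * work request has been posted for it (see srp_map_finish_fr() below), and
 * after the command completes the rkey is invalidated before
 * srp_fr_pool_put() returns the descriptor to the free list.
 */
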
/**
 * srp_destroy_qp() - destroy an RDMA queue pair
 * @ch: SRP RDMA channel.
 *
 * Change a queue pair into the error state and wait until all receive
 * completions have been processed before destroying it. This ensures that
 * the receive completion handler can no longer access the queue pair while
 * it is being destroyed.
 */
static void srp_destroy_qp(struct srp_rdma_ch *ch)
{
	static struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
	static struct ib_recv_wr wr = { .wr_id = SRP_LAST_WR_ID };
	struct ib_recv_wr *bad_wr;
	int ret;

	/* Destroying a QP and reusing ch->done is only safe if not connected */
	WARN_ON_ONCE(ch->connected);

	ret = ib_modify_qp(ch->qp, &attr, IB_QP_STATE);
	WARN_ONCE(ret, "ib_modify_qp() returned %d\n", ret);
	if (ret)
		goto out;

	init_completion(&ch->done);
	ret = ib_post_recv(ch->qp, &wr, &bad_wr);
	WARN_ONCE(ret, "ib_post_recv() returned %d\n", ret);
	if (ret == 0)
		wait_for_completion(&ch->done);

out:
	ib_destroy_qp(ch->qp);
}

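/*
 * The drain above relies on the fact that a QP in the error state flushes
 * all posted work requests: the sentinel receive posted with
 * wr_id == SRP_LAST_WR_ID completes with a flush error after every earlier
 * receive, and the receive completion handler signals ch->done when it
 * encounters that wr_id.
 */
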
static int srp_create_ch_ib(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_qp_init_attr *init_attr;
	struct ib_cq *recv_cq, *send_cq;
	struct ib_qp *qp;
	struct ib_fmr_pool *fmr_pool = NULL;
	struct srp_fr_pool *fr_pool = NULL;
	const int m = 1 + dev->use_fast_reg;
	struct ib_cq_init_attr cq_attr = {};
	int ret;

	init_attr = kzalloc(sizeof *init_attr, GFP_KERNEL);
	if (!init_attr)
		return -ENOMEM;

	/* + 1 for SRP_LAST_WR_ID */
	cq_attr.cqe = target->queue_size + 1;
	cq_attr.comp_vector = ch->comp_vector;
	recv_cq = ib_create_cq(dev->dev, srp_recv_completion, NULL, ch,
			       &cq_attr);
	if (IS_ERR(recv_cq)) {
		ret = PTR_ERR(recv_cq);
		goto err;
	}

	cq_attr.cqe = m * target->queue_size;
	cq_attr.comp_vector = ch->comp_vector;
	send_cq = ib_create_cq(dev->dev, srp_send_completion, NULL, ch,
			       &cq_attr);
	if (IS_ERR(send_cq)) {
		ret = PTR_ERR(send_cq);
		goto err_recv_cq;
	}

	ib_req_notify_cq(recv_cq, IB_CQ_NEXT_COMP);

	init_attr->event_handler       = srp_qp_event;
	init_attr->cap.max_send_wr     = m * target->queue_size;
	init_attr->cap.max_recv_wr     = target->queue_size + 1;
	init_attr->cap.max_recv_sge    = 1;
	init_attr->cap.max_send_sge    = 1;
	init_attr->sq_sig_type         = IB_SIGNAL_REQ_WR;
	init_attr->qp_type             = IB_QPT_RC;
	init_attr->send_cq             = send_cq;
	init_attr->recv_cq             = recv_cq;

	qp = ib_create_qp(dev->pd, init_attr);
	if (IS_ERR(qp)) {
		ret = PTR_ERR(qp);
		goto err_send_cq;
	}

	ret = srp_init_qp(target, qp);
	if (ret)
		goto err_qp;

	if (dev->use_fast_reg && dev->has_fr) {
		fr_pool = srp_alloc_fr_pool(target);
		if (IS_ERR(fr_pool)) {
			ret = PTR_ERR(fr_pool);
			shost_printk(KERN_WARNING, target->scsi_host, PFX
				     "FR pool allocation failed (%d)\n", ret);
			goto err_qp;
		}
		if (ch->fr_pool)
			srp_destroy_fr_pool(ch->fr_pool);
		ch->fr_pool = fr_pool;
	} else if (!dev->use_fast_reg && dev->has_fmr) {
		fmr_pool = srp_alloc_fmr_pool(target);
		if (IS_ERR(fmr_pool)) {
			ret = PTR_ERR(fmr_pool);
			shost_printk(KERN_WARNING, target->scsi_host, PFX
				     "FMR pool allocation failed (%d)\n", ret);
			goto err_qp;
		}
		if (ch->fmr_pool)
			ib_destroy_fmr_pool(ch->fmr_pool);
		ch->fmr_pool = fmr_pool;
	}

	if (ch->qp)
		srp_destroy_qp(ch);
	if (ch->recv_cq)
		ib_destroy_cq(ch->recv_cq);
	if (ch->send_cq)
		ib_destroy_cq(ch->send_cq);

	ch->qp = qp;
	ch->recv_cq = recv_cq;
	ch->send_cq = send_cq;

	kfree(init_attr);
	return 0;

err_qp:
	ib_destroy_qp(qp);

err_send_cq:
	ib_destroy_cq(send_cq);

err_recv_cq:
	ib_destroy_cq(recv_cq);

err:
	kfree(init_attr);
	return ret;
}

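/*
 * Note on the sizing in srp_create_ch_ib(): m == 2 when fast registration
 * is in use because each SRP command may then be preceded by a memory
 * registration work request, doubling the worst-case depth of the send
 * queue and of the send completion queue.
 */
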
/*
 * Note: this function may be called without srp_alloc_iu_bufs() having been
 * invoked. Hence the ch->[rt]x_ring checks.
 */
static void srp_free_ch_ib(struct srp_target_port *target,
			   struct srp_rdma_ch *ch)
{
	struct srp_device *dev = target->srp_host->srp_dev;
	int i;

	if (!ch->target)
		return;

	if (ch->cm_id) {
		ib_destroy_cm_id(ch->cm_id);
		ch->cm_id = NULL;
	}

	/* If srp_new_cm_id() succeeded but srp_create_ch_ib() did not, return. */
	if (!ch->qp)
		return;

	if (dev->use_fast_reg) {
		if (ch->fr_pool)
			srp_destroy_fr_pool(ch->fr_pool);
	} else {
		if (ch->fmr_pool)
			ib_destroy_fmr_pool(ch->fmr_pool);
	}
	srp_destroy_qp(ch);
	ib_destroy_cq(ch->send_cq);
	ib_destroy_cq(ch->recv_cq);

	/*
	 * Prevent the SCSI error handler from using this channel after it
	 * has been freed: the error handler may keep trying to perform
	 * recovery actions even after scsi_remove_host() has returned.
	 */
	ch->target = NULL;

	ch->qp = NULL;
	ch->send_cq = ch->recv_cq = NULL;

	if (ch->rx_ring) {
		for (i = 0; i < target->queue_size; ++i)
			srp_free_iu(target->srp_host, ch->rx_ring[i]);
		kfree(ch->rx_ring);
		ch->rx_ring = NULL;
	}
	if (ch->tx_ring) {
		for (i = 0; i < target->queue_size; ++i)
			srp_free_iu(target->srp_host, ch->tx_ring[i]);
		kfree(ch->tx_ring);
		ch->tx_ring = NULL;
	}
}

static void srp_path_rec_completion(int status,
				    struct ib_sa_path_rec *pathrec,
				    void *ch_ptr)
{
	struct srp_rdma_ch *ch = ch_ptr;
	struct srp_target_port *target = ch->target;

	ch->status = status;
	if (status)
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "Got failed path rec status %d\n", status);
	else
		ch->path = *pathrec;
	complete(&ch->done);
}

static int srp_lookup_path(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	int ret;

	ch->path.numb_path = 1;

	init_completion(&ch->done);

	ch->path_query_id = ib_sa_path_rec_get(&srp_sa_client,
					       target->srp_host->srp_dev->dev,
					       target->srp_host->port,
					       &ch->path,
					       IB_SA_PATH_REC_SERVICE_ID |
					       IB_SA_PATH_REC_DGID	 |
					       IB_SA_PATH_REC_SGID	 |
					       IB_SA_PATH_REC_NUMB_PATH	 |
					       IB_SA_PATH_REC_PKEY,
					       SRP_PATH_REC_TIMEOUT_MS,
					       GFP_KERNEL,
					       srp_path_rec_completion,
					       ch, &ch->path_query);
	if (ch->path_query_id < 0)
		return ch->path_query_id;

	ret = wait_for_completion_interruptible(&ch->done);
	if (ret < 0)
		return ret;

	if (ch->status < 0)
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Path record query failed\n");

	return ch->status;
}

static int srp_send_req(struct srp_rdma_ch *ch, bool multich)
{
	struct srp_target_port *target = ch->target;
	struct {
		struct ib_cm_req_param param;
		struct srp_login_req   priv;
	} *req = NULL;
	int status;

	req = kzalloc(sizeof *req, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	req->param.primary_path	      = &ch->path;
	req->param.alternate_path     = NULL;
	req->param.service_id	      = target->service_id;
	req->param.qp_num	      = ch->qp->qp_num;
	req->param.qp_type	      = ch->qp->qp_type;
	req->param.private_data	      = &req->priv;
	req->param.private_data_len   = sizeof req->priv;
	req->param.flow_control	      = 1;

	get_random_bytes(&req->param.starting_psn, 4);
	req->param.starting_psn      &= 0xffffff;

	/*
	 * Pick some arbitrary defaults here; we could make these
	 * module parameters if anyone cared about setting them.
	 */
	req->param.responder_resources	      = 4;
	req->param.remote_cm_response_timeout = 20;
	req->param.local_cm_response_timeout  = 20;
	req->param.retry_count		      = target->tl_retry_count;
	req->param.rnr_retry_count	      = 7;
	req->param.max_cm_retries	      = 15;

	req->priv.opcode	= SRP_LOGIN_REQ;
	req->priv.tag		= 0;
	req->priv.req_it_iu_len = cpu_to_be32(target->max_iu_len);
	req->priv.req_buf_fmt	= cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
					      SRP_BUF_FORMAT_INDIRECT);
	req->priv.req_flags	= (multich ? SRP_MULTICHAN_MULTI :
				   SRP_MULTICHAN_SINGLE);
	/*
	 * In the published SRP specification (draft rev. 16a), the
	 * port identifier format is 8 bytes of ID extension followed
	 * by 8 bytes of GUID. Older drafts put the two halves in the
	 * opposite order, so that the GUID comes first.
	 *
	 * Targets conforming to these obsolete drafts can be
	 * recognized by the I/O Class they report.
	 */
	if (target->io_class == SRP_REV10_IB_IO_CLASS) {
		memcpy(req->priv.initiator_port_id,
		       &target->sgid.global.interface_id, 8);
		memcpy(req->priv.initiator_port_id + 8,
		       &target->initiator_ext, 8);
		memcpy(req->priv.target_port_id,     &target->ioc_guid, 8);
		memcpy(req->priv.target_port_id + 8, &target->id_ext, 8);
	} else {
		memcpy(req->priv.initiator_port_id,
		       &target->initiator_ext, 8);
		memcpy(req->priv.initiator_port_id + 8,
		       &target->sgid.global.interface_id, 8);
		memcpy(req->priv.target_port_id,     &target->id_ext, 8);
		memcpy(req->priv.target_port_id + 8, &target->ioc_guid, 8);
	}

	/*
	 * Topspin/Cisco SRP targets will reject our login unless we
	 * zero out the first 8 bytes of our initiator port ID and set
	 * the second 8 bytes to the local node GUID.
	 */
	if (srp_target_is_topspin(target)) {
		shost_printk(KERN_DEBUG, target->scsi_host,
			     PFX "Topspin/Cisco initiator port ID workaround "
			     "activated for target GUID %016llx\n",
			     be64_to_cpu(target->ioc_guid));
		memset(req->priv.initiator_port_id, 0, 8);
		memcpy(req->priv.initiator_port_id + 8,
		       &target->srp_host->srp_dev->dev->node_guid, 8);
	}

	status = ib_send_cm_req(ch->cm_id, &req->param);

	kfree(req);

	return status;
}

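/*
 * The SRP login request built above travels as private data inside the
 * IB CM REQ: the CM exchange establishes the RC connection while the
 * target parses req->priv to negotiate the initiator IU size, the buffer
 * formats and single- versus multi-channel mode.
 */
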
static bool srp_queue_remove_work(struct srp_target_port *target)
{
	bool changed = false;

	spin_lock_irq(&target->lock);
	if (target->state != SRP_TARGET_REMOVED) {
		target->state = SRP_TARGET_REMOVED;
		changed = true;
	}
	spin_unlock_irq(&target->lock);

	if (changed)
		queue_work(srp_remove_wq, &target->remove_work);

	return changed;
}

static void srp_disconnect_target(struct srp_target_port *target)
{
	struct srp_rdma_ch *ch;
	int i;

	/* XXX should send SRP_I_LOGOUT request */

	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		ch->connected = false;
		if (ch->cm_id && ib_send_cm_dreq(ch->cm_id, NULL, 0)) {
			shost_printk(KERN_DEBUG, target->scsi_host,
				     PFX "Sending CM DREQ failed\n");
		}
	}
}

static void srp_free_req_data(struct srp_target_port *target,
			      struct srp_rdma_ch *ch)
{
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_device *ibdev = dev->dev;
	struct srp_request *req;
	int i;

	if (!ch->req_ring)
		return;

	for (i = 0; i < target->req_ring_size; ++i) {
		req = &ch->req_ring[i];
		if (dev->use_fast_reg)
			kfree(req->fr_list);
		else
			kfree(req->fmr_list);
		kfree(req->map_page);
		if (req->indirect_dma_addr) {
			ib_dma_unmap_single(ibdev, req->indirect_dma_addr,
					    target->indirect_size,
					    DMA_TO_DEVICE);
		}
		kfree(req->indirect_desc);
	}

	kfree(ch->req_ring);
	ch->req_ring = NULL;
}

static int srp_alloc_req_data(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	struct srp_device *srp_dev = target->srp_host->srp_dev;
	struct ib_device *ibdev = srp_dev->dev;
	struct srp_request *req;
	void *mr_list;
	dma_addr_t dma_addr;
	int i, ret = -ENOMEM;

	ch->req_ring = kcalloc(target->req_ring_size, sizeof(*ch->req_ring),
			       GFP_KERNEL);
	if (!ch->req_ring)
		goto out;

	for (i = 0; i < target->req_ring_size; ++i) {
		req = &ch->req_ring[i];
		mr_list = kmalloc(target->cmd_sg_cnt * sizeof(void *),
				  GFP_KERNEL);
		if (!mr_list)
			goto out;
		if (srp_dev->use_fast_reg)
			req->fr_list = mr_list;
		else
			req->fmr_list = mr_list;
		req->map_page = kmalloc(srp_dev->max_pages_per_mr *
					sizeof(void *), GFP_KERNEL);
		if (!req->map_page)
			goto out;
		req->indirect_desc = kmalloc(target->indirect_size, GFP_KERNEL);
		if (!req->indirect_desc)
			goto out;

		dma_addr = ib_dma_map_single(ibdev, req->indirect_desc,
					     target->indirect_size,
					     DMA_TO_DEVICE);
		if (ib_dma_mapping_error(ibdev, dma_addr))
			goto out;

		req->indirect_dma_addr = dma_addr;
	}
	ret = 0;

out:
	return ret;
}

/**
 * srp_del_scsi_host_attr() - Remove attributes defined in the host template.
 * @shost: SCSI host whose attributes to remove from sysfs.
 *
 * Note: Any attributes defined in the host template that did not exist
 * before invocation of this function will be ignored.
 */
static void srp_del_scsi_host_attr(struct Scsi_Host *shost)
{
	struct device_attribute **attr;

	for (attr = shost->hostt->shost_attrs; attr && *attr; ++attr)
		device_remove_file(&shost->shost_dev, *attr);
}

static void srp_remove_target(struct srp_target_port *target)
{
	struct srp_rdma_ch *ch;
	int i;

	WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);

	srp_del_scsi_host_attr(target->scsi_host);
	srp_rport_get(target->rport);
	srp_remove_host(target->scsi_host);
	scsi_remove_host(target->scsi_host);
	srp_stop_rport_timers(target->rport);
	srp_disconnect_target(target);
	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		srp_free_ch_ib(target, ch);
	}
	cancel_work_sync(&target->tl_err_work);
	srp_rport_put(target->rport);
	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		srp_free_req_data(target, ch);
	}
	kfree(target->ch);
	target->ch = NULL;

	spin_lock(&target->srp_host->target_lock);
	list_del(&target->list);
	spin_unlock(&target->srp_host->target_lock);

	scsi_host_put(target->scsi_host);
}

static void srp_remove_work(struct work_struct *work)
{
	struct srp_target_port *target =
		container_of(work, struct srp_target_port, remove_work);

	WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);

	srp_remove_target(target);
}

static void srp_rport_delete(struct srp_rport *rport)
{
	struct srp_target_port *target = rport->lld_data;

	srp_queue_remove_work(target);
}

/**
 * srp_connected_ch() - number of connected channels
 * @target: SRP target port.
 */
static int srp_connected_ch(struct srp_target_port *target)
{
	int i, c = 0;

	for (i = 0; i < target->ch_count; i++)
		c += target->ch[i].connected;

	return c;
}

static int srp_connect_ch(struct srp_rdma_ch *ch, bool multich)
{
	struct srp_target_port *target = ch->target;
	int ret;

	WARN_ON_ONCE(!multich && srp_connected_ch(target) > 0);

	ret = srp_lookup_path(ch);
	if (ret)
		return ret;

	while (1) {
		init_completion(&ch->done);
		ret = srp_send_req(ch, multich);
		if (ret)
			return ret;
		ret = wait_for_completion_interruptible(&ch->done);
		if (ret < 0)
			return ret;

		/*
		 * The CM event handling code will set status to
		 * SRP_PORT_REDIRECT if we get a port redirect REJ
		 * back, or SRP_DLID_REDIRECT if we get a lid/qp
		 * redirect REJ back.
		 */
		switch (ch->status) {
		case 0:
			ch->connected = true;
			return 0;

		case SRP_PORT_REDIRECT:
			ret = srp_lookup_path(ch);
			if (ret)
				return ret;
			break;

		case SRP_DLID_REDIRECT:
			break;

		case SRP_STALE_CONN:
			shost_printk(KERN_ERR, target->scsi_host, PFX
				     "giving up on stale connection\n");
			ch->status = -ECONNRESET;
			return ch->status;

		default:
			return ch->status;
		}
	}
}

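/*
 * srp_connect_ch() loops because a login rejection carrying a redirect is
 * not fatal: a port redirect triggers a fresh path lookup, a DLID redirect
 * simply resends the login, and only a stale connection or an unrecognized
 * status ends the loop with an error.
 */
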
static int srp_inv_rkey(struct srp_rdma_ch *ch, u32 rkey)
{
	struct ib_send_wr *bad_wr;
	struct ib_send_wr wr = {
		.opcode		    = IB_WR_LOCAL_INV,
		.wr_id		    = LOCAL_INV_WR_ID_MASK,
		.next		    = NULL,
		.num_sge	    = 0,
		.send_flags	    = 0,
		.ex.invalidate_rkey = rkey,
	};

	return ib_post_send(ch->qp, &wr, &bad_wr);
}

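/*
 * IB_WR_LOCAL_INV queues an asynchronous invalidation of an FR rkey on the
 * send queue; once it executes, the target can no longer access the memory
 * registered under that rkey, which is why srp_unmap_data() below queues
 * these invalidations before returning descriptors to the pool.
 */
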
static void srp_unmap_data(struct scsi_cmnd *scmnd,
			   struct srp_rdma_ch *ch,
			   struct srp_request *req)
{
	struct srp_target_port *target = ch->target;
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_device *ibdev = dev->dev;
	int i, res;

	if (!scsi_sglist(scmnd) ||
	    (scmnd->sc_data_direction != DMA_TO_DEVICE &&
	     scmnd->sc_data_direction != DMA_FROM_DEVICE))
		return;

	if (dev->use_fast_reg) {
		struct srp_fr_desc **pfr;

		for (i = req->nmdesc, pfr = req->fr_list; i > 0; i--, pfr++) {
			res = srp_inv_rkey(ch, (*pfr)->mr->rkey);
			if (res < 0) {
				shost_printk(KERN_ERR, target->scsi_host, PFX
					     "Queueing INV WR for rkey %#x failed (%d)\n",
					     (*pfr)->mr->rkey, res);
				queue_work(system_long_wq,
					   &target->tl_err_work);
			}
		}
		if (req->nmdesc)
			srp_fr_pool_put(ch->fr_pool, req->fr_list,
					req->nmdesc);
	} else {
		struct ib_pool_fmr **pfmr;

		for (i = req->nmdesc, pfmr = req->fmr_list; i > 0; i--, pfmr++)
			ib_fmr_pool_unmap(*pfmr);
	}

	ib_dma_unmap_sg(ibdev, scsi_sglist(scmnd), scsi_sg_count(scmnd),
			scmnd->sc_data_direction);
}

/**
 * srp_claim_req() - Take ownership of the scmnd associated with a request.
 * @ch: SRP RDMA channel.
 * @req: SRP request.
 * @sdev: If not NULL, only take ownership for this SCSI device.
 * @scmnd: If NULL, take ownership of @req->scmnd. If not NULL, only take
 *         ownership of @req->scmnd if it equals @scmnd.
 *
 * Return value:
 * Either NULL or a pointer to the SCSI command the caller became owner of.
 */
static struct scsi_cmnd *srp_claim_req(struct srp_rdma_ch *ch,
				       struct srp_request *req,
				       struct scsi_device *sdev,
				       struct scsi_cmnd *scmnd)
{
	unsigned long flags;

	spin_lock_irqsave(&ch->lock, flags);
	if (req->scmnd &&
	    (!sdev || req->scmnd->device == sdev) &&
	    (!scmnd || req->scmnd == scmnd)) {
		scmnd = req->scmnd;
		req->scmnd = NULL;
	} else {
		scmnd = NULL;
	}
	spin_unlock_irqrestore(&ch->lock, flags);

	return scmnd;
}

/**
 * srp_free_req() - Unmap data and add request to the free request list.
 * @ch: SRP RDMA channel.
 * @req: Request to be freed.
 * @scmnd: SCSI command associated with @req.
 * @req_lim_delta: Amount to be added to @target->req_lim.
 */
static void srp_free_req(struct srp_rdma_ch *ch, struct srp_request *req,
			 struct scsi_cmnd *scmnd, s32 req_lim_delta)
{
	unsigned long flags;

	srp_unmap_data(scmnd, ch, req);

	spin_lock_irqsave(&ch->lock, flags);
	ch->req_lim += req_lim_delta;
	spin_unlock_irqrestore(&ch->lock, flags);
}

static void srp_finish_req(struct srp_rdma_ch *ch, struct srp_request *req,
			   struct scsi_device *sdev, int result)
{
	struct scsi_cmnd *scmnd = srp_claim_req(ch, req, sdev, NULL);

	if (scmnd) {
		srp_free_req(ch, req, scmnd, 0);
		scmnd->result = result;
		scmnd->scsi_done(scmnd);
	}
}

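/*
 * The three helpers above form a small ownership protocol: only one caller
 * can win req->scmnd via srp_claim_req() because the test-and-clear happens
 * under ch->lock. This is what prevents the normal completion path and the
 * SCSI error handler from completing the same command twice.
 */
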
static void srp_terminate_io(struct srp_rport *rport)
{
	struct srp_target_port *target = rport->lld_data;
	struct srp_rdma_ch *ch;
	struct Scsi_Host *shost = target->scsi_host;
	struct scsi_device *sdev;
	int i, j;

	/*
	 * Invoking srp_terminate_io() while srp_queuecommand() is running
	 * is not safe. Hence the warning statement below.
	 */
	shost_for_each_device(sdev, shost)
		WARN_ON_ONCE(sdev->request_queue->request_fn_active);

	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];

		for (j = 0; j < target->req_ring_size; ++j) {
			struct srp_request *req = &ch->req_ring[j];

			srp_finish_req(ch, req, NULL,
				       DID_TRANSPORT_FAILFAST << 16);
		}
	}
}

/*
 * It is up to the caller to ensure that srp_rport_reconnect() calls are
 * serialized and that no concurrent srp_queuecommand(), srp_abort(),
 * srp_reset_device() or srp_reset_host() calls will occur while this
 * function is in progress. One way to achieve that is not to call this
 * function directly but to call srp_reconnect_rport() instead, since that
 * function serializes calls of this function via rport->mutex and also
 * blocks srp_queuecommand() calls before invoking this function.
 */
static int srp_rport_reconnect(struct srp_rport *rport)
{
	struct srp_target_port *target = rport->lld_data;
	struct srp_rdma_ch *ch;
	int i, j, ret = 0;
	bool multich = false;

	srp_disconnect_target(target);

	if (target->state == SRP_TARGET_SCANNING)
		return -ENODEV;

	/*
	 * Now get a new local CM ID so that we avoid confusing the target in
	 * case things are really fouled up. Doing so also ensures that all CM
	 * callbacks will have finished before a new QP is allocated.
	 */
	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		ret += srp_new_cm_id(ch);
	}
	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		for (j = 0; j < target->req_ring_size; ++j) {
			struct srp_request *req = &ch->req_ring[j];

			srp_finish_req(ch, req, NULL, DID_RESET << 16);
		}
	}
	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		/*
		 * Whether or not creating a new CM ID succeeded, create a new
		 * QP. This guarantees that all completion callback function
		 * invocations have finished before request resetting starts.
		 */
		ret += srp_create_ch_ib(ch);

		INIT_LIST_HEAD(&ch->free_tx);
		for (j = 0; j < target->queue_size; ++j)
			list_add(&ch->tx_ring[j]->list, &ch->free_tx);
	}

	target->qp_in_error = false;

	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		if (ret)
			break;
		ret = srp_connect_ch(ch, multich);
		multich = true;
	}

	if (ret == 0)
		shost_printk(KERN_INFO, target->scsi_host,
			     PFX "reconnect succeeded\n");

	return ret;
}

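/*
 * Note the error accumulation pattern in srp_rport_reconnect(): failures
 * from srp_new_cm_id() and srp_create_ch_ib() are summed into ret instead
 * of aborting immediately, so every channel is torn down and rebuilt
 * before reconnecting, and a nonzero sum skips the srp_connect_ch() loop.
 */
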
static void srp_map_desc(struct srp_map_state *state, dma_addr_t dma_addr,
			 unsigned int dma_len, u32 rkey)
{
	struct srp_direct_buf *desc = state->desc;

	desc->va = cpu_to_be64(dma_addr);
	desc->key = cpu_to_be32(rkey);
	desc->len = cpu_to_be32(dma_len);

	state->total_len += dma_len;
	state->desc++;
	state->ndesc++;
}

1271static int srp_map_finish_fmr(struct srp_map_state *state,
Bart Van Assche509c07b2014-10-30 14:48:30 +01001272 struct srp_rdma_ch *ch)
David Dillow8f26c9f2011-01-14 19:45:50 -05001273{
David Dillow8f26c9f2011-01-14 19:45:50 -05001274 struct ib_pool_fmr *fmr;
Roland Dreierf5358a12006-06-17 20:37:29 -07001275 u64 io_addr = 0;
David Dillow8f26c9f2011-01-14 19:45:50 -05001276
Bart Van Assche509c07b2014-10-30 14:48:30 +01001277 fmr = ib_fmr_pool_map_phys(ch->fmr_pool, state->pages,
David Dillow8f26c9f2011-01-14 19:45:50 -05001278 state->npages, io_addr);
1279 if (IS_ERR(fmr))
1280 return PTR_ERR(fmr);
1281
1282 *state->next_fmr++ = fmr;
Bart Van Assche52ede082014-05-20 15:07:45 +02001283 state->nmdesc++;
David Dillow8f26c9f2011-01-14 19:45:50 -05001284
Bart Van Assche52ede082014-05-20 15:07:45 +02001285 srp_map_desc(state, 0, state->dma_len, fmr->fmr->rkey);
Bart Van Assche539dde62014-05-20 15:05:46 +02001286
David Dillow8f26c9f2011-01-14 19:45:50 -05001287 return 0;
1288}
1289
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001290static int srp_map_finish_fr(struct srp_map_state *state,
Bart Van Assche509c07b2014-10-30 14:48:30 +01001291 struct srp_rdma_ch *ch)
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001292{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001293 struct srp_target_port *target = ch->target;
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001294 struct srp_device *dev = target->srp_host->srp_dev;
1295 struct ib_send_wr *bad_wr;
1296 struct ib_send_wr wr;
1297 struct srp_fr_desc *desc;
1298 u32 rkey;
1299
Bart Van Assche509c07b2014-10-30 14:48:30 +01001300 desc = srp_fr_pool_get(ch->fr_pool);
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001301 if (!desc)
1302 return -ENOMEM;
1303
1304 rkey = ib_inc_rkey(desc->mr->rkey);
1305 ib_update_fast_reg_key(desc->mr, rkey);
1306
1307 memcpy(desc->frpl->page_list, state->pages,
1308 sizeof(state->pages[0]) * state->npages);
1309
1310 memset(&wr, 0, sizeof(wr));
1311 wr.opcode = IB_WR_FAST_REG_MR;
1312 wr.wr_id = FAST_REG_WR_ID_MASK;
1313 wr.wr.fast_reg.iova_start = state->base_dma_addr;
1314 wr.wr.fast_reg.page_list = desc->frpl;
1315 wr.wr.fast_reg.page_list_len = state->npages;
1316 wr.wr.fast_reg.page_shift = ilog2(dev->mr_page_size);
1317 wr.wr.fast_reg.length = state->dma_len;
1318 wr.wr.fast_reg.access_flags = (IB_ACCESS_LOCAL_WRITE |
1319 IB_ACCESS_REMOTE_READ |
1320 IB_ACCESS_REMOTE_WRITE);
1321 wr.wr.fast_reg.rkey = desc->mr->lkey;
1322
1323 *state->next_fr++ = desc;
1324 state->nmdesc++;
1325
1326 srp_map_desc(state, state->base_dma_addr, state->dma_len,
1327 desc->mr->rkey);
1328
Bart Van Assche509c07b2014-10-30 14:48:30 +01001329 return ib_post_send(ch->qp, &wr, &bad_wr);
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001330}
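/*
 * Illustrative rkey arithmetic: ib_inc_rkey() above only bumps the low
 * eight "key" bits, e.g. 0x00010203 becomes 0x00010204 and 0x000102ff
 * wraps to 0x00010200, while the upper 24 bits that identify the MR are
 * preserved. Re-keying on every reuse invalidates any stale rkey that a
 * target might still hold from a previous mapping.
 */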
1331
Bart Van Assche539dde62014-05-20 15:05:46 +02001332static int srp_finish_mapping(struct srp_map_state *state,
Bart Van Assche509c07b2014-10-30 14:48:30 +01001333 struct srp_rdma_ch *ch)
Bart Van Assche539dde62014-05-20 15:05:46 +02001334{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001335 struct srp_target_port *target = ch->target;
Bart Van Assche539dde62014-05-20 15:05:46 +02001336 int ret = 0;
1337
1338 if (state->npages == 0)
1339 return 0;
1340
Bart Van Asscheb1b88542014-05-20 15:06:41 +02001341 if (state->npages == 1 && !register_always)
Bart Van Assche52ede082014-05-20 15:07:45 +02001342 srp_map_desc(state, state->base_dma_addr, state->dma_len,
Bart Van Assche539dde62014-05-20 15:05:46 +02001343 target->rkey);
1344 else
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001345 ret = target->srp_host->srp_dev->use_fast_reg ?
Bart Van Assche509c07b2014-10-30 14:48:30 +01001346 srp_map_finish_fr(state, ch) :
1347 srp_map_finish_fmr(state, ch);
Bart Van Assche539dde62014-05-20 15:05:46 +02001348
1349 if (ret == 0) {
1350 state->npages = 0;
Bart Van Assche52ede082014-05-20 15:07:45 +02001351 state->dma_len = 0;
Bart Van Assche539dde62014-05-20 15:05:46 +02001352 }
1353
1354 return ret;
1355}
1356
David Dillow8f26c9f2011-01-14 19:45:50 -05001357static void srp_map_update_start(struct srp_map_state *state,
1358 struct scatterlist *sg, int sg_index,
1359 dma_addr_t dma_addr)
1360{
1361 state->unmapped_sg = sg;
1362 state->unmapped_index = sg_index;
1363 state->unmapped_addr = dma_addr;
1364}
1365
1366static int srp_map_sg_entry(struct srp_map_state *state,
Bart Van Assche509c07b2014-10-30 14:48:30 +01001367 struct srp_rdma_ch *ch,
David Dillow8f26c9f2011-01-14 19:45:50 -05001368 struct scatterlist *sg, int sg_index,
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001369 bool use_mr)
David Dillow8f26c9f2011-01-14 19:45:50 -05001370{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001371 struct srp_target_port *target = ch->target;
Greg Kroah-Hartman05321932008-03-06 00:13:36 +01001372 struct srp_device *dev = target->srp_host->srp_dev;
Ralph Campbell85507bc2006-12-12 14:30:55 -08001373 struct ib_device *ibdev = dev->dev;
David Dillow8f26c9f2011-01-14 19:45:50 -05001374 dma_addr_t dma_addr = ib_sg_dma_address(ibdev, sg);
1375 unsigned int dma_len = ib_sg_dma_len(ibdev, sg);
1376 unsigned int len;
1377 int ret;
Roland Dreierf5358a12006-06-17 20:37:29 -07001378
David Dillow8f26c9f2011-01-14 19:45:50 -05001379 if (!dma_len)
1380 return 0;
Roland Dreierf5358a12006-06-17 20:37:29 -07001381
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001382 if (!use_mr) {
1383 /*
1384 * Once we're in direct map mode for a request, we don't
1385 * go back to FMR or FR mode, so no need to update anything
David Dillow8f26c9f2011-01-14 19:45:50 -05001386 * other than the descriptor.
1387 */
1388 srp_map_desc(state, dma_addr, dma_len, target->rkey);
1389 return 0;
1390 }
Ishai Rabinovitz559ce8f2006-08-03 10:35:43 -07001391
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001392 /*
1393 * Since not all RDMA HW drivers support non-zero page offsets for
1394 * FMR, if we start at an offset into a page, don't merge into the
1395 * current FMR mapping. Finish it out, and use the kernel's MR for
1396 * this sg entry.
David Dillow8f26c9f2011-01-14 19:45:50 -05001397 */
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001398 if ((!dev->use_fast_reg && dma_addr & ~dev->mr_page_mask) ||
1399 dma_len > dev->mr_max_size) {
Bart Van Assche509c07b2014-10-30 14:48:30 +01001400 ret = srp_finish_mapping(state, ch);
David Dillow8f26c9f2011-01-14 19:45:50 -05001401 if (ret)
1402 return ret;
Ralph Campbell85507bc2006-12-12 14:30:55 -08001403
David Dillow8f26c9f2011-01-14 19:45:50 -05001404 srp_map_desc(state, dma_addr, dma_len, target->rkey);
1405 srp_map_update_start(state, NULL, 0, 0);
1406 return 0;
1407 }
1408
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001409 /*
1410 * If this is the first sg that will be mapped via FMR or via FR, save
1411 * our position. We need to know the first unmapped entry, its index,
1412 * and the first unmapped address within that entry to be able to
1413 * restart mapping after an error.
David Dillow8f26c9f2011-01-14 19:45:50 -05001414 */
1415 if (!state->unmapped_sg)
1416 srp_map_update_start(state, sg, sg_index, dma_addr);
1417
1418 while (dma_len) {
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001419 unsigned offset = dma_addr & ~dev->mr_page_mask;
1420 if (state->npages == dev->max_pages_per_mr || offset != 0) {
Bart Van Assche509c07b2014-10-30 14:48:30 +01001421 ret = srp_finish_mapping(state, ch);
David Dillow8f26c9f2011-01-14 19:45:50 -05001422 if (ret)
1423 return ret;
1424
1425 srp_map_update_start(state, sg, sg_index, dma_addr);
Roland Dreierf5358a12006-06-17 20:37:29 -07001426 }
1427
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001428 len = min_t(unsigned int, dma_len, dev->mr_page_size - offset);
David Dillow8f26c9f2011-01-14 19:45:50 -05001429
1430 if (!state->npages)
1431 state->base_dma_addr = dma_addr;
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001432 state->pages[state->npages++] = dma_addr & dev->mr_page_mask;
Bart Van Assche52ede082014-05-20 15:07:45 +02001433 state->dma_len += len;
David Dillow8f26c9f2011-01-14 19:45:50 -05001434 dma_addr += len;
1435 dma_len -= len;
Roland Dreierf5358a12006-06-17 20:37:29 -07001436 }
1437
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001438 /*
1439 * If the last entry of the MR wasn't a full page, then we need to
David Dillow8f26c9f2011-01-14 19:45:50 -05001440 * close it out and start a new one -- we can only merge at page
1441 * boundaries.
1442 */
Roland Dreierf5358a12006-06-17 20:37:29 -07001443 ret = 0;
Bart Van Assche52ede082014-05-20 15:07:45 +02001444 if (len != dev->mr_page_size) {
Bart Van Assche509c07b2014-10-30 14:48:30 +01001445 ret = srp_finish_mapping(state, ch);
David Dillow8f26c9f2011-01-14 19:45:50 -05001446 if (!ret)
1447 srp_map_update_start(state, NULL, 0, 0);
1448 }
Roland Dreierf5358a12006-06-17 20:37:29 -07001449 return ret;
1450}
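/*
 * Worked example with illustrative numbers (mr_page_size = 4096): on an
 * FMR device, an sg entry at dma_addr 0x10200 has page offset 0x200, so
 * the test near the top closes out the MR built so far and emits a plain
 * direct descriptor for that entry. A page-aligned entry of 10240 bytes
 * is instead split into two full 4096-byte pages plus a 2048-byte tail,
 * and that short tail makes the final check call srp_finish_mapping().
 */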
1451
Bart Van Assche509c07b2014-10-30 14:48:30 +01001452static int srp_map_sg(struct srp_map_state *state, struct srp_rdma_ch *ch,
1453 struct srp_request *req, struct scatterlist *scat,
1454 int count)
Bart Van Assche76bc1e12014-05-20 15:05:21 +02001455{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001456 struct srp_target_port *target = ch->target;
Bart Van Assche76bc1e12014-05-20 15:05:21 +02001457 struct srp_device *dev = target->srp_host->srp_dev;
1458 struct ib_device *ibdev = dev->dev;
1459 struct scatterlist *sg;
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001460 int i;
1461 bool use_mr;
Bart Van Assche76bc1e12014-05-20 15:05:21 +02001462
1463 state->desc = req->indirect_desc;
1464 state->pages = req->map_page;
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001465 if (dev->use_fast_reg) {
1466 state->next_fr = req->fr_list;
Bart Van Assche509c07b2014-10-30 14:48:30 +01001467 use_mr = !!ch->fr_pool;
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001468 } else {
1469 state->next_fmr = req->fmr_list;
Bart Van Assche509c07b2014-10-30 14:48:30 +01001470 use_mr = !!ch->fmr_pool;
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001471 }
Bart Van Assche76bc1e12014-05-20 15:05:21 +02001472
1473 for_each_sg(scat, sg, count, i) {
Bart Van Assche509c07b2014-10-30 14:48:30 +01001474 if (srp_map_sg_entry(state, ch, sg, i, use_mr)) {
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001475 /*
1476 * Memory registration failed, so backtrack to the
1477 * first unmapped entry and continue on without using
1478 * memory registration.
Bart Van Assche76bc1e12014-05-20 15:05:21 +02001479 */
1480 dma_addr_t dma_addr;
1481 unsigned int dma_len;
1482
1483backtrack:
1484 sg = state->unmapped_sg;
1485 i = state->unmapped_index;
1486
1487 dma_addr = ib_sg_dma_address(ibdev, sg);
1488 dma_len = ib_sg_dma_len(ibdev, sg);
1489 dma_len -= (state->unmapped_addr - dma_addr);
1490 dma_addr = state->unmapped_addr;
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001491 use_mr = false;
Bart Van Assche76bc1e12014-05-20 15:05:21 +02001492 srp_map_desc(state, dma_addr, dma_len, target->rkey);
1493 }
1494 }
1495
Bart Van Assche509c07b2014-10-30 14:48:30 +01001496 if (use_mr && srp_finish_mapping(state, ch))
Bart Van Assche76bc1e12014-05-20 15:05:21 +02001497 goto backtrack;
1498
Bart Van Assche52ede082014-05-20 15:07:45 +02001499 req->nmdesc = state->nmdesc;
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001500
1501 return 0;
Bart Van Assche76bc1e12014-05-20 15:05:21 +02001502}
1503
Bart Van Assche509c07b2014-10-30 14:48:30 +01001504static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch,
Roland Dreieraef9ec32005-11-02 14:07:13 -08001505 struct srp_request *req)
1506{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001507 struct srp_target_port *target = ch->target;
Bart Van Assche76bc1e12014-05-20 15:05:21 +02001508 struct scatterlist *scat;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001509 struct srp_cmd *cmd = req->cmd->buf;
Bart Van Assche76bc1e12014-05-20 15:05:21 +02001510 int len, nents, count;
Ralph Campbell85507bc2006-12-12 14:30:55 -08001511 struct srp_device *dev;
1512 struct ib_device *ibdev;
David Dillow8f26c9f2011-01-14 19:45:50 -05001513 struct srp_map_state state;
1514 struct srp_indirect_buf *indirect_hdr;
David Dillow8f26c9f2011-01-14 19:45:50 -05001515 u32 table_len;
1516 u8 fmt;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001517
FUJITA Tomonoribb350d12007-05-26 02:28:25 +09001518 if (!scsi_sglist(scmnd) || scmnd->sc_data_direction == DMA_NONE)
Roland Dreieraef9ec32005-11-02 14:07:13 -08001519 return sizeof (struct srp_cmd);
1520
1521 if (scmnd->sc_data_direction != DMA_FROM_DEVICE &&
1522 scmnd->sc_data_direction != DMA_TO_DEVICE) {
David Dillow7aa54bd2008-01-07 18:23:41 -05001523 shost_printk(KERN_WARNING, target->scsi_host,
1524 PFX "Unhandled data direction %d\n",
1525 scmnd->sc_data_direction);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001526 return -EINVAL;
1527 }
1528
FUJITA Tomonoribb350d12007-05-26 02:28:25 +09001529 nents = scsi_sg_count(scmnd);
1530 scat = scsi_sglist(scmnd);
Roland Dreiercf368712006-03-24 15:47:26 -08001531
Greg Kroah-Hartman05321932008-03-06 00:13:36 +01001532 dev = target->srp_host->srp_dev;
Ralph Campbell85507bc2006-12-12 14:30:55 -08001533 ibdev = dev->dev;
1534
1535 count = ib_dma_map_sg(ibdev, scat, nents, scmnd->sc_data_direction);
David Dillow8f26c9f2011-01-14 19:45:50 -05001536 if (unlikely(count == 0))
1537 return -EIO;
Roland Dreierf5358a12006-06-17 20:37:29 -07001538
1539 fmt = SRP_DATA_DESC_DIRECT;
1540 len = sizeof (struct srp_cmd) + sizeof (struct srp_direct_buf);
Roland Dreiercf368712006-03-24 15:47:26 -08001541
Bart Van Asscheb1b88542014-05-20 15:06:41 +02001542 if (count == 1 && !register_always) {
Roland Dreierf5358a12006-06-17 20:37:29 -07001543 /*
1544 * The midlayer only generated a single gather/scatter
1545 * entry, or DMA mapping coalesced everything to a
1546 * single entry. So a direct descriptor along with
1547 * the DMA MR suffices.
1548 */
Roland Dreieraef9ec32005-11-02 14:07:13 -08001549 struct srp_direct_buf *buf = (void *) cmd->add_data;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001550
Ralph Campbell85507bc2006-12-12 14:30:55 -08001551 buf->va = cpu_to_be64(ib_sg_dma_address(ibdev, scat));
David Dillow9af76272010-11-26 15:34:46 -05001552 buf->key = cpu_to_be32(target->rkey);
Ralph Campbell85507bc2006-12-12 14:30:55 -08001553 buf->len = cpu_to_be32(ib_sg_dma_len(ibdev, scat));
Roland Dreiercf368712006-03-24 15:47:26 -08001554
Bart Van Assche52ede082014-05-20 15:07:45 +02001555 req->nmdesc = 0;
David Dillow8f26c9f2011-01-14 19:45:50 -05001556 goto map_complete;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001557 }
1558
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001559 /*
1560 * We have more than one scatter/gather entry, so build our indirect
1561 * descriptor table, trying to merge as many entries as we can.
David Dillow8f26c9f2011-01-14 19:45:50 -05001562 */
1563 indirect_hdr = (void *) cmd->add_data;
1564
David Dillowc07d4242011-01-16 13:57:10 -05001565 ib_dma_sync_single_for_cpu(ibdev, req->indirect_dma_addr,
1566 target->indirect_size, DMA_TO_DEVICE);
1567
David Dillow8f26c9f2011-01-14 19:45:50 -05001568 memset(&state, 0, sizeof(state));
Bart Van Assche509c07b2014-10-30 14:48:30 +01001569 srp_map_sg(&state, ch, req, scat, count);
David Dillow8f26c9f2011-01-14 19:45:50 -05001570
David Dillowc07d4242011-01-16 13:57:10 -05001571 /* We've mapped the request, now pull as much of the indirect
1572 * descriptor table as we can into the command buffer. If this
1573 * target is not using an external indirect table, we are
1574 * guaranteed to fit into the command, as the SCSI layer won't
1575 * give us more S/G entries than we allow.
David Dillow8f26c9f2011-01-14 19:45:50 -05001576 */
David Dillow8f26c9f2011-01-14 19:45:50 -05001577 if (state.ndesc == 1) {
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001578 /*
1579 * Memory registration collapsed the sg-list into one entry,
David Dillow8f26c9f2011-01-14 19:45:50 -05001580 * so use a direct descriptor.
1581 */
1582 struct srp_direct_buf *buf = (void *) cmd->add_data;
1583
David Dillowc07d4242011-01-16 13:57:10 -05001584 *buf = req->indirect_desc[0];
David Dillow8f26c9f2011-01-14 19:45:50 -05001585 goto map_complete;
1586 }
1587
David Dillowc07d4242011-01-16 13:57:10 -05001588 if (unlikely(target->cmd_sg_cnt < state.ndesc &&
1589 !target->allow_ext_sg)) {
1590 shost_printk(KERN_ERR, target->scsi_host,
1591 "Could not fit S/G list into SRP_CMD\n");
1592 return -EIO;
1593 }
1594
1595 count = min(state.ndesc, target->cmd_sg_cnt);
David Dillow8f26c9f2011-01-14 19:45:50 -05001596 table_len = state.ndesc * sizeof (struct srp_direct_buf);
1597
1598 fmt = SRP_DATA_DESC_INDIRECT;
1599 len = sizeof(struct srp_cmd) + sizeof (struct srp_indirect_buf);
David Dillowc07d4242011-01-16 13:57:10 -05001600 len += count * sizeof (struct srp_direct_buf);
David Dillow8f26c9f2011-01-14 19:45:50 -05001601
David Dillowc07d4242011-01-16 13:57:10 -05001602 memcpy(indirect_hdr->desc_list, req->indirect_desc,
1603 count * sizeof (struct srp_direct_buf));
David Dillow8f26c9f2011-01-14 19:45:50 -05001604
David Dillowc07d4242011-01-16 13:57:10 -05001605 indirect_hdr->table_desc.va = cpu_to_be64(req->indirect_dma_addr);
David Dillow8f26c9f2011-01-14 19:45:50 -05001606 indirect_hdr->table_desc.key = cpu_to_be32(target->rkey);
1607 indirect_hdr->table_desc.len = cpu_to_be32(table_len);
1608 indirect_hdr->len = cpu_to_be32(state.total_len);
1609
1610 if (scmnd->sc_data_direction == DMA_TO_DEVICE)
David Dillowc07d4242011-01-16 13:57:10 -05001611 cmd->data_out_desc_cnt = count;
David Dillow8f26c9f2011-01-14 19:45:50 -05001612 else
David Dillowc07d4242011-01-16 13:57:10 -05001613 cmd->data_in_desc_cnt = count;
1614
1615 ib_dma_sync_single_for_device(ibdev, req->indirect_dma_addr, table_len,
1616 DMA_TO_DEVICE);
David Dillow8f26c9f2011-01-14 19:45:50 -05001617
1618map_complete:
Roland Dreieraef9ec32005-11-02 14:07:13 -08001619 if (scmnd->sc_data_direction == DMA_TO_DEVICE)
1620 cmd->buf_fmt = fmt << 4;
1621 else
1622 cmd->buf_fmt = fmt;
1623
Roland Dreieraef9ec32005-11-02 14:07:13 -08001624 return len;
1625}
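/*
 * Illustrative IU length calculation, assuming the packed wire-format
 * sizes from <scsi/srp.h> (srp_cmd 48 bytes, srp_direct_buf 16 bytes,
 * srp_indirect_buf header 20 bytes): the direct case above returns
 * 48 + 16 = 64 bytes, while an indirect table with count == 3 returns
 * 48 + 20 + 3 * 16 = 116 bytes.
 */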
1626
David Dillow05a1d752010-10-08 14:48:14 -04001627/*
Bart Van Assche76c75b22010-11-26 14:37:47 -05001628 * Return an IU to the free pool and, unless it was a response IU, give back a credit
1629 */
Bart Van Assche509c07b2014-10-30 14:48:30 +01001630static void srp_put_tx_iu(struct srp_rdma_ch *ch, struct srp_iu *iu,
Bart Van Assche76c75b22010-11-26 14:37:47 -05001631 enum srp_iu_type iu_type)
1632{
1633 unsigned long flags;
1634
Bart Van Assche509c07b2014-10-30 14:48:30 +01001635 spin_lock_irqsave(&ch->lock, flags);
1636 list_add(&iu->list, &ch->free_tx);
Bart Van Assche76c75b22010-11-26 14:37:47 -05001637 if (iu_type != SRP_IU_RSP)
Bart Van Assche509c07b2014-10-30 14:48:30 +01001638 ++ch->req_lim;
1639 spin_unlock_irqrestore(&ch->lock, flags);
Bart Van Assche76c75b22010-11-26 14:37:47 -05001640}
1641
1642/*
Bart Van Assche509c07b2014-10-30 14:48:30 +01001643 * Must be called with ch->lock held to protect req_lim and free_tx.
Bart Van Asschee9684672010-11-26 15:08:38 -05001644 * If IU is not sent, it must be returned using srp_put_tx_iu().
David Dillow05a1d752010-10-08 14:48:14 -04001645 *
1646 * Note:
1647 * An upper limit for the number of allocated information units for each
1648 * request type is:
1649 * - SRP_IU_CMD: SRP_CMD_SQ_SIZE, since the SCSI mid-layer never queues
1650 * more than Scsi_Host.can_queue requests.
1651 * - SRP_IU_TSK_MGMT: SRP_TSK_MGMT_SQ_SIZE.
1652 * - SRP_IU_RSP: 1, since a conforming SRP target never sends more than
1653 * one unanswered SRP request to an initiator.
1654 */
Bart Van Assche509c07b2014-10-30 14:48:30 +01001655static struct srp_iu *__srp_get_tx_iu(struct srp_rdma_ch *ch,
David Dillow05a1d752010-10-08 14:48:14 -04001656 enum srp_iu_type iu_type)
1657{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001658 struct srp_target_port *target = ch->target;
David Dillow05a1d752010-10-08 14:48:14 -04001659 s32 rsv = (iu_type == SRP_IU_TSK_MGMT) ? 0 : SRP_TSK_MGMT_SQ_SIZE;
1660 struct srp_iu *iu;
1661
Bart Van Assche509c07b2014-10-30 14:48:30 +01001662 srp_send_completion(ch->send_cq, ch);
David Dillow05a1d752010-10-08 14:48:14 -04001663
Bart Van Assche509c07b2014-10-30 14:48:30 +01001664 if (list_empty(&ch->free_tx))
David Dillow05a1d752010-10-08 14:48:14 -04001665 return NULL;
1666
1667 /* Initiator responses to target requests do not consume credits */
Bart Van Assche76c75b22010-11-26 14:37:47 -05001668 if (iu_type != SRP_IU_RSP) {
Bart Van Assche509c07b2014-10-30 14:48:30 +01001669 if (ch->req_lim <= rsv) {
Bart Van Assche76c75b22010-11-26 14:37:47 -05001670 ++target->zero_req_lim;
1671 return NULL;
1672 }
1673
Bart Van Assche509c07b2014-10-30 14:48:30 +01001674 --ch->req_lim;
David Dillow05a1d752010-10-08 14:48:14 -04001675 }
1676
Bart Van Assche509c07b2014-10-30 14:48:30 +01001677 iu = list_first_entry(&ch->free_tx, struct srp_iu, list);
Bart Van Assche76c75b22010-11-26 14:37:47 -05001678 list_del(&iu->list);
David Dillow05a1d752010-10-08 14:48:14 -04001679 return iu;
1680}
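/*
 * Credit example, assuming SRP_TSK_MGMT_SQ_SIZE == 1 as defined in
 * ib_srp.h: with ch->req_lim == 1, a SRP_IU_CMD allocation fails here
 * because req_lim <= rsv, which keeps the last credit reserved for a
 * task management function; a SRP_IU_TSK_MGMT allocation (rsv == 0)
 * would still succeed.
 */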
1681
Bart Van Assche509c07b2014-10-30 14:48:30 +01001682static int srp_post_send(struct srp_rdma_ch *ch, struct srp_iu *iu, int len)
David Dillow05a1d752010-10-08 14:48:14 -04001683{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001684 struct srp_target_port *target = ch->target;
David Dillow05a1d752010-10-08 14:48:14 -04001685 struct ib_sge list;
1686 struct ib_send_wr wr, *bad_wr;
David Dillow05a1d752010-10-08 14:48:14 -04001687
1688 list.addr = iu->dma;
1689 list.length = len;
David Dillow9af76272010-11-26 15:34:46 -05001690 list.lkey = target->lkey;
David Dillow05a1d752010-10-08 14:48:14 -04001691
1692 wr.next = NULL;
Bart Van Asschedcb4cb82010-11-26 13:22:48 -05001693 wr.wr_id = (uintptr_t) iu;
David Dillow05a1d752010-10-08 14:48:14 -04001694 wr.sg_list = &list;
1695 wr.num_sge = 1;
1696 wr.opcode = IB_WR_SEND;
1697 wr.send_flags = IB_SEND_SIGNALED;
1698
Bart Van Assche509c07b2014-10-30 14:48:30 +01001699 return ib_post_send(ch->qp, &wr, &bad_wr);
David Dillow05a1d752010-10-08 14:48:14 -04001700}
1701
Bart Van Assche509c07b2014-10-30 14:48:30 +01001702static int srp_post_recv(struct srp_rdma_ch *ch, struct srp_iu *iu)
Bart Van Asschec996bb42010-07-30 10:59:05 +00001703{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001704 struct srp_target_port *target = ch->target;
Bart Van Asschec996bb42010-07-30 10:59:05 +00001705 struct ib_recv_wr wr, *bad_wr;
Bart Van Asschedcb4cb82010-11-26 13:22:48 -05001706 struct ib_sge list;
Bart Van Asschec996bb42010-07-30 10:59:05 +00001707
1708 list.addr = iu->dma;
1709 list.length = iu->size;
David Dillow9af76272010-11-26 15:34:46 -05001710 list.lkey = target->lkey;
Bart Van Asschec996bb42010-07-30 10:59:05 +00001711
1712 wr.next = NULL;
Bart Van Asschedcb4cb82010-11-26 13:22:48 -05001713 wr.wr_id = (uintptr_t) iu;
Bart Van Asschec996bb42010-07-30 10:59:05 +00001714 wr.sg_list = &list;
1715 wr.num_sge = 1;
1716
Bart Van Assche509c07b2014-10-30 14:48:30 +01001717 return ib_post_recv(ch->qp, &wr, &bad_wr);
Bart Van Asschec996bb42010-07-30 10:59:05 +00001718}
1719
Bart Van Assche509c07b2014-10-30 14:48:30 +01001720static void srp_process_rsp(struct srp_rdma_ch *ch, struct srp_rsp *rsp)
Roland Dreieraef9ec32005-11-02 14:07:13 -08001721{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001722 struct srp_target_port *target = ch->target;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001723 struct srp_request *req;
1724 struct scsi_cmnd *scmnd;
1725 unsigned long flags;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001726
Roland Dreieraef9ec32005-11-02 14:07:13 -08001727 if (unlikely(rsp->tag & SRP_TAG_TSK_MGMT)) {
Bart Van Assche509c07b2014-10-30 14:48:30 +01001728 spin_lock_irqsave(&ch->lock, flags);
1729 ch->req_lim += be32_to_cpu(rsp->req_lim_delta);
1730 spin_unlock_irqrestore(&ch->lock, flags);
Bart Van Assche94a91742010-11-26 14:50:09 -05001731
Bart Van Assche509c07b2014-10-30 14:48:30 +01001732 ch->tsk_mgmt_status = -1;
David Dillowf8b6e312010-11-26 13:02:21 -05001733 if (be32_to_cpu(rsp->resp_data_len) >= 4)
Bart Van Assche509c07b2014-10-30 14:48:30 +01001734 ch->tsk_mgmt_status = rsp->data[3];
1735 complete(&ch->tsk_mgmt_done);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001736 } else {
Bart Van Assche77f2c1a2014-10-02 15:29:25 +02001737 scmnd = scsi_host_find_tag(target->scsi_host, rsp->tag);
1738 if (scmnd) {
1739 req = (void *)scmnd->host_scribble;
1740 scmnd = srp_claim_req(ch, req, NULL, scmnd);
1741 }
Bart Van Assche22032992012-08-14 13:18:53 +00001742 if (!scmnd) {
David Dillow7aa54bd2008-01-07 18:23:41 -05001743 shost_printk(KERN_ERR, target->scsi_host,
Bart Van Assched92c0da2014-10-06 17:14:36 +02001744 "Null scmnd for RSP w/tag %#016llx received on ch %td / QP %#x\n",
1745 rsp->tag, ch - target->ch, ch->qp->qp_num);
Bart Van Assche22032992012-08-14 13:18:53 +00001746
Bart Van Assche509c07b2014-10-30 14:48:30 +01001747 spin_lock_irqsave(&ch->lock, flags);
1748 ch->req_lim += be32_to_cpu(rsp->req_lim_delta);
1749 spin_unlock_irqrestore(&ch->lock, flags);
Bart Van Assche22032992012-08-14 13:18:53 +00001750
1751 return;
1752 }
Roland Dreieraef9ec32005-11-02 14:07:13 -08001753 scmnd->result = rsp->status;
1754
1755 if (rsp->flags & SRP_RSP_FLAG_SNSVALID) {
1756 memcpy(scmnd->sense_buffer, rsp->data +
1757 be32_to_cpu(rsp->resp_data_len),
1758 min_t(int, be32_to_cpu(rsp->sense_data_len),
1759 SCSI_SENSE_BUFFERSIZE));
1760 }
1761
Bart Van Asschee7145312014-07-09 15:57:51 +02001762 if (unlikely(rsp->flags & SRP_RSP_FLAG_DIUNDER))
FUJITA Tomonoribb350d12007-05-26 02:28:25 +09001763 scsi_set_resid(scmnd, be32_to_cpu(rsp->data_in_res_cnt));
Bart Van Asschee7145312014-07-09 15:57:51 +02001764 else if (unlikely(rsp->flags & SRP_RSP_FLAG_DIOVER))
1765 scsi_set_resid(scmnd, -be32_to_cpu(rsp->data_in_res_cnt));
1766 else if (unlikely(rsp->flags & SRP_RSP_FLAG_DOUNDER))
1767 scsi_set_resid(scmnd, be32_to_cpu(rsp->data_out_res_cnt));
1768 else if (unlikely(rsp->flags & SRP_RSP_FLAG_DOOVER))
1769 scsi_set_resid(scmnd, -be32_to_cpu(rsp->data_out_res_cnt));
Roland Dreieraef9ec32005-11-02 14:07:13 -08001770
Bart Van Assche509c07b2014-10-30 14:48:30 +01001771 srp_free_req(ch, req, scmnd,
Bart Van Assche22032992012-08-14 13:18:53 +00001772 be32_to_cpu(rsp->req_lim_delta));
1773
David Dillowf8b6e312010-11-26 13:02:21 -05001774 scmnd->host_scribble = NULL;
1775 scmnd->scsi_done(scmnd);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001776 }
Roland Dreieraef9ec32005-11-02 14:07:13 -08001777}
1778
Bart Van Assche509c07b2014-10-30 14:48:30 +01001779static int srp_response_common(struct srp_rdma_ch *ch, s32 req_delta,
David Dillowbb125882010-10-08 14:40:47 -04001780 void *rsp, int len)
1781{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001782 struct srp_target_port *target = ch->target;
Bart Van Assche76c75b22010-11-26 14:37:47 -05001783 struct ib_device *dev = target->srp_host->srp_dev->dev;
David Dillowbb125882010-10-08 14:40:47 -04001784 unsigned long flags;
1785 struct srp_iu *iu;
Bart Van Assche76c75b22010-11-26 14:37:47 -05001786 int err;
David Dillowbb125882010-10-08 14:40:47 -04001787
Bart Van Assche509c07b2014-10-30 14:48:30 +01001788 spin_lock_irqsave(&ch->lock, flags);
1789 ch->req_lim += req_delta;
1790 iu = __srp_get_tx_iu(ch, SRP_IU_RSP);
1791 spin_unlock_irqrestore(&ch->lock, flags);
Bart Van Assche76c75b22010-11-26 14:37:47 -05001792
David Dillowbb125882010-10-08 14:40:47 -04001793 if (!iu) {
1794 shost_printk(KERN_ERR, target->scsi_host, PFX
1795 "no IU available to send response\n");
Bart Van Assche76c75b22010-11-26 14:37:47 -05001796 return 1;
David Dillowbb125882010-10-08 14:40:47 -04001797 }
1798
1799 ib_dma_sync_single_for_cpu(dev, iu->dma, len, DMA_TO_DEVICE);
1800 memcpy(iu->buf, rsp, len);
1801 ib_dma_sync_single_for_device(dev, iu->dma, len, DMA_TO_DEVICE);
1802
Bart Van Assche509c07b2014-10-30 14:48:30 +01001803 err = srp_post_send(ch, iu, len);
Bart Van Assche76c75b22010-11-26 14:37:47 -05001804 if (err) {
David Dillowbb125882010-10-08 14:40:47 -04001805 shost_printk(KERN_ERR, target->scsi_host, PFX
1806 "unable to post response: %d\n", err);
Bart Van Assche509c07b2014-10-30 14:48:30 +01001807 srp_put_tx_iu(ch, iu, SRP_IU_RSP);
Bart Van Assche76c75b22010-11-26 14:37:47 -05001808 }
David Dillowbb125882010-10-08 14:40:47 -04001809
David Dillowbb125882010-10-08 14:40:47 -04001810 return err;
1811}
1812
Bart Van Assche509c07b2014-10-30 14:48:30 +01001813static void srp_process_cred_req(struct srp_rdma_ch *ch,
David Dillowbb125882010-10-08 14:40:47 -04001814 struct srp_cred_req *req)
1815{
1816 struct srp_cred_rsp rsp = {
1817 .opcode = SRP_CRED_RSP,
1818 .tag = req->tag,
1819 };
1820 s32 delta = be32_to_cpu(req->req_lim_delta);
1821
Bart Van Assche509c07b2014-10-30 14:48:30 +01001822 if (srp_response_common(ch, delta, &rsp, sizeof(rsp)))
1823 shost_printk(KERN_ERR, ch->target->scsi_host, PFX
David Dillowbb125882010-10-08 14:40:47 -04001824 "problems processing SRP_CRED_REQ\n");
1825}
1826
Bart Van Assche509c07b2014-10-30 14:48:30 +01001827static void srp_process_aer_req(struct srp_rdma_ch *ch,
David Dillowbb125882010-10-08 14:40:47 -04001828 struct srp_aer_req *req)
1829{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001830 struct srp_target_port *target = ch->target;
David Dillowbb125882010-10-08 14:40:47 -04001831 struct srp_aer_rsp rsp = {
1832 .opcode = SRP_AER_RSP,
1833 .tag = req->tag,
1834 };
1835 s32 delta = be32_to_cpu(req->req_lim_delta);
1836
1837 shost_printk(KERN_ERR, target->scsi_host, PFX
Bart Van Assche985aa492015-05-18 13:27:14 +02001838 "ignoring AER for LUN %llu\n", scsilun_to_int(&req->lun));
David Dillowbb125882010-10-08 14:40:47 -04001839
Bart Van Assche509c07b2014-10-30 14:48:30 +01001840 if (srp_response_common(ch, delta, &rsp, sizeof(rsp)))
David Dillowbb125882010-10-08 14:40:47 -04001841 shost_printk(KERN_ERR, target->scsi_host, PFX
1842 "problems processing SRP_AER_REQ\n");
1843}
1844
Bart Van Assche509c07b2014-10-30 14:48:30 +01001845static void srp_handle_recv(struct srp_rdma_ch *ch, struct ib_wc *wc)
Roland Dreieraef9ec32005-11-02 14:07:13 -08001846{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001847 struct srp_target_port *target = ch->target;
Bart Van Asschedcb4cb82010-11-26 13:22:48 -05001848 struct ib_device *dev = target->srp_host->srp_dev->dev;
Roland Dreier737b94e2011-05-23 11:30:04 -07001849 struct srp_iu *iu = (struct srp_iu *) (uintptr_t) wc->wr_id;
Bart Van Asschec996bb42010-07-30 10:59:05 +00001850 int res;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001851 u8 opcode;
1852
Bart Van Assche509c07b2014-10-30 14:48:30 +01001853 ib_dma_sync_single_for_cpu(dev, iu->dma, ch->max_ti_iu_len,
Ralph Campbell85507bc2006-12-12 14:30:55 -08001854 DMA_FROM_DEVICE);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001855
1856 opcode = *(u8 *) iu->buf;
1857
1858 if (0) {
David Dillow7aa54bd2008-01-07 18:23:41 -05001859 shost_printk(KERN_ERR, target->scsi_host,
1860 PFX "recv completion, opcode 0x%02x\n", opcode);
Bart Van Assche7a700812010-07-29 15:56:37 +00001861 print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 8, 1,
1862 iu->buf, wc->byte_len, true);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001863 }
1864
1865 switch (opcode) {
1866 case SRP_RSP:
Bart Van Assche509c07b2014-10-30 14:48:30 +01001867 srp_process_rsp(ch, iu->buf);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001868 break;
1869
David Dillowbb125882010-10-08 14:40:47 -04001870 case SRP_CRED_REQ:
Bart Van Assche509c07b2014-10-30 14:48:30 +01001871 srp_process_cred_req(ch, iu->buf);
David Dillowbb125882010-10-08 14:40:47 -04001872 break;
1873
1874 case SRP_AER_REQ:
Bart Van Assche509c07b2014-10-30 14:48:30 +01001875 srp_process_aer_req(ch, iu->buf);
David Dillowbb125882010-10-08 14:40:47 -04001876 break;
1877
Roland Dreieraef9ec32005-11-02 14:07:13 -08001878 case SRP_T_LOGOUT:
1879 /* XXX Handle target logout */
David Dillow7aa54bd2008-01-07 18:23:41 -05001880 shost_printk(KERN_WARNING, target->scsi_host,
1881 PFX "Got target logout request\n");
Roland Dreieraef9ec32005-11-02 14:07:13 -08001882 break;
1883
1884 default:
David Dillow7aa54bd2008-01-07 18:23:41 -05001885 shost_printk(KERN_WARNING, target->scsi_host,
1886 PFX "Unhandled SRP opcode 0x%02x\n", opcode);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001887 break;
1888 }
1889
Bart Van Assche509c07b2014-10-30 14:48:30 +01001890 ib_dma_sync_single_for_device(dev, iu->dma, ch->max_ti_iu_len,
Ralph Campbell85507bc2006-12-12 14:30:55 -08001891 DMA_FROM_DEVICE);
Bart Van Asschec996bb42010-07-30 10:59:05 +00001892
Bart Van Assche509c07b2014-10-30 14:48:30 +01001893 res = srp_post_recv(ch, iu);
Bart Van Asschec996bb42010-07-30 10:59:05 +00001894 if (res != 0)
1895 shost_printk(KERN_ERR, target->scsi_host,
1896 PFX "Recv failed with error code %d\n", res);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001897}
1898
Bart Van Asschec1120f82013-10-26 14:35:08 +02001899/**
1900 * srp_tl_err_work() - handle a transport layer error
Bart Van Asscheaf246632014-05-20 15:04:21 +02001901 * @work: Work structure embedded in an SRP target port.
Bart Van Asschec1120f82013-10-26 14:35:08 +02001902 *
1903 * Note: This function may get invoked before the rport has been created,
1904 * hence the target->rport test.
1905 */
1906static void srp_tl_err_work(struct work_struct *work)
1907{
1908 struct srp_target_port *target;
1909
1910 target = container_of(work, struct srp_target_port, tl_err_work);
1911 if (target->rport)
1912 srp_start_tl_fail_timers(target->rport);
1913}
1914
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001915static void srp_handle_qp_err(u64 wr_id, enum ib_wc_status wc_status,
Bart Van Assche7dad6b22014-10-21 18:00:35 +02001916 bool send_err, struct srp_rdma_ch *ch)
Bart Van Assche948d1e82011-09-03 09:25:42 +02001917{
Bart Van Assche7dad6b22014-10-21 18:00:35 +02001918 struct srp_target_port *target = ch->target;
1919
1920 if (wr_id == SRP_LAST_WR_ID) {
1921 complete(&ch->done);
1922 return;
1923 }
1924
Bart Van Asschec014c8c2015-05-18 13:23:57 +02001925 if (ch->connected && !target->qp_in_error) {
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001926 if (wr_id & LOCAL_INV_WR_ID_MASK) {
1927 shost_printk(KERN_ERR, target->scsi_host, PFX
Sagi Grimberg57363d92015-05-18 13:40:29 +03001928 "LOCAL_INV failed with status %s (%d)\n",
1929 ib_wc_status_msg(wc_status), wc_status);
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001930 } else if (wr_id & FAST_REG_WR_ID_MASK) {
1931 shost_printk(KERN_ERR, target->scsi_host, PFX
Sagi Grimberg57363d92015-05-18 13:40:29 +03001932 "FAST_REG_MR failed with status %s (%d)\n",
1933 ib_wc_status_msg(wc_status), wc_status);
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001934 } else {
1935 shost_printk(KERN_ERR, target->scsi_host,
Sagi Grimberg57363d92015-05-18 13:40:29 +03001936 PFX "failed %s status %s (%d) for iu %p\n",
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001937 send_err ? "send" : "receive",
Sagi Grimberg57363d92015-05-18 13:40:29 +03001938 ib_wc_status_msg(wc_status), wc_status,
1939 (void *)(uintptr_t)wr_id);
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001940 }
Bart Van Asschec1120f82013-10-26 14:35:08 +02001941 queue_work(system_long_wq, &target->tl_err_work);
Bart Van Assche4f0af692012-11-26 11:16:40 +01001942 }
Bart Van Assche948d1e82011-09-03 09:25:42 +02001943 target->qp_in_error = true;
1944}
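/*
 * How the wr_id tests above decode: regular sends and receives store the
 * srp_iu pointer in wr_id (see srp_post_send() and srp_post_recv()),
 * fast registration and local invalidation work requests carry the
 * FAST_REG_WR_ID_MASK / LOCAL_INV_WR_ID_MASK marker bits instead, and
 * SRP_LAST_WR_ID flags the work request whose completion signals that a
 * queue pair has been drained (it completes ch->done above).
 */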
1945
Bart Van Assche509c07b2014-10-30 14:48:30 +01001946static void srp_recv_completion(struct ib_cq *cq, void *ch_ptr)
Roland Dreieraef9ec32005-11-02 14:07:13 -08001947{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001948 struct srp_rdma_ch *ch = ch_ptr;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001949 struct ib_wc wc;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001950
1951 ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
1952 while (ib_poll_cq(cq, 1, &wc) > 0) {
Bart Van Assche948d1e82011-09-03 09:25:42 +02001953 if (likely(wc.status == IB_WC_SUCCESS)) {
Bart Van Assche509c07b2014-10-30 14:48:30 +01001954 srp_handle_recv(ch, &wc);
Bart Van Assche948d1e82011-09-03 09:25:42 +02001955 } else {
Bart Van Assche7dad6b22014-10-21 18:00:35 +02001956 srp_handle_qp_err(wc.wr_id, wc.status, false, ch);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001957 }
Bart Van Assche9c03dc92010-02-02 19:23:54 +00001958 }
1959}
1960
Bart Van Assche509c07b2014-10-30 14:48:30 +01001961static void srp_send_completion(struct ib_cq *cq, void *ch_ptr)
Bart Van Assche9c03dc92010-02-02 19:23:54 +00001962{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001963 struct srp_rdma_ch *ch = ch_ptr;
Bart Van Assche9c03dc92010-02-02 19:23:54 +00001964 struct ib_wc wc;
Bart Van Asschedcb4cb82010-11-26 13:22:48 -05001965 struct srp_iu *iu;
Bart Van Assche9c03dc92010-02-02 19:23:54 +00001966
1967 while (ib_poll_cq(cq, 1, &wc) > 0) {
Bart Van Assche948d1e82011-09-03 09:25:42 +02001968 if (likely(wc.status == IB_WC_SUCCESS)) {
1969 iu = (struct srp_iu *) (uintptr_t) wc.wr_id;
Bart Van Assche509c07b2014-10-30 14:48:30 +01001970 list_add(&iu->list, &ch->free_tx);
Bart Van Assche948d1e82011-09-03 09:25:42 +02001971 } else {
Bart Van Assche7dad6b22014-10-21 18:00:35 +02001972 srp_handle_qp_err(wc.wr_id, wc.status, true, ch);
Bart Van Assche9c03dc92010-02-02 19:23:54 +00001973 }
Roland Dreieraef9ec32005-11-02 14:07:13 -08001974 }
1975}
1976
Bart Van Assche76c75b22010-11-26 14:37:47 -05001977static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
Roland Dreieraef9ec32005-11-02 14:07:13 -08001978{
Bart Van Assche76c75b22010-11-26 14:37:47 -05001979 struct srp_target_port *target = host_to_target(shost);
Bart Van Asschea95cadb2013-10-26 14:37:17 +02001980 struct srp_rport *rport = target->rport;
Bart Van Assche509c07b2014-10-30 14:48:30 +01001981 struct srp_rdma_ch *ch;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001982 struct srp_request *req;
1983 struct srp_iu *iu;
1984 struct srp_cmd *cmd;
Ralph Campbell85507bc2006-12-12 14:30:55 -08001985 struct ib_device *dev;
Bart Van Assche76c75b22010-11-26 14:37:47 -05001986 unsigned long flags;
Bart Van Assche77f2c1a2014-10-02 15:29:25 +02001987 u32 tag;
1988 u16 idx;
Bart Van Assched1b42892014-05-20 15:07:20 +02001989 int len, ret;
Bart Van Asschea95cadb2013-10-26 14:37:17 +02001990 const bool in_scsi_eh = !in_interrupt() && current == shost->ehandler;
1991
1992 /*
1993 * The SCSI EH thread is the only context from which srp_queuecommand()
1994 * can get invoked for blocked devices (SDEV_BLOCK /
1995 * SDEV_CREATED_BLOCK). Avoid racing with srp_reconnect_rport() by
1996 * locking the rport mutex if invoked from inside the SCSI EH.
1997 */
1998 if (in_scsi_eh)
1999 mutex_lock(&rport->mutex);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002000
Bart Van Assched1b42892014-05-20 15:07:20 +02002001 scmnd->result = srp_chkready(target->rport);
2002 if (unlikely(scmnd->result))
2003 goto err;
Bart Van Assche2ce19e72013-02-21 17:20:00 +00002004
Bart Van Assche77f2c1a2014-10-02 15:29:25 +02002005 WARN_ON_ONCE(scmnd->request->tag < 0);
2006 tag = blk_mq_unique_tag(scmnd->request);
Bart Van Assched92c0da2014-10-06 17:14:36 +02002007 ch = &target->ch[blk_mq_unique_tag_to_hwq(tag)];
Bart Van Assche77f2c1a2014-10-02 15:29:25 +02002008 idx = blk_mq_unique_tag_to_tag(tag);
2009 WARN_ONCE(idx >= target->req_ring_size, "%s: tag %#x: idx %d >= %d\n",
2010 dev_name(&shost->shost_gendev), tag, idx,
2011 target->req_ring_size);
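	/*
	 * Tag layout, as produced by blk_mq_unique_tag(): the hardware
	 * queue number lives in the upper 16 bits and the per-queue tag
	 * in the lower 16 bits, so e.g. tag 0x0002000a selects
	 * ch = &target->ch[2] and idx = 10 within that channel's ring.
	 */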
Bart Van Assche509c07b2014-10-30 14:48:30 +01002012
2013 spin_lock_irqsave(&ch->lock, flags);
2014 iu = __srp_get_tx_iu(ch, SRP_IU_CMD);
Bart Van Assche509c07b2014-10-30 14:48:30 +01002015 spin_unlock_irqrestore(&ch->lock, flags);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002016
Bart Van Assche77f2c1a2014-10-02 15:29:25 +02002017 if (!iu)
2018 goto err;
2019
2020 req = &ch->req_ring[idx];
Greg Kroah-Hartman05321932008-03-06 00:13:36 +01002021 dev = target->srp_host->srp_dev->dev;
David Dillow49248642011-01-14 18:23:24 -05002022 ib_dma_sync_single_for_cpu(dev, iu->dma, target->max_iu_len,
Ralph Campbell85507bc2006-12-12 14:30:55 -08002023 DMA_TO_DEVICE);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002024
David Dillowf8b6e312010-11-26 13:02:21 -05002025 scmnd->host_scribble = (void *) req;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002026
2027 cmd = iu->buf;
2028 memset(cmd, 0, sizeof *cmd);
2029
2030 cmd->opcode = SRP_CMD;
Bart Van Assche985aa492015-05-18 13:27:14 +02002031 int_to_scsilun(scmnd->device->lun, &cmd->lun);
Bart Van Assche77f2c1a2014-10-02 15:29:25 +02002032 cmd->tag = tag;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002033 memcpy(cmd->cdb, scmnd->cmnd, scmnd->cmd_len);
2034
Roland Dreieraef9ec32005-11-02 14:07:13 -08002035 req->scmnd = scmnd;
2036 req->cmd = iu;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002037
Bart Van Assche509c07b2014-10-30 14:48:30 +01002038 len = srp_map_data(scmnd, ch, req);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002039 if (len < 0) {
David Dillow7aa54bd2008-01-07 18:23:41 -05002040 shost_printk(KERN_ERR, target->scsi_host,
Bart Van Assched1b42892014-05-20 15:07:20 +02002041 PFX "Failed to map data (%d)\n", len);
2042 /*
2043 * If we ran out of memory descriptors (-ENOMEM) because an
2044 * application is queuing many requests with more than
Bart Van Assche52ede082014-05-20 15:07:45 +02002045 * max_pages_per_mr sg-list elements, tell the SCSI mid-layer
Bart Van Assched1b42892014-05-20 15:07:20 +02002046 * to reduce queue depth temporarily.
2047 */
2048 scmnd->result = len == -ENOMEM ?
2049 DID_OK << 16 | QUEUE_FULL << 1 : DID_ERROR << 16;
Bart Van Assche76c75b22010-11-26 14:37:47 -05002050 goto err_iu;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002051 }
2052
David Dillow49248642011-01-14 18:23:24 -05002053 ib_dma_sync_single_for_device(dev, iu->dma, target->max_iu_len,
Ralph Campbell85507bc2006-12-12 14:30:55 -08002054 DMA_TO_DEVICE);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002055
Bart Van Assche509c07b2014-10-30 14:48:30 +01002056 if (srp_post_send(ch, iu, len)) {
David Dillow7aa54bd2008-01-07 18:23:41 -05002057 shost_printk(KERN_ERR, target->scsi_host, PFX "Send failed\n");
Roland Dreieraef9ec32005-11-02 14:07:13 -08002058 goto err_unmap;
2059 }
2060
Bart Van Assched1b42892014-05-20 15:07:20 +02002061 ret = 0;
2062
Bart Van Asschea95cadb2013-10-26 14:37:17 +02002063unlock_rport:
2064 if (in_scsi_eh)
2065 mutex_unlock(&rport->mutex);
2066
Bart Van Assched1b42892014-05-20 15:07:20 +02002067 return ret;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002068
2069err_unmap:
Bart Van Assche509c07b2014-10-30 14:48:30 +01002070 srp_unmap_data(scmnd, ch, req);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002071
Bart Van Assche76c75b22010-11-26 14:37:47 -05002072err_iu:
Bart Van Assche509c07b2014-10-30 14:48:30 +01002073 srp_put_tx_iu(ch, iu, SRP_IU_CMD);
Bart Van Assche76c75b22010-11-26 14:37:47 -05002074
Bart Van Assche024ca902014-05-20 15:03:49 +02002075 /*
2076 * Make sure that the loops that iterate over the request ring cannot
2077 * encounter a dangling SCSI command pointer.
2078 */
2079 req->scmnd = NULL;
2080
Bart Van Assched1b42892014-05-20 15:07:20 +02002081err:
2082 if (scmnd->result) {
2083 scmnd->scsi_done(scmnd);
2084 ret = 0;
2085 } else {
2086 ret = SCSI_MLQUEUE_HOST_BUSY;
2087 }
Bart Van Asschea95cadb2013-10-26 14:37:17 +02002088
Bart Van Assched1b42892014-05-20 15:07:20 +02002089 goto unlock_rport;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002090}
2091
Bart Van Assche4d73f952013-10-26 14:40:37 +02002092/*
2093 * Note: the resources allocated in this function are freed in
Bart Van Assche509c07b2014-10-30 14:48:30 +01002094 * srp_free_ch_ib().
Bart Van Assche4d73f952013-10-26 14:40:37 +02002095 */
Bart Van Assche509c07b2014-10-30 14:48:30 +01002096static int srp_alloc_iu_bufs(struct srp_rdma_ch *ch)
Roland Dreieraef9ec32005-11-02 14:07:13 -08002097{
Bart Van Assche509c07b2014-10-30 14:48:30 +01002098 struct srp_target_port *target = ch->target;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002099 int i;
2100
Bart Van Assche509c07b2014-10-30 14:48:30 +01002101 ch->rx_ring = kcalloc(target->queue_size, sizeof(*ch->rx_ring),
2102 GFP_KERNEL);
2103 if (!ch->rx_ring)
Bart Van Assche4d73f952013-10-26 14:40:37 +02002104 goto err_no_ring;
Bart Van Assche509c07b2014-10-30 14:48:30 +01002105 ch->tx_ring = kcalloc(target->queue_size, sizeof(*ch->tx_ring),
2106 GFP_KERNEL);
2107 if (!ch->tx_ring)
Bart Van Assche4d73f952013-10-26 14:40:37 +02002108 goto err_no_ring;
2109
2110 for (i = 0; i < target->queue_size; ++i) {
Bart Van Assche509c07b2014-10-30 14:48:30 +01002111 ch->rx_ring[i] = srp_alloc_iu(target->srp_host,
2112 ch->max_ti_iu_len,
2113 GFP_KERNEL, DMA_FROM_DEVICE);
2114 if (!ch->rx_ring[i])
Roland Dreieraef9ec32005-11-02 14:07:13 -08002115 goto err;
2116 }
2117
Bart Van Assche4d73f952013-10-26 14:40:37 +02002118 for (i = 0; i < target->queue_size; ++i) {
Bart Van Assche509c07b2014-10-30 14:48:30 +01002119 ch->tx_ring[i] = srp_alloc_iu(target->srp_host,
2120 target->max_iu_len,
2121 GFP_KERNEL, DMA_TO_DEVICE);
2122 if (!ch->tx_ring[i])
Roland Dreieraef9ec32005-11-02 14:07:13 -08002123 goto err;
Bart Van Asschedcb4cb82010-11-26 13:22:48 -05002124
Bart Van Assche509c07b2014-10-30 14:48:30 +01002125 list_add(&ch->tx_ring[i]->list, &ch->free_tx);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002126 }
2127
2128 return 0;
2129
2130err:
Bart Van Assche4d73f952013-10-26 14:40:37 +02002131 for (i = 0; i < target->queue_size; ++i) {
Bart Van Assche509c07b2014-10-30 14:48:30 +01002132 srp_free_iu(target->srp_host, ch->rx_ring[i]);
2133 srp_free_iu(target->srp_host, ch->tx_ring[i]);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002134 }
2135
Bart Van Assche4d73f952013-10-26 14:40:37 +02002136
2137err_no_ring:
Bart Van Assche509c07b2014-10-30 14:48:30 +01002138 kfree(ch->tx_ring);
2139 ch->tx_ring = NULL;
2140 kfree(ch->rx_ring);
2141 ch->rx_ring = NULL;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002142
2143 return -ENOMEM;
2144}
2145
Bart Van Asschec9b03c12011-09-03 09:34:48 +02002146static uint32_t srp_compute_rq_tmo(struct ib_qp_attr *qp_attr, int attr_mask)
2147{
2148 uint64_t T_tr_ns, max_compl_time_ms;
2149 uint32_t rq_tmo_jiffies;
2150
2151 /*
2152 * According to section 11.2.4.2 in the IBTA spec (Modify Queue Pair,
2153 * table 91), both the QP timeout and the retry count have to be set
2154 * for RC QPs during the RTR to RTS transition.
2155 */
2156 WARN_ON_ONCE((attr_mask & (IB_QP_TIMEOUT | IB_QP_RETRY_CNT)) !=
2157 (IB_QP_TIMEOUT | IB_QP_RETRY_CNT));
2158
2159 /*
2160 * Set target->rq_tmo_jiffies to one second more than the largest time
2161 * it can take before an error completion is generated. See also
2162 * C9-140..142 in the IBTA spec for more information about how to
2163 * convert the QP Local ACK Timeout value to nanoseconds.
2164 */
2165 T_tr_ns = 4096 * (1ULL << qp_attr->timeout);
2166 max_compl_time_ms = qp_attr->retry_cnt * 4 * T_tr_ns;
2167 do_div(max_compl_time_ms, NSEC_PER_MSEC);
2168 rq_tmo_jiffies = msecs_to_jiffies(max_compl_time_ms + 1000);
2169
2170 return rq_tmo_jiffies;
2171}
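/*
 * Worked example with illustrative values: for qp_attr->timeout == 14
 * and qp_attr->retry_cnt == 7, T_tr_ns = 4096 * 2^14 = 67,108,864 ns,
 * max_compl_time_ms = 7 * 4 * 67,108,864 ns / NSEC_PER_MSEC ~= 1879 ms,
 * and the returned timeout is msecs_to_jiffies(2879), i.e. just under
 * three seconds.
 */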
2172
David Dillow961e0be2011-01-14 17:32:07 -05002173static void srp_cm_rep_handler(struct ib_cm_id *cm_id,
2174 struct srp_login_rsp *lrsp,
Bart Van Assche509c07b2014-10-30 14:48:30 +01002175 struct srp_rdma_ch *ch)
David Dillow961e0be2011-01-14 17:32:07 -05002176{
Bart Van Assche509c07b2014-10-30 14:48:30 +01002177 struct srp_target_port *target = ch->target;
David Dillow961e0be2011-01-14 17:32:07 -05002178 struct ib_qp_attr *qp_attr = NULL;
2179 int attr_mask = 0;
2180 int ret;
2181 int i;
2182
2183 if (lrsp->opcode == SRP_LOGIN_RSP) {
Bart Van Assche509c07b2014-10-30 14:48:30 +01002184 ch->max_ti_iu_len = be32_to_cpu(lrsp->max_ti_iu_len);
2185 ch->req_lim = be32_to_cpu(lrsp->req_lim_delta);
David Dillow961e0be2011-01-14 17:32:07 -05002186
2187 /*
2188 * Reserve credits for task management so we don't
2189 * bounce requests back to the SCSI mid-layer.
2190 */
2191 target->scsi_host->can_queue
Bart Van Assche509c07b2014-10-30 14:48:30 +01002192 = min(ch->req_lim - SRP_TSK_MGMT_SQ_SIZE,
David Dillow961e0be2011-01-14 17:32:07 -05002193 target->scsi_host->can_queue);
Bart Van Assche4d73f952013-10-26 14:40:37 +02002194 target->scsi_host->cmd_per_lun
2195 = min_t(int, target->scsi_host->can_queue,
2196 target->scsi_host->cmd_per_lun);
David Dillow961e0be2011-01-14 17:32:07 -05002197 } else {
2198 shost_printk(KERN_WARNING, target->scsi_host,
2199 PFX "Unhandled RSP opcode %#x\n", lrsp->opcode);
2200 ret = -ECONNRESET;
2201 goto error;
2202 }
2203
Bart Van Assche509c07b2014-10-30 14:48:30 +01002204 if (!ch->rx_ring) {
2205 ret = srp_alloc_iu_bufs(ch);
David Dillow961e0be2011-01-14 17:32:07 -05002206 if (ret)
2207 goto error;
2208 }
2209
2210 ret = -ENOMEM;
2211 qp_attr = kmalloc(sizeof *qp_attr, GFP_KERNEL);
2212 if (!qp_attr)
2213 goto error;
2214
2215 qp_attr->qp_state = IB_QPS_RTR;
2216 ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
2217 if (ret)
2218 goto error_free;
2219
Bart Van Assche509c07b2014-10-30 14:48:30 +01002220 ret = ib_modify_qp(ch->qp, qp_attr, attr_mask);
David Dillow961e0be2011-01-14 17:32:07 -05002221 if (ret)
2222 goto error_free;
2223
Bart Van Assche4d73f952013-10-26 14:40:37 +02002224 for (i = 0; i < target->queue_size; i++) {
Bart Van Assche509c07b2014-10-30 14:48:30 +01002225 struct srp_iu *iu = ch->rx_ring[i];
2226
2227 ret = srp_post_recv(ch, iu);
David Dillow961e0be2011-01-14 17:32:07 -05002228 if (ret)
2229 goto error_free;
2230 }
2231
2232 qp_attr->qp_state = IB_QPS_RTS;
2233 ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
2234 if (ret)
2235 goto error_free;
2236
Bart Van Asschec9b03c12011-09-03 09:34:48 +02002237 target->rq_tmo_jiffies = srp_compute_rq_tmo(qp_attr, attr_mask);
2238
Bart Van Assche509c07b2014-10-30 14:48:30 +01002239 ret = ib_modify_qp(ch->qp, qp_attr, attr_mask);
David Dillow961e0be2011-01-14 17:32:07 -05002240 if (ret)
2241 goto error_free;
2242
2243 ret = ib_send_cm_rtu(cm_id, NULL, 0);
2244
2245error_free:
2246 kfree(qp_attr);
2247
2248error:
Bart Van Assche509c07b2014-10-30 14:48:30 +01002249 ch->status = ret;
David Dillow961e0be2011-01-14 17:32:07 -05002250}
2251
Roland Dreieraef9ec32005-11-02 14:07:13 -08002252static void srp_cm_rej_handler(struct ib_cm_id *cm_id,
2253 struct ib_cm_event *event,
Bart Van Assche509c07b2014-10-30 14:48:30 +01002254 struct srp_rdma_ch *ch)
Roland Dreieraef9ec32005-11-02 14:07:13 -08002255{
Bart Van Assche509c07b2014-10-30 14:48:30 +01002256 struct srp_target_port *target = ch->target;
David Dillow7aa54bd2008-01-07 18:23:41 -05002257 struct Scsi_Host *shost = target->scsi_host;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002258 struct ib_class_port_info *cpi;
2259 int opcode;
2260
2261 switch (event->param.rej_rcvd.reason) {
2262 case IB_CM_REJ_PORT_CM_REDIRECT:
2263 cpi = event->param.rej_rcvd.ari;
Bart Van Assche509c07b2014-10-30 14:48:30 +01002264 ch->path.dlid = cpi->redirect_lid;
2265 ch->path.pkey = cpi->redirect_pkey;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002266 cm_id->remote_cm_qpn = be32_to_cpu(cpi->redirect_qp) & 0x00ffffff;
Bart Van Assche509c07b2014-10-30 14:48:30 +01002267 memcpy(ch->path.dgid.raw, cpi->redirect_gid, 16);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002268
Bart Van Assche509c07b2014-10-30 14:48:30 +01002269 ch->status = ch->path.dlid ?
Roland Dreieraef9ec32005-11-02 14:07:13 -08002270 SRP_DLID_REDIRECT : SRP_PORT_REDIRECT;
2271 break;
2272
2273 case IB_CM_REJ_PORT_REDIRECT:
Roland Dreier5d7cbfd2007-08-03 10:45:18 -07002274 if (srp_target_is_topspin(target)) {
Roland Dreieraef9ec32005-11-02 14:07:13 -08002275 /*
2276 * Topspin/Cisco SRP gateways incorrectly send
2277 * reject reason code 25 when they mean 24
2278 * (port redirect).
2279 */
Bart Van Assche509c07b2014-10-30 14:48:30 +01002280 memcpy(ch->path.dgid.raw,
Roland Dreieraef9ec32005-11-02 14:07:13 -08002281 event->param.rej_rcvd.ari, 16);
2282
David Dillow7aa54bd2008-01-07 18:23:41 -05002283 shost_printk(KERN_DEBUG, shost,
2284 PFX "Topspin/Cisco redirect to target port GID %016llx%016llx\n",
Bart Van Assche509c07b2014-10-30 14:48:30 +01002285 be64_to_cpu(ch->path.dgid.global.subnet_prefix),
2286 be64_to_cpu(ch->path.dgid.global.interface_id));
Roland Dreieraef9ec32005-11-02 14:07:13 -08002287
Bart Van Assche509c07b2014-10-30 14:48:30 +01002288 ch->status = SRP_PORT_REDIRECT;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002289 } else {
David Dillow7aa54bd2008-01-07 18:23:41 -05002290 shost_printk(KERN_WARNING, shost,
2291 " REJ reason: IB_CM_REJ_PORT_REDIRECT\n");
Bart Van Assche509c07b2014-10-30 14:48:30 +01002292 ch->status = -ECONNRESET;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002293 }
2294 break;
2295
2296 case IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID:
David Dillow7aa54bd2008-01-07 18:23:41 -05002297 shost_printk(KERN_WARNING, shost,
2298 " REJ reason: IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID\n");
Bart Van Assche509c07b2014-10-30 14:48:30 +01002299 ch->status = -ECONNRESET;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002300 break;
2301
2302 case IB_CM_REJ_CONSUMER_DEFINED:
2303 opcode = *(u8 *) event->private_data;
2304 if (opcode == SRP_LOGIN_REJ) {
2305 struct srp_login_rej *rej = event->private_data;
2306 u32 reason = be32_to_cpu(rej->reason);
2307
2308 if (reason == SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE)
David Dillow7aa54bd2008-01-07 18:23:41 -05002309 shost_printk(KERN_WARNING, shost,
2310 PFX "SRP_LOGIN_REJ: requested max_it_iu_len too large\n");
Roland Dreieraef9ec32005-11-02 14:07:13 -08002311 else
Bart Van Asschee7ffde02014-03-14 13:52:21 +01002312 shost_printk(KERN_WARNING, shost, PFX
2313 "SRP LOGIN from %pI6 to %pI6 REJECTED, reason 0x%08x\n",
Bart Van Assche747fe002014-10-30 14:48:05 +01002314 target->sgid.raw,
2315 target->orig_dgid.raw, reason);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002316 } else
David Dillow7aa54bd2008-01-07 18:23:41 -05002317 shost_printk(KERN_WARNING, shost,
2318 " REJ reason: IB_CM_REJ_CONSUMER_DEFINED,"
2319 " opcode 0x%02x\n", opcode);
Bart Van Assche509c07b2014-10-30 14:48:30 +01002320 ch->status = -ECONNRESET;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002321 break;
2322
David Dillow9fe4bcf2008-01-08 17:08:52 -05002323 case IB_CM_REJ_STALE_CONN:
2324 shost_printk(KERN_WARNING, shost, " REJ reason: stale connection\n");
Bart Van Assche509c07b2014-10-30 14:48:30 +01002325 ch->status = SRP_STALE_CONN;
David Dillow9fe4bcf2008-01-08 17:08:52 -05002326 break;
2327
Roland Dreieraef9ec32005-11-02 14:07:13 -08002328 default:
David Dillow7aa54bd2008-01-07 18:23:41 -05002329 shost_printk(KERN_WARNING, shost, " REJ reason 0x%x\n",
2330 event->param.rej_rcvd.reason);
Bart Van Assche509c07b2014-10-30 14:48:30 +01002331 ch->status = -ECONNRESET;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002332 }
2333}
2334
static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
{
	struct srp_rdma_ch *ch = cm_id->context;
	struct srp_target_port *target = ch->target;
	int comp = 0;

	switch (event->event) {
	case IB_CM_REQ_ERROR:
		shost_printk(KERN_DEBUG, target->scsi_host,
			     PFX "Sending CM REQ failed\n");
		comp = 1;
		ch->status = -ECONNRESET;
		break;

	case IB_CM_REP_RECEIVED:
		comp = 1;
		srp_cm_rep_handler(cm_id, event->private_data, ch);
		break;

	case IB_CM_REJ_RECEIVED:
		shost_printk(KERN_DEBUG, target->scsi_host, PFX "REJ received\n");
		comp = 1;

		srp_cm_rej_handler(cm_id, event, ch);
		break;

	case IB_CM_DREQ_RECEIVED:
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "DREQ received - connection closed\n");
		ch->connected = false;
		if (ib_send_cm_drep(cm_id, NULL, 0))
			shost_printk(KERN_ERR, target->scsi_host,
				     PFX "Sending CM DREP failed\n");
		queue_work(system_long_wq, &target->tl_err_work);
		break;

	case IB_CM_TIMEWAIT_EXIT:
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "connection closed\n");
		comp = 1;

		ch->status = 0;
		break;

	case IB_CM_MRA_RECEIVED:
	case IB_CM_DREQ_ERROR:
	case IB_CM_DREP_RECEIVED:
		break;

	default:
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Unhandled CM event %d\n", event->event);
		break;
	}

	if (comp)
		complete(&ch->done);

	return 0;
}

/**
 * srp_change_queue_depth - set device queue depth
 * @sdev: SCSI device struct
 * @qdepth: requested queue depth
 *
 * Returns the new queue depth; untagged devices are clamped to a queue
 * depth of one.
 */
static int
srp_change_queue_depth(struct scsi_device *sdev, int qdepth)
{
	if (!sdev->tagged_supported)
		qdepth = 1;
	return scsi_change_queue_depth(sdev, qdepth);
}
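
/*
 * For example (hypothetical disk name), user space reaches the handler
 * above through the generic SCSI sysfs attribute:
 *
 *   echo 64 > /sys/block/sdc/device/queue_depth
 */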

static int srp_send_tsk_mgmt(struct srp_rdma_ch *ch, u64 req_tag, u64 lun,
			     u8 func)
{
	struct srp_target_port *target = ch->target;
	struct srp_rport *rport = target->rport;
	struct ib_device *dev = target->srp_host->srp_dev->dev;
	struct srp_iu *iu;
	struct srp_tsk_mgmt *tsk_mgmt;

	if (!ch->connected || target->qp_in_error)
		return -1;

	init_completion(&ch->tsk_mgmt_done);

	/*
	 * Lock the rport mutex to prevent srp_create_ch_ib() from being
	 * invoked while a task management function is being sent.
	 */
	mutex_lock(&rport->mutex);
	spin_lock_irq(&ch->lock);
	iu = __srp_get_tx_iu(ch, SRP_IU_TSK_MGMT);
	spin_unlock_irq(&ch->lock);

	if (!iu) {
		mutex_unlock(&rport->mutex);

		return -1;
	}

	ib_dma_sync_single_for_cpu(dev, iu->dma, sizeof(*tsk_mgmt),
				   DMA_TO_DEVICE);
	tsk_mgmt = iu->buf;
	memset(tsk_mgmt, 0, sizeof(*tsk_mgmt));

	tsk_mgmt->opcode	= SRP_TSK_MGMT;
	int_to_scsilun(lun, &tsk_mgmt->lun);
	tsk_mgmt->tag		= req_tag | SRP_TAG_TSK_MGMT;
	tsk_mgmt->tsk_mgmt_func = func;
	tsk_mgmt->task_tag	= req_tag;

	ib_dma_sync_single_for_device(dev, iu->dma, sizeof(*tsk_mgmt),
				      DMA_TO_DEVICE);
	if (srp_post_send(ch, iu, sizeof(*tsk_mgmt))) {
		srp_put_tx_iu(ch, iu, SRP_IU_TSK_MGMT);
		mutex_unlock(&rport->mutex);

		return -1;
	}
	mutex_unlock(&rport->mutex);

	if (!wait_for_completion_timeout(&ch->tsk_mgmt_done,
					 msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS)))
		return -1;

	return 0;
}

static int srp_abort(struct scsi_cmnd *scmnd)
{
	struct srp_target_port *target = host_to_target(scmnd->device->host);
	struct srp_request *req = (struct srp_request *) scmnd->host_scribble;
	u32 tag;
	u16 ch_idx;
	struct srp_rdma_ch *ch;
	int ret;

	shost_printk(KERN_ERR, target->scsi_host, "SRP abort called\n");

	if (!req)
		return SUCCESS;
	/*
	 * blk_mq_unique_tag() encodes the hardware queue index in the upper
	 * 16 bits of the tag, which maps the command back to its RDMA channel.
	 */
	tag = blk_mq_unique_tag(scmnd->request);
	ch_idx = blk_mq_unique_tag_to_hwq(tag);
	if (WARN_ON_ONCE(ch_idx >= target->ch_count))
		return SUCCESS;
	ch = &target->ch[ch_idx];
	if (!srp_claim_req(ch, req, NULL, scmnd))
		return SUCCESS;
	shost_printk(KERN_ERR, target->scsi_host,
		     "Sending SRP abort for tag %#x\n", tag);
	if (srp_send_tsk_mgmt(ch, tag, scmnd->device->lun,
			      SRP_TSK_ABORT_TASK) == 0)
		ret = SUCCESS;
	else if (target->rport->state == SRP_RPORT_LOST)
		ret = FAST_IO_FAIL;
	else
		ret = FAILED;
	srp_free_req(ch, req, scmnd, 0);
	scmnd->result = DID_ABORT << 16;
	scmnd->scsi_done(scmnd);

	return ret;
}

static int srp_reset_device(struct scsi_cmnd *scmnd)
{
	struct srp_target_port *target = host_to_target(scmnd->device->host);
	struct srp_rdma_ch *ch;
	int i, j;

	shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n");

	ch = &target->ch[0];
	if (srp_send_tsk_mgmt(ch, SRP_TAG_NO_REQ, scmnd->device->lun,
			      SRP_TSK_LUN_RESET))
		return FAILED;
	if (ch->tsk_mgmt_status)
		return FAILED;

	/*
	 * Use a separate index for the request ring: reusing 'i' for the
	 * inner loop would terminate the outer loop over the channels after
	 * the first iteration.
	 */
	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		for (j = 0; j < target->req_ring_size; ++j) {
			struct srp_request *req = &ch->req_ring[j];

			srp_finish_req(ch, req, scmnd->device, DID_RESET << 16);
		}
	}

	return SUCCESS;
}

static int srp_reset_host(struct scsi_cmnd *scmnd)
{
	struct srp_target_port *target = host_to_target(scmnd->device->host);

	shost_printk(KERN_ERR, target->scsi_host, PFX "SRP reset_host called\n");

	return srp_reconnect_rport(target->rport) == 0 ? SUCCESS : FAILED;
}

static int srp_slave_configure(struct scsi_device *sdev)
{
	struct Scsi_Host *shost = sdev->host;
	struct srp_target_port *target = host_to_target(shost);
	struct request_queue *q = sdev->request_queue;
	unsigned long timeout;

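	/*
	 * Give disk requests at least as much time as SRP transport-layer
	 * error recovery may need (rq_tmo_jiffies is derived from the
	 * rport's reconnect_delay, fast_io_fail_tmo and dev_loss_tmo
	 * parameters), and never less than 30 seconds.
	 */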
	if (sdev->type == TYPE_DISK) {
		timeout = max_t(unsigned, 30 * HZ, target->rq_tmo_jiffies);
		blk_queue_rq_timeout(q, timeout);
	}

	return 0;
}

static ssize_t show_id_ext(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "0x%016llx\n", be64_to_cpu(target->id_ext));
}

static ssize_t show_ioc_guid(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "0x%016llx\n", be64_to_cpu(target->ioc_guid));
}

static ssize_t show_service_id(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "0x%016llx\n", be64_to_cpu(target->service_id));
}

static ssize_t show_pkey(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "0x%04x\n", be16_to_cpu(target->pkey));
}

static ssize_t show_sgid(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%pI6\n", target->sgid.raw);
}

static ssize_t show_dgid(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));
	struct srp_rdma_ch *ch = &target->ch[0];

	return sprintf(buf, "%pI6\n", ch->path.dgid.raw);
}

static ssize_t show_orig_dgid(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%pI6\n", target->orig_dgid.raw);
}

static ssize_t show_req_lim(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));
	struct srp_rdma_ch *ch;
	int i, req_lim = INT_MAX;

	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		req_lim = min(req_lim, ch->req_lim);
	}
	return sprintf(buf, "%d\n", req_lim);
}

static ssize_t show_zero_req_lim(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%d\n", target->zero_req_lim);
}

static ssize_t show_local_ib_port(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%d\n", target->srp_host->port);
}

static ssize_t show_local_ib_device(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%s\n", target->srp_host->srp_dev->dev->name);
}

static ssize_t show_ch_count(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%d\n", target->ch_count);
}

static ssize_t show_comp_vector(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%d\n", target->comp_vector);
}

static ssize_t show_tl_retry_count(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%d\n", target->tl_retry_count);
}

static ssize_t show_cmd_sg_entries(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%u\n", target->cmd_sg_cnt);
}

static ssize_t show_allow_ext_sg(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%s\n", target->allow_ext_sg ? "true" : "false");
}

static DEVICE_ATTR(id_ext, S_IRUGO, show_id_ext, NULL);
static DEVICE_ATTR(ioc_guid, S_IRUGO, show_ioc_guid, NULL);
static DEVICE_ATTR(service_id, S_IRUGO, show_service_id, NULL);
static DEVICE_ATTR(pkey, S_IRUGO, show_pkey, NULL);
static DEVICE_ATTR(sgid, S_IRUGO, show_sgid, NULL);
static DEVICE_ATTR(dgid, S_IRUGO, show_dgid, NULL);
static DEVICE_ATTR(orig_dgid, S_IRUGO, show_orig_dgid, NULL);
static DEVICE_ATTR(req_lim, S_IRUGO, show_req_lim, NULL);
static DEVICE_ATTR(zero_req_lim, S_IRUGO, show_zero_req_lim, NULL);
static DEVICE_ATTR(local_ib_port, S_IRUGO, show_local_ib_port, NULL);
static DEVICE_ATTR(local_ib_device, S_IRUGO, show_local_ib_device, NULL);
static DEVICE_ATTR(ch_count, S_IRUGO, show_ch_count, NULL);
static DEVICE_ATTR(comp_vector, S_IRUGO, show_comp_vector, NULL);
static DEVICE_ATTR(tl_retry_count, S_IRUGO, show_tl_retry_count, NULL);
static DEVICE_ATTR(cmd_sg_entries, S_IRUGO, show_cmd_sg_entries, NULL);
static DEVICE_ATTR(allow_ext_sg, S_IRUGO, show_allow_ext_sg, NULL);

static struct device_attribute *srp_host_attrs[] = {
	&dev_attr_id_ext,
	&dev_attr_ioc_guid,
	&dev_attr_service_id,
	&dev_attr_pkey,
	&dev_attr_sgid,
	&dev_attr_dgid,
	&dev_attr_orig_dgid,
	&dev_attr_req_lim,
	&dev_attr_zero_req_lim,
	&dev_attr_local_ib_port,
	&dev_attr_local_ib_device,
	&dev_attr_ch_count,
	&dev_attr_comp_vector,
	&dev_attr_tl_retry_count,
	&dev_attr_cmd_sg_entries,
	&dev_attr_allow_ext_sg,
	NULL
};

static struct scsi_host_template srp_template = {
	.module				= THIS_MODULE,
	.name				= "InfiniBand SRP initiator",
	.proc_name			= DRV_NAME,
	.slave_configure		= srp_slave_configure,
	.info				= srp_target_info,
	.queuecommand			= srp_queuecommand,
	.change_queue_depth		= srp_change_queue_depth,
	.eh_abort_handler		= srp_abort,
	.eh_device_reset_handler	= srp_reset_device,
	.eh_host_reset_handler		= srp_reset_host,
	.skip_settle_delay		= true,
	.sg_tablesize			= SRP_DEF_SG_TABLESIZE,
	.can_queue			= SRP_DEFAULT_CMD_SQ_SIZE,
	.this_id			= -1,
	.cmd_per_lun			= SRP_DEFAULT_CMD_SQ_SIZE,
	.use_clustering			= ENABLE_CLUSTERING,
	.shost_attrs			= srp_host_attrs,
	.use_blk_tags			= 1,
	.track_queue_depth		= 1,
};

static int srp_sdev_count(struct Scsi_Host *host)
{
	struct scsi_device *sdev;
	int c = 0;

	shost_for_each_device(sdev, host)
		c++;

	return c;
}

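/*
 * Register the SCSI host and its rport, then scan for LUNs. The target
 * stays in SRP_TARGET_SCANNING while the scan runs and is promoted to
 * SRP_TARGET_LIVE only if it has not been removed in the meantime.
 */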
static int srp_add_target(struct srp_host *host, struct srp_target_port *target)
{
	struct srp_rport_identifiers ids;
	struct srp_rport *rport;

	target->state = SRP_TARGET_SCANNING;
	sprintf(target->target_name, "SRP.T10:%016llX",
		be64_to_cpu(target->id_ext));

	if (scsi_add_host(target->scsi_host, host->srp_dev->dev->dma_device))
		return -ENODEV;

	memcpy(ids.port_id, &target->id_ext, 8);
	memcpy(ids.port_id + 8, &target->ioc_guid, 8);
	ids.roles = SRP_RPORT_ROLE_TARGET;
	rport = srp_rport_add(target->scsi_host, &ids);
	if (IS_ERR(rport)) {
		scsi_remove_host(target->scsi_host);
		return PTR_ERR(rport);
	}

	rport->lld_data = target;
	target->rport = rport;

	spin_lock(&host->target_lock);
	list_add_tail(&target->list, &host->target_list);
	spin_unlock(&host->target_lock);

	scsi_scan_target(&target->scsi_host->shost_gendev,
			 0, target->scsi_id, SCAN_WILD_CARD, 0);

	if (srp_connected_ch(target) < target->ch_count ||
	    target->qp_in_error) {
		shost_printk(KERN_INFO, target->scsi_host,
			     PFX "SCSI scan failed - removing SCSI host\n");
		srp_queue_remove_work(target);
		goto out;
	}

	pr_debug(PFX "%s: SCSI scan succeeded - detected %d LUNs\n",
		 dev_name(&target->scsi_host->shost_gendev),
		 srp_sdev_count(target->scsi_host));

	spin_lock_irq(&target->lock);
	if (target->state == SRP_TARGET_SCANNING)
		target->state = SRP_TARGET_LIVE;
	spin_unlock_irq(&target->lock);

out:
	return 0;
}

static void srp_release_dev(struct device *dev)
{
	struct srp_host *host =
		container_of(dev, struct srp_host, dev);

	complete(&host->released);
}

static struct class srp_class = {
	.name        = "infiniband_srp",
	.dev_release = srp_release_dev
};

/**
 * srp_conn_unique() - check whether the connection to a target is unique
 * @host:   SRP host.
 * @target: SRP target port.
 */
static bool srp_conn_unique(struct srp_host *host,
			    struct srp_target_port *target)
{
	struct srp_target_port *t;
	bool ret = false;

	if (target->state == SRP_TARGET_REMOVED)
		goto out;

	ret = true;

	spin_lock(&host->target_lock);
	list_for_each_entry(t, &host->target_list, list) {
		if (t != target &&
		    target->id_ext == t->id_ext &&
		    target->ioc_guid == t->ioc_guid &&
		    target->initiator_ext == t->initiator_ext) {
			ret = false;
			break;
		}
	}
	spin_unlock(&host->target_lock);

out:
	return ret;
}

/*
 * Target ports are added by writing
 *
 * id_ext=<SRP ID ext>,ioc_guid=<SRP IOC GUID>,dgid=<dest GID>,
 * pkey=<P_Key>,service_id=<service ID>
 *
 * to the add_target sysfs attribute.
 */
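/*
 * For example (all identifier values hypothetical), against the first
 * port of an mlx4 HCA:
 *
 *   echo "id_ext=200100e08b000000,ioc_guid=0002c90300a0c266,\
 *   dgid=fe800000000000000002c90300a0c267,pkey=ffff,\
 *   service_id=0002c90300a0c266" \
 *       > /sys/class/infiniband_srp/srp-mlx4_0-1/add_target
 */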
enum {
	SRP_OPT_ERR		= 0,
	SRP_OPT_ID_EXT		= 1 << 0,
	SRP_OPT_IOC_GUID	= 1 << 1,
	SRP_OPT_DGID		= 1 << 2,
	SRP_OPT_PKEY		= 1 << 3,
	SRP_OPT_SERVICE_ID	= 1 << 4,
	SRP_OPT_MAX_SECT	= 1 << 5,
	SRP_OPT_MAX_CMD_PER_LUN	= 1 << 6,
	SRP_OPT_IO_CLASS	= 1 << 7,
	SRP_OPT_INITIATOR_EXT	= 1 << 8,
	SRP_OPT_CMD_SG_ENTRIES	= 1 << 9,
	SRP_OPT_ALLOW_EXT_SG	= 1 << 10,
	SRP_OPT_SG_TABLESIZE	= 1 << 11,
	SRP_OPT_COMP_VECTOR	= 1 << 12,
	SRP_OPT_TL_RETRY_COUNT	= 1 << 13,
	SRP_OPT_QUEUE_SIZE	= 1 << 14,
	SRP_OPT_ALL		= (SRP_OPT_ID_EXT	|
				   SRP_OPT_IOC_GUID	|
				   SRP_OPT_DGID		|
				   SRP_OPT_PKEY		|
				   SRP_OPT_SERVICE_ID),
};

static const match_table_t srp_opt_tokens = {
	{ SRP_OPT_ID_EXT,		"id_ext=%s"		},
	{ SRP_OPT_IOC_GUID,		"ioc_guid=%s"		},
	{ SRP_OPT_DGID,			"dgid=%s"		},
	{ SRP_OPT_PKEY,			"pkey=%x"		},
	{ SRP_OPT_SERVICE_ID,		"service_id=%s"		},
	{ SRP_OPT_MAX_SECT,		"max_sect=%d"		},
	{ SRP_OPT_MAX_CMD_PER_LUN,	"max_cmd_per_lun=%d"	},
	{ SRP_OPT_IO_CLASS,		"io_class=%x"		},
	{ SRP_OPT_INITIATOR_EXT,	"initiator_ext=%s"	},
	{ SRP_OPT_CMD_SG_ENTRIES,	"cmd_sg_entries=%u"	},
	{ SRP_OPT_ALLOW_EXT_SG,		"allow_ext_sg=%u"	},
	{ SRP_OPT_SG_TABLESIZE,		"sg_tablesize=%u"	},
	{ SRP_OPT_COMP_VECTOR,		"comp_vector=%u"	},
	{ SRP_OPT_TL_RETRY_COUNT,	"tl_retry_count=%u"	},
	{ SRP_OPT_QUEUE_SIZE,		"queue_size=%d"		},
	{ SRP_OPT_ERR,			NULL			}
};

static int srp_parse_options(const char *buf, struct srp_target_port *target)
{
	char *options, *sep_opt;
	char *p;
	char dgid[3];
	substring_t args[MAX_OPT_ARGS];
	int opt_mask = 0;
	int token;
	int ret = -EINVAL;
	int i;

	options = kstrdup(buf, GFP_KERNEL);
	if (!options)
		return -ENOMEM;

	sep_opt = options;
	while ((p = strsep(&sep_opt, ",\n")) != NULL) {
		if (!*p)
			continue;

		token = match_token(p, srp_opt_tokens, args);
		opt_mask |= token;

		switch (token) {
		case SRP_OPT_ID_EXT:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			target->id_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
			kfree(p);
			break;

		case SRP_OPT_IOC_GUID:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			target->ioc_guid = cpu_to_be64(simple_strtoull(p, NULL, 16));
			kfree(p);
			break;

		case SRP_OPT_DGID:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			if (strlen(p) != 32) {
				pr_warn("bad dest GID parameter '%s'\n", p);
				kfree(p);
				goto out;
			}

			for (i = 0; i < 16; ++i) {
				strlcpy(dgid, p + i * 2, sizeof(dgid));
				if (sscanf(dgid, "%hhx",
					   &target->orig_dgid.raw[i]) < 1) {
					ret = -EINVAL;
					kfree(p);
					goto out;
				}
			}
			kfree(p);
			break;

		case SRP_OPT_PKEY:
			if (match_hex(args, &token)) {
				pr_warn("bad P_Key parameter '%s'\n", p);
				goto out;
			}
			target->pkey = cpu_to_be16(token);
			break;

		case SRP_OPT_SERVICE_ID:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			target->service_id = cpu_to_be64(simple_strtoull(p, NULL, 16));
			kfree(p);
			break;

		case SRP_OPT_MAX_SECT:
			if (match_int(args, &token)) {
				pr_warn("bad max sect parameter '%s'\n", p);
				goto out;
			}
			target->scsi_host->max_sectors = token;
			break;

		case SRP_OPT_QUEUE_SIZE:
			if (match_int(args, &token) || token < 1) {
				pr_warn("bad queue_size parameter '%s'\n", p);
				goto out;
			}
			target->scsi_host->can_queue = token;
			target->queue_size = token + SRP_RSP_SQ_SIZE +
					     SRP_TSK_MGMT_SQ_SIZE;
			if (!(opt_mask & SRP_OPT_MAX_CMD_PER_LUN))
				target->scsi_host->cmd_per_lun = token;
			break;

		case SRP_OPT_MAX_CMD_PER_LUN:
			if (match_int(args, &token) || token < 1) {
				pr_warn("bad max cmd_per_lun parameter '%s'\n",
					p);
				goto out;
			}
			target->scsi_host->cmd_per_lun = token;
			break;

		case SRP_OPT_IO_CLASS:
			if (match_hex(args, &token)) {
				pr_warn("bad IO class parameter '%s'\n", p);
				goto out;
			}
			if (token != SRP_REV10_IB_IO_CLASS &&
			    token != SRP_REV16A_IB_IO_CLASS) {
				pr_warn("unknown IO class parameter value %x specified (use %x or %x).\n",
					token, SRP_REV10_IB_IO_CLASS,
					SRP_REV16A_IB_IO_CLASS);
				goto out;
			}
			target->io_class = token;
			break;

		case SRP_OPT_INITIATOR_EXT:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			target->initiator_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
			kfree(p);
			break;

		case SRP_OPT_CMD_SG_ENTRIES:
			if (match_int(args, &token) || token < 1 || token > 255) {
				pr_warn("bad max cmd_sg_entries parameter '%s'\n",
					p);
				goto out;
			}
			target->cmd_sg_cnt = token;
			break;

		case SRP_OPT_ALLOW_EXT_SG:
			if (match_int(args, &token)) {
				pr_warn("bad allow_ext_sg parameter '%s'\n", p);
				goto out;
			}
			target->allow_ext_sg = !!token;
			break;

		case SRP_OPT_SG_TABLESIZE:
			if (match_int(args, &token) || token < 1 ||
			    token > SCSI_MAX_SG_CHAIN_SEGMENTS) {
				pr_warn("bad max sg_tablesize parameter '%s'\n",
					p);
				goto out;
			}
			target->sg_tablesize = token;
			break;

		case SRP_OPT_COMP_VECTOR:
			if (match_int(args, &token) || token < 0) {
				pr_warn("bad comp_vector parameter '%s'\n", p);
				goto out;
			}
			target->comp_vector = token;
			break;

		case SRP_OPT_TL_RETRY_COUNT:
			if (match_int(args, &token) || token < 2 || token > 7) {
				pr_warn("bad tl_retry_count parameter '%s' (must be a number between 2 and 7)\n",
					p);
				goto out;
			}
			target->tl_retry_count = token;
			break;

		default:
			pr_warn("unknown parameter or missing value '%s' in target creation request\n",
				p);
			goto out;
		}
	}

	if ((opt_mask & SRP_OPT_ALL) == SRP_OPT_ALL)
		ret = 0;
	else
		for (i = 0; i < ARRAY_SIZE(srp_opt_tokens); ++i)
			if ((srp_opt_tokens[i].token & SRP_OPT_ALL) &&
			    !(srp_opt_tokens[i].token & opt_mask))
				pr_warn("target creation request is missing parameter '%s'\n",
					srp_opt_tokens[i].pattern);

	if (target->scsi_host->cmd_per_lun > target->scsi_host->can_queue
	    && (opt_mask & SRP_OPT_MAX_CMD_PER_LUN))
		pr_warn("cmd_per_lun = %d > queue_size = %d\n",
			target->scsi_host->cmd_per_lun,
			target->scsi_host->can_queue);

out:
	kfree(options);
	return ret;
}

static ssize_t srp_create_target(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct srp_host *host =
		container_of(dev, struct srp_host, dev);
	struct Scsi_Host *target_host;
	struct srp_target_port *target;
	struct srp_rdma_ch *ch;
	struct srp_device *srp_dev = host->srp_dev;
	struct ib_device *ibdev = srp_dev->dev;
	int ret, node_idx, node, cpu, i;
	bool multich = false;

	target_host = scsi_host_alloc(&srp_template,
				      sizeof (struct srp_target_port));
	if (!target_host)
		return -ENOMEM;

	target_host->transportt  = ib_srp_transport_template;
	target_host->max_channel = 0;
	target_host->max_id      = 1;
	target_host->max_lun     = -1LL;
	target_host->max_cmd_len = sizeof ((struct srp_cmd *) (void *) 0L)->cdb;

	target = host_to_target(target_host);

	target->io_class	= SRP_REV16A_IB_IO_CLASS;
	target->scsi_host	= target_host;
	target->srp_host	= host;
	target->lkey		= host->srp_dev->mr->lkey;
	target->rkey		= host->srp_dev->mr->rkey;
	target->cmd_sg_cnt	= cmd_sg_entries;
	target->sg_tablesize	= indirect_sg_entries ? : cmd_sg_entries;
	target->allow_ext_sg	= allow_ext_sg;
	target->tl_retry_count	= 7;
	target->queue_size	= SRP_DEFAULT_QUEUE_SIZE;

	/*
	 * Prevent the SCSI host from being removed by srp_remove_target()
	 * before this function returns.
	 */
	scsi_host_get(target->scsi_host);

	mutex_lock(&host->add_target_mutex);

	ret = srp_parse_options(buf, target);
	if (ret)
		goto out;

	ret = scsi_init_shared_tag_map(target_host, target_host->can_queue);
	if (ret)
		goto out;

	target->req_ring_size = target->queue_size - SRP_TSK_MGMT_SQ_SIZE;

	if (!srp_conn_unique(target->srp_host, target)) {
		shost_printk(KERN_INFO, target->scsi_host,
			     PFX "Already connected to target port with id_ext=%016llx;ioc_guid=%016llx;initiator_ext=%016llx\n",
			     be64_to_cpu(target->id_ext),
			     be64_to_cpu(target->ioc_guid),
			     be64_to_cpu(target->initiator_ext));
		ret = -EEXIST;
		goto out;
	}

	if (!srp_dev->has_fmr && !srp_dev->has_fr && !target->allow_ext_sg &&
	    target->cmd_sg_cnt < target->sg_tablesize) {
		pr_warn("No MR pool and no external indirect descriptors, limiting sg_tablesize to cmd_sg_cnt\n");
		target->sg_tablesize = target->cmd_sg_cnt;
	}

	target_host->sg_tablesize = target->sg_tablesize;
	target->indirect_size = target->sg_tablesize *
				sizeof (struct srp_direct_buf);
	target->max_iu_len = sizeof (struct srp_cmd) +
			     sizeof (struct srp_indirect_buf) +
			     target->cmd_sg_cnt * sizeof (struct srp_direct_buf);

	INIT_WORK(&target->tl_err_work, srp_tl_err_work);
	INIT_WORK(&target->remove_work, srp_remove_work);
	spin_lock_init(&target->lock);
	ret = ib_query_gid(ibdev, host->port, 0, &target->sgid);
	if (ret)
		goto out;

	ret = -ENOMEM;
	target->ch_count = max_t(unsigned, num_online_nodes(),
				 min(ch_count ? :
				     min(4 * num_online_nodes(),
					 ibdev->num_comp_vectors),
				     num_online_cpus()));
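	/*
	 * Worked example: on a two-node system with 16 online CPUs and an
	 * HCA with 8 completion vectors, with the ch_count module parameter
	 * left at 0, this yields max(2, min(min(8, 8), 16)) = 8 channels.
	 */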
3211 target->ch = kcalloc(target->ch_count, sizeof(*target->ch),
3212 GFP_KERNEL);
3213 if (!target->ch)
Bart Van Asschefb49c8b2015-05-18 13:23:14 +02003214 goto out;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003215
Bart Van Assched92c0da2014-10-06 17:14:36 +02003216 node_idx = 0;
3217 for_each_online_node(node) {
3218 const int ch_start = (node_idx * target->ch_count /
3219 num_online_nodes());
3220 const int ch_end = ((node_idx + 1) * target->ch_count /
3221 num_online_nodes());
3222 const int cv_start = (node_idx * ibdev->num_comp_vectors /
3223 num_online_nodes() + target->comp_vector)
3224 % ibdev->num_comp_vectors;
3225 const int cv_end = ((node_idx + 1) * ibdev->num_comp_vectors /
3226 num_online_nodes() + target->comp_vector)
3227 % ibdev->num_comp_vectors;
3228 int cpu_idx = 0;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003229
Bart Van Assched92c0da2014-10-06 17:14:36 +02003230 for_each_online_cpu(cpu) {
3231 if (cpu_to_node(cpu) != node)
3232 continue;
3233 if (ch_start + cpu_idx >= ch_end)
3234 continue;
3235 ch = &target->ch[ch_start + cpu_idx];
3236 ch->target = target;
3237 ch->comp_vector = cv_start == cv_end ? cv_start :
3238 cv_start + cpu_idx % (cv_end - cv_start);
3239 spin_lock_init(&ch->lock);
3240 INIT_LIST_HEAD(&ch->free_tx);
3241 ret = srp_new_cm_id(ch);
3242 if (ret)
3243 goto err_disconnect;
3244
3245 ret = srp_create_ch_ib(ch);
3246 if (ret)
3247 goto err_disconnect;
3248
3249 ret = srp_alloc_req_data(ch);
3250 if (ret)
3251 goto err_disconnect;
3252
3253 ret = srp_connect_ch(ch, multich);
3254 if (ret) {
3255 shost_printk(KERN_ERR, target->scsi_host,
3256 PFX "Connection %d/%d failed\n",
3257 ch_start + cpu_idx,
3258 target->ch_count);
3259 if (node_idx == 0 && cpu_idx == 0) {
3260 goto err_disconnect;
3261 } else {
3262 srp_free_ch_ib(target, ch);
3263 srp_free_req_data(target, ch);
3264 target->ch_count = ch - target->ch;
3265 break;
3266 }
3267 }
3268
3269 multich = true;
3270 cpu_idx++;
3271 }
3272 node_idx++;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003273 }
3274
Bart Van Assched92c0da2014-10-06 17:14:36 +02003275 target->scsi_host->nr_hw_queues = target->ch_count;
3276
Roland Dreieraef9ec32005-11-02 14:07:13 -08003277 ret = srp_add_target(host, target);
3278 if (ret)
3279 goto err_disconnect;
3280
Bart Van Assche34aa6542014-10-30 14:47:22 +01003281 if (target->state != SRP_TARGET_REMOVED) {
3282 shost_printk(KERN_DEBUG, target->scsi_host, PFX
3283 "new target: id_ext %016llx ioc_guid %016llx pkey %04x service_id %016llx sgid %pI6 dgid %pI6\n",
3284 be64_to_cpu(target->id_ext),
3285 be64_to_cpu(target->ioc_guid),
Bart Van Assche747fe002014-10-30 14:48:05 +01003286 be16_to_cpu(target->pkey),
Bart Van Assche34aa6542014-10-30 14:47:22 +01003287 be64_to_cpu(target->service_id),
Bart Van Assche747fe002014-10-30 14:48:05 +01003288 target->sgid.raw, target->orig_dgid.raw);
Bart Van Assche34aa6542014-10-30 14:47:22 +01003289 }
Bart Van Asschee7ffde02014-03-14 13:52:21 +01003290
Bart Van Assche2d7091b2014-03-14 13:52:45 +01003291 ret = count;
3292
3293out:
3294 mutex_unlock(&host->add_target_mutex);
Bart Van Assche34aa6542014-10-30 14:47:22 +01003295
3296 scsi_host_put(target->scsi_host);
3297
Bart Van Assche2d7091b2014-03-14 13:52:45 +01003298 return ret;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003299
3300err_disconnect:
3301 srp_disconnect_target(target);
3302
Bart Van Assched92c0da2014-10-06 17:14:36 +02003303 for (i = 0; i < target->ch_count; i++) {
3304 ch = &target->ch[i];
3305 srp_free_ch_ib(target, ch);
3306 srp_free_req_data(target, ch);
3307 }
Roland Dreieraef9ec32005-11-02 14:07:13 -08003308
Bart Van Assched92c0da2014-10-06 17:14:36 +02003309 kfree(target->ch);
Bart Van Assche2d7091b2014-03-14 13:52:45 +01003310 goto out;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003311}
3312
static DEVICE_ATTR(add_target, S_IWUSR, NULL, srp_create_target);

static ssize_t show_ibdev(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct srp_host *host = container_of(dev, struct srp_host, dev);

	return sprintf(buf, "%s\n", host->srp_dev->dev->name);
}

static DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL);

static ssize_t show_port(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct srp_host *host = container_of(dev, struct srp_host, dev);

	return sprintf(buf, "%d\n", host->port);
}

static DEVICE_ATTR(port, S_IRUGO, show_port, NULL);

static struct srp_host *srp_add_port(struct srp_device *device, u8 port)
{
	struct srp_host *host;

	host = kzalloc(sizeof *host, GFP_KERNEL);
	if (!host)
		return NULL;

	INIT_LIST_HEAD(&host->target_list);
	spin_lock_init(&host->target_lock);
	init_completion(&host->released);
	mutex_init(&host->add_target_mutex);
	host->srp_dev = device;
	host->port = port;

	host->dev.class = &srp_class;
	host->dev.parent = device->dev->dma_device;
	dev_set_name(&host->dev, "srp-%s-%d", device->dev->name, port);

	if (device_register(&host->dev))
		goto free_host;
	if (device_create_file(&host->dev, &dev_attr_add_target))
		goto err_class;
	if (device_create_file(&host->dev, &dev_attr_ibdev))
		goto err_class;
	if (device_create_file(&host->dev, &dev_attr_port))
		goto err_class;

	return host;

err_class:
	device_unregister(&host->dev);

free_host:
	kfree(host);

	return NULL;
}

static void srp_add_one(struct ib_device *device)
{
	struct srp_device *srp_dev;
	struct ib_device_attr *dev_attr;
	struct srp_host *host;
	int mr_page_shift, p;
	u64 max_pages_per_mr;

	dev_attr = kmalloc(sizeof *dev_attr, GFP_KERNEL);
	if (!dev_attr)
		return;

	if (ib_query_device(device, dev_attr)) {
		pr_warn("Query device failed for %s\n", device->name);
		goto free_attr;
	}

	srp_dev = kmalloc(sizeof *srp_dev, GFP_KERNEL);
	if (!srp_dev)
		goto free_attr;

	srp_dev->has_fmr = (device->alloc_fmr && device->dealloc_fmr &&
			    device->map_phys_fmr && device->unmap_fmr);
	srp_dev->has_fr = (dev_attr->device_cap_flags &
			   IB_DEVICE_MEM_MGT_EXTENSIONS);
	if (!srp_dev->has_fmr && !srp_dev->has_fr)
		dev_warn(&device->dev, "neither FMR nor FR is supported\n");

	srp_dev->use_fast_reg = (srp_dev->has_fr &&
				 (!srp_dev->has_fmr || prefer_fr));

	/*
	 * Use the smallest page size supported by the HCA, down to a
	 * minimum of 4096 bytes. We're unlikely to build large sglists
	 * out of smaller entries.
	 */
	mr_page_shift		= max(12, ffs(dev_attr->page_size_cap) - 1);
	srp_dev->mr_page_size	= 1 << mr_page_shift;
	srp_dev->mr_page_mask	= ~((u64) srp_dev->mr_page_size - 1);
	max_pages_per_mr	= dev_attr->max_mr_size;
	do_div(max_pages_per_mr, srp_dev->mr_page_size);
	srp_dev->max_pages_per_mr = min_t(u64, SRP_MAX_PAGES_PER_MR,
					  max_pages_per_mr);
	if (srp_dev->use_fast_reg) {
		srp_dev->max_pages_per_mr =
			min_t(u32, srp_dev->max_pages_per_mr,
			      dev_attr->max_fast_reg_page_list_len);
	}
	srp_dev->mr_max_size	= srp_dev->mr_page_size *
				  srp_dev->max_pages_per_mr;
	pr_debug("%s: mr_page_shift = %d, dev_attr->max_mr_size = %#llx, dev_attr->max_fast_reg_page_list_len = %u, max_pages_per_mr = %d, mr_max_size = %#x\n",
		 device->name, mr_page_shift, dev_attr->max_mr_size,
		 dev_attr->max_fast_reg_page_list_len,
		 srp_dev->max_pages_per_mr, srp_dev->mr_max_size);
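	/*
	 * Worked example: an HCA reporting page_size_cap = 0x1000 (4 KB
	 * pages only) gives ffs() - 1 = 12, so mr_page_size = 4096 and
	 * mr_page_mask = ~0xfffULL; with max_mr_size = 4 GB that allows up
	 * to 2^20 pages per MR, which is then clamped to
	 * SRP_MAX_PAGES_PER_MR.
	 */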

	INIT_LIST_HEAD(&srp_dev->dev_list);

	srp_dev->dev = device;
	srp_dev->pd  = ib_alloc_pd(device);
	if (IS_ERR(srp_dev->pd))
		goto free_dev;

	srp_dev->mr = ib_get_dma_mr(srp_dev->pd,
				    IB_ACCESS_LOCAL_WRITE |
				    IB_ACCESS_REMOTE_READ |
				    IB_ACCESS_REMOTE_WRITE);
	if (IS_ERR(srp_dev->mr))
		goto err_pd;

	for (p = rdma_start_port(device); p <= rdma_end_port(device); ++p) {
		host = srp_add_port(srp_dev, p);
		if (host)
			list_add_tail(&host->list, &srp_dev->dev_list);
	}

	ib_set_client_data(device, &srp_client, srp_dev);

	goto free_attr;

err_pd:
	ib_dealloc_pd(srp_dev->pd);

free_dev:
	kfree(srp_dev);

free_attr:
	kfree(dev_attr);
}

static void srp_remove_one(struct ib_device *device)
{
	struct srp_device *srp_dev;
	struct srp_host *host, *tmp_host;
	struct srp_target_port *target;

	srp_dev = ib_get_client_data(device, &srp_client);
	if (!srp_dev)
		return;

	list_for_each_entry_safe(host, tmp_host, &srp_dev->dev_list, list) {
		device_unregister(&host->dev);
		/*
		 * Wait for the sysfs entry to go away, so that no new
		 * target ports can be created.
		 */
		wait_for_completion(&host->released);

		/*
		 * Remove all target ports.
		 */
		spin_lock(&host->target_lock);
		list_for_each_entry(target, &host->target_list, list)
			srp_queue_remove_work(target);
		spin_unlock(&host->target_lock);

		/*
		 * Wait for tl_err and target port removal tasks.
		 */
		flush_workqueue(system_long_wq);
		flush_workqueue(srp_remove_wq);

		kfree(host);
	}

	ib_dereg_mr(srp_dev->mr);
	ib_dealloc_pd(srp_dev->pd);

	kfree(srp_dev);
}

static struct srp_function_template ib_srp_transport_functions = {
	.has_rport_state	 = true,
	.reset_timer_if_blocked	 = true,
	.reconnect_delay	 = &srp_reconnect_delay,
	.fast_io_fail_tmo	 = &srp_fast_io_fail_tmo,
	.dev_loss_tmo		 = &srp_dev_loss_tmo,
	.reconnect		 = srp_rport_reconnect,
	.rport_delete		 = srp_rport_delete,
	.terminate_rport_io	 = srp_terminate_io,
};

static int __init srp_init_module(void)
{
	int ret;

	BUILD_BUG_ON(FIELD_SIZEOF(struct ib_wc, wr_id) < sizeof(void *));

	if (srp_sg_tablesize) {
		pr_warn("srp_sg_tablesize is deprecated, please use cmd_sg_entries\n");
		if (!cmd_sg_entries)
			cmd_sg_entries = srp_sg_tablesize;
	}

	if (!cmd_sg_entries)
		cmd_sg_entries = SRP_DEF_SG_TABLESIZE;

	if (cmd_sg_entries > 255) {
		pr_warn("Clamping cmd_sg_entries to 255\n");
		cmd_sg_entries = 255;
	}

	if (!indirect_sg_entries)
		indirect_sg_entries = cmd_sg_entries;
	else if (indirect_sg_entries < cmd_sg_entries) {
		pr_warn("Bumping up indirect_sg_entries to match cmd_sg_entries (%u)\n",
			cmd_sg_entries);
		indirect_sg_entries = cmd_sg_entries;
	}

	srp_remove_wq = create_workqueue("srp_remove");
	if (!srp_remove_wq) {
		ret = -ENOMEM;
		goto out;
	}

	ret = -ENOMEM;
	ib_srp_transport_template =
		srp_attach_transport(&ib_srp_transport_functions);
	if (!ib_srp_transport_template)
		goto destroy_wq;

	ret = class_register(&srp_class);
	if (ret) {
		pr_err("couldn't register class infiniband_srp\n");
		goto release_tr;
	}

	ib_sa_register_client(&srp_sa_client);

	ret = ib_register_client(&srp_client);
	if (ret) {
		pr_err("couldn't register IB client\n");
		goto unreg_sa;
	}

out:
	return ret;

unreg_sa:
	ib_sa_unregister_client(&srp_sa_client);
	class_unregister(&srp_class);

release_tr:
	srp_release_transport(ib_srp_transport_template);

destroy_wq:
	destroy_workqueue(srp_remove_wq);
	goto out;
}

static void __exit srp_cleanup_module(void)
{
	ib_unregister_client(&srp_client);
	ib_sa_unregister_client(&srp_sa_client);
	class_unregister(&srp_class);
	srp_release_transport(ib_srp_transport_template);
	destroy_workqueue(srp_remove_wq);
}

module_init(srp_init_module);
module_exit(srp_cleanup_module);