/*
 * Copyright (c) 2005 Cisco Systems. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/string.h>
#include <linux/parser.h>
#include <linux/random.h>
#include <linux/jiffies.h>
#include <rdma/ib_cache.h>

#include <linux/atomic.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_tcq.h>
#include <scsi/srp.h>
#include <scsi/scsi_transport_srp.h>

#include "ib_srp.h"

#define DRV_NAME	"ib_srp"
#define PFX		DRV_NAME ": "
#define DRV_VERSION	"2.0"
#define DRV_RELDATE	"July 26, 2015"

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("InfiniBand SCSI RDMA Protocol initiator");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_INFO(release_date, DRV_RELDATE);

static unsigned int srp_sg_tablesize;
static unsigned int cmd_sg_entries;
static unsigned int indirect_sg_entries;
static bool allow_ext_sg;
static bool prefer_fr = true;
static bool register_always = true;
static int topspin_workarounds = 1;

module_param(srp_sg_tablesize, uint, 0444);
MODULE_PARM_DESC(srp_sg_tablesize, "Deprecated name for cmd_sg_entries");

module_param(cmd_sg_entries, uint, 0444);
MODULE_PARM_DESC(cmd_sg_entries,
		 "Default number of gather/scatter entries in the SRP command (default is 12, max 255)");

module_param(indirect_sg_entries, uint, 0444);
MODULE_PARM_DESC(indirect_sg_entries,
		 "Default max number of gather/scatter entries (default is 12, max is " __stringify(SCSI_MAX_SG_CHAIN_SEGMENTS) ")");

module_param(allow_ext_sg, bool, 0444);
MODULE_PARM_DESC(allow_ext_sg,
		 "Default behavior when there are more than cmd_sg_entries S/G entries after mapping; fails the request when false (default false)");

module_param(topspin_workarounds, int, 0444);
MODULE_PARM_DESC(topspin_workarounds,
		 "Enable workarounds for Topspin/Cisco SRP target bugs if != 0");

module_param(prefer_fr, bool, 0444);
MODULE_PARM_DESC(prefer_fr,
"Whether to use fast registration if both FMR and fast registration are supported");

module_param(register_always, bool, 0444);
MODULE_PARM_DESC(register_always,
		 "Use memory registration even for contiguous memory regions");

static const struct kernel_param_ops srp_tmo_ops;

static int srp_reconnect_delay = 10;
module_param_cb(reconnect_delay, &srp_tmo_ops, &srp_reconnect_delay,
		S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(reconnect_delay, "Time between successive reconnect attempts");

static int srp_fast_io_fail_tmo = 15;
module_param_cb(fast_io_fail_tmo, &srp_tmo_ops, &srp_fast_io_fail_tmo,
		S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fast_io_fail_tmo,
		 "Number of seconds between the observation of a transport"
		 " layer error and failing all I/O. \"off\" means that this"
		 " functionality is disabled.");

static int srp_dev_loss_tmo = 600;
module_param_cb(dev_loss_tmo, &srp_tmo_ops, &srp_dev_loss_tmo,
		S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dev_loss_tmo,
		 "Maximum number of seconds that the SRP transport should"
		 " insulate transport layer errors. After this time has been"
		 " exceeded the SCSI host is removed. Should be"
		 " between 1 and " __stringify(SCSI_DEVICE_BLOCK_MAX_TIMEOUT)
		 " if fast_io_fail_tmo has not been set. \"off\" means that"
		 " this functionality is disabled.");

static unsigned ch_count;
module_param(ch_count, uint, 0444);
MODULE_PARM_DESC(ch_count,
		 "Number of RDMA channels to use for communication with an SRP target. Using more than one channel improves performance if the HCA supports multiple completion vectors. The default value is the minimum of four times the number of online CPU sockets and the number of completion vectors supported by the HCA.");

static void srp_add_one(struct ib_device *device);
static void srp_remove_one(struct ib_device *device, void *client_data);
static void srp_recv_completion(struct ib_cq *cq, void *ch_ptr);
static void srp_send_completion(struct ib_cq *cq, void *ch_ptr);
static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event);

static struct scsi_transport_template *ib_srp_transport_template;
static struct workqueue_struct *srp_remove_wq;

static struct ib_client srp_client = {
	.name   = "srp",
	.add    = srp_add_one,
	.remove = srp_remove_one
};

static struct ib_sa_client srp_sa_client;

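/*
 * kernel_param_ops "get" hook shared by the reconnect_delay,
 * fast_io_fail_tmo and dev_loss_tmo parameters: a negative timeout value
 * is reported as "off".
 */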
static int srp_tmo_get(char *buffer, const struct kernel_param *kp)
{
	int tmo = *(int *)kp->arg;

	if (tmo >= 0)
		return sprintf(buffer, "%d", tmo);
	else
		return sprintf(buffer, "off");
}

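/*
 * kernel_param_ops "set" hook: parse the new value and check that the
 * three SRP transport timeouts remain mutually consistent before
 * committing it.
 */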
static int srp_tmo_set(const char *val, const struct kernel_param *kp)
{
	int tmo, res;

	res = srp_parse_tmo(&tmo, val);
	if (res)
		goto out;

	if (kp->arg == &srp_reconnect_delay)
		res = srp_tmo_valid(tmo, srp_fast_io_fail_tmo,
				    srp_dev_loss_tmo);
	else if (kp->arg == &srp_fast_io_fail_tmo)
		res = srp_tmo_valid(srp_reconnect_delay, tmo, srp_dev_loss_tmo);
	else
		res = srp_tmo_valid(srp_reconnect_delay, srp_fast_io_fail_tmo,
				    tmo);
	if (res)
		goto out;
	*(int *)kp->arg = tmo;

out:
	return res;
}

static const struct kernel_param_ops srp_tmo_ops = {
	.get = srp_tmo_get,
	.set = srp_tmo_set,
};

static inline struct srp_target_port *host_to_target(struct Scsi_Host *host)
{
	return (struct srp_target_port *) host->hostdata;
}

static const char *srp_target_info(struct Scsi_Host *host)
{
	return host_to_target(host)->target_name;
}

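/*
 * Targets that need the Topspin/Cisco workarounds are recognized by the
 * OUI in the most significant bytes of their I/O controller GUID.
 */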
static int srp_target_is_topspin(struct srp_target_port *target)
{
	static const u8 topspin_oui[3] = { 0x00, 0x05, 0xad };
	static const u8 cisco_oui[3]   = { 0x00, 0x1b, 0x0d };

	return topspin_workarounds &&
		(!memcmp(&target->ioc_guid, topspin_oui, sizeof topspin_oui) ||
		 !memcmp(&target->ioc_guid, cisco_oui, sizeof cisco_oui));
}

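/*
 * Allocate an SRP information unit (IU): a kernel buffer plus the DMA
 * mapping used to send or receive a single SRP message.
 */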
static struct srp_iu *srp_alloc_iu(struct srp_host *host, size_t size,
				   gfp_t gfp_mask,
				   enum dma_data_direction direction)
{
	struct srp_iu *iu;

	iu = kmalloc(sizeof *iu, gfp_mask);
	if (!iu)
		goto out;

	iu->buf = kzalloc(size, gfp_mask);
	if (!iu->buf)
		goto out_free_iu;

	iu->dma = ib_dma_map_single(host->srp_dev->dev, iu->buf, size,
				    direction);
	if (ib_dma_mapping_error(host->srp_dev->dev, iu->dma))
		goto out_free_buf;

	iu->size      = size;
	iu->direction = direction;

	return iu;

out_free_buf:
	kfree(iu->buf);
out_free_iu:
	kfree(iu);
out:
	return NULL;
}

static void srp_free_iu(struct srp_host *host, struct srp_iu *iu)
{
	if (!iu)
		return;

	ib_dma_unmap_single(host->srp_dev->dev, iu->dma, iu->size,
			    iu->direction);
	kfree(iu->buf);
	kfree(iu);
}

static void srp_qp_event(struct ib_event *event, void *context)
{
	pr_debug("QP event %s (%d)\n",
		 ib_event_msg(event->event), event->event);
}

static int srp_init_qp(struct srp_target_port *target,
		       struct ib_qp *qp)
{
	struct ib_qp_attr *attr;
	int ret;

	attr = kmalloc(sizeof *attr, GFP_KERNEL);
	if (!attr)
		return -ENOMEM;

	ret = ib_find_cached_pkey(target->srp_host->srp_dev->dev,
				  target->srp_host->port,
				  be16_to_cpu(target->pkey),
				  &attr->pkey_index);
	if (ret)
		goto out;

	attr->qp_state        = IB_QPS_INIT;
	attr->qp_access_flags = (IB_ACCESS_REMOTE_READ |
				 IB_ACCESS_REMOTE_WRITE);
	attr->port_num        = target->srp_host->port;

	ret = ib_modify_qp(qp, attr,
			   IB_QP_STATE		|
			   IB_QP_PKEY_INDEX	|
			   IB_QP_ACCESS_FLAGS	|
			   IB_QP_PORT);

out:
	kfree(attr);
	return ret;
}

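/*
 * Replace the channel's CM ID with a freshly allocated one and
 * re-initialize the path record fields from the target port parameters.
 */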
static int srp_new_cm_id(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	struct ib_cm_id *new_cm_id;

	new_cm_id = ib_create_cm_id(target->srp_host->srp_dev->dev,
				    srp_cm_handler, ch);
	if (IS_ERR(new_cm_id))
		return PTR_ERR(new_cm_id);

	if (ch->cm_id)
		ib_destroy_cm_id(ch->cm_id);
	ch->cm_id = new_cm_id;
	ch->path.sgid = target->sgid;
	ch->path.dgid = target->orig_dgid;
	ch->path.pkey = target->pkey;
	ch->path.service_id = target->service_id;

	return 0;
}

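/*
 * The FMR pool is sized to match the SCSI host queue depth so that every
 * outstanding command can own a mapping.
 */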
static struct ib_fmr_pool *srp_alloc_fmr_pool(struct srp_target_port *target)
{
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_fmr_pool_param fmr_param;

	memset(&fmr_param, 0, sizeof(fmr_param));
	fmr_param.pool_size	    = target->scsi_host->can_queue;
	fmr_param.dirty_watermark   = fmr_param.pool_size / 4;
	fmr_param.cache		    = 1;
	fmr_param.max_pages_per_fmr = dev->max_pages_per_mr;
	fmr_param.page_shift	    = ilog2(dev->mr_page_size);
	fmr_param.access	    = (IB_ACCESS_LOCAL_WRITE |
				       IB_ACCESS_REMOTE_WRITE |
				       IB_ACCESS_REMOTE_READ);

	return ib_create_fmr_pool(dev->pd, &fmr_param);
}

/**
 * srp_destroy_fr_pool() - free the resources owned by a pool
 * @pool: Fast registration pool to be destroyed.
 */
static void srp_destroy_fr_pool(struct srp_fr_pool *pool)
{
	int i;
	struct srp_fr_desc *d;

	if (!pool)
		return;

	for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
		if (d->mr)
			ib_dereg_mr(d->mr);
	}
	kfree(pool);
}

/**
 * srp_create_fr_pool() - allocate and initialize a pool for fast registration
 * @device: IB device to allocate fast registration descriptors for.
 * @pd: Protection domain associated with the FR descriptors.
 * @pool_size: Number of descriptors to allocate.
 * @max_page_list_len: Maximum fast registration work request page list length.
 */
static struct srp_fr_pool *srp_create_fr_pool(struct ib_device *device,
					      struct ib_pd *pd, int pool_size,
					      int max_page_list_len)
{
	struct srp_fr_pool *pool;
	struct srp_fr_desc *d;
	struct ib_mr *mr;
	int i, ret = -EINVAL;

	if (pool_size <= 0)
		goto err;
	ret = -ENOMEM;
	pool = kzalloc(sizeof(struct srp_fr_pool) +
		       pool_size * sizeof(struct srp_fr_desc), GFP_KERNEL);
	if (!pool)
		goto err;
	pool->size = pool_size;
	pool->max_page_list_len = max_page_list_len;
	spin_lock_init(&pool->lock);
	INIT_LIST_HEAD(&pool->free_list);

	for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
		mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG,
				 max_page_list_len);
		if (IS_ERR(mr)) {
			ret = PTR_ERR(mr);
			goto destroy_pool;
		}
		d->mr = mr;
		list_add_tail(&d->entry, &pool->free_list);
	}

out:
	return pool;

destroy_pool:
	srp_destroy_fr_pool(pool);

err:
	pool = ERR_PTR(ret);
	goto out;
}

/**
 * srp_fr_pool_get() - obtain a descriptor suitable for fast registration
 * @pool: Pool to obtain descriptor from.
 */
static struct srp_fr_desc *srp_fr_pool_get(struct srp_fr_pool *pool)
{
	struct srp_fr_desc *d = NULL;
	unsigned long flags;

	spin_lock_irqsave(&pool->lock, flags);
	if (!list_empty(&pool->free_list)) {
		d = list_first_entry(&pool->free_list, typeof(*d), entry);
		list_del(&d->entry);
	}
	spin_unlock_irqrestore(&pool->lock, flags);

	return d;
}

/**
 * srp_fr_pool_put() - put an FR descriptor back in the free list
 * @pool: Pool the descriptor was allocated from.
 * @desc: Pointer to an array of fast registration descriptor pointers.
 * @n: Number of descriptors to put back.
 *
 * Note: The caller must already have queued an invalidation request for
 * desc->mr->rkey before calling this function.
 */
static void srp_fr_pool_put(struct srp_fr_pool *pool, struct srp_fr_desc **desc,
			    int n)
{
	unsigned long flags;
	int i;

	spin_lock_irqsave(&pool->lock, flags);
	for (i = 0; i < n; i++)
		list_add(&desc[i]->entry, &pool->free_list);
	spin_unlock_irqrestore(&pool->lock, flags);
}

static struct srp_fr_pool *srp_alloc_fr_pool(struct srp_target_port *target)
{
	struct srp_device *dev = target->srp_host->srp_dev;

	return srp_create_fr_pool(dev->dev, dev->pd,
				  target->scsi_host->can_queue,
				  dev->max_pages_per_mr);
}

/**
 * srp_destroy_qp() - destroy an RDMA queue pair
 * @ch: SRP RDMA channel.
 *
 * Change a queue pair into the error state and wait until all receive
 * completions have been processed before destroying it. This prevents the
 * receive completion handler from accessing the queue pair while it is
 * being destroyed.
 */
static void srp_destroy_qp(struct srp_rdma_ch *ch)
{
	static struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
	static struct ib_recv_wr wr = { .wr_id = SRP_LAST_WR_ID };
	struct ib_recv_wr *bad_wr;
	int ret;

	/* Destroying a QP and reusing ch->done is only safe if not connected */
	WARN_ON_ONCE(ch->connected);

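	/*
	 * Move the QP into the error state and post one final receive work
	 * request (SRP_LAST_WR_ID). Once that request has been flushed back
	 * with an error completion, all receives posted before it have been
	 * processed and the QP can be destroyed safely.
	 */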
467 ret = ib_modify_qp(ch->qp, &attr, IB_QP_STATE);
	WARN_ONCE(ret, "ib_modify_qp() returned %d\n", ret);
	if (ret)
		goto out;

	init_completion(&ch->done);
	ret = ib_post_recv(ch->qp, &wr, &bad_wr);
	WARN_ONCE(ret, "ib_post_recv() returned %d\n", ret);
	if (ret == 0)
		wait_for_completion(&ch->done);

out:
	ib_destroy_qp(ch->qp);
}

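/*
 * Create (or re-create) the CQs, the QP and the memory registration pool
 * for one RDMA channel. The replacement resources are allocated first;
 * the old ones, if any, are destroyed only after that has succeeded.
 */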
static int srp_create_ch_ib(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_qp_init_attr *init_attr;
	struct ib_cq *recv_cq, *send_cq;
	struct ib_qp *qp;
	struct ib_fmr_pool *fmr_pool = NULL;
	struct srp_fr_pool *fr_pool = NULL;
	const int m = 1 + dev->use_fast_reg;
	struct ib_cq_init_attr cq_attr = {};
	int ret;

	init_attr = kzalloc(sizeof *init_attr, GFP_KERNEL);
	if (!init_attr)
		return -ENOMEM;

	/* + 1 for SRP_LAST_WR_ID */
	cq_attr.cqe = target->queue_size + 1;
	cq_attr.comp_vector = ch->comp_vector;
	recv_cq = ib_create_cq(dev->dev, srp_recv_completion, NULL, ch,
			       &cq_attr);
	if (IS_ERR(recv_cq)) {
		ret = PTR_ERR(recv_cq);
		goto err;
	}

	cq_attr.cqe = m * target->queue_size;
	cq_attr.comp_vector = ch->comp_vector;
	send_cq = ib_create_cq(dev->dev, srp_send_completion, NULL, ch,
			       &cq_attr);
	if (IS_ERR(send_cq)) {
		ret = PTR_ERR(send_cq);
		goto err_recv_cq;
	}

	ib_req_notify_cq(recv_cq, IB_CQ_NEXT_COMP);

	init_attr->event_handler       = srp_qp_event;
	init_attr->cap.max_send_wr     = m * target->queue_size;
	init_attr->cap.max_recv_wr     = target->queue_size + 1;
	init_attr->cap.max_recv_sge    = 1;
	init_attr->cap.max_send_sge    = 1;
	init_attr->sq_sig_type         = IB_SIGNAL_REQ_WR;
	init_attr->qp_type             = IB_QPT_RC;
	init_attr->send_cq             = send_cq;
	init_attr->recv_cq             = recv_cq;

	qp = ib_create_qp(dev->pd, init_attr);
	if (IS_ERR(qp)) {
		ret = PTR_ERR(qp);
		goto err_send_cq;
	}

	ret = srp_init_qp(target, qp);
	if (ret)
		goto err_qp;

	if (dev->use_fast_reg) {
		fr_pool = srp_alloc_fr_pool(target);
		if (IS_ERR(fr_pool)) {
			ret = PTR_ERR(fr_pool);
			shost_printk(KERN_WARNING, target->scsi_host, PFX
				     "FR pool allocation failed (%d)\n", ret);
			goto err_qp;
		}
	} else if (dev->use_fmr) {
		fmr_pool = srp_alloc_fmr_pool(target);
		if (IS_ERR(fmr_pool)) {
			ret = PTR_ERR(fmr_pool);
			shost_printk(KERN_WARNING, target->scsi_host, PFX
				     "FMR pool allocation failed (%d)\n", ret);
			goto err_qp;
		}
	}

	if (ch->qp)
		srp_destroy_qp(ch);
	if (ch->recv_cq)
		ib_destroy_cq(ch->recv_cq);
	if (ch->send_cq)
		ib_destroy_cq(ch->send_cq);

	ch->qp = qp;
	ch->recv_cq = recv_cq;
	ch->send_cq = send_cq;

	if (dev->use_fast_reg) {
		if (ch->fr_pool)
			srp_destroy_fr_pool(ch->fr_pool);
		ch->fr_pool = fr_pool;
	} else if (dev->use_fmr) {
		if (ch->fmr_pool)
			ib_destroy_fmr_pool(ch->fmr_pool);
		ch->fmr_pool = fmr_pool;
	}

	kfree(init_attr);
	return 0;

err_qp:
	ib_destroy_qp(qp);

err_send_cq:
	ib_destroy_cq(send_cq);

err_recv_cq:
	ib_destroy_cq(recv_cq);

err:
	kfree(init_attr);
	return ret;
}

/*
 * Note: this function may be called without srp_alloc_iu_bufs() having been
 * invoked. Hence the ch->[rt]x_ring checks.
 */
static void srp_free_ch_ib(struct srp_target_port *target,
			   struct srp_rdma_ch *ch)
{
	struct srp_device *dev = target->srp_host->srp_dev;
	int i;

	if (!ch->target)
		return;

	if (ch->cm_id) {
		ib_destroy_cm_id(ch->cm_id);
		ch->cm_id = NULL;
	}

	/* Return if srp_new_cm_id() succeeded but srp_create_ch_ib() failed. */
	if (!ch->qp)
		return;

	if (dev->use_fast_reg) {
		if (ch->fr_pool)
			srp_destroy_fr_pool(ch->fr_pool);
	} else if (dev->use_fmr) {
		if (ch->fmr_pool)
			ib_destroy_fmr_pool(ch->fmr_pool);
	}
	srp_destroy_qp(ch);
	ib_destroy_cq(ch->send_cq);
	ib_destroy_cq(ch->recv_cq);

	/*
	 * Prevent the SCSI error handler from using this channel after it
	 * has been freed: the error handler may keep trying to perform
	 * recovery actions after scsi_remove_host() has returned.
	 */
	ch->target = NULL;

	ch->qp = NULL;
	ch->send_cq = ch->recv_cq = NULL;

	if (ch->rx_ring) {
		for (i = 0; i < target->queue_size; ++i)
			srp_free_iu(target->srp_host, ch->rx_ring[i]);
		kfree(ch->rx_ring);
		ch->rx_ring = NULL;
	}
	if (ch->tx_ring) {
		for (i = 0; i < target->queue_size; ++i)
			srp_free_iu(target->srp_host, ch->tx_ring[i]);
		kfree(ch->tx_ring);
		ch->tx_ring = NULL;
	}
}

static void srp_path_rec_completion(int status,
				    struct ib_sa_path_rec *pathrec,
				    void *ch_ptr)
{
	struct srp_rdma_ch *ch = ch_ptr;
	struct srp_target_port *target = ch->target;

	ch->status = status;
	if (status)
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "Got failed path rec status %d\n", status);
	else
		ch->path = *pathrec;
	complete(&ch->done);
}

static int srp_lookup_path(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	int ret;

	ch->path.numb_path = 1;

	init_completion(&ch->done);

	ch->path_query_id = ib_sa_path_rec_get(&srp_sa_client,
					       target->srp_host->srp_dev->dev,
					       target->srp_host->port,
					       &ch->path,
					       IB_SA_PATH_REC_SERVICE_ID |
					       IB_SA_PATH_REC_DGID |
					       IB_SA_PATH_REC_SGID |
					       IB_SA_PATH_REC_NUMB_PATH |
					       IB_SA_PATH_REC_PKEY,
					       SRP_PATH_REC_TIMEOUT_MS,
					       GFP_KERNEL,
					       srp_path_rec_completion,
					       ch, &ch->path_query);
	if (ch->path_query_id < 0)
		return ch->path_query_id;

	ret = wait_for_completion_interruptible(&ch->done);
	if (ret < 0)
		return ret;

	if (ch->status < 0)
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Path record query failed\n");

	return ch->status;
}

static int srp_send_req(struct srp_rdma_ch *ch, bool multich)
{
	struct srp_target_port *target = ch->target;
	struct {
		struct ib_cm_req_param param;
		struct srp_login_req   priv;
	} *req = NULL;
	int status;

	req = kzalloc(sizeof *req, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	req->param.primary_path       = &ch->path;
	req->param.alternate_path     = NULL;
	req->param.service_id         = target->service_id;
	req->param.qp_num             = ch->qp->qp_num;
	req->param.qp_type            = ch->qp->qp_type;
	req->param.private_data       = &req->priv;
	req->param.private_data_len   = sizeof req->priv;
	req->param.flow_control       = 1;

	get_random_bytes(&req->param.starting_psn, 4);
	req->param.starting_psn      &= 0xffffff;

	/*
	 * Pick some arbitrary defaults here; we could make these
	 * module parameters if anyone cared about setting them.
	 */
	req->param.responder_resources	      = 4;
	req->param.remote_cm_response_timeout = 20;
	req->param.local_cm_response_timeout  = 20;
	req->param.retry_count                = target->tl_retry_count;
	req->param.rnr_retry_count            = 7;
	req->param.max_cm_retries             = 15;

	req->priv.opcode        = SRP_LOGIN_REQ;
	req->priv.tag           = 0;
	req->priv.req_it_iu_len = cpu_to_be32(target->max_iu_len);
	req->priv.req_buf_fmt   = cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
					      SRP_BUF_FORMAT_INDIRECT);
	req->priv.req_flags     = (multich ? SRP_MULTICHAN_MULTI :
				   SRP_MULTICHAN_SINGLE);
	/*
	 * In the published SRP specification (draft rev. 16a), the
	 * port identifier format is 8 bytes of ID extension followed
	 * by 8 bytes of GUID. Older drafts put the two halves in the
	 * opposite order, so that the GUID comes first.
	 *
	 * Targets conforming to these obsolete drafts can be
	 * recognized by the I/O Class they report.
	 */
	if (target->io_class == SRP_REV10_IB_IO_CLASS) {
		memcpy(req->priv.initiator_port_id,
		       &target->sgid.global.interface_id, 8);
		memcpy(req->priv.initiator_port_id + 8,
		       &target->initiator_ext, 8);
		memcpy(req->priv.target_port_id, &target->ioc_guid, 8);
		memcpy(req->priv.target_port_id + 8, &target->id_ext, 8);
	} else {
		memcpy(req->priv.initiator_port_id,
		       &target->initiator_ext, 8);
		memcpy(req->priv.initiator_port_id + 8,
		       &target->sgid.global.interface_id, 8);
		memcpy(req->priv.target_port_id, &target->id_ext, 8);
		memcpy(req->priv.target_port_id + 8, &target->ioc_guid, 8);
	}

	/*
	 * Topspin/Cisco SRP targets will reject our login unless we
	 * zero out the first 8 bytes of our initiator port ID and set
	 * the second 8 bytes to the local node GUID.
	 */
	if (srp_target_is_topspin(target)) {
		shost_printk(KERN_DEBUG, target->scsi_host,
			     PFX "Topspin/Cisco initiator port ID workaround "
			     "activated for target GUID %016llx\n",
			     be64_to_cpu(target->ioc_guid));
		memset(req->priv.initiator_port_id, 0, 8);
		memcpy(req->priv.initiator_port_id + 8,
		       &target->srp_host->srp_dev->dev->node_guid, 8);
	}

	status = ib_send_cm_req(ch->cm_id, &req->param);

	kfree(req);

	return status;
}

static bool srp_queue_remove_work(struct srp_target_port *target)
{
	bool changed = false;

	spin_lock_irq(&target->lock);
	if (target->state != SRP_TARGET_REMOVED) {
		target->state = SRP_TARGET_REMOVED;
		changed = true;
	}
	spin_unlock_irq(&target->lock);

	if (changed)
		queue_work(srp_remove_wq, &target->remove_work);

	return changed;
}

static void srp_disconnect_target(struct srp_target_port *target)
{
	struct srp_rdma_ch *ch;
	int i;

	/* XXX should send SRP_I_LOGOUT request */

	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		ch->connected = false;
		if (ch->cm_id && ib_send_cm_dreq(ch->cm_id, NULL, 0)) {
			shost_printk(KERN_DEBUG, target->scsi_host,
				     PFX "Sending CM DREQ failed\n");
		}
	}
}

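/* Free the per-request resources allocated by srp_alloc_req_data(). */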
static void srp_free_req_data(struct srp_target_port *target,
			      struct srp_rdma_ch *ch)
{
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_device *ibdev = dev->dev;
	struct srp_request *req;
	int i;

	if (!ch->req_ring)
		return;

	for (i = 0; i < target->req_ring_size; ++i) {
		req = &ch->req_ring[i];
		if (dev->use_fast_reg)
			kfree(req->fr_list);
		else
			kfree(req->fmr_list);
		kfree(req->map_page);
		if (req->indirect_dma_addr) {
			ib_dma_unmap_single(ibdev, req->indirect_dma_addr,
					    target->indirect_size,
					    DMA_TO_DEVICE);
		}
		kfree(req->indirect_desc);
	}

	kfree(ch->req_ring);
	ch->req_ring = NULL;
}

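/*
 * Allocate the per-channel request ring together with the per-request
 * scratch space: the FMR/FR descriptor list, the page array and the
 * DMA-mapped indirect descriptor table.
 */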
static int srp_alloc_req_data(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	struct srp_device *srp_dev = target->srp_host->srp_dev;
	struct ib_device *ibdev = srp_dev->dev;
	struct srp_request *req;
	void *mr_list;
	dma_addr_t dma_addr;
	int i, ret = -ENOMEM;

	ch->req_ring = kcalloc(target->req_ring_size, sizeof(*ch->req_ring),
			       GFP_KERNEL);
	if (!ch->req_ring)
		goto out;

	for (i = 0; i < target->req_ring_size; ++i) {
		req = &ch->req_ring[i];
		mr_list = kmalloc(target->cmd_sg_cnt * sizeof(void *),
				  GFP_KERNEL);
		if (!mr_list)
			goto out;
		if (srp_dev->use_fast_reg)
			req->fr_list = mr_list;
		else
			req->fmr_list = mr_list;
		req->map_page = kmalloc(srp_dev->max_pages_per_mr *
					sizeof(void *), GFP_KERNEL);
		if (!req->map_page)
			goto out;
		req->indirect_desc = kmalloc(target->indirect_size, GFP_KERNEL);
		if (!req->indirect_desc)
			goto out;

		dma_addr = ib_dma_map_single(ibdev, req->indirect_desc,
					     target->indirect_size,
					     DMA_TO_DEVICE);
		if (ib_dma_mapping_error(ibdev, dma_addr))
			goto out;

		req->indirect_dma_addr = dma_addr;
	}
	ret = 0;

out:
	return ret;
}

/**
 * srp_del_scsi_host_attr() - Remove attributes defined in the host template.
 * @shost: SCSI host whose attributes to remove from sysfs.
 *
 * Note: Any attributes defined in the host template that did not exist
 * before this function was invoked will be ignored.
 */
static void srp_del_scsi_host_attr(struct Scsi_Host *shost)
{
	struct device_attribute **attr;

	for (attr = shost->hostt->shost_attrs; attr && *attr; ++attr)
		device_remove_file(&shost->shost_dev, *attr);
}

static void srp_remove_target(struct srp_target_port *target)
{
	struct srp_rdma_ch *ch;
	int i;

	WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);

	srp_del_scsi_host_attr(target->scsi_host);
	srp_rport_get(target->rport);
	srp_remove_host(target->scsi_host);
	scsi_remove_host(target->scsi_host);
	srp_stop_rport_timers(target->rport);
	srp_disconnect_target(target);
	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		srp_free_ch_ib(target, ch);
	}
	cancel_work_sync(&target->tl_err_work);
	srp_rport_put(target->rport);
	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		srp_free_req_data(target, ch);
	}
	kfree(target->ch);
	target->ch = NULL;

	spin_lock(&target->srp_host->target_lock);
	list_del(&target->list);
	spin_unlock(&target->srp_host->target_lock);

	scsi_host_put(target->scsi_host);
}

static void srp_remove_work(struct work_struct *work)
{
	struct srp_target_port *target =
		container_of(work, struct srp_target_port, remove_work);

	WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);

	srp_remove_target(target);
}

static void srp_rport_delete(struct srp_rport *rport)
{
	struct srp_target_port *target = rport->lld_data;

	srp_queue_remove_work(target);
}

/**
 * srp_connected_ch() - number of connected channels
 * @target: SRP target port.
 */
static int srp_connected_ch(struct srp_target_port *target)
{
	int i, c = 0;

	for (i = 0; i < target->ch_count; i++)
		c += target->ch[i].connected;

	return c;
}

static int srp_connect_ch(struct srp_rdma_ch *ch, bool multich)
{
	struct srp_target_port *target = ch->target;
	int ret;

	WARN_ON_ONCE(!multich && srp_connected_ch(target) > 0);

	ret = srp_lookup_path(ch);
	if (ret)
		return ret;

	while (1) {
		init_completion(&ch->done);
		ret = srp_send_req(ch, multich);
		if (ret)
			return ret;
		ret = wait_for_completion_interruptible(&ch->done);
		if (ret < 0)
			return ret;

		/*
		 * The CM event handling code will set status to
		 * SRP_PORT_REDIRECT if we get a port redirect REJ
		 * back, or SRP_DLID_REDIRECT if we get a lid/qp
		 * redirect REJ back.
		 */
		switch (ch->status) {
		case 0:
			ch->connected = true;
			return 0;

		case SRP_PORT_REDIRECT:
			ret = srp_lookup_path(ch);
			if (ret)
				return ret;
			break;

		case SRP_DLID_REDIRECT:
			break;

		case SRP_STALE_CONN:
			shost_printk(KERN_ERR, target->scsi_host, PFX
				     "giving up on stale connection\n");
			ch->status = -ECONNRESET;
			return ch->status;

		default:
			return ch->status;
		}
	}
}

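/*
 * Post a work request that invalidates the given rkey; used to invalidate
 * fast registration descriptors before they are returned to the pool. The
 * LOCAL_INV_WR_ID_MASK marker in wr_id lets the completion handler tell
 * these requests apart from normal sends.
 */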
static int srp_inv_rkey(struct srp_rdma_ch *ch, u32 rkey)
{
	struct ib_send_wr *bad_wr;
	struct ib_send_wr wr = {
		.opcode             = IB_WR_LOCAL_INV,
		.wr_id              = LOCAL_INV_WR_ID_MASK,
		.next               = NULL,
		.num_sge            = 0,
		.send_flags         = 0,
		.ex.invalidate_rkey = rkey,
	};

	return ib_post_send(ch->qp, &wr, &bad_wr);
}

static void srp_unmap_data(struct scsi_cmnd *scmnd,
			   struct srp_rdma_ch *ch,
			   struct srp_request *req)
{
	struct srp_target_port *target = ch->target;
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_device *ibdev = dev->dev;
	int i, res;

	if (!scsi_sglist(scmnd) ||
	    (scmnd->sc_data_direction != DMA_TO_DEVICE &&
	     scmnd->sc_data_direction != DMA_FROM_DEVICE))
		return;

	if (dev->use_fast_reg) {
		struct srp_fr_desc **pfr;

		for (i = req->nmdesc, pfr = req->fr_list; i > 0; i--, pfr++) {
			res = srp_inv_rkey(ch, (*pfr)->mr->rkey);
			if (res < 0) {
				shost_printk(KERN_ERR, target->scsi_host, PFX
					     "Queueing INV WR for rkey %#x failed (%d)\n",
					     (*pfr)->mr->rkey, res);
				queue_work(system_long_wq,
					   &target->tl_err_work);
			}
		}
		if (req->nmdesc)
			srp_fr_pool_put(ch->fr_pool, req->fr_list,
					req->nmdesc);
	} else if (dev->use_fmr) {
		struct ib_pool_fmr **pfmr;

		for (i = req->nmdesc, pfmr = req->fmr_list; i > 0; i--, pfmr++)
			ib_fmr_pool_unmap(*pfmr);
	}

	ib_dma_unmap_sg(ibdev, scsi_sglist(scmnd), scsi_sg_count(scmnd),
			scmnd->sc_data_direction);
}

/**
 * srp_claim_req - Take ownership of the scmnd associated with a request.
 * @ch: SRP RDMA channel.
 * @req: SRP request.
 * @sdev: If not NULL, only take ownership for this SCSI device.
 * @scmnd: If NULL, take ownership of @req->scmnd. If not NULL, only take
 *         ownership of @req->scmnd if it equals @scmnd.
 *
 * Return value:
 * Either NULL or a pointer to the SCSI command the caller became owner of.
 */
static struct scsi_cmnd *srp_claim_req(struct srp_rdma_ch *ch,
				       struct srp_request *req,
				       struct scsi_device *sdev,
				       struct scsi_cmnd *scmnd)
{
	unsigned long flags;

	spin_lock_irqsave(&ch->lock, flags);
	if (req->scmnd &&
	    (!sdev || req->scmnd->device == sdev) &&
	    (!scmnd || req->scmnd == scmnd)) {
		scmnd = req->scmnd;
		req->scmnd = NULL;
	} else {
		scmnd = NULL;
	}
	spin_unlock_irqrestore(&ch->lock, flags);

	return scmnd;
}

1127 * srp_free_req() - Unmap data and add request to the free request list.
Bart Van Assche509c07b2014-10-30 14:48:30 +01001128 * @ch: SRP RDMA channel.
Bart Van Asscheaf246632014-05-20 15:04:21 +02001129 * @req: Request to be freed.
1130 * @scmnd: SCSI command associated with @req.
1131 * @req_lim_delta: Amount to be added to @target->req_lim.
Bart Van Assche22032992012-08-14 13:18:53 +00001132 */
Bart Van Assche509c07b2014-10-30 14:48:30 +01001133static void srp_free_req(struct srp_rdma_ch *ch, struct srp_request *req,
1134 struct scsi_cmnd *scmnd, s32 req_lim_delta)
Bart Van Assche22032992012-08-14 13:18:53 +00001135{
1136 unsigned long flags;
1137
Bart Van Assche509c07b2014-10-30 14:48:30 +01001138 srp_unmap_data(scmnd, ch, req);
Bart Van Assche22032992012-08-14 13:18:53 +00001139
Bart Van Assche509c07b2014-10-30 14:48:30 +01001140 spin_lock_irqsave(&ch->lock, flags);
1141 ch->req_lim += req_lim_delta;
Bart Van Assche509c07b2014-10-30 14:48:30 +01001142 spin_unlock_irqrestore(&ch->lock, flags);
Ishai Rabinovitz526b4ca2006-06-17 20:37:38 -07001143}
1144
Bart Van Assche509c07b2014-10-30 14:48:30 +01001145static void srp_finish_req(struct srp_rdma_ch *ch, struct srp_request *req,
1146 struct scsi_device *sdev, int result)
Ishai Rabinovitz526b4ca2006-06-17 20:37:38 -07001147{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001148 struct scsi_cmnd *scmnd = srp_claim_req(ch, req, sdev, NULL);
Bart Van Assche22032992012-08-14 13:18:53 +00001149
1150 if (scmnd) {
Bart Van Assche509c07b2014-10-30 14:48:30 +01001151 srp_free_req(ch, req, scmnd, 0);
Bart Van Asscheed9b2262013-10-26 14:34:27 +02001152 scmnd->result = result;
Bart Van Assche22032992012-08-14 13:18:53 +00001153 scmnd->scsi_done(scmnd);
Bart Van Assche22032992012-08-14 13:18:53 +00001154 }
Ishai Rabinovitz526b4ca2006-06-17 20:37:38 -07001155}
1156
Bart Van Asscheed9b2262013-10-26 14:34:27 +02001157static void srp_terminate_io(struct srp_rport *rport)
Roland Dreieraef9ec32005-11-02 14:07:13 -08001158{
Bart Van Asscheed9b2262013-10-26 14:34:27 +02001159 struct srp_target_port *target = rport->lld_data;
Bart Van Assched92c0da2014-10-06 17:14:36 +02001160 struct srp_rdma_ch *ch;
Bart Van Asscheb3fe6282014-03-14 13:54:11 +01001161 struct Scsi_Host *shost = target->scsi_host;
1162 struct scsi_device *sdev;
Bart Van Assched92c0da2014-10-06 17:14:36 +02001163 int i, j;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001164
Bart Van Asscheb3fe6282014-03-14 13:54:11 +01001165 /*
1166 * Invoking srp_terminate_io() while srp_queuecommand() is running
1167 * is not safe. Hence the warning statement below.
1168 */
1169 shost_for_each_device(sdev, shost)
1170 WARN_ON_ONCE(sdev->request_queue->request_fn_active);
1171
Bart Van Assched92c0da2014-10-06 17:14:36 +02001172 for (i = 0; i < target->ch_count; i++) {
1173 ch = &target->ch[i];
Bart Van Assche509c07b2014-10-30 14:48:30 +01001174
Bart Van Assched92c0da2014-10-06 17:14:36 +02001175 for (j = 0; j < target->req_ring_size; ++j) {
1176 struct srp_request *req = &ch->req_ring[j];
1177
1178 srp_finish_req(ch, req, NULL,
1179 DID_TRANSPORT_FAILFAST << 16);
1180 }
Bart Van Asscheed9b2262013-10-26 14:34:27 +02001181 }
1182}
1183
1184/*
1185 * It is up to the caller to ensure that srp_rport_reconnect() calls are
1186 * serialized and that no concurrent srp_queuecommand(), srp_abort(),
1187 * srp_reset_device() or srp_reset_host() calls will occur while this function
1188 * is in progress. One way to realize that is not to call this function
1189 * directly but to call srp_reconnect_rport() instead since that last function
1190 * serializes calls of this function via rport->mutex and also blocks
1191 * srp_queuecommand() calls before invoking this function.
1192 */
1193static int srp_rport_reconnect(struct srp_rport *rport)
1194{
1195 struct srp_target_port *target = rport->lld_data;
Bart Van Assched92c0da2014-10-06 17:14:36 +02001196 struct srp_rdma_ch *ch;
1197 int i, j, ret = 0;
1198 bool multich = false;
Bart Van Assche09be70a2012-03-17 17:18:54 +00001199
Roland Dreieraef9ec32005-11-02 14:07:13 -08001200 srp_disconnect_target(target);
Bart Van Assche34aa6542014-10-30 14:47:22 +01001201
1202 if (target->state == SRP_TARGET_SCANNING)
1203 return -ENODEV;
1204
Roland Dreieraef9ec32005-11-02 14:07:13 -08001205 /*
Bart Van Asschec7c4e7f2013-02-21 17:19:04 +00001206 * Now get a new local CM ID so that we avoid confusing the target in
1207 * case things are really fouled up. Doing so also ensures that all CM
1208 * callbacks will have finished before a new QP is allocated.
Roland Dreieraef9ec32005-11-02 14:07:13 -08001209 */
Bart Van Assched92c0da2014-10-06 17:14:36 +02001210 for (i = 0; i < target->ch_count; i++) {
1211 ch = &target->ch[i];
Bart Van Assched92c0da2014-10-06 17:14:36 +02001212 ret += srp_new_cm_id(ch);
Bart Van Assche536ae142010-11-26 13:58:27 -05001213 }
Bart Van Assched92c0da2014-10-06 17:14:36 +02001214 for (i = 0; i < target->ch_count; i++) {
1215 ch = &target->ch[i];
Bart Van Assched92c0da2014-10-06 17:14:36 +02001216 for (j = 0; j < target->req_ring_size; ++j) {
1217 struct srp_request *req = &ch->req_ring[j];
Roland Dreieraef9ec32005-11-02 14:07:13 -08001218
Bart Van Assched92c0da2014-10-06 17:14:36 +02001219 srp_finish_req(ch, req, NULL, DID_RESET << 16);
1220 }
1221 }
1222 for (i = 0; i < target->ch_count; i++) {
1223 ch = &target->ch[i];
Bart Van Assched92c0da2014-10-06 17:14:36 +02001224 /*
1225 * Whether or not creating a new CM ID succeeded, create a new
1226 * QP. This guarantees that all completion callback function
1227 * invocations have finished before request resetting starts.
1228 */
1229 ret += srp_create_ch_ib(ch);
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001230
Bart Van Assched92c0da2014-10-06 17:14:36 +02001231 INIT_LIST_HEAD(&ch->free_tx);
1232 for (j = 0; j < target->queue_size; ++j)
1233 list_add(&ch->tx_ring[j]->list, &ch->free_tx);
1234 }
Bart Van Assche8de9fe32015-05-18 13:23:36 +02001235
1236 target->qp_in_error = false;
1237
Bart Van Assched92c0da2014-10-06 17:14:36 +02001238 for (i = 0; i < target->ch_count; i++) {
1239 ch = &target->ch[i];
Bart Van Asschebbac5cc2015-05-18 13:26:17 +02001240 if (ret)
Bart Van Assched92c0da2014-10-06 17:14:36 +02001241 break;
Bart Van Assched92c0da2014-10-06 17:14:36 +02001242 ret = srp_connect_ch(ch, multich);
1243 multich = true;
1244 }
Bart Van Assche09be70a2012-03-17 17:18:54 +00001245
Bart Van Asscheed9b2262013-10-26 14:34:27 +02001246 if (ret == 0)
1247 shost_printk(KERN_INFO, target->scsi_host,
1248 PFX "reconnect succeeded\n");
Roland Dreieraef9ec32005-11-02 14:07:13 -08001249
1250 return ret;
1251}
1252
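/*
 * Append one SRP direct-data descriptor (address, rkey, length) to the
 * mapping state and update the running totals.
 */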
David Dillow8f26c9f2011-01-14 19:45:50 -05001253static void srp_map_desc(struct srp_map_state *state, dma_addr_t dma_addr,
1254 unsigned int dma_len, u32 rkey)
Roland Dreierf5358a12006-06-17 20:37:29 -07001255{
David Dillow8f26c9f2011-01-14 19:45:50 -05001256 struct srp_direct_buf *desc = state->desc;
1257
Bart Van Assche3ae95da2015-08-10 17:08:18 -07001258 WARN_ON_ONCE(!dma_len);
1259
David Dillow8f26c9f2011-01-14 19:45:50 -05001260 desc->va = cpu_to_be64(dma_addr);
1261 desc->key = cpu_to_be32(rkey);
1262 desc->len = cpu_to_be32(dma_len);
1263
1264 state->total_len += dma_len;
1265 state->desc++;
1266 state->ndesc++;
1267}
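/*
 * Illustration (not driver code, compiled out): a stand-alone sketch of
 * what srp_map_desc() above does -- fill one SRP direct buffer
 * descriptor in big-endian wire order and update the running totals.
 * The struct mirrors struct srp_direct_buf; the addresses and rkey are
 * invented.
 */
#if 0
#include <endian.h>
#include <stdint.h>
#include <stdio.h>

struct direct_buf { uint64_t va; uint32_t key; uint32_t len; };
struct map_state { struct direct_buf *desc; unsigned int total_len, ndesc; };

static void map_desc(struct map_state *s, uint64_t addr, uint32_t len,
		     uint32_t rkey)
{
	s->desc->va  = htobe64(addr);
	s->desc->key = htobe32(rkey);
	s->desc->len = htobe32(len);
	s->total_len += len;
	s->desc++;
	s->ndesc++;
}

int main(void)
{
	struct direct_buf table[2];
	struct map_state s = { .desc = table };

	map_desc(&s, 0x100000, 4096, 0x1234);
	map_desc(&s, 0x200000, 8192, 0x1234);
	printf("%u descriptors, %u bytes total\n", s.ndesc, s.total_len);
	return 0;
}
#endif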
1268
1269static int srp_map_finish_fmr(struct srp_map_state *state,
Bart Van Assche509c07b2014-10-30 14:48:30 +01001270 struct srp_rdma_ch *ch)
David Dillow8f26c9f2011-01-14 19:45:50 -05001271{
Bart Van Assche186fbc62015-08-10 17:06:29 -07001272 struct srp_target_port *target = ch->target;
1273 struct srp_device *dev = target->srp_host->srp_dev;
David Dillow8f26c9f2011-01-14 19:45:50 -05001274 struct ib_pool_fmr *fmr;
Roland Dreierf5358a12006-06-17 20:37:29 -07001275 u64 io_addr = 0;
David Dillow8f26c9f2011-01-14 19:45:50 -05001276
Bart Van Asschef731ed62015-08-10 17:07:27 -07001277 if (state->fmr.next >= state->fmr.end)
1278 return -ENOMEM;
1279
Sagi Grimberg26630e82015-10-13 19:11:38 +03001280 WARN_ON_ONCE(!dev->use_fmr);
1281
1282 if (state->npages == 0)
1283 return 0;
1284
1285 if (state->npages == 1 && target->global_mr) {
1286 srp_map_desc(state, state->base_dma_addr, state->dma_len,
1287 target->global_mr->rkey);
1288 goto reset_state;
1289 }
1290
Bart Van Assche509c07b2014-10-30 14:48:30 +01001291 fmr = ib_fmr_pool_map_phys(ch->fmr_pool, state->pages,
David Dillow8f26c9f2011-01-14 19:45:50 -05001292 state->npages, io_addr);
1293 if (IS_ERR(fmr))
1294 return PTR_ERR(fmr);
1295
Bart Van Asschef731ed62015-08-10 17:07:27 -07001296 *state->fmr.next++ = fmr;
Bart Van Assche52ede082014-05-20 15:07:45 +02001297 state->nmdesc++;
David Dillow8f26c9f2011-01-14 19:45:50 -05001298
Bart Van Assche186fbc62015-08-10 17:06:29 -07001299 srp_map_desc(state, state->base_dma_addr & ~dev->mr_page_mask,
1300 state->dma_len, fmr->fmr->rkey);
Bart Van Assche539dde62014-05-20 15:05:46 +02001301
Sagi Grimberg26630e82015-10-13 19:11:38 +03001302reset_state:
1303 state->npages = 0;
1304 state->dma_len = 0;
1305
David Dillow8f26c9f2011-01-14 19:45:50 -05001306 return 0;
1307}
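/*
 * Illustration (not driver code, compiled out): the address split used
 * by srp_map_finish_fmr() above. With mr_page_mask = ~(mr_page_size - 1),
 * "addr & mask" is the page frame handed to the FMR pool and
 * "addr & ~mask" is the byte offset into the first page; the offset
 * becomes the descriptor VA because the pages were mapped at io_addr 0.
 * The address and page size are invented.
 */
#if 0
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t mr_page_size = 4096;
	uint64_t mr_page_mask = ~(mr_page_size - 1);
	uint64_t dma_addr = 0x12345678;

	printf("page   = 0x%" PRIx64 "\n", dma_addr & mr_page_mask);
	printf("offset = 0x%" PRIx64 "\n", dma_addr & ~mr_page_mask);
	return 0;
}
#endif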
1308
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001309static int srp_map_finish_fr(struct srp_map_state *state,
Bart Van Assche509c07b2014-10-30 14:48:30 +01001310 struct srp_rdma_ch *ch)
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001311{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001312 struct srp_target_port *target = ch->target;
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001313 struct srp_device *dev = target->srp_host->srp_dev;
1314 struct ib_send_wr *bad_wr;
Sagi Grimbergf7f7aab2015-10-13 19:11:39 +03001315 struct ib_reg_wr wr;
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001316 struct srp_fr_desc *desc;
1317 u32 rkey;
Sagi Grimbergf7f7aab2015-10-13 19:11:39 +03001318 int n, err;
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001319
Bart Van Asschef731ed62015-08-10 17:07:27 -07001320 if (state->fr.next >= state->fr.end)
1321 return -ENOMEM;
1322
Sagi Grimberg26630e82015-10-13 19:11:38 +03001323 WARN_ON_ONCE(!dev->use_fast_reg);
1324
Sagi Grimbergf7f7aab2015-10-13 19:11:39 +03001325 if (state->sg_nents == 0)
Sagi Grimberg26630e82015-10-13 19:11:38 +03001326 return 0;
1327
Sagi Grimbergf7f7aab2015-10-13 19:11:39 +03001328 if (state->sg_nents == 1 && target->global_mr) {
1329 srp_map_desc(state, sg_dma_address(state->sg),
1330 sg_dma_len(state->sg),
Sagi Grimberg26630e82015-10-13 19:11:38 +03001331 target->global_mr->rkey);
Sagi Grimbergf7f7aab2015-10-13 19:11:39 +03001332 return 1;
Sagi Grimberg26630e82015-10-13 19:11:38 +03001333 }
1334
Bart Van Assche509c07b2014-10-30 14:48:30 +01001335 desc = srp_fr_pool_get(ch->fr_pool);
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001336 if (!desc)
1337 return -ENOMEM;
1338
1339 rkey = ib_inc_rkey(desc->mr->rkey);
1340 ib_update_fast_reg_key(desc->mr, rkey);
1341
Sagi Grimbergf7f7aab2015-10-13 19:11:39 +03001342 n = ib_map_mr_sg(desc->mr, state->sg, state->sg_nents,
1343 dev->mr_page_size);
1344 if (unlikely(n < 0))
1345 return n;
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001346
Sagi Grimbergf7f7aab2015-10-13 19:11:39 +03001347 wr.wr.next = NULL;
1348 wr.wr.opcode = IB_WR_REG_MR;
Christoph Hellwige622f2f2015-10-08 09:16:33 +01001349 wr.wr.wr_id = FAST_REG_WR_ID_MASK;
Sagi Grimbergf7f7aab2015-10-13 19:11:39 +03001350 wr.wr.num_sge = 0;
1351 wr.wr.send_flags = 0;
1352 wr.mr = desc->mr;
1353 wr.key = desc->mr->rkey;
1354 wr.access = (IB_ACCESS_LOCAL_WRITE |
1355 IB_ACCESS_REMOTE_READ |
1356 IB_ACCESS_REMOTE_WRITE);
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001357
Bart Van Asschef731ed62015-08-10 17:07:27 -07001358 *state->fr.next++ = desc;
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001359 state->nmdesc++;
1360
Sagi Grimbergf7f7aab2015-10-13 19:11:39 +03001361 srp_map_desc(state, desc->mr->iova,
1362 desc->mr->length, desc->mr->rkey);
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001363
Sagi Grimberg26630e82015-10-13 19:11:38 +03001364 err = ib_post_send(ch->qp, &wr.wr, &bad_wr);
Sagi Grimbergf7f7aab2015-10-13 19:11:39 +03001365 if (unlikely(err))
Sagi Grimberg26630e82015-10-13 19:11:38 +03001366 return err;
1367
Sagi Grimbergf7f7aab2015-10-13 19:11:39 +03001368 return n;
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001369}
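/*
 * Illustration (not driver code, compiled out): the rkey refresh
 * performed above. ib_inc_rkey() is understood to bump only the low
 * 8 "key" bits of the rkey while preserving the MR index bits, so a
 * reused fast-reg MR presents a fresh key to the target on every
 * registration. A user-space re-implementation for illustration:
 */
#if 0
#include <stdint.h>
#include <stdio.h>

static uint32_t inc_rkey(uint32_t rkey)
{
	const uint32_t mask = 0x000000ff;

	return ((rkey + 1) & mask) | (rkey & ~mask);
}

int main(void)
{
	uint32_t rkey = 0xabcd12ff;

	rkey = inc_rkey(rkey);	/* low byte wraps, index bits preserved */
	printf("new rkey = 0x%08x\n", rkey);	/* prints 0xabcd1200 */
	return 0;
}
#endif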
1370
Bart Van Assche539dde62014-05-20 15:05:46 +02001371static int srp_finish_mapping(struct srp_map_state *state,
Bart Van Assche509c07b2014-10-30 14:48:30 +01001372 struct srp_rdma_ch *ch)
Bart Van Assche539dde62014-05-20 15:05:46 +02001373{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001374 struct srp_target_port *target = ch->target;
Bart Van Assche002f1562015-08-10 17:08:44 -07001375 struct srp_device *dev = target->srp_host->srp_dev;
Bart Van Assche539dde62014-05-20 15:05:46 +02001376
Sagi Grimberg26630e82015-10-13 19:11:38 +03001377 return dev->use_fast_reg ? srp_map_finish_fr(state, ch) :
1378 srp_map_finish_fmr(state, ch);
Bart Van Assche539dde62014-05-20 15:05:46 +02001379}
1380
David Dillow8f26c9f2011-01-14 19:45:50 -05001381static int srp_map_sg_entry(struct srp_map_state *state,
Bart Van Assche509c07b2014-10-30 14:48:30 +01001382 struct srp_rdma_ch *ch,
Bart Van Assche3ae95da2015-08-10 17:08:18 -07001383 struct scatterlist *sg, int sg_index)
David Dillow8f26c9f2011-01-14 19:45:50 -05001384{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001385 struct srp_target_port *target = ch->target;
Greg Kroah-Hartman05321932008-03-06 00:13:36 +01001386 struct srp_device *dev = target->srp_host->srp_dev;
Ralph Campbell85507bc2006-12-12 14:30:55 -08001387 struct ib_device *ibdev = dev->dev;
David Dillow8f26c9f2011-01-14 19:45:50 -05001388 dma_addr_t dma_addr = ib_sg_dma_address(ibdev, sg);
1389 unsigned int dma_len = ib_sg_dma_len(ibdev, sg);
Bart Van Assche3ae95da2015-08-10 17:08:18 -07001390 unsigned int len = 0;
David Dillow8f26c9f2011-01-14 19:45:50 -05001391 int ret;
Roland Dreierf5358a12006-06-17 20:37:29 -07001392
Bart Van Assche3ae95da2015-08-10 17:08:18 -07001393 WARN_ON_ONCE(!dma_len);
Ishai Rabinovitz559ce8f2006-08-03 10:35:43 -07001394
David Dillow8f26c9f2011-01-14 19:45:50 -05001395 while (dma_len) {
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001396 unsigned offset = dma_addr & ~dev->mr_page_mask;
1397 if (state->npages == dev->max_pages_per_mr || offset != 0) {
Sagi Grimbergf7f7aab2015-10-13 19:11:39 +03001398 ret = srp_map_finish_fmr(state, ch);
David Dillow8f26c9f2011-01-14 19:45:50 -05001399 if (ret)
1400 return ret;
Roland Dreierf5358a12006-06-17 20:37:29 -07001401 }
1402
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001403 len = min_t(unsigned int, dma_len, dev->mr_page_size - offset);
David Dillow8f26c9f2011-01-14 19:45:50 -05001404
1405 if (!state->npages)
1406 state->base_dma_addr = dma_addr;
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001407 state->pages[state->npages++] = dma_addr & dev->mr_page_mask;
Bart Van Assche52ede082014-05-20 15:07:45 +02001408 state->dma_len += len;
David Dillow8f26c9f2011-01-14 19:45:50 -05001409 dma_addr += len;
1410 dma_len -= len;
Roland Dreierf5358a12006-06-17 20:37:29 -07001411 }
1412
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001413 /*
1414 * If the last entry of the MR wasn't a full page, then we need to
David Dillow8f26c9f2011-01-14 19:45:50 -05001415 * close it out and start a new one -- we can only merge at page
1416 * boundaries.
1417 */
Roland Dreierf5358a12006-06-17 20:37:29 -07001418 ret = 0;
Bart Van Assche0e0d3a42015-08-10 17:07:46 -07001419 if (len != dev->mr_page_size)
Sagi Grimbergf7f7aab2015-10-13 19:11:39 +03001420 ret = srp_map_finish_fmr(state, ch);
Roland Dreierf5358a12006-06-17 20:37:29 -07001421 return ret;
1422}
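/*
 * Illustration (not driver code, compiled out): the chunking loop in
 * srp_map_sg_entry() above. An arbitrary DMA region is consumed in
 * pieces of at most one MR page; the first piece may be shorter
 * because of the offset into its page, which is why an unaligned
 * offset forces the current MR to be closed out. Address, length and
 * page size are invented.
 */
#if 0
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint64_t page = 4096, mask = ~(page - 1);
	uint64_t addr = 0x10f00;	/* 0xf00 bytes into a page */
	uint64_t len = 10000;

	while (len) {
		uint64_t off = addr & ~mask;
		uint64_t chunk = len < page - off ? len : page - off;

		printf("page 0x%" PRIx64 ", len %" PRIu64 "\n",
		       addr & mask, chunk);
		addr += chunk;
		len -= chunk;
	}
	return 0;
}
#endif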
1423
Sagi Grimberg26630e82015-10-13 19:11:38 +03001424static int srp_map_sg_fmr(struct srp_map_state *state, struct srp_rdma_ch *ch,
1425 struct srp_request *req, struct scatterlist *scat,
1426 int count)
1427{
1428 struct scatterlist *sg;
1429 int i, ret;
1430
1431 state->desc = req->indirect_desc;
1432 state->pages = req->map_page;
1433 state->fmr.next = req->fmr_list;
1434 state->fmr.end = req->fmr_list + ch->target->cmd_sg_cnt;
1435
1436 for_each_sg(scat, sg, count, i) {
1437 ret = srp_map_sg_entry(state, ch, sg, i);
1438 if (ret)
1439 return ret;
1440 }
1441
Sagi Grimbergf7f7aab2015-10-13 19:11:39 +03001442 ret = srp_map_finish_fmr(state, ch);
Sagi Grimberg26630e82015-10-13 19:11:38 +03001443 if (ret)
1444 return ret;
1445
1446 req->nmdesc = state->nmdesc;
1447
1448 return 0;
1449}
1450
1451static int srp_map_sg_fr(struct srp_map_state *state, struct srp_rdma_ch *ch,
1452 struct srp_request *req, struct scatterlist *scat,
1453 int count)
1454{
Sagi Grimberg26630e82015-10-13 19:11:38 +03001455 state->desc = req->indirect_desc;
Sagi Grimbergf7f7aab2015-10-13 19:11:39 +03001456 state->fr.next = req->fr_list;
1457 state->fr.end = req->fr_list + ch->target->cmd_sg_cnt;
1458 state->sg = scat;
1459 state->sg_nents = scsi_sg_count(req->scmnd);
Sagi Grimberg26630e82015-10-13 19:11:38 +03001460
Sagi Grimbergf7f7aab2015-10-13 19:11:39 +03001461 while (state->sg_nents) {
1462 int i, n;
1463
1464 n = srp_map_finish_fr(state, ch);
1465 if (unlikely(n < 0))
1466 return n;
1467
1468 state->sg_nents -= n;
1469 for (i = 0; i < n; i++)
1470 state->sg = sg_next(state->sg);
Sagi Grimberg26630e82015-10-13 19:11:38 +03001471 }
1472
Sagi Grimberg26630e82015-10-13 19:11:38 +03001473 req->nmdesc = state->nmdesc;
1474
1475 return 0;
1476}
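/*
 * Illustration (not driver code, compiled out): the consumption loop
 * in srp_map_sg_fr() above. Each pass registers up to "max entries per
 * MR" scatterlist entries and the cursor advances by however many
 * entries the mapping actually covered. The counts are invented.
 */
#if 0
#include <stdio.h>

static int map_chunk(int avail, int max_per_mr)
{
	return avail < max_per_mr ? avail : max_per_mr;
}

int main(void)
{
	int sg_nents = 10, max_per_mr = 4, pass = 0;

	while (sg_nents) {
		int n = map_chunk(sg_nents, max_per_mr);

		sg_nents -= n;	/* skip past the entries just mapped */
		printf("pass %d mapped %d entries, %d left\n",
		       ++pass, n, sg_nents);
	}
	return 0;
}
#endif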
1477
1478static int srp_map_sg_dma(struct srp_map_state *state, struct srp_rdma_ch *ch,
1479 struct srp_request *req, struct scatterlist *scat,
1480 int count)
Bart Van Assche76bc1e12014-05-20 15:05:21 +02001481{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001482 struct srp_target_port *target = ch->target;
Bart Van Assche76bc1e12014-05-20 15:05:21 +02001483 struct srp_device *dev = target->srp_host->srp_dev;
Bart Van Assche76bc1e12014-05-20 15:05:21 +02001484 struct scatterlist *sg;
Sagi Grimberg26630e82015-10-13 19:11:38 +03001485 int i;
Bart Van Assche76bc1e12014-05-20 15:05:21 +02001486
Sagi Grimberg26630e82015-10-13 19:11:38 +03001487 state->desc = req->indirect_desc;
1488 for_each_sg(scat, sg, count, i) {
1489 srp_map_desc(state, ib_sg_dma_address(dev->dev, sg),
1490 ib_sg_dma_len(dev->dev, sg),
1491 target->global_mr->rkey);
Bart Van Assche0e0d3a42015-08-10 17:07:46 -07001492 }
Bart Van Assche76bc1e12014-05-20 15:05:21 +02001493
Bart Van Assche52ede082014-05-20 15:07:45 +02001494 req->nmdesc = state->nmdesc;
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001495
Sagi Grimberg26630e82015-10-13 19:11:38 +03001496 return 0;
Bart Van Assche76bc1e12014-05-20 15:05:21 +02001497}
1498
Bart Van Assche330179f2015-08-10 17:09:05 -07001499/*
1500 * Register the indirect data buffer descriptor with the HCA.
1501 *
1502 * Note: since the indirect data buffer descriptor has been allocated with
1503 * kmalloc() it is guaranteed that this buffer is a physically contiguous
1504 * memory buffer.
1505 */
1506static int srp_map_idb(struct srp_rdma_ch *ch, struct srp_request *req,
1507 void **next_mr, void **end_mr, u32 idb_len,
1508 __be32 *idb_rkey)
1509{
1510 struct srp_target_port *target = ch->target;
1511 struct srp_device *dev = target->srp_host->srp_dev;
1512 struct srp_map_state state;
1513 struct srp_direct_buf idb_desc;
1514 u64 idb_pages[1];
Sagi Grimbergf7f7aab2015-10-13 19:11:39 +03001515 struct scatterlist idb_sg[1];
Bart Van Assche330179f2015-08-10 17:09:05 -07001516 int ret;
1517
1518 memset(&state, 0, sizeof(state));
1519 memset(&idb_desc, 0, sizeof(idb_desc));
1520 state.gen.next = next_mr;
1521 state.gen.end = end_mr;
1522 state.desc = &idb_desc;
Bart Van Assche330179f2015-08-10 17:09:05 -07001523 state.base_dma_addr = req->indirect_dma_addr;
1524 state.dma_len = idb_len;
Sagi Grimbergf7f7aab2015-10-13 19:11:39 +03001525
1526 if (dev->use_fast_reg) {
1527 state.sg = idb_sg;
1528 state.sg_nents = 1;
1529 sg_set_buf(idb_sg, req->indirect_desc, idb_len);
1530 idb_sg->dma_address = req->indirect_dma_addr; /* hack! */
1531 ret = srp_map_finish_fr(&state, ch);
1532 if (ret < 0)
1533 return ret;
1534 } else if (dev->use_fmr) {
1535 state.pages = idb_pages;
1536 state.pages[0] = (req->indirect_dma_addr &
1537 dev->mr_page_mask);
1538 state.npages = 1;
1539 ret = srp_map_finish_fmr(&state, ch);
1540 if (ret < 0)
1541 return ret;
1542 } else {
1543 return -EINVAL;
1544 }
Bart Van Assche330179f2015-08-10 17:09:05 -07001545
1546 *idb_rkey = idb_desc.key;
1547
Sagi Grimbergf7f7aab2015-10-13 19:11:39 +03001548 return 0;
Bart Van Assche330179f2015-08-10 17:09:05 -07001549}
1550
Bart Van Assche509c07b2014-10-30 14:48:30 +01001551static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch,
Roland Dreieraef9ec32005-11-02 14:07:13 -08001552 struct srp_request *req)
1553{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001554 struct srp_target_port *target = ch->target;
Bart Van Assche76bc1e12014-05-20 15:05:21 +02001555 struct scatterlist *scat;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001556 struct srp_cmd *cmd = req->cmd->buf;
Bart Van Assche330179f2015-08-10 17:09:05 -07001557 int len, nents, count, ret;
Ralph Campbell85507bc2006-12-12 14:30:55 -08001558 struct srp_device *dev;
1559 struct ib_device *ibdev;
David Dillow8f26c9f2011-01-14 19:45:50 -05001560 struct srp_map_state state;
1561 struct srp_indirect_buf *indirect_hdr;
Bart Van Assche330179f2015-08-10 17:09:05 -07001562 u32 idb_len, table_len;
1563 __be32 idb_rkey;
David Dillow8f26c9f2011-01-14 19:45:50 -05001564 u8 fmt;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001565
FUJITA Tomonoribb350d12007-05-26 02:28:25 +09001566 if (!scsi_sglist(scmnd) || scmnd->sc_data_direction == DMA_NONE)
Roland Dreieraef9ec32005-11-02 14:07:13 -08001567 return sizeof (struct srp_cmd);
1568
1569 if (scmnd->sc_data_direction != DMA_FROM_DEVICE &&
1570 scmnd->sc_data_direction != DMA_TO_DEVICE) {
David Dillow7aa54bd2008-01-07 18:23:41 -05001571 shost_printk(KERN_WARNING, target->scsi_host,
1572 PFX "Unhandled data direction %d\n",
1573 scmnd->sc_data_direction);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001574 return -EINVAL;
1575 }
1576
FUJITA Tomonoribb350d12007-05-26 02:28:25 +09001577 nents = scsi_sg_count(scmnd);
1578 scat = scsi_sglist(scmnd);
Roland Dreiercf368712006-03-24 15:47:26 -08001579
Greg Kroah-Hartman05321932008-03-06 00:13:36 +01001580 dev = target->srp_host->srp_dev;
Ralph Campbell85507bc2006-12-12 14:30:55 -08001581 ibdev = dev->dev;
1582
1583 count = ib_dma_map_sg(ibdev, scat, nents, scmnd->sc_data_direction);
David Dillow8f26c9f2011-01-14 19:45:50 -05001584 if (unlikely(count == 0))
1585 return -EIO;
Roland Dreierf5358a12006-06-17 20:37:29 -07001586
1587 fmt = SRP_DATA_DESC_DIRECT;
1588 len = sizeof (struct srp_cmd) + sizeof (struct srp_direct_buf);
Roland Dreiercf368712006-03-24 15:47:26 -08001589
Bart Van Assche03f6fb92015-08-10 17:09:36 -07001590 if (count == 1 && target->global_mr) {
Roland Dreierf5358a12006-06-17 20:37:29 -07001591 /*
1592 * The midlayer only generated a single gather/scatter
1593 * entry, or DMA mapping coalesced everything to a
1594 * single entry. So a direct descriptor along with
1595 * the DMA MR suffices.
1596 */
Roland Dreieraef9ec32005-11-02 14:07:13 -08001597 struct srp_direct_buf *buf = (void *) cmd->add_data;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001598
Ralph Campbell85507bc2006-12-12 14:30:55 -08001599 buf->va = cpu_to_be64(ib_sg_dma_address(ibdev, scat));
Bart Van Assche03f6fb92015-08-10 17:09:36 -07001600 buf->key = cpu_to_be32(target->global_mr->rkey);
Ralph Campbell85507bc2006-12-12 14:30:55 -08001601 buf->len = cpu_to_be32(ib_sg_dma_len(ibdev, scat));
Roland Dreiercf368712006-03-24 15:47:26 -08001602
Bart Van Assche52ede082014-05-20 15:07:45 +02001603 req->nmdesc = 0;
David Dillow8f26c9f2011-01-14 19:45:50 -05001604 goto map_complete;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001605 }
1606
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001607 /*
1608 * We have more than one scatter/gather entry, so build our indirect
1609 * descriptor table, trying to merge as many entries as we can.
David Dillow8f26c9f2011-01-14 19:45:50 -05001610 */
1611 indirect_hdr = (void *) cmd->add_data;
1612
David Dillowc07d4242011-01-16 13:57:10 -05001613 ib_dma_sync_single_for_cpu(ibdev, req->indirect_dma_addr,
1614 target->indirect_size, DMA_TO_DEVICE);
1615
David Dillow8f26c9f2011-01-14 19:45:50 -05001616 memset(&state, 0, sizeof(state));
Sagi Grimberg26630e82015-10-13 19:11:38 +03001617 if (dev->use_fast_reg)
1618 srp_map_sg_fr(&state, ch, req, scat, count);
1619 else if (dev->use_fmr)
1620 srp_map_sg_fmr(&state, ch, req, scat, count);
1621 else
1622 srp_map_sg_dma(&state, ch, req, scat, count);
David Dillow8f26c9f2011-01-14 19:45:50 -05001623
David Dillowc07d4242011-01-16 13:57:10 -05001624 /* We've mapped the request, now pull as much of the indirect
1625 * descriptor table as we can into the command buffer. If this
1626 * target is not using an external indirect table, we are
1627 * guaranteed to fit into the command, as the SCSI layer won't
1628 * give us more S/G entries than we allow.
David Dillow8f26c9f2011-01-14 19:45:50 -05001629 */
David Dillow8f26c9f2011-01-14 19:45:50 -05001630 if (state.ndesc == 1) {
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001631 /*
1632 * Memory registration collapsed the sg-list into one entry,
David Dillow8f26c9f2011-01-14 19:45:50 -05001633 * so use a direct descriptor.
1634 */
1635 struct srp_direct_buf *buf = (void *) cmd->add_data;
1636
David Dillowc07d4242011-01-16 13:57:10 -05001637 *buf = req->indirect_desc[0];
David Dillow8f26c9f2011-01-14 19:45:50 -05001638 goto map_complete;
1639 }
1640
David Dillowc07d4242011-01-16 13:57:10 -05001641 if (unlikely(target->cmd_sg_cnt < state.ndesc &&
1642 !target->allow_ext_sg)) {
1643 shost_printk(KERN_ERR, target->scsi_host,
1644 "Could not fit S/G list into SRP_CMD\n");
1645 return -EIO;
1646 }
1647
1648 count = min(state.ndesc, target->cmd_sg_cnt);
David Dillow8f26c9f2011-01-14 19:45:50 -05001649 table_len = state.ndesc * sizeof (struct srp_direct_buf);
Bart Van Assche330179f2015-08-10 17:09:05 -07001650 idb_len = sizeof(struct srp_indirect_buf) + table_len;
David Dillow8f26c9f2011-01-14 19:45:50 -05001651
1652 fmt = SRP_DATA_DESC_INDIRECT;
1653 len = sizeof(struct srp_cmd) + sizeof (struct srp_indirect_buf);
David Dillowc07d4242011-01-16 13:57:10 -05001654 len += count * sizeof (struct srp_direct_buf);
David Dillow8f26c9f2011-01-14 19:45:50 -05001655
David Dillowc07d4242011-01-16 13:57:10 -05001656 memcpy(indirect_hdr->desc_list, req->indirect_desc,
1657 count * sizeof (struct srp_direct_buf));
David Dillow8f26c9f2011-01-14 19:45:50 -05001658
Bart Van Assche03f6fb92015-08-10 17:09:36 -07001659 if (!target->global_mr) {
Bart Van Assche330179f2015-08-10 17:09:05 -07001660 ret = srp_map_idb(ch, req, state.gen.next, state.gen.end,
1661 idb_len, &idb_rkey);
1662 if (ret < 0)
1663 return ret;
1664 req->nmdesc++;
1665 } else {
Bart Van Assche03f6fb92015-08-10 17:09:36 -07001666 idb_rkey = target->global_mr->rkey;
Bart Van Assche330179f2015-08-10 17:09:05 -07001667 }
1668
David Dillowc07d4242011-01-16 13:57:10 -05001669 indirect_hdr->table_desc.va = cpu_to_be64(req->indirect_dma_addr);
Bart Van Assche330179f2015-08-10 17:09:05 -07001670 indirect_hdr->table_desc.key = idb_rkey;
David Dillow8f26c9f2011-01-14 19:45:50 -05001671 indirect_hdr->table_desc.len = cpu_to_be32(table_len);
1672 indirect_hdr->len = cpu_to_be32(state.total_len);
1673
1674 if (scmnd->sc_data_direction == DMA_TO_DEVICE)
David Dillowc07d4242011-01-16 13:57:10 -05001675 cmd->data_out_desc_cnt = count;
David Dillow8f26c9f2011-01-14 19:45:50 -05001676 else
David Dillowc07d4242011-01-16 13:57:10 -05001677 cmd->data_in_desc_cnt = count;
1678
1679 ib_dma_sync_single_for_device(ibdev, req->indirect_dma_addr, table_len,
1680 DMA_TO_DEVICE);
David Dillow8f26c9f2011-01-14 19:45:50 -05001681
1682map_complete:
Roland Dreieraef9ec32005-11-02 14:07:13 -08001683 if (scmnd->sc_data_direction == DMA_TO_DEVICE)
1684 cmd->buf_fmt = fmt << 4;
1685 else
1686 cmd->buf_fmt = fmt;
1687
Roland Dreieraef9ec32005-11-02 14:07:13 -08001688 return len;
1689}
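/*
 * Illustration (not driver code, compiled out): the IU length computed
 * by srp_map_data() above. One descriptor yields the short direct
 * format; several descriptors yield the indirect format, whose header
 * is followed by min(ndesc, cmd_sg_cnt) inline descriptors. The sizes
 * below follow the SRP wire format as laid out in include/scsi/srp.h
 * (48-byte srp_cmd, 16-byte direct descriptor, 20-byte indirect
 * header); treat them as illustrative constants.
 */
#if 0
#include <stdio.h>

#define SRP_CMD_LEN		48
#define DIRECT_DESC_LEN		16
#define INDIRECT_HDR_LEN	20

static int iu_len(int ndesc, int cmd_sg_cnt)
{
	if (ndesc == 1)
		return SRP_CMD_LEN + DIRECT_DESC_LEN;
	if (ndesc > cmd_sg_cnt)
		ndesc = cmd_sg_cnt;	/* rest lives in the external table */
	return SRP_CMD_LEN + INDIRECT_HDR_LEN + ndesc * DIRECT_DESC_LEN;
}

int main(void)
{
	printf("1 desc : %d bytes\n", iu_len(1, 12));	/* 64 */
	printf("8 descs: %d bytes\n", iu_len(8, 12));	/* 196 */
	return 0;
}
#endif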
1690
David Dillow05a1d752010-10-08 14:48:14 -04001691/*
Bart Van Assche76c75b22010-11-26 14:37:47 -05001692 * Return an IU and possibly a credit to the free pool
1693 */
Bart Van Assche509c07b2014-10-30 14:48:30 +01001694static void srp_put_tx_iu(struct srp_rdma_ch *ch, struct srp_iu *iu,
Bart Van Assche76c75b22010-11-26 14:37:47 -05001695 enum srp_iu_type iu_type)
1696{
1697 unsigned long flags;
1698
Bart Van Assche509c07b2014-10-30 14:48:30 +01001699 spin_lock_irqsave(&ch->lock, flags);
1700 list_add(&iu->list, &ch->free_tx);
Bart Van Assche76c75b22010-11-26 14:37:47 -05001701 if (iu_type != SRP_IU_RSP)
Bart Van Assche509c07b2014-10-30 14:48:30 +01001702 ++ch->req_lim;
1703 spin_unlock_irqrestore(&ch->lock, flags);
Bart Van Assche76c75b22010-11-26 14:37:47 -05001704}
1705
1706/*
Bart Van Assche509c07b2014-10-30 14:48:30 +01001707 * Must be called with ch->lock held to protect req_lim and free_tx.
Bart Van Asschee9684672010-11-26 15:08:38 -05001708 * If IU is not sent, it must be returned using srp_put_tx_iu().
David Dillow05a1d752010-10-08 14:48:14 -04001709 *
1710 * Note:
1711 * An upper limit for the number of allocated information units for each
1712 * request type is:
1713 * - SRP_IU_CMD: SRP_CMD_SQ_SIZE, since the SCSI mid-layer never queues
1714 * more than Scsi_Host.can_queue requests.
1715 * - SRP_IU_TSK_MGMT: SRP_TSK_MGMT_SQ_SIZE.
1716 * - SRP_IU_RSP: 1, since a conforming SRP target never sends more than
1717 * one unanswered SRP request to an initiator.
1718 */
Bart Van Assche509c07b2014-10-30 14:48:30 +01001719static struct srp_iu *__srp_get_tx_iu(struct srp_rdma_ch *ch,
David Dillow05a1d752010-10-08 14:48:14 -04001720 enum srp_iu_type iu_type)
1721{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001722 struct srp_target_port *target = ch->target;
David Dillow05a1d752010-10-08 14:48:14 -04001723 s32 rsv = (iu_type == SRP_IU_TSK_MGMT) ? 0 : SRP_TSK_MGMT_SQ_SIZE;
1724 struct srp_iu *iu;
1725
Bart Van Assche509c07b2014-10-30 14:48:30 +01001726 srp_send_completion(ch->send_cq, ch);
David Dillow05a1d752010-10-08 14:48:14 -04001727
Bart Van Assche509c07b2014-10-30 14:48:30 +01001728 if (list_empty(&ch->free_tx))
David Dillow05a1d752010-10-08 14:48:14 -04001729 return NULL;
1730
1731 /* Initiator responses to target requests do not consume credits */
Bart Van Assche76c75b22010-11-26 14:37:47 -05001732 if (iu_type != SRP_IU_RSP) {
Bart Van Assche509c07b2014-10-30 14:48:30 +01001733 if (ch->req_lim <= rsv) {
Bart Van Assche76c75b22010-11-26 14:37:47 -05001734 ++target->zero_req_lim;
1735 return NULL;
1736 }
1737
Bart Van Assche509c07b2014-10-30 14:48:30 +01001738 --ch->req_lim;
David Dillow05a1d752010-10-08 14:48:14 -04001739 }
1740
Bart Van Assche509c07b2014-10-30 14:48:30 +01001741 iu = list_first_entry(&ch->free_tx, struct srp_iu, list);
Bart Van Assche76c75b22010-11-26 14:37:47 -05001742 list_del(&iu->list);
David Dillow05a1d752010-10-08 14:48:14 -04001743 return iu;
1744}
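/*
 * Illustration (not driver code, compiled out): the credit check in
 * __srp_get_tx_iu() above. A few credits stay reserved for task
 * management so that an abort or reset can always be sent even when
 * regular commands have exhausted the request limit. The reserve of 1
 * mirrors SRP_TSK_MGMT_SQ_SIZE but is an assumed value here.
 */
#if 0
#include <stdbool.h>
#include <stdio.h>

#define TSK_MGMT_RESERVE 1

static bool may_send(int req_lim, bool is_tsk_mgmt)
{
	int rsv = is_tsk_mgmt ? 0 : TSK_MGMT_RESERVE;

	return req_lim > rsv;
}

int main(void)
{
	/* One credit left: commands throttle, task management still goes. */
	printf("cmd     : %s\n", may_send(1, false) ? "send" : "defer");
	printf("tsk mgmt: %s\n", may_send(1, true) ? "send" : "defer");
	return 0;
}
#endif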
1745
Bart Van Assche509c07b2014-10-30 14:48:30 +01001746static int srp_post_send(struct srp_rdma_ch *ch, struct srp_iu *iu, int len)
David Dillow05a1d752010-10-08 14:48:14 -04001747{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001748 struct srp_target_port *target = ch->target;
David Dillow05a1d752010-10-08 14:48:14 -04001749 struct ib_sge list;
1750 struct ib_send_wr wr, *bad_wr;
David Dillow05a1d752010-10-08 14:48:14 -04001751
1752 list.addr = iu->dma;
1753 list.length = len;
David Dillow9af76272010-11-26 15:34:46 -05001754 list.lkey = target->lkey;
David Dillow05a1d752010-10-08 14:48:14 -04001755
1756 wr.next = NULL;
Bart Van Asschedcb4cb82010-11-26 13:22:48 -05001757 wr.wr_id = (uintptr_t) iu;
David Dillow05a1d752010-10-08 14:48:14 -04001758 wr.sg_list = &list;
1759 wr.num_sge = 1;
1760 wr.opcode = IB_WR_SEND;
1761 wr.send_flags = IB_SEND_SIGNALED;
1762
Bart Van Assche509c07b2014-10-30 14:48:30 +01001763 return ib_post_send(ch->qp, &wr, &bad_wr);
David Dillow05a1d752010-10-08 14:48:14 -04001764}
1765
Bart Van Assche509c07b2014-10-30 14:48:30 +01001766static int srp_post_recv(struct srp_rdma_ch *ch, struct srp_iu *iu)
Bart Van Asschec996bb42010-07-30 10:59:05 +00001767{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001768 struct srp_target_port *target = ch->target;
Bart Van Asschec996bb42010-07-30 10:59:05 +00001769 struct ib_recv_wr wr, *bad_wr;
Bart Van Asschedcb4cb82010-11-26 13:22:48 -05001770 struct ib_sge list;
Bart Van Asschec996bb42010-07-30 10:59:05 +00001771
1772 list.addr = iu->dma;
1773 list.length = iu->size;
David Dillow9af76272010-11-26 15:34:46 -05001774 list.lkey = target->lkey;
Bart Van Asschec996bb42010-07-30 10:59:05 +00001775
1776 wr.next = NULL;
Bart Van Asschedcb4cb82010-11-26 13:22:48 -05001777 wr.wr_id = (uintptr_t) iu;
Bart Van Asschec996bb42010-07-30 10:59:05 +00001778 wr.sg_list = &list;
1779 wr.num_sge = 1;
1780
Bart Van Assche509c07b2014-10-30 14:48:30 +01001781 return ib_post_recv(ch->qp, &wr, &bad_wr);
Bart Van Asschec996bb42010-07-30 10:59:05 +00001782}
1783
Bart Van Assche509c07b2014-10-30 14:48:30 +01001784static void srp_process_rsp(struct srp_rdma_ch *ch, struct srp_rsp *rsp)
Roland Dreieraef9ec32005-11-02 14:07:13 -08001785{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001786 struct srp_target_port *target = ch->target;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001787 struct srp_request *req;
1788 struct scsi_cmnd *scmnd;
1789 unsigned long flags;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001790
Roland Dreieraef9ec32005-11-02 14:07:13 -08001791 if (unlikely(rsp->tag & SRP_TAG_TSK_MGMT)) {
Bart Van Assche509c07b2014-10-30 14:48:30 +01001792 spin_lock_irqsave(&ch->lock, flags);
1793 ch->req_lim += be32_to_cpu(rsp->req_lim_delta);
1794 spin_unlock_irqrestore(&ch->lock, flags);
Bart Van Assche94a91742010-11-26 14:50:09 -05001795
Bart Van Assche509c07b2014-10-30 14:48:30 +01001796 ch->tsk_mgmt_status = -1;
David Dillowf8b6e312010-11-26 13:02:21 -05001797 if (be32_to_cpu(rsp->resp_data_len) >= 4)
Bart Van Assche509c07b2014-10-30 14:48:30 +01001798 ch->tsk_mgmt_status = rsp->data[3];
1799 complete(&ch->tsk_mgmt_done);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001800 } else {
Bart Van Assche77f2c1a2014-10-02 15:29:25 +02001801 scmnd = scsi_host_find_tag(target->scsi_host, rsp->tag);
1802 if (scmnd) {
1803 req = (void *)scmnd->host_scribble;
1804 scmnd = srp_claim_req(ch, req, NULL, scmnd);
1805 }
Bart Van Assche22032992012-08-14 13:18:53 +00001806 if (!scmnd) {
David Dillow7aa54bd2008-01-07 18:23:41 -05001807 shost_printk(KERN_ERR, target->scsi_host,
Bart Van Assched92c0da2014-10-06 17:14:36 +02001808 "Null scmnd for RSP w/tag %#016llx received on ch %td / QP %#x\n",
1809 rsp->tag, ch - target->ch, ch->qp->qp_num);
Bart Van Assche22032992012-08-14 13:18:53 +00001810
Bart Van Assche509c07b2014-10-30 14:48:30 +01001811 spin_lock_irqsave(&ch->lock, flags);
1812 ch->req_lim += be32_to_cpu(rsp->req_lim_delta);
1813 spin_unlock_irqrestore(&ch->lock, flags);
Bart Van Assche22032992012-08-14 13:18:53 +00001814
1815 return;
1816 }
Roland Dreieraef9ec32005-11-02 14:07:13 -08001817 scmnd->result = rsp->status;
1818
1819 if (rsp->flags & SRP_RSP_FLAG_SNSVALID) {
1820 memcpy(scmnd->sense_buffer, rsp->data +
1821 be32_to_cpu(rsp->resp_data_len),
1822 min_t(int, be32_to_cpu(rsp->sense_data_len),
1823 SCSI_SENSE_BUFFERSIZE));
1824 }
1825
Bart Van Asschee7145312014-07-09 15:57:51 +02001826 if (unlikely(rsp->flags & SRP_RSP_FLAG_DIUNDER))
FUJITA Tomonoribb350d12007-05-26 02:28:25 +09001827 scsi_set_resid(scmnd, be32_to_cpu(rsp->data_in_res_cnt));
Bart Van Asschee7145312014-07-09 15:57:51 +02001828 else if (unlikely(rsp->flags & SRP_RSP_FLAG_DIOVER))
1829 scsi_set_resid(scmnd, -be32_to_cpu(rsp->data_in_res_cnt));
1830 else if (unlikely(rsp->flags & SRP_RSP_FLAG_DOUNDER))
1831 scsi_set_resid(scmnd, be32_to_cpu(rsp->data_out_res_cnt));
1832 else if (unlikely(rsp->flags & SRP_RSP_FLAG_DOOVER))
1833 scsi_set_resid(scmnd, -be32_to_cpu(rsp->data_out_res_cnt));
Roland Dreieraef9ec32005-11-02 14:07:13 -08001834
Bart Van Assche509c07b2014-10-30 14:48:30 +01001835 srp_free_req(ch, req, scmnd,
Bart Van Assche22032992012-08-14 13:18:53 +00001836 be32_to_cpu(rsp->req_lim_delta));
1837
David Dillowf8b6e312010-11-26 13:02:21 -05001838 scmnd->host_scribble = NULL;
1839 scmnd->scsi_done(scmnd);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001840 }
Roland Dreieraef9ec32005-11-02 14:07:13 -08001841}
1842
Bart Van Assche509c07b2014-10-30 14:48:30 +01001843static int srp_response_common(struct srp_rdma_ch *ch, s32 req_delta,
David Dillowbb125882010-10-08 14:40:47 -04001844 void *rsp, int len)
1845{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001846 struct srp_target_port *target = ch->target;
Bart Van Assche76c75b22010-11-26 14:37:47 -05001847 struct ib_device *dev = target->srp_host->srp_dev->dev;
David Dillowbb125882010-10-08 14:40:47 -04001848 unsigned long flags;
1849 struct srp_iu *iu;
Bart Van Assche76c75b22010-11-26 14:37:47 -05001850 int err;
David Dillowbb125882010-10-08 14:40:47 -04001851
Bart Van Assche509c07b2014-10-30 14:48:30 +01001852 spin_lock_irqsave(&ch->lock, flags);
1853 ch->req_lim += req_delta;
1854 iu = __srp_get_tx_iu(ch, SRP_IU_RSP);
1855 spin_unlock_irqrestore(&ch->lock, flags);
Bart Van Assche76c75b22010-11-26 14:37:47 -05001856
David Dillowbb125882010-10-08 14:40:47 -04001857 if (!iu) {
1858 shost_printk(KERN_ERR, target->scsi_host, PFX
1859 "no IU available to send response\n");
Bart Van Assche76c75b22010-11-26 14:37:47 -05001860 return 1;
David Dillowbb125882010-10-08 14:40:47 -04001861 }
1862
1863 ib_dma_sync_single_for_cpu(dev, iu->dma, len, DMA_TO_DEVICE);
1864 memcpy(iu->buf, rsp, len);
1865 ib_dma_sync_single_for_device(dev, iu->dma, len, DMA_TO_DEVICE);
1866
Bart Van Assche509c07b2014-10-30 14:48:30 +01001867 err = srp_post_send(ch, iu, len);
Bart Van Assche76c75b22010-11-26 14:37:47 -05001868 if (err) {
David Dillowbb125882010-10-08 14:40:47 -04001869 shost_printk(KERN_ERR, target->scsi_host, PFX
1870 "unable to post response: %d\n", err);
Bart Van Assche509c07b2014-10-30 14:48:30 +01001871 srp_put_tx_iu(ch, iu, SRP_IU_RSP);
Bart Van Assche76c75b22010-11-26 14:37:47 -05001872 }
David Dillowbb125882010-10-08 14:40:47 -04001873
David Dillowbb125882010-10-08 14:40:47 -04001874 return err;
1875}
1876
Bart Van Assche509c07b2014-10-30 14:48:30 +01001877static void srp_process_cred_req(struct srp_rdma_ch *ch,
David Dillowbb125882010-10-08 14:40:47 -04001878 struct srp_cred_req *req)
1879{
1880 struct srp_cred_rsp rsp = {
1881 .opcode = SRP_CRED_RSP,
1882 .tag = req->tag,
1883 };
1884 s32 delta = be32_to_cpu(req->req_lim_delta);
1885
Bart Van Assche509c07b2014-10-30 14:48:30 +01001886 if (srp_response_common(ch, delta, &rsp, sizeof(rsp)))
1887 shost_printk(KERN_ERR, ch->target->scsi_host, PFX
David Dillowbb125882010-10-08 14:40:47 -04001888 "problems processing SRP_CRED_REQ\n");
1889}
1890
Bart Van Assche509c07b2014-10-30 14:48:30 +01001891static void srp_process_aer_req(struct srp_rdma_ch *ch,
David Dillowbb125882010-10-08 14:40:47 -04001892 struct srp_aer_req *req)
1893{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001894 struct srp_target_port *target = ch->target;
David Dillowbb125882010-10-08 14:40:47 -04001895 struct srp_aer_rsp rsp = {
1896 .opcode = SRP_AER_RSP,
1897 .tag = req->tag,
1898 };
1899 s32 delta = be32_to_cpu(req->req_lim_delta);
1900
1901 shost_printk(KERN_ERR, target->scsi_host, PFX
Bart Van Assche985aa492015-05-18 13:27:14 +02001902 "ignoring AER for LUN %llu\n", scsilun_to_int(&req->lun));
David Dillowbb125882010-10-08 14:40:47 -04001903
Bart Van Assche509c07b2014-10-30 14:48:30 +01001904 if (srp_response_common(ch, delta, &rsp, sizeof(rsp)))
David Dillowbb125882010-10-08 14:40:47 -04001905 shost_printk(KERN_ERR, target->scsi_host, PFX
1906 "problems processing SRP_AER_REQ\n");
1907}
1908
Bart Van Assche509c07b2014-10-30 14:48:30 +01001909static void srp_handle_recv(struct srp_rdma_ch *ch, struct ib_wc *wc)
Roland Dreieraef9ec32005-11-02 14:07:13 -08001910{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001911 struct srp_target_port *target = ch->target;
Bart Van Asschedcb4cb82010-11-26 13:22:48 -05001912 struct ib_device *dev = target->srp_host->srp_dev->dev;
Roland Dreier737b94e2011-05-23 11:30:04 -07001913 struct srp_iu *iu = (struct srp_iu *) (uintptr_t) wc->wr_id;
Bart Van Asschec996bb42010-07-30 10:59:05 +00001914 int res;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001915 u8 opcode;
1916
Bart Van Assche509c07b2014-10-30 14:48:30 +01001917 ib_dma_sync_single_for_cpu(dev, iu->dma, ch->max_ti_iu_len,
Ralph Campbell85507bc2006-12-12 14:30:55 -08001918 DMA_FROM_DEVICE);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001919
1920 opcode = *(u8 *) iu->buf;
1921
1922 if (0) {
David Dillow7aa54bd2008-01-07 18:23:41 -05001923 shost_printk(KERN_ERR, target->scsi_host,
1924 PFX "recv completion, opcode 0x%02x\n", opcode);
Bart Van Assche7a700812010-07-29 15:56:37 +00001925 print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 8, 1,
1926 iu->buf, wc->byte_len, true);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001927 }
1928
1929 switch (opcode) {
1930 case SRP_RSP:
Bart Van Assche509c07b2014-10-30 14:48:30 +01001931 srp_process_rsp(ch, iu->buf);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001932 break;
1933
David Dillowbb125882010-10-08 14:40:47 -04001934 case SRP_CRED_REQ:
Bart Van Assche509c07b2014-10-30 14:48:30 +01001935 srp_process_cred_req(ch, iu->buf);
David Dillowbb125882010-10-08 14:40:47 -04001936 break;
1937
1938 case SRP_AER_REQ:
Bart Van Assche509c07b2014-10-30 14:48:30 +01001939 srp_process_aer_req(ch, iu->buf);
David Dillowbb125882010-10-08 14:40:47 -04001940 break;
1941
Roland Dreieraef9ec32005-11-02 14:07:13 -08001942 case SRP_T_LOGOUT:
1943 /* XXX Handle target logout */
David Dillow7aa54bd2008-01-07 18:23:41 -05001944 shost_printk(KERN_WARNING, target->scsi_host,
1945 PFX "Got target logout request\n");
Roland Dreieraef9ec32005-11-02 14:07:13 -08001946 break;
1947
1948 default:
David Dillow7aa54bd2008-01-07 18:23:41 -05001949 shost_printk(KERN_WARNING, target->scsi_host,
1950 PFX "Unhandled SRP opcode 0x%02x\n", opcode);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001951 break;
1952 }
1953
Bart Van Assche509c07b2014-10-30 14:48:30 +01001954 ib_dma_sync_single_for_device(dev, iu->dma, ch->max_ti_iu_len,
Ralph Campbell85507bc2006-12-12 14:30:55 -08001955 DMA_FROM_DEVICE);
Bart Van Asschec996bb42010-07-30 10:59:05 +00001956
Bart Van Assche509c07b2014-10-30 14:48:30 +01001957 res = srp_post_recv(ch, iu);
Bart Van Asschec996bb42010-07-30 10:59:05 +00001958 if (res != 0)
1959 shost_printk(KERN_ERR, target->scsi_host,
1960 PFX "Recv failed with error code %d\n", res);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001961}
1962
Bart Van Asschec1120f82013-10-26 14:35:08 +02001963/**
1964 * srp_tl_err_work() - handle a transport layer error
Bart Van Asscheaf246632014-05-20 15:04:21 +02001965 * @work: Work structure embedded in an SRP target port.
Bart Van Asschec1120f82013-10-26 14:35:08 +02001966 *
1967 * Note: This function may get invoked before the rport has been created,
1968 * hence the target->rport test.
1969 */
1970static void srp_tl_err_work(struct work_struct *work)
1971{
1972 struct srp_target_port *target;
1973
1974 target = container_of(work, struct srp_target_port, tl_err_work);
1975 if (target->rport)
1976 srp_start_tl_fail_timers(target->rport);
1977}
1978
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001979static void srp_handle_qp_err(u64 wr_id, enum ib_wc_status wc_status,
Bart Van Assche7dad6b22014-10-21 18:00:35 +02001980 bool send_err, struct srp_rdma_ch *ch)
Bart Van Assche948d1e82011-09-03 09:25:42 +02001981{
Bart Van Assche7dad6b22014-10-21 18:00:35 +02001982 struct srp_target_port *target = ch->target;
1983
1984 if (wr_id == SRP_LAST_WR_ID) {
1985 complete(&ch->done);
1986 return;
1987 }
1988
Bart Van Asschec014c8c2015-05-18 13:23:57 +02001989 if (ch->connected && !target->qp_in_error) {
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001990 if (wr_id & LOCAL_INV_WR_ID_MASK) {
1991 shost_printk(KERN_ERR, target->scsi_host, PFX
Sagi Grimberg57363d92015-05-18 13:40:29 +03001992 "LOCAL_INV failed with status %s (%d)\n",
1993 ib_wc_status_msg(wc_status), wc_status);
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001994 } else if (wr_id & FAST_REG_WR_ID_MASK) {
1995 shost_printk(KERN_ERR, target->scsi_host, PFX
Sagi Grimberg57363d92015-05-18 13:40:29 +03001996 "FAST_REG_MR failed status %s (%d)\n",
1997 ib_wc_status_msg(wc_status), wc_status);
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001998 } else {
1999 shost_printk(KERN_ERR, target->scsi_host,
Sagi Grimberg57363d92015-05-18 13:40:29 +03002000 PFX "failed %s status %s (%d) for iu %p\n",
Bart Van Assche5cfb1782014-05-20 15:08:34 +02002001 send_err ? "send" : "receive",
Sagi Grimberg57363d92015-05-18 13:40:29 +03002002 ib_wc_status_msg(wc_status), wc_status,
2003 (void *)(uintptr_t)wr_id);
Bart Van Assche5cfb1782014-05-20 15:08:34 +02002004 }
Bart Van Asschec1120f82013-10-26 14:35:08 +02002005 queue_work(system_long_wq, &target->tl_err_work);
Bart Van Assche4f0af692012-11-26 11:16:40 +01002006 }
Bart Van Assche948d1e82011-09-03 09:25:42 +02002007 target->qp_in_error = true;
2008}
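/*
 * Illustration (not driver code, compiled out): the wr_id convention
 * decoded by srp_handle_qp_err() above. A 64-bit completion cookie
 * either carries an IU pointer or has a high flag bit set that marks a
 * LOCAL_INV or FAST_REG work request. The flag positions below are
 * placeholders, not the driver's actual mask values.
 */
#if 0
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define LOCAL_INV_FLAG	(1ULL << 62)	/* placeholder bit */
#define FAST_REG_FLAG	(1ULL << 63)	/* placeholder bit */

static void classify(uint64_t wr_id)
{
	if (wr_id & LOCAL_INV_FLAG)
		printf("local invalidate completion\n");
	else if (wr_id & FAST_REG_FLAG)
		printf("fast registration completion\n");
	else
		printf("IU completion, iu = %p\n",
		       (void *)(uintptr_t)wr_id);
}

int main(void)
{
	classify(FAST_REG_FLAG);
	classify(0x7f0000001000ULL);	/* looks like an IU pointer */
	return 0;
}
#endif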
2009
Bart Van Assche509c07b2014-10-30 14:48:30 +01002010static void srp_recv_completion(struct ib_cq *cq, void *ch_ptr)
Roland Dreieraef9ec32005-11-02 14:07:13 -08002011{
Bart Van Assche509c07b2014-10-30 14:48:30 +01002012 struct srp_rdma_ch *ch = ch_ptr;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002013 struct ib_wc wc;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002014
2015 ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
2016 while (ib_poll_cq(cq, 1, &wc) > 0) {
Bart Van Assche948d1e82011-09-03 09:25:42 +02002017 if (likely(wc.status == IB_WC_SUCCESS)) {
Bart Van Assche509c07b2014-10-30 14:48:30 +01002018 srp_handle_recv(ch, &wc);
Bart Van Assche948d1e82011-09-03 09:25:42 +02002019 } else {
Bart Van Assche7dad6b22014-10-21 18:00:35 +02002020 srp_handle_qp_err(wc.wr_id, wc.status, false, ch);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002021 }
Bart Van Assche9c03dc92010-02-02 19:23:54 +00002022 }
2023}
2024
Bart Van Assche509c07b2014-10-30 14:48:30 +01002025static void srp_send_completion(struct ib_cq *cq, void *ch_ptr)
Bart Van Assche9c03dc92010-02-02 19:23:54 +00002026{
Bart Van Assche509c07b2014-10-30 14:48:30 +01002027 struct srp_rdma_ch *ch = ch_ptr;
Bart Van Assche9c03dc92010-02-02 19:23:54 +00002028 struct ib_wc wc;
Bart Van Asschedcb4cb82010-11-26 13:22:48 -05002029 struct srp_iu *iu;
Bart Van Assche9c03dc92010-02-02 19:23:54 +00002030
2031 while (ib_poll_cq(cq, 1, &wc) > 0) {
Bart Van Assche948d1e82011-09-03 09:25:42 +02002032 if (likely(wc.status == IB_WC_SUCCESS)) {
2033 iu = (struct srp_iu *) (uintptr_t) wc.wr_id;
Bart Van Assche509c07b2014-10-30 14:48:30 +01002034 list_add(&iu->list, &ch->free_tx);
Bart Van Assche948d1e82011-09-03 09:25:42 +02002035 } else {
Bart Van Assche7dad6b22014-10-21 18:00:35 +02002036 srp_handle_qp_err(wc.wr_id, wc.status, true, ch);
Bart Van Assche9c03dc92010-02-02 19:23:54 +00002037 }
Roland Dreieraef9ec32005-11-02 14:07:13 -08002038 }
2039}
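/*
 * Illustration (not driver code, compiled out): the arm-then-drain
 * pattern used by srp_recv_completion() above. The CQ is re-armed
 * before polling, so a completion that slips in between the two steps
 * is either picked up by the drain loop or raises a fresh event; no
 * completion is silently missed. The CQ is faked with a small array.
 */
#if 0
#include <stdio.h>

static int queue[4] = { 11, 22, 0, 0 };	/* fake pending completions */
static int head;

static void rearm(void)
{
	printf("CQ re-armed\n");
}

static int poll_one(int *wc)
{
	if (!queue[head])
		return 0;
	*wc = queue[head++];
	return 1;
}

int main(void)
{
	int wc;

	rearm();		/* request an event for later completions */
	while (poll_one(&wc))	/* then drain what is already queued */
		printf("handled completion %d\n", wc);
	return 0;
}
#endif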
2040
Bart Van Assche76c75b22010-11-26 14:37:47 -05002041static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
Roland Dreieraef9ec32005-11-02 14:07:13 -08002042{
Bart Van Assche76c75b22010-11-26 14:37:47 -05002043 struct srp_target_port *target = host_to_target(shost);
Bart Van Asschea95cadb2013-10-26 14:37:17 +02002044 struct srp_rport *rport = target->rport;
Bart Van Assche509c07b2014-10-30 14:48:30 +01002045 struct srp_rdma_ch *ch;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002046 struct srp_request *req;
2047 struct srp_iu *iu;
2048 struct srp_cmd *cmd;
Ralph Campbell85507bc2006-12-12 14:30:55 -08002049 struct ib_device *dev;
Bart Van Assche76c75b22010-11-26 14:37:47 -05002050 unsigned long flags;
Bart Van Assche77f2c1a2014-10-02 15:29:25 +02002051 u32 tag;
2052 u16 idx;
Bart Van Assched1b42892014-05-20 15:07:20 +02002053 int len, ret;
Bart Van Asschea95cadb2013-10-26 14:37:17 +02002054 const bool in_scsi_eh = !in_interrupt() && current == shost->ehandler;
2055
2056 /*
2057 * The SCSI EH thread is the only context from which srp_queuecommand()
2058 * can get invoked for blocked devices (SDEV_BLOCK /
2059 * SDEV_CREATED_BLOCK). Avoid racing with srp_reconnect_rport() by
2060 * locking the rport mutex if invoked from inside the SCSI EH.
2061 */
2062 if (in_scsi_eh)
2063 mutex_lock(&rport->mutex);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002064
Bart Van Assched1b42892014-05-20 15:07:20 +02002065 scmnd->result = srp_chkready(target->rport);
2066 if (unlikely(scmnd->result))
2067 goto err;
Bart Van Assche2ce19e72013-02-21 17:20:00 +00002068
Bart Van Assche77f2c1a2014-10-02 15:29:25 +02002069 WARN_ON_ONCE(scmnd->request->tag < 0);
2070 tag = blk_mq_unique_tag(scmnd->request);
Bart Van Assched92c0da2014-10-06 17:14:36 +02002071 ch = &target->ch[blk_mq_unique_tag_to_hwq(tag)];
Bart Van Assche77f2c1a2014-10-02 15:29:25 +02002072 idx = blk_mq_unique_tag_to_tag(tag);
2073 WARN_ONCE(idx >= target->req_ring_size, "%s: tag %#x: idx %d >= %d\n",
2074 dev_name(&shost->shost_gendev), tag, idx,
2075 target->req_ring_size);
Bart Van Assche509c07b2014-10-30 14:48:30 +01002076
2077 spin_lock_irqsave(&ch->lock, flags);
2078 iu = __srp_get_tx_iu(ch, SRP_IU_CMD);
Bart Van Assche509c07b2014-10-30 14:48:30 +01002079 spin_unlock_irqrestore(&ch->lock, flags);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002080
Bart Van Assche77f2c1a2014-10-02 15:29:25 +02002081 if (!iu)
2082 goto err;
2083
2084 req = &ch->req_ring[idx];
Greg Kroah-Hartman05321932008-03-06 00:13:36 +01002085 dev = target->srp_host->srp_dev->dev;
David Dillow49248642011-01-14 18:23:24 -05002086 ib_dma_sync_single_for_cpu(dev, iu->dma, target->max_iu_len,
Ralph Campbell85507bc2006-12-12 14:30:55 -08002087 DMA_TO_DEVICE);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002088
David Dillowf8b6e312010-11-26 13:02:21 -05002089 scmnd->host_scribble = (void *) req;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002090
2091 cmd = iu->buf;
2092 memset(cmd, 0, sizeof *cmd);
2093
2094 cmd->opcode = SRP_CMD;
Bart Van Assche985aa492015-05-18 13:27:14 +02002095 int_to_scsilun(scmnd->device->lun, &cmd->lun);
Bart Van Assche77f2c1a2014-10-02 15:29:25 +02002096 cmd->tag = tag;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002097 memcpy(cmd->cdb, scmnd->cmnd, scmnd->cmd_len);
2098
Roland Dreieraef9ec32005-11-02 14:07:13 -08002099 req->scmnd = scmnd;
2100 req->cmd = iu;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002101
Bart Van Assche509c07b2014-10-30 14:48:30 +01002102 len = srp_map_data(scmnd, ch, req);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002103 if (len < 0) {
David Dillow7aa54bd2008-01-07 18:23:41 -05002104 shost_printk(KERN_ERR, target->scsi_host,
Bart Van Assched1b42892014-05-20 15:07:20 +02002105 PFX "Failed to map data (%d)\n", len);
2106 /*
2107 * If we ran out of memory descriptors (-ENOMEM) because an
2108 * application is queuing many requests with more than
Bart Van Assche52ede082014-05-20 15:07:45 +02002109 * max_pages_per_mr sg-list elements, tell the SCSI mid-layer
Bart Van Assched1b42892014-05-20 15:07:20 +02002110 * to reduce queue depth temporarily.
2111 */
2112 scmnd->result = len == -ENOMEM ?
2113 DID_OK << 16 | QUEUE_FULL << 1 : DID_ERROR << 16;
Bart Van Assche76c75b22010-11-26 14:37:47 -05002114 goto err_iu;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002115 }
2116
David Dillow49248642011-01-14 18:23:24 -05002117 ib_dma_sync_single_for_device(dev, iu->dma, target->max_iu_len,
Ralph Campbell85507bc2006-12-12 14:30:55 -08002118 DMA_TO_DEVICE);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002119
Bart Van Assche509c07b2014-10-30 14:48:30 +01002120 if (srp_post_send(ch, iu, len)) {
David Dillow7aa54bd2008-01-07 18:23:41 -05002121 shost_printk(KERN_ERR, target->scsi_host, PFX "Send failed\n");
Roland Dreieraef9ec32005-11-02 14:07:13 -08002122 goto err_unmap;
2123 }
2124
Bart Van Assched1b42892014-05-20 15:07:20 +02002125 ret = 0;
2126
Bart Van Asschea95cadb2013-10-26 14:37:17 +02002127unlock_rport:
2128 if (in_scsi_eh)
2129 mutex_unlock(&rport->mutex);
2130
Bart Van Assched1b42892014-05-20 15:07:20 +02002131 return ret;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002132
2133err_unmap:
Bart Van Assche509c07b2014-10-30 14:48:30 +01002134 srp_unmap_data(scmnd, ch, req);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002135
Bart Van Assche76c75b22010-11-26 14:37:47 -05002136err_iu:
Bart Van Assche509c07b2014-10-30 14:48:30 +01002137 srp_put_tx_iu(ch, iu, SRP_IU_CMD);
Bart Van Assche76c75b22010-11-26 14:37:47 -05002138
Bart Van Assche024ca902014-05-20 15:03:49 +02002139 /*
2140 * Prevent the loops that iterate over the request ring from
2141 * encountering a dangling SCSI command pointer.
2142 */
2143 req->scmnd = NULL;
2144
Bart Van Assched1b42892014-05-20 15:07:20 +02002145err:
2146 if (scmnd->result) {
2147 scmnd->scsi_done(scmnd);
2148 ret = 0;
2149 } else {
2150 ret = SCSI_MLQUEUE_HOST_BUSY;
2151 }
Bart Van Asschea95cadb2013-10-26 14:37:17 +02002152
Bart Van Assched1b42892014-05-20 15:07:20 +02002153 goto unlock_rport;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002154}
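/*
 * Illustration (not driver code, compiled out): the tag arithmetic
 * behind srp_queuecommand() above. A blk-mq "unique tag" is understood
 * to pack the hardware queue index into the upper 16 bits and the
 * per-queue tag into the lower 16 bits, which is how one value selects
 * both the RDMA channel and the request slot.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

#define UNIQUE_TAG_BITS	16
#define UNIQUE_TAG_MASK	((1U << UNIQUE_TAG_BITS) - 1)

int main(void)
{
	uint32_t unique = (2U << UNIQUE_TAG_BITS) | 37; /* hwq 2, tag 37 */

	printf("channel %u, request slot %u\n",
	       unique >> UNIQUE_TAG_BITS, unique & UNIQUE_TAG_MASK);
	return 0;
}
#endif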
2155
Bart Van Assche4d73f952013-10-26 14:40:37 +02002156/*
2157 * Note: the resources allocated in this function are freed in
Bart Van Assche509c07b2014-10-30 14:48:30 +01002158 * srp_free_ch_ib().
Bart Van Assche4d73f952013-10-26 14:40:37 +02002159 */
Bart Van Assche509c07b2014-10-30 14:48:30 +01002160static int srp_alloc_iu_bufs(struct srp_rdma_ch *ch)
Roland Dreieraef9ec32005-11-02 14:07:13 -08002161{
Bart Van Assche509c07b2014-10-30 14:48:30 +01002162 struct srp_target_port *target = ch->target;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002163 int i;
2164
Bart Van Assche509c07b2014-10-30 14:48:30 +01002165 ch->rx_ring = kcalloc(target->queue_size, sizeof(*ch->rx_ring),
2166 GFP_KERNEL);
2167 if (!ch->rx_ring)
Bart Van Assche4d73f952013-10-26 14:40:37 +02002168 goto err_no_ring;
Bart Van Assche509c07b2014-10-30 14:48:30 +01002169 ch->tx_ring = kcalloc(target->queue_size, sizeof(*ch->tx_ring),
2170 GFP_KERNEL);
2171 if (!ch->tx_ring)
Bart Van Assche4d73f952013-10-26 14:40:37 +02002172 goto err_no_ring;
2173
2174 for (i = 0; i < target->queue_size; ++i) {
Bart Van Assche509c07b2014-10-30 14:48:30 +01002175 ch->rx_ring[i] = srp_alloc_iu(target->srp_host,
2176 ch->max_ti_iu_len,
2177 GFP_KERNEL, DMA_FROM_DEVICE);
2178 if (!ch->rx_ring[i])
Roland Dreieraef9ec32005-11-02 14:07:13 -08002179 goto err;
2180 }
2181
Bart Van Assche4d73f952013-10-26 14:40:37 +02002182 for (i = 0; i < target->queue_size; ++i) {
Bart Van Assche509c07b2014-10-30 14:48:30 +01002183 ch->tx_ring[i] = srp_alloc_iu(target->srp_host,
2184 target->max_iu_len,
2185 GFP_KERNEL, DMA_TO_DEVICE);
2186 if (!ch->tx_ring[i])
Roland Dreieraef9ec32005-11-02 14:07:13 -08002187 goto err;
Bart Van Asschedcb4cb82010-11-26 13:22:48 -05002188
Bart Van Assche509c07b2014-10-30 14:48:30 +01002189 list_add(&ch->tx_ring[i]->list, &ch->free_tx);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002190 }
2191
2192 return 0;
2193
2194err:
Bart Van Assche4d73f952013-10-26 14:40:37 +02002195 for (i = 0; i < target->queue_size; ++i) {
Bart Van Assche509c07b2014-10-30 14:48:30 +01002196 srp_free_iu(target->srp_host, ch->rx_ring[i]);
2197 srp_free_iu(target->srp_host, ch->tx_ring[i]);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002198 }
2199
Bart Van Assche4d73f952013-10-26 14:40:37 +02002200
2201err_no_ring:
Bart Van Assche509c07b2014-10-30 14:48:30 +01002202 kfree(ch->tx_ring);
2203 ch->tx_ring = NULL;
2204 kfree(ch->rx_ring);
2205 ch->rx_ring = NULL;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002206
2207 return -ENOMEM;
2208}
2209
Bart Van Asschec9b03c12011-09-03 09:34:48 +02002210static uint32_t srp_compute_rq_tmo(struct ib_qp_attr *qp_attr, int attr_mask)
2211{
2212 uint64_t T_tr_ns, max_compl_time_ms;
2213 uint32_t rq_tmo_jiffies;
2214
2215 /*
2216 * According to section 11.2.4.2 in the IBTA spec (Modify Queue Pair,
2217 * table 91), both the QP timeout and the retry count have to be set
2218 * for RC QP's during the RTR to RTS transition.
2219 */
2220 WARN_ON_ONCE((attr_mask & (IB_QP_TIMEOUT | IB_QP_RETRY_CNT)) !=
2221 (IB_QP_TIMEOUT | IB_QP_RETRY_CNT));
2222
2223 /*
2224 * Set target->rq_tmo_jiffies to one second more than the largest time
2225 * it can take before an error completion is generated. See also
2226 * C9-140..142 in the IBTA spec for more information about how to
2227 * convert the QP Local ACK Timeout value to nanoseconds.
2228 */
2229 T_tr_ns = 4096 * (1ULL << qp_attr->timeout);
2230 max_compl_time_ms = qp_attr->retry_cnt * 4 * T_tr_ns;
2231 do_div(max_compl_time_ms, NSEC_PER_MSEC);
2232 rq_tmo_jiffies = msecs_to_jiffies(max_compl_time_ms + 1000);
2233
2234 return rq_tmo_jiffies;
2235}
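/*
 * Illustration (not driver code, compiled out): srp_compute_rq_tmo()
 * above with concrete numbers. A QP Local ACK Timeout of 14 means
 * T_tr = 4096 ns * 2^14 (about 67 ms), and with retry_cnt = 7 the last
 * error completion can take 7 * 4 * T_tr, to which one extra second is
 * added. The inputs are typical values, not fixed driver defaults.
 */
#if 0
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	unsigned int timeout = 14, retry_cnt = 7;
	uint64_t T_tr_ns = 4096ULL * (1ULL << timeout);
	uint64_t max_compl_ms = retry_cnt * 4 * T_tr_ns / 1000000;

	/* ~67 ms per try, ~1879 ms worst case, ~2879 ms timeout */
	printf("T_tr = %" PRIu64 " ns, rq_tmo = %" PRIu64 " ms\n",
	       T_tr_ns, max_compl_ms + 1000);
	return 0;
}
#endif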
2236
David Dillow961e0be2011-01-14 17:32:07 -05002237static void srp_cm_rep_handler(struct ib_cm_id *cm_id,
Bart Van Asschee6300cb2015-07-31 14:12:48 -07002238 const struct srp_login_rsp *lrsp,
Bart Van Assche509c07b2014-10-30 14:48:30 +01002239 struct srp_rdma_ch *ch)
David Dillow961e0be2011-01-14 17:32:07 -05002240{
Bart Van Assche509c07b2014-10-30 14:48:30 +01002241 struct srp_target_port *target = ch->target;
David Dillow961e0be2011-01-14 17:32:07 -05002242 struct ib_qp_attr *qp_attr = NULL;
2243 int attr_mask = 0;
2244 int ret;
2245 int i;
2246
2247 if (lrsp->opcode == SRP_LOGIN_RSP) {
Bart Van Assche509c07b2014-10-30 14:48:30 +01002248 ch->max_ti_iu_len = be32_to_cpu(lrsp->max_ti_iu_len);
2249 ch->req_lim = be32_to_cpu(lrsp->req_lim_delta);
David Dillow961e0be2011-01-14 17:32:07 -05002250
2251 /*
2252 * Reserve credits for task management so we don't
2253 * bounce requests back to the SCSI mid-layer.
2254 */
2255 target->scsi_host->can_queue
Bart Van Assche509c07b2014-10-30 14:48:30 +01002256 = min(ch->req_lim - SRP_TSK_MGMT_SQ_SIZE,
David Dillow961e0be2011-01-14 17:32:07 -05002257 target->scsi_host->can_queue);
Bart Van Assche4d73f952013-10-26 14:40:37 +02002258 target->scsi_host->cmd_per_lun
2259 = min_t(int, target->scsi_host->can_queue,
2260 target->scsi_host->cmd_per_lun);
David Dillow961e0be2011-01-14 17:32:07 -05002261 } else {
2262 shost_printk(KERN_WARNING, target->scsi_host,
2263 PFX "Unhandled RSP opcode %#x\n", lrsp->opcode);
2264 ret = -ECONNRESET;
2265 goto error;
2266 }
2267
Bart Van Assche509c07b2014-10-30 14:48:30 +01002268 if (!ch->rx_ring) {
2269 ret = srp_alloc_iu_bufs(ch);
David Dillow961e0be2011-01-14 17:32:07 -05002270 if (ret)
2271 goto error;
2272 }
2273
2274 ret = -ENOMEM;
2275 qp_attr = kmalloc(sizeof *qp_attr, GFP_KERNEL);
2276 if (!qp_attr)
2277 goto error;
2278
2279 qp_attr->qp_state = IB_QPS_RTR;
2280 ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
2281 if (ret)
2282 goto error_free;
2283
Bart Van Assche509c07b2014-10-30 14:48:30 +01002284 ret = ib_modify_qp(ch->qp, qp_attr, attr_mask);
David Dillow961e0be2011-01-14 17:32:07 -05002285 if (ret)
2286 goto error_free;
2287
Bart Van Assche4d73f952013-10-26 14:40:37 +02002288 for (i = 0; i < target->queue_size; i++) {
Bart Van Assche509c07b2014-10-30 14:48:30 +01002289 struct srp_iu *iu = ch->rx_ring[i];
2290
2291 ret = srp_post_recv(ch, iu);
David Dillow961e0be2011-01-14 17:32:07 -05002292 if (ret)
2293 goto error_free;
2294 }
2295
2296 qp_attr->qp_state = IB_QPS_RTS;
2297 ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
2298 if (ret)
2299 goto error_free;
2300
Bart Van Asschec9b03c12011-09-03 09:34:48 +02002301 target->rq_tmo_jiffies = srp_compute_rq_tmo(qp_attr, attr_mask);
2302
Bart Van Assche509c07b2014-10-30 14:48:30 +01002303 ret = ib_modify_qp(ch->qp, qp_attr, attr_mask);
David Dillow961e0be2011-01-14 17:32:07 -05002304 if (ret)
2305 goto error_free;
2306
2307 ret = ib_send_cm_rtu(cm_id, NULL, 0);
2308
2309error_free:
2310 kfree(qp_attr);
2311
2312error:
Bart Van Assche509c07b2014-10-30 14:48:30 +01002313 ch->status = ret;
David Dillow961e0be2011-01-14 17:32:07 -05002314}
2315
Roland Dreieraef9ec32005-11-02 14:07:13 -08002316static void srp_cm_rej_handler(struct ib_cm_id *cm_id,
2317 struct ib_cm_event *event,
Bart Van Assche509c07b2014-10-30 14:48:30 +01002318 struct srp_rdma_ch *ch)
Roland Dreieraef9ec32005-11-02 14:07:13 -08002319{
Bart Van Assche509c07b2014-10-30 14:48:30 +01002320 struct srp_target_port *target = ch->target;
David Dillow7aa54bd2008-01-07 18:23:41 -05002321 struct Scsi_Host *shost = target->scsi_host;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002322 struct ib_class_port_info *cpi;
2323 int opcode;
2324
2325 switch (event->param.rej_rcvd.reason) {
2326 case IB_CM_REJ_PORT_CM_REDIRECT:
2327 cpi = event->param.rej_rcvd.ari;
Bart Van Assche509c07b2014-10-30 14:48:30 +01002328 ch->path.dlid = cpi->redirect_lid;
2329 ch->path.pkey = cpi->redirect_pkey;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002330 cm_id->remote_cm_qpn = be32_to_cpu(cpi->redirect_qp) & 0x00ffffff;
Bart Van Assche509c07b2014-10-30 14:48:30 +01002331 memcpy(ch->path.dgid.raw, cpi->redirect_gid, 16);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002332
Bart Van Assche509c07b2014-10-30 14:48:30 +01002333 ch->status = ch->path.dlid ?
Roland Dreieraef9ec32005-11-02 14:07:13 -08002334 SRP_DLID_REDIRECT : SRP_PORT_REDIRECT;
2335 break;
2336
2337 case IB_CM_REJ_PORT_REDIRECT:
Roland Dreier5d7cbfd2007-08-03 10:45:18 -07002338 if (srp_target_is_topspin(target)) {
Roland Dreieraef9ec32005-11-02 14:07:13 -08002339 /*
2340 * Topspin/Cisco SRP gateways incorrectly send
2341 * reject reason code 25 when they mean 24
2342 * (port redirect).
2343 */
Bart Van Assche509c07b2014-10-30 14:48:30 +01002344 memcpy(ch->path.dgid.raw,
Roland Dreieraef9ec32005-11-02 14:07:13 -08002345 event->param.rej_rcvd.ari, 16);
2346
David Dillow7aa54bd2008-01-07 18:23:41 -05002347 shost_printk(KERN_DEBUG, shost,
2348 PFX "Topspin/Cisco redirect to target port GID %016llx%016llx\n",
Bart Van Assche509c07b2014-10-30 14:48:30 +01002349 be64_to_cpu(ch->path.dgid.global.subnet_prefix),
2350 be64_to_cpu(ch->path.dgid.global.interface_id));
Roland Dreieraef9ec32005-11-02 14:07:13 -08002351
Bart Van Assche509c07b2014-10-30 14:48:30 +01002352 ch->status = SRP_PORT_REDIRECT;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002353 } else {
David Dillow7aa54bd2008-01-07 18:23:41 -05002354 shost_printk(KERN_WARNING, shost,
2355 " REJ reason: IB_CM_REJ_PORT_REDIRECT\n");
Bart Van Assche509c07b2014-10-30 14:48:30 +01002356 ch->status = -ECONNRESET;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002357 }
2358 break;
2359
2360 case IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID:
David Dillow7aa54bd2008-01-07 18:23:41 -05002361 shost_printk(KERN_WARNING, shost,
2362 " REJ reason: IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID\n");
Bart Van Assche509c07b2014-10-30 14:48:30 +01002363 ch->status = -ECONNRESET;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002364 break;
2365
2366 case IB_CM_REJ_CONSUMER_DEFINED:
2367 opcode = *(u8 *) event->private_data;
2368 if (opcode == SRP_LOGIN_REJ) {
2369 struct srp_login_rej *rej = event->private_data;
2370 u32 reason = be32_to_cpu(rej->reason);
2371
2372 if (reason == SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE)
David Dillow7aa54bd2008-01-07 18:23:41 -05002373 shost_printk(KERN_WARNING, shost,
2374 PFX "SRP_LOGIN_REJ: requested max_it_iu_len too large\n");
Roland Dreieraef9ec32005-11-02 14:07:13 -08002375 else
Bart Van Asschee7ffde02014-03-14 13:52:21 +01002376 shost_printk(KERN_WARNING, shost, PFX
2377 "SRP LOGIN from %pI6 to %pI6 REJECTED, reason 0x%08x\n",
Bart Van Assche747fe002014-10-30 14:48:05 +01002378 target->sgid.raw,
2379 target->orig_dgid.raw, reason);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002380 } else
David Dillow7aa54bd2008-01-07 18:23:41 -05002381 shost_printk(KERN_WARNING, shost,
2382 " REJ reason: IB_CM_REJ_CONSUMER_DEFINED,"
2383 " opcode 0x%02x\n", opcode);
Bart Van Assche509c07b2014-10-30 14:48:30 +01002384 ch->status = -ECONNRESET;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002385 break;
2386
David Dillow9fe4bcf2008-01-08 17:08:52 -05002387 case IB_CM_REJ_STALE_CONN:
2388 shost_printk(KERN_WARNING, shost, " REJ reason: stale connection\n");
Bart Van Assche509c07b2014-10-30 14:48:30 +01002389 ch->status = SRP_STALE_CONN;
David Dillow9fe4bcf2008-01-08 17:08:52 -05002390 break;
2391
Roland Dreieraef9ec32005-11-02 14:07:13 -08002392 default:
David Dillow7aa54bd2008-01-07 18:23:41 -05002393 shost_printk(KERN_WARNING, shost, " REJ reason 0x%x\n",
2394 event->param.rej_rcvd.reason);
Bart Van Assche509c07b2014-10-30 14:48:30 +01002395 ch->status = -ECONNRESET;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002396 }
2397}
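/*
 * A sketch of how the connect path consumes the ch->status values set
 * above: SRP_PORT_REDIRECT makes srp_connect_ch() redo the path query
 * and retry the login, SRP_DLID_REDIRECT retries the login on the
 * redirected path, and SRP_STALE_CONN or -ECONNRESET end the login
 * attempt.
 */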
2398
2399static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
2400{
Bart Van Assche509c07b2014-10-30 14:48:30 +01002401 struct srp_rdma_ch *ch = cm_id->context;
2402 struct srp_target_port *target = ch->target;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002403 int comp = 0;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002404
2405 switch (event->event) {
2406 case IB_CM_REQ_ERROR:
David Dillow7aa54bd2008-01-07 18:23:41 -05002407 shost_printk(KERN_DEBUG, target->scsi_host,
2408 PFX "Sending CM REQ failed\n");
Roland Dreieraef9ec32005-11-02 14:07:13 -08002409 comp = 1;
Bart Van Assche509c07b2014-10-30 14:48:30 +01002410 ch->status = -ECONNRESET;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002411 break;
2412
2413 case IB_CM_REP_RECEIVED:
2414 comp = 1;
Bart Van Assche509c07b2014-10-30 14:48:30 +01002415 srp_cm_rep_handler(cm_id, event->private_data, ch);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002416 break;
2417
2418 case IB_CM_REJ_RECEIVED:
David Dillow7aa54bd2008-01-07 18:23:41 -05002419 shost_printk(KERN_DEBUG, target->scsi_host, PFX "REJ received\n");
Roland Dreieraef9ec32005-11-02 14:07:13 -08002420 comp = 1;
2421
Bart Van Assche509c07b2014-10-30 14:48:30 +01002422 srp_cm_rej_handler(cm_id, event, ch);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002423 break;
2424
Ishai Rabinovitzb7ac4ab2006-06-17 20:37:32 -07002425 case IB_CM_DREQ_RECEIVED:
David Dillow7aa54bd2008-01-07 18:23:41 -05002426 shost_printk(KERN_WARNING, target->scsi_host,
2427 PFX "DREQ received - connection closed\n");
Bart Van Asschec014c8c2015-05-18 13:23:57 +02002428 ch->connected = false;
Ishai Rabinovitzb7ac4ab2006-06-17 20:37:32 -07002429 if (ib_send_cm_drep(cm_id, NULL, 0))
David Dillow7aa54bd2008-01-07 18:23:41 -05002430 shost_printk(KERN_ERR, target->scsi_host,
2431 PFX "Sending CM DREP failed\n");
Bart Van Asschec1120f82013-10-26 14:35:08 +02002432 queue_work(system_long_wq, &target->tl_err_work);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002433 break;
2434
2435 case IB_CM_TIMEWAIT_EXIT:
David Dillow7aa54bd2008-01-07 18:23:41 -05002436 shost_printk(KERN_ERR, target->scsi_host,
2437 PFX "connection closed\n");
Bart Van Asscheac72d762014-03-14 13:53:40 +01002438 comp = 1;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002439
Bart Van Assche509c07b2014-10-30 14:48:30 +01002440 ch->status = 0;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002441 break;
2442
Ishai Rabinovitzb7ac4ab2006-06-17 20:37:32 -07002443 case IB_CM_MRA_RECEIVED:
2444 case IB_CM_DREQ_ERROR:
2445 case IB_CM_DREP_RECEIVED:
2446 break;
2447
Roland Dreieraef9ec32005-11-02 14:07:13 -08002448 default:
David Dillow7aa54bd2008-01-07 18:23:41 -05002449 shost_printk(KERN_WARNING, target->scsi_host,
2450 PFX "Unhandled CM event %d\n", event->event);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002451 break;
2452 }
2453
2454 if (comp)
Bart Van Assche509c07b2014-10-30 14:48:30 +01002455 complete(&ch->done);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002456
Roland Dreieraef9ec32005-11-02 14:07:13 -08002457 return 0;
2458}
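/*
 * Note on the comp flag above: srp_connect_ch() blocks on ch->done
 * after sending the login request, so completing it here for REQ
 * errors, REPs and REJs is what wakes the connect path (a sketch of
 * the existing flow, not new behavior).
 */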
2459
Jack Wang71444b92013-11-07 11:37:37 +01002460/**
Jack Wang71444b92013-11-07 11:37:37 +01002461 * srp_change_queue_depth - set the device queue depth
2462 * @sdev: scsi device struct
2463 * @qdepth: requested queue depth
Jack Wang71444b92013-11-07 11:37:37 +01002464 *
2465 * Returns queue depth.
2466 */
2467static int
Christoph Hellwigdb5ed4d2014-11-13 15:08:42 +01002468srp_change_queue_depth(struct scsi_device *sdev, int qdepth)
Jack Wang71444b92013-11-07 11:37:37 +01002469{
Christoph Hellwigc40ecc12014-11-13 14:25:11 +01002470 if (!sdev->tagged_supported)
Christoph Hellwig1e6f2412014-11-13 14:27:41 +01002471 qdepth = 1;
Christoph Hellwigdb5ed4d2014-11-13 15:08:42 +01002472 return scsi_change_queue_depth(sdev, qdepth);
Jack Wang71444b92013-11-07 11:37:37 +01002473}
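/*
 * Illustrative usage: the SCSI midlayer calls this callback when user
 * space adjusts a device's queue depth, e.g. (path for illustration):
 *
 *	echo 64 > /sys/bus/scsi/devices/<h:c:t:l>/queue_depth
 *
 * Untagged devices are pinned to a queue depth of 1 as implemented
 * above.
 */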
2474
Bart Van Assche985aa492015-05-18 13:27:14 +02002475static int srp_send_tsk_mgmt(struct srp_rdma_ch *ch, u64 req_tag, u64 lun,
2476 u8 func)
Roland Dreieraef9ec32005-11-02 14:07:13 -08002477{
Bart Van Assche509c07b2014-10-30 14:48:30 +01002478 struct srp_target_port *target = ch->target;
Bart Van Asschea95cadb2013-10-26 14:37:17 +02002479 struct srp_rport *rport = target->rport;
David Dillow19081f32010-10-18 08:54:49 -04002480 struct ib_device *dev = target->srp_host->srp_dev->dev;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002481 struct srp_iu *iu;
2482 struct srp_tsk_mgmt *tsk_mgmt;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002483
Bart Van Asschec014c8c2015-05-18 13:23:57 +02002484 if (!ch->connected || target->qp_in_error)
Bart Van Assche3780d1f2013-02-21 17:18:00 +00002485 return -1;
2486
Bart Van Assche509c07b2014-10-30 14:48:30 +01002487 init_completion(&ch->tsk_mgmt_done);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002488
Bart Van Asschea95cadb2013-10-26 14:37:17 +02002489 /*
Bart Van Assche509c07b2014-10-30 14:48:30 +01002490 * Lock the rport mutex to prevent srp_create_ch_ib() from being
Bart Van Asschea95cadb2013-10-26 14:37:17 +02002491 * invoked while a task management function is being sent.
2492 */
2493 mutex_lock(&rport->mutex);
Bart Van Assche509c07b2014-10-30 14:48:30 +01002494 spin_lock_irq(&ch->lock);
2495 iu = __srp_get_tx_iu(ch, SRP_IU_TSK_MGMT);
2496 spin_unlock_irq(&ch->lock);
Bart Van Assche76c75b22010-11-26 14:37:47 -05002497
Bart Van Asschea95cadb2013-10-26 14:37:17 +02002498 if (!iu) {
2499 mutex_unlock(&rport->mutex);
2500
Bart Van Assche76c75b22010-11-26 14:37:47 -05002501 return -1;
Bart Van Asschea95cadb2013-10-26 14:37:17 +02002502 }
Roland Dreieraef9ec32005-11-02 14:07:13 -08002503
David Dillow19081f32010-10-18 08:54:49 -04002504 ib_dma_sync_single_for_cpu(dev, iu->dma, sizeof *tsk_mgmt,
2505 DMA_TO_DEVICE);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002506 tsk_mgmt = iu->buf;
2507 memset(tsk_mgmt, 0, sizeof *tsk_mgmt);
2508
2509 tsk_mgmt->opcode = SRP_TSK_MGMT;
Bart Van Assche985aa492015-05-18 13:27:14 +02002510 int_to_scsilun(lun, &tsk_mgmt->lun);
David Dillowf8b6e312010-11-26 13:02:21 -05002511 tsk_mgmt->tag = req_tag | SRP_TAG_TSK_MGMT;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002512 tsk_mgmt->tsk_mgmt_func = func;
David Dillowf8b6e312010-11-26 13:02:21 -05002513 tsk_mgmt->task_tag = req_tag;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002514
David Dillow19081f32010-10-18 08:54:49 -04002515 ib_dma_sync_single_for_device(dev, iu->dma, sizeof *tsk_mgmt,
2516 DMA_TO_DEVICE);
Bart Van Assche509c07b2014-10-30 14:48:30 +01002517 if (srp_post_send(ch, iu, sizeof(*tsk_mgmt))) {
2518 srp_put_tx_iu(ch, iu, SRP_IU_TSK_MGMT);
Bart Van Asschea95cadb2013-10-26 14:37:17 +02002519 mutex_unlock(&rport->mutex);
2520
Bart Van Assche76c75b22010-11-26 14:37:47 -05002521 return -1;
2522 }
Bart Van Asschea95cadb2013-10-26 14:37:17 +02002523 mutex_unlock(&rport->mutex);
Roland Dreierd945e1d2006-05-09 10:50:28 -07002524
Bart Van Assche509c07b2014-10-30 14:48:30 +01002525 if (!wait_for_completion_timeout(&ch->tsk_mgmt_done,
Roland Dreieraef9ec32005-11-02 14:07:13 -08002526 msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS)))
Roland Dreierd945e1d2006-05-09 10:50:28 -07002527 return -1;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002528
Roland Dreierd945e1d2006-05-09 10:50:28 -07002529 return 0;
Roland Dreierd945e1d2006-05-09 10:50:28 -07002530}
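/*
 * Typical invocations, mirroring the callers below: aborting a single
 * command uses that command's tag,
 *
 *	srp_send_tsk_mgmt(ch, tag, scmnd->device->lun, SRP_TSK_ABORT_TASK);
 *
 * while a LUN reset is not tied to any request and passes
 * SRP_TAG_NO_REQ. Both wait at most SRP_ABORT_TIMEOUT_MS for the
 * target's response.
 */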
2531
Roland Dreieraef9ec32005-11-02 14:07:13 -08002532static int srp_abort(struct scsi_cmnd *scmnd)
2533{
Roland Dreierd945e1d2006-05-09 10:50:28 -07002534 struct srp_target_port *target = host_to_target(scmnd->device->host);
David Dillowf8b6e312010-11-26 13:02:21 -05002535 struct srp_request *req = (struct srp_request *) scmnd->host_scribble;
Bart Van Assche77f2c1a2014-10-02 15:29:25 +02002536 u32 tag;
Bart Van Assched92c0da2014-10-06 17:14:36 +02002537 u16 ch_idx;
Bart Van Assche509c07b2014-10-30 14:48:30 +01002538 struct srp_rdma_ch *ch;
Bart Van Assche086f44f2013-06-12 15:23:04 +02002539 int ret;
Roland Dreierd945e1d2006-05-09 10:50:28 -07002540
David Dillow7aa54bd2008-01-07 18:23:41 -05002541 shost_printk(KERN_ERR, target->scsi_host, "SRP abort called\n");
Roland Dreieraef9ec32005-11-02 14:07:13 -08002542
Bart Van Assched92c0da2014-10-06 17:14:36 +02002543 if (!req)
Bart Van Assche99b66972013-10-10 13:52:33 +02002544 return SUCCESS;
Bart Van Assche77f2c1a2014-10-02 15:29:25 +02002545 tag = blk_mq_unique_tag(scmnd->request);
Bart Van Assched92c0da2014-10-06 17:14:36 +02002546 ch_idx = blk_mq_unique_tag_to_hwq(tag);
2547 if (WARN_ON_ONCE(ch_idx >= target->ch_count))
2548 return SUCCESS;
2549 ch = &target->ch[ch_idx];
2550 if (!srp_claim_req(ch, req, NULL, scmnd))
2551 return SUCCESS;
2552 shost_printk(KERN_ERR, target->scsi_host,
2553 "Sending SRP abort for tag %#x\n", tag);
Bart Van Assche77f2c1a2014-10-02 15:29:25 +02002554 if (srp_send_tsk_mgmt(ch, tag, scmnd->device->lun,
Bart Van Assche80d5e8a2013-07-10 17:36:35 +02002555 SRP_TSK_ABORT_TASK) == 0)
Bart Van Assche086f44f2013-06-12 15:23:04 +02002556 ret = SUCCESS;
Bart Van Asscheed9b2262013-10-26 14:34:27 +02002557 else if (target->rport->state == SRP_RPORT_LOST)
Bart Van Assche99e1c132013-06-28 14:49:58 +02002558 ret = FAST_IO_FAIL;
Bart Van Assche086f44f2013-06-12 15:23:04 +02002559 else
2560 ret = FAILED;
Bart Van Assche509c07b2014-10-30 14:48:30 +01002561 srp_free_req(ch, req, scmnd, 0);
Bart Van Assche22032992012-08-14 13:18:53 +00002562 scmnd->result = DID_ABORT << 16;
Bart Van Assched8536672012-08-24 10:29:11 +00002563 scmnd->scsi_done(scmnd);
Roland Dreierd945e1d2006-05-09 10:50:28 -07002564
Bart Van Assche086f44f2013-06-12 15:23:04 +02002565 return ret;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002566}
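/*
 * The blk-mq unique tag used above packs the hardware queue index into
 * the upper 16 bits and the per-queue tag into the lower 16 bits;
 * blk_mq_unique_tag_to_hwq() extracts the former, which is how
 * srp_abort() maps a command back to its RDMA channel.
 */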
2567
2568static int srp_reset_device(struct scsi_cmnd *scmnd)
2569{
Roland Dreierd945e1d2006-05-09 10:50:28 -07002570 struct srp_target_port *target = host_to_target(scmnd->device->host);
Bart Van Assched92c0da2014-10-06 17:14:36 +02002571 struct srp_rdma_ch *ch;
Bart Van Assche536ae142010-11-26 13:58:27 -05002572 int i, j;
Roland Dreierd945e1d2006-05-09 10:50:28 -07002573
David Dillow7aa54bd2008-01-07 18:23:41 -05002574 shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n");
Roland Dreieraef9ec32005-11-02 14:07:13 -08002575
Bart Van Assched92c0da2014-10-06 17:14:36 +02002576 ch = &target->ch[0];
Bart Van Assche509c07b2014-10-30 14:48:30 +01002577 if (srp_send_tsk_mgmt(ch, SRP_TAG_NO_REQ, scmnd->device->lun,
David Dillowf8b6e312010-11-26 13:02:21 -05002578 SRP_TSK_LUN_RESET))
Roland Dreierd945e1d2006-05-09 10:50:28 -07002579 return FAILED;
Bart Van Assche509c07b2014-10-30 14:48:30 +01002580 if (ch->tsk_mgmt_status)
Roland Dreierd945e1d2006-05-09 10:50:28 -07002581 return FAILED;
2582
Bart Van Assched92c0da2014-10-06 17:14:36 +02002583 for (i = 0; i < target->ch_count; i++) {
2584 ch = &target->ch[i];
 2585 for (j = 0; j < target->req_ring_size; ++j) {
 2586 struct srp_request *req = &ch->req_ring[j];
Bart Van Assche509c07b2014-10-30 14:48:30 +01002587
Bart Van Assched92c0da2014-10-06 17:14:36 +02002588 srp_finish_req(ch, req, scmnd->device, DID_RESET << 16);
2589 }
Bart Van Assche536ae142010-11-26 13:58:27 -05002590 }
Roland Dreierd945e1d2006-05-09 10:50:28 -07002591
Roland Dreierd945e1d2006-05-09 10:50:28 -07002592 return SUCCESS;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002593}
2594
2595static int srp_reset_host(struct scsi_cmnd *scmnd)
2596{
2597 struct srp_target_port *target = host_to_target(scmnd->device->host);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002598
David Dillow7aa54bd2008-01-07 18:23:41 -05002599 shost_printk(KERN_ERR, target->scsi_host, PFX "SRP reset_host called\n");
Roland Dreieraef9ec32005-11-02 14:07:13 -08002600
Bart Van Asscheed9b2262013-10-26 14:34:27 +02002601 return srp_reconnect_rport(target->rport) == 0 ? SUCCESS : FAILED;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002602}
2603
Bart Van Asschec9b03c12011-09-03 09:34:48 +02002604static int srp_slave_configure(struct scsi_device *sdev)
2605{
2606 struct Scsi_Host *shost = sdev->host;
2607 struct srp_target_port *target = host_to_target(shost);
2608 struct request_queue *q = sdev->request_queue;
2609 unsigned long timeout;
2610
2611 if (sdev->type == TYPE_DISK) {
2612 timeout = max_t(unsigned, 30 * HZ, target->rq_tmo_jiffies);
2613 blk_queue_rq_timeout(q, timeout);
2614 }
2615
2616 return 0;
2617}
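/*
 * A sketch of the intent here: rq_tmo_jiffies is derived from the IB CM
 * timeouts when the connection is established (see
 * srp_compute_rq_tmo()), so the block layer request timeout stays long
 * enough that the transport layer gets a chance to fail requests first.
 */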
2618
Tony Jonesee959b02008-02-22 00:13:36 +01002619static ssize_t show_id_ext(struct device *dev, struct device_attribute *attr,
2620 char *buf)
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002621{
Tony Jonesee959b02008-02-22 00:13:36 +01002622 struct srp_target_port *target = host_to_target(class_to_shost(dev));
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002623
Bart Van Assche45c37ca2015-05-18 13:25:10 +02002624 return sprintf(buf, "0x%016llx\n", be64_to_cpu(target->id_ext));
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002625}
2626
Tony Jonesee959b02008-02-22 00:13:36 +01002627static ssize_t show_ioc_guid(struct device *dev, struct device_attribute *attr,
2628 char *buf)
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002629{
Tony Jonesee959b02008-02-22 00:13:36 +01002630 struct srp_target_port *target = host_to_target(class_to_shost(dev));
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002631
Bart Van Assche45c37ca2015-05-18 13:25:10 +02002632 return sprintf(buf, "0x%016llx\n", be64_to_cpu(target->ioc_guid));
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002633}
2634
Tony Jonesee959b02008-02-22 00:13:36 +01002635static ssize_t show_service_id(struct device *dev,
2636 struct device_attribute *attr, char *buf)
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002637{
Tony Jonesee959b02008-02-22 00:13:36 +01002638 struct srp_target_port *target = host_to_target(class_to_shost(dev));
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002639
Bart Van Assche45c37ca2015-05-18 13:25:10 +02002640 return sprintf(buf, "0x%016llx\n", be64_to_cpu(target->service_id));
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002641}
2642
Tony Jonesee959b02008-02-22 00:13:36 +01002643static ssize_t show_pkey(struct device *dev, struct device_attribute *attr,
2644 char *buf)
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002645{
Tony Jonesee959b02008-02-22 00:13:36 +01002646 struct srp_target_port *target = host_to_target(class_to_shost(dev));
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002647
Bart Van Assche747fe002014-10-30 14:48:05 +01002648 return sprintf(buf, "0x%04x\n", be16_to_cpu(target->pkey));
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002649}
2650
Bart Van Assche848b3082013-10-26 14:38:12 +02002651static ssize_t show_sgid(struct device *dev, struct device_attribute *attr,
2652 char *buf)
2653{
2654 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2655
Bart Van Assche747fe002014-10-30 14:48:05 +01002656 return sprintf(buf, "%pI6\n", target->sgid.raw);
Bart Van Assche848b3082013-10-26 14:38:12 +02002657}
2658
Tony Jonesee959b02008-02-22 00:13:36 +01002659static ssize_t show_dgid(struct device *dev, struct device_attribute *attr,
2660 char *buf)
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002661{
Tony Jonesee959b02008-02-22 00:13:36 +01002662 struct srp_target_port *target = host_to_target(class_to_shost(dev));
Bart Van Assched92c0da2014-10-06 17:14:36 +02002663 struct srp_rdma_ch *ch = &target->ch[0];
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002664
Bart Van Assche509c07b2014-10-30 14:48:30 +01002665 return sprintf(buf, "%pI6\n", ch->path.dgid.raw);
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002666}
2667
Tony Jonesee959b02008-02-22 00:13:36 +01002668static ssize_t show_orig_dgid(struct device *dev,
2669 struct device_attribute *attr, char *buf)
Ishai Rabinovitz3633b3d2007-05-06 21:18:11 -07002670{
Tony Jonesee959b02008-02-22 00:13:36 +01002671 struct srp_target_port *target = host_to_target(class_to_shost(dev));
Ishai Rabinovitz3633b3d2007-05-06 21:18:11 -07002672
Bart Van Assche747fe002014-10-30 14:48:05 +01002673 return sprintf(buf, "%pI6\n", target->orig_dgid.raw);
Ishai Rabinovitz3633b3d2007-05-06 21:18:11 -07002674}
2675
Bart Van Assche89de7482010-08-03 14:08:45 +00002676static ssize_t show_req_lim(struct device *dev,
2677 struct device_attribute *attr, char *buf)
2678{
2679 struct srp_target_port *target = host_to_target(class_to_shost(dev));
Bart Van Assched92c0da2014-10-06 17:14:36 +02002680 struct srp_rdma_ch *ch;
2681 int i, req_lim = INT_MAX;
Bart Van Assche89de7482010-08-03 14:08:45 +00002682
Bart Van Assched92c0da2014-10-06 17:14:36 +02002683 for (i = 0; i < target->ch_count; i++) {
2684 ch = &target->ch[i];
2685 req_lim = min(req_lim, ch->req_lim);
2686 }
2687 return sprintf(buf, "%d\n", req_lim);
Bart Van Assche89de7482010-08-03 14:08:45 +00002688}
2689
Tony Jonesee959b02008-02-22 00:13:36 +01002690static ssize_t show_zero_req_lim(struct device *dev,
2691 struct device_attribute *attr, char *buf)
Roland Dreier6bfa24f2006-06-17 20:37:33 -07002692{
Tony Jonesee959b02008-02-22 00:13:36 +01002693 struct srp_target_port *target = host_to_target(class_to_shost(dev));
Roland Dreier6bfa24f2006-06-17 20:37:33 -07002694
Roland Dreier6bfa24f2006-06-17 20:37:33 -07002695 return sprintf(buf, "%d\n", target->zero_req_lim);
2696}
2697
Tony Jonesee959b02008-02-22 00:13:36 +01002698static ssize_t show_local_ib_port(struct device *dev,
2699 struct device_attribute *attr, char *buf)
Ishai Rabinovitzded7f1a2006-08-15 17:34:52 +03002700{
Tony Jonesee959b02008-02-22 00:13:36 +01002701 struct srp_target_port *target = host_to_target(class_to_shost(dev));
Ishai Rabinovitzded7f1a2006-08-15 17:34:52 +03002702
2703 return sprintf(buf, "%d\n", target->srp_host->port);
2704}
2705
Tony Jonesee959b02008-02-22 00:13:36 +01002706static ssize_t show_local_ib_device(struct device *dev,
2707 struct device_attribute *attr, char *buf)
Ishai Rabinovitzded7f1a2006-08-15 17:34:52 +03002708{
Tony Jonesee959b02008-02-22 00:13:36 +01002709 struct srp_target_port *target = host_to_target(class_to_shost(dev));
Ishai Rabinovitzded7f1a2006-08-15 17:34:52 +03002710
Greg Kroah-Hartman05321932008-03-06 00:13:36 +01002711 return sprintf(buf, "%s\n", target->srp_host->srp_dev->dev->name);
Ishai Rabinovitzded7f1a2006-08-15 17:34:52 +03002712}
2713
Bart Van Assched92c0da2014-10-06 17:14:36 +02002714static ssize_t show_ch_count(struct device *dev, struct device_attribute *attr,
2715 char *buf)
2716{
2717 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2718
2719 return sprintf(buf, "%d\n", target->ch_count);
2720}
2721
Bart Van Assche4b5e5f42013-06-28 14:57:42 +02002722static ssize_t show_comp_vector(struct device *dev,
2723 struct device_attribute *attr, char *buf)
2724{
2725 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2726
2727 return sprintf(buf, "%d\n", target->comp_vector);
2728}
2729
Vu Pham7bb312e2013-10-26 14:31:27 +02002730static ssize_t show_tl_retry_count(struct device *dev,
2731 struct device_attribute *attr, char *buf)
2732{
2733 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2734
2735 return sprintf(buf, "%d\n", target->tl_retry_count);
2736}
2737
David Dillow49248642011-01-14 18:23:24 -05002738static ssize_t show_cmd_sg_entries(struct device *dev,
2739 struct device_attribute *attr, char *buf)
2740{
2741 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2742
2743 return sprintf(buf, "%u\n", target->cmd_sg_cnt);
2744}
2745
David Dillowc07d4242011-01-16 13:57:10 -05002746static ssize_t show_allow_ext_sg(struct device *dev,
2747 struct device_attribute *attr, char *buf)
2748{
2749 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2750
2751 return sprintf(buf, "%s\n", target->allow_ext_sg ? "true" : "false");
2752}
2753
Tony Jonesee959b02008-02-22 00:13:36 +01002754static DEVICE_ATTR(id_ext, S_IRUGO, show_id_ext, NULL);
2755static DEVICE_ATTR(ioc_guid, S_IRUGO, show_ioc_guid, NULL);
2756static DEVICE_ATTR(service_id, S_IRUGO, show_service_id, NULL);
2757static DEVICE_ATTR(pkey, S_IRUGO, show_pkey, NULL);
Bart Van Assche848b3082013-10-26 14:38:12 +02002758static DEVICE_ATTR(sgid, S_IRUGO, show_sgid, NULL);
Tony Jonesee959b02008-02-22 00:13:36 +01002759static DEVICE_ATTR(dgid, S_IRUGO, show_dgid, NULL);
2760static DEVICE_ATTR(orig_dgid, S_IRUGO, show_orig_dgid, NULL);
Bart Van Assche89de7482010-08-03 14:08:45 +00002761static DEVICE_ATTR(req_lim, S_IRUGO, show_req_lim, NULL);
Tony Jonesee959b02008-02-22 00:13:36 +01002762static DEVICE_ATTR(zero_req_lim, S_IRUGO, show_zero_req_lim, NULL);
2763static DEVICE_ATTR(local_ib_port, S_IRUGO, show_local_ib_port, NULL);
2764static DEVICE_ATTR(local_ib_device, S_IRUGO, show_local_ib_device, NULL);
Bart Van Assched92c0da2014-10-06 17:14:36 +02002765static DEVICE_ATTR(ch_count, S_IRUGO, show_ch_count, NULL);
Bart Van Assche4b5e5f42013-06-28 14:57:42 +02002766static DEVICE_ATTR(comp_vector, S_IRUGO, show_comp_vector, NULL);
Vu Pham7bb312e2013-10-26 14:31:27 +02002767static DEVICE_ATTR(tl_retry_count, S_IRUGO, show_tl_retry_count, NULL);
David Dillow49248642011-01-14 18:23:24 -05002768static DEVICE_ATTR(cmd_sg_entries, S_IRUGO, show_cmd_sg_entries, NULL);
David Dillowc07d4242011-01-16 13:57:10 -05002769static DEVICE_ATTR(allow_ext_sg, S_IRUGO, show_allow_ext_sg, NULL);
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002770
Tony Jonesee959b02008-02-22 00:13:36 +01002771static struct device_attribute *srp_host_attrs[] = {
2772 &dev_attr_id_ext,
2773 &dev_attr_ioc_guid,
2774 &dev_attr_service_id,
2775 &dev_attr_pkey,
Bart Van Assche848b3082013-10-26 14:38:12 +02002776 &dev_attr_sgid,
Tony Jonesee959b02008-02-22 00:13:36 +01002777 &dev_attr_dgid,
2778 &dev_attr_orig_dgid,
Bart Van Assche89de7482010-08-03 14:08:45 +00002779 &dev_attr_req_lim,
Tony Jonesee959b02008-02-22 00:13:36 +01002780 &dev_attr_zero_req_lim,
2781 &dev_attr_local_ib_port,
2782 &dev_attr_local_ib_device,
Bart Van Assched92c0da2014-10-06 17:14:36 +02002783 &dev_attr_ch_count,
Bart Van Assche4b5e5f42013-06-28 14:57:42 +02002784 &dev_attr_comp_vector,
Vu Pham7bb312e2013-10-26 14:31:27 +02002785 &dev_attr_tl_retry_count,
David Dillow49248642011-01-14 18:23:24 -05002786 &dev_attr_cmd_sg_entries,
David Dillowc07d4242011-01-16 13:57:10 -05002787 &dev_attr_allow_ext_sg,
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002788 NULL
2789};
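/*
 * All attributes above are read-only and expose per-target state under
 * the SCSI host, e.g. (illustrative path):
 *
 *	cat /sys/class/scsi_host/host<n>/orig_dgid
 */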
2790
Roland Dreieraef9ec32005-11-02 14:07:13 -08002791static struct scsi_host_template srp_template = {
2792 .module = THIS_MODULE,
Roland Dreierb7f008f2007-05-06 21:18:11 -07002793 .name = "InfiniBand SRP initiator",
2794 .proc_name = DRV_NAME,
Bart Van Asschec9b03c12011-09-03 09:34:48 +02002795 .slave_configure = srp_slave_configure,
Roland Dreieraef9ec32005-11-02 14:07:13 -08002796 .info = srp_target_info,
2797 .queuecommand = srp_queuecommand,
Jack Wang71444b92013-11-07 11:37:37 +01002798 .change_queue_depth = srp_change_queue_depth,
Roland Dreieraef9ec32005-11-02 14:07:13 -08002799 .eh_abort_handler = srp_abort,
2800 .eh_device_reset_handler = srp_reset_device,
2801 .eh_host_reset_handler = srp_reset_host,
Bart Van Assche2742c1d2013-06-12 15:24:25 +02002802 .skip_settle_delay = true,
David Dillow49248642011-01-14 18:23:24 -05002803 .sg_tablesize = SRP_DEF_SG_TABLESIZE,
Bart Van Assche4d73f952013-10-26 14:40:37 +02002804 .can_queue = SRP_DEFAULT_CMD_SQ_SIZE,
Roland Dreieraef9ec32005-11-02 14:07:13 -08002805 .this_id = -1,
Bart Van Assche4d73f952013-10-26 14:40:37 +02002806 .cmd_per_lun = SRP_DEFAULT_CMD_SQ_SIZE,
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002807 .use_clustering = ENABLE_CLUSTERING,
Bart Van Assche77f2c1a2014-10-02 15:29:25 +02002808 .shost_attrs = srp_host_attrs,
2809 .use_blk_tags = 1,
Christoph Hellwigc40ecc12014-11-13 14:25:11 +01002810 .track_queue_depth = 1,
Roland Dreieraef9ec32005-11-02 14:07:13 -08002811};
2812
Bart Van Assche34aa6542014-10-30 14:47:22 +01002813static int srp_sdev_count(struct Scsi_Host *host)
2814{
2815 struct scsi_device *sdev;
2816 int c = 0;
2817
2818 shost_for_each_device(sdev, host)
2819 c++;
2820
2821 return c;
2822}
2823
Bart Van Asschebc44bd12015-08-14 11:01:09 -07002824/*
2825 * Return values:
2826 * < 0 upon failure. Caller is responsible for SRP target port cleanup.
2827 * 0 and target->state == SRP_TARGET_REMOVED if asynchronous target port
2828 * removal has been scheduled.
2829 * 0 and target->state != SRP_TARGET_REMOVED upon success.
2830 */
Roland Dreieraef9ec32005-11-02 14:07:13 -08002831static int srp_add_target(struct srp_host *host, struct srp_target_port *target)
2832{
FUJITA Tomonori32368222007-06-27 16:33:12 +09002833 struct srp_rport_identifiers ids;
2834 struct srp_rport *rport;
2835
Bart Van Assche34aa6542014-10-30 14:47:22 +01002836 target->state = SRP_TARGET_SCANNING;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002837 sprintf(target->target_name, "SRP.T10:%016llX",
Bart Van Assche45c37ca2015-05-18 13:25:10 +02002838 be64_to_cpu(target->id_ext));
Roland Dreieraef9ec32005-11-02 14:07:13 -08002839
Greg Kroah-Hartman05321932008-03-06 00:13:36 +01002840 if (scsi_add_host(target->scsi_host, host->srp_dev->dev->dma_device))
Roland Dreieraef9ec32005-11-02 14:07:13 -08002841 return -ENODEV;
2842
FUJITA Tomonori32368222007-06-27 16:33:12 +09002843 memcpy(ids.port_id, &target->id_ext, 8);
2844 memcpy(ids.port_id + 8, &target->ioc_guid, 8);
FUJITA Tomonoriaebd5e42007-07-11 15:08:15 +09002845 ids.roles = SRP_RPORT_ROLE_TARGET;
FUJITA Tomonori32368222007-06-27 16:33:12 +09002846 rport = srp_rport_add(target->scsi_host, &ids);
2847 if (IS_ERR(rport)) {
2848 scsi_remove_host(target->scsi_host);
2849 return PTR_ERR(rport);
2850 }
2851
Bart Van Asschedc1bdbd2011-09-16 20:41:13 +02002852 rport->lld_data = target;
Bart Van Assche9dd69a62013-10-26 14:32:30 +02002853 target->rport = rport;
Bart Van Asschedc1bdbd2011-09-16 20:41:13 +02002854
Matthew Wilcoxb3589fd2006-06-17 20:37:30 -07002855 spin_lock(&host->target_lock);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002856 list_add_tail(&target->list, &host->target_list);
Matthew Wilcoxb3589fd2006-06-17 20:37:30 -07002857 spin_unlock(&host->target_lock);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002858
Roland Dreieraef9ec32005-11-02 14:07:13 -08002859 scsi_scan_target(&target->scsi_host->shost_gendev,
Matthew Wilcox1962a4a2006-06-17 20:37:30 -07002860 0, target->scsi_id, SCAN_WILD_CARD, 0);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002861
Bart Van Asschec014c8c2015-05-18 13:23:57 +02002862 if (srp_connected_ch(target) < target->ch_count ||
2863 target->qp_in_error) {
Bart Van Assche34aa6542014-10-30 14:47:22 +01002864 shost_printk(KERN_INFO, target->scsi_host,
2865 PFX "SCSI scan failed - removing SCSI host\n");
2866 srp_queue_remove_work(target);
2867 goto out;
2868 }
2869
2870 pr_debug(PFX "%s: SCSI scan succeeded - detected %d LUNs\n",
2871 dev_name(&target->scsi_host->shost_gendev),
2872 srp_sdev_count(target->scsi_host));
2873
2874 spin_lock_irq(&target->lock);
2875 if (target->state == SRP_TARGET_SCANNING)
2876 target->state = SRP_TARGET_LIVE;
2877 spin_unlock_irq(&target->lock);
2878
2879out:
Roland Dreieraef9ec32005-11-02 14:07:13 -08002880 return 0;
2881}
2882
Tony Jonesee959b02008-02-22 00:13:36 +01002883static void srp_release_dev(struct device *dev)
Roland Dreieraef9ec32005-11-02 14:07:13 -08002884{
2885 struct srp_host *host =
Tony Jonesee959b02008-02-22 00:13:36 +01002886 container_of(dev, struct srp_host, dev);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002887
2888 complete(&host->released);
2889}
2890
2891static struct class srp_class = {
2892 .name = "infiniband_srp",
Tony Jonesee959b02008-02-22 00:13:36 +01002893 .dev_release = srp_release_dev
Roland Dreieraef9ec32005-11-02 14:07:13 -08002894};
2895
Bart Van Assche96fc2482013-06-28 14:51:26 +02002896/**
2897 * srp_conn_unique() - check whether the connection to a target is unique
Bart Van Asscheaf246632014-05-20 15:04:21 +02002898 * @host: SRP host.
2899 * @target: SRP target port.
Bart Van Assche96fc2482013-06-28 14:51:26 +02002900 */
2901static bool srp_conn_unique(struct srp_host *host,
2902 struct srp_target_port *target)
2903{
2904 struct srp_target_port *t;
2905 bool ret = false;
2906
2907 if (target->state == SRP_TARGET_REMOVED)
2908 goto out;
2909
2910 ret = true;
2911
2912 spin_lock(&host->target_lock);
2913 list_for_each_entry(t, &host->target_list, list) {
2914 if (t != target &&
2915 target->id_ext == t->id_ext &&
2916 target->ioc_guid == t->ioc_guid &&
2917 target->initiator_ext == t->initiator_ext) {
2918 ret = false;
2919 break;
2920 }
2921 }
2922 spin_unlock(&host->target_lock);
2923
2924out:
2925 return ret;
2926}
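/*
 * srp_create_target() rejects a duplicate (id_ext, ioc_guid,
 * initiator_ext) triple with -EEXIST, so this triple effectively
 * identifies an SRP connection on a given host port.
 */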
2927
Roland Dreieraef9ec32005-11-02 14:07:13 -08002928/*
2929 * Target ports are added by writing
2930 *
2931 * id_ext=<SRP ID ext>,ioc_guid=<SRP IOC GUID>,dgid=<dest GID>,
2932 * pkey=<P_Key>,service_id=<service ID>
2933 *
2934 * to the add_target sysfs attribute.
2935 */
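/*
 * Example with illustrative identifiers (the class device name is
 * created by srp_add_port() as "srp-<device>-<port>"):
 *
 *	echo id_ext=200400a0b8114527,ioc_guid=0002c90200402bd4,dgid=fe800000000000000002c90200402bd5,pkey=ffff,service_id=0002c90200402bd4 > /sys/class/infiniband_srp/srp-mlx4_0-1/add_target
 */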
2936enum {
2937 SRP_OPT_ERR = 0,
2938 SRP_OPT_ID_EXT = 1 << 0,
2939 SRP_OPT_IOC_GUID = 1 << 1,
2940 SRP_OPT_DGID = 1 << 2,
2941 SRP_OPT_PKEY = 1 << 3,
2942 SRP_OPT_SERVICE_ID = 1 << 4,
2943 SRP_OPT_MAX_SECT = 1 << 5,
Vu Pham52fb2b502006-06-17 20:37:31 -07002944 SRP_OPT_MAX_CMD_PER_LUN = 1 << 6,
Ramachandra K0c0450db2006-06-17 20:37:38 -07002945 SRP_OPT_IO_CLASS = 1 << 7,
Ishai Rabinovitz01cb9bc2006-10-04 15:28:56 +02002946 SRP_OPT_INITIATOR_EXT = 1 << 8,
David Dillow49248642011-01-14 18:23:24 -05002947 SRP_OPT_CMD_SG_ENTRIES = 1 << 9,
David Dillowc07d4242011-01-16 13:57:10 -05002948 SRP_OPT_ALLOW_EXT_SG = 1 << 10,
2949 SRP_OPT_SG_TABLESIZE = 1 << 11,
Bart Van Assche4b5e5f42013-06-28 14:57:42 +02002950 SRP_OPT_COMP_VECTOR = 1 << 12,
Vu Pham7bb312e2013-10-26 14:31:27 +02002951 SRP_OPT_TL_RETRY_COUNT = 1 << 13,
Bart Van Assche4d73f952013-10-26 14:40:37 +02002952 SRP_OPT_QUEUE_SIZE = 1 << 14,
Roland Dreieraef9ec32005-11-02 14:07:13 -08002953 SRP_OPT_ALL = (SRP_OPT_ID_EXT |
2954 SRP_OPT_IOC_GUID |
2955 SRP_OPT_DGID |
2956 SRP_OPT_PKEY |
2957 SRP_OPT_SERVICE_ID),
2958};
2959
Steven Whitehousea447c092008-10-13 10:46:57 +01002960static const match_table_t srp_opt_tokens = {
Vu Pham52fb2b502006-06-17 20:37:31 -07002961 { SRP_OPT_ID_EXT, "id_ext=%s" },
2962 { SRP_OPT_IOC_GUID, "ioc_guid=%s" },
2963 { SRP_OPT_DGID, "dgid=%s" },
2964 { SRP_OPT_PKEY, "pkey=%x" },
2965 { SRP_OPT_SERVICE_ID, "service_id=%s" },
2966 { SRP_OPT_MAX_SECT, "max_sect=%d" },
2967 { SRP_OPT_MAX_CMD_PER_LUN, "max_cmd_per_lun=%d" },
Ramachandra K0c0450db2006-06-17 20:37:38 -07002968 { SRP_OPT_IO_CLASS, "io_class=%x" },
Ishai Rabinovitz01cb9bc2006-10-04 15:28:56 +02002969 { SRP_OPT_INITIATOR_EXT, "initiator_ext=%s" },
David Dillow49248642011-01-14 18:23:24 -05002970 { SRP_OPT_CMD_SG_ENTRIES, "cmd_sg_entries=%u" },
David Dillowc07d4242011-01-16 13:57:10 -05002971 { SRP_OPT_ALLOW_EXT_SG, "allow_ext_sg=%u" },
2972 { SRP_OPT_SG_TABLESIZE, "sg_tablesize=%u" },
Bart Van Assche4b5e5f42013-06-28 14:57:42 +02002973 { SRP_OPT_COMP_VECTOR, "comp_vector=%u" },
Vu Pham7bb312e2013-10-26 14:31:27 +02002974 { SRP_OPT_TL_RETRY_COUNT, "tl_retry_count=%u" },
Bart Van Assche4d73f952013-10-26 14:40:37 +02002975 { SRP_OPT_QUEUE_SIZE, "queue_size=%d" },
Vu Pham52fb2b502006-06-17 20:37:31 -07002976 { SRP_OPT_ERR, NULL }
Roland Dreieraef9ec32005-11-02 14:07:13 -08002977};
2978
2979static int srp_parse_options(const char *buf, struct srp_target_port *target)
2980{
2981 char *options, *sep_opt;
2982 char *p;
2983 char dgid[3];
2984 substring_t args[MAX_OPT_ARGS];
2985 int opt_mask = 0;
2986 int token;
2987 int ret = -EINVAL;
2988 int i;
2989
2990 options = kstrdup(buf, GFP_KERNEL);
2991 if (!options)
2992 return -ENOMEM;
2993
2994 sep_opt = options;
Sagi Grimberg7dcf9c12014-10-19 18:19:02 +03002995 while ((p = strsep(&sep_opt, ",\n")) != NULL) {
Roland Dreieraef9ec32005-11-02 14:07:13 -08002996 if (!*p)
2997 continue;
2998
2999 token = match_token(p, srp_opt_tokens, args);
3000 opt_mask |= token;
3001
3002 switch (token) {
3003 case SRP_OPT_ID_EXT:
3004 p = match_strdup(args);
Ishai Rabinovitza20f3a62007-01-16 17:20:25 +02003005 if (!p) {
3006 ret = -ENOMEM;
3007 goto out;
3008 }
Roland Dreieraef9ec32005-11-02 14:07:13 -08003009 target->id_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
3010 kfree(p);
3011 break;
3012
3013 case SRP_OPT_IOC_GUID:
3014 p = match_strdup(args);
Ishai Rabinovitza20f3a62007-01-16 17:20:25 +02003015 if (!p) {
3016 ret = -ENOMEM;
3017 goto out;
3018 }
Roland Dreieraef9ec32005-11-02 14:07:13 -08003019 target->ioc_guid = cpu_to_be64(simple_strtoull(p, NULL, 16));
3020 kfree(p);
3021 break;
3022
3023 case SRP_OPT_DGID:
3024 p = match_strdup(args);
Ishai Rabinovitza20f3a62007-01-16 17:20:25 +02003025 if (!p) {
3026 ret = -ENOMEM;
3027 goto out;
3028 }
Roland Dreieraef9ec32005-11-02 14:07:13 -08003029 if (strlen(p) != 32) {
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00003030 pr_warn("bad dest GID parameter '%s'\n", p);
Roland Dreierce1823f2006-04-03 09:31:04 -07003031 kfree(p);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003032 goto out;
3033 }
3034
3035 for (i = 0; i < 16; ++i) {
Bart Van Assche747fe002014-10-30 14:48:05 +01003036 strlcpy(dgid, p + i * 2, sizeof(dgid));
3037 if (sscanf(dgid, "%hhx",
3038 &target->orig_dgid.raw[i]) < 1) {
3039 ret = -EINVAL;
3040 kfree(p);
3041 goto out;
3042 }
Roland Dreieraef9ec32005-11-02 14:07:13 -08003043 }
Roland Dreierbf17c1c2006-03-20 10:08:25 -08003044 kfree(p);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003045 break;
3046
3047 case SRP_OPT_PKEY:
3048 if (match_hex(args, &token)) {
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00003049 pr_warn("bad P_Key parameter '%s'\n", p);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003050 goto out;
3051 }
Bart Van Assche747fe002014-10-30 14:48:05 +01003052 target->pkey = cpu_to_be16(token);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003053 break;
3054
3055 case SRP_OPT_SERVICE_ID:
3056 p = match_strdup(args);
Ishai Rabinovitza20f3a62007-01-16 17:20:25 +02003057 if (!p) {
3058 ret = -ENOMEM;
3059 goto out;
3060 }
Roland Dreieraef9ec32005-11-02 14:07:13 -08003061 target->service_id = cpu_to_be64(simple_strtoull(p, NULL, 16));
3062 kfree(p);
3063 break;
3064
3065 case SRP_OPT_MAX_SECT:
3066 if (match_int(args, &token)) {
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00003067 pr_warn("bad max sect parameter '%s'\n", p);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003068 goto out;
3069 }
3070 target->scsi_host->max_sectors = token;
3071 break;
3072
Bart Van Assche4d73f952013-10-26 14:40:37 +02003073 case SRP_OPT_QUEUE_SIZE:
3074 if (match_int(args, &token) || token < 1) {
3075 pr_warn("bad queue_size parameter '%s'\n", p);
3076 goto out;
3077 }
3078 target->scsi_host->can_queue = token;
3079 target->queue_size = token + SRP_RSP_SQ_SIZE +
3080 SRP_TSK_MGMT_SQ_SIZE;
3081 if (!(opt_mask & SRP_OPT_MAX_CMD_PER_LUN))
3082 target->scsi_host->cmd_per_lun = token;
3083 break;
3084
Vu Pham52fb2b502006-06-17 20:37:31 -07003085 case SRP_OPT_MAX_CMD_PER_LUN:
Bart Van Assche4d73f952013-10-26 14:40:37 +02003086 if (match_int(args, &token) || token < 1) {
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00003087 pr_warn("bad max cmd_per_lun parameter '%s'\n",
3088 p);
Vu Pham52fb2b502006-06-17 20:37:31 -07003089 goto out;
3090 }
Bart Van Assche4d73f952013-10-26 14:40:37 +02003091 target->scsi_host->cmd_per_lun = token;
Vu Pham52fb2b502006-06-17 20:37:31 -07003092 break;
3093
Ramachandra K0c0450db2006-06-17 20:37:38 -07003094 case SRP_OPT_IO_CLASS:
3095 if (match_hex(args, &token)) {
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00003096 pr_warn("bad IO class parameter '%s'\n", p);
Ramachandra K0c0450db2006-06-17 20:37:38 -07003097 goto out;
3098 }
3099 if (token != SRP_REV10_IB_IO_CLASS &&
3100 token != SRP_REV16A_IB_IO_CLASS) {
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00003101 pr_warn("unknown IO class parameter value %x specified (use %x or %x).\n",
3102 token, SRP_REV10_IB_IO_CLASS,
3103 SRP_REV16A_IB_IO_CLASS);
Ramachandra K0c0450db2006-06-17 20:37:38 -07003104 goto out;
3105 }
3106 target->io_class = token;
3107 break;
3108
Ishai Rabinovitz01cb9bc2006-10-04 15:28:56 +02003109 case SRP_OPT_INITIATOR_EXT:
3110 p = match_strdup(args);
Ishai Rabinovitza20f3a62007-01-16 17:20:25 +02003111 if (!p) {
3112 ret = -ENOMEM;
3113 goto out;
3114 }
Ishai Rabinovitz01cb9bc2006-10-04 15:28:56 +02003115 target->initiator_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
3116 kfree(p);
3117 break;
3118
David Dillow49248642011-01-14 18:23:24 -05003119 case SRP_OPT_CMD_SG_ENTRIES:
3120 if (match_int(args, &token) || token < 1 || token > 255) {
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00003121 pr_warn("bad max cmd_sg_entries parameter '%s'\n",
3122 p);
David Dillow49248642011-01-14 18:23:24 -05003123 goto out;
3124 }
3125 target->cmd_sg_cnt = token;
3126 break;
3127
David Dillowc07d4242011-01-16 13:57:10 -05003128 case SRP_OPT_ALLOW_EXT_SG:
3129 if (match_int(args, &token)) {
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00003130 pr_warn("bad allow_ext_sg parameter '%s'\n", p);
David Dillowc07d4242011-01-16 13:57:10 -05003131 goto out;
3132 }
3133 target->allow_ext_sg = !!token;
3134 break;
3135
3136 case SRP_OPT_SG_TABLESIZE:
3137 if (match_int(args, &token) || token < 1 ||
3138 token > SCSI_MAX_SG_CHAIN_SEGMENTS) {
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00003139 pr_warn("bad max sg_tablesize parameter '%s'\n",
3140 p);
David Dillowc07d4242011-01-16 13:57:10 -05003141 goto out;
3142 }
3143 target->sg_tablesize = token;
3144 break;
3145
Bart Van Assche4b5e5f42013-06-28 14:57:42 +02003146 case SRP_OPT_COMP_VECTOR:
3147 if (match_int(args, &token) || token < 0) {
3148 pr_warn("bad comp_vector parameter '%s'\n", p);
3149 goto out;
3150 }
3151 target->comp_vector = token;
3152 break;
3153
Vu Pham7bb312e2013-10-26 14:31:27 +02003154 case SRP_OPT_TL_RETRY_COUNT:
3155 if (match_int(args, &token) || token < 2 || token > 7) {
3156 pr_warn("bad tl_retry_count parameter '%s' (must be a number between 2 and 7)\n",
3157 p);
3158 goto out;
3159 }
3160 target->tl_retry_count = token;
3161 break;
3162
Roland Dreieraef9ec32005-11-02 14:07:13 -08003163 default:
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00003164 pr_warn("unknown parameter or missing value '%s' in target creation request\n",
3165 p);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003166 goto out;
3167 }
3168 }
3169
3170 if ((opt_mask & SRP_OPT_ALL) == SRP_OPT_ALL)
3171 ret = 0;
3172 else
3173 for (i = 0; i < ARRAY_SIZE(srp_opt_tokens); ++i)
3174 if ((srp_opt_tokens[i].token & SRP_OPT_ALL) &&
3175 !(srp_opt_tokens[i].token & opt_mask))
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00003176 pr_warn("target creation request is missing parameter '%s'\n",
3177 srp_opt_tokens[i].pattern);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003178
Bart Van Assche4d73f952013-10-26 14:40:37 +02003179 if (target->scsi_host->cmd_per_lun > target->scsi_host->can_queue
3180 && (opt_mask & SRP_OPT_MAX_CMD_PER_LUN))
3181 pr_warn("cmd_per_lun = %d > queue_size = %d\n",
3182 target->scsi_host->cmd_per_lun,
3183 target->scsi_host->can_queue);
3184
Roland Dreieraef9ec32005-11-02 14:07:13 -08003185out:
3186 kfree(options);
3187 return ret;
3188}
3189
Tony Jonesee959b02008-02-22 00:13:36 +01003190static ssize_t srp_create_target(struct device *dev,
3191 struct device_attribute *attr,
Roland Dreieraef9ec32005-11-02 14:07:13 -08003192 const char *buf, size_t count)
3193{
3194 struct srp_host *host =
Tony Jonesee959b02008-02-22 00:13:36 +01003195 container_of(dev, struct srp_host, dev);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003196 struct Scsi_Host *target_host;
3197 struct srp_target_port *target;
Bart Van Assche509c07b2014-10-30 14:48:30 +01003198 struct srp_rdma_ch *ch;
Bart Van Assched1b42892014-05-20 15:07:20 +02003199 struct srp_device *srp_dev = host->srp_dev;
3200 struct ib_device *ibdev = srp_dev->dev;
Bart Van Assched92c0da2014-10-06 17:14:36 +02003201 int ret, node_idx, node, cpu, i;
3202 bool multich = false;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003203
3204 target_host = scsi_host_alloc(&srp_template,
3205 sizeof (struct srp_target_port));
3206 if (!target_host)
3207 return -ENOMEM;
3208
David Dillow49248642011-01-14 18:23:24 -05003209 target_host->transportt = ib_srp_transport_template;
Bart Van Asschefd1b6c42011-07-13 09:19:16 -07003210 target_host->max_channel = 0;
3211 target_host->max_id = 1;
Bart Van Assche985aa492015-05-18 13:27:14 +02003212 target_host->max_lun = -1LL;
Arne Redlich3c8edf02006-11-15 12:43:00 +01003213 target_host->max_cmd_len = sizeof ((struct srp_cmd *) (void *) 0L)->cdb;
Roland Dreier5f068992005-11-11 14:06:01 -08003214
Roland Dreieraef9ec32005-11-02 14:07:13 -08003215 target = host_to_target(target_host);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003216
David Dillow49248642011-01-14 18:23:24 -05003217 target->io_class = SRP_REV16A_IB_IO_CLASS;
3218 target->scsi_host = target_host;
3219 target->srp_host = host;
Jason Gunthorpee6bf5f42015-07-30 17:22:22 -06003220 target->lkey = host->srp_dev->pd->local_dma_lkey;
Bart Van Assche03f6fb92015-08-10 17:09:36 -07003221 target->global_mr = host->srp_dev->global_mr;
David Dillow49248642011-01-14 18:23:24 -05003222 target->cmd_sg_cnt = cmd_sg_entries;
David Dillowc07d4242011-01-16 13:57:10 -05003223 target->sg_tablesize = indirect_sg_entries ? : cmd_sg_entries;
3224 target->allow_ext_sg = allow_ext_sg;
Vu Pham7bb312e2013-10-26 14:31:27 +02003225 target->tl_retry_count = 7;
Bart Van Assche4d73f952013-10-26 14:40:37 +02003226 target->queue_size = SRP_DEFAULT_QUEUE_SIZE;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003227
Bart Van Assche34aa6542014-10-30 14:47:22 +01003228 /*
 3229 * Prevent the SCSI host from being removed by srp_remove_target()
3230 * before this function returns.
3231 */
3232 scsi_host_get(target->scsi_host);
3233
Bart Van Assche2d7091b2014-03-14 13:52:45 +01003234 mutex_lock(&host->add_target_mutex);
3235
Roland Dreieraef9ec32005-11-02 14:07:13 -08003236 ret = srp_parse_options(buf, target);
3237 if (ret)
Bart Van Asschefb49c8b2015-05-18 13:23:14 +02003238 goto out;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003239
Bart Van Assche77f2c1a2014-10-02 15:29:25 +02003240 ret = scsi_init_shared_tag_map(target_host, target_host->can_queue);
3241 if (ret)
Bart Van Asschefb49c8b2015-05-18 13:23:14 +02003242 goto out;
Bart Van Assche77f2c1a2014-10-02 15:29:25 +02003243
Bart Van Assche4d73f952013-10-26 14:40:37 +02003244 target->req_ring_size = target->queue_size - SRP_TSK_MGMT_SQ_SIZE;
3245
Bart Van Assche96fc2482013-06-28 14:51:26 +02003246 if (!srp_conn_unique(target->srp_host, target)) {
3247 shost_printk(KERN_INFO, target->scsi_host,
3248 PFX "Already connected to target port with id_ext=%016llx;ioc_guid=%016llx;initiator_ext=%016llx\n",
3249 be64_to_cpu(target->id_ext),
3250 be64_to_cpu(target->ioc_guid),
3251 be64_to_cpu(target->initiator_ext));
3252 ret = -EEXIST;
Bart Van Asschefb49c8b2015-05-18 13:23:14 +02003253 goto out;
Bart Van Assche96fc2482013-06-28 14:51:26 +02003254 }
3255
Bart Van Assche5cfb1782014-05-20 15:08:34 +02003256 if (!srp_dev->has_fmr && !srp_dev->has_fr && !target->allow_ext_sg &&
Bart Van Assched1b42892014-05-20 15:07:20 +02003257 target->cmd_sg_cnt < target->sg_tablesize) {
Bart Van Assche5cfb1782014-05-20 15:08:34 +02003258 pr_warn("No MR pool and no external indirect descriptors, limiting sg_tablesize to cmd_sg_cnt\n");
David Dillowc07d4242011-01-16 13:57:10 -05003259 target->sg_tablesize = target->cmd_sg_cnt;
3260 }
3261
3262 target_host->sg_tablesize = target->sg_tablesize;
3263 target->indirect_size = target->sg_tablesize *
3264 sizeof (struct srp_direct_buf);
David Dillow49248642011-01-14 18:23:24 -05003265 target->max_iu_len = sizeof (struct srp_cmd) +
3266 sizeof (struct srp_indirect_buf) +
3267 target->cmd_sg_cnt * sizeof (struct srp_direct_buf);
3268
Bart Van Asschec1120f82013-10-26 14:35:08 +02003269 INIT_WORK(&target->tl_err_work, srp_tl_err_work);
Bart Van Asscheef6c49d2011-12-26 16:49:18 +00003270 INIT_WORK(&target->remove_work, srp_remove_work);
David Dillow8f26c9f2011-01-14 19:45:50 -05003271 spin_lock_init(&target->lock);
Matan Barak55ee3ab2015-10-15 18:38:45 +03003272 ret = ib_query_gid(ibdev, host->port, 0, &target->sgid, NULL);
Sagi Grimberg2088ca62014-03-14 13:51:58 +01003273 if (ret)
Bart Van Asschefb49c8b2015-05-18 13:23:14 +02003274 goto out;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003275
Bart Van Assched92c0da2014-10-06 17:14:36 +02003276 ret = -ENOMEM;
3277 target->ch_count = max_t(unsigned, num_online_nodes(),
3278 min(ch_count ? :
3279 min(4 * num_online_nodes(),
3280 ibdev->num_comp_vectors),
3281 num_online_cpus()));
3282 target->ch = kcalloc(target->ch_count, sizeof(*target->ch),
3283 GFP_KERNEL);
3284 if (!target->ch)
Bart Van Asschefb49c8b2015-05-18 13:23:14 +02003285 goto out;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003286
Bart Van Assched92c0da2014-10-06 17:14:36 +02003287 node_idx = 0;
3288 for_each_online_node(node) {
3289 const int ch_start = (node_idx * target->ch_count /
3290 num_online_nodes());
3291 const int ch_end = ((node_idx + 1) * target->ch_count /
3292 num_online_nodes());
3293 const int cv_start = (node_idx * ibdev->num_comp_vectors /
3294 num_online_nodes() + target->comp_vector)
3295 % ibdev->num_comp_vectors;
3296 const int cv_end = ((node_idx + 1) * ibdev->num_comp_vectors /
3297 num_online_nodes() + target->comp_vector)
3298 % ibdev->num_comp_vectors;
3299 int cpu_idx = 0;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003300
Bart Van Assched92c0da2014-10-06 17:14:36 +02003301 for_each_online_cpu(cpu) {
3302 if (cpu_to_node(cpu) != node)
3303 continue;
3304 if (ch_start + cpu_idx >= ch_end)
3305 continue;
3306 ch = &target->ch[ch_start + cpu_idx];
3307 ch->target = target;
3308 ch->comp_vector = cv_start == cv_end ? cv_start :
3309 cv_start + cpu_idx % (cv_end - cv_start);
3310 spin_lock_init(&ch->lock);
3311 INIT_LIST_HEAD(&ch->free_tx);
3312 ret = srp_new_cm_id(ch);
3313 if (ret)
3314 goto err_disconnect;
3315
3316 ret = srp_create_ch_ib(ch);
3317 if (ret)
3318 goto err_disconnect;
3319
3320 ret = srp_alloc_req_data(ch);
3321 if (ret)
3322 goto err_disconnect;
3323
3324 ret = srp_connect_ch(ch, multich);
3325 if (ret) {
3326 shost_printk(KERN_ERR, target->scsi_host,
3327 PFX "Connection %d/%d failed\n",
3328 ch_start + cpu_idx,
3329 target->ch_count);
3330 if (node_idx == 0 && cpu_idx == 0) {
3331 goto err_disconnect;
3332 } else {
3333 srp_free_ch_ib(target, ch);
3334 srp_free_req_data(target, ch);
3335 target->ch_count = ch - target->ch;
Bart Van Asschec257ea62015-07-31 14:13:22 -07003336 goto connected;
Bart Van Assched92c0da2014-10-06 17:14:36 +02003337 }
3338 }
3339
3340 multich = true;
3341 cpu_idx++;
3342 }
3343 node_idx++;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003344 }
3345
Bart Van Asschec257ea62015-07-31 14:13:22 -07003346connected:
Bart Van Assched92c0da2014-10-06 17:14:36 +02003347 target->scsi_host->nr_hw_queues = target->ch_count;
3348
Roland Dreieraef9ec32005-11-02 14:07:13 -08003349 ret = srp_add_target(host, target);
3350 if (ret)
3351 goto err_disconnect;
3352
Bart Van Assche34aa6542014-10-30 14:47:22 +01003353 if (target->state != SRP_TARGET_REMOVED) {
3354 shost_printk(KERN_DEBUG, target->scsi_host, PFX
3355 "new target: id_ext %016llx ioc_guid %016llx pkey %04x service_id %016llx sgid %pI6 dgid %pI6\n",
3356 be64_to_cpu(target->id_ext),
3357 be64_to_cpu(target->ioc_guid),
Bart Van Assche747fe002014-10-30 14:48:05 +01003358 be16_to_cpu(target->pkey),
Bart Van Assche34aa6542014-10-30 14:47:22 +01003359 be64_to_cpu(target->service_id),
Bart Van Assche747fe002014-10-30 14:48:05 +01003360 target->sgid.raw, target->orig_dgid.raw);
Bart Van Assche34aa6542014-10-30 14:47:22 +01003361 }
Bart Van Asschee7ffde02014-03-14 13:52:21 +01003362
Bart Van Assche2d7091b2014-03-14 13:52:45 +01003363 ret = count;
3364
3365out:
3366 mutex_unlock(&host->add_target_mutex);
Bart Van Assche34aa6542014-10-30 14:47:22 +01003367
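	/*
	 * The first put pairs with the scsi_host_get() earlier in this
	 * function; on failure, the second put also drops the reference
	 * taken by scsi_host_alloc() so the SCSI host is freed (a sketch of
	 * the refcounting intent).
	 */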
3368 scsi_host_put(target->scsi_host);
Bart Van Asschebc44bd12015-08-14 11:01:09 -07003369 if (ret < 0)
3370 scsi_host_put(target->scsi_host);
Bart Van Assche34aa6542014-10-30 14:47:22 +01003371
Bart Van Assche2d7091b2014-03-14 13:52:45 +01003372 return ret;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003373
3374err_disconnect:
3375 srp_disconnect_target(target);
3376
Bart Van Assched92c0da2014-10-06 17:14:36 +02003377 for (i = 0; i < target->ch_count; i++) {
3378 ch = &target->ch[i];
3379 srp_free_ch_ib(target, ch);
3380 srp_free_req_data(target, ch);
3381 }
Roland Dreieraef9ec32005-11-02 14:07:13 -08003382
Bart Van Assched92c0da2014-10-06 17:14:36 +02003383 kfree(target->ch);
Bart Van Assche2d7091b2014-03-14 13:52:45 +01003384 goto out;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003385}
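/*
 * Worked example for the channel sizing in srp_create_target()
 * (illustrative numbers): with 2 online NUMA nodes, 8 online CPUs and
 * 8 completion vectors, ch_count = max(2, min(min(4 * 2, 8), 8)) = 8,
 * and the per-node loop spreads those channels and their completion
 * vectors evenly across both nodes.
 */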
3386
Tony Jonesee959b02008-02-22 00:13:36 +01003387static DEVICE_ATTR(add_target, S_IWUSR, NULL, srp_create_target);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003388
Tony Jonesee959b02008-02-22 00:13:36 +01003389static ssize_t show_ibdev(struct device *dev, struct device_attribute *attr,
3390 char *buf)
Roland Dreieraef9ec32005-11-02 14:07:13 -08003391{
Tony Jonesee959b02008-02-22 00:13:36 +01003392 struct srp_host *host = container_of(dev, struct srp_host, dev);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003393
Greg Kroah-Hartman05321932008-03-06 00:13:36 +01003394 return sprintf(buf, "%s\n", host->srp_dev->dev->name);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003395}
3396
Tony Jonesee959b02008-02-22 00:13:36 +01003397static DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003398
Tony Jonesee959b02008-02-22 00:13:36 +01003399static ssize_t show_port(struct device *dev, struct device_attribute *attr,
3400 char *buf)
Roland Dreieraef9ec32005-11-02 14:07:13 -08003401{
Tony Jonesee959b02008-02-22 00:13:36 +01003402 struct srp_host *host = container_of(dev, struct srp_host, dev);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003403
3404 return sprintf(buf, "%d\n", host->port);
3405}
3406
Tony Jonesee959b02008-02-22 00:13:36 +01003407static DEVICE_ATTR(port, S_IRUGO, show_port, NULL);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003408
Roland Dreierf5358a12006-06-17 20:37:29 -07003409static struct srp_host *srp_add_port(struct srp_device *device, u8 port)
Roland Dreieraef9ec32005-11-02 14:07:13 -08003410{
3411 struct srp_host *host;
3412
3413 host = kzalloc(sizeof *host, GFP_KERNEL);
3414 if (!host)
3415 return NULL;
3416
3417 INIT_LIST_HEAD(&host->target_list);
Matthew Wilcoxb3589fd2006-06-17 20:37:30 -07003418 spin_lock_init(&host->target_lock);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003419 init_completion(&host->released);
Bart Van Assche2d7091b2014-03-14 13:52:45 +01003420 mutex_init(&host->add_target_mutex);
Greg Kroah-Hartman05321932008-03-06 00:13:36 +01003421 host->srp_dev = device;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003422 host->port = port;
3423
Tony Jonesee959b02008-02-22 00:13:36 +01003424 host->dev.class = &srp_class;
3425 host->dev.parent = device->dev->dma_device;
Kay Sieversd927e382009-01-06 10:44:39 -08003426 dev_set_name(&host->dev, "srp-%s-%d", device->dev->name, port);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003427
Tony Jonesee959b02008-02-22 00:13:36 +01003428 if (device_register(&host->dev))
Roland Dreierf5358a12006-06-17 20:37:29 -07003429 goto free_host;
Tony Jonesee959b02008-02-22 00:13:36 +01003430 if (device_create_file(&host->dev, &dev_attr_add_target))
Roland Dreieraef9ec32005-11-02 14:07:13 -08003431 goto err_class;
Tony Jonesee959b02008-02-22 00:13:36 +01003432 if (device_create_file(&host->dev, &dev_attr_ibdev))
Roland Dreieraef9ec32005-11-02 14:07:13 -08003433 goto err_class;
Tony Jonesee959b02008-02-22 00:13:36 +01003434 if (device_create_file(&host->dev, &dev_attr_port))
Roland Dreieraef9ec32005-11-02 14:07:13 -08003435 goto err_class;
3436
3437 return host;
3438
3439err_class:
Tony Jonesee959b02008-02-22 00:13:36 +01003440 device_unregister(&host->dev);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003441
Roland Dreierf5358a12006-06-17 20:37:29 -07003442free_host:
Roland Dreieraef9ec32005-11-02 14:07:13 -08003443 kfree(host);
3444
3445 return NULL;
3446}
3447
3448static void srp_add_one(struct ib_device *device)
3449{
Roland Dreierf5358a12006-06-17 20:37:29 -07003450 struct srp_device *srp_dev;
3451 struct ib_device_attr *dev_attr;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003452 struct srp_host *host;
Hal Rosenstock41390322015-06-29 09:57:00 -04003453 int mr_page_shift, p;
Bart Van Assche52ede082014-05-20 15:07:45 +02003454 u64 max_pages_per_mr;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003455
Roland Dreierf5358a12006-06-17 20:37:29 -07003456 dev_attr = kmalloc(sizeof *dev_attr, GFP_KERNEL);
3457 if (!dev_attr)
Sean Heftycf311cd2006-01-10 07:39:34 -08003458 return;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003459
Roland Dreierf5358a12006-06-17 20:37:29 -07003460 if (ib_query_device(device, dev_attr)) {
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00003461 pr_warn("Query device failed for %s\n", device->name);
Roland Dreierf5358a12006-06-17 20:37:29 -07003462 goto free_attr;
3463 }
3464
3465 srp_dev = kmalloc(sizeof *srp_dev, GFP_KERNEL);
3466 if (!srp_dev)
3467 goto free_attr;
3468
Bart Van Assched1b42892014-05-20 15:07:20 +02003469 srp_dev->has_fmr = (device->alloc_fmr && device->dealloc_fmr &&
3470 device->map_phys_fmr && device->unmap_fmr);
Bart Van Assche5cfb1782014-05-20 15:08:34 +02003471 srp_dev->has_fr = (dev_attr->device_cap_flags &
3472 IB_DEVICE_MEM_MGT_EXTENSIONS);
3473 if (!srp_dev->has_fmr && !srp_dev->has_fr)
3474 dev_warn(&device->dev, "neither FMR nor FR is supported\n");
3475
3476 srp_dev->use_fast_reg = (srp_dev->has_fr &&
3477 (!srp_dev->has_fmr || prefer_fr));
Bart Van Assche002f1562015-08-10 17:08:44 -07003478 srp_dev->use_fmr = !srp_dev->use_fast_reg && srp_dev->has_fmr;
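	/*
	 * Resulting registration mode, as a summary of the two assignments
	 * above:
	 *	has_fr && (!has_fmr || prefer_fr)  -> fast registration (FR)
	 *	has_fmr otherwise                  -> FMR
	 *	neither                            -> global MR/rkey only
	 */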
Bart Van Assched1b42892014-05-20 15:07:20 +02003479
Roland Dreierf5358a12006-06-17 20:37:29 -07003480 /*
3481 * Use the smallest page size supported by the HCA, down to a
David Dillow8f26c9f2011-01-14 19:45:50 -05003482 * minimum of 4096 bytes. We're unlikely to build large sglists
3483 * out of smaller entries.
Roland Dreierf5358a12006-06-17 20:37:29 -07003484 */
Bart Van Assche52ede082014-05-20 15:07:45 +02003485 mr_page_shift = max(12, ffs(dev_attr->page_size_cap) - 1);
3486 srp_dev->mr_page_size = 1 << mr_page_shift;
3487 srp_dev->mr_page_mask = ~((u64) srp_dev->mr_page_size - 1);
3488 max_pages_per_mr = dev_attr->max_mr_size;
3489 do_div(max_pages_per_mr, srp_dev->mr_page_size);
3490 srp_dev->max_pages_per_mr = min_t(u64, SRP_MAX_PAGES_PER_MR,
3491 max_pages_per_mr);
Bart Van Assche5cfb1782014-05-20 15:08:34 +02003492 if (srp_dev->use_fast_reg) {
3493 srp_dev->max_pages_per_mr =
3494 min_t(u32, srp_dev->max_pages_per_mr,
3495 dev_attr->max_fast_reg_page_list_len);
3496 }
Bart Van Assche52ede082014-05-20 15:07:45 +02003497 srp_dev->mr_max_size = srp_dev->mr_page_size *
3498 srp_dev->max_pages_per_mr;
Bart Van Assche5cfb1782014-05-20 15:08:34 +02003499 pr_debug("%s: mr_page_shift = %d, dev_attr->max_mr_size = %#llx, dev_attr->max_fast_reg_page_list_len = %u, max_pages_per_mr = %d, mr_max_size = %#x\n",
Bart Van Assche52ede082014-05-20 15:07:45 +02003500 device->name, mr_page_shift, dev_attr->max_mr_size,
Bart Van Assche5cfb1782014-05-20 15:08:34 +02003501 dev_attr->max_fast_reg_page_list_len,
Bart Van Assche52ede082014-05-20 15:07:45 +02003502 srp_dev->max_pages_per_mr, srp_dev->mr_max_size);
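	/*
	 * Worked example (illustrative): page_size_cap = 0xfffff000 means
	 * 4 KiB is the smallest supported page size, so mr_page_shift = 12
	 * and mr_page_size = 4096; with max_mr_size = 2 GiB that allows
	 * 524288 pages per MR before the SRP_MAX_PAGES_PER_MR cap applies.
	 */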
Roland Dreierf5358a12006-06-17 20:37:29 -07003503
3504 INIT_LIST_HEAD(&srp_dev->dev_list);
3505
3506 srp_dev->dev = device;
3507 srp_dev->pd = ib_alloc_pd(device);
3508 if (IS_ERR(srp_dev->pd))
3509 goto free_dev;
3510
Bart Van Assche03f6fb92015-08-10 17:09:36 -07003511 if (!register_always || (!srp_dev->has_fmr && !srp_dev->has_fr)) {
3512 srp_dev->global_mr = ib_get_dma_mr(srp_dev->pd,
3513 IB_ACCESS_LOCAL_WRITE |
3514 IB_ACCESS_REMOTE_READ |
3515 IB_ACCESS_REMOTE_WRITE);
3516 if (IS_ERR(srp_dev->global_mr))
3517 goto err_pd;
3518 } else {
3519 srp_dev->global_mr = NULL;
3520 }
Roland Dreierf5358a12006-06-17 20:37:29 -07003521
Hal Rosenstock41390322015-06-29 09:57:00 -04003522 for (p = rdma_start_port(device); p <= rdma_end_port(device); ++p) {
Roland Dreierf5358a12006-06-17 20:37:29 -07003523 host = srp_add_port(srp_dev, p);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003524 if (host)
Roland Dreierf5358a12006-06-17 20:37:29 -07003525 list_add_tail(&host->list, &srp_dev->dev_list);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003526 }
3527
Roland Dreierf5358a12006-06-17 20:37:29 -07003528 ib_set_client_data(device, &srp_client, srp_dev);
3529
3530 goto free_attr;
3531
err_pd:
	ib_dealloc_pd(srp_dev->pd);

free_dev:
	kfree(srp_dev);

free_attr:
	kfree(dev_attr);
}

static void srp_remove_one(struct ib_device *device, void *client_data)
{
	struct srp_device *srp_dev;
	struct srp_host *host, *tmp_host;
	struct srp_target_port *target;

	srp_dev = client_data;
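	/*
	 * client_data is only set once srp_add_one() has completed for
	 * this device, so a NULL pointer here means there is nothing to
	 * clean up.
	 */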
	if (!srp_dev)
		return;

	list_for_each_entry_safe(host, tmp_host, &srp_dev->dev_list, list) {
		device_unregister(&host->dev);
		/*
		 * Wait for the sysfs entry to go away, so that no new
		 * target ports can be created.
		 */
		wait_for_completion(&host->released);

		/*
		 * Remove all target ports.
		 */
		spin_lock(&host->target_lock);
		list_for_each_entry(target, &host->target_list, list)
			srp_queue_remove_work(target);
		spin_unlock(&host->target_lock);

		/*
		 * Wait for tl_err and target port removal tasks.
		 */
		flush_workqueue(system_long_wq);
		flush_workqueue(srp_remove_wq);

		kfree(host);
	}

	if (srp_dev->global_mr)
		ib_dereg_mr(srp_dev->global_mr);
	ib_dealloc_pd(srp_dev->pd);

	kfree(srp_dev);
}

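/*
 * Callbacks handed to the SRP transport class (scsi_transport_srp).
 * They let the transport core drive rport state changes and the
 * reconnect, fast_io_fail and dev_loss timers on behalf of this driver.
 */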
static struct srp_function_template ib_srp_transport_functions = {
	.has_rport_state	 = true,
	.reset_timer_if_blocked	 = true,
	.reconnect_delay	 = &srp_reconnect_delay,
	.fast_io_fail_tmo	 = &srp_fast_io_fail_tmo,
	.dev_loss_tmo		 = &srp_dev_loss_tmo,
	.reconnect		 = srp_rport_reconnect,
	.rport_delete		 = srp_rport_delete,
	.terminate_rport_io	 = srp_terminate_io,
};

static int __init srp_init_module(void)
{
	int ret;

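	/*
	 * The driver stores pointers in the 64-bit wr_id field of its work
	 * requests and reads them back from the work completions, so catch
	 * at build time any architecture where a pointer would not fit.
	 */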
	BUILD_BUG_ON(FIELD_SIZEOF(struct ib_wc, wr_id) < sizeof(void *));

	if (srp_sg_tablesize) {
		pr_warn("srp_sg_tablesize is deprecated, please use cmd_sg_entries\n");
		if (!cmd_sg_entries)
			cmd_sg_entries = srp_sg_tablesize;
	}

	if (!cmd_sg_entries)
		cmd_sg_entries = SRP_DEF_SG_TABLESIZE;

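	/*
	 * The SRP_CMD information unit encodes the data buffer descriptor
	 * count in a single byte, so scatterlists longer than 255 entries
	 * cannot be expressed on the wire.
	 */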
	if (cmd_sg_entries > 255) {
		pr_warn("Clamping cmd_sg_entries to 255\n");
		cmd_sg_entries = 255;
	}

	if (!indirect_sg_entries)
		indirect_sg_entries = cmd_sg_entries;
	else if (indirect_sg_entries < cmd_sg_entries) {
		pr_warn("Bumping up indirect_sg_entries to match cmd_sg_entries (%u)\n",
			cmd_sg_entries);
		indirect_sg_entries = cmd_sg_entries;
	}

	srp_remove_wq = create_workqueue("srp_remove");
	if (!srp_remove_wq) {
		ret = -ENOMEM;
		goto out;
	}

	ret = -ENOMEM;
	ib_srp_transport_template =
		srp_attach_transport(&ib_srp_transport_functions);
	if (!ib_srp_transport_template)
		goto destroy_wq;

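	/*
	 * The infiniband_srp class is what exposes the per-port add_target
	 * sysfs attribute through which new target ports are created.
	 */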
	ret = class_register(&srp_class);
	if (ret) {
		pr_err("couldn't register class infiniband_srp\n");
		goto release_tr;
	}

	ib_sa_register_client(&srp_sa_client);

	ret = ib_register_client(&srp_client);
	if (ret) {
		pr_err("couldn't register IB client\n");
		goto unreg_sa;
	}

out:
	return ret;

unreg_sa:
	ib_sa_unregister_client(&srp_sa_client);
	class_unregister(&srp_class);

release_tr:
	srp_release_transport(ib_srp_transport_template);

destroy_wq:
	destroy_workqueue(srp_remove_wq);
	goto out;
}

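/*
 * Module unload tears everything down in the reverse order of
 * srp_init_module().
 */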
static void __exit srp_cleanup_module(void)
{
	ib_unregister_client(&srp_client);
	ib_sa_unregister_client(&srp_sa_client);
	class_unregister(&srp_class);
	srp_release_transport(ib_srp_transport_template);
	destroy_workqueue(srp_remove_wq);
}

module_init(srp_init_module);
module_exit(srp_cleanup_module);