/*
 * Copyright (c) 2005 Cisco Systems.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/string.h>
#include <linux/parser.h>
#include <linux/random.h>
#include <linux/jiffies.h>
#include <rdma/ib_cache.h>

#include <linux/atomic.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_tcq.h>
#include <scsi/srp.h>
#include <scsi/scsi_transport_srp.h>

#include "ib_srp.h"

#define DRV_NAME	"ib_srp"
#define PFX		DRV_NAME ": "
#define DRV_VERSION	"2.0"
#define DRV_RELDATE	"July 26, 2015"

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("InfiniBand SCSI RDMA Protocol initiator");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_INFO(release_date, DRV_RELDATE);

static unsigned int srp_sg_tablesize;
static unsigned int cmd_sg_entries;
static unsigned int indirect_sg_entries;
static bool allow_ext_sg;
static bool prefer_fr = true;
static bool register_always = true;
static int topspin_workarounds = 1;

module_param(srp_sg_tablesize, uint, 0444);
MODULE_PARM_DESC(srp_sg_tablesize, "Deprecated name for cmd_sg_entries");

module_param(cmd_sg_entries, uint, 0444);
MODULE_PARM_DESC(cmd_sg_entries,
		 "Default number of gather/scatter entries in the SRP command (default is 12, max 255)");

module_param(indirect_sg_entries, uint, 0444);
MODULE_PARM_DESC(indirect_sg_entries,
		 "Default max number of gather/scatter entries (default is 12, max is " __stringify(SCSI_MAX_SG_CHAIN_SEGMENTS) ")");

module_param(allow_ext_sg, bool, 0444);
MODULE_PARM_DESC(allow_ext_sg,
		 "Default behavior when there are more than cmd_sg_entries S/G entries after mapping; fails the request when false (default false)");

module_param(topspin_workarounds, int, 0444);
MODULE_PARM_DESC(topspin_workarounds,
		 "Enable workarounds for Topspin/Cisco SRP target bugs if != 0");

module_param(prefer_fr, bool, 0444);
MODULE_PARM_DESC(prefer_fr,
"Whether to use fast registration if both FMR and fast registration are supported");

module_param(register_always, bool, 0444);
MODULE_PARM_DESC(register_always,
		 "Use memory registration even for contiguous memory regions");

static const struct kernel_param_ops srp_tmo_ops;

static int srp_reconnect_delay = 10;
module_param_cb(reconnect_delay, &srp_tmo_ops, &srp_reconnect_delay,
		S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(reconnect_delay, "Time between successive reconnect attempts");

static int srp_fast_io_fail_tmo = 15;
module_param_cb(fast_io_fail_tmo, &srp_tmo_ops, &srp_fast_io_fail_tmo,
		S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fast_io_fail_tmo,
		 "Number of seconds between the observation of a transport"
		 " layer error and failing all I/O. \"off\" means that this"
		 " functionality is disabled.");

static int srp_dev_loss_tmo = 600;
module_param_cb(dev_loss_tmo, &srp_tmo_ops, &srp_dev_loss_tmo,
		S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dev_loss_tmo,
		 "Maximum number of seconds that the SRP transport should"
		 " insulate transport layer errors. After this time has been"
		 " exceeded the SCSI host is removed. Should be"
		 " between 1 and " __stringify(SCSI_DEVICE_BLOCK_MAX_TIMEOUT)
		 " if fast_io_fail_tmo has not been set. \"off\" means that"
		 " this functionality is disabled.");

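/*
 * Usage sketch (assuming the module is loaded under its default name
 * "ib_srp"; paths shown for illustration only): the three timeouts above
 * can be read and changed at runtime through sysfs, e.g.
 *
 *   cat /sys/module/ib_srp/parameters/reconnect_delay
 *   echo 20  > /sys/module/ib_srp/parameters/reconnect_delay
 *   echo off > /sys/module/ib_srp/parameters/fast_io_fail_tmo
 *
 * Writes are routed through srp_tmo_set() below, which validates the
 * combination of the three values before accepting a new one.
 */
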
static unsigned ch_count;
module_param(ch_count, uint, 0444);
MODULE_PARM_DESC(ch_count,
		 "Number of RDMA channels to use for communication with an SRP target. Using more than one channel improves performance if the HCA supports multiple completion vectors. The default value is the minimum of four times the number of online CPU sockets and the number of completion vectors supported by the HCA.");

static void srp_add_one(struct ib_device *device);
static void srp_remove_one(struct ib_device *device, void *client_data);
static void srp_recv_completion(struct ib_cq *cq, void *ch_ptr);
static void srp_send_completion(struct ib_cq *cq, void *ch_ptr);
static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event);

static struct scsi_transport_template *ib_srp_transport_template;
static struct workqueue_struct *srp_remove_wq;

static struct ib_client srp_client = {
	.name   = "srp",
	.add    = srp_add_one,
	.remove = srp_remove_one
};

static struct ib_sa_client srp_sa_client;

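/*
 * Get/set handlers for the reconnect_delay, fast_io_fail_tmo and
 * dev_loss_tmo parameters defined above. A negative timeout is reported
 * as "off", meaning that the corresponding functionality is disabled.
 */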
static int srp_tmo_get(char *buffer, const struct kernel_param *kp)
{
	int tmo = *(int *)kp->arg;

	if (tmo >= 0)
		return sprintf(buffer, "%d", tmo);
	else
		return sprintf(buffer, "off");
}

static int srp_tmo_set(const char *val, const struct kernel_param *kp)
{
	int tmo, res;

	res = srp_parse_tmo(&tmo, val);
	if (res)
		goto out;

	if (kp->arg == &srp_reconnect_delay)
		res = srp_tmo_valid(tmo, srp_fast_io_fail_tmo,
				    srp_dev_loss_tmo);
	else if (kp->arg == &srp_fast_io_fail_tmo)
		res = srp_tmo_valid(srp_reconnect_delay, tmo, srp_dev_loss_tmo);
	else
		res = srp_tmo_valid(srp_reconnect_delay, srp_fast_io_fail_tmo,
				    tmo);
	if (res)
		goto out;
	*(int *)kp->arg = tmo;

out:
	return res;
}

static const struct kernel_param_ops srp_tmo_ops = {
	.get = srp_tmo_get,
	.set = srp_tmo_set,
};

static inline struct srp_target_port *host_to_target(struct Scsi_Host *host)
{
	return (struct srp_target_port *) host->hostdata;
}

static const char *srp_target_info(struct Scsi_Host *host)
{
	return host_to_target(host)->target_name;
}

static int srp_target_is_topspin(struct srp_target_port *target)
{
	static const u8 topspin_oui[3] = { 0x00, 0x05, 0xad };
	static const u8 cisco_oui[3]   = { 0x00, 0x1b, 0x0d };

	return topspin_workarounds &&
		(!memcmp(&target->ioc_guid, topspin_oui, sizeof topspin_oui) ||
		 !memcmp(&target->ioc_guid, cisco_oui, sizeof cisco_oui));
}

static struct srp_iu *srp_alloc_iu(struct srp_host *host, size_t size,
				   gfp_t gfp_mask,
				   enum dma_data_direction direction)
{
	struct srp_iu *iu;

	iu = kmalloc(sizeof *iu, gfp_mask);
	if (!iu)
		goto out;

	iu->buf = kzalloc(size, gfp_mask);
	if (!iu->buf)
		goto out_free_iu;

	iu->dma = ib_dma_map_single(host->srp_dev->dev, iu->buf, size,
				    direction);
	if (ib_dma_mapping_error(host->srp_dev->dev, iu->dma))
		goto out_free_buf;

	iu->size = size;
	iu->direction = direction;

	return iu;

out_free_buf:
	kfree(iu->buf);
out_free_iu:
	kfree(iu);
out:
	return NULL;
}

static void srp_free_iu(struct srp_host *host, struct srp_iu *iu)
{
	if (!iu)
		return;

	ib_dma_unmap_single(host->srp_dev->dev, iu->dma, iu->size,
			    iu->direction);
	kfree(iu->buf);
	kfree(iu);
}

static void srp_qp_event(struct ib_event *event, void *context)
{
	pr_debug("QP event %s (%d)\n",
		 ib_event_msg(event->event), event->event);
}

static int srp_init_qp(struct srp_target_port *target,
		       struct ib_qp *qp)
{
	struct ib_qp_attr *attr;
	int ret;

	attr = kmalloc(sizeof *attr, GFP_KERNEL);
	if (!attr)
		return -ENOMEM;

	ret = ib_find_cached_pkey(target->srp_host->srp_dev->dev,
				  target->srp_host->port,
				  be16_to_cpu(target->pkey),
				  &attr->pkey_index);
	if (ret)
		goto out;

	attr->qp_state = IB_QPS_INIT;
	attr->qp_access_flags = (IB_ACCESS_REMOTE_READ |
				 IB_ACCESS_REMOTE_WRITE);
	attr->port_num = target->srp_host->port;

	ret = ib_modify_qp(qp, attr,
			   IB_QP_STATE |
			   IB_QP_PKEY_INDEX |
			   IB_QP_ACCESS_FLAGS |
			   IB_QP_PORT);

out:
	kfree(attr);
	return ret;
}

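/*
 * Allocate a new CM ID for @ch, destroy the old one if present and
 * reinitialize the path record fields (sgid, dgid, pkey and service ID)
 * from the target port parameters so that the next login attempt starts
 * from a clean state.
 */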
static int srp_new_cm_id(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	struct ib_cm_id *new_cm_id;

	new_cm_id = ib_create_cm_id(target->srp_host->srp_dev->dev,
				    srp_cm_handler, ch);
	if (IS_ERR(new_cm_id))
		return PTR_ERR(new_cm_id);

	if (ch->cm_id)
		ib_destroy_cm_id(ch->cm_id);
	ch->cm_id = new_cm_id;
	ch->path.sgid = target->sgid;
	ch->path.dgid = target->orig_dgid;
	ch->path.pkey = target->pkey;
	ch->path.service_id = target->service_id;

	return 0;
}

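/*
 * Create an FMR pool sized to the SCSI host queue depth. Each FMR in the
 * pool can map up to dev->max_pages_per_mr pages of dev->mr_page_size
 * bytes each.
 */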
static struct ib_fmr_pool *srp_alloc_fmr_pool(struct srp_target_port *target)
{
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_fmr_pool_param fmr_param;

	memset(&fmr_param, 0, sizeof(fmr_param));
	fmr_param.pool_size = target->scsi_host->can_queue;
	fmr_param.dirty_watermark = fmr_param.pool_size / 4;
	fmr_param.cache = 1;
	fmr_param.max_pages_per_fmr = dev->max_pages_per_mr;
	fmr_param.page_shift = ilog2(dev->mr_page_size);
	fmr_param.access = (IB_ACCESS_LOCAL_WRITE |
			    IB_ACCESS_REMOTE_WRITE |
			    IB_ACCESS_REMOTE_READ);

	return ib_create_fmr_pool(dev->pd, &fmr_param);
}

/**
 * srp_destroy_fr_pool() - free the resources owned by a pool
 * @pool: Fast registration pool to be destroyed.
 */
static void srp_destroy_fr_pool(struct srp_fr_pool *pool)
{
	int i;
	struct srp_fr_desc *d;

	if (!pool)
		return;

	for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
		if (d->mr)
			ib_dereg_mr(d->mr);
	}
	kfree(pool);
}

/**
 * srp_create_fr_pool() - allocate and initialize a pool for fast registration
 * @device:            IB device to allocate fast registration descriptors for.
 * @pd:                Protection domain associated with the FR descriptors.
 * @pool_size:         Number of descriptors to allocate.
 * @max_page_list_len: Maximum fast registration work request page list length.
 */
static struct srp_fr_pool *srp_create_fr_pool(struct ib_device *device,
					      struct ib_pd *pd, int pool_size,
					      int max_page_list_len)
{
	struct srp_fr_pool *pool;
	struct srp_fr_desc *d;
	struct ib_mr *mr;
	int i, ret = -EINVAL;

	if (pool_size <= 0)
		goto err;
	ret = -ENOMEM;
	pool = kzalloc(sizeof(struct srp_fr_pool) +
		       pool_size * sizeof(struct srp_fr_desc), GFP_KERNEL);
	if (!pool)
		goto err;
	pool->size = pool_size;
	pool->max_page_list_len = max_page_list_len;
	spin_lock_init(&pool->lock);
	INIT_LIST_HEAD(&pool->free_list);

	for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
		mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG,
				 max_page_list_len);
		if (IS_ERR(mr)) {
			ret = PTR_ERR(mr);
			goto destroy_pool;
		}
		d->mr = mr;
		list_add_tail(&d->entry, &pool->free_list);
	}

out:
	return pool;

destroy_pool:
	srp_destroy_fr_pool(pool);

err:
	pool = ERR_PTR(ret);
	goto out;
}

/**
 * srp_fr_pool_get() - obtain a descriptor suitable for fast registration
 * @pool: Pool to obtain descriptor from.
 */
static struct srp_fr_desc *srp_fr_pool_get(struct srp_fr_pool *pool)
{
	struct srp_fr_desc *d = NULL;
	unsigned long flags;

	spin_lock_irqsave(&pool->lock, flags);
	if (!list_empty(&pool->free_list)) {
		d = list_first_entry(&pool->free_list, typeof(*d), entry);
		list_del(&d->entry);
	}
	spin_unlock_irqrestore(&pool->lock, flags);

	return d;
}

/**
 * srp_fr_pool_put() - put an FR descriptor back in the free list
 * @pool: Pool the descriptor was allocated from.
 * @desc: Pointer to an array of fast registration descriptor pointers.
 * @n:    Number of descriptors to put back.
 *
 * Note: The caller must already have queued an invalidation request for
 * desc->mr->rkey before calling this function.
 */
static void srp_fr_pool_put(struct srp_fr_pool *pool, struct srp_fr_desc **desc,
			    int n)
{
	unsigned long flags;
	int i;

	spin_lock_irqsave(&pool->lock, flags);
	for (i = 0; i < n; i++)
		list_add(&desc[i]->entry, &pool->free_list);
	spin_unlock_irqrestore(&pool->lock, flags);
}

static struct srp_fr_pool *srp_alloc_fr_pool(struct srp_target_port *target)
{
	struct srp_device *dev = target->srp_host->srp_dev;

	return srp_create_fr_pool(dev->dev, dev->pd,
				  target->scsi_host->can_queue,
				  dev->max_pages_per_mr);
}

/**
 * srp_destroy_qp() - destroy an RDMA queue pair
 * @ch: SRP RDMA channel.
 *
 * Change a queue pair into the error state and wait until all receive
 * completions have been processed before destroying it. This prevents
 * the receive completion handler from accessing the queue pair while it
 * is being destroyed.
 */
static void srp_destroy_qp(struct srp_rdma_ch *ch)
{
	static struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
	static struct ib_recv_wr wr = { .wr_id = SRP_LAST_WR_ID };
	struct ib_recv_wr *bad_wr;
	int ret;

	/* Destroying a QP and reusing ch->done is only safe if not connected */
	WARN_ON_ONCE(ch->connected);

	ret = ib_modify_qp(ch->qp, &attr, IB_QP_STATE);
	WARN_ONCE(ret, "ib_modify_qp() returned %d\n", ret);
	if (ret)
		goto out;

	init_completion(&ch->done);
	ret = ib_post_recv(ch->qp, &wr, &bad_wr);
	WARN_ONCE(ret, "ib_post_recv() returned %d\n", ret);
	if (ret == 0)
		wait_for_completion(&ch->done);

out:
	ib_destroy_qp(ch->qp);
}

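/**
 * srp_create_ch_ib() - create the IB resources of an RDMA channel
 * @ch: SRP RDMA channel.
 *
 * Allocate a receive and a send completion queue, a queue pair and,
 * depending on the memory registration mode of the device, an FR or FMR
 * pool. Resources the channel already owns are only destroyed after their
 * replacements have been allocated successfully, so a failure leaves the
 * channel unchanged.
 */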
static int srp_create_ch_ib(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_qp_init_attr *init_attr;
	struct ib_cq *recv_cq, *send_cq;
	struct ib_qp *qp;
	struct ib_fmr_pool *fmr_pool = NULL;
	struct srp_fr_pool *fr_pool = NULL;
	const int m = dev->use_fast_reg ? 3 : 1;
	struct ib_cq_init_attr cq_attr = {};
	int ret;

	init_attr = kzalloc(sizeof *init_attr, GFP_KERNEL);
	if (!init_attr)
		return -ENOMEM;

	/* + 1 for SRP_LAST_WR_ID */
	cq_attr.cqe = target->queue_size + 1;
	cq_attr.comp_vector = ch->comp_vector;
	recv_cq = ib_create_cq(dev->dev, srp_recv_completion, NULL, ch,
			       &cq_attr);
	if (IS_ERR(recv_cq)) {
		ret = PTR_ERR(recv_cq);
		goto err;
	}

	cq_attr.cqe = m * target->queue_size;
	cq_attr.comp_vector = ch->comp_vector;
	send_cq = ib_create_cq(dev->dev, srp_send_completion, NULL, ch,
			       &cq_attr);
	if (IS_ERR(send_cq)) {
		ret = PTR_ERR(send_cq);
		goto err_recv_cq;
	}

	ib_req_notify_cq(recv_cq, IB_CQ_NEXT_COMP);

	init_attr->event_handler = srp_qp_event;
	init_attr->cap.max_send_wr = m * target->queue_size;
	init_attr->cap.max_recv_wr = target->queue_size + 1;
	init_attr->cap.max_recv_sge = 1;
	init_attr->cap.max_send_sge = 1;
	init_attr->sq_sig_type = IB_SIGNAL_REQ_WR;
	init_attr->qp_type = IB_QPT_RC;
	init_attr->send_cq = send_cq;
	init_attr->recv_cq = recv_cq;

	qp = ib_create_qp(dev->pd, init_attr);
	if (IS_ERR(qp)) {
		ret = PTR_ERR(qp);
		goto err_send_cq;
	}

	ret = srp_init_qp(target, qp);
	if (ret)
		goto err_qp;

	if (dev->use_fast_reg) {
		fr_pool = srp_alloc_fr_pool(target);
		if (IS_ERR(fr_pool)) {
			ret = PTR_ERR(fr_pool);
			shost_printk(KERN_WARNING, target->scsi_host, PFX
				     "FR pool allocation failed (%d)\n", ret);
			goto err_qp;
		}
	} else if (dev->use_fmr) {
		fmr_pool = srp_alloc_fmr_pool(target);
		if (IS_ERR(fmr_pool)) {
			ret = PTR_ERR(fmr_pool);
			shost_printk(KERN_WARNING, target->scsi_host, PFX
				     "FMR pool allocation failed (%d)\n", ret);
			goto err_qp;
		}
	}

	if (ch->qp)
		srp_destroy_qp(ch);
	if (ch->recv_cq)
		ib_destroy_cq(ch->recv_cq);
	if (ch->send_cq)
		ib_destroy_cq(ch->send_cq);

	ch->qp = qp;
	ch->recv_cq = recv_cq;
	ch->send_cq = send_cq;

	if (dev->use_fast_reg) {
		if (ch->fr_pool)
			srp_destroy_fr_pool(ch->fr_pool);
		ch->fr_pool = fr_pool;
	} else if (dev->use_fmr) {
		if (ch->fmr_pool)
			ib_destroy_fmr_pool(ch->fmr_pool);
		ch->fmr_pool = fmr_pool;
	}

	kfree(init_attr);
	return 0;

err_qp:
	ib_destroy_qp(qp);

err_send_cq:
	ib_destroy_cq(send_cq);

err_recv_cq:
	ib_destroy_cq(recv_cq);

err:
	kfree(init_attr);
	return ret;
}

/*
 * Note: this function may be called without srp_alloc_iu_bufs() having been
 * invoked. Hence the ch->[rt]x_ring checks.
 */
static void srp_free_ch_ib(struct srp_target_port *target,
			   struct srp_rdma_ch *ch)
{
	struct srp_device *dev = target->srp_host->srp_dev;
	int i;

	if (!ch->target)
		return;

	if (ch->cm_id) {
		ib_destroy_cm_id(ch->cm_id);
		ch->cm_id = NULL;
	}

	/* If srp_new_cm_id() succeeded but srp_create_ch_ib() did not, return. */
	if (!ch->qp)
		return;

	if (dev->use_fast_reg) {
		if (ch->fr_pool)
			srp_destroy_fr_pool(ch->fr_pool);
	} else if (dev->use_fmr) {
		if (ch->fmr_pool)
			ib_destroy_fmr_pool(ch->fmr_pool);
	}
	srp_destroy_qp(ch);
	ib_destroy_cq(ch->send_cq);
	ib_destroy_cq(ch->recv_cq);

	/*
	 * Prevent the SCSI error handler from using this channel after it
	 * has been freed. The SCSI error handler may continue trying to
	 * perform recovery actions after scsi_remove_host() has returned.
	 */
	ch->target = NULL;

	ch->qp = NULL;
	ch->send_cq = ch->recv_cq = NULL;

	if (ch->rx_ring) {
		for (i = 0; i < target->queue_size; ++i)
			srp_free_iu(target->srp_host, ch->rx_ring[i]);
		kfree(ch->rx_ring);
		ch->rx_ring = NULL;
	}
	if (ch->tx_ring) {
		for (i = 0; i < target->queue_size; ++i)
			srp_free_iu(target->srp_host, ch->tx_ring[i]);
		kfree(ch->tx_ring);
		ch->tx_ring = NULL;
	}
}

static void srp_path_rec_completion(int status,
				    struct ib_sa_path_rec *pathrec,
				    void *ch_ptr)
{
	struct srp_rdma_ch *ch = ch_ptr;
	struct srp_target_port *target = ch->target;

	ch->status = status;
	if (status)
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "Got failed path rec status %d\n", status);
	else
		ch->path = *pathrec;
	complete(&ch->done);
}

static int srp_lookup_path(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	int ret;

	ch->path.numb_path = 1;

	init_completion(&ch->done);

	ch->path_query_id = ib_sa_path_rec_get(&srp_sa_client,
					       target->srp_host->srp_dev->dev,
					       target->srp_host->port,
					       &ch->path,
					       IB_SA_PATH_REC_SERVICE_ID |
					       IB_SA_PATH_REC_DGID |
					       IB_SA_PATH_REC_SGID |
					       IB_SA_PATH_REC_NUMB_PATH |
					       IB_SA_PATH_REC_PKEY,
					       SRP_PATH_REC_TIMEOUT_MS,
					       GFP_KERNEL,
					       srp_path_rec_completion,
					       ch, &ch->path_query);
	if (ch->path_query_id < 0)
		return ch->path_query_id;

	ret = wait_for_completion_interruptible(&ch->done);
	if (ret < 0)
		return ret;

	if (ch->status < 0)
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Path record query failed\n");

	return ch->status;
}

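/*
 * Build an SRP_LOGIN_REQ and send it to the target through the IB CM.
 * The layout of the initiator and target port identifiers depends on the
 * I/O class reported by the target; see the comments inside the function
 * body for the difference between the rev. 16a draft and older drafts.
 */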
static int srp_send_req(struct srp_rdma_ch *ch, bool multich)
{
	struct srp_target_port *target = ch->target;
	struct {
		struct ib_cm_req_param param;
		struct srp_login_req   priv;
	} *req = NULL;
	int status;

	req = kzalloc(sizeof *req, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	req->param.primary_path = &ch->path;
	req->param.alternate_path = NULL;
	req->param.service_id = target->service_id;
	req->param.qp_num = ch->qp->qp_num;
	req->param.qp_type = ch->qp->qp_type;
	req->param.private_data = &req->priv;
	req->param.private_data_len = sizeof req->priv;
	req->param.flow_control = 1;

	get_random_bytes(&req->param.starting_psn, 4);
	req->param.starting_psn &= 0xffffff;

	/*
	 * Pick some arbitrary defaults here; we could make these
	 * module parameters if anyone cared about setting them.
	 */
	req->param.responder_resources = 4;
	req->param.remote_cm_response_timeout = 20;
	req->param.local_cm_response_timeout = 20;
	req->param.retry_count = target->tl_retry_count;
	req->param.rnr_retry_count = 7;
	req->param.max_cm_retries = 15;

	req->priv.opcode = SRP_LOGIN_REQ;
	req->priv.tag = 0;
	req->priv.req_it_iu_len = cpu_to_be32(target->max_iu_len);
	req->priv.req_buf_fmt = cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
					    SRP_BUF_FORMAT_INDIRECT);
	req->priv.req_flags = (multich ? SRP_MULTICHAN_MULTI :
			       SRP_MULTICHAN_SINGLE);
	/*
	 * In the published SRP specification (draft rev. 16a), the
	 * port identifier format is 8 bytes of ID extension followed
	 * by 8 bytes of GUID. Older drafts put the two halves in the
	 * opposite order, so that the GUID comes first.
	 *
	 * Targets conforming to these obsolete drafts can be
	 * recognized by the I/O Class they report.
	 */
	if (target->io_class == SRP_REV10_IB_IO_CLASS) {
		memcpy(req->priv.initiator_port_id,
		       &target->sgid.global.interface_id, 8);
		memcpy(req->priv.initiator_port_id + 8,
		       &target->initiator_ext, 8);
		memcpy(req->priv.target_port_id, &target->ioc_guid, 8);
		memcpy(req->priv.target_port_id + 8, &target->id_ext, 8);
	} else {
		memcpy(req->priv.initiator_port_id,
		       &target->initiator_ext, 8);
		memcpy(req->priv.initiator_port_id + 8,
		       &target->sgid.global.interface_id, 8);
		memcpy(req->priv.target_port_id, &target->id_ext, 8);
		memcpy(req->priv.target_port_id + 8, &target->ioc_guid, 8);
	}

	/*
	 * Topspin/Cisco SRP targets will reject our login unless we
	 * zero out the first 8 bytes of our initiator port ID and set
	 * the second 8 bytes to the local node GUID.
	 */
	if (srp_target_is_topspin(target)) {
		shost_printk(KERN_DEBUG, target->scsi_host,
			     PFX "Topspin/Cisco initiator port ID workaround "
			     "activated for target GUID %016llx\n",
			     be64_to_cpu(target->ioc_guid));
		memset(req->priv.initiator_port_id, 0, 8);
		memcpy(req->priv.initiator_port_id + 8,
		       &target->srp_host->srp_dev->dev->node_guid, 8);
	}

	status = ib_send_cm_req(ch->cm_id, &req->param);

	kfree(req);

	return status;
}

static bool srp_queue_remove_work(struct srp_target_port *target)
{
	bool changed = false;

	spin_lock_irq(&target->lock);
	if (target->state != SRP_TARGET_REMOVED) {
		target->state = SRP_TARGET_REMOVED;
		changed = true;
	}
	spin_unlock_irq(&target->lock);

	if (changed)
		queue_work(srp_remove_wq, &target->remove_work);

	return changed;
}

static void srp_disconnect_target(struct srp_target_port *target)
{
	struct srp_rdma_ch *ch;
	int i;

	/* XXX should send SRP_I_LOGOUT request */

	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		ch->connected = false;
		if (ch->cm_id && ib_send_cm_dreq(ch->cm_id, NULL, 0)) {
			shost_printk(KERN_DEBUG, target->scsi_host,
				     PFX "Sending CM DREQ failed\n");
		}
	}
}

static void srp_free_req_data(struct srp_target_port *target,
			      struct srp_rdma_ch *ch)
{
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_device *ibdev = dev->dev;
	struct srp_request *req;
	int i;

	if (!ch->req_ring)
		return;

	for (i = 0; i < target->req_ring_size; ++i) {
		req = &ch->req_ring[i];
		if (dev->use_fast_reg) {
			kfree(req->fr_list);
		} else {
			kfree(req->fmr_list);
			kfree(req->map_page);
		}
		if (req->indirect_dma_addr) {
			ib_dma_unmap_single(ibdev, req->indirect_dma_addr,
					    target->indirect_size,
					    DMA_TO_DEVICE);
		}
		kfree(req->indirect_desc);
	}

	kfree(ch->req_ring);
	ch->req_ring = NULL;
}

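/*
 * Allocate the per-request state of @ch: a list of memory registration
 * descriptors (FR or FMR, depending on the device), the page list used
 * for FMR mapping, and a DMA-mapped buffer for indirect data descriptors.
 */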
static int srp_alloc_req_data(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	struct srp_device *srp_dev = target->srp_host->srp_dev;
	struct ib_device *ibdev = srp_dev->dev;
	struct srp_request *req;
	void *mr_list;
	dma_addr_t dma_addr;
	int i, ret = -ENOMEM;

	ch->req_ring = kcalloc(target->req_ring_size, sizeof(*ch->req_ring),
			       GFP_KERNEL);
	if (!ch->req_ring)
		goto out;

	for (i = 0; i < target->req_ring_size; ++i) {
		req = &ch->req_ring[i];
		mr_list = kmalloc(target->cmd_sg_cnt * sizeof(void *),
				  GFP_KERNEL);
		if (!mr_list)
			goto out;
		if (srp_dev->use_fast_reg) {
			req->fr_list = mr_list;
		} else {
			req->fmr_list = mr_list;
			req->map_page = kmalloc(srp_dev->max_pages_per_mr *
						sizeof(void *), GFP_KERNEL);
			if (!req->map_page)
				goto out;
		}
		req->indirect_desc = kmalloc(target->indirect_size, GFP_KERNEL);
		if (!req->indirect_desc)
			goto out;

		dma_addr = ib_dma_map_single(ibdev, req->indirect_desc,
					     target->indirect_size,
					     DMA_TO_DEVICE);
		if (ib_dma_mapping_error(ibdev, dma_addr))
			goto out;

		req->indirect_dma_addr = dma_addr;
	}
	ret = 0;

out:
	return ret;
}

/**
 * srp_del_scsi_host_attr() - Remove attributes defined in the host template.
 * @shost: SCSI host whose attributes to remove from sysfs.
 *
 * Note: Any attributes defined in the host template and that did not exist
 * before invocation of this function will be ignored.
 */
static void srp_del_scsi_host_attr(struct Scsi_Host *shost)
{
	struct device_attribute **attr;

	for (attr = shost->hostt->shost_attrs; attr && *attr; ++attr)
		device_remove_file(&shost->shost_dev, *attr);
}

static void srp_remove_target(struct srp_target_port *target)
{
	struct srp_rdma_ch *ch;
	int i;

	WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);

	srp_del_scsi_host_attr(target->scsi_host);
	srp_rport_get(target->rport);
	srp_remove_host(target->scsi_host);
	scsi_remove_host(target->scsi_host);
	srp_stop_rport_timers(target->rport);
	srp_disconnect_target(target);
	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		srp_free_ch_ib(target, ch);
	}
	cancel_work_sync(&target->tl_err_work);
	srp_rport_put(target->rport);
	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		srp_free_req_data(target, ch);
	}
	kfree(target->ch);
	target->ch = NULL;

	spin_lock(&target->srp_host->target_lock);
	list_del(&target->list);
	spin_unlock(&target->srp_host->target_lock);

	scsi_host_put(target->scsi_host);
}

static void srp_remove_work(struct work_struct *work)
{
	struct srp_target_port *target =
		container_of(work, struct srp_target_port, remove_work);

	WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);

	srp_remove_target(target);
}

static void srp_rport_delete(struct srp_rport *rport)
{
	struct srp_target_port *target = rport->lld_data;

	srp_queue_remove_work(target);
}

/**
 * srp_connected_ch() - number of connected channels
 * @target: SRP target port.
 */
static int srp_connected_ch(struct srp_target_port *target)
{
	int i, c = 0;

	for (i = 0; i < target->ch_count; i++)
		c += target->ch[i].connected;

	return c;
}

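/**
 * srp_connect_ch() - connect an RDMA channel to the target
 * @ch: SRP RDMA channel.
 * @multich: Whether this login is for an additional channel rather than
 *	the first one.
 *
 * Resolve a path to the target and retry the login until it either
 * succeeds or fails with a status other than a port or LID/QP redirect.
 * Returns zero or a negative value; positive SRP status codes are mapped
 * to -ENODEV.
 */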
static int srp_connect_ch(struct srp_rdma_ch *ch, bool multich)
{
	struct srp_target_port *target = ch->target;
	int ret;

	WARN_ON_ONCE(!multich && srp_connected_ch(target) > 0);

	ret = srp_lookup_path(ch);
	if (ret)
		goto out;

	while (1) {
		init_completion(&ch->done);
		ret = srp_send_req(ch, multich);
		if (ret)
			goto out;
		ret = wait_for_completion_interruptible(&ch->done);
		if (ret < 0)
			goto out;

		/*
		 * The CM event handling code will set status to
		 * SRP_PORT_REDIRECT if we get a port redirect REJ
		 * back, or SRP_DLID_REDIRECT if we get a lid/qp
		 * redirect REJ back.
		 */
		ret = ch->status;
		switch (ret) {
		case 0:
			ch->connected = true;
			goto out;

		case SRP_PORT_REDIRECT:
			ret = srp_lookup_path(ch);
			if (ret)
				goto out;
			break;

		case SRP_DLID_REDIRECT:
			break;

		case SRP_STALE_CONN:
			shost_printk(KERN_ERR, target->scsi_host, PFX
				     "giving up on stale connection\n");
			ret = -ECONNRESET;
			goto out;

		default:
			goto out;
		}
	}

out:
	return ret <= 0 ? ret : -ENODEV;
}

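/*
 * Post a local invalidation work request for @rkey on the QP of @ch.
 * This is used to invalidate fast registration descriptors before they
 * are returned to the FR pool (see the note above srp_fr_pool_put()).
 */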
static int srp_inv_rkey(struct srp_rdma_ch *ch, u32 rkey)
{
	struct ib_send_wr *bad_wr;
	struct ib_send_wr wr = {
		.opcode = IB_WR_LOCAL_INV,
		.wr_id = LOCAL_INV_WR_ID_MASK,
		.next = NULL,
		.num_sge = 0,
		.send_flags = 0,
		.ex.invalidate_rkey = rkey,
	};

	return ib_post_send(ch->qp, &wr, &bad_wr);
}

static void srp_unmap_data(struct scsi_cmnd *scmnd,
			   struct srp_rdma_ch *ch,
			   struct srp_request *req)
{
	struct srp_target_port *target = ch->target;
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_device *ibdev = dev->dev;
	int i, res;

	if (!scsi_sglist(scmnd) ||
	    (scmnd->sc_data_direction != DMA_TO_DEVICE &&
	     scmnd->sc_data_direction != DMA_FROM_DEVICE))
		return;

	if (dev->use_fast_reg) {
		struct srp_fr_desc **pfr;

		for (i = req->nmdesc, pfr = req->fr_list; i > 0; i--, pfr++) {
			res = srp_inv_rkey(ch, (*pfr)->mr->rkey);
			if (res < 0) {
				shost_printk(KERN_ERR, target->scsi_host, PFX
					     "Queueing INV WR for rkey %#x failed (%d)\n",
					     (*pfr)->mr->rkey, res);
				queue_work(system_long_wq,
					   &target->tl_err_work);
			}
		}
		if (req->nmdesc)
			srp_fr_pool_put(ch->fr_pool, req->fr_list,
					req->nmdesc);
	} else if (dev->use_fmr) {
		struct ib_pool_fmr **pfmr;

		for (i = req->nmdesc, pfmr = req->fmr_list; i > 0; i--, pfmr++)
			ib_fmr_pool_unmap(*pfmr);
	}

	ib_dma_unmap_sg(ibdev, scsi_sglist(scmnd), scsi_sg_count(scmnd),
			scmnd->sc_data_direction);
}

/**
 * srp_claim_req - Take ownership of the scmnd associated with a request.
 * @ch: SRP RDMA channel.
 * @req: SRP request.
 * @sdev: If not NULL, only take ownership for this SCSI device.
 * @scmnd: If NULL, take ownership of @req->scmnd. If not NULL, only take
 *         ownership of @req->scmnd if it equals @scmnd.
 *
 * Return value:
 * Either NULL or a pointer to the SCSI command the caller became owner of.
 */
static struct scsi_cmnd *srp_claim_req(struct srp_rdma_ch *ch,
				       struct srp_request *req,
				       struct scsi_device *sdev,
				       struct scsi_cmnd *scmnd)
{
	unsigned long flags;

	spin_lock_irqsave(&ch->lock, flags);
	if (req->scmnd &&
	    (!sdev || req->scmnd->device == sdev) &&
	    (!scmnd || req->scmnd == scmnd)) {
		scmnd = req->scmnd;
		req->scmnd = NULL;
	} else {
		scmnd = NULL;
	}
	spin_unlock_irqrestore(&ch->lock, flags);

	return scmnd;
}

/**
 * srp_free_req() - Unmap data and adjust the request limit.
 * @ch: SRP RDMA channel.
 * @req: Request to be freed.
 * @scmnd: SCSI command associated with @req.
 * @req_lim_delta: Amount to be added to @ch->req_lim.
 */
static void srp_free_req(struct srp_rdma_ch *ch, struct srp_request *req,
			 struct scsi_cmnd *scmnd, s32 req_lim_delta)
{
	unsigned long flags;

	srp_unmap_data(scmnd, ch, req);

	spin_lock_irqsave(&ch->lock, flags);
	ch->req_lim += req_lim_delta;
	spin_unlock_irqrestore(&ch->lock, flags);
}

static void srp_finish_req(struct srp_rdma_ch *ch, struct srp_request *req,
			   struct scsi_device *sdev, int result)
{
	struct scsi_cmnd *scmnd = srp_claim_req(ch, req, sdev, NULL);

	if (scmnd) {
		srp_free_req(ch, req, scmnd, 0);
		scmnd->result = result;
		scmnd->scsi_done(scmnd);
	}
}

static void srp_terminate_io(struct srp_rport *rport)
{
	struct srp_target_port *target = rport->lld_data;
	struct srp_rdma_ch *ch;
	struct Scsi_Host *shost = target->scsi_host;
	struct scsi_device *sdev;
	int i, j;

	/*
	 * Invoking srp_terminate_io() while srp_queuecommand() is running
	 * is not safe. Hence the warning statement below.
	 */
	shost_for_each_device(sdev, shost)
		WARN_ON_ONCE(sdev->request_queue->request_fn_active);

	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];

		for (j = 0; j < target->req_ring_size; ++j) {
			struct srp_request *req = &ch->req_ring[j];

			srp_finish_req(ch, req, NULL,
				       DID_TRANSPORT_FAILFAST << 16);
		}
	}
}

/*
 * It is up to the caller to ensure that srp_rport_reconnect() calls are
 * serialized and that no concurrent srp_queuecommand(), srp_abort(),
 * srp_reset_device() or srp_reset_host() calls will occur while this function
 * is in progress. One way to ensure that is not to call this function
 * directly but to call srp_reconnect_rport() instead, since that function
 * serializes calls of this function via rport->mutex and also blocks
 * srp_queuecommand() calls before invoking this function.
 */
static int srp_rport_reconnect(struct srp_rport *rport)
{
	struct srp_target_port *target = rport->lld_data;
	struct srp_rdma_ch *ch;
	int i, j, ret = 0;
	bool multich = false;

	srp_disconnect_target(target);

	if (target->state == SRP_TARGET_SCANNING)
		return -ENODEV;

	/*
	 * Now get a new local CM ID so that we avoid confusing the target in
	 * case things are really fouled up. Doing so also ensures that all CM
	 * callbacks will have finished before a new QP is allocated.
	 */
	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		ret += srp_new_cm_id(ch);
	}
	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		for (j = 0; j < target->req_ring_size; ++j) {
			struct srp_request *req = &ch->req_ring[j];

			srp_finish_req(ch, req, NULL, DID_RESET << 16);
		}
	}
	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		/*
		 * Whether or not creating a new CM ID succeeded, create a new
		 * QP. This guarantees that all completion callback function
		 * invocations have finished before request resetting starts.
		 */
		ret += srp_create_ch_ib(ch);

		INIT_LIST_HEAD(&ch->free_tx);
		for (j = 0; j < target->queue_size; ++j)
			list_add(&ch->tx_ring[j]->list, &ch->free_tx);
	}

	target->qp_in_error = false;

	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		if (ret)
			break;
		ret = srp_connect_ch(ch, multich);
		multich = true;
	}

	if (ret == 0)
		shost_printk(KERN_INFO, target->scsi_host,
			     PFX "reconnect succeeded\n");

	return ret;
}

David Dillow8f26c9f2011-01-14 19:45:50 -05001259static void srp_map_desc(struct srp_map_state *state, dma_addr_t dma_addr,
1260 unsigned int dma_len, u32 rkey)
Roland Dreierf5358a12006-06-17 20:37:29 -07001261{
David Dillow8f26c9f2011-01-14 19:45:50 -05001262 struct srp_direct_buf *desc = state->desc;
1263
Bart Van Assche3ae95da2015-08-10 17:08:18 -07001264 WARN_ON_ONCE(!dma_len);
1265
David Dillow8f26c9f2011-01-14 19:45:50 -05001266 desc->va = cpu_to_be64(dma_addr);
1267 desc->key = cpu_to_be32(rkey);
1268 desc->len = cpu_to_be32(dma_len);
1269
1270 state->total_len += dma_len;
1271 state->desc++;
1272 state->ndesc++;
1273}
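/*
 * For reference, the descriptor filled in above is the SRP direct data
 * buffer format from <scsi/srp.h>: a remote address, an rkey and a length,
 * all in big-endian byte order:
 *
 *	struct srp_direct_buf {
 *		__be64	va;
 *		__be32	key;
 *		__be32	len;
 *	};
 */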
1274
1275static int srp_map_finish_fmr(struct srp_map_state *state,
Bart Van Assche509c07b2014-10-30 14:48:30 +01001276 struct srp_rdma_ch *ch)
David Dillow8f26c9f2011-01-14 19:45:50 -05001277{
Bart Van Assche186fbc62015-08-10 17:06:29 -07001278 struct srp_target_port *target = ch->target;
1279 struct srp_device *dev = target->srp_host->srp_dev;
David Dillow8f26c9f2011-01-14 19:45:50 -05001280 struct ib_pool_fmr *fmr;
Roland Dreierf5358a12006-06-17 20:37:29 -07001281 u64 io_addr = 0;
David Dillow8f26c9f2011-01-14 19:45:50 -05001282
Bart Van Asschef731ed62015-08-10 17:07:27 -07001283 if (state->fmr.next >= state->fmr.end)
1284 return -ENOMEM;
1285
Sagi Grimberg26630e82015-10-13 19:11:38 +03001286 WARN_ON_ONCE(!dev->use_fmr);
1287
1288 if (state->npages == 0)
1289 return 0;
1290
1291 if (state->npages == 1 && target->global_mr) {
1292 srp_map_desc(state, state->base_dma_addr, state->dma_len,
1293 target->global_mr->rkey);
1294 goto reset_state;
1295 }
1296
Bart Van Assche509c07b2014-10-30 14:48:30 +01001297 fmr = ib_fmr_pool_map_phys(ch->fmr_pool, state->pages,
David Dillow8f26c9f2011-01-14 19:45:50 -05001298 state->npages, io_addr);
1299 if (IS_ERR(fmr))
1300 return PTR_ERR(fmr);
1301
Bart Van Asschef731ed62015-08-10 17:07:27 -07001302 *state->fmr.next++ = fmr;
Bart Van Assche52ede082014-05-20 15:07:45 +02001303 state->nmdesc++;
David Dillow8f26c9f2011-01-14 19:45:50 -05001304
Bart Van Assche186fbc62015-08-10 17:06:29 -07001305 srp_map_desc(state, state->base_dma_addr & ~dev->mr_page_mask,
1306 state->dma_len, fmr->fmr->rkey);
Bart Van Assche539dde62014-05-20 15:05:46 +02001307
Sagi Grimberg26630e82015-10-13 19:11:38 +03001308reset_state:
1309 state->npages = 0;
1310 state->dma_len = 0;
1311
David Dillow8f26c9f2011-01-14 19:45:50 -05001312 return 0;
1313}
1314
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001315static int srp_map_finish_fr(struct srp_map_state *state,
Bart Van Assche509c07b2014-10-30 14:48:30 +01001316 struct srp_rdma_ch *ch)
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001317{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001318 struct srp_target_port *target = ch->target;
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001319 struct srp_device *dev = target->srp_host->srp_dev;
1320 struct ib_send_wr *bad_wr;
Sagi Grimbergf7f7aab2015-10-13 19:11:39 +03001321 struct ib_reg_wr wr;
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001322 struct srp_fr_desc *desc;
1323 u32 rkey;
Sagi Grimbergf7f7aab2015-10-13 19:11:39 +03001324 int n, err;
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001325
Bart Van Asschef731ed62015-08-10 17:07:27 -07001326 if (state->fr.next >= state->fr.end)
1327 return -ENOMEM;
1328
Sagi Grimberg26630e82015-10-13 19:11:38 +03001329 WARN_ON_ONCE(!dev->use_fast_reg);
1330
Sagi Grimbergf7f7aab2015-10-13 19:11:39 +03001331 if (state->sg_nents == 0)
Sagi Grimberg26630e82015-10-13 19:11:38 +03001332 return 0;
1333
Sagi Grimbergf7f7aab2015-10-13 19:11:39 +03001334 if (state->sg_nents == 1 && target->global_mr) {
1335 srp_map_desc(state, sg_dma_address(state->sg),
1336 sg_dma_len(state->sg),
Sagi Grimberg26630e82015-10-13 19:11:38 +03001337 target->global_mr->rkey);
Sagi Grimbergf7f7aab2015-10-13 19:11:39 +03001338 return 1;
Sagi Grimberg26630e82015-10-13 19:11:38 +03001339 }
1340
Bart Van Assche509c07b2014-10-30 14:48:30 +01001341 desc = srp_fr_pool_get(ch->fr_pool);
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001342 if (!desc)
1343 return -ENOMEM;
1344
1345 rkey = ib_inc_rkey(desc->mr->rkey);
1346 ib_update_fast_reg_key(desc->mr, rkey);
1347
Sagi Grimbergf7f7aab2015-10-13 19:11:39 +03001348 n = ib_map_mr_sg(desc->mr, state->sg, state->sg_nents,
1349 dev->mr_page_size);
1350 if (unlikely(n < 0))
1351 return n;
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001352
Sagi Grimbergf7f7aab2015-10-13 19:11:39 +03001353 wr.wr.next = NULL;
1354 wr.wr.opcode = IB_WR_REG_MR;
Christoph Hellwige622f2f2015-10-08 09:16:33 +01001355 wr.wr.wr_id = FAST_REG_WR_ID_MASK;
Sagi Grimbergf7f7aab2015-10-13 19:11:39 +03001356 wr.wr.num_sge = 0;
1357 wr.wr.send_flags = 0;
1358 wr.mr = desc->mr;
1359 wr.key = desc->mr->rkey;
1360 wr.access = (IB_ACCESS_LOCAL_WRITE |
1361 IB_ACCESS_REMOTE_READ |
1362 IB_ACCESS_REMOTE_WRITE);
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001363
Bart Van Asschef731ed62015-08-10 17:07:27 -07001364 *state->fr.next++ = desc;
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001365 state->nmdesc++;
1366
Sagi Grimbergf7f7aab2015-10-13 19:11:39 +03001367 srp_map_desc(state, desc->mr->iova,
1368 desc->mr->length, desc->mr->rkey);
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001369
Sagi Grimberg26630e82015-10-13 19:11:38 +03001370 err = ib_post_send(ch->qp, &wr.wr, &bad_wr);
Sagi Grimbergf7f7aab2015-10-13 19:11:39 +03001371 if (unlikely(err))
Sagi Grimberg26630e82015-10-13 19:11:38 +03001372 return err;
1373
Sagi Grimbergf7f7aab2015-10-13 19:11:39 +03001374 return n;
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001375}
1376
David Dillow8f26c9f2011-01-14 19:45:50 -05001377static int srp_map_sg_entry(struct srp_map_state *state,
Bart Van Assche509c07b2014-10-30 14:48:30 +01001378 struct srp_rdma_ch *ch,
Bart Van Assche3ae95da2015-08-10 17:08:18 -07001379 struct scatterlist *sg, int sg_index)
David Dillow8f26c9f2011-01-14 19:45:50 -05001380{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001381 struct srp_target_port *target = ch->target;
Greg Kroah-Hartman05321932008-03-06 00:13:36 +01001382 struct srp_device *dev = target->srp_host->srp_dev;
Ralph Campbell85507bc2006-12-12 14:30:55 -08001383 struct ib_device *ibdev = dev->dev;
David Dillow8f26c9f2011-01-14 19:45:50 -05001384 dma_addr_t dma_addr = ib_sg_dma_address(ibdev, sg);
1385 unsigned int dma_len = ib_sg_dma_len(ibdev, sg);
Bart Van Assche3ae95da2015-08-10 17:08:18 -07001386 unsigned int len = 0;
David Dillow8f26c9f2011-01-14 19:45:50 -05001387 int ret;
Roland Dreierf5358a12006-06-17 20:37:29 -07001388
Bart Van Assche3ae95da2015-08-10 17:08:18 -07001389 WARN_ON_ONCE(!dma_len);
Ishai Rabinovitz559ce8f2006-08-03 10:35:43 -07001390
David Dillow8f26c9f2011-01-14 19:45:50 -05001391 while (dma_len) {
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001392 unsigned offset = dma_addr & ~dev->mr_page_mask;
1393 if (state->npages == dev->max_pages_per_mr || offset != 0) {
Sagi Grimbergf7f7aab2015-10-13 19:11:39 +03001394 ret = srp_map_finish_fmr(state, ch);
David Dillow8f26c9f2011-01-14 19:45:50 -05001395 if (ret)
1396 return ret;
Roland Dreierf5358a12006-06-17 20:37:29 -07001397 }
1398
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001399 len = min_t(unsigned int, dma_len, dev->mr_page_size - offset);
David Dillow8f26c9f2011-01-14 19:45:50 -05001400
1401 if (!state->npages)
1402 state->base_dma_addr = dma_addr;
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001403 state->pages[state->npages++] = dma_addr & dev->mr_page_mask;
Bart Van Assche52ede082014-05-20 15:07:45 +02001404 state->dma_len += len;
David Dillow8f26c9f2011-01-14 19:45:50 -05001405 dma_addr += len;
1406 dma_len -= len;
Roland Dreierf5358a12006-06-17 20:37:29 -07001407 }
1408
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001409 /*
1410 * If the last entry of the MR wasn't a full page, then we need to
David Dillow8f26c9f2011-01-14 19:45:50 -05001411 * close it out and start a new one -- we can only merge at page
1412 * boundaries.
1413 */
Roland Dreierf5358a12006-06-17 20:37:29 -07001414 ret = 0;
Bart Van Assche0e0d3a42015-08-10 17:07:46 -07001415 if (len != dev->mr_page_size)
Sagi Grimbergf7f7aab2015-10-13 19:11:39 +03001416 ret = srp_map_finish_fmr(state, ch);
Roland Dreierf5358a12006-06-17 20:37:29 -07001417 return ret;
1418}
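/*
 * Worked example of the splitting above (illustrative, assuming
 * dev->mr_page_size = 4096): an S/G entry with dma_addr = 0x10200 and
 * dma_len = 0x1e00 is consumed in two passes:
 *
 *	pass 1: offset = 0x200, so any open mapping is flushed first;
 *	        len = 0xe00, base_dma_addr = 0x10200, page 0x10000 recorded
 *	pass 2: offset = 0, len = 0x1000, page 0x11000 recorded
 *
 * The final len equals mr_page_size, so the mapping is left open and the
 * next S/G entry may still be merged into it.
 */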
1419
Sagi Grimberg26630e82015-10-13 19:11:38 +03001420static int srp_map_sg_fmr(struct srp_map_state *state, struct srp_rdma_ch *ch,
1421 struct srp_request *req, struct scatterlist *scat,
1422 int count)
1423{
1424 struct scatterlist *sg;
1425 int i, ret;
1426
1427 state->desc = req->indirect_desc;
1428 state->pages = req->map_page;
1429 state->fmr.next = req->fmr_list;
1430 state->fmr.end = req->fmr_list + ch->target->cmd_sg_cnt;
1431
1432 for_each_sg(scat, sg, count, i) {
1433 ret = srp_map_sg_entry(state, ch, sg, i);
1434 if (ret)
1435 return ret;
1436 }
1437
Sagi Grimbergf7f7aab2015-10-13 19:11:39 +03001438 ret = srp_map_finish_fmr(state, ch);
Sagi Grimberg26630e82015-10-13 19:11:38 +03001439 if (ret)
1440 return ret;
1441
1442 req->nmdesc = state->nmdesc;
1443
1444 return 0;
1445}
1446
1447static int srp_map_sg_fr(struct srp_map_state *state, struct srp_rdma_ch *ch,
1448 struct srp_request *req, struct scatterlist *scat,
1449 int count)
1450{
Sagi Grimberg26630e82015-10-13 19:11:38 +03001451 state->desc = req->indirect_desc;
Sagi Grimbergf7f7aab2015-10-13 19:11:39 +03001452 state->fr.next = req->fr_list;
1453 state->fr.end = req->fr_list + ch->target->cmd_sg_cnt;
1454 state->sg = scat;
1455 state->sg_nents = scsi_sg_count(req->scmnd);
Sagi Grimberg26630e82015-10-13 19:11:38 +03001456
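	/*
	 * Each srp_map_finish_fr() call below registers a prefix of the
	 * remaining S/G list through a single memory region and returns the
	 * number of S/G entries it consumed, so this loop keeps posting
	 * registration work requests until the whole list is covered.
	 */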
Sagi Grimbergf7f7aab2015-10-13 19:11:39 +03001457 while (state->sg_nents) {
1458 int i, n;
1459
1460 n = srp_map_finish_fr(state, ch);
1461 if (unlikely(n < 0))
1462 return n;
1463
1464 state->sg_nents -= n;
1465 for (i = 0; i < n; i++)
1466 state->sg = sg_next(state->sg);
Sagi Grimberg26630e82015-10-13 19:11:38 +03001467 }
1468
Sagi Grimberg26630e82015-10-13 19:11:38 +03001469 req->nmdesc = state->nmdesc;
1470
1471 return 0;
1472}
1473
1474static int srp_map_sg_dma(struct srp_map_state *state, struct srp_rdma_ch *ch,
1475 struct srp_request *req, struct scatterlist *scat,
1476 int count)
Bart Van Assche76bc1e12014-05-20 15:05:21 +02001477{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001478 struct srp_target_port *target = ch->target;
Bart Van Assche76bc1e12014-05-20 15:05:21 +02001479 struct srp_device *dev = target->srp_host->srp_dev;
Bart Van Assche76bc1e12014-05-20 15:05:21 +02001480 struct scatterlist *sg;
Sagi Grimberg26630e82015-10-13 19:11:38 +03001481 int i;
Bart Van Assche76bc1e12014-05-20 15:05:21 +02001482
Sagi Grimberg26630e82015-10-13 19:11:38 +03001483 state->desc = req->indirect_desc;
1484 for_each_sg(scat, sg, count, i) {
1485 srp_map_desc(state, ib_sg_dma_address(dev->dev, sg),
1486 ib_sg_dma_len(dev->dev, sg),
1487 target->global_mr->rkey);
Bart Van Assche0e0d3a42015-08-10 17:07:46 -07001488 }
Bart Van Assche76bc1e12014-05-20 15:05:21 +02001489
Bart Van Assche52ede082014-05-20 15:07:45 +02001490 req->nmdesc = state->nmdesc;
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001491
Sagi Grimberg26630e82015-10-13 19:11:38 +03001492 return 0;
Bart Van Assche76bc1e12014-05-20 15:05:21 +02001493}
1494
Bart Van Assche330179f2015-08-10 17:09:05 -07001495/*
1496 * Register the indirect data buffer descriptor with the HCA.
1497 *
1498 * Note: since the indirect data buffer descriptor has been allocated with
1499 * kmalloc() it is guaranteed that this buffer is physically
1500 * contiguous.
1501 */
1502static int srp_map_idb(struct srp_rdma_ch *ch, struct srp_request *req,
1503 void **next_mr, void **end_mr, u32 idb_len,
1504 __be32 *idb_rkey)
1505{
1506 struct srp_target_port *target = ch->target;
1507 struct srp_device *dev = target->srp_host->srp_dev;
1508 struct srp_map_state state;
1509 struct srp_direct_buf idb_desc;
1510 u64 idb_pages[1];
Sagi Grimbergf7f7aab2015-10-13 19:11:39 +03001511 struct scatterlist idb_sg[1];
Bart Van Assche330179f2015-08-10 17:09:05 -07001512 int ret;
1513
1514 memset(&state, 0, sizeof(state));
1515 memset(&idb_desc, 0, sizeof(idb_desc));
1516 state.gen.next = next_mr;
1517 state.gen.end = end_mr;
1518 state.desc = &idb_desc;
Bart Van Assche330179f2015-08-10 17:09:05 -07001519 state.base_dma_addr = req->indirect_dma_addr;
1520 state.dma_len = idb_len;
Sagi Grimbergf7f7aab2015-10-13 19:11:39 +03001521
1522 if (dev->use_fast_reg) {
1523 state.sg = idb_sg;
1524 state.sg_nents = 1;
1525 sg_set_buf(idb_sg, req->indirect_desc, idb_len);
1526 idb_sg->dma_address = req->indirect_dma_addr; /* hack! */
Christoph Hellwigfc925512015-12-01 10:18:30 -08001527#ifdef CONFIG_NEED_SG_DMA_LENGTH
1528 idb_sg->dma_length = idb_sg->length; /* hack^2 */
1529#endif
Sagi Grimbergf7f7aab2015-10-13 19:11:39 +03001530 ret = srp_map_finish_fr(&state, ch);
1531 if (ret < 0)
1532 return ret;
1533 } else if (dev->use_fmr) {
1534 state.pages = idb_pages;
1535 state.pages[0] = (req->indirect_dma_addr &
1536 dev->mr_page_mask);
1537 state.npages = 1;
1538 ret = srp_map_finish_fmr(&state, ch);
1539 if (ret < 0)
1540 return ret;
1541 } else {
1542 return -EINVAL;
1543 }
Bart Van Assche330179f2015-08-10 17:09:05 -07001544
1545 *idb_rkey = idb_desc.key;
1546
Sagi Grimbergf7f7aab2015-10-13 19:11:39 +03001547 return 0;
Bart Van Assche330179f2015-08-10 17:09:05 -07001548}
1549
Bart Van Assche509c07b2014-10-30 14:48:30 +01001550static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch,
Roland Dreieraef9ec32005-11-02 14:07:13 -08001551 struct srp_request *req)
1552{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001553 struct srp_target_port *target = ch->target;
Bart Van Assche76bc1e12014-05-20 15:05:21 +02001554 struct scatterlist *scat;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001555 struct srp_cmd *cmd = req->cmd->buf;
Bart Van Assche330179f2015-08-10 17:09:05 -07001556 int len, nents, count, ret;
Ralph Campbell85507bc2006-12-12 14:30:55 -08001557 struct srp_device *dev;
1558 struct ib_device *ibdev;
David Dillow8f26c9f2011-01-14 19:45:50 -05001559 struct srp_map_state state;
1560 struct srp_indirect_buf *indirect_hdr;
Bart Van Assche330179f2015-08-10 17:09:05 -07001561 u32 idb_len, table_len;
1562 __be32 idb_rkey;
David Dillow8f26c9f2011-01-14 19:45:50 -05001563 u8 fmt;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001564
FUJITA Tomonoribb350d12007-05-26 02:28:25 +09001565 if (!scsi_sglist(scmnd) || scmnd->sc_data_direction == DMA_NONE)
Roland Dreieraef9ec32005-11-02 14:07:13 -08001566 return sizeof (struct srp_cmd);
1567
1568 if (scmnd->sc_data_direction != DMA_FROM_DEVICE &&
1569 scmnd->sc_data_direction != DMA_TO_DEVICE) {
David Dillow7aa54bd2008-01-07 18:23:41 -05001570 shost_printk(KERN_WARNING, target->scsi_host,
1571 PFX "Unhandled data direction %d\n",
1572 scmnd->sc_data_direction);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001573 return -EINVAL;
1574 }
1575
FUJITA Tomonoribb350d12007-05-26 02:28:25 +09001576 nents = scsi_sg_count(scmnd);
1577 scat = scsi_sglist(scmnd);
Roland Dreiercf368712006-03-24 15:47:26 -08001578
Greg Kroah-Hartman05321932008-03-06 00:13:36 +01001579 dev = target->srp_host->srp_dev;
Ralph Campbell85507bc2006-12-12 14:30:55 -08001580 ibdev = dev->dev;
1581
1582 count = ib_dma_map_sg(ibdev, scat, nents, scmnd->sc_data_direction);
David Dillow8f26c9f2011-01-14 19:45:50 -05001583 if (unlikely(count == 0))
1584 return -EIO;
Roland Dreierf5358a12006-06-17 20:37:29 -07001585
1586 fmt = SRP_DATA_DESC_DIRECT;
1587 len = sizeof (struct srp_cmd) + sizeof (struct srp_direct_buf);
Roland Dreiercf368712006-03-24 15:47:26 -08001588
Bart Van Assche03f6fb92015-08-10 17:09:36 -07001589 if (count == 1 && target->global_mr) {
Roland Dreierf5358a12006-06-17 20:37:29 -07001590 /*
1591 * The midlayer only generated a single gather/scatter
1592 * entry, or DMA mapping coalesced everything to a
1593 * single entry. So a direct descriptor along with
1594 * the DMA MR suffices.
1595 */
Roland Dreieraef9ec32005-11-02 14:07:13 -08001596 struct srp_direct_buf *buf = (void *) cmd->add_data;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001597
Ralph Campbell85507bc2006-12-12 14:30:55 -08001598 buf->va = cpu_to_be64(ib_sg_dma_address(ibdev, scat));
Bart Van Assche03f6fb92015-08-10 17:09:36 -07001599 buf->key = cpu_to_be32(target->global_mr->rkey);
Ralph Campbell85507bc2006-12-12 14:30:55 -08001600 buf->len = cpu_to_be32(ib_sg_dma_len(ibdev, scat));
Roland Dreiercf368712006-03-24 15:47:26 -08001601
Bart Van Assche52ede082014-05-20 15:07:45 +02001602 req->nmdesc = 0;
David Dillow8f26c9f2011-01-14 19:45:50 -05001603 goto map_complete;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001604 }
1605
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001606 /*
1607 * We have more than one scatter/gather entry, so build our indirect
1608 * descriptor table, trying to merge as many entries as we can.
David Dillow8f26c9f2011-01-14 19:45:50 -05001609 */
1610 indirect_hdr = (void *) cmd->add_data;
1611
David Dillowc07d4242011-01-16 13:57:10 -05001612 ib_dma_sync_single_for_cpu(ibdev, req->indirect_dma_addr,
1613 target->indirect_size, DMA_TO_DEVICE);
1614
David Dillow8f26c9f2011-01-14 19:45:50 -05001615 memset(&state, 0, sizeof(state));
Sagi Grimberg26630e82015-10-13 19:11:38 +03001616 if (dev->use_fast_reg)
1617 srp_map_sg_fr(&state, ch, req, scat, count);
1618 else if (dev->use_fmr)
1619 srp_map_sg_fmr(&state, ch, req, scat, count);
1620 else
1621 srp_map_sg_dma(&state, ch, req, scat, count);
David Dillow8f26c9f2011-01-14 19:45:50 -05001622
David Dillowc07d4242011-01-16 13:57:10 -05001623 /* We've mapped the request, now pull as much of the indirect
1624 * descriptor table as we can into the command buffer. If this
1625 * target is not using an external indirect table, we are
1626 * guaranteed to fit into the command, as the SCSI layer won't
1627 * give us more S/G entries than we allow.
David Dillow8f26c9f2011-01-14 19:45:50 -05001628 */
David Dillow8f26c9f2011-01-14 19:45:50 -05001629 if (state.ndesc == 1) {
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001630 /*
1631 * Memory registration collapsed the sg-list into one entry,
David Dillow8f26c9f2011-01-14 19:45:50 -05001632 * so use a direct descriptor.
1633 */
1634 struct srp_direct_buf *buf = (void *) cmd->add_data;
1635
David Dillowc07d4242011-01-16 13:57:10 -05001636 *buf = req->indirect_desc[0];
David Dillow8f26c9f2011-01-14 19:45:50 -05001637 goto map_complete;
1638 }
1639
David Dillowc07d4242011-01-16 13:57:10 -05001640 if (unlikely(target->cmd_sg_cnt < state.ndesc &&
1641 !target->allow_ext_sg)) {
1642 shost_printk(KERN_ERR, target->scsi_host,
1643 "Could not fit S/G list into SRP_CMD\n");
1644 return -EIO;
1645 }
1646
1647 count = min(state.ndesc, target->cmd_sg_cnt);
David Dillow8f26c9f2011-01-14 19:45:50 -05001648 table_len = state.ndesc * sizeof (struct srp_direct_buf);
Bart Van Assche330179f2015-08-10 17:09:05 -07001649 idb_len = sizeof(struct srp_indirect_buf) + table_len;
David Dillow8f26c9f2011-01-14 19:45:50 -05001650
1651 fmt = SRP_DATA_DESC_INDIRECT;
1652 len = sizeof(struct srp_cmd) + sizeof (struct srp_indirect_buf);
David Dillowc07d4242011-01-16 13:57:10 -05001653 len += count * sizeof (struct srp_direct_buf);
David Dillow8f26c9f2011-01-14 19:45:50 -05001654
David Dillowc07d4242011-01-16 13:57:10 -05001655 memcpy(indirect_hdr->desc_list, req->indirect_desc,
1656 count * sizeof (struct srp_direct_buf));
David Dillow8f26c9f2011-01-14 19:45:50 -05001657
Bart Van Assche03f6fb92015-08-10 17:09:36 -07001658 if (!target->global_mr) {
Bart Van Assche330179f2015-08-10 17:09:05 -07001659 ret = srp_map_idb(ch, req, state.gen.next, state.gen.end,
1660 idb_len, &idb_rkey);
1661 if (ret < 0)
1662 return ret;
1663 req->nmdesc++;
1664 } else {
Bart Van Asschea745f4f42015-12-01 10:18:47 -08001665 idb_rkey = cpu_to_be32(target->global_mr->rkey);
Bart Van Assche330179f2015-08-10 17:09:05 -07001666 }
1667
David Dillowc07d4242011-01-16 13:57:10 -05001668 indirect_hdr->table_desc.va = cpu_to_be64(req->indirect_dma_addr);
Bart Van Assche330179f2015-08-10 17:09:05 -07001669 indirect_hdr->table_desc.key = idb_rkey;
David Dillow8f26c9f2011-01-14 19:45:50 -05001670 indirect_hdr->table_desc.len = cpu_to_be32(table_len);
1671 indirect_hdr->len = cpu_to_be32(state.total_len);
1672
1673 if (scmnd->sc_data_direction == DMA_TO_DEVICE)
David Dillowc07d4242011-01-16 13:57:10 -05001674 cmd->data_out_desc_cnt = count;
David Dillow8f26c9f2011-01-14 19:45:50 -05001675 else
David Dillowc07d4242011-01-16 13:57:10 -05001676 cmd->data_in_desc_cnt = count;
1677
1678 ib_dma_sync_single_for_device(ibdev, req->indirect_dma_addr, table_len,
1679 DMA_TO_DEVICE);
David Dillow8f26c9f2011-01-14 19:45:50 -05001680
1681map_complete:
Roland Dreieraef9ec32005-11-02 14:07:13 -08001682 if (scmnd->sc_data_direction == DMA_TO_DEVICE)
1683 cmd->buf_fmt = fmt << 4;
1684 else
1685 cmd->buf_fmt = fmt;
1686
Roland Dreieraef9ec32005-11-02 14:07:13 -08001687 return len;
1688}
1689
David Dillow05a1d752010-10-08 14:48:14 -04001690/*
Bart Van Assche76c75b22010-11-26 14:37:47 -05001691 * Return an IU and possibly a credit to the free pool
1692 */
Bart Van Assche509c07b2014-10-30 14:48:30 +01001693static void srp_put_tx_iu(struct srp_rdma_ch *ch, struct srp_iu *iu,
Bart Van Assche76c75b22010-11-26 14:37:47 -05001694 enum srp_iu_type iu_type)
1695{
1696 unsigned long flags;
1697
Bart Van Assche509c07b2014-10-30 14:48:30 +01001698 spin_lock_irqsave(&ch->lock, flags);
1699 list_add(&iu->list, &ch->free_tx);
Bart Van Assche76c75b22010-11-26 14:37:47 -05001700 if (iu_type != SRP_IU_RSP)
Bart Van Assche509c07b2014-10-30 14:48:30 +01001701 ++ch->req_lim;
1702 spin_unlock_irqrestore(&ch->lock, flags);
Bart Van Assche76c75b22010-11-26 14:37:47 -05001703}
1704
1705/*
Bart Van Assche509c07b2014-10-30 14:48:30 +01001706 * Must be called with ch->lock held to protect req_lim and free_tx.
Bart Van Asschee9684672010-11-26 15:08:38 -05001707 * If IU is not sent, it must be returned using srp_put_tx_iu().
David Dillow05a1d752010-10-08 14:48:14 -04001708 *
1709 * Note:
1710 * An upper limit for the number of allocated information units for each
1711 * request type is:
1712 * - SRP_IU_CMD: SRP_CMD_SQ_SIZE, since the SCSI mid-layer never queues
1713 * more than Scsi_Host.can_queue requests.
1714 * - SRP_IU_TSK_MGMT: SRP_TSK_MGMT_SQ_SIZE.
1715 * - SRP_IU_RSP: 1, since a conforming SRP target never sends more than
1716 * one unanswered SRP request to an initiator.
1717 */
Bart Van Assche509c07b2014-10-30 14:48:30 +01001718static struct srp_iu *__srp_get_tx_iu(struct srp_rdma_ch *ch,
David Dillow05a1d752010-10-08 14:48:14 -04001719 enum srp_iu_type iu_type)
1720{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001721 struct srp_target_port *target = ch->target;
David Dillow05a1d752010-10-08 14:48:14 -04001722 s32 rsv = (iu_type == SRP_IU_TSK_MGMT) ? 0 : SRP_TSK_MGMT_SQ_SIZE;
1723 struct srp_iu *iu;
1724
Bart Van Assche509c07b2014-10-30 14:48:30 +01001725 srp_send_completion(ch->send_cq, ch);
David Dillow05a1d752010-10-08 14:48:14 -04001726
Bart Van Assche509c07b2014-10-30 14:48:30 +01001727 if (list_empty(&ch->free_tx))
David Dillow05a1d752010-10-08 14:48:14 -04001728 return NULL;
1729
1730 /* Initiator responses to target requests do not consume credits */
Bart Van Assche76c75b22010-11-26 14:37:47 -05001731 if (iu_type != SRP_IU_RSP) {
Bart Van Assche509c07b2014-10-30 14:48:30 +01001732 if (ch->req_lim <= rsv) {
Bart Van Assche76c75b22010-11-26 14:37:47 -05001733 ++target->zero_req_lim;
1734 return NULL;
1735 }
1736
Bart Van Assche509c07b2014-10-30 14:48:30 +01001737 --ch->req_lim;
David Dillow05a1d752010-10-08 14:48:14 -04001738 }
1739
Bart Van Assche509c07b2014-10-30 14:48:30 +01001740 iu = list_first_entry(&ch->free_tx, struct srp_iu, list);
Bart Van Assche76c75b22010-11-26 14:37:47 -05001741 list_del(&iu->list);
David Dillow05a1d752010-10-08 14:48:14 -04001742 return iu;
1743}
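/*
 * Typical calling pattern, as used by srp_queuecommand() below (the lock is
 * dropped again before the IU is actually posted):
 *
 *	spin_lock_irqsave(&ch->lock, flags);
 *	iu = __srp_get_tx_iu(ch, SRP_IU_CMD);
 *	spin_unlock_irqrestore(&ch->lock, flags);
 *	if (!iu)
 *		(back off, e.g. return SCSI_MLQUEUE_HOST_BUSY)
 */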
1744
Bart Van Assche509c07b2014-10-30 14:48:30 +01001745static int srp_post_send(struct srp_rdma_ch *ch, struct srp_iu *iu, int len)
David Dillow05a1d752010-10-08 14:48:14 -04001746{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001747 struct srp_target_port *target = ch->target;
David Dillow05a1d752010-10-08 14:48:14 -04001748 struct ib_sge list;
1749 struct ib_send_wr wr, *bad_wr;
David Dillow05a1d752010-10-08 14:48:14 -04001750
1751 list.addr = iu->dma;
1752 list.length = len;
David Dillow9af76272010-11-26 15:34:46 -05001753 list.lkey = target->lkey;
David Dillow05a1d752010-10-08 14:48:14 -04001754
1755 wr.next = NULL;
Bart Van Asschedcb4cb82010-11-26 13:22:48 -05001756 wr.wr_id = (uintptr_t) iu;
David Dillow05a1d752010-10-08 14:48:14 -04001757 wr.sg_list = &list;
1758 wr.num_sge = 1;
1759 wr.opcode = IB_WR_SEND;
1760 wr.send_flags = IB_SEND_SIGNALED;
1761
Bart Van Assche509c07b2014-10-30 14:48:30 +01001762 return ib_post_send(ch->qp, &wr, &bad_wr);
David Dillow05a1d752010-10-08 14:48:14 -04001763}
1764
Bart Van Assche509c07b2014-10-30 14:48:30 +01001765static int srp_post_recv(struct srp_rdma_ch *ch, struct srp_iu *iu)
Bart Van Asschec996bb42010-07-30 10:59:05 +00001766{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001767 struct srp_target_port *target = ch->target;
Bart Van Asschec996bb42010-07-30 10:59:05 +00001768 struct ib_recv_wr wr, *bad_wr;
Bart Van Asschedcb4cb82010-11-26 13:22:48 -05001769 struct ib_sge list;
Bart Van Asschec996bb42010-07-30 10:59:05 +00001770
1771 list.addr = iu->dma;
1772 list.length = iu->size;
David Dillow9af76272010-11-26 15:34:46 -05001773 list.lkey = target->lkey;
Bart Van Asschec996bb42010-07-30 10:59:05 +00001774
1775 wr.next = NULL;
Bart Van Asschedcb4cb82010-11-26 13:22:48 -05001776 wr.wr_id = (uintptr_t) iu;
Bart Van Asschec996bb42010-07-30 10:59:05 +00001777 wr.sg_list = &list;
1778 wr.num_sge = 1;
1779
Bart Van Assche509c07b2014-10-30 14:48:30 +01001780 return ib_post_recv(ch->qp, &wr, &bad_wr);
Bart Van Asschec996bb42010-07-30 10:59:05 +00001781}
1782
Bart Van Assche509c07b2014-10-30 14:48:30 +01001783static void srp_process_rsp(struct srp_rdma_ch *ch, struct srp_rsp *rsp)
Roland Dreieraef9ec32005-11-02 14:07:13 -08001784{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001785 struct srp_target_port *target = ch->target;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001786 struct srp_request *req;
1787 struct scsi_cmnd *scmnd;
1788 unsigned long flags;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001789
Roland Dreieraef9ec32005-11-02 14:07:13 -08001790 if (unlikely(rsp->tag & SRP_TAG_TSK_MGMT)) {
Bart Van Assche509c07b2014-10-30 14:48:30 +01001791 spin_lock_irqsave(&ch->lock, flags);
1792 ch->req_lim += be32_to_cpu(rsp->req_lim_delta);
1793 spin_unlock_irqrestore(&ch->lock, flags);
Bart Van Assche94a91742010-11-26 14:50:09 -05001794
Bart Van Assche509c07b2014-10-30 14:48:30 +01001795 ch->tsk_mgmt_status = -1;
David Dillowf8b6e312010-11-26 13:02:21 -05001796 if (be32_to_cpu(rsp->resp_data_len) >= 4)
Bart Van Assche509c07b2014-10-30 14:48:30 +01001797 ch->tsk_mgmt_status = rsp->data[3];
1798 complete(&ch->tsk_mgmt_done);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001799 } else {
Bart Van Assche77f2c1a2014-10-02 15:29:25 +02001800 scmnd = scsi_host_find_tag(target->scsi_host, rsp->tag);
1801 if (scmnd) {
1802 req = (void *)scmnd->host_scribble;
1803 scmnd = srp_claim_req(ch, req, NULL, scmnd);
1804 }
Bart Van Assche22032992012-08-14 13:18:53 +00001805 if (!scmnd) {
David Dillow7aa54bd2008-01-07 18:23:41 -05001806 shost_printk(KERN_ERR, target->scsi_host,
Bart Van Assched92c0da2014-10-06 17:14:36 +02001807 "Null scmnd for RSP w/tag %#016llx received on ch %td / QP %#x\n",
1808 rsp->tag, ch - target->ch, ch->qp->qp_num);
Bart Van Assche22032992012-08-14 13:18:53 +00001809
Bart Van Assche509c07b2014-10-30 14:48:30 +01001810 spin_lock_irqsave(&ch->lock, flags);
1811 ch->req_lim += be32_to_cpu(rsp->req_lim_delta);
1812 spin_unlock_irqrestore(&ch->lock, flags);
Bart Van Assche22032992012-08-14 13:18:53 +00001813
1814 return;
1815 }
Roland Dreieraef9ec32005-11-02 14:07:13 -08001816 scmnd->result = rsp->status;
1817
1818 if (rsp->flags & SRP_RSP_FLAG_SNSVALID) {
1819 memcpy(scmnd->sense_buffer, rsp->data +
1820 be32_to_cpu(rsp->resp_data_len),
1821 min_t(int, be32_to_cpu(rsp->sense_data_len),
1822 SCSI_SENSE_BUFFERSIZE));
1823 }
1824
Bart Van Asschee7145312014-07-09 15:57:51 +02001825 if (unlikely(rsp->flags & SRP_RSP_FLAG_DIUNDER))
FUJITA Tomonoribb350d12007-05-26 02:28:25 +09001826 scsi_set_resid(scmnd, be32_to_cpu(rsp->data_in_res_cnt));
Bart Van Asschee7145312014-07-09 15:57:51 +02001827 else if (unlikely(rsp->flags & SRP_RSP_FLAG_DIOVER))
1828 scsi_set_resid(scmnd, -be32_to_cpu(rsp->data_in_res_cnt));
1829 else if (unlikely(rsp->flags & SRP_RSP_FLAG_DOUNDER))
1830 scsi_set_resid(scmnd, be32_to_cpu(rsp->data_out_res_cnt));
1831 else if (unlikely(rsp->flags & SRP_RSP_FLAG_DOOVER))
1832 scsi_set_resid(scmnd, -be32_to_cpu(rsp->data_out_res_cnt));
Roland Dreieraef9ec32005-11-02 14:07:13 -08001833
Bart Van Assche509c07b2014-10-30 14:48:30 +01001834 srp_free_req(ch, req, scmnd,
Bart Van Assche22032992012-08-14 13:18:53 +00001835 be32_to_cpu(rsp->req_lim_delta));
1836
David Dillowf8b6e312010-11-26 13:02:21 -05001837 scmnd->host_scribble = NULL;
1838 scmnd->scsi_done(scmnd);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001839 }
Roland Dreieraef9ec32005-11-02 14:07:13 -08001840}
1841
Bart Van Assche509c07b2014-10-30 14:48:30 +01001842static int srp_response_common(struct srp_rdma_ch *ch, s32 req_delta,
David Dillowbb125882010-10-08 14:40:47 -04001843 void *rsp, int len)
1844{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001845 struct srp_target_port *target = ch->target;
Bart Van Assche76c75b22010-11-26 14:37:47 -05001846 struct ib_device *dev = target->srp_host->srp_dev->dev;
David Dillowbb125882010-10-08 14:40:47 -04001847 unsigned long flags;
1848 struct srp_iu *iu;
Bart Van Assche76c75b22010-11-26 14:37:47 -05001849 int err;
David Dillowbb125882010-10-08 14:40:47 -04001850
Bart Van Assche509c07b2014-10-30 14:48:30 +01001851 spin_lock_irqsave(&ch->lock, flags);
1852 ch->req_lim += req_delta;
1853 iu = __srp_get_tx_iu(ch, SRP_IU_RSP);
1854 spin_unlock_irqrestore(&ch->lock, flags);
Bart Van Assche76c75b22010-11-26 14:37:47 -05001855
David Dillowbb125882010-10-08 14:40:47 -04001856 if (!iu) {
1857 shost_printk(KERN_ERR, target->scsi_host, PFX
1858 "no IU available to send response\n");
Bart Van Assche76c75b22010-11-26 14:37:47 -05001859 return 1;
David Dillowbb125882010-10-08 14:40:47 -04001860 }
1861
1862 ib_dma_sync_single_for_cpu(dev, iu->dma, len, DMA_TO_DEVICE);
1863 memcpy(iu->buf, rsp, len);
1864 ib_dma_sync_single_for_device(dev, iu->dma, len, DMA_TO_DEVICE);
1865
Bart Van Assche509c07b2014-10-30 14:48:30 +01001866 err = srp_post_send(ch, iu, len);
Bart Van Assche76c75b22010-11-26 14:37:47 -05001867 if (err) {
David Dillowbb125882010-10-08 14:40:47 -04001868 shost_printk(KERN_ERR, target->scsi_host, PFX
1869 "unable to post response: %d\n", err);
Bart Van Assche509c07b2014-10-30 14:48:30 +01001870 srp_put_tx_iu(ch, iu, SRP_IU_RSP);
Bart Van Assche76c75b22010-11-26 14:37:47 -05001871 }
David Dillowbb125882010-10-08 14:40:47 -04001872
David Dillowbb125882010-10-08 14:40:47 -04001873 return err;
1874}
1875
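/*
 * SRP flow control in brief: the target grants new request credits through
 * the req_lim_delta field of SRP_RSP, SRP_CRED_REQ and SRP_AER_REQ
 * messages. SRP_CRED_REQ is acknowledged with an SRP_CRED_RSP, which is
 * sent as an SRP_IU_RSP and therefore does not itself consume a credit
 * (see __srp_get_tx_iu() above).
 */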
Bart Van Assche509c07b2014-10-30 14:48:30 +01001876static void srp_process_cred_req(struct srp_rdma_ch *ch,
David Dillowbb125882010-10-08 14:40:47 -04001877 struct srp_cred_req *req)
1878{
1879 struct srp_cred_rsp rsp = {
1880 .opcode = SRP_CRED_RSP,
1881 .tag = req->tag,
1882 };
1883 s32 delta = be32_to_cpu(req->req_lim_delta);
1884
Bart Van Assche509c07b2014-10-30 14:48:30 +01001885 if (srp_response_common(ch, delta, &rsp, sizeof(rsp)))
1886 shost_printk(KERN_ERR, ch->target->scsi_host, PFX
David Dillowbb125882010-10-08 14:40:47 -04001887 "problems processing SRP_CRED_REQ\n");
1888}
1889
Bart Van Assche509c07b2014-10-30 14:48:30 +01001890static void srp_process_aer_req(struct srp_rdma_ch *ch,
David Dillowbb125882010-10-08 14:40:47 -04001891 struct srp_aer_req *req)
1892{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001893 struct srp_target_port *target = ch->target;
David Dillowbb125882010-10-08 14:40:47 -04001894 struct srp_aer_rsp rsp = {
1895 .opcode = SRP_AER_RSP,
1896 .tag = req->tag,
1897 };
1898 s32 delta = be32_to_cpu(req->req_lim_delta);
1899
1900 shost_printk(KERN_ERR, target->scsi_host, PFX
Bart Van Assche985aa492015-05-18 13:27:14 +02001901 "ignoring AER for LUN %llu\n", scsilun_to_int(&req->lun));
David Dillowbb125882010-10-08 14:40:47 -04001902
Bart Van Assche509c07b2014-10-30 14:48:30 +01001903 if (srp_response_common(ch, delta, &rsp, sizeof(rsp)))
David Dillowbb125882010-10-08 14:40:47 -04001904 shost_printk(KERN_ERR, target->scsi_host, PFX
1905 "problems processing SRP_AER_REQ\n");
1906}
1907
Bart Van Assche509c07b2014-10-30 14:48:30 +01001908static void srp_handle_recv(struct srp_rdma_ch *ch, struct ib_wc *wc)
Roland Dreieraef9ec32005-11-02 14:07:13 -08001909{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001910 struct srp_target_port *target = ch->target;
Bart Van Asschedcb4cb82010-11-26 13:22:48 -05001911 struct ib_device *dev = target->srp_host->srp_dev->dev;
Roland Dreier737b94e2011-05-23 11:30:04 -07001912 struct srp_iu *iu = (struct srp_iu *) (uintptr_t) wc->wr_id;
Bart Van Asschec996bb42010-07-30 10:59:05 +00001913 int res;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001914 u8 opcode;
1915
Bart Van Assche509c07b2014-10-30 14:48:30 +01001916 ib_dma_sync_single_for_cpu(dev, iu->dma, ch->max_ti_iu_len,
Ralph Campbell85507bc2006-12-12 14:30:55 -08001917 DMA_FROM_DEVICE);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001918
1919 opcode = *(u8 *) iu->buf;
1920
1921 if (0) {
David Dillow7aa54bd2008-01-07 18:23:41 -05001922 shost_printk(KERN_ERR, target->scsi_host,
1923 PFX "recv completion, opcode 0x%02x\n", opcode);
Bart Van Assche7a700812010-07-29 15:56:37 +00001924 print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 8, 1,
1925 iu->buf, wc->byte_len, true);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001926 }
1927
1928 switch (opcode) {
1929 case SRP_RSP:
Bart Van Assche509c07b2014-10-30 14:48:30 +01001930 srp_process_rsp(ch, iu->buf);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001931 break;
1932
David Dillowbb125882010-10-08 14:40:47 -04001933 case SRP_CRED_REQ:
Bart Van Assche509c07b2014-10-30 14:48:30 +01001934 srp_process_cred_req(ch, iu->buf);
David Dillowbb125882010-10-08 14:40:47 -04001935 break;
1936
1937 case SRP_AER_REQ:
Bart Van Assche509c07b2014-10-30 14:48:30 +01001938 srp_process_aer_req(ch, iu->buf);
David Dillowbb125882010-10-08 14:40:47 -04001939 break;
1940
Roland Dreieraef9ec32005-11-02 14:07:13 -08001941 case SRP_T_LOGOUT:
1942 /* XXX Handle target logout */
David Dillow7aa54bd2008-01-07 18:23:41 -05001943 shost_printk(KERN_WARNING, target->scsi_host,
1944 PFX "Got target logout request\n");
Roland Dreieraef9ec32005-11-02 14:07:13 -08001945 break;
1946
1947 default:
David Dillow7aa54bd2008-01-07 18:23:41 -05001948 shost_printk(KERN_WARNING, target->scsi_host,
1949 PFX "Unhandled SRP opcode 0x%02x\n", opcode);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001950 break;
1951 }
1952
Bart Van Assche509c07b2014-10-30 14:48:30 +01001953 ib_dma_sync_single_for_device(dev, iu->dma, ch->max_ti_iu_len,
Ralph Campbell85507bc2006-12-12 14:30:55 -08001954 DMA_FROM_DEVICE);
Bart Van Asschec996bb42010-07-30 10:59:05 +00001955
Bart Van Assche509c07b2014-10-30 14:48:30 +01001956 res = srp_post_recv(ch, iu);
Bart Van Asschec996bb42010-07-30 10:59:05 +00001957 if (res != 0)
1958 shost_printk(KERN_ERR, target->scsi_host,
1959 PFX "Recv failed with error code %d\n", res);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001960}
1961
Bart Van Asschec1120f82013-10-26 14:35:08 +02001962/**
1963 * srp_tl_err_work() - handle a transport layer error
Bart Van Asscheaf246632014-05-20 15:04:21 +02001964 * @work: Work structure embedded in an SRP target port.
Bart Van Asschec1120f82013-10-26 14:35:08 +02001965 *
1966 * Note: This function may get invoked before the rport has been created,
1967 * hence the target->rport test.
1968 */
1969static void srp_tl_err_work(struct work_struct *work)
1970{
1971 struct srp_target_port *target;
1972
1973 target = container_of(work, struct srp_target_port, tl_err_work);
1974 if (target->rport)
1975 srp_start_tl_fail_timers(target->rport);
1976}
1977
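/*
 * Note on wr_id decoding: completions for regular sends and receives carry
 * the posted struct srp_iu pointer in wr_id, fast registration and local
 * invalidation work requests are tagged with the FAST_REG_WR_ID_MASK and
 * LOCAL_INV_WR_ID_MASK bits, and SRP_LAST_WR_ID identifies the final work
 * request posted while draining a QP, whose completion wakes up the waiter
 * via complete(&ch->done).
 */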
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001978static void srp_handle_qp_err(u64 wr_id, enum ib_wc_status wc_status,
Bart Van Assche7dad6b22014-10-21 18:00:35 +02001979 bool send_err, struct srp_rdma_ch *ch)
Bart Van Assche948d1e82011-09-03 09:25:42 +02001980{
Bart Van Assche7dad6b22014-10-21 18:00:35 +02001981 struct srp_target_port *target = ch->target;
1982
1983 if (wr_id == SRP_LAST_WR_ID) {
1984 complete(&ch->done);
1985 return;
1986 }
1987
Bart Van Asschec014c8c2015-05-18 13:23:57 +02001988 if (ch->connected && !target->qp_in_error) {
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001989 if (wr_id & LOCAL_INV_WR_ID_MASK) {
1990 shost_printk(KERN_ERR, target->scsi_host, PFX
Sagi Grimberg57363d92015-05-18 13:40:29 +03001991 "LOCAL_INV failed with status %s (%d)\n",
1992 ib_wc_status_msg(wc_status), wc_status);
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001993 } else if (wr_id & FAST_REG_WR_ID_MASK) {
1994 shost_printk(KERN_ERR, target->scsi_host, PFX
Sagi Grimberg57363d92015-05-18 13:40:29 +03001995 "FAST_REG_MR failed status %s (%d)\n",
1996 ib_wc_status_msg(wc_status), wc_status);
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001997 } else {
1998 shost_printk(KERN_ERR, target->scsi_host,
Sagi Grimberg57363d92015-05-18 13:40:29 +03001999 PFX "failed %s status %s (%d) for iu %p\n",
Bart Van Assche5cfb1782014-05-20 15:08:34 +02002000 send_err ? "send" : "receive",
Sagi Grimberg57363d92015-05-18 13:40:29 +03002001 ib_wc_status_msg(wc_status), wc_status,
2002 (void *)(uintptr_t)wr_id);
Bart Van Assche5cfb1782014-05-20 15:08:34 +02002003 }
Bart Van Asschec1120f82013-10-26 14:35:08 +02002004 queue_work(system_long_wq, &target->tl_err_work);
Bart Van Assche4f0af692012-11-26 11:16:40 +01002005 }
Bart Van Assche948d1e82011-09-03 09:25:42 +02002006 target->qp_in_error = true;
2007}
2008
Bart Van Assche509c07b2014-10-30 14:48:30 +01002009static void srp_recv_completion(struct ib_cq *cq, void *ch_ptr)
Roland Dreieraef9ec32005-11-02 14:07:13 -08002010{
Bart Van Assche509c07b2014-10-30 14:48:30 +01002011 struct srp_rdma_ch *ch = ch_ptr;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002012 struct ib_wc wc;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002013
2014 ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
2015 while (ib_poll_cq(cq, 1, &wc) > 0) {
Bart Van Assche948d1e82011-09-03 09:25:42 +02002016 if (likely(wc.status == IB_WC_SUCCESS)) {
Bart Van Assche509c07b2014-10-30 14:48:30 +01002017 srp_handle_recv(ch, &wc);
Bart Van Assche948d1e82011-09-03 09:25:42 +02002018 } else {
Bart Van Assche7dad6b22014-10-21 18:00:35 +02002019 srp_handle_qp_err(wc.wr_id, wc.status, false, ch);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002020 }
Bart Van Assche9c03dc92010-02-02 19:23:54 +00002021 }
2022}
2023
Bart Van Assche509c07b2014-10-30 14:48:30 +01002024static void srp_send_completion(struct ib_cq *cq, void *ch_ptr)
Bart Van Assche9c03dc92010-02-02 19:23:54 +00002025{
Bart Van Assche509c07b2014-10-30 14:48:30 +01002026 struct srp_rdma_ch *ch = ch_ptr;
Bart Van Assche9c03dc92010-02-02 19:23:54 +00002027 struct ib_wc wc;
Bart Van Asschedcb4cb82010-11-26 13:22:48 -05002028 struct srp_iu *iu;
Bart Van Assche9c03dc92010-02-02 19:23:54 +00002029
2030 while (ib_poll_cq(cq, 1, &wc) > 0) {
Bart Van Assche948d1e82011-09-03 09:25:42 +02002031 if (likely(wc.status == IB_WC_SUCCESS)) {
2032 iu = (struct srp_iu *) (uintptr_t) wc.wr_id;
Bart Van Assche509c07b2014-10-30 14:48:30 +01002033 list_add(&iu->list, &ch->free_tx);
Bart Van Assche948d1e82011-09-03 09:25:42 +02002034 } else {
Bart Van Assche7dad6b22014-10-21 18:00:35 +02002035 srp_handle_qp_err(wc.wr_id, wc.status, true, ch);
Bart Van Assche9c03dc92010-02-02 19:23:54 +00002036 }
Roland Dreieraef9ec32005-11-02 14:07:13 -08002037 }
2038}
2039
Bart Van Assche76c75b22010-11-26 14:37:47 -05002040static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
Roland Dreieraef9ec32005-11-02 14:07:13 -08002041{
Bart Van Assche76c75b22010-11-26 14:37:47 -05002042 struct srp_target_port *target = host_to_target(shost);
Bart Van Asschea95cadb2013-10-26 14:37:17 +02002043 struct srp_rport *rport = target->rport;
Bart Van Assche509c07b2014-10-30 14:48:30 +01002044 struct srp_rdma_ch *ch;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002045 struct srp_request *req;
2046 struct srp_iu *iu;
2047 struct srp_cmd *cmd;
Ralph Campbell85507bc2006-12-12 14:30:55 -08002048 struct ib_device *dev;
Bart Van Assche76c75b22010-11-26 14:37:47 -05002049 unsigned long flags;
Bart Van Assche77f2c1a2014-10-02 15:29:25 +02002050 u32 tag;
2051 u16 idx;
Bart Van Assched1b42892014-05-20 15:07:20 +02002052 int len, ret;
Bart Van Asschea95cadb2013-10-26 14:37:17 +02002053 const bool in_scsi_eh = !in_interrupt() && current == shost->ehandler;
2054
2055 /*
2056 * The SCSI EH thread is the only context from which srp_queuecommand()
2057 * can get invoked for blocked devices (SDEV_BLOCK /
2058 * SDEV_CREATED_BLOCK). Avoid racing with srp_reconnect_rport() by
2059 * locking the rport mutex if invoked from inside the SCSI EH.
2060 */
2061 if (in_scsi_eh)
2062 mutex_lock(&rport->mutex);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002063
Bart Van Assched1b42892014-05-20 15:07:20 +02002064 scmnd->result = srp_chkready(target->rport);
2065 if (unlikely(scmnd->result))
2066 goto err;
Bart Van Assche2ce19e72013-02-21 17:20:00 +00002067
Bart Van Assche77f2c1a2014-10-02 15:29:25 +02002068 WARN_ON_ONCE(scmnd->request->tag < 0);
2069 tag = blk_mq_unique_tag(scmnd->request);
Bart Van Assched92c0da2014-10-06 17:14:36 +02002070 ch = &target->ch[blk_mq_unique_tag_to_hwq(tag)];
Bart Van Assche77f2c1a2014-10-02 15:29:25 +02002071 idx = blk_mq_unique_tag_to_tag(tag);
2072 WARN_ONCE(idx >= target->req_ring_size, "%s: tag %#x: idx %d >= %d\n",
2073 dev_name(&shost->shost_gendev), tag, idx,
2074 target->req_ring_size);
Bart Van Assche509c07b2014-10-30 14:48:30 +01002075
2076 spin_lock_irqsave(&ch->lock, flags);
2077 iu = __srp_get_tx_iu(ch, SRP_IU_CMD);
Bart Van Assche509c07b2014-10-30 14:48:30 +01002078 spin_unlock_irqrestore(&ch->lock, flags);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002079
Bart Van Assche77f2c1a2014-10-02 15:29:25 +02002080 if (!iu)
2081 goto err;
2082
2083 req = &ch->req_ring[idx];
Greg Kroah-Hartman05321932008-03-06 00:13:36 +01002084 dev = target->srp_host->srp_dev->dev;
David Dillow49248642011-01-14 18:23:24 -05002085 ib_dma_sync_single_for_cpu(dev, iu->dma, target->max_iu_len,
Ralph Campbell85507bc2006-12-12 14:30:55 -08002086 DMA_TO_DEVICE);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002087
David Dillowf8b6e312010-11-26 13:02:21 -05002088 scmnd->host_scribble = (void *) req;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002089
2090 cmd = iu->buf;
2091 memset(cmd, 0, sizeof *cmd);
2092
2093 cmd->opcode = SRP_CMD;
Bart Van Assche985aa492015-05-18 13:27:14 +02002094 int_to_scsilun(scmnd->device->lun, &cmd->lun);
Bart Van Assche77f2c1a2014-10-02 15:29:25 +02002095 cmd->tag = tag;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002096 memcpy(cmd->cdb, scmnd->cmnd, scmnd->cmd_len);
2097
Roland Dreieraef9ec32005-11-02 14:07:13 -08002098 req->scmnd = scmnd;
2099 req->cmd = iu;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002100
Bart Van Assche509c07b2014-10-30 14:48:30 +01002101 len = srp_map_data(scmnd, ch, req);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002102 if (len < 0) {
David Dillow7aa54bd2008-01-07 18:23:41 -05002103 shost_printk(KERN_ERR, target->scsi_host,
Bart Van Assched1b42892014-05-20 15:07:20 +02002104 PFX "Failed to map data (%d)\n", len);
2105 /*
2106 * If we ran out of memory descriptors (-ENOMEM) because an
2107 * application is queuing many requests with more than
Bart Van Assche52ede082014-05-20 15:07:45 +02002108 * max_pages_per_mr sg-list elements, tell the SCSI mid-layer
Bart Van Assched1b42892014-05-20 15:07:20 +02002109 * to reduce queue depth temporarily.
2110 */
2111 scmnd->result = len == -ENOMEM ?
2112 DID_OK << 16 | QUEUE_FULL << 1 : DID_ERROR << 16;
Bart Van Assche76c75b22010-11-26 14:37:47 -05002113 goto err_iu;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002114 }
2115
David Dillow49248642011-01-14 18:23:24 -05002116 ib_dma_sync_single_for_device(dev, iu->dma, target->max_iu_len,
Ralph Campbell85507bc2006-12-12 14:30:55 -08002117 DMA_TO_DEVICE);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002118
Bart Van Assche509c07b2014-10-30 14:48:30 +01002119 if (srp_post_send(ch, iu, len)) {
David Dillow7aa54bd2008-01-07 18:23:41 -05002120 shost_printk(KERN_ERR, target->scsi_host, PFX "Send failed\n");
Roland Dreieraef9ec32005-11-02 14:07:13 -08002121 goto err_unmap;
2122 }
2123
Bart Van Assched1b42892014-05-20 15:07:20 +02002124 ret = 0;
2125
Bart Van Asschea95cadb2013-10-26 14:37:17 +02002126unlock_rport:
2127 if (in_scsi_eh)
2128 mutex_unlock(&rport->mutex);
2129
Bart Van Assched1b42892014-05-20 15:07:20 +02002130 return ret;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002131
2132err_unmap:
Bart Van Assche509c07b2014-10-30 14:48:30 +01002133 srp_unmap_data(scmnd, ch, req);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002134
Bart Van Assche76c75b22010-11-26 14:37:47 -05002135err_iu:
Bart Van Assche509c07b2014-10-30 14:48:30 +01002136 srp_put_tx_iu(ch, iu, SRP_IU_CMD);
Bart Van Assche76c75b22010-11-26 14:37:47 -05002137
Bart Van Assche024ca902014-05-20 15:03:49 +02002138 /*
2139 * Ensure that the loops that iterate over the request ring cannot
2140 * encounter a dangling SCSI command pointer.
2141 */
2142 req->scmnd = NULL;
2143
Bart Van Assched1b42892014-05-20 15:07:20 +02002144err:
2145 if (scmnd->result) {
2146 scmnd->scsi_done(scmnd);
2147 ret = 0;
2148 } else {
2149 ret = SCSI_MLQUEUE_HOST_BUSY;
2150 }
Bart Van Asschea95cadb2013-10-26 14:37:17 +02002151
Bart Van Assched1b42892014-05-20 15:07:20 +02002152 goto unlock_rport;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002153}
2154
Bart Van Assche4d73f952013-10-26 14:40:37 +02002155/*
2156 * Note: the resources allocated in this function are freed in
Bart Van Assche509c07b2014-10-30 14:48:30 +01002157 * srp_free_ch_ib().
Bart Van Assche4d73f952013-10-26 14:40:37 +02002158 */
Bart Van Assche509c07b2014-10-30 14:48:30 +01002159static int srp_alloc_iu_bufs(struct srp_rdma_ch *ch)
Roland Dreieraef9ec32005-11-02 14:07:13 -08002160{
Bart Van Assche509c07b2014-10-30 14:48:30 +01002161 struct srp_target_port *target = ch->target;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002162 int i;
2163
Bart Van Assche509c07b2014-10-30 14:48:30 +01002164 ch->rx_ring = kcalloc(target->queue_size, sizeof(*ch->rx_ring),
2165 GFP_KERNEL);
2166 if (!ch->rx_ring)
Bart Van Assche4d73f952013-10-26 14:40:37 +02002167 goto err_no_ring;
Bart Van Assche509c07b2014-10-30 14:48:30 +01002168 ch->tx_ring = kcalloc(target->queue_size, sizeof(*ch->tx_ring),
2169 GFP_KERNEL);
2170 if (!ch->tx_ring)
Bart Van Assche4d73f952013-10-26 14:40:37 +02002171 goto err_no_ring;
2172
2173 for (i = 0; i < target->queue_size; ++i) {
Bart Van Assche509c07b2014-10-30 14:48:30 +01002174 ch->rx_ring[i] = srp_alloc_iu(target->srp_host,
2175 ch->max_ti_iu_len,
2176 GFP_KERNEL, DMA_FROM_DEVICE);
2177 if (!ch->rx_ring[i])
Roland Dreieraef9ec32005-11-02 14:07:13 -08002178 goto err;
2179 }
2180
Bart Van Assche4d73f952013-10-26 14:40:37 +02002181 for (i = 0; i < target->queue_size; ++i) {
Bart Van Assche509c07b2014-10-30 14:48:30 +01002182 ch->tx_ring[i] = srp_alloc_iu(target->srp_host,
2183 target->max_iu_len,
2184 GFP_KERNEL, DMA_TO_DEVICE);
2185 if (!ch->tx_ring[i])
Roland Dreieraef9ec32005-11-02 14:07:13 -08002186 goto err;
Bart Van Asschedcb4cb82010-11-26 13:22:48 -05002187
Bart Van Assche509c07b2014-10-30 14:48:30 +01002188 list_add(&ch->tx_ring[i]->list, &ch->free_tx);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002189 }
2190
2191 return 0;
2192
2193err:
Bart Van Assche4d73f952013-10-26 14:40:37 +02002194 for (i = 0; i < target->queue_size; ++i) {
Bart Van Assche509c07b2014-10-30 14:48:30 +01002195 srp_free_iu(target->srp_host, ch->rx_ring[i]);
2196 srp_free_iu(target->srp_host, ch->tx_ring[i]);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002197 }
2198
Bart Van Assche4d73f952013-10-26 14:40:37 +02002199
2200err_no_ring:
Bart Van Assche509c07b2014-10-30 14:48:30 +01002201 kfree(ch->tx_ring);
2202 ch->tx_ring = NULL;
2203 kfree(ch->rx_ring);
2204 ch->rx_ring = NULL;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002205
2206 return -ENOMEM;
2207}
2208
Bart Van Asschec9b03c12011-09-03 09:34:48 +02002209static uint32_t srp_compute_rq_tmo(struct ib_qp_attr *qp_attr, int attr_mask)
2210{
2211 uint64_t T_tr_ns, max_compl_time_ms;
2212 uint32_t rq_tmo_jiffies;
2213
2214 /*
2215 * According to section 11.2.4.2 in the IBTA spec (Modify Queue Pair,
2216 * table 91), both the QP timeout and the retry count have to be set
2217 * for RC QPs during the RTR to RTS transition.
2218 */
2219 WARN_ON_ONCE((attr_mask & (IB_QP_TIMEOUT | IB_QP_RETRY_CNT)) !=
2220 (IB_QP_TIMEOUT | IB_QP_RETRY_CNT));
2221
2222 /*
2223 * Set target->rq_tmo_jiffies to one second more than the largest time
2224 * it can take before an error completion is generated. See also
2225 * C9-140..142 in the IBTA spec for more information about how to
2226 * convert the QP Local ACK Timeout value to nanoseconds.
2227 */
2228 T_tr_ns = 4096 * (1ULL << qp_attr->timeout);
2229 max_compl_time_ms = qp_attr->retry_cnt * 4 * T_tr_ns;
2230 do_div(max_compl_time_ms, NSEC_PER_MSEC);
2231 rq_tmo_jiffies = msecs_to_jiffies(max_compl_time_ms + 1000);
2232
2233 return rq_tmo_jiffies;
2234}
2235
David Dillow961e0be2011-01-14 17:32:07 -05002236static void srp_cm_rep_handler(struct ib_cm_id *cm_id,
Bart Van Asschee6300cb2015-07-31 14:12:48 -07002237 const struct srp_login_rsp *lrsp,
Bart Van Assche509c07b2014-10-30 14:48:30 +01002238 struct srp_rdma_ch *ch)
David Dillow961e0be2011-01-14 17:32:07 -05002239{
Bart Van Assche509c07b2014-10-30 14:48:30 +01002240 struct srp_target_port *target = ch->target;
David Dillow961e0be2011-01-14 17:32:07 -05002241 struct ib_qp_attr *qp_attr = NULL;
2242 int attr_mask = 0;
2243 int ret;
2244 int i;
2245
2246 if (lrsp->opcode == SRP_LOGIN_RSP) {
Bart Van Assche509c07b2014-10-30 14:48:30 +01002247 ch->max_ti_iu_len = be32_to_cpu(lrsp->max_ti_iu_len);
2248 ch->req_lim = be32_to_cpu(lrsp->req_lim_delta);
David Dillow961e0be2011-01-14 17:32:07 -05002249
2250 /*
2251 * Reserve credits for task management so we don't
2252 * bounce requests back to the SCSI mid-layer.
2253 */
2254 target->scsi_host->can_queue
Bart Van Assche509c07b2014-10-30 14:48:30 +01002255 = min(ch->req_lim - SRP_TSK_MGMT_SQ_SIZE,
David Dillow961e0be2011-01-14 17:32:07 -05002256 target->scsi_host->can_queue);
Bart Van Assche4d73f952013-10-26 14:40:37 +02002257 target->scsi_host->cmd_per_lun
2258 = min_t(int, target->scsi_host->can_queue,
2259 target->scsi_host->cmd_per_lun);
David Dillow961e0be2011-01-14 17:32:07 -05002260 } else {
2261 shost_printk(KERN_WARNING, target->scsi_host,
2262 PFX "Unhandled RSP opcode %#x\n", lrsp->opcode);
2263 ret = -ECONNRESET;
2264 goto error;
2265 }
2266
Bart Van Assche509c07b2014-10-30 14:48:30 +01002267 if (!ch->rx_ring) {
2268 ret = srp_alloc_iu_bufs(ch);
David Dillow961e0be2011-01-14 17:32:07 -05002269 if (ret)
2270 goto error;
2271 }
2272
2273 ret = -ENOMEM;
2274 qp_attr = kmalloc(sizeof *qp_attr, GFP_KERNEL);
2275 if (!qp_attr)
2276 goto error;
2277
2278 qp_attr->qp_state = IB_QPS_RTR;
2279 ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
2280 if (ret)
2281 goto error_free;
2282
Bart Van Assche509c07b2014-10-30 14:48:30 +01002283 ret = ib_modify_qp(ch->qp, qp_attr, attr_mask);
David Dillow961e0be2011-01-14 17:32:07 -05002284 if (ret)
2285 goto error_free;
2286
Bart Van Assche4d73f952013-10-26 14:40:37 +02002287 for (i = 0; i < target->queue_size; i++) {
Bart Van Assche509c07b2014-10-30 14:48:30 +01002288 struct srp_iu *iu = ch->rx_ring[i];
2289
2290 ret = srp_post_recv(ch, iu);
David Dillow961e0be2011-01-14 17:32:07 -05002291 if (ret)
2292 goto error_free;
2293 }
2294
2295 qp_attr->qp_state = IB_QPS_RTS;
2296 ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
2297 if (ret)
2298 goto error_free;
2299
Bart Van Asschec9b03c12011-09-03 09:34:48 +02002300 target->rq_tmo_jiffies = srp_compute_rq_tmo(qp_attr, attr_mask);
2301
Bart Van Assche509c07b2014-10-30 14:48:30 +01002302 ret = ib_modify_qp(ch->qp, qp_attr, attr_mask);
David Dillow961e0be2011-01-14 17:32:07 -05002303 if (ret)
2304 goto error_free;
2305
2306 ret = ib_send_cm_rtu(cm_id, NULL, 0);
2307
2308error_free:
2309 kfree(qp_attr);
2310
2311error:
Bart Van Assche509c07b2014-10-30 14:48:30 +01002312 ch->status = ret;
David Dillow961e0be2011-01-14 17:32:07 -05002313}
2314
Roland Dreieraef9ec32005-11-02 14:07:13 -08002315static void srp_cm_rej_handler(struct ib_cm_id *cm_id,
2316 struct ib_cm_event *event,
Bart Van Assche509c07b2014-10-30 14:48:30 +01002317 struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	struct Scsi_Host *shost = target->scsi_host;
	struct ib_class_port_info *cpi;
	int opcode;

	switch (event->param.rej_rcvd.reason) {
	case IB_CM_REJ_PORT_CM_REDIRECT:
		cpi = event->param.rej_rcvd.ari;
		ch->path.dlid = cpi->redirect_lid;
		ch->path.pkey = cpi->redirect_pkey;
		cm_id->remote_cm_qpn = be32_to_cpu(cpi->redirect_qp) & 0x00ffffff;
		memcpy(ch->path.dgid.raw, cpi->redirect_gid, 16);

		ch->status = ch->path.dlid ?
			SRP_DLID_REDIRECT : SRP_PORT_REDIRECT;
		break;

	case IB_CM_REJ_PORT_REDIRECT:
		if (srp_target_is_topspin(target)) {
			/*
			 * Topspin/Cisco SRP gateways incorrectly send
			 * reject reason code 25 when they mean 24
			 * (port redirect).
			 */
			memcpy(ch->path.dgid.raw,
			       event->param.rej_rcvd.ari, 16);

			shost_printk(KERN_DEBUG, shost,
				     PFX "Topspin/Cisco redirect to target port GID %016llx%016llx\n",
				     be64_to_cpu(ch->path.dgid.global.subnet_prefix),
				     be64_to_cpu(ch->path.dgid.global.interface_id));

			ch->status = SRP_PORT_REDIRECT;
		} else {
			shost_printk(KERN_WARNING, shost,
				     "  REJ reason: IB_CM_REJ_PORT_REDIRECT\n");
			ch->status = -ECONNRESET;
		}
		break;

	case IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID:
		shost_printk(KERN_WARNING, shost,
			     "  REJ reason: IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID\n");
		ch->status = -ECONNRESET;
		break;

	case IB_CM_REJ_CONSUMER_DEFINED:
		opcode = *(u8 *) event->private_data;
		if (opcode == SRP_LOGIN_REJ) {
			struct srp_login_rej *rej = event->private_data;
			u32 reason = be32_to_cpu(rej->reason);

			if (reason == SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE)
				shost_printk(KERN_WARNING, shost,
					     PFX "SRP_LOGIN_REJ: requested max_it_iu_len too large\n");
			else
				shost_printk(KERN_WARNING, shost, PFX
					     "SRP LOGIN from %pI6 to %pI6 REJECTED, reason 0x%08x\n",
					     target->sgid.raw,
					     target->orig_dgid.raw, reason);
		} else
			shost_printk(KERN_WARNING, shost,
				     "  REJ reason: IB_CM_REJ_CONSUMER_DEFINED, opcode 0x%02x\n",
				     opcode);
		ch->status = -ECONNRESET;
		break;

	case IB_CM_REJ_STALE_CONN:
		shost_printk(KERN_WARNING, shost, "  REJ reason: stale connection\n");
		ch->status = SRP_STALE_CONN;
		break;

	default:
		shost_printk(KERN_WARNING, shost, "  REJ reason 0x%x\n",
			     event->param.rej_rcvd.reason);
		ch->status = -ECONNRESET;
	}
}

static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
{
	struct srp_rdma_ch *ch = cm_id->context;
	struct srp_target_port *target = ch->target;
	int comp = 0;

	switch (event->event) {
	case IB_CM_REQ_ERROR:
		shost_printk(KERN_DEBUG, target->scsi_host,
			     PFX "Sending CM REQ failed\n");
		comp = 1;
		ch->status = -ECONNRESET;
		break;

	case IB_CM_REP_RECEIVED:
		comp = 1;
		srp_cm_rep_handler(cm_id, event->private_data, ch);
		break;

	case IB_CM_REJ_RECEIVED:
		shost_printk(KERN_DEBUG, target->scsi_host, PFX "REJ received\n");
		comp = 1;

		srp_cm_rej_handler(cm_id, event, ch);
		break;

	case IB_CM_DREQ_RECEIVED:
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "DREQ received - connection closed\n");
		ch->connected = false;
		if (ib_send_cm_drep(cm_id, NULL, 0))
			shost_printk(KERN_ERR, target->scsi_host,
				     PFX "Sending CM DREP failed\n");
		queue_work(system_long_wq, &target->tl_err_work);
		break;

	case IB_CM_TIMEWAIT_EXIT:
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "connection closed\n");
		comp = 1;

		ch->status = 0;
		break;

	case IB_CM_MRA_RECEIVED:
	case IB_CM_DREQ_ERROR:
	case IB_CM_DREP_RECEIVED:
		break;

	default:
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Unhandled CM event %d\n", event->event);
		break;
	}

	if (comp)
		complete(&ch->done);

	return 0;
}

/**
 * srp_change_queue_depth - set the device queue depth
 * @sdev: scsi device struct
 * @qdepth: requested queue depth
 *
 * Returns queue depth.
 */
static int
srp_change_queue_depth(struct scsi_device *sdev, int qdepth)
{
	if (!sdev->tagged_supported)
		qdepth = 1;
	return scsi_change_queue_depth(sdev, qdepth);
}

static int srp_send_tsk_mgmt(struct srp_rdma_ch *ch, u64 req_tag, u64 lun,
			     u8 func)
{
	struct srp_target_port *target = ch->target;
	struct srp_rport *rport = target->rport;
	struct ib_device *dev = target->srp_host->srp_dev->dev;
	struct srp_iu *iu;
	struct srp_tsk_mgmt *tsk_mgmt;

	if (!ch->connected || target->qp_in_error)
		return -1;

	init_completion(&ch->tsk_mgmt_done);

	/*
	 * Lock the rport mutex to prevent srp_create_ch_ib() from being
	 * invoked while a task management function is being sent.
	 */
	mutex_lock(&rport->mutex);
	spin_lock_irq(&ch->lock);
	iu = __srp_get_tx_iu(ch, SRP_IU_TSK_MGMT);
	spin_unlock_irq(&ch->lock);

	if (!iu) {
		mutex_unlock(&rport->mutex);

		return -1;
	}

	ib_dma_sync_single_for_cpu(dev, iu->dma, sizeof(*tsk_mgmt),
				   DMA_TO_DEVICE);
	tsk_mgmt = iu->buf;
	memset(tsk_mgmt, 0, sizeof(*tsk_mgmt));

	tsk_mgmt->opcode = SRP_TSK_MGMT;
	int_to_scsilun(lun, &tsk_mgmt->lun);
	tsk_mgmt->tag = req_tag | SRP_TAG_TSK_MGMT;
	tsk_mgmt->tsk_mgmt_func = func;
	tsk_mgmt->task_tag = req_tag;

	ib_dma_sync_single_for_device(dev, iu->dma, sizeof(*tsk_mgmt),
				      DMA_TO_DEVICE);
	if (srp_post_send(ch, iu, sizeof(*tsk_mgmt))) {
		srp_put_tx_iu(ch, iu, SRP_IU_TSK_MGMT);
		mutex_unlock(&rport->mutex);

		return -1;
	}
	mutex_unlock(&rport->mutex);

	if (!wait_for_completion_timeout(&ch->tsk_mgmt_done,
					 msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS)))
		return -1;

	return 0;
}

static int srp_abort(struct scsi_cmnd *scmnd)
{
	struct srp_target_port *target = host_to_target(scmnd->device->host);
	struct srp_request *req = (struct srp_request *) scmnd->host_scribble;
	u32 tag;
	u16 ch_idx;
	struct srp_rdma_ch *ch;
	int ret;

	shost_printk(KERN_ERR, target->scsi_host, "SRP abort called\n");

	if (!req)
		return SUCCESS;
	tag = blk_mq_unique_tag(scmnd->request);
	ch_idx = blk_mq_unique_tag_to_hwq(tag);
	if (WARN_ON_ONCE(ch_idx >= target->ch_count))
		return SUCCESS;
	ch = &target->ch[ch_idx];
	if (!srp_claim_req(ch, req, NULL, scmnd))
		return SUCCESS;
	shost_printk(KERN_ERR, target->scsi_host,
		     "Sending SRP abort for tag %#x\n", tag);
	if (srp_send_tsk_mgmt(ch, tag, scmnd->device->lun,
			      SRP_TSK_ABORT_TASK) == 0)
		ret = SUCCESS;
	else if (target->rport->state == SRP_RPORT_LOST)
		ret = FAST_IO_FAIL;
	else
		ret = FAILED;
	srp_free_req(ch, req, scmnd, 0);
	scmnd->result = DID_ABORT << 16;
	scmnd->scsi_done(scmnd);

	return ret;
}

static int srp_reset_device(struct scsi_cmnd *scmnd)
{
	struct srp_target_port *target = host_to_target(scmnd->device->host);
	struct srp_rdma_ch *ch;
	int i, j;

	shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n");

	ch = &target->ch[0];
	if (srp_send_tsk_mgmt(ch, SRP_TAG_NO_REQ, scmnd->device->lun,
			      SRP_TSK_LUN_RESET))
		return FAILED;
	if (ch->tsk_mgmt_status)
		return FAILED;

	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		for (j = 0; j < target->req_ring_size; ++j) {
			struct srp_request *req = &ch->req_ring[j];

			srp_finish_req(ch, req, scmnd->device, DID_RESET << 16);
		}
	}

	return SUCCESS;
}

static int srp_reset_host(struct scsi_cmnd *scmnd)
{
	struct srp_target_port *target = host_to_target(scmnd->device->host);

	shost_printk(KERN_ERR, target->scsi_host, PFX "SRP reset_host called\n");

	return srp_reconnect_rport(target->rport) == 0 ? SUCCESS : FAILED;
}

static int srp_slave_configure(struct scsi_device *sdev)
{
	struct Scsi_Host *shost = sdev->host;
	struct srp_target_port *target = host_to_target(shost);
	struct request_queue *q = sdev->request_queue;
	unsigned long timeout;

	if (sdev->type == TYPE_DISK) {
		timeout = max_t(unsigned, 30 * HZ, target->rq_tmo_jiffies);
		blk_queue_rq_timeout(q, timeout);
	}

	return 0;
}

static ssize_t show_id_ext(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "0x%016llx\n", be64_to_cpu(target->id_ext));
}

static ssize_t show_ioc_guid(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "0x%016llx\n", be64_to_cpu(target->ioc_guid));
}

static ssize_t show_service_id(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "0x%016llx\n", be64_to_cpu(target->service_id));
}

static ssize_t show_pkey(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "0x%04x\n", be16_to_cpu(target->pkey));
}

static ssize_t show_sgid(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%pI6\n", target->sgid.raw);
}

static ssize_t show_dgid(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));
	struct srp_rdma_ch *ch = &target->ch[0];

	return sprintf(buf, "%pI6\n", ch->path.dgid.raw);
}

static ssize_t show_orig_dgid(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%pI6\n", target->orig_dgid.raw);
}

static ssize_t show_req_lim(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));
	struct srp_rdma_ch *ch;
	int i, req_lim = INT_MAX;

	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		req_lim = min(req_lim, ch->req_lim);
	}
	return sprintf(buf, "%d\n", req_lim);
}

static ssize_t show_zero_req_lim(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%d\n", target->zero_req_lim);
}

static ssize_t show_local_ib_port(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%d\n", target->srp_host->port);
}

static ssize_t show_local_ib_device(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%s\n", target->srp_host->srp_dev->dev->name);
}

static ssize_t show_ch_count(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%d\n", target->ch_count);
}

static ssize_t show_comp_vector(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%d\n", target->comp_vector);
}

static ssize_t show_tl_retry_count(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%d\n", target->tl_retry_count);
}

static ssize_t show_cmd_sg_entries(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%u\n", target->cmd_sg_cnt);
}

static ssize_t show_allow_ext_sg(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%s\n", target->allow_ext_sg ? "true" : "false");
}

static DEVICE_ATTR(id_ext, S_IRUGO, show_id_ext, NULL);
static DEVICE_ATTR(ioc_guid, S_IRUGO, show_ioc_guid, NULL);
static DEVICE_ATTR(service_id, S_IRUGO, show_service_id, NULL);
static DEVICE_ATTR(pkey, S_IRUGO, show_pkey, NULL);
static DEVICE_ATTR(sgid, S_IRUGO, show_sgid, NULL);
static DEVICE_ATTR(dgid, S_IRUGO, show_dgid, NULL);
static DEVICE_ATTR(orig_dgid, S_IRUGO, show_orig_dgid, NULL);
static DEVICE_ATTR(req_lim, S_IRUGO, show_req_lim, NULL);
static DEVICE_ATTR(zero_req_lim, S_IRUGO, show_zero_req_lim, NULL);
static DEVICE_ATTR(local_ib_port, S_IRUGO, show_local_ib_port, NULL);
static DEVICE_ATTR(local_ib_device, S_IRUGO, show_local_ib_device, NULL);
static DEVICE_ATTR(ch_count, S_IRUGO, show_ch_count, NULL);
static DEVICE_ATTR(comp_vector, S_IRUGO, show_comp_vector, NULL);
static DEVICE_ATTR(tl_retry_count, S_IRUGO, show_tl_retry_count, NULL);
static DEVICE_ATTR(cmd_sg_entries, S_IRUGO, show_cmd_sg_entries, NULL);
static DEVICE_ATTR(allow_ext_sg, S_IRUGO, show_allow_ext_sg, NULL);

static struct device_attribute *srp_host_attrs[] = {
	&dev_attr_id_ext,
	&dev_attr_ioc_guid,
	&dev_attr_service_id,
	&dev_attr_pkey,
	&dev_attr_sgid,
	&dev_attr_dgid,
	&dev_attr_orig_dgid,
	&dev_attr_req_lim,
	&dev_attr_zero_req_lim,
	&dev_attr_local_ib_port,
	&dev_attr_local_ib_device,
	&dev_attr_ch_count,
	&dev_attr_comp_vector,
	&dev_attr_tl_retry_count,
	&dev_attr_cmd_sg_entries,
	&dev_attr_allow_ext_sg,
	NULL
};

static struct scsi_host_template srp_template = {
	.module				= THIS_MODULE,
	.name				= "InfiniBand SRP initiator",
	.proc_name			= DRV_NAME,
	.slave_configure		= srp_slave_configure,
	.info				= srp_target_info,
	.queuecommand			= srp_queuecommand,
	.change_queue_depth		= srp_change_queue_depth,
	.eh_abort_handler		= srp_abort,
	.eh_device_reset_handler	= srp_reset_device,
	.eh_host_reset_handler		= srp_reset_host,
	.skip_settle_delay		= true,
	.sg_tablesize			= SRP_DEF_SG_TABLESIZE,
	.can_queue			= SRP_DEFAULT_CMD_SQ_SIZE,
	.this_id			= -1,
	.cmd_per_lun			= SRP_DEFAULT_CMD_SQ_SIZE,
	.use_clustering			= ENABLE_CLUSTERING,
	.shost_attrs			= srp_host_attrs,
	.track_queue_depth		= 1,
};

static int srp_sdev_count(struct Scsi_Host *host)
{
	struct scsi_device *sdev;
	int c = 0;

	shost_for_each_device(sdev, host)
		c++;

	return c;
}

/*
 * Return values:
 * < 0 upon failure. Caller is responsible for SRP target port cleanup.
 * 0 and target->state == SRP_TARGET_REMOVED if asynchronous target port
 *    removal has been scheduled.
 * 0 and target->state != SRP_TARGET_REMOVED upon success.
 */
static int srp_add_target(struct srp_host *host, struct srp_target_port *target)
{
	struct srp_rport_identifiers ids;
	struct srp_rport *rport;

	target->state = SRP_TARGET_SCANNING;
	sprintf(target->target_name, "SRP.T10:%016llX",
		be64_to_cpu(target->id_ext));

	if (scsi_add_host(target->scsi_host, host->srp_dev->dev->dma_device))
		return -ENODEV;

	memcpy(ids.port_id, &target->id_ext, 8);
	memcpy(ids.port_id + 8, &target->ioc_guid, 8);
	ids.roles = SRP_RPORT_ROLE_TARGET;
	rport = srp_rport_add(target->scsi_host, &ids);
	if (IS_ERR(rport)) {
		scsi_remove_host(target->scsi_host);
		return PTR_ERR(rport);
	}

	rport->lld_data = target;
	target->rport = rport;

	spin_lock(&host->target_lock);
	list_add_tail(&target->list, &host->target_list);
	spin_unlock(&host->target_lock);

	scsi_scan_target(&target->scsi_host->shost_gendev,
			 0, target->scsi_id, SCAN_WILD_CARD, 0);

	if (srp_connected_ch(target) < target->ch_count ||
	    target->qp_in_error) {
		shost_printk(KERN_INFO, target->scsi_host,
			     PFX "SCSI scan failed - removing SCSI host\n");
		srp_queue_remove_work(target);
		goto out;
	}

	pr_debug(PFX "%s: SCSI scan succeeded - detected %d LUNs\n",
		 dev_name(&target->scsi_host->shost_gendev),
		 srp_sdev_count(target->scsi_host));

	spin_lock_irq(&target->lock);
	if (target->state == SRP_TARGET_SCANNING)
		target->state = SRP_TARGET_LIVE;
	spin_unlock_irq(&target->lock);

out:
	return 0;
}

static void srp_release_dev(struct device *dev)
{
	struct srp_host *host =
		container_of(dev, struct srp_host, dev);

	complete(&host->released);
}

static struct class srp_class = {
	.name        = "infiniband_srp",
	.dev_release = srp_release_dev
};

/**
 * srp_conn_unique() - check whether the connection to a target is unique
 * @host:   SRP host.
 * @target: SRP target port.
 */
static bool srp_conn_unique(struct srp_host *host,
			    struct srp_target_port *target)
{
	struct srp_target_port *t;
	bool ret = false;

	if (target->state == SRP_TARGET_REMOVED)
		goto out;

	ret = true;

	spin_lock(&host->target_lock);
	list_for_each_entry(t, &host->target_list, list) {
		if (t != target &&
		    target->id_ext == t->id_ext &&
		    target->ioc_guid == t->ioc_guid &&
		    target->initiator_ext == t->initiator_ext) {
			ret = false;
			break;
		}
	}
	spin_unlock(&host->target_lock);

out:
	return ret;
}

/*
 * Target ports are added by writing
 *
 *     id_ext=<SRP ID ext>,ioc_guid=<SRP IOC GUID>,dgid=<dest GID>,
 *     pkey=<P_Key>,service_id=<service ID>
 *
 * to the add_target sysfs attribute.
 */
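/*
 * For example, assuming an HCA named mlx4_0 whose port 1 is connected to
 * the fabric (the identifiers below are illustrative placeholders; use
 * the values reported by the actual target):
 *
 *     echo id_ext=0002c90300a00001,ioc_guid=0002c90300a00001,\
 *          dgid=fe800000000000000002c90300a00123,pkey=ffff,\
 *          service_id=0002c90300a00001 \
 *          > /sys/class/infiniband_srp/srp-mlx4_0-1/add_target
 */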
enum {
	SRP_OPT_ERR		= 0,
	SRP_OPT_ID_EXT		= 1 << 0,
	SRP_OPT_IOC_GUID	= 1 << 1,
	SRP_OPT_DGID		= 1 << 2,
	SRP_OPT_PKEY		= 1 << 3,
	SRP_OPT_SERVICE_ID	= 1 << 4,
	SRP_OPT_MAX_SECT	= 1 << 5,
	SRP_OPT_MAX_CMD_PER_LUN	= 1 << 6,
	SRP_OPT_IO_CLASS	= 1 << 7,
	SRP_OPT_INITIATOR_EXT	= 1 << 8,
	SRP_OPT_CMD_SG_ENTRIES	= 1 << 9,
	SRP_OPT_ALLOW_EXT_SG	= 1 << 10,
	SRP_OPT_SG_TABLESIZE	= 1 << 11,
	SRP_OPT_COMP_VECTOR	= 1 << 12,
	SRP_OPT_TL_RETRY_COUNT	= 1 << 13,
	SRP_OPT_QUEUE_SIZE	= 1 << 14,
	SRP_OPT_ALL		= (SRP_OPT_ID_EXT	|
				   SRP_OPT_IOC_GUID	|
				   SRP_OPT_DGID		|
				   SRP_OPT_PKEY		|
				   SRP_OPT_SERVICE_ID),
};

static const match_table_t srp_opt_tokens = {
	{ SRP_OPT_ID_EXT,		"id_ext=%s"		},
	{ SRP_OPT_IOC_GUID,		"ioc_guid=%s"		},
	{ SRP_OPT_DGID,			"dgid=%s"		},
	{ SRP_OPT_PKEY,			"pkey=%x"		},
	{ SRP_OPT_SERVICE_ID,		"service_id=%s"		},
	{ SRP_OPT_MAX_SECT,		"max_sect=%d"		},
	{ SRP_OPT_MAX_CMD_PER_LUN,	"max_cmd_per_lun=%d"	},
	{ SRP_OPT_IO_CLASS,		"io_class=%x"		},
	{ SRP_OPT_INITIATOR_EXT,	"initiator_ext=%s"	},
	{ SRP_OPT_CMD_SG_ENTRIES,	"cmd_sg_entries=%u"	},
	{ SRP_OPT_ALLOW_EXT_SG,		"allow_ext_sg=%u"	},
	{ SRP_OPT_SG_TABLESIZE,		"sg_tablesize=%u"	},
	{ SRP_OPT_COMP_VECTOR,		"comp_vector=%u"	},
	{ SRP_OPT_TL_RETRY_COUNT,	"tl_retry_count=%u"	},
	{ SRP_OPT_QUEUE_SIZE,		"queue_size=%d"		},
	{ SRP_OPT_ERR,			NULL			}
};

static int srp_parse_options(const char *buf, struct srp_target_port *target)
{
	char *options, *sep_opt;
	char *p;
	char dgid[3];
	substring_t args[MAX_OPT_ARGS];
	int opt_mask = 0;
	int token;
	int ret = -EINVAL;
	int i;

	options = kstrdup(buf, GFP_KERNEL);
	if (!options)
		return -ENOMEM;

	sep_opt = options;
	while ((p = strsep(&sep_opt, ",\n")) != NULL) {
		if (!*p)
			continue;

		token = match_token(p, srp_opt_tokens, args);
		opt_mask |= token;

		switch (token) {
		case SRP_OPT_ID_EXT:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			target->id_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
			kfree(p);
			break;

		case SRP_OPT_IOC_GUID:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			target->ioc_guid = cpu_to_be64(simple_strtoull(p, NULL, 16));
			kfree(p);
			break;

		case SRP_OPT_DGID:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			if (strlen(p) != 32) {
				pr_warn("bad dest GID parameter '%s'\n", p);
				kfree(p);
				goto out;
			}

			for (i = 0; i < 16; ++i) {
				strlcpy(dgid, p + i * 2, sizeof(dgid));
				if (sscanf(dgid, "%hhx",
					   &target->orig_dgid.raw[i]) < 1) {
					ret = -EINVAL;
					kfree(p);
					goto out;
				}
			}
			kfree(p);
			break;

		case SRP_OPT_PKEY:
			if (match_hex(args, &token)) {
				pr_warn("bad P_Key parameter '%s'\n", p);
				goto out;
			}
			target->pkey = cpu_to_be16(token);
			break;

		case SRP_OPT_SERVICE_ID:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			target->service_id = cpu_to_be64(simple_strtoull(p, NULL, 16));
			kfree(p);
			break;

		case SRP_OPT_MAX_SECT:
			if (match_int(args, &token)) {
				pr_warn("bad max sect parameter '%s'\n", p);
				goto out;
			}
			target->scsi_host->max_sectors = token;
			break;

		case SRP_OPT_QUEUE_SIZE:
			if (match_int(args, &token) || token < 1) {
				pr_warn("bad queue_size parameter '%s'\n", p);
				goto out;
			}
			target->scsi_host->can_queue = token;
			target->queue_size = token + SRP_RSP_SQ_SIZE +
					     SRP_TSK_MGMT_SQ_SIZE;
			if (!(opt_mask & SRP_OPT_MAX_CMD_PER_LUN))
				target->scsi_host->cmd_per_lun = token;
			break;

		case SRP_OPT_MAX_CMD_PER_LUN:
			if (match_int(args, &token) || token < 1) {
				pr_warn("bad max cmd_per_lun parameter '%s'\n",
					p);
				goto out;
			}
			target->scsi_host->cmd_per_lun = token;
			break;

		case SRP_OPT_IO_CLASS:
			if (match_hex(args, &token)) {
				pr_warn("bad IO class parameter '%s'\n", p);
				goto out;
			}
			if (token != SRP_REV10_IB_IO_CLASS &&
			    token != SRP_REV16A_IB_IO_CLASS) {
				pr_warn("unknown IO class parameter value %x specified (use %x or %x).\n",
					token, SRP_REV10_IB_IO_CLASS,
					SRP_REV16A_IB_IO_CLASS);
				goto out;
			}
			target->io_class = token;
			break;

		case SRP_OPT_INITIATOR_EXT:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			target->initiator_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
			kfree(p);
			break;

		case SRP_OPT_CMD_SG_ENTRIES:
			if (match_int(args, &token) || token < 1 || token > 255) {
				pr_warn("bad max cmd_sg_entries parameter '%s'\n",
					p);
				goto out;
			}
			target->cmd_sg_cnt = token;
			break;

		case SRP_OPT_ALLOW_EXT_SG:
			if (match_int(args, &token)) {
				pr_warn("bad allow_ext_sg parameter '%s'\n", p);
				goto out;
			}
			target->allow_ext_sg = !!token;
			break;

		case SRP_OPT_SG_TABLESIZE:
			if (match_int(args, &token) || token < 1 ||
			    token > SCSI_MAX_SG_CHAIN_SEGMENTS) {
				pr_warn("bad max sg_tablesize parameter '%s'\n",
					p);
				goto out;
			}
			target->sg_tablesize = token;
			break;

		case SRP_OPT_COMP_VECTOR:
			if (match_int(args, &token) || token < 0) {
				pr_warn("bad comp_vector parameter '%s'\n", p);
				goto out;
			}
			target->comp_vector = token;
			break;

		case SRP_OPT_TL_RETRY_COUNT:
			if (match_int(args, &token) || token < 2 || token > 7) {
				pr_warn("bad tl_retry_count parameter '%s' (must be a number between 2 and 7)\n",
					p);
				goto out;
			}
			target->tl_retry_count = token;
			break;

		default:
			pr_warn("unknown parameter or missing value '%s' in target creation request\n",
				p);
			goto out;
		}
	}

	if ((opt_mask & SRP_OPT_ALL) == SRP_OPT_ALL)
		ret = 0;
	else
		for (i = 0; i < ARRAY_SIZE(srp_opt_tokens); ++i)
			if ((srp_opt_tokens[i].token & SRP_OPT_ALL) &&
			    !(srp_opt_tokens[i].token & opt_mask))
				pr_warn("target creation request is missing parameter '%s'\n",
					srp_opt_tokens[i].pattern);

	if (target->scsi_host->cmd_per_lun > target->scsi_host->can_queue &&
	    (opt_mask & SRP_OPT_MAX_CMD_PER_LUN))
		pr_warn("cmd_per_lun = %d > queue_size = %d\n",
			target->scsi_host->cmd_per_lun,
			target->scsi_host->can_queue);

out:
	kfree(options);
	return ret;
}

static ssize_t srp_create_target(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct srp_host *host =
		container_of(dev, struct srp_host, dev);
	struct Scsi_Host *target_host;
	struct srp_target_port *target;
	struct srp_rdma_ch *ch;
	struct srp_device *srp_dev = host->srp_dev;
	struct ib_device *ibdev = srp_dev->dev;
	int ret, node_idx, node, cpu, i;
	bool multich = false;

	target_host = scsi_host_alloc(&srp_template,
				      sizeof(struct srp_target_port));
	if (!target_host)
		return -ENOMEM;

	target_host->transportt  = ib_srp_transport_template;
	target_host->max_channel = 0;
	target_host->max_id      = 1;
	target_host->max_lun     = -1LL;
	target_host->max_cmd_len = sizeof ((struct srp_cmd *) (void *) 0L)->cdb;

	target = host_to_target(target_host);

	target->io_class	= SRP_REV16A_IB_IO_CLASS;
	target->scsi_host	= target_host;
	target->srp_host	= host;
	target->lkey		= host->srp_dev->pd->local_dma_lkey;
	target->global_mr	= host->srp_dev->global_mr;
	target->cmd_sg_cnt	= cmd_sg_entries;
	target->sg_tablesize	= indirect_sg_entries ? : cmd_sg_entries;
	target->allow_ext_sg	= allow_ext_sg;
	target->tl_retry_count	= 7;
	target->queue_size	= SRP_DEFAULT_QUEUE_SIZE;

	/*
	 * Prevent the SCSI host from being removed by srp_remove_target()
	 * before this function returns.
	 */
	scsi_host_get(target->scsi_host);

	mutex_lock(&host->add_target_mutex);

	ret = srp_parse_options(buf, target);
	if (ret)
		goto out;

	target->req_ring_size = target->queue_size - SRP_TSK_MGMT_SQ_SIZE;

	if (!srp_conn_unique(target->srp_host, target)) {
		shost_printk(KERN_INFO, target->scsi_host,
			     PFX "Already connected to target port with id_ext=%016llx;ioc_guid=%016llx;initiator_ext=%016llx\n",
			     be64_to_cpu(target->id_ext),
			     be64_to_cpu(target->ioc_guid),
			     be64_to_cpu(target->initiator_ext));
		ret = -EEXIST;
		goto out;
	}

	if (!srp_dev->has_fmr && !srp_dev->has_fr && !target->allow_ext_sg &&
	    target->cmd_sg_cnt < target->sg_tablesize) {
		pr_warn("No MR pool and no external indirect descriptors, limiting sg_tablesize to cmd_sg_cnt\n");
		target->sg_tablesize = target->cmd_sg_cnt;
	}

	target_host->sg_tablesize = target->sg_tablesize;
	target->indirect_size = target->sg_tablesize *
				sizeof(struct srp_direct_buf);
	target->max_iu_len = sizeof(struct srp_cmd) +
			     sizeof(struct srp_indirect_buf) +
			     target->cmd_sg_cnt * sizeof(struct srp_direct_buf);

	INIT_WORK(&target->tl_err_work, srp_tl_err_work);
	INIT_WORK(&target->remove_work, srp_remove_work);
	spin_lock_init(&target->lock);
	ret = ib_query_gid(ibdev, host->port, 0, &target->sgid, NULL);
	if (ret)
		goto out;

	ret = -ENOMEM;
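	/*
	 * Worked example for the channel count computation below (the
	 * numbers are illustrative): with 2 online NUMA nodes, 16 online
	 * CPUs, 8 completion vectors and no ch_count module parameter,
	 * this yields max(2, min(min(4 * 2, 8), 16)) = 8 RDMA channels.
	 */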
	target->ch_count = max_t(unsigned, num_online_nodes(),
				 min(ch_count ? :
				     min(4 * num_online_nodes(),
					 ibdev->num_comp_vectors),
				     num_online_cpus()));
	target->ch = kcalloc(target->ch_count, sizeof(*target->ch),
			     GFP_KERNEL);
	if (!target->ch)
		goto out;

	node_idx = 0;
	for_each_online_node(node) {
		const int ch_start = (node_idx * target->ch_count /
				      num_online_nodes());
		const int ch_end = ((node_idx + 1) * target->ch_count /
				    num_online_nodes());
		const int cv_start = (node_idx * ibdev->num_comp_vectors /
				      num_online_nodes() + target->comp_vector)
				     % ibdev->num_comp_vectors;
		const int cv_end = ((node_idx + 1) * ibdev->num_comp_vectors /
				    num_online_nodes() + target->comp_vector)
				   % ibdev->num_comp_vectors;
		int cpu_idx = 0;

		for_each_online_cpu(cpu) {
			if (cpu_to_node(cpu) != node)
				continue;
			if (ch_start + cpu_idx >= ch_end)
				continue;
			ch = &target->ch[ch_start + cpu_idx];
			ch->target = target;
			ch->comp_vector = cv_start == cv_end ? cv_start :
				cv_start + cpu_idx % (cv_end - cv_start);
			spin_lock_init(&ch->lock);
			INIT_LIST_HEAD(&ch->free_tx);
			ret = srp_new_cm_id(ch);
			if (ret)
				goto err_disconnect;

			ret = srp_create_ch_ib(ch);
			if (ret)
				goto err_disconnect;

			ret = srp_alloc_req_data(ch);
			if (ret)
				goto err_disconnect;

			ret = srp_connect_ch(ch, multich);
			if (ret) {
				shost_printk(KERN_ERR, target->scsi_host,
					     PFX "Connection %d/%d failed\n",
					     ch_start + cpu_idx,
					     target->ch_count);
				if (node_idx == 0 && cpu_idx == 0) {
					goto err_disconnect;
				} else {
					srp_free_ch_ib(target, ch);
					srp_free_req_data(target, ch);
					target->ch_count = ch - target->ch;
					goto connected;
				}
			}

			multich = true;
			cpu_idx++;
		}
		node_idx++;
	}

connected:
	target->scsi_host->nr_hw_queues = target->ch_count;

	ret = srp_add_target(host, target);
	if (ret)
		goto err_disconnect;

	if (target->state != SRP_TARGET_REMOVED) {
		shost_printk(KERN_DEBUG, target->scsi_host, PFX
			     "new target: id_ext %016llx ioc_guid %016llx pkey %04x service_id %016llx sgid %pI6 dgid %pI6\n",
			     be64_to_cpu(target->id_ext),
			     be64_to_cpu(target->ioc_guid),
			     be16_to_cpu(target->pkey),
			     be64_to_cpu(target->service_id),
			     target->sgid.raw, target->orig_dgid.raw);
	}

	ret = count;

out:
	mutex_unlock(&host->add_target_mutex);

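	/*
	 * The first scsi_host_put() pairs with the scsi_host_get() above.
	 * On failure a second put drops the initial reference taken by
	 * scsi_host_alloc() so that the SCSI host gets freed.
	 */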
	scsi_host_put(target->scsi_host);
	if (ret < 0)
		scsi_host_put(target->scsi_host);

	return ret;

err_disconnect:
	srp_disconnect_target(target);

	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		srp_free_ch_ib(target, ch);
		srp_free_req_data(target, ch);
	}

	kfree(target->ch);
	goto out;
}

static DEVICE_ATTR(add_target, S_IWUSR, NULL, srp_create_target);

static ssize_t show_ibdev(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct srp_host *host = container_of(dev, struct srp_host, dev);

	return sprintf(buf, "%s\n", host->srp_dev->dev->name);
}

static DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL);

static ssize_t show_port(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct srp_host *host = container_of(dev, struct srp_host, dev);

	return sprintf(buf, "%d\n", host->port);
}

static DEVICE_ATTR(port, S_IRUGO, show_port, NULL);

static struct srp_host *srp_add_port(struct srp_device *device, u8 port)
{
	struct srp_host *host;

	host = kzalloc(sizeof(*host), GFP_KERNEL);
	if (!host)
		return NULL;

	INIT_LIST_HEAD(&host->target_list);
	spin_lock_init(&host->target_lock);
	init_completion(&host->released);
	mutex_init(&host->add_target_mutex);
	host->srp_dev = device;
	host->port = port;

	host->dev.class = &srp_class;
	host->dev.parent = device->dev->dma_device;
	dev_set_name(&host->dev, "srp-%s-%d", device->dev->name, port);

	if (device_register(&host->dev))
		goto free_host;
	if (device_create_file(&host->dev, &dev_attr_add_target))
		goto err_class;
	if (device_create_file(&host->dev, &dev_attr_ibdev))
		goto err_class;
	if (device_create_file(&host->dev, &dev_attr_port))
		goto err_class;

	return host;

err_class:
	device_unregister(&host->dev);

free_host:
	kfree(host);

	return NULL;
}

static void srp_add_one(struct ib_device *device)
{
	struct srp_device *srp_dev;
	struct ib_device_attr *dev_attr;
	struct srp_host *host;
	int mr_page_shift, p;
	u64 max_pages_per_mr;

	dev_attr = kmalloc(sizeof(*dev_attr), GFP_KERNEL);
	if (!dev_attr)
		return;

	if (ib_query_device(device, dev_attr)) {
		pr_warn("Query device failed for %s\n", device->name);
		goto free_attr;
	}

	srp_dev = kmalloc(sizeof(*srp_dev), GFP_KERNEL);
	if (!srp_dev)
		goto free_attr;

	srp_dev->has_fmr = (device->alloc_fmr && device->dealloc_fmr &&
			    device->map_phys_fmr && device->unmap_fmr);
	srp_dev->has_fr = (dev_attr->device_cap_flags &
			   IB_DEVICE_MEM_MGT_EXTENSIONS);
	if (!srp_dev->has_fmr && !srp_dev->has_fr)
		dev_warn(&device->dev, "neither FMR nor FR is supported\n");

	srp_dev->use_fast_reg = (srp_dev->has_fr &&
				 (!srp_dev->has_fmr || prefer_fr));
	srp_dev->use_fmr = !srp_dev->use_fast_reg && srp_dev->has_fmr;

	/*
	 * Use the smallest page size supported by the HCA, down to a
	 * minimum of 4096 bytes. We're unlikely to build large sglists
	 * out of smaller entries.
	 */
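	/*
	 * For example (an assumed, typical value): page_size_cap =
	 * 0xfffff000 means that 4096 bytes is the smallest supported
	 * page size, so ffs() - 1 = 12 and mr_page_size = 4096 with
	 * mr_page_mask == ~0xfffULL.
	 */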
	mr_page_shift		= max(12, ffs(dev_attr->page_size_cap) - 1);
	srp_dev->mr_page_size	= 1 << mr_page_shift;
	srp_dev->mr_page_mask	= ~((u64) srp_dev->mr_page_size - 1);
	max_pages_per_mr	= dev_attr->max_mr_size;
	do_div(max_pages_per_mr, srp_dev->mr_page_size);
	srp_dev->max_pages_per_mr = min_t(u64, SRP_MAX_PAGES_PER_MR,
					  max_pages_per_mr);
	if (srp_dev->use_fast_reg) {
		srp_dev->max_pages_per_mr =
			min_t(u32, srp_dev->max_pages_per_mr,
			      dev_attr->max_fast_reg_page_list_len);
	}
	srp_dev->mr_max_size	= srp_dev->mr_page_size *
				  srp_dev->max_pages_per_mr;
	pr_debug("%s: mr_page_shift = %d, dev_attr->max_mr_size = %#llx, dev_attr->max_fast_reg_page_list_len = %u, max_pages_per_mr = %d, mr_max_size = %#x\n",
		 device->name, mr_page_shift, dev_attr->max_mr_size,
		 dev_attr->max_fast_reg_page_list_len,
		 srp_dev->max_pages_per_mr, srp_dev->mr_max_size);

	INIT_LIST_HEAD(&srp_dev->dev_list);

	srp_dev->dev = device;
	srp_dev->pd  = ib_alloc_pd(device);
	if (IS_ERR(srp_dev->pd))
		goto free_dev;

	if (!register_always || (!srp_dev->has_fmr && !srp_dev->has_fr)) {
		srp_dev->global_mr = ib_get_dma_mr(srp_dev->pd,
						   IB_ACCESS_LOCAL_WRITE |
						   IB_ACCESS_REMOTE_READ |
						   IB_ACCESS_REMOTE_WRITE);
		if (IS_ERR(srp_dev->global_mr))
			goto err_pd;
	} else {
		srp_dev->global_mr = NULL;
	}

	for (p = rdma_start_port(device); p <= rdma_end_port(device); ++p) {
		host = srp_add_port(srp_dev, p);
		if (host)
			list_add_tail(&host->list, &srp_dev->dev_list);
	}

	ib_set_client_data(device, &srp_client, srp_dev);

	goto free_attr;

err_pd:
	ib_dealloc_pd(srp_dev->pd);

free_dev:
	kfree(srp_dev);

free_attr:
	kfree(dev_attr);
}

Haggai Eran7c1eb452015-07-30 17:50:14 +03003536static void srp_remove_one(struct ib_device *device, void *client_data)
Roland Dreieraef9ec32005-11-02 14:07:13 -08003537{
Roland Dreierf5358a12006-06-17 20:37:29 -07003538 struct srp_device *srp_dev;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003539 struct srp_host *host, *tmp_host;
Bart Van Asscheef6c49d2011-12-26 16:49:18 +00003540 struct srp_target_port *target;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003541
Haggai Eran7c1eb452015-07-30 17:50:14 +03003542 srp_dev = client_data;
Dotan Barak1fe0cb82013-06-12 15:20:36 +02003543 if (!srp_dev)
3544 return;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003545
Roland Dreierf5358a12006-06-17 20:37:29 -07003546 list_for_each_entry_safe(host, tmp_host, &srp_dev->dev_list, list) {
Tony Jonesee959b02008-02-22 00:13:36 +01003547 device_unregister(&host->dev);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003548 /*
3549 * Wait for the sysfs entry to go away, so that no new
3550 * target ports can be created.
3551 */
3552 wait_for_completion(&host->released);
3553
3554 /*
Bart Van Asscheef6c49d2011-12-26 16:49:18 +00003555 * Remove all target ports.
Roland Dreieraef9ec32005-11-02 14:07:13 -08003556 */
Matthew Wilcoxb3589fd2006-06-17 20:37:30 -07003557 spin_lock(&host->target_lock);
Bart Van Asscheef6c49d2011-12-26 16:49:18 +00003558 list_for_each_entry(target, &host->target_list, list)
3559 srp_queue_remove_work(target);
Matthew Wilcoxb3589fd2006-06-17 20:37:30 -07003560 spin_unlock(&host->target_lock);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003561
3562 /*
Bart Van Asschebcc05912014-07-09 15:57:26 +02003563		 * Wait for tl_err (transport layer error) handling and target port removal tasks.
Roland Dreieraef9ec32005-11-02 14:07:13 -08003564 */
Bart Van Asscheef6c49d2011-12-26 16:49:18 +00003565 flush_workqueue(system_long_wq);
Bart Van Asschebcc05912014-07-09 15:57:26 +02003566 flush_workqueue(srp_remove_wq);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003567
Roland Dreieraef9ec32005-11-02 14:07:13 -08003568 kfree(host);
3569 }
3570
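	/* Free device-wide resources in reverse order of allocation. */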
Bart Van Assche03f6fb92015-08-10 17:09:36 -07003571 if (srp_dev->global_mr)
3572 ib_dereg_mr(srp_dev->global_mr);
Roland Dreierf5358a12006-06-17 20:37:29 -07003573 ib_dealloc_pd(srp_dev->pd);
3574
3575 kfree(srp_dev);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003576}
3577
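/*
 * Hooks through which the SCSI SRP transport class drives this driver:
 * rport reconnect policy and delays, fast-I/O-fail and dev-loss timers,
 * and termination of outstanding I/O on a failed rport.
 */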
FUJITA Tomonori32368222007-06-27 16:33:12 +09003578static struct srp_function_template ib_srp_transport_functions = {
Bart Van Asscheed9b2262013-10-26 14:34:27 +02003579 .has_rport_state = true,
3580 .reset_timer_if_blocked = true,
Bart Van Asschea95cadb2013-10-26 14:37:17 +02003581 .reconnect_delay = &srp_reconnect_delay,
Bart Van Asscheed9b2262013-10-26 14:34:27 +02003582 .fast_io_fail_tmo = &srp_fast_io_fail_tmo,
3583 .dev_loss_tmo = &srp_dev_loss_tmo,
3584 .reconnect = srp_rport_reconnect,
Bart Van Asschedc1bdbd2011-09-16 20:41:13 +02003585 .rport_delete = srp_rport_delete,
Bart Van Asscheed9b2262013-10-26 14:34:27 +02003586 .terminate_rport_io = srp_terminate_io,
FUJITA Tomonori32368222007-06-27 16:33:12 +09003587};
3588
Roland Dreieraef9ec32005-11-02 14:07:13 -08003589static int __init srp_init_module(void)
3590{
3591 int ret;
3592
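	/*
	 * ib_srp stores request pointers in the wr_id field of its work
	 * requests, so verify at build time that a pointer fits.
	 */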
Bart Van Asschedcb4cb82010-11-26 13:22:48 -05003593 BUILD_BUG_ON(FIELD_SIZEOF(struct ib_wc, wr_id) < sizeof(void *));
Bart Van Asschedd5e6e32010-08-30 19:27:20 +00003594
David Dillow49248642011-01-14 18:23:24 -05003595 if (srp_sg_tablesize) {
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00003596 pr_warn("srp_sg_tablesize is deprecated, please use cmd_sg_entries\n");
David Dillow49248642011-01-14 18:23:24 -05003597 if (!cmd_sg_entries)
3598 cmd_sg_entries = srp_sg_tablesize;
3599 }
3600
3601 if (!cmd_sg_entries)
3602 cmd_sg_entries = SRP_DEF_SG_TABLESIZE;
3603
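	/*
	 * The SRP_CMD information unit carries the data descriptor count
	 * in an 8-bit field, which caps cmd_sg_entries at 255.
	 */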
3604 if (cmd_sg_entries > 255) {
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00003605 pr_warn("Clamping cmd_sg_entries to 255\n");
David Dillow49248642011-01-14 18:23:24 -05003606 cmd_sg_entries = 255;
David Dillow1e89a192008-04-16 21:01:12 -07003607 }
3608
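	/*
	 * Indirect data descriptors allow larger scatterlists than fit in
	 * the SRP_CMD IU itself; default their limit to cmd_sg_entries
	 * and never let it be smaller.
	 */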
David Dillowc07d4242011-01-16 13:57:10 -05003609 if (!indirect_sg_entries)
3610 indirect_sg_entries = cmd_sg_entries;
3611 else if (indirect_sg_entries < cmd_sg_entries) {
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00003612 pr_warn("Bumping up indirect_sg_entries to match cmd_sg_entries (%u)\n",
3613 cmd_sg_entries);
David Dillowc07d4242011-01-16 13:57:10 -05003614 indirect_sg_entries = cmd_sg_entries;
3615 }
3616
Bart Van Asschebcc05912014-07-09 15:57:26 +02003617 srp_remove_wq = create_workqueue("srp_remove");
Wei Yongjunda05be22014-08-14 08:56:22 +08003618 if (!srp_remove_wq) {
3619 ret = -ENOMEM;
Bart Van Asschebcc05912014-07-09 15:57:26 +02003620 goto out;
3621 }
3622
3623 ret = -ENOMEM;
FUJITA Tomonori32368222007-06-27 16:33:12 +09003624 ib_srp_transport_template =
3625 srp_attach_transport(&ib_srp_transport_functions);
3626 if (!ib_srp_transport_template)
Bart Van Asschebcc05912014-07-09 15:57:26 +02003627 goto destroy_wq;
FUJITA Tomonori32368222007-06-27 16:33:12 +09003628
Roland Dreieraef9ec32005-11-02 14:07:13 -08003629 ret = class_register(&srp_class);
3630 if (ret) {
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00003631 pr_err("couldn't register class infiniband_srp\n");
Bart Van Asschebcc05912014-07-09 15:57:26 +02003632 goto release_tr;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003633 }
3634
Michael S. Tsirkinc1a0b232006-08-21 16:40:12 -07003635 ib_sa_register_client(&srp_sa_client);
3636
Roland Dreieraef9ec32005-11-02 14:07:13 -08003637 ret = ib_register_client(&srp_client);
3638 if (ret) {
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00003639 pr_err("couldn't register IB client\n");
Bart Van Asschebcc05912014-07-09 15:57:26 +02003640 goto unreg_sa;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003641 }
3642
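	/*
	 * Both the success path and, after unwinding, the error paths
	 * return through "out".  The labels below undo completed setup
	 * steps in reverse order.
	 */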
Bart Van Asschebcc05912014-07-09 15:57:26 +02003643out:
3644 return ret;
3645
3646unreg_sa:
3647 ib_sa_unregister_client(&srp_sa_client);
3648 class_unregister(&srp_class);
3649
3650release_tr:
3651 srp_release_transport(ib_srp_transport_template);
3652
3653destroy_wq:
3654 destroy_workqueue(srp_remove_wq);
3655 goto out;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003656}
3657
3658static void __exit srp_cleanup_module(void)
3659{
3660 ib_unregister_client(&srp_client);
Michael S. Tsirkinc1a0b232006-08-21 16:40:12 -07003661 ib_sa_unregister_client(&srp_sa_client);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003662 class_unregister(&srp_class);
FUJITA Tomonori32368222007-06-27 16:33:12 +09003663 srp_release_transport(ib_srp_transport_template);
Bart Van Asschebcc05912014-07-09 15:57:26 +02003664 destroy_workqueue(srp_remove_wq);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003665}
3666
3667module_init(srp_init_module);
3668module_exit(srp_cleanup_module);