/*
 * Copyright (c) 2005 Cisco Systems.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/string.h>
#include <linux/parser.h>
#include <linux/random.h>
#include <linux/jiffies.h>
#include <rdma/ib_cache.h>

#include <linux/atomic.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_tcq.h>
#include <scsi/srp.h>
#include <scsi/scsi_transport_srp.h>

#include "ib_srp.h"

#define DRV_NAME	"ib_srp"
#define PFX		DRV_NAME ": "
#define DRV_VERSION	"2.0"
#define DRV_RELDATE	"July 26, 2015"

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("InfiniBand SCSI RDMA Protocol initiator");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_INFO(release_date, DRV_RELDATE);

static unsigned int srp_sg_tablesize;
static unsigned int cmd_sg_entries;
static unsigned int indirect_sg_entries;
static bool allow_ext_sg;
static bool prefer_fr = true;
static bool register_always = true;
static int topspin_workarounds = 1;

module_param(srp_sg_tablesize, uint, 0444);
MODULE_PARM_DESC(srp_sg_tablesize, "Deprecated name for cmd_sg_entries");

module_param(cmd_sg_entries, uint, 0444);
MODULE_PARM_DESC(cmd_sg_entries,
		 "Default number of gather/scatter entries in the SRP command (default is 12, max 255)");

module_param(indirect_sg_entries, uint, 0444);
MODULE_PARM_DESC(indirect_sg_entries,
		 "Default max number of gather/scatter entries (default is 12, max is " __stringify(SCSI_MAX_SG_CHAIN_SEGMENTS) ")");

module_param(allow_ext_sg, bool, 0444);
MODULE_PARM_DESC(allow_ext_sg,
		 "Default behavior when there are more than cmd_sg_entries S/G entries after mapping; fails the request when false (default false)");

module_param(topspin_workarounds, int, 0444);
MODULE_PARM_DESC(topspin_workarounds,
		 "Enable workarounds for Topspin/Cisco SRP target bugs if != 0");

module_param(prefer_fr, bool, 0444);
MODULE_PARM_DESC(prefer_fr,
"Whether to use fast registration if both FMR and fast registration are supported");

module_param(register_always, bool, 0444);
MODULE_PARM_DESC(register_always,
		 "Use memory registration even for contiguous memory regions");

static const struct kernel_param_ops srp_tmo_ops;

static int srp_reconnect_delay = 10;
module_param_cb(reconnect_delay, &srp_tmo_ops, &srp_reconnect_delay,
		S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(reconnect_delay, "Time between successive reconnect attempts");

static int srp_fast_io_fail_tmo = 15;
module_param_cb(fast_io_fail_tmo, &srp_tmo_ops, &srp_fast_io_fail_tmo,
		S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fast_io_fail_tmo,
		 "Number of seconds between the observation of a transport"
		 " layer error and failing all I/O. \"off\" means that this"
		 " functionality is disabled.");

static int srp_dev_loss_tmo = 600;
module_param_cb(dev_loss_tmo, &srp_tmo_ops, &srp_dev_loss_tmo,
		S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dev_loss_tmo,
		 "Maximum number of seconds that the SRP transport should"
		 " insulate transport layer errors. After this time has been"
		 " exceeded the SCSI host is removed. Should be"
		 " between 1 and " __stringify(SCSI_DEVICE_BLOCK_MAX_TIMEOUT)
		 " if fast_io_fail_tmo has not been set. \"off\" means that"
		 " this functionality is disabled.");

static unsigned ch_count;
module_param(ch_count, uint, 0444);
MODULE_PARM_DESC(ch_count,
		 "Number of RDMA channels to use for communication with an SRP target. Using more than one channel improves performance if the HCA supports multiple completion vectors. The default value is the minimum of four times the number of online CPU sockets and the number of completion vectors supported by the HCA.");

static void srp_add_one(struct ib_device *device);
static void srp_remove_one(struct ib_device *device, void *client_data);
static void srp_recv_completion(struct ib_cq *cq, void *ch_ptr);
static void srp_send_completion(struct ib_cq *cq, void *ch_ptr);
static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event);

static struct scsi_transport_template *ib_srp_transport_template;
static struct workqueue_struct *srp_remove_wq;

static struct ib_client srp_client = {
	.name   = "srp",
	.add    = srp_add_one,
	.remove = srp_remove_one
};

static struct ib_sa_client srp_sa_client;

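/*
 * sysfs 'get' handler shared by the reconnect_delay, fast_io_fail_tmo and
 * dev_loss_tmo module parameters; a negative timeout is reported as "off".
 */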
static int srp_tmo_get(char *buffer, const struct kernel_param *kp)
{
	int tmo = *(int *)kp->arg;

	if (tmo >= 0)
		return sprintf(buffer, "%d", tmo);
	else
		return sprintf(buffer, "off");
}

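/*
 * sysfs 'set' handler for the three timeout parameters above, e.g. for
 * writes to /sys/module/ib_srp/parameters/fast_io_fail_tmo. The new value
 * is parsed with srp_parse_tmo() and cross-checked against the other two
 * timeout parameters via srp_tmo_valid() before it is stored.
 */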
static int srp_tmo_set(const char *val, const struct kernel_param *kp)
{
	int tmo, res;

	res = srp_parse_tmo(&tmo, val);
	if (res)
		goto out;

	if (kp->arg == &srp_reconnect_delay)
		res = srp_tmo_valid(tmo, srp_fast_io_fail_tmo,
				    srp_dev_loss_tmo);
	else if (kp->arg == &srp_fast_io_fail_tmo)
		res = srp_tmo_valid(srp_reconnect_delay, tmo, srp_dev_loss_tmo);
	else
		res = srp_tmo_valid(srp_reconnect_delay, srp_fast_io_fail_tmo,
				    tmo);
	if (res)
		goto out;
	*(int *)kp->arg = tmo;

out:
	return res;
}

static const struct kernel_param_ops srp_tmo_ops = {
	.get = srp_tmo_get,
	.set = srp_tmo_set,
};

static inline struct srp_target_port *host_to_target(struct Scsi_Host *host)
{
	return (struct srp_target_port *) host->hostdata;
}

static const char *srp_target_info(struct Scsi_Host *host)
{
	return host_to_target(host)->target_name;
}

static int srp_target_is_topspin(struct srp_target_port *target)
{
	static const u8 topspin_oui[3] = { 0x00, 0x05, 0xad };
	static const u8 cisco_oui[3]   = { 0x00, 0x1b, 0x0d };

	return topspin_workarounds &&
		(!memcmp(&target->ioc_guid, topspin_oui, sizeof topspin_oui) ||
		 !memcmp(&target->ioc_guid, cisco_oui, sizeof cisco_oui));
}

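/*
 * Allocate an information unit (IU): the host-side buffer plus the DMA
 * mapping used to post it on a send or receive queue. Returns NULL on any
 * allocation or mapping failure.
 */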
static struct srp_iu *srp_alloc_iu(struct srp_host *host, size_t size,
				   gfp_t gfp_mask,
				   enum dma_data_direction direction)
{
	struct srp_iu *iu;

	iu = kmalloc(sizeof *iu, gfp_mask);
	if (!iu)
		goto out;

	iu->buf = kzalloc(size, gfp_mask);
	if (!iu->buf)
		goto out_free_iu;

	iu->dma = ib_dma_map_single(host->srp_dev->dev, iu->buf, size,
				    direction);
	if (ib_dma_mapping_error(host->srp_dev->dev, iu->dma))
		goto out_free_buf;

	iu->size = size;
	iu->direction = direction;

	return iu;

out_free_buf:
	kfree(iu->buf);
out_free_iu:
	kfree(iu);
out:
	return NULL;
}

static void srp_free_iu(struct srp_host *host, struct srp_iu *iu)
{
	if (!iu)
		return;

	ib_dma_unmap_single(host->srp_dev->dev, iu->dma, iu->size,
			    iu->direction);
	kfree(iu->buf);
	kfree(iu);
}

static void srp_qp_event(struct ib_event *event, void *context)
{
	pr_debug("QP event %s (%d)\n",
		 ib_event_msg(event->event), event->event);
}

static int srp_init_qp(struct srp_target_port *target,
		       struct ib_qp *qp)
{
	struct ib_qp_attr *attr;
	int ret;

	attr = kmalloc(sizeof *attr, GFP_KERNEL);
	if (!attr)
		return -ENOMEM;

	ret = ib_find_cached_pkey(target->srp_host->srp_dev->dev,
				  target->srp_host->port,
				  be16_to_cpu(target->pkey),
				  &attr->pkey_index);
	if (ret)
		goto out;

	attr->qp_state = IB_QPS_INIT;
	attr->qp_access_flags = (IB_ACCESS_REMOTE_READ |
				 IB_ACCESS_REMOTE_WRITE);
	attr->port_num = target->srp_host->port;

	ret = ib_modify_qp(qp, attr,
			   IB_QP_STATE		|
			   IB_QP_PKEY_INDEX	|
			   IB_QP_ACCESS_FLAGS	|
			   IB_QP_PORT);

out:
	kfree(attr);
	return ret;
}

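/*
 * Allocate a fresh IB CM ID for a channel, releasing any previous one, and
 * re-seed the path record (sgid/dgid/pkey/service_id) from the target port.
 * srp_rport_reconnect() uses this to start over with a clean CM state.
 */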
static int srp_new_cm_id(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	struct ib_cm_id *new_cm_id;

	new_cm_id = ib_create_cm_id(target->srp_host->srp_dev->dev,
				    srp_cm_handler, ch);
	if (IS_ERR(new_cm_id))
		return PTR_ERR(new_cm_id);

	if (ch->cm_id)
		ib_destroy_cm_id(ch->cm_id);
	ch->cm_id = new_cm_id;
	ch->path.sgid = target->sgid;
	ch->path.dgid = target->orig_dgid;
	ch->path.pkey = target->pkey;
	ch->path.service_id = target->service_id;

	return 0;
}

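/*
 * Create the per-channel FMR pool, sized to the SCSI host's can_queue
 * depth with the dirty watermark at one quarter of the pool size.
 */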
static struct ib_fmr_pool *srp_alloc_fmr_pool(struct srp_target_port *target)
{
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_fmr_pool_param fmr_param;

	memset(&fmr_param, 0, sizeof(fmr_param));
	fmr_param.pool_size = target->scsi_host->can_queue;
	fmr_param.dirty_watermark = fmr_param.pool_size / 4;
	fmr_param.cache = 1;
	fmr_param.max_pages_per_fmr = dev->max_pages_per_mr;
	fmr_param.page_shift = ilog2(dev->mr_page_size);
	fmr_param.access = (IB_ACCESS_LOCAL_WRITE |
			    IB_ACCESS_REMOTE_WRITE |
			    IB_ACCESS_REMOTE_READ);

	return ib_create_fmr_pool(dev->pd, &fmr_param);
}

/**
 * srp_destroy_fr_pool() - free the resources owned by a pool
 * @pool: Fast registration pool to be destroyed.
 */
static void srp_destroy_fr_pool(struct srp_fr_pool *pool)
{
	int i;
	struct srp_fr_desc *d;

	if (!pool)
		return;

	for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
		if (d->mr)
			ib_dereg_mr(d->mr);
	}
	kfree(pool);
}

/**
 * srp_create_fr_pool() - allocate and initialize a pool for fast registration
 * @device:            IB device to allocate fast registration descriptors for.
 * @pd:                Protection domain associated with the FR descriptors.
 * @pool_size:         Number of descriptors to allocate.
 * @max_page_list_len: Maximum fast registration work request page list length.
 */
static struct srp_fr_pool *srp_create_fr_pool(struct ib_device *device,
					      struct ib_pd *pd, int pool_size,
					      int max_page_list_len)
{
	struct srp_fr_pool *pool;
	struct srp_fr_desc *d;
	struct ib_mr *mr;
	int i, ret = -EINVAL;

	if (pool_size <= 0)
		goto err;
	ret = -ENOMEM;
	pool = kzalloc(sizeof(struct srp_fr_pool) +
		       pool_size * sizeof(struct srp_fr_desc), GFP_KERNEL);
	if (!pool)
		goto err;
	pool->size = pool_size;
	pool->max_page_list_len = max_page_list_len;
	spin_lock_init(&pool->lock);
	INIT_LIST_HEAD(&pool->free_list);

	for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
		mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG,
				 max_page_list_len);
		if (IS_ERR(mr)) {
			ret = PTR_ERR(mr);
			goto destroy_pool;
		}
		d->mr = mr;
		list_add_tail(&d->entry, &pool->free_list);
	}

out:
	return pool;

destroy_pool:
	srp_destroy_fr_pool(pool);

err:
	pool = ERR_PTR(ret);
	goto out;
}

/**
 * srp_fr_pool_get() - obtain a descriptor suitable for fast registration
 * @pool: Pool to obtain descriptor from.
 */
static struct srp_fr_desc *srp_fr_pool_get(struct srp_fr_pool *pool)
{
	struct srp_fr_desc *d = NULL;
	unsigned long flags;

	spin_lock_irqsave(&pool->lock, flags);
	if (!list_empty(&pool->free_list)) {
		d = list_first_entry(&pool->free_list, typeof(*d), entry);
		list_del(&d->entry);
	}
	spin_unlock_irqrestore(&pool->lock, flags);

	return d;
}

/**
 * srp_fr_pool_put() - put an FR descriptor back in the free list
 * @pool: Pool the descriptor was allocated from.
 * @desc: Pointer to an array of fast registration descriptor pointers.
 * @n:    Number of descriptors to put back.
 *
 * Note: The caller must already have queued an invalidation request for
 * desc->mr->rkey before calling this function.
 */
static void srp_fr_pool_put(struct srp_fr_pool *pool, struct srp_fr_desc **desc,
			    int n)
{
	unsigned long flags;
	int i;

	spin_lock_irqsave(&pool->lock, flags);
	for (i = 0; i < n; i++)
		list_add(&desc[i]->entry, &pool->free_list);
	spin_unlock_irqrestore(&pool->lock, flags);
}

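/*
 * Minimal sketch of the descriptor life cycle implemented by the two
 * helpers above (illustrative only; the real users are the
 * srp_map_finish_fr() and srp_unmap_data() paths further down in this
 * file):
 *
 *	struct srp_fr_desc *d = srp_fr_pool_get(ch->fr_pool);
 *
 *	if (d) {
 *		// post a fast registration WR that uses d->mr,
 *		// reference d->mr->rkey in the SRP memory descriptor,
 *		// later post an IB_WR_LOCAL_INV for d->mr->rkey, then:
 *		srp_fr_pool_put(ch->fr_pool, &d, 1);
 *	}
 */
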
static struct srp_fr_pool *srp_alloc_fr_pool(struct srp_target_port *target)
{
	struct srp_device *dev = target->srp_host->srp_dev;

	return srp_create_fr_pool(dev->dev, dev->pd,
				  target->scsi_host->can_queue,
				  dev->max_pages_per_mr);
}

/**
 * srp_destroy_qp() - destroy an RDMA queue pair
 * @ch: SRP RDMA channel.
 *
 * Change a queue pair into the error state and wait until all receive
 * completions have been processed before destroying it. This prevents the
 * receive completion handler from accessing the queue pair while it is
 * being destroyed.
 */
static void srp_destroy_qp(struct srp_rdma_ch *ch)
{
	static struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
	static struct ib_recv_wr wr = { .wr_id = SRP_LAST_WR_ID };
	struct ib_recv_wr *bad_wr;
	int ret;

	/* Destroying a QP and reusing ch->done is only safe if not connected */
	WARN_ON_ONCE(ch->connected);

	ret = ib_modify_qp(ch->qp, &attr, IB_QP_STATE);
	WARN_ONCE(ret, "ib_modify_qp() returned %d\n", ret);
	if (ret)
		goto out;

	init_completion(&ch->done);
	ret = ib_post_recv(ch->qp, &wr, &bad_wr);
	WARN_ONCE(ret, "ib_post_recv() returned %d\n", ret);
	if (ret == 0)
		wait_for_completion(&ch->done);

out:
	ib_destroy_qp(ch->qp);
}

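/*
 * Create (or re-create) the completion queues, queue pair and memory
 * registration pool for one RDMA channel. Any pre-existing QP, CQs and
 * registration pool are destroyed only after their replacements have been
 * allocated successfully, which is what allows srp_rport_reconnect() to
 * call this on a live channel.
 */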
static int srp_create_ch_ib(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_qp_init_attr *init_attr;
	struct ib_cq *recv_cq, *send_cq;
	struct ib_qp *qp;
	struct ib_fmr_pool *fmr_pool = NULL;
	struct srp_fr_pool *fr_pool = NULL;
	const int m = 1 + dev->use_fast_reg;
	struct ib_cq_init_attr cq_attr = {};
	int ret;

	init_attr = kzalloc(sizeof *init_attr, GFP_KERNEL);
	if (!init_attr)
		return -ENOMEM;

	/* + 1 for SRP_LAST_WR_ID */
	cq_attr.cqe = target->queue_size + 1;
	cq_attr.comp_vector = ch->comp_vector;
	recv_cq = ib_create_cq(dev->dev, srp_recv_completion, NULL, ch,
			       &cq_attr);
	if (IS_ERR(recv_cq)) {
		ret = PTR_ERR(recv_cq);
		goto err;
	}

	cq_attr.cqe = m * target->queue_size;
	cq_attr.comp_vector = ch->comp_vector;
	send_cq = ib_create_cq(dev->dev, srp_send_completion, NULL, ch,
			       &cq_attr);
	if (IS_ERR(send_cq)) {
		ret = PTR_ERR(send_cq);
		goto err_recv_cq;
	}

	ib_req_notify_cq(recv_cq, IB_CQ_NEXT_COMP);

	init_attr->event_handler = srp_qp_event;
	init_attr->cap.max_send_wr = m * target->queue_size;
	init_attr->cap.max_recv_wr = target->queue_size + 1;
	init_attr->cap.max_recv_sge = 1;
	init_attr->cap.max_send_sge = 1;
	init_attr->sq_sig_type = IB_SIGNAL_REQ_WR;
	init_attr->qp_type = IB_QPT_RC;
	init_attr->send_cq = send_cq;
	init_attr->recv_cq = recv_cq;

	qp = ib_create_qp(dev->pd, init_attr);
	if (IS_ERR(qp)) {
		ret = PTR_ERR(qp);
		goto err_send_cq;
	}

	ret = srp_init_qp(target, qp);
	if (ret)
		goto err_qp;

	if (dev->use_fast_reg) {
		fr_pool = srp_alloc_fr_pool(target);
		if (IS_ERR(fr_pool)) {
			ret = PTR_ERR(fr_pool);
			shost_printk(KERN_WARNING, target->scsi_host, PFX
				     "FR pool allocation failed (%d)\n", ret);
			goto err_qp;
		}
	} else if (dev->use_fmr) {
		fmr_pool = srp_alloc_fmr_pool(target);
		if (IS_ERR(fmr_pool)) {
			ret = PTR_ERR(fmr_pool);
			shost_printk(KERN_WARNING, target->scsi_host, PFX
				     "FMR pool allocation failed (%d)\n", ret);
			goto err_qp;
		}
	}

	if (ch->qp)
		srp_destroy_qp(ch);
	if (ch->recv_cq)
		ib_destroy_cq(ch->recv_cq);
	if (ch->send_cq)
		ib_destroy_cq(ch->send_cq);

	ch->qp = qp;
	ch->recv_cq = recv_cq;
	ch->send_cq = send_cq;

	if (dev->use_fast_reg) {
		if (ch->fr_pool)
			srp_destroy_fr_pool(ch->fr_pool);
		ch->fr_pool = fr_pool;
	} else if (dev->use_fmr) {
		if (ch->fmr_pool)
			ib_destroy_fmr_pool(ch->fmr_pool);
		ch->fmr_pool = fmr_pool;
	}

	kfree(init_attr);
	return 0;

err_qp:
	ib_destroy_qp(qp);

err_send_cq:
	ib_destroy_cq(send_cq);

err_recv_cq:
	ib_destroy_cq(recv_cq);

err:
	kfree(init_attr);
	return ret;
}

/*
 * Note: this function may be called without srp_alloc_iu_bufs() having been
 * invoked. Hence the ch->[rt]x_ring checks.
 */
static void srp_free_ch_ib(struct srp_target_port *target,
			   struct srp_rdma_ch *ch)
{
	struct srp_device *dev = target->srp_host->srp_dev;
	int i;

	if (!ch->target)
		return;

	if (ch->cm_id) {
		ib_destroy_cm_id(ch->cm_id);
		ch->cm_id = NULL;
	}

	/* If srp_new_cm_id() succeeded but srp_create_ch_ib() did not, return. */
	if (!ch->qp)
		return;

	if (dev->use_fast_reg) {
		if (ch->fr_pool)
			srp_destroy_fr_pool(ch->fr_pool);
	} else if (dev->use_fmr) {
		if (ch->fmr_pool)
			ib_destroy_fmr_pool(ch->fmr_pool);
	}
	srp_destroy_qp(ch);
	ib_destroy_cq(ch->send_cq);
	ib_destroy_cq(ch->recv_cq);

	/*
	 * Prevent the SCSI error handler from using this channel after it
	 * has been freed: the error handler may continue trying to perform
	 * recovery actions after scsi_remove_host() has returned.
	 */
	ch->target = NULL;

	ch->qp = NULL;
	ch->send_cq = ch->recv_cq = NULL;

	if (ch->rx_ring) {
		for (i = 0; i < target->queue_size; ++i)
			srp_free_iu(target->srp_host, ch->rx_ring[i]);
		kfree(ch->rx_ring);
		ch->rx_ring = NULL;
	}
	if (ch->tx_ring) {
		for (i = 0; i < target->queue_size; ++i)
			srp_free_iu(target->srp_host, ch->tx_ring[i]);
		kfree(ch->tx_ring);
		ch->tx_ring = NULL;
	}
}

static void srp_path_rec_completion(int status,
				    struct ib_sa_path_rec *pathrec,
				    void *ch_ptr)
{
	struct srp_rdma_ch *ch = ch_ptr;
	struct srp_target_port *target = ch->target;

	ch->status = status;
	if (status)
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "Got failed path rec status %d\n", status);
	else
		ch->path = *pathrec;
	complete(&ch->done);
}

Roland Dreieraef9ec32005-11-02 14:07:13 -0800671{
Bart Van Assche509c07b2014-10-30 14:48:30 +0100672 struct srp_target_port *target = ch->target;
Bart Van Asschea702adc2014-03-14 13:53:10 +0100673 int ret;
674
Bart Van Assche509c07b2014-10-30 14:48:30 +0100675 ch->path.numb_path = 1;
Roland Dreieraef9ec32005-11-02 14:07:13 -0800676
Bart Van Assche509c07b2014-10-30 14:48:30 +0100677 init_completion(&ch->done);
Roland Dreieraef9ec32005-11-02 14:07:13 -0800678
Bart Van Assche509c07b2014-10-30 14:48:30 +0100679 ch->path_query_id = ib_sa_path_rec_get(&srp_sa_client,
680 target->srp_host->srp_dev->dev,
681 target->srp_host->port,
682 &ch->path,
683 IB_SA_PATH_REC_SERVICE_ID |
684 IB_SA_PATH_REC_DGID |
685 IB_SA_PATH_REC_SGID |
686 IB_SA_PATH_REC_NUMB_PATH |
687 IB_SA_PATH_REC_PKEY,
688 SRP_PATH_REC_TIMEOUT_MS,
689 GFP_KERNEL,
690 srp_path_rec_completion,
691 ch, &ch->path_query);
692 if (ch->path_query_id < 0)
693 return ch->path_query_id;
Roland Dreieraef9ec32005-11-02 14:07:13 -0800694
Bart Van Assche509c07b2014-10-30 14:48:30 +0100695 ret = wait_for_completion_interruptible(&ch->done);
Bart Van Asschea702adc2014-03-14 13:53:10 +0100696 if (ret < 0)
697 return ret;
Roland Dreieraef9ec32005-11-02 14:07:13 -0800698
Bart Van Assche509c07b2014-10-30 14:48:30 +0100699 if (ch->status < 0)
David Dillow7aa54bd2008-01-07 18:23:41 -0500700 shost_printk(KERN_WARNING, target->scsi_host,
701 PFX "Path record query failed\n");
Roland Dreieraef9ec32005-11-02 14:07:13 -0800702
Bart Van Assche509c07b2014-10-30 14:48:30 +0100703 return ch->status;
Roland Dreieraef9ec32005-11-02 14:07:13 -0800704}
705
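/*
 * Build and send the SRP_LOGIN_REQ for one channel, wrapped in an IB CM
 * REQ. The initiator/target port identifier layout depends on the I/O
 * class reported by the target; see the comments in the function body.
 */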
static int srp_send_req(struct srp_rdma_ch *ch, bool multich)
{
	struct srp_target_port *target = ch->target;
	struct {
		struct ib_cm_req_param param;
		struct srp_login_req   priv;
	} *req = NULL;
	int status;

	req = kzalloc(sizeof *req, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	req->param.primary_path = &ch->path;
	req->param.alternate_path = NULL;
	req->param.service_id = target->service_id;
	req->param.qp_num = ch->qp->qp_num;
	req->param.qp_type = ch->qp->qp_type;
	req->param.private_data = &req->priv;
	req->param.private_data_len = sizeof req->priv;
	req->param.flow_control = 1;

	get_random_bytes(&req->param.starting_psn, 4);
	req->param.starting_psn &= 0xffffff;

	/*
	 * Pick some arbitrary defaults here; we could make these
	 * module parameters if anyone cared about setting them.
	 */
	req->param.responder_resources = 4;
	req->param.remote_cm_response_timeout = 20;
	req->param.local_cm_response_timeout = 20;
	req->param.retry_count = target->tl_retry_count;
	req->param.rnr_retry_count = 7;
	req->param.max_cm_retries = 15;

	req->priv.opcode = SRP_LOGIN_REQ;
	req->priv.tag = 0;
	req->priv.req_it_iu_len = cpu_to_be32(target->max_iu_len);
	req->priv.req_buf_fmt = cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
					    SRP_BUF_FORMAT_INDIRECT);
	req->priv.req_flags = (multich ? SRP_MULTICHAN_MULTI :
			       SRP_MULTICHAN_SINGLE);
	/*
	 * In the published SRP specification (draft rev. 16a), the
	 * port identifier format is 8 bytes of ID extension followed
	 * by 8 bytes of GUID.  Older drafts put the two halves in the
	 * opposite order, so that the GUID comes first.
	 *
	 * Targets conforming to these obsolete drafts can be
	 * recognized by the I/O Class they report.
	 */
	if (target->io_class == SRP_REV10_IB_IO_CLASS) {
		memcpy(req->priv.initiator_port_id,
		       &target->sgid.global.interface_id, 8);
		memcpy(req->priv.initiator_port_id + 8,
		       &target->initiator_ext, 8);
		memcpy(req->priv.target_port_id,     &target->ioc_guid, 8);
		memcpy(req->priv.target_port_id + 8, &target->id_ext, 8);
	} else {
		memcpy(req->priv.initiator_port_id,
		       &target->initiator_ext, 8);
		memcpy(req->priv.initiator_port_id + 8,
		       &target->sgid.global.interface_id, 8);
		memcpy(req->priv.target_port_id,     &target->id_ext, 8);
		memcpy(req->priv.target_port_id + 8, &target->ioc_guid, 8);
	}

	/*
	 * Topspin/Cisco SRP targets will reject our login unless we
	 * zero out the first 8 bytes of our initiator port ID and set
	 * the second 8 bytes to the local node GUID.
	 */
	if (srp_target_is_topspin(target)) {
		shost_printk(KERN_DEBUG, target->scsi_host,
			     PFX "Topspin/Cisco initiator port ID workaround "
			     "activated for target GUID %016llx\n",
			     be64_to_cpu(target->ioc_guid));
		memset(req->priv.initiator_port_id, 0, 8);
		memcpy(req->priv.initiator_port_id + 8,
		       &target->srp_host->srp_dev->dev->node_guid, 8);
	}

	status = ib_send_cm_req(ch->cm_id, &req->param);

	kfree(req);

	return status;
}

static bool srp_queue_remove_work(struct srp_target_port *target)
{
	bool changed = false;

	spin_lock_irq(&target->lock);
	if (target->state != SRP_TARGET_REMOVED) {
		target->state = SRP_TARGET_REMOVED;
		changed = true;
	}
	spin_unlock_irq(&target->lock);

	if (changed)
		queue_work(srp_remove_wq, &target->remove_work);

	return changed;
}

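/*
 * Mark every channel as disconnected and ask the CM to send a DREQ on each
 * one. A failure to send a DREQ is only logged; teardown continues anyway.
 */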
static void srp_disconnect_target(struct srp_target_port *target)
{
	struct srp_rdma_ch *ch;
	int i;

	/* XXX should send SRP_I_LOGOUT request */

	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		ch->connected = false;
		if (ch->cm_id && ib_send_cm_dreq(ch->cm_id, NULL, 0)) {
			shost_printk(KERN_DEBUG, target->scsi_host,
				     PFX "Sending CM DREQ failed\n");
		}
	}
}

static void srp_free_req_data(struct srp_target_port *target,
			      struct srp_rdma_ch *ch)
{
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_device *ibdev = dev->dev;
	struct srp_request *req;
	int i;

	if (!ch->req_ring)
		return;

	for (i = 0; i < target->req_ring_size; ++i) {
		req = &ch->req_ring[i];
		if (dev->use_fast_reg) {
			kfree(req->fr_list);
		} else {
			kfree(req->fmr_list);
			kfree(req->map_page);
		}
		if (req->indirect_dma_addr) {
			ib_dma_unmap_single(ibdev, req->indirect_dma_addr,
					    target->indirect_size,
					    DMA_TO_DEVICE);
		}
		kfree(req->indirect_desc);
	}

	kfree(ch->req_ring);
	ch->req_ring = NULL;
}

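/*
 * Allocate the per-channel request ring: for each slot, the FR or FMR
 * descriptor list (depending on the registration mode) plus a DMA-mapped
 * buffer for indirect data descriptors. A partially built ring can be
 * released with srp_free_req_data() above, which tolerates NULL entries.
 */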
static int srp_alloc_req_data(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	struct srp_device *srp_dev = target->srp_host->srp_dev;
	struct ib_device *ibdev = srp_dev->dev;
	struct srp_request *req;
	void *mr_list;
	dma_addr_t dma_addr;
	int i, ret = -ENOMEM;

	ch->req_ring = kcalloc(target->req_ring_size, sizeof(*ch->req_ring),
			       GFP_KERNEL);
	if (!ch->req_ring)
		goto out;

	for (i = 0; i < target->req_ring_size; ++i) {
		req = &ch->req_ring[i];
		mr_list = kmalloc(target->cmd_sg_cnt * sizeof(void *),
				  GFP_KERNEL);
		if (!mr_list)
			goto out;
		if (srp_dev->use_fast_reg) {
			req->fr_list = mr_list;
		} else {
			req->fmr_list = mr_list;
			req->map_page = kmalloc(srp_dev->max_pages_per_mr *
						sizeof(void *), GFP_KERNEL);
			if (!req->map_page)
				goto out;
		}
		req->indirect_desc = kmalloc(target->indirect_size, GFP_KERNEL);
		if (!req->indirect_desc)
			goto out;

		dma_addr = ib_dma_map_single(ibdev, req->indirect_desc,
					     target->indirect_size,
					     DMA_TO_DEVICE);
		if (ib_dma_mapping_error(ibdev, dma_addr))
			goto out;

		req->indirect_dma_addr = dma_addr;
	}
	ret = 0;

out:
	return ret;
}

/**
 * srp_del_scsi_host_attr() - Remove attributes defined in the host template.
 * @shost: SCSI host whose attributes to remove from sysfs.
 *
 * Note: Any attributes defined in the host template and that did not exist
 * before invocation of this function will be ignored.
 */
static void srp_del_scsi_host_attr(struct Scsi_Host *shost)
{
	struct device_attribute **attr;

	for (attr = shost->hostt->shost_attrs; attr && *attr; ++attr)
		device_remove_file(&shost->shost_dev, *attr);
}

static void srp_remove_target(struct srp_target_port *target)
{
	struct srp_rdma_ch *ch;
	int i;

	WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);

	srp_del_scsi_host_attr(target->scsi_host);
	srp_rport_get(target->rport);
	srp_remove_host(target->scsi_host);
	scsi_remove_host(target->scsi_host);
	srp_stop_rport_timers(target->rport);
	srp_disconnect_target(target);
	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		srp_free_ch_ib(target, ch);
	}
	cancel_work_sync(&target->tl_err_work);
	srp_rport_put(target->rport);
	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		srp_free_req_data(target, ch);
	}
	kfree(target->ch);
	target->ch = NULL;

	spin_lock(&target->srp_host->target_lock);
	list_del(&target->list);
	spin_unlock(&target->srp_host->target_lock);

	scsi_host_put(target->scsi_host);
}

static void srp_remove_work(struct work_struct *work)
{
	struct srp_target_port *target =
		container_of(work, struct srp_target_port, remove_work);

	WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);

	srp_remove_target(target);
}

static void srp_rport_delete(struct srp_rport *rport)
{
	struct srp_target_port *target = rport->lld_data;

	srp_queue_remove_work(target);
}

/**
 * srp_connected_ch() - number of connected channels
 * @target: SRP target port.
 */
static int srp_connected_ch(struct srp_target_port *target)
{
	int i, c = 0;

	for (i = 0; i < target->ch_count; i++)
		c += target->ch[i].connected;

	return c;
}

static int srp_connect_ch(struct srp_rdma_ch *ch, bool multich)
{
	struct srp_target_port *target = ch->target;
	int ret;

	WARN_ON_ONCE(!multich && srp_connected_ch(target) > 0);

	ret = srp_lookup_path(ch);
	if (ret)
		return ret;

	while (1) {
		init_completion(&ch->done);
		ret = srp_send_req(ch, multich);
		if (ret)
			return ret;
		ret = wait_for_completion_interruptible(&ch->done);
		if (ret < 0)
			return ret;

		/*
		 * The CM event handling code will set status to
		 * SRP_PORT_REDIRECT if we get a port redirect REJ
		 * back, or SRP_DLID_REDIRECT if we get a lid/qp
		 * redirect REJ back.
		 */
		switch (ch->status) {
		case 0:
			ch->connected = true;
			return 0;

		case SRP_PORT_REDIRECT:
			ret = srp_lookup_path(ch);
			if (ret)
				return ret;
			break;

		case SRP_DLID_REDIRECT:
			break;

		case SRP_STALE_CONN:
			shost_printk(KERN_ERR, target->scsi_host, PFX
				     "giving up on stale connection\n");
			ch->status = -ECONNRESET;
			return ch->status;

		default:
			return ch->status;
		}
	}
}

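/*
 * Post an IB_WR_LOCAL_INV work request for @rkey on the channel's QP, with
 * the wr_id tagged LOCAL_INV_WR_ID_MASK so the send completion path can
 * recognize it.
 */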
static int srp_inv_rkey(struct srp_rdma_ch *ch, u32 rkey)
{
	struct ib_send_wr *bad_wr;
	struct ib_send_wr wr = {
		.opcode		    = IB_WR_LOCAL_INV,
		.wr_id		    = LOCAL_INV_WR_ID_MASK,
		.next		    = NULL,
		.num_sge	    = 0,
		.send_flags	    = 0,
		.ex.invalidate_rkey = rkey,
	};

	return ib_post_send(ch->qp, &wr, &bad_wr);
}

static void srp_unmap_data(struct scsi_cmnd *scmnd,
			   struct srp_rdma_ch *ch,
			   struct srp_request *req)
{
	struct srp_target_port *target = ch->target;
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_device *ibdev = dev->dev;
	int i, res;

	if (!scsi_sglist(scmnd) ||
	    (scmnd->sc_data_direction != DMA_TO_DEVICE &&
	     scmnd->sc_data_direction != DMA_FROM_DEVICE))
		return;

	if (dev->use_fast_reg) {
		struct srp_fr_desc **pfr;

		for (i = req->nmdesc, pfr = req->fr_list; i > 0; i--, pfr++) {
			res = srp_inv_rkey(ch, (*pfr)->mr->rkey);
			if (res < 0) {
				shost_printk(KERN_ERR, target->scsi_host, PFX
					     "Queueing INV WR for rkey %#x failed (%d)\n",
					     (*pfr)->mr->rkey, res);
				queue_work(system_long_wq,
					   &target->tl_err_work);
			}
		}
		if (req->nmdesc)
			srp_fr_pool_put(ch->fr_pool, req->fr_list,
					req->nmdesc);
	} else if (dev->use_fmr) {
		struct ib_pool_fmr **pfmr;

		for (i = req->nmdesc, pfmr = req->fmr_list; i > 0; i--, pfmr++)
			ib_fmr_pool_unmap(*pfmr);
	}

	ib_dma_unmap_sg(ibdev, scsi_sglist(scmnd), scsi_sg_count(scmnd),
			scmnd->sc_data_direction);
}

/**
 * srp_claim_req - Take ownership of the scmnd associated with a request.
 * @ch: SRP RDMA channel.
 * @req: SRP request.
 * @sdev: If not NULL, only take ownership for this SCSI device.
 * @scmnd: If NULL, take ownership of @req->scmnd. If not NULL, only take
 *         ownership of @req->scmnd if it equals @scmnd.
 *
 * Return value:
 * Either NULL or a pointer to the SCSI command the caller became owner of.
 */
static struct scsi_cmnd *srp_claim_req(struct srp_rdma_ch *ch,
				       struct srp_request *req,
				       struct scsi_device *sdev,
				       struct scsi_cmnd *scmnd)
{
	unsigned long flags;

	spin_lock_irqsave(&ch->lock, flags);
	if (req->scmnd &&
	    (!sdev || req->scmnd->device == sdev) &&
	    (!scmnd || req->scmnd == scmnd)) {
		scmnd = req->scmnd;
		req->scmnd = NULL;
	} else {
		scmnd = NULL;
	}
	spin_unlock_irqrestore(&ch->lock, flags);

	return scmnd;
}

/**
 * srp_free_req() - Unmap data and add request to the free request list.
 * @ch: SRP RDMA channel.
 * @req: Request to be freed.
 * @scmnd: SCSI command associated with @req.
 * @req_lim_delta: Amount to be added to @target->req_lim.
 */
static void srp_free_req(struct srp_rdma_ch *ch, struct srp_request *req,
			 struct scsi_cmnd *scmnd, s32 req_lim_delta)
{
	unsigned long flags;

	srp_unmap_data(scmnd, ch, req);

	spin_lock_irqsave(&ch->lock, flags);
	ch->req_lim += req_lim_delta;
	spin_unlock_irqrestore(&ch->lock, flags);
}

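/*
 * Claim @req (optionally only if it belongs to @sdev), release its mapped
 * buffers and complete the SCSI command with @result. Used by the error
 * paths below that terminate outstanding requests in bulk.
 */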
static void srp_finish_req(struct srp_rdma_ch *ch, struct srp_request *req,
			   struct scsi_device *sdev, int result)
{
	struct scsi_cmnd *scmnd = srp_claim_req(ch, req, sdev, NULL);

	if (scmnd) {
		srp_free_req(ch, req, scmnd, 0);
		scmnd->result = result;
		scmnd->scsi_done(scmnd);
	}
}

static void srp_terminate_io(struct srp_rport *rport)
{
	struct srp_target_port *target = rport->lld_data;
	struct srp_rdma_ch *ch;
	struct Scsi_Host *shost = target->scsi_host;
	struct scsi_device *sdev;
	int i, j;

	/*
	 * Invoking srp_terminate_io() while srp_queuecommand() is running
	 * is not safe. Hence the warning statement below.
	 */
	shost_for_each_device(sdev, shost)
		WARN_ON_ONCE(sdev->request_queue->request_fn_active);

	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];

		for (j = 0; j < target->req_ring_size; ++j) {
			struct srp_request *req = &ch->req_ring[j];

			srp_finish_req(ch, req, NULL,
				       DID_TRANSPORT_FAILFAST << 16);
		}
	}
}

1186/*
1187 * It is up to the caller to ensure that srp_rport_reconnect() calls are
1188 * serialized and that no concurrent srp_queuecommand(), srp_abort(),
1189 * srp_reset_device() or srp_reset_host() calls will occur while this function
1190 * is in progress. One way to realize that is not to call this function
1191 * directly but to call srp_reconnect_rport() instead since that last function
1192 * serializes calls of this function via rport->mutex and also blocks
1193 * srp_queuecommand() calls before invoking this function.
1194 */
1195static int srp_rport_reconnect(struct srp_rport *rport)
1196{
1197 struct srp_target_port *target = rport->lld_data;
Bart Van Assched92c0da2014-10-06 17:14:36 +02001198 struct srp_rdma_ch *ch;
1199 int i, j, ret = 0;
1200 bool multich = false;
Bart Van Assche09be70a2012-03-17 17:18:54 +00001201
Roland Dreieraef9ec32005-11-02 14:07:13 -08001202 srp_disconnect_target(target);
Bart Van Assche34aa6542014-10-30 14:47:22 +01001203
1204 if (target->state == SRP_TARGET_SCANNING)
1205 return -ENODEV;
1206
Roland Dreieraef9ec32005-11-02 14:07:13 -08001207 /*
Bart Van Asschec7c4e7f2013-02-21 17:19:04 +00001208 * Now get a new local CM ID so that we avoid confusing the target in
1209 * case things are really fouled up. Doing so also ensures that all CM
1210 * callbacks will have finished before a new QP is allocated.
Roland Dreieraef9ec32005-11-02 14:07:13 -08001211 */
Bart Van Assched92c0da2014-10-06 17:14:36 +02001212 for (i = 0; i < target->ch_count; i++) {
1213 ch = &target->ch[i];
Bart Van Assched92c0da2014-10-06 17:14:36 +02001214 ret += srp_new_cm_id(ch);
Bart Van Assche536ae142010-11-26 13:58:27 -05001215 }
Bart Van Assched92c0da2014-10-06 17:14:36 +02001216 for (i = 0; i < target->ch_count; i++) {
1217 ch = &target->ch[i];
Bart Van Assched92c0da2014-10-06 17:14:36 +02001218 for (j = 0; j < target->req_ring_size; ++j) {
1219 struct srp_request *req = &ch->req_ring[j];
Roland Dreieraef9ec32005-11-02 14:07:13 -08001220
Bart Van Assched92c0da2014-10-06 17:14:36 +02001221 srp_finish_req(ch, req, NULL, DID_RESET << 16);
1222 }
1223 }
1224 for (i = 0; i < target->ch_count; i++) {
1225 ch = &target->ch[i];
Bart Van Assched92c0da2014-10-06 17:14:36 +02001226 /*
1227 * Whether or not creating a new CM ID succeeded, create a new
1228 * QP. This guarantees that all completion callback function
1229 * invocations have finished before request resetting starts.
1230 */
1231 ret += srp_create_ch_ib(ch);
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001232
Bart Van Assched92c0da2014-10-06 17:14:36 +02001233 INIT_LIST_HEAD(&ch->free_tx);
1234 for (j = 0; j < target->queue_size; ++j)
1235 list_add(&ch->tx_ring[j]->list, &ch->free_tx);
1236 }
Bart Van Assche8de9fe32015-05-18 13:23:36 +02001237
1238 target->qp_in_error = false;
1239
Bart Van Assched92c0da2014-10-06 17:14:36 +02001240 for (i = 0; i < target->ch_count; i++) {
1241 ch = &target->ch[i];
Bart Van Asschebbac5cc2015-05-18 13:26:17 +02001242 if (ret)
Bart Van Assched92c0da2014-10-06 17:14:36 +02001243 break;
Bart Van Assched92c0da2014-10-06 17:14:36 +02001244 ret = srp_connect_ch(ch, multich);
1245 multich = true;
1246 }
Bart Van Assche09be70a2012-03-17 17:18:54 +00001247
Bart Van Asscheed9b2262013-10-26 14:34:27 +02001248 if (ret == 0)
1249 shost_printk(KERN_INFO, target->scsi_host,
1250 PFX "reconnect succeeded\n");
Roland Dreieraef9ec32005-11-02 14:07:13 -08001251
1252 return ret;
1253}
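
/*
 * Usage sketch (illustrative, not part of this driver): external code is
 * expected to reach srp_rport_reconnect() only through the SRP transport
 * class, e.g.
 *
 *	ret = srp_reconnect_rport(rport);
 *
 * srp_reconnect_rport() takes rport->mutex and blocks srp_queuecommand()
 * before invoking the .reconnect callback, which is what makes the
 * per-channel teardown and reconnect loops above safe to run unlocked.
 */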
1254
David Dillow8f26c9f2011-01-14 19:45:50 -05001255static void srp_map_desc(struct srp_map_state *state, dma_addr_t dma_addr,
1256 unsigned int dma_len, u32 rkey)
Roland Dreierf5358a12006-06-17 20:37:29 -07001257{
David Dillow8f26c9f2011-01-14 19:45:50 -05001258 struct srp_direct_buf *desc = state->desc;
1259
Bart Van Assche3ae95da2015-08-10 17:08:18 -07001260 WARN_ON_ONCE(!dma_len);
1261
David Dillow8f26c9f2011-01-14 19:45:50 -05001262 desc->va = cpu_to_be64(dma_addr);
1263 desc->key = cpu_to_be32(rkey);
1264 desc->len = cpu_to_be32(dma_len);
1265
1266 state->total_len += dma_len;
1267 state->desc++;
1268 state->ndesc++;
1269}
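
/*
 * Worked example with illustrative values: appending a 4 KiB buffer at
 * DMA address 0x80000000 with rkey 0x1234 stores, in wire (big-endian)
 * byte order:
 *
 *	desc->va  = cpu_to_be64(0x80000000)
 *	desc->key = cpu_to_be32(0x1234)
 *	desc->len = cpu_to_be32(0x1000)
 *
 * and advances state->total_len by 4096 and state->ndesc by one.
 */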
1270
1271static int srp_map_finish_fmr(struct srp_map_state *state,
Bart Van Assche509c07b2014-10-30 14:48:30 +01001272 struct srp_rdma_ch *ch)
David Dillow8f26c9f2011-01-14 19:45:50 -05001273{
Bart Van Assche186fbc62015-08-10 17:06:29 -07001274 struct srp_target_port *target = ch->target;
1275 struct srp_device *dev = target->srp_host->srp_dev;
David Dillow8f26c9f2011-01-14 19:45:50 -05001276 struct ib_pool_fmr *fmr;
Roland Dreierf5358a12006-06-17 20:37:29 -07001277 u64 io_addr = 0;
David Dillow8f26c9f2011-01-14 19:45:50 -05001278
Bart Van Asschef731ed62015-08-10 17:07:27 -07001279 if (state->fmr.next >= state->fmr.end)
1280 return -ENOMEM;
1281
Sagi Grimberg26630e82015-10-13 19:11:38 +03001282 WARN_ON_ONCE(!dev->use_fmr);
1283
1284 if (state->npages == 0)
1285 return 0;
1286
1287 if (state->npages == 1 && target->global_mr) {
1288 srp_map_desc(state, state->base_dma_addr, state->dma_len,
1289 target->global_mr->rkey);
1290 goto reset_state;
1291 }
1292
Bart Van Assche509c07b2014-10-30 14:48:30 +01001293 fmr = ib_fmr_pool_map_phys(ch->fmr_pool, state->pages,
David Dillow8f26c9f2011-01-14 19:45:50 -05001294 state->npages, io_addr);
1295 if (IS_ERR(fmr))
1296 return PTR_ERR(fmr);
1297
Bart Van Asschef731ed62015-08-10 17:07:27 -07001298 *state->fmr.next++ = fmr;
Bart Van Assche52ede082014-05-20 15:07:45 +02001299 state->nmdesc++;
David Dillow8f26c9f2011-01-14 19:45:50 -05001300
Bart Van Assche186fbc62015-08-10 17:06:29 -07001301 srp_map_desc(state, state->base_dma_addr & ~dev->mr_page_mask,
1302 state->dma_len, fmr->fmr->rkey);
Bart Van Assche539dde62014-05-20 15:05:46 +02001303
Sagi Grimberg26630e82015-10-13 19:11:38 +03001304reset_state:
1305 state->npages = 0;
1306 state->dma_len = 0;
1307
David Dillow8f26c9f2011-01-14 19:45:50 -05001308 return 0;
1309}
1310
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001311static int srp_map_finish_fr(struct srp_map_state *state,
Bart Van Assche509c07b2014-10-30 14:48:30 +01001312 struct srp_rdma_ch *ch)
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001313{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001314 struct srp_target_port *target = ch->target;
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001315 struct srp_device *dev = target->srp_host->srp_dev;
1316 struct ib_send_wr *bad_wr;
Sagi Grimbergf7f7aab2015-10-13 19:11:39 +03001317 struct ib_reg_wr wr;
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001318 struct srp_fr_desc *desc;
1319 u32 rkey;
Sagi Grimbergf7f7aab2015-10-13 19:11:39 +03001320 int n, err;
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001321
Bart Van Asschef731ed62015-08-10 17:07:27 -07001322 if (state->fr.next >= state->fr.end)
1323 return -ENOMEM;
1324
Sagi Grimberg26630e82015-10-13 19:11:38 +03001325 WARN_ON_ONCE(!dev->use_fast_reg);
1326
Sagi Grimbergf7f7aab2015-10-13 19:11:39 +03001327 if (state->sg_nents == 0)
Sagi Grimberg26630e82015-10-13 19:11:38 +03001328 return 0;
1329
Sagi Grimbergf7f7aab2015-10-13 19:11:39 +03001330 if (state->sg_nents == 1 && target->global_mr) {
1331 srp_map_desc(state, sg_dma_address(state->sg),
1332 sg_dma_len(state->sg),
Sagi Grimberg26630e82015-10-13 19:11:38 +03001333 target->global_mr->rkey);
Sagi Grimbergf7f7aab2015-10-13 19:11:39 +03001334 return 1;
Sagi Grimberg26630e82015-10-13 19:11:38 +03001335 }
1336
Bart Van Assche509c07b2014-10-30 14:48:30 +01001337 desc = srp_fr_pool_get(ch->fr_pool);
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001338 if (!desc)
1339 return -ENOMEM;
1340
1341 rkey = ib_inc_rkey(desc->mr->rkey);
1342 ib_update_fast_reg_key(desc->mr, rkey);
1343
Sagi Grimbergf7f7aab2015-10-13 19:11:39 +03001344 n = ib_map_mr_sg(desc->mr, state->sg, state->sg_nents,
1345 dev->mr_page_size);
1346 if (unlikely(n < 0))
1347 return n;
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001348
Sagi Grimbergf7f7aab2015-10-13 19:11:39 +03001349 wr.wr.next = NULL;
1350 wr.wr.opcode = IB_WR_REG_MR;
Christoph Hellwige622f2f2015-10-08 09:16:33 +01001351 wr.wr.wr_id = FAST_REG_WR_ID_MASK;
Sagi Grimbergf7f7aab2015-10-13 19:11:39 +03001352 wr.wr.num_sge = 0;
1353 wr.wr.send_flags = 0;
1354 wr.mr = desc->mr;
1355 wr.key = desc->mr->rkey;
1356 wr.access = (IB_ACCESS_LOCAL_WRITE |
1357 IB_ACCESS_REMOTE_READ |
1358 IB_ACCESS_REMOTE_WRITE);
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001359
Bart Van Asschef731ed62015-08-10 17:07:27 -07001360 *state->fr.next++ = desc;
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001361 state->nmdesc++;
1362
Sagi Grimbergf7f7aab2015-10-13 19:11:39 +03001363 srp_map_desc(state, desc->mr->iova,
1364 desc->mr->length, desc->mr->rkey);
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001365
Sagi Grimberg26630e82015-10-13 19:11:38 +03001366 err = ib_post_send(ch->qp, &wr.wr, &bad_wr);
Sagi Grimbergf7f7aab2015-10-13 19:11:39 +03001367 if (unlikely(err))
Sagi Grimberg26630e82015-10-13 19:11:38 +03001368 return err;
1369
Sagi Grimbergf7f7aab2015-10-13 19:11:39 +03001370 return n;
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001371}
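
/*
 * A note on the rkey refresh above: ib_inc_rkey() increments only the low
 * 8 "consumer key" bits of the rkey, so an MR whose current rkey is
 * 0x010203ff is re-registered with rkey 0x01020300 (the low byte wraps).
 * Re-keying before each IB_WR_REG_MR ensures that a stale rkey from a
 * previous mapping of the same descriptor can no longer be used by the
 * target.
 */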
1372
David Dillow8f26c9f2011-01-14 19:45:50 -05001373static int srp_map_sg_entry(struct srp_map_state *state,
Bart Van Assche509c07b2014-10-30 14:48:30 +01001374 struct srp_rdma_ch *ch,
Bart Van Assche3ae95da2015-08-10 17:08:18 -07001375 struct scatterlist *sg, int sg_index)
David Dillow8f26c9f2011-01-14 19:45:50 -05001376{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001377 struct srp_target_port *target = ch->target;
Greg Kroah-Hartman05321932008-03-06 00:13:36 +01001378 struct srp_device *dev = target->srp_host->srp_dev;
Ralph Campbell85507bc2006-12-12 14:30:55 -08001379 struct ib_device *ibdev = dev->dev;
David Dillow8f26c9f2011-01-14 19:45:50 -05001380 dma_addr_t dma_addr = ib_sg_dma_address(ibdev, sg);
1381 unsigned int dma_len = ib_sg_dma_len(ibdev, sg);
Bart Van Assche3ae95da2015-08-10 17:08:18 -07001382 unsigned int len = 0;
David Dillow8f26c9f2011-01-14 19:45:50 -05001383 int ret;
Roland Dreierf5358a12006-06-17 20:37:29 -07001384
Bart Van Assche3ae95da2015-08-10 17:08:18 -07001385 WARN_ON_ONCE(!dma_len);
Ishai Rabinovitz559ce8f2006-08-03 10:35:43 -07001386
David Dillow8f26c9f2011-01-14 19:45:50 -05001387 while (dma_len) {
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001388 unsigned offset = dma_addr & ~dev->mr_page_mask;
1389 if (state->npages == dev->max_pages_per_mr || offset != 0) {
Sagi Grimbergf7f7aab2015-10-13 19:11:39 +03001390 ret = srp_map_finish_fmr(state, ch);
David Dillow8f26c9f2011-01-14 19:45:50 -05001391 if (ret)
1392 return ret;
Roland Dreierf5358a12006-06-17 20:37:29 -07001393 }
1394
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001395 len = min_t(unsigned int, dma_len, dev->mr_page_size - offset);
David Dillow8f26c9f2011-01-14 19:45:50 -05001396
1397 if (!state->npages)
1398 state->base_dma_addr = dma_addr;
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001399 state->pages[state->npages++] = dma_addr & dev->mr_page_mask;
Bart Van Assche52ede082014-05-20 15:07:45 +02001400 state->dma_len += len;
David Dillow8f26c9f2011-01-14 19:45:50 -05001401 dma_addr += len;
1402 dma_len -= len;
Roland Dreierf5358a12006-06-17 20:37:29 -07001403 }
1404
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001405 /*
1406 * If the last entry of the MR wasn't a full page, then we need to
David Dillow8f26c9f2011-01-14 19:45:50 -05001407 * close it out and start a new one -- we can only merge at page
 1408 * boundaries.
1409 */
Roland Dreierf5358a12006-06-17 20:37:29 -07001410 ret = 0;
Bart Van Assche0e0d3a42015-08-10 17:07:46 -07001411 if (len != dev->mr_page_size)
Sagi Grimbergf7f7aab2015-10-13 19:11:39 +03001412 ret = srp_map_finish_fmr(state, ch);
Roland Dreierf5358a12006-06-17 20:37:29 -07001413 return ret;
1414}
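
/*
 * Worked example with illustrative numbers: for mr_page_size = 4096
 * (mr_page_mask = ~0xfffULL), an S/G entry with dma_addr = 0x10000800 and
 * dma_len = 0x2000 is consumed by the loop above in three steps:
 *
 *	1. offset = 0x800 (non-zero, so any partially filled mapping is
 *	   flushed first), len = 0x800,  page recorded: 0x10000000
 *	2. offset = 0,     len = 0x1000, page recorded: 0x10001000
 *	3. offset = 0,     len = 0x800,  page recorded: 0x10002000
 *
 * Since the final len (0x800) != mr_page_size, the mapping is closed out
 * via srp_map_finish_fmr() instead of being merged with the next entry.
 */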
1415
Sagi Grimberg26630e82015-10-13 19:11:38 +03001416static int srp_map_sg_fmr(struct srp_map_state *state, struct srp_rdma_ch *ch,
1417 struct srp_request *req, struct scatterlist *scat,
1418 int count)
1419{
1420 struct scatterlist *sg;
1421 int i, ret;
1422
1423 state->desc = req->indirect_desc;
1424 state->pages = req->map_page;
1425 state->fmr.next = req->fmr_list;
1426 state->fmr.end = req->fmr_list + ch->target->cmd_sg_cnt;
1427
1428 for_each_sg(scat, sg, count, i) {
1429 ret = srp_map_sg_entry(state, ch, sg, i);
1430 if (ret)
1431 return ret;
1432 }
1433
Sagi Grimbergf7f7aab2015-10-13 19:11:39 +03001434 ret = srp_map_finish_fmr(state, ch);
Sagi Grimberg26630e82015-10-13 19:11:38 +03001435 if (ret)
1436 return ret;
1437
1438 req->nmdesc = state->nmdesc;
1439
1440 return 0;
1441}
1442
1443static int srp_map_sg_fr(struct srp_map_state *state, struct srp_rdma_ch *ch,
1444 struct srp_request *req, struct scatterlist *scat,
1445 int count)
1446{
Sagi Grimberg26630e82015-10-13 19:11:38 +03001447 state->desc = req->indirect_desc;
Sagi Grimbergf7f7aab2015-10-13 19:11:39 +03001448 state->fr.next = req->fr_list;
1449 state->fr.end = req->fr_list + ch->target->cmd_sg_cnt;
1450 state->sg = scat;
1451 state->sg_nents = scsi_sg_count(req->scmnd);
Sagi Grimberg26630e82015-10-13 19:11:38 +03001452
Sagi Grimbergf7f7aab2015-10-13 19:11:39 +03001453 while (state->sg_nents) {
1454 int i, n;
1455
1456 n = srp_map_finish_fr(state, ch);
1457 if (unlikely(n < 0))
1458 return n;
1459
1460 state->sg_nents -= n;
1461 for (i = 0; i < n; i++)
1462 state->sg = sg_next(state->sg);
Sagi Grimberg26630e82015-10-13 19:11:38 +03001463 }
1464
Sagi Grimberg26630e82015-10-13 19:11:38 +03001465 req->nmdesc = state->nmdesc;
1466
1467 return 0;
1468}
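
/*
 * Consumption sketch for the loop above (illustrative numbers): with
 * sg_nents = 5 and an MR with room for the pages of only the first three
 * entries, the first srp_map_finish_fr() call returns 3 and the second
 * returns 2; state->sg is advanced past the registered entries after each
 * call, and the loop terminates once sg_nents reaches zero.
 */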
1469
1470static int srp_map_sg_dma(struct srp_map_state *state, struct srp_rdma_ch *ch,
1471 struct srp_request *req, struct scatterlist *scat,
1472 int count)
Bart Van Assche76bc1e12014-05-20 15:05:21 +02001473{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001474 struct srp_target_port *target = ch->target;
Bart Van Assche76bc1e12014-05-20 15:05:21 +02001475 struct srp_device *dev = target->srp_host->srp_dev;
Bart Van Assche76bc1e12014-05-20 15:05:21 +02001476 struct scatterlist *sg;
Sagi Grimberg26630e82015-10-13 19:11:38 +03001477 int i;
Bart Van Assche76bc1e12014-05-20 15:05:21 +02001478
Sagi Grimberg26630e82015-10-13 19:11:38 +03001479 state->desc = req->indirect_desc;
1480 for_each_sg(scat, sg, count, i) {
1481 srp_map_desc(state, ib_sg_dma_address(dev->dev, sg),
1482 ib_sg_dma_len(dev->dev, sg),
1483 target->global_mr->rkey);
Bart Van Assche0e0d3a42015-08-10 17:07:46 -07001484 }
Bart Van Assche76bc1e12014-05-20 15:05:21 +02001485
Bart Van Assche52ede082014-05-20 15:07:45 +02001486 req->nmdesc = state->nmdesc;
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001487
Sagi Grimberg26630e82015-10-13 19:11:38 +03001488 return 0;
Bart Van Assche76bc1e12014-05-20 15:05:21 +02001489}
1490
Bart Van Assche330179f2015-08-10 17:09:05 -07001491/*
1492 * Register the indirect data buffer descriptor with the HCA.
1493 *
1494 * Note: since the indirect data buffer descriptor has been allocated with
1495 * kmalloc() it is guaranteed that this buffer is a physically contiguous
1496 * memory buffer.
1497 */
1498static int srp_map_idb(struct srp_rdma_ch *ch, struct srp_request *req,
1499 void **next_mr, void **end_mr, u32 idb_len,
1500 __be32 *idb_rkey)
1501{
1502 struct srp_target_port *target = ch->target;
1503 struct srp_device *dev = target->srp_host->srp_dev;
1504 struct srp_map_state state;
1505 struct srp_direct_buf idb_desc;
1506 u64 idb_pages[1];
Sagi Grimbergf7f7aab2015-10-13 19:11:39 +03001507 struct scatterlist idb_sg[1];
Bart Van Assche330179f2015-08-10 17:09:05 -07001508 int ret;
1509
1510 memset(&state, 0, sizeof(state));
1511 memset(&idb_desc, 0, sizeof(idb_desc));
1512 state.gen.next = next_mr;
1513 state.gen.end = end_mr;
1514 state.desc = &idb_desc;
Bart Van Assche330179f2015-08-10 17:09:05 -07001515 state.base_dma_addr = req->indirect_dma_addr;
1516 state.dma_len = idb_len;
Sagi Grimbergf7f7aab2015-10-13 19:11:39 +03001517
1518 if (dev->use_fast_reg) {
1519 state.sg = idb_sg;
1520 state.sg_nents = 1;
1521 sg_set_buf(idb_sg, req->indirect_desc, idb_len);
1522 idb_sg->dma_address = req->indirect_dma_addr; /* hack! */
1523 ret = srp_map_finish_fr(&state, ch);
1524 if (ret < 0)
1525 return ret;
1526 } else if (dev->use_fmr) {
1527 state.pages = idb_pages;
1528 state.pages[0] = (req->indirect_dma_addr &
1529 dev->mr_page_mask);
1530 state.npages = 1;
1531 ret = srp_map_finish_fmr(&state, ch);
1532 if (ret < 0)
1533 return ret;
1534 } else {
1535 return -EINVAL;
1536 }
Bart Van Assche330179f2015-08-10 17:09:05 -07001537
1538 *idb_rkey = idb_desc.key;
1539
Sagi Grimbergf7f7aab2015-10-13 19:11:39 +03001540 return 0;
Bart Van Assche330179f2015-08-10 17:09:05 -07001541}
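
/*
 * Sizing sketch (assumes the packed srp_direct_buf/srp_indirect_buf
 * layouts from <scsi/srp.h>): for an indirect table of three 16-byte
 * srp_direct_buf entries, the caller passes
 * idb_len = sizeof(struct srp_indirect_buf) + 3 * 16 = 20 + 48 = 68, and
 * the single MR registered here covers exactly those 68 bytes of the
 * kmalloc'ed, physically contiguous req->indirect_desc buffer.
 */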
1542
Bart Van Assche509c07b2014-10-30 14:48:30 +01001543static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch,
Roland Dreieraef9ec32005-11-02 14:07:13 -08001544 struct srp_request *req)
1545{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001546 struct srp_target_port *target = ch->target;
Bart Van Assche76bc1e12014-05-20 15:05:21 +02001547 struct scatterlist *scat;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001548 struct srp_cmd *cmd = req->cmd->buf;
Bart Van Assche330179f2015-08-10 17:09:05 -07001549 int len, nents, count, ret;
Ralph Campbell85507bc2006-12-12 14:30:55 -08001550 struct srp_device *dev;
1551 struct ib_device *ibdev;
David Dillow8f26c9f2011-01-14 19:45:50 -05001552 struct srp_map_state state;
1553 struct srp_indirect_buf *indirect_hdr;
Bart Van Assche330179f2015-08-10 17:09:05 -07001554 u32 idb_len, table_len;
1555 __be32 idb_rkey;
David Dillow8f26c9f2011-01-14 19:45:50 -05001556 u8 fmt;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001557
FUJITA Tomonoribb350d12007-05-26 02:28:25 +09001558 if (!scsi_sglist(scmnd) || scmnd->sc_data_direction == DMA_NONE)
Roland Dreieraef9ec32005-11-02 14:07:13 -08001559 return sizeof (struct srp_cmd);
1560
1561 if (scmnd->sc_data_direction != DMA_FROM_DEVICE &&
1562 scmnd->sc_data_direction != DMA_TO_DEVICE) {
David Dillow7aa54bd2008-01-07 18:23:41 -05001563 shost_printk(KERN_WARNING, target->scsi_host,
1564 PFX "Unhandled data direction %d\n",
1565 scmnd->sc_data_direction);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001566 return -EINVAL;
1567 }
1568
FUJITA Tomonoribb350d12007-05-26 02:28:25 +09001569 nents = scsi_sg_count(scmnd);
1570 scat = scsi_sglist(scmnd);
Roland Dreiercf368712006-03-24 15:47:26 -08001571
Greg Kroah-Hartman05321932008-03-06 00:13:36 +01001572 dev = target->srp_host->srp_dev;
Ralph Campbell85507bc2006-12-12 14:30:55 -08001573 ibdev = dev->dev;
1574
1575 count = ib_dma_map_sg(ibdev, scat, nents, scmnd->sc_data_direction);
David Dillow8f26c9f2011-01-14 19:45:50 -05001576 if (unlikely(count == 0))
1577 return -EIO;
Roland Dreierf5358a12006-06-17 20:37:29 -07001578
1579 fmt = SRP_DATA_DESC_DIRECT;
1580 len = sizeof (struct srp_cmd) + sizeof (struct srp_direct_buf);
Roland Dreiercf368712006-03-24 15:47:26 -08001581
Bart Van Assche03f6fb92015-08-10 17:09:36 -07001582 if (count == 1 && target->global_mr) {
Roland Dreierf5358a12006-06-17 20:37:29 -07001583 /*
1584 * The midlayer only generated a single gather/scatter
1585 * entry, or DMA mapping coalesced everything to a
1586 * single entry. So a direct descriptor along with
1587 * the DMA MR suffices.
1588 */
Roland Dreieraef9ec32005-11-02 14:07:13 -08001589 struct srp_direct_buf *buf = (void *) cmd->add_data;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001590
Ralph Campbell85507bc2006-12-12 14:30:55 -08001591 buf->va = cpu_to_be64(ib_sg_dma_address(ibdev, scat));
Bart Van Assche03f6fb92015-08-10 17:09:36 -07001592 buf->key = cpu_to_be32(target->global_mr->rkey);
Ralph Campbell85507bc2006-12-12 14:30:55 -08001593 buf->len = cpu_to_be32(ib_sg_dma_len(ibdev, scat));
Roland Dreiercf368712006-03-24 15:47:26 -08001594
Bart Van Assche52ede082014-05-20 15:07:45 +02001595 req->nmdesc = 0;
David Dillow8f26c9f2011-01-14 19:45:50 -05001596 goto map_complete;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001597 }
1598
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001599 /*
1600 * We have more than one scatter/gather entry, so build our indirect
1601 * descriptor table, trying to merge as many entries as we can.
David Dillow8f26c9f2011-01-14 19:45:50 -05001602 */
1603 indirect_hdr = (void *) cmd->add_data;
1604
David Dillowc07d4242011-01-16 13:57:10 -05001605 ib_dma_sync_single_for_cpu(ibdev, req->indirect_dma_addr,
1606 target->indirect_size, DMA_TO_DEVICE);
1607
David Dillow8f26c9f2011-01-14 19:45:50 -05001608 memset(&state, 0, sizeof(state));
Sagi Grimberg26630e82015-10-13 19:11:38 +03001609 if (dev->use_fast_reg)
1610 srp_map_sg_fr(&state, ch, req, scat, count);
1611 else if (dev->use_fmr)
1612 srp_map_sg_fmr(&state, ch, req, scat, count);
1613 else
1614 srp_map_sg_dma(&state, ch, req, scat, count);
David Dillow8f26c9f2011-01-14 19:45:50 -05001615
David Dillowc07d4242011-01-16 13:57:10 -05001616 /* We've mapped the request, now pull as much of the indirect
1617 * descriptor table as we can into the command buffer. If this
1618 * target is not using an external indirect table, we are
1619 * guaranteed to fit into the command, as the SCSI layer won't
1620 * give us more S/G entries than we allow.
David Dillow8f26c9f2011-01-14 19:45:50 -05001621 */
David Dillow8f26c9f2011-01-14 19:45:50 -05001622 if (state.ndesc == 1) {
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001623 /*
1624 * Memory registration collapsed the sg-list into one entry,
David Dillow8f26c9f2011-01-14 19:45:50 -05001625 * so use a direct descriptor.
1626 */
1627 struct srp_direct_buf *buf = (void *) cmd->add_data;
1628
David Dillowc07d4242011-01-16 13:57:10 -05001629 *buf = req->indirect_desc[0];
David Dillow8f26c9f2011-01-14 19:45:50 -05001630 goto map_complete;
1631 }
1632
David Dillowc07d4242011-01-16 13:57:10 -05001633 if (unlikely(target->cmd_sg_cnt < state.ndesc &&
1634 !target->allow_ext_sg)) {
1635 shost_printk(KERN_ERR, target->scsi_host,
1636 "Could not fit S/G list into SRP_CMD\n");
1637 return -EIO;
1638 }
1639
1640 count = min(state.ndesc, target->cmd_sg_cnt);
David Dillow8f26c9f2011-01-14 19:45:50 -05001641 table_len = state.ndesc * sizeof (struct srp_direct_buf);
Bart Van Assche330179f2015-08-10 17:09:05 -07001642 idb_len = sizeof(struct srp_indirect_buf) + table_len;
David Dillow8f26c9f2011-01-14 19:45:50 -05001643
1644 fmt = SRP_DATA_DESC_INDIRECT;
1645 len = sizeof(struct srp_cmd) + sizeof (struct srp_indirect_buf);
David Dillowc07d4242011-01-16 13:57:10 -05001646 len += count * sizeof (struct srp_direct_buf);
David Dillow8f26c9f2011-01-14 19:45:50 -05001647
David Dillowc07d4242011-01-16 13:57:10 -05001648 memcpy(indirect_hdr->desc_list, req->indirect_desc,
1649 count * sizeof (struct srp_direct_buf));
David Dillow8f26c9f2011-01-14 19:45:50 -05001650
Bart Van Assche03f6fb92015-08-10 17:09:36 -07001651 if (!target->global_mr) {
Bart Van Assche330179f2015-08-10 17:09:05 -07001652 ret = srp_map_idb(ch, req, state.gen.next, state.gen.end,
1653 idb_len, &idb_rkey);
1654 if (ret < 0)
1655 return ret;
1656 req->nmdesc++;
1657 } else {
Bart Van Assche03f6fb92015-08-10 17:09:36 -07001658 idb_rkey = target->global_mr->rkey;
Bart Van Assche330179f2015-08-10 17:09:05 -07001659 }
1660
David Dillowc07d4242011-01-16 13:57:10 -05001661 indirect_hdr->table_desc.va = cpu_to_be64(req->indirect_dma_addr);
Bart Van Assche330179f2015-08-10 17:09:05 -07001662 indirect_hdr->table_desc.key = idb_rkey;
David Dillow8f26c9f2011-01-14 19:45:50 -05001663 indirect_hdr->table_desc.len = cpu_to_be32(table_len);
1664 indirect_hdr->len = cpu_to_be32(state.total_len);
1665
1666 if (scmnd->sc_data_direction == DMA_TO_DEVICE)
David Dillowc07d4242011-01-16 13:57:10 -05001667 cmd->data_out_desc_cnt = count;
David Dillow8f26c9f2011-01-14 19:45:50 -05001668 else
David Dillowc07d4242011-01-16 13:57:10 -05001669 cmd->data_in_desc_cnt = count;
1670
1671 ib_dma_sync_single_for_device(ibdev, req->indirect_dma_addr, table_len,
1672 DMA_TO_DEVICE);
David Dillow8f26c9f2011-01-14 19:45:50 -05001673
1674map_complete:
Roland Dreieraef9ec32005-11-02 14:07:13 -08001675 if (scmnd->sc_data_direction == DMA_TO_DEVICE)
1676 cmd->buf_fmt = fmt << 4;
1677 else
1678 cmd->buf_fmt = fmt;
1679
Roland Dreieraef9ec32005-11-02 14:07:13 -08001680 return len;
1681}
1682
David Dillow05a1d752010-10-08 14:48:14 -04001683/*
Bart Van Assche76c75b22010-11-26 14:37:47 -05001684 * Return an IU, and possibly a credit, to the free pool
1685 */
Bart Van Assche509c07b2014-10-30 14:48:30 +01001686static void srp_put_tx_iu(struct srp_rdma_ch *ch, struct srp_iu *iu,
Bart Van Assche76c75b22010-11-26 14:37:47 -05001687 enum srp_iu_type iu_type)
1688{
1689 unsigned long flags;
1690
Bart Van Assche509c07b2014-10-30 14:48:30 +01001691 spin_lock_irqsave(&ch->lock, flags);
1692 list_add(&iu->list, &ch->free_tx);
Bart Van Assche76c75b22010-11-26 14:37:47 -05001693 if (iu_type != SRP_IU_RSP)
Bart Van Assche509c07b2014-10-30 14:48:30 +01001694 ++ch->req_lim;
1695 spin_unlock_irqrestore(&ch->lock, flags);
Bart Van Assche76c75b22010-11-26 14:37:47 -05001696}
1697
1698/*
Bart Van Assche509c07b2014-10-30 14:48:30 +01001699 * Must be called with ch->lock held to protect req_lim and free_tx.
Bart Van Asschee9684672010-11-26 15:08:38 -05001700 * If IU is not sent, it must be returned using srp_put_tx_iu().
David Dillow05a1d752010-10-08 14:48:14 -04001701 *
1702 * Note:
1703 * An upper limit for the number of allocated information units for each
1704 * request type is:
1705 * - SRP_IU_CMD: SRP_CMD_SQ_SIZE, since the SCSI mid-layer never queues
1706 * more than Scsi_Host.can_queue requests.
1707 * - SRP_IU_TSK_MGMT: SRP_TSK_MGMT_SQ_SIZE.
1708 * - SRP_IU_RSP: 1, since a conforming SRP target never sends more than
1709 * one unanswered SRP request to an initiator.
1710 */
Bart Van Assche509c07b2014-10-30 14:48:30 +01001711static struct srp_iu *__srp_get_tx_iu(struct srp_rdma_ch *ch,
David Dillow05a1d752010-10-08 14:48:14 -04001712 enum srp_iu_type iu_type)
1713{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001714 struct srp_target_port *target = ch->target;
David Dillow05a1d752010-10-08 14:48:14 -04001715 s32 rsv = (iu_type == SRP_IU_TSK_MGMT) ? 0 : SRP_TSK_MGMT_SQ_SIZE;
1716 struct srp_iu *iu;
1717
Bart Van Assche509c07b2014-10-30 14:48:30 +01001718 srp_send_completion(ch->send_cq, ch);
David Dillow05a1d752010-10-08 14:48:14 -04001719
Bart Van Assche509c07b2014-10-30 14:48:30 +01001720 if (list_empty(&ch->free_tx))
David Dillow05a1d752010-10-08 14:48:14 -04001721 return NULL;
1722
1723 /* Initiator responses to target requests do not consume credits */
Bart Van Assche76c75b22010-11-26 14:37:47 -05001724 if (iu_type != SRP_IU_RSP) {
Bart Van Assche509c07b2014-10-30 14:48:30 +01001725 if (ch->req_lim <= rsv) {
Bart Van Assche76c75b22010-11-26 14:37:47 -05001726 ++target->zero_req_lim;
1727 return NULL;
1728 }
1729
Bart Van Assche509c07b2014-10-30 14:48:30 +01001730 --ch->req_lim;
David Dillow05a1d752010-10-08 14:48:14 -04001731 }
1732
Bart Van Assche509c07b2014-10-30 14:48:30 +01001733 iu = list_first_entry(&ch->free_tx, struct srp_iu, list);
Bart Van Assche76c75b22010-11-26 14:37:47 -05001734 list_del(&iu->list);
David Dillow05a1d752010-10-08 14:48:14 -04001735 return iu;
1736}
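
/*
 * Worked example of the credit reservation above (assuming
 * SRP_TSK_MGMT_SQ_SIZE == 1, as defined in ib_srp.h): with ch->req_lim
 * == 1, an SRP_IU_CMD allocation fails the req_lim <= rsv test and
 * returns NULL, keeping the last credit in reserve; an SRP_IU_TSK_MGMT
 * allocation (rsv == 0) still succeeds and drops req_lim to zero; and an
 * SRP_IU_RSP allocation never consumes a credit at all.
 */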
1737
Bart Van Assche509c07b2014-10-30 14:48:30 +01001738static int srp_post_send(struct srp_rdma_ch *ch, struct srp_iu *iu, int len)
David Dillow05a1d752010-10-08 14:48:14 -04001739{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001740 struct srp_target_port *target = ch->target;
David Dillow05a1d752010-10-08 14:48:14 -04001741 struct ib_sge list;
1742 struct ib_send_wr wr, *bad_wr;
David Dillow05a1d752010-10-08 14:48:14 -04001743
1744 list.addr = iu->dma;
1745 list.length = len;
David Dillow9af76272010-11-26 15:34:46 -05001746 list.lkey = target->lkey;
David Dillow05a1d752010-10-08 14:48:14 -04001747
1748 wr.next = NULL;
Bart Van Asschedcb4cb82010-11-26 13:22:48 -05001749 wr.wr_id = (uintptr_t) iu;
David Dillow05a1d752010-10-08 14:48:14 -04001750 wr.sg_list = &list;
1751 wr.num_sge = 1;
1752 wr.opcode = IB_WR_SEND;
1753 wr.send_flags = IB_SEND_SIGNALED;
1754
Bart Van Assche509c07b2014-10-30 14:48:30 +01001755 return ib_post_send(ch->qp, &wr, &bad_wr);
David Dillow05a1d752010-10-08 14:48:14 -04001756}
1757
Bart Van Assche509c07b2014-10-30 14:48:30 +01001758static int srp_post_recv(struct srp_rdma_ch *ch, struct srp_iu *iu)
Bart Van Asschec996bb42010-07-30 10:59:05 +00001759{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001760 struct srp_target_port *target = ch->target;
Bart Van Asschec996bb42010-07-30 10:59:05 +00001761 struct ib_recv_wr wr, *bad_wr;
Bart Van Asschedcb4cb82010-11-26 13:22:48 -05001762 struct ib_sge list;
Bart Van Asschec996bb42010-07-30 10:59:05 +00001763
1764 list.addr = iu->dma;
1765 list.length = iu->size;
David Dillow9af76272010-11-26 15:34:46 -05001766 list.lkey = target->lkey;
Bart Van Asschec996bb42010-07-30 10:59:05 +00001767
1768 wr.next = NULL;
Bart Van Asschedcb4cb82010-11-26 13:22:48 -05001769 wr.wr_id = (uintptr_t) iu;
Bart Van Asschec996bb42010-07-30 10:59:05 +00001770 wr.sg_list = &list;
1771 wr.num_sge = 1;
1772
Bart Van Assche509c07b2014-10-30 14:48:30 +01001773 return ib_post_recv(ch->qp, &wr, &bad_wr);
Bart Van Asschec996bb42010-07-30 10:59:05 +00001774}
1775
Bart Van Assche509c07b2014-10-30 14:48:30 +01001776static void srp_process_rsp(struct srp_rdma_ch *ch, struct srp_rsp *rsp)
Roland Dreieraef9ec32005-11-02 14:07:13 -08001777{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001778 struct srp_target_port *target = ch->target;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001779 struct srp_request *req;
1780 struct scsi_cmnd *scmnd;
1781 unsigned long flags;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001782
Roland Dreieraef9ec32005-11-02 14:07:13 -08001783 if (unlikely(rsp->tag & SRP_TAG_TSK_MGMT)) {
Bart Van Assche509c07b2014-10-30 14:48:30 +01001784 spin_lock_irqsave(&ch->lock, flags);
1785 ch->req_lim += be32_to_cpu(rsp->req_lim_delta);
1786 spin_unlock_irqrestore(&ch->lock, flags);
Bart Van Assche94a91742010-11-26 14:50:09 -05001787
Bart Van Assche509c07b2014-10-30 14:48:30 +01001788 ch->tsk_mgmt_status = -1;
David Dillowf8b6e312010-11-26 13:02:21 -05001789 if (be32_to_cpu(rsp->resp_data_len) >= 4)
Bart Van Assche509c07b2014-10-30 14:48:30 +01001790 ch->tsk_mgmt_status = rsp->data[3];
1791 complete(&ch->tsk_mgmt_done);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001792 } else {
Bart Van Assche77f2c1a2014-10-02 15:29:25 +02001793 scmnd = scsi_host_find_tag(target->scsi_host, rsp->tag);
1794 if (scmnd) {
1795 req = (void *)scmnd->host_scribble;
1796 scmnd = srp_claim_req(ch, req, NULL, scmnd);
1797 }
Bart Van Assche22032992012-08-14 13:18:53 +00001798 if (!scmnd) {
David Dillow7aa54bd2008-01-07 18:23:41 -05001799 shost_printk(KERN_ERR, target->scsi_host,
Bart Van Assched92c0da2014-10-06 17:14:36 +02001800 "Null scmnd for RSP w/tag %#016llx received on ch %td / QP %#x\n",
1801 rsp->tag, ch - target->ch, ch->qp->qp_num);
Bart Van Assche22032992012-08-14 13:18:53 +00001802
Bart Van Assche509c07b2014-10-30 14:48:30 +01001803 spin_lock_irqsave(&ch->lock, flags);
1804 ch->req_lim += be32_to_cpu(rsp->req_lim_delta);
1805 spin_unlock_irqrestore(&ch->lock, flags);
Bart Van Assche22032992012-08-14 13:18:53 +00001806
1807 return;
1808 }
Roland Dreieraef9ec32005-11-02 14:07:13 -08001809 scmnd->result = rsp->status;
1810
1811 if (rsp->flags & SRP_RSP_FLAG_SNSVALID) {
1812 memcpy(scmnd->sense_buffer, rsp->data +
1813 be32_to_cpu(rsp->resp_data_len),
1814 min_t(int, be32_to_cpu(rsp->sense_data_len),
1815 SCSI_SENSE_BUFFERSIZE));
1816 }
1817
Bart Van Asschee7145312014-07-09 15:57:51 +02001818 if (unlikely(rsp->flags & SRP_RSP_FLAG_DIUNDER))
FUJITA Tomonoribb350d12007-05-26 02:28:25 +09001819 scsi_set_resid(scmnd, be32_to_cpu(rsp->data_in_res_cnt));
Bart Van Asschee7145312014-07-09 15:57:51 +02001820 else if (unlikely(rsp->flags & SRP_RSP_FLAG_DIOVER))
1821 scsi_set_resid(scmnd, -be32_to_cpu(rsp->data_in_res_cnt));
1822 else if (unlikely(rsp->flags & SRP_RSP_FLAG_DOUNDER))
1823 scsi_set_resid(scmnd, be32_to_cpu(rsp->data_out_res_cnt));
1824 else if (unlikely(rsp->flags & SRP_RSP_FLAG_DOOVER))
1825 scsi_set_resid(scmnd, -be32_to_cpu(rsp->data_out_res_cnt));
Roland Dreieraef9ec32005-11-02 14:07:13 -08001826
Bart Van Assche509c07b2014-10-30 14:48:30 +01001827 srp_free_req(ch, req, scmnd,
Bart Van Assche22032992012-08-14 13:18:53 +00001828 be32_to_cpu(rsp->req_lim_delta));
1829
David Dillowf8b6e312010-11-26 13:02:21 -05001830 scmnd->host_scribble = NULL;
1831 scmnd->scsi_done(scmnd);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001832 }
Roland Dreieraef9ec32005-11-02 14:07:13 -08001833}
1834
Bart Van Assche509c07b2014-10-30 14:48:30 +01001835static int srp_response_common(struct srp_rdma_ch *ch, s32 req_delta,
David Dillowbb125882010-10-08 14:40:47 -04001836 void *rsp, int len)
1837{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001838 struct srp_target_port *target = ch->target;
Bart Van Assche76c75b22010-11-26 14:37:47 -05001839 struct ib_device *dev = target->srp_host->srp_dev->dev;
David Dillowbb125882010-10-08 14:40:47 -04001840 unsigned long flags;
1841 struct srp_iu *iu;
Bart Van Assche76c75b22010-11-26 14:37:47 -05001842 int err;
David Dillowbb125882010-10-08 14:40:47 -04001843
Bart Van Assche509c07b2014-10-30 14:48:30 +01001844 spin_lock_irqsave(&ch->lock, flags);
1845 ch->req_lim += req_delta;
1846 iu = __srp_get_tx_iu(ch, SRP_IU_RSP);
1847 spin_unlock_irqrestore(&ch->lock, flags);
Bart Van Assche76c75b22010-11-26 14:37:47 -05001848
David Dillowbb125882010-10-08 14:40:47 -04001849 if (!iu) {
1850 shost_printk(KERN_ERR, target->scsi_host, PFX
1851 "no IU available to send response\n");
Bart Van Assche76c75b22010-11-26 14:37:47 -05001852 return 1;
David Dillowbb125882010-10-08 14:40:47 -04001853 }
1854
1855 ib_dma_sync_single_for_cpu(dev, iu->dma, len, DMA_TO_DEVICE);
1856 memcpy(iu->buf, rsp, len);
1857 ib_dma_sync_single_for_device(dev, iu->dma, len, DMA_TO_DEVICE);
1858
Bart Van Assche509c07b2014-10-30 14:48:30 +01001859 err = srp_post_send(ch, iu, len);
Bart Van Assche76c75b22010-11-26 14:37:47 -05001860 if (err) {
David Dillowbb125882010-10-08 14:40:47 -04001861 shost_printk(KERN_ERR, target->scsi_host, PFX
1862 "unable to post response: %d\n", err);
Bart Van Assche509c07b2014-10-30 14:48:30 +01001863 srp_put_tx_iu(ch, iu, SRP_IU_RSP);
Bart Van Assche76c75b22010-11-26 14:37:47 -05001864 }
David Dillowbb125882010-10-08 14:40:47 -04001865
David Dillowbb125882010-10-08 14:40:47 -04001866 return err;
1867}
1868
Bart Van Assche509c07b2014-10-30 14:48:30 +01001869static void srp_process_cred_req(struct srp_rdma_ch *ch,
David Dillowbb125882010-10-08 14:40:47 -04001870 struct srp_cred_req *req)
1871{
1872 struct srp_cred_rsp rsp = {
1873 .opcode = SRP_CRED_RSP,
1874 .tag = req->tag,
1875 };
1876 s32 delta = be32_to_cpu(req->req_lim_delta);
1877
Bart Van Assche509c07b2014-10-30 14:48:30 +01001878 if (srp_response_common(ch, delta, &rsp, sizeof(rsp)))
1879 shost_printk(KERN_ERR, ch->target->scsi_host, PFX
David Dillowbb125882010-10-08 14:40:47 -04001880 "problems processing SRP_CRED_REQ\n");
1881}
1882
Bart Van Assche509c07b2014-10-30 14:48:30 +01001883static void srp_process_aer_req(struct srp_rdma_ch *ch,
David Dillowbb125882010-10-08 14:40:47 -04001884 struct srp_aer_req *req)
1885{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001886 struct srp_target_port *target = ch->target;
David Dillowbb125882010-10-08 14:40:47 -04001887 struct srp_aer_rsp rsp = {
1888 .opcode = SRP_AER_RSP,
1889 .tag = req->tag,
1890 };
1891 s32 delta = be32_to_cpu(req->req_lim_delta);
1892
1893 shost_printk(KERN_ERR, target->scsi_host, PFX
Bart Van Assche985aa492015-05-18 13:27:14 +02001894 "ignoring AER for LUN %llu\n", scsilun_to_int(&req->lun));
David Dillowbb125882010-10-08 14:40:47 -04001895
Bart Van Assche509c07b2014-10-30 14:48:30 +01001896 if (srp_response_common(ch, delta, &rsp, sizeof(rsp)))
David Dillowbb125882010-10-08 14:40:47 -04001897 shost_printk(KERN_ERR, target->scsi_host, PFX
1898 "problems processing SRP_AER_REQ\n");
1899}
1900
Bart Van Assche509c07b2014-10-30 14:48:30 +01001901static void srp_handle_recv(struct srp_rdma_ch *ch, struct ib_wc *wc)
Roland Dreieraef9ec32005-11-02 14:07:13 -08001902{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001903 struct srp_target_port *target = ch->target;
Bart Van Asschedcb4cb82010-11-26 13:22:48 -05001904 struct ib_device *dev = target->srp_host->srp_dev->dev;
Roland Dreier737b94e2011-05-23 11:30:04 -07001905 struct srp_iu *iu = (struct srp_iu *) (uintptr_t) wc->wr_id;
Bart Van Asschec996bb42010-07-30 10:59:05 +00001906 int res;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001907 u8 opcode;
1908
Bart Van Assche509c07b2014-10-30 14:48:30 +01001909 ib_dma_sync_single_for_cpu(dev, iu->dma, ch->max_ti_iu_len,
Ralph Campbell85507bc2006-12-12 14:30:55 -08001910 DMA_FROM_DEVICE);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001911
1912 opcode = *(u8 *) iu->buf;
1913
1914 if (0) {
David Dillow7aa54bd2008-01-07 18:23:41 -05001915 shost_printk(KERN_ERR, target->scsi_host,
1916 PFX "recv completion, opcode 0x%02x\n", opcode);
Bart Van Assche7a700812010-07-29 15:56:37 +00001917 print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 8, 1,
1918 iu->buf, wc->byte_len, true);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001919 }
1920
1921 switch (opcode) {
1922 case SRP_RSP:
Bart Van Assche509c07b2014-10-30 14:48:30 +01001923 srp_process_rsp(ch, iu->buf);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001924 break;
1925
David Dillowbb125882010-10-08 14:40:47 -04001926 case SRP_CRED_REQ:
Bart Van Assche509c07b2014-10-30 14:48:30 +01001927 srp_process_cred_req(ch, iu->buf);
David Dillowbb125882010-10-08 14:40:47 -04001928 break;
1929
1930 case SRP_AER_REQ:
Bart Van Assche509c07b2014-10-30 14:48:30 +01001931 srp_process_aer_req(ch, iu->buf);
David Dillowbb125882010-10-08 14:40:47 -04001932 break;
1933
Roland Dreieraef9ec32005-11-02 14:07:13 -08001934 case SRP_T_LOGOUT:
1935 /* XXX Handle target logout */
David Dillow7aa54bd2008-01-07 18:23:41 -05001936 shost_printk(KERN_WARNING, target->scsi_host,
1937 PFX "Got target logout request\n");
Roland Dreieraef9ec32005-11-02 14:07:13 -08001938 break;
1939
1940 default:
David Dillow7aa54bd2008-01-07 18:23:41 -05001941 shost_printk(KERN_WARNING, target->scsi_host,
1942 PFX "Unhandled SRP opcode 0x%02x\n", opcode);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001943 break;
1944 }
1945
Bart Van Assche509c07b2014-10-30 14:48:30 +01001946 ib_dma_sync_single_for_device(dev, iu->dma, ch->max_ti_iu_len,
Ralph Campbell85507bc2006-12-12 14:30:55 -08001947 DMA_FROM_DEVICE);
Bart Van Asschec996bb42010-07-30 10:59:05 +00001948
Bart Van Assche509c07b2014-10-30 14:48:30 +01001949 res = srp_post_recv(ch, iu);
Bart Van Asschec996bb42010-07-30 10:59:05 +00001950 if (res != 0)
1951 shost_printk(KERN_ERR, target->scsi_host,
1952 PFX "Recv failed with error code %d\n", res);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001953}
1954
Bart Van Asschec1120f82013-10-26 14:35:08 +02001955/**
1956 * srp_tl_err_work() - handle a transport layer error
Bart Van Asscheaf246632014-05-20 15:04:21 +02001957 * @work: Work structure embedded in an SRP target port.
Bart Van Asschec1120f82013-10-26 14:35:08 +02001958 *
1959 * Note: This function may get invoked before the rport has been created,
1960 * hence the target->rport test.
1961 */
1962static void srp_tl_err_work(struct work_struct *work)
1963{
1964 struct srp_target_port *target;
1965
1966 target = container_of(work, struct srp_target_port, tl_err_work);
1967 if (target->rport)
1968 srp_start_tl_fail_timers(target->rport);
1969}
1970
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001971static void srp_handle_qp_err(u64 wr_id, enum ib_wc_status wc_status,
Bart Van Assche7dad6b22014-10-21 18:00:35 +02001972 bool send_err, struct srp_rdma_ch *ch)
Bart Van Assche948d1e82011-09-03 09:25:42 +02001973{
Bart Van Assche7dad6b22014-10-21 18:00:35 +02001974 struct srp_target_port *target = ch->target;
1975
1976 if (wr_id == SRP_LAST_WR_ID) {
1977 complete(&ch->done);
1978 return;
1979 }
1980
Bart Van Asschec014c8c2015-05-18 13:23:57 +02001981 if (ch->connected && !target->qp_in_error) {
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001982 if (wr_id & LOCAL_INV_WR_ID_MASK) {
1983 shost_printk(KERN_ERR, target->scsi_host, PFX
Sagi Grimberg57363d92015-05-18 13:40:29 +03001984 "LOCAL_INV failed with status %s (%d)\n",
1985 ib_wc_status_msg(wc_status), wc_status);
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001986 } else if (wr_id & FAST_REG_WR_ID_MASK) {
1987 shost_printk(KERN_ERR, target->scsi_host, PFX
Sagi Grimberg57363d92015-05-18 13:40:29 +03001988			     "FAST_REG_MR failed with status %s (%d)\n",
1989 ib_wc_status_msg(wc_status), wc_status);
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001990 } else {
1991 shost_printk(KERN_ERR, target->scsi_host,
Sagi Grimberg57363d92015-05-18 13:40:29 +03001992 PFX "failed %s status %s (%d) for iu %p\n",
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001993 send_err ? "send" : "receive",
Sagi Grimberg57363d92015-05-18 13:40:29 +03001994 ib_wc_status_msg(wc_status), wc_status,
1995 (void *)(uintptr_t)wr_id);
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001996 }
Bart Van Asschec1120f82013-10-26 14:35:08 +02001997 queue_work(system_long_wq, &target->tl_err_work);
Bart Van Assche4f0af692012-11-26 11:16:40 +01001998 }
Bart Van Assche948d1e82011-09-03 09:25:42 +02001999 target->qp_in_error = true;
2000}
2001
Bart Van Assche509c07b2014-10-30 14:48:30 +01002002static void srp_recv_completion(struct ib_cq *cq, void *ch_ptr)
Roland Dreieraef9ec32005-11-02 14:07:13 -08002003{
Bart Van Assche509c07b2014-10-30 14:48:30 +01002004 struct srp_rdma_ch *ch = ch_ptr;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002005 struct ib_wc wc;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002006
2007 ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
2008 while (ib_poll_cq(cq, 1, &wc) > 0) {
Bart Van Assche948d1e82011-09-03 09:25:42 +02002009 if (likely(wc.status == IB_WC_SUCCESS)) {
Bart Van Assche509c07b2014-10-30 14:48:30 +01002010 srp_handle_recv(ch, &wc);
Bart Van Assche948d1e82011-09-03 09:25:42 +02002011 } else {
Bart Van Assche7dad6b22014-10-21 18:00:35 +02002012 srp_handle_qp_err(wc.wr_id, wc.status, false, ch);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002013 }
Bart Van Assche9c03dc92010-02-02 19:23:54 +00002014 }
2015}
2016
Bart Van Assche509c07b2014-10-30 14:48:30 +01002017static void srp_send_completion(struct ib_cq *cq, void *ch_ptr)
Bart Van Assche9c03dc92010-02-02 19:23:54 +00002018{
Bart Van Assche509c07b2014-10-30 14:48:30 +01002019 struct srp_rdma_ch *ch = ch_ptr;
Bart Van Assche9c03dc92010-02-02 19:23:54 +00002020 struct ib_wc wc;
Bart Van Asschedcb4cb82010-11-26 13:22:48 -05002021 struct srp_iu *iu;
Bart Van Assche9c03dc92010-02-02 19:23:54 +00002022
2023 while (ib_poll_cq(cq, 1, &wc) > 0) {
Bart Van Assche948d1e82011-09-03 09:25:42 +02002024 if (likely(wc.status == IB_WC_SUCCESS)) {
2025 iu = (struct srp_iu *) (uintptr_t) wc.wr_id;
Bart Van Assche509c07b2014-10-30 14:48:30 +01002026 list_add(&iu->list, &ch->free_tx);
Bart Van Assche948d1e82011-09-03 09:25:42 +02002027 } else {
Bart Van Assche7dad6b22014-10-21 18:00:35 +02002028 srp_handle_qp_err(wc.wr_id, wc.status, true, ch);
Bart Van Assche9c03dc92010-02-02 19:23:54 +00002029 }
Roland Dreieraef9ec32005-11-02 14:07:13 -08002030 }
2031}
2032
Bart Van Assche76c75b22010-11-26 14:37:47 -05002033static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
Roland Dreieraef9ec32005-11-02 14:07:13 -08002034{
Bart Van Assche76c75b22010-11-26 14:37:47 -05002035 struct srp_target_port *target = host_to_target(shost);
Bart Van Asschea95cadb2013-10-26 14:37:17 +02002036 struct srp_rport *rport = target->rport;
Bart Van Assche509c07b2014-10-30 14:48:30 +01002037 struct srp_rdma_ch *ch;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002038 struct srp_request *req;
2039 struct srp_iu *iu;
2040 struct srp_cmd *cmd;
Ralph Campbell85507bc2006-12-12 14:30:55 -08002041 struct ib_device *dev;
Bart Van Assche76c75b22010-11-26 14:37:47 -05002042 unsigned long flags;
Bart Van Assche77f2c1a2014-10-02 15:29:25 +02002043 u32 tag;
2044 u16 idx;
Bart Van Assched1b42892014-05-20 15:07:20 +02002045 int len, ret;
Bart Van Asschea95cadb2013-10-26 14:37:17 +02002046 const bool in_scsi_eh = !in_interrupt() && current == shost->ehandler;
2047
2048 /*
2049 * The SCSI EH thread is the only context from which srp_queuecommand()
2050 * can get invoked for blocked devices (SDEV_BLOCK /
2051 * SDEV_CREATED_BLOCK). Avoid racing with srp_reconnect_rport() by
2052 * locking the rport mutex if invoked from inside the SCSI EH.
2053 */
2054 if (in_scsi_eh)
2055 mutex_lock(&rport->mutex);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002056
Bart Van Assched1b42892014-05-20 15:07:20 +02002057 scmnd->result = srp_chkready(target->rport);
2058 if (unlikely(scmnd->result))
2059 goto err;
Bart Van Assche2ce19e72013-02-21 17:20:00 +00002060
Bart Van Assche77f2c1a2014-10-02 15:29:25 +02002061 WARN_ON_ONCE(scmnd->request->tag < 0);
2062 tag = blk_mq_unique_tag(scmnd->request);
Bart Van Assched92c0da2014-10-06 17:14:36 +02002063 ch = &target->ch[blk_mq_unique_tag_to_hwq(tag)];
Bart Van Assche77f2c1a2014-10-02 15:29:25 +02002064 idx = blk_mq_unique_tag_to_tag(tag);
2065 WARN_ONCE(idx >= target->req_ring_size, "%s: tag %#x: idx %d >= %d\n",
2066 dev_name(&shost->shost_gendev), tag, idx,
2067 target->req_ring_size);
Bart Van Assche509c07b2014-10-30 14:48:30 +01002068
2069 spin_lock_irqsave(&ch->lock, flags);
2070 iu = __srp_get_tx_iu(ch, SRP_IU_CMD);
Bart Van Assche509c07b2014-10-30 14:48:30 +01002071 spin_unlock_irqrestore(&ch->lock, flags);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002072
Bart Van Assche77f2c1a2014-10-02 15:29:25 +02002073 if (!iu)
2074 goto err;
2075
2076 req = &ch->req_ring[idx];
Greg Kroah-Hartman05321932008-03-06 00:13:36 +01002077 dev = target->srp_host->srp_dev->dev;
David Dillow49248642011-01-14 18:23:24 -05002078 ib_dma_sync_single_for_cpu(dev, iu->dma, target->max_iu_len,
Ralph Campbell85507bc2006-12-12 14:30:55 -08002079 DMA_TO_DEVICE);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002080
David Dillowf8b6e312010-11-26 13:02:21 -05002081 scmnd->host_scribble = (void *) req;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002082
2083 cmd = iu->buf;
2084 memset(cmd, 0, sizeof *cmd);
2085
2086 cmd->opcode = SRP_CMD;
Bart Van Assche985aa492015-05-18 13:27:14 +02002087 int_to_scsilun(scmnd->device->lun, &cmd->lun);
Bart Van Assche77f2c1a2014-10-02 15:29:25 +02002088 cmd->tag = tag;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002089 memcpy(cmd->cdb, scmnd->cmnd, scmnd->cmd_len);
2090
Roland Dreieraef9ec32005-11-02 14:07:13 -08002091 req->scmnd = scmnd;
2092 req->cmd = iu;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002093
Bart Van Assche509c07b2014-10-30 14:48:30 +01002094 len = srp_map_data(scmnd, ch, req);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002095 if (len < 0) {
David Dillow7aa54bd2008-01-07 18:23:41 -05002096 shost_printk(KERN_ERR, target->scsi_host,
Bart Van Assched1b42892014-05-20 15:07:20 +02002097 PFX "Failed to map data (%d)\n", len);
2098 /*
2099 * If we ran out of memory descriptors (-ENOMEM) because an
2100 * application is queuing many requests with more than
Bart Van Assche52ede082014-05-20 15:07:45 +02002101 * max_pages_per_mr sg-list elements, tell the SCSI mid-layer
Bart Van Assched1b42892014-05-20 15:07:20 +02002102 * to reduce queue depth temporarily.
2103 */
2104 scmnd->result = len == -ENOMEM ?
2105 DID_OK << 16 | QUEUE_FULL << 1 : DID_ERROR << 16;
Bart Van Assche76c75b22010-11-26 14:37:47 -05002106 goto err_iu;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002107 }
2108
David Dillow49248642011-01-14 18:23:24 -05002109 ib_dma_sync_single_for_device(dev, iu->dma, target->max_iu_len,
Ralph Campbell85507bc2006-12-12 14:30:55 -08002110 DMA_TO_DEVICE);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002111
Bart Van Assche509c07b2014-10-30 14:48:30 +01002112 if (srp_post_send(ch, iu, len)) {
David Dillow7aa54bd2008-01-07 18:23:41 -05002113 shost_printk(KERN_ERR, target->scsi_host, PFX "Send failed\n");
Roland Dreieraef9ec32005-11-02 14:07:13 -08002114 goto err_unmap;
2115 }
2116
Bart Van Assched1b42892014-05-20 15:07:20 +02002117 ret = 0;
2118
Bart Van Asschea95cadb2013-10-26 14:37:17 +02002119unlock_rport:
2120 if (in_scsi_eh)
2121 mutex_unlock(&rport->mutex);
2122
Bart Van Assched1b42892014-05-20 15:07:20 +02002123 return ret;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002124
2125err_unmap:
Bart Van Assche509c07b2014-10-30 14:48:30 +01002126 srp_unmap_data(scmnd, ch, req);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002127
Bart Van Assche76c75b22010-11-26 14:37:47 -05002128err_iu:
Bart Van Assche509c07b2014-10-30 14:48:30 +01002129 srp_put_tx_iu(ch, iu, SRP_IU_CMD);
Bart Van Assche76c75b22010-11-26 14:37:47 -05002130
Bart Van Assche024ca902014-05-20 15:03:49 +02002131 /*
 2132 * Clear req->scmnd so that the loops that iterate over the request
 2133 * ring cannot encounter a dangling SCSI command pointer.
2134 */
2135 req->scmnd = NULL;
2136
Bart Van Assched1b42892014-05-20 15:07:20 +02002137err:
2138 if (scmnd->result) {
2139 scmnd->scsi_done(scmnd);
2140 ret = 0;
2141 } else {
2142 ret = SCSI_MLQUEUE_HOST_BUSY;
2143 }
Bart Van Asschea95cadb2013-10-26 14:37:17 +02002144
Bart Van Assched1b42892014-05-20 15:07:20 +02002145 goto unlock_rport;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002146}
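
/*
 * Decoding sketch for the blk-mq tag used above (illustrative value):
 * blk_mq_unique_tag() packs the hardware queue index into the upper 16
 * bits and the per-queue tag into the lower 16 bits, so a unique tag of
 * 0x0002002a selects ch = &target->ch[2] and req = &ch->req_ring[0x2a].
 */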
2147
Bart Van Assche4d73f952013-10-26 14:40:37 +02002148/*
2149 * Note: the resources allocated in this function are freed in
Bart Van Assche509c07b2014-10-30 14:48:30 +01002150 * srp_free_ch_ib().
Bart Van Assche4d73f952013-10-26 14:40:37 +02002151 */
Bart Van Assche509c07b2014-10-30 14:48:30 +01002152static int srp_alloc_iu_bufs(struct srp_rdma_ch *ch)
Roland Dreieraef9ec32005-11-02 14:07:13 -08002153{
Bart Van Assche509c07b2014-10-30 14:48:30 +01002154 struct srp_target_port *target = ch->target;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002155 int i;
2156
Bart Van Assche509c07b2014-10-30 14:48:30 +01002157 ch->rx_ring = kcalloc(target->queue_size, sizeof(*ch->rx_ring),
2158 GFP_KERNEL);
2159 if (!ch->rx_ring)
Bart Van Assche4d73f952013-10-26 14:40:37 +02002160 goto err_no_ring;
Bart Van Assche509c07b2014-10-30 14:48:30 +01002161 ch->tx_ring = kcalloc(target->queue_size, sizeof(*ch->tx_ring),
2162 GFP_KERNEL);
2163 if (!ch->tx_ring)
Bart Van Assche4d73f952013-10-26 14:40:37 +02002164 goto err_no_ring;
2165
2166 for (i = 0; i < target->queue_size; ++i) {
Bart Van Assche509c07b2014-10-30 14:48:30 +01002167 ch->rx_ring[i] = srp_alloc_iu(target->srp_host,
2168 ch->max_ti_iu_len,
2169 GFP_KERNEL, DMA_FROM_DEVICE);
2170 if (!ch->rx_ring[i])
Roland Dreieraef9ec32005-11-02 14:07:13 -08002171 goto err;
2172 }
2173
Bart Van Assche4d73f952013-10-26 14:40:37 +02002174 for (i = 0; i < target->queue_size; ++i) {
Bart Van Assche509c07b2014-10-30 14:48:30 +01002175 ch->tx_ring[i] = srp_alloc_iu(target->srp_host,
2176 target->max_iu_len,
2177 GFP_KERNEL, DMA_TO_DEVICE);
2178 if (!ch->tx_ring[i])
Roland Dreieraef9ec32005-11-02 14:07:13 -08002179 goto err;
Bart Van Asschedcb4cb82010-11-26 13:22:48 -05002180
Bart Van Assche509c07b2014-10-30 14:48:30 +01002181 list_add(&ch->tx_ring[i]->list, &ch->free_tx);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002182 }
2183
2184 return 0;
2185
2186err:
Bart Van Assche4d73f952013-10-26 14:40:37 +02002187 for (i = 0; i < target->queue_size; ++i) {
Bart Van Assche509c07b2014-10-30 14:48:30 +01002188 srp_free_iu(target->srp_host, ch->rx_ring[i]);
2189 srp_free_iu(target->srp_host, ch->tx_ring[i]);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002190 }
2191
Bart Van Assche4d73f952013-10-26 14:40:37 +02002192
2193err_no_ring:
Bart Van Assche509c07b2014-10-30 14:48:30 +01002194 kfree(ch->tx_ring);
2195 ch->tx_ring = NULL;
2196 kfree(ch->rx_ring);
2197 ch->rx_ring = NULL;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002198
2199 return -ENOMEM;
2200}
2201
Bart Van Asschec9b03c12011-09-03 09:34:48 +02002202static uint32_t srp_compute_rq_tmo(struct ib_qp_attr *qp_attr, int attr_mask)
2203{
2204 uint64_t T_tr_ns, max_compl_time_ms;
2205 uint32_t rq_tmo_jiffies;
2206
2207 /*
2208 * According to section 11.2.4.2 in the IBTA spec (Modify Queue Pair,
2209 * table 91), both the QP timeout and the retry count have to be set
2210 * for RC QP's during the RTR to RTS transition.
2211 */
2212 WARN_ON_ONCE((attr_mask & (IB_QP_TIMEOUT | IB_QP_RETRY_CNT)) !=
2213 (IB_QP_TIMEOUT | IB_QP_RETRY_CNT));
2214
2215 /*
2216 * Set target->rq_tmo_jiffies to one second more than the largest time
2217 * it can take before an error completion is generated. See also
2218 * C9-140..142 in the IBTA spec for more information about how to
2219 * convert the QP Local ACK Timeout value to nanoseconds.
2220 */
2221 T_tr_ns = 4096 * (1ULL << qp_attr->timeout);
2222 max_compl_time_ms = qp_attr->retry_cnt * 4 * T_tr_ns;
2223 do_div(max_compl_time_ms, NSEC_PER_MSEC);
2224 rq_tmo_jiffies = msecs_to_jiffies(max_compl_time_ms + 1000);
2225
2226 return rq_tmo_jiffies;
2227}
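
/*
 * Worked example with typical values: for qp_attr->timeout = 14 and
 * qp_attr->retry_cnt = 7,
 *
 *	T_tr_ns = 4096 * 2^14 = 67,108,864 ns (~67 ms)
 *	max_compl_time_ms = 7 * 4 * 67,108,864 ns / NSEC_PER_MSEC = 1879 ms
 *	rq_tmo_jiffies = msecs_to_jiffies(1879 + 1000)
 *
 * i.e. the request timeout ends up one second longer than the longest
 * time the QP can take to generate an error completion.
 */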
2228
David Dillow961e0be2011-01-14 17:32:07 -05002229static void srp_cm_rep_handler(struct ib_cm_id *cm_id,
Bart Van Asschee6300cb2015-07-31 14:12:48 -07002230 const struct srp_login_rsp *lrsp,
Bart Van Assche509c07b2014-10-30 14:48:30 +01002231 struct srp_rdma_ch *ch)
David Dillow961e0be2011-01-14 17:32:07 -05002232{
Bart Van Assche509c07b2014-10-30 14:48:30 +01002233 struct srp_target_port *target = ch->target;
David Dillow961e0be2011-01-14 17:32:07 -05002234 struct ib_qp_attr *qp_attr = NULL;
2235 int attr_mask = 0;
2236 int ret;
2237 int i;
2238
2239 if (lrsp->opcode == SRP_LOGIN_RSP) {
Bart Van Assche509c07b2014-10-30 14:48:30 +01002240 ch->max_ti_iu_len = be32_to_cpu(lrsp->max_ti_iu_len);
2241 ch->req_lim = be32_to_cpu(lrsp->req_lim_delta);
David Dillow961e0be2011-01-14 17:32:07 -05002242
2243 /*
2244 * Reserve credits for task management so we don't
2245 * bounce requests back to the SCSI mid-layer.
2246 */
2247 target->scsi_host->can_queue
Bart Van Assche509c07b2014-10-30 14:48:30 +01002248 = min(ch->req_lim - SRP_TSK_MGMT_SQ_SIZE,
David Dillow961e0be2011-01-14 17:32:07 -05002249 target->scsi_host->can_queue);
Bart Van Assche4d73f952013-10-26 14:40:37 +02002250 target->scsi_host->cmd_per_lun
2251 = min_t(int, target->scsi_host->can_queue,
2252 target->scsi_host->cmd_per_lun);
David Dillow961e0be2011-01-14 17:32:07 -05002253 } else {
2254 shost_printk(KERN_WARNING, target->scsi_host,
2255 PFX "Unhandled RSP opcode %#x\n", lrsp->opcode);
2256 ret = -ECONNRESET;
2257 goto error;
2258 }
2259
Bart Van Assche509c07b2014-10-30 14:48:30 +01002260 if (!ch->rx_ring) {
2261 ret = srp_alloc_iu_bufs(ch);
David Dillow961e0be2011-01-14 17:32:07 -05002262 if (ret)
2263 goto error;
2264 }
2265
2266 ret = -ENOMEM;
2267 qp_attr = kmalloc(sizeof *qp_attr, GFP_KERNEL);
2268 if (!qp_attr)
2269 goto error;
2270
2271 qp_attr->qp_state = IB_QPS_RTR;
2272 ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
2273 if (ret)
2274 goto error_free;
2275
Bart Van Assche509c07b2014-10-30 14:48:30 +01002276 ret = ib_modify_qp(ch->qp, qp_attr, attr_mask);
David Dillow961e0be2011-01-14 17:32:07 -05002277 if (ret)
2278 goto error_free;
2279
Bart Van Assche4d73f952013-10-26 14:40:37 +02002280 for (i = 0; i < target->queue_size; i++) {
Bart Van Assche509c07b2014-10-30 14:48:30 +01002281 struct srp_iu *iu = ch->rx_ring[i];
2282
2283 ret = srp_post_recv(ch, iu);
David Dillow961e0be2011-01-14 17:32:07 -05002284 if (ret)
2285 goto error_free;
2286 }
2287
2288 qp_attr->qp_state = IB_QPS_RTS;
2289 ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
2290 if (ret)
2291 goto error_free;
2292
Bart Van Asschec9b03c12011-09-03 09:34:48 +02002293 target->rq_tmo_jiffies = srp_compute_rq_tmo(qp_attr, attr_mask);
2294
Bart Van Assche509c07b2014-10-30 14:48:30 +01002295 ret = ib_modify_qp(ch->qp, qp_attr, attr_mask);
David Dillow961e0be2011-01-14 17:32:07 -05002296 if (ret)
2297 goto error_free;
2298
2299 ret = ib_send_cm_rtu(cm_id, NULL, 0);
2300
2301error_free:
2302 kfree(qp_attr);
2303
2304error:
Bart Van Assche509c07b2014-10-30 14:48:30 +01002305 ch->status = ret;
David Dillow961e0be2011-01-14 17:32:07 -05002306}
2307
Roland Dreieraef9ec32005-11-02 14:07:13 -08002308static void srp_cm_rej_handler(struct ib_cm_id *cm_id,
2309 struct ib_cm_event *event,
Bart Van Assche509c07b2014-10-30 14:48:30 +01002310 struct srp_rdma_ch *ch)
Roland Dreieraef9ec32005-11-02 14:07:13 -08002311{
Bart Van Assche509c07b2014-10-30 14:48:30 +01002312 struct srp_target_port *target = ch->target;
David Dillow7aa54bd2008-01-07 18:23:41 -05002313 struct Scsi_Host *shost = target->scsi_host;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002314 struct ib_class_port_info *cpi;
2315 int opcode;
2316
2317 switch (event->param.rej_rcvd.reason) {
2318 case IB_CM_REJ_PORT_CM_REDIRECT:
2319 cpi = event->param.rej_rcvd.ari;
Bart Van Assche509c07b2014-10-30 14:48:30 +01002320 ch->path.dlid = cpi->redirect_lid;
2321 ch->path.pkey = cpi->redirect_pkey;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002322 cm_id->remote_cm_qpn = be32_to_cpu(cpi->redirect_qp) & 0x00ffffff;
Bart Van Assche509c07b2014-10-30 14:48:30 +01002323 memcpy(ch->path.dgid.raw, cpi->redirect_gid, 16);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002324
Bart Van Assche509c07b2014-10-30 14:48:30 +01002325 ch->status = ch->path.dlid ?
Roland Dreieraef9ec32005-11-02 14:07:13 -08002326 SRP_DLID_REDIRECT : SRP_PORT_REDIRECT;
2327 break;
2328
2329 case IB_CM_REJ_PORT_REDIRECT:
Roland Dreier5d7cbfd2007-08-03 10:45:18 -07002330 if (srp_target_is_topspin(target)) {
Roland Dreieraef9ec32005-11-02 14:07:13 -08002331 /*
2332 * Topspin/Cisco SRP gateways incorrectly send
2333 * reject reason code 25 when they mean 24
2334 * (port redirect).
2335 */
Bart Van Assche509c07b2014-10-30 14:48:30 +01002336 memcpy(ch->path.dgid.raw,
Roland Dreieraef9ec32005-11-02 14:07:13 -08002337 event->param.rej_rcvd.ari, 16);
2338
David Dillow7aa54bd2008-01-07 18:23:41 -05002339 shost_printk(KERN_DEBUG, shost,
2340 PFX "Topspin/Cisco redirect to target port GID %016llx%016llx\n",
Bart Van Assche509c07b2014-10-30 14:48:30 +01002341 be64_to_cpu(ch->path.dgid.global.subnet_prefix),
2342 be64_to_cpu(ch->path.dgid.global.interface_id));
Roland Dreieraef9ec32005-11-02 14:07:13 -08002343
Bart Van Assche509c07b2014-10-30 14:48:30 +01002344 ch->status = SRP_PORT_REDIRECT;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002345 } else {
David Dillow7aa54bd2008-01-07 18:23:41 -05002346 shost_printk(KERN_WARNING, shost,
2347 " REJ reason: IB_CM_REJ_PORT_REDIRECT\n");
Bart Van Assche509c07b2014-10-30 14:48:30 +01002348 ch->status = -ECONNRESET;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002349 }
2350 break;
2351
2352 case IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID:
David Dillow7aa54bd2008-01-07 18:23:41 -05002353 shost_printk(KERN_WARNING, shost,
2354 " REJ reason: IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID\n");
Bart Van Assche509c07b2014-10-30 14:48:30 +01002355 ch->status = -ECONNRESET;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002356 break;
2357
2358 case IB_CM_REJ_CONSUMER_DEFINED:
2359 opcode = *(u8 *) event->private_data;
2360 if (opcode == SRP_LOGIN_REJ) {
2361 struct srp_login_rej *rej = event->private_data;
2362 u32 reason = be32_to_cpu(rej->reason);
2363
2364 if (reason == SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE)
David Dillow7aa54bd2008-01-07 18:23:41 -05002365 shost_printk(KERN_WARNING, shost,
2366 PFX "SRP_LOGIN_REJ: requested max_it_iu_len too large\n");
Roland Dreieraef9ec32005-11-02 14:07:13 -08002367 else
Bart Van Asschee7ffde02014-03-14 13:52:21 +01002368 shost_printk(KERN_WARNING, shost, PFX
2369 "SRP LOGIN from %pI6 to %pI6 REJECTED, reason 0x%08x\n",
Bart Van Assche747fe002014-10-30 14:48:05 +01002370 target->sgid.raw,
2371 target->orig_dgid.raw, reason);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002372 } else
David Dillow7aa54bd2008-01-07 18:23:41 -05002373 shost_printk(KERN_WARNING, shost,
2374 " REJ reason: IB_CM_REJ_CONSUMER_DEFINED,"
2375 " opcode 0x%02x\n", opcode);
Bart Van Assche509c07b2014-10-30 14:48:30 +01002376 ch->status = -ECONNRESET;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002377 break;
2378
David Dillow9fe4bcf2008-01-08 17:08:52 -05002379 case IB_CM_REJ_STALE_CONN:
2380 shost_printk(KERN_WARNING, shost, " REJ reason: stale connection\n");
Bart Van Assche509c07b2014-10-30 14:48:30 +01002381 ch->status = SRP_STALE_CONN;
David Dillow9fe4bcf2008-01-08 17:08:52 -05002382 break;
2383
Roland Dreieraef9ec32005-11-02 14:07:13 -08002384 default:
David Dillow7aa54bd2008-01-07 18:23:41 -05002385 shost_printk(KERN_WARNING, shost, " REJ reason 0x%x\n",
2386 event->param.rej_rcvd.reason);
Bart Van Assche509c07b2014-10-30 14:48:30 +01002387 ch->status = -ECONNRESET;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002388 }
2389}
2390
2391static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
2392{
Bart Van Assche509c07b2014-10-30 14:48:30 +01002393 struct srp_rdma_ch *ch = cm_id->context;
2394 struct srp_target_port *target = ch->target;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002395 int comp = 0;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002396
2397 switch (event->event) {
2398 case IB_CM_REQ_ERROR:
David Dillow7aa54bd2008-01-07 18:23:41 -05002399 shost_printk(KERN_DEBUG, target->scsi_host,
2400 PFX "Sending CM REQ failed\n");
Roland Dreieraef9ec32005-11-02 14:07:13 -08002401 comp = 1;
Bart Van Assche509c07b2014-10-30 14:48:30 +01002402 ch->status = -ECONNRESET;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002403 break;
2404
2405 case IB_CM_REP_RECEIVED:
2406 comp = 1;
Bart Van Assche509c07b2014-10-30 14:48:30 +01002407 srp_cm_rep_handler(cm_id, event->private_data, ch);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002408 break;
2409
2410 case IB_CM_REJ_RECEIVED:
David Dillow7aa54bd2008-01-07 18:23:41 -05002411 shost_printk(KERN_DEBUG, target->scsi_host, PFX "REJ received\n");
Roland Dreieraef9ec32005-11-02 14:07:13 -08002412 comp = 1;
2413
Bart Van Assche509c07b2014-10-30 14:48:30 +01002414 srp_cm_rej_handler(cm_id, event, ch);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002415 break;
2416
Ishai Rabinovitzb7ac4ab2006-06-17 20:37:32 -07002417 case IB_CM_DREQ_RECEIVED:
David Dillow7aa54bd2008-01-07 18:23:41 -05002418 shost_printk(KERN_WARNING, target->scsi_host,
2419 PFX "DREQ received - connection closed\n");
Bart Van Asschec014c8c2015-05-18 13:23:57 +02002420 ch->connected = false;
Ishai Rabinovitzb7ac4ab2006-06-17 20:37:32 -07002421 if (ib_send_cm_drep(cm_id, NULL, 0))
David Dillow7aa54bd2008-01-07 18:23:41 -05002422 shost_printk(KERN_ERR, target->scsi_host,
2423 PFX "Sending CM DREP failed\n");
Bart Van Asschec1120f82013-10-26 14:35:08 +02002424 queue_work(system_long_wq, &target->tl_err_work);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002425 break;
2426
2427 case IB_CM_TIMEWAIT_EXIT:
David Dillow7aa54bd2008-01-07 18:23:41 -05002428 shost_printk(KERN_ERR, target->scsi_host,
2429 PFX "connection closed\n");
Bart Van Asscheac72d762014-03-14 13:53:40 +01002430 comp = 1;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002431
Bart Van Assche509c07b2014-10-30 14:48:30 +01002432 ch->status = 0;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002433 break;
2434
Ishai Rabinovitzb7ac4ab2006-06-17 20:37:32 -07002435 case IB_CM_MRA_RECEIVED:
2436 case IB_CM_DREQ_ERROR:
2437 case IB_CM_DREP_RECEIVED:
2438 break;
2439
Roland Dreieraef9ec32005-11-02 14:07:13 -08002440 default:
David Dillow7aa54bd2008-01-07 18:23:41 -05002441 shost_printk(KERN_WARNING, target->scsi_host,
2442 PFX "Unhandled CM event %d\n", event->event);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002443 break;
2444 }
2445
2446 if (comp)
Bart Van Assche509c07b2014-10-30 14:48:30 +01002447 complete(&ch->done);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002448
Roland Dreieraef9ec32005-11-02 14:07:13 -08002449 return 0;
2450}
2451
Jack Wang71444b92013-11-07 11:37:37 +01002452/**
Jack Wang71444b92013-11-07 11:37:37 +01002453 * srp_change_queue_depth - set the device queue depth
2454 * @sdev: scsi device struct
2455 * @qdepth: requested queue depth
Jack Wang71444b92013-11-07 11:37:37 +01002456 *
2457 * Returns the new queue depth.
2458 */
2459static int
Christoph Hellwigdb5ed4d2014-11-13 15:08:42 +01002460srp_change_queue_depth(struct scsi_device *sdev, int qdepth)
Jack Wang71444b92013-11-07 11:37:37 +01002461{
Christoph Hellwigc40ecc12014-11-13 14:25:11 +01002462 if (!sdev->tagged_supported)
Christoph Hellwig1e6f2412014-11-13 14:27:41 +01002463 qdepth = 1;
Christoph Hellwigdb5ed4d2014-11-13 15:08:42 +01002464 return scsi_change_queue_depth(sdev, qdepth);
Jack Wang71444b92013-11-07 11:37:37 +01002465}
2466
Bart Van Assche985aa492015-05-18 13:27:14 +02002467static int srp_send_tsk_mgmt(struct srp_rdma_ch *ch, u64 req_tag, u64 lun,
2468 u8 func)
Roland Dreieraef9ec32005-11-02 14:07:13 -08002469{
Bart Van Assche509c07b2014-10-30 14:48:30 +01002470 struct srp_target_port *target = ch->target;
Bart Van Asschea95cadb2013-10-26 14:37:17 +02002471 struct srp_rport *rport = target->rport;
David Dillow19081f32010-10-18 08:54:49 -04002472 struct ib_device *dev = target->srp_host->srp_dev->dev;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002473 struct srp_iu *iu;
2474 struct srp_tsk_mgmt *tsk_mgmt;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002475
Bart Van Asschec014c8c2015-05-18 13:23:57 +02002476 if (!ch->connected || target->qp_in_error)
Bart Van Assche3780d1f2013-02-21 17:18:00 +00002477 return -1;
2478
Bart Van Assche509c07b2014-10-30 14:48:30 +01002479 init_completion(&ch->tsk_mgmt_done);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002480
Bart Van Asschea95cadb2013-10-26 14:37:17 +02002481 /*
Bart Van Assche509c07b2014-10-30 14:48:30 +01002482 * Lock the rport mutex to prevent srp_create_ch_ib() from being
Bart Van Asschea95cadb2013-10-26 14:37:17 +02002483 * invoked while a task management function is being sent.
2484 */
2485 mutex_lock(&rport->mutex);
Bart Van Assche509c07b2014-10-30 14:48:30 +01002486 spin_lock_irq(&ch->lock);
2487 iu = __srp_get_tx_iu(ch, SRP_IU_TSK_MGMT);
2488 spin_unlock_irq(&ch->lock);
Bart Van Assche76c75b22010-11-26 14:37:47 -05002489
Bart Van Asschea95cadb2013-10-26 14:37:17 +02002490 if (!iu) {
2491 mutex_unlock(&rport->mutex);
2492
Bart Van Assche76c75b22010-11-26 14:37:47 -05002493 return -1;
Bart Van Asschea95cadb2013-10-26 14:37:17 +02002494 }
Roland Dreieraef9ec32005-11-02 14:07:13 -08002495
David Dillow19081f32010-10-18 08:54:49 -04002496 ib_dma_sync_single_for_cpu(dev, iu->dma, sizeof *tsk_mgmt,
2497 DMA_TO_DEVICE);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002498 tsk_mgmt = iu->buf;
2499 memset(tsk_mgmt, 0, sizeof *tsk_mgmt);
2500
2501 tsk_mgmt->opcode = SRP_TSK_MGMT;
Bart Van Assche985aa492015-05-18 13:27:14 +02002502 int_to_scsilun(lun, &tsk_mgmt->lun);
David Dillowf8b6e312010-11-26 13:02:21 -05002503 tsk_mgmt->tag = req_tag | SRP_TAG_TSK_MGMT;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002504 tsk_mgmt->tsk_mgmt_func = func;
David Dillowf8b6e312010-11-26 13:02:21 -05002505 tsk_mgmt->task_tag = req_tag;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002506
David Dillow19081f32010-10-18 08:54:49 -04002507 ib_dma_sync_single_for_device(dev, iu->dma, sizeof *tsk_mgmt,
2508 DMA_TO_DEVICE);
Bart Van Assche509c07b2014-10-30 14:48:30 +01002509 if (srp_post_send(ch, iu, sizeof(*tsk_mgmt))) {
2510 srp_put_tx_iu(ch, iu, SRP_IU_TSK_MGMT);
Bart Van Asschea95cadb2013-10-26 14:37:17 +02002511 mutex_unlock(&rport->mutex);
2512
Bart Van Assche76c75b22010-11-26 14:37:47 -05002513 return -1;
2514 }
Bart Van Asschea95cadb2013-10-26 14:37:17 +02002515 mutex_unlock(&rport->mutex);
Roland Dreierd945e1d2006-05-09 10:50:28 -07002516
Bart Van Assche509c07b2014-10-30 14:48:30 +01002517 if (!wait_for_completion_timeout(&ch->tsk_mgmt_done,
Roland Dreieraef9ec32005-11-02 14:07:13 -08002518 msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS)))
Roland Dreierd945e1d2006-05-09 10:50:28 -07002519 return -1;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002520
Roland Dreierd945e1d2006-05-09 10:50:28 -07002521 return 0;
Roland Dreierd945e1d2006-05-09 10:50:28 -07002522}
2523
Roland Dreieraef9ec32005-11-02 14:07:13 -08002524static int srp_abort(struct scsi_cmnd *scmnd)
2525{
Roland Dreierd945e1d2006-05-09 10:50:28 -07002526 struct srp_target_port *target = host_to_target(scmnd->device->host);
David Dillowf8b6e312010-11-26 13:02:21 -05002527 struct srp_request *req = (struct srp_request *) scmnd->host_scribble;
Bart Van Assche77f2c1a2014-10-02 15:29:25 +02002528 u32 tag;
Bart Van Assched92c0da2014-10-06 17:14:36 +02002529 u16 ch_idx;
Bart Van Assche509c07b2014-10-30 14:48:30 +01002530 struct srp_rdma_ch *ch;
Bart Van Assche086f44f2013-06-12 15:23:04 +02002531 int ret;
Roland Dreierd945e1d2006-05-09 10:50:28 -07002532
David Dillow7aa54bd2008-01-07 18:23:41 -05002533 shost_printk(KERN_ERR, target->scsi_host, "SRP abort called\n");
Roland Dreieraef9ec32005-11-02 14:07:13 -08002534
Bart Van Assched92c0da2014-10-06 17:14:36 +02002535 if (!req)
Bart Van Assche99b66972013-10-10 13:52:33 +02002536 return SUCCESS;
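/*
 * blk-mq encodes the hardware queue index in the upper 16 bits of
 * the unique tag, so the tag alone identifies both the RDMA channel
 * and the request slot used below.
 */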
Bart Van Assche77f2c1a2014-10-02 15:29:25 +02002537 tag = blk_mq_unique_tag(scmnd->request);
Bart Van Assched92c0da2014-10-06 17:14:36 +02002538 ch_idx = blk_mq_unique_tag_to_hwq(tag);
2539 if (WARN_ON_ONCE(ch_idx >= target->ch_count))
2540 return SUCCESS;
2541 ch = &target->ch[ch_idx];
2542 if (!srp_claim_req(ch, req, NULL, scmnd))
2543 return SUCCESS;
2544 shost_printk(KERN_ERR, target->scsi_host,
2545 "Sending SRP abort for tag %#x\n", tag);
Bart Van Assche77f2c1a2014-10-02 15:29:25 +02002546 if (srp_send_tsk_mgmt(ch, tag, scmnd->device->lun,
Bart Van Assche80d5e8a2013-07-10 17:36:35 +02002547 SRP_TSK_ABORT_TASK) == 0)
Bart Van Assche086f44f2013-06-12 15:23:04 +02002548 ret = SUCCESS;
Bart Van Asscheed9b2262013-10-26 14:34:27 +02002549 else if (target->rport->state == SRP_RPORT_LOST)
Bart Van Assche99e1c132013-06-28 14:49:58 +02002550 ret = FAST_IO_FAIL;
Bart Van Assche086f44f2013-06-12 15:23:04 +02002551 else
2552 ret = FAILED;
Bart Van Assche509c07b2014-10-30 14:48:30 +01002553 srp_free_req(ch, req, scmnd, 0);
Bart Van Assche22032992012-08-14 13:18:53 +00002554 scmnd->result = DID_ABORT << 16;
Bart Van Assched8536672012-08-24 10:29:11 +00002555 scmnd->scsi_done(scmnd);
Roland Dreierd945e1d2006-05-09 10:50:28 -07002556
Bart Van Assche086f44f2013-06-12 15:23:04 +02002557 return ret;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002558}
2559
2560static int srp_reset_device(struct scsi_cmnd *scmnd)
2561{
Roland Dreierd945e1d2006-05-09 10:50:28 -07002562 struct srp_target_port *target = host_to_target(scmnd->device->host);
Bart Van Assched92c0da2014-10-06 17:14:36 +02002563 struct srp_rdma_ch *ch;
Bart Van Assche536ae142010-11-26 13:58:27 -05002564 int i, j;
Roland Dreierd945e1d2006-05-09 10:50:28 -07002565
David Dillow7aa54bd2008-01-07 18:23:41 -05002566 shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n");
Roland Dreieraef9ec32005-11-02 14:07:13 -08002567
Bart Van Assched92c0da2014-10-06 17:14:36 +02002568 ch = &target->ch[0];
Bart Van Assche509c07b2014-10-30 14:48:30 +01002569 if (srp_send_tsk_mgmt(ch, SRP_TAG_NO_REQ, scmnd->device->lun,
David Dillowf8b6e312010-11-26 13:02:21 -05002570 SRP_TSK_LUN_RESET))
Roland Dreierd945e1d2006-05-09 10:50:28 -07002571 return FAILED;
Bart Van Assche509c07b2014-10-30 14:48:30 +01002572 if (ch->tsk_mgmt_status)
Roland Dreierd945e1d2006-05-09 10:50:28 -07002573 return FAILED;
2574
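/*
 * The LUN reset succeeded; finish all outstanding requests on every
 * RDMA channel with DID_RESET so that the SCSI midlayer can requeue
 * them.
 */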
Bart Van Assched92c0da2014-10-06 17:14:36 +02002575 for (i = 0; i < target->ch_count; i++) {
2576 ch = &target->ch[i];
2577 for (j = 0; j < target->req_ring_size; ++j) {
2578 struct srp_request *req = &ch->req_ring[j];
Bart Van Assche509c07b2014-10-30 14:48:30 +01002579
Bart Van Assched92c0da2014-10-06 17:14:36 +02002580 srp_finish_req(ch, req, scmnd->device, DID_RESET << 16);
2581 }
Bart Van Assche536ae142010-11-26 13:58:27 -05002582 }
Roland Dreierd945e1d2006-05-09 10:50:28 -07002583
Roland Dreierd945e1d2006-05-09 10:50:28 -07002584 return SUCCESS;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002585}
2586
2587static int srp_reset_host(struct scsi_cmnd *scmnd)
2588{
2589 struct srp_target_port *target = host_to_target(scmnd->device->host);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002590
David Dillow7aa54bd2008-01-07 18:23:41 -05002591 shost_printk(KERN_ERR, target->scsi_host, PFX "SRP reset_host called\n");
Roland Dreieraef9ec32005-11-02 14:07:13 -08002592
Bart Van Asscheed9b2262013-10-26 14:34:27 +02002593 return srp_reconnect_rport(target->rport) == 0 ? SUCCESS : FAILED;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002594}
2595
Bart Van Asschec9b03c12011-09-03 09:34:48 +02002596static int srp_slave_configure(struct scsi_device *sdev)
2597{
2598 struct Scsi_Host *shost = sdev->host;
2599 struct srp_target_port *target = host_to_target(shost);
2600 struct request_queue *q = sdev->request_queue;
2601 unsigned long timeout;
2602
2603 if (sdev->type == TYPE_DISK) {
2604 timeout = max_t(unsigned, 30 * HZ, target->rq_tmo_jiffies);
2605 blk_queue_rq_timeout(q, timeout);
2606 }
2607
2608 return 0;
2609}
2610
Tony Jonesee959b02008-02-22 00:13:36 +01002611static ssize_t show_id_ext(struct device *dev, struct device_attribute *attr,
2612 char *buf)
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002613{
Tony Jonesee959b02008-02-22 00:13:36 +01002614 struct srp_target_port *target = host_to_target(class_to_shost(dev));
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002615
Bart Van Assche45c37ca2015-05-18 13:25:10 +02002616 return sprintf(buf, "0x%016llx\n", be64_to_cpu(target->id_ext));
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002617}
2618
Tony Jonesee959b02008-02-22 00:13:36 +01002619static ssize_t show_ioc_guid(struct device *dev, struct device_attribute *attr,
2620 char *buf)
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002621{
Tony Jonesee959b02008-02-22 00:13:36 +01002622 struct srp_target_port *target = host_to_target(class_to_shost(dev));
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002623
Bart Van Assche45c37ca2015-05-18 13:25:10 +02002624 return sprintf(buf, "0x%016llx\n", be64_to_cpu(target->ioc_guid));
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002625}
2626
Tony Jonesee959b02008-02-22 00:13:36 +01002627static ssize_t show_service_id(struct device *dev,
2628 struct device_attribute *attr, char *buf)
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002629{
Tony Jonesee959b02008-02-22 00:13:36 +01002630 struct srp_target_port *target = host_to_target(class_to_shost(dev));
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002631
Bart Van Assche45c37ca2015-05-18 13:25:10 +02002632 return sprintf(buf, "0x%016llx\n", be64_to_cpu(target->service_id));
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002633}
2634
Tony Jonesee959b02008-02-22 00:13:36 +01002635static ssize_t show_pkey(struct device *dev, struct device_attribute *attr,
2636 char *buf)
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002637{
Tony Jonesee959b02008-02-22 00:13:36 +01002638 struct srp_target_port *target = host_to_target(class_to_shost(dev));
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002639
Bart Van Assche747fe002014-10-30 14:48:05 +01002640 return sprintf(buf, "0x%04x\n", be16_to_cpu(target->pkey));
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002641}
2642
Bart Van Assche848b3082013-10-26 14:38:12 +02002643static ssize_t show_sgid(struct device *dev, struct device_attribute *attr,
2644 char *buf)
2645{
2646 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2647
Bart Van Assche747fe002014-10-30 14:48:05 +01002648 return sprintf(buf, "%pI6\n", target->sgid.raw);
Bart Van Assche848b3082013-10-26 14:38:12 +02002649}
2650
Tony Jonesee959b02008-02-22 00:13:36 +01002651static ssize_t show_dgid(struct device *dev, struct device_attribute *attr,
2652 char *buf)
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002653{
Tony Jonesee959b02008-02-22 00:13:36 +01002654 struct srp_target_port *target = host_to_target(class_to_shost(dev));
Bart Van Assched92c0da2014-10-06 17:14:36 +02002655 struct srp_rdma_ch *ch = &target->ch[0];
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002656
Bart Van Assche509c07b2014-10-30 14:48:30 +01002657 return sprintf(buf, "%pI6\n", ch->path.dgid.raw);
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002658}
2659
Tony Jonesee959b02008-02-22 00:13:36 +01002660static ssize_t show_orig_dgid(struct device *dev,
2661 struct device_attribute *attr, char *buf)
Ishai Rabinovitz3633b3d2007-05-06 21:18:11 -07002662{
Tony Jonesee959b02008-02-22 00:13:36 +01002663 struct srp_target_port *target = host_to_target(class_to_shost(dev));
Ishai Rabinovitz3633b3d2007-05-06 21:18:11 -07002664
Bart Van Assche747fe002014-10-30 14:48:05 +01002665 return sprintf(buf, "%pI6\n", target->orig_dgid.raw);
Ishai Rabinovitz3633b3d2007-05-06 21:18:11 -07002666}
2667
Bart Van Assche89de7482010-08-03 14:08:45 +00002668static ssize_t show_req_lim(struct device *dev,
2669 struct device_attribute *attr, char *buf)
2670{
2671 struct srp_target_port *target = host_to_target(class_to_shost(dev));
Bart Van Assched92c0da2014-10-06 17:14:36 +02002672 struct srp_rdma_ch *ch;
2673 int i, req_lim = INT_MAX;
Bart Van Assche89de7482010-08-03 14:08:45 +00002674
Bart Van Assched92c0da2014-10-06 17:14:36 +02002675 for (i = 0; i < target->ch_count; i++) {
2676 ch = &target->ch[i];
2677 req_lim = min(req_lim, ch->req_lim);
2678 }
2679 return sprintf(buf, "%d\n", req_lim);
Bart Van Assche89de7482010-08-03 14:08:45 +00002680}
2681
Tony Jonesee959b02008-02-22 00:13:36 +01002682static ssize_t show_zero_req_lim(struct device *dev,
2683 struct device_attribute *attr, char *buf)
Roland Dreier6bfa24f2006-06-17 20:37:33 -07002684{
Tony Jonesee959b02008-02-22 00:13:36 +01002685 struct srp_target_port *target = host_to_target(class_to_shost(dev));
Roland Dreier6bfa24f2006-06-17 20:37:33 -07002686
Roland Dreier6bfa24f2006-06-17 20:37:33 -07002687 return sprintf(buf, "%d\n", target->zero_req_lim);
2688}
2689
Tony Jonesee959b02008-02-22 00:13:36 +01002690static ssize_t show_local_ib_port(struct device *dev,
2691 struct device_attribute *attr, char *buf)
Ishai Rabinovitzded7f1a2006-08-15 17:34:52 +03002692{
Tony Jonesee959b02008-02-22 00:13:36 +01002693 struct srp_target_port *target = host_to_target(class_to_shost(dev));
Ishai Rabinovitzded7f1a2006-08-15 17:34:52 +03002694
2695 return sprintf(buf, "%d\n", target->srp_host->port);
2696}
2697
Tony Jonesee959b02008-02-22 00:13:36 +01002698static ssize_t show_local_ib_device(struct device *dev,
2699 struct device_attribute *attr, char *buf)
Ishai Rabinovitzded7f1a2006-08-15 17:34:52 +03002700{
Tony Jonesee959b02008-02-22 00:13:36 +01002701 struct srp_target_port *target = host_to_target(class_to_shost(dev));
Ishai Rabinovitzded7f1a2006-08-15 17:34:52 +03002702
Greg Kroah-Hartman05321932008-03-06 00:13:36 +01002703 return sprintf(buf, "%s\n", target->srp_host->srp_dev->dev->name);
Ishai Rabinovitzded7f1a2006-08-15 17:34:52 +03002704}
2705
Bart Van Assched92c0da2014-10-06 17:14:36 +02002706static ssize_t show_ch_count(struct device *dev, struct device_attribute *attr,
2707 char *buf)
2708{
2709 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2710
2711 return sprintf(buf, "%d\n", target->ch_count);
2712}
2713
Bart Van Assche4b5e5f42013-06-28 14:57:42 +02002714static ssize_t show_comp_vector(struct device *dev,
2715 struct device_attribute *attr, char *buf)
2716{
2717 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2718
2719 return sprintf(buf, "%d\n", target->comp_vector);
2720}
2721
Vu Pham7bb312e2013-10-26 14:31:27 +02002722static ssize_t show_tl_retry_count(struct device *dev,
2723 struct device_attribute *attr, char *buf)
2724{
2725 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2726
2727 return sprintf(buf, "%d\n", target->tl_retry_count);
2728}
2729
David Dillow49248642011-01-14 18:23:24 -05002730static ssize_t show_cmd_sg_entries(struct device *dev,
2731 struct device_attribute *attr, char *buf)
2732{
2733 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2734
2735 return sprintf(buf, "%u\n", target->cmd_sg_cnt);
2736}
2737
David Dillowc07d4242011-01-16 13:57:10 -05002738static ssize_t show_allow_ext_sg(struct device *dev,
2739 struct device_attribute *attr, char *buf)
2740{
2741 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2742
2743 return sprintf(buf, "%s\n", target->allow_ext_sg ? "true" : "false");
2744}
2745
Tony Jonesee959b02008-02-22 00:13:36 +01002746static DEVICE_ATTR(id_ext, S_IRUGO, show_id_ext, NULL);
2747static DEVICE_ATTR(ioc_guid, S_IRUGO, show_ioc_guid, NULL);
2748static DEVICE_ATTR(service_id, S_IRUGO, show_service_id, NULL);
2749static DEVICE_ATTR(pkey, S_IRUGO, show_pkey, NULL);
Bart Van Assche848b3082013-10-26 14:38:12 +02002750static DEVICE_ATTR(sgid, S_IRUGO, show_sgid, NULL);
Tony Jonesee959b02008-02-22 00:13:36 +01002751static DEVICE_ATTR(dgid, S_IRUGO, show_dgid, NULL);
2752static DEVICE_ATTR(orig_dgid, S_IRUGO, show_orig_dgid, NULL);
Bart Van Assche89de7482010-08-03 14:08:45 +00002753static DEVICE_ATTR(req_lim, S_IRUGO, show_req_lim, NULL);
Tony Jonesee959b02008-02-22 00:13:36 +01002754static DEVICE_ATTR(zero_req_lim, S_IRUGO, show_zero_req_lim, NULL);
2755static DEVICE_ATTR(local_ib_port, S_IRUGO, show_local_ib_port, NULL);
2756static DEVICE_ATTR(local_ib_device, S_IRUGO, show_local_ib_device, NULL);
Bart Van Assched92c0da2014-10-06 17:14:36 +02002757static DEVICE_ATTR(ch_count, S_IRUGO, show_ch_count, NULL);
Bart Van Assche4b5e5f42013-06-28 14:57:42 +02002758static DEVICE_ATTR(comp_vector, S_IRUGO, show_comp_vector, NULL);
Vu Pham7bb312e2013-10-26 14:31:27 +02002759static DEVICE_ATTR(tl_retry_count, S_IRUGO, show_tl_retry_count, NULL);
David Dillow49248642011-01-14 18:23:24 -05002760static DEVICE_ATTR(cmd_sg_entries, S_IRUGO, show_cmd_sg_entries, NULL);
David Dillowc07d4242011-01-16 13:57:10 -05002761static DEVICE_ATTR(allow_ext_sg, S_IRUGO, show_allow_ext_sg, NULL);
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002762
Tony Jonesee959b02008-02-22 00:13:36 +01002763static struct device_attribute *srp_host_attrs[] = {
2764 &dev_attr_id_ext,
2765 &dev_attr_ioc_guid,
2766 &dev_attr_service_id,
2767 &dev_attr_pkey,
Bart Van Assche848b3082013-10-26 14:38:12 +02002768 &dev_attr_sgid,
Tony Jonesee959b02008-02-22 00:13:36 +01002769 &dev_attr_dgid,
2770 &dev_attr_orig_dgid,
Bart Van Assche89de7482010-08-03 14:08:45 +00002771 &dev_attr_req_lim,
Tony Jonesee959b02008-02-22 00:13:36 +01002772 &dev_attr_zero_req_lim,
2773 &dev_attr_local_ib_port,
2774 &dev_attr_local_ib_device,
Bart Van Assched92c0da2014-10-06 17:14:36 +02002775 &dev_attr_ch_count,
Bart Van Assche4b5e5f42013-06-28 14:57:42 +02002776 &dev_attr_comp_vector,
Vu Pham7bb312e2013-10-26 14:31:27 +02002777 &dev_attr_tl_retry_count,
David Dillow49248642011-01-14 18:23:24 -05002778 &dev_attr_cmd_sg_entries,
David Dillowc07d4242011-01-16 13:57:10 -05002779 &dev_attr_allow_ext_sg,
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002780 NULL
2781};
2782
Roland Dreieraef9ec32005-11-02 14:07:13 -08002783static struct scsi_host_template srp_template = {
2784 .module = THIS_MODULE,
Roland Dreierb7f008f2007-05-06 21:18:11 -07002785 .name = "InfiniBand SRP initiator",
2786 .proc_name = DRV_NAME,
Bart Van Asschec9b03c12011-09-03 09:34:48 +02002787 .slave_configure = srp_slave_configure,
Roland Dreieraef9ec32005-11-02 14:07:13 -08002788 .info = srp_target_info,
2789 .queuecommand = srp_queuecommand,
Jack Wang71444b92013-11-07 11:37:37 +01002790 .change_queue_depth = srp_change_queue_depth,
Roland Dreieraef9ec32005-11-02 14:07:13 -08002791 .eh_abort_handler = srp_abort,
2792 .eh_device_reset_handler = srp_reset_device,
2793 .eh_host_reset_handler = srp_reset_host,
Bart Van Assche2742c1d2013-06-12 15:24:25 +02002794 .skip_settle_delay = true,
David Dillow49248642011-01-14 18:23:24 -05002795 .sg_tablesize = SRP_DEF_SG_TABLESIZE,
Bart Van Assche4d73f952013-10-26 14:40:37 +02002796 .can_queue = SRP_DEFAULT_CMD_SQ_SIZE,
Roland Dreieraef9ec32005-11-02 14:07:13 -08002797 .this_id = -1,
Bart Van Assche4d73f952013-10-26 14:40:37 +02002798 .cmd_per_lun = SRP_DEFAULT_CMD_SQ_SIZE,
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002799 .use_clustering = ENABLE_CLUSTERING,
Bart Van Assche77f2c1a2014-10-02 15:29:25 +02002800 .shost_attrs = srp_host_attrs,
2801 .use_blk_tags = 1,
Christoph Hellwigc40ecc12014-11-13 14:25:11 +01002802 .track_queue_depth = 1,
Roland Dreieraef9ec32005-11-02 14:07:13 -08002803};
2804
Bart Van Assche34aa6542014-10-30 14:47:22 +01002805static int srp_sdev_count(struct Scsi_Host *host)
2806{
2807 struct scsi_device *sdev;
2808 int c = 0;
2809
2810 shost_for_each_device(sdev, host)
2811 c++;
2812
2813 return c;
2814}
2815
Bart Van Asschebc44bd12015-08-14 11:01:09 -07002816/*
2817 * Return values:
2818 * < 0 upon failure. Caller is responsible for SRP target port cleanup.
2819 * 0 and target->state == SRP_TARGET_REMOVED if asynchronous target port
2820 * removal has been scheduled.
2821 * 0 and target->state != SRP_TARGET_REMOVED upon success.
2822 */
Roland Dreieraef9ec32005-11-02 14:07:13 -08002823static int srp_add_target(struct srp_host *host, struct srp_target_port *target)
2824{
FUJITA Tomonori32368222007-06-27 16:33:12 +09002825 struct srp_rport_identifiers ids;
2826 struct srp_rport *rport;
2827
Bart Van Assche34aa6542014-10-30 14:47:22 +01002828 target->state = SRP_TARGET_SCANNING;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002829 sprintf(target->target_name, "SRP.T10:%016llX",
Bart Van Assche45c37ca2015-05-18 13:25:10 +02002830 be64_to_cpu(target->id_ext));
Roland Dreieraef9ec32005-11-02 14:07:13 -08002831
Greg Kroah-Hartman05321932008-03-06 00:13:36 +01002832 if (scsi_add_host(target->scsi_host, host->srp_dev->dev->dma_device))
Roland Dreieraef9ec32005-11-02 14:07:13 -08002833 return -ENODEV;
2834
FUJITA Tomonori32368222007-06-27 16:33:12 +09002835 memcpy(ids.port_id, &target->id_ext, 8);
2836 memcpy(ids.port_id + 8, &target->ioc_guid, 8);
FUJITA Tomonoriaebd5e42007-07-11 15:08:15 +09002837 ids.roles = SRP_RPORT_ROLE_TARGET;
FUJITA Tomonori32368222007-06-27 16:33:12 +09002838 rport = srp_rport_add(target->scsi_host, &ids);
2839 if (IS_ERR(rport)) {
2840 scsi_remove_host(target->scsi_host);
2841 return PTR_ERR(rport);
2842 }
2843
Bart Van Asschedc1bdbd2011-09-16 20:41:13 +02002844 rport->lld_data = target;
Bart Van Assche9dd69a62013-10-26 14:32:30 +02002845 target->rport = rport;
Bart Van Asschedc1bdbd2011-09-16 20:41:13 +02002846
Matthew Wilcoxb3589fd2006-06-17 20:37:30 -07002847 spin_lock(&host->target_lock);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002848 list_add_tail(&target->list, &host->target_list);
Matthew Wilcoxb3589fd2006-06-17 20:37:30 -07002849 spin_unlock(&host->target_lock);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002850
Roland Dreieraef9ec32005-11-02 14:07:13 -08002851 scsi_scan_target(&target->scsi_host->shost_gendev,
Matthew Wilcox1962a4a2006-06-17 20:37:30 -07002852 0, target->scsi_id, SCAN_WILD_CARD, 0);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002853
Bart Van Asschec014c8c2015-05-18 13:23:57 +02002854 if (srp_connected_ch(target) < target->ch_count ||
2855 target->qp_in_error) {
Bart Van Assche34aa6542014-10-30 14:47:22 +01002856 shost_printk(KERN_INFO, target->scsi_host,
2857 PFX "SCSI scan failed - removing SCSI host\n");
2858 srp_queue_remove_work(target);
2859 goto out;
2860 }
2861
2862 pr_debug(PFX "%s: SCSI scan succeeded - detected %d LUNs\n",
2863 dev_name(&target->scsi_host->shost_gendev),
2864 srp_sdev_count(target->scsi_host));
2865
2866 spin_lock_irq(&target->lock);
2867 if (target->state == SRP_TARGET_SCANNING)
2868 target->state = SRP_TARGET_LIVE;
2869 spin_unlock_irq(&target->lock);
2870
2871out:
Roland Dreieraef9ec32005-11-02 14:07:13 -08002872 return 0;
2873}
2874
Tony Jonesee959b02008-02-22 00:13:36 +01002875static void srp_release_dev(struct device *dev)
Roland Dreieraef9ec32005-11-02 14:07:13 -08002876{
2877 struct srp_host *host =
Tony Jonesee959b02008-02-22 00:13:36 +01002878 container_of(dev, struct srp_host, dev);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002879
2880 complete(&host->released);
2881}
2882
2883static struct class srp_class = {
2884 .name = "infiniband_srp",
Tony Jonesee959b02008-02-22 00:13:36 +01002885 .dev_release = srp_release_dev
Roland Dreieraef9ec32005-11-02 14:07:13 -08002886};
2887
Bart Van Assche96fc2482013-06-28 14:51:26 +02002888/**
2889 * srp_conn_unique() - check whether the connection to a target is unique
Bart Van Asscheaf246632014-05-20 15:04:21 +02002890 * @host: SRP host.
2891 * @target: SRP target port.
Bart Van Assche96fc2482013-06-28 14:51:26 +02002892 */
2893static bool srp_conn_unique(struct srp_host *host,
2894 struct srp_target_port *target)
2895{
2896 struct srp_target_port *t;
2897 bool ret = false;
2898
2899 if (target->state == SRP_TARGET_REMOVED)
2900 goto out;
2901
2902 ret = true;
2903
2904 spin_lock(&host->target_lock);
2905 list_for_each_entry(t, &host->target_list, list) {
2906 if (t != target &&
2907 target->id_ext == t->id_ext &&
2908 target->ioc_guid == t->ioc_guid &&
2909 target->initiator_ext == t->initiator_ext) {
2910 ret = false;
2911 break;
2912 }
2913 }
2914 spin_unlock(&host->target_lock);
2915
2916out:
2917 return ret;
2918}
2919
Roland Dreieraef9ec32005-11-02 14:07:13 -08002920/*
2921 * Target ports are added by writing
2922 *
2923 * id_ext=<SRP ID ext>,ioc_guid=<SRP IOC GUID>,dgid=<dest GID>,
2924 * pkey=<P_Key>,service_id=<service ID>
2925 *
2926 * to the add_target sysfs attribute.
2927 */
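/*
 * Example with hypothetical identifiers (the GUIDs, GID and sysfs
 * host name below are placeholders, not values from a real fabric):
 *
 *   echo id_ext=200100e08b141000,ioc_guid=00117500005b6ed2,\
 *   dgid=fe800000000000000002c90300041a71,pkey=ffff,\
 *   service_id=200100e08b141000 \
 *     > /sys/class/infiniband_srp/srp-mlx4_0-1/add_target
 */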
2928enum {
2929 SRP_OPT_ERR = 0,
2930 SRP_OPT_ID_EXT = 1 << 0,
2931 SRP_OPT_IOC_GUID = 1 << 1,
2932 SRP_OPT_DGID = 1 << 2,
2933 SRP_OPT_PKEY = 1 << 3,
2934 SRP_OPT_SERVICE_ID = 1 << 4,
2935 SRP_OPT_MAX_SECT = 1 << 5,
Vu Pham52fb2b502006-06-17 20:37:31 -07002936 SRP_OPT_MAX_CMD_PER_LUN = 1 << 6,
Ramachandra K0c0450db2006-06-17 20:37:38 -07002937 SRP_OPT_IO_CLASS = 1 << 7,
Ishai Rabinovitz01cb9bc2006-10-04 15:28:56 +02002938 SRP_OPT_INITIATOR_EXT = 1 << 8,
David Dillow49248642011-01-14 18:23:24 -05002939 SRP_OPT_CMD_SG_ENTRIES = 1 << 9,
David Dillowc07d4242011-01-16 13:57:10 -05002940 SRP_OPT_ALLOW_EXT_SG = 1 << 10,
2941 SRP_OPT_SG_TABLESIZE = 1 << 11,
Bart Van Assche4b5e5f42013-06-28 14:57:42 +02002942 SRP_OPT_COMP_VECTOR = 1 << 12,
Vu Pham7bb312e2013-10-26 14:31:27 +02002943 SRP_OPT_TL_RETRY_COUNT = 1 << 13,
Bart Van Assche4d73f952013-10-26 14:40:37 +02002944 SRP_OPT_QUEUE_SIZE = 1 << 14,
Roland Dreieraef9ec32005-11-02 14:07:13 -08002945 SRP_OPT_ALL = (SRP_OPT_ID_EXT |
2946 SRP_OPT_IOC_GUID |
2947 SRP_OPT_DGID |
2948 SRP_OPT_PKEY |
2949 SRP_OPT_SERVICE_ID),
2950};
2951
Steven Whitehousea447c092008-10-13 10:46:57 +01002952static const match_table_t srp_opt_tokens = {
Vu Pham52fb2b502006-06-17 20:37:31 -07002953 { SRP_OPT_ID_EXT, "id_ext=%s" },
2954 { SRP_OPT_IOC_GUID, "ioc_guid=%s" },
2955 { SRP_OPT_DGID, "dgid=%s" },
2956 { SRP_OPT_PKEY, "pkey=%x" },
2957 { SRP_OPT_SERVICE_ID, "service_id=%s" },
2958 { SRP_OPT_MAX_SECT, "max_sect=%d" },
2959 { SRP_OPT_MAX_CMD_PER_LUN, "max_cmd_per_lun=%d" },
Ramachandra K0c0450db2006-06-17 20:37:38 -07002960 { SRP_OPT_IO_CLASS, "io_class=%x" },
Ishai Rabinovitz01cb9bc2006-10-04 15:28:56 +02002961 { SRP_OPT_INITIATOR_EXT, "initiator_ext=%s" },
David Dillow49248642011-01-14 18:23:24 -05002962 { SRP_OPT_CMD_SG_ENTRIES, "cmd_sg_entries=%u" },
David Dillowc07d4242011-01-16 13:57:10 -05002963 { SRP_OPT_ALLOW_EXT_SG, "allow_ext_sg=%u" },
2964 { SRP_OPT_SG_TABLESIZE, "sg_tablesize=%u" },
Bart Van Assche4b5e5f42013-06-28 14:57:42 +02002965 { SRP_OPT_COMP_VECTOR, "comp_vector=%u" },
Vu Pham7bb312e2013-10-26 14:31:27 +02002966 { SRP_OPT_TL_RETRY_COUNT, "tl_retry_count=%u" },
Bart Van Assche4d73f952013-10-26 14:40:37 +02002967 { SRP_OPT_QUEUE_SIZE, "queue_size=%d" },
Vu Pham52fb2b502006-06-17 20:37:31 -07002968 { SRP_OPT_ERR, NULL }
Roland Dreieraef9ec32005-11-02 14:07:13 -08002969};
2970
2971static int srp_parse_options(const char *buf, struct srp_target_port *target)
2972{
2973 char *options, *sep_opt;
2974 char *p;
2975 char dgid[3];
2976 substring_t args[MAX_OPT_ARGS];
2977 int opt_mask = 0;
2978 int token;
2979 int ret = -EINVAL;
2980 int i;
2981
2982 options = kstrdup(buf, GFP_KERNEL);
2983 if (!options)
2984 return -ENOMEM;
2985
2986 sep_opt = options;
Sagi Grimberg7dcf9c12014-10-19 18:19:02 +03002987 while ((p = strsep(&sep_opt, ",\n")) != NULL) {
Roland Dreieraef9ec32005-11-02 14:07:13 -08002988 if (!*p)
2989 continue;
2990
2991 token = match_token(p, srp_opt_tokens, args);
2992 opt_mask |= token;
2993
2994 switch (token) {
2995 case SRP_OPT_ID_EXT:
2996 p = match_strdup(args);
Ishai Rabinovitza20f3a62007-01-16 17:20:25 +02002997 if (!p) {
2998 ret = -ENOMEM;
2999 goto out;
3000 }
Roland Dreieraef9ec32005-11-02 14:07:13 -08003001 target->id_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
3002 kfree(p);
3003 break;
3004
3005 case SRP_OPT_IOC_GUID:
3006 p = match_strdup(args);
Ishai Rabinovitza20f3a62007-01-16 17:20:25 +02003007 if (!p) {
3008 ret = -ENOMEM;
3009 goto out;
3010 }
Roland Dreieraef9ec32005-11-02 14:07:13 -08003011 target->ioc_guid = cpu_to_be64(simple_strtoull(p, NULL, 16));
3012 kfree(p);
3013 break;
3014
3015 case SRP_OPT_DGID:
3016 p = match_strdup(args);
Ishai Rabinovitza20f3a62007-01-16 17:20:25 +02003017 if (!p) {
3018 ret = -ENOMEM;
3019 goto out;
3020 }
Roland Dreieraef9ec32005-11-02 14:07:13 -08003021 if (strlen(p) != 32) {
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00003022 pr_warn("bad dest GID parameter '%s'\n", p);
Roland Dreierce1823f2006-04-03 09:31:04 -07003023 kfree(p);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003024 goto out;
3025 }
3026
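/*
 * The destination GID is specified as 32 hex digits without
 * separators; convert it two characters at a time into the 16 raw
 * bytes of the GID.
 */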
3027 for (i = 0; i < 16; ++i) {
Bart Van Assche747fe002014-10-30 14:48:05 +01003028 strlcpy(dgid, p + i * 2, sizeof(dgid));
3029 if (sscanf(dgid, "%hhx",
3030 &target->orig_dgid.raw[i]) < 1) {
3031 ret = -EINVAL;
3032 kfree(p);
3033 goto out;
3034 }
Roland Dreieraef9ec32005-11-02 14:07:13 -08003035 }
Roland Dreierbf17c1c2006-03-20 10:08:25 -08003036 kfree(p);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003037 break;
3038
3039 case SRP_OPT_PKEY:
3040 if (match_hex(args, &token)) {
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00003041 pr_warn("bad P_Key parameter '%s'\n", p);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003042 goto out;
3043 }
Bart Van Assche747fe002014-10-30 14:48:05 +01003044 target->pkey = cpu_to_be16(token);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003045 break;
3046
3047 case SRP_OPT_SERVICE_ID:
3048 p = match_strdup(args);
Ishai Rabinovitza20f3a62007-01-16 17:20:25 +02003049 if (!p) {
3050 ret = -ENOMEM;
3051 goto out;
3052 }
Roland Dreieraef9ec32005-11-02 14:07:13 -08003053 target->service_id = cpu_to_be64(simple_strtoull(p, NULL, 16));
3054 kfree(p);
3055 break;
3056
3057 case SRP_OPT_MAX_SECT:
3058 if (match_int(args, &token)) {
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00003059 pr_warn("bad max sect parameter '%s'\n", p);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003060 goto out;
3061 }
3062 target->scsi_host->max_sectors = token;
3063 break;
3064
Bart Van Assche4d73f952013-10-26 14:40:37 +02003065 case SRP_OPT_QUEUE_SIZE:
3066 if (match_int(args, &token) || token < 1) {
3067 pr_warn("bad queue_size parameter '%s'\n", p);
3068 goto out;
3069 }
3070 target->scsi_host->can_queue = token;
3071 target->queue_size = token + SRP_RSP_SQ_SIZE +
3072 SRP_TSK_MGMT_SQ_SIZE;
3073 if (!(opt_mask & SRP_OPT_MAX_CMD_PER_LUN))
3074 target->scsi_host->cmd_per_lun = token;
3075 break;
3076
Vu Pham52fb2b502006-06-17 20:37:31 -07003077 case SRP_OPT_MAX_CMD_PER_LUN:
Bart Van Assche4d73f952013-10-26 14:40:37 +02003078 if (match_int(args, &token) || token < 1) {
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00003079 pr_warn("bad max cmd_per_lun parameter '%s'\n",
3080 p);
Vu Pham52fb2b502006-06-17 20:37:31 -07003081 goto out;
3082 }
Bart Van Assche4d73f952013-10-26 14:40:37 +02003083 target->scsi_host->cmd_per_lun = token;
Vu Pham52fb2b502006-06-17 20:37:31 -07003084 break;
3085
Ramachandra K0c0450db2006-06-17 20:37:38 -07003086 case SRP_OPT_IO_CLASS:
3087 if (match_hex(args, &token)) {
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00003088 pr_warn("bad IO class parameter '%s'\n", p);
Ramachandra K0c0450db2006-06-17 20:37:38 -07003089 goto out;
3090 }
3091 if (token != SRP_REV10_IB_IO_CLASS &&
3092 token != SRP_REV16A_IB_IO_CLASS) {
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00003093 pr_warn("unknown IO class parameter value %x specified (use %x or %x).\n",
3094 token, SRP_REV10_IB_IO_CLASS,
3095 SRP_REV16A_IB_IO_CLASS);
Ramachandra K0c0450db2006-06-17 20:37:38 -07003096 goto out;
3097 }
3098 target->io_class = token;
3099 break;
3100
Ishai Rabinovitz01cb9bc2006-10-04 15:28:56 +02003101 case SRP_OPT_INITIATOR_EXT:
3102 p = match_strdup(args);
Ishai Rabinovitza20f3a62007-01-16 17:20:25 +02003103 if (!p) {
3104 ret = -ENOMEM;
3105 goto out;
3106 }
Ishai Rabinovitz01cb9bc2006-10-04 15:28:56 +02003107 target->initiator_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
3108 kfree(p);
3109 break;
3110
David Dillow49248642011-01-14 18:23:24 -05003111 case SRP_OPT_CMD_SG_ENTRIES:
3112 if (match_int(args, &token) || token < 1 || token > 255) {
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00003113 pr_warn("bad max cmd_sg_entries parameter '%s'\n",
3114 p);
David Dillow49248642011-01-14 18:23:24 -05003115 goto out;
3116 }
3117 target->cmd_sg_cnt = token;
3118 break;
3119
David Dillowc07d4242011-01-16 13:57:10 -05003120 case SRP_OPT_ALLOW_EXT_SG:
3121 if (match_int(args, &token)) {
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00003122 pr_warn("bad allow_ext_sg parameter '%s'\n", p);
David Dillowc07d4242011-01-16 13:57:10 -05003123 goto out;
3124 }
3125 target->allow_ext_sg = !!token;
3126 break;
3127
3128 case SRP_OPT_SG_TABLESIZE:
3129 if (match_int(args, &token) || token < 1 ||
3130 token > SCSI_MAX_SG_CHAIN_SEGMENTS) {
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00003131 pr_warn("bad max sg_tablesize parameter '%s'\n",
3132 p);
David Dillowc07d4242011-01-16 13:57:10 -05003133 goto out;
3134 }
3135 target->sg_tablesize = token;
3136 break;
3137
Bart Van Assche4b5e5f42013-06-28 14:57:42 +02003138 case SRP_OPT_COMP_VECTOR:
3139 if (match_int(args, &token) || token < 0) {
3140 pr_warn("bad comp_vector parameter '%s'\n", p);
3141 goto out;
3142 }
3143 target->comp_vector = token;
3144 break;
3145
Vu Pham7bb312e2013-10-26 14:31:27 +02003146 case SRP_OPT_TL_RETRY_COUNT:
3147 if (match_int(args, &token) || token < 2 || token > 7) {
3148 pr_warn("bad tl_retry_count parameter '%s' (must be a number between 2 and 7)\n",
3149 p);
3150 goto out;
3151 }
3152 target->tl_retry_count = token;
3153 break;
3154
Roland Dreieraef9ec32005-11-02 14:07:13 -08003155 default:
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00003156 pr_warn("unknown parameter or missing value '%s' in target creation request\n",
3157 p);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003158 goto out;
3159 }
3160 }
3161
3162 if ((opt_mask & SRP_OPT_ALL) == SRP_OPT_ALL)
3163 ret = 0;
3164 else
3165 for (i = 0; i < ARRAY_SIZE(srp_opt_tokens); ++i)
3166 if ((srp_opt_tokens[i].token & SRP_OPT_ALL) &&
3167 !(srp_opt_tokens[i].token & opt_mask))
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00003168 pr_warn("target creation request is missing parameter '%s'\n",
3169 srp_opt_tokens[i].pattern);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003170
Bart Van Assche4d73f952013-10-26 14:40:37 +02003171 if (target->scsi_host->cmd_per_lun > target->scsi_host->can_queue
3172 && (opt_mask & SRP_OPT_MAX_CMD_PER_LUN))
3173 pr_warn("cmd_per_lun = %d > queue_size = %d\n",
3174 target->scsi_host->cmd_per_lun,
3175 target->scsi_host->can_queue);
3176
Roland Dreieraef9ec32005-11-02 14:07:13 -08003177out:
3178 kfree(options);
3179 return ret;
3180}
3181
Tony Jonesee959b02008-02-22 00:13:36 +01003182static ssize_t srp_create_target(struct device *dev,
3183 struct device_attribute *attr,
Roland Dreieraef9ec32005-11-02 14:07:13 -08003184 const char *buf, size_t count)
3185{
3186 struct srp_host *host =
Tony Jonesee959b02008-02-22 00:13:36 +01003187 container_of(dev, struct srp_host, dev);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003188 struct Scsi_Host *target_host;
3189 struct srp_target_port *target;
Bart Van Assche509c07b2014-10-30 14:48:30 +01003190 struct srp_rdma_ch *ch;
Bart Van Assched1b42892014-05-20 15:07:20 +02003191 struct srp_device *srp_dev = host->srp_dev;
3192 struct ib_device *ibdev = srp_dev->dev;
Bart Van Assched92c0da2014-10-06 17:14:36 +02003193 int ret, node_idx, node, cpu, i;
3194 bool multich = false;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003195
3196 target_host = scsi_host_alloc(&srp_template,
3197 sizeof (struct srp_target_port));
3198 if (!target_host)
3199 return -ENOMEM;
3200
David Dillow49248642011-01-14 18:23:24 -05003201 target_host->transportt = ib_srp_transport_template;
Bart Van Asschefd1b6c42011-07-13 09:19:16 -07003202 target_host->max_channel = 0;
3203 target_host->max_id = 1;
Bart Van Assche985aa492015-05-18 13:27:14 +02003204 target_host->max_lun = -1LL;
Arne Redlich3c8edf02006-11-15 12:43:00 +01003205 target_host->max_cmd_len = sizeof ((struct srp_cmd *) (void *) 0L)->cdb;
Roland Dreier5f068992005-11-11 14:06:01 -08003206
Roland Dreieraef9ec32005-11-02 14:07:13 -08003207 target = host_to_target(target_host);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003208
David Dillow49248642011-01-14 18:23:24 -05003209 target->io_class = SRP_REV16A_IB_IO_CLASS;
3210 target->scsi_host = target_host;
3211 target->srp_host = host;
Jason Gunthorpee6bf5f42015-07-30 17:22:22 -06003212 target->lkey = host->srp_dev->pd->local_dma_lkey;
Bart Van Assche03f6fb92015-08-10 17:09:36 -07003213 target->global_mr = host->srp_dev->global_mr;
David Dillow49248642011-01-14 18:23:24 -05003214 target->cmd_sg_cnt = cmd_sg_entries;
David Dillowc07d4242011-01-16 13:57:10 -05003215 target->sg_tablesize = indirect_sg_entries ? : cmd_sg_entries;
3216 target->allow_ext_sg = allow_ext_sg;
Vu Pham7bb312e2013-10-26 14:31:27 +02003217 target->tl_retry_count = 7;
Bart Van Assche4d73f952013-10-26 14:40:37 +02003218 target->queue_size = SRP_DEFAULT_QUEUE_SIZE;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003219
Bart Van Assche34aa6542014-10-30 14:47:22 +01003220 /*
3221 * Prevent the SCSI host from being removed by srp_remove_target()
3222 * before this function returns.
3223 */
3224 scsi_host_get(target->scsi_host);
3225
Bart Van Assche2d7091b2014-03-14 13:52:45 +01003226 mutex_lock(&host->add_target_mutex);
3227
Roland Dreieraef9ec32005-11-02 14:07:13 -08003228 ret = srp_parse_options(buf, target);
3229 if (ret)
Bart Van Asschefb49c8b2015-05-18 13:23:14 +02003230 goto out;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003231
Bart Van Assche77f2c1a2014-10-02 15:29:25 +02003232 ret = scsi_init_shared_tag_map(target_host, target_host->can_queue);
3233 if (ret)
Bart Van Asschefb49c8b2015-05-18 13:23:14 +02003234 goto out;
Bart Van Assche77f2c1a2014-10-02 15:29:25 +02003235
Bart Van Assche4d73f952013-10-26 14:40:37 +02003236 target->req_ring_size = target->queue_size - SRP_TSK_MGMT_SQ_SIZE;
3237
Bart Van Assche96fc2482013-06-28 14:51:26 +02003238 if (!srp_conn_unique(target->srp_host, target)) {
3239 shost_printk(KERN_INFO, target->scsi_host,
3240 PFX "Already connected to target port with id_ext=%016llx;ioc_guid=%016llx;initiator_ext=%016llx\n",
3241 be64_to_cpu(target->id_ext),
3242 be64_to_cpu(target->ioc_guid),
3243 be64_to_cpu(target->initiator_ext));
3244 ret = -EEXIST;
Bart Van Asschefb49c8b2015-05-18 13:23:14 +02003245 goto out;
Bart Van Assche96fc2482013-06-28 14:51:26 +02003246 }
3247
Bart Van Assche5cfb1782014-05-20 15:08:34 +02003248 if (!srp_dev->has_fmr && !srp_dev->has_fr && !target->allow_ext_sg &&
Bart Van Assched1b42892014-05-20 15:07:20 +02003249 target->cmd_sg_cnt < target->sg_tablesize) {
Bart Van Assche5cfb1782014-05-20 15:08:34 +02003250 pr_warn("No MR pool and no external indirect descriptors, limiting sg_tablesize to cmd_sg_cnt\n");
David Dillowc07d4242011-01-16 13:57:10 -05003251 target->sg_tablesize = target->cmd_sg_cnt;
3252 }
3253
3254 target_host->sg_tablesize = target->sg_tablesize;
3255 target->indirect_size = target->sg_tablesize *
3256 sizeof (struct srp_direct_buf);
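/*
 * Illustration (hypothetical configuration): with the default
 * cmd_sg_cnt of SRP_DEF_SG_TABLESIZE, the information unit must hold
 * the fixed SRP_CMD header, the indirect buffer descriptor header
 * and one 16-byte direct descriptor per scatter/gather entry.
 */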
David Dillow49248642011-01-14 18:23:24 -05003257 target->max_iu_len = sizeof (struct srp_cmd) +
3258 sizeof (struct srp_indirect_buf) +
3259 target->cmd_sg_cnt * sizeof (struct srp_direct_buf);
3260
Bart Van Asschec1120f82013-10-26 14:35:08 +02003261 INIT_WORK(&target->tl_err_work, srp_tl_err_work);
Bart Van Asscheef6c49d2011-12-26 16:49:18 +00003262 INIT_WORK(&target->remove_work, srp_remove_work);
David Dillow8f26c9f2011-01-14 19:45:50 -05003263 spin_lock_init(&target->lock);
Matan Barak55ee3ab2015-10-15 18:38:45 +03003264 ret = ib_query_gid(ibdev, host->port, 0, &target->sgid, NULL);
Sagi Grimberg2088ca62014-03-14 13:51:58 +01003265 if (ret)
Bart Van Asschefb49c8b2015-05-18 13:23:14 +02003266 goto out;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003267
Bart Van Assched92c0da2014-10-06 17:14:36 +02003268 ret = -ENOMEM;
3269 target->ch_count = max_t(unsigned, num_online_nodes(),
3270 min(ch_count ? :
3271 min(4 * num_online_nodes(),
3272 ibdev->num_comp_vectors),
3273 num_online_cpus()));
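/*
 * Worked example (hypothetical system): with two online NUMA nodes,
 * 16 online CPUs, eight completion vectors and no ch_count module
 * parameter, this evaluates to max(2, min(min(4 * 2, 8), 16)) = 8
 * RDMA channels.
 */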
3274 target->ch = kcalloc(target->ch_count, sizeof(*target->ch),
3275 GFP_KERNEL);
3276 if (!target->ch)
Bart Van Asschefb49c8b2015-05-18 13:23:14 +02003277 goto out;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003278
Bart Van Assched92c0da2014-10-06 17:14:36 +02003279 node_idx = 0;
3280 for_each_online_node(node) {
3281 const int ch_start = (node_idx * target->ch_count /
3282 num_online_nodes());
3283 const int ch_end = ((node_idx + 1) * target->ch_count /
3284 num_online_nodes());
3285 const int cv_start = (node_idx * ibdev->num_comp_vectors /
3286 num_online_nodes() + target->comp_vector)
3287 % ibdev->num_comp_vectors;
3288 const int cv_end = ((node_idx + 1) * ibdev->num_comp_vectors /
3289 num_online_nodes() + target->comp_vector)
3290 % ibdev->num_comp_vectors;
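/*
 * Each node gets a contiguous slice of the completion vectors,
 * rotated by the comp_vector login parameter; e.g. (hypothetically)
 * with two nodes, eight vectors and comp_vector 0, node 0 uses
 * vectors [0, 4) and node 1 uses vectors [4, 8).
 */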
3291 int cpu_idx = 0;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003292
Bart Van Assched92c0da2014-10-06 17:14:36 +02003293 for_each_online_cpu(cpu) {
3294 if (cpu_to_node(cpu) != node)
3295 continue;
3296 if (ch_start + cpu_idx >= ch_end)
3297 continue;
3298 ch = &target->ch[ch_start + cpu_idx];
3299 ch->target = target;
3300 ch->comp_vector = cv_start == cv_end ? cv_start :
3301 cv_start + cpu_idx % (cv_end - cv_start);
3302 spin_lock_init(&ch->lock);
3303 INIT_LIST_HEAD(&ch->free_tx);
3304 ret = srp_new_cm_id(ch);
3305 if (ret)
3306 goto err_disconnect;
3307
3308 ret = srp_create_ch_ib(ch);
3309 if (ret)
3310 goto err_disconnect;
3311
3312 ret = srp_alloc_req_data(ch);
3313 if (ret)
3314 goto err_disconnect;
3315
3316 ret = srp_connect_ch(ch, multich);
3317 if (ret) {
3318 shost_printk(KERN_ERR, target->scsi_host,
3319 PFX "Connection %d/%d failed\n",
3320 ch_start + cpu_idx,
3321 target->ch_count);
3322 if (node_idx == 0 && cpu_idx == 0) {
3323 goto err_disconnect;
3324 } else {
3325 srp_free_ch_ib(target, ch);
3326 srp_free_req_data(target, ch);
3327 target->ch_count = ch - target->ch;
Bart Van Asschec257ea62015-07-31 14:13:22 -07003328 goto connected;
Bart Van Assched92c0da2014-10-06 17:14:36 +02003329 }
3330 }
3331
3332 multich = true;
3333 cpu_idx++;
3334 }
3335 node_idx++;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003336 }
3337
Bart Van Asschec257ea62015-07-31 14:13:22 -07003338connected:
Bart Van Assched92c0da2014-10-06 17:14:36 +02003339 target->scsi_host->nr_hw_queues = target->ch_count;
3340
Roland Dreieraef9ec32005-11-02 14:07:13 -08003341 ret = srp_add_target(host, target);
3342 if (ret)
3343 goto err_disconnect;
3344
Bart Van Assche34aa6542014-10-30 14:47:22 +01003345 if (target->state != SRP_TARGET_REMOVED) {
3346 shost_printk(KERN_DEBUG, target->scsi_host, PFX
3347 "new target: id_ext %016llx ioc_guid %016llx pkey %04x service_id %016llx sgid %pI6 dgid %pI6\n",
3348 be64_to_cpu(target->id_ext),
3349 be64_to_cpu(target->ioc_guid),
Bart Van Assche747fe002014-10-30 14:48:05 +01003350 be16_to_cpu(target->pkey),
Bart Van Assche34aa6542014-10-30 14:47:22 +01003351 be64_to_cpu(target->service_id),
Bart Van Assche747fe002014-10-30 14:48:05 +01003352 target->sgid.raw, target->orig_dgid.raw);
Bart Van Assche34aa6542014-10-30 14:47:22 +01003353 }
Bart Van Asschee7ffde02014-03-14 13:52:21 +01003354
Bart Van Assche2d7091b2014-03-14 13:52:45 +01003355 ret = count;
3356
3357out:
3358 mutex_unlock(&host->add_target_mutex);
Bart Van Assche34aa6542014-10-30 14:47:22 +01003359
3360 scsi_host_put(target->scsi_host);
Bart Van Asschebc44bd12015-08-14 11:01:09 -07003361 if (ret < 0)
3362 scsi_host_put(target->scsi_host);
Bart Van Assche34aa6542014-10-30 14:47:22 +01003363
Bart Van Assche2d7091b2014-03-14 13:52:45 +01003364 return ret;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003365
3366err_disconnect:
3367 srp_disconnect_target(target);
3368
Bart Van Assched92c0da2014-10-06 17:14:36 +02003369 for (i = 0; i < target->ch_count; i++) {
3370 ch = &target->ch[i];
3371 srp_free_ch_ib(target, ch);
3372 srp_free_req_data(target, ch);
3373 }
Roland Dreieraef9ec32005-11-02 14:07:13 -08003374
Bart Van Assched92c0da2014-10-06 17:14:36 +02003375 kfree(target->ch);
Bart Van Assche2d7091b2014-03-14 13:52:45 +01003376 goto out;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003377}
3378
Tony Jonesee959b02008-02-22 00:13:36 +01003379static DEVICE_ATTR(add_target, S_IWUSR, NULL, srp_create_target);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003380
Tony Jonesee959b02008-02-22 00:13:36 +01003381static ssize_t show_ibdev(struct device *dev, struct device_attribute *attr,
3382 char *buf)
Roland Dreieraef9ec32005-11-02 14:07:13 -08003383{
Tony Jonesee959b02008-02-22 00:13:36 +01003384 struct srp_host *host = container_of(dev, struct srp_host, dev);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003385
Greg Kroah-Hartman05321932008-03-06 00:13:36 +01003386 return sprintf(buf, "%s\n", host->srp_dev->dev->name);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003387}
3388
Tony Jonesee959b02008-02-22 00:13:36 +01003389static DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003390
Tony Jonesee959b02008-02-22 00:13:36 +01003391static ssize_t show_port(struct device *dev, struct device_attribute *attr,
3392 char *buf)
Roland Dreieraef9ec32005-11-02 14:07:13 -08003393{
Tony Jonesee959b02008-02-22 00:13:36 +01003394 struct srp_host *host = container_of(dev, struct srp_host, dev);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003395
3396 return sprintf(buf, "%d\n", host->port);
3397}
3398
Tony Jonesee959b02008-02-22 00:13:36 +01003399static DEVICE_ATTR(port, S_IRUGO, show_port, NULL);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003400
Roland Dreierf5358a12006-06-17 20:37:29 -07003401static struct srp_host *srp_add_port(struct srp_device *device, u8 port)
Roland Dreieraef9ec32005-11-02 14:07:13 -08003402{
3403 struct srp_host *host;
3404
3405 host = kzalloc(sizeof *host, GFP_KERNEL);
3406 if (!host)
3407 return NULL;
3408
3409 INIT_LIST_HEAD(&host->target_list);
Matthew Wilcoxb3589fd2006-06-17 20:37:30 -07003410 spin_lock_init(&host->target_lock);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003411 init_completion(&host->released);
Bart Van Assche2d7091b2014-03-14 13:52:45 +01003412 mutex_init(&host->add_target_mutex);
Greg Kroah-Hartman05321932008-03-06 00:13:36 +01003413 host->srp_dev = device;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003414 host->port = port;
3415
Tony Jonesee959b02008-02-22 00:13:36 +01003416 host->dev.class = &srp_class;
3417 host->dev.parent = device->dev->dma_device;
Kay Sieversd927e382009-01-06 10:44:39 -08003418 dev_set_name(&host->dev, "srp-%s-%d", device->dev->name, port);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003419
Tony Jonesee959b02008-02-22 00:13:36 +01003420 if (device_register(&host->dev))
Roland Dreierf5358a12006-06-17 20:37:29 -07003421 goto free_host;
Tony Jonesee959b02008-02-22 00:13:36 +01003422 if (device_create_file(&host->dev, &dev_attr_add_target))
Roland Dreieraef9ec32005-11-02 14:07:13 -08003423 goto err_class;
Tony Jonesee959b02008-02-22 00:13:36 +01003424 if (device_create_file(&host->dev, &dev_attr_ibdev))
Roland Dreieraef9ec32005-11-02 14:07:13 -08003425 goto err_class;
Tony Jonesee959b02008-02-22 00:13:36 +01003426 if (device_create_file(&host->dev, &dev_attr_port))
Roland Dreieraef9ec32005-11-02 14:07:13 -08003427 goto err_class;
3428
3429 return host;
3430
3431err_class:
Tony Jonesee959b02008-02-22 00:13:36 +01003432 device_unregister(&host->dev);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003433
Roland Dreierf5358a12006-06-17 20:37:29 -07003434free_host:
Roland Dreieraef9ec32005-11-02 14:07:13 -08003435 kfree(host);
3436
3437 return NULL;
3438}
3439
3440static void srp_add_one(struct ib_device *device)
3441{
Roland Dreierf5358a12006-06-17 20:37:29 -07003442 struct srp_device *srp_dev;
3443 struct ib_device_attr *dev_attr;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003444 struct srp_host *host;
Hal Rosenstock41390322015-06-29 09:57:00 -04003445 int mr_page_shift, p;
Bart Van Assche52ede082014-05-20 15:07:45 +02003446 u64 max_pages_per_mr;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003447
Roland Dreierf5358a12006-06-17 20:37:29 -07003448 dev_attr = kmalloc(sizeof *dev_attr, GFP_KERNEL);
3449 if (!dev_attr)
Sean Heftycf311cd2006-01-10 07:39:34 -08003450 return;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003451
Roland Dreierf5358a12006-06-17 20:37:29 -07003452 if (ib_query_device(device, dev_attr)) {
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00003453 pr_warn("Query device failed for %s\n", device->name);
Roland Dreierf5358a12006-06-17 20:37:29 -07003454 goto free_attr;
3455 }
3456
3457 srp_dev = kmalloc(sizeof *srp_dev, GFP_KERNEL);
3458 if (!srp_dev)
3459 goto free_attr;
3460
Bart Van Assched1b42892014-05-20 15:07:20 +02003461 srp_dev->has_fmr = (device->alloc_fmr && device->dealloc_fmr &&
3462 device->map_phys_fmr && device->unmap_fmr);
Bart Van Assche5cfb1782014-05-20 15:08:34 +02003463 srp_dev->has_fr = (dev_attr->device_cap_flags &
3464 IB_DEVICE_MEM_MGT_EXTENSIONS);
3465 if (!srp_dev->has_fmr && !srp_dev->has_fr)
3466 dev_warn(&device->dev, "neither FMR nor FR is supported\n");
3467
3468 srp_dev->use_fast_reg = (srp_dev->has_fr &&
3469 (!srp_dev->has_fmr || prefer_fr));
Bart Van Assche002f1562015-08-10 17:08:44 -07003470 srp_dev->use_fmr = !srp_dev->use_fast_reg && srp_dev->has_fmr;
Bart Van Assched1b42892014-05-20 15:07:20 +02003471
Roland Dreierf5358a12006-06-17 20:37:29 -07003472 /*
3473 * Use the smallest page size supported by the HCA, down to a
David Dillow8f26c9f2011-01-14 19:45:50 -05003474 * minimum of 4096 bytes. We're unlikely to build large sglists
3475 * out of smaller entries.
Roland Dreierf5358a12006-06-17 20:37:29 -07003476 */
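/*
 * Example (hypothetical HCA): page_size_cap = 0xfffff000 means that
 * 4 KiB is the smallest supported page size, so ffs() - 1 = 12 and
 * mr_page_shift = max(12, 12) = 12, i.e. a 4096-byte MR page size.
 */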
Bart Van Assche52ede082014-05-20 15:07:45 +02003477 mr_page_shift = max(12, ffs(dev_attr->page_size_cap) - 1);
3478 srp_dev->mr_page_size = 1 << mr_page_shift;
3479 srp_dev->mr_page_mask = ~((u64) srp_dev->mr_page_size - 1);
3480 max_pages_per_mr = dev_attr->max_mr_size;
3481 do_div(max_pages_per_mr, srp_dev->mr_page_size);
3482 srp_dev->max_pages_per_mr = min_t(u64, SRP_MAX_PAGES_PER_MR,
3483 max_pages_per_mr);
Bart Van Assche5cfb1782014-05-20 15:08:34 +02003484 if (srp_dev->use_fast_reg) {
3485 srp_dev->max_pages_per_mr =
3486 min_t(u32, srp_dev->max_pages_per_mr,
3487 dev_attr->max_fast_reg_page_list_len);
3488 }
Bart Van Assche52ede082014-05-20 15:07:45 +02003489 srp_dev->mr_max_size = srp_dev->mr_page_size *
3490 srp_dev->max_pages_per_mr;
Bart Van Assche5cfb1782014-05-20 15:08:34 +02003491 pr_debug("%s: mr_page_shift = %d, dev_attr->max_mr_size = %#llx, dev_attr->max_fast_reg_page_list_len = %u, max_pages_per_mr = %d, mr_max_size = %#x\n",
Bart Van Assche52ede082014-05-20 15:07:45 +02003492 device->name, mr_page_shift, dev_attr->max_mr_size,
Bart Van Assche5cfb1782014-05-20 15:08:34 +02003493 dev_attr->max_fast_reg_page_list_len,
Bart Van Assche52ede082014-05-20 15:07:45 +02003494 srp_dev->max_pages_per_mr, srp_dev->mr_max_size);
Roland Dreierf5358a12006-06-17 20:37:29 -07003495
3496 INIT_LIST_HEAD(&srp_dev->dev_list);
3497
3498 srp_dev->dev = device;
3499 srp_dev->pd = ib_alloc_pd(device);
3500 if (IS_ERR(srp_dev->pd))
3501 goto free_dev;
3502
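/*
 * A global DMA MR with remote access rights is only created when
 * memory registration is not enforced (register_always == false) or
 * when the HCA supports neither FMR nor fast registration; otherwise
 * all I/O uses memory registrations from the per-channel pools.
 */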
Bart Van Assche03f6fb92015-08-10 17:09:36 -07003503 if (!register_always || (!srp_dev->has_fmr && !srp_dev->has_fr)) {
3504 srp_dev->global_mr = ib_get_dma_mr(srp_dev->pd,
3505 IB_ACCESS_LOCAL_WRITE |
3506 IB_ACCESS_REMOTE_READ |
3507 IB_ACCESS_REMOTE_WRITE);
3508 if (IS_ERR(srp_dev->global_mr))
3509 goto err_pd;
3510 } else {
3511 srp_dev->global_mr = NULL;
3512 }
Roland Dreierf5358a12006-06-17 20:37:29 -07003513
Hal Rosenstock41390322015-06-29 09:57:00 -04003514 for (p = rdma_start_port(device); p <= rdma_end_port(device); ++p) {
Roland Dreierf5358a12006-06-17 20:37:29 -07003515 host = srp_add_port(srp_dev, p);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003516 if (host)
Roland Dreierf5358a12006-06-17 20:37:29 -07003517 list_add_tail(&host->list, &srp_dev->dev_list);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003518 }
3519
Roland Dreierf5358a12006-06-17 20:37:29 -07003520 ib_set_client_data(device, &srp_client, srp_dev);
3521
3522 goto free_attr;
3523
3524err_pd:
3525 ib_dealloc_pd(srp_dev->pd);
3526
3527free_dev:
3528 kfree(srp_dev);
3529
3530free_attr:
3531 kfree(dev_attr);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003532}
3533
Haggai Eran7c1eb452015-07-30 17:50:14 +03003534static void srp_remove_one(struct ib_device *device, void *client_data)
Roland Dreieraef9ec32005-11-02 14:07:13 -08003535{
Roland Dreierf5358a12006-06-17 20:37:29 -07003536 struct srp_device *srp_dev;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003537 struct srp_host *host, *tmp_host;
Bart Van Asscheef6c49d2011-12-26 16:49:18 +00003538 struct srp_target_port *target;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003539
Haggai Eran7c1eb452015-07-30 17:50:14 +03003540 srp_dev = client_data;
Dotan Barak1fe0cb82013-06-12 15:20:36 +02003541 if (!srp_dev)
3542 return;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003543
	list_for_each_entry_safe(host, tmp_host, &srp_dev->dev_list, list) {
		device_unregister(&host->dev);
		/*
		 * Wait for the sysfs entry to go away, so that no new
		 * target ports can be created.
		 */
		wait_for_completion(&host->released);

		/*
		 * Remove all target ports.
		 */
		spin_lock(&host->target_lock);
		list_for_each_entry(target, &host->target_list, list)
			srp_queue_remove_work(target);
		spin_unlock(&host->target_lock);

		/*
		 * Wait for transport layer error handling (queued on
		 * system_long_wq) and for the target port removal work
		 * (queued on srp_remove_wq) to finish before freeing
		 * the host.
		 */
		flush_workqueue(system_long_wq);
		flush_workqueue(srp_remove_wq);

		kfree(host);
	}

	if (srp_dev->global_mr)
		ib_dereg_mr(srp_dev->global_mr);
	ib_dealloc_pd(srp_dev->pd);

	kfree(srp_dev);
}

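/*
 * Callbacks wired into the SRP transport class (scsi_transport_srp).
 * The transport layer drives reconnect, fast_io_fail and dev_loss
 * handling through these hooks and the tunable timeouts they point at.
 */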
static struct srp_function_template ib_srp_transport_functions = {
	.has_rport_state	 = true,
	.reset_timer_if_blocked	 = true,
	.reconnect_delay	 = &srp_reconnect_delay,
	.fast_io_fail_tmo	 = &srp_fast_io_fail_tmo,
	.dev_loss_tmo		 = &srp_dev_loss_tmo,
	.reconnect		 = srp_rport_reconnect,
	.rport_delete		 = srp_rport_delete,
	.terminate_rport_io	 = srp_terminate_io,
};

static int __init srp_init_module(void)
{
	int ret;

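	/*
	 * The driver stores pointers in the wr_id field of work
	 * completions, so refuse to build if that field is narrower
	 * than a pointer.
	 */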
	BUILD_BUG_ON(FIELD_SIZEOF(struct ib_wc, wr_id) < sizeof(void *));

	if (srp_sg_tablesize) {
		pr_warn("srp_sg_tablesize is deprecated, please use cmd_sg_entries\n");
		if (!cmd_sg_entries)
			cmd_sg_entries = srp_sg_tablesize;
	}

	if (!cmd_sg_entries)
		cmd_sg_entries = SRP_DEF_SG_TABLESIZE;

	if (cmd_sg_entries > 255) {
		pr_warn("Clamping cmd_sg_entries to 255\n");
		cmd_sg_entries = 255;
	}

	if (!indirect_sg_entries)
		indirect_sg_entries = cmd_sg_entries;
	else if (indirect_sg_entries < cmd_sg_entries) {
		pr_warn("Bumping up indirect_sg_entries to match cmd_sg_entries (%u)\n",
			cmd_sg_entries);
		indirect_sg_entries = cmd_sg_entries;
	}
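	/*
	 * Example of the interplay between the parameters sanitized above
	 * (hypothetical invocations; the values are chosen only for
	 * illustration):
	 *
	 *   modprobe ib_srp cmd_sg_entries=64
	 *
	 * leaves indirect_sg_entries == 64 as well, since it defaults to
	 * cmd_sg_entries when not set explicitly, while
	 *
	 *   modprobe ib_srp cmd_sg_entries=64 indirect_sg_entries=32
	 *
	 * gets bumped back up to 64 with a warning.
	 */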

	srp_remove_wq = create_workqueue("srp_remove");
	if (!srp_remove_wq) {
		ret = -ENOMEM;
		goto out;
	}

	ret = -ENOMEM;
	ib_srp_transport_template =
		srp_attach_transport(&ib_srp_transport_functions);
	if (!ib_srp_transport_template)
		goto destroy_wq;

	ret = class_register(&srp_class);
	if (ret) {
		pr_err("couldn't register class infiniband_srp\n");
		goto release_tr;
	}

	ib_sa_register_client(&srp_sa_client);

	ret = ib_register_client(&srp_client);
	if (ret) {
		pr_err("couldn't register IB client\n");
		goto unreg_sa;
	}

out:
	return ret;

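	/*
	 * Error unwind: undo the registrations above in reverse order.
	 * Each label releases one resource and falls through to the next,
	 * ending with a jump back to the common exit path.
	 */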
unreg_sa:
	ib_sa_unregister_client(&srp_sa_client);
	class_unregister(&srp_class);

release_tr:
	srp_release_transport(ib_srp_transport_template);

destroy_wq:
	destroy_workqueue(srp_remove_wq);
	goto out;
}

static void __exit srp_cleanup_module(void)
{
	ib_unregister_client(&srp_client);
	ib_sa_unregister_client(&srp_sa_client);
	class_unregister(&srp_class);
	srp_release_transport(ib_srp_transport_template);
	destroy_workqueue(srp_remove_wq);
}

module_init(srp_init_module);
module_exit(srp_cleanup_module);