/*
 * Copyright (c) 2005 Cisco Systems.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/string.h>
#include <linux/parser.h>
#include <linux/random.h>
#include <linux/jiffies.h>
#include <rdma/ib_cache.h>

#include <linux/atomic.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_tcq.h>
#include <scsi/srp.h>
#include <scsi/scsi_transport_srp.h>

#include "ib_srp.h"

#define DRV_NAME	"ib_srp"
#define PFX		DRV_NAME ": "
#define DRV_VERSION	"2.0"
#define DRV_RELDATE	"July 26, 2015"

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("InfiniBand SCSI RDMA Protocol initiator");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_INFO(release_date, DRV_RELDATE);

static unsigned int srp_sg_tablesize;
static unsigned int cmd_sg_entries;
static unsigned int indirect_sg_entries;
static bool allow_ext_sg;
static bool prefer_fr = true;
static bool register_always = true;
static int topspin_workarounds = 1;

module_param(srp_sg_tablesize, uint, 0444);
MODULE_PARM_DESC(srp_sg_tablesize, "Deprecated name for cmd_sg_entries");

module_param(cmd_sg_entries, uint, 0444);
MODULE_PARM_DESC(cmd_sg_entries,
		 "Default number of gather/scatter entries in the SRP command (default is 12, max 255)");

module_param(indirect_sg_entries, uint, 0444);
MODULE_PARM_DESC(indirect_sg_entries,
		 "Default max number of gather/scatter entries (default is 12, max is " __stringify(SCSI_MAX_SG_CHAIN_SEGMENTS) ")");

module_param(allow_ext_sg, bool, 0444);
MODULE_PARM_DESC(allow_ext_sg,
		 "Default behavior when there are more than cmd_sg_entries S/G entries after mapping; fails the request when false (default false)");

module_param(topspin_workarounds, int, 0444);
MODULE_PARM_DESC(topspin_workarounds,
		 "Enable workarounds for Topspin/Cisco SRP target bugs if != 0");

module_param(prefer_fr, bool, 0444);
MODULE_PARM_DESC(prefer_fr,
"Whether to use fast registration if both FMR and fast registration are supported");

module_param(register_always, bool, 0444);
MODULE_PARM_DESC(register_always,
		 "Use memory registration even for contiguous memory regions");

static const struct kernel_param_ops srp_tmo_ops;

static int srp_reconnect_delay = 10;
module_param_cb(reconnect_delay, &srp_tmo_ops, &srp_reconnect_delay,
		S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(reconnect_delay, "Time between successive reconnect attempts");

static int srp_fast_io_fail_tmo = 15;
module_param_cb(fast_io_fail_tmo, &srp_tmo_ops, &srp_fast_io_fail_tmo,
		S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fast_io_fail_tmo,
		 "Number of seconds between the observation of a transport"
		 " layer error and failing all I/O. \"off\" means that this"
		 " functionality is disabled.");

static int srp_dev_loss_tmo = 600;
module_param_cb(dev_loss_tmo, &srp_tmo_ops, &srp_dev_loss_tmo,
		S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dev_loss_tmo,
		 "Maximum number of seconds that the SRP transport should"
		 " insulate transport layer errors. After this time has been"
		 " exceeded the SCSI host is removed. Should be"
		 " between 1 and " __stringify(SCSI_DEVICE_BLOCK_MAX_TIMEOUT)
		 " if fast_io_fail_tmo has not been set. \"off\" means that"
		 " this functionality is disabled.");

static unsigned ch_count;
module_param(ch_count, uint, 0444);
MODULE_PARM_DESC(ch_count,
		 "Number of RDMA channels to use for communication with an SRP target. Using more than one channel improves performance if the HCA supports multiple completion vectors. The default value is the minimum of four times the number of online CPU sockets and the number of completion vectors supported by the HCA.");
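/*
 * Example usage (hypothetical values): a sketch of loading the driver with
 * registration of contiguous memory regions disabled and two RDMA channels
 * per target could look as follows:
 *
 *   modprobe ib_srp register_always=0 ch_count=2
 */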

static void srp_add_one(struct ib_device *device);
static void srp_remove_one(struct ib_device *device, void *client_data);
static void srp_recv_completion(struct ib_cq *cq, void *ch_ptr);
static void srp_send_completion(struct ib_cq *cq, void *ch_ptr);
static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event);

static struct scsi_transport_template *ib_srp_transport_template;
static struct workqueue_struct *srp_remove_wq;

static struct ib_client srp_client = {
	.name   = "srp",
	.add    = srp_add_one,
	.remove = srp_remove_one
};

static struct ib_sa_client srp_sa_client;

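/*
 * Getter for the reconnect_delay, fast_io_fail_tmo and dev_loss_tmo module
 * parameters: a negative stored value is reported as "off".
 */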
static int srp_tmo_get(char *buffer, const struct kernel_param *kp)
{
	int tmo = *(int *)kp->arg;

	if (tmo >= 0)
		return sprintf(buffer, "%d", tmo);
	else
		return sprintf(buffer, "off");
}

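/*
 * Setter for the timeout module parameters: parses @val ("off" or a number
 * of seconds) and only stores the new value if srp_tmo_valid() accepts the
 * resulting combination of the three timeouts. E.g. writing "off" to
 * /sys/module/ib_srp/parameters/fast_io_fail_tmo ends up here.
 */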
static int srp_tmo_set(const char *val, const struct kernel_param *kp)
{
	int tmo, res;

	res = srp_parse_tmo(&tmo, val);
	if (res)
		goto out;

	if (kp->arg == &srp_reconnect_delay)
		res = srp_tmo_valid(tmo, srp_fast_io_fail_tmo,
				    srp_dev_loss_tmo);
	else if (kp->arg == &srp_fast_io_fail_tmo)
		res = srp_tmo_valid(srp_reconnect_delay, tmo, srp_dev_loss_tmo);
	else
		res = srp_tmo_valid(srp_reconnect_delay, srp_fast_io_fail_tmo,
				    tmo);
	if (res)
		goto out;
	*(int *)kp->arg = tmo;

out:
	return res;
}

static const struct kernel_param_ops srp_tmo_ops = {
	.get = srp_tmo_get,
	.set = srp_tmo_set,
};

static inline struct srp_target_port *host_to_target(struct Scsi_Host *host)
{
	return (struct srp_target_port *) host->hostdata;
}

static const char *srp_target_info(struct Scsi_Host *host)
{
	return host_to_target(host)->target_name;
}

static int srp_target_is_topspin(struct srp_target_port *target)
{
	static const u8 topspin_oui[3] = { 0x00, 0x05, 0xad };
	static const u8 cisco_oui[3]   = { 0x00, 0x1b, 0x0d };

	return topspin_workarounds &&
		(!memcmp(&target->ioc_guid, topspin_oui, sizeof topspin_oui) ||
		 !memcmp(&target->ioc_guid, cisco_oui, sizeof cisco_oui));
}

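/*
 * Allocate an information unit (IU) buffer of @size bytes and DMA-map it
 * for @direction. Returns NULL on failure; srp_free_iu() undoes both steps.
 */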
static struct srp_iu *srp_alloc_iu(struct srp_host *host, size_t size,
				   gfp_t gfp_mask,
				   enum dma_data_direction direction)
{
	struct srp_iu *iu;

	iu = kmalloc(sizeof *iu, gfp_mask);
	if (!iu)
		goto out;

	iu->buf = kzalloc(size, gfp_mask);
	if (!iu->buf)
		goto out_free_iu;

	iu->dma = ib_dma_map_single(host->srp_dev->dev, iu->buf, size,
				    direction);
	if (ib_dma_mapping_error(host->srp_dev->dev, iu->dma))
		goto out_free_buf;

	iu->size      = size;
	iu->direction = direction;

	return iu;

out_free_buf:
	kfree(iu->buf);
out_free_iu:
	kfree(iu);
out:
	return NULL;
}

static void srp_free_iu(struct srp_host *host, struct srp_iu *iu)
{
	if (!iu)
		return;

	ib_dma_unmap_single(host->srp_dev->dev, iu->dma, iu->size,
			    iu->direction);
	kfree(iu->buf);
	kfree(iu);
}

static void srp_qp_event(struct ib_event *event, void *context)
{
	pr_debug("QP event %s (%d)\n",
		 ib_event_msg(event->event), event->event);
}

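/*
 * Bring a freshly created QP into the INIT state, looking up the P_Key
 * index for the target port and setting the access flags and port number.
 */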
static int srp_init_qp(struct srp_target_port *target,
		       struct ib_qp *qp)
{
	struct ib_qp_attr *attr;
	int ret;

	attr = kmalloc(sizeof *attr, GFP_KERNEL);
	if (!attr)
		return -ENOMEM;

	ret = ib_find_cached_pkey(target->srp_host->srp_dev->dev,
				  target->srp_host->port,
				  be16_to_cpu(target->pkey),
				  &attr->pkey_index);
	if (ret)
		goto out;

	attr->qp_state        = IB_QPS_INIT;
	attr->qp_access_flags = (IB_ACCESS_REMOTE_READ |
				 IB_ACCESS_REMOTE_WRITE);
	attr->port_num        = target->srp_host->port;

	ret = ib_modify_qp(qp, attr,
			   IB_QP_STATE		|
			   IB_QP_PKEY_INDEX	|
			   IB_QP_ACCESS_FLAGS	|
			   IB_QP_PORT);

out:
	kfree(attr);
	return ret;
}

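/*
 * Allocate a new CM ID for @ch, destroying any previous one, and reset the
 * path record fields of the channel from the target port description.
 */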
static int srp_new_cm_id(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	struct ib_cm_id *new_cm_id;

	new_cm_id = ib_create_cm_id(target->srp_host->srp_dev->dev,
				    srp_cm_handler, ch);
	if (IS_ERR(new_cm_id))
		return PTR_ERR(new_cm_id);

	if (ch->cm_id)
		ib_destroy_cm_id(ch->cm_id);
	ch->cm_id = new_cm_id;
	ch->path.sgid = target->sgid;
	ch->path.dgid = target->orig_dgid;
	ch->path.pkey = target->pkey;
	ch->path.service_id = target->service_id;

	return 0;
}

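/*
 * Create an FMR pool with one mapping per SCSI command slot (can_queue) and
 * a dirty watermark of a quarter of the pool size.
 */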
static struct ib_fmr_pool *srp_alloc_fmr_pool(struct srp_target_port *target)
{
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_fmr_pool_param fmr_param;

	memset(&fmr_param, 0, sizeof(fmr_param));
	fmr_param.pool_size	    = target->scsi_host->can_queue;
	fmr_param.dirty_watermark   = fmr_param.pool_size / 4;
	fmr_param.cache		    = 1;
	fmr_param.max_pages_per_fmr = dev->max_pages_per_mr;
	fmr_param.page_shift	    = ilog2(dev->mr_page_size);
	fmr_param.access	    = (IB_ACCESS_LOCAL_WRITE |
				       IB_ACCESS_REMOTE_WRITE |
				       IB_ACCESS_REMOTE_READ);

	return ib_create_fmr_pool(dev->pd, &fmr_param);
}

/**
 * srp_destroy_fr_pool() - free the resources owned by a pool
 * @pool: Fast registration pool to be destroyed.
 */
static void srp_destroy_fr_pool(struct srp_fr_pool *pool)
{
	int i;
	struct srp_fr_desc *d;

	if (!pool)
		return;

	for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
		if (d->frpl)
			ib_free_fast_reg_page_list(d->frpl);
		if (d->mr)
			ib_dereg_mr(d->mr);
	}
	kfree(pool);
}

/**
 * srp_create_fr_pool() - allocate and initialize a pool for fast registration
 * @device:            IB device to allocate fast registration descriptors for.
 * @pd:                Protection domain associated with the FR descriptors.
 * @pool_size:         Number of descriptors to allocate.
 * @max_page_list_len: Maximum fast registration work request page list length.
 */
static struct srp_fr_pool *srp_create_fr_pool(struct ib_device *device,
					      struct ib_pd *pd, int pool_size,
					      int max_page_list_len)
{
	struct srp_fr_pool *pool;
	struct srp_fr_desc *d;
	struct ib_mr *mr;
	struct ib_fast_reg_page_list *frpl;
	int i, ret = -EINVAL;

	if (pool_size <= 0)
		goto err;
	ret = -ENOMEM;
	pool = kzalloc(sizeof(struct srp_fr_pool) +
		       pool_size * sizeof(struct srp_fr_desc), GFP_KERNEL);
	if (!pool)
		goto err;
	pool->size = pool_size;
	pool->max_page_list_len = max_page_list_len;
	spin_lock_init(&pool->lock);
	INIT_LIST_HEAD(&pool->free_list);

	for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
		mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG,
				 max_page_list_len);
		if (IS_ERR(mr)) {
			ret = PTR_ERR(mr);
			goto destroy_pool;
		}
		d->mr = mr;
		frpl = ib_alloc_fast_reg_page_list(device, max_page_list_len);
		if (IS_ERR(frpl)) {
			ret = PTR_ERR(frpl);
			goto destroy_pool;
		}
		d->frpl = frpl;
		list_add_tail(&d->entry, &pool->free_list);
	}

out:
	return pool;

destroy_pool:
	srp_destroy_fr_pool(pool);

err:
	pool = ERR_PTR(ret);
	goto out;
}

/**
 * srp_fr_pool_get() - obtain a descriptor suitable for fast registration
 * @pool: Pool to obtain descriptor from.
 */
static struct srp_fr_desc *srp_fr_pool_get(struct srp_fr_pool *pool)
{
	struct srp_fr_desc *d = NULL;
	unsigned long flags;

	spin_lock_irqsave(&pool->lock, flags);
	if (!list_empty(&pool->free_list)) {
		d = list_first_entry(&pool->free_list, typeof(*d), entry);
		list_del(&d->entry);
	}
	spin_unlock_irqrestore(&pool->lock, flags);

	return d;
}

/**
 * srp_fr_pool_put() - put an FR descriptor back in the free list
 * @pool: Pool the descriptor was allocated from.
 * @desc: Pointer to an array of fast registration descriptor pointers.
 * @n:    Number of descriptors to put back.
 *
 * Note: The caller must already have queued an invalidation request for
 * desc->mr->rkey before calling this function.
 */
static void srp_fr_pool_put(struct srp_fr_pool *pool, struct srp_fr_desc **desc,
			    int n)
{
	unsigned long flags;
	int i;

	spin_lock_irqsave(&pool->lock, flags);
	for (i = 0; i < n; i++)
		list_add(&desc[i]->entry, &pool->free_list);
	spin_unlock_irqrestore(&pool->lock, flags);
}

static struct srp_fr_pool *srp_alloc_fr_pool(struct srp_target_port *target)
{
	struct srp_device *dev = target->srp_host->srp_dev;

	return srp_create_fr_pool(dev->dev, dev->pd,
				  target->scsi_host->can_queue,
				  dev->max_pages_per_mr);
}

/**
 * srp_destroy_qp() - destroy an RDMA queue pair
 * @ch: SRP RDMA channel.
 *
 * Change a queue pair into the error state and wait until all receive
 * completions have been processed before destroying it. This prevents the
 * receive completion handler from accessing the queue pair while it is
 * being destroyed.
 */
static void srp_destroy_qp(struct srp_rdma_ch *ch)
{
	static struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
	static struct ib_recv_wr wr = { .wr_id = SRP_LAST_WR_ID };
	struct ib_recv_wr *bad_wr;
	int ret;

	/* Destroying a QP and reusing ch->done is only safe if not connected */
	WARN_ON_ONCE(ch->connected);

	ret = ib_modify_qp(ch->qp, &attr, IB_QP_STATE);
	WARN_ONCE(ret, "ib_modify_qp() returned %d\n", ret);
	if (ret)
		goto out;

	init_completion(&ch->done);
	ret = ib_post_recv(ch->qp, &wr, &bad_wr);
	WARN_ONCE(ret, "ib_post_recv() returned %d\n", ret);
	if (ret == 0)
		wait_for_completion(&ch->done);

out:
	ib_destroy_qp(ch->qp);
}

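/*
 * (Re)create the completion queues, the QP and the registration pool (FR or
 * FMR) of an RDMA channel. The old IB resources of @ch, if any, are released
 * only after their replacements have been allocated successfully.
 */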
static int srp_create_ch_ib(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_qp_init_attr *init_attr;
	struct ib_cq *recv_cq, *send_cq;
	struct ib_qp *qp;
	struct ib_fmr_pool *fmr_pool = NULL;
	struct srp_fr_pool *fr_pool = NULL;
	const int m = 1 + dev->use_fast_reg;
	struct ib_cq_init_attr cq_attr = {};
	int ret;

	init_attr = kzalloc(sizeof *init_attr, GFP_KERNEL);
	if (!init_attr)
		return -ENOMEM;

	/* + 1 for SRP_LAST_WR_ID */
	cq_attr.cqe = target->queue_size + 1;
	cq_attr.comp_vector = ch->comp_vector;
	recv_cq = ib_create_cq(dev->dev, srp_recv_completion, NULL, ch,
			       &cq_attr);
	if (IS_ERR(recv_cq)) {
		ret = PTR_ERR(recv_cq);
		goto err;
	}

	cq_attr.cqe = m * target->queue_size;
	cq_attr.comp_vector = ch->comp_vector;
	send_cq = ib_create_cq(dev->dev, srp_send_completion, NULL, ch,
			       &cq_attr);
	if (IS_ERR(send_cq)) {
		ret = PTR_ERR(send_cq);
		goto err_recv_cq;
	}

	ib_req_notify_cq(recv_cq, IB_CQ_NEXT_COMP);

	init_attr->event_handler    = srp_qp_event;
	init_attr->cap.max_send_wr  = m * target->queue_size;
	init_attr->cap.max_recv_wr  = target->queue_size + 1;
	init_attr->cap.max_recv_sge = 1;
	init_attr->cap.max_send_sge = 1;
	init_attr->sq_sig_type      = IB_SIGNAL_REQ_WR;
	init_attr->qp_type          = IB_QPT_RC;
	init_attr->send_cq          = send_cq;
	init_attr->recv_cq          = recv_cq;

	qp = ib_create_qp(dev->pd, init_attr);
	if (IS_ERR(qp)) {
		ret = PTR_ERR(qp);
		goto err_send_cq;
	}

	ret = srp_init_qp(target, qp);
	if (ret)
		goto err_qp;

	if (dev->use_fast_reg) {
		fr_pool = srp_alloc_fr_pool(target);
		if (IS_ERR(fr_pool)) {
			ret = PTR_ERR(fr_pool);
			shost_printk(KERN_WARNING, target->scsi_host, PFX
				     "FR pool allocation failed (%d)\n", ret);
			goto err_qp;
		}
	} else if (dev->use_fmr) {
		fmr_pool = srp_alloc_fmr_pool(target);
		if (IS_ERR(fmr_pool)) {
			ret = PTR_ERR(fmr_pool);
			shost_printk(KERN_WARNING, target->scsi_host, PFX
				     "FMR pool allocation failed (%d)\n", ret);
			goto err_qp;
		}
	}

	if (ch->qp)
		srp_destroy_qp(ch);
	if (ch->recv_cq)
		ib_destroy_cq(ch->recv_cq);
	if (ch->send_cq)
		ib_destroy_cq(ch->send_cq);

	ch->qp = qp;
	ch->recv_cq = recv_cq;
	ch->send_cq = send_cq;

	if (dev->use_fast_reg) {
		if (ch->fr_pool)
			srp_destroy_fr_pool(ch->fr_pool);
		ch->fr_pool = fr_pool;
	} else if (dev->use_fmr) {
		if (ch->fmr_pool)
			ib_destroy_fmr_pool(ch->fmr_pool);
		ch->fmr_pool = fmr_pool;
	}

	kfree(init_attr);
	return 0;

err_qp:
	ib_destroy_qp(qp);

err_send_cq:
	ib_destroy_cq(send_cq);

err_recv_cq:
	ib_destroy_cq(recv_cq);

err:
	kfree(init_attr);
	return ret;
}

/*
 * Note: this function may be called without srp_alloc_iu_bufs() having been
 * invoked. Hence the ch->[rt]x_ring checks.
 */
static void srp_free_ch_ib(struct srp_target_port *target,
			   struct srp_rdma_ch *ch)
{
	struct srp_device *dev = target->srp_host->srp_dev;
	int i;

	if (!ch->target)
		return;

	if (ch->cm_id) {
		ib_destroy_cm_id(ch->cm_id);
		ch->cm_id = NULL;
	}

	/* If srp_new_cm_id() succeeded but srp_create_ch_ib() not, return. */
	if (!ch->qp)
		return;

	if (dev->use_fast_reg) {
		if (ch->fr_pool)
			srp_destroy_fr_pool(ch->fr_pool);
	} else if (dev->use_fmr) {
		if (ch->fmr_pool)
			ib_destroy_fmr_pool(ch->fmr_pool);
	}
	srp_destroy_qp(ch);
	ib_destroy_cq(ch->send_cq);
	ib_destroy_cq(ch->recv_cq);

	/*
	 * Avoid that the SCSI error handler tries to use this channel after
	 * it has been freed: the SCSI error handler may keep trying to
	 * perform recovery actions after scsi_remove_host() has returned.
	 */
	ch->target = NULL;

	ch->qp = NULL;
	ch->send_cq = ch->recv_cq = NULL;

	if (ch->rx_ring) {
		for (i = 0; i < target->queue_size; ++i)
			srp_free_iu(target->srp_host, ch->rx_ring[i]);
		kfree(ch->rx_ring);
		ch->rx_ring = NULL;
	}
	if (ch->tx_ring) {
		for (i = 0; i < target->queue_size; ++i)
			srp_free_iu(target->srp_host, ch->tx_ring[i]);
		kfree(ch->tx_ring);
		ch->tx_ring = NULL;
	}
}

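/*
 * Completion callback for ib_sa_path_rec_get(): store the path record (or
 * the failure status) in @ch and wake up the waiter in srp_lookup_path().
 */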
static void srp_path_rec_completion(int status,
				    struct ib_sa_path_rec *pathrec,
				    void *ch_ptr)
{
	struct srp_rdma_ch *ch = ch_ptr;
	struct srp_target_port *target = ch->target;

	ch->status = status;
	if (status)
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "Got failed path rec status %d\n", status);
	else
		ch->path = *pathrec;
	complete(&ch->done);
}

static int srp_lookup_path(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	int ret;

	ch->path.numb_path = 1;

	init_completion(&ch->done);

	ch->path_query_id = ib_sa_path_rec_get(&srp_sa_client,
					       target->srp_host->srp_dev->dev,
					       target->srp_host->port,
					       &ch->path,
					       IB_SA_PATH_REC_SERVICE_ID |
					       IB_SA_PATH_REC_DGID	 |
					       IB_SA_PATH_REC_SGID	 |
					       IB_SA_PATH_REC_NUMB_PATH	 |
					       IB_SA_PATH_REC_PKEY,
					       SRP_PATH_REC_TIMEOUT_MS,
					       GFP_KERNEL,
					       srp_path_rec_completion,
					       ch, &ch->path_query);
	if (ch->path_query_id < 0)
		return ch->path_query_id;

	ret = wait_for_completion_interruptible(&ch->done);
	if (ret < 0)
		return ret;

	if (ch->status < 0)
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Path record query failed\n");

	return ch->status;
}

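/*
 * Build and send an SRP_LOGIN_REQ through the IB CM. The layout of the
 * initiator and target port identifiers depends on the I/O class because
 * older SRP drafts put the GUID before the ID extension.
 */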
static int srp_send_req(struct srp_rdma_ch *ch, bool multich)
{
	struct srp_target_port *target = ch->target;
	struct {
		struct ib_cm_req_param param;
		struct srp_login_req   priv;
	} *req = NULL;
	int status;

	req = kzalloc(sizeof *req, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	req->param.primary_path	      = &ch->path;
	req->param.alternate_path     = NULL;
	req->param.service_id	      = target->service_id;
	req->param.qp_num	      = ch->qp->qp_num;
	req->param.qp_type	      = ch->qp->qp_type;
	req->param.private_data	      = &req->priv;
	req->param.private_data_len   = sizeof req->priv;
	req->param.flow_control	      = 1;

	get_random_bytes(&req->param.starting_psn, 4);
	req->param.starting_psn	     &= 0xffffff;

	/*
	 * Pick some arbitrary defaults here; we could make these
	 * module parameters if anyone cared about setting them.
	 */
	req->param.responder_resources	      = 4;
	req->param.remote_cm_response_timeout = 20;
	req->param.local_cm_response_timeout  = 20;
	req->param.retry_count		      = target->tl_retry_count;
	req->param.rnr_retry_count	      = 7;
	req->param.max_cm_retries	      = 15;

	req->priv.opcode	= SRP_LOGIN_REQ;
	req->priv.tag		= 0;
	req->priv.req_it_iu_len = cpu_to_be32(target->max_iu_len);
	req->priv.req_buf_fmt	= cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
					      SRP_BUF_FORMAT_INDIRECT);
	req->priv.req_flags	= (multich ? SRP_MULTICHAN_MULTI :
				   SRP_MULTICHAN_SINGLE);
	/*
	 * In the published SRP specification (draft rev. 16a), the
	 * port identifier format is 8 bytes of ID extension followed
	 * by 8 bytes of GUID.  Older drafts put the two halves in the
	 * opposite order, so that the GUID comes first.
	 *
	 * Targets conforming to these obsolete drafts can be
	 * recognized by the I/O Class they report.
	 */
	if (target->io_class == SRP_REV10_IB_IO_CLASS) {
		memcpy(req->priv.initiator_port_id,
		       &target->sgid.global.interface_id, 8);
		memcpy(req->priv.initiator_port_id + 8,
		       &target->initiator_ext, 8);
		memcpy(req->priv.target_port_id,     &target->ioc_guid, 8);
		memcpy(req->priv.target_port_id + 8, &target->id_ext, 8);
	} else {
		memcpy(req->priv.initiator_port_id,
		       &target->initiator_ext, 8);
		memcpy(req->priv.initiator_port_id + 8,
		       &target->sgid.global.interface_id, 8);
		memcpy(req->priv.target_port_id,     &target->id_ext, 8);
		memcpy(req->priv.target_port_id + 8, &target->ioc_guid, 8);
	}

	/*
	 * Topspin/Cisco SRP targets will reject our login unless we
	 * zero out the first 8 bytes of our initiator port ID and set
	 * the second 8 bytes to the local node GUID.
	 */
	if (srp_target_is_topspin(target)) {
		shost_printk(KERN_DEBUG, target->scsi_host,
			     PFX "Topspin/Cisco initiator port ID workaround "
			     "activated for target GUID %016llx\n",
			     be64_to_cpu(target->ioc_guid));
		memset(req->priv.initiator_port_id, 0, 8);
		memcpy(req->priv.initiator_port_id + 8,
		       &target->srp_host->srp_dev->dev->node_guid, 8);
	}

	status = ib_send_cm_req(ch->cm_id, &req->param);

	kfree(req);

	return status;
}

static bool srp_queue_remove_work(struct srp_target_port *target)
{
	bool changed = false;

	spin_lock_irq(&target->lock);
	if (target->state != SRP_TARGET_REMOVED) {
		target->state = SRP_TARGET_REMOVED;
		changed = true;
	}
	spin_unlock_irq(&target->lock);

	if (changed)
		queue_work(srp_remove_wq, &target->remove_work);

	return changed;
}

static void srp_disconnect_target(struct srp_target_port *target)
{
	struct srp_rdma_ch *ch;
	int i;

	/* XXX should send SRP_I_LOGOUT request */

	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		ch->connected = false;
		if (ch->cm_id && ib_send_cm_dreq(ch->cm_id, NULL, 0)) {
			shost_printk(KERN_DEBUG, target->scsi_host,
				     PFX "Sending CM DREQ failed\n");
		}
	}
}

static void srp_free_req_data(struct srp_target_port *target,
			      struct srp_rdma_ch *ch)
{
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_device *ibdev = dev->dev;
	struct srp_request *req;
	int i;

	if (!ch->req_ring)
		return;

	for (i = 0; i < target->req_ring_size; ++i) {
		req = &ch->req_ring[i];
		if (dev->use_fast_reg)
			kfree(req->fr_list);
		else
			kfree(req->fmr_list);
		kfree(req->map_page);
		if (req->indirect_dma_addr) {
			ib_dma_unmap_single(ibdev, req->indirect_dma_addr,
					    target->indirect_size,
					    DMA_TO_DEVICE);
		}
		kfree(req->indirect_desc);
	}

	kfree(ch->req_ring);
	ch->req_ring = NULL;
}

static int srp_alloc_req_data(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	struct srp_device *srp_dev = target->srp_host->srp_dev;
	struct ib_device *ibdev = srp_dev->dev;
	struct srp_request *req;
	void *mr_list;
	dma_addr_t dma_addr;
	int i, ret = -ENOMEM;

	ch->req_ring = kcalloc(target->req_ring_size, sizeof(*ch->req_ring),
			       GFP_KERNEL);
	if (!ch->req_ring)
		goto out;

	for (i = 0; i < target->req_ring_size; ++i) {
		req = &ch->req_ring[i];
		mr_list = kmalloc(target->cmd_sg_cnt * sizeof(void *),
				  GFP_KERNEL);
		if (!mr_list)
			goto out;
		if (srp_dev->use_fast_reg)
			req->fr_list = mr_list;
		else
			req->fmr_list = mr_list;
		req->map_page = kmalloc(srp_dev->max_pages_per_mr *
					sizeof(void *), GFP_KERNEL);
		if (!req->map_page)
			goto out;
		req->indirect_desc = kmalloc(target->indirect_size, GFP_KERNEL);
		if (!req->indirect_desc)
			goto out;

		dma_addr = ib_dma_map_single(ibdev, req->indirect_desc,
					     target->indirect_size,
					     DMA_TO_DEVICE);
		if (ib_dma_mapping_error(ibdev, dma_addr))
			goto out;

		req->indirect_dma_addr = dma_addr;
	}
	ret = 0;

out:
	return ret;
}

/**
 * srp_del_scsi_host_attr() - Remove attributes defined in the host template.
 * @shost: SCSI host whose attributes to remove from sysfs.
 *
 * Note: Any attributes defined in the host template and that did not exist
 * before invocation of this function will be ignored.
 */
static void srp_del_scsi_host_attr(struct Scsi_Host *shost)
{
	struct device_attribute **attr;

	for (attr = shost->hostt->shost_attrs; attr && *attr; ++attr)
		device_remove_file(&shost->shost_dev, *attr);
}

static void srp_remove_target(struct srp_target_port *target)
{
	struct srp_rdma_ch *ch;
	int i;

	WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);

	srp_del_scsi_host_attr(target->scsi_host);
	srp_rport_get(target->rport);
	srp_remove_host(target->scsi_host);
	scsi_remove_host(target->scsi_host);
	srp_stop_rport_timers(target->rport);
	srp_disconnect_target(target);
	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		srp_free_ch_ib(target, ch);
	}
	cancel_work_sync(&target->tl_err_work);
	srp_rport_put(target->rport);
	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		srp_free_req_data(target, ch);
	}
	kfree(target->ch);
	target->ch = NULL;

	spin_lock(&target->srp_host->target_lock);
	list_del(&target->list);
	spin_unlock(&target->srp_host->target_lock);

	scsi_host_put(target->scsi_host);
}

static void srp_remove_work(struct work_struct *work)
{
	struct srp_target_port *target =
		container_of(work, struct srp_target_port, remove_work);

	WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);

	srp_remove_target(target);
}

static void srp_rport_delete(struct srp_rport *rport)
{
	struct srp_target_port *target = rport->lld_data;

	srp_queue_remove_work(target);
}

/**
 * srp_connected_ch() - number of connected channels
 * @target: SRP target port.
 */
static int srp_connected_ch(struct srp_target_port *target)
{
	int i, c = 0;

	for (i = 0; i < target->ch_count; i++)
		c += target->ch[i].connected;

	return c;
}

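/*
 * Connect one RDMA channel: look up a path record, send a login request and
 * retry as long as the target answers with a port or LID/QP redirect.
 */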
static int srp_connect_ch(struct srp_rdma_ch *ch, bool multich)
{
	struct srp_target_port *target = ch->target;
	int ret;

	WARN_ON_ONCE(!multich && srp_connected_ch(target) > 0);

	ret = srp_lookup_path(ch);
	if (ret)
		return ret;

	while (1) {
		init_completion(&ch->done);
		ret = srp_send_req(ch, multich);
		if (ret)
			return ret;
		ret = wait_for_completion_interruptible(&ch->done);
		if (ret < 0)
			return ret;

		/*
		 * The CM event handling code will set status to
		 * SRP_PORT_REDIRECT if we get a port redirect REJ
		 * back, or SRP_DLID_REDIRECT if we get a lid/qp
		 * redirect REJ back.
		 */
		switch (ch->status) {
		case 0:
			ch->connected = true;
			return 0;

		case SRP_PORT_REDIRECT:
			ret = srp_lookup_path(ch);
			if (ret)
				return ret;
			break;

		case SRP_DLID_REDIRECT:
			break;

		case SRP_STALE_CONN:
			shost_printk(KERN_ERR, target->scsi_host, PFX
				     "giving up on stale connection\n");
			ch->status = -ECONNRESET;
			return ch->status;

		default:
			return ch->status;
		}
	}
}

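/*
 * Post a local invalidation work request for @rkey on the QP of @ch; the
 * result is reported asynchronously through the send completion queue.
 */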
static int srp_inv_rkey(struct srp_rdma_ch *ch, u32 rkey)
{
	struct ib_send_wr *bad_wr;
	struct ib_send_wr wr = {
		.opcode		    = IB_WR_LOCAL_INV,
		.wr_id		    = LOCAL_INV_WR_ID_MASK,
		.next		    = NULL,
		.num_sge	    = 0,
		.send_flags	    = 0,
		.ex.invalidate_rkey = rkey,
	};

	return ib_post_send(ch->qp, &wr, &bad_wr);
}

static void srp_unmap_data(struct scsi_cmnd *scmnd,
			   struct srp_rdma_ch *ch,
			   struct srp_request *req)
{
	struct srp_target_port *target = ch->target;
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_device *ibdev = dev->dev;
	int i, res;

	if (!scsi_sglist(scmnd) ||
	    (scmnd->sc_data_direction != DMA_TO_DEVICE &&
	     scmnd->sc_data_direction != DMA_FROM_DEVICE))
		return;

	if (dev->use_fast_reg) {
		struct srp_fr_desc **pfr;

		for (i = req->nmdesc, pfr = req->fr_list; i > 0; i--, pfr++) {
			res = srp_inv_rkey(ch, (*pfr)->mr->rkey);
			if (res < 0) {
				shost_printk(KERN_ERR, target->scsi_host, PFX
					     "Queueing INV WR for rkey %#x failed (%d)\n",
					     (*pfr)->mr->rkey, res);
				queue_work(system_long_wq,
					   &target->tl_err_work);
			}
		}
		if (req->nmdesc)
			srp_fr_pool_put(ch->fr_pool, req->fr_list,
					req->nmdesc);
	} else if (dev->use_fmr) {
		struct ib_pool_fmr **pfmr;

		for (i = req->nmdesc, pfmr = req->fmr_list; i > 0; i--, pfmr++)
			ib_fmr_pool_unmap(*pfmr);
	}

	ib_dma_unmap_sg(ibdev, scsi_sglist(scmnd), scsi_sg_count(scmnd),
			scmnd->sc_data_direction);
}

/**
 * srp_claim_req - Take ownership of the scmnd associated with a request.
 * @ch: SRP RDMA channel.
 * @req: SRP request.
 * @sdev: If not NULL, only take ownership for this SCSI device.
 * @scmnd: If NULL, take ownership of @req->scmnd. If not NULL, only take
 *         ownership of @req->scmnd if it equals @scmnd.
 *
 * Return value:
 * Either NULL or a pointer to the SCSI command the caller became owner of.
 */
static struct scsi_cmnd *srp_claim_req(struct srp_rdma_ch *ch,
				       struct srp_request *req,
				       struct scsi_device *sdev,
				       struct scsi_cmnd *scmnd)
{
	unsigned long flags;

	spin_lock_irqsave(&ch->lock, flags);
	if (req->scmnd &&
	    (!sdev || req->scmnd->device == sdev) &&
	    (!scmnd || req->scmnd == scmnd)) {
		scmnd = req->scmnd;
		req->scmnd = NULL;
	} else {
		scmnd = NULL;
	}
	spin_unlock_irqrestore(&ch->lock, flags);

	return scmnd;
}

/**
 * srp_free_req() - Unmap data and adjust the request limit.
 * @ch: SRP RDMA channel.
 * @req: Request to be freed.
 * @scmnd: SCSI command associated with @req.
 * @req_lim_delta: Amount to be added to @ch->req_lim.
 */
static void srp_free_req(struct srp_rdma_ch *ch, struct srp_request *req,
			 struct scsi_cmnd *scmnd, s32 req_lim_delta)
{
	unsigned long flags;

	srp_unmap_data(scmnd, ch, req);

	spin_lock_irqsave(&ch->lock, flags);
	ch->req_lim += req_lim_delta;
	spin_unlock_irqrestore(&ch->lock, flags);
}

static void srp_finish_req(struct srp_rdma_ch *ch, struct srp_request *req,
			   struct scsi_device *sdev, int result)
{
	struct scsi_cmnd *scmnd = srp_claim_req(ch, req, sdev, NULL);

	if (scmnd) {
		srp_free_req(ch, req, scmnd, 0);
		scmnd->result = result;
		scmnd->scsi_done(scmnd);
	}
}

static void srp_terminate_io(struct srp_rport *rport)
{
	struct srp_target_port *target = rport->lld_data;
	struct srp_rdma_ch *ch;
	struct Scsi_Host *shost = target->scsi_host;
	struct scsi_device *sdev;
	int i, j;

	/*
	 * Invoking srp_terminate_io() while srp_queuecommand() is running
	 * is not safe. Hence the warning statement below.
	 */
	shost_for_each_device(sdev, shost)
		WARN_ON_ONCE(sdev->request_queue->request_fn_active);

	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];

		for (j = 0; j < target->req_ring_size; ++j) {
			struct srp_request *req = &ch->req_ring[j];

			srp_finish_req(ch, req, NULL,
				       DID_TRANSPORT_FAILFAST << 16);
		}
	}
}

/*
 * It is up to the caller to ensure that srp_rport_reconnect() calls are
 * serialized and that no concurrent srp_queuecommand(), srp_abort(),
 * srp_reset_device() or srp_reset_host() calls will occur while this function
 * is in progress. One way to realize that is not to call this function
 * directly but to call srp_reconnect_rport() instead since that last function
 * serializes calls of this function via rport->mutex and also blocks
 * srp_queuecommand() calls before invoking this function.
 */
static int srp_rport_reconnect(struct srp_rport *rport)
{
	struct srp_target_port *target = rport->lld_data;
	struct srp_rdma_ch *ch;
	int i, j, ret = 0;
	bool multich = false;

	srp_disconnect_target(target);

	if (target->state == SRP_TARGET_SCANNING)
		return -ENODEV;

	/*
	 * Now get a new local CM ID so that we avoid confusing the target in
	 * case things are really fouled up. Doing so also ensures that all CM
	 * callbacks will have finished before a new QP is allocated.
	 */
	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		ret += srp_new_cm_id(ch);
	}
	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		for (j = 0; j < target->req_ring_size; ++j) {
			struct srp_request *req = &ch->req_ring[j];

			srp_finish_req(ch, req, NULL, DID_RESET << 16);
		}
	}
	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		/*
		 * Whether or not creating a new CM ID succeeded, create a new
		 * QP. This guarantees that all completion callback function
		 * invocations have finished before request resetting starts.
		 */
		ret += srp_create_ch_ib(ch);

		INIT_LIST_HEAD(&ch->free_tx);
		for (j = 0; j < target->queue_size; ++j)
			list_add(&ch->tx_ring[j]->list, &ch->free_tx);
	}

	target->qp_in_error = false;

	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		if (ret)
			break;
		ret = srp_connect_ch(ch, multich);
		multich = true;
	}

	if (ret == 0)
		shost_printk(KERN_INFO, target->scsi_host,
			     PFX "reconnect succeeded\n");

	return ret;
}

static void srp_map_desc(struct srp_map_state *state, dma_addr_t dma_addr,
			 unsigned int dma_len, u32 rkey)
{
	struct srp_direct_buf *desc = state->desc;

	WARN_ON_ONCE(!dma_len);

	desc->va = cpu_to_be64(dma_addr);
	desc->key = cpu_to_be32(rkey);
	desc->len = cpu_to_be32(dma_len);

	state->total_len += dma_len;
	state->desc++;
	state->ndesc++;
}

1278static int srp_map_finish_fmr(struct srp_map_state *state,
Bart Van Assche509c07b2014-10-30 14:48:30 +01001279 struct srp_rdma_ch *ch)
David Dillow8f26c9f2011-01-14 19:45:50 -05001280{
Bart Van Assche186fbc62015-08-10 17:06:29 -07001281 struct srp_target_port *target = ch->target;
1282 struct srp_device *dev = target->srp_host->srp_dev;
David Dillow8f26c9f2011-01-14 19:45:50 -05001283 struct ib_pool_fmr *fmr;
Roland Dreierf5358a12006-06-17 20:37:29 -07001284 u64 io_addr = 0;
David Dillow8f26c9f2011-01-14 19:45:50 -05001285
Bart Van Asschef731ed62015-08-10 17:07:27 -07001286 if (state->fmr.next >= state->fmr.end)
1287 return -ENOMEM;
1288
Bart Van Assche509c07b2014-10-30 14:48:30 +01001289 fmr = ib_fmr_pool_map_phys(ch->fmr_pool, state->pages,
David Dillow8f26c9f2011-01-14 19:45:50 -05001290 state->npages, io_addr);
1291 if (IS_ERR(fmr))
1292 return PTR_ERR(fmr);
1293
Bart Van Asschef731ed62015-08-10 17:07:27 -07001294 *state->fmr.next++ = fmr;
Bart Van Assche52ede082014-05-20 15:07:45 +02001295 state->nmdesc++;
David Dillow8f26c9f2011-01-14 19:45:50 -05001296
Bart Van Assche186fbc62015-08-10 17:06:29 -07001297 srp_map_desc(state, state->base_dma_addr & ~dev->mr_page_mask,
1298 state->dma_len, fmr->fmr->rkey);
Bart Van Assche539dde62014-05-20 15:05:46 +02001299
David Dillow8f26c9f2011-01-14 19:45:50 -05001300 return 0;
1301}
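
/*
 * Worked example (illustrative only): assume mr_page_size = 4096, so
 * mr_page_mask = ~0xfffULL, and base_dma_addr = 0x12345678. The page
 * addresses handed to ib_fmr_pool_map_phys() are aligned (0x12345000,
 * ...) and the FMR is mapped at io_addr 0, so the descriptor must keep
 * the in-page offset:
 *
 *	base_dma_addr & ~dev->mr_page_mask = 0x678
 *
 * which becomes the virtual address in the SRP direct descriptor.
 */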
1302
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001303static int srp_map_finish_fr(struct srp_map_state *state,
Bart Van Assche509c07b2014-10-30 14:48:30 +01001304 struct srp_rdma_ch *ch)
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001305{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001306 struct srp_target_port *target = ch->target;
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001307 struct srp_device *dev = target->srp_host->srp_dev;
1308 struct ib_send_wr *bad_wr;
1309 struct ib_send_wr wr;
1310 struct srp_fr_desc *desc;
1311 u32 rkey;
1312
Bart Van Asschef731ed62015-08-10 17:07:27 -07001313 if (state->fr.next >= state->fr.end)
1314 return -ENOMEM;
1315
Bart Van Assche509c07b2014-10-30 14:48:30 +01001316 desc = srp_fr_pool_get(ch->fr_pool);
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001317 if (!desc)
1318 return -ENOMEM;
1319
1320 rkey = ib_inc_rkey(desc->mr->rkey);
1321 ib_update_fast_reg_key(desc->mr, rkey);
1322
1323 memcpy(desc->frpl->page_list, state->pages,
1324 sizeof(state->pages[0]) * state->npages);
1325
1326 memset(&wr, 0, sizeof(wr));
1327 wr.opcode = IB_WR_FAST_REG_MR;
1328 wr.wr_id = FAST_REG_WR_ID_MASK;
1329 wr.wr.fast_reg.iova_start = state->base_dma_addr;
1330 wr.wr.fast_reg.page_list = desc->frpl;
1331 wr.wr.fast_reg.page_list_len = state->npages;
1332 wr.wr.fast_reg.page_shift = ilog2(dev->mr_page_size);
1333 wr.wr.fast_reg.length = state->dma_len;
1334 wr.wr.fast_reg.access_flags = (IB_ACCESS_LOCAL_WRITE |
1335 IB_ACCESS_REMOTE_READ |
1336 IB_ACCESS_REMOTE_WRITE);
1337 wr.wr.fast_reg.rkey = desc->mr->lkey;
1338
Bart Van Asschef731ed62015-08-10 17:07:27 -07001339 *state->fr.next++ = desc;
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001340 state->nmdesc++;
1341
1342 srp_map_desc(state, state->base_dma_addr, state->dma_len,
1343 desc->mr->rkey);
1344
Bart Van Assche509c07b2014-10-30 14:48:30 +01001345 return ib_post_send(ch->qp, &wr, &bad_wr);
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001346}
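
/*
 * Note on the rkey update above (illustrative): ib_inc_rkey() bumps only
 * the low eight "consumer key" bits of the rkey, e.g.
 *
 *	0x123456ff -> 0x12345600
 *
 * so reusing a fast registration descriptor yields a fresh rkey that
 * stale remote accesses cannot match, while the MR index is unchanged.
 */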
1347
Bart Van Assche539dde62014-05-20 15:05:46 +02001348static int srp_finish_mapping(struct srp_map_state *state,
Bart Van Assche509c07b2014-10-30 14:48:30 +01001349 struct srp_rdma_ch *ch)
Bart Van Assche539dde62014-05-20 15:05:46 +02001350{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001351 struct srp_target_port *target = ch->target;
Bart Van Assche002f1562015-08-10 17:08:44 -07001352 struct srp_device *dev = target->srp_host->srp_dev;
Bart Van Assche539dde62014-05-20 15:05:46 +02001353 int ret = 0;
1354
Bart Van Assche002f1562015-08-10 17:08:44 -07001355 WARN_ON_ONCE(!dev->use_fast_reg && !dev->use_fmr);
1356
Bart Van Assche539dde62014-05-20 15:05:46 +02001357 if (state->npages == 0)
1358 return 0;
1359
Bart Van Assche03f6fb92015-08-10 17:09:36 -07001360 if (state->npages == 1 && target->global_mr)
Bart Van Assche52ede082014-05-20 15:07:45 +02001361 srp_map_desc(state, state->base_dma_addr, state->dma_len,
Bart Van Assche03f6fb92015-08-10 17:09:36 -07001362 target->global_mr->rkey);
Bart Van Assche539dde62014-05-20 15:05:46 +02001363 else
Bart Van Assche002f1562015-08-10 17:08:44 -07001364 ret = dev->use_fast_reg ? srp_map_finish_fr(state, ch) :
Bart Van Assche509c07b2014-10-30 14:48:30 +01001365 srp_map_finish_fmr(state, ch);
Bart Van Assche539dde62014-05-20 15:05:46 +02001366
1367 if (ret == 0) {
1368 state->npages = 0;
Bart Van Assche52ede082014-05-20 15:07:45 +02001369 state->dma_len = 0;
Bart Van Assche539dde62014-05-20 15:05:46 +02001370 }
1371
1372 return ret;
1373}
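
/*
 * Illustrative summary of the dispatch above:
 *
 *	state->npages == 0		 -> nothing to register
 *	npages == 1 && target->global_mr -> direct descriptor using the
 *					    global rkey, no registration
 *	otherwise			 -> fast registration (use_fast_reg)
 *					    or FMR (use_fmr)
 */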
1374
David Dillow8f26c9f2011-01-14 19:45:50 -05001375static int srp_map_sg_entry(struct srp_map_state *state,
Bart Van Assche509c07b2014-10-30 14:48:30 +01001376 struct srp_rdma_ch *ch,
Bart Van Assche3ae95da2015-08-10 17:08:18 -07001377 struct scatterlist *sg, int sg_index)
David Dillow8f26c9f2011-01-14 19:45:50 -05001378{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001379 struct srp_target_port *target = ch->target;
Greg Kroah-Hartman05321932008-03-06 00:13:36 +01001380 struct srp_device *dev = target->srp_host->srp_dev;
Ralph Campbell85507bc2006-12-12 14:30:55 -08001381 struct ib_device *ibdev = dev->dev;
David Dillow8f26c9f2011-01-14 19:45:50 -05001382 dma_addr_t dma_addr = ib_sg_dma_address(ibdev, sg);
1383 unsigned int dma_len = ib_sg_dma_len(ibdev, sg);
Bart Van Assche3ae95da2015-08-10 17:08:18 -07001384 unsigned int len = 0;
David Dillow8f26c9f2011-01-14 19:45:50 -05001385 int ret;
Roland Dreierf5358a12006-06-17 20:37:29 -07001386
Bart Van Assche3ae95da2015-08-10 17:08:18 -07001387 WARN_ON_ONCE(!dma_len);
Ishai Rabinovitz559ce8f2006-08-03 10:35:43 -07001388
David Dillow8f26c9f2011-01-14 19:45:50 -05001389 while (dma_len) {
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001390 unsigned offset = dma_addr & ~dev->mr_page_mask;
1391 if (state->npages == dev->max_pages_per_mr || offset != 0) {
Bart Van Assche509c07b2014-10-30 14:48:30 +01001392 ret = srp_finish_mapping(state, ch);
David Dillow8f26c9f2011-01-14 19:45:50 -05001393 if (ret)
1394 return ret;
Roland Dreierf5358a12006-06-17 20:37:29 -07001395 }
1396
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001397 len = min_t(unsigned int, dma_len, dev->mr_page_size - offset);
David Dillow8f26c9f2011-01-14 19:45:50 -05001398
1399 if (!state->npages)
1400 state->base_dma_addr = dma_addr;
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001401 state->pages[state->npages++] = dma_addr & dev->mr_page_mask;
Bart Van Assche52ede082014-05-20 15:07:45 +02001402 state->dma_len += len;
David Dillow8f26c9f2011-01-14 19:45:50 -05001403 dma_addr += len;
1404 dma_len -= len;
Roland Dreierf5358a12006-06-17 20:37:29 -07001405 }
1406
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001407 /*
1408 * If the last entry of the MR wasn't a full page, then we need to
David Dillow8f26c9f2011-01-14 19:45:50 -05001409 * close it out and start a new one -- we can only merge at page
1410 * boundaries.
1411 */
Roland Dreierf5358a12006-06-17 20:37:29 -07001412 ret = 0;
Bart Van Assche0e0d3a42015-08-10 17:07:46 -07001413 if (len != dev->mr_page_size)
Bart Van Assche509c07b2014-10-30 14:48:30 +01001414 ret = srp_finish_mapping(state, ch);
Roland Dreierf5358a12006-06-17 20:37:29 -07001415 return ret;
1416}
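
/*
 * Worked example for the loop above (illustrative, 4 KiB mr_page_size):
 * an s/g entry with dma_addr = 0x10200 and dma_len = 0x1000 is handled
 * as follows:
 *
 *	iteration 1: offset = 0x200 -> close any open mapping, then map
 *		     page 0x10000 with len = 0xe00
 *	iteration 2: offset = 0     -> page 0x11000 with len = 0x200
 *
 * Both pages land in one memory region with base_dma_addr = 0x10200 and
 * dma_len = 0x1000; since the final len != mr_page_size the mapping is
 * closed after the loop instead of being merged with the next entry.
 */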
1417
Bart Van Assche509c07b2014-10-30 14:48:30 +01001418static int srp_map_sg(struct srp_map_state *state, struct srp_rdma_ch *ch,
1419 struct srp_request *req, struct scatterlist *scat,
1420 int count)
Bart Van Assche76bc1e12014-05-20 15:05:21 +02001421{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001422 struct srp_target_port *target = ch->target;
Bart Van Assche76bc1e12014-05-20 15:05:21 +02001423 struct srp_device *dev = target->srp_host->srp_dev;
Bart Van Assche76bc1e12014-05-20 15:05:21 +02001424 struct scatterlist *sg;
Bart Van Assche0e0d3a42015-08-10 17:07:46 -07001425 int i, ret;
Bart Van Assche76bc1e12014-05-20 15:05:21 +02001426
1427 state->desc = req->indirect_desc;
1428 state->pages = req->map_page;
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001429 if (dev->use_fast_reg) {
Bart Van Asschef731ed62015-08-10 17:07:27 -07001430 state->fr.next = req->fr_list;
1431 state->fr.end = req->fr_list + target->cmd_sg_cnt;
Bart Van Assche002f1562015-08-10 17:08:44 -07001432 } else if (dev->use_fmr) {
Bart Van Asschef731ed62015-08-10 17:07:27 -07001433 state->fmr.next = req->fmr_list;
1434 state->fmr.end = req->fmr_list + target->cmd_sg_cnt;
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001435 }
Bart Van Assche76bc1e12014-05-20 15:05:21 +02001436
Bart Van Assche002f1562015-08-10 17:08:44 -07001437 if (dev->use_fast_reg || dev->use_fmr) {
Bart Van Assche3ae95da2015-08-10 17:08:18 -07001438 for_each_sg(scat, sg, count, i) {
1439 ret = srp_map_sg_entry(state, ch, sg, i);
1440 if (ret)
1441 goto out;
1442 }
Bart Van Assche0e0d3a42015-08-10 17:07:46 -07001443 ret = srp_finish_mapping(state, ch);
1444 if (ret)
1445 goto out;
Bart Van Assche3ae95da2015-08-10 17:08:18 -07001446 } else {
1447 for_each_sg(scat, sg, count, i) {
1448 srp_map_desc(state, ib_sg_dma_address(dev->dev, sg),
Bart Van Assche03f6fb92015-08-10 17:09:36 -07001449 ib_sg_dma_len(dev->dev, sg),
1450 target->global_mr->rkey);
Bart Van Assche3ae95da2015-08-10 17:08:18 -07001451 }
Bart Van Assche0e0d3a42015-08-10 17:07:46 -07001452 }
Bart Van Assche76bc1e12014-05-20 15:05:21 +02001453
Bart Van Assche52ede082014-05-20 15:07:45 +02001454 req->nmdesc = state->nmdesc;
Bart Van Assche0e0d3a42015-08-10 17:07:46 -07001455 ret = 0;
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001456
Bart Van Assche0e0d3a42015-08-10 17:07:46 -07001457out:
1458 return ret;
Bart Van Assche76bc1e12014-05-20 15:05:21 +02001459}
1460
Bart Van Assche330179f2015-08-10 17:09:05 -07001461/*
1462 * Register the indirect data buffer descriptor with the HCA.
1463 *
1464 * Note: since the indirect data buffer descriptor has been allocated with
1465 * kmalloc(), it is guaranteed that this buffer is a physically contiguous
1466 * memory buffer.
1467 */
1468static int srp_map_idb(struct srp_rdma_ch *ch, struct srp_request *req,
1469 void **next_mr, void **end_mr, u32 idb_len,
1470 __be32 *idb_rkey)
1471{
1472 struct srp_target_port *target = ch->target;
1473 struct srp_device *dev = target->srp_host->srp_dev;
1474 struct srp_map_state state;
1475 struct srp_direct_buf idb_desc;
1476 u64 idb_pages[1];
1477 int ret;
1478
1479 memset(&state, 0, sizeof(state));
1480 memset(&idb_desc, 0, sizeof(idb_desc));
1481 state.gen.next = next_mr;
1482 state.gen.end = end_mr;
1483 state.desc = &idb_desc;
1484 state.pages = idb_pages;
1485 state.pages[0] = (req->indirect_dma_addr &
1486 dev->mr_page_mask);
1487 state.npages = 1;
1488 state.base_dma_addr = req->indirect_dma_addr;
1489 state.dma_len = idb_len;
1490 ret = srp_finish_mapping(&state, ch);
1491 if (ret < 0)
1492 goto out;
1493
1494 *idb_rkey = idb_desc.key;
1495
1496out:
1497 return ret;
1498}
1499
Bart Van Assche509c07b2014-10-30 14:48:30 +01001500static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch,
Roland Dreieraef9ec32005-11-02 14:07:13 -08001501 struct srp_request *req)
1502{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001503 struct srp_target_port *target = ch->target;
Bart Van Assche76bc1e12014-05-20 15:05:21 +02001504 struct scatterlist *scat;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001505 struct srp_cmd *cmd = req->cmd->buf;
Bart Van Assche330179f2015-08-10 17:09:05 -07001506 int len, nents, count, ret;
Ralph Campbell85507bc2006-12-12 14:30:55 -08001507 struct srp_device *dev;
1508 struct ib_device *ibdev;
David Dillow8f26c9f2011-01-14 19:45:50 -05001509 struct srp_map_state state;
1510 struct srp_indirect_buf *indirect_hdr;
Bart Van Assche330179f2015-08-10 17:09:05 -07001511 u32 idb_len, table_len;
1512 __be32 idb_rkey;
David Dillow8f26c9f2011-01-14 19:45:50 -05001513 u8 fmt;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001514
FUJITA Tomonoribb350d12007-05-26 02:28:25 +09001515 if (!scsi_sglist(scmnd) || scmnd->sc_data_direction == DMA_NONE)
Roland Dreieraef9ec32005-11-02 14:07:13 -08001516 return sizeof (struct srp_cmd);
1517
1518 if (scmnd->sc_data_direction != DMA_FROM_DEVICE &&
1519 scmnd->sc_data_direction != DMA_TO_DEVICE) {
David Dillow7aa54bd2008-01-07 18:23:41 -05001520 shost_printk(KERN_WARNING, target->scsi_host,
1521 PFX "Unhandled data direction %d\n",
1522 scmnd->sc_data_direction);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001523 return -EINVAL;
1524 }
1525
FUJITA Tomonoribb350d12007-05-26 02:28:25 +09001526 nents = scsi_sg_count(scmnd);
1527 scat = scsi_sglist(scmnd);
Roland Dreiercf368712006-03-24 15:47:26 -08001528
Greg Kroah-Hartman05321932008-03-06 00:13:36 +01001529 dev = target->srp_host->srp_dev;
Ralph Campbell85507bc2006-12-12 14:30:55 -08001530 ibdev = dev->dev;
1531
1532 count = ib_dma_map_sg(ibdev, scat, nents, scmnd->sc_data_direction);
David Dillow8f26c9f2011-01-14 19:45:50 -05001533 if (unlikely(count == 0))
1534 return -EIO;
Roland Dreierf5358a12006-06-17 20:37:29 -07001535
1536 fmt = SRP_DATA_DESC_DIRECT;
1537 len = sizeof (struct srp_cmd) + sizeof (struct srp_direct_buf);
Roland Dreiercf368712006-03-24 15:47:26 -08001538
Bart Van Assche03f6fb92015-08-10 17:09:36 -07001539 if (count == 1 && target->global_mr) {
Roland Dreierf5358a12006-06-17 20:37:29 -07001540 /*
1541 * The midlayer only generated a single gather/scatter
1542 * entry, or DMA mapping coalesced everything to a
1543 * single entry. So a direct descriptor along with
1544 * the DMA MR suffices.
1545 */
Roland Dreieraef9ec32005-11-02 14:07:13 -08001546 struct srp_direct_buf *buf = (void *) cmd->add_data;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001547
Ralph Campbell85507bc2006-12-12 14:30:55 -08001548 buf->va = cpu_to_be64(ib_sg_dma_address(ibdev, scat));
Bart Van Assche03f6fb92015-08-10 17:09:36 -07001549 buf->key = cpu_to_be32(target->global_mr->rkey);
Ralph Campbell85507bc2006-12-12 14:30:55 -08001550 buf->len = cpu_to_be32(ib_sg_dma_len(ibdev, scat));
Roland Dreiercf368712006-03-24 15:47:26 -08001551
Bart Van Assche52ede082014-05-20 15:07:45 +02001552 req->nmdesc = 0;
David Dillow8f26c9f2011-01-14 19:45:50 -05001553 goto map_complete;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001554 }
1555
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001556 /*
1557 * We have more than one scatter/gather entry, so build our indirect
1558 * descriptor table, trying to merge as many entries as we can.
David Dillow8f26c9f2011-01-14 19:45:50 -05001559 */
1560 indirect_hdr = (void *) cmd->add_data;
1561
David Dillowc07d4242011-01-16 13:57:10 -05001562 ib_dma_sync_single_for_cpu(ibdev, req->indirect_dma_addr,
1563 target->indirect_size, DMA_TO_DEVICE);
1564
David Dillow8f26c9f2011-01-14 19:45:50 -05001565 memset(&state, 0, sizeof(state));
Bart Van Assche509c07b2014-10-30 14:48:30 +01001566 srp_map_sg(&state, ch, req, scat, count);
David Dillow8f26c9f2011-01-14 19:45:50 -05001567
David Dillowc07d4242011-01-16 13:57:10 -05001568 /* We've mapped the request, now pull as much of the indirect
1569 * descriptor table as we can into the command buffer. If this
1570 * target is not using an external indirect table, we are
1571 * guaranteed to fit into the command, as the SCSI layer won't
1572 * give us more S/G entries than we allow.
David Dillow8f26c9f2011-01-14 19:45:50 -05001573 */
David Dillow8f26c9f2011-01-14 19:45:50 -05001574 if (state.ndesc == 1) {
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001575 /*
1576 * Memory registration collapsed the sg-list into one entry,
David Dillow8f26c9f2011-01-14 19:45:50 -05001577 * so use a direct descriptor.
1578 */
1579 struct srp_direct_buf *buf = (void *) cmd->add_data;
1580
David Dillowc07d4242011-01-16 13:57:10 -05001581 *buf = req->indirect_desc[0];
David Dillow8f26c9f2011-01-14 19:45:50 -05001582 goto map_complete;
1583 }
1584
David Dillowc07d4242011-01-16 13:57:10 -05001585 if (unlikely(target->cmd_sg_cnt < state.ndesc &&
1586 !target->allow_ext_sg)) {
1587 shost_printk(KERN_ERR, target->scsi_host,
1588 "Could not fit S/G list into SRP_CMD\n");
1589 return -EIO;
1590 }
1591
1592 count = min(state.ndesc, target->cmd_sg_cnt);
David Dillow8f26c9f2011-01-14 19:45:50 -05001593 table_len = state.ndesc * sizeof (struct srp_direct_buf);
Bart Van Assche330179f2015-08-10 17:09:05 -07001594 idb_len = sizeof(struct srp_indirect_buf) + table_len;
David Dillow8f26c9f2011-01-14 19:45:50 -05001595
1596 fmt = SRP_DATA_DESC_INDIRECT;
1597 len = sizeof(struct srp_cmd) + sizeof (struct srp_indirect_buf);
David Dillowc07d4242011-01-16 13:57:10 -05001598 len += count * sizeof (struct srp_direct_buf);
David Dillow8f26c9f2011-01-14 19:45:50 -05001599
David Dillowc07d4242011-01-16 13:57:10 -05001600 memcpy(indirect_hdr->desc_list, req->indirect_desc,
1601 count * sizeof (struct srp_direct_buf));
David Dillow8f26c9f2011-01-14 19:45:50 -05001602
Bart Van Assche03f6fb92015-08-10 17:09:36 -07001603 if (!target->global_mr) {
Bart Van Assche330179f2015-08-10 17:09:05 -07001604 ret = srp_map_idb(ch, req, state.gen.next, state.gen.end,
1605 idb_len, &idb_rkey);
1606 if (ret < 0)
1607 return ret;
1608 req->nmdesc++;
1609 } else {
Bart Van Assche03f6fb92015-08-10 17:09:36 -07001610 idb_rkey = target->global_mr->rkey;
Bart Van Assche330179f2015-08-10 17:09:05 -07001611 }
1612
David Dillowc07d4242011-01-16 13:57:10 -05001613 indirect_hdr->table_desc.va = cpu_to_be64(req->indirect_dma_addr);
Bart Van Assche330179f2015-08-10 17:09:05 -07001614 indirect_hdr->table_desc.key = idb_rkey;
David Dillow8f26c9f2011-01-14 19:45:50 -05001615 indirect_hdr->table_desc.len = cpu_to_be32(table_len);
1616 indirect_hdr->len = cpu_to_be32(state.total_len);
1617
1618 if (scmnd->sc_data_direction == DMA_TO_DEVICE)
David Dillowc07d4242011-01-16 13:57:10 -05001619 cmd->data_out_desc_cnt = count;
David Dillow8f26c9f2011-01-14 19:45:50 -05001620 else
David Dillowc07d4242011-01-16 13:57:10 -05001621 cmd->data_in_desc_cnt = count;
1622
1623 ib_dma_sync_single_for_device(ibdev, req->indirect_dma_addr, table_len,
1624 DMA_TO_DEVICE);
David Dillow8f26c9f2011-01-14 19:45:50 -05001625
1626map_complete:
Roland Dreieraef9ec32005-11-02 14:07:13 -08001627 if (scmnd->sc_data_direction == DMA_TO_DEVICE)
1628 cmd->buf_fmt = fmt << 4;
1629 else
1630 cmd->buf_fmt = fmt;
1631
Roland Dreieraef9ec32005-11-02 14:07:13 -08001632 return len;
1633}
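
/*
 * Worked example (illustrative; sizes from include/scsi/srp.h: 48-byte
 * fixed SRP_CMD, 16-byte srp_direct_buf, 20-byte srp_indirect_buf
 * header): if memory registration leaves state.ndesc = 3 descriptors
 * and all of them fit in the command, then
 *
 *	table_len = 3 * 16 = 48
 *	idb_len   = 20 + 48 = 68
 *	len       = 48 + 20 + 3 * 16 = 116 bytes of SRP_CMD IU
 *
 * and data_out_desc_cnt or data_in_desc_cnt is set to 3.
 */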
1634
David Dillow05a1d752010-10-08 14:48:14 -04001635/*
Bart Van Assche76c75b22010-11-26 14:37:47 -05001636 * Return an IU, and possibly a credit, to the free pool
1637 */
Bart Van Assche509c07b2014-10-30 14:48:30 +01001638static void srp_put_tx_iu(struct srp_rdma_ch *ch, struct srp_iu *iu,
Bart Van Assche76c75b22010-11-26 14:37:47 -05001639 enum srp_iu_type iu_type)
1640{
1641 unsigned long flags;
1642
Bart Van Assche509c07b2014-10-30 14:48:30 +01001643 spin_lock_irqsave(&ch->lock, flags);
1644 list_add(&iu->list, &ch->free_tx);
Bart Van Assche76c75b22010-11-26 14:37:47 -05001645 if (iu_type != SRP_IU_RSP)
Bart Van Assche509c07b2014-10-30 14:48:30 +01001646 ++ch->req_lim;
1647 spin_unlock_irqrestore(&ch->lock, flags);
Bart Van Assche76c75b22010-11-26 14:37:47 -05001648}
1649
1650/*
Bart Van Assche509c07b2014-10-30 14:48:30 +01001651 * Must be called with ch->lock held to protect req_lim and free_tx.
Bart Van Asschee9684672010-11-26 15:08:38 -05001652 * If IU is not sent, it must be returned using srp_put_tx_iu().
David Dillow05a1d752010-10-08 14:48:14 -04001653 *
1654 * Note:
1655 * An upper limit for the number of allocated information units for each
1656 * request type is:
1657 * - SRP_IU_CMD: SRP_CMD_SQ_SIZE, since the SCSI mid-layer never queues
1658 * more than Scsi_Host.can_queue requests.
1659 * - SRP_IU_TSK_MGMT: SRP_TSK_MGMT_SQ_SIZE.
1660 * - SRP_IU_RSP: 1, since a conforming SRP target never sends more than
1661 * one unanswered SRP request to an initiator.
1662 */
Bart Van Assche509c07b2014-10-30 14:48:30 +01001663static struct srp_iu *__srp_get_tx_iu(struct srp_rdma_ch *ch,
David Dillow05a1d752010-10-08 14:48:14 -04001664 enum srp_iu_type iu_type)
1665{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001666 struct srp_target_port *target = ch->target;
David Dillow05a1d752010-10-08 14:48:14 -04001667 s32 rsv = (iu_type == SRP_IU_TSK_MGMT) ? 0 : SRP_TSK_MGMT_SQ_SIZE;
1668 struct srp_iu *iu;
1669
Bart Van Assche509c07b2014-10-30 14:48:30 +01001670 srp_send_completion(ch->send_cq, ch);
David Dillow05a1d752010-10-08 14:48:14 -04001671
Bart Van Assche509c07b2014-10-30 14:48:30 +01001672 if (list_empty(&ch->free_tx))
David Dillow05a1d752010-10-08 14:48:14 -04001673 return NULL;
1674
1675 /* Initiator responses to target requests do not consume credits */
Bart Van Assche76c75b22010-11-26 14:37:47 -05001676 if (iu_type != SRP_IU_RSP) {
Bart Van Assche509c07b2014-10-30 14:48:30 +01001677 if (ch->req_lim <= rsv) {
Bart Van Assche76c75b22010-11-26 14:37:47 -05001678 ++target->zero_req_lim;
1679 return NULL;
1680 }
1681
Bart Van Assche509c07b2014-10-30 14:48:30 +01001682 --ch->req_lim;
David Dillow05a1d752010-10-08 14:48:14 -04001683 }
1684
Bart Van Assche509c07b2014-10-30 14:48:30 +01001685 iu = list_first_entry(&ch->free_tx, struct srp_iu, list);
Bart Van Assche76c75b22010-11-26 14:37:47 -05001686 list_del(&iu->list);
David Dillow05a1d752010-10-08 14:48:14 -04001687 return iu;
1688}
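
/*
 * Illustrative example of the credit check above: SRP_TSK_MGMT_SQ_SIZE
 * credits are held in reserve, so an SRP_IU_CMD allocation fails once
 * ch->req_lim drops to that reserve, an SRP_IU_TSK_MGMT allocation may
 * consume the reserve, and an SRP_IU_RSP never touches req_lim at all.
 */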
1689
Bart Van Assche509c07b2014-10-30 14:48:30 +01001690static int srp_post_send(struct srp_rdma_ch *ch, struct srp_iu *iu, int len)
David Dillow05a1d752010-10-08 14:48:14 -04001691{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001692 struct srp_target_port *target = ch->target;
David Dillow05a1d752010-10-08 14:48:14 -04001693 struct ib_sge list;
1694 struct ib_send_wr wr, *bad_wr;
David Dillow05a1d752010-10-08 14:48:14 -04001695
1696 list.addr = iu->dma;
1697 list.length = len;
David Dillow9af76272010-11-26 15:34:46 -05001698 list.lkey = target->lkey;
David Dillow05a1d752010-10-08 14:48:14 -04001699
1700 wr.next = NULL;
Bart Van Asschedcb4cb82010-11-26 13:22:48 -05001701 wr.wr_id = (uintptr_t) iu;
David Dillow05a1d752010-10-08 14:48:14 -04001702 wr.sg_list = &list;
1703 wr.num_sge = 1;
1704 wr.opcode = IB_WR_SEND;
1705 wr.send_flags = IB_SEND_SIGNALED;
1706
Bart Van Assche509c07b2014-10-30 14:48:30 +01001707 return ib_post_send(ch->qp, &wr, &bad_wr);
David Dillow05a1d752010-10-08 14:48:14 -04001708}
1709
Bart Van Assche509c07b2014-10-30 14:48:30 +01001710static int srp_post_recv(struct srp_rdma_ch *ch, struct srp_iu *iu)
Bart Van Asschec996bb42010-07-30 10:59:05 +00001711{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001712 struct srp_target_port *target = ch->target;
Bart Van Asschec996bb42010-07-30 10:59:05 +00001713 struct ib_recv_wr wr, *bad_wr;
Bart Van Asschedcb4cb82010-11-26 13:22:48 -05001714 struct ib_sge list;
Bart Van Asschec996bb42010-07-30 10:59:05 +00001715
1716 list.addr = iu->dma;
1717 list.length = iu->size;
David Dillow9af76272010-11-26 15:34:46 -05001718 list.lkey = target->lkey;
Bart Van Asschec996bb42010-07-30 10:59:05 +00001719
1720 wr.next = NULL;
Bart Van Asschedcb4cb82010-11-26 13:22:48 -05001721 wr.wr_id = (uintptr_t) iu;
Bart Van Asschec996bb42010-07-30 10:59:05 +00001722 wr.sg_list = &list;
1723 wr.num_sge = 1;
1724
Bart Van Assche509c07b2014-10-30 14:48:30 +01001725 return ib_post_recv(ch->qp, &wr, &bad_wr);
Bart Van Asschec996bb42010-07-30 10:59:05 +00001726}
1727
Bart Van Assche509c07b2014-10-30 14:48:30 +01001728static void srp_process_rsp(struct srp_rdma_ch *ch, struct srp_rsp *rsp)
Roland Dreieraef9ec32005-11-02 14:07:13 -08001729{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001730 struct srp_target_port *target = ch->target;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001731 struct srp_request *req;
1732 struct scsi_cmnd *scmnd;
1733 unsigned long flags;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001734
Roland Dreieraef9ec32005-11-02 14:07:13 -08001735 if (unlikely(rsp->tag & SRP_TAG_TSK_MGMT)) {
Bart Van Assche509c07b2014-10-30 14:48:30 +01001736 spin_lock_irqsave(&ch->lock, flags);
1737 ch->req_lim += be32_to_cpu(rsp->req_lim_delta);
1738 spin_unlock_irqrestore(&ch->lock, flags);
Bart Van Assche94a91742010-11-26 14:50:09 -05001739
Bart Van Assche509c07b2014-10-30 14:48:30 +01001740 ch->tsk_mgmt_status = -1;
David Dillowf8b6e312010-11-26 13:02:21 -05001741 if (be32_to_cpu(rsp->resp_data_len) >= 4)
Bart Van Assche509c07b2014-10-30 14:48:30 +01001742 ch->tsk_mgmt_status = rsp->data[3];
1743 complete(&ch->tsk_mgmt_done);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001744 } else {
Bart Van Assche77f2c1a2014-10-02 15:29:25 +02001745 scmnd = scsi_host_find_tag(target->scsi_host, rsp->tag);
1746 if (scmnd) {
1747 req = (void *)scmnd->host_scribble;
1748 scmnd = srp_claim_req(ch, req, NULL, scmnd);
1749 }
Bart Van Assche22032992012-08-14 13:18:53 +00001750 if (!scmnd) {
David Dillow7aa54bd2008-01-07 18:23:41 -05001751 shost_printk(KERN_ERR, target->scsi_host,
Bart Van Assched92c0da2014-10-06 17:14:36 +02001752 "Null scmnd for RSP w/tag %#016llx received on ch %td / QP %#x\n",
1753 rsp->tag, ch - target->ch, ch->qp->qp_num);
Bart Van Assche22032992012-08-14 13:18:53 +00001754
Bart Van Assche509c07b2014-10-30 14:48:30 +01001755 spin_lock_irqsave(&ch->lock, flags);
1756 ch->req_lim += be32_to_cpu(rsp->req_lim_delta);
1757 spin_unlock_irqrestore(&ch->lock, flags);
Bart Van Assche22032992012-08-14 13:18:53 +00001758
1759 return;
1760 }
Roland Dreieraef9ec32005-11-02 14:07:13 -08001761 scmnd->result = rsp->status;
1762
1763 if (rsp->flags & SRP_RSP_FLAG_SNSVALID) {
1764 memcpy(scmnd->sense_buffer, rsp->data +
1765 be32_to_cpu(rsp->resp_data_len),
1766 min_t(int, be32_to_cpu(rsp->sense_data_len),
1767 SCSI_SENSE_BUFFERSIZE));
1768 }
1769
Bart Van Asschee7145312014-07-09 15:57:51 +02001770 if (unlikely(rsp->flags & SRP_RSP_FLAG_DIUNDER))
FUJITA Tomonoribb350d12007-05-26 02:28:25 +09001771 scsi_set_resid(scmnd, be32_to_cpu(rsp->data_in_res_cnt));
Bart Van Asschee7145312014-07-09 15:57:51 +02001772 else if (unlikely(rsp->flags & SRP_RSP_FLAG_DIOVER))
1773 scsi_set_resid(scmnd, -be32_to_cpu(rsp->data_in_res_cnt));
1774 else if (unlikely(rsp->flags & SRP_RSP_FLAG_DOUNDER))
1775 scsi_set_resid(scmnd, be32_to_cpu(rsp->data_out_res_cnt));
1776 else if (unlikely(rsp->flags & SRP_RSP_FLAG_DOOVER))
1777 scsi_set_resid(scmnd, -be32_to_cpu(rsp->data_out_res_cnt));
Roland Dreieraef9ec32005-11-02 14:07:13 -08001778
Bart Van Assche509c07b2014-10-30 14:48:30 +01001779 srp_free_req(ch, req, scmnd,
Bart Van Assche22032992012-08-14 13:18:53 +00001780 be32_to_cpu(rsp->req_lim_delta));
1781
David Dillowf8b6e312010-11-26 13:02:21 -05001782 scmnd->host_scribble = NULL;
1783 scmnd->scsi_done(scmnd);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001784 }
Roland Dreieraef9ec32005-11-02 14:07:13 -08001785}
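
/*
 * Illustrative note on the residual handling above: with
 * SRP_RSP_FLAG_DIUNDER and data_in_res_cnt = 512 the target transferred
 * 512 bytes fewer than requested, so the mid-layer sees a residual of
 * 512; the *OVER flags report an overrun as a negative residual.
 */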
1786
Bart Van Assche509c07b2014-10-30 14:48:30 +01001787static int srp_response_common(struct srp_rdma_ch *ch, s32 req_delta,
David Dillowbb125882010-10-08 14:40:47 -04001788 void *rsp, int len)
1789{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001790 struct srp_target_port *target = ch->target;
Bart Van Assche76c75b22010-11-26 14:37:47 -05001791 struct ib_device *dev = target->srp_host->srp_dev->dev;
David Dillowbb125882010-10-08 14:40:47 -04001792 unsigned long flags;
1793 struct srp_iu *iu;
Bart Van Assche76c75b22010-11-26 14:37:47 -05001794 int err;
David Dillowbb125882010-10-08 14:40:47 -04001795
Bart Van Assche509c07b2014-10-30 14:48:30 +01001796 spin_lock_irqsave(&ch->lock, flags);
1797 ch->req_lim += req_delta;
1798 iu = __srp_get_tx_iu(ch, SRP_IU_RSP);
1799 spin_unlock_irqrestore(&ch->lock, flags);
Bart Van Assche76c75b22010-11-26 14:37:47 -05001800
David Dillowbb125882010-10-08 14:40:47 -04001801 if (!iu) {
1802 shost_printk(KERN_ERR, target->scsi_host, PFX
1803 "no IU available to send response\n");
Bart Van Assche76c75b22010-11-26 14:37:47 -05001804 return 1;
David Dillowbb125882010-10-08 14:40:47 -04001805 }
1806
1807 ib_dma_sync_single_for_cpu(dev, iu->dma, len, DMA_TO_DEVICE);
1808 memcpy(iu->buf, rsp, len);
1809 ib_dma_sync_single_for_device(dev, iu->dma, len, DMA_TO_DEVICE);
1810
Bart Van Assche509c07b2014-10-30 14:48:30 +01001811 err = srp_post_send(ch, iu, len);
Bart Van Assche76c75b22010-11-26 14:37:47 -05001812 if (err) {
David Dillowbb125882010-10-08 14:40:47 -04001813 shost_printk(KERN_ERR, target->scsi_host, PFX
1814 "unable to post response: %d\n", err);
Bart Van Assche509c07b2014-10-30 14:48:30 +01001815 srp_put_tx_iu(ch, iu, SRP_IU_RSP);
Bart Van Assche76c75b22010-11-26 14:37:47 -05001816 }
David Dillowbb125882010-10-08 14:40:47 -04001817
David Dillowbb125882010-10-08 14:40:47 -04001818 return err;
1819}
1820
Bart Van Assche509c07b2014-10-30 14:48:30 +01001821static void srp_process_cred_req(struct srp_rdma_ch *ch,
David Dillowbb125882010-10-08 14:40:47 -04001822 struct srp_cred_req *req)
1823{
1824 struct srp_cred_rsp rsp = {
1825 .opcode = SRP_CRED_RSP,
1826 .tag = req->tag,
1827 };
1828 s32 delta = be32_to_cpu(req->req_lim_delta);
1829
Bart Van Assche509c07b2014-10-30 14:48:30 +01001830 if (srp_response_common(ch, delta, &rsp, sizeof(rsp)))
1831 shost_printk(KERN_ERR, ch->target->scsi_host, PFX
David Dillowbb125882010-10-08 14:40:47 -04001832 "problems processing SRP_CRED_REQ\n");
1833}
1834
Bart Van Assche509c07b2014-10-30 14:48:30 +01001835static void srp_process_aer_req(struct srp_rdma_ch *ch,
David Dillowbb125882010-10-08 14:40:47 -04001836 struct srp_aer_req *req)
1837{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001838 struct srp_target_port *target = ch->target;
David Dillowbb125882010-10-08 14:40:47 -04001839 struct srp_aer_rsp rsp = {
1840 .opcode = SRP_AER_RSP,
1841 .tag = req->tag,
1842 };
1843 s32 delta = be32_to_cpu(req->req_lim_delta);
1844
1845 shost_printk(KERN_ERR, target->scsi_host, PFX
Bart Van Assche985aa492015-05-18 13:27:14 +02001846 "ignoring AER for LUN %llu\n", scsilun_to_int(&req->lun));
David Dillowbb125882010-10-08 14:40:47 -04001847
Bart Van Assche509c07b2014-10-30 14:48:30 +01001848 if (srp_response_common(ch, delta, &rsp, sizeof(rsp)))
David Dillowbb125882010-10-08 14:40:47 -04001849 shost_printk(KERN_ERR, target->scsi_host, PFX
1850 "problems processing SRP_AER_REQ\n");
1851}
1852
Bart Van Assche509c07b2014-10-30 14:48:30 +01001853static void srp_handle_recv(struct srp_rdma_ch *ch, struct ib_wc *wc)
Roland Dreieraef9ec32005-11-02 14:07:13 -08001854{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001855 struct srp_target_port *target = ch->target;
Bart Van Asschedcb4cb82010-11-26 13:22:48 -05001856 struct ib_device *dev = target->srp_host->srp_dev->dev;
Roland Dreier737b94e2011-05-23 11:30:04 -07001857 struct srp_iu *iu = (struct srp_iu *) (uintptr_t) wc->wr_id;
Bart Van Asschec996bb42010-07-30 10:59:05 +00001858 int res;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001859 u8 opcode;
1860
Bart Van Assche509c07b2014-10-30 14:48:30 +01001861 ib_dma_sync_single_for_cpu(dev, iu->dma, ch->max_ti_iu_len,
Ralph Campbell85507bc2006-12-12 14:30:55 -08001862 DMA_FROM_DEVICE);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001863
1864 opcode = *(u8 *) iu->buf;
1865
1866 if (0) {
David Dillow7aa54bd2008-01-07 18:23:41 -05001867 shost_printk(KERN_ERR, target->scsi_host,
1868 PFX "recv completion, opcode 0x%02x\n", opcode);
Bart Van Assche7a700812010-07-29 15:56:37 +00001869 print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 8, 1,
1870 iu->buf, wc->byte_len, true);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001871 }
1872
1873 switch (opcode) {
1874 case SRP_RSP:
Bart Van Assche509c07b2014-10-30 14:48:30 +01001875 srp_process_rsp(ch, iu->buf);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001876 break;
1877
David Dillowbb125882010-10-08 14:40:47 -04001878 case SRP_CRED_REQ:
Bart Van Assche509c07b2014-10-30 14:48:30 +01001879 srp_process_cred_req(ch, iu->buf);
David Dillowbb125882010-10-08 14:40:47 -04001880 break;
1881
1882 case SRP_AER_REQ:
Bart Van Assche509c07b2014-10-30 14:48:30 +01001883 srp_process_aer_req(ch, iu->buf);
David Dillowbb125882010-10-08 14:40:47 -04001884 break;
1885
Roland Dreieraef9ec32005-11-02 14:07:13 -08001886 case SRP_T_LOGOUT:
1887 /* XXX Handle target logout */
David Dillow7aa54bd2008-01-07 18:23:41 -05001888 shost_printk(KERN_WARNING, target->scsi_host,
1889 PFX "Got target logout request\n");
Roland Dreieraef9ec32005-11-02 14:07:13 -08001890 break;
1891
1892 default:
David Dillow7aa54bd2008-01-07 18:23:41 -05001893 shost_printk(KERN_WARNING, target->scsi_host,
1894 PFX "Unhandled SRP opcode 0x%02x\n", opcode);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001895 break;
1896 }
1897
Bart Van Assche509c07b2014-10-30 14:48:30 +01001898 ib_dma_sync_single_for_device(dev, iu->dma, ch->max_ti_iu_len,
Ralph Campbell85507bc2006-12-12 14:30:55 -08001899 DMA_FROM_DEVICE);
Bart Van Asschec996bb42010-07-30 10:59:05 +00001900
Bart Van Assche509c07b2014-10-30 14:48:30 +01001901 res = srp_post_recv(ch, iu);
Bart Van Asschec996bb42010-07-30 10:59:05 +00001902 if (res != 0)
1903 shost_printk(KERN_ERR, target->scsi_host,
1904 PFX "Recv failed with error code %d\n", res);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001905}
1906
Bart Van Asschec1120f82013-10-26 14:35:08 +02001907/**
1908 * srp_tl_err_work() - handle a transport layer error
Bart Van Asscheaf246632014-05-20 15:04:21 +02001909 * @work: Work structure embedded in an SRP target port.
Bart Van Asschec1120f82013-10-26 14:35:08 +02001910 *
1911 * Note: This function may get invoked before the rport has been created,
1912 * hence the target->rport test.
1913 */
1914static void srp_tl_err_work(struct work_struct *work)
1915{
1916 struct srp_target_port *target;
1917
1918 target = container_of(work, struct srp_target_port, tl_err_work);
1919 if (target->rport)
1920 srp_start_tl_fail_timers(target->rport);
1921}
1922
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001923static void srp_handle_qp_err(u64 wr_id, enum ib_wc_status wc_status,
Bart Van Assche7dad6b22014-10-21 18:00:35 +02001924 bool send_err, struct srp_rdma_ch *ch)
Bart Van Assche948d1e82011-09-03 09:25:42 +02001925{
Bart Van Assche7dad6b22014-10-21 18:00:35 +02001926 struct srp_target_port *target = ch->target;
1927
1928 if (wr_id == SRP_LAST_WR_ID) {
1929 complete(&ch->done);
1930 return;
1931 }
1932
Bart Van Asschec014c8c2015-05-18 13:23:57 +02001933 if (ch->connected && !target->qp_in_error) {
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001934 if (wr_id & LOCAL_INV_WR_ID_MASK) {
1935 shost_printk(KERN_ERR, target->scsi_host, PFX
Sagi Grimberg57363d92015-05-18 13:40:29 +03001936 "LOCAL_INV failed with status %s (%d)\n",
1937 ib_wc_status_msg(wc_status), wc_status);
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001938 } else if (wr_id & FAST_REG_WR_ID_MASK) {
1939 shost_printk(KERN_ERR, target->scsi_host, PFX
Sagi Grimberg57363d92015-05-18 13:40:29 +03001940 "FAST_REG_MR failed status %s (%d)\n",
1941 ib_wc_status_msg(wc_status), wc_status);
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001942 } else {
1943 shost_printk(KERN_ERR, target->scsi_host,
Sagi Grimberg57363d92015-05-18 13:40:29 +03001944 PFX "failed %s status %s (%d) for iu %p\n",
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001945 send_err ? "send" : "receive",
Sagi Grimberg57363d92015-05-18 13:40:29 +03001946 ib_wc_status_msg(wc_status), wc_status,
1947 (void *)(uintptr_t)wr_id);
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001948 }
Bart Van Asschec1120f82013-10-26 14:35:08 +02001949 queue_work(system_long_wq, &target->tl_err_work);
Bart Van Assche4f0af692012-11-26 11:16:40 +01001950 }
Bart Van Assche948d1e82011-09-03 09:25:42 +02001951 target->qp_in_error = true;
1952}
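
/*
 * Illustrative note: completion types are distinguished by the work
 * request ID. IUs post their own pointer as wr_id (see srp_post_send()
 * and srp_post_recv()), while special work requests carry the
 * LOCAL_INV_WR_ID_MASK / FAST_REG_WR_ID_MASK bits or SRP_LAST_WR_ID
 * tested above, so srp_handle_qp_err() can pick the right diagnostic
 * without dereferencing a wr_id that is not a pointer.
 */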
1953
Bart Van Assche509c07b2014-10-30 14:48:30 +01001954static void srp_recv_completion(struct ib_cq *cq, void *ch_ptr)
Roland Dreieraef9ec32005-11-02 14:07:13 -08001955{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001956 struct srp_rdma_ch *ch = ch_ptr;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001957 struct ib_wc wc;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001958
1959 ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
1960 while (ib_poll_cq(cq, 1, &wc) > 0) {
Bart Van Assche948d1e82011-09-03 09:25:42 +02001961 if (likely(wc.status == IB_WC_SUCCESS)) {
Bart Van Assche509c07b2014-10-30 14:48:30 +01001962 srp_handle_recv(ch, &wc);
Bart Van Assche948d1e82011-09-03 09:25:42 +02001963 } else {
Bart Van Assche7dad6b22014-10-21 18:00:35 +02001964 srp_handle_qp_err(wc.wr_id, wc.status, false, ch);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001965 }
Bart Van Assche9c03dc92010-02-02 19:23:54 +00001966 }
1967}
1968
Bart Van Assche509c07b2014-10-30 14:48:30 +01001969static void srp_send_completion(struct ib_cq *cq, void *ch_ptr)
Bart Van Assche9c03dc92010-02-02 19:23:54 +00001970{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001971 struct srp_rdma_ch *ch = ch_ptr;
Bart Van Assche9c03dc92010-02-02 19:23:54 +00001972 struct ib_wc wc;
Bart Van Asschedcb4cb82010-11-26 13:22:48 -05001973 struct srp_iu *iu;
Bart Van Assche9c03dc92010-02-02 19:23:54 +00001974
1975 while (ib_poll_cq(cq, 1, &wc) > 0) {
Bart Van Assche948d1e82011-09-03 09:25:42 +02001976 if (likely(wc.status == IB_WC_SUCCESS)) {
1977 iu = (struct srp_iu *) (uintptr_t) wc.wr_id;
Bart Van Assche509c07b2014-10-30 14:48:30 +01001978 list_add(&iu->list, &ch->free_tx);
Bart Van Assche948d1e82011-09-03 09:25:42 +02001979 } else {
Bart Van Assche7dad6b22014-10-21 18:00:35 +02001980 srp_handle_qp_err(wc.wr_id, wc.status, true, ch);
Bart Van Assche9c03dc92010-02-02 19:23:54 +00001981 }
Roland Dreieraef9ec32005-11-02 14:07:13 -08001982 }
1983}
1984
Bart Van Assche76c75b22010-11-26 14:37:47 -05001985static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
Roland Dreieraef9ec32005-11-02 14:07:13 -08001986{
Bart Van Assche76c75b22010-11-26 14:37:47 -05001987 struct srp_target_port *target = host_to_target(shost);
Bart Van Asschea95cadb2013-10-26 14:37:17 +02001988 struct srp_rport *rport = target->rport;
Bart Van Assche509c07b2014-10-30 14:48:30 +01001989 struct srp_rdma_ch *ch;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001990 struct srp_request *req;
1991 struct srp_iu *iu;
1992 struct srp_cmd *cmd;
Ralph Campbell85507bc2006-12-12 14:30:55 -08001993 struct ib_device *dev;
Bart Van Assche76c75b22010-11-26 14:37:47 -05001994 unsigned long flags;
Bart Van Assche77f2c1a2014-10-02 15:29:25 +02001995 u32 tag;
1996 u16 idx;
Bart Van Assched1b42892014-05-20 15:07:20 +02001997 int len, ret;
Bart Van Asschea95cadb2013-10-26 14:37:17 +02001998 const bool in_scsi_eh = !in_interrupt() && current == shost->ehandler;
1999
2000 /*
2001 * The SCSI EH thread is the only context from which srp_queuecommand()
2002 * can get invoked for blocked devices (SDEV_BLOCK /
2003 * SDEV_CREATED_BLOCK). Avoid racing with srp_reconnect_rport() by
2004 * locking the rport mutex if invoked from inside the SCSI EH.
2005 */
2006 if (in_scsi_eh)
2007 mutex_lock(&rport->mutex);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002008
Bart Van Assched1b42892014-05-20 15:07:20 +02002009 scmnd->result = srp_chkready(target->rport);
2010 if (unlikely(scmnd->result))
2011 goto err;
Bart Van Assche2ce19e72013-02-21 17:20:00 +00002012
Bart Van Assche77f2c1a2014-10-02 15:29:25 +02002013 WARN_ON_ONCE(scmnd->request->tag < 0);
2014 tag = blk_mq_unique_tag(scmnd->request);
Bart Van Assched92c0da2014-10-06 17:14:36 +02002015 ch = &target->ch[blk_mq_unique_tag_to_hwq(tag)];
Bart Van Assche77f2c1a2014-10-02 15:29:25 +02002016 idx = blk_mq_unique_tag_to_tag(tag);
2017 WARN_ONCE(idx >= target->req_ring_size, "%s: tag %#x: idx %d >= %d\n",
2018 dev_name(&shost->shost_gendev), tag, idx,
2019 target->req_ring_size);
Bart Van Assche509c07b2014-10-30 14:48:30 +01002020
2021 spin_lock_irqsave(&ch->lock, flags);
2022 iu = __srp_get_tx_iu(ch, SRP_IU_CMD);
Bart Van Assche509c07b2014-10-30 14:48:30 +01002023 spin_unlock_irqrestore(&ch->lock, flags);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002024
Bart Van Assche77f2c1a2014-10-02 15:29:25 +02002025 if (!iu)
2026 goto err;
2027
2028 req = &ch->req_ring[idx];
Greg Kroah-Hartman05321932008-03-06 00:13:36 +01002029 dev = target->srp_host->srp_dev->dev;
David Dillow49248642011-01-14 18:23:24 -05002030 ib_dma_sync_single_for_cpu(dev, iu->dma, target->max_iu_len,
Ralph Campbell85507bc2006-12-12 14:30:55 -08002031 DMA_TO_DEVICE);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002032
David Dillowf8b6e312010-11-26 13:02:21 -05002033 scmnd->host_scribble = (void *) req;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002034
2035 cmd = iu->buf;
2036 memset(cmd, 0, sizeof *cmd);
2037
2038 cmd->opcode = SRP_CMD;
Bart Van Assche985aa492015-05-18 13:27:14 +02002039 int_to_scsilun(scmnd->device->lun, &cmd->lun);
Bart Van Assche77f2c1a2014-10-02 15:29:25 +02002040 cmd->tag = tag;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002041 memcpy(cmd->cdb, scmnd->cmnd, scmnd->cmd_len);
2042
Roland Dreieraef9ec32005-11-02 14:07:13 -08002043 req->scmnd = scmnd;
2044 req->cmd = iu;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002045
Bart Van Assche509c07b2014-10-30 14:48:30 +01002046 len = srp_map_data(scmnd, ch, req);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002047 if (len < 0) {
David Dillow7aa54bd2008-01-07 18:23:41 -05002048 shost_printk(KERN_ERR, target->scsi_host,
Bart Van Assched1b42892014-05-20 15:07:20 +02002049 PFX "Failed to map data (%d)\n", len);
2050 /*
2051 * If we ran out of memory descriptors (-ENOMEM) because an
2052 * application is queuing many requests with more than
Bart Van Assche52ede082014-05-20 15:07:45 +02002053 * max_pages_per_mr sg-list elements, tell the SCSI mid-layer
Bart Van Assched1b42892014-05-20 15:07:20 +02002054 * to reduce queue depth temporarily.
2055 */
2056 scmnd->result = len == -ENOMEM ?
2057 DID_OK << 16 | QUEUE_FULL << 1 : DID_ERROR << 16;
Bart Van Assche76c75b22010-11-26 14:37:47 -05002058 goto err_iu;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002059 }
2060
David Dillow49248642011-01-14 18:23:24 -05002061 ib_dma_sync_single_for_device(dev, iu->dma, target->max_iu_len,
Ralph Campbell85507bc2006-12-12 14:30:55 -08002062 DMA_TO_DEVICE);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002063
Bart Van Assche509c07b2014-10-30 14:48:30 +01002064 if (srp_post_send(ch, iu, len)) {
David Dillow7aa54bd2008-01-07 18:23:41 -05002065 shost_printk(KERN_ERR, target->scsi_host, PFX "Send failed\n");
Roland Dreieraef9ec32005-11-02 14:07:13 -08002066 goto err_unmap;
2067 }
2068
Bart Van Assched1b42892014-05-20 15:07:20 +02002069 ret = 0;
2070
Bart Van Asschea95cadb2013-10-26 14:37:17 +02002071unlock_rport:
2072 if (in_scsi_eh)
2073 mutex_unlock(&rport->mutex);
2074
Bart Van Assched1b42892014-05-20 15:07:20 +02002075 return ret;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002076
2077err_unmap:
Bart Van Assche509c07b2014-10-30 14:48:30 +01002078 srp_unmap_data(scmnd, ch, req);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002079
Bart Van Assche76c75b22010-11-26 14:37:47 -05002080err_iu:
Bart Van Assche509c07b2014-10-30 14:48:30 +01002081 srp_put_tx_iu(ch, iu, SRP_IU_CMD);
Bart Van Assche76c75b22010-11-26 14:37:47 -05002082
Bart Van Assche024ca902014-05-20 15:03:49 +02002083 /*
2084 * Ensure that the loops that iterate over the request ring cannot
2085 * encounter a dangling SCSI command pointer.
2086 */
2087 req->scmnd = NULL;
2088
Bart Van Assched1b42892014-05-20 15:07:20 +02002089err:
2090 if (scmnd->result) {
2091 scmnd->scsi_done(scmnd);
2092 ret = 0;
2093 } else {
2094 ret = SCSI_MLQUEUE_HOST_BUSY;
2095 }
Bart Van Asschea95cadb2013-10-26 14:37:17 +02002096
Bart Van Assched1b42892014-05-20 15:07:20 +02002097 goto unlock_rport;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002098}
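
/*
 * Illustrative example of the tag handling above: blk_mq_unique_tag()
 * packs the hardware queue index into the upper 16 bits and the
 * per-queue tag into the lower 16 bits, e.g. unique tag 0x0002000a
 * selects RDMA channel 2 (blk_mq_unique_tag_to_hwq()) and request ring
 * slot 10 (blk_mq_unique_tag_to_tag()).
 */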
2099
Bart Van Assche4d73f952013-10-26 14:40:37 +02002100/*
2101 * Note: the resources allocated in this function are freed in
Bart Van Assche509c07b2014-10-30 14:48:30 +01002102 * srp_free_ch_ib().
Bart Van Assche4d73f952013-10-26 14:40:37 +02002103 */
Bart Van Assche509c07b2014-10-30 14:48:30 +01002104static int srp_alloc_iu_bufs(struct srp_rdma_ch *ch)
Roland Dreieraef9ec32005-11-02 14:07:13 -08002105{
Bart Van Assche509c07b2014-10-30 14:48:30 +01002106 struct srp_target_port *target = ch->target;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002107 int i;
2108
Bart Van Assche509c07b2014-10-30 14:48:30 +01002109 ch->rx_ring = kcalloc(target->queue_size, sizeof(*ch->rx_ring),
2110 GFP_KERNEL);
2111 if (!ch->rx_ring)
Bart Van Assche4d73f952013-10-26 14:40:37 +02002112 goto err_no_ring;
Bart Van Assche509c07b2014-10-30 14:48:30 +01002113 ch->tx_ring = kcalloc(target->queue_size, sizeof(*ch->tx_ring),
2114 GFP_KERNEL);
2115 if (!ch->tx_ring)
Bart Van Assche4d73f952013-10-26 14:40:37 +02002116 goto err_no_ring;
2117
2118 for (i = 0; i < target->queue_size; ++i) {
Bart Van Assche509c07b2014-10-30 14:48:30 +01002119 ch->rx_ring[i] = srp_alloc_iu(target->srp_host,
2120 ch->max_ti_iu_len,
2121 GFP_KERNEL, DMA_FROM_DEVICE);
2122 if (!ch->rx_ring[i])
Roland Dreieraef9ec32005-11-02 14:07:13 -08002123 goto err;
2124 }
2125
Bart Van Assche4d73f952013-10-26 14:40:37 +02002126 for (i = 0; i < target->queue_size; ++i) {
Bart Van Assche509c07b2014-10-30 14:48:30 +01002127 ch->tx_ring[i] = srp_alloc_iu(target->srp_host,
2128 target->max_iu_len,
2129 GFP_KERNEL, DMA_TO_DEVICE);
2130 if (!ch->tx_ring[i])
Roland Dreieraef9ec32005-11-02 14:07:13 -08002131 goto err;
Bart Van Asschedcb4cb82010-11-26 13:22:48 -05002132
Bart Van Assche509c07b2014-10-30 14:48:30 +01002133 list_add(&ch->tx_ring[i]->list, &ch->free_tx);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002134 }
2135
2136 return 0;
2137
2138err:
Bart Van Assche4d73f952013-10-26 14:40:37 +02002139 for (i = 0; i < target->queue_size; ++i) {
Bart Van Assche509c07b2014-10-30 14:48:30 +01002140 srp_free_iu(target->srp_host, ch->rx_ring[i]);
2141 srp_free_iu(target->srp_host, ch->tx_ring[i]);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002142 }
2143
Bart Van Assche4d73f952013-10-26 14:40:37 +02002144
2145err_no_ring:
Bart Van Assche509c07b2014-10-30 14:48:30 +01002146 kfree(ch->tx_ring);
2147 ch->tx_ring = NULL;
2148 kfree(ch->rx_ring);
2149 ch->rx_ring = NULL;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002150
2151 return -ENOMEM;
2152}
2153
Bart Van Asschec9b03c12011-09-03 09:34:48 +02002154static uint32_t srp_compute_rq_tmo(struct ib_qp_attr *qp_attr, int attr_mask)
2155{
2156 uint64_t T_tr_ns, max_compl_time_ms;
2157 uint32_t rq_tmo_jiffies;
2158
2159 /*
2160 * According to section 11.2.4.2 in the IBTA spec (Modify Queue Pair,
2161 * table 91), both the QP timeout and the retry count have to be set
2162 * for RC QPs during the RTR to RTS transition.
2163 */
2164 WARN_ON_ONCE((attr_mask & (IB_QP_TIMEOUT | IB_QP_RETRY_CNT)) !=
2165 (IB_QP_TIMEOUT | IB_QP_RETRY_CNT));
2166
2167 /*
2168 * Set target->rq_tmo_jiffies to one second more than the largest time
2169 * it can take before an error completion is generated. See also
2170 * C9-140..142 in the IBTA spec for more information about how to
2171 * convert the QP Local ACK Timeout value to nanoseconds.
2172 */
2173 T_tr_ns = 4096 * (1ULL << qp_attr->timeout);
2174 max_compl_time_ms = qp_attr->retry_cnt * 4 * T_tr_ns;
2175 do_div(max_compl_time_ms, NSEC_PER_MSEC);
2176 rq_tmo_jiffies = msecs_to_jiffies(max_compl_time_ms + 1000);
2177
2178 return rq_tmo_jiffies;
2179}
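
/*
 * Worked example (illustrative): for qp_attr->timeout = 14 and
 * qp_attr->retry_cnt = 7,
 *
 *	T_tr_ns           = 4096 * 2^14     = 67,108,864 ns (~67 ms)
 *	max_compl_time_ms = 7 * 4 * T_tr_ns ~= 1879 ms
 *	rq_tmo_jiffies    = msecs_to_jiffies(1879 + 1000)
 *
 * i.e. the resulting timeout is roughly three seconds.
 */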
2180
David Dillow961e0be2011-01-14 17:32:07 -05002181static void srp_cm_rep_handler(struct ib_cm_id *cm_id,
Bart Van Asschee6300cb2015-07-31 14:12:48 -07002182 const struct srp_login_rsp *lrsp,
Bart Van Assche509c07b2014-10-30 14:48:30 +01002183 struct srp_rdma_ch *ch)
David Dillow961e0be2011-01-14 17:32:07 -05002184{
Bart Van Assche509c07b2014-10-30 14:48:30 +01002185 struct srp_target_port *target = ch->target;
David Dillow961e0be2011-01-14 17:32:07 -05002186 struct ib_qp_attr *qp_attr = NULL;
2187 int attr_mask = 0;
2188 int ret;
2189 int i;
2190
2191 if (lrsp->opcode == SRP_LOGIN_RSP) {
Bart Van Assche509c07b2014-10-30 14:48:30 +01002192 ch->max_ti_iu_len = be32_to_cpu(lrsp->max_ti_iu_len);
2193 ch->req_lim = be32_to_cpu(lrsp->req_lim_delta);
David Dillow961e0be2011-01-14 17:32:07 -05002194
2195 /*
2196 * Reserve credits for task management so we don't
2197 * bounce requests back to the SCSI mid-layer.
2198 */
2199 target->scsi_host->can_queue
Bart Van Assche509c07b2014-10-30 14:48:30 +01002200 = min(ch->req_lim - SRP_TSK_MGMT_SQ_SIZE,
David Dillow961e0be2011-01-14 17:32:07 -05002201 target->scsi_host->can_queue);
Bart Van Assche4d73f952013-10-26 14:40:37 +02002202 target->scsi_host->cmd_per_lun
2203 = min_t(int, target->scsi_host->can_queue,
2204 target->scsi_host->cmd_per_lun);
David Dillow961e0be2011-01-14 17:32:07 -05002205 } else {
2206 shost_printk(KERN_WARNING, target->scsi_host,
2207 PFX "Unhandled RSP opcode %#x\n", lrsp->opcode);
2208 ret = -ECONNRESET;
2209 goto error;
2210 }
2211
Bart Van Assche509c07b2014-10-30 14:48:30 +01002212 if (!ch->rx_ring) {
2213 ret = srp_alloc_iu_bufs(ch);
David Dillow961e0be2011-01-14 17:32:07 -05002214 if (ret)
2215 goto error;
2216 }
2217
2218 ret = -ENOMEM;
2219 qp_attr = kmalloc(sizeof *qp_attr, GFP_KERNEL);
2220 if (!qp_attr)
2221 goto error;
2222
2223 qp_attr->qp_state = IB_QPS_RTR;
2224 ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
2225 if (ret)
2226 goto error_free;
2227
Bart Van Assche509c07b2014-10-30 14:48:30 +01002228 ret = ib_modify_qp(ch->qp, qp_attr, attr_mask);
David Dillow961e0be2011-01-14 17:32:07 -05002229 if (ret)
2230 goto error_free;
2231
Bart Van Assche4d73f952013-10-26 14:40:37 +02002232 for (i = 0; i < target->queue_size; i++) {
Bart Van Assche509c07b2014-10-30 14:48:30 +01002233 struct srp_iu *iu = ch->rx_ring[i];
2234
2235 ret = srp_post_recv(ch, iu);
David Dillow961e0be2011-01-14 17:32:07 -05002236 if (ret)
2237 goto error_free;
2238 }
2239
2240 qp_attr->qp_state = IB_QPS_RTS;
2241 ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
2242 if (ret)
2243 goto error_free;
2244
Bart Van Asschec9b03c12011-09-03 09:34:48 +02002245 target->rq_tmo_jiffies = srp_compute_rq_tmo(qp_attr, attr_mask);
2246
Bart Van Assche509c07b2014-10-30 14:48:30 +01002247 ret = ib_modify_qp(ch->qp, qp_attr, attr_mask);
David Dillow961e0be2011-01-14 17:32:07 -05002248 if (ret)
2249 goto error_free;
2250
2251 ret = ib_send_cm_rtu(cm_id, NULL, 0);
2252
2253error_free:
2254 kfree(qp_attr);
2255
2256error:
Bart Van Assche509c07b2014-10-30 14:48:30 +01002257 ch->status = ret;
David Dillow961e0be2011-01-14 17:32:07 -05002258}
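
/*
 * Illustrative summary of the sequence above: on SRP_LOGIN_RSP the
 * queue depth is derived from the target's credits, the QP is moved to
 * RTR, one receive is posted per IU, the QP is moved to RTS, and
 * finally a CM RTU is sent so that the target may start transmitting.
 */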
2259
Roland Dreieraef9ec32005-11-02 14:07:13 -08002260static void srp_cm_rej_handler(struct ib_cm_id *cm_id,
2261 struct ib_cm_event *event,
Bart Van Assche509c07b2014-10-30 14:48:30 +01002262 struct srp_rdma_ch *ch)
Roland Dreieraef9ec32005-11-02 14:07:13 -08002263{
Bart Van Assche509c07b2014-10-30 14:48:30 +01002264 struct srp_target_port *target = ch->target;
David Dillow7aa54bd2008-01-07 18:23:41 -05002265 struct Scsi_Host *shost = target->scsi_host;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002266 struct ib_class_port_info *cpi;
2267 int opcode;
2268
2269 switch (event->param.rej_rcvd.reason) {
2270 case IB_CM_REJ_PORT_CM_REDIRECT:
2271 cpi = event->param.rej_rcvd.ari;
Bart Van Assche509c07b2014-10-30 14:48:30 +01002272 ch->path.dlid = cpi->redirect_lid;
2273 ch->path.pkey = cpi->redirect_pkey;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002274 cm_id->remote_cm_qpn = be32_to_cpu(cpi->redirect_qp) & 0x00ffffff;
Bart Van Assche509c07b2014-10-30 14:48:30 +01002275 memcpy(ch->path.dgid.raw, cpi->redirect_gid, 16);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002276
Bart Van Assche509c07b2014-10-30 14:48:30 +01002277 ch->status = ch->path.dlid ?
Roland Dreieraef9ec32005-11-02 14:07:13 -08002278 SRP_DLID_REDIRECT : SRP_PORT_REDIRECT;
2279 break;
2280
2281 case IB_CM_REJ_PORT_REDIRECT:
Roland Dreier5d7cbfd2007-08-03 10:45:18 -07002282 if (srp_target_is_topspin(target)) {
Roland Dreieraef9ec32005-11-02 14:07:13 -08002283 /*
2284 * Topspin/Cisco SRP gateways incorrectly send
2285 * reject reason code 25 when they mean 24
2286 * (port redirect).
2287 */
Bart Van Assche509c07b2014-10-30 14:48:30 +01002288 memcpy(ch->path.dgid.raw,
Roland Dreieraef9ec32005-11-02 14:07:13 -08002289 event->param.rej_rcvd.ari, 16);
2290
David Dillow7aa54bd2008-01-07 18:23:41 -05002291 shost_printk(KERN_DEBUG, shost,
2292 PFX "Topspin/Cisco redirect to target port GID %016llx%016llx\n",
Bart Van Assche509c07b2014-10-30 14:48:30 +01002293 be64_to_cpu(ch->path.dgid.global.subnet_prefix),
2294 be64_to_cpu(ch->path.dgid.global.interface_id));
Roland Dreieraef9ec32005-11-02 14:07:13 -08002295
Bart Van Assche509c07b2014-10-30 14:48:30 +01002296 ch->status = SRP_PORT_REDIRECT;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002297 } else {
David Dillow7aa54bd2008-01-07 18:23:41 -05002298 shost_printk(KERN_WARNING, shost,
2299 " REJ reason: IB_CM_REJ_PORT_REDIRECT\n");
Bart Van Assche509c07b2014-10-30 14:48:30 +01002300 ch->status = -ECONNRESET;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002301 }
2302 break;
2303
2304 case IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID:
David Dillow7aa54bd2008-01-07 18:23:41 -05002305 shost_printk(KERN_WARNING, shost,
2306 " REJ reason: IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID\n");
Bart Van Assche509c07b2014-10-30 14:48:30 +01002307 ch->status = -ECONNRESET;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002308 break;
2309
2310 case IB_CM_REJ_CONSUMER_DEFINED:
2311 opcode = *(u8 *) event->private_data;
2312 if (opcode == SRP_LOGIN_REJ) {
2313 struct srp_login_rej *rej = event->private_data;
2314 u32 reason = be32_to_cpu(rej->reason);
2315
2316 if (reason == SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE)
David Dillow7aa54bd2008-01-07 18:23:41 -05002317 shost_printk(KERN_WARNING, shost,
2318 PFX "SRP_LOGIN_REJ: requested max_it_iu_len too large\n");
Roland Dreieraef9ec32005-11-02 14:07:13 -08002319 else
Bart Van Asschee7ffde02014-03-14 13:52:21 +01002320 shost_printk(KERN_WARNING, shost, PFX
2321 "SRP LOGIN from %pI6 to %pI6 REJECTED, reason 0x%08x\n",
Bart Van Assche747fe002014-10-30 14:48:05 +01002322 target->sgid.raw,
2323 target->orig_dgid.raw, reason);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002324 } else
David Dillow7aa54bd2008-01-07 18:23:41 -05002325 shost_printk(KERN_WARNING, shost,
2326 " REJ reason: IB_CM_REJ_CONSUMER_DEFINED,"
2327 " opcode 0x%02x\n", opcode);
Bart Van Assche509c07b2014-10-30 14:48:30 +01002328 ch->status = -ECONNRESET;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002329 break;
2330
David Dillow9fe4bcf2008-01-08 17:08:52 -05002331 case IB_CM_REJ_STALE_CONN:
2332 shost_printk(KERN_WARNING, shost, " REJ reason: stale connection\n");
Bart Van Assche509c07b2014-10-30 14:48:30 +01002333 ch->status = SRP_STALE_CONN;
David Dillow9fe4bcf2008-01-08 17:08:52 -05002334 break;
2335
Roland Dreieraef9ec32005-11-02 14:07:13 -08002336 default:
David Dillow7aa54bd2008-01-07 18:23:41 -05002337 shost_printk(KERN_WARNING, shost, " REJ reason 0x%x\n",
2338 event->param.rej_rcvd.reason);
Bart Van Assche509c07b2014-10-30 14:48:30 +01002339 ch->status = -ECONNRESET;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002340 }
2341}
2342
2343static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
2344{
Bart Van Assche509c07b2014-10-30 14:48:30 +01002345 struct srp_rdma_ch *ch = cm_id->context;
2346 struct srp_target_port *target = ch->target;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002347 int comp = 0;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002348
2349 switch (event->event) {
2350 case IB_CM_REQ_ERROR:
David Dillow7aa54bd2008-01-07 18:23:41 -05002351 shost_printk(KERN_DEBUG, target->scsi_host,
2352 PFX "Sending CM REQ failed\n");
Roland Dreieraef9ec32005-11-02 14:07:13 -08002353 comp = 1;
Bart Van Assche509c07b2014-10-30 14:48:30 +01002354 ch->status = -ECONNRESET;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002355 break;
2356
2357 case IB_CM_REP_RECEIVED:
2358 comp = 1;
Bart Van Assche509c07b2014-10-30 14:48:30 +01002359 srp_cm_rep_handler(cm_id, event->private_data, ch);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002360 break;
2361
2362 case IB_CM_REJ_RECEIVED:
David Dillow7aa54bd2008-01-07 18:23:41 -05002363 shost_printk(KERN_DEBUG, target->scsi_host, PFX "REJ received\n");
Roland Dreieraef9ec32005-11-02 14:07:13 -08002364 comp = 1;
2365
Bart Van Assche509c07b2014-10-30 14:48:30 +01002366 srp_cm_rej_handler(cm_id, event, ch);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002367 break;
2368
Ishai Rabinovitzb7ac4ab2006-06-17 20:37:32 -07002369 case IB_CM_DREQ_RECEIVED:
David Dillow7aa54bd2008-01-07 18:23:41 -05002370 shost_printk(KERN_WARNING, target->scsi_host,
2371 PFX "DREQ received - connection closed\n");
Bart Van Asschec014c8c2015-05-18 13:23:57 +02002372 ch->connected = false;
Ishai Rabinovitzb7ac4ab2006-06-17 20:37:32 -07002373 if (ib_send_cm_drep(cm_id, NULL, 0))
David Dillow7aa54bd2008-01-07 18:23:41 -05002374 shost_printk(KERN_ERR, target->scsi_host,
2375 PFX "Sending CM DREP failed\n");
Bart Van Asschec1120f82013-10-26 14:35:08 +02002376 queue_work(system_long_wq, &target->tl_err_work);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002377 break;
2378
2379 case IB_CM_TIMEWAIT_EXIT:
David Dillow7aa54bd2008-01-07 18:23:41 -05002380 shost_printk(KERN_ERR, target->scsi_host,
2381 PFX "connection closed\n");
Bart Van Asscheac72d762014-03-14 13:53:40 +01002382 comp = 1;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002383
Bart Van Assche509c07b2014-10-30 14:48:30 +01002384 ch->status = 0;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002385 break;
2386
Ishai Rabinovitzb7ac4ab2006-06-17 20:37:32 -07002387 case IB_CM_MRA_RECEIVED:
2388 case IB_CM_DREQ_ERROR:
2389 case IB_CM_DREP_RECEIVED:
2390 break;
2391
Roland Dreieraef9ec32005-11-02 14:07:13 -08002392 default:
David Dillow7aa54bd2008-01-07 18:23:41 -05002393 shost_printk(KERN_WARNING, target->scsi_host,
2394 PFX "Unhandled CM event %d\n", event->event);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002395 break;
2396 }
2397
2398 if (comp)
Bart Van Assche509c07b2014-10-30 14:48:30 +01002399 complete(&ch->done);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002400
Roland Dreieraef9ec32005-11-02 14:07:13 -08002401 return 0;
2402}
2403
Jack Wang71444b92013-11-07 11:37:37 +01002404/**
Jack Wang71444b92013-11-07 11:37:37 +01002405 * srp_change_queue_depth - set the queue depth of a SCSI device
2406 * @sdev: scsi device struct
2407 * @qdepth: requested queue depth
Jack Wang71444b92013-11-07 11:37:37 +01002408 *
2409 * Returns the new queue depth.
2410 */
2411static int
Christoph Hellwigdb5ed4d2014-11-13 15:08:42 +01002412srp_change_queue_depth(struct scsi_device *sdev, int qdepth)
Jack Wang71444b92013-11-07 11:37:37 +01002413{
Christoph Hellwigc40ecc12014-11-13 14:25:11 +01002414 if (!sdev->tagged_supported)
Christoph Hellwig1e6f2412014-11-13 14:27:41 +01002415 qdepth = 1;
Christoph Hellwigdb5ed4d2014-11-13 15:08:42 +01002416 return scsi_change_queue_depth(sdev, qdepth);
Jack Wang71444b92013-11-07 11:37:37 +01002417}
2418
Bart Van Assche985aa492015-05-18 13:27:14 +02002419static int srp_send_tsk_mgmt(struct srp_rdma_ch *ch, u64 req_tag, u64 lun,
2420 u8 func)
Roland Dreieraef9ec32005-11-02 14:07:13 -08002421{
Bart Van Assche509c07b2014-10-30 14:48:30 +01002422 struct srp_target_port *target = ch->target;
Bart Van Asschea95cadb2013-10-26 14:37:17 +02002423 struct srp_rport *rport = target->rport;
David Dillow19081f32010-10-18 08:54:49 -04002424 struct ib_device *dev = target->srp_host->srp_dev->dev;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002425 struct srp_iu *iu;
2426 struct srp_tsk_mgmt *tsk_mgmt;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002427
Bart Van Asschec014c8c2015-05-18 13:23:57 +02002428 if (!ch->connected || target->qp_in_error)
Bart Van Assche3780d1f2013-02-21 17:18:00 +00002429 return -1;
2430
Bart Van Assche509c07b2014-10-30 14:48:30 +01002431 init_completion(&ch->tsk_mgmt_done);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002432
Bart Van Asschea95cadb2013-10-26 14:37:17 +02002433 /*
Bart Van Assche509c07b2014-10-30 14:48:30 +01002434	 * Lock the rport mutex to prevent srp_create_ch_ib() from being
Bart Van Asschea95cadb2013-10-26 14:37:17 +02002435 * invoked while a task management function is being sent.
2436 */
2437 mutex_lock(&rport->mutex);
Bart Van Assche509c07b2014-10-30 14:48:30 +01002438 spin_lock_irq(&ch->lock);
2439 iu = __srp_get_tx_iu(ch, SRP_IU_TSK_MGMT);
2440 spin_unlock_irq(&ch->lock);
Bart Van Assche76c75b22010-11-26 14:37:47 -05002441
Bart Van Asschea95cadb2013-10-26 14:37:17 +02002442 if (!iu) {
2443 mutex_unlock(&rport->mutex);
2444
Bart Van Assche76c75b22010-11-26 14:37:47 -05002445 return -1;
Bart Van Asschea95cadb2013-10-26 14:37:17 +02002446 }
Roland Dreieraef9ec32005-11-02 14:07:13 -08002447
David Dillow19081f32010-10-18 08:54:49 -04002448 ib_dma_sync_single_for_cpu(dev, iu->dma, sizeof *tsk_mgmt,
2449 DMA_TO_DEVICE);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002450 tsk_mgmt = iu->buf;
2451 memset(tsk_mgmt, 0, sizeof *tsk_mgmt);
2452
2453 tsk_mgmt->opcode = SRP_TSK_MGMT;
Bart Van Assche985aa492015-05-18 13:27:14 +02002454 int_to_scsilun(lun, &tsk_mgmt->lun);
David Dillowf8b6e312010-11-26 13:02:21 -05002455 tsk_mgmt->tag = req_tag | SRP_TAG_TSK_MGMT;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002456 tsk_mgmt->tsk_mgmt_func = func;
David Dillowf8b6e312010-11-26 13:02:21 -05002457 tsk_mgmt->task_tag = req_tag;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002458
David Dillow19081f32010-10-18 08:54:49 -04002459 ib_dma_sync_single_for_device(dev, iu->dma, sizeof *tsk_mgmt,
2460 DMA_TO_DEVICE);
Bart Van Assche509c07b2014-10-30 14:48:30 +01002461 if (srp_post_send(ch, iu, sizeof(*tsk_mgmt))) {
2462 srp_put_tx_iu(ch, iu, SRP_IU_TSK_MGMT);
Bart Van Asschea95cadb2013-10-26 14:37:17 +02002463 mutex_unlock(&rport->mutex);
2464
Bart Van Assche76c75b22010-11-26 14:37:47 -05002465 return -1;
2466 }
Bart Van Asschea95cadb2013-10-26 14:37:17 +02002467 mutex_unlock(&rport->mutex);
Roland Dreierd945e1d2006-05-09 10:50:28 -07002468
Bart Van Assche509c07b2014-10-30 14:48:30 +01002469 if (!wait_for_completion_timeout(&ch->tsk_mgmt_done,
Roland Dreieraef9ec32005-11-02 14:07:13 -08002470 msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS)))
Roland Dreierd945e1d2006-05-09 10:50:28 -07002471 return -1;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002472
Roland Dreierd945e1d2006-05-09 10:50:28 -07002473 return 0;
Roland Dreierd945e1d2006-05-09 10:50:28 -07002474}
2475
Roland Dreieraef9ec32005-11-02 14:07:13 -08002476static int srp_abort(struct scsi_cmnd *scmnd)
2477{
Roland Dreierd945e1d2006-05-09 10:50:28 -07002478 struct srp_target_port *target = host_to_target(scmnd->device->host);
David Dillowf8b6e312010-11-26 13:02:21 -05002479 struct srp_request *req = (struct srp_request *) scmnd->host_scribble;
Bart Van Assche77f2c1a2014-10-02 15:29:25 +02002480 u32 tag;
Bart Van Assched92c0da2014-10-06 17:14:36 +02002481 u16 ch_idx;
Bart Van Assche509c07b2014-10-30 14:48:30 +01002482 struct srp_rdma_ch *ch;
Bart Van Assche086f44f2013-06-12 15:23:04 +02002483 int ret;
Roland Dreierd945e1d2006-05-09 10:50:28 -07002484
David Dillow7aa54bd2008-01-07 18:23:41 -05002485 shost_printk(KERN_ERR, target->scsi_host, "SRP abort called\n");
Roland Dreieraef9ec32005-11-02 14:07:13 -08002486
Bart Van Assched92c0da2014-10-06 17:14:36 +02002487 if (!req)
Bart Van Assche99b66972013-10-10 13:52:33 +02002488 return SUCCESS;
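	/*
	 * blk_mq_unique_tag() encodes the hardware queue index in the upper
	 * 16 bits and the per-queue tag in the lower 16 bits, so the RDMA
	 * channel that issued this command can be recovered from the tag.
	 */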
Bart Van Assche77f2c1a2014-10-02 15:29:25 +02002489 tag = blk_mq_unique_tag(scmnd->request);
Bart Van Assched92c0da2014-10-06 17:14:36 +02002490 ch_idx = blk_mq_unique_tag_to_hwq(tag);
2491 if (WARN_ON_ONCE(ch_idx >= target->ch_count))
2492 return SUCCESS;
2493 ch = &target->ch[ch_idx];
2494 if (!srp_claim_req(ch, req, NULL, scmnd))
2495 return SUCCESS;
2496 shost_printk(KERN_ERR, target->scsi_host,
2497 "Sending SRP abort for tag %#x\n", tag);
Bart Van Assche77f2c1a2014-10-02 15:29:25 +02002498 if (srp_send_tsk_mgmt(ch, tag, scmnd->device->lun,
Bart Van Assche80d5e8a2013-07-10 17:36:35 +02002499 SRP_TSK_ABORT_TASK) == 0)
Bart Van Assche086f44f2013-06-12 15:23:04 +02002500 ret = SUCCESS;
Bart Van Asscheed9b2262013-10-26 14:34:27 +02002501 else if (target->rport->state == SRP_RPORT_LOST)
Bart Van Assche99e1c132013-06-28 14:49:58 +02002502 ret = FAST_IO_FAIL;
Bart Van Assche086f44f2013-06-12 15:23:04 +02002503 else
2504 ret = FAILED;
Bart Van Assche509c07b2014-10-30 14:48:30 +01002505 srp_free_req(ch, req, scmnd, 0);
Bart Van Assche22032992012-08-14 13:18:53 +00002506 scmnd->result = DID_ABORT << 16;
Bart Van Assched8536672012-08-24 10:29:11 +00002507 scmnd->scsi_done(scmnd);
Roland Dreierd945e1d2006-05-09 10:50:28 -07002508
Bart Van Assche086f44f2013-06-12 15:23:04 +02002509 return ret;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002510}
2511
2512static int srp_reset_device(struct scsi_cmnd *scmnd)
2513{
Roland Dreierd945e1d2006-05-09 10:50:28 -07002514 struct srp_target_port *target = host_to_target(scmnd->device->host);
Bart Van Assched92c0da2014-10-06 17:14:36 +02002515 struct srp_rdma_ch *ch;
Bart Van Assche536ae142010-11-26 13:58:27 -05002516	int i, j;
Roland Dreierd945e1d2006-05-09 10:50:28 -07002517
David Dillow7aa54bd2008-01-07 18:23:41 -05002518 shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n");
Roland Dreieraef9ec32005-11-02 14:07:13 -08002519
Bart Van Assched92c0da2014-10-06 17:14:36 +02002520 ch = &target->ch[0];
Bart Van Assche509c07b2014-10-30 14:48:30 +01002521 if (srp_send_tsk_mgmt(ch, SRP_TAG_NO_REQ, scmnd->device->lun,
David Dillowf8b6e312010-11-26 13:02:21 -05002522 SRP_TSK_LUN_RESET))
Roland Dreierd945e1d2006-05-09 10:50:28 -07002523 return FAILED;
Bart Van Assche509c07b2014-10-30 14:48:30 +01002524 if (ch->tsk_mgmt_status)
Roland Dreierd945e1d2006-05-09 10:50:28 -07002525 return FAILED;
2526
Bart Van Assched92c0da2014-10-06 17:14:36 +02002527 for (i = 0; i < target->ch_count; i++) {
2528 ch = &target->ch[i];
2529		for (j = 0; j < target->req_ring_size; ++j) {
2530			struct srp_request *req = &ch->req_ring[j];
Bart Van Assche509c07b2014-10-30 14:48:30 +01002531
Bart Van Assched92c0da2014-10-06 17:14:36 +02002532 srp_finish_req(ch, req, scmnd->device, DID_RESET << 16);
2533 }
Bart Van Assche536ae142010-11-26 13:58:27 -05002534 }
Roland Dreierd945e1d2006-05-09 10:50:28 -07002535
Roland Dreierd945e1d2006-05-09 10:50:28 -07002536 return SUCCESS;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002537}
2538
2539static int srp_reset_host(struct scsi_cmnd *scmnd)
2540{
2541 struct srp_target_port *target = host_to_target(scmnd->device->host);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002542
David Dillow7aa54bd2008-01-07 18:23:41 -05002543 shost_printk(KERN_ERR, target->scsi_host, PFX "SRP reset_host called\n");
Roland Dreieraef9ec32005-11-02 14:07:13 -08002544
Bart Van Asscheed9b2262013-10-26 14:34:27 +02002545 return srp_reconnect_rport(target->rport) == 0 ? SUCCESS : FAILED;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002546}
2547
Bart Van Asschec9b03c12011-09-03 09:34:48 +02002548static int srp_slave_configure(struct scsi_device *sdev)
2549{
2550 struct Scsi_Host *shost = sdev->host;
2551 struct srp_target_port *target = host_to_target(shost);
2552 struct request_queue *q = sdev->request_queue;
2553 unsigned long timeout;
2554
2555 if (sdev->type == TYPE_DISK) {
2556 timeout = max_t(unsigned, 30 * HZ, target->rq_tmo_jiffies);
2557 blk_queue_rq_timeout(q, timeout);
2558 }
2559
2560 return 0;
2561}
2562
Tony Jonesee959b02008-02-22 00:13:36 +01002563static ssize_t show_id_ext(struct device *dev, struct device_attribute *attr,
2564 char *buf)
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002565{
Tony Jonesee959b02008-02-22 00:13:36 +01002566 struct srp_target_port *target = host_to_target(class_to_shost(dev));
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002567
Bart Van Assche45c37ca2015-05-18 13:25:10 +02002568 return sprintf(buf, "0x%016llx\n", be64_to_cpu(target->id_ext));
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002569}
2570
Tony Jonesee959b02008-02-22 00:13:36 +01002571static ssize_t show_ioc_guid(struct device *dev, struct device_attribute *attr,
2572 char *buf)
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002573{
Tony Jonesee959b02008-02-22 00:13:36 +01002574 struct srp_target_port *target = host_to_target(class_to_shost(dev));
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002575
Bart Van Assche45c37ca2015-05-18 13:25:10 +02002576 return sprintf(buf, "0x%016llx\n", be64_to_cpu(target->ioc_guid));
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002577}
2578
Tony Jonesee959b02008-02-22 00:13:36 +01002579static ssize_t show_service_id(struct device *dev,
2580 struct device_attribute *attr, char *buf)
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002581{
Tony Jonesee959b02008-02-22 00:13:36 +01002582 struct srp_target_port *target = host_to_target(class_to_shost(dev));
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002583
Bart Van Assche45c37ca2015-05-18 13:25:10 +02002584 return sprintf(buf, "0x%016llx\n", be64_to_cpu(target->service_id));
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002585}
2586
Tony Jonesee959b02008-02-22 00:13:36 +01002587static ssize_t show_pkey(struct device *dev, struct device_attribute *attr,
2588 char *buf)
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002589{
Tony Jonesee959b02008-02-22 00:13:36 +01002590 struct srp_target_port *target = host_to_target(class_to_shost(dev));
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002591
Bart Van Assche747fe002014-10-30 14:48:05 +01002592 return sprintf(buf, "0x%04x\n", be16_to_cpu(target->pkey));
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002593}
2594
Bart Van Assche848b3082013-10-26 14:38:12 +02002595static ssize_t show_sgid(struct device *dev, struct device_attribute *attr,
2596 char *buf)
2597{
2598 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2599
Bart Van Assche747fe002014-10-30 14:48:05 +01002600 return sprintf(buf, "%pI6\n", target->sgid.raw);
Bart Van Assche848b3082013-10-26 14:38:12 +02002601}
2602
Tony Jonesee959b02008-02-22 00:13:36 +01002603static ssize_t show_dgid(struct device *dev, struct device_attribute *attr,
2604 char *buf)
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002605{
Tony Jonesee959b02008-02-22 00:13:36 +01002606 struct srp_target_port *target = host_to_target(class_to_shost(dev));
Bart Van Assched92c0da2014-10-06 17:14:36 +02002607 struct srp_rdma_ch *ch = &target->ch[0];
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002608
Bart Van Assche509c07b2014-10-30 14:48:30 +01002609 return sprintf(buf, "%pI6\n", ch->path.dgid.raw);
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002610}
2611
Tony Jonesee959b02008-02-22 00:13:36 +01002612static ssize_t show_orig_dgid(struct device *dev,
2613 struct device_attribute *attr, char *buf)
Ishai Rabinovitz3633b3d2007-05-06 21:18:11 -07002614{
Tony Jonesee959b02008-02-22 00:13:36 +01002615 struct srp_target_port *target = host_to_target(class_to_shost(dev));
Ishai Rabinovitz3633b3d2007-05-06 21:18:11 -07002616
Bart Van Assche747fe002014-10-30 14:48:05 +01002617 return sprintf(buf, "%pI6\n", target->orig_dgid.raw);
Ishai Rabinovitz3633b3d2007-05-06 21:18:11 -07002618}
2619
Bart Van Assche89de7482010-08-03 14:08:45 +00002620static ssize_t show_req_lim(struct device *dev,
2621 struct device_attribute *attr, char *buf)
2622{
2623 struct srp_target_port *target = host_to_target(class_to_shost(dev));
Bart Van Assched92c0da2014-10-06 17:14:36 +02002624 struct srp_rdma_ch *ch;
2625 int i, req_lim = INT_MAX;
Bart Van Assche89de7482010-08-03 14:08:45 +00002626
Bart Van Assched92c0da2014-10-06 17:14:36 +02002627 for (i = 0; i < target->ch_count; i++) {
2628 ch = &target->ch[i];
2629 req_lim = min(req_lim, ch->req_lim);
2630 }
2631 return sprintf(buf, "%d\n", req_lim);
Bart Van Assche89de7482010-08-03 14:08:45 +00002632}
2633
Tony Jonesee959b02008-02-22 00:13:36 +01002634static ssize_t show_zero_req_lim(struct device *dev,
2635 struct device_attribute *attr, char *buf)
Roland Dreier6bfa24f2006-06-17 20:37:33 -07002636{
Tony Jonesee959b02008-02-22 00:13:36 +01002637 struct srp_target_port *target = host_to_target(class_to_shost(dev));
Roland Dreier6bfa24f2006-06-17 20:37:33 -07002638
Roland Dreier6bfa24f2006-06-17 20:37:33 -07002639 return sprintf(buf, "%d\n", target->zero_req_lim);
2640}
2641
Tony Jonesee959b02008-02-22 00:13:36 +01002642static ssize_t show_local_ib_port(struct device *dev,
2643 struct device_attribute *attr, char *buf)
Ishai Rabinovitzded7f1a2006-08-15 17:34:52 +03002644{
Tony Jonesee959b02008-02-22 00:13:36 +01002645 struct srp_target_port *target = host_to_target(class_to_shost(dev));
Ishai Rabinovitzded7f1a2006-08-15 17:34:52 +03002646
2647 return sprintf(buf, "%d\n", target->srp_host->port);
2648}
2649
Tony Jonesee959b02008-02-22 00:13:36 +01002650static ssize_t show_local_ib_device(struct device *dev,
2651 struct device_attribute *attr, char *buf)
Ishai Rabinovitzded7f1a2006-08-15 17:34:52 +03002652{
Tony Jonesee959b02008-02-22 00:13:36 +01002653 struct srp_target_port *target = host_to_target(class_to_shost(dev));
Ishai Rabinovitzded7f1a2006-08-15 17:34:52 +03002654
Greg Kroah-Hartman05321932008-03-06 00:13:36 +01002655 return sprintf(buf, "%s\n", target->srp_host->srp_dev->dev->name);
Ishai Rabinovitzded7f1a2006-08-15 17:34:52 +03002656}
2657
Bart Van Assched92c0da2014-10-06 17:14:36 +02002658static ssize_t show_ch_count(struct device *dev, struct device_attribute *attr,
2659 char *buf)
2660{
2661 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2662
2663 return sprintf(buf, "%d\n", target->ch_count);
2664}
2665
Bart Van Assche4b5e5f42013-06-28 14:57:42 +02002666static ssize_t show_comp_vector(struct device *dev,
2667 struct device_attribute *attr, char *buf)
2668{
2669 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2670
2671 return sprintf(buf, "%d\n", target->comp_vector);
2672}
2673
Vu Pham7bb312e2013-10-26 14:31:27 +02002674static ssize_t show_tl_retry_count(struct device *dev,
2675 struct device_attribute *attr, char *buf)
2676{
2677 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2678
2679 return sprintf(buf, "%d\n", target->tl_retry_count);
2680}
2681
David Dillow49248642011-01-14 18:23:24 -05002682static ssize_t show_cmd_sg_entries(struct device *dev,
2683 struct device_attribute *attr, char *buf)
2684{
2685 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2686
2687 return sprintf(buf, "%u\n", target->cmd_sg_cnt);
2688}
2689
David Dillowc07d4242011-01-16 13:57:10 -05002690static ssize_t show_allow_ext_sg(struct device *dev,
2691 struct device_attribute *attr, char *buf)
2692{
2693 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2694
2695 return sprintf(buf, "%s\n", target->allow_ext_sg ? "true" : "false");
2696}
2697
Tony Jonesee959b02008-02-22 00:13:36 +01002698static DEVICE_ATTR(id_ext, S_IRUGO, show_id_ext, NULL);
2699static DEVICE_ATTR(ioc_guid, S_IRUGO, show_ioc_guid, NULL);
2700static DEVICE_ATTR(service_id, S_IRUGO, show_service_id, NULL);
2701static DEVICE_ATTR(pkey, S_IRUGO, show_pkey, NULL);
Bart Van Assche848b3082013-10-26 14:38:12 +02002702static DEVICE_ATTR(sgid, S_IRUGO, show_sgid, NULL);
Tony Jonesee959b02008-02-22 00:13:36 +01002703static DEVICE_ATTR(dgid, S_IRUGO, show_dgid, NULL);
2704static DEVICE_ATTR(orig_dgid, S_IRUGO, show_orig_dgid, NULL);
Bart Van Assche89de7482010-08-03 14:08:45 +00002705static DEVICE_ATTR(req_lim, S_IRUGO, show_req_lim, NULL);
Tony Jonesee959b02008-02-22 00:13:36 +01002706static DEVICE_ATTR(zero_req_lim, S_IRUGO, show_zero_req_lim, NULL);
2707static DEVICE_ATTR(local_ib_port, S_IRUGO, show_local_ib_port, NULL);
2708static DEVICE_ATTR(local_ib_device, S_IRUGO, show_local_ib_device, NULL);
Bart Van Assched92c0da2014-10-06 17:14:36 +02002709static DEVICE_ATTR(ch_count, S_IRUGO, show_ch_count, NULL);
Bart Van Assche4b5e5f42013-06-28 14:57:42 +02002710static DEVICE_ATTR(comp_vector, S_IRUGO, show_comp_vector, NULL);
Vu Pham7bb312e2013-10-26 14:31:27 +02002711static DEVICE_ATTR(tl_retry_count, S_IRUGO, show_tl_retry_count, NULL);
David Dillow49248642011-01-14 18:23:24 -05002712static DEVICE_ATTR(cmd_sg_entries, S_IRUGO, show_cmd_sg_entries, NULL);
David Dillowc07d4242011-01-16 13:57:10 -05002713static DEVICE_ATTR(allow_ext_sg, S_IRUGO, show_allow_ext_sg, NULL);
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002714
Tony Jonesee959b02008-02-22 00:13:36 +01002715static struct device_attribute *srp_host_attrs[] = {
2716 &dev_attr_id_ext,
2717 &dev_attr_ioc_guid,
2718 &dev_attr_service_id,
2719 &dev_attr_pkey,
Bart Van Assche848b3082013-10-26 14:38:12 +02002720 &dev_attr_sgid,
Tony Jonesee959b02008-02-22 00:13:36 +01002721 &dev_attr_dgid,
2722 &dev_attr_orig_dgid,
Bart Van Assche89de7482010-08-03 14:08:45 +00002723 &dev_attr_req_lim,
Tony Jonesee959b02008-02-22 00:13:36 +01002724 &dev_attr_zero_req_lim,
2725 &dev_attr_local_ib_port,
2726 &dev_attr_local_ib_device,
Bart Van Assched92c0da2014-10-06 17:14:36 +02002727 &dev_attr_ch_count,
Bart Van Assche4b5e5f42013-06-28 14:57:42 +02002728 &dev_attr_comp_vector,
Vu Pham7bb312e2013-10-26 14:31:27 +02002729 &dev_attr_tl_retry_count,
David Dillow49248642011-01-14 18:23:24 -05002730 &dev_attr_cmd_sg_entries,
David Dillowc07d4242011-01-16 13:57:10 -05002731 &dev_attr_allow_ext_sg,
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002732 NULL
2733};
2734
Roland Dreieraef9ec32005-11-02 14:07:13 -08002735static struct scsi_host_template srp_template = {
2736 .module = THIS_MODULE,
Roland Dreierb7f008f2007-05-06 21:18:11 -07002737 .name = "InfiniBand SRP initiator",
2738 .proc_name = DRV_NAME,
Bart Van Asschec9b03c12011-09-03 09:34:48 +02002739 .slave_configure = srp_slave_configure,
Roland Dreieraef9ec32005-11-02 14:07:13 -08002740 .info = srp_target_info,
2741 .queuecommand = srp_queuecommand,
Jack Wang71444b92013-11-07 11:37:37 +01002742 .change_queue_depth = srp_change_queue_depth,
Roland Dreieraef9ec32005-11-02 14:07:13 -08002743 .eh_abort_handler = srp_abort,
2744 .eh_device_reset_handler = srp_reset_device,
2745 .eh_host_reset_handler = srp_reset_host,
Bart Van Assche2742c1d2013-06-12 15:24:25 +02002746 .skip_settle_delay = true,
David Dillow49248642011-01-14 18:23:24 -05002747 .sg_tablesize = SRP_DEF_SG_TABLESIZE,
Bart Van Assche4d73f952013-10-26 14:40:37 +02002748 .can_queue = SRP_DEFAULT_CMD_SQ_SIZE,
Roland Dreieraef9ec32005-11-02 14:07:13 -08002749 .this_id = -1,
Bart Van Assche4d73f952013-10-26 14:40:37 +02002750 .cmd_per_lun = SRP_DEFAULT_CMD_SQ_SIZE,
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002751 .use_clustering = ENABLE_CLUSTERING,
Bart Van Assche77f2c1a2014-10-02 15:29:25 +02002752 .shost_attrs = srp_host_attrs,
2753 .use_blk_tags = 1,
Christoph Hellwigc40ecc12014-11-13 14:25:11 +01002754 .track_queue_depth = 1,
Roland Dreieraef9ec32005-11-02 14:07:13 -08002755};
2756
Bart Van Assche34aa6542014-10-30 14:47:22 +01002757static int srp_sdev_count(struct Scsi_Host *host)
2758{
2759 struct scsi_device *sdev;
2760 int c = 0;
2761
2762 shost_for_each_device(sdev, host)
2763 c++;
2764
2765 return c;
2766}
2767
Bart Van Asschebc44bd12015-08-14 11:01:09 -07002768/*
2769 * Return values:
2770 * < 0 upon failure. Caller is responsible for SRP target port cleanup.
2771 * 0 and target->state == SRP_TARGET_REMOVED if asynchronous target port
2772 * removal has been scheduled.
2773 * 0 and target->state != SRP_TARGET_REMOVED upon success.
2774 */
Roland Dreieraef9ec32005-11-02 14:07:13 -08002775static int srp_add_target(struct srp_host *host, struct srp_target_port *target)
2776{
FUJITA Tomonori32368222007-06-27 16:33:12 +09002777 struct srp_rport_identifiers ids;
2778 struct srp_rport *rport;
2779
Bart Van Assche34aa6542014-10-30 14:47:22 +01002780 target->state = SRP_TARGET_SCANNING;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002781 sprintf(target->target_name, "SRP.T10:%016llX",
Bart Van Assche45c37ca2015-05-18 13:25:10 +02002782 be64_to_cpu(target->id_ext));
Roland Dreieraef9ec32005-11-02 14:07:13 -08002783
Greg Kroah-Hartman05321932008-03-06 00:13:36 +01002784 if (scsi_add_host(target->scsi_host, host->srp_dev->dev->dma_device))
Roland Dreieraef9ec32005-11-02 14:07:13 -08002785 return -ENODEV;
2786
FUJITA Tomonori32368222007-06-27 16:33:12 +09002787 memcpy(ids.port_id, &target->id_ext, 8);
2788 memcpy(ids.port_id + 8, &target->ioc_guid, 8);
FUJITA Tomonoriaebd5e42007-07-11 15:08:15 +09002789 ids.roles = SRP_RPORT_ROLE_TARGET;
FUJITA Tomonori32368222007-06-27 16:33:12 +09002790 rport = srp_rport_add(target->scsi_host, &ids);
2791 if (IS_ERR(rport)) {
2792 scsi_remove_host(target->scsi_host);
2793 return PTR_ERR(rport);
2794 }
2795
Bart Van Asschedc1bdbd2011-09-16 20:41:13 +02002796 rport->lld_data = target;
Bart Van Assche9dd69a62013-10-26 14:32:30 +02002797 target->rport = rport;
Bart Van Asschedc1bdbd2011-09-16 20:41:13 +02002798
Matthew Wilcoxb3589fd2006-06-17 20:37:30 -07002799 spin_lock(&host->target_lock);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002800 list_add_tail(&target->list, &host->target_list);
Matthew Wilcoxb3589fd2006-06-17 20:37:30 -07002801 spin_unlock(&host->target_lock);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002802
Roland Dreieraef9ec32005-11-02 14:07:13 -08002803 scsi_scan_target(&target->scsi_host->shost_gendev,
Matthew Wilcox1962a4a2006-06-17 20:37:30 -07002804 0, target->scsi_id, SCAN_WILD_CARD, 0);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002805
Bart Van Asschec014c8c2015-05-18 13:23:57 +02002806 if (srp_connected_ch(target) < target->ch_count ||
2807 target->qp_in_error) {
Bart Van Assche34aa6542014-10-30 14:47:22 +01002808 shost_printk(KERN_INFO, target->scsi_host,
2809 PFX "SCSI scan failed - removing SCSI host\n");
2810 srp_queue_remove_work(target);
2811 goto out;
2812 }
2813
2814 pr_debug(PFX "%s: SCSI scan succeeded - detected %d LUNs\n",
2815 dev_name(&target->scsi_host->shost_gendev),
2816 srp_sdev_count(target->scsi_host));
2817
2818 spin_lock_irq(&target->lock);
2819 if (target->state == SRP_TARGET_SCANNING)
2820 target->state = SRP_TARGET_LIVE;
2821 spin_unlock_irq(&target->lock);
2822
2823out:
Roland Dreieraef9ec32005-11-02 14:07:13 -08002824 return 0;
2825}
2826
Tony Jonesee959b02008-02-22 00:13:36 +01002827static void srp_release_dev(struct device *dev)
Roland Dreieraef9ec32005-11-02 14:07:13 -08002828{
2829 struct srp_host *host =
Tony Jonesee959b02008-02-22 00:13:36 +01002830 container_of(dev, struct srp_host, dev);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002831
2832 complete(&host->released);
2833}
2834
2835static struct class srp_class = {
2836 .name = "infiniband_srp",
Tony Jonesee959b02008-02-22 00:13:36 +01002837 .dev_release = srp_release_dev
Roland Dreieraef9ec32005-11-02 14:07:13 -08002838};
2839
Bart Van Assche96fc2482013-06-28 14:51:26 +02002840/**
2841 * srp_conn_unique() - check whether the connection to a target is unique
Bart Van Asscheaf246632014-05-20 15:04:21 +02002842 * @host: SRP host.
2843 * @target: SRP target port.
Bart Van Assche96fc2482013-06-28 14:51:26 +02002844 */
2845static bool srp_conn_unique(struct srp_host *host,
2846 struct srp_target_port *target)
2847{
2848 struct srp_target_port *t;
2849 bool ret = false;
2850
2851 if (target->state == SRP_TARGET_REMOVED)
2852 goto out;
2853
2854 ret = true;
2855
2856 spin_lock(&host->target_lock);
2857 list_for_each_entry(t, &host->target_list, list) {
2858 if (t != target &&
2859 target->id_ext == t->id_ext &&
2860 target->ioc_guid == t->ioc_guid &&
2861 target->initiator_ext == t->initiator_ext) {
2862 ret = false;
2863 break;
2864 }
2865 }
2866 spin_unlock(&host->target_lock);
2867
2868out:
2869 return ret;
2870}
2871
Roland Dreieraef9ec32005-11-02 14:07:13 -08002872/*
2873 * Target ports are added by writing
2874 *
2875 * id_ext=<SRP ID ext>,ioc_guid=<SRP IOC GUID>,dgid=<dest GID>,
2876 * pkey=<P_Key>,service_id=<service ID>
2877 *
2878 * to the add_target sysfs attribute.
2879 */
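/*
 * Example (the identifiers below are made up; the sysfs path follows the
 * "srp-<device>-<port>" naming used by srp_add_port()):
 *
 * echo id_ext=0x200100e08b000000,ioc_guid=0x00117500000000aa,dgid=fe800000000000000002c9030000199f,pkey=ffff,service_id=0x0002c9030000199f > /sys/class/infiniband_srp/srp-mlx4_0-1/add_target
 */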
2880enum {
2881 SRP_OPT_ERR = 0,
2882 SRP_OPT_ID_EXT = 1 << 0,
2883 SRP_OPT_IOC_GUID = 1 << 1,
2884 SRP_OPT_DGID = 1 << 2,
2885 SRP_OPT_PKEY = 1 << 3,
2886 SRP_OPT_SERVICE_ID = 1 << 4,
2887 SRP_OPT_MAX_SECT = 1 << 5,
Vu Pham52fb2b502006-06-17 20:37:31 -07002888 SRP_OPT_MAX_CMD_PER_LUN = 1 << 6,
Ramachandra K0c0450db2006-06-17 20:37:38 -07002889 SRP_OPT_IO_CLASS = 1 << 7,
Ishai Rabinovitz01cb9bc2006-10-04 15:28:56 +02002890 SRP_OPT_INITIATOR_EXT = 1 << 8,
David Dillow49248642011-01-14 18:23:24 -05002891 SRP_OPT_CMD_SG_ENTRIES = 1 << 9,
David Dillowc07d4242011-01-16 13:57:10 -05002892 SRP_OPT_ALLOW_EXT_SG = 1 << 10,
2893 SRP_OPT_SG_TABLESIZE = 1 << 11,
Bart Van Assche4b5e5f42013-06-28 14:57:42 +02002894 SRP_OPT_COMP_VECTOR = 1 << 12,
Vu Pham7bb312e2013-10-26 14:31:27 +02002895 SRP_OPT_TL_RETRY_COUNT = 1 << 13,
Bart Van Assche4d73f952013-10-26 14:40:37 +02002896 SRP_OPT_QUEUE_SIZE = 1 << 14,
Roland Dreieraef9ec32005-11-02 14:07:13 -08002897 SRP_OPT_ALL = (SRP_OPT_ID_EXT |
2898 SRP_OPT_IOC_GUID |
2899 SRP_OPT_DGID |
2900 SRP_OPT_PKEY |
2901 SRP_OPT_SERVICE_ID),
2902};
2903
Steven Whitehousea447c092008-10-13 10:46:57 +01002904static const match_table_t srp_opt_tokens = {
Vu Pham52fb2b502006-06-17 20:37:31 -07002905 { SRP_OPT_ID_EXT, "id_ext=%s" },
2906 { SRP_OPT_IOC_GUID, "ioc_guid=%s" },
2907 { SRP_OPT_DGID, "dgid=%s" },
2908 { SRP_OPT_PKEY, "pkey=%x" },
2909 { SRP_OPT_SERVICE_ID, "service_id=%s" },
2910 { SRP_OPT_MAX_SECT, "max_sect=%d" },
2911 { SRP_OPT_MAX_CMD_PER_LUN, "max_cmd_per_lun=%d" },
Ramachandra K0c0450db2006-06-17 20:37:38 -07002912 { SRP_OPT_IO_CLASS, "io_class=%x" },
Ishai Rabinovitz01cb9bc2006-10-04 15:28:56 +02002913 { SRP_OPT_INITIATOR_EXT, "initiator_ext=%s" },
David Dillow49248642011-01-14 18:23:24 -05002914 { SRP_OPT_CMD_SG_ENTRIES, "cmd_sg_entries=%u" },
David Dillowc07d4242011-01-16 13:57:10 -05002915 { SRP_OPT_ALLOW_EXT_SG, "allow_ext_sg=%u" },
2916 { SRP_OPT_SG_TABLESIZE, "sg_tablesize=%u" },
Bart Van Assche4b5e5f42013-06-28 14:57:42 +02002917 { SRP_OPT_COMP_VECTOR, "comp_vector=%u" },
Vu Pham7bb312e2013-10-26 14:31:27 +02002918 { SRP_OPT_TL_RETRY_COUNT, "tl_retry_count=%u" },
Bart Van Assche4d73f952013-10-26 14:40:37 +02002919 { SRP_OPT_QUEUE_SIZE, "queue_size=%d" },
Vu Pham52fb2b502006-06-17 20:37:31 -07002920 { SRP_OPT_ERR, NULL }
Roland Dreieraef9ec32005-11-02 14:07:13 -08002921};
2922
2923static int srp_parse_options(const char *buf, struct srp_target_port *target)
2924{
2925 char *options, *sep_opt;
2926 char *p;
2927 char dgid[3];
2928 substring_t args[MAX_OPT_ARGS];
2929 int opt_mask = 0;
2930 int token;
2931 int ret = -EINVAL;
2932 int i;
2933
2934 options = kstrdup(buf, GFP_KERNEL);
2935 if (!options)
2936 return -ENOMEM;
2937
2938 sep_opt = options;
Sagi Grimberg7dcf9c12014-10-19 18:19:02 +03002939 while ((p = strsep(&sep_opt, ",\n")) != NULL) {
Roland Dreieraef9ec32005-11-02 14:07:13 -08002940 if (!*p)
2941 continue;
2942
2943 token = match_token(p, srp_opt_tokens, args);
2944 opt_mask |= token;
2945
2946 switch (token) {
2947 case SRP_OPT_ID_EXT:
2948 p = match_strdup(args);
Ishai Rabinovitza20f3a62007-01-16 17:20:25 +02002949 if (!p) {
2950 ret = -ENOMEM;
2951 goto out;
2952 }
Roland Dreieraef9ec32005-11-02 14:07:13 -08002953 target->id_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
2954 kfree(p);
2955 break;
2956
2957 case SRP_OPT_IOC_GUID:
2958 p = match_strdup(args);
Ishai Rabinovitza20f3a62007-01-16 17:20:25 +02002959 if (!p) {
2960 ret = -ENOMEM;
2961 goto out;
2962 }
Roland Dreieraef9ec32005-11-02 14:07:13 -08002963 target->ioc_guid = cpu_to_be64(simple_strtoull(p, NULL, 16));
2964 kfree(p);
2965 break;
2966
2967 case SRP_OPT_DGID:
2968 p = match_strdup(args);
Ishai Rabinovitza20f3a62007-01-16 17:20:25 +02002969 if (!p) {
2970 ret = -ENOMEM;
2971 goto out;
2972 }
Roland Dreieraef9ec32005-11-02 14:07:13 -08002973 if (strlen(p) != 32) {
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00002974 pr_warn("bad dest GID parameter '%s'\n", p);
Roland Dreierce1823f2006-04-03 09:31:04 -07002975 kfree(p);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002976 goto out;
2977 }
2978
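			/*
			 * The destination GID consists of 32 hex digits;
			 * convert two digits at a time into the 16 raw GID
			 * bytes.
			 */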
2979 for (i = 0; i < 16; ++i) {
Bart Van Assche747fe002014-10-30 14:48:05 +01002980 strlcpy(dgid, p + i * 2, sizeof(dgid));
2981 if (sscanf(dgid, "%hhx",
2982 &target->orig_dgid.raw[i]) < 1) {
2983 ret = -EINVAL;
2984 kfree(p);
2985 goto out;
2986 }
Roland Dreieraef9ec32005-11-02 14:07:13 -08002987 }
Roland Dreierbf17c1c2006-03-20 10:08:25 -08002988 kfree(p);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002989 break;
2990
2991 case SRP_OPT_PKEY:
2992 if (match_hex(args, &token)) {
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00002993 pr_warn("bad P_Key parameter '%s'\n", p);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002994 goto out;
2995 }
Bart Van Assche747fe002014-10-30 14:48:05 +01002996 target->pkey = cpu_to_be16(token);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002997 break;
2998
2999 case SRP_OPT_SERVICE_ID:
3000 p = match_strdup(args);
Ishai Rabinovitza20f3a62007-01-16 17:20:25 +02003001 if (!p) {
3002 ret = -ENOMEM;
3003 goto out;
3004 }
Roland Dreieraef9ec32005-11-02 14:07:13 -08003005 target->service_id = cpu_to_be64(simple_strtoull(p, NULL, 16));
3006 kfree(p);
3007 break;
3008
3009 case SRP_OPT_MAX_SECT:
3010 if (match_int(args, &token)) {
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00003011 pr_warn("bad max sect parameter '%s'\n", p);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003012 goto out;
3013 }
3014 target->scsi_host->max_sectors = token;
3015 break;
3016
Bart Van Assche4d73f952013-10-26 14:40:37 +02003017 case SRP_OPT_QUEUE_SIZE:
3018 if (match_int(args, &token) || token < 1) {
3019 pr_warn("bad queue_size parameter '%s'\n", p);
3020 goto out;
3021 }
3022 target->scsi_host->can_queue = token;
3023 target->queue_size = token + SRP_RSP_SQ_SIZE +
3024 SRP_TSK_MGMT_SQ_SIZE;
3025 if (!(opt_mask & SRP_OPT_MAX_CMD_PER_LUN))
3026 target->scsi_host->cmd_per_lun = token;
3027 break;
3028
Vu Pham52fb2b502006-06-17 20:37:31 -07003029 case SRP_OPT_MAX_CMD_PER_LUN:
Bart Van Assche4d73f952013-10-26 14:40:37 +02003030 if (match_int(args, &token) || token < 1) {
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00003031 pr_warn("bad max cmd_per_lun parameter '%s'\n",
3032 p);
Vu Pham52fb2b502006-06-17 20:37:31 -07003033 goto out;
3034 }
Bart Van Assche4d73f952013-10-26 14:40:37 +02003035 target->scsi_host->cmd_per_lun = token;
Vu Pham52fb2b502006-06-17 20:37:31 -07003036 break;
3037
Ramachandra K0c0450db2006-06-17 20:37:38 -07003038 case SRP_OPT_IO_CLASS:
3039 if (match_hex(args, &token)) {
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00003040 pr_warn("bad IO class parameter '%s'\n", p);
Ramachandra K0c0450db2006-06-17 20:37:38 -07003041 goto out;
3042 }
3043 if (token != SRP_REV10_IB_IO_CLASS &&
3044 token != SRP_REV16A_IB_IO_CLASS) {
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00003045 pr_warn("unknown IO class parameter value %x specified (use %x or %x).\n",
3046 token, SRP_REV10_IB_IO_CLASS,
3047 SRP_REV16A_IB_IO_CLASS);
Ramachandra K0c0450db2006-06-17 20:37:38 -07003048 goto out;
3049 }
3050 target->io_class = token;
3051 break;
3052
Ishai Rabinovitz01cb9bc2006-10-04 15:28:56 +02003053 case SRP_OPT_INITIATOR_EXT:
3054 p = match_strdup(args);
Ishai Rabinovitza20f3a62007-01-16 17:20:25 +02003055 if (!p) {
3056 ret = -ENOMEM;
3057 goto out;
3058 }
Ishai Rabinovitz01cb9bc2006-10-04 15:28:56 +02003059 target->initiator_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
3060 kfree(p);
3061 break;
3062
David Dillow49248642011-01-14 18:23:24 -05003063 case SRP_OPT_CMD_SG_ENTRIES:
3064 if (match_int(args, &token) || token < 1 || token > 255) {
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00003065 pr_warn("bad max cmd_sg_entries parameter '%s'\n",
3066 p);
David Dillow49248642011-01-14 18:23:24 -05003067 goto out;
3068 }
3069 target->cmd_sg_cnt = token;
3070 break;
3071
David Dillowc07d4242011-01-16 13:57:10 -05003072 case SRP_OPT_ALLOW_EXT_SG:
3073 if (match_int(args, &token)) {
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00003074 pr_warn("bad allow_ext_sg parameter '%s'\n", p);
David Dillowc07d4242011-01-16 13:57:10 -05003075 goto out;
3076 }
3077 target->allow_ext_sg = !!token;
3078 break;
3079
3080 case SRP_OPT_SG_TABLESIZE:
3081 if (match_int(args, &token) || token < 1 ||
3082 token > SCSI_MAX_SG_CHAIN_SEGMENTS) {
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00003083 pr_warn("bad max sg_tablesize parameter '%s'\n",
3084 p);
David Dillowc07d4242011-01-16 13:57:10 -05003085 goto out;
3086 }
3087 target->sg_tablesize = token;
3088 break;
3089
Bart Van Assche4b5e5f42013-06-28 14:57:42 +02003090 case SRP_OPT_COMP_VECTOR:
3091 if (match_int(args, &token) || token < 0) {
3092 pr_warn("bad comp_vector parameter '%s'\n", p);
3093 goto out;
3094 }
3095 target->comp_vector = token;
3096 break;
3097
Vu Pham7bb312e2013-10-26 14:31:27 +02003098 case SRP_OPT_TL_RETRY_COUNT:
3099 if (match_int(args, &token) || token < 2 || token > 7) {
3100 pr_warn("bad tl_retry_count parameter '%s' (must be a number between 2 and 7)\n",
3101 p);
3102 goto out;
3103 }
3104 target->tl_retry_count = token;
3105 break;
3106
Roland Dreieraef9ec32005-11-02 14:07:13 -08003107 default:
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00003108 pr_warn("unknown parameter or missing value '%s' in target creation request\n",
3109 p);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003110 goto out;
3111 }
3112 }
3113
3114 if ((opt_mask & SRP_OPT_ALL) == SRP_OPT_ALL)
3115 ret = 0;
3116 else
3117 for (i = 0; i < ARRAY_SIZE(srp_opt_tokens); ++i)
3118 if ((srp_opt_tokens[i].token & SRP_OPT_ALL) &&
3119 !(srp_opt_tokens[i].token & opt_mask))
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00003120 pr_warn("target creation request is missing parameter '%s'\n",
3121 srp_opt_tokens[i].pattern);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003122
Bart Van Assche4d73f952013-10-26 14:40:37 +02003123 if (target->scsi_host->cmd_per_lun > target->scsi_host->can_queue
3124 && (opt_mask & SRP_OPT_MAX_CMD_PER_LUN))
3125 pr_warn("cmd_per_lun = %d > queue_size = %d\n",
3126 target->scsi_host->cmd_per_lun,
3127 target->scsi_host->can_queue);
3128
Roland Dreieraef9ec32005-11-02 14:07:13 -08003129out:
3130 kfree(options);
3131 return ret;
3132}
3133
Tony Jonesee959b02008-02-22 00:13:36 +01003134static ssize_t srp_create_target(struct device *dev,
3135 struct device_attribute *attr,
Roland Dreieraef9ec32005-11-02 14:07:13 -08003136 const char *buf, size_t count)
3137{
3138 struct srp_host *host =
Tony Jonesee959b02008-02-22 00:13:36 +01003139 container_of(dev, struct srp_host, dev);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003140 struct Scsi_Host *target_host;
3141 struct srp_target_port *target;
Bart Van Assche509c07b2014-10-30 14:48:30 +01003142 struct srp_rdma_ch *ch;
Bart Van Assched1b42892014-05-20 15:07:20 +02003143 struct srp_device *srp_dev = host->srp_dev;
3144 struct ib_device *ibdev = srp_dev->dev;
Bart Van Assched92c0da2014-10-06 17:14:36 +02003145 int ret, node_idx, node, cpu, i;
3146 bool multich = false;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003147
3148 target_host = scsi_host_alloc(&srp_template,
3149 sizeof (struct srp_target_port));
3150 if (!target_host)
3151 return -ENOMEM;
3152
David Dillow49248642011-01-14 18:23:24 -05003153 target_host->transportt = ib_srp_transport_template;
Bart Van Asschefd1b6c42011-07-13 09:19:16 -07003154 target_host->max_channel = 0;
3155 target_host->max_id = 1;
Bart Van Assche985aa492015-05-18 13:27:14 +02003156 target_host->max_lun = -1LL;
Arne Redlich3c8edf02006-11-15 12:43:00 +01003157 target_host->max_cmd_len = sizeof ((struct srp_cmd *) (void *) 0L)->cdb;
Roland Dreier5f068992005-11-11 14:06:01 -08003158
Roland Dreieraef9ec32005-11-02 14:07:13 -08003159 target = host_to_target(target_host);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003160
David Dillow49248642011-01-14 18:23:24 -05003161 target->io_class = SRP_REV16A_IB_IO_CLASS;
3162 target->scsi_host = target_host;
3163 target->srp_host = host;
Jason Gunthorpee6bf5f42015-07-30 17:22:22 -06003164 target->lkey = host->srp_dev->pd->local_dma_lkey;
Bart Van Assche03f6fb92015-08-10 17:09:36 -07003165 target->global_mr = host->srp_dev->global_mr;
David Dillow49248642011-01-14 18:23:24 -05003166 target->cmd_sg_cnt = cmd_sg_entries;
David Dillowc07d4242011-01-16 13:57:10 -05003167 target->sg_tablesize = indirect_sg_entries ? : cmd_sg_entries;
3168 target->allow_ext_sg = allow_ext_sg;
Vu Pham7bb312e2013-10-26 14:31:27 +02003169 target->tl_retry_count = 7;
Bart Van Assche4d73f952013-10-26 14:40:37 +02003170 target->queue_size = SRP_DEFAULT_QUEUE_SIZE;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003171
Bart Van Assche34aa6542014-10-30 14:47:22 +01003172 /*
3173	 * Prevent the SCSI host from being removed by srp_remove_target()
3174 * before this function returns.
3175 */
3176 scsi_host_get(target->scsi_host);
3177
Bart Van Assche2d7091b2014-03-14 13:52:45 +01003178 mutex_lock(&host->add_target_mutex);
3179
Roland Dreieraef9ec32005-11-02 14:07:13 -08003180 ret = srp_parse_options(buf, target);
3181 if (ret)
Bart Van Asschefb49c8b2015-05-18 13:23:14 +02003182 goto out;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003183
Bart Van Assche77f2c1a2014-10-02 15:29:25 +02003184 ret = scsi_init_shared_tag_map(target_host, target_host->can_queue);
3185 if (ret)
Bart Van Asschefb49c8b2015-05-18 13:23:14 +02003186 goto out;
Bart Van Assche77f2c1a2014-10-02 15:29:25 +02003187
Bart Van Assche4d73f952013-10-26 14:40:37 +02003188 target->req_ring_size = target->queue_size - SRP_TSK_MGMT_SQ_SIZE;
3189
Bart Van Assche96fc2482013-06-28 14:51:26 +02003190 if (!srp_conn_unique(target->srp_host, target)) {
3191 shost_printk(KERN_INFO, target->scsi_host,
3192 PFX "Already connected to target port with id_ext=%016llx;ioc_guid=%016llx;initiator_ext=%016llx\n",
3193 be64_to_cpu(target->id_ext),
3194 be64_to_cpu(target->ioc_guid),
3195 be64_to_cpu(target->initiator_ext));
3196 ret = -EEXIST;
Bart Van Asschefb49c8b2015-05-18 13:23:14 +02003197 goto out;
Bart Van Assche96fc2482013-06-28 14:51:26 +02003198 }
3199
Bart Van Assche5cfb1782014-05-20 15:08:34 +02003200 if (!srp_dev->has_fmr && !srp_dev->has_fr && !target->allow_ext_sg &&
Bart Van Assched1b42892014-05-20 15:07:20 +02003201 target->cmd_sg_cnt < target->sg_tablesize) {
Bart Van Assche5cfb1782014-05-20 15:08:34 +02003202 pr_warn("No MR pool and no external indirect descriptors, limiting sg_tablesize to cmd_sg_cnt\n");
David Dillowc07d4242011-01-16 13:57:10 -05003203 target->sg_tablesize = target->cmd_sg_cnt;
3204 }
3205
3206 target_host->sg_tablesize = target->sg_tablesize;
3207 target->indirect_size = target->sg_tablesize *
3208 sizeof (struct srp_direct_buf);
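	/*
	 * The largest IU this initiator sends is an SRP_CMD with an indirect
	 * buffer descriptor followed by cmd_sg_cnt descriptor-list entries.
	 */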
David Dillow49248642011-01-14 18:23:24 -05003209 target->max_iu_len = sizeof (struct srp_cmd) +
3210 sizeof (struct srp_indirect_buf) +
3211 target->cmd_sg_cnt * sizeof (struct srp_direct_buf);
3212
Bart Van Asschec1120f82013-10-26 14:35:08 +02003213 INIT_WORK(&target->tl_err_work, srp_tl_err_work);
Bart Van Asscheef6c49d2011-12-26 16:49:18 +00003214 INIT_WORK(&target->remove_work, srp_remove_work);
David Dillow8f26c9f2011-01-14 19:45:50 -05003215 spin_lock_init(&target->lock);
Bart Van Assche747fe002014-10-30 14:48:05 +01003216 ret = ib_query_gid(ibdev, host->port, 0, &target->sgid);
Sagi Grimberg2088ca62014-03-14 13:51:58 +01003217 if (ret)
Bart Van Asschefb49c8b2015-05-18 13:23:14 +02003218 goto out;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003219
Bart Van Assched92c0da2014-10-06 17:14:36 +02003220 ret = -ENOMEM;
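	/*
	 * Worked example for the channel count heuristic below: on a
	 * hypothetical system with two online NUMA nodes, 16 online CPUs and
	 * an HCA exposing 8 completion vectors, and with the ch_count module
	 * parameter left at 0, this evaluates to
	 * max(2, min(min(4 * 2, 8), 16)) = 8 channels.
	 */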
3221 target->ch_count = max_t(unsigned, num_online_nodes(),
3222 min(ch_count ? :
3223 min(4 * num_online_nodes(),
3224 ibdev->num_comp_vectors),
3225 num_online_cpus()));
3226 target->ch = kcalloc(target->ch_count, sizeof(*target->ch),
3227 GFP_KERNEL);
3228 if (!target->ch)
Bart Van Asschefb49c8b2015-05-18 13:23:14 +02003229 goto out;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003230
Bart Van Assched92c0da2014-10-06 17:14:36 +02003231 node_idx = 0;
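	/*
	 * Spread the channels and the HCA completion vectors evenly over the
	 * online NUMA nodes: node n is assigned channels
	 * [n * ch_count / nodes, (n + 1) * ch_count / nodes) and a matching
	 * slice of the completion vectors, rotated by the comp_vector target
	 * option.
	 */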
3232 for_each_online_node(node) {
3233 const int ch_start = (node_idx * target->ch_count /
3234 num_online_nodes());
3235 const int ch_end = ((node_idx + 1) * target->ch_count /
3236 num_online_nodes());
3237 const int cv_start = (node_idx * ibdev->num_comp_vectors /
3238 num_online_nodes() + target->comp_vector)
3239 % ibdev->num_comp_vectors;
3240 const int cv_end = ((node_idx + 1) * ibdev->num_comp_vectors /
3241 num_online_nodes() + target->comp_vector)
3242 % ibdev->num_comp_vectors;
3243 int cpu_idx = 0;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003244
Bart Van Assched92c0da2014-10-06 17:14:36 +02003245 for_each_online_cpu(cpu) {
3246 if (cpu_to_node(cpu) != node)
3247 continue;
3248 if (ch_start + cpu_idx >= ch_end)
3249 continue;
3250 ch = &target->ch[ch_start + cpu_idx];
3251 ch->target = target;
3252 ch->comp_vector = cv_start == cv_end ? cv_start :
3253 cv_start + cpu_idx % (cv_end - cv_start);
3254 spin_lock_init(&ch->lock);
3255 INIT_LIST_HEAD(&ch->free_tx);
3256 ret = srp_new_cm_id(ch);
3257 if (ret)
3258 goto err_disconnect;
3259
3260 ret = srp_create_ch_ib(ch);
3261 if (ret)
3262 goto err_disconnect;
3263
3264 ret = srp_alloc_req_data(ch);
3265 if (ret)
3266 goto err_disconnect;
3267
3268 ret = srp_connect_ch(ch, multich);
3269 if (ret) {
3270 shost_printk(KERN_ERR, target->scsi_host,
3271 PFX "Connection %d/%d failed\n",
3272 ch_start + cpu_idx,
3273 target->ch_count);
3274 if (node_idx == 0 && cpu_idx == 0) {
3275 goto err_disconnect;
3276 } else {
3277 srp_free_ch_ib(target, ch);
3278 srp_free_req_data(target, ch);
3279 target->ch_count = ch - target->ch;
Bart Van Asschec257ea62015-07-31 14:13:22 -07003280 goto connected;
Bart Van Assched92c0da2014-10-06 17:14:36 +02003281 }
3282 }
3283
3284 multich = true;
3285 cpu_idx++;
3286 }
3287 node_idx++;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003288 }
3289
Bart Van Asschec257ea62015-07-31 14:13:22 -07003290connected:
Bart Van Assched92c0da2014-10-06 17:14:36 +02003291 target->scsi_host->nr_hw_queues = target->ch_count;
3292
Roland Dreieraef9ec32005-11-02 14:07:13 -08003293 ret = srp_add_target(host, target);
3294 if (ret)
3295 goto err_disconnect;
3296
Bart Van Assche34aa6542014-10-30 14:47:22 +01003297 if (target->state != SRP_TARGET_REMOVED) {
3298 shost_printk(KERN_DEBUG, target->scsi_host, PFX
3299 "new target: id_ext %016llx ioc_guid %016llx pkey %04x service_id %016llx sgid %pI6 dgid %pI6\n",
3300 be64_to_cpu(target->id_ext),
3301 be64_to_cpu(target->ioc_guid),
Bart Van Assche747fe002014-10-30 14:48:05 +01003302 be16_to_cpu(target->pkey),
Bart Van Assche34aa6542014-10-30 14:47:22 +01003303 be64_to_cpu(target->service_id),
Bart Van Assche747fe002014-10-30 14:48:05 +01003304 target->sgid.raw, target->orig_dgid.raw);
Bart Van Assche34aa6542014-10-30 14:47:22 +01003305 }
Bart Van Asschee7ffde02014-03-14 13:52:21 +01003306
Bart Van Assche2d7091b2014-03-14 13:52:45 +01003307 ret = count;
3308
3309out:
3310 mutex_unlock(&host->add_target_mutex);
Bart Van Assche34aa6542014-10-30 14:47:22 +01003311
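	/*
	 * Drop the reference taken near the top of this function. Upon
	 * failure also drop the reference taken by scsi_host_alloc(), since
	 * in that case no asynchronously scheduled target removal will do so.
	 */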
3312 scsi_host_put(target->scsi_host);
Bart Van Asschebc44bd12015-08-14 11:01:09 -07003313 if (ret < 0)
3314 scsi_host_put(target->scsi_host);
Bart Van Assche34aa6542014-10-30 14:47:22 +01003315
Bart Van Assche2d7091b2014-03-14 13:52:45 +01003316 return ret;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003317
3318err_disconnect:
3319 srp_disconnect_target(target);
3320
Bart Van Assched92c0da2014-10-06 17:14:36 +02003321 for (i = 0; i < target->ch_count; i++) {
3322 ch = &target->ch[i];
3323 srp_free_ch_ib(target, ch);
3324 srp_free_req_data(target, ch);
3325 }
Roland Dreieraef9ec32005-11-02 14:07:13 -08003326
Bart Van Assched92c0da2014-10-06 17:14:36 +02003327 kfree(target->ch);
Bart Van Assche2d7091b2014-03-14 13:52:45 +01003328 goto out;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003329}
3330
Tony Jonesee959b02008-02-22 00:13:36 +01003331static DEVICE_ATTR(add_target, S_IWUSR, NULL, srp_create_target);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003332
Tony Jonesee959b02008-02-22 00:13:36 +01003333static ssize_t show_ibdev(struct device *dev, struct device_attribute *attr,
3334 char *buf)
Roland Dreieraef9ec32005-11-02 14:07:13 -08003335{
Tony Jonesee959b02008-02-22 00:13:36 +01003336 struct srp_host *host = container_of(dev, struct srp_host, dev);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003337
Greg Kroah-Hartman05321932008-03-06 00:13:36 +01003338 return sprintf(buf, "%s\n", host->srp_dev->dev->name);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003339}
3340
Tony Jonesee959b02008-02-22 00:13:36 +01003341static DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003342
Tony Jonesee959b02008-02-22 00:13:36 +01003343static ssize_t show_port(struct device *dev, struct device_attribute *attr,
3344 char *buf)
Roland Dreieraef9ec32005-11-02 14:07:13 -08003345{
Tony Jonesee959b02008-02-22 00:13:36 +01003346 struct srp_host *host = container_of(dev, struct srp_host, dev);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003347
3348 return sprintf(buf, "%d\n", host->port);
3349}
3350
Tony Jonesee959b02008-02-22 00:13:36 +01003351static DEVICE_ATTR(port, S_IRUGO, show_port, NULL);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003352
Roland Dreierf5358a12006-06-17 20:37:29 -07003353static struct srp_host *srp_add_port(struct srp_device *device, u8 port)
Roland Dreieraef9ec32005-11-02 14:07:13 -08003354{
3355 struct srp_host *host;
3356
3357 host = kzalloc(sizeof *host, GFP_KERNEL);
3358 if (!host)
3359 return NULL;
3360
3361 INIT_LIST_HEAD(&host->target_list);
Matthew Wilcoxb3589fd2006-06-17 20:37:30 -07003362 spin_lock_init(&host->target_lock);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003363 init_completion(&host->released);
Bart Van Assche2d7091b2014-03-14 13:52:45 +01003364 mutex_init(&host->add_target_mutex);
Greg Kroah-Hartman05321932008-03-06 00:13:36 +01003365 host->srp_dev = device;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003366 host->port = port;
3367
Tony Jonesee959b02008-02-22 00:13:36 +01003368 host->dev.class = &srp_class;
3369 host->dev.parent = device->dev->dma_device;
Kay Sieversd927e382009-01-06 10:44:39 -08003370 dev_set_name(&host->dev, "srp-%s-%d", device->dev->name, port);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003371
Tony Jonesee959b02008-02-22 00:13:36 +01003372 if (device_register(&host->dev))
Roland Dreierf5358a12006-06-17 20:37:29 -07003373 goto free_host;
Tony Jonesee959b02008-02-22 00:13:36 +01003374 if (device_create_file(&host->dev, &dev_attr_add_target))
Roland Dreieraef9ec32005-11-02 14:07:13 -08003375 goto err_class;
Tony Jonesee959b02008-02-22 00:13:36 +01003376 if (device_create_file(&host->dev, &dev_attr_ibdev))
Roland Dreieraef9ec32005-11-02 14:07:13 -08003377 goto err_class;
Tony Jonesee959b02008-02-22 00:13:36 +01003378 if (device_create_file(&host->dev, &dev_attr_port))
Roland Dreieraef9ec32005-11-02 14:07:13 -08003379 goto err_class;
3380
3381 return host;
3382
3383err_class:
Tony Jonesee959b02008-02-22 00:13:36 +01003384 device_unregister(&host->dev);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003385
Roland Dreierf5358a12006-06-17 20:37:29 -07003386free_host:
Roland Dreieraef9ec32005-11-02 14:07:13 -08003387 kfree(host);
3388
3389 return NULL;
3390}
3391
3392static void srp_add_one(struct ib_device *device)
3393{
Roland Dreierf5358a12006-06-17 20:37:29 -07003394 struct srp_device *srp_dev;
3395 struct ib_device_attr *dev_attr;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003396 struct srp_host *host;
Hal Rosenstock41390322015-06-29 09:57:00 -04003397 int mr_page_shift, p;
Bart Van Assche52ede082014-05-20 15:07:45 +02003398 u64 max_pages_per_mr;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003399
Roland Dreierf5358a12006-06-17 20:37:29 -07003400 dev_attr = kmalloc(sizeof *dev_attr, GFP_KERNEL);
3401 if (!dev_attr)
Sean Heftycf311cd2006-01-10 07:39:34 -08003402 return;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003403
Roland Dreierf5358a12006-06-17 20:37:29 -07003404 if (ib_query_device(device, dev_attr)) {
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00003405 pr_warn("Query device failed for %s\n", device->name);
Roland Dreierf5358a12006-06-17 20:37:29 -07003406 goto free_attr;
3407 }
3408
3409 srp_dev = kmalloc(sizeof *srp_dev, GFP_KERNEL);
3410 if (!srp_dev)
3411 goto free_attr;
3412
Bart Van Assched1b42892014-05-20 15:07:20 +02003413 srp_dev->has_fmr = (device->alloc_fmr && device->dealloc_fmr &&
3414 device->map_phys_fmr && device->unmap_fmr);
Bart Van Assche5cfb1782014-05-20 15:08:34 +02003415 srp_dev->has_fr = (dev_attr->device_cap_flags &
3416 IB_DEVICE_MEM_MGT_EXTENSIONS);
3417 if (!srp_dev->has_fmr && !srp_dev->has_fr)
3418 dev_warn(&device->dev, "neither FMR nor FR is supported\n");
3419
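	/*
	 * Fast registration is used whenever the HCA supports it, unless the
	 * prefer_fr module parameter has been cleared and FMR is also
	 * available; FMR is the fallback.
	 */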
3420 srp_dev->use_fast_reg = (srp_dev->has_fr &&
3421 (!srp_dev->has_fmr || prefer_fr));
Bart Van Assche002f1562015-08-10 17:08:44 -07003422 srp_dev->use_fmr = !srp_dev->use_fast_reg && srp_dev->has_fmr;
Bart Van Assched1b42892014-05-20 15:07:20 +02003423
Roland Dreierf5358a12006-06-17 20:37:29 -07003424 /*
3425 * Use the smallest page size supported by the HCA, down to a
David Dillow8f26c9f2011-01-14 19:45:50 -05003426 * minimum of 4096 bytes. We're unlikely to build large sglists
3427 * out of smaller entries.
Roland Dreierf5358a12006-06-17 20:37:29 -07003428 */
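	/*
	 * E.g. an HCA that reports page_size_cap = 0x1000 (4 KiB pages only)
	 * yields mr_page_shift = max(12, ffs(0x1000) - 1) = 12, i.e. a
	 * 4096-byte MR page size and a page mask of ~0xfffULL.
	 */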
Bart Van Assche52ede082014-05-20 15:07:45 +02003429 mr_page_shift = max(12, ffs(dev_attr->page_size_cap) - 1);
3430 srp_dev->mr_page_size = 1 << mr_page_shift;
3431 srp_dev->mr_page_mask = ~((u64) srp_dev->mr_page_size - 1);
3432 max_pages_per_mr = dev_attr->max_mr_size;
3433 do_div(max_pages_per_mr, srp_dev->mr_page_size);
3434 srp_dev->max_pages_per_mr = min_t(u64, SRP_MAX_PAGES_PER_MR,
3435 max_pages_per_mr);
Bart Van Assche5cfb1782014-05-20 15:08:34 +02003436 if (srp_dev->use_fast_reg) {
3437 srp_dev->max_pages_per_mr =
3438 min_t(u32, srp_dev->max_pages_per_mr,
3439 dev_attr->max_fast_reg_page_list_len);
3440 }
Bart Van Assche52ede082014-05-20 15:07:45 +02003441 srp_dev->mr_max_size = srp_dev->mr_page_size *
3442 srp_dev->max_pages_per_mr;
Bart Van Assche5cfb1782014-05-20 15:08:34 +02003443 pr_debug("%s: mr_page_shift = %d, dev_attr->max_mr_size = %#llx, dev_attr->max_fast_reg_page_list_len = %u, max_pages_per_mr = %d, mr_max_size = %#x\n",
Bart Van Assche52ede082014-05-20 15:07:45 +02003444 device->name, mr_page_shift, dev_attr->max_mr_size,
Bart Van Assche5cfb1782014-05-20 15:08:34 +02003445 dev_attr->max_fast_reg_page_list_len,
Bart Van Assche52ede082014-05-20 15:07:45 +02003446 srp_dev->max_pages_per_mr, srp_dev->mr_max_size);
Roland Dreierf5358a12006-06-17 20:37:29 -07003447
3448 INIT_LIST_HEAD(&srp_dev->dev_list);
3449
3450 srp_dev->dev = device;
3451 srp_dev->pd = ib_alloc_pd(device);
3452 if (IS_ERR(srp_dev->pd))
3453 goto free_dev;
3454
Bart Van Assche03f6fb92015-08-10 17:09:36 -07003455 if (!register_always || (!srp_dev->has_fmr && !srp_dev->has_fr)) {
3456 srp_dev->global_mr = ib_get_dma_mr(srp_dev->pd,
3457 IB_ACCESS_LOCAL_WRITE |
3458 IB_ACCESS_REMOTE_READ |
3459 IB_ACCESS_REMOTE_WRITE);
3460 if (IS_ERR(srp_dev->global_mr))
3461 goto err_pd;
3462 } else {
3463 srp_dev->global_mr = NULL;
3464 }
Roland Dreierf5358a12006-06-17 20:37:29 -07003465
Hal Rosenstock41390322015-06-29 09:57:00 -04003466 for (p = rdma_start_port(device); p <= rdma_end_port(device); ++p) {
Roland Dreierf5358a12006-06-17 20:37:29 -07003467 host = srp_add_port(srp_dev, p);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003468 if (host)
Roland Dreierf5358a12006-06-17 20:37:29 -07003469 list_add_tail(&host->list, &srp_dev->dev_list);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003470 }
3471
Roland Dreierf5358a12006-06-17 20:37:29 -07003472 ib_set_client_data(device, &srp_client, srp_dev);
3473
3474 goto free_attr;
3475
3476err_pd:
3477 ib_dealloc_pd(srp_dev->pd);
3478
3479free_dev:
3480 kfree(srp_dev);
3481
3482free_attr:
3483 kfree(dev_attr);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003484}
3485
Haggai Eran7c1eb452015-07-30 17:50:14 +03003486static void srp_remove_one(struct ib_device *device, void *client_data)
Roland Dreieraef9ec32005-11-02 14:07:13 -08003487{
Roland Dreierf5358a12006-06-17 20:37:29 -07003488 struct srp_device *srp_dev;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003489 struct srp_host *host, *tmp_host;
Bart Van Asscheef6c49d2011-12-26 16:49:18 +00003490 struct srp_target_port *target;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003491
Haggai Eran7c1eb452015-07-30 17:50:14 +03003492 srp_dev = client_data;
Dotan Barak1fe0cb82013-06-12 15:20:36 +02003493 if (!srp_dev)
3494 return;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003495
Roland Dreierf5358a12006-06-17 20:37:29 -07003496 list_for_each_entry_safe(host, tmp_host, &srp_dev->dev_list, list) {
Tony Jonesee959b02008-02-22 00:13:36 +01003497 device_unregister(&host->dev);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003498 /*
3499 * Wait for the sysfs entry to go away, so that no new
3500 * target ports can be created.
3501 */
3502 wait_for_completion(&host->released);
3503
3504 /*
Bart Van Asscheef6c49d2011-12-26 16:49:18 +00003505 * Remove all target ports.
Roland Dreieraef9ec32005-11-02 14:07:13 -08003506 */
Matthew Wilcoxb3589fd2006-06-17 20:37:30 -07003507 spin_lock(&host->target_lock);
Bart Van Asscheef6c49d2011-12-26 16:49:18 +00003508 list_for_each_entry(target, &host->target_list, list)
3509 srp_queue_remove_work(target);
Matthew Wilcoxb3589fd2006-06-17 20:37:30 -07003510 spin_unlock(&host->target_lock);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003511
3512 /*
Bart Van Asschebcc05912014-07-09 15:57:26 +02003513 * Wait for tl_err and target port removal tasks.
Roland Dreieraef9ec32005-11-02 14:07:13 -08003514 */
Bart Van Asscheef6c49d2011-12-26 16:49:18 +00003515 flush_workqueue(system_long_wq);
Bart Van Asschebcc05912014-07-09 15:57:26 +02003516 flush_workqueue(srp_remove_wq);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003517
Roland Dreieraef9ec32005-11-02 14:07:13 -08003518 kfree(host);
3519 }
3520
	if (srp_dev->global_mr)
		ib_dereg_mr(srp_dev->global_mr);
	ib_dealloc_pd(srp_dev->pd);

	kfree(srp_dev);
}

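/*
 * Hooks for the SRP transport class (scsi_transport_srp): rport state
 * handling, the reconnect/fast_io_fail/dev_loss timeouts, and the
 * callbacks used to reconnect or terminate I/O for a remote port.
 */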
static struct srp_function_template ib_srp_transport_functions = {
	.has_rport_state	 = true,
	.reset_timer_if_blocked	 = true,
	.reconnect_delay	 = &srp_reconnect_delay,
	.fast_io_fail_tmo	 = &srp_fast_io_fail_tmo,
	.dev_loss_tmo		 = &srp_dev_loss_tmo,
	.reconnect		 = srp_rport_reconnect,
	.rport_delete		 = srp_rport_delete,
	.terminate_rport_io	 = srp_terminate_io,
};

static int __init srp_init_module(void)
{
	int ret;

	BUILD_BUG_ON(FIELD_SIZEOF(struct ib_wc, wr_id) < sizeof(void *));

	if (srp_sg_tablesize) {
		pr_warn("srp_sg_tablesize is deprecated, please use cmd_sg_entries\n");
		if (!cmd_sg_entries)
			cmd_sg_entries = srp_sg_tablesize;
	}

	if (!cmd_sg_entries)
		cmd_sg_entries = SRP_DEF_SG_TABLESIZE;

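	/*
	 * SRP_CMD carries the data buffer descriptor counts in single
	 * bytes (data_out_desc_cnt/data_in_desc_cnt), so more than 255
	 * scatter/gather entries per command cannot be expressed on the
	 * wire.
	 */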
	if (cmd_sg_entries > 255) {
		pr_warn("Clamping cmd_sg_entries to 255\n");
		cmd_sg_entries = 255;
	}

	if (!indirect_sg_entries)
		indirect_sg_entries = cmd_sg_entries;
	else if (indirect_sg_entries < cmd_sg_entries) {
		pr_warn("Bumping up indirect_sg_entries to match cmd_sg_entries (%u)\n",
			cmd_sg_entries);
		indirect_sg_entries = cmd_sg_entries;
	}

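	/*
	 * Target removal work runs on its own workqueue;
	 * srp_remove_one() flushes it to wait for queued removals to
	 * finish.
	 */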
	srp_remove_wq = create_workqueue("srp_remove");
	if (!srp_remove_wq) {
		ret = -ENOMEM;
		goto out;
	}

	ret = -ENOMEM;
	ib_srp_transport_template =
		srp_attach_transport(&ib_srp_transport_functions);
	if (!ib_srp_transport_template)
		goto destroy_wq;

	ret = class_register(&srp_class);
	if (ret) {
		pr_err("couldn't register class infiniband_srp\n");
		goto release_tr;
	}

	ib_sa_register_client(&srp_sa_client);

	ret = ib_register_client(&srp_client);
	if (ret) {
		pr_err("couldn't register IB client\n");
		goto unreg_sa;
	}

out:
	return ret;

unreg_sa:
	ib_sa_unregister_client(&srp_sa_client);
	class_unregister(&srp_class);

release_tr:
	srp_release_transport(ib_srp_transport_template);

destroy_wq:
	destroy_workqueue(srp_remove_wq);
	goto out;
}

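/*
 * Undo srp_init_module() in reverse order.  Unregistering the IB client
 * first invokes srp_remove_one() for every device, so all hosts and
 * targets are gone before the transport class and workqueue go away.
 */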
static void __exit srp_cleanup_module(void)
{
	ib_unregister_client(&srp_client);
	ib_sa_unregister_client(&srp_sa_client);
	class_unregister(&srp_class);
	srp_release_transport(ib_srp_transport_template);
	destroy_workqueue(srp_remove_wq);
}

module_init(srp_init_module);
module_exit(srp_cleanup_module);