blob: aac844a6eef630a0996d7070fca10927a3920a60 [file] [log] [blame]
Roland Dreieraef9ec32005-11-02 14:07:13 -08001/*
2 * Copyright (c) 2005 Cisco Systems. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
Roland Dreieraef9ec32005-11-02 14:07:13 -080031 */
32
Joe Perchesd236cd02013-02-01 14:33:58 -080033#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
Bart Van Asschee0bda7d2012-01-14 12:39:44 +000034
Roland Dreieraef9ec32005-11-02 14:07:13 -080035#include <linux/module.h>
36#include <linux/init.h>
37#include <linux/slab.h>
38#include <linux/err.h>
39#include <linux/string.h>
40#include <linux/parser.h>
41#include <linux/random.h>
Tim Schmielaude259682006-01-08 01:02:05 -080042#include <linux/jiffies.h>
Roland Dreieraef9ec32005-11-02 14:07:13 -080043
Arun Sharma600634972011-07-26 16:09:06 -070044#include <linux/atomic.h>
Roland Dreieraef9ec32005-11-02 14:07:13 -080045
46#include <scsi/scsi.h>
47#include <scsi/scsi_device.h>
48#include <scsi/scsi_dbg.h>
Jack Wang71444b92013-11-07 11:37:37 +010049#include <scsi/scsi_tcq.h>
Roland Dreieraef9ec32005-11-02 14:07:13 -080050#include <scsi/srp.h>
FUJITA Tomonori32368222007-06-27 16:33:12 +090051#include <scsi/scsi_transport_srp.h>
Roland Dreieraef9ec32005-11-02 14:07:13 -080052
Roland Dreieraef9ec32005-11-02 14:07:13 -080053#include "ib_srp.h"
54
55#define DRV_NAME "ib_srp"
56#define PFX DRV_NAME ": "
Vu Phame8ca4132013-06-28 14:59:08 +020057#define DRV_VERSION "1.0"
58#define DRV_RELDATE "July 1, 2013"
Roland Dreieraef9ec32005-11-02 14:07:13 -080059
60MODULE_AUTHOR("Roland Dreier");
61MODULE_DESCRIPTION("InfiniBand SCSI RDMA Protocol initiator "
62 "v" DRV_VERSION " (" DRV_RELDATE ")");
63MODULE_LICENSE("Dual BSD/GPL");
64
David Dillow49248642011-01-14 18:23:24 -050065static unsigned int srp_sg_tablesize;
66static unsigned int cmd_sg_entries;
David Dillowc07d4242011-01-16 13:57:10 -050067static unsigned int indirect_sg_entries;
68static bool allow_ext_sg;
Bart Van Assche5cfb1782014-05-20 15:08:34 +020069static bool prefer_fr;
Bart Van Asscheb1b88542014-05-20 15:06:41 +020070static bool register_always;
Roland Dreieraef9ec32005-11-02 14:07:13 -080071static int topspin_workarounds = 1;
72
David Dillow49248642011-01-14 18:23:24 -050073module_param(srp_sg_tablesize, uint, 0444);
74MODULE_PARM_DESC(srp_sg_tablesize, "Deprecated name for cmd_sg_entries");
75
76module_param(cmd_sg_entries, uint, 0444);
77MODULE_PARM_DESC(cmd_sg_entries,
78 "Default number of gather/scatter entries in the SRP command (default is 12, max 255)");
79
David Dillowc07d4242011-01-16 13:57:10 -050080module_param(indirect_sg_entries, uint, 0444);
81MODULE_PARM_DESC(indirect_sg_entries,
82 "Default max number of gather/scatter entries (default is 12, max is " __stringify(SCSI_MAX_SG_CHAIN_SEGMENTS) ")");
83
84module_param(allow_ext_sg, bool, 0444);
85MODULE_PARM_DESC(allow_ext_sg,
86 "Default behavior when there are more than cmd_sg_entries S/G entries after mapping; fails the request when false (default false)");
87
Roland Dreieraef9ec32005-11-02 14:07:13 -080088module_param(topspin_workarounds, int, 0444);
89MODULE_PARM_DESC(topspin_workarounds,
90 "Enable workarounds for Topspin/Cisco SRP target bugs if != 0");
91
Bart Van Assche5cfb1782014-05-20 15:08:34 +020092module_param(prefer_fr, bool, 0444);
93MODULE_PARM_DESC(prefer_fr,
94"Whether to use fast registration if both FMR and fast registration are supported");
95
Bart Van Asscheb1b88542014-05-20 15:06:41 +020096module_param(register_always, bool, 0444);
97MODULE_PARM_DESC(register_always,
98 "Use memory registration even for contiguous memory regions");
99
Bart Van Asscheed9b2262013-10-26 14:34:27 +0200100static struct kernel_param_ops srp_tmo_ops;
101
Bart Van Asschea95cadb2013-10-26 14:37:17 +0200102static int srp_reconnect_delay = 10;
103module_param_cb(reconnect_delay, &srp_tmo_ops, &srp_reconnect_delay,
104 S_IRUGO | S_IWUSR);
105MODULE_PARM_DESC(reconnect_delay, "Time between successive reconnect attempts");
106
Bart Van Asscheed9b2262013-10-26 14:34:27 +0200107static int srp_fast_io_fail_tmo = 15;
108module_param_cb(fast_io_fail_tmo, &srp_tmo_ops, &srp_fast_io_fail_tmo,
109 S_IRUGO | S_IWUSR);
110MODULE_PARM_DESC(fast_io_fail_tmo,
111 "Number of seconds between the observation of a transport"
112 " layer error and failing all I/O. \"off\" means that this"
113 " functionality is disabled.");
114
Bart Van Asschea95cadb2013-10-26 14:37:17 +0200115static int srp_dev_loss_tmo = 600;
Bart Van Asscheed9b2262013-10-26 14:34:27 +0200116module_param_cb(dev_loss_tmo, &srp_tmo_ops, &srp_dev_loss_tmo,
117 S_IRUGO | S_IWUSR);
118MODULE_PARM_DESC(dev_loss_tmo,
119 "Maximum number of seconds that the SRP transport should"
120 " insulate transport layer errors. After this time has been"
121 " exceeded the SCSI host is removed. Should be"
122 " between 1 and " __stringify(SCSI_DEVICE_BLOCK_MAX_TIMEOUT)
123 " if fast_io_fail_tmo has not been set. \"off\" means that"
124 " this functionality is disabled.");
125
Bart Van Assched92c0da2014-10-06 17:14:36 +0200126static unsigned ch_count;
127module_param(ch_count, uint, 0444);
128MODULE_PARM_DESC(ch_count,
129 "Number of RDMA channels to use for communication with an SRP target. Using more than one channel improves performance if the HCA supports multiple completion vectors. The default value is the minimum of four times the number of online CPU sockets and the number of completion vectors supported by the HCA.");
130
Roland Dreieraef9ec32005-11-02 14:07:13 -0800131static void srp_add_one(struct ib_device *device);
132static void srp_remove_one(struct ib_device *device);
Bart Van Assche509c07b2014-10-30 14:48:30 +0100133static void srp_recv_completion(struct ib_cq *cq, void *ch_ptr);
134static void srp_send_completion(struct ib_cq *cq, void *ch_ptr);
Roland Dreieraef9ec32005-11-02 14:07:13 -0800135static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event);
136
FUJITA Tomonori32368222007-06-27 16:33:12 +0900137static struct scsi_transport_template *ib_srp_transport_template;
Bart Van Asschebcc05912014-07-09 15:57:26 +0200138static struct workqueue_struct *srp_remove_wq;
FUJITA Tomonori32368222007-06-27 16:33:12 +0900139
Roland Dreieraef9ec32005-11-02 14:07:13 -0800140static struct ib_client srp_client = {
141 .name = "srp",
142 .add = srp_add_one,
143 .remove = srp_remove_one
144};
145
Michael S. Tsirkinc1a0b232006-08-21 16:40:12 -0700146static struct ib_sa_client srp_sa_client;
147
Bart Van Asscheed9b2262013-10-26 14:34:27 +0200148static int srp_tmo_get(char *buffer, const struct kernel_param *kp)
149{
150 int tmo = *(int *)kp->arg;
151
152 if (tmo >= 0)
153 return sprintf(buffer, "%d", tmo);
154 else
155 return sprintf(buffer, "off");
156}
157
/*
 * moduleparam "set" callback shared by the reconnect_delay,
 * fast_io_fail_tmo and dev_loss_tmo parameters.  The literal "off" maps
 * to -1 (disabled).  The new value is only stored after srp_tmo_valid()
 * has confirmed that the three timeouts remain mutually consistent.
 */
static int srp_tmo_set(const char *val, const struct kernel_param *kp)
{
	int tmo, res;

	if (strncmp(val, "off", 3) != 0) {
		res = kstrtoint(val, 0, &tmo);
		if (res)
			goto out;
	} else {
		tmo = -1;	/* -1 means "disabled" */
	}
	/* kp->arg identifies which of the three parameters is being set. */
	if (kp->arg == &srp_reconnect_delay)
		res = srp_tmo_valid(tmo, srp_fast_io_fail_tmo,
				    srp_dev_loss_tmo);
	else if (kp->arg == &srp_fast_io_fail_tmo)
		res = srp_tmo_valid(srp_reconnect_delay, tmo, srp_dev_loss_tmo);
	else
		res = srp_tmo_valid(srp_reconnect_delay, srp_fast_io_fail_tmo,
				    tmo);
	if (res)
		goto out;
	*(int *)kp->arg = tmo;

out:
	return res;
}
184
/*
 * Parameter ops shared by the reconnect_delay, fast_io_fail_tmo and
 * dev_loss_tmo module parameters declared above.
 */
static struct kernel_param_ops srp_tmo_ops = {
	.get = srp_tmo_get,
	.set = srp_tmo_set,
};
189
Roland Dreieraef9ec32005-11-02 14:07:13 -0800190static inline struct srp_target_port *host_to_target(struct Scsi_Host *host)
191{
192 return (struct srp_target_port *) host->hostdata;
193}
194
195static const char *srp_target_info(struct Scsi_Host *host)
196{
197 return host_to_target(host)->target_name;
198}
199
Roland Dreier5d7cbfd2007-08-03 10:45:18 -0700200static int srp_target_is_topspin(struct srp_target_port *target)
201{
202 static const u8 topspin_oui[3] = { 0x00, 0x05, 0xad };
Raghava Kondapalli3d1ff482007-08-03 10:45:18 -0700203 static const u8 cisco_oui[3] = { 0x00, 0x1b, 0x0d };
Roland Dreier5d7cbfd2007-08-03 10:45:18 -0700204
205 return topspin_workarounds &&
Raghava Kondapalli3d1ff482007-08-03 10:45:18 -0700206 (!memcmp(&target->ioc_guid, topspin_oui, sizeof topspin_oui) ||
207 !memcmp(&target->ioc_guid, cisco_oui, sizeof cisco_oui));
Roland Dreier5d7cbfd2007-08-03 10:45:18 -0700208}
209
/*
 * Allocate an information unit: the srp_iu bookkeeping structure, a
 * zeroed data buffer of @size bytes and a DMA mapping of that buffer in
 * the given @direction.  Returns NULL on any failure.
 */
static struct srp_iu *srp_alloc_iu(struct srp_host *host, size_t size,
				   gfp_t gfp_mask,
				   enum dma_data_direction direction)
{
	struct srp_iu *iu;

	iu = kmalloc(sizeof *iu, gfp_mask);
	if (!iu)
		goto out;

	iu->buf = kzalloc(size, gfp_mask);
	if (!iu->buf)
		goto out_free_iu;

	iu->dma = ib_dma_map_single(host->srp_dev->dev, iu->buf, size,
				    direction);
	/* A failed DMA mapping must never be handed to the HCA. */
	if (ib_dma_mapping_error(host->srp_dev->dev, iu->dma))
		goto out_free_buf;

	iu->size = size;
	iu->direction = direction;

	return iu;

out_free_buf:
	kfree(iu->buf);
out_free_iu:
	kfree(iu);
out:
	return NULL;
}
241
242static void srp_free_iu(struct srp_host *host, struct srp_iu *iu)
243{
244 if (!iu)
245 return;
246
Greg Kroah-Hartman05321932008-03-06 00:13:36 +0100247 ib_dma_unmap_single(host->srp_dev->dev, iu->dma, iu->size,
248 iu->direction);
Roland Dreieraef9ec32005-11-02 14:07:13 -0800249 kfree(iu->buf);
250 kfree(iu);
251}
252
/* QP asynchronous event handler: events are only logged, not acted upon. */
static void srp_qp_event(struct ib_event *event, void *context)
{
	pr_debug("QP event %d\n", event->event);
}
257
/*
 * Transition @qp to the INIT state, setting its P_Key index, access
 * flags and port number.  Must succeed before the QP can be moved to
 * RTR/RTS during connection establishment.
 */
static int srp_init_qp(struct srp_target_port *target,
		       struct ib_qp *qp)
{
	struct ib_qp_attr *attr;
	int ret;

	attr = kmalloc(sizeof *attr, GFP_KERNEL);
	if (!attr)
		return -ENOMEM;

	/* Translate the target's P_Key value into the port's table index. */
	ret = ib_find_pkey(target->srp_host->srp_dev->dev,
			   target->srp_host->port,
			   be16_to_cpu(target->pkey),
			   &attr->pkey_index);
	if (ret)
		goto out;

	attr->qp_state = IB_QPS_INIT;
	attr->qp_access_flags = (IB_ACCESS_REMOTE_READ |
				    IB_ACCESS_REMOTE_WRITE);
	attr->port_num = target->srp_host->port;

	ret = ib_modify_qp(qp, attr,
			   IB_QP_STATE |
			   IB_QP_PKEY_INDEX |
			   IB_QP_ACCESS_FLAGS |
			   IB_QP_PORT);

out:
	kfree(attr);
	return ret;
}
290
/*
 * Allocate a new IB CM identifier for channel @ch, destroying and
 * replacing any previous one, and reinitialize the channel's path
 * record fields from the target port so that a subsequent path query
 * and login start from a clean state.
 */
static int srp_new_cm_id(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	struct ib_cm_id *new_cm_id;

	new_cm_id = ib_create_cm_id(target->srp_host->srp_dev->dev,
				    srp_cm_handler, ch);
	if (IS_ERR(new_cm_id))
		return PTR_ERR(new_cm_id);

	/* Only replace the old CM id once the new one exists. */
	if (ch->cm_id)
		ib_destroy_cm_id(ch->cm_id);
	ch->cm_id = new_cm_id;
	ch->path.sgid = target->sgid;
	ch->path.dgid = target->orig_dgid;
	ch->path.pkey = target->pkey;
	ch->path.service_id = target->service_id;

	return 0;
}
311
Bart Van Assched1b42892014-05-20 15:07:20 +0200312static struct ib_fmr_pool *srp_alloc_fmr_pool(struct srp_target_port *target)
313{
314 struct srp_device *dev = target->srp_host->srp_dev;
315 struct ib_fmr_pool_param fmr_param;
316
317 memset(&fmr_param, 0, sizeof(fmr_param));
318 fmr_param.pool_size = target->scsi_host->can_queue;
319 fmr_param.dirty_watermark = fmr_param.pool_size / 4;
320 fmr_param.cache = 1;
Bart Van Assche52ede082014-05-20 15:07:45 +0200321 fmr_param.max_pages_per_fmr = dev->max_pages_per_mr;
322 fmr_param.page_shift = ilog2(dev->mr_page_size);
Bart Van Assched1b42892014-05-20 15:07:20 +0200323 fmr_param.access = (IB_ACCESS_LOCAL_WRITE |
324 IB_ACCESS_REMOTE_WRITE |
325 IB_ACCESS_REMOTE_READ);
326
327 return ib_create_fmr_pool(dev->pd, &fmr_param);
328}
329
Bart Van Assche5cfb1782014-05-20 15:08:34 +0200330/**
331 * srp_destroy_fr_pool() - free the resources owned by a pool
332 * @pool: Fast registration pool to be destroyed.
333 */
334static void srp_destroy_fr_pool(struct srp_fr_pool *pool)
335{
336 int i;
337 struct srp_fr_desc *d;
338
339 if (!pool)
340 return;
341
342 for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
343 if (d->frpl)
344 ib_free_fast_reg_page_list(d->frpl);
345 if (d->mr)
346 ib_dereg_mr(d->mr);
347 }
348 kfree(pool);
349}
350
/**
 * srp_create_fr_pool() - allocate and initialize a pool for fast registration
 * @device: IB device to allocate fast registration descriptors for.
 * @pd: Protection domain associated with the FR descriptors.
 * @pool_size: Number of descriptors to allocate.
 * @max_page_list_len: Maximum fast registration work request page list length.
 *
 * Returns the new pool or an ERR_PTR() on failure.
 */
static struct srp_fr_pool *srp_create_fr_pool(struct ib_device *device,
					      struct ib_pd *pd, int pool_size,
					      int max_page_list_len)
{
	struct srp_fr_pool *pool;
	struct srp_fr_desc *d;
	struct ib_mr *mr;
	struct ib_fast_reg_page_list *frpl;
	int i, ret = -EINVAL;

	if (pool_size <= 0)
		goto err;
	ret = -ENOMEM;
	/* The descriptor array is allocated together with the pool header. */
	pool = kzalloc(sizeof(struct srp_fr_pool) +
		       pool_size * sizeof(struct srp_fr_desc), GFP_KERNEL);
	if (!pool)
		goto err;
	pool->size = pool_size;
	pool->max_page_list_len = max_page_list_len;
	spin_lock_init(&pool->lock);
	INIT_LIST_HEAD(&pool->free_list);

	for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
		mr = ib_alloc_fast_reg_mr(pd, max_page_list_len);
		if (IS_ERR(mr)) {
			ret = PTR_ERR(mr);
			goto destroy_pool;
		}
		d->mr = mr;
		frpl = ib_alloc_fast_reg_page_list(device, max_page_list_len);
		if (IS_ERR(frpl)) {
			ret = PTR_ERR(frpl);
			goto destroy_pool;
		}
		d->frpl = frpl;
		list_add_tail(&d->entry, &pool->free_list);
	}

out:
	return pool;

destroy_pool:
	/* Cleans up the partially populated descriptor array as well. */
	srp_destroy_fr_pool(pool);

err:
	pool = ERR_PTR(ret);
	goto out;
}
406
/**
 * srp_fr_pool_get() - obtain a descriptor suitable for fast registration
 * @pool: Pool to obtain descriptor from.
 *
 * Returns the first free descriptor, or NULL if the pool is exhausted.
 */
static struct srp_fr_desc *srp_fr_pool_get(struct srp_fr_pool *pool)
{
	struct srp_fr_desc *d = NULL;
	unsigned long flags;

	spin_lock_irqsave(&pool->lock, flags);
	if (!list_empty(&pool->free_list)) {
		d = list_first_entry(&pool->free_list, typeof(*d), entry);
		list_del(&d->entry);
	}
	spin_unlock_irqrestore(&pool->lock, flags);

	return d;
}
425
426/**
427 * srp_fr_pool_put() - put an FR descriptor back in the free list
428 * @pool: Pool the descriptor was allocated from.
429 * @desc: Pointer to an array of fast registration descriptor pointers.
430 * @n: Number of descriptors to put back.
431 *
432 * Note: The caller must already have queued an invalidation request for
433 * desc->mr->rkey before calling this function.
434 */
435static void srp_fr_pool_put(struct srp_fr_pool *pool, struct srp_fr_desc **desc,
436 int n)
437{
438 unsigned long flags;
439 int i;
440
441 spin_lock_irqsave(&pool->lock, flags);
442 for (i = 0; i < n; i++)
443 list_add(&desc[i]->entry, &pool->free_list);
444 spin_unlock_irqrestore(&pool->lock, flags);
445}
446
447static struct srp_fr_pool *srp_alloc_fr_pool(struct srp_target_port *target)
448{
449 struct srp_device *dev = target->srp_host->srp_dev;
450
451 return srp_create_fr_pool(dev->dev, dev->pd,
452 target->scsi_host->can_queue,
453 dev->max_pages_per_mr);
454}
455
/*
 * Allocate the IB resources for RDMA channel @ch: receive and send
 * completion queues, the RC queue pair and, depending on the device
 * capabilities, either a fast registration pool or an FMR pool.  Any
 * previously allocated channel resources are only destroyed after their
 * replacements have been created successfully; on failure the channel
 * is left unchanged.
 */
static int srp_create_ch_ib(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_qp_init_attr *init_attr;
	struct ib_cq *recv_cq, *send_cq;
	struct ib_qp *qp;
	struct ib_fmr_pool *fmr_pool = NULL;
	struct srp_fr_pool *fr_pool = NULL;
	/* Fast registration needs an extra work request per SRP request. */
	const int m = 1 + dev->use_fast_reg;
	int ret;

	init_attr = kzalloc(sizeof *init_attr, GFP_KERNEL);
	if (!init_attr)
		return -ENOMEM;

	recv_cq = ib_create_cq(dev->dev, srp_recv_completion, NULL, ch,
			       target->queue_size, ch->comp_vector);
	if (IS_ERR(recv_cq)) {
		ret = PTR_ERR(recv_cq);
		goto err;
	}

	send_cq = ib_create_cq(dev->dev, srp_send_completion, NULL, ch,
			       m * target->queue_size, ch->comp_vector);
	if (IS_ERR(send_cq)) {
		ret = PTR_ERR(send_cq);
		goto err_recv_cq;
	}

	ib_req_notify_cq(recv_cq, IB_CQ_NEXT_COMP);

	init_attr->event_handler = srp_qp_event;
	init_attr->cap.max_send_wr = m * target->queue_size;
	init_attr->cap.max_recv_wr = target->queue_size;
	init_attr->cap.max_recv_sge = 1;
	init_attr->cap.max_send_sge = 1;
	init_attr->sq_sig_type = IB_SIGNAL_REQ_WR;
	init_attr->qp_type = IB_QPT_RC;
	init_attr->send_cq = send_cq;
	init_attr->recv_cq = recv_cq;

	qp = ib_create_qp(dev->pd, init_attr);
	if (IS_ERR(qp)) {
		ret = PTR_ERR(qp);
		goto err_send_cq;
	}

	ret = srp_init_qp(target, qp);
	if (ret)
		goto err_qp;

	if (dev->use_fast_reg && dev->has_fr) {
		fr_pool = srp_alloc_fr_pool(target);
		if (IS_ERR(fr_pool)) {
			ret = PTR_ERR(fr_pool);
			shost_printk(KERN_WARNING, target->scsi_host, PFX
				     "FR pool allocation failed (%d)\n", ret);
			goto err_qp;
		}
		if (ch->fr_pool)
			srp_destroy_fr_pool(ch->fr_pool);
		ch->fr_pool = fr_pool;
	} else if (!dev->use_fast_reg && dev->has_fmr) {
		fmr_pool = srp_alloc_fmr_pool(target);
		if (IS_ERR(fmr_pool)) {
			ret = PTR_ERR(fmr_pool);
			shost_printk(KERN_WARNING, target->scsi_host, PFX
				     "FMR pool allocation failed (%d)\n", ret);
			goto err_qp;
		}
		if (ch->fmr_pool)
			ib_destroy_fmr_pool(ch->fmr_pool);
		ch->fmr_pool = fmr_pool;
	}

	/* All new resources exist; now release the old ones, if any. */
	if (ch->qp)
		ib_destroy_qp(ch->qp);
	if (ch->recv_cq)
		ib_destroy_cq(ch->recv_cq);
	if (ch->send_cq)
		ib_destroy_cq(ch->send_cq);

	ch->qp = qp;
	ch->recv_cq = recv_cq;
	ch->send_cq = send_cq;

	kfree(init_attr);
	return 0;

err_qp:
	ib_destroy_qp(qp);

err_send_cq:
	ib_destroy_cq(send_cq);

err_recv_cq:
	ib_destroy_cq(recv_cq);

err:
	kfree(init_attr);
	return ret;
}
559
/*
 * Free all IB resources owned by channel @ch: memory registration pool,
 * QP, CQs and the receive/transmit IU rings.
 *
 * Note: this function may be called without srp_alloc_iu_bufs() having been
 * invoked. Hence the ch->[rt]x_ring checks.
 */
static void srp_free_ch_ib(struct srp_target_port *target,
			   struct srp_rdma_ch *ch)
{
	struct srp_device *dev = target->srp_host->srp_dev;
	int i;

	/* Channel was never initialized or has already been torn down. */
	if (!ch->target)
		return;

	if (ch->cm_id) {
		ib_destroy_cm_id(ch->cm_id);
		ch->cm_id = NULL;
	}

	/* If srp_new_cm_id() succeeded but srp_create_ch_ib() not, return. */
	if (!ch->qp)
		return;

	if (dev->use_fast_reg) {
		if (ch->fr_pool)
			srp_destroy_fr_pool(ch->fr_pool);
	} else {
		if (ch->fmr_pool)
			ib_destroy_fmr_pool(ch->fmr_pool);
	}
	ib_destroy_qp(ch->qp);
	ib_destroy_cq(ch->send_cq);
	ib_destroy_cq(ch->recv_cq);

	/*
	 * Avoid that the SCSI error handler tries to use this channel after
	 * it has been freed. The SCSI error handler can namely continue
	 * trying to perform recovery actions after scsi_remove_host()
	 * returned.
	 */
	ch->target = NULL;

	ch->qp = NULL;
	ch->send_cq = ch->recv_cq = NULL;

	if (ch->rx_ring) {
		for (i = 0; i < target->queue_size; ++i)
			srp_free_iu(target->srp_host, ch->rx_ring[i]);
		kfree(ch->rx_ring);
		ch->rx_ring = NULL;
	}
	if (ch->tx_ring) {
		for (i = 0; i < target->queue_size; ++i)
			srp_free_iu(target->srp_host, ch->tx_ring[i]);
		kfree(ch->tx_ring);
		ch->tx_ring = NULL;
	}
}
617
618static void srp_path_rec_completion(int status,
619 struct ib_sa_path_rec *pathrec,
Bart Van Assche509c07b2014-10-30 14:48:30 +0100620 void *ch_ptr)
Roland Dreieraef9ec32005-11-02 14:07:13 -0800621{
Bart Van Assche509c07b2014-10-30 14:48:30 +0100622 struct srp_rdma_ch *ch = ch_ptr;
623 struct srp_target_port *target = ch->target;
Roland Dreieraef9ec32005-11-02 14:07:13 -0800624
Bart Van Assche509c07b2014-10-30 14:48:30 +0100625 ch->status = status;
Roland Dreieraef9ec32005-11-02 14:07:13 -0800626 if (status)
David Dillow7aa54bd2008-01-07 18:23:41 -0500627 shost_printk(KERN_ERR, target->scsi_host,
628 PFX "Got failed path rec status %d\n", status);
Roland Dreieraef9ec32005-11-02 14:07:13 -0800629 else
Bart Van Assche509c07b2014-10-30 14:48:30 +0100630 ch->path = *pathrec;
631 complete(&ch->done);
Roland Dreieraef9ec32005-11-02 14:07:13 -0800632}
633
/*
 * Issue an SA path record query for channel @ch and wait interruptibly
 * for srp_path_rec_completion() to report the result.  Returns 0 on
 * success or a negative error code if the query could not be issued,
 * was interrupted, or failed.
 */
static int srp_lookup_path(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	int ret;

	ch->path.numb_path = 1;

	init_completion(&ch->done);

	ch->path_query_id = ib_sa_path_rec_get(&srp_sa_client,
					       target->srp_host->srp_dev->dev,
					       target->srp_host->port,
					       &ch->path,
					       IB_SA_PATH_REC_SERVICE_ID |
					       IB_SA_PATH_REC_DGID |
					       IB_SA_PATH_REC_SGID |
					       IB_SA_PATH_REC_NUMB_PATH |
					       IB_SA_PATH_REC_PKEY,
					       SRP_PATH_REC_TIMEOUT_MS,
					       GFP_KERNEL,
					       srp_path_rec_completion,
					       ch, &ch->path_query);
	if (ch->path_query_id < 0)
		return ch->path_query_id;

	ret = wait_for_completion_interruptible(&ch->done);
	if (ret < 0)
		return ret;

	if (ch->status < 0)
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Path record query failed\n");

	return ch->status;
}
669
/*
 * Build and send an SRP_LOGIN_REQ through the channel's CM id.
 * @multich selects the SRP multi-channel bit so that additional
 * channels attach to the existing I_T nexus instead of resetting it.
 * Returns the ib_send_cm_req() status or -ENOMEM.
 */
static int srp_send_req(struct srp_rdma_ch *ch, bool multich)
{
	struct srp_target_port *target = ch->target;
	struct {
		struct ib_cm_req_param param;
		struct srp_login_req priv;
	} *req = NULL;
	int status;

	req = kzalloc(sizeof *req, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	req->param.primary_path = &ch->path;
	req->param.alternate_path = NULL;
	req->param.service_id = target->service_id;
	req->param.qp_num = ch->qp->qp_num;
	req->param.qp_type = ch->qp->qp_type;
	req->param.private_data = &req->priv;
	req->param.private_data_len = sizeof req->priv;
	req->param.flow_control = 1;

	get_random_bytes(&req->param.starting_psn, 4);
	req->param.starting_psn &= 0xffffff;

	/*
	 * Pick some arbitrary defaults here; we could make these
	 * module parameters if anyone cared about setting them.
	 */
	req->param.responder_resources = 4;
	req->param.remote_cm_response_timeout = 20;
	req->param.local_cm_response_timeout = 20;
	req->param.retry_count = target->tl_retry_count;
	req->param.rnr_retry_count = 7;
	req->param.max_cm_retries = 15;

	req->priv.opcode = SRP_LOGIN_REQ;
	req->priv.tag = 0;
	req->priv.req_it_iu_len = cpu_to_be32(target->max_iu_len);
	req->priv.req_buf_fmt = cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
					    SRP_BUF_FORMAT_INDIRECT);
	req->priv.req_flags = (multich ? SRP_MULTICHAN_MULTI :
				   SRP_MULTICHAN_SINGLE);
	/*
	 * In the published SRP specification (draft rev. 16a), the
	 * port identifier format is 8 bytes of ID extension followed
	 * by 8 bytes of GUID. Older drafts put the two halves in the
	 * opposite order, so that the GUID comes first.
	 *
	 * Targets conforming to these obsolete drafts can be
	 * recognized by the I/O Class they report.
	 */
	if (target->io_class == SRP_REV10_IB_IO_CLASS) {
		memcpy(req->priv.initiator_port_id,
		       &target->sgid.global.interface_id, 8);
		memcpy(req->priv.initiator_port_id + 8,
		       &target->initiator_ext, 8);
		memcpy(req->priv.target_port_id, &target->ioc_guid, 8);
		memcpy(req->priv.target_port_id + 8, &target->id_ext, 8);
	} else {
		memcpy(req->priv.initiator_port_id,
		       &target->initiator_ext, 8);
		memcpy(req->priv.initiator_port_id + 8,
		       &target->sgid.global.interface_id, 8);
		memcpy(req->priv.target_port_id, &target->id_ext, 8);
		memcpy(req->priv.target_port_id + 8, &target->ioc_guid, 8);
	}

	/*
	 * Topspin/Cisco SRP targets will reject our login unless we
	 * zero out the first 8 bytes of our initiator port ID and set
	 * the second 8 bytes to the local node GUID.
	 */
	if (srp_target_is_topspin(target)) {
		shost_printk(KERN_DEBUG, target->scsi_host,
			     PFX "Topspin/Cisco initiator port ID workaround "
			     "activated for target GUID %016llx\n",
			     (unsigned long long) be64_to_cpu(target->ioc_guid));
		memset(req->priv.initiator_port_id, 0, 8);
		memcpy(req->priv.initiator_port_id + 8,
		       &target->srp_host->srp_dev->dev->node_guid, 8);
	}

	status = ib_send_cm_req(ch->cm_id, &req->param);

	kfree(req);

	return status;
}
759
Bart Van Asscheef6c49d2011-12-26 16:49:18 +0000760static bool srp_queue_remove_work(struct srp_target_port *target)
761{
762 bool changed = false;
763
764 spin_lock_irq(&target->lock);
765 if (target->state != SRP_TARGET_REMOVED) {
766 target->state = SRP_TARGET_REMOVED;
767 changed = true;
768 }
769 spin_unlock_irq(&target->lock);
770
771 if (changed)
Bart Van Asschebcc05912014-07-09 15:57:26 +0200772 queue_work(srp_remove_wq, &target->remove_work);
Bart Van Asscheef6c49d2011-12-26 16:49:18 +0000773
774 return changed;
775}
776
Bart Van Assche294c8752011-12-25 12:18:12 +0000777static bool srp_change_conn_state(struct srp_target_port *target,
778 bool connected)
779{
780 bool changed = false;
781
782 spin_lock_irq(&target->lock);
783 if (target->connected != connected) {
784 target->connected = connected;
785 changed = true;
786 }
787 spin_unlock_irq(&target->lock);
788
789 return changed;
790}
791
Roland Dreieraef9ec32005-11-02 14:07:13 -0800792static void srp_disconnect_target(struct srp_target_port *target)
793{
Bart Van Assched92c0da2014-10-06 17:14:36 +0200794 struct srp_rdma_ch *ch;
795 int i;
Bart Van Assche509c07b2014-10-30 14:48:30 +0100796
Bart Van Assche294c8752011-12-25 12:18:12 +0000797 if (srp_change_conn_state(target, false)) {
798 /* XXX should send SRP_I_LOGOUT request */
Roland Dreieraef9ec32005-11-02 14:07:13 -0800799
Bart Van Assched92c0da2014-10-06 17:14:36 +0200800 for (i = 0; i < target->ch_count; i++) {
801 ch = &target->ch[i];
802 if (ch->cm_id && ib_send_cm_dreq(ch->cm_id, NULL, 0)) {
803 shost_printk(KERN_DEBUG, target->scsi_host,
804 PFX "Sending CM DREQ failed\n");
805 }
Bart Van Assche294c8752011-12-25 12:18:12 +0000806 }
Roland Dreiere6581052006-05-17 09:13:21 -0700807 }
Roland Dreieraef9ec32005-11-02 14:07:13 -0800808}
809
Bart Van Assche509c07b2014-10-30 14:48:30 +0100810static void srp_free_req_data(struct srp_target_port *target,
811 struct srp_rdma_ch *ch)
David Dillow8f26c9f2011-01-14 19:45:50 -0500812{
Bart Van Assche5cfb1782014-05-20 15:08:34 +0200813 struct srp_device *dev = target->srp_host->srp_dev;
814 struct ib_device *ibdev = dev->dev;
David Dillow8f26c9f2011-01-14 19:45:50 -0500815 struct srp_request *req;
816 int i;
817
Bart Van Assched92c0da2014-10-06 17:14:36 +0200818 if (!ch->target || !ch->req_ring)
Bart Van Assche4d73f952013-10-26 14:40:37 +0200819 return;
820
821 for (i = 0; i < target->req_ring_size; ++i) {
Bart Van Assche509c07b2014-10-30 14:48:30 +0100822 req = &ch->req_ring[i];
Bart Van Assche5cfb1782014-05-20 15:08:34 +0200823 if (dev->use_fast_reg)
824 kfree(req->fr_list);
825 else
826 kfree(req->fmr_list);
David Dillow8f26c9f2011-01-14 19:45:50 -0500827 kfree(req->map_page);
David Dillowc07d4242011-01-16 13:57:10 -0500828 if (req->indirect_dma_addr) {
829 ib_dma_unmap_single(ibdev, req->indirect_dma_addr,
830 target->indirect_size,
831 DMA_TO_DEVICE);
832 }
833 kfree(req->indirect_desc);
David Dillow8f26c9f2011-01-14 19:45:50 -0500834 }
Bart Van Assche4d73f952013-10-26 14:40:37 +0200835
Bart Van Assche509c07b2014-10-30 14:48:30 +0100836 kfree(ch->req_ring);
837 ch->req_ring = NULL;
David Dillow8f26c9f2011-01-14 19:45:50 -0500838}
839
Bart Van Assche509c07b2014-10-30 14:48:30 +0100840static int srp_alloc_req_data(struct srp_rdma_ch *ch)
Bart Van Asscheb81d00b2013-10-26 14:38:47 +0200841{
Bart Van Assche509c07b2014-10-30 14:48:30 +0100842 struct srp_target_port *target = ch->target;
Bart Van Asscheb81d00b2013-10-26 14:38:47 +0200843 struct srp_device *srp_dev = target->srp_host->srp_dev;
844 struct ib_device *ibdev = srp_dev->dev;
845 struct srp_request *req;
Bart Van Assche5cfb1782014-05-20 15:08:34 +0200846 void *mr_list;
Bart Van Asscheb81d00b2013-10-26 14:38:47 +0200847 dma_addr_t dma_addr;
848 int i, ret = -ENOMEM;
849
Bart Van Assche509c07b2014-10-30 14:48:30 +0100850 ch->req_ring = kcalloc(target->req_ring_size, sizeof(*ch->req_ring),
851 GFP_KERNEL);
852 if (!ch->req_ring)
Bart Van Assche4d73f952013-10-26 14:40:37 +0200853 goto out;
854
855 for (i = 0; i < target->req_ring_size; ++i) {
Bart Van Assche509c07b2014-10-30 14:48:30 +0100856 req = &ch->req_ring[i];
Bart Van Assche5cfb1782014-05-20 15:08:34 +0200857 mr_list = kmalloc(target->cmd_sg_cnt * sizeof(void *),
858 GFP_KERNEL);
859 if (!mr_list)
860 goto out;
861 if (srp_dev->use_fast_reg)
862 req->fr_list = mr_list;
863 else
864 req->fmr_list = mr_list;
Bart Van Assche52ede082014-05-20 15:07:45 +0200865 req->map_page = kmalloc(srp_dev->max_pages_per_mr *
Bart Van Assched1b42892014-05-20 15:07:20 +0200866 sizeof(void *), GFP_KERNEL);
Bart Van Assche5cfb1782014-05-20 15:08:34 +0200867 if (!req->map_page)
868 goto out;
Bart Van Asscheb81d00b2013-10-26 14:38:47 +0200869 req->indirect_desc = kmalloc(target->indirect_size, GFP_KERNEL);
Bart Van Assche5cfb1782014-05-20 15:08:34 +0200870 if (!req->indirect_desc)
Bart Van Asscheb81d00b2013-10-26 14:38:47 +0200871 goto out;
872
873 dma_addr = ib_dma_map_single(ibdev, req->indirect_desc,
874 target->indirect_size,
875 DMA_TO_DEVICE);
876 if (ib_dma_mapping_error(ibdev, dma_addr))
877 goto out;
878
879 req->indirect_dma_addr = dma_addr;
Bart Van Asscheb81d00b2013-10-26 14:38:47 +0200880 }
881 ret = 0;
882
883out:
884 return ret;
885}
886
Bart Van Assche683b1592012-01-14 12:40:44 +0000887/**
888 * srp_del_scsi_host_attr() - Remove attributes defined in the host template.
889 * @shost: SCSI host whose attributes to remove from sysfs.
890 *
891 * Note: Any attributes defined in the host template and that did not exist
892 * before invocation of this function will be ignored.
893 */
894static void srp_del_scsi_host_attr(struct Scsi_Host *shost)
895{
896 struct device_attribute **attr;
897
898 for (attr = shost->hostt->shost_attrs; attr && *attr; ++attr)
899 device_remove_file(&shost->shost_dev, *attr);
900}
901
Bart Van Asscheee12d6a2011-12-25 19:41:07 +0000902static void srp_remove_target(struct srp_target_port *target)
903{
Bart Van Assched92c0da2014-10-06 17:14:36 +0200904 struct srp_rdma_ch *ch;
905 int i;
Bart Van Assche509c07b2014-10-30 14:48:30 +0100906
Bart Van Asscheef6c49d2011-12-26 16:49:18 +0000907 WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);
908
Bart Van Asscheee12d6a2011-12-25 19:41:07 +0000909 srp_del_scsi_host_attr(target->scsi_host);
Bart Van Assche9dd69a62013-10-26 14:32:30 +0200910 srp_rport_get(target->rport);
Bart Van Asscheee12d6a2011-12-25 19:41:07 +0000911 srp_remove_host(target->scsi_host);
912 scsi_remove_host(target->scsi_host);
Bart Van Assche93079162013-12-11 17:06:14 +0100913 srp_stop_rport_timers(target->rport);
Bart Van Asscheef6c49d2011-12-26 16:49:18 +0000914 srp_disconnect_target(target);
Bart Van Assched92c0da2014-10-06 17:14:36 +0200915 for (i = 0; i < target->ch_count; i++) {
916 ch = &target->ch[i];
917 srp_free_ch_ib(target, ch);
918 }
Bart Van Asschec1120f82013-10-26 14:35:08 +0200919 cancel_work_sync(&target->tl_err_work);
Bart Van Assche9dd69a62013-10-26 14:32:30 +0200920 srp_rport_put(target->rport);
Bart Van Assched92c0da2014-10-06 17:14:36 +0200921 for (i = 0; i < target->ch_count; i++) {
922 ch = &target->ch[i];
923 srp_free_req_data(target, ch);
924 }
925 kfree(target->ch);
926 target->ch = NULL;
Vu Pham65d7dd22013-10-10 13:50:29 +0200927
928 spin_lock(&target->srp_host->target_lock);
929 list_del(&target->list);
930 spin_unlock(&target->srp_host->target_lock);
931
Bart Van Asscheee12d6a2011-12-25 19:41:07 +0000932 scsi_host_put(target->scsi_host);
933}
934
David Howellsc4028952006-11-22 14:57:56 +0000935static void srp_remove_work(struct work_struct *work)
Roland Dreieraef9ec32005-11-02 14:07:13 -0800936{
David Howellsc4028952006-11-22 14:57:56 +0000937 struct srp_target_port *target =
Bart Van Asscheef6c49d2011-12-26 16:49:18 +0000938 container_of(work, struct srp_target_port, remove_work);
Roland Dreieraef9ec32005-11-02 14:07:13 -0800939
Bart Van Asscheef6c49d2011-12-26 16:49:18 +0000940 WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);
Roland Dreieraef9ec32005-11-02 14:07:13 -0800941
Bart Van Assche96fc2482013-06-28 14:51:26 +0200942 srp_remove_target(target);
Roland Dreieraef9ec32005-11-02 14:07:13 -0800943}
944
Bart Van Asschedc1bdbd2011-09-16 20:41:13 +0200945static void srp_rport_delete(struct srp_rport *rport)
946{
947 struct srp_target_port *target = rport->lld_data;
948
949 srp_queue_remove_work(target);
950}
951
Bart Van Assched92c0da2014-10-06 17:14:36 +0200952static int srp_connect_ch(struct srp_rdma_ch *ch, bool multich)
Roland Dreieraef9ec32005-11-02 14:07:13 -0800953{
Bart Van Assche509c07b2014-10-30 14:48:30 +0100954 struct srp_target_port *target = ch->target;
Roland Dreieraef9ec32005-11-02 14:07:13 -0800955 int ret;
956
Bart Van Assched92c0da2014-10-06 17:14:36 +0200957 WARN_ON_ONCE(!multich && target->connected);
Bart Van Assche294c8752011-12-25 12:18:12 +0000958
Bart Van Assche948d1e82011-09-03 09:25:42 +0200959 target->qp_in_error = false;
960
Bart Van Assche509c07b2014-10-30 14:48:30 +0100961 ret = srp_lookup_path(ch);
Roland Dreieraef9ec32005-11-02 14:07:13 -0800962 if (ret)
963 return ret;
964
965 while (1) {
Bart Van Assche509c07b2014-10-30 14:48:30 +0100966 init_completion(&ch->done);
Bart Van Assched92c0da2014-10-06 17:14:36 +0200967 ret = srp_send_req(ch, multich);
Roland Dreieraef9ec32005-11-02 14:07:13 -0800968 if (ret)
969 return ret;
Bart Van Assche509c07b2014-10-30 14:48:30 +0100970 ret = wait_for_completion_interruptible(&ch->done);
Bart Van Asschea702adc2014-03-14 13:53:10 +0100971 if (ret < 0)
972 return ret;
Roland Dreieraef9ec32005-11-02 14:07:13 -0800973
974 /*
975 * The CM event handling code will set status to
976 * SRP_PORT_REDIRECT if we get a port redirect REJ
977 * back, or SRP_DLID_REDIRECT if we get a lid/qp
978 * redirect REJ back.
979 */
Bart Van Assche509c07b2014-10-30 14:48:30 +0100980 switch (ch->status) {
Roland Dreieraef9ec32005-11-02 14:07:13 -0800981 case 0:
Bart Van Assche294c8752011-12-25 12:18:12 +0000982 srp_change_conn_state(target, true);
Roland Dreieraef9ec32005-11-02 14:07:13 -0800983 return 0;
984
985 case SRP_PORT_REDIRECT:
Bart Van Assche509c07b2014-10-30 14:48:30 +0100986 ret = srp_lookup_path(ch);
Roland Dreieraef9ec32005-11-02 14:07:13 -0800987 if (ret)
988 return ret;
989 break;
990
991 case SRP_DLID_REDIRECT:
992 break;
993
David Dillow9fe4bcf2008-01-08 17:08:52 -0500994 case SRP_STALE_CONN:
David Dillow9fe4bcf2008-01-08 17:08:52 -0500995 shost_printk(KERN_ERR, target->scsi_host, PFX
Bart Van Assche205619f2014-10-30 14:46:55 +0100996 "giving up on stale connection\n");
Bart Van Assche509c07b2014-10-30 14:48:30 +0100997 ch->status = -ECONNRESET;
998 return ch->status;
David Dillow9fe4bcf2008-01-08 17:08:52 -0500999
Roland Dreieraef9ec32005-11-02 14:07:13 -08001000 default:
Bart Van Assche509c07b2014-10-30 14:48:30 +01001001 return ch->status;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001002 }
1003 }
1004}
1005
Bart Van Assche509c07b2014-10-30 14:48:30 +01001006static int srp_inv_rkey(struct srp_rdma_ch *ch, u32 rkey)
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001007{
1008 struct ib_send_wr *bad_wr;
1009 struct ib_send_wr wr = {
1010 .opcode = IB_WR_LOCAL_INV,
1011 .wr_id = LOCAL_INV_WR_ID_MASK,
1012 .next = NULL,
1013 .num_sge = 0,
1014 .send_flags = 0,
1015 .ex.invalidate_rkey = rkey,
1016 };
1017
Bart Van Assche509c07b2014-10-30 14:48:30 +01001018 return ib_post_send(ch->qp, &wr, &bad_wr);
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001019}
1020
Roland Dreierd945e1d2006-05-09 10:50:28 -07001021static void srp_unmap_data(struct scsi_cmnd *scmnd,
Bart Van Assche509c07b2014-10-30 14:48:30 +01001022 struct srp_rdma_ch *ch,
Roland Dreierd945e1d2006-05-09 10:50:28 -07001023 struct srp_request *req)
1024{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001025 struct srp_target_port *target = ch->target;
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001026 struct srp_device *dev = target->srp_host->srp_dev;
1027 struct ib_device *ibdev = dev->dev;
1028 int i, res;
David Dillow8f26c9f2011-01-14 19:45:50 -05001029
FUJITA Tomonoribb350d12007-05-26 02:28:25 +09001030 if (!scsi_sglist(scmnd) ||
Roland Dreierd945e1d2006-05-09 10:50:28 -07001031 (scmnd->sc_data_direction != DMA_TO_DEVICE &&
1032 scmnd->sc_data_direction != DMA_FROM_DEVICE))
1033 return;
1034
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001035 if (dev->use_fast_reg) {
1036 struct srp_fr_desc **pfr;
1037
1038 for (i = req->nmdesc, pfr = req->fr_list; i > 0; i--, pfr++) {
Bart Van Assche509c07b2014-10-30 14:48:30 +01001039 res = srp_inv_rkey(ch, (*pfr)->mr->rkey);
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001040 if (res < 0) {
1041 shost_printk(KERN_ERR, target->scsi_host, PFX
1042 "Queueing INV WR for rkey %#x failed (%d)\n",
1043 (*pfr)->mr->rkey, res);
1044 queue_work(system_long_wq,
1045 &target->tl_err_work);
1046 }
1047 }
1048 if (req->nmdesc)
Bart Van Assche509c07b2014-10-30 14:48:30 +01001049 srp_fr_pool_put(ch->fr_pool, req->fr_list,
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001050 req->nmdesc);
1051 } else {
1052 struct ib_pool_fmr **pfmr;
1053
1054 for (i = req->nmdesc, pfmr = req->fmr_list; i > 0; i--, pfmr++)
1055 ib_fmr_pool_unmap(*pfmr);
1056 }
Roland Dreierf5358a12006-06-17 20:37:29 -07001057
David Dillow8f26c9f2011-01-14 19:45:50 -05001058 ib_dma_unmap_sg(ibdev, scsi_sglist(scmnd), scsi_sg_count(scmnd),
1059 scmnd->sc_data_direction);
Roland Dreierd945e1d2006-05-09 10:50:28 -07001060}
1061
Bart Van Assche22032992012-08-14 13:18:53 +00001062/**
1063 * srp_claim_req - Take ownership of the scmnd associated with a request.
Bart Van Assche509c07b2014-10-30 14:48:30 +01001064 * @ch: SRP RDMA channel.
Bart Van Assche22032992012-08-14 13:18:53 +00001065 * @req: SRP request.
Bart Van Asscheb3fe6282014-03-14 13:54:11 +01001066 * @sdev: If not NULL, only take ownership for this SCSI device.
Bart Van Assche22032992012-08-14 13:18:53 +00001067 * @scmnd: If NULL, take ownership of @req->scmnd. If not NULL, only take
1068 * ownership of @req->scmnd if it equals @scmnd.
1069 *
1070 * Return value:
1071 * Either NULL or a pointer to the SCSI command the caller became owner of.
1072 */
Bart Van Assche509c07b2014-10-30 14:48:30 +01001073static struct scsi_cmnd *srp_claim_req(struct srp_rdma_ch *ch,
Bart Van Assche22032992012-08-14 13:18:53 +00001074 struct srp_request *req,
Bart Van Asscheb3fe6282014-03-14 13:54:11 +01001075 struct scsi_device *sdev,
Bart Van Assche22032992012-08-14 13:18:53 +00001076 struct scsi_cmnd *scmnd)
Ishai Rabinovitz526b4ca2006-06-17 20:37:38 -07001077{
Bart Van Assche94a91742010-11-26 14:50:09 -05001078 unsigned long flags;
1079
Bart Van Assche509c07b2014-10-30 14:48:30 +01001080 spin_lock_irqsave(&ch->lock, flags);
Bart Van Asscheb3fe6282014-03-14 13:54:11 +01001081 if (req->scmnd &&
1082 (!sdev || req->scmnd->device == sdev) &&
1083 (!scmnd || req->scmnd == scmnd)) {
Bart Van Assche22032992012-08-14 13:18:53 +00001084 scmnd = req->scmnd;
1085 req->scmnd = NULL;
Bart Van Assche22032992012-08-14 13:18:53 +00001086 } else {
1087 scmnd = NULL;
1088 }
Bart Van Assche509c07b2014-10-30 14:48:30 +01001089 spin_unlock_irqrestore(&ch->lock, flags);
Bart Van Assche22032992012-08-14 13:18:53 +00001090
1091 return scmnd;
1092}
1093
1094/**
1095 * srp_free_req() - Unmap data and add request to the free request list.
Bart Van Assche509c07b2014-10-30 14:48:30 +01001096 * @ch: SRP RDMA channel.
Bart Van Asscheaf246632014-05-20 15:04:21 +02001097 * @req: Request to be freed.
1098 * @scmnd: SCSI command associated with @req.
1099 * @req_lim_delta: Amount to be added to @target->req_lim.
Bart Van Assche22032992012-08-14 13:18:53 +00001100 */
Bart Van Assche509c07b2014-10-30 14:48:30 +01001101static void srp_free_req(struct srp_rdma_ch *ch, struct srp_request *req,
1102 struct scsi_cmnd *scmnd, s32 req_lim_delta)
Bart Van Assche22032992012-08-14 13:18:53 +00001103{
1104 unsigned long flags;
1105
Bart Van Assche509c07b2014-10-30 14:48:30 +01001106 srp_unmap_data(scmnd, ch, req);
Bart Van Assche22032992012-08-14 13:18:53 +00001107
Bart Van Assche509c07b2014-10-30 14:48:30 +01001108 spin_lock_irqsave(&ch->lock, flags);
1109 ch->req_lim += req_lim_delta;
Bart Van Assche509c07b2014-10-30 14:48:30 +01001110 spin_unlock_irqrestore(&ch->lock, flags);
Ishai Rabinovitz526b4ca2006-06-17 20:37:38 -07001111}
1112
Bart Van Assche509c07b2014-10-30 14:48:30 +01001113static void srp_finish_req(struct srp_rdma_ch *ch, struct srp_request *req,
1114 struct scsi_device *sdev, int result)
Ishai Rabinovitz526b4ca2006-06-17 20:37:38 -07001115{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001116 struct scsi_cmnd *scmnd = srp_claim_req(ch, req, sdev, NULL);
Bart Van Assche22032992012-08-14 13:18:53 +00001117
1118 if (scmnd) {
Bart Van Assche509c07b2014-10-30 14:48:30 +01001119 srp_free_req(ch, req, scmnd, 0);
Bart Van Asscheed9b2262013-10-26 14:34:27 +02001120 scmnd->result = result;
Bart Van Assche22032992012-08-14 13:18:53 +00001121 scmnd->scsi_done(scmnd);
Bart Van Assche22032992012-08-14 13:18:53 +00001122 }
Ishai Rabinovitz526b4ca2006-06-17 20:37:38 -07001123}
1124
Bart Van Asscheed9b2262013-10-26 14:34:27 +02001125static void srp_terminate_io(struct srp_rport *rport)
Roland Dreieraef9ec32005-11-02 14:07:13 -08001126{
Bart Van Asscheed9b2262013-10-26 14:34:27 +02001127 struct srp_target_port *target = rport->lld_data;
Bart Van Assched92c0da2014-10-06 17:14:36 +02001128 struct srp_rdma_ch *ch;
Bart Van Asscheb3fe6282014-03-14 13:54:11 +01001129 struct Scsi_Host *shost = target->scsi_host;
1130 struct scsi_device *sdev;
Bart Van Assched92c0da2014-10-06 17:14:36 +02001131 int i, j;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001132
Bart Van Asscheb3fe6282014-03-14 13:54:11 +01001133 /*
1134 * Invoking srp_terminate_io() while srp_queuecommand() is running
1135 * is not safe. Hence the warning statement below.
1136 */
1137 shost_for_each_device(sdev, shost)
1138 WARN_ON_ONCE(sdev->request_queue->request_fn_active);
1139
Bart Van Assched92c0da2014-10-06 17:14:36 +02001140 for (i = 0; i < target->ch_count; i++) {
1141 ch = &target->ch[i];
Bart Van Assche509c07b2014-10-30 14:48:30 +01001142
Bart Van Assched92c0da2014-10-06 17:14:36 +02001143 for (j = 0; j < target->req_ring_size; ++j) {
1144 struct srp_request *req = &ch->req_ring[j];
1145
1146 srp_finish_req(ch, req, NULL,
1147 DID_TRANSPORT_FAILFAST << 16);
1148 }
Bart Van Asscheed9b2262013-10-26 14:34:27 +02001149 }
1150}
1151
1152/*
1153 * It is up to the caller to ensure that srp_rport_reconnect() calls are
1154 * serialized and that no concurrent srp_queuecommand(), srp_abort(),
1155 * srp_reset_device() or srp_reset_host() calls will occur while this function
1156 * is in progress. One way to realize that is not to call this function
1157 * directly but to call srp_reconnect_rport() instead since that last function
1158 * serializes calls of this function via rport->mutex and also blocks
1159 * srp_queuecommand() calls before invoking this function.
1160 */
1161static int srp_rport_reconnect(struct srp_rport *rport)
1162{
1163 struct srp_target_port *target = rport->lld_data;
Bart Van Assched92c0da2014-10-06 17:14:36 +02001164 struct srp_rdma_ch *ch;
1165 int i, j, ret = 0;
1166 bool multich = false;
Bart Van Assche09be70a2012-03-17 17:18:54 +00001167
Roland Dreieraef9ec32005-11-02 14:07:13 -08001168 srp_disconnect_target(target);
Bart Van Assche34aa6542014-10-30 14:47:22 +01001169
1170 if (target->state == SRP_TARGET_SCANNING)
1171 return -ENODEV;
1172
Roland Dreieraef9ec32005-11-02 14:07:13 -08001173 /*
Bart Van Asschec7c4e7f2013-02-21 17:19:04 +00001174 * Now get a new local CM ID so that we avoid confusing the target in
1175 * case things are really fouled up. Doing so also ensures that all CM
1176 * callbacks will have finished before a new QP is allocated.
Roland Dreieraef9ec32005-11-02 14:07:13 -08001177 */
Bart Van Assched92c0da2014-10-06 17:14:36 +02001178 for (i = 0; i < target->ch_count; i++) {
1179 ch = &target->ch[i];
1180 if (!ch->target)
1181 break;
1182 ret += srp_new_cm_id(ch);
Bart Van Assche536ae142010-11-26 13:58:27 -05001183 }
Bart Van Assched92c0da2014-10-06 17:14:36 +02001184 for (i = 0; i < target->ch_count; i++) {
1185 ch = &target->ch[i];
1186 if (!ch->target)
1187 break;
1188 for (j = 0; j < target->req_ring_size; ++j) {
1189 struct srp_request *req = &ch->req_ring[j];
Roland Dreieraef9ec32005-11-02 14:07:13 -08001190
Bart Van Assched92c0da2014-10-06 17:14:36 +02001191 srp_finish_req(ch, req, NULL, DID_RESET << 16);
1192 }
1193 }
1194 for (i = 0; i < target->ch_count; i++) {
1195 ch = &target->ch[i];
1196 if (!ch->target)
1197 break;
1198 /*
1199 * Whether or not creating a new CM ID succeeded, create a new
1200 * QP. This guarantees that all completion callback function
1201 * invocations have finished before request resetting starts.
1202 */
1203 ret += srp_create_ch_ib(ch);
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001204
Bart Van Assched92c0da2014-10-06 17:14:36 +02001205 INIT_LIST_HEAD(&ch->free_tx);
1206 for (j = 0; j < target->queue_size; ++j)
1207 list_add(&ch->tx_ring[j]->list, &ch->free_tx);
1208 }
1209 for (i = 0; i < target->ch_count; i++) {
1210 ch = &target->ch[i];
1211 if (ret || !ch->target) {
1212 if (i > 1)
1213 ret = 0;
1214 break;
1215 }
1216 ret = srp_connect_ch(ch, multich);
1217 multich = true;
1218 }
Bart Van Assche09be70a2012-03-17 17:18:54 +00001219
Bart Van Asscheed9b2262013-10-26 14:34:27 +02001220 if (ret == 0)
1221 shost_printk(KERN_INFO, target->scsi_host,
1222 PFX "reconnect succeeded\n");
Roland Dreieraef9ec32005-11-02 14:07:13 -08001223
1224 return ret;
1225}
1226
David Dillow8f26c9f2011-01-14 19:45:50 -05001227static void srp_map_desc(struct srp_map_state *state, dma_addr_t dma_addr,
1228 unsigned int dma_len, u32 rkey)
Roland Dreierf5358a12006-06-17 20:37:29 -07001229{
David Dillow8f26c9f2011-01-14 19:45:50 -05001230 struct srp_direct_buf *desc = state->desc;
1231
1232 desc->va = cpu_to_be64(dma_addr);
1233 desc->key = cpu_to_be32(rkey);
1234 desc->len = cpu_to_be32(dma_len);
1235
1236 state->total_len += dma_len;
1237 state->desc++;
1238 state->ndesc++;
1239}
1240
1241static int srp_map_finish_fmr(struct srp_map_state *state,
Bart Van Assche509c07b2014-10-30 14:48:30 +01001242 struct srp_rdma_ch *ch)
David Dillow8f26c9f2011-01-14 19:45:50 -05001243{
David Dillow8f26c9f2011-01-14 19:45:50 -05001244 struct ib_pool_fmr *fmr;
Roland Dreierf5358a12006-06-17 20:37:29 -07001245 u64 io_addr = 0;
David Dillow8f26c9f2011-01-14 19:45:50 -05001246
Bart Van Assche509c07b2014-10-30 14:48:30 +01001247 fmr = ib_fmr_pool_map_phys(ch->fmr_pool, state->pages,
David Dillow8f26c9f2011-01-14 19:45:50 -05001248 state->npages, io_addr);
1249 if (IS_ERR(fmr))
1250 return PTR_ERR(fmr);
1251
1252 *state->next_fmr++ = fmr;
Bart Van Assche52ede082014-05-20 15:07:45 +02001253 state->nmdesc++;
David Dillow8f26c9f2011-01-14 19:45:50 -05001254
Bart Van Assche52ede082014-05-20 15:07:45 +02001255 srp_map_desc(state, 0, state->dma_len, fmr->fmr->rkey);
Bart Van Assche539dde62014-05-20 15:05:46 +02001256
David Dillow8f26c9f2011-01-14 19:45:50 -05001257 return 0;
1258}
1259
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001260static int srp_map_finish_fr(struct srp_map_state *state,
Bart Van Assche509c07b2014-10-30 14:48:30 +01001261 struct srp_rdma_ch *ch)
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001262{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001263 struct srp_target_port *target = ch->target;
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001264 struct srp_device *dev = target->srp_host->srp_dev;
1265 struct ib_send_wr *bad_wr;
1266 struct ib_send_wr wr;
1267 struct srp_fr_desc *desc;
1268 u32 rkey;
1269
Bart Van Assche509c07b2014-10-30 14:48:30 +01001270 desc = srp_fr_pool_get(ch->fr_pool);
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001271 if (!desc)
1272 return -ENOMEM;
1273
1274 rkey = ib_inc_rkey(desc->mr->rkey);
1275 ib_update_fast_reg_key(desc->mr, rkey);
1276
1277 memcpy(desc->frpl->page_list, state->pages,
1278 sizeof(state->pages[0]) * state->npages);
1279
1280 memset(&wr, 0, sizeof(wr));
1281 wr.opcode = IB_WR_FAST_REG_MR;
1282 wr.wr_id = FAST_REG_WR_ID_MASK;
1283 wr.wr.fast_reg.iova_start = state->base_dma_addr;
1284 wr.wr.fast_reg.page_list = desc->frpl;
1285 wr.wr.fast_reg.page_list_len = state->npages;
1286 wr.wr.fast_reg.page_shift = ilog2(dev->mr_page_size);
1287 wr.wr.fast_reg.length = state->dma_len;
1288 wr.wr.fast_reg.access_flags = (IB_ACCESS_LOCAL_WRITE |
1289 IB_ACCESS_REMOTE_READ |
1290 IB_ACCESS_REMOTE_WRITE);
1291 wr.wr.fast_reg.rkey = desc->mr->lkey;
1292
1293 *state->next_fr++ = desc;
1294 state->nmdesc++;
1295
1296 srp_map_desc(state, state->base_dma_addr, state->dma_len,
1297 desc->mr->rkey);
1298
Bart Van Assche509c07b2014-10-30 14:48:30 +01001299 return ib_post_send(ch->qp, &wr, &bad_wr);
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001300}
1301
Bart Van Assche539dde62014-05-20 15:05:46 +02001302static int srp_finish_mapping(struct srp_map_state *state,
Bart Van Assche509c07b2014-10-30 14:48:30 +01001303 struct srp_rdma_ch *ch)
Bart Van Assche539dde62014-05-20 15:05:46 +02001304{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001305 struct srp_target_port *target = ch->target;
Bart Van Assche539dde62014-05-20 15:05:46 +02001306 int ret = 0;
1307
1308 if (state->npages == 0)
1309 return 0;
1310
Bart Van Asscheb1b88542014-05-20 15:06:41 +02001311 if (state->npages == 1 && !register_always)
Bart Van Assche52ede082014-05-20 15:07:45 +02001312 srp_map_desc(state, state->base_dma_addr, state->dma_len,
Bart Van Assche539dde62014-05-20 15:05:46 +02001313 target->rkey);
1314 else
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001315 ret = target->srp_host->srp_dev->use_fast_reg ?
Bart Van Assche509c07b2014-10-30 14:48:30 +01001316 srp_map_finish_fr(state, ch) :
1317 srp_map_finish_fmr(state, ch);
Bart Van Assche539dde62014-05-20 15:05:46 +02001318
1319 if (ret == 0) {
1320 state->npages = 0;
Bart Van Assche52ede082014-05-20 15:07:45 +02001321 state->dma_len = 0;
Bart Van Assche539dde62014-05-20 15:05:46 +02001322 }
1323
1324 return ret;
1325}
1326
David Dillow8f26c9f2011-01-14 19:45:50 -05001327static void srp_map_update_start(struct srp_map_state *state,
1328 struct scatterlist *sg, int sg_index,
1329 dma_addr_t dma_addr)
1330{
1331 state->unmapped_sg = sg;
1332 state->unmapped_index = sg_index;
1333 state->unmapped_addr = dma_addr;
1334}
1335
1336static int srp_map_sg_entry(struct srp_map_state *state,
Bart Van Assche509c07b2014-10-30 14:48:30 +01001337 struct srp_rdma_ch *ch,
David Dillow8f26c9f2011-01-14 19:45:50 -05001338 struct scatterlist *sg, int sg_index,
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001339 bool use_mr)
David Dillow8f26c9f2011-01-14 19:45:50 -05001340{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001341 struct srp_target_port *target = ch->target;
Greg Kroah-Hartman05321932008-03-06 00:13:36 +01001342 struct srp_device *dev = target->srp_host->srp_dev;
Ralph Campbell85507bc2006-12-12 14:30:55 -08001343 struct ib_device *ibdev = dev->dev;
David Dillow8f26c9f2011-01-14 19:45:50 -05001344 dma_addr_t dma_addr = ib_sg_dma_address(ibdev, sg);
1345 unsigned int dma_len = ib_sg_dma_len(ibdev, sg);
1346 unsigned int len;
1347 int ret;
Roland Dreierf5358a12006-06-17 20:37:29 -07001348
David Dillow8f26c9f2011-01-14 19:45:50 -05001349 if (!dma_len)
1350 return 0;
Roland Dreierf5358a12006-06-17 20:37:29 -07001351
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001352 if (!use_mr) {
1353 /*
1354 * Once we're in direct map mode for a request, we don't
1355 * go back to FMR or FR mode, so no need to update anything
David Dillow8f26c9f2011-01-14 19:45:50 -05001356 * other than the descriptor.
1357 */
1358 srp_map_desc(state, dma_addr, dma_len, target->rkey);
1359 return 0;
1360 }
Ishai Rabinovitz559ce8f2006-08-03 10:35:43 -07001361
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001362 /*
1363 * Since not all RDMA HW drivers support non-zero page offsets for
1364 * FMR, if we start at an offset into a page, don't merge into the
1365 * current FMR mapping. Finish it out, and use the kernel's MR for
1366 * this sg entry.
David Dillow8f26c9f2011-01-14 19:45:50 -05001367 */
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001368 if ((!dev->use_fast_reg && dma_addr & ~dev->mr_page_mask) ||
1369 dma_len > dev->mr_max_size) {
Bart Van Assche509c07b2014-10-30 14:48:30 +01001370 ret = srp_finish_mapping(state, ch);
David Dillow8f26c9f2011-01-14 19:45:50 -05001371 if (ret)
1372 return ret;
Ralph Campbell85507bc2006-12-12 14:30:55 -08001373
David Dillow8f26c9f2011-01-14 19:45:50 -05001374 srp_map_desc(state, dma_addr, dma_len, target->rkey);
1375 srp_map_update_start(state, NULL, 0, 0);
1376 return 0;
1377 }
1378
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001379 /*
1380 * If this is the first sg that will be mapped via FMR or via FR, save
1381 * our position. We need to know the first unmapped entry, its index,
1382 * and the first unmapped address within that entry to be able to
1383 * restart mapping after an error.
David Dillow8f26c9f2011-01-14 19:45:50 -05001384 */
1385 if (!state->unmapped_sg)
1386 srp_map_update_start(state, sg, sg_index, dma_addr);
1387
1388 while (dma_len) {
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001389 unsigned offset = dma_addr & ~dev->mr_page_mask;
1390 if (state->npages == dev->max_pages_per_mr || offset != 0) {
Bart Van Assche509c07b2014-10-30 14:48:30 +01001391 ret = srp_finish_mapping(state, ch);
David Dillow8f26c9f2011-01-14 19:45:50 -05001392 if (ret)
1393 return ret;
1394
1395 srp_map_update_start(state, sg, sg_index, dma_addr);
Roland Dreierf5358a12006-06-17 20:37:29 -07001396 }
1397
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001398 len = min_t(unsigned int, dma_len, dev->mr_page_size - offset);
David Dillow8f26c9f2011-01-14 19:45:50 -05001399
1400 if (!state->npages)
1401 state->base_dma_addr = dma_addr;
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001402 state->pages[state->npages++] = dma_addr & dev->mr_page_mask;
Bart Van Assche52ede082014-05-20 15:07:45 +02001403 state->dma_len += len;
David Dillow8f26c9f2011-01-14 19:45:50 -05001404 dma_addr += len;
1405 dma_len -= len;
Roland Dreierf5358a12006-06-17 20:37:29 -07001406 }
1407
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001408 /*
1409 * If the last entry of the MR wasn't a full page, then we need to
David Dillow8f26c9f2011-01-14 19:45:50 -05001410 * close it out and start a new one -- we can only merge at page
1411 * boundries.
1412 */
Roland Dreierf5358a12006-06-17 20:37:29 -07001413 ret = 0;
Bart Van Assche52ede082014-05-20 15:07:45 +02001414 if (len != dev->mr_page_size) {
Bart Van Assche509c07b2014-10-30 14:48:30 +01001415 ret = srp_finish_mapping(state, ch);
David Dillow8f26c9f2011-01-14 19:45:50 -05001416 if (!ret)
1417 srp_map_update_start(state, NULL, 0, 0);
1418 }
Roland Dreierf5358a12006-06-17 20:37:29 -07001419 return ret;
1420}
1421
Bart Van Assche509c07b2014-10-30 14:48:30 +01001422static int srp_map_sg(struct srp_map_state *state, struct srp_rdma_ch *ch,
1423 struct srp_request *req, struct scatterlist *scat,
1424 int count)
Bart Van Assche76bc1e12014-05-20 15:05:21 +02001425{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001426 struct srp_target_port *target = ch->target;
Bart Van Assche76bc1e12014-05-20 15:05:21 +02001427 struct srp_device *dev = target->srp_host->srp_dev;
1428 struct ib_device *ibdev = dev->dev;
1429 struct scatterlist *sg;
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001430 int i;
1431 bool use_mr;
Bart Van Assche76bc1e12014-05-20 15:05:21 +02001432
1433 state->desc = req->indirect_desc;
1434 state->pages = req->map_page;
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001435 if (dev->use_fast_reg) {
1436 state->next_fr = req->fr_list;
Bart Van Assche509c07b2014-10-30 14:48:30 +01001437 use_mr = !!ch->fr_pool;
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001438 } else {
1439 state->next_fmr = req->fmr_list;
Bart Van Assche509c07b2014-10-30 14:48:30 +01001440 use_mr = !!ch->fmr_pool;
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001441 }
Bart Van Assche76bc1e12014-05-20 15:05:21 +02001442
1443 for_each_sg(scat, sg, count, i) {
Bart Van Assche509c07b2014-10-30 14:48:30 +01001444 if (srp_map_sg_entry(state, ch, sg, i, use_mr)) {
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001445 /*
1446 * Memory registration failed, so backtrack to the
1447 * first unmapped entry and continue on without using
1448 * memory registration.
Bart Van Assche76bc1e12014-05-20 15:05:21 +02001449 */
1450 dma_addr_t dma_addr;
1451 unsigned int dma_len;
1452
1453backtrack:
1454 sg = state->unmapped_sg;
1455 i = state->unmapped_index;
1456
1457 dma_addr = ib_sg_dma_address(ibdev, sg);
1458 dma_len = ib_sg_dma_len(ibdev, sg);
1459 dma_len -= (state->unmapped_addr - dma_addr);
1460 dma_addr = state->unmapped_addr;
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001461 use_mr = false;
Bart Van Assche76bc1e12014-05-20 15:05:21 +02001462 srp_map_desc(state, dma_addr, dma_len, target->rkey);
1463 }
1464 }
1465
Bart Van Assche509c07b2014-10-30 14:48:30 +01001466 if (use_mr && srp_finish_mapping(state, ch))
Bart Van Assche76bc1e12014-05-20 15:05:21 +02001467 goto backtrack;
1468
Bart Van Assche52ede082014-05-20 15:07:45 +02001469 req->nmdesc = state->nmdesc;
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001470
1471 return 0;
Bart Van Assche76bc1e12014-05-20 15:05:21 +02001472}
1473
Bart Van Assche509c07b2014-10-30 14:48:30 +01001474static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch,
Roland Dreieraef9ec32005-11-02 14:07:13 -08001475 struct srp_request *req)
1476{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001477 struct srp_target_port *target = ch->target;
Bart Van Assche76bc1e12014-05-20 15:05:21 +02001478 struct scatterlist *scat;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001479 struct srp_cmd *cmd = req->cmd->buf;
Bart Van Assche76bc1e12014-05-20 15:05:21 +02001480 int len, nents, count;
Ralph Campbell85507bc2006-12-12 14:30:55 -08001481 struct srp_device *dev;
1482 struct ib_device *ibdev;
David Dillow8f26c9f2011-01-14 19:45:50 -05001483 struct srp_map_state state;
1484 struct srp_indirect_buf *indirect_hdr;
David Dillow8f26c9f2011-01-14 19:45:50 -05001485 u32 table_len;
1486 u8 fmt;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001487
FUJITA Tomonoribb350d12007-05-26 02:28:25 +09001488 if (!scsi_sglist(scmnd) || scmnd->sc_data_direction == DMA_NONE)
Roland Dreieraef9ec32005-11-02 14:07:13 -08001489 return sizeof (struct srp_cmd);
1490
1491 if (scmnd->sc_data_direction != DMA_FROM_DEVICE &&
1492 scmnd->sc_data_direction != DMA_TO_DEVICE) {
David Dillow7aa54bd2008-01-07 18:23:41 -05001493 shost_printk(KERN_WARNING, target->scsi_host,
1494 PFX "Unhandled data direction %d\n",
1495 scmnd->sc_data_direction);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001496 return -EINVAL;
1497 }
1498
FUJITA Tomonoribb350d12007-05-26 02:28:25 +09001499 nents = scsi_sg_count(scmnd);
1500 scat = scsi_sglist(scmnd);
Roland Dreiercf368712006-03-24 15:47:26 -08001501
Greg Kroah-Hartman05321932008-03-06 00:13:36 +01001502 dev = target->srp_host->srp_dev;
Ralph Campbell85507bc2006-12-12 14:30:55 -08001503 ibdev = dev->dev;
1504
1505 count = ib_dma_map_sg(ibdev, scat, nents, scmnd->sc_data_direction);
David Dillow8f26c9f2011-01-14 19:45:50 -05001506 if (unlikely(count == 0))
1507 return -EIO;
Roland Dreierf5358a12006-06-17 20:37:29 -07001508
1509 fmt = SRP_DATA_DESC_DIRECT;
1510 len = sizeof (struct srp_cmd) + sizeof (struct srp_direct_buf);
Roland Dreiercf368712006-03-24 15:47:26 -08001511
Bart Van Asscheb1b88542014-05-20 15:06:41 +02001512 if (count == 1 && !register_always) {
Roland Dreierf5358a12006-06-17 20:37:29 -07001513 /*
1514 * The midlayer only generated a single gather/scatter
1515 * entry, or DMA mapping coalesced everything to a
1516 * single entry. So a direct descriptor along with
1517 * the DMA MR suffices.
1518 */
Roland Dreieraef9ec32005-11-02 14:07:13 -08001519 struct srp_direct_buf *buf = (void *) cmd->add_data;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001520
Ralph Campbell85507bc2006-12-12 14:30:55 -08001521 buf->va = cpu_to_be64(ib_sg_dma_address(ibdev, scat));
David Dillow9af76272010-11-26 15:34:46 -05001522 buf->key = cpu_to_be32(target->rkey);
Ralph Campbell85507bc2006-12-12 14:30:55 -08001523 buf->len = cpu_to_be32(ib_sg_dma_len(ibdev, scat));
Roland Dreiercf368712006-03-24 15:47:26 -08001524
Bart Van Assche52ede082014-05-20 15:07:45 +02001525 req->nmdesc = 0;
David Dillow8f26c9f2011-01-14 19:45:50 -05001526 goto map_complete;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001527 }
1528
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001529 /*
1530 * We have more than one scatter/gather entry, so build our indirect
1531 * descriptor table, trying to merge as many entries as we can.
David Dillow8f26c9f2011-01-14 19:45:50 -05001532 */
1533 indirect_hdr = (void *) cmd->add_data;
1534
David Dillowc07d4242011-01-16 13:57:10 -05001535 ib_dma_sync_single_for_cpu(ibdev, req->indirect_dma_addr,
1536 target->indirect_size, DMA_TO_DEVICE);
1537
David Dillow8f26c9f2011-01-14 19:45:50 -05001538 memset(&state, 0, sizeof(state));
Bart Van Assche509c07b2014-10-30 14:48:30 +01001539 srp_map_sg(&state, ch, req, scat, count);
David Dillow8f26c9f2011-01-14 19:45:50 -05001540
David Dillowc07d4242011-01-16 13:57:10 -05001541 /* We've mapped the request, now pull as much of the indirect
1542 * descriptor table as we can into the command buffer. If this
1543 * target is not using an external indirect table, we are
1544 * guaranteed to fit into the command, as the SCSI layer won't
1545 * give us more S/G entries than we allow.
David Dillow8f26c9f2011-01-14 19:45:50 -05001546 */
David Dillow8f26c9f2011-01-14 19:45:50 -05001547 if (state.ndesc == 1) {
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001548 /*
1549 * Memory registration collapsed the sg-list into one entry,
David Dillow8f26c9f2011-01-14 19:45:50 -05001550 * so use a direct descriptor.
1551 */
1552 struct srp_direct_buf *buf = (void *) cmd->add_data;
1553
David Dillowc07d4242011-01-16 13:57:10 -05001554 *buf = req->indirect_desc[0];
David Dillow8f26c9f2011-01-14 19:45:50 -05001555 goto map_complete;
1556 }
1557
David Dillowc07d4242011-01-16 13:57:10 -05001558 if (unlikely(target->cmd_sg_cnt < state.ndesc &&
1559 !target->allow_ext_sg)) {
1560 shost_printk(KERN_ERR, target->scsi_host,
1561 "Could not fit S/G list into SRP_CMD\n");
1562 return -EIO;
1563 }
1564
1565 count = min(state.ndesc, target->cmd_sg_cnt);
David Dillow8f26c9f2011-01-14 19:45:50 -05001566 table_len = state.ndesc * sizeof (struct srp_direct_buf);
1567
1568 fmt = SRP_DATA_DESC_INDIRECT;
1569 len = sizeof(struct srp_cmd) + sizeof (struct srp_indirect_buf);
David Dillowc07d4242011-01-16 13:57:10 -05001570 len += count * sizeof (struct srp_direct_buf);
David Dillow8f26c9f2011-01-14 19:45:50 -05001571
David Dillowc07d4242011-01-16 13:57:10 -05001572 memcpy(indirect_hdr->desc_list, req->indirect_desc,
1573 count * sizeof (struct srp_direct_buf));
David Dillow8f26c9f2011-01-14 19:45:50 -05001574
David Dillowc07d4242011-01-16 13:57:10 -05001575 indirect_hdr->table_desc.va = cpu_to_be64(req->indirect_dma_addr);
David Dillow8f26c9f2011-01-14 19:45:50 -05001576 indirect_hdr->table_desc.key = cpu_to_be32(target->rkey);
1577 indirect_hdr->table_desc.len = cpu_to_be32(table_len);
1578 indirect_hdr->len = cpu_to_be32(state.total_len);
1579
1580 if (scmnd->sc_data_direction == DMA_TO_DEVICE)
David Dillowc07d4242011-01-16 13:57:10 -05001581 cmd->data_out_desc_cnt = count;
David Dillow8f26c9f2011-01-14 19:45:50 -05001582 else
David Dillowc07d4242011-01-16 13:57:10 -05001583 cmd->data_in_desc_cnt = count;
1584
1585 ib_dma_sync_single_for_device(ibdev, req->indirect_dma_addr, table_len,
1586 DMA_TO_DEVICE);
David Dillow8f26c9f2011-01-14 19:45:50 -05001587
1588map_complete:
Roland Dreieraef9ec32005-11-02 14:07:13 -08001589 if (scmnd->sc_data_direction == DMA_TO_DEVICE)
1590 cmd->buf_fmt = fmt << 4;
1591 else
1592 cmd->buf_fmt = fmt;
1593
Roland Dreieraef9ec32005-11-02 14:07:13 -08001594 return len;
1595}
1596
David Dillow05a1d752010-10-08 14:48:14 -04001597/*
Bart Van Assche76c75b22010-11-26 14:37:47 -05001598 * Return an IU and possible credit to the free pool
1599 */
Bart Van Assche509c07b2014-10-30 14:48:30 +01001600static void srp_put_tx_iu(struct srp_rdma_ch *ch, struct srp_iu *iu,
Bart Van Assche76c75b22010-11-26 14:37:47 -05001601 enum srp_iu_type iu_type)
1602{
1603 unsigned long flags;
1604
Bart Van Assche509c07b2014-10-30 14:48:30 +01001605 spin_lock_irqsave(&ch->lock, flags);
1606 list_add(&iu->list, &ch->free_tx);
Bart Van Assche76c75b22010-11-26 14:37:47 -05001607 if (iu_type != SRP_IU_RSP)
Bart Van Assche509c07b2014-10-30 14:48:30 +01001608 ++ch->req_lim;
1609 spin_unlock_irqrestore(&ch->lock, flags);
Bart Van Assche76c75b22010-11-26 14:37:47 -05001610}
1611
1612/*
Bart Van Assche509c07b2014-10-30 14:48:30 +01001613 * Must be called with ch->lock held to protect req_lim and free_tx.
Bart Van Asschee9684672010-11-26 15:08:38 -05001614 * If IU is not sent, it must be returned using srp_put_tx_iu().
David Dillow05a1d752010-10-08 14:48:14 -04001615 *
1616 * Note:
1617 * An upper limit for the number of allocated information units for each
1618 * request type is:
1619 * - SRP_IU_CMD: SRP_CMD_SQ_SIZE, since the SCSI mid-layer never queues
1620 * more than Scsi_Host.can_queue requests.
1621 * - SRP_IU_TSK_MGMT: SRP_TSK_MGMT_SQ_SIZE.
1622 * - SRP_IU_RSP: 1, since a conforming SRP target never sends more than
1623 * one unanswered SRP request to an initiator.
1624 */
Bart Van Assche509c07b2014-10-30 14:48:30 +01001625static struct srp_iu *__srp_get_tx_iu(struct srp_rdma_ch *ch,
David Dillow05a1d752010-10-08 14:48:14 -04001626 enum srp_iu_type iu_type)
1627{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001628 struct srp_target_port *target = ch->target;
David Dillow05a1d752010-10-08 14:48:14 -04001629 s32 rsv = (iu_type == SRP_IU_TSK_MGMT) ? 0 : SRP_TSK_MGMT_SQ_SIZE;
1630 struct srp_iu *iu;
1631
Bart Van Assche509c07b2014-10-30 14:48:30 +01001632 srp_send_completion(ch->send_cq, ch);
David Dillow05a1d752010-10-08 14:48:14 -04001633
Bart Van Assche509c07b2014-10-30 14:48:30 +01001634 if (list_empty(&ch->free_tx))
David Dillow05a1d752010-10-08 14:48:14 -04001635 return NULL;
1636
1637 /* Initiator responses to target requests do not consume credits */
Bart Van Assche76c75b22010-11-26 14:37:47 -05001638 if (iu_type != SRP_IU_RSP) {
Bart Van Assche509c07b2014-10-30 14:48:30 +01001639 if (ch->req_lim <= rsv) {
Bart Van Assche76c75b22010-11-26 14:37:47 -05001640 ++target->zero_req_lim;
1641 return NULL;
1642 }
1643
Bart Van Assche509c07b2014-10-30 14:48:30 +01001644 --ch->req_lim;
David Dillow05a1d752010-10-08 14:48:14 -04001645 }
1646
Bart Van Assche509c07b2014-10-30 14:48:30 +01001647 iu = list_first_entry(&ch->free_tx, struct srp_iu, list);
Bart Van Assche76c75b22010-11-26 14:37:47 -05001648 list_del(&iu->list);
David Dillow05a1d752010-10-08 14:48:14 -04001649 return iu;
1650}
1651
Bart Van Assche509c07b2014-10-30 14:48:30 +01001652static int srp_post_send(struct srp_rdma_ch *ch, struct srp_iu *iu, int len)
David Dillow05a1d752010-10-08 14:48:14 -04001653{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001654 struct srp_target_port *target = ch->target;
David Dillow05a1d752010-10-08 14:48:14 -04001655 struct ib_sge list;
1656 struct ib_send_wr wr, *bad_wr;
David Dillow05a1d752010-10-08 14:48:14 -04001657
1658 list.addr = iu->dma;
1659 list.length = len;
David Dillow9af76272010-11-26 15:34:46 -05001660 list.lkey = target->lkey;
David Dillow05a1d752010-10-08 14:48:14 -04001661
1662 wr.next = NULL;
Bart Van Asschedcb4cb82010-11-26 13:22:48 -05001663 wr.wr_id = (uintptr_t) iu;
David Dillow05a1d752010-10-08 14:48:14 -04001664 wr.sg_list = &list;
1665 wr.num_sge = 1;
1666 wr.opcode = IB_WR_SEND;
1667 wr.send_flags = IB_SEND_SIGNALED;
1668
Bart Van Assche509c07b2014-10-30 14:48:30 +01001669 return ib_post_send(ch->qp, &wr, &bad_wr);
David Dillow05a1d752010-10-08 14:48:14 -04001670}
1671
Bart Van Assche509c07b2014-10-30 14:48:30 +01001672static int srp_post_recv(struct srp_rdma_ch *ch, struct srp_iu *iu)
Bart Van Asschec996bb42010-07-30 10:59:05 +00001673{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001674 struct srp_target_port *target = ch->target;
Bart Van Asschec996bb42010-07-30 10:59:05 +00001675 struct ib_recv_wr wr, *bad_wr;
Bart Van Asschedcb4cb82010-11-26 13:22:48 -05001676 struct ib_sge list;
Bart Van Asschec996bb42010-07-30 10:59:05 +00001677
1678 list.addr = iu->dma;
1679 list.length = iu->size;
David Dillow9af76272010-11-26 15:34:46 -05001680 list.lkey = target->lkey;
Bart Van Asschec996bb42010-07-30 10:59:05 +00001681
1682 wr.next = NULL;
Bart Van Asschedcb4cb82010-11-26 13:22:48 -05001683 wr.wr_id = (uintptr_t) iu;
Bart Van Asschec996bb42010-07-30 10:59:05 +00001684 wr.sg_list = &list;
1685 wr.num_sge = 1;
1686
Bart Van Assche509c07b2014-10-30 14:48:30 +01001687 return ib_post_recv(ch->qp, &wr, &bad_wr);
Bart Van Asschec996bb42010-07-30 10:59:05 +00001688}
1689
Bart Van Assche509c07b2014-10-30 14:48:30 +01001690static void srp_process_rsp(struct srp_rdma_ch *ch, struct srp_rsp *rsp)
Roland Dreieraef9ec32005-11-02 14:07:13 -08001691{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001692 struct srp_target_port *target = ch->target;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001693 struct srp_request *req;
1694 struct scsi_cmnd *scmnd;
1695 unsigned long flags;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001696
Roland Dreieraef9ec32005-11-02 14:07:13 -08001697 if (unlikely(rsp->tag & SRP_TAG_TSK_MGMT)) {
Bart Van Assche509c07b2014-10-30 14:48:30 +01001698 spin_lock_irqsave(&ch->lock, flags);
1699 ch->req_lim += be32_to_cpu(rsp->req_lim_delta);
1700 spin_unlock_irqrestore(&ch->lock, flags);
Bart Van Assche94a91742010-11-26 14:50:09 -05001701
Bart Van Assche509c07b2014-10-30 14:48:30 +01001702 ch->tsk_mgmt_status = -1;
David Dillowf8b6e312010-11-26 13:02:21 -05001703 if (be32_to_cpu(rsp->resp_data_len) >= 4)
Bart Van Assche509c07b2014-10-30 14:48:30 +01001704 ch->tsk_mgmt_status = rsp->data[3];
1705 complete(&ch->tsk_mgmt_done);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001706 } else {
Bart Van Assche77f2c1a2014-10-02 15:29:25 +02001707 scmnd = scsi_host_find_tag(target->scsi_host, rsp->tag);
1708 if (scmnd) {
1709 req = (void *)scmnd->host_scribble;
1710 scmnd = srp_claim_req(ch, req, NULL, scmnd);
1711 }
Bart Van Assche22032992012-08-14 13:18:53 +00001712 if (!scmnd) {
David Dillow7aa54bd2008-01-07 18:23:41 -05001713 shost_printk(KERN_ERR, target->scsi_host,
Bart Van Assched92c0da2014-10-06 17:14:36 +02001714 "Null scmnd for RSP w/tag %#016llx received on ch %td / QP %#x\n",
1715 rsp->tag, ch - target->ch, ch->qp->qp_num);
Bart Van Assche22032992012-08-14 13:18:53 +00001716
Bart Van Assche509c07b2014-10-30 14:48:30 +01001717 spin_lock_irqsave(&ch->lock, flags);
1718 ch->req_lim += be32_to_cpu(rsp->req_lim_delta);
1719 spin_unlock_irqrestore(&ch->lock, flags);
Bart Van Assche22032992012-08-14 13:18:53 +00001720
1721 return;
1722 }
Roland Dreieraef9ec32005-11-02 14:07:13 -08001723 scmnd->result = rsp->status;
1724
1725 if (rsp->flags & SRP_RSP_FLAG_SNSVALID) {
1726 memcpy(scmnd->sense_buffer, rsp->data +
1727 be32_to_cpu(rsp->resp_data_len),
1728 min_t(int, be32_to_cpu(rsp->sense_data_len),
1729 SCSI_SENSE_BUFFERSIZE));
1730 }
1731
Bart Van Asschee7145312014-07-09 15:57:51 +02001732 if (unlikely(rsp->flags & SRP_RSP_FLAG_DIUNDER))
FUJITA Tomonoribb350d12007-05-26 02:28:25 +09001733 scsi_set_resid(scmnd, be32_to_cpu(rsp->data_in_res_cnt));
Bart Van Asschee7145312014-07-09 15:57:51 +02001734 else if (unlikely(rsp->flags & SRP_RSP_FLAG_DIOVER))
1735 scsi_set_resid(scmnd, -be32_to_cpu(rsp->data_in_res_cnt));
1736 else if (unlikely(rsp->flags & SRP_RSP_FLAG_DOUNDER))
1737 scsi_set_resid(scmnd, be32_to_cpu(rsp->data_out_res_cnt));
1738 else if (unlikely(rsp->flags & SRP_RSP_FLAG_DOOVER))
1739 scsi_set_resid(scmnd, -be32_to_cpu(rsp->data_out_res_cnt));
Roland Dreieraef9ec32005-11-02 14:07:13 -08001740
Bart Van Assche509c07b2014-10-30 14:48:30 +01001741 srp_free_req(ch, req, scmnd,
Bart Van Assche22032992012-08-14 13:18:53 +00001742 be32_to_cpu(rsp->req_lim_delta));
1743
David Dillowf8b6e312010-11-26 13:02:21 -05001744 scmnd->host_scribble = NULL;
1745 scmnd->scsi_done(scmnd);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001746 }
Roland Dreieraef9ec32005-11-02 14:07:13 -08001747}
1748
Bart Van Assche509c07b2014-10-30 14:48:30 +01001749static int srp_response_common(struct srp_rdma_ch *ch, s32 req_delta,
David Dillowbb125882010-10-08 14:40:47 -04001750 void *rsp, int len)
1751{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001752 struct srp_target_port *target = ch->target;
Bart Van Assche76c75b22010-11-26 14:37:47 -05001753 struct ib_device *dev = target->srp_host->srp_dev->dev;
David Dillowbb125882010-10-08 14:40:47 -04001754 unsigned long flags;
1755 struct srp_iu *iu;
Bart Van Assche76c75b22010-11-26 14:37:47 -05001756 int err;
David Dillowbb125882010-10-08 14:40:47 -04001757
Bart Van Assche509c07b2014-10-30 14:48:30 +01001758 spin_lock_irqsave(&ch->lock, flags);
1759 ch->req_lim += req_delta;
1760 iu = __srp_get_tx_iu(ch, SRP_IU_RSP);
1761 spin_unlock_irqrestore(&ch->lock, flags);
Bart Van Assche76c75b22010-11-26 14:37:47 -05001762
David Dillowbb125882010-10-08 14:40:47 -04001763 if (!iu) {
1764 shost_printk(KERN_ERR, target->scsi_host, PFX
1765 "no IU available to send response\n");
Bart Van Assche76c75b22010-11-26 14:37:47 -05001766 return 1;
David Dillowbb125882010-10-08 14:40:47 -04001767 }
1768
1769 ib_dma_sync_single_for_cpu(dev, iu->dma, len, DMA_TO_DEVICE);
1770 memcpy(iu->buf, rsp, len);
1771 ib_dma_sync_single_for_device(dev, iu->dma, len, DMA_TO_DEVICE);
1772
Bart Van Assche509c07b2014-10-30 14:48:30 +01001773 err = srp_post_send(ch, iu, len);
Bart Van Assche76c75b22010-11-26 14:37:47 -05001774 if (err) {
David Dillowbb125882010-10-08 14:40:47 -04001775 shost_printk(KERN_ERR, target->scsi_host, PFX
1776 "unable to post response: %d\n", err);
Bart Van Assche509c07b2014-10-30 14:48:30 +01001777 srp_put_tx_iu(ch, iu, SRP_IU_RSP);
Bart Van Assche76c75b22010-11-26 14:37:47 -05001778 }
David Dillowbb125882010-10-08 14:40:47 -04001779
David Dillowbb125882010-10-08 14:40:47 -04001780 return err;
1781}
1782
Bart Van Assche509c07b2014-10-30 14:48:30 +01001783static void srp_process_cred_req(struct srp_rdma_ch *ch,
David Dillowbb125882010-10-08 14:40:47 -04001784 struct srp_cred_req *req)
1785{
1786 struct srp_cred_rsp rsp = {
1787 .opcode = SRP_CRED_RSP,
1788 .tag = req->tag,
1789 };
1790 s32 delta = be32_to_cpu(req->req_lim_delta);
1791
Bart Van Assche509c07b2014-10-30 14:48:30 +01001792 if (srp_response_common(ch, delta, &rsp, sizeof(rsp)))
1793 shost_printk(KERN_ERR, ch->target->scsi_host, PFX
David Dillowbb125882010-10-08 14:40:47 -04001794 "problems processing SRP_CRED_REQ\n");
1795}
1796
Bart Van Assche509c07b2014-10-30 14:48:30 +01001797static void srp_process_aer_req(struct srp_rdma_ch *ch,
David Dillowbb125882010-10-08 14:40:47 -04001798 struct srp_aer_req *req)
1799{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001800 struct srp_target_port *target = ch->target;
David Dillowbb125882010-10-08 14:40:47 -04001801 struct srp_aer_rsp rsp = {
1802 .opcode = SRP_AER_RSP,
1803 .tag = req->tag,
1804 };
1805 s32 delta = be32_to_cpu(req->req_lim_delta);
1806
1807 shost_printk(KERN_ERR, target->scsi_host, PFX
1808 "ignoring AER for LUN %llu\n", be64_to_cpu(req->lun));
1809
Bart Van Assche509c07b2014-10-30 14:48:30 +01001810 if (srp_response_common(ch, delta, &rsp, sizeof(rsp)))
David Dillowbb125882010-10-08 14:40:47 -04001811 shost_printk(KERN_ERR, target->scsi_host, PFX
1812 "problems processing SRP_AER_REQ\n");
1813}
1814
Bart Van Assche509c07b2014-10-30 14:48:30 +01001815static void srp_handle_recv(struct srp_rdma_ch *ch, struct ib_wc *wc)
Roland Dreieraef9ec32005-11-02 14:07:13 -08001816{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001817 struct srp_target_port *target = ch->target;
Bart Van Asschedcb4cb82010-11-26 13:22:48 -05001818 struct ib_device *dev = target->srp_host->srp_dev->dev;
Roland Dreier737b94e2011-05-23 11:30:04 -07001819 struct srp_iu *iu = (struct srp_iu *) (uintptr_t) wc->wr_id;
Bart Van Asschec996bb42010-07-30 10:59:05 +00001820 int res;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001821 u8 opcode;
1822
Bart Van Assche509c07b2014-10-30 14:48:30 +01001823 ib_dma_sync_single_for_cpu(dev, iu->dma, ch->max_ti_iu_len,
Ralph Campbell85507bc2006-12-12 14:30:55 -08001824 DMA_FROM_DEVICE);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001825
1826 opcode = *(u8 *) iu->buf;
1827
1828 if (0) {
David Dillow7aa54bd2008-01-07 18:23:41 -05001829 shost_printk(KERN_ERR, target->scsi_host,
1830 PFX "recv completion, opcode 0x%02x\n", opcode);
Bart Van Assche7a700812010-07-29 15:56:37 +00001831 print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 8, 1,
1832 iu->buf, wc->byte_len, true);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001833 }
1834
1835 switch (opcode) {
1836 case SRP_RSP:
Bart Van Assche509c07b2014-10-30 14:48:30 +01001837 srp_process_rsp(ch, iu->buf);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001838 break;
1839
David Dillowbb125882010-10-08 14:40:47 -04001840 case SRP_CRED_REQ:
Bart Van Assche509c07b2014-10-30 14:48:30 +01001841 srp_process_cred_req(ch, iu->buf);
David Dillowbb125882010-10-08 14:40:47 -04001842 break;
1843
1844 case SRP_AER_REQ:
Bart Van Assche509c07b2014-10-30 14:48:30 +01001845 srp_process_aer_req(ch, iu->buf);
David Dillowbb125882010-10-08 14:40:47 -04001846 break;
1847
Roland Dreieraef9ec32005-11-02 14:07:13 -08001848 case SRP_T_LOGOUT:
1849 /* XXX Handle target logout */
David Dillow7aa54bd2008-01-07 18:23:41 -05001850 shost_printk(KERN_WARNING, target->scsi_host,
1851 PFX "Got target logout request\n");
Roland Dreieraef9ec32005-11-02 14:07:13 -08001852 break;
1853
1854 default:
David Dillow7aa54bd2008-01-07 18:23:41 -05001855 shost_printk(KERN_WARNING, target->scsi_host,
1856 PFX "Unhandled SRP opcode 0x%02x\n", opcode);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001857 break;
1858 }
1859
Bart Van Assche509c07b2014-10-30 14:48:30 +01001860 ib_dma_sync_single_for_device(dev, iu->dma, ch->max_ti_iu_len,
Ralph Campbell85507bc2006-12-12 14:30:55 -08001861 DMA_FROM_DEVICE);
Bart Van Asschec996bb42010-07-30 10:59:05 +00001862
Bart Van Assche509c07b2014-10-30 14:48:30 +01001863 res = srp_post_recv(ch, iu);
Bart Van Asschec996bb42010-07-30 10:59:05 +00001864 if (res != 0)
1865 shost_printk(KERN_ERR, target->scsi_host,
1866 PFX "Recv failed with error code %d\n", res);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001867}
1868
Bart Van Asschec1120f82013-10-26 14:35:08 +02001869/**
1870 * srp_tl_err_work() - handle a transport layer error
Bart Van Asscheaf246632014-05-20 15:04:21 +02001871 * @work: Work structure embedded in an SRP target port.
Bart Van Asschec1120f82013-10-26 14:35:08 +02001872 *
1873 * Note: This function may get invoked before the rport has been created,
1874 * hence the target->rport test.
1875 */
1876static void srp_tl_err_work(struct work_struct *work)
1877{
1878 struct srp_target_port *target;
1879
1880 target = container_of(work, struct srp_target_port, tl_err_work);
1881 if (target->rport)
1882 srp_start_tl_fail_timers(target->rport);
1883}
1884
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001885static void srp_handle_qp_err(u64 wr_id, enum ib_wc_status wc_status,
1886 bool send_err, struct srp_target_port *target)
Bart Van Assche948d1e82011-09-03 09:25:42 +02001887{
Bart Van Assche294c8752011-12-25 12:18:12 +00001888 if (target->connected && !target->qp_in_error) {
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001889 if (wr_id & LOCAL_INV_WR_ID_MASK) {
1890 shost_printk(KERN_ERR, target->scsi_host, PFX
1891 "LOCAL_INV failed with status %d\n",
1892 wc_status);
1893 } else if (wr_id & FAST_REG_WR_ID_MASK) {
1894 shost_printk(KERN_ERR, target->scsi_host, PFX
1895 "FAST_REG_MR failed status %d\n",
1896 wc_status);
1897 } else {
1898 shost_printk(KERN_ERR, target->scsi_host,
1899 PFX "failed %s status %d for iu %p\n",
1900 send_err ? "send" : "receive",
1901 wc_status, (void *)(uintptr_t)wr_id);
1902 }
Bart Van Asschec1120f82013-10-26 14:35:08 +02001903 queue_work(system_long_wq, &target->tl_err_work);
Bart Van Assche4f0af692012-11-26 11:16:40 +01001904 }
Bart Van Assche948d1e82011-09-03 09:25:42 +02001905 target->qp_in_error = true;
1906}
1907
Bart Van Assche509c07b2014-10-30 14:48:30 +01001908static void srp_recv_completion(struct ib_cq *cq, void *ch_ptr)
Roland Dreieraef9ec32005-11-02 14:07:13 -08001909{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001910 struct srp_rdma_ch *ch = ch_ptr;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001911 struct ib_wc wc;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001912
1913 ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
1914 while (ib_poll_cq(cq, 1, &wc) > 0) {
Bart Van Assche948d1e82011-09-03 09:25:42 +02001915 if (likely(wc.status == IB_WC_SUCCESS)) {
Bart Van Assche509c07b2014-10-30 14:48:30 +01001916 srp_handle_recv(ch, &wc);
Bart Van Assche948d1e82011-09-03 09:25:42 +02001917 } else {
Bart Van Assche509c07b2014-10-30 14:48:30 +01001918 srp_handle_qp_err(wc.wr_id, wc.status, false,
1919 ch->target);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001920 }
Bart Van Assche9c03dc92010-02-02 19:23:54 +00001921 }
1922}
1923
Bart Van Assche509c07b2014-10-30 14:48:30 +01001924static void srp_send_completion(struct ib_cq *cq, void *ch_ptr)
Bart Van Assche9c03dc92010-02-02 19:23:54 +00001925{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001926 struct srp_rdma_ch *ch = ch_ptr;
Bart Van Assche9c03dc92010-02-02 19:23:54 +00001927 struct ib_wc wc;
Bart Van Asschedcb4cb82010-11-26 13:22:48 -05001928 struct srp_iu *iu;
Bart Van Assche9c03dc92010-02-02 19:23:54 +00001929
1930 while (ib_poll_cq(cq, 1, &wc) > 0) {
Bart Van Assche948d1e82011-09-03 09:25:42 +02001931 if (likely(wc.status == IB_WC_SUCCESS)) {
1932 iu = (struct srp_iu *) (uintptr_t) wc.wr_id;
Bart Van Assche509c07b2014-10-30 14:48:30 +01001933 list_add(&iu->list, &ch->free_tx);
Bart Van Assche948d1e82011-09-03 09:25:42 +02001934 } else {
Bart Van Assche509c07b2014-10-30 14:48:30 +01001935 srp_handle_qp_err(wc.wr_id, wc.status, true,
1936 ch->target);
Bart Van Assche9c03dc92010-02-02 19:23:54 +00001937 }
Roland Dreieraef9ec32005-11-02 14:07:13 -08001938 }
1939}
1940
Bart Van Assche76c75b22010-11-26 14:37:47 -05001941static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
Roland Dreieraef9ec32005-11-02 14:07:13 -08001942{
Bart Van Assche76c75b22010-11-26 14:37:47 -05001943 struct srp_target_port *target = host_to_target(shost);
Bart Van Asschea95cadb2013-10-26 14:37:17 +02001944 struct srp_rport *rport = target->rport;
Bart Van Assche509c07b2014-10-30 14:48:30 +01001945 struct srp_rdma_ch *ch;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001946 struct srp_request *req;
1947 struct srp_iu *iu;
1948 struct srp_cmd *cmd;
Ralph Campbell85507bc2006-12-12 14:30:55 -08001949 struct ib_device *dev;
Bart Van Assche76c75b22010-11-26 14:37:47 -05001950 unsigned long flags;
Bart Van Assche77f2c1a2014-10-02 15:29:25 +02001951 u32 tag;
1952 u16 idx;
Bart Van Assched1b42892014-05-20 15:07:20 +02001953 int len, ret;
Bart Van Asschea95cadb2013-10-26 14:37:17 +02001954 const bool in_scsi_eh = !in_interrupt() && current == shost->ehandler;
1955
1956 /*
1957 * The SCSI EH thread is the only context from which srp_queuecommand()
1958 * can get invoked for blocked devices (SDEV_BLOCK /
1959 * SDEV_CREATED_BLOCK). Avoid racing with srp_reconnect_rport() by
1960 * locking the rport mutex if invoked from inside the SCSI EH.
1961 */
1962 if (in_scsi_eh)
1963 mutex_lock(&rport->mutex);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001964
Bart Van Assched1b42892014-05-20 15:07:20 +02001965 scmnd->result = srp_chkready(target->rport);
1966 if (unlikely(scmnd->result))
1967 goto err;
Bart Van Assche2ce19e72013-02-21 17:20:00 +00001968
Bart Van Assche77f2c1a2014-10-02 15:29:25 +02001969 WARN_ON_ONCE(scmnd->request->tag < 0);
1970 tag = blk_mq_unique_tag(scmnd->request);
Bart Van Assched92c0da2014-10-06 17:14:36 +02001971 ch = &target->ch[blk_mq_unique_tag_to_hwq(tag)];
Bart Van Assche77f2c1a2014-10-02 15:29:25 +02001972 idx = blk_mq_unique_tag_to_tag(tag);
1973 WARN_ONCE(idx >= target->req_ring_size, "%s: tag %#x: idx %d >= %d\n",
1974 dev_name(&shost->shost_gendev), tag, idx,
1975 target->req_ring_size);
Bart Van Assche509c07b2014-10-30 14:48:30 +01001976
1977 spin_lock_irqsave(&ch->lock, flags);
1978 iu = __srp_get_tx_iu(ch, SRP_IU_CMD);
Bart Van Assche509c07b2014-10-30 14:48:30 +01001979 spin_unlock_irqrestore(&ch->lock, flags);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001980
Bart Van Assche77f2c1a2014-10-02 15:29:25 +02001981 if (!iu)
1982 goto err;
1983
1984 req = &ch->req_ring[idx];
Greg Kroah-Hartman05321932008-03-06 00:13:36 +01001985 dev = target->srp_host->srp_dev->dev;
David Dillow49248642011-01-14 18:23:24 -05001986 ib_dma_sync_single_for_cpu(dev, iu->dma, target->max_iu_len,
Ralph Campbell85507bc2006-12-12 14:30:55 -08001987 DMA_TO_DEVICE);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001988
David Dillowf8b6e312010-11-26 13:02:21 -05001989 scmnd->host_scribble = (void *) req;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001990
1991 cmd = iu->buf;
1992 memset(cmd, 0, sizeof *cmd);
1993
1994 cmd->opcode = SRP_CMD;
1995 cmd->lun = cpu_to_be64((u64) scmnd->device->lun << 48);
Bart Van Assche77f2c1a2014-10-02 15:29:25 +02001996 cmd->tag = tag;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001997 memcpy(cmd->cdb, scmnd->cmnd, scmnd->cmd_len);
1998
Roland Dreieraef9ec32005-11-02 14:07:13 -08001999 req->scmnd = scmnd;
2000 req->cmd = iu;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002001
Bart Van Assche509c07b2014-10-30 14:48:30 +01002002 len = srp_map_data(scmnd, ch, req);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002003 if (len < 0) {
David Dillow7aa54bd2008-01-07 18:23:41 -05002004 shost_printk(KERN_ERR, target->scsi_host,
Bart Van Assched1b42892014-05-20 15:07:20 +02002005 PFX "Failed to map data (%d)\n", len);
2006 /*
2007 * If we ran out of memory descriptors (-ENOMEM) because an
2008 * application is queuing many requests with more than
Bart Van Assche52ede082014-05-20 15:07:45 +02002009 * max_pages_per_mr sg-list elements, tell the SCSI mid-layer
Bart Van Assched1b42892014-05-20 15:07:20 +02002010 * to reduce queue depth temporarily.
2011 */
2012 scmnd->result = len == -ENOMEM ?
2013 DID_OK << 16 | QUEUE_FULL << 1 : DID_ERROR << 16;
Bart Van Assche76c75b22010-11-26 14:37:47 -05002014 goto err_iu;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002015 }
2016
David Dillow49248642011-01-14 18:23:24 -05002017 ib_dma_sync_single_for_device(dev, iu->dma, target->max_iu_len,
Ralph Campbell85507bc2006-12-12 14:30:55 -08002018 DMA_TO_DEVICE);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002019
Bart Van Assche509c07b2014-10-30 14:48:30 +01002020 if (srp_post_send(ch, iu, len)) {
David Dillow7aa54bd2008-01-07 18:23:41 -05002021 shost_printk(KERN_ERR, target->scsi_host, PFX "Send failed\n");
Roland Dreieraef9ec32005-11-02 14:07:13 -08002022 goto err_unmap;
2023 }
2024
Bart Van Assched1b42892014-05-20 15:07:20 +02002025 ret = 0;
2026
Bart Van Asschea95cadb2013-10-26 14:37:17 +02002027unlock_rport:
2028 if (in_scsi_eh)
2029 mutex_unlock(&rport->mutex);
2030
Bart Van Assched1b42892014-05-20 15:07:20 +02002031 return ret;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002032
2033err_unmap:
Bart Van Assche509c07b2014-10-30 14:48:30 +01002034 srp_unmap_data(scmnd, ch, req);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002035
Bart Van Assche76c75b22010-11-26 14:37:47 -05002036err_iu:
Bart Van Assche509c07b2014-10-30 14:48:30 +01002037 srp_put_tx_iu(ch, iu, SRP_IU_CMD);
Bart Van Assche76c75b22010-11-26 14:37:47 -05002038
Bart Van Assche024ca902014-05-20 15:03:49 +02002039 /*
2040 * Avoid that the loops that iterate over the request ring can
2041 * encounter a dangling SCSI command pointer.
2042 */
2043 req->scmnd = NULL;
2044
Bart Van Assched1b42892014-05-20 15:07:20 +02002045err:
2046 if (scmnd->result) {
2047 scmnd->scsi_done(scmnd);
2048 ret = 0;
2049 } else {
2050 ret = SCSI_MLQUEUE_HOST_BUSY;
2051 }
Bart Van Asschea95cadb2013-10-26 14:37:17 +02002052
Bart Van Assched1b42892014-05-20 15:07:20 +02002053 goto unlock_rport;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002054}
2055
/*
 * Note: the resources allocated in this function are freed in
 * srp_free_ch_ib().
 */
static int srp_alloc_iu_bufs(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	int i;

	/* Pointer arrays for the RX and TX rings; kcalloc() zero-fills them. */
	ch->rx_ring = kcalloc(target->queue_size, sizeof(*ch->rx_ring),
			      GFP_KERNEL);
	if (!ch->rx_ring)
		goto err_no_ring;
	ch->tx_ring = kcalloc(target->queue_size, sizeof(*ch->tx_ring),
			      GFP_KERNEL);
	if (!ch->tx_ring)
		goto err_no_ring;

	/* Receive IUs are sized for the largest IU the target may send. */
	for (i = 0; i < target->queue_size; ++i) {
		ch->rx_ring[i] = srp_alloc_iu(target->srp_host,
					      ch->max_ti_iu_len,
					      GFP_KERNEL, DMA_FROM_DEVICE);
		if (!ch->rx_ring[i])
			goto err;
	}

	/* Transmit IUs start out on the free_tx list. */
	for (i = 0; i < target->queue_size; ++i) {
		ch->tx_ring[i] = srp_alloc_iu(target->srp_host,
					      target->max_iu_len,
					      GFP_KERNEL, DMA_TO_DEVICE);
		if (!ch->tx_ring[i])
			goto err;

		list_add(&ch->tx_ring[i]->list, &ch->free_tx);
	}

	return 0;

err:
	/*
	 * Free all slots of both rings; unallocated entries are still NULL
	 * from kcalloc(), which srp_free_iu() presumably tolerates — see its
	 * definition elsewhere in this file.
	 */
	for (i = 0; i < target->queue_size; ++i) {
		srp_free_iu(target->srp_host, ch->rx_ring[i]);
		srp_free_iu(target->srp_host, ch->tx_ring[i]);
	}


err_no_ring:
	/* kfree(NULL) is a no-op, so reaching here with a NULL ring is fine. */
	kfree(ch->tx_ring);
	ch->tx_ring = NULL;
	kfree(ch->rx_ring);
	ch->rx_ring = NULL;

	return -ENOMEM;
}
2109
/**
 * srp_compute_rq_tmo() - derive the reconnect timeout from the QP attributes
 * @qp_attr:   QP attributes; the timeout and retry_cnt fields must be valid.
 * @attr_mask: attribute mask returned by ib_cm_init_qp_attr().
 *
 * Returns the timeout in jiffies: one second longer than the worst-case time
 * before an error completion can be generated for this QP.
 */
static uint32_t srp_compute_rq_tmo(struct ib_qp_attr *qp_attr, int attr_mask)
{
	uint64_t T_tr_ns, max_compl_time_ms;
	uint32_t rq_tmo_jiffies;

	/*
	 * According to section 11.2.4.2 in the IBTA spec (Modify Queue Pair,
	 * table 91), both the QP timeout and the retry count have to be set
	 * for RC QP's during the RTR to RTS transition.
	 */
	WARN_ON_ONCE((attr_mask & (IB_QP_TIMEOUT | IB_QP_RETRY_CNT)) !=
		     (IB_QP_TIMEOUT | IB_QP_RETRY_CNT));

	/*
	 * Set target->rq_tmo_jiffies to one second more than the largest time
	 * it can take before an error completion is generated. See also
	 * C9-140..142 in the IBTA spec for more information about how to
	 * convert the QP Local ACK Timeout value to nanoseconds.
	 */
	T_tr_ns = 4096 * (1ULL << qp_attr->timeout);
	max_compl_time_ms = qp_attr->retry_cnt * 4 * T_tr_ns;
	/* do_div() divides in place; max_compl_time_ms is now milliseconds. */
	do_div(max_compl_time_ms, NSEC_PER_MSEC);
	rq_tmo_jiffies = msecs_to_jiffies(max_compl_time_ms + 1000);

	return rq_tmo_jiffies;
}
2136
/*
 * Handle an IB CM REP (connection accepted): parse the SRP login response,
 * allocate the IU rings on first connect, transition the QP through
 * RTR -> RTS, post the initial receives and send the RTU.  The outcome is
 * stored in ch->status for the caller waiting on ch->done.
 */
static void srp_cm_rep_handler(struct ib_cm_id *cm_id,
			       struct srp_login_rsp *lrsp,
			       struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	struct ib_qp_attr *qp_attr = NULL;
	int attr_mask = 0;
	int ret;
	int i;

	if (lrsp->opcode == SRP_LOGIN_RSP) {
		ch->max_ti_iu_len = be32_to_cpu(lrsp->max_ti_iu_len);
		ch->req_lim = be32_to_cpu(lrsp->req_lim_delta);

		/*
		 * Reserve credits for task management so we don't
		 * bounce requests back to the SCSI mid-layer.
		 */
		target->scsi_host->can_queue
			= min(ch->req_lim - SRP_TSK_MGMT_SQ_SIZE,
			      target->scsi_host->can_queue);
		target->scsi_host->cmd_per_lun
			= min_t(int, target->scsi_host->can_queue,
				target->scsi_host->cmd_per_lun);
	} else {
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Unhandled RSP opcode %#x\n", lrsp->opcode);
		ret = -ECONNRESET;
		goto error;
	}

	/* First successful login on this channel: set up the IU rings. */
	if (!ch->rx_ring) {
		ret = srp_alloc_iu_bufs(ch);
		if (ret)
			goto error;
	}

	ret = -ENOMEM;
	qp_attr = kmalloc(sizeof *qp_attr, GFP_KERNEL);
	if (!qp_attr)
		goto error;

	/* Move the QP to RTR so that receives can complete. */
	qp_attr->qp_state = IB_QPS_RTR;
	ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
	if (ret)
		goto error_free;

	ret = ib_modify_qp(ch->qp, qp_attr, attr_mask);
	if (ret)
		goto error_free;

	/* Post a receive buffer for every ring slot before going to RTS. */
	for (i = 0; i < target->queue_size; i++) {
		struct srp_iu *iu = ch->rx_ring[i];

		ret = srp_post_recv(ch, iu);
		if (ret)
			goto error_free;
	}

	qp_attr->qp_state = IB_QPS_RTS;
	ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
	if (ret)
		goto error_free;

	/* RTS attributes carry the timeout/retry values needed here. */
	target->rq_tmo_jiffies = srp_compute_rq_tmo(qp_attr, attr_mask);

	ret = ib_modify_qp(ch->qp, qp_attr, attr_mask);
	if (ret)
		goto error_free;

	ret = ib_send_cm_rtu(cm_id, NULL, 0);

error_free:
	kfree(qp_attr);

error:
	ch->status = ret;
}
2215
/*
 * Handle an IB CM REJ event: decode the rejection reason, apply any
 * redirect information to the channel's path and record the outcome in
 * ch->status (SRP_*_REDIRECT / SRP_STALE_CONN or a negative errno).
 */
static void srp_cm_rej_handler(struct ib_cm_id *cm_id,
			       struct ib_cm_event *event,
			       struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	struct Scsi_Host *shost = target->scsi_host;
	struct ib_class_port_info *cpi;
	int opcode;

	switch (event->param.rej_rcvd.reason) {
	case IB_CM_REJ_PORT_CM_REDIRECT:
		/* The ARI carries class port info describing the new CM port. */
		cpi = event->param.rej_rcvd.ari;
		ch->path.dlid = cpi->redirect_lid;
		ch->path.pkey = cpi->redirect_pkey;
		cm_id->remote_cm_qpn = be32_to_cpu(cpi->redirect_qp) & 0x00ffffff;
		memcpy(ch->path.dgid.raw, cpi->redirect_gid, 16);

		/* A non-zero redirect LID means a DLID redirect, else a port one. */
		ch->status = ch->path.dlid ?
			SRP_DLID_REDIRECT : SRP_PORT_REDIRECT;
		break;

	case IB_CM_REJ_PORT_REDIRECT:
		if (srp_target_is_topspin(target)) {
			/*
			 * Topspin/Cisco SRP gateways incorrectly send
			 * reject reason code 25 when they mean 24
			 * (port redirect).
			 */
			memcpy(ch->path.dgid.raw,
			       event->param.rej_rcvd.ari, 16);

			shost_printk(KERN_DEBUG, shost,
				     PFX "Topspin/Cisco redirect to target port GID %016llx%016llx\n",
				     be64_to_cpu(ch->path.dgid.global.subnet_prefix),
				     be64_to_cpu(ch->path.dgid.global.interface_id));

			ch->status = SRP_PORT_REDIRECT;
		} else {
			shost_printk(KERN_WARNING, shost,
				     "  REJ reason: IB_CM_REJ_PORT_REDIRECT\n");
			ch->status = -ECONNRESET;
		}
		break;

	case IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID:
		shost_printk(KERN_WARNING, shost,
			    "  REJ reason: IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID\n");
		ch->status = -ECONNRESET;
		break;

	case IB_CM_REJ_CONSUMER_DEFINED:
		/* The target placed an SRP-level reject in the private data. */
		opcode = *(u8 *) event->private_data;
		if (opcode == SRP_LOGIN_REJ) {
			struct srp_login_rej *rej = event->private_data;
			u32 reason = be32_to_cpu(rej->reason);

			if (reason == SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE)
				shost_printk(KERN_WARNING, shost,
					     PFX "SRP_LOGIN_REJ: requested max_it_iu_len too large\n");
			else
				shost_printk(KERN_WARNING, shost, PFX
					     "SRP LOGIN from %pI6 to %pI6 REJECTED, reason 0x%08x\n",
					     target->sgid.raw,
					     target->orig_dgid.raw, reason);
		} else
			shost_printk(KERN_WARNING, shost,
				     "  REJ reason: IB_CM_REJ_CONSUMER_DEFINED,"
				     " opcode 0x%02x\n", opcode);
		ch->status = -ECONNRESET;
		break;

	case IB_CM_REJ_STALE_CONN:
		shost_printk(KERN_WARNING, shost, "  REJ reason: stale connection\n");
		ch->status = SRP_STALE_CONN;
		break;

	default:
		shost_printk(KERN_WARNING, shost, "  REJ reason 0x%x\n",
			     event->param.rej_rcvd.reason);
		ch->status = -ECONNRESET;
	}
}
2298
/*
 * IB CM event dispatcher for an SRP RDMA channel.  Delegates REP/REJ
 * processing to the handlers above; for events that conclude a connection
 * attempt it sets 'comp' so that the waiter on ch->done is woken after
 * ch->status has been filled in.  Always returns 0 (event consumed).
 */
static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
{
	struct srp_rdma_ch *ch = cm_id->context;
	struct srp_target_port *target = ch->target;
	int comp = 0;

	switch (event->event) {
	case IB_CM_REQ_ERROR:
		shost_printk(KERN_DEBUG, target->scsi_host,
			     PFX "Sending CM REQ failed\n");
		comp = 1;
		ch->status = -ECONNRESET;
		break;

	case IB_CM_REP_RECEIVED:
		comp = 1;
		srp_cm_rep_handler(cm_id, event->private_data, ch);
		break;

	case IB_CM_REJ_RECEIVED:
		shost_printk(KERN_DEBUG, target->scsi_host, PFX "REJ received\n");
		comp = 1;

		srp_cm_rej_handler(cm_id, event, ch);
		break;

	case IB_CM_DREQ_RECEIVED:
		/* Target-initiated disconnect: ack it and schedule recovery. */
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "DREQ received - connection closed\n");
		srp_change_conn_state(target, false);
		if (ib_send_cm_drep(cm_id, NULL, 0))
			shost_printk(KERN_ERR, target->scsi_host,
				     PFX "Sending CM DREP failed\n");
		queue_work(system_long_wq, &target->tl_err_work);
		break;

	case IB_CM_TIMEWAIT_EXIT:
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "connection closed\n");
		comp = 1;

		ch->status = 0;
		break;

	case IB_CM_MRA_RECEIVED:
	case IB_CM_DREQ_ERROR:
	case IB_CM_DREP_RECEIVED:
		/* Nothing to do for these events. */
		break;

	default:
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Unhandled CM event %d\n", event->event);
		break;
	}

	/* Wake the thread waiting for the connection attempt to conclude. */
	if (comp)
		complete(&ch->done);

	return 0;
}
2359
Jack Wang71444b92013-11-07 11:37:37 +01002360/**
Jack Wang71444b92013-11-07 11:37:37 +01002361 * srp_change_queue_depth - setting device queue depth
2362 * @sdev: scsi device struct
2363 * @qdepth: requested queue depth
2364 * @reason: SCSI_QDEPTH_DEFAULT/SCSI_QDEPTH_QFULL/SCSI_QDEPTH_RAMP_UP
2365 * (see include/scsi/scsi_host.h for definition)
2366 *
2367 * Returns queue depth.
2368 */
2369static int
2370srp_change_queue_depth(struct scsi_device *sdev, int qdepth, int reason)
2371{
2372 struct Scsi_Host *shost = sdev->host;
2373 int max_depth;
2374 if (reason == SCSI_QDEPTH_DEFAULT || reason == SCSI_QDEPTH_RAMP_UP) {
2375 max_depth = shost->can_queue;
2376 if (!sdev->tagged_supported)
2377 max_depth = 1;
2378 if (qdepth > max_depth)
2379 qdepth = max_depth;
Christoph Hellwigc8b09f62014-11-03 20:15:14 +01002380 scsi_adjust_queue_depth(sdev, qdepth);
Jack Wang71444b92013-11-07 11:37:37 +01002381 } else if (reason == SCSI_QDEPTH_QFULL)
2382 scsi_track_queue_full(sdev, qdepth);
2383 else
2384 return -EOPNOTSUPP;
2385
2386 return sdev->queue_depth;
2387}
2388
/*
 * Send an SRP task management IU (e.g. ABORT TASK or LUN RESET) on @ch and
 * wait for its response.  @req_tag is the tag of the command being managed,
 * @func is one of the SRP_TSK_* function codes.  Returns 0 on success and
 * -1 on any failure or timeout.
 */
static int srp_send_tsk_mgmt(struct srp_rdma_ch *ch, u64 req_tag,
			     unsigned int lun, u8 func)
{
	struct srp_target_port *target = ch->target;
	struct srp_rport *rport = target->rport;
	struct ib_device *dev = target->srp_host->srp_dev->dev;
	struct srp_iu *iu;
	struct srp_tsk_mgmt *tsk_mgmt;

	if (!target->connected || target->qp_in_error)
		return -1;

	init_completion(&ch->tsk_mgmt_done);

	/*
	 * Lock the rport mutex to avoid that srp_create_ch_ib() is
	 * invoked while a task management function is being sent.
	 */
	mutex_lock(&rport->mutex);
	spin_lock_irq(&ch->lock);
	iu = __srp_get_tx_iu(ch, SRP_IU_TSK_MGMT);
	spin_unlock_irq(&ch->lock);

	if (!iu) {
		mutex_unlock(&rport->mutex);

		return -1;
	}

	/* Hand the IU buffer to the CPU before writing the request into it. */
	ib_dma_sync_single_for_cpu(dev, iu->dma, sizeof *tsk_mgmt,
				   DMA_TO_DEVICE);
	tsk_mgmt = iu->buf;
	memset(tsk_mgmt, 0, sizeof *tsk_mgmt);

	tsk_mgmt->opcode = SRP_TSK_MGMT;
	/* SRP encodes the LUN in the top 16 bits of the 64-bit LUN field. */
	tsk_mgmt->lun = cpu_to_be64((u64) lun << 48);
	/* Mark the tag so the response path can tell TM IUs from commands. */
	tsk_mgmt->tag = req_tag | SRP_TAG_TSK_MGMT;
	tsk_mgmt->tsk_mgmt_func = func;
	tsk_mgmt->task_tag = req_tag;

	ib_dma_sync_single_for_device(dev, iu->dma, sizeof *tsk_mgmt,
				      DMA_TO_DEVICE);
	if (srp_post_send(ch, iu, sizeof(*tsk_mgmt))) {
		srp_put_tx_iu(ch, iu, SRP_IU_TSK_MGMT);
		mutex_unlock(&rport->mutex);

		return -1;
	}
	mutex_unlock(&rport->mutex);

	if (!wait_for_completion_timeout(&ch->tsk_mgmt_done,
					 msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS)))
		return -1;

	return 0;
}
2445
/*
 * SCSI EH abort handler.  Maps the command's blk-mq tag back to its RDMA
 * channel, claims the request so no other context completes it, sends an
 * SRP ABORT TASK and finishes the command locally with DID_ABORT.
 * Returns SUCCESS, FAST_IO_FAIL or FAILED as expected by the SCSI EH core.
 */
static int srp_abort(struct scsi_cmnd *scmnd)
{
	struct srp_target_port *target = host_to_target(scmnd->device->host);
	struct srp_request *req = (struct srp_request *) scmnd->host_scribble;
	u32 tag;
	u16 ch_idx;
	struct srp_rdma_ch *ch;
	int ret;

	shost_printk(KERN_ERR, target->scsi_host, "SRP abort called\n");

	/* No srp_request attached means the command was never issued here. */
	if (!req)
		return SUCCESS;
	tag = blk_mq_unique_tag(scmnd->request);
	/* The hardware-queue part of the unique tag selects the channel. */
	ch_idx = blk_mq_unique_tag_to_hwq(tag);
	if (WARN_ON_ONCE(ch_idx >= target->ch_count))
		return SUCCESS;
	ch = &target->ch[ch_idx];
	/* If we cannot claim the request it has already been completed. */
	if (!srp_claim_req(ch, req, NULL, scmnd))
		return SUCCESS;
	shost_printk(KERN_ERR, target->scsi_host,
		     "Sending SRP abort for tag %#x\n", tag);
	if (srp_send_tsk_mgmt(ch, tag, scmnd->device->lun,
			      SRP_TSK_ABORT_TASK) == 0)
		ret = SUCCESS;
	else if (target->rport->state == SRP_RPORT_LOST)
		ret = FAST_IO_FAIL;
	else
		ret = FAILED;
	/* Complete the command locally regardless of the TM outcome. */
	srp_free_req(ch, req, scmnd, 0);
	scmnd->result = DID_ABORT << 16;
	scmnd->scsi_done(scmnd);

	return ret;
}
2481
2482static int srp_reset_device(struct scsi_cmnd *scmnd)
2483{
Roland Dreierd945e1d2006-05-09 10:50:28 -07002484 struct srp_target_port *target = host_to_target(scmnd->device->host);
Bart Van Assched92c0da2014-10-06 17:14:36 +02002485 struct srp_rdma_ch *ch;
Bart Van Assche536ae142010-11-26 13:58:27 -05002486 int i;
Roland Dreierd945e1d2006-05-09 10:50:28 -07002487
David Dillow7aa54bd2008-01-07 18:23:41 -05002488 shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n");
Roland Dreieraef9ec32005-11-02 14:07:13 -08002489
Bart Van Assched92c0da2014-10-06 17:14:36 +02002490 ch = &target->ch[0];
Bart Van Assche509c07b2014-10-30 14:48:30 +01002491 if (srp_send_tsk_mgmt(ch, SRP_TAG_NO_REQ, scmnd->device->lun,
David Dillowf8b6e312010-11-26 13:02:21 -05002492 SRP_TSK_LUN_RESET))
Roland Dreierd945e1d2006-05-09 10:50:28 -07002493 return FAILED;
Bart Van Assche509c07b2014-10-30 14:48:30 +01002494 if (ch->tsk_mgmt_status)
Roland Dreierd945e1d2006-05-09 10:50:28 -07002495 return FAILED;
2496
Bart Van Assched92c0da2014-10-06 17:14:36 +02002497 for (i = 0; i < target->ch_count; i++) {
2498 ch = &target->ch[i];
2499 for (i = 0; i < target->req_ring_size; ++i) {
2500 struct srp_request *req = &ch->req_ring[i];
Bart Van Assche509c07b2014-10-30 14:48:30 +01002501
Bart Van Assched92c0da2014-10-06 17:14:36 +02002502 srp_finish_req(ch, req, scmnd->device, DID_RESET << 16);
2503 }
Bart Van Assche536ae142010-11-26 13:58:27 -05002504 }
Roland Dreierd945e1d2006-05-09 10:50:28 -07002505
Roland Dreierd945e1d2006-05-09 10:50:28 -07002506 return SUCCESS;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002507}
2508
2509static int srp_reset_host(struct scsi_cmnd *scmnd)
2510{
2511 struct srp_target_port *target = host_to_target(scmnd->device->host);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002512
David Dillow7aa54bd2008-01-07 18:23:41 -05002513 shost_printk(KERN_ERR, target->scsi_host, PFX "SRP reset_host called\n");
Roland Dreieraef9ec32005-11-02 14:07:13 -08002514
Bart Van Asscheed9b2262013-10-26 14:34:27 +02002515 return srp_reconnect_rport(target->rport) == 0 ? SUCCESS : FAILED;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002516}
2517
Bart Van Asschec9b03c12011-09-03 09:34:48 +02002518static int srp_slave_configure(struct scsi_device *sdev)
2519{
2520 struct Scsi_Host *shost = sdev->host;
2521 struct srp_target_port *target = host_to_target(shost);
2522 struct request_queue *q = sdev->request_queue;
2523 unsigned long timeout;
2524
2525 if (sdev->type == TYPE_DISK) {
2526 timeout = max_t(unsigned, 30 * HZ, target->rq_tmo_jiffies);
2527 blk_queue_rq_timeout(q, timeout);
2528 }
2529
2530 return 0;
2531}
2532
Tony Jonesee959b02008-02-22 00:13:36 +01002533static ssize_t show_id_ext(struct device *dev, struct device_attribute *attr,
2534 char *buf)
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002535{
Tony Jonesee959b02008-02-22 00:13:36 +01002536 struct srp_target_port *target = host_to_target(class_to_shost(dev));
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002537
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002538 return sprintf(buf, "0x%016llx\n",
2539 (unsigned long long) be64_to_cpu(target->id_ext));
2540}
2541
Tony Jonesee959b02008-02-22 00:13:36 +01002542static ssize_t show_ioc_guid(struct device *dev, struct device_attribute *attr,
2543 char *buf)
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002544{
Tony Jonesee959b02008-02-22 00:13:36 +01002545 struct srp_target_port *target = host_to_target(class_to_shost(dev));
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002546
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002547 return sprintf(buf, "0x%016llx\n",
2548 (unsigned long long) be64_to_cpu(target->ioc_guid));
2549}
2550
Tony Jonesee959b02008-02-22 00:13:36 +01002551static ssize_t show_service_id(struct device *dev,
2552 struct device_attribute *attr, char *buf)
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002553{
Tony Jonesee959b02008-02-22 00:13:36 +01002554 struct srp_target_port *target = host_to_target(class_to_shost(dev));
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002555
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002556 return sprintf(buf, "0x%016llx\n",
2557 (unsigned long long) be64_to_cpu(target->service_id));
2558}
2559
Tony Jonesee959b02008-02-22 00:13:36 +01002560static ssize_t show_pkey(struct device *dev, struct device_attribute *attr,
2561 char *buf)
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002562{
Tony Jonesee959b02008-02-22 00:13:36 +01002563 struct srp_target_port *target = host_to_target(class_to_shost(dev));
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002564
Bart Van Assche747fe002014-10-30 14:48:05 +01002565 return sprintf(buf, "0x%04x\n", be16_to_cpu(target->pkey));
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002566}
2567
Bart Van Assche848b3082013-10-26 14:38:12 +02002568static ssize_t show_sgid(struct device *dev, struct device_attribute *attr,
2569 char *buf)
2570{
2571 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2572
Bart Van Assche747fe002014-10-30 14:48:05 +01002573 return sprintf(buf, "%pI6\n", target->sgid.raw);
Bart Van Assche848b3082013-10-26 14:38:12 +02002574}
2575
Tony Jonesee959b02008-02-22 00:13:36 +01002576static ssize_t show_dgid(struct device *dev, struct device_attribute *attr,
2577 char *buf)
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002578{
Tony Jonesee959b02008-02-22 00:13:36 +01002579 struct srp_target_port *target = host_to_target(class_to_shost(dev));
Bart Van Assched92c0da2014-10-06 17:14:36 +02002580 struct srp_rdma_ch *ch = &target->ch[0];
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002581
Bart Van Assche509c07b2014-10-30 14:48:30 +01002582 return sprintf(buf, "%pI6\n", ch->path.dgid.raw);
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002583}
2584
Tony Jonesee959b02008-02-22 00:13:36 +01002585static ssize_t show_orig_dgid(struct device *dev,
2586 struct device_attribute *attr, char *buf)
Ishai Rabinovitz3633b3d2007-05-06 21:18:11 -07002587{
Tony Jonesee959b02008-02-22 00:13:36 +01002588 struct srp_target_port *target = host_to_target(class_to_shost(dev));
Ishai Rabinovitz3633b3d2007-05-06 21:18:11 -07002589
Bart Van Assche747fe002014-10-30 14:48:05 +01002590 return sprintf(buf, "%pI6\n", target->orig_dgid.raw);
Ishai Rabinovitz3633b3d2007-05-06 21:18:11 -07002591}
2592
Bart Van Assche89de7482010-08-03 14:08:45 +00002593static ssize_t show_req_lim(struct device *dev,
2594 struct device_attribute *attr, char *buf)
2595{
2596 struct srp_target_port *target = host_to_target(class_to_shost(dev));
Bart Van Assched92c0da2014-10-06 17:14:36 +02002597 struct srp_rdma_ch *ch;
2598 int i, req_lim = INT_MAX;
Bart Van Assche89de7482010-08-03 14:08:45 +00002599
Bart Van Assched92c0da2014-10-06 17:14:36 +02002600 for (i = 0; i < target->ch_count; i++) {
2601 ch = &target->ch[i];
2602 req_lim = min(req_lim, ch->req_lim);
2603 }
2604 return sprintf(buf, "%d\n", req_lim);
Bart Van Assche89de7482010-08-03 14:08:45 +00002605}
2606
Tony Jonesee959b02008-02-22 00:13:36 +01002607static ssize_t show_zero_req_lim(struct device *dev,
2608 struct device_attribute *attr, char *buf)
Roland Dreier6bfa24f2006-06-17 20:37:33 -07002609{
Tony Jonesee959b02008-02-22 00:13:36 +01002610 struct srp_target_port *target = host_to_target(class_to_shost(dev));
Roland Dreier6bfa24f2006-06-17 20:37:33 -07002611
Roland Dreier6bfa24f2006-06-17 20:37:33 -07002612 return sprintf(buf, "%d\n", target->zero_req_lim);
2613}
2614
Tony Jonesee959b02008-02-22 00:13:36 +01002615static ssize_t show_local_ib_port(struct device *dev,
2616 struct device_attribute *attr, char *buf)
Ishai Rabinovitzded7f1a2006-08-15 17:34:52 +03002617{
Tony Jonesee959b02008-02-22 00:13:36 +01002618 struct srp_target_port *target = host_to_target(class_to_shost(dev));
Ishai Rabinovitzded7f1a2006-08-15 17:34:52 +03002619
2620 return sprintf(buf, "%d\n", target->srp_host->port);
2621}
2622
Tony Jonesee959b02008-02-22 00:13:36 +01002623static ssize_t show_local_ib_device(struct device *dev,
2624 struct device_attribute *attr, char *buf)
Ishai Rabinovitzded7f1a2006-08-15 17:34:52 +03002625{
Tony Jonesee959b02008-02-22 00:13:36 +01002626 struct srp_target_port *target = host_to_target(class_to_shost(dev));
Ishai Rabinovitzded7f1a2006-08-15 17:34:52 +03002627
Greg Kroah-Hartman05321932008-03-06 00:13:36 +01002628 return sprintf(buf, "%s\n", target->srp_host->srp_dev->dev->name);
Ishai Rabinovitzded7f1a2006-08-15 17:34:52 +03002629}
2630
Bart Van Assched92c0da2014-10-06 17:14:36 +02002631static ssize_t show_ch_count(struct device *dev, struct device_attribute *attr,
2632 char *buf)
2633{
2634 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2635
2636 return sprintf(buf, "%d\n", target->ch_count);
2637}
2638
Bart Van Assche4b5e5f42013-06-28 14:57:42 +02002639static ssize_t show_comp_vector(struct device *dev,
2640 struct device_attribute *attr, char *buf)
2641{
2642 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2643
2644 return sprintf(buf, "%d\n", target->comp_vector);
2645}
2646
Vu Pham7bb312e2013-10-26 14:31:27 +02002647static ssize_t show_tl_retry_count(struct device *dev,
2648 struct device_attribute *attr, char *buf)
2649{
2650 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2651
2652 return sprintf(buf, "%d\n", target->tl_retry_count);
2653}
2654
David Dillow49248642011-01-14 18:23:24 -05002655static ssize_t show_cmd_sg_entries(struct device *dev,
2656 struct device_attribute *attr, char *buf)
2657{
2658 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2659
2660 return sprintf(buf, "%u\n", target->cmd_sg_cnt);
2661}
2662
David Dillowc07d4242011-01-16 13:57:10 -05002663static ssize_t show_allow_ext_sg(struct device *dev,
2664 struct device_attribute *attr, char *buf)
2665{
2666 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2667
2668 return sprintf(buf, "%s\n", target->allow_ext_sg ? "true" : "false");
2669}
2670
/*
 * Read-only sysfs attributes published for every SRP target's SCSI host,
 * each backed by the corresponding show_*() function above.
 */
static DEVICE_ATTR(id_ext, S_IRUGO, show_id_ext, NULL);
static DEVICE_ATTR(ioc_guid, S_IRUGO, show_ioc_guid, NULL);
static DEVICE_ATTR(service_id, S_IRUGO, show_service_id, NULL);
static DEVICE_ATTR(pkey, S_IRUGO, show_pkey, NULL);
static DEVICE_ATTR(sgid, S_IRUGO, show_sgid, NULL);
static DEVICE_ATTR(dgid, S_IRUGO, show_dgid, NULL);
static DEVICE_ATTR(orig_dgid, S_IRUGO, show_orig_dgid, NULL);
static DEVICE_ATTR(req_lim, S_IRUGO, show_req_lim, NULL);
static DEVICE_ATTR(zero_req_lim, S_IRUGO, show_zero_req_lim, NULL);
static DEVICE_ATTR(local_ib_port, S_IRUGO, show_local_ib_port, NULL);
static DEVICE_ATTR(local_ib_device, S_IRUGO, show_local_ib_device, NULL);
static DEVICE_ATTR(ch_count, S_IRUGO, show_ch_count, NULL);
static DEVICE_ATTR(comp_vector, S_IRUGO, show_comp_vector, NULL);
static DEVICE_ATTR(tl_retry_count, S_IRUGO, show_tl_retry_count, NULL);
static DEVICE_ATTR(cmd_sg_entries, S_IRUGO, show_cmd_sg_entries, NULL);
static DEVICE_ATTR(allow_ext_sg, S_IRUGO, show_allow_ext_sg, NULL);
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002687
/*
 * NULL-terminated attribute list wired into srp_template.shost_attrs so the
 * SCSI midlayer creates these sysfs files for every SRP SCSI host.
 */
static struct device_attribute *srp_host_attrs[] = {
	&dev_attr_id_ext,
	&dev_attr_ioc_guid,
	&dev_attr_service_id,
	&dev_attr_pkey,
	&dev_attr_sgid,
	&dev_attr_dgid,
	&dev_attr_orig_dgid,
	&dev_attr_req_lim,
	&dev_attr_zero_req_lim,
	&dev_attr_local_ib_port,
	&dev_attr_local_ib_device,
	&dev_attr_ch_count,
	&dev_attr_comp_vector,
	&dev_attr_tl_retry_count,
	&dev_attr_cmd_sg_entries,
	&dev_attr_allow_ext_sg,
	NULL
};
2707
/*
 * SCSI host template for the SRP initiator.  Queue sizes default to
 * SRP_DEFAULT_CMD_SQ_SIZE and may be overridden per target via the
 * queue_size/max_cmd_per_lun login options parsed in srp_parse_options().
 */
static struct scsi_host_template srp_template = {
	.module				= THIS_MODULE,
	.name				= "InfiniBand SRP initiator",
	.proc_name			= DRV_NAME,
	.slave_configure		= srp_slave_configure,
	.info				= srp_target_info,
	.queuecommand			= srp_queuecommand,
	.change_queue_depth             = srp_change_queue_depth,
	.change_queue_type              = scsi_change_queue_type,
	.eh_abort_handler		= srp_abort,
	.eh_device_reset_handler	= srp_reset_device,
	.eh_host_reset_handler		= srp_reset_host,
	.skip_settle_delay		= true,
	.sg_tablesize			= SRP_DEF_SG_TABLESIZE,
	.can_queue			= SRP_DEFAULT_CMD_SQ_SIZE,
	.this_id			= -1,
	.cmd_per_lun			= SRP_DEFAULT_CMD_SQ_SIZE,
	.use_clustering			= ENABLE_CLUSTERING,
	.shost_attrs			= srp_host_attrs,
	.use_blk_tags			= 1,
};
2729
Bart Van Assche34aa6542014-10-30 14:47:22 +01002730static int srp_sdev_count(struct Scsi_Host *host)
2731{
2732 struct scsi_device *sdev;
2733 int c = 0;
2734
2735 shost_for_each_device(sdev, host)
2736 c++;
2737
2738 return c;
2739}
2740
/*
 * srp_add_target() - register a target port with the SCSI midlayer and the
 * SRP transport class, then scan it for LUNs.
 *
 * The target is put in SRP_TARGET_SCANNING first; after a successful scan it
 * is promoted to SRP_TARGET_LIVE (unless its state changed concurrently).
 * Returns 0 on success or a negative errno on registration failure.  Note
 * that a failed scan (disconnected / QP error) still returns 0 after
 * scheduling removal of the SCSI host.
 */
static int srp_add_target(struct srp_host *host, struct srp_target_port *target)
{
	struct srp_rport_identifiers ids;
	struct srp_rport *rport;

	target->state = SRP_TARGET_SCANNING;
	sprintf(target->target_name, "SRP.T10:%016llX",
		(unsigned long long) be64_to_cpu(target->id_ext));

	if (scsi_add_host(target->scsi_host, host->srp_dev->dev->dma_device))
		return -ENODEV;

	/* The rport identifier is id_ext followed by ioc_guid (16 bytes). */
	memcpy(ids.port_id, &target->id_ext, 8);
	memcpy(ids.port_id + 8, &target->ioc_guid, 8);
	ids.roles = SRP_RPORT_ROLE_TARGET;
	rport = srp_rport_add(target->scsi_host, &ids);
	if (IS_ERR(rport)) {
		scsi_remove_host(target->scsi_host);
		return PTR_ERR(rport);
	}

	rport->lld_data = target;
	target->rport = rport;

	spin_lock(&host->target_lock);
	list_add_tail(&target->list, &host->target_list);
	spin_unlock(&host->target_lock);

	scsi_scan_target(&target->scsi_host->shost_gendev,
			 0, target->scsi_id, SCAN_WILD_CARD, 0);

	/* If the connection broke during the scan, tear the host down again. */
	if (!target->connected || target->qp_in_error) {
		shost_printk(KERN_INFO, target->scsi_host,
			     PFX "SCSI scan failed - removing SCSI host\n");
		srp_queue_remove_work(target);
		goto out;
	}

	pr_debug(PFX "%s: SCSI scan succeeded - detected %d LUNs\n",
		 dev_name(&target->scsi_host->shost_gendev),
		 srp_sdev_count(target->scsi_host));

	/* Only promote to LIVE if nothing moved us out of SCANNING meanwhile. */
	spin_lock_irq(&target->lock);
	if (target->state == SRP_TARGET_SCANNING)
		target->state = SRP_TARGET_LIVE;
	spin_unlock_irq(&target->lock);

out:
	return 0;
}
2791
Tony Jonesee959b02008-02-22 00:13:36 +01002792static void srp_release_dev(struct device *dev)
Roland Dreieraef9ec32005-11-02 14:07:13 -08002793{
2794 struct srp_host *host =
Tony Jonesee959b02008-02-22 00:13:36 +01002795 container_of(dev, struct srp_host, dev);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002796
2797 complete(&host->released);
2798}
2799
/* Device class for per-port srp-<ibdev>-<port> devices (see srp_add_port()). */
static struct class srp_class = {
	.name    = "infiniband_srp",
	.dev_release = srp_release_dev
};
2804
Bart Van Assche96fc2482013-06-28 14:51:26 +02002805/**
2806 * srp_conn_unique() - check whether the connection to a target is unique
Bart Van Asscheaf246632014-05-20 15:04:21 +02002807 * @host: SRP host.
2808 * @target: SRP target port.
Bart Van Assche96fc2482013-06-28 14:51:26 +02002809 */
2810static bool srp_conn_unique(struct srp_host *host,
2811 struct srp_target_port *target)
2812{
2813 struct srp_target_port *t;
2814 bool ret = false;
2815
2816 if (target->state == SRP_TARGET_REMOVED)
2817 goto out;
2818
2819 ret = true;
2820
2821 spin_lock(&host->target_lock);
2822 list_for_each_entry(t, &host->target_list, list) {
2823 if (t != target &&
2824 target->id_ext == t->id_ext &&
2825 target->ioc_guid == t->ioc_guid &&
2826 target->initiator_ext == t->initiator_ext) {
2827 ret = false;
2828 break;
2829 }
2830 }
2831 spin_unlock(&host->target_lock);
2832
2833out:
2834 return ret;
2835}
2836
Roland Dreieraef9ec32005-11-02 14:07:13 -08002837/*
2838 * Target ports are added by writing
2839 *
2840 * id_ext=<SRP ID ext>,ioc_guid=<SRP IOC GUID>,dgid=<dest GID>,
2841 * pkey=<P_Key>,service_id=<service ID>
2842 *
2843 * to the add_target sysfs attribute.
2844 */
enum {
	SRP_OPT_ERR		= 0,		/* match_token(): unrecognized option */
	SRP_OPT_ID_EXT		= 1 << 0,
	SRP_OPT_IOC_GUID	= 1 << 1,
	SRP_OPT_DGID		= 1 << 2,
	SRP_OPT_PKEY		= 1 << 3,
	SRP_OPT_SERVICE_ID	= 1 << 4,
	SRP_OPT_MAX_SECT	= 1 << 5,
	SRP_OPT_MAX_CMD_PER_LUN	= 1 << 6,
	SRP_OPT_IO_CLASS	= 1 << 7,
	SRP_OPT_INITIATOR_EXT	= 1 << 8,
	SRP_OPT_CMD_SG_ENTRIES	= 1 << 9,
	SRP_OPT_ALLOW_EXT_SG	= 1 << 10,
	SRP_OPT_SG_TABLESIZE	= 1 << 11,
	SRP_OPT_COMP_VECTOR	= 1 << 12,
	SRP_OPT_TL_RETRY_COUNT	= 1 << 13,
	SRP_OPT_QUEUE_SIZE	= 1 << 14,
	/* Options that must all be present in an add_target request. */
	SRP_OPT_ALL		= (SRP_OPT_ID_EXT	|
				   SRP_OPT_IOC_GUID	|
				   SRP_OPT_DGID		|
				   SRP_OPT_PKEY		|
				   SRP_OPT_SERVICE_ID),
};
2868
/* match_token() table mapping option names in the add_target string to flags. */
static const match_table_t srp_opt_tokens = {
	{ SRP_OPT_ID_EXT,		"id_ext=%s" 		},
	{ SRP_OPT_IOC_GUID,		"ioc_guid=%s" 		},
	{ SRP_OPT_DGID,			"dgid=%s" 		},
	{ SRP_OPT_PKEY,			"pkey=%x" 		},
	{ SRP_OPT_SERVICE_ID,		"service_id=%s"		},
	{ SRP_OPT_MAX_SECT,		"max_sect=%d" 		},
	{ SRP_OPT_MAX_CMD_PER_LUN,	"max_cmd_per_lun=%d" 	},
	{ SRP_OPT_IO_CLASS,		"io_class=%x"		},
	{ SRP_OPT_INITIATOR_EXT,	"initiator_ext=%s"	},
	{ SRP_OPT_CMD_SG_ENTRIES,	"cmd_sg_entries=%u"	},
	{ SRP_OPT_ALLOW_EXT_SG,		"allow_ext_sg=%u"	},
	{ SRP_OPT_SG_TABLESIZE,		"sg_tablesize=%u"	},
	{ SRP_OPT_COMP_VECTOR,		"comp_vector=%u"	},
	{ SRP_OPT_TL_RETRY_COUNT,	"tl_retry_count=%u"	},
	{ SRP_OPT_QUEUE_SIZE,		"queue_size=%d"		},
	{ SRP_OPT_ERR,			NULL 			}
};
2887
/*
 * srp_parse_options() - parse a comma-separated add_target option string into
 * @target and its embedded SCSI host.
 *
 * Returns 0 when all mandatory options (SRP_OPT_ALL) were seen and valid,
 * -ENOMEM on allocation failure, and -EINVAL for malformed or unknown
 * options.  A warning is logged for each missing mandatory option.
 */
static int srp_parse_options(const char *buf, struct srp_target_port *target)
{
	char *options, *sep_opt;
	char *p;
	char dgid[3];		/* two hex digits + NUL, for byte-wise GID parse */
	substring_t args[MAX_OPT_ARGS];
	int opt_mask = 0;	/* accumulates SRP_OPT_* flags seen so far */
	int token;
	int ret = -EINVAL;
	int i;

	/* strsep() modifies its argument, so work on a private copy. */
	options = kstrdup(buf, GFP_KERNEL);
	if (!options)
		return -ENOMEM;

	sep_opt = options;
	while ((p = strsep(&sep_opt, ",")) != NULL) {
		if (!*p)
			continue;

		token = match_token(p, srp_opt_tokens, args);
		opt_mask |= token;

		switch (token) {
		case SRP_OPT_ID_EXT:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			target->id_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
			kfree(p);
			break;

		case SRP_OPT_IOC_GUID:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			target->ioc_guid = cpu_to_be64(simple_strtoull(p, NULL, 16));
			kfree(p);
			break;

		case SRP_OPT_DGID:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			/* Destination GID: exactly 32 hex digits, no separators. */
			if (strlen(p) != 32) {
				pr_warn("bad dest GID parameter '%s'\n", p);
				kfree(p);
				goto out;
			}

			/* Parse two hex digits at a time into each raw byte. */
			for (i = 0; i < 16; ++i) {
				strlcpy(dgid, p + i * 2, sizeof(dgid));
				if (sscanf(dgid, "%hhx",
					   &target->orig_dgid.raw[i]) < 1) {
					ret = -EINVAL;
					kfree(p);
					goto out;
				}
			}
			kfree(p);
			break;

		case SRP_OPT_PKEY:
			if (match_hex(args, &token)) {
				pr_warn("bad P_Key parameter '%s'\n", p);
				goto out;
			}
			target->pkey = cpu_to_be16(token);
			break;

		case SRP_OPT_SERVICE_ID:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			target->service_id = cpu_to_be64(simple_strtoull(p, NULL, 16));
			kfree(p);
			break;

		case SRP_OPT_MAX_SECT:
			if (match_int(args, &token)) {
				pr_warn("bad max sect parameter '%s'\n", p);
				goto out;
			}
			target->scsi_host->max_sectors = token;
			break;

		case SRP_OPT_QUEUE_SIZE:
			if (match_int(args, &token) || token < 1) {
				pr_warn("bad queue_size parameter '%s'\n", p);
				goto out;
			}
			target->scsi_host->can_queue = token;
			/* Reserve extra slots for RSP and task-management IUs. */
			target->queue_size = token + SRP_RSP_SQ_SIZE +
					     SRP_TSK_MGMT_SQ_SIZE;
			if (!(opt_mask & SRP_OPT_MAX_CMD_PER_LUN))
				target->scsi_host->cmd_per_lun = token;
			break;

		case SRP_OPT_MAX_CMD_PER_LUN:
			if (match_int(args, &token) || token < 1) {
				pr_warn("bad max cmd_per_lun parameter '%s'\n",
					p);
				goto out;
			}
			target->scsi_host->cmd_per_lun = token;
			break;

		case SRP_OPT_IO_CLASS:
			if (match_hex(args, &token)) {
				pr_warn("bad IO class parameter '%s'\n", p);
				goto out;
			}
			/* Only the rev 10 and rev 16a SRP I/O classes exist. */
			if (token != SRP_REV10_IB_IO_CLASS &&
			    token != SRP_REV16A_IB_IO_CLASS) {
				pr_warn("unknown IO class parameter value %x specified (use %x or %x).\n",
					token, SRP_REV10_IB_IO_CLASS,
					SRP_REV16A_IB_IO_CLASS);
				goto out;
			}
			target->io_class = token;
			break;

		case SRP_OPT_INITIATOR_EXT:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			target->initiator_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
			kfree(p);
			break;

		case SRP_OPT_CMD_SG_ENTRIES:
			if (match_int(args, &token) || token < 1 || token > 255) {
				pr_warn("bad max cmd_sg_entries parameter '%s'\n",
					p);
				goto out;
			}
			target->cmd_sg_cnt = token;
			break;

		case SRP_OPT_ALLOW_EXT_SG:
			if (match_int(args, &token)) {
				pr_warn("bad allow_ext_sg parameter '%s'\n", p);
				goto out;
			}
			target->allow_ext_sg = !!token;
			break;

		case SRP_OPT_SG_TABLESIZE:
			if (match_int(args, &token) || token < 1 ||
					token > SCSI_MAX_SG_CHAIN_SEGMENTS) {
				pr_warn("bad max sg_tablesize parameter '%s'\n",
					p);
				goto out;
			}
			target->sg_tablesize = token;
			break;

		case SRP_OPT_COMP_VECTOR:
			if (match_int(args, &token) || token < 0) {
				pr_warn("bad comp_vector parameter '%s'\n", p);
				goto out;
			}
			target->comp_vector = token;
			break;

		case SRP_OPT_TL_RETRY_COUNT:
			/* IB CM limits the retry count to the range [2, 7]. */
			if (match_int(args, &token) || token < 2 || token > 7) {
				pr_warn("bad tl_retry_count parameter '%s' (must be a number between 2 and 7)\n",
					p);
				goto out;
			}
			target->tl_retry_count = token;
			break;

		default:
			pr_warn("unknown parameter or missing value '%s' in target creation request\n",
				p);
			goto out;
		}
	}

	if ((opt_mask & SRP_OPT_ALL) == SRP_OPT_ALL)
		ret = 0;
	else
		for (i = 0; i < ARRAY_SIZE(srp_opt_tokens); ++i)
			if ((srp_opt_tokens[i].token & SRP_OPT_ALL) &&
			    !(srp_opt_tokens[i].token & opt_mask))
				pr_warn("target creation request is missing parameter '%s'\n",
					srp_opt_tokens[i].pattern);

	if (target->scsi_host->cmd_per_lun > target->scsi_host->can_queue
	    && (opt_mask & SRP_OPT_MAX_CMD_PER_LUN))
		pr_warn("cmd_per_lun = %d > queue_size = %d\n",
			target->scsi_host->cmd_per_lun,
			target->scsi_host->can_queue);

out:
	kfree(options);
	return ret;
}
3098
/*
 * srp_create_target() - sysfs store callback for the per-port "add_target"
 * attribute.
 *
 * Allocates a SCSI host plus srp_target_port, parses the option string,
 * creates one RDMA channel per (node, CPU) slot up to ch_count, connects the
 * channels, and finally registers the target with the SCSI midlayer via
 * srp_add_target().  Returns @count on success or a negative errno.
 */
static ssize_t srp_create_target(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct srp_host *host =
		container_of(dev, struct srp_host, dev);
	struct Scsi_Host *target_host;
	struct srp_target_port *target;
	struct srp_rdma_ch *ch;
	struct srp_device *srp_dev = host->srp_dev;
	struct ib_device *ibdev = srp_dev->dev;
	int ret, node_idx, node, cpu, i;
	bool multich = false;	/* false only for the very first channel */

	target_host = scsi_host_alloc(&srp_template,
				      sizeof (struct srp_target_port));
	if (!target_host)
		return -ENOMEM;

	target_host->transportt  = ib_srp_transport_template;
	target_host->max_channel = 0;
	target_host->max_id      = 1;
	target_host->max_lun     = SRP_MAX_LUN;
	target_host->max_cmd_len = sizeof ((struct srp_cmd *) (void *) 0L)->cdb;

	target = host_to_target(target_host);

	/* Defaults; srp_parse_options() may override several of these. */
	target->io_class	= SRP_REV16A_IB_IO_CLASS;
	target->scsi_host	= target_host;
	target->srp_host	= host;
	target->lkey		= host->srp_dev->mr->lkey;
	target->rkey		= host->srp_dev->mr->rkey;
	target->cmd_sg_cnt	= cmd_sg_entries;
	target->sg_tablesize	= indirect_sg_entries ? : cmd_sg_entries;
	target->allow_ext_sg	= allow_ext_sg;
	target->tl_retry_count	= 7;
	target->queue_size	= SRP_DEFAULT_QUEUE_SIZE;

	/*
	 * Avoid that the SCSI host can be removed by srp_remove_target()
	 * before this function returns.
	 */
	scsi_host_get(target->scsi_host);

	mutex_lock(&host->add_target_mutex);

	ret = srp_parse_options(buf, target);
	if (ret)
		goto err;

	ret = scsi_init_shared_tag_map(target_host, target_host->can_queue);
	if (ret)
		goto err;

	target->req_ring_size = target->queue_size - SRP_TSK_MGMT_SQ_SIZE;

	/* Refuse duplicate connections to the same target port. */
	if (!srp_conn_unique(target->srp_host, target)) {
		shost_printk(KERN_INFO, target->scsi_host,
			     PFX "Already connected to target port with id_ext=%016llx;ioc_guid=%016llx;initiator_ext=%016llx\n",
			     be64_to_cpu(target->id_ext),
			     be64_to_cpu(target->ioc_guid),
			     be64_to_cpu(target->initiator_ext));
		ret = -EEXIST;
		goto err;
	}

	/* Without FMR/FR and external SG, the SG list must fit in the IU. */
	if (!srp_dev->has_fmr && !srp_dev->has_fr && !target->allow_ext_sg &&
	    target->cmd_sg_cnt < target->sg_tablesize) {
		pr_warn("No MR pool and no external indirect descriptors, limiting sg_tablesize to cmd_sg_cnt\n");
		target->sg_tablesize = target->cmd_sg_cnt;
	}

	target_host->sg_tablesize = target->sg_tablesize;
	target->indirect_size = target->sg_tablesize *
				sizeof (struct srp_direct_buf);
	target->max_iu_len = sizeof (struct srp_cmd) +
			     sizeof (struct srp_indirect_buf) +
			     target->cmd_sg_cnt * sizeof (struct srp_direct_buf);

	INIT_WORK(&target->tl_err_work, srp_tl_err_work);
	INIT_WORK(&target->remove_work, srp_remove_work);
	spin_lock_init(&target->lock);
	ret = ib_query_gid(ibdev, host->port, 0, &target->sgid);
	if (ret)
		goto err;

	ret = -ENOMEM;
	/*
	 * Channel count: at least one per NUMA node, bounded by the number
	 * of completion vectors and online CPUs (or the ch_count module
	 * parameter when set).
	 */
	target->ch_count = max_t(unsigned, num_online_nodes(),
				 min(ch_count ? :
				     min(4 * num_online_nodes(),
					 ibdev->num_comp_vectors),
				     num_online_cpus()));
	target->ch = kcalloc(target->ch_count, sizeof(*target->ch),
			     GFP_KERNEL);
	if (!target->ch)
		goto err;

	/* Distribute channels and completion vectors evenly over nodes. */
	node_idx = 0;
	for_each_online_node(node) {
		const int ch_start = (node_idx * target->ch_count /
				      num_online_nodes());
		const int ch_end = ((node_idx + 1) * target->ch_count /
				    num_online_nodes());
		const int cv_start = (node_idx * ibdev->num_comp_vectors /
				      num_online_nodes() + target->comp_vector)
				     % ibdev->num_comp_vectors;
		const int cv_end = ((node_idx + 1) * ibdev->num_comp_vectors /
				    num_online_nodes() + target->comp_vector)
				    % ibdev->num_comp_vectors;
		int cpu_idx = 0;

		for_each_online_cpu(cpu) {
			if (cpu_to_node(cpu) != node)
				continue;
			if (ch_start + cpu_idx >= ch_end)
				continue;
			ch = &target->ch[ch_start + cpu_idx];
			ch->target = target;
			ch->comp_vector = cv_start == cv_end ? cv_start :
				cv_start + cpu_idx % (cv_end - cv_start);
			spin_lock_init(&ch->lock);
			INIT_LIST_HEAD(&ch->free_tx);
			ret = srp_new_cm_id(ch);
			if (ret)
				goto err_disconnect;

			ret = srp_create_ch_ib(ch);
			if (ret)
				goto err_disconnect;

			ret = srp_alloc_req_data(ch);
			if (ret)
				goto err_disconnect;

			ret = srp_connect_ch(ch, multich);
			if (ret) {
				shost_printk(KERN_ERR, target->scsi_host,
					     PFX "Connection %d/%d failed\n",
					     ch_start + cpu_idx,
					     target->ch_count);
				/*
				 * The first channel must connect; later
				 * failures just shrink the channel count.
				 */
				if (node_idx == 0 && cpu_idx == 0) {
					goto err_disconnect;
				} else {
					srp_free_ch_ib(target, ch);
					srp_free_req_data(target, ch);
					target->ch_count = ch - target->ch;
					break;
				}
			}

			multich = true;
			cpu_idx++;
		}
		node_idx++;
	}

	target->scsi_host->nr_hw_queues = target->ch_count;

	ret = srp_add_target(host, target);
	if (ret)
		goto err_disconnect;

	if (target->state != SRP_TARGET_REMOVED) {
		shost_printk(KERN_DEBUG, target->scsi_host, PFX
			     "new target: id_ext %016llx ioc_guid %016llx pkey %04x service_id %016llx sgid %pI6 dgid %pI6\n",
			     be64_to_cpu(target->id_ext),
			     be64_to_cpu(target->ioc_guid),
			     be16_to_cpu(target->pkey),
			     be64_to_cpu(target->service_id),
			     target->sgid.raw, target->orig_dgid.raw);
	}

	ret = count;

out:
	mutex_unlock(&host->add_target_mutex);

	/* Drop the reference taken by scsi_host_get() above. */
	scsi_host_put(target->scsi_host);

	return ret;

err_disconnect:
	srp_disconnect_target(target);

	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		srp_free_ch_ib(target, ch);
		srp_free_req_data(target, ch);
	}

	kfree(target->ch);

err:
	/* Drop the scsi_host_alloc() reference; frees the host on last put. */
	scsi_host_put(target_host);
	goto out;
}
3295
/* Write-only per-port attribute: writing an option string creates a target. */
static DEVICE_ATTR(add_target, S_IWUSR, NULL, srp_create_target);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003297
Tony Jonesee959b02008-02-22 00:13:36 +01003298static ssize_t show_ibdev(struct device *dev, struct device_attribute *attr,
3299 char *buf)
Roland Dreieraef9ec32005-11-02 14:07:13 -08003300{
Tony Jonesee959b02008-02-22 00:13:36 +01003301 struct srp_host *host = container_of(dev, struct srp_host, dev);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003302
Greg Kroah-Hartman05321932008-03-06 00:13:36 +01003303 return sprintf(buf, "%s\n", host->srp_dev->dev->name);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003304}
3305
/* Read-only per-port attribute exposing the underlying IB device name. */
static DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003307
Tony Jonesee959b02008-02-22 00:13:36 +01003308static ssize_t show_port(struct device *dev, struct device_attribute *attr,
3309 char *buf)
Roland Dreieraef9ec32005-11-02 14:07:13 -08003310{
Tony Jonesee959b02008-02-22 00:13:36 +01003311 struct srp_host *host = container_of(dev, struct srp_host, dev);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003312
3313 return sprintf(buf, "%d\n", host->port);
3314}
3315
/* Read-only per-port attribute exposing the IB port number. */
static DEVICE_ATTR(port, S_IRUGO, show_port, NULL);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003317
/*
 * srp_add_port() - allocate and register an srp_host for one port of an
 * IB device, creating its srp-<ibdev>-<port> class device and the
 * add_target/ibdev/port sysfs files.
 *
 * Returns the new host on success, NULL on any failure.
 *
 * NOTE(review): on device_register() failure this falls through to
 * kfree(host); the driver-core documentation says a device whose
 * registration failed should be released with put_device() instead —
 * confirm whether this path needs the same treatment as err_class.
 */
static struct srp_host *srp_add_port(struct srp_device *device, u8 port)
{
	struct srp_host *host;

	host = kzalloc(sizeof *host, GFP_KERNEL);
	if (!host)
		return NULL;

	INIT_LIST_HEAD(&host->target_list);
	spin_lock_init(&host->target_lock);
	init_completion(&host->released);
	mutex_init(&host->add_target_mutex);
	host->srp_dev = device;
	host->port = port;

	host->dev.class = &srp_class;
	host->dev.parent = device->dev->dma_device;
	dev_set_name(&host->dev, "srp-%s-%d", device->dev->name, port);

	if (device_register(&host->dev))
		goto free_host;
	if (device_create_file(&host->dev, &dev_attr_add_target))
		goto err_class;
	if (device_create_file(&host->dev, &dev_attr_ibdev))
		goto err_class;
	if (device_create_file(&host->dev, &dev_attr_port))
		goto err_class;

	return host;

err_class:
	device_unregister(&host->dev);

free_host:
	kfree(host);

	return NULL;
}
3356
/*
 * srp_add_one - IB client "add" callback, invoked once per IB device.
 *
 * Queries the device's attributes, allocates and fills in a struct
 * srp_device (memory-registration strategy, MR page-size limits, PD and
 * DMA MR), then creates one srp_host per physical port and publishes the
 * per-device state via ib_set_client_data().
 *
 * On any failure the function cleans up what it allocated and returns
 * silently; the callback has a void return, so errors cannot propagate.
 */
static void srp_add_one(struct ib_device *device)
{
	struct srp_device *srp_dev;
	struct ib_device_attr *dev_attr;
	struct srp_host *host;
	int mr_page_shift, s, e, p;	/* s..e = port range to scan */
	u64 max_pages_per_mr;

	dev_attr = kmalloc(sizeof *dev_attr, GFP_KERNEL);
	if (!dev_attr)
		return;

	if (ib_query_device(device, dev_attr)) {
		pr_warn("Query device failed for %s\n", device->name);
		goto free_attr;
	}

	srp_dev = kmalloc(sizeof *srp_dev, GFP_KERNEL);
	if (!srp_dev)
		goto free_attr;

	/*
	 * FMR support is detected from the presence of all four FMR verbs;
	 * fast registration (FR) from the MEM_MGT_EXTENSIONS capability bit.
	 */
	srp_dev->has_fmr = (device->alloc_fmr && device->dealloc_fmr &&
			    device->map_phys_fmr && device->unmap_fmr);
	srp_dev->has_fr = (dev_attr->device_cap_flags &
			   IB_DEVICE_MEM_MGT_EXTENSIONS);
	if (!srp_dev->has_fmr && !srp_dev->has_fr)
		dev_warn(&device->dev, "neither FMR nor FR is supported\n");

	/*
	 * Prefer fast registration when available; the module parameter
	 * prefer_fr (defined elsewhere in this file) breaks the tie when
	 * both FMR and FR are supported.
	 */
	srp_dev->use_fast_reg = (srp_dev->has_fr &&
				 (!srp_dev->has_fmr || prefer_fr));

	/*
	 * Use the smallest page size supported by the HCA, down to a
	 * minimum of 4096 bytes. We're unlikely to build large sglists
	 * out of smaller entries.
	 */
	mr_page_shift		= max(12, ffs(dev_attr->page_size_cap) - 1);
	srp_dev->mr_page_size	= 1 << mr_page_shift;
	srp_dev->mr_page_mask	= ~((u64) srp_dev->mr_page_size - 1);
	/* How many MR pages fit in the largest MR the HCA supports. */
	max_pages_per_mr	= dev_attr->max_mr_size;
	do_div(max_pages_per_mr, srp_dev->mr_page_size);
	srp_dev->max_pages_per_mr = min_t(u64, SRP_MAX_PAGES_PER_MR,
					  max_pages_per_mr);
	if (srp_dev->use_fast_reg) {
		/* FR imposes its own page-list length limit on top. */
		srp_dev->max_pages_per_mr =
			min_t(u32, srp_dev->max_pages_per_mr,
			      dev_attr->max_fast_reg_page_list_len);
	}
	srp_dev->mr_max_size	= srp_dev->mr_page_size *
				   srp_dev->max_pages_per_mr;
	pr_debug("%s: mr_page_shift = %d, dev_attr->max_mr_size = %#llx, dev_attr->max_fast_reg_page_list_len = %u, max_pages_per_mr = %d, mr_max_size = %#x\n",
		 device->name, mr_page_shift, dev_attr->max_mr_size,
		 dev_attr->max_fast_reg_page_list_len,
		 srp_dev->max_pages_per_mr, srp_dev->mr_max_size);

	INIT_LIST_HEAD(&srp_dev->dev_list);

	srp_dev->dev = device;
	srp_dev->pd  = ib_alloc_pd(device);
	if (IS_ERR(srp_dev->pd))
		goto free_dev;

	/* One DMA MR shared by all targets on this device. */
	srp_dev->mr = ib_get_dma_mr(srp_dev->pd,
				    IB_ACCESS_LOCAL_WRITE |
				    IB_ACCESS_REMOTE_READ |
				    IB_ACCESS_REMOTE_WRITE);
	if (IS_ERR(srp_dev->mr))
		goto err_pd;

	/* Switches expose a single port 0; HCAs number ports 1..phys_port_cnt. */
	if (device->node_type == RDMA_NODE_IB_SWITCH) {
		s = 0;
		e = 0;
	} else {
		s = 1;
		e = device->phys_port_cnt;
	}

	for (p = s; p <= e; ++p) {
		host = srp_add_port(srp_dev, p);
		if (host)
			list_add_tail(&host->list, &srp_dev->dev_list);
	}

	ib_set_client_data(device, &srp_client, srp_dev);

	goto free_attr;

err_pd:
	ib_dealloc_pd(srp_dev->pd);

free_dev:
	kfree(srp_dev);

free_attr:
	/* dev_attr is only needed during setup; free it on all paths. */
	kfree(dev_attr);
}
3453
/*
 * srp_remove_one - IB client "remove" callback, invoked when an IB device
 * goes away (or on module unload).
 *
 * Tears down everything srp_add_one() created, in strict order per host:
 * unregister the sysfs device (so no new targets can be added), wait for
 * the last sysfs reference to drop, queue removal of every existing
 * target, flush the workqueues that run those removals, then free the
 * host.  Finally releases the shared MR, the PD and the srp_device.
 */
static void srp_remove_one(struct ib_device *device)
{
	struct srp_device *srp_dev;
	struct srp_host *host, *tmp_host;
	struct srp_target_port *target;

	srp_dev = ib_get_client_data(device, &srp_client);
	/* Nothing to do if srp_add_one() never completed for this device. */
	if (!srp_dev)
		return;

	list_for_each_entry_safe(host, tmp_host, &srp_dev->dev_list, list) {
		device_unregister(&host->dev);
		/*
		 * Wait for the sysfs entry to go away, so that no new
		 * target ports can be created.
		 */
		wait_for_completion(&host->released);

		/*
		 * Remove all target ports.
		 */
		spin_lock(&host->target_lock);
		list_for_each_entry(target, &host->target_list, list)
			srp_queue_remove_work(target);
		spin_unlock(&host->target_lock);

		/*
		 * Wait for tl_err and target port removal tasks.
		 */
		flush_workqueue(system_long_wq);
		flush_workqueue(srp_remove_wq);

		kfree(host);
	}

	/* All hosts are gone; release the device-wide MR, PD and state. */
	ib_dereg_mr(srp_dev->mr);
	ib_dealloc_pd(srp_dev->pd);

	kfree(srp_dev);
}
3494
FUJITA Tomonori32368222007-06-27 16:33:12 +09003495static struct srp_function_template ib_srp_transport_functions = {
Bart Van Asscheed9b2262013-10-26 14:34:27 +02003496 .has_rport_state = true,
3497 .reset_timer_if_blocked = true,
Bart Van Asschea95cadb2013-10-26 14:37:17 +02003498 .reconnect_delay = &srp_reconnect_delay,
Bart Van Asscheed9b2262013-10-26 14:34:27 +02003499 .fast_io_fail_tmo = &srp_fast_io_fail_tmo,
3500 .dev_loss_tmo = &srp_dev_loss_tmo,
3501 .reconnect = srp_rport_reconnect,
Bart Van Asschedc1bdbd2011-09-16 20:41:13 +02003502 .rport_delete = srp_rport_delete,
Bart Van Asscheed9b2262013-10-26 14:34:27 +02003503 .terminate_rport_io = srp_terminate_io,
FUJITA Tomonori32368222007-06-27 16:33:12 +09003504};
3505
Roland Dreieraef9ec32005-11-02 14:07:13 -08003506static int __init srp_init_module(void)
3507{
3508 int ret;
3509
Bart Van Asschedcb4cb82010-11-26 13:22:48 -05003510 BUILD_BUG_ON(FIELD_SIZEOF(struct ib_wc, wr_id) < sizeof(void *));
Bart Van Asschedd5e6e32010-08-30 19:27:20 +00003511
David Dillow49248642011-01-14 18:23:24 -05003512 if (srp_sg_tablesize) {
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00003513 pr_warn("srp_sg_tablesize is deprecated, please use cmd_sg_entries\n");
David Dillow49248642011-01-14 18:23:24 -05003514 if (!cmd_sg_entries)
3515 cmd_sg_entries = srp_sg_tablesize;
3516 }
3517
3518 if (!cmd_sg_entries)
3519 cmd_sg_entries = SRP_DEF_SG_TABLESIZE;
3520
3521 if (cmd_sg_entries > 255) {
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00003522 pr_warn("Clamping cmd_sg_entries to 255\n");
David Dillow49248642011-01-14 18:23:24 -05003523 cmd_sg_entries = 255;
David Dillow1e89a192008-04-16 21:01:12 -07003524 }
3525
David Dillowc07d4242011-01-16 13:57:10 -05003526 if (!indirect_sg_entries)
3527 indirect_sg_entries = cmd_sg_entries;
3528 else if (indirect_sg_entries < cmd_sg_entries) {
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00003529 pr_warn("Bumping up indirect_sg_entries to match cmd_sg_entries (%u)\n",
3530 cmd_sg_entries);
David Dillowc07d4242011-01-16 13:57:10 -05003531 indirect_sg_entries = cmd_sg_entries;
3532 }
3533
Bart Van Asschebcc05912014-07-09 15:57:26 +02003534 srp_remove_wq = create_workqueue("srp_remove");
Wei Yongjunda05be22014-08-14 08:56:22 +08003535 if (!srp_remove_wq) {
3536 ret = -ENOMEM;
Bart Van Asschebcc05912014-07-09 15:57:26 +02003537 goto out;
3538 }
3539
3540 ret = -ENOMEM;
FUJITA Tomonori32368222007-06-27 16:33:12 +09003541 ib_srp_transport_template =
3542 srp_attach_transport(&ib_srp_transport_functions);
3543 if (!ib_srp_transport_template)
Bart Van Asschebcc05912014-07-09 15:57:26 +02003544 goto destroy_wq;
FUJITA Tomonori32368222007-06-27 16:33:12 +09003545
Roland Dreieraef9ec32005-11-02 14:07:13 -08003546 ret = class_register(&srp_class);
3547 if (ret) {
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00003548 pr_err("couldn't register class infiniband_srp\n");
Bart Van Asschebcc05912014-07-09 15:57:26 +02003549 goto release_tr;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003550 }
3551
Michael S. Tsirkinc1a0b232006-08-21 16:40:12 -07003552 ib_sa_register_client(&srp_sa_client);
3553
Roland Dreieraef9ec32005-11-02 14:07:13 -08003554 ret = ib_register_client(&srp_client);
3555 if (ret) {
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00003556 pr_err("couldn't register IB client\n");
Bart Van Asschebcc05912014-07-09 15:57:26 +02003557 goto unreg_sa;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003558 }
3559
Bart Van Asschebcc05912014-07-09 15:57:26 +02003560out:
3561 return ret;
3562
3563unreg_sa:
3564 ib_sa_unregister_client(&srp_sa_client);
3565 class_unregister(&srp_class);
3566
3567release_tr:
3568 srp_release_transport(ib_srp_transport_template);
3569
3570destroy_wq:
3571 destroy_workqueue(srp_remove_wq);
3572 goto out;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003573}
3574
/*
 * srp_cleanup_module - module exit point.
 *
 * Mirrors srp_init_module() exactly, tearing down in reverse order of
 * setup: IB client first (which runs srp_remove_one() for every device),
 * then the SA client, the sysfs class, the transport template and
 * finally the removal workqueue.
 */
static void __exit srp_cleanup_module(void)
{
	ib_unregister_client(&srp_client);
	ib_sa_unregister_client(&srp_sa_client);
	class_unregister(&srp_class);
	srp_release_transport(ib_srp_transport_template);
	destroy_workqueue(srp_remove_wq);
}

module_init(srp_init_module);
module_exit(srp_cleanup_module);