/*
 * Copyright (c) 2005 Cisco Systems.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/string.h>
#include <linux/parser.h>
#include <linux/random.h>
#include <linux/jiffies.h>

#include <linux/atomic.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_tcq.h>
#include <scsi/srp.h>
#include <scsi/scsi_transport_srp.h>

#include "ib_srp.h"

#define DRV_NAME	"ib_srp"
#define PFX		DRV_NAME ": "
#define DRV_VERSION	"1.0"
#define DRV_RELDATE	"July 1, 2013"

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("InfiniBand SCSI RDMA Protocol initiator "
		   "v" DRV_VERSION " (" DRV_RELDATE ")");
MODULE_LICENSE("Dual BSD/GPL");

static unsigned int srp_sg_tablesize;
static unsigned int cmd_sg_entries;
static unsigned int indirect_sg_entries;
static bool allow_ext_sg;
static bool prefer_fr;
static bool register_always;
static int topspin_workarounds = 1;

module_param(srp_sg_tablesize, uint, 0444);
MODULE_PARM_DESC(srp_sg_tablesize, "Deprecated name for cmd_sg_entries");

module_param(cmd_sg_entries, uint, 0444);
MODULE_PARM_DESC(cmd_sg_entries,
		 "Default number of gather/scatter entries in the SRP command (default is 12, max 255)");

module_param(indirect_sg_entries, uint, 0444);
MODULE_PARM_DESC(indirect_sg_entries,
		 "Default max number of gather/scatter entries (default is 12, max is " __stringify(SCSI_MAX_SG_CHAIN_SEGMENTS) ")");

module_param(allow_ext_sg, bool, 0444);
MODULE_PARM_DESC(allow_ext_sg,
		 "Default behavior when there are more than cmd_sg_entries S/G entries after mapping; fails the request when false (default false)");

module_param(topspin_workarounds, int, 0444);
MODULE_PARM_DESC(topspin_workarounds,
		 "Enable workarounds for Topspin/Cisco SRP target bugs if != 0");

module_param(prefer_fr, bool, 0444);
MODULE_PARM_DESC(prefer_fr,
"Whether to use fast registration if both FMR and fast registration are supported");

module_param(register_always, bool, 0444);
MODULE_PARM_DESC(register_always,
		 "Use memory registration even for contiguous memory regions");

static struct kernel_param_ops srp_tmo_ops;

static int srp_reconnect_delay = 10;
module_param_cb(reconnect_delay, &srp_tmo_ops, &srp_reconnect_delay,
		S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(reconnect_delay, "Time between successive reconnect attempts");

static int srp_fast_io_fail_tmo = 15;
module_param_cb(fast_io_fail_tmo, &srp_tmo_ops, &srp_fast_io_fail_tmo,
		S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fast_io_fail_tmo,
		 "Number of seconds between the observation of a transport"
		 " layer error and failing all I/O. \"off\" means that this"
		 " functionality is disabled.");

static int srp_dev_loss_tmo = 600;
module_param_cb(dev_loss_tmo, &srp_tmo_ops, &srp_dev_loss_tmo,
		S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dev_loss_tmo,
		 "Maximum number of seconds that the SRP transport should"
		 " insulate transport layer errors. After this time has been"
		 " exceeded the SCSI host is removed. Should be"
		 " between 1 and " __stringify(SCSI_DEVICE_BLOCK_MAX_TIMEOUT)
		 " if fast_io_fail_tmo has not been set. \"off\" means that"
		 " this functionality is disabled.");

static unsigned ch_count;
module_param(ch_count, uint, 0444);
MODULE_PARM_DESC(ch_count,
		 "Number of RDMA channels to use for communication with an SRP target. Using more than one channel improves performance if the HCA supports multiple completion vectors. The default value is the minimum of four times the number of online CPU sockets and the number of completion vectors supported by the HCA.");

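/*
 * Usage sketch (hypothetical values, not recommendations): the tunables
 * above can be set at module load time, e.g.
 *
 *   modprobe ib_srp cmd_sg_entries=64 prefer_fr=1 ch_count=2
 *
 * The three timeout parameters registered with module_param_cb() are also
 * readable and writable at runtime via /sys/module/ib_srp/parameters/.
 */
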
static void srp_add_one(struct ib_device *device);
static void srp_remove_one(struct ib_device *device);
static void srp_recv_completion(struct ib_cq *cq, void *ch_ptr);
static void srp_send_completion(struct ib_cq *cq, void *ch_ptr);
static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event);

static struct scsi_transport_template *ib_srp_transport_template;
static struct workqueue_struct *srp_remove_wq;

static struct ib_client srp_client = {
	.name   = "srp",
	.add    = srp_add_one,
	.remove = srp_remove_one
};

static struct ib_sa_client srp_sa_client;

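/*
 * srp_tmo_get() and srp_tmo_set() implement the "off" convention for the
 * reconnect_delay, fast_io_fail_tmo and dev_loss_tmo parameters declared
 * above: the string "off" is stored internally as -1, and every update is
 * cross-checked against the other two values via srp_tmo_valid(). For
 * example (hypothetical invocation):
 *
 *   echo off > /sys/module/ib_srp/parameters/fast_io_fail_tmo
 */
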
static int srp_tmo_get(char *buffer, const struct kernel_param *kp)
{
	int tmo = *(int *)kp->arg;

	if (tmo >= 0)
		return sprintf(buffer, "%d", tmo);
	else
		return sprintf(buffer, "off");
}

static int srp_tmo_set(const char *val, const struct kernel_param *kp)
{
	int tmo, res;

	if (strncmp(val, "off", 3) != 0) {
		res = kstrtoint(val, 0, &tmo);
		if (res)
			goto out;
	} else {
		tmo = -1;
	}
	if (kp->arg == &srp_reconnect_delay)
		res = srp_tmo_valid(tmo, srp_fast_io_fail_tmo,
				    srp_dev_loss_tmo);
	else if (kp->arg == &srp_fast_io_fail_tmo)
		res = srp_tmo_valid(srp_reconnect_delay, tmo, srp_dev_loss_tmo);
	else
		res = srp_tmo_valid(srp_reconnect_delay, srp_fast_io_fail_tmo,
				    tmo);
	if (res)
		goto out;
	*(int *)kp->arg = tmo;

out:
	return res;
}

static struct kernel_param_ops srp_tmo_ops = {
	.get = srp_tmo_get,
	.set = srp_tmo_set,
};

static inline struct srp_target_port *host_to_target(struct Scsi_Host *host)
{
	return (struct srp_target_port *) host->hostdata;
}

static const char *srp_target_info(struct Scsi_Host *host)
{
	return host_to_target(host)->target_name;
}

static int srp_target_is_topspin(struct srp_target_port *target)
{
	static const u8 topspin_oui[3] = { 0x00, 0x05, 0xad };
	static const u8 cisco_oui[3]   = { 0x00, 0x1b, 0x0d };

	return topspin_workarounds &&
		(!memcmp(&target->ioc_guid, topspin_oui, sizeof topspin_oui) ||
		 !memcmp(&target->ioc_guid, cisco_oui, sizeof cisco_oui));
}

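/*
 * An information unit (IU) is a single SRP message buffer. srp_alloc_iu()
 * below allocates the buffer and DMA-maps it for the HCA; srp_free_iu() is
 * its exact inverse and must be called with the same host.
 */
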
static struct srp_iu *srp_alloc_iu(struct srp_host *host, size_t size,
				   gfp_t gfp_mask,
				   enum dma_data_direction direction)
{
	struct srp_iu *iu;

	iu = kmalloc(sizeof *iu, gfp_mask);
	if (!iu)
		goto out;

	iu->buf = kzalloc(size, gfp_mask);
	if (!iu->buf)
		goto out_free_iu;

	iu->dma = ib_dma_map_single(host->srp_dev->dev, iu->buf, size,
				    direction);
	if (ib_dma_mapping_error(host->srp_dev->dev, iu->dma))
		goto out_free_buf;

	iu->size      = size;
	iu->direction = direction;

	return iu;

out_free_buf:
	kfree(iu->buf);
out_free_iu:
	kfree(iu);
out:
	return NULL;
}

static void srp_free_iu(struct srp_host *host, struct srp_iu *iu)
{
	if (!iu)
		return;

	ib_dma_unmap_single(host->srp_dev->dev, iu->dma, iu->size,
			    iu->direction);
	kfree(iu->buf);
	kfree(iu);
}

static void srp_qp_event(struct ib_event *event, void *context)
{
	pr_debug("QP event %d\n", event->event);
}

static int srp_init_qp(struct srp_target_port *target,
		       struct ib_qp *qp)
{
	struct ib_qp_attr *attr;
	int ret;

	attr = kmalloc(sizeof *attr, GFP_KERNEL);
	if (!attr)
		return -ENOMEM;

	ret = ib_find_pkey(target->srp_host->srp_dev->dev,
			   target->srp_host->port,
			   be16_to_cpu(target->pkey),
			   &attr->pkey_index);
	if (ret)
		goto out;

	attr->qp_state        = IB_QPS_INIT;
	attr->qp_access_flags = (IB_ACCESS_REMOTE_READ |
				 IB_ACCESS_REMOTE_WRITE);
	attr->port_num        = target->srp_host->port;

	ret = ib_modify_qp(qp, attr,
			   IB_QP_STATE		|
			   IB_QP_PKEY_INDEX	|
			   IB_QP_ACCESS_FLAGS	|
			   IB_QP_PORT);

out:
	kfree(attr);
	return ret;
}

static int srp_new_cm_id(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	struct ib_cm_id *new_cm_id;

	new_cm_id = ib_create_cm_id(target->srp_host->srp_dev->dev,
				    srp_cm_handler, ch);
	if (IS_ERR(new_cm_id))
		return PTR_ERR(new_cm_id);

	if (ch->cm_id)
		ib_destroy_cm_id(ch->cm_id);
	ch->cm_id = new_cm_id;
	ch->path.sgid = target->sgid;
	ch->path.dgid = target->orig_dgid;
	ch->path.pkey = target->pkey;
	ch->path.service_id = target->service_id;

	return 0;
}

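/*
 * The driver supports two memory registration schemes: FMR pools (see
 * srp_alloc_fmr_pool() below) and fast registration (FR) descriptor pools
 * (see the srp_*_fr_pool() helpers that follow). Which scheme is used is
 * selected per device via use_fast_reg; both pools are sized to the SCSI
 * host's can_queue so that every outstanding command can register memory.
 */
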
static struct ib_fmr_pool *srp_alloc_fmr_pool(struct srp_target_port *target)
{
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_fmr_pool_param fmr_param;

	memset(&fmr_param, 0, sizeof(fmr_param));
	fmr_param.pool_size	    = target->scsi_host->can_queue;
	fmr_param.dirty_watermark   = fmr_param.pool_size / 4;
	fmr_param.cache		    = 1;
	fmr_param.max_pages_per_fmr = dev->max_pages_per_mr;
	fmr_param.page_shift	    = ilog2(dev->mr_page_size);
	fmr_param.access	    = (IB_ACCESS_LOCAL_WRITE |
				       IB_ACCESS_REMOTE_WRITE |
				       IB_ACCESS_REMOTE_READ);

	return ib_create_fmr_pool(dev->pd, &fmr_param);
}

/**
 * srp_destroy_fr_pool() - free the resources owned by a pool
 * @pool: Fast registration pool to be destroyed.
 */
static void srp_destroy_fr_pool(struct srp_fr_pool *pool)
{
	int i;
	struct srp_fr_desc *d;

	if (!pool)
		return;

	for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
		if (d->frpl)
			ib_free_fast_reg_page_list(d->frpl);
		if (d->mr)
			ib_dereg_mr(d->mr);
	}
	kfree(pool);
}

/**
 * srp_create_fr_pool() - allocate and initialize a pool for fast registration
 * @device:            IB device to allocate fast registration descriptors for.
 * @pd:                Protection domain associated with the FR descriptors.
 * @pool_size:         Number of descriptors to allocate.
 * @max_page_list_len: Maximum fast registration work request page list length.
 */
static struct srp_fr_pool *srp_create_fr_pool(struct ib_device *device,
					      struct ib_pd *pd, int pool_size,
					      int max_page_list_len)
{
	struct srp_fr_pool *pool;
	struct srp_fr_desc *d;
	struct ib_mr *mr;
	struct ib_fast_reg_page_list *frpl;
	int i, ret = -EINVAL;

	if (pool_size <= 0)
		goto err;
	ret = -ENOMEM;
	pool = kzalloc(sizeof(struct srp_fr_pool) +
		       pool_size * sizeof(struct srp_fr_desc), GFP_KERNEL);
	if (!pool)
		goto err;
	pool->size = pool_size;
	pool->max_page_list_len = max_page_list_len;
	spin_lock_init(&pool->lock);
	INIT_LIST_HEAD(&pool->free_list);

	for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
		mr = ib_alloc_fast_reg_mr(pd, max_page_list_len);
		if (IS_ERR(mr)) {
			ret = PTR_ERR(mr);
			goto destroy_pool;
		}
		d->mr = mr;
		frpl = ib_alloc_fast_reg_page_list(device, max_page_list_len);
		if (IS_ERR(frpl)) {
			ret = PTR_ERR(frpl);
			goto destroy_pool;
		}
		d->frpl = frpl;
		list_add_tail(&d->entry, &pool->free_list);
	}

out:
	return pool;

destroy_pool:
	srp_destroy_fr_pool(pool);

err:
	pool = ERR_PTR(ret);
	goto out;
}

/**
 * srp_fr_pool_get() - obtain a descriptor suitable for fast registration
 * @pool: Pool to obtain descriptor from.
 */
static struct srp_fr_desc *srp_fr_pool_get(struct srp_fr_pool *pool)
{
	struct srp_fr_desc *d = NULL;
	unsigned long flags;

	spin_lock_irqsave(&pool->lock, flags);
	if (!list_empty(&pool->free_list)) {
		d = list_first_entry(&pool->free_list, typeof(*d), entry);
		list_del(&d->entry);
	}
	spin_unlock_irqrestore(&pool->lock, flags);

	return d;
}

/**
 * srp_fr_pool_put() - put an FR descriptor back in the free list
 * @pool: Pool the descriptor was allocated from.
 * @desc: Pointer to an array of fast registration descriptor pointers.
 * @n:    Number of descriptors to put back.
 *
 * Note: The caller must already have queued an invalidation request for
 * desc->mr->rkey before calling this function.
 */
static void srp_fr_pool_put(struct srp_fr_pool *pool, struct srp_fr_desc **desc,
			    int n)
{
	unsigned long flags;
	int i;

	spin_lock_irqsave(&pool->lock, flags);
	for (i = 0; i < n; i++)
		list_add(&desc[i]->entry, &pool->free_list);
	spin_unlock_irqrestore(&pool->lock, flags);
}

static struct srp_fr_pool *srp_alloc_fr_pool(struct srp_target_port *target)
{
	struct srp_device *dev = target->srp_host->srp_dev;

	return srp_create_fr_pool(dev->dev, dev->pd,
				  target->scsi_host->can_queue,
				  dev->max_pages_per_mr);
}

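/*
 * Sketch of the fast registration life cycle as used elsewhere in this
 * file (see srp_map_finish_fr() and srp_unmap_data()):
 *
 *	struct srp_fr_desc *d = srp_fr_pool_get(ch->fr_pool);
 *	// post an IB_WR_FAST_REG_MR work request for d->mr
 *	// advertise d->mr->rkey in the SRP command's memory descriptors
 *	// after I/O completes, post an IB_WR_LOCAL_INV for d->mr->rkey
 *	srp_fr_pool_put(ch->fr_pool, &d, 1);
 */
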
/**
 * srp_destroy_qp() - destroy an RDMA queue pair
 * @ch: SRP RDMA channel.
 *
 * Change a queue pair into the error state and wait until all receive
 * completions have been processed before destroying it. This prevents the
 * receive completion handler from accessing the queue pair while it is
 * being destroyed.
 */
static void srp_destroy_qp(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	static struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
	static struct ib_recv_wr wr = { .wr_id = SRP_LAST_WR_ID };
	struct ib_recv_wr *bad_wr;
	int ret;

	/* Destroying a QP and reusing ch->done is only safe if not connected */
	WARN_ON_ONCE(target->connected);

	ret = ib_modify_qp(ch->qp, &attr, IB_QP_STATE);
	WARN_ONCE(ret, "ib_modify_qp() returned %d\n", ret);
	if (ret)
		goto out;

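	/*
	 * Post a sentinel receive WR on the QP, which is now in the error
	 * state; its flush completion (wr_id == SRP_LAST_WR_ID) signals the
	 * receive completion handler to complete ch->done, i.e. the QP has
	 * been drained.
	 */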
	init_completion(&ch->done);
	ret = ib_post_recv(ch->qp, &wr, &bad_wr);
	WARN_ONCE(ret, "ib_post_recv() returned %d\n", ret);
	if (ret == 0)
		wait_for_completion(&ch->done);

out:
	ib_destroy_qp(ch->qp);
}

static int srp_create_ch_ib(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_qp_init_attr *init_attr;
	struct ib_cq *recv_cq, *send_cq;
	struct ib_qp *qp;
	struct ib_fmr_pool *fmr_pool = NULL;
	struct srp_fr_pool *fr_pool = NULL;
	const int m = 1 + dev->use_fast_reg;
	int ret;

	init_attr = kzalloc(sizeof *init_attr, GFP_KERNEL);
	if (!init_attr)
		return -ENOMEM;

	/* + 1 for SRP_LAST_WR_ID */
	recv_cq = ib_create_cq(dev->dev, srp_recv_completion, NULL, ch,
			       target->queue_size + 1, ch->comp_vector);
	if (IS_ERR(recv_cq)) {
		ret = PTR_ERR(recv_cq);
		goto err;
	}

	send_cq = ib_create_cq(dev->dev, srp_send_completion, NULL, ch,
			       m * target->queue_size, ch->comp_vector);
	if (IS_ERR(send_cq)) {
		ret = PTR_ERR(send_cq);
		goto err_recv_cq;
	}

	ib_req_notify_cq(recv_cq, IB_CQ_NEXT_COMP);

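	/*
	 * Send-side resources are sized to m work requests per command
	 * (m == 2 with fast registration, since a command may post memory
	 * registration WRs in addition to its send WR); the receive side
	 * gets one extra slot for the SRP_LAST_WR_ID sentinel posted by
	 * srp_destroy_qp().
	 */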
524 init_attr->event_handler = srp_qp_event;
Bart Van Assche5cfb1782014-05-20 15:08:34 +0200525 init_attr->cap.max_send_wr = m * target->queue_size;
Bart Van Assche7dad6b22014-10-21 18:00:35 +0200526 init_attr->cap.max_recv_wr = target->queue_size + 1;
Roland Dreieraef9ec32005-11-02 14:07:13 -0800527 init_attr->cap.max_recv_sge = 1;
528 init_attr->cap.max_send_sge = 1;
Bart Van Assche5cfb1782014-05-20 15:08:34 +0200529 init_attr->sq_sig_type = IB_SIGNAL_REQ_WR;
Roland Dreieraef9ec32005-11-02 14:07:13 -0800530 init_attr->qp_type = IB_QPT_RC;
Ishai Rabinovitz73aa89e2012-11-26 11:44:53 +0100531 init_attr->send_cq = send_cq;
532 init_attr->recv_cq = recv_cq;
Roland Dreieraef9ec32005-11-02 14:07:13 -0800533
Bart Van Assche62154b22014-05-20 15:04:45 +0200534 qp = ib_create_qp(dev->pd, init_attr);
Ishai Rabinovitz73aa89e2012-11-26 11:44:53 +0100535 if (IS_ERR(qp)) {
536 ret = PTR_ERR(qp);
Roland Dreierda9d2f02010-02-24 15:07:59 -0800537 goto err_send_cq;
Roland Dreieraef9ec32005-11-02 14:07:13 -0800538 }
539
Ishai Rabinovitz73aa89e2012-11-26 11:44:53 +0100540 ret = srp_init_qp(target, qp);
Roland Dreierda9d2f02010-02-24 15:07:59 -0800541 if (ret)
542 goto err_qp;
Roland Dreieraef9ec32005-11-02 14:07:13 -0800543
Bart Van Assche5cfb1782014-05-20 15:08:34 +0200544 if (dev->use_fast_reg && dev->has_fr) {
545 fr_pool = srp_alloc_fr_pool(target);
546 if (IS_ERR(fr_pool)) {
547 ret = PTR_ERR(fr_pool);
548 shost_printk(KERN_WARNING, target->scsi_host, PFX
549 "FR pool allocation failed (%d)\n", ret);
550 goto err_qp;
551 }
Bart Van Assche509c07b2014-10-30 14:48:30 +0100552 if (ch->fr_pool)
553 srp_destroy_fr_pool(ch->fr_pool);
554 ch->fr_pool = fr_pool;
Bart Van Assche5cfb1782014-05-20 15:08:34 +0200555 } else if (!dev->use_fast_reg && dev->has_fmr) {
Bart Van Assched1b42892014-05-20 15:07:20 +0200556 fmr_pool = srp_alloc_fmr_pool(target);
557 if (IS_ERR(fmr_pool)) {
558 ret = PTR_ERR(fmr_pool);
559 shost_printk(KERN_WARNING, target->scsi_host, PFX
560 "FMR pool allocation failed (%d)\n", ret);
561 goto err_qp;
562 }
Bart Van Assche509c07b2014-10-30 14:48:30 +0100563 if (ch->fmr_pool)
564 ib_destroy_fmr_pool(ch->fmr_pool);
565 ch->fmr_pool = fmr_pool;
Bart Van Assched1b42892014-05-20 15:07:20 +0200566 }
567
Bart Van Assche509c07b2014-10-30 14:48:30 +0100568 if (ch->qp)
Bart Van Assche7dad6b22014-10-21 18:00:35 +0200569 srp_destroy_qp(ch);
Bart Van Assche509c07b2014-10-30 14:48:30 +0100570 if (ch->recv_cq)
571 ib_destroy_cq(ch->recv_cq);
572 if (ch->send_cq)
573 ib_destroy_cq(ch->send_cq);
Ishai Rabinovitz73aa89e2012-11-26 11:44:53 +0100574
Bart Van Assche509c07b2014-10-30 14:48:30 +0100575 ch->qp = qp;
576 ch->recv_cq = recv_cq;
577 ch->send_cq = send_cq;
Ishai Rabinovitz73aa89e2012-11-26 11:44:53 +0100578
Roland Dreierda9d2f02010-02-24 15:07:59 -0800579 kfree(init_attr);
580 return 0;
581
582err_qp:
Ishai Rabinovitz73aa89e2012-11-26 11:44:53 +0100583 ib_destroy_qp(qp);
Roland Dreierda9d2f02010-02-24 15:07:59 -0800584
585err_send_cq:
Ishai Rabinovitz73aa89e2012-11-26 11:44:53 +0100586 ib_destroy_cq(send_cq);
Roland Dreierda9d2f02010-02-24 15:07:59 -0800587
588err_recv_cq:
Ishai Rabinovitz73aa89e2012-11-26 11:44:53 +0100589 ib_destroy_cq(recv_cq);
Roland Dreierda9d2f02010-02-24 15:07:59 -0800590
591err:
Roland Dreieraef9ec32005-11-02 14:07:13 -0800592 kfree(init_attr);
593 return ret;
594}
595
/*
 * Note: this function may be called without srp_alloc_iu_bufs() having been
 * invoked. Hence the ch->[rt]x_ring checks.
 */
static void srp_free_ch_ib(struct srp_target_port *target,
			   struct srp_rdma_ch *ch)
{
	struct srp_device *dev = target->srp_host->srp_dev;
	int i;

	if (!ch->target)
		return;

	if (ch->cm_id) {
		ib_destroy_cm_id(ch->cm_id);
		ch->cm_id = NULL;
	}

	/* If srp_new_cm_id() succeeded but srp_create_ch_ib() did not, return. */
	if (!ch->qp)
		return;

	if (dev->use_fast_reg) {
		if (ch->fr_pool)
			srp_destroy_fr_pool(ch->fr_pool);
	} else {
		if (ch->fmr_pool)
			ib_destroy_fmr_pool(ch->fmr_pool);
	}
	srp_destroy_qp(ch);
	ib_destroy_cq(ch->send_cq);
	ib_destroy_cq(ch->recv_cq);

	/*
	 * Prevent the SCSI error handler from using this channel after it
	 * has been freed: the error handler may keep trying to perform
	 * recovery actions after scsi_remove_host() has returned.
	 */
	ch->target = NULL;

	ch->qp = NULL;
	ch->send_cq = ch->recv_cq = NULL;

	if (ch->rx_ring) {
		for (i = 0; i < target->queue_size; ++i)
			srp_free_iu(target->srp_host, ch->rx_ring[i]);
		kfree(ch->rx_ring);
		ch->rx_ring = NULL;
	}
	if (ch->tx_ring) {
		for (i = 0; i < target->queue_size; ++i)
			srp_free_iu(target->srp_host, ch->tx_ring[i]);
		kfree(ch->tx_ring);
		ch->tx_ring = NULL;
	}
}

static void srp_path_rec_completion(int status,
				    struct ib_sa_path_rec *pathrec,
				    void *ch_ptr)
{
	struct srp_rdma_ch *ch = ch_ptr;
	struct srp_target_port *target = ch->target;

	ch->status = status;
	if (status)
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "Got failed path rec status %d\n", status);
	else
		ch->path = *pathrec;
	complete(&ch->done);
}

static int srp_lookup_path(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	int ret;

	ch->path.numb_path = 1;

	init_completion(&ch->done);

	ch->path_query_id = ib_sa_path_rec_get(&srp_sa_client,
					       target->srp_host->srp_dev->dev,
					       target->srp_host->port,
					       &ch->path,
					       IB_SA_PATH_REC_SERVICE_ID |
					       IB_SA_PATH_REC_DGID	 |
					       IB_SA_PATH_REC_SGID	 |
					       IB_SA_PATH_REC_NUMB_PATH	 |
					       IB_SA_PATH_REC_PKEY,
					       SRP_PATH_REC_TIMEOUT_MS,
					       GFP_KERNEL,
					       srp_path_rec_completion,
					       ch, &ch->path_query);
	if (ch->path_query_id < 0)
		return ch->path_query_id;

	ret = wait_for_completion_interruptible(&ch->done);
	if (ret < 0)
		return ret;

	if (ch->status < 0)
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Path record query failed\n");

	return ch->status;
}

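/*
 * srp_send_req() builds an IB CM REQ whose private data carries the
 * SRP_LOGIN_REQ, i.e. the SRP login is piggybacked on connection
 * establishment. The multich flag selects SRP_MULTICHAN_MULTI so that an
 * additional channel is associated with the existing I_T nexus instead of
 * replacing it (per the SRP multichannel action field).
 */
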
static int srp_send_req(struct srp_rdma_ch *ch, bool multich)
{
	struct srp_target_port *target = ch->target;
	struct {
		struct ib_cm_req_param param;
		struct srp_login_req   priv;
	} *req = NULL;
	int status;

	req = kzalloc(sizeof *req, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	req->param.primary_path	      = &ch->path;
	req->param.alternate_path     = NULL;
	req->param.service_id	      = target->service_id;
	req->param.qp_num	      = ch->qp->qp_num;
	req->param.qp_type	      = ch->qp->qp_type;
	req->param.private_data	      = &req->priv;
	req->param.private_data_len   = sizeof req->priv;
	req->param.flow_control	      = 1;

	get_random_bytes(&req->param.starting_psn, 4);
	req->param.starting_psn	     &= 0xffffff;

	/*
	 * Pick some arbitrary defaults here; we could make these
	 * module parameters if anyone cared about setting them.
	 */
	req->param.responder_resources	      = 4;
	req->param.remote_cm_response_timeout = 20;
	req->param.local_cm_response_timeout  = 20;
	req->param.retry_count		      = target->tl_retry_count;
	req->param.rnr_retry_count	      = 7;
	req->param.max_cm_retries	      = 15;

	req->priv.opcode	= SRP_LOGIN_REQ;
	req->priv.tag		= 0;
	req->priv.req_it_iu_len = cpu_to_be32(target->max_iu_len);
	req->priv.req_buf_fmt	= cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
					      SRP_BUF_FORMAT_INDIRECT);
	req->priv.req_flags	= (multich ? SRP_MULTICHAN_MULTI :
				   SRP_MULTICHAN_SINGLE);
	/*
	 * In the published SRP specification (draft rev. 16a), the
	 * port identifier format is 8 bytes of ID extension followed
	 * by 8 bytes of GUID.  Older drafts put the two halves in the
	 * opposite order, so that the GUID comes first.
	 *
	 * Targets conforming to these obsolete drafts can be
	 * recognized by the I/O Class they report.
	 */
	if (target->io_class == SRP_REV10_IB_IO_CLASS) {
		memcpy(req->priv.initiator_port_id,
		       &target->sgid.global.interface_id, 8);
		memcpy(req->priv.initiator_port_id + 8,
		       &target->initiator_ext, 8);
		memcpy(req->priv.target_port_id,     &target->ioc_guid, 8);
		memcpy(req->priv.target_port_id + 8, &target->id_ext, 8);
	} else {
		memcpy(req->priv.initiator_port_id,
		       &target->initiator_ext, 8);
		memcpy(req->priv.initiator_port_id + 8,
		       &target->sgid.global.interface_id, 8);
		memcpy(req->priv.target_port_id,     &target->id_ext, 8);
		memcpy(req->priv.target_port_id + 8, &target->ioc_guid, 8);
	}

	/*
	 * Topspin/Cisco SRP targets will reject our login unless we
	 * zero out the first 8 bytes of our initiator port ID and set
	 * the second 8 bytes to the local node GUID.
	 */
	if (srp_target_is_topspin(target)) {
		shost_printk(KERN_DEBUG, target->scsi_host,
			     PFX "Topspin/Cisco initiator port ID workaround "
			     "activated for target GUID %016llx\n",
			     (unsigned long long) be64_to_cpu(target->ioc_guid));
		memset(req->priv.initiator_port_id, 0, 8);
		memcpy(req->priv.initiator_port_id + 8,
		       &target->srp_host->srp_dev->dev->node_guid, 8);
	}

	status = ib_send_cm_req(ch->cm_id, &req->param);

	kfree(req);

	return status;
}

static bool srp_queue_remove_work(struct srp_target_port *target)
{
	bool changed = false;

	spin_lock_irq(&target->lock);
	if (target->state != SRP_TARGET_REMOVED) {
		target->state = SRP_TARGET_REMOVED;
		changed = true;
	}
	spin_unlock_irq(&target->lock);

	if (changed)
		queue_work(srp_remove_wq, &target->remove_work);

	return changed;
}

static bool srp_change_conn_state(struct srp_target_port *target,
				  bool connected)
{
	bool changed = false;

	spin_lock_irq(&target->lock);
	if (target->connected != connected) {
		target->connected = connected;
		changed = true;
	}
	spin_unlock_irq(&target->lock);

	return changed;
}

static void srp_disconnect_target(struct srp_target_port *target)
{
	struct srp_rdma_ch *ch;
	int i;

	if (srp_change_conn_state(target, false)) {
		/* XXX should send SRP_I_LOGOUT request */

		for (i = 0; i < target->ch_count; i++) {
			ch = &target->ch[i];
			if (ch->cm_id && ib_send_cm_dreq(ch->cm_id, NULL, 0)) {
				shost_printk(KERN_DEBUG, target->scsi_host,
					     PFX "Sending CM DREQ failed\n");
			}
		}
	}
}

static void srp_free_req_data(struct srp_target_port *target,
			      struct srp_rdma_ch *ch)
{
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_device *ibdev = dev->dev;
	struct srp_request *req;
	int i;

	if (!ch->target || !ch->req_ring)
		return;

	for (i = 0; i < target->req_ring_size; ++i) {
		req = &ch->req_ring[i];
		if (dev->use_fast_reg)
			kfree(req->fr_list);
		else
			kfree(req->fmr_list);
		kfree(req->map_page);
		if (req->indirect_dma_addr) {
			ib_dma_unmap_single(ibdev, req->indirect_dma_addr,
					    target->indirect_size,
					    DMA_TO_DEVICE);
		}
		kfree(req->indirect_desc);
	}

	kfree(ch->req_ring);
	ch->req_ring = NULL;
}

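/*
 * srp_alloc_req_data() sets up the per-request resources of one channel:
 * a list of memory registration descriptors (FR or FMR, depending on the
 * device), a page array used to build registrations, and an indirect
 * descriptor buffer that is DMA-mapped once here and then reused for every
 * command issued through that request slot.
 */
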
static int srp_alloc_req_data(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	struct srp_device *srp_dev = target->srp_host->srp_dev;
	struct ib_device *ibdev = srp_dev->dev;
	struct srp_request *req;
	void *mr_list;
	dma_addr_t dma_addr;
	int i, ret = -ENOMEM;

	ch->req_ring = kcalloc(target->req_ring_size, sizeof(*ch->req_ring),
			       GFP_KERNEL);
	if (!ch->req_ring)
		goto out;

	for (i = 0; i < target->req_ring_size; ++i) {
		req = &ch->req_ring[i];
		mr_list = kmalloc(target->cmd_sg_cnt * sizeof(void *),
				  GFP_KERNEL);
		if (!mr_list)
			goto out;
		if (srp_dev->use_fast_reg)
			req->fr_list = mr_list;
		else
			req->fmr_list = mr_list;
		req->map_page = kmalloc(srp_dev->max_pages_per_mr *
					sizeof(void *), GFP_KERNEL);
		if (!req->map_page)
			goto out;
		req->indirect_desc = kmalloc(target->indirect_size, GFP_KERNEL);
		if (!req->indirect_desc)
			goto out;

		dma_addr = ib_dma_map_single(ibdev, req->indirect_desc,
					     target->indirect_size,
					     DMA_TO_DEVICE);
		if (ib_dma_mapping_error(ibdev, dma_addr))
			goto out;

		req->indirect_dma_addr = dma_addr;
	}
	ret = 0;

out:
	return ret;
}

/**
 * srp_del_scsi_host_attr() - Remove attributes defined in the host template.
 * @shost: SCSI host whose attributes to remove from sysfs.
 *
 * Note: any attributes defined in the host template that did not exist before
 * this function was invoked will be ignored.
 */
static void srp_del_scsi_host_attr(struct Scsi_Host *shost)
{
	struct device_attribute **attr;

	for (attr = shost->hostt->shost_attrs; attr && *attr; ++attr)
		device_remove_file(&shost->shost_dev, *attr);
}

static void srp_remove_target(struct srp_target_port *target)
{
	struct srp_rdma_ch *ch;
	int i;

	WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);

	srp_del_scsi_host_attr(target->scsi_host);
	srp_rport_get(target->rport);
	srp_remove_host(target->scsi_host);
	scsi_remove_host(target->scsi_host);
	srp_stop_rport_timers(target->rport);
	srp_disconnect_target(target);
	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		srp_free_ch_ib(target, ch);
	}
	cancel_work_sync(&target->tl_err_work);
	srp_rport_put(target->rport);
	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		srp_free_req_data(target, ch);
	}
	kfree(target->ch);
	target->ch = NULL;

	spin_lock(&target->srp_host->target_lock);
	list_del(&target->list);
	spin_unlock(&target->srp_host->target_lock);

	scsi_host_put(target->scsi_host);
}

static void srp_remove_work(struct work_struct *work)
{
	struct srp_target_port *target =
		container_of(work, struct srp_target_port, remove_work);

	WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);

	srp_remove_target(target);
}

static void srp_rport_delete(struct srp_rport *rport)
{
	struct srp_target_port *target = rport->lld_data;

	srp_queue_remove_work(target);
}

static int srp_connect_ch(struct srp_rdma_ch *ch, bool multich)
{
	struct srp_target_port *target = ch->target;
	int ret;

	WARN_ON_ONCE(!multich && target->connected);

	target->qp_in_error = false;

	ret = srp_lookup_path(ch);
	if (ret)
		return ret;

	while (1) {
		init_completion(&ch->done);
		ret = srp_send_req(ch, multich);
		if (ret)
			return ret;
		ret = wait_for_completion_interruptible(&ch->done);
		if (ret < 0)
			return ret;

		/*
		 * The CM event handling code will set status to
		 * SRP_PORT_REDIRECT if we get a port redirect REJ
		 * back, or SRP_DLID_REDIRECT if we get a lid/qp
		 * redirect REJ back.
		 */
		switch (ch->status) {
		case 0:
			srp_change_conn_state(target, true);
			return 0;

		case SRP_PORT_REDIRECT:
			ret = srp_lookup_path(ch);
			if (ret)
				return ret;
			break;

		case SRP_DLID_REDIRECT:
			break;

		case SRP_STALE_CONN:
			shost_printk(KERN_ERR, target->scsi_host, PFX
				     "giving up on stale connection\n");
			ch->status = -ECONNRESET;
			return ch->status;

		default:
			return ch->status;
		}
	}
}

static int srp_inv_rkey(struct srp_rdma_ch *ch, u32 rkey)
{
	struct ib_send_wr *bad_wr;
	struct ib_send_wr wr = {
		.opcode		    = IB_WR_LOCAL_INV,
		.wr_id		    = LOCAL_INV_WR_ID_MASK,
		.next		    = NULL,
		.num_sge	    = 0,
		.send_flags	    = 0,
		.ex.invalidate_rkey = rkey,
	};

	return ib_post_send(ch->qp, &wr, &bad_wr);
}

static void srp_unmap_data(struct scsi_cmnd *scmnd,
			   struct srp_rdma_ch *ch,
			   struct srp_request *req)
{
	struct srp_target_port *target = ch->target;
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_device *ibdev = dev->dev;
	int i, res;

	if (!scsi_sglist(scmnd) ||
	    (scmnd->sc_data_direction != DMA_TO_DEVICE &&
	     scmnd->sc_data_direction != DMA_FROM_DEVICE))
		return;

	if (dev->use_fast_reg) {
		struct srp_fr_desc **pfr;

		for (i = req->nmdesc, pfr = req->fr_list; i > 0; i--, pfr++) {
			res = srp_inv_rkey(ch, (*pfr)->mr->rkey);
			if (res < 0) {
				shost_printk(KERN_ERR, target->scsi_host, PFX
					     "Queueing INV WR for rkey %#x failed (%d)\n",
					     (*pfr)->mr->rkey, res);
				queue_work(system_long_wq,
					   &target->tl_err_work);
			}
		}
		if (req->nmdesc)
			srp_fr_pool_put(ch->fr_pool, req->fr_list,
					req->nmdesc);
	} else {
		struct ib_pool_fmr **pfmr;

		for (i = req->nmdesc, pfmr = req->fmr_list; i > 0; i--, pfmr++)
			ib_fmr_pool_unmap(*pfmr);
	}

	ib_dma_unmap_sg(ibdev, scsi_sglist(scmnd), scsi_sg_count(scmnd),
			scmnd->sc_data_direction);
}

/**
 * srp_claim_req - Take ownership of the scmnd associated with a request.
 * @ch: SRP RDMA channel.
 * @req: SRP request.
 * @sdev: If not NULL, only take ownership for this SCSI device.
 * @scmnd: If NULL, take ownership of @req->scmnd. If not NULL, only take
 *         ownership of @req->scmnd if it equals @scmnd.
 *
 * Return value:
 * Either NULL or a pointer to the SCSI command the caller became owner of.
 */
static struct scsi_cmnd *srp_claim_req(struct srp_rdma_ch *ch,
				       struct srp_request *req,
				       struct scsi_device *sdev,
				       struct scsi_cmnd *scmnd)
{
	unsigned long flags;

	spin_lock_irqsave(&ch->lock, flags);
	if (req->scmnd &&
	    (!sdev || req->scmnd->device == sdev) &&
	    (!scmnd || req->scmnd == scmnd)) {
		scmnd = req->scmnd;
		req->scmnd = NULL;
	} else {
		scmnd = NULL;
	}
	spin_unlock_irqrestore(&ch->lock, flags);

	return scmnd;
}

/**
 * srp_free_req() - Unmap data and add request to the free request list.
 * @ch:     SRP RDMA channel.
 * @req:    Request to be freed.
 * @scmnd:  SCSI command associated with @req.
 * @req_lim_delta: Amount to be added to @target->req_lim.
 */
static void srp_free_req(struct srp_rdma_ch *ch, struct srp_request *req,
			 struct scsi_cmnd *scmnd, s32 req_lim_delta)
{
	unsigned long flags;

	srp_unmap_data(scmnd, ch, req);

	spin_lock_irqsave(&ch->lock, flags);
	ch->req_lim += req_lim_delta;
	spin_unlock_irqrestore(&ch->lock, flags);
}

static void srp_finish_req(struct srp_rdma_ch *ch, struct srp_request *req,
			   struct scsi_device *sdev, int result)
{
	struct scsi_cmnd *scmnd = srp_claim_req(ch, req, sdev, NULL);

	if (scmnd) {
		srp_free_req(ch, req, scmnd, 0);
		scmnd->result = result;
		scmnd->scsi_done(scmnd);
	}
}

static void srp_terminate_io(struct srp_rport *rport)
{
	struct srp_target_port *target = rport->lld_data;
	struct srp_rdma_ch *ch;
	struct Scsi_Host *shost = target->scsi_host;
	struct scsi_device *sdev;
	int i, j;

	/*
	 * Invoking srp_terminate_io() while srp_queuecommand() is running
	 * is not safe. Hence the warning statement below.
	 */
	shost_for_each_device(sdev, shost)
		WARN_ON_ONCE(sdev->request_queue->request_fn_active);

	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];

		for (j = 0; j < target->req_ring_size; ++j) {
			struct srp_request *req = &ch->req_ring[j];

			srp_finish_req(ch, req, NULL,
				       DID_TRANSPORT_FAILFAST << 16);
		}
	}
}

/*
 * It is up to the caller to ensure that srp_rport_reconnect() calls are
 * serialized and that no concurrent srp_queuecommand(), srp_abort(),
 * srp_reset_device() or srp_reset_host() calls will occur while this function
 * is in progress. One way to realize that is not to call this function
 * directly but to call srp_reconnect_rport() instead since that last function
 * serializes calls of this function via rport->mutex and also blocks
 * srp_queuecommand() calls before invoking this function.
 */
static int srp_rport_reconnect(struct srp_rport *rport)
{
	struct srp_target_port *target = rport->lld_data;
	struct srp_rdma_ch *ch;
	int i, j, ret = 0;
	bool multich = false;

	srp_disconnect_target(target);

	if (target->state == SRP_TARGET_SCANNING)
		return -ENODEV;

	/*
	 * Now get a new local CM ID so that we avoid confusing the target in
	 * case things are really fouled up. Doing so also ensures that all CM
	 * callbacks will have finished before a new QP is allocated.
	 */
	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		if (!ch->target)
			break;
		ret += srp_new_cm_id(ch);
	}
	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		if (!ch->target)
			break;
		for (j = 0; j < target->req_ring_size; ++j) {
			struct srp_request *req = &ch->req_ring[j];

			srp_finish_req(ch, req, NULL, DID_RESET << 16);
		}
	}
	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		if (!ch->target)
			break;
		/*
		 * Whether or not creating a new CM ID succeeded, create a new
		 * QP. This guarantees that all completion callback function
		 * invocations have finished before request resetting starts.
		 */
		ret += srp_create_ch_ib(ch);

		INIT_LIST_HEAD(&ch->free_tx);
		for (j = 0; j < target->queue_size; ++j)
			list_add(&ch->tx_ring[j]->list, &ch->free_tx);
	}
	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		if (ret || !ch->target) {
			if (i > 1)
				ret = 0;
			break;
		}
		ret = srp_connect_ch(ch, multich);
		multich = true;
	}

	if (ret == 0)
		shost_printk(KERN_INFO, target->scsi_host,
			     PFX "reconnect succeeded\n");

	return ret;
}

static void srp_map_desc(struct srp_map_state *state, dma_addr_t dma_addr,
			 unsigned int dma_len, u32 rkey)
{
	struct srp_direct_buf *desc = state->desc;

	desc->va = cpu_to_be64(dma_addr);
	desc->key = cpu_to_be32(rkey);
	desc->len = cpu_to_be32(dma_len);

	state->total_len += dma_len;
	state->desc++;
	state->ndesc++;
}

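/*
 * srp_map_desc() above emits one SRP direct memory descriptor: a (virtual
 * address, rkey, length) triple in big-endian format, which is how the SRP
 * protocol describes a remotely accessible buffer to the target.
 */
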
1277static int srp_map_finish_fmr(struct srp_map_state *state,
Bart Van Assche509c07b2014-10-30 14:48:30 +01001278 struct srp_rdma_ch *ch)
David Dillow8f26c9f2011-01-14 19:45:50 -05001279{
David Dillow8f26c9f2011-01-14 19:45:50 -05001280 struct ib_pool_fmr *fmr;
Roland Dreierf5358a12006-06-17 20:37:29 -07001281 u64 io_addr = 0;
David Dillow8f26c9f2011-01-14 19:45:50 -05001282
Bart Van Assche509c07b2014-10-30 14:48:30 +01001283 fmr = ib_fmr_pool_map_phys(ch->fmr_pool, state->pages,
David Dillow8f26c9f2011-01-14 19:45:50 -05001284 state->npages, io_addr);
1285 if (IS_ERR(fmr))
1286 return PTR_ERR(fmr);
1287
1288 *state->next_fmr++ = fmr;
Bart Van Assche52ede082014-05-20 15:07:45 +02001289 state->nmdesc++;
David Dillow8f26c9f2011-01-14 19:45:50 -05001290
Bart Van Assche52ede082014-05-20 15:07:45 +02001291 srp_map_desc(state, 0, state->dma_len, fmr->fmr->rkey);
Bart Van Assche539dde62014-05-20 15:05:46 +02001292
David Dillow8f26c9f2011-01-14 19:45:50 -05001293 return 0;
1294}
1295
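/**
 * srp_map_finish_fr() - register the accumulated pages via fast registration
 * @state: Mapping state whose page array is registered.
 * @ch: RDMA channel on whose QP the fast registration WR is posted.
 *
 * Takes a descriptor from the channel's fast registration pool, advances
 * its rkey, posts an IB_WR_FAST_REG_MR work request and emits a single
 * descriptor for the registered range. Returns -ENOMEM if the pool is
 * exhausted, otherwise the ib_post_send() result.
 */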
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001296static int srp_map_finish_fr(struct srp_map_state *state,
Bart Van Assche509c07b2014-10-30 14:48:30 +01001297 struct srp_rdma_ch *ch)
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001298{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001299 struct srp_target_port *target = ch->target;
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001300 struct srp_device *dev = target->srp_host->srp_dev;
1301 struct ib_send_wr *bad_wr;
1302 struct ib_send_wr wr;
1303 struct srp_fr_desc *desc;
1304 u32 rkey;
1305
Bart Van Assche509c07b2014-10-30 14:48:30 +01001306 desc = srp_fr_pool_get(ch->fr_pool);
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001307 if (!desc)
1308 return -ENOMEM;
1309
1310 rkey = ib_inc_rkey(desc->mr->rkey);
1311 ib_update_fast_reg_key(desc->mr, rkey);
1312
1313 memcpy(desc->frpl->page_list, state->pages,
1314 sizeof(state->pages[0]) * state->npages);
1315
1316 memset(&wr, 0, sizeof(wr));
1317 wr.opcode = IB_WR_FAST_REG_MR;
1318 wr.wr_id = FAST_REG_WR_ID_MASK;
1319 wr.wr.fast_reg.iova_start = state->base_dma_addr;
1320 wr.wr.fast_reg.page_list = desc->frpl;
1321 wr.wr.fast_reg.page_list_len = state->npages;
1322 wr.wr.fast_reg.page_shift = ilog2(dev->mr_page_size);
1323 wr.wr.fast_reg.length = state->dma_len;
1324 wr.wr.fast_reg.access_flags = (IB_ACCESS_LOCAL_WRITE |
1325 IB_ACCESS_REMOTE_READ |
1326 IB_ACCESS_REMOTE_WRITE);
1327 wr.wr.fast_reg.rkey = desc->mr->lkey;
1328
1329 *state->next_fr++ = desc;
1330 state->nmdesc++;
1331
1332 srp_map_desc(state, state->base_dma_addr, state->dma_len,
1333 desc->mr->rkey);
1334
Bart Van Assche509c07b2014-10-30 14:48:30 +01001335 return ib_post_send(ch->qp, &wr, &bad_wr);
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001336}
1337
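/**
 * srp_finish_mapping() - finish mapping the pages accumulated so far
 * @state: Mapping state.
 * @ch: RDMA channel.
 *
 * Emits a direct descriptor that uses the target's rkey when only a single
 * page has been accumulated and register_always is not set; otherwise
 * registers the pages through FMR or fast registration, depending on the
 * device. On success the page accumulator in @state is reset.
 */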
Bart Van Assche539dde62014-05-20 15:05:46 +02001338static int srp_finish_mapping(struct srp_map_state *state,
Bart Van Assche509c07b2014-10-30 14:48:30 +01001339 struct srp_rdma_ch *ch)
Bart Van Assche539dde62014-05-20 15:05:46 +02001340{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001341 struct srp_target_port *target = ch->target;
Bart Van Assche539dde62014-05-20 15:05:46 +02001342 int ret = 0;
1343
1344 if (state->npages == 0)
1345 return 0;
1346
Bart Van Asscheb1b88542014-05-20 15:06:41 +02001347 if (state->npages == 1 && !register_always)
Bart Van Assche52ede082014-05-20 15:07:45 +02001348 srp_map_desc(state, state->base_dma_addr, state->dma_len,
Bart Van Assche539dde62014-05-20 15:05:46 +02001349 target->rkey);
1350 else
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001351 ret = target->srp_host->srp_dev->use_fast_reg ?
Bart Van Assche509c07b2014-10-30 14:48:30 +01001352 srp_map_finish_fr(state, ch) :
1353 srp_map_finish_fmr(state, ch);
Bart Van Assche539dde62014-05-20 15:05:46 +02001354
1355 if (ret == 0) {
1356 state->npages = 0;
Bart Van Assche52ede082014-05-20 15:07:45 +02001357 state->dma_len = 0;
Bart Van Assche539dde62014-05-20 15:05:46 +02001358 }
1359
1360 return ret;
1361}
1362
David Dillow8f26c9f2011-01-14 19:45:50 -05001363static void srp_map_update_start(struct srp_map_state *state,
1364 struct scatterlist *sg, int sg_index,
1365 dma_addr_t dma_addr)
1366{
1367 state->unmapped_sg = sg;
1368 state->unmapped_index = sg_index;
1369 state->unmapped_addr = dma_addr;
1370}
1371
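/**
 * srp_map_sg_entry() - map a single scatterlist entry
 * @state: Mapping state.
 * @ch: RDMA channel.
 * @sg: Scatterlist entry to map.
 * @sg_index: Index of @sg within the scatterlist.
 * @use_mr: Whether to use memory registration (FMR or FR).
 *
 * Without memory registration the entry is described directly using the
 * target's rkey. Otherwise the entry is split into mr_page_size pieces
 * that are accumulated in @state and registered whenever the page array
 * fills up or a page offset forces the current mapping to be closed.
 */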
1372static int srp_map_sg_entry(struct srp_map_state *state,
Bart Van Assche509c07b2014-10-30 14:48:30 +01001373 struct srp_rdma_ch *ch,
David Dillow8f26c9f2011-01-14 19:45:50 -05001374 struct scatterlist *sg, int sg_index,
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001375 bool use_mr)
David Dillow8f26c9f2011-01-14 19:45:50 -05001376{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001377 struct srp_target_port *target = ch->target;
Greg Kroah-Hartman05321932008-03-06 00:13:36 +01001378 struct srp_device *dev = target->srp_host->srp_dev;
Ralph Campbell85507bc2006-12-12 14:30:55 -08001379 struct ib_device *ibdev = dev->dev;
David Dillow8f26c9f2011-01-14 19:45:50 -05001380 dma_addr_t dma_addr = ib_sg_dma_address(ibdev, sg);
1381 unsigned int dma_len = ib_sg_dma_len(ibdev, sg);
1382 unsigned int len;
1383 int ret;
Roland Dreierf5358a12006-06-17 20:37:29 -07001384
David Dillow8f26c9f2011-01-14 19:45:50 -05001385 if (!dma_len)
1386 return 0;
Roland Dreierf5358a12006-06-17 20:37:29 -07001387
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001388 if (!use_mr) {
1389 /*
1390 * Once we're in direct map mode for a request, we don't
1391 * go back to FMR or FR mode, so no need to update anything
David Dillow8f26c9f2011-01-14 19:45:50 -05001392 * other than the descriptor.
1393 */
1394 srp_map_desc(state, dma_addr, dma_len, target->rkey);
1395 return 0;
1396 }
Ishai Rabinovitz559ce8f2006-08-03 10:35:43 -07001397
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001398 /*
1399 * Since not all RDMA HW drivers support non-zero page offsets for
1400 * FMR, if we start at an offset into a page, don't merge into the
1401 * current FMR mapping. Finish it out, and use the kernel's MR for
1402 * this sg entry.
David Dillow8f26c9f2011-01-14 19:45:50 -05001403 */
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001404 if ((!dev->use_fast_reg && dma_addr & ~dev->mr_page_mask) ||
1405 dma_len > dev->mr_max_size) {
Bart Van Assche509c07b2014-10-30 14:48:30 +01001406 ret = srp_finish_mapping(state, ch);
David Dillow8f26c9f2011-01-14 19:45:50 -05001407 if (ret)
1408 return ret;
Ralph Campbell85507bc2006-12-12 14:30:55 -08001409
David Dillow8f26c9f2011-01-14 19:45:50 -05001410 srp_map_desc(state, dma_addr, dma_len, target->rkey);
1411 srp_map_update_start(state, NULL, 0, 0);
1412 return 0;
1413 }
1414
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001415 /*
1416 * If this is the first sg that will be mapped via FMR or via FR, save
1417 * our position. We need to know the first unmapped entry, its index,
1418 * and the first unmapped address within that entry to be able to
1419 * restart mapping after an error.
David Dillow8f26c9f2011-01-14 19:45:50 -05001420 */
1421 if (!state->unmapped_sg)
1422 srp_map_update_start(state, sg, sg_index, dma_addr);
1423
1424 while (dma_len) {
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001425 unsigned offset = dma_addr & ~dev->mr_page_mask;
1426 if (state->npages == dev->max_pages_per_mr || offset != 0) {
Bart Van Assche509c07b2014-10-30 14:48:30 +01001427 ret = srp_finish_mapping(state, ch);
David Dillow8f26c9f2011-01-14 19:45:50 -05001428 if (ret)
1429 return ret;
1430
1431 srp_map_update_start(state, sg, sg_index, dma_addr);
Roland Dreierf5358a12006-06-17 20:37:29 -07001432 }
1433
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001434 len = min_t(unsigned int, dma_len, dev->mr_page_size - offset);
David Dillow8f26c9f2011-01-14 19:45:50 -05001435
1436 if (!state->npages)
1437 state->base_dma_addr = dma_addr;
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001438 state->pages[state->npages++] = dma_addr & dev->mr_page_mask;
Bart Van Assche52ede082014-05-20 15:07:45 +02001439 state->dma_len += len;
David Dillow8f26c9f2011-01-14 19:45:50 -05001440 dma_addr += len;
1441 dma_len -= len;
Roland Dreierf5358a12006-06-17 20:37:29 -07001442 }
1443
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001444 /*
1445 * If the last entry of the MR wasn't a full page, then we need to
David Dillow8f26c9f2011-01-14 19:45:50 -05001446 * close it out and start a new one -- we can only merge at page
1447 * boundaries.
1448 */
Roland Dreierf5358a12006-06-17 20:37:29 -07001449 ret = 0;
Bart Van Assche52ede082014-05-20 15:07:45 +02001450 if (len != dev->mr_page_size) {
Bart Van Assche509c07b2014-10-30 14:48:30 +01001451 ret = srp_finish_mapping(state, ch);
David Dillow8f26c9f2011-01-14 19:45:50 -05001452 if (!ret)
1453 srp_map_update_start(state, NULL, 0, 0);
1454 }
Roland Dreierf5358a12006-06-17 20:37:29 -07001455 return ret;
1456}
1457
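/**
 * srp_map_sg() - map a scatterlist via memory registration or directly
 * @state: Mapping state.
 * @ch: RDMA channel.
 * @req: SRP request that owns the descriptor, page and MR arrays.
 * @scat: Scatterlist to map.
 * @count: Number of DMA-mapped scatterlist entries.
 *
 * If registering an entry fails, mapping restarts from the first unmapped
 * entry and the remainder of the scatterlist is described with direct
 * descriptors only.
 */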
Bart Van Assche509c07b2014-10-30 14:48:30 +01001458static int srp_map_sg(struct srp_map_state *state, struct srp_rdma_ch *ch,
1459 struct srp_request *req, struct scatterlist *scat,
1460 int count)
Bart Van Assche76bc1e12014-05-20 15:05:21 +02001461{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001462 struct srp_target_port *target = ch->target;
Bart Van Assche76bc1e12014-05-20 15:05:21 +02001463 struct srp_device *dev = target->srp_host->srp_dev;
1464 struct ib_device *ibdev = dev->dev;
1465 struct scatterlist *sg;
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001466 int i;
1467 bool use_mr;
Bart Van Assche76bc1e12014-05-20 15:05:21 +02001468
1469 state->desc = req->indirect_desc;
1470 state->pages = req->map_page;
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001471 if (dev->use_fast_reg) {
1472 state->next_fr = req->fr_list;
Bart Van Assche509c07b2014-10-30 14:48:30 +01001473 use_mr = !!ch->fr_pool;
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001474 } else {
1475 state->next_fmr = req->fmr_list;
Bart Van Assche509c07b2014-10-30 14:48:30 +01001476 use_mr = !!ch->fmr_pool;
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001477 }
Bart Van Assche76bc1e12014-05-20 15:05:21 +02001478
1479 for_each_sg(scat, sg, count, i) {
Bart Van Assche509c07b2014-10-30 14:48:30 +01001480 if (srp_map_sg_entry(state, ch, sg, i, use_mr)) {
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001481 /*
1482 * Memory registration failed, so backtrack to the
1483 * first unmapped entry and continue on without using
1484 * memory registration.
Bart Van Assche76bc1e12014-05-20 15:05:21 +02001485 */
1486 dma_addr_t dma_addr;
1487 unsigned int dma_len;
1488
1489backtrack:
1490 sg = state->unmapped_sg;
1491 i = state->unmapped_index;
1492
1493 dma_addr = ib_sg_dma_address(ibdev, sg);
1494 dma_len = ib_sg_dma_len(ibdev, sg);
1495 dma_len -= (state->unmapped_addr - dma_addr);
1496 dma_addr = state->unmapped_addr;
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001497 use_mr = false;
Bart Van Assche76bc1e12014-05-20 15:05:21 +02001498 srp_map_desc(state, dma_addr, dma_len, target->rkey);
1499 }
1500 }
1501
Bart Van Assche509c07b2014-10-30 14:48:30 +01001502 if (use_mr && srp_finish_mapping(state, ch))
Bart Van Assche76bc1e12014-05-20 15:05:21 +02001503 goto backtrack;
1504
Bart Van Assche52ede082014-05-20 15:07:45 +02001505 req->nmdesc = state->nmdesc;
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001506
1507 return 0;
Bart Van Assche76bc1e12014-05-20 15:05:21 +02001508}
1509
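/**
 * srp_map_data() - map the data buffer of a SCSI command
 * @scmnd: SCSI command whose data buffer is mapped.
 * @ch: RDMA channel over which the command will be sent.
 * @req: SRP request associated with @scmnd.
 *
 * Builds either a direct descriptor or an indirect descriptor table inside
 * the SRP_CMD information unit. Returns the length of the resulting IU on
 * success or a negative errno on failure.
 */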
Bart Van Assche509c07b2014-10-30 14:48:30 +01001510static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch,
Roland Dreieraef9ec32005-11-02 14:07:13 -08001511 struct srp_request *req)
1512{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001513 struct srp_target_port *target = ch->target;
Bart Van Assche76bc1e12014-05-20 15:05:21 +02001514 struct scatterlist *scat;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001515 struct srp_cmd *cmd = req->cmd->buf;
Bart Van Assche76bc1e12014-05-20 15:05:21 +02001516 int len, nents, count;
Ralph Campbell85507bc2006-12-12 14:30:55 -08001517 struct srp_device *dev;
1518 struct ib_device *ibdev;
David Dillow8f26c9f2011-01-14 19:45:50 -05001519 struct srp_map_state state;
1520 struct srp_indirect_buf *indirect_hdr;
David Dillow8f26c9f2011-01-14 19:45:50 -05001521 u32 table_len;
1522 u8 fmt;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001523
FUJITA Tomonoribb350d12007-05-26 02:28:25 +09001524 if (!scsi_sglist(scmnd) || scmnd->sc_data_direction == DMA_NONE)
Roland Dreieraef9ec32005-11-02 14:07:13 -08001525 return sizeof (struct srp_cmd);
1526
1527 if (scmnd->sc_data_direction != DMA_FROM_DEVICE &&
1528 scmnd->sc_data_direction != DMA_TO_DEVICE) {
David Dillow7aa54bd2008-01-07 18:23:41 -05001529 shost_printk(KERN_WARNING, target->scsi_host,
1530 PFX "Unhandled data direction %d\n",
1531 scmnd->sc_data_direction);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001532 return -EINVAL;
1533 }
1534
FUJITA Tomonoribb350d12007-05-26 02:28:25 +09001535 nents = scsi_sg_count(scmnd);
1536 scat = scsi_sglist(scmnd);
Roland Dreiercf368712006-03-24 15:47:26 -08001537
Greg Kroah-Hartman05321932008-03-06 00:13:36 +01001538 dev = target->srp_host->srp_dev;
Ralph Campbell85507bc2006-12-12 14:30:55 -08001539 ibdev = dev->dev;
1540
1541 count = ib_dma_map_sg(ibdev, scat, nents, scmnd->sc_data_direction);
David Dillow8f26c9f2011-01-14 19:45:50 -05001542 if (unlikely(count == 0))
1543 return -EIO;
Roland Dreierf5358a12006-06-17 20:37:29 -07001544
1545 fmt = SRP_DATA_DESC_DIRECT;
1546 len = sizeof (struct srp_cmd) + sizeof (struct srp_direct_buf);
Roland Dreiercf368712006-03-24 15:47:26 -08001547
Bart Van Asscheb1b88542014-05-20 15:06:41 +02001548 if (count == 1 && !register_always) {
Roland Dreierf5358a12006-06-17 20:37:29 -07001549 /*
1550 * The midlayer only generated a single gather/scatter
1551 * entry, or DMA mapping coalesced everything to a
1552 * single entry. So a direct descriptor along with
1553 * the DMA MR suffices.
1554 */
Roland Dreieraef9ec32005-11-02 14:07:13 -08001555 struct srp_direct_buf *buf = (void *) cmd->add_data;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001556
Ralph Campbell85507bc2006-12-12 14:30:55 -08001557 buf->va = cpu_to_be64(ib_sg_dma_address(ibdev, scat));
David Dillow9af76272010-11-26 15:34:46 -05001558 buf->key = cpu_to_be32(target->rkey);
Ralph Campbell85507bc2006-12-12 14:30:55 -08001559 buf->len = cpu_to_be32(ib_sg_dma_len(ibdev, scat));
Roland Dreiercf368712006-03-24 15:47:26 -08001560
Bart Van Assche52ede082014-05-20 15:07:45 +02001561 req->nmdesc = 0;
David Dillow8f26c9f2011-01-14 19:45:50 -05001562 goto map_complete;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001563 }
1564
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001565 /*
1566 * We have more than one scatter/gather entry, so build our indirect
1567 * descriptor table, trying to merge as many entries as we can.
David Dillow8f26c9f2011-01-14 19:45:50 -05001568 */
1569 indirect_hdr = (void *) cmd->add_data;
1570
David Dillowc07d4242011-01-16 13:57:10 -05001571 ib_dma_sync_single_for_cpu(ibdev, req->indirect_dma_addr,
1572 target->indirect_size, DMA_TO_DEVICE);
1573
David Dillow8f26c9f2011-01-14 19:45:50 -05001574 memset(&state, 0, sizeof(state));
Bart Van Assche509c07b2014-10-30 14:48:30 +01001575 srp_map_sg(&state, ch, req, scat, count);
David Dillow8f26c9f2011-01-14 19:45:50 -05001576
David Dillowc07d4242011-01-16 13:57:10 -05001577 /* We've mapped the request; now pull as much of the indirect
1578 * descriptor table as we can into the command buffer. If this
1579 * target is not using an external indirect table, we are
1580 * guaranteed to fit into the command, as the SCSI layer won't
1581 * give us more S/G entries than we allow.
David Dillow8f26c9f2011-01-14 19:45:50 -05001582 */
David Dillow8f26c9f2011-01-14 19:45:50 -05001583 if (state.ndesc == 1) {
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001584 /*
1585 * Memory registration collapsed the sg-list into one entry,
David Dillow8f26c9f2011-01-14 19:45:50 -05001586 * so use a direct descriptor.
1587 */
1588 struct srp_direct_buf *buf = (void *) cmd->add_data;
1589
David Dillowc07d4242011-01-16 13:57:10 -05001590 *buf = req->indirect_desc[0];
David Dillow8f26c9f2011-01-14 19:45:50 -05001591 goto map_complete;
1592 }
1593
David Dillowc07d4242011-01-16 13:57:10 -05001594 if (unlikely(target->cmd_sg_cnt < state.ndesc &&
1595 !target->allow_ext_sg)) {
1596 shost_printk(KERN_ERR, target->scsi_host,
1597 "Could not fit S/G list into SRP_CMD\n");
1598 return -EIO;
1599 }
1600
1601 count = min(state.ndesc, target->cmd_sg_cnt);
David Dillow8f26c9f2011-01-14 19:45:50 -05001602 table_len = state.ndesc * sizeof (struct srp_direct_buf);
1603
1604 fmt = SRP_DATA_DESC_INDIRECT;
1605 len = sizeof(struct srp_cmd) + sizeof (struct srp_indirect_buf);
David Dillowc07d4242011-01-16 13:57:10 -05001606 len += count * sizeof (struct srp_direct_buf);
David Dillow8f26c9f2011-01-14 19:45:50 -05001607
David Dillowc07d4242011-01-16 13:57:10 -05001608 memcpy(indirect_hdr->desc_list, req->indirect_desc,
1609 count * sizeof (struct srp_direct_buf));
David Dillow8f26c9f2011-01-14 19:45:50 -05001610
David Dillowc07d4242011-01-16 13:57:10 -05001611 indirect_hdr->table_desc.va = cpu_to_be64(req->indirect_dma_addr);
David Dillow8f26c9f2011-01-14 19:45:50 -05001612 indirect_hdr->table_desc.key = cpu_to_be32(target->rkey);
1613 indirect_hdr->table_desc.len = cpu_to_be32(table_len);
1614 indirect_hdr->len = cpu_to_be32(state.total_len);
1615
1616 if (scmnd->sc_data_direction == DMA_TO_DEVICE)
David Dillowc07d4242011-01-16 13:57:10 -05001617 cmd->data_out_desc_cnt = count;
David Dillow8f26c9f2011-01-14 19:45:50 -05001618 else
David Dillowc07d4242011-01-16 13:57:10 -05001619 cmd->data_in_desc_cnt = count;
1620
1621 ib_dma_sync_single_for_device(ibdev, req->indirect_dma_addr, table_len,
1622 DMA_TO_DEVICE);
David Dillow8f26c9f2011-01-14 19:45:50 -05001623
1624map_complete:
Roland Dreieraef9ec32005-11-02 14:07:13 -08001625 if (scmnd->sc_data_direction == DMA_TO_DEVICE)
1626 cmd->buf_fmt = fmt << 4;
1627 else
1628 cmd->buf_fmt = fmt;
1629
Roland Dreieraef9ec32005-11-02 14:07:13 -08001630 return len;
1631}
1632
David Dillow05a1d752010-10-08 14:48:14 -04001633/*
Bart Van Assche76c75b22010-11-26 14:37:47 -05001634 * Return an IU and possibly a credit to the free pool
1635 */
Bart Van Assche509c07b2014-10-30 14:48:30 +01001636static void srp_put_tx_iu(struct srp_rdma_ch *ch, struct srp_iu *iu,
Bart Van Assche76c75b22010-11-26 14:37:47 -05001637 enum srp_iu_type iu_type)
1638{
1639 unsigned long flags;
1640
Bart Van Assche509c07b2014-10-30 14:48:30 +01001641 spin_lock_irqsave(&ch->lock, flags);
1642 list_add(&iu->list, &ch->free_tx);
Bart Van Assche76c75b22010-11-26 14:37:47 -05001643 if (iu_type != SRP_IU_RSP)
Bart Van Assche509c07b2014-10-30 14:48:30 +01001644 ++ch->req_lim;
1645 spin_unlock_irqrestore(&ch->lock, flags);
Bart Van Assche76c75b22010-11-26 14:37:47 -05001646}
1647
1648/*
Bart Van Assche509c07b2014-10-30 14:48:30 +01001649 * Must be called with ch->lock held to protect req_lim and free_tx.
Bart Van Asschee9684672010-11-26 15:08:38 -05001650 * If IU is not sent, it must be returned using srp_put_tx_iu().
David Dillow05a1d752010-10-08 14:48:14 -04001651 *
1652 * Note:
1653 * An upper limit for the number of allocated information units for each
1654 * request type is:
1655 * - SRP_IU_CMD: SRP_CMD_SQ_SIZE, since the SCSI mid-layer never queues
1656 * more than Scsi_Host.can_queue requests.
1657 * - SRP_IU_TSK_MGMT: SRP_TSK_MGMT_SQ_SIZE.
1658 * - SRP_IU_RSP: 1, since a conforming SRP target never sends more than
1659 * one unanswered SRP request to an initiator.
1660 */
Bart Van Assche509c07b2014-10-30 14:48:30 +01001661static struct srp_iu *__srp_get_tx_iu(struct srp_rdma_ch *ch,
David Dillow05a1d752010-10-08 14:48:14 -04001662 enum srp_iu_type iu_type)
1663{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001664 struct srp_target_port *target = ch->target;
David Dillow05a1d752010-10-08 14:48:14 -04001665 s32 rsv = (iu_type == SRP_IU_TSK_MGMT) ? 0 : SRP_TSK_MGMT_SQ_SIZE;
1666 struct srp_iu *iu;
1667
Bart Van Assche509c07b2014-10-30 14:48:30 +01001668 srp_send_completion(ch->send_cq, ch);
David Dillow05a1d752010-10-08 14:48:14 -04001669
Bart Van Assche509c07b2014-10-30 14:48:30 +01001670 if (list_empty(&ch->free_tx))
David Dillow05a1d752010-10-08 14:48:14 -04001671 return NULL;
1672
1673 /* Initiator responses to target requests do not consume credits */
Bart Van Assche76c75b22010-11-26 14:37:47 -05001674 if (iu_type != SRP_IU_RSP) {
Bart Van Assche509c07b2014-10-30 14:48:30 +01001675 if (ch->req_lim <= rsv) {
Bart Van Assche76c75b22010-11-26 14:37:47 -05001676 ++target->zero_req_lim;
1677 return NULL;
1678 }
1679
Bart Van Assche509c07b2014-10-30 14:48:30 +01001680 --ch->req_lim;
David Dillow05a1d752010-10-08 14:48:14 -04001681 }
1682
Bart Van Assche509c07b2014-10-30 14:48:30 +01001683 iu = list_first_entry(&ch->free_tx, struct srp_iu, list);
Bart Van Assche76c75b22010-11-26 14:37:47 -05001684 list_del(&iu->list);
David Dillow05a1d752010-10-08 14:48:14 -04001685 return iu;
1686}
1687
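/**
 * srp_post_send() - post an information unit on the send queue
 * @ch: RDMA channel.
 * @iu: Information unit to send.
 * @len: Number of bytes of @iu to send.
 */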
Bart Van Assche509c07b2014-10-30 14:48:30 +01001688static int srp_post_send(struct srp_rdma_ch *ch, struct srp_iu *iu, int len)
David Dillow05a1d752010-10-08 14:48:14 -04001689{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001690 struct srp_target_port *target = ch->target;
David Dillow05a1d752010-10-08 14:48:14 -04001691 struct ib_sge list;
1692 struct ib_send_wr wr, *bad_wr;
David Dillow05a1d752010-10-08 14:48:14 -04001693
1694 list.addr = iu->dma;
1695 list.length = len;
David Dillow9af76272010-11-26 15:34:46 -05001696 list.lkey = target->lkey;
David Dillow05a1d752010-10-08 14:48:14 -04001697
1698 wr.next = NULL;
Bart Van Asschedcb4cb82010-11-26 13:22:48 -05001699 wr.wr_id = (uintptr_t) iu;
David Dillow05a1d752010-10-08 14:48:14 -04001700 wr.sg_list = &list;
1701 wr.num_sge = 1;
1702 wr.opcode = IB_WR_SEND;
1703 wr.send_flags = IB_SEND_SIGNALED;
1704
Bart Van Assche509c07b2014-10-30 14:48:30 +01001705 return ib_post_send(ch->qp, &wr, &bad_wr);
David Dillow05a1d752010-10-08 14:48:14 -04001706}
1707
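/**
 * srp_post_recv() - post an information unit on the receive queue
 * @ch: RDMA channel.
 * @iu: Information unit that will receive the next incoming message.
 */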
Bart Van Assche509c07b2014-10-30 14:48:30 +01001708static int srp_post_recv(struct srp_rdma_ch *ch, struct srp_iu *iu)
Bart Van Asschec996bb42010-07-30 10:59:05 +00001709{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001710 struct srp_target_port *target = ch->target;
Bart Van Asschec996bb42010-07-30 10:59:05 +00001711 struct ib_recv_wr wr, *bad_wr;
Bart Van Asschedcb4cb82010-11-26 13:22:48 -05001712 struct ib_sge list;
Bart Van Asschec996bb42010-07-30 10:59:05 +00001713
1714 list.addr = iu->dma;
1715 list.length = iu->size;
David Dillow9af76272010-11-26 15:34:46 -05001716 list.lkey = target->lkey;
Bart Van Asschec996bb42010-07-30 10:59:05 +00001717
1718 wr.next = NULL;
Bart Van Asschedcb4cb82010-11-26 13:22:48 -05001719 wr.wr_id = (uintptr_t) iu;
Bart Van Asschec996bb42010-07-30 10:59:05 +00001720 wr.sg_list = &list;
1721 wr.num_sge = 1;
1722
Bart Van Assche509c07b2014-10-30 14:48:30 +01001723 return ib_post_recv(ch->qp, &wr, &bad_wr);
Bart Van Asschec996bb42010-07-30 10:59:05 +00001724}
1725
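/**
 * srp_process_rsp() - process an SRP_RSP information unit
 * @ch: RDMA channel on which the response was received.
 * @rsp: SRP response.
 *
 * Task management responses complete ch->tsk_mgmt_done. Command responses
 * are matched to their SCSI command by tag; result, sense data and
 * residual counts are filled in before scsi_done() is invoked.
 */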
Bart Van Assche509c07b2014-10-30 14:48:30 +01001726static void srp_process_rsp(struct srp_rdma_ch *ch, struct srp_rsp *rsp)
Roland Dreieraef9ec32005-11-02 14:07:13 -08001727{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001728 struct srp_target_port *target = ch->target;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001729 struct srp_request *req;
1730 struct scsi_cmnd *scmnd;
1731 unsigned long flags;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001732
Roland Dreieraef9ec32005-11-02 14:07:13 -08001733 if (unlikely(rsp->tag & SRP_TAG_TSK_MGMT)) {
Bart Van Assche509c07b2014-10-30 14:48:30 +01001734 spin_lock_irqsave(&ch->lock, flags);
1735 ch->req_lim += be32_to_cpu(rsp->req_lim_delta);
1736 spin_unlock_irqrestore(&ch->lock, flags);
Bart Van Assche94a91742010-11-26 14:50:09 -05001737
Bart Van Assche509c07b2014-10-30 14:48:30 +01001738 ch->tsk_mgmt_status = -1;
David Dillowf8b6e312010-11-26 13:02:21 -05001739 if (be32_to_cpu(rsp->resp_data_len) >= 4)
Bart Van Assche509c07b2014-10-30 14:48:30 +01001740 ch->tsk_mgmt_status = rsp->data[3];
1741 complete(&ch->tsk_mgmt_done);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001742 } else {
Bart Van Assche77f2c1a2014-10-02 15:29:25 +02001743 scmnd = scsi_host_find_tag(target->scsi_host, rsp->tag);
1744 if (scmnd) {
1745 req = (void *)scmnd->host_scribble;
1746 scmnd = srp_claim_req(ch, req, NULL, scmnd);
1747 }
Bart Van Assche22032992012-08-14 13:18:53 +00001748 if (!scmnd) {
David Dillow7aa54bd2008-01-07 18:23:41 -05001749 shost_printk(KERN_ERR, target->scsi_host,
Bart Van Assched92c0da2014-10-06 17:14:36 +02001750 "Null scmnd for RSP w/tag %#016llx received on ch %td / QP %#x\n",
1751 rsp->tag, ch - target->ch, ch->qp->qp_num);
Bart Van Assche22032992012-08-14 13:18:53 +00001752
Bart Van Assche509c07b2014-10-30 14:48:30 +01001753 spin_lock_irqsave(&ch->lock, flags);
1754 ch->req_lim += be32_to_cpu(rsp->req_lim_delta);
1755 spin_unlock_irqrestore(&ch->lock, flags);
Bart Van Assche22032992012-08-14 13:18:53 +00001756
1757 return;
1758 }
Roland Dreieraef9ec32005-11-02 14:07:13 -08001759 scmnd->result = rsp->status;
1760
1761 if (rsp->flags & SRP_RSP_FLAG_SNSVALID) {
1762 memcpy(scmnd->sense_buffer, rsp->data +
1763 be32_to_cpu(rsp->resp_data_len),
1764 min_t(int, be32_to_cpu(rsp->sense_data_len),
1765 SCSI_SENSE_BUFFERSIZE));
1766 }
1767
Bart Van Asschee7145312014-07-09 15:57:51 +02001768 if (unlikely(rsp->flags & SRP_RSP_FLAG_DIUNDER))
FUJITA Tomonoribb350d12007-05-26 02:28:25 +09001769 scsi_set_resid(scmnd, be32_to_cpu(rsp->data_in_res_cnt));
Bart Van Asschee7145312014-07-09 15:57:51 +02001770 else if (unlikely(rsp->flags & SRP_RSP_FLAG_DIOVER))
1771 scsi_set_resid(scmnd, -be32_to_cpu(rsp->data_in_res_cnt));
1772 else if (unlikely(rsp->flags & SRP_RSP_FLAG_DOUNDER))
1773 scsi_set_resid(scmnd, be32_to_cpu(rsp->data_out_res_cnt));
1774 else if (unlikely(rsp->flags & SRP_RSP_FLAG_DOOVER))
1775 scsi_set_resid(scmnd, -be32_to_cpu(rsp->data_out_res_cnt));
Roland Dreieraef9ec32005-11-02 14:07:13 -08001776
Bart Van Assche509c07b2014-10-30 14:48:30 +01001777 srp_free_req(ch, req, scmnd,
Bart Van Assche22032992012-08-14 13:18:53 +00001778 be32_to_cpu(rsp->req_lim_delta));
1779
David Dillowf8b6e312010-11-26 13:02:21 -05001780 scmnd->host_scribble = NULL;
1781 scmnd->scsi_done(scmnd);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001782 }
Roland Dreieraef9ec32005-11-02 14:07:13 -08001783}
1784
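/**
 * srp_response_common() - send a response back to the target
 * @ch: RDMA channel.
 * @req_delta: Request limit delta taken from the received request.
 * @rsp: Response to send.
 * @len: Length of @rsp in bytes.
 *
 * Returns 0 on success and a nonzero value if no IU was available or
 * posting the send failed.
 */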
Bart Van Assche509c07b2014-10-30 14:48:30 +01001785static int srp_response_common(struct srp_rdma_ch *ch, s32 req_delta,
David Dillowbb125882010-10-08 14:40:47 -04001786 void *rsp, int len)
1787{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001788 struct srp_target_port *target = ch->target;
Bart Van Assche76c75b22010-11-26 14:37:47 -05001789 struct ib_device *dev = target->srp_host->srp_dev->dev;
David Dillowbb125882010-10-08 14:40:47 -04001790 unsigned long flags;
1791 struct srp_iu *iu;
Bart Van Assche76c75b22010-11-26 14:37:47 -05001792 int err;
David Dillowbb125882010-10-08 14:40:47 -04001793
Bart Van Assche509c07b2014-10-30 14:48:30 +01001794 spin_lock_irqsave(&ch->lock, flags);
1795 ch->req_lim += req_delta;
1796 iu = __srp_get_tx_iu(ch, SRP_IU_RSP);
1797 spin_unlock_irqrestore(&ch->lock, flags);
Bart Van Assche76c75b22010-11-26 14:37:47 -05001798
David Dillowbb125882010-10-08 14:40:47 -04001799 if (!iu) {
1800 shost_printk(KERN_ERR, target->scsi_host, PFX
1801 "no IU available to send response\n");
Bart Van Assche76c75b22010-11-26 14:37:47 -05001802 return 1;
David Dillowbb125882010-10-08 14:40:47 -04001803 }
1804
1805 ib_dma_sync_single_for_cpu(dev, iu->dma, len, DMA_TO_DEVICE);
1806 memcpy(iu->buf, rsp, len);
1807 ib_dma_sync_single_for_device(dev, iu->dma, len, DMA_TO_DEVICE);
1808
Bart Van Assche509c07b2014-10-30 14:48:30 +01001809 err = srp_post_send(ch, iu, len);
Bart Van Assche76c75b22010-11-26 14:37:47 -05001810 if (err) {
David Dillowbb125882010-10-08 14:40:47 -04001811 shost_printk(KERN_ERR, target->scsi_host, PFX
1812 "unable to post response: %d\n", err);
Bart Van Assche509c07b2014-10-30 14:48:30 +01001813 srp_put_tx_iu(ch, iu, SRP_IU_RSP);
Bart Van Assche76c75b22010-11-26 14:37:47 -05001814 }
David Dillowbb125882010-10-08 14:40:47 -04001815
David Dillowbb125882010-10-08 14:40:47 -04001816 return err;
1817}
1818
Bart Van Assche509c07b2014-10-30 14:48:30 +01001819static void srp_process_cred_req(struct srp_rdma_ch *ch,
David Dillowbb125882010-10-08 14:40:47 -04001820 struct srp_cred_req *req)
1821{
1822 struct srp_cred_rsp rsp = {
1823 .opcode = SRP_CRED_RSP,
1824 .tag = req->tag,
1825 };
1826 s32 delta = be32_to_cpu(req->req_lim_delta);
1827
Bart Van Assche509c07b2014-10-30 14:48:30 +01001828 if (srp_response_common(ch, delta, &rsp, sizeof(rsp)))
1829 shost_printk(KERN_ERR, ch->target->scsi_host, PFX
David Dillowbb125882010-10-08 14:40:47 -04001830 "problems processing SRP_CRED_REQ\n");
1831}
1832
Bart Van Assche509c07b2014-10-30 14:48:30 +01001833static void srp_process_aer_req(struct srp_rdma_ch *ch,
David Dillowbb125882010-10-08 14:40:47 -04001834 struct srp_aer_req *req)
1835{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001836 struct srp_target_port *target = ch->target;
David Dillowbb125882010-10-08 14:40:47 -04001837 struct srp_aer_rsp rsp = {
1838 .opcode = SRP_AER_RSP,
1839 .tag = req->tag,
1840 };
1841 s32 delta = be32_to_cpu(req->req_lim_delta);
1842
1843 shost_printk(KERN_ERR, target->scsi_host, PFX
1844 "ignoring AER for LUN %llu\n", be64_to_cpu(req->lun));
1845
Bart Van Assche509c07b2014-10-30 14:48:30 +01001846 if (srp_response_common(ch, delta, &rsp, sizeof(rsp)))
David Dillowbb125882010-10-08 14:40:47 -04001847 shost_printk(KERN_ERR, target->scsi_host, PFX
1848 "problems processing SRP_AER_REQ\n");
1849}
1850
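/**
 * srp_handle_recv() - process a received information unit
 * @ch: RDMA channel on which the IU was received.
 * @wc: Work completion for the receive.
 *
 * Dispatches on the SRP opcode and afterwards reposts the IU on the
 * receive queue.
 */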
Bart Van Assche509c07b2014-10-30 14:48:30 +01001851static void srp_handle_recv(struct srp_rdma_ch *ch, struct ib_wc *wc)
Roland Dreieraef9ec32005-11-02 14:07:13 -08001852{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001853 struct srp_target_port *target = ch->target;
Bart Van Asschedcb4cb82010-11-26 13:22:48 -05001854 struct ib_device *dev = target->srp_host->srp_dev->dev;
Roland Dreier737b94e2011-05-23 11:30:04 -07001855 struct srp_iu *iu = (struct srp_iu *) (uintptr_t) wc->wr_id;
Bart Van Asschec996bb42010-07-30 10:59:05 +00001856 int res;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001857 u8 opcode;
1858
Bart Van Assche509c07b2014-10-30 14:48:30 +01001859 ib_dma_sync_single_for_cpu(dev, iu->dma, ch->max_ti_iu_len,
Ralph Campbell85507bc2006-12-12 14:30:55 -08001860 DMA_FROM_DEVICE);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001861
1862 opcode = *(u8 *) iu->buf;
1863
1864 if (0) {
David Dillow7aa54bd2008-01-07 18:23:41 -05001865 shost_printk(KERN_ERR, target->scsi_host,
1866 PFX "recv completion, opcode 0x%02x\n", opcode);
Bart Van Assche7a700812010-07-29 15:56:37 +00001867 print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 8, 1,
1868 iu->buf, wc->byte_len, true);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001869 }
1870
1871 switch (opcode) {
1872 case SRP_RSP:
Bart Van Assche509c07b2014-10-30 14:48:30 +01001873 srp_process_rsp(ch, iu->buf);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001874 break;
1875
David Dillowbb125882010-10-08 14:40:47 -04001876 case SRP_CRED_REQ:
Bart Van Assche509c07b2014-10-30 14:48:30 +01001877 srp_process_cred_req(ch, iu->buf);
David Dillowbb125882010-10-08 14:40:47 -04001878 break;
1879
1880 case SRP_AER_REQ:
Bart Van Assche509c07b2014-10-30 14:48:30 +01001881 srp_process_aer_req(ch, iu->buf);
David Dillowbb125882010-10-08 14:40:47 -04001882 break;
1883
Roland Dreieraef9ec32005-11-02 14:07:13 -08001884 case SRP_T_LOGOUT:
1885 /* XXX Handle target logout */
David Dillow7aa54bd2008-01-07 18:23:41 -05001886 shost_printk(KERN_WARNING, target->scsi_host,
1887 PFX "Got target logout request\n");
Roland Dreieraef9ec32005-11-02 14:07:13 -08001888 break;
1889
1890 default:
David Dillow7aa54bd2008-01-07 18:23:41 -05001891 shost_printk(KERN_WARNING, target->scsi_host,
1892 PFX "Unhandled SRP opcode 0x%02x\n", opcode);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001893 break;
1894 }
1895
Bart Van Assche509c07b2014-10-30 14:48:30 +01001896 ib_dma_sync_single_for_device(dev, iu->dma, ch->max_ti_iu_len,
Ralph Campbell85507bc2006-12-12 14:30:55 -08001897 DMA_FROM_DEVICE);
Bart Van Asschec996bb42010-07-30 10:59:05 +00001898
Bart Van Assche509c07b2014-10-30 14:48:30 +01001899 res = srp_post_recv(ch, iu);
Bart Van Asschec996bb42010-07-30 10:59:05 +00001900 if (res != 0)
1901 shost_printk(KERN_ERR, target->scsi_host,
1902 PFX "Recv failed with error code %d\n", res);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001903}
1904
Bart Van Asschec1120f82013-10-26 14:35:08 +02001905/**
1906 * srp_tl_err_work() - handle a transport layer error
Bart Van Asscheaf246632014-05-20 15:04:21 +02001907 * @work: Work structure embedded in an SRP target port.
Bart Van Asschec1120f82013-10-26 14:35:08 +02001908 *
1909 * Note: This function may get invoked before the rport has been created,
1910 * hence the target->rport test.
1911 */
1912static void srp_tl_err_work(struct work_struct *work)
1913{
1914 struct srp_target_port *target;
1915
1916 target = container_of(work, struct srp_target_port, tl_err_work);
1917 if (target->rport)
1918 srp_start_tl_fail_timers(target->rport);
1919}
1920
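/**
 * srp_handle_qp_err() - handle an error or special completion
 * @wr_id: ID of the work request that completed.
 * @wc_status: Completion status.
 * @send_err: True for a failed send, false for a failed receive.
 * @ch: RDMA channel on which the completion was received.
 *
 * A completion with wr_id == SRP_LAST_WR_ID means that the QP has been
 * drained and completes ch->done. Other completions are reported and queue
 * srp_tl_err_work(), unless an error has already been noted for this
 * connection; target->qp_in_error is set either way.
 */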
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001921static void srp_handle_qp_err(u64 wr_id, enum ib_wc_status wc_status,
Bart Van Assche7dad6b22014-10-21 18:00:35 +02001922 bool send_err, struct srp_rdma_ch *ch)
Bart Van Assche948d1e82011-09-03 09:25:42 +02001923{
Bart Van Assche7dad6b22014-10-21 18:00:35 +02001924 struct srp_target_port *target = ch->target;
1925
1926 if (wr_id == SRP_LAST_WR_ID) {
1927 complete(&ch->done);
1928 return;
1929 }
1930
Bart Van Assche294c8752011-12-25 12:18:12 +00001931 if (target->connected && !target->qp_in_error) {
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001932 if (wr_id & LOCAL_INV_WR_ID_MASK) {
1933 shost_printk(KERN_ERR, target->scsi_host, PFX
1934 "LOCAL_INV failed with status %d\n",
1935 wc_status);
1936 } else if (wr_id & FAST_REG_WR_ID_MASK) {
1937 shost_printk(KERN_ERR, target->scsi_host, PFX
1938 "FAST_REG_MR failed status %d\n",
1939 wc_status);
1940 } else {
1941 shost_printk(KERN_ERR, target->scsi_host,
1942 PFX "failed %s status %d for iu %p\n",
1943 send_err ? "send" : "receive",
1944 wc_status, (void *)(uintptr_t)wr_id);
1945 }
Bart Van Asschec1120f82013-10-26 14:35:08 +02001946 queue_work(system_long_wq, &target->tl_err_work);
Bart Van Assche4f0af692012-11-26 11:16:40 +01001947 }
Bart Van Assche948d1e82011-09-03 09:25:42 +02001948 target->qp_in_error = true;
1949}
1950
Bart Van Assche509c07b2014-10-30 14:48:30 +01001951static void srp_recv_completion(struct ib_cq *cq, void *ch_ptr)
Roland Dreieraef9ec32005-11-02 14:07:13 -08001952{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001953 struct srp_rdma_ch *ch = ch_ptr;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001954 struct ib_wc wc;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001955
1956 ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
1957 while (ib_poll_cq(cq, 1, &wc) > 0) {
Bart Van Assche948d1e82011-09-03 09:25:42 +02001958 if (likely(wc.status == IB_WC_SUCCESS)) {
Bart Van Assche509c07b2014-10-30 14:48:30 +01001959 srp_handle_recv(ch, &wc);
Bart Van Assche948d1e82011-09-03 09:25:42 +02001960 } else {
Bart Van Assche7dad6b22014-10-21 18:00:35 +02001961 srp_handle_qp_err(wc.wr_id, wc.status, false, ch);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001962 }
Bart Van Assche9c03dc92010-02-02 19:23:54 +00001963 }
1964}
1965
Bart Van Assche509c07b2014-10-30 14:48:30 +01001966static void srp_send_completion(struct ib_cq *cq, void *ch_ptr)
Bart Van Assche9c03dc92010-02-02 19:23:54 +00001967{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001968 struct srp_rdma_ch *ch = ch_ptr;
Bart Van Assche9c03dc92010-02-02 19:23:54 +00001969 struct ib_wc wc;
Bart Van Asschedcb4cb82010-11-26 13:22:48 -05001970 struct srp_iu *iu;
Bart Van Assche9c03dc92010-02-02 19:23:54 +00001971
1972 while (ib_poll_cq(cq, 1, &wc) > 0) {
Bart Van Assche948d1e82011-09-03 09:25:42 +02001973 if (likely(wc.status == IB_WC_SUCCESS)) {
1974 iu = (struct srp_iu *) (uintptr_t) wc.wr_id;
Bart Van Assche509c07b2014-10-30 14:48:30 +01001975 list_add(&iu->list, &ch->free_tx);
Bart Van Assche948d1e82011-09-03 09:25:42 +02001976 } else {
Bart Van Assche7dad6b22014-10-21 18:00:35 +02001977 srp_handle_qp_err(wc.wr_id, wc.status, true, ch);
Bart Van Assche9c03dc92010-02-02 19:23:54 +00001978 }
Roland Dreieraef9ec32005-11-02 14:07:13 -08001979 }
1980}
1981
Bart Van Assche76c75b22010-11-26 14:37:47 -05001982static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
Roland Dreieraef9ec32005-11-02 14:07:13 -08001983{
Bart Van Assche76c75b22010-11-26 14:37:47 -05001984 struct srp_target_port *target = host_to_target(shost);
Bart Van Asschea95cadb2013-10-26 14:37:17 +02001985 struct srp_rport *rport = target->rport;
Bart Van Assche509c07b2014-10-30 14:48:30 +01001986 struct srp_rdma_ch *ch;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001987 struct srp_request *req;
1988 struct srp_iu *iu;
1989 struct srp_cmd *cmd;
Ralph Campbell85507bc2006-12-12 14:30:55 -08001990 struct ib_device *dev;
Bart Van Assche76c75b22010-11-26 14:37:47 -05001991 unsigned long flags;
Bart Van Assche77f2c1a2014-10-02 15:29:25 +02001992 u32 tag;
1993 u16 idx;
Bart Van Assched1b42892014-05-20 15:07:20 +02001994 int len, ret;
Bart Van Asschea95cadb2013-10-26 14:37:17 +02001995 const bool in_scsi_eh = !in_interrupt() && current == shost->ehandler;
1996
1997 /*
1998 * The SCSI EH thread is the only context from which srp_queuecommand()
1999 * can get invoked for blocked devices (SDEV_BLOCK /
2000 * SDEV_CREATED_BLOCK). Avoid racing with srp_reconnect_rport() by
2001 * locking the rport mutex if invoked from inside the SCSI EH.
2002 */
2003 if (in_scsi_eh)
2004 mutex_lock(&rport->mutex);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002005
Bart Van Assched1b42892014-05-20 15:07:20 +02002006 scmnd->result = srp_chkready(target->rport);
2007 if (unlikely(scmnd->result))
2008 goto err;
Bart Van Assche2ce19e72013-02-21 17:20:00 +00002009
Bart Van Assche77f2c1a2014-10-02 15:29:25 +02002010 WARN_ON_ONCE(scmnd->request->tag < 0);
2011 tag = blk_mq_unique_tag(scmnd->request);
Bart Van Assched92c0da2014-10-06 17:14:36 +02002012 ch = &target->ch[blk_mq_unique_tag_to_hwq(tag)];
Bart Van Assche77f2c1a2014-10-02 15:29:25 +02002013 idx = blk_mq_unique_tag_to_tag(tag);
2014 WARN_ONCE(idx >= target->req_ring_size, "%s: tag %#x: idx %d >= %d\n",
2015 dev_name(&shost->shost_gendev), tag, idx,
2016 target->req_ring_size);
Bart Van Assche509c07b2014-10-30 14:48:30 +01002017
2018 spin_lock_irqsave(&ch->lock, flags);
2019 iu = __srp_get_tx_iu(ch, SRP_IU_CMD);
Bart Van Assche509c07b2014-10-30 14:48:30 +01002020 spin_unlock_irqrestore(&ch->lock, flags);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002021
Bart Van Assche77f2c1a2014-10-02 15:29:25 +02002022 if (!iu)
2023 goto err;
2024
2025 req = &ch->req_ring[idx];
Greg Kroah-Hartman05321932008-03-06 00:13:36 +01002026 dev = target->srp_host->srp_dev->dev;
David Dillow49248642011-01-14 18:23:24 -05002027 ib_dma_sync_single_for_cpu(dev, iu->dma, target->max_iu_len,
Ralph Campbell85507bc2006-12-12 14:30:55 -08002028 DMA_TO_DEVICE);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002029
David Dillowf8b6e312010-11-26 13:02:21 -05002030 scmnd->host_scribble = (void *) req;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002031
2032 cmd = iu->buf;
2033 memset(cmd, 0, sizeof *cmd);
2034
2035 cmd->opcode = SRP_CMD;
2036 cmd->lun = cpu_to_be64((u64) scmnd->device->lun << 48);
Bart Van Assche77f2c1a2014-10-02 15:29:25 +02002037 cmd->tag = tag;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002038 memcpy(cmd->cdb, scmnd->cmnd, scmnd->cmd_len);
2039
Roland Dreieraef9ec32005-11-02 14:07:13 -08002040 req->scmnd = scmnd;
2041 req->cmd = iu;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002042
Bart Van Assche509c07b2014-10-30 14:48:30 +01002043 len = srp_map_data(scmnd, ch, req);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002044 if (len < 0) {
David Dillow7aa54bd2008-01-07 18:23:41 -05002045 shost_printk(KERN_ERR, target->scsi_host,
Bart Van Assched1b42892014-05-20 15:07:20 +02002046 PFX "Failed to map data (%d)\n", len);
2047 /*
2048 * If we ran out of memory descriptors (-ENOMEM) because an
2049 * application is queuing many requests with more than
Bart Van Assche52ede082014-05-20 15:07:45 +02002050 * max_pages_per_mr sg-list elements, tell the SCSI mid-layer
Bart Van Assched1b42892014-05-20 15:07:20 +02002051 * to reduce queue depth temporarily.
2052 */
2053 scmnd->result = len == -ENOMEM ?
2054 DID_OK << 16 | QUEUE_FULL << 1 : DID_ERROR << 16;
Bart Van Assche76c75b22010-11-26 14:37:47 -05002055 goto err_iu;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002056 }
2057
David Dillow49248642011-01-14 18:23:24 -05002058 ib_dma_sync_single_for_device(dev, iu->dma, target->max_iu_len,
Ralph Campbell85507bc2006-12-12 14:30:55 -08002059 DMA_TO_DEVICE);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002060
Bart Van Assche509c07b2014-10-30 14:48:30 +01002061 if (srp_post_send(ch, iu, len)) {
David Dillow7aa54bd2008-01-07 18:23:41 -05002062 shost_printk(KERN_ERR, target->scsi_host, PFX "Send failed\n");
Roland Dreieraef9ec32005-11-02 14:07:13 -08002063 goto err_unmap;
2064 }
2065
Bart Van Assched1b42892014-05-20 15:07:20 +02002066 ret = 0;
2067
Bart Van Asschea95cadb2013-10-26 14:37:17 +02002068unlock_rport:
2069 if (in_scsi_eh)
2070 mutex_unlock(&rport->mutex);
2071
Bart Van Assched1b42892014-05-20 15:07:20 +02002072 return ret;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002073
2074err_unmap:
Bart Van Assche509c07b2014-10-30 14:48:30 +01002075 srp_unmap_data(scmnd, ch, req);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002076
Bart Van Assche76c75b22010-11-26 14:37:47 -05002077err_iu:
Bart Van Assche509c07b2014-10-30 14:48:30 +01002078 srp_put_tx_iu(ch, iu, SRP_IU_CMD);
Bart Van Assche76c75b22010-11-26 14:37:47 -05002079
Bart Van Assche024ca902014-05-20 15:03:49 +02002080 /*
2081 * Prevent the loops that iterate over the request ring from
2082 * encountering a dangling SCSI command pointer.
2083 */
2084 req->scmnd = NULL;
2085
Bart Van Assched1b42892014-05-20 15:07:20 +02002086err:
2087 if (scmnd->result) {
2088 scmnd->scsi_done(scmnd);
2089 ret = 0;
2090 } else {
2091 ret = SCSI_MLQUEUE_HOST_BUSY;
2092 }
Bart Van Asschea95cadb2013-10-26 14:37:17 +02002093
Bart Van Assched1b42892014-05-20 15:07:20 +02002094 goto unlock_rport;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002095}
2096
Bart Van Assche4d73f952013-10-26 14:40:37 +02002097/*
2098 * Note: the resources allocated in this function are freed in
Bart Van Assche509c07b2014-10-30 14:48:30 +01002099 * srp_free_ch_ib().
Bart Van Assche4d73f952013-10-26 14:40:37 +02002100 */
Bart Van Assche509c07b2014-10-30 14:48:30 +01002101static int srp_alloc_iu_bufs(struct srp_rdma_ch *ch)
Roland Dreieraef9ec32005-11-02 14:07:13 -08002102{
Bart Van Assche509c07b2014-10-30 14:48:30 +01002103 struct srp_target_port *target = ch->target;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002104 int i;
2105
Bart Van Assche509c07b2014-10-30 14:48:30 +01002106 ch->rx_ring = kcalloc(target->queue_size, sizeof(*ch->rx_ring),
2107 GFP_KERNEL);
2108 if (!ch->rx_ring)
Bart Van Assche4d73f952013-10-26 14:40:37 +02002109 goto err_no_ring;
Bart Van Assche509c07b2014-10-30 14:48:30 +01002110 ch->tx_ring = kcalloc(target->queue_size, sizeof(*ch->tx_ring),
2111 GFP_KERNEL);
2112 if (!ch->tx_ring)
Bart Van Assche4d73f952013-10-26 14:40:37 +02002113 goto err_no_ring;
2114
2115 for (i = 0; i < target->queue_size; ++i) {
Bart Van Assche509c07b2014-10-30 14:48:30 +01002116 ch->rx_ring[i] = srp_alloc_iu(target->srp_host,
2117 ch->max_ti_iu_len,
2118 GFP_KERNEL, DMA_FROM_DEVICE);
2119 if (!ch->rx_ring[i])
Roland Dreieraef9ec32005-11-02 14:07:13 -08002120 goto err;
2121 }
2122
Bart Van Assche4d73f952013-10-26 14:40:37 +02002123 for (i = 0; i < target->queue_size; ++i) {
Bart Van Assche509c07b2014-10-30 14:48:30 +01002124 ch->tx_ring[i] = srp_alloc_iu(target->srp_host,
2125 target->max_iu_len,
2126 GFP_KERNEL, DMA_TO_DEVICE);
2127 if (!ch->tx_ring[i])
Roland Dreieraef9ec32005-11-02 14:07:13 -08002128 goto err;
Bart Van Asschedcb4cb82010-11-26 13:22:48 -05002129
Bart Van Assche509c07b2014-10-30 14:48:30 +01002130 list_add(&ch->tx_ring[i]->list, &ch->free_tx);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002131 }
2132
2133 return 0;
2134
2135err:
Bart Van Assche4d73f952013-10-26 14:40:37 +02002136 for (i = 0; i < target->queue_size; ++i) {
Bart Van Assche509c07b2014-10-30 14:48:30 +01002137 srp_free_iu(target->srp_host, ch->rx_ring[i]);
2138 srp_free_iu(target->srp_host, ch->tx_ring[i]);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002139 }
2140
Bart Van Assche4d73f952013-10-26 14:40:37 +02002141
2142err_no_ring:
Bart Van Assche509c07b2014-10-30 14:48:30 +01002143 kfree(ch->tx_ring);
2144 ch->tx_ring = NULL;
2145 kfree(ch->rx_ring);
2146 ch->rx_ring = NULL;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002147
2148 return -ENOMEM;
2149}
2150
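/**
 * srp_compute_rq_tmo() - compute the request queue timeout
 * @qp_attr: QP attributes, with the timeout and retry count filled in.
 * @attr_mask: QP attribute mask; must include IB_QP_TIMEOUT and
 * IB_QP_RETRY_CNT.
 *
 * Returns the timeout in jiffies.
 */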
Bart Van Asschec9b03c12011-09-03 09:34:48 +02002151static uint32_t srp_compute_rq_tmo(struct ib_qp_attr *qp_attr, int attr_mask)
2152{
2153 uint64_t T_tr_ns, max_compl_time_ms;
2154 uint32_t rq_tmo_jiffies;
2155
2156 /*
2157 * According to section 11.2.4.2 in the IBTA spec (Modify Queue Pair,
2158 * for RC QPs during the RTR to RTS transition.
2159 * for RC QP's during the RTR to RTS transition.
2160 */
2161 WARN_ON_ONCE((attr_mask & (IB_QP_TIMEOUT | IB_QP_RETRY_CNT)) !=
2162 (IB_QP_TIMEOUT | IB_QP_RETRY_CNT));
2163
2164 /*
2165 * Set target->rq_tmo_jiffies to one second more than the largest time
2166 * it can take before an error completion is generated. See also
2167 * C9-140..142 in the IBTA spec for more information about how to
2168 * convert the QP Local ACK Timeout value to nanoseconds.
2169 */
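	/*
	 * Worked example with illustrative values: timeout = 14 and
	 * retry_cnt = 7 give T_tr = 4096 * 2^14 ns ~= 67 ms, a worst-case
	 * completion time of 7 * 4 * 67 ms ~= 1879 ms, and therefore
	 * rq_tmo_jiffies = msecs_to_jiffies(1879 + 1000).
	 */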
2170 T_tr_ns = 4096 * (1ULL << qp_attr->timeout);
2171 max_compl_time_ms = qp_attr->retry_cnt * 4 * T_tr_ns;
2172 do_div(max_compl_time_ms, NSEC_PER_MSEC);
2173 rq_tmo_jiffies = msecs_to_jiffies(max_compl_time_ms + 1000);
2174
2175 return rq_tmo_jiffies;
2176}
2177
David Dillow961e0be2011-01-14 17:32:07 -05002178static void srp_cm_rep_handler(struct ib_cm_id *cm_id,
2179 struct srp_login_rsp *lrsp,
Bart Van Assche509c07b2014-10-30 14:48:30 +01002180 struct srp_rdma_ch *ch)
David Dillow961e0be2011-01-14 17:32:07 -05002181{
Bart Van Assche509c07b2014-10-30 14:48:30 +01002182 struct srp_target_port *target = ch->target;
David Dillow961e0be2011-01-14 17:32:07 -05002183 struct ib_qp_attr *qp_attr = NULL;
2184 int attr_mask = 0;
2185 int ret;
2186 int i;
2187
2188 if (lrsp->opcode == SRP_LOGIN_RSP) {
Bart Van Assche509c07b2014-10-30 14:48:30 +01002189 ch->max_ti_iu_len = be32_to_cpu(lrsp->max_ti_iu_len);
2190 ch->req_lim = be32_to_cpu(lrsp->req_lim_delta);
David Dillow961e0be2011-01-14 17:32:07 -05002191
2192 /*
2193 * Reserve credits for task management so we don't
2194 * bounce requests back to the SCSI mid-layer.
2195 */
2196 target->scsi_host->can_queue
Bart Van Assche509c07b2014-10-30 14:48:30 +01002197 = min(ch->req_lim - SRP_TSK_MGMT_SQ_SIZE,
David Dillow961e0be2011-01-14 17:32:07 -05002198 target->scsi_host->can_queue);
Bart Van Assche4d73f952013-10-26 14:40:37 +02002199 target->scsi_host->cmd_per_lun
2200 = min_t(int, target->scsi_host->can_queue,
2201 target->scsi_host->cmd_per_lun);
David Dillow961e0be2011-01-14 17:32:07 -05002202 } else {
2203 shost_printk(KERN_WARNING, target->scsi_host,
2204 PFX "Unhandled RSP opcode %#x\n", lrsp->opcode);
2205 ret = -ECONNRESET;
2206 goto error;
2207 }
2208
Bart Van Assche509c07b2014-10-30 14:48:30 +01002209 if (!ch->rx_ring) {
2210 ret = srp_alloc_iu_bufs(ch);
David Dillow961e0be2011-01-14 17:32:07 -05002211 if (ret)
2212 goto error;
2213 }
2214
2215 ret = -ENOMEM;
2216 qp_attr = kmalloc(sizeof *qp_attr, GFP_KERNEL);
2217 if (!qp_attr)
2218 goto error;
2219
2220 qp_attr->qp_state = IB_QPS_RTR;
2221 ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
2222 if (ret)
2223 goto error_free;
2224
Bart Van Assche509c07b2014-10-30 14:48:30 +01002225 ret = ib_modify_qp(ch->qp, qp_attr, attr_mask);
David Dillow961e0be2011-01-14 17:32:07 -05002226 if (ret)
2227 goto error_free;
2228
Bart Van Assche4d73f952013-10-26 14:40:37 +02002229 for (i = 0; i < target->queue_size; i++) {
Bart Van Assche509c07b2014-10-30 14:48:30 +01002230 struct srp_iu *iu = ch->rx_ring[i];
2231
2232 ret = srp_post_recv(ch, iu);
David Dillow961e0be2011-01-14 17:32:07 -05002233 if (ret)
2234 goto error_free;
2235 }
2236
2237 qp_attr->qp_state = IB_QPS_RTS;
2238 ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
2239 if (ret)
2240 goto error_free;
2241
Bart Van Asschec9b03c12011-09-03 09:34:48 +02002242 target->rq_tmo_jiffies = srp_compute_rq_tmo(qp_attr, attr_mask);
2243
Bart Van Assche509c07b2014-10-30 14:48:30 +01002244 ret = ib_modify_qp(ch->qp, qp_attr, attr_mask);
David Dillow961e0be2011-01-14 17:32:07 -05002245 if (ret)
2246 goto error_free;
2247
2248 ret = ib_send_cm_rtu(cm_id, NULL, 0);
2249
2250error_free:
2251 kfree(qp_attr);
2252
2253error:
Bart Van Assche509c07b2014-10-30 14:48:30 +01002254 ch->status = ret;
David Dillow961e0be2011-01-14 17:32:07 -05002255}
2256
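/**
 * srp_cm_rej_handler() - translate a connection rejection into ch->status
 * @cm_id: Connection manager ID.
 * @event: CM event that carries the rejection reason.
 * @ch: RDMA channel for which the connection was rejected.
 *
 * Redirect rejections update the path information in @ch so that the
 * connection attempt can be retried.
 */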
Roland Dreieraef9ec32005-11-02 14:07:13 -08002257static void srp_cm_rej_handler(struct ib_cm_id *cm_id,
2258 struct ib_cm_event *event,
Bart Van Assche509c07b2014-10-30 14:48:30 +01002259 struct srp_rdma_ch *ch)
Roland Dreieraef9ec32005-11-02 14:07:13 -08002260{
Bart Van Assche509c07b2014-10-30 14:48:30 +01002261 struct srp_target_port *target = ch->target;
David Dillow7aa54bd2008-01-07 18:23:41 -05002262 struct Scsi_Host *shost = target->scsi_host;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002263 struct ib_class_port_info *cpi;
2264 int opcode;
2265
2266 switch (event->param.rej_rcvd.reason) {
2267 case IB_CM_REJ_PORT_CM_REDIRECT:
2268 cpi = event->param.rej_rcvd.ari;
Bart Van Assche509c07b2014-10-30 14:48:30 +01002269 ch->path.dlid = cpi->redirect_lid;
2270 ch->path.pkey = cpi->redirect_pkey;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002271 cm_id->remote_cm_qpn = be32_to_cpu(cpi->redirect_qp) & 0x00ffffff;
Bart Van Assche509c07b2014-10-30 14:48:30 +01002272 memcpy(ch->path.dgid.raw, cpi->redirect_gid, 16);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002273
Bart Van Assche509c07b2014-10-30 14:48:30 +01002274 ch->status = ch->path.dlid ?
Roland Dreieraef9ec32005-11-02 14:07:13 -08002275 SRP_DLID_REDIRECT : SRP_PORT_REDIRECT;
2276 break;
2277
2278 case IB_CM_REJ_PORT_REDIRECT:
Roland Dreier5d7cbfd2007-08-03 10:45:18 -07002279 if (srp_target_is_topspin(target)) {
Roland Dreieraef9ec32005-11-02 14:07:13 -08002280 /*
2281 * Topspin/Cisco SRP gateways incorrectly send
2282 * reject reason code 25 when they mean 24
2283 * (port redirect).
2284 */
Bart Van Assche509c07b2014-10-30 14:48:30 +01002285 memcpy(ch->path.dgid.raw,
Roland Dreieraef9ec32005-11-02 14:07:13 -08002286 event->param.rej_rcvd.ari, 16);
2287
David Dillow7aa54bd2008-01-07 18:23:41 -05002288 shost_printk(KERN_DEBUG, shost,
2289 PFX "Topspin/Cisco redirect to target port GID %016llx%016llx\n",
Bart Van Assche509c07b2014-10-30 14:48:30 +01002290 be64_to_cpu(ch->path.dgid.global.subnet_prefix),
2291 be64_to_cpu(ch->path.dgid.global.interface_id));
Roland Dreieraef9ec32005-11-02 14:07:13 -08002292
Bart Van Assche509c07b2014-10-30 14:48:30 +01002293 ch->status = SRP_PORT_REDIRECT;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002294 } else {
David Dillow7aa54bd2008-01-07 18:23:41 -05002295 shost_printk(KERN_WARNING, shost,
2296 " REJ reason: IB_CM_REJ_PORT_REDIRECT\n");
Bart Van Assche509c07b2014-10-30 14:48:30 +01002297 ch->status = -ECONNRESET;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002298 }
2299 break;
2300
2301 case IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID:
David Dillow7aa54bd2008-01-07 18:23:41 -05002302 shost_printk(KERN_WARNING, shost,
2303 " REJ reason: IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID\n");
Bart Van Assche509c07b2014-10-30 14:48:30 +01002304 ch->status = -ECONNRESET;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002305 break;
2306
2307 case IB_CM_REJ_CONSUMER_DEFINED:
2308 opcode = *(u8 *) event->private_data;
2309 if (opcode == SRP_LOGIN_REJ) {
2310 struct srp_login_rej *rej = event->private_data;
2311 u32 reason = be32_to_cpu(rej->reason);
2312
2313 if (reason == SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE)
David Dillow7aa54bd2008-01-07 18:23:41 -05002314 shost_printk(KERN_WARNING, shost,
2315 PFX "SRP_LOGIN_REJ: requested max_it_iu_len too large\n");
Roland Dreieraef9ec32005-11-02 14:07:13 -08002316 else
Bart Van Asschee7ffde02014-03-14 13:52:21 +01002317 shost_printk(KERN_WARNING, shost, PFX
2318 "SRP LOGIN from %pI6 to %pI6 REJECTED, reason 0x%08x\n",
Bart Van Assche747fe002014-10-30 14:48:05 +01002319 target->sgid.raw,
2320 target->orig_dgid.raw, reason);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002321 } else
David Dillow7aa54bd2008-01-07 18:23:41 -05002322 shost_printk(KERN_WARNING, shost,
2323 " REJ reason: IB_CM_REJ_CONSUMER_DEFINED,"
2324 " opcode 0x%02x\n", opcode);
Bart Van Assche509c07b2014-10-30 14:48:30 +01002325 ch->status = -ECONNRESET;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002326 break;
2327
David Dillow9fe4bcf2008-01-08 17:08:52 -05002328 case IB_CM_REJ_STALE_CONN:
2329 shost_printk(KERN_WARNING, shost, " REJ reason: stale connection\n");
Bart Van Assche509c07b2014-10-30 14:48:30 +01002330 ch->status = SRP_STALE_CONN;
David Dillow9fe4bcf2008-01-08 17:08:52 -05002331 break;
2332
Roland Dreieraef9ec32005-11-02 14:07:13 -08002333 default:
David Dillow7aa54bd2008-01-07 18:23:41 -05002334 shost_printk(KERN_WARNING, shost, " REJ reason 0x%x\n",
2335 event->param.rej_rcvd.reason);
Bart Van Assche509c07b2014-10-30 14:48:30 +01002336 ch->status = -ECONNRESET;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002337 }
2338}
2339
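/**
 * srp_cm_handler() - IB connection manager callback
 * @cm_id: Connection manager ID.
 * @event: CM event.
 *
 * Records the outcome of connection-related events in ch->status and
 * completes ch->done so that the thread waiting for the connection attempt
 * can continue.
 */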
2340static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
2341{
Bart Van Assche509c07b2014-10-30 14:48:30 +01002342 struct srp_rdma_ch *ch = cm_id->context;
2343 struct srp_target_port *target = ch->target;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002344 int comp = 0;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002345
2346 switch (event->event) {
2347 case IB_CM_REQ_ERROR:
David Dillow7aa54bd2008-01-07 18:23:41 -05002348 shost_printk(KERN_DEBUG, target->scsi_host,
2349 PFX "Sending CM REQ failed\n");
Roland Dreieraef9ec32005-11-02 14:07:13 -08002350 comp = 1;
Bart Van Assche509c07b2014-10-30 14:48:30 +01002351 ch->status = -ECONNRESET;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002352 break;
2353
2354 case IB_CM_REP_RECEIVED:
2355 comp = 1;
Bart Van Assche509c07b2014-10-30 14:48:30 +01002356 srp_cm_rep_handler(cm_id, event->private_data, ch);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002357 break;
2358
2359 case IB_CM_REJ_RECEIVED:
David Dillow7aa54bd2008-01-07 18:23:41 -05002360 shost_printk(KERN_DEBUG, target->scsi_host, PFX "REJ received\n");
Roland Dreieraef9ec32005-11-02 14:07:13 -08002361 comp = 1;
2362
Bart Van Assche509c07b2014-10-30 14:48:30 +01002363 srp_cm_rej_handler(cm_id, event, ch);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002364 break;
2365
Ishai Rabinovitzb7ac4ab2006-06-17 20:37:32 -07002366 case IB_CM_DREQ_RECEIVED:
David Dillow7aa54bd2008-01-07 18:23:41 -05002367 shost_printk(KERN_WARNING, target->scsi_host,
2368 PFX "DREQ received - connection closed\n");
Bart Van Assche294c8752011-12-25 12:18:12 +00002369 srp_change_conn_state(target, false);
Ishai Rabinovitzb7ac4ab2006-06-17 20:37:32 -07002370 if (ib_send_cm_drep(cm_id, NULL, 0))
David Dillow7aa54bd2008-01-07 18:23:41 -05002371 shost_printk(KERN_ERR, target->scsi_host,
2372 PFX "Sending CM DREP failed\n");
Bart Van Asschec1120f82013-10-26 14:35:08 +02002373 queue_work(system_long_wq, &target->tl_err_work);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002374 break;
2375
2376 case IB_CM_TIMEWAIT_EXIT:
David Dillow7aa54bd2008-01-07 18:23:41 -05002377 shost_printk(KERN_ERR, target->scsi_host,
2378 PFX "connection closed\n");
Bart Van Asscheac72d762014-03-14 13:53:40 +01002379 comp = 1;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002380
Bart Van Assche509c07b2014-10-30 14:48:30 +01002381 ch->status = 0;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002382 break;
2383
Ishai Rabinovitzb7ac4ab2006-06-17 20:37:32 -07002384 case IB_CM_MRA_RECEIVED:
2385 case IB_CM_DREQ_ERROR:
2386 case IB_CM_DREP_RECEIVED:
2387 break;
2388
Roland Dreieraef9ec32005-11-02 14:07:13 -08002389 default:
David Dillow7aa54bd2008-01-07 18:23:41 -05002390 shost_printk(KERN_WARNING, target->scsi_host,
2391 PFX "Unhandled CM event %d\n", event->event);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002392 break;
2393 }
2394
2395 if (comp)
Bart Van Assche509c07b2014-10-30 14:48:30 +01002396 complete(&ch->done);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002397
Roland Dreieraef9ec32005-11-02 14:07:13 -08002398 return 0;
2399}
2400
Jack Wang71444b92013-11-07 11:37:37 +01002401/**
Jack Wang71444b92013-11-07 11:37:37 +01002402 * srp_change_queue_depth - set the device queue depth
2403 * @sdev: scsi device struct
2404 * @qdepth: requested queue depth
Jack Wang71444b92013-11-07 11:37:37 +01002405 *
2406 * Returns queue depth.
2407 */
2408static int
Christoph Hellwigdb5ed4d2014-11-13 15:08:42 +01002409srp_change_queue_depth(struct scsi_device *sdev, int qdepth)
Jack Wang71444b92013-11-07 11:37:37 +01002410{
Christoph Hellwigc40ecc12014-11-13 14:25:11 +01002411 if (!sdev->tagged_supported)
Christoph Hellwig1e6f2412014-11-13 14:27:41 +01002412 qdepth = 1;
Christoph Hellwigdb5ed4d2014-11-13 15:08:42 +01002413 return scsi_change_queue_depth(sdev, qdepth);
Jack Wang71444b92013-11-07 11:37:37 +01002414}
2415
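/**
 * srp_send_tsk_mgmt() - send an SRP task management request
 * @ch: RDMA channel over which the request is sent.
 * @req_tag: Tag of the SRP request the function acts on.
 * @lun: Logical unit number.
 * @func: Task management function, e.g. SRP_TSK_ABORT_TASK.
 *
 * Returns 0 if the request was sent and answered within
 * SRP_ABORT_TIMEOUT_MS and -1 otherwise.
 */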
static int srp_send_tsk_mgmt(struct srp_rdma_ch *ch, u64 req_tag,
			     unsigned int lun, u8 func)
{
	struct srp_target_port *target = ch->target;
	struct srp_rport *rport = target->rport;
	struct ib_device *dev = target->srp_host->srp_dev->dev;
	struct srp_iu *iu;
	struct srp_tsk_mgmt *tsk_mgmt;

	if (!target->connected || target->qp_in_error)
		return -1;

	init_completion(&ch->tsk_mgmt_done);

	/*
	 * Lock the rport mutex to prevent srp_create_ch_ib() from being
	 * invoked while a task management function is being sent.
	 */
	mutex_lock(&rport->mutex);
	spin_lock_irq(&ch->lock);
	iu = __srp_get_tx_iu(ch, SRP_IU_TSK_MGMT);
	spin_unlock_irq(&ch->lock);

	if (!iu) {
		mutex_unlock(&rport->mutex);

		return -1;
	}

	ib_dma_sync_single_for_cpu(dev, iu->dma, sizeof *tsk_mgmt,
				   DMA_TO_DEVICE);
	tsk_mgmt = iu->buf;
	memset(tsk_mgmt, 0, sizeof *tsk_mgmt);

	tsk_mgmt->opcode	= SRP_TSK_MGMT;
	tsk_mgmt->lun		= cpu_to_be64((u64) lun << 48);
	tsk_mgmt->tag		= req_tag | SRP_TAG_TSK_MGMT;
	tsk_mgmt->tsk_mgmt_func = func;
	tsk_mgmt->task_tag	= req_tag;

	ib_dma_sync_single_for_device(dev, iu->dma, sizeof *tsk_mgmt,
				      DMA_TO_DEVICE);
	if (srp_post_send(ch, iu, sizeof(*tsk_mgmt))) {
		srp_put_tx_iu(ch, iu, SRP_IU_TSK_MGMT);
		mutex_unlock(&rport->mutex);

		return -1;
	}
	mutex_unlock(&rport->mutex);

	if (!wait_for_completion_timeout(&ch->tsk_mgmt_done,
					 msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS)))
		return -1;

	return 0;
}

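/*
 * The unique tag returned by blk_mq_unique_tag() encodes the hardware
 * queue number in its upper 16 bits; blk_mq_unique_tag_to_hwq() extracts
 * that number again, which is how the abort handler locates the RDMA
 * channel the command was submitted on.
 */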
static int srp_abort(struct scsi_cmnd *scmnd)
{
	struct srp_target_port *target = host_to_target(scmnd->device->host);
	struct srp_request *req = (struct srp_request *) scmnd->host_scribble;
	u32 tag;
	u16 ch_idx;
	struct srp_rdma_ch *ch;
	int ret;

	shost_printk(KERN_ERR, target->scsi_host, "SRP abort called\n");

	if (!req)
		return SUCCESS;
	tag = blk_mq_unique_tag(scmnd->request);
	ch_idx = blk_mq_unique_tag_to_hwq(tag);
	if (WARN_ON_ONCE(ch_idx >= target->ch_count))
		return SUCCESS;
	ch = &target->ch[ch_idx];
	if (!srp_claim_req(ch, req, NULL, scmnd))
		return SUCCESS;
	shost_printk(KERN_ERR, target->scsi_host,
		     "Sending SRP abort for tag %#x\n", tag);
	if (srp_send_tsk_mgmt(ch, tag, scmnd->device->lun,
			      SRP_TSK_ABORT_TASK) == 0)
		ret = SUCCESS;
	else if (target->rport->state == SRP_RPORT_LOST)
		ret = FAST_IO_FAIL;
	else
		ret = FAILED;
	srp_free_req(ch, req, scmnd, 0);
	scmnd->result = DID_ABORT << 16;
	scmnd->scsi_done(scmnd);

	return ret;
}

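/*
 * SCSI device reset handler: a single SRP LUN RESET is sent on the first
 * channel (a LUN reset acts on the logical unit itself, so one task
 * management IU is assumed to be sufficient for all channels), after
 * which every outstanding request for this device on every channel is
 * completed locally with DID_RESET.
 */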
static int srp_reset_device(struct scsi_cmnd *scmnd)
{
	struct srp_target_port *target = host_to_target(scmnd->device->host);
	struct srp_rdma_ch *ch;
	int i, j;

	shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n");

	ch = &target->ch[0];
	if (srp_send_tsk_mgmt(ch, SRP_TAG_NO_REQ, scmnd->device->lun,
			      SRP_TSK_LUN_RESET))
		return FAILED;
	if (ch->tsk_mgmt_status)
		return FAILED;

	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		for (j = 0; j < target->req_ring_size; ++j) {
			struct srp_request *req = &ch->req_ring[j];

			srp_finish_req(ch, req, scmnd->device, DID_RESET << 16);
		}
	}

	return SUCCESS;
}

static int srp_reset_host(struct scsi_cmnd *scmnd)
{
	struct srp_target_port *target = host_to_target(scmnd->device->host);

	shost_printk(KERN_ERR, target->scsi_host, PFX "SRP reset_host called\n");

	return srp_reconnect_rport(target->rport) == 0 ? SUCCESS : FAILED;
}

static int srp_slave_configure(struct scsi_device *sdev)
{
	struct Scsi_Host *shost = sdev->host;
	struct srp_target_port *target = host_to_target(shost);
	struct request_queue *q = sdev->request_queue;
	unsigned long timeout;

	if (sdev->type == TYPE_DISK) {
		timeout = max_t(unsigned, 30 * HZ, target->rq_tmo_jiffies);
		blk_queue_rq_timeout(q, timeout);
	}

	return 0;
}

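/*
 * sysfs attributes exported per SCSI host: connection identity (id_ext,
 * ioc_guid, service_id, pkey and the GIDs), runtime state such as
 * req_lim (the SRP request-limit credit count, reported as the minimum
 * across all channels), and tuning knobs like comp_vector and
 * tl_retry_count.
 */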
static ssize_t show_id_ext(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "0x%016llx\n",
		       (unsigned long long) be64_to_cpu(target->id_ext));
}

static ssize_t show_ioc_guid(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "0x%016llx\n",
		       (unsigned long long) be64_to_cpu(target->ioc_guid));
}

static ssize_t show_service_id(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "0x%016llx\n",
		       (unsigned long long) be64_to_cpu(target->service_id));
}

static ssize_t show_pkey(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "0x%04x\n", be16_to_cpu(target->pkey));
}

static ssize_t show_sgid(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%pI6\n", target->sgid.raw);
}

static ssize_t show_dgid(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));
	struct srp_rdma_ch *ch = &target->ch[0];

	return sprintf(buf, "%pI6\n", ch->path.dgid.raw);
}

static ssize_t show_orig_dgid(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%pI6\n", target->orig_dgid.raw);
}

static ssize_t show_req_lim(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));
	struct srp_rdma_ch *ch;
	int i, req_lim = INT_MAX;

	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		req_lim = min(req_lim, ch->req_lim);
	}
	return sprintf(buf, "%d\n", req_lim);
}

static ssize_t show_zero_req_lim(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%d\n", target->zero_req_lim);
}

static ssize_t show_local_ib_port(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%d\n", target->srp_host->port);
}

static ssize_t show_local_ib_device(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%s\n", target->srp_host->srp_dev->dev->name);
}

static ssize_t show_ch_count(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%d\n", target->ch_count);
}

static ssize_t show_comp_vector(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%d\n", target->comp_vector);
}

static ssize_t show_tl_retry_count(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%d\n", target->tl_retry_count);
}

static ssize_t show_cmd_sg_entries(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%u\n", target->cmd_sg_cnt);
}

static ssize_t show_allow_ext_sg(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%s\n", target->allow_ext_sg ? "true" : "false");
}

static DEVICE_ATTR(id_ext, S_IRUGO, show_id_ext, NULL);
static DEVICE_ATTR(ioc_guid, S_IRUGO, show_ioc_guid, NULL);
static DEVICE_ATTR(service_id, S_IRUGO, show_service_id, NULL);
static DEVICE_ATTR(pkey, S_IRUGO, show_pkey, NULL);
static DEVICE_ATTR(sgid, S_IRUGO, show_sgid, NULL);
static DEVICE_ATTR(dgid, S_IRUGO, show_dgid, NULL);
static DEVICE_ATTR(orig_dgid, S_IRUGO, show_orig_dgid, NULL);
static DEVICE_ATTR(req_lim, S_IRUGO, show_req_lim, NULL);
static DEVICE_ATTR(zero_req_lim, S_IRUGO, show_zero_req_lim, NULL);
static DEVICE_ATTR(local_ib_port, S_IRUGO, show_local_ib_port, NULL);
static DEVICE_ATTR(local_ib_device, S_IRUGO, show_local_ib_device, NULL);
static DEVICE_ATTR(ch_count, S_IRUGO, show_ch_count, NULL);
static DEVICE_ATTR(comp_vector, S_IRUGO, show_comp_vector, NULL);
static DEVICE_ATTR(tl_retry_count, S_IRUGO, show_tl_retry_count, NULL);
static DEVICE_ATTR(cmd_sg_entries, S_IRUGO, show_cmd_sg_entries, NULL);
static DEVICE_ATTR(allow_ext_sg, S_IRUGO, show_allow_ext_sg, NULL);

static struct device_attribute *srp_host_attrs[] = {
	&dev_attr_id_ext,
	&dev_attr_ioc_guid,
	&dev_attr_service_id,
	&dev_attr_pkey,
	&dev_attr_sgid,
	&dev_attr_dgid,
	&dev_attr_orig_dgid,
	&dev_attr_req_lim,
	&dev_attr_zero_req_lim,
	&dev_attr_local_ib_port,
	&dev_attr_local_ib_device,
	&dev_attr_ch_count,
	&dev_attr_comp_vector,
	&dev_attr_tl_retry_count,
	&dev_attr_cmd_sg_entries,
	&dev_attr_allow_ext_sg,
	NULL
};

static struct scsi_host_template srp_template = {
	.module = THIS_MODULE,
	.name = "InfiniBand SRP initiator",
	.proc_name = DRV_NAME,
	.slave_configure = srp_slave_configure,
	.info = srp_target_info,
	.queuecommand = srp_queuecommand,
	.change_queue_depth = srp_change_queue_depth,
	.change_queue_type = scsi_change_queue_type,
	.eh_abort_handler = srp_abort,
	.eh_device_reset_handler = srp_reset_device,
	.eh_host_reset_handler = srp_reset_host,
	.skip_settle_delay = true,
	.sg_tablesize = SRP_DEF_SG_TABLESIZE,
	.can_queue = SRP_DEFAULT_CMD_SQ_SIZE,
	.this_id = -1,
	.cmd_per_lun = SRP_DEFAULT_CMD_SQ_SIZE,
	.use_clustering = ENABLE_CLUSTERING,
	.shost_attrs = srp_host_attrs,
	.use_blk_tags = 1,
	.track_queue_depth = 1,
};

static int srp_sdev_count(struct Scsi_Host *host)
{
	struct scsi_device *sdev;
	int c = 0;

	shost_for_each_device(sdev, host)
		c++;

	return c;
}

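/*
 * Register the SCSI host and SRP rport and scan for LUNs. The target
 * stays in SRP_TARGET_SCANNING for the duration of the scan so that a
 * transport failure observed meanwhile schedules removal rather than
 * leaving a half-initialized host behind; only a successful scan moves
 * it to SRP_TARGET_LIVE.
 */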
static int srp_add_target(struct srp_host *host, struct srp_target_port *target)
{
	struct srp_rport_identifiers ids;
	struct srp_rport *rport;

	target->state = SRP_TARGET_SCANNING;
	sprintf(target->target_name, "SRP.T10:%016llX",
		(unsigned long long) be64_to_cpu(target->id_ext));

	if (scsi_add_host(target->scsi_host, host->srp_dev->dev->dma_device))
		return -ENODEV;

	memcpy(ids.port_id, &target->id_ext, 8);
	memcpy(ids.port_id + 8, &target->ioc_guid, 8);
	ids.roles = SRP_RPORT_ROLE_TARGET;
	rport = srp_rport_add(target->scsi_host, &ids);
	if (IS_ERR(rport)) {
		scsi_remove_host(target->scsi_host);
		return PTR_ERR(rport);
	}

	rport->lld_data = target;
	target->rport = rport;

	spin_lock(&host->target_lock);
	list_add_tail(&target->list, &host->target_list);
	spin_unlock(&host->target_lock);

	scsi_scan_target(&target->scsi_host->shost_gendev,
			 0, target->scsi_id, SCAN_WILD_CARD, 0);

	if (!target->connected || target->qp_in_error) {
		shost_printk(KERN_INFO, target->scsi_host,
			     PFX "SCSI scan failed - removing SCSI host\n");
		srp_queue_remove_work(target);
		goto out;
	}

	pr_debug(PFX "%s: SCSI scan succeeded - detected %d LUNs\n",
		 dev_name(&target->scsi_host->shost_gendev),
		 srp_sdev_count(target->scsi_host));

	spin_lock_irq(&target->lock);
	if (target->state == SRP_TARGET_SCANNING)
		target->state = SRP_TARGET_LIVE;
	spin_unlock_irq(&target->lock);

out:
	return 0;
}

static void srp_release_dev(struct device *dev)
{
	struct srp_host *host =
		container_of(dev, struct srp_host, dev);

	complete(&host->released);
}

static struct class srp_class = {
	.name = "infiniband_srp",
	.dev_release = srp_release_dev
};

/**
 * srp_conn_unique() - check whether the connection to a target is unique
 * @host:   SRP host.
 * @target: SRP target port.
 */
static bool srp_conn_unique(struct srp_host *host,
			    struct srp_target_port *target)
{
	struct srp_target_port *t;
	bool ret = false;

	if (target->state == SRP_TARGET_REMOVED)
		goto out;

	ret = true;

	spin_lock(&host->target_lock);
	list_for_each_entry(t, &host->target_list, list) {
		if (t != target &&
		    target->id_ext == t->id_ext &&
		    target->ioc_guid == t->ioc_guid &&
		    target->initiator_ext == t->initiator_ext) {
			ret = false;
			break;
		}
	}
	spin_unlock(&host->target_lock);

out:
	return ret;
}

/*
 * Target ports are added by writing
 *
 * id_ext=<SRP ID ext>,ioc_guid=<SRP IOC GUID>,dgid=<dest GID>,
 * pkey=<P_Key>,service_id=<service ID>
 *
 * to the add_target sysfs attribute.
 */
enum {
	SRP_OPT_ERR		= 0,
	SRP_OPT_ID_EXT		= 1 << 0,
	SRP_OPT_IOC_GUID	= 1 << 1,
	SRP_OPT_DGID		= 1 << 2,
	SRP_OPT_PKEY		= 1 << 3,
	SRP_OPT_SERVICE_ID	= 1 << 4,
	SRP_OPT_MAX_SECT	= 1 << 5,
	SRP_OPT_MAX_CMD_PER_LUN	= 1 << 6,
	SRP_OPT_IO_CLASS	= 1 << 7,
	SRP_OPT_INITIATOR_EXT	= 1 << 8,
	SRP_OPT_CMD_SG_ENTRIES	= 1 << 9,
	SRP_OPT_ALLOW_EXT_SG	= 1 << 10,
	SRP_OPT_SG_TABLESIZE	= 1 << 11,
	SRP_OPT_COMP_VECTOR	= 1 << 12,
	SRP_OPT_TL_RETRY_COUNT	= 1 << 13,
	SRP_OPT_QUEUE_SIZE	= 1 << 14,
	SRP_OPT_ALL		= (SRP_OPT_ID_EXT |
				   SRP_OPT_IOC_GUID |
				   SRP_OPT_DGID |
				   SRP_OPT_PKEY |
				   SRP_OPT_SERVICE_ID),
};

static const match_table_t srp_opt_tokens = {
	{ SRP_OPT_ID_EXT,		"id_ext=%s" },
	{ SRP_OPT_IOC_GUID,		"ioc_guid=%s" },
	{ SRP_OPT_DGID,			"dgid=%s" },
	{ SRP_OPT_PKEY,			"pkey=%x" },
	{ SRP_OPT_SERVICE_ID,		"service_id=%s" },
	{ SRP_OPT_MAX_SECT,		"max_sect=%d" },
	{ SRP_OPT_MAX_CMD_PER_LUN,	"max_cmd_per_lun=%d" },
	{ SRP_OPT_IO_CLASS,		"io_class=%x" },
	{ SRP_OPT_INITIATOR_EXT,	"initiator_ext=%s" },
	{ SRP_OPT_CMD_SG_ENTRIES,	"cmd_sg_entries=%u" },
	{ SRP_OPT_ALLOW_EXT_SG,		"allow_ext_sg=%u" },
	{ SRP_OPT_SG_TABLESIZE,		"sg_tablesize=%u" },
	{ SRP_OPT_COMP_VECTOR,		"comp_vector=%u" },
	{ SRP_OPT_TL_RETRY_COUNT,	"tl_retry_count=%u" },
	{ SRP_OPT_QUEUE_SIZE,		"queue_size=%d" },
	{ SRP_OPT_ERR,			NULL }
};

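/*
 * Example (all identifier values below are hypothetical; written as one
 * line - the sysfs directory name follows the "srp-<device>-<port>"
 * pattern set up in srp_add_port()):
 *
 *   echo id_ext=200100a0b8000000,ioc_guid=00a0b80200402bec,\
 *   dgid=fe800000000000000002c90200402bed,pkey=ffff,\
 *   service_id=200100a0b8000000 \
 *       > /sys/class/infiniband_srp/srp-mlx4_0-1/add_target
 */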
static int srp_parse_options(const char *buf, struct srp_target_port *target)
{
	char *options, *sep_opt;
	char *p;
	char dgid[3];
	substring_t args[MAX_OPT_ARGS];
	int opt_mask = 0;
	int token;
	int ret = -EINVAL;
	int i;

	options = kstrdup(buf, GFP_KERNEL);
	if (!options)
		return -ENOMEM;

	sep_opt = options;
	while ((p = strsep(&sep_opt, ",")) != NULL) {
		if (!*p)
			continue;

		token = match_token(p, srp_opt_tokens, args);
		opt_mask |= token;

		switch (token) {
		case SRP_OPT_ID_EXT:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			target->id_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
			kfree(p);
			break;

		case SRP_OPT_IOC_GUID:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			target->ioc_guid = cpu_to_be64(simple_strtoull(p, NULL, 16));
			kfree(p);
			break;

		case SRP_OPT_DGID:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			if (strlen(p) != 32) {
				pr_warn("bad dest GID parameter '%s'\n", p);
				kfree(p);
				goto out;
			}

			for (i = 0; i < 16; ++i) {
				strlcpy(dgid, p + i * 2, sizeof(dgid));
				if (sscanf(dgid, "%hhx",
					   &target->orig_dgid.raw[i]) < 1) {
					ret = -EINVAL;
					kfree(p);
					goto out;
				}
			}
			kfree(p);
			break;

		case SRP_OPT_PKEY:
			if (match_hex(args, &token)) {
				pr_warn("bad P_Key parameter '%s'\n", p);
				goto out;
			}
			target->pkey = cpu_to_be16(token);
			break;

		case SRP_OPT_SERVICE_ID:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			target->service_id = cpu_to_be64(simple_strtoull(p, NULL, 16));
			kfree(p);
			break;

		case SRP_OPT_MAX_SECT:
			if (match_int(args, &token)) {
				pr_warn("bad max sect parameter '%s'\n", p);
				goto out;
			}
			target->scsi_host->max_sectors = token;
			break;

		case SRP_OPT_QUEUE_SIZE:
			if (match_int(args, &token) || token < 1) {
				pr_warn("bad queue_size parameter '%s'\n", p);
				goto out;
			}
			target->scsi_host->can_queue = token;
			target->queue_size = token + SRP_RSP_SQ_SIZE +
					     SRP_TSK_MGMT_SQ_SIZE;
			if (!(opt_mask & SRP_OPT_MAX_CMD_PER_LUN))
				target->scsi_host->cmd_per_lun = token;
			break;

		case SRP_OPT_MAX_CMD_PER_LUN:
			if (match_int(args, &token) || token < 1) {
				pr_warn("bad max cmd_per_lun parameter '%s'\n",
					p);
				goto out;
			}
			target->scsi_host->cmd_per_lun = token;
			break;

		case SRP_OPT_IO_CLASS:
			if (match_hex(args, &token)) {
				pr_warn("bad IO class parameter '%s'\n", p);
				goto out;
			}
			if (token != SRP_REV10_IB_IO_CLASS &&
			    token != SRP_REV16A_IB_IO_CLASS) {
				pr_warn("unknown IO class parameter value %x specified (use %x or %x).\n",
					token, SRP_REV10_IB_IO_CLASS,
					SRP_REV16A_IB_IO_CLASS);
				goto out;
			}
			target->io_class = token;
			break;

		case SRP_OPT_INITIATOR_EXT:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			target->initiator_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
			kfree(p);
			break;

		case SRP_OPT_CMD_SG_ENTRIES:
			if (match_int(args, &token) || token < 1 || token > 255) {
				pr_warn("bad max cmd_sg_entries parameter '%s'\n",
					p);
				goto out;
			}
			target->cmd_sg_cnt = token;
			break;

		case SRP_OPT_ALLOW_EXT_SG:
			if (match_int(args, &token)) {
				pr_warn("bad allow_ext_sg parameter '%s'\n", p);
				goto out;
			}
			target->allow_ext_sg = !!token;
			break;

		case SRP_OPT_SG_TABLESIZE:
			if (match_int(args, &token) || token < 1 ||
			    token > SCSI_MAX_SG_CHAIN_SEGMENTS) {
				pr_warn("bad max sg_tablesize parameter '%s'\n",
					p);
				goto out;
			}
			target->sg_tablesize = token;
			break;

		case SRP_OPT_COMP_VECTOR:
			if (match_int(args, &token) || token < 0) {
				pr_warn("bad comp_vector parameter '%s'\n", p);
				goto out;
			}
			target->comp_vector = token;
			break;

		case SRP_OPT_TL_RETRY_COUNT:
			if (match_int(args, &token) || token < 2 || token > 7) {
				pr_warn("bad tl_retry_count parameter '%s' (must be a number between 2 and 7)\n",
					p);
				goto out;
			}
			target->tl_retry_count = token;
			break;

		default:
			pr_warn("unknown parameter or missing value '%s' in target creation request\n",
				p);
			goto out;
		}
	}

	if ((opt_mask & SRP_OPT_ALL) == SRP_OPT_ALL)
		ret = 0;
	else
		for (i = 0; i < ARRAY_SIZE(srp_opt_tokens); ++i)
			if ((srp_opt_tokens[i].token & SRP_OPT_ALL) &&
			    !(srp_opt_tokens[i].token & opt_mask))
				pr_warn("target creation request is missing parameter '%s'\n",
					srp_opt_tokens[i].pattern);

	if (target->scsi_host->cmd_per_lun > target->scsi_host->can_queue
	    && (opt_mask & SRP_OPT_MAX_CMD_PER_LUN))
		pr_warn("cmd_per_lun = %d > queue_size = %d\n",
			target->scsi_host->cmd_per_lun,
			target->scsi_host->can_queue);

out:
	kfree(options);
	return ret;
}

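/*
 * add_target store handler: parses the option string, allocates at least
 * one RDMA channel per online NUMA node (more when additional completion
 * vectors and CPUs are available), connects each channel and finally
 * registers and scans the SCSI host. A failure on the very first channel
 * aborts target creation; a failure on a later channel merely caps
 * ch_count at the number of channels that did connect.
 */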
static ssize_t srp_create_target(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct srp_host *host =
		container_of(dev, struct srp_host, dev);
	struct Scsi_Host *target_host;
	struct srp_target_port *target;
	struct srp_rdma_ch *ch;
	struct srp_device *srp_dev = host->srp_dev;
	struct ib_device *ibdev = srp_dev->dev;
	int ret, node_idx, node, cpu, i;
	bool multich = false;

	target_host = scsi_host_alloc(&srp_template,
				      sizeof (struct srp_target_port));
	if (!target_host)
		return -ENOMEM;

	target_host->transportt = ib_srp_transport_template;
	target_host->max_channel = 0;
	target_host->max_id = 1;
	target_host->max_lun = SRP_MAX_LUN;
	target_host->max_cmd_len = sizeof ((struct srp_cmd *) (void *) 0L)->cdb;

	target = host_to_target(target_host);

	target->io_class = SRP_REV16A_IB_IO_CLASS;
	target->scsi_host = target_host;
	target->srp_host = host;
	target->lkey = host->srp_dev->mr->lkey;
	target->rkey = host->srp_dev->mr->rkey;
	target->cmd_sg_cnt = cmd_sg_entries;
	target->sg_tablesize = indirect_sg_entries ? : cmd_sg_entries;
	target->allow_ext_sg = allow_ext_sg;
	target->tl_retry_count = 7;
	target->queue_size = SRP_DEFAULT_QUEUE_SIZE;

	/*
	 * Prevent the SCSI host from being removed by srp_remove_target()
	 * before this function returns.
	 */
	scsi_host_get(target->scsi_host);

	mutex_lock(&host->add_target_mutex);

	ret = srp_parse_options(buf, target);
	if (ret)
		goto err;

	ret = scsi_init_shared_tag_map(target_host, target_host->can_queue);
	if (ret)
		goto err;

	target->req_ring_size = target->queue_size - SRP_TSK_MGMT_SQ_SIZE;

	if (!srp_conn_unique(target->srp_host, target)) {
		shost_printk(KERN_INFO, target->scsi_host,
			     PFX "Already connected to target port with id_ext=%016llx;ioc_guid=%016llx;initiator_ext=%016llx\n",
			     be64_to_cpu(target->id_ext),
			     be64_to_cpu(target->ioc_guid),
			     be64_to_cpu(target->initiator_ext));
		ret = -EEXIST;
		goto err;
	}

	if (!srp_dev->has_fmr && !srp_dev->has_fr && !target->allow_ext_sg &&
	    target->cmd_sg_cnt < target->sg_tablesize) {
		pr_warn("No MR pool and no external indirect descriptors, limiting sg_tablesize to cmd_sg_cnt\n");
		target->sg_tablesize = target->cmd_sg_cnt;
	}

	target_host->sg_tablesize = target->sg_tablesize;
	target->indirect_size = target->sg_tablesize *
				sizeof (struct srp_direct_buf);
	target->max_iu_len = sizeof (struct srp_cmd) +
			     sizeof (struct srp_indirect_buf) +
			     target->cmd_sg_cnt * sizeof (struct srp_direct_buf);

	INIT_WORK(&target->tl_err_work, srp_tl_err_work);
	INIT_WORK(&target->remove_work, srp_remove_work);
	spin_lock_init(&target->lock);
	ret = ib_query_gid(ibdev, host->port, 0, &target->sgid);
	if (ret)
		goto err;

	ret = -ENOMEM;
	target->ch_count = max_t(unsigned, num_online_nodes(),
				 min(ch_count ? :
				     min(4 * num_online_nodes(),
					 ibdev->num_comp_vectors),
				     num_online_cpus()));
	target->ch = kcalloc(target->ch_count, sizeof(*target->ch),
			     GFP_KERNEL);
	if (!target->ch)
		goto err;

	node_idx = 0;
	for_each_online_node(node) {
		const int ch_start = (node_idx * target->ch_count /
				      num_online_nodes());
		const int ch_end = ((node_idx + 1) * target->ch_count /
				    num_online_nodes());
		const int cv_start = (node_idx * ibdev->num_comp_vectors /
				      num_online_nodes() + target->comp_vector)
				     % ibdev->num_comp_vectors;
		const int cv_end = ((node_idx + 1) * ibdev->num_comp_vectors /
				    num_online_nodes() + target->comp_vector)
				   % ibdev->num_comp_vectors;
		int cpu_idx = 0;

		for_each_online_cpu(cpu) {
			if (cpu_to_node(cpu) != node)
				continue;
			if (ch_start + cpu_idx >= ch_end)
				continue;
			ch = &target->ch[ch_start + cpu_idx];
			ch->target = target;
			ch->comp_vector = cv_start == cv_end ? cv_start :
				cv_start + cpu_idx % (cv_end - cv_start);
			spin_lock_init(&ch->lock);
			INIT_LIST_HEAD(&ch->free_tx);
			ret = srp_new_cm_id(ch);
			if (ret)
				goto err_disconnect;

			ret = srp_create_ch_ib(ch);
			if (ret)
				goto err_disconnect;

			ret = srp_alloc_req_data(ch);
			if (ret)
				goto err_disconnect;

			ret = srp_connect_ch(ch, multich);
			if (ret) {
				shost_printk(KERN_ERR, target->scsi_host,
					     PFX "Connection %d/%d failed\n",
					     ch_start + cpu_idx,
					     target->ch_count);
				if (node_idx == 0 && cpu_idx == 0) {
					goto err_disconnect;
				} else {
					srp_free_ch_ib(target, ch);
					srp_free_req_data(target, ch);
					target->ch_count = ch - target->ch;
					break;
				}
			}

			multich = true;
			cpu_idx++;
		}
		node_idx++;
	}

	target->scsi_host->nr_hw_queues = target->ch_count;

	ret = srp_add_target(host, target);
	if (ret)
		goto err_disconnect;

	if (target->state != SRP_TARGET_REMOVED) {
		shost_printk(KERN_DEBUG, target->scsi_host, PFX
			     "new target: id_ext %016llx ioc_guid %016llx pkey %04x service_id %016llx sgid %pI6 dgid %pI6\n",
			     be64_to_cpu(target->id_ext),
			     be64_to_cpu(target->ioc_guid),
			     be16_to_cpu(target->pkey),
			     be64_to_cpu(target->service_id),
			     target->sgid.raw, target->orig_dgid.raw);
	}

	ret = count;

out:
	mutex_unlock(&host->add_target_mutex);

	scsi_host_put(target->scsi_host);

	return ret;

err_disconnect:
	srp_disconnect_target(target);

	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		srp_free_ch_ib(target, ch);
		srp_free_req_data(target, ch);
	}

	kfree(target->ch);

err:
	scsi_host_put(target_host);
	goto out;
}

static DEVICE_ATTR(add_target, S_IWUSR, NULL, srp_create_target);

static ssize_t show_ibdev(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct srp_host *host = container_of(dev, struct srp_host, dev);

	return sprintf(buf, "%s\n", host->srp_dev->dev->name);
}

static DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL);

static ssize_t show_port(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct srp_host *host = container_of(dev, struct srp_host, dev);

	return sprintf(buf, "%d\n", host->port);
}

static DEVICE_ATTR(port, S_IRUGO, show_port, NULL);

static struct srp_host *srp_add_port(struct srp_device *device, u8 port)
{
	struct srp_host *host;

	host = kzalloc(sizeof *host, GFP_KERNEL);
	if (!host)
		return NULL;

	INIT_LIST_HEAD(&host->target_list);
	spin_lock_init(&host->target_lock);
	init_completion(&host->released);
	mutex_init(&host->add_target_mutex);
	host->srp_dev = device;
	host->port = port;

	host->dev.class = &srp_class;
	host->dev.parent = device->dev->dma_device;
	dev_set_name(&host->dev, "srp-%s-%d", device->dev->name, port);

	if (device_register(&host->dev))
		goto free_host;
	if (device_create_file(&host->dev, &dev_attr_add_target))
		goto err_class;
	if (device_create_file(&host->dev, &dev_attr_ibdev))
		goto err_class;
	if (device_create_file(&host->dev, &dev_attr_port))
		goto err_class;

	return host;

err_class:
	device_unregister(&host->dev);

free_host:
	kfree(host);

	return NULL;
}

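/*
 * IB client "add" callback: probes the HCA's memory registration
 * capabilities (FMR vs. fast registration work requests), derives the
 * MR page size and the maximum number of pages per MR from the device
 * attributes, allocates a PD and a global DMA MR, and creates one
 * srp_host (with its add_target attribute) per physical port.
 */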
static void srp_add_one(struct ib_device *device)
{
	struct srp_device *srp_dev;
	struct ib_device_attr *dev_attr;
	struct srp_host *host;
	int mr_page_shift, s, e, p;
	u64 max_pages_per_mr;

	dev_attr = kmalloc(sizeof *dev_attr, GFP_KERNEL);
	if (!dev_attr)
		return;

	if (ib_query_device(device, dev_attr)) {
		pr_warn("Query device failed for %s\n", device->name);
		goto free_attr;
	}

	srp_dev = kmalloc(sizeof *srp_dev, GFP_KERNEL);
	if (!srp_dev)
		goto free_attr;

	srp_dev->has_fmr = (device->alloc_fmr && device->dealloc_fmr &&
			    device->map_phys_fmr && device->unmap_fmr);
	srp_dev->has_fr = (dev_attr->device_cap_flags &
			   IB_DEVICE_MEM_MGT_EXTENSIONS);
	if (!srp_dev->has_fmr && !srp_dev->has_fr)
		dev_warn(&device->dev, "neither FMR nor FR is supported\n");

	srp_dev->use_fast_reg = (srp_dev->has_fr &&
				 (!srp_dev->has_fmr || prefer_fr));

	/*
	 * Use the smallest page size supported by the HCA, down to a
	 * minimum of 4096 bytes. We're unlikely to build large sglists
	 * out of smaller entries.
	 */
	mr_page_shift = max(12, ffs(dev_attr->page_size_cap) - 1);
	srp_dev->mr_page_size = 1 << mr_page_shift;
	srp_dev->mr_page_mask = ~((u64) srp_dev->mr_page_size - 1);
	max_pages_per_mr = dev_attr->max_mr_size;
	do_div(max_pages_per_mr, srp_dev->mr_page_size);
	srp_dev->max_pages_per_mr = min_t(u64, SRP_MAX_PAGES_PER_MR,
					  max_pages_per_mr);
	if (srp_dev->use_fast_reg) {
		srp_dev->max_pages_per_mr =
			min_t(u32, srp_dev->max_pages_per_mr,
			      dev_attr->max_fast_reg_page_list_len);
	}
	srp_dev->mr_max_size = srp_dev->mr_page_size *
			       srp_dev->max_pages_per_mr;
	pr_debug("%s: mr_page_shift = %d, dev_attr->max_mr_size = %#llx, dev_attr->max_fast_reg_page_list_len = %u, max_pages_per_mr = %d, mr_max_size = %#x\n",
		 device->name, mr_page_shift, dev_attr->max_mr_size,
		 dev_attr->max_fast_reg_page_list_len,
		 srp_dev->max_pages_per_mr, srp_dev->mr_max_size);

	INIT_LIST_HEAD(&srp_dev->dev_list);

	srp_dev->dev = device;
	srp_dev->pd = ib_alloc_pd(device);
	if (IS_ERR(srp_dev->pd))
		goto free_dev;

	srp_dev->mr = ib_get_dma_mr(srp_dev->pd,
				    IB_ACCESS_LOCAL_WRITE |
				    IB_ACCESS_REMOTE_READ |
				    IB_ACCESS_REMOTE_WRITE);
	if (IS_ERR(srp_dev->mr))
		goto err_pd;

	if (device->node_type == RDMA_NODE_IB_SWITCH) {
		s = 0;
		e = 0;
	} else {
		s = 1;
		e = device->phys_port_cnt;
	}

	for (p = s; p <= e; ++p) {
		host = srp_add_port(srp_dev, p);
		if (host)
			list_add_tail(&host->list, &srp_dev->dev_list);
	}

	ib_set_client_data(device, &srp_client, srp_dev);

	goto free_attr;

err_pd:
	ib_dealloc_pd(srp_dev->pd);

free_dev:
	kfree(srp_dev);

free_attr:
	kfree(dev_attr);
}

static void srp_remove_one(struct ib_device *device)
{
	struct srp_device *srp_dev;
	struct srp_host *host, *tmp_host;
	struct srp_target_port *target;

	srp_dev = ib_get_client_data(device, &srp_client);
	if (!srp_dev)
		return;

	list_for_each_entry_safe(host, tmp_host, &srp_dev->dev_list, list) {
		device_unregister(&host->dev);
		/*
		 * Wait for the sysfs entry to go away, so that no new
		 * target ports can be created.
		 */
		wait_for_completion(&host->released);

		/*
		 * Remove all target ports.
		 */
		spin_lock(&host->target_lock);
		list_for_each_entry(target, &host->target_list, list)
			srp_queue_remove_work(target);
		spin_unlock(&host->target_lock);

		/*
		 * Wait for tl_err and target port removal tasks.
		 */
		flush_workqueue(system_long_wq);
		flush_workqueue(srp_remove_wq);

		kfree(host);
	}

	ib_dereg_mr(srp_dev->mr);
	ib_dealloc_pd(srp_dev->pd);

	kfree(srp_dev);
}

static struct srp_function_template ib_srp_transport_functions = {
	.has_rport_state = true,
	.reset_timer_if_blocked = true,
	.reconnect_delay = &srp_reconnect_delay,
	.fast_io_fail_tmo = &srp_fast_io_fail_tmo,
	.dev_loss_tmo = &srp_dev_loss_tmo,
	.reconnect = srp_rport_reconnect,
	.rport_delete = srp_rport_delete,
	.terminate_rport_io = srp_terminate_io,
};

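/*
 * Module init: reconcile the scatter/gather module parameters (the
 * deprecated srp_sg_tablesize is mapped onto cmd_sg_entries, which is
 * clamped to 255, and indirect_sg_entries is raised to at least
 * cmd_sg_entries), then register, in order, the removal workqueue, the
 * SRP transport class, the sysfs class, the SA client and the IB
 * client. The error path unwinds in the reverse order.
 */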
static int __init srp_init_module(void)
{
	int ret;

	BUILD_BUG_ON(FIELD_SIZEOF(struct ib_wc, wr_id) < sizeof(void *));

	if (srp_sg_tablesize) {
		pr_warn("srp_sg_tablesize is deprecated, please use cmd_sg_entries\n");
		if (!cmd_sg_entries)
			cmd_sg_entries = srp_sg_tablesize;
	}

	if (!cmd_sg_entries)
		cmd_sg_entries = SRP_DEF_SG_TABLESIZE;

	if (cmd_sg_entries > 255) {
		pr_warn("Clamping cmd_sg_entries to 255\n");
		cmd_sg_entries = 255;
	}

	if (!indirect_sg_entries)
		indirect_sg_entries = cmd_sg_entries;
	else if (indirect_sg_entries < cmd_sg_entries) {
		pr_warn("Bumping up indirect_sg_entries to match cmd_sg_entries (%u)\n",
			cmd_sg_entries);
		indirect_sg_entries = cmd_sg_entries;
	}

	srp_remove_wq = create_workqueue("srp_remove");
	if (!srp_remove_wq) {
		ret = -ENOMEM;
		goto out;
	}

	ret = -ENOMEM;
	ib_srp_transport_template =
		srp_attach_transport(&ib_srp_transport_functions);
	if (!ib_srp_transport_template)
		goto destroy_wq;

	ret = class_register(&srp_class);
	if (ret) {
		pr_err("couldn't register class infiniband_srp\n");
		goto release_tr;
	}

	ib_sa_register_client(&srp_sa_client);

	ret = ib_register_client(&srp_client);
	if (ret) {
		pr_err("couldn't register IB client\n");
		goto unreg_sa;
	}

out:
	return ret;

unreg_sa:
	ib_sa_unregister_client(&srp_sa_client);
	class_unregister(&srp_class);

release_tr:
	srp_release_transport(ib_srp_transport_template);

destroy_wq:
	destroy_workqueue(srp_remove_wq);
	goto out;
}

static void __exit srp_cleanup_module(void)
{
	ib_unregister_client(&srp_client);
	ib_sa_unregister_client(&srp_sa_client);
	class_unregister(&srp_class);
	srp_release_transport(ib_srp_transport_template);
	destroy_workqueue(srp_remove_wq);
}

module_init(srp_init_module);
module_exit(srp_cleanup_module);